query stringlengths 10 3.85k | ru_query stringlengths 9 3.76k | document stringlengths 17 430k | metadata dict | negatives listlengths 97 100 | negative_scores listlengths 97 100 | document_score stringlengths 5 10 | document_rank stringclasses 2 values |
|---|---|---|---|---|---|---|---|
NewBytesBuffer create a bytes buffer | NewBytesBuffer создает буфер байтов | func NewBytesBuffer(p []byte) *BytesBuffer {
return &BytesBuffer{reader: bytes.NewReader(p)}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func NewBufferBytes(data []byte) *Buffer {\n\treturn &Buffer{refCount: 0, buf: data, length: len(data)}\n}",
"func newBuffer() Buffer {\n\treturn &buffer{\n\t\tbytes: make([]byte, 0, 64),\n\t}\n}",
"func newBuffer(b []byte) *buffer {\n\treturn &buffer{proto.NewBuffer(b), 0}\n}",
"func NewBuffer(inp []byte) *ByteBuffer {\n\tif inp == nil {\n\t\tinp = make([]byte, 0, 512)\n\t}\n\treturn &ByteBuffer{Buffer: bytes.NewBuffer(inp)}\n}",
"func newBuffer(buf []byte) *Buffer {\n\treturn &Buffer{data: buf}\n}",
"func (b *defaultByteBuffer) NewBuffer() ByteBuffer {\n\treturn NewWriterBuffer(256)\n}",
"func NewBuffer() *Buffer {\n\treturn NewBufferWithSize(initialSize)\n}",
"func New(b []byte) *Buffer {\n\treturn &Buffer{b: b}\n}",
"func NewBuffer() Buffer {\n\treturn &buffer{}\n}",
"func NewBuffer(size int) *Buffer {\n\treturn &Buffer{\n\t\tdata: make([]byte, size),\n\t}\n}",
"func GetBytesBuffer(size int) *bytes.Buffer {\n\tswitch {\n\n\tcase size > 0 && size <= 256:\n\t\treturn GetBytesBuffer256()\n\n\tcase size > 256 && size <= 512:\n\t\treturn GetBytesBuffer512()\n\n\tcase size > 512 && size <= 1024:\n\t\treturn GetBytesBuffer1K()\n\n\tcase size > 1024 && size <= 2048:\n\t\treturn GetBytesBuffer2K()\n\n\tcase size > 2048 && size <= 4096:\n\t\treturn GetBytesBuffer4K()\n\n\tcase size > 4096 && size <= 8192:\n\t\treturn GetBytesBuffer8K()\n\n\tcase size > 8192 && size <= 16384:\n\t\treturn GetBytesBuffer16K()\n\n\tcase size > 16384 && size <= 32768:\n\t\treturn GetBytesBuffer32K()\n\n\tcase size > 32768 && size <= 65536:\n\t\treturn GetBytesBuffer64K()\n\n\tcase size > 65536 && size <= 131072:\n\t\treturn GetBytesBuffer128K()\n\n\tcase size > 131072 && size <= 262144:\n\t\treturn GetBytesBuffer256K()\n\n\tcase size > 262144 && size <= 524288:\n\t\treturn GetBytesBuffer512K()\n\n\tcase size > 524288 && size <= 1048576:\n\t\treturn GetBytesBuffer1M()\n\n\tcase size > 1048576 && size <= 2097152:\n\t\treturn GetBytesBuffer2M()\n\n\tcase size > 2097152 && size <= 4194304:\n\t\treturn GetBytesBuffer4M()\n\n\tcase size > 4194304 && size <= 8388608:\n\t\treturn GetBytesBuffer8M()\n\n\tcase size > 8388608 && size <= 16777216:\n\t\treturn GetBytesBuffer16M()\n\n\tdefault:\n\t\treturn bytes.NewBuffer(make([]byte, size))\n\t}\n}",
"func NewBuffer() *Buffer { return globalPool.NewBuffer() }",
"func NewByteSliceBuffer(size uint64) *ByteSliceBuffer {\n\treturn &ByteSliceBuffer{\n\t\tBuffer: New(size, 0),\n\t\tdata: make([][]byte, size),\n\t}\n}",
"func NewBuffer(length int) *Buffer {\n\treturn &Buffer{\n\t\titems: make([]unsafe.Pointer, length),\n\t}\n}",
"func NewAttachedBytes(buffer []byte, offset int, size int) *Buffer {\n result := NewEmptyBuffer()\n result.AttachBytes(buffer, offset, size)\n return result\n}",
"func NewBuffer(size int) *Buffer {\n\tif size <= 0 {\n\t\treturn &Buffer{}\n\t}\n\treturn &Buffer{\n\t\tstorage: make([]byte, size),\n\t\tsize: size,\n\t}\n}",
"func NewBuffer(conn *sqlite.Conn) (*Buffer, error) {\n\treturn NewBufferSize(conn, 16*1024)\n}",
"func newBuffer() *buffer {\n\treturn &buffer{\n\t\tdata: make([]byte, 0),\n\t\tlen: 0,\n\t\tpkg: nil,\n\t\tconn: nil,\n\t\tpkgCh: make(chan *pkg),\n\t\tevCh: make(chan *pkg),\n\t\terrCh: make(chan error, 1),\n\t}\n}",
"func NewSeekableBufferWithBytes(originalData []byte) *SeekableBuffer {\n\tdata := make([]byte, len(originalData))\n\tcopy(data, originalData)\n\n\treturn &SeekableBuffer{\n\t\tdata: data,\n\t}\n}",
"func NewCapacityBuffer(capacity int) *Buffer {\n return &Buffer{data: make([]byte, capacity)}\n}",
"func NewBuffer() *Buffer {\n\treturn &Buffer{Line: []byte{}, Val: make([]byte, 0, 32)}\n}",
"func NewBuffer(e []byte) *Buffer {\n\treturn &Buffer{buf: e, length: len(e)}\n}",
"func NewBuffer(capacity int) Buffer {\n\treturn Buffer{\n\t\tcapacity: capacity,\n\t\tcurrentSize: 0,\n\t\tcontents: map[entity.Key]inventoryapi.PostDeltaBody{},\n\t}\n}",
"func NewByteBuffer(buf []byte) *ByteBuffer {\n\treturn &ByteBuffer{\n\t\tbuf: buf,\n\t}\n}",
"func newBuffer(r io.Reader, offset int64) *buffer {\n\treturn &buffer{\n\t\tr: r,\n\t\toffset: offset,\n\t\tbuf: make([]byte, 0, 4096),\n\t\tallowObjptr: true,\n\t\tallowStream: true,\n\t}\n}",
"func NewBuffer() *Buffer {\n\treturn &Buffer{B: &strings.Builder{}}\n}",
"func NewBuffer(e []byte) *Buffer {\n\treturn &Buffer{buf: e}\n}",
"func NewBuffer(m []byte, skip, size int64) (*Buffer, error) {\n\tb := &Buffer{\n\t\toffset: skip,\n\t\tsize: size,\n\t\tdata: m,\n\t}\n\treturn b, nil\n}",
"func newBuffer(bits uint32) buffer {\n\tvar b buffer\n\tb.data = make([]unsafe.Pointer, 1<<bits)\n\tb.free = 1 << bits\n\tb.mask = 1<<bits - 1\n\tb.bits = bits\n\treturn b\n}",
"func GetBytesBuffer() *bytes.Buffer {\n\tbuf := defaultPool.Get().(*bytes.Buffer)\n\tbufCap := buf.Cap()\n\tif bufCap >= minBufCap && bufCap <= maxBufCap && poolObjectNumber.Load() > 0 {\n\t\tpoolObjectNumber.Dec()\n\t}\n\n\treturn buf\n}",
"func GetBytesBuffer8K() *bytes.Buffer {\n\tif b := getb8K(); b != nil {\n\t\treturn b\n\t}\n\tif p := get8K(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 8192))\n}",
"func NewByteBuffer(n int) *ByteBuffer {\n\tb := new(ByteBuffer)\n\tif n > 0 {\n\t\tb.B = b.getBuf(n)\n\t\tb.size = n\n\t}\n\treturn b\n}",
"func newBuffer(e []byte) *Buffer {\n\tp := buffer_pool.Get().(*Buffer)\n\tp.buf = e\n\treturn p\n}",
"func NewBuffer(size int) *Buffer {\n\treturn &Buffer{size: size, tail: 0, head: 0, buf: make([]byte, size)}\n}",
"func GetBytesBuffer1M() *bytes.Buffer {\n\tif b := getb1M(); b != nil {\n\t\treturn b\n\t}\n\tif p := get1M(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 1048576))\n}",
"func GetBytesBuffer1K() *bytes.Buffer {\n\tif b := getb1K(); b != nil {\n\t\treturn b\n\t}\n\tif p := get1K(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 1024))\n}",
"func createBuffer() *bytes.Buffer {\n\tbuf := bytes.Buffer{}\n\treturn &buf\n}",
"func (p *Pool) NewBuffer() *Buffer {\n\treturn &Buffer{pool: p, bufs: make([][]byte, 0, 128), curBufIdx: -1}\n}",
"func NewBuffer(size int) ([]byte, error) {\n\tvar pool *sync.Pool\n\n\t// return buffer size\n\toriginSize := size\n\n\tif size <= 4096 {\n\t\tsize = 4096\n\t\tpool = &buf4kPool\n\t} else if size <= 16*1024 {\n\t\tsize = 16 * 1024\n\t\tpool = &buf16kPool\n\t} else if size <= 64*1024 {\n\t\tsize = 64 * 1024\n\t\tpool = &buf64kPool\n\t} else {\n\t\t// if message is larger than 64K, return err\n\t\treturn nil, ErrTooLarge\n\t}\n\n\tif v := pool.Get(); v != nil {\n\t\treturn v.([]byte)[:originSize], nil\n\t}\n\n\treturn make([]byte, size)[:originSize], nil\n}",
"func new_buffer(conn *websocket.Conn, ctrl chan struct{}, txqueuelen int) *Buffer {\n\tbuf := Buffer{conn: conn}\n\tbuf.pending = make(chan []byte, txqueuelen)\n\tbuf.ctrl = ctrl\n\tbuf.cache = make([]byte, packet.PACKET_LIMIT+2)\n\treturn &buf\n}",
"func NewLocalBuffer(b bytes.Buffer) *LocalBuffer { return &LocalBuffer{b: b} }",
"func (r *Record) NewBuffer() *bytes.Buffer {\n\tif r.Buffer == nil {\n\t\treturn &bytes.Buffer{}\n\t}\n\n\treturn r.Buffer\n}",
"func GetBytesBuffer16K() *bytes.Buffer {\n\tif b := getb16K(); b != nil {\n\t\treturn b\n\t}\n\tif p := get16K(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 16384))\n}",
"func GetBytesBuffer8M() *bytes.Buffer {\n\tif b := getb8M(); b != nil {\n\t\treturn b\n\t}\n\tif p := get8M(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 8388608))\n}",
"func GetBytesBuffer2K() *bytes.Buffer {\n\tif b := getb2K(); b != nil {\n\t\treturn b\n\t}\n\tif p := get2K(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 2048))\n}",
"func NewBipBuffer(size uint32) *bipbuf_t {\n\treturn &bipbuf_t{\n\t\tsize:size,\n\t\ta_start:0,\n\t\ta_end:0,\n\t\tb_end:0,\n\t\tb_inuse:false,\n\t\tdata:make([]byte, size, size),\n\t}\n}",
"func New(size int) *MsgBuffer {\r\n\r\n\treturn &MsgBuffer{\r\n\t\tb: make([]byte, size),\r\n\t}\r\n}",
"func (b *Buffer) Bytes() []byte { return b.buf[:b.length] }",
"func NewBuffer(player *Player, conn net.Conn, ctrl chan bool) *Buffer {\r\n\tmax := DEFAULT_QUEUE_SIZE\r\n\r\n\tbuf := Buffer{conn: conn}\r\n\tbuf.pending = make(chan []byte, max)\r\n\tbuf.ctrl = ctrl\r\n\tbuf.max = max\r\n\treturn &buf\r\n}",
"func NewBufferSize(conn *sqlite.Conn, pageSize int) (*Buffer, error) {\n\tbb := &Buffer{\n\t\tconn: conn,\n\t\trbuf: make([]byte, 0, pageSize),\n\t\twbuf: make([]byte, 0, pageSize),\n\t}\n\tstmt := conn.Prep(\"CREATE TEMP TABLE IF NOT EXISTS BlobBuffer (blob BLOB);\")\n\tif _, err := stmt.Step(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn bb, nil\n}",
"func newDownloadBuffer(length, sectorSize uint64) downloadBuffer {\n\t// Completion the length multiple of sector size(4MB)\n\tif length%sectorSize != 0 {\n\t\tlength += sectorSize - length%sectorSize\n\t}\n\n\tddb := downloadBuffer{\n\t\tbuf: make([][]byte, 0, length/sectorSize),\n\t\tsectorSize: sectorSize,\n\t}\n\tfor length > 0 {\n\t\tddb.buf = append(ddb.buf, make([]byte, sectorSize))\n\t\tlength -= sectorSize\n\t}\n\treturn ddb\n}",
"func NewBufferWithSize(s int) *Buffer {\n\treturn &Buffer{\n\t\tbuf: make([]byte, cheapPrepend+s),\n\t\treaderIndex: cheapPrepend,\n\t\twriterIndex: cheapPrepend,\n\t}\n}",
"func NewReaderBuffer(buf []byte) ByteBuffer {\n\treturn newReaderByteBuffer(buf)\n}",
"func New(i int) *Buffer {\n\treturn &Buffer{\n\t\tsize: i,\n\t}\n}",
"func newBuffer(br *Reader) (*buffer, error) {\n\tn, err := io.ReadFull(br.r, br.buf[:4])\n\t// br.r.Chunk() is only valid after the call the Read(), so this\n\t// must come after the first read in the record.\n\ttx := br.r.Begin()\n\tdefer func() {\n\t\tbr.lastChunk = tx.End()\n\t}()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n != 4 {\n\t\treturn nil, errors.New(\"bam: invalid record: short block size\")\n\t}\n\tb := &buffer{data: br.buf[:4]}\n\tsize := int(b.readInt32())\n\tif size == 0 {\n\t\treturn nil, io.EOF\n\t}\n\tif size < 0 {\n\t\treturn nil, errors.New(\"bam: invalid record: invalid block size\")\n\t}\n\tif size > cap(br.buf) {\n\t\tb.off, b.data = 0, make([]byte, size)\n\t} else {\n\t\tb.off, b.data = 0, br.buf[:size]\n\t\tb.shared = true\n\t}\n\tn, err = io.ReadFull(br.r, b.data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n != size {\n\t\treturn nil, errors.New(\"bam: truncated record\")\n\t}\n\treturn b, nil\n}",
"func NewBytesEntity(t string, b []byte) *BytesEntity {\n return &BytesEntity{bytes.NewBuffer(b), t}\n}",
"func newChunkedBuffer(inChunkSize int64, outChunkSize int64, flags int) intermediateBuffer {\n\treturn &chunkedBuffer{\n\t\toutChunk: outChunkSize,\n\t\tlength: 0,\n\t\tdata: make([]byte, inChunkSize),\n\t\tflags: flags,\n\t}\n}",
"func makeBuf(max int) []byte {\n\tif max > BufferSize {\n\t\tmax = BufferSize\n\t}\n\treturn make([]byte, max)\n}",
"func GetBytesBuffer4K() *bytes.Buffer {\n\tif b := getb4K(); b != nil {\n\t\treturn b\n\t}\n\tif p := get4K(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 4096))\n}",
"func (p *Buffer) Bytes() []byte { return p.buf }",
"func NewBufferBuilder() *BufferBuilder {\n\treturn &BufferBuilder{}\n}",
"func NewBytes(val []byte) *Bytes {\n\taddr := &Bytes{}\n\taddr.Store(val)\n\treturn addr\n}",
"func GetBytesBuffer32K() *bytes.Buffer {\n\tif b := getb32K(); b != nil {\n\t\treturn b\n\t}\n\tif p := get32K(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 32768))\n}",
"func GetBytesBuffer256() *bytes.Buffer {\n\tif b := getb256(); b != nil {\n\t\treturn b\n\t}\n\tif p := get256(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 256))\n}",
"func newBufferPool() *bufferPool {\n\treturn &bufferPool{&sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\treturn &bytes.Buffer{}\n\t\t},\n\t}}\n}",
"func NewBuffer(conn *net.TCPConn, buffOb chan bool, maxQueueSize int) *Buffer {\n\tsize := maxQueueSize\n\n\tif size == -1 {\n\t\tsize = DEFAULT_QUEUE_SIZE\n\t}\n\n\tbuf := new(Buffer)\n\tbuf.conn = conn\n\tbuf.pending = make(chan []byte, size)\n\tbuf.ctrl = make(chan bool)\n\tbuf.ob = buffOb\n\tbuf.max = size\n\n\treturn buf\n}",
"func NewEmptyBuffer() *Buffer {\n return &Buffer{data: make([]byte, 0)}\n}",
"func NewByteInput(buf []byte) ByteInput {\n\treturn &ByteBuffer{\n\t\tbuf: buf,\n\t\toff: 0,\n\t}\n}",
"func PutBytesBuffer(buf *bytes.Buffer) {\n\tif poolObjectNumber.Load() > maxPoolObjectNum {\n\t\treturn\n\t}\n\n\tbufCap := buf.Cap()\n\tif bufCap < minBufCap || bufCap > maxBufCap {\n\t\treturn\n\t}\n\n\tdefaultPool.Put(buf)\n\tpoolObjectNumber.Add(1)\n}",
"func NewFromBytes(bts []byte) *BytesObj {\n\treturn &BytesObj{\n\t\ttp: isBts,\n\t\tdata: bts,\n\t}\n}",
"func PutBytesBuffer(b *bytes.Buffer) bool {\n\tif b == nil {\n\t\treturn false\n\t}\n\tsize := b.Cap()\n\tswitch {\n\n\tcase size >= 256 && size < 512:\n\t\tb.Reset()\n\t\tputb256(b)\n\n\tcase size >= 512 && size < 1024:\n\t\tb.Reset()\n\t\tputb512(b)\n\n\tcase size >= 1024 && size < 2048:\n\t\tb.Reset()\n\t\tputb1K(b)\n\n\tcase size >= 2048 && size < 4096:\n\t\tb.Reset()\n\t\tputb2K(b)\n\n\tcase size >= 4096 && size < 8192:\n\t\tb.Reset()\n\t\tputb4K(b)\n\n\tcase size >= 8192 && size < 16384:\n\t\tb.Reset()\n\t\tputb8K(b)\n\n\tcase size >= 16384 && size < 32768:\n\t\tb.Reset()\n\t\tputb16K(b)\n\n\tcase size >= 32768 && size < 65536:\n\t\tb.Reset()\n\t\tputb32K(b)\n\n\tcase size >= 65536 && size < 131072:\n\t\tb.Reset()\n\t\tputb64K(b)\n\n\tcase size >= 131072 && size < 262144:\n\t\tb.Reset()\n\t\tputb128K(b)\n\n\tcase size >= 262144 && size < 524288:\n\t\tb.Reset()\n\t\tputb256K(b)\n\n\tcase size >= 524288 && size < 1048576:\n\t\tb.Reset()\n\t\tputb512K(b)\n\n\tcase size >= 1048576 && size < 2097152:\n\t\tb.Reset()\n\t\tputb1M(b)\n\n\tcase size >= 2097152 && size < 4194304:\n\t\tb.Reset()\n\t\tputb2M(b)\n\n\tcase size >= 4194304 && size < 8388608:\n\t\tb.Reset()\n\t\tputb4M(b)\n\n\tcase size >= 8388608 && size < 16777216:\n\t\tb.Reset()\n\t\tputb8M(b)\n\n\tcase size >= 16777216 && size < 33554432:\n\t\tb.Reset()\n\t\tputb16M(b)\n\n\tdefault:\n\t\treturn false\n\t}\n\treturn true\n}",
"func NewBuffer(p producer.Producer, size int, flushInterval time.Duration, logger log.Logger) *Buffer {\n\tflush := 1 * time.Second\n\tif flushInterval != 0 {\n\t\tflush = flushInterval\n\t}\n\n\tb := &Buffer{\n\t\trecords: make([]*data.Record, 0, size),\n\t\tmu: new(sync.Mutex),\n\t\tproducer: p,\n\t\tbufferSize: size,\n\t\tlogger: logger,\n\t\tshouldFlush: make(chan bool, 1),\n\t\tflushInterval: flush,\n\t\tlastFlushed: time.Now(),\n\t}\n\n\tgo b.runFlusher()\n\n\treturn b\n}",
"func (pool *BufferPool) New() (buf *bytes.Buffer) {\n\tselect {\n\tcase buf = <-pool.Buffers:\n\tdefault:\n\t\tbuf = &bytes.Buffer{}\n\t}\n\treturn\n}",
"func GetBytesBuffer16M() *bytes.Buffer {\n\tif b := getb16M(); b != nil {\n\t\treturn b\n\t}\n\tif p := get16M(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 16777216))\n}",
"func (z *Writer) newBuffers() {\n\tbSize := z.Header.BlockMaxSize\n\tbuf := getBuffer(bSize)\n\tz.data = buf[:bSize] // Uncompressed buffer is the first half.\n}",
"func NewBuffer(data string) Buffer {\n\tif len(data) == 0 {\n\t\treturn nilBuffer\n\t}\n\tvar (\n\t\tidx = 0\n\t\tbuf8 = make([]byte, 0, len(data))\n\t\tbuf16 []uint16\n\t\tbuf32 []rune\n\t)\n\tfor idx < len(data) {\n\t\tr, s := utf8.DecodeRuneInString(data[idx:])\n\t\tidx += s\n\t\tif r < utf8.RuneSelf {\n\t\t\tbuf8 = append(buf8, byte(r))\n\t\t\tcontinue\n\t\t}\n\t\tif r <= 0xffff {\n\t\t\tbuf16 = make([]uint16, len(buf8), len(data))\n\t\t\tfor i, v := range buf8 {\n\t\t\t\tbuf16[i] = uint16(v)\n\t\t\t}\n\t\t\tbuf8 = nil\n\t\t\tbuf16 = append(buf16, uint16(r))\n\t\t\tgoto copy16\n\t\t}\n\t\tbuf32 = make([]rune, len(buf8), len(data))\n\t\tfor i, v := range buf8 {\n\t\t\tbuf32[i] = rune(uint32(v))\n\t\t}\n\t\tbuf8 = nil\n\t\tbuf32 = append(buf32, r)\n\t\tgoto copy32\n\t}\n\treturn &asciiBuffer{\n\t\tarr: buf8,\n\t}\ncopy16:\n\tfor idx < len(data) {\n\t\tr, s := utf8.DecodeRuneInString(data[idx:])\n\t\tidx += s\n\t\tif r <= 0xffff {\n\t\t\tbuf16 = append(buf16, uint16(r))\n\t\t\tcontinue\n\t\t}\n\t\tbuf32 = make([]rune, len(buf16), len(data))\n\t\tfor i, v := range buf16 {\n\t\t\tbuf32[i] = rune(uint32(v))\n\t\t}\n\t\tbuf16 = nil\n\t\tbuf32 = append(buf32, r)\n\t\tgoto copy32\n\t}\n\treturn &basicBuffer{\n\t\tarr: buf16,\n\t}\ncopy32:\n\tfor idx < len(data) {\n\t\tr, s := utf8.DecodeRuneInString(data[idx:])\n\t\tidx += s\n\t\tbuf32 = append(buf32, r)\n\t}\n\treturn &supplementalBuffer{\n\t\tarr: buf32,\n\t}\n}",
"func GetBytesBuffer2M() *bytes.Buffer {\n\tif b := getb2M(); b != nil {\n\t\treturn b\n\t}\n\tif p := get2M(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 2097152))\n}",
"func GetBytesBuffer256K() *bytes.Buffer {\n\tif b := getb256K(); b != nil {\n\t\treturn b\n\t}\n\tif p := get256K(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 262144))\n}",
"func NewBuffer(aSlice interface{}) *Buffer {\n return &Buffer{buffer: sliceValue(aSlice, false), handler: valueHandler{}}\n}",
"func (b *Buffer) AllocBytes(n int) []byte {\n\tif n > bigValueSize {\n\t\treturn make([]byte, n)\n\t}\n\tif b.curIdx+n > b.curBufLen {\n\t\tb.addBuf()\n\t}\n\tidx := b.curIdx\n\tb.curIdx += n\n\treturn b.curBuf[idx:b.curIdx:b.curIdx]\n}",
"func GetBytesBuffer512() *bytes.Buffer {\n\tif b := getb512(); b != nil {\n\t\treturn b\n\t}\n\tif p := get512(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 512))\n}",
"func NewBuffer() Buffer {\n\treturn Buffer{\n\t\tCellMap: make(map[image.Point]Cell),\n\t\tArea: image.Rectangle{}}\n}",
"func (pk PacketBufferPtr) ToBuffer() buffer.Buffer {\n\tb := pk.buf.Clone()\n\tb.TrimFront(int64(pk.headerOffset()))\n\treturn b\n}",
"func NewFixedBuffer(w io.Writer, size int64) *FixedBuffer {\n\treturn &FixedBuffer{\n\t\tw: w,\n\t\tbuf: make([]byte, size),\n\t}\n}",
"func NewMsgBuffer(bs []byte) *MsgBuffer {\n\treturn &MsgBuffer{\n\t\t*bytes.NewBuffer(bs),\n\t\tnil,\n\t\tnil}\n}",
"func (b *Buffer) AttachNew() {\n b.data = make([]byte, 0)\n b.size = 0\n b.offset = 0\n}",
"func NewFzBuffer() *FzBuffer {\n\treturn (*FzBuffer)(allocFzBufferMemory(1))\n}",
"func RandomBytes(n int) (*Buffer, error) {\n\tb := make([]byte, n)\n\t_, err := rand.Read(b)\n\t// Note that err == nil only if we read len(b) bytes.\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn New(b), nil\n}",
"func NewPacketBuffer(maxPackets, maxBytes int) *PacketBuffer {\n\treturn &PacketBuffer{\n\t\tcis: make([]gopacket.CaptureInfo, maxPackets),\n\t\toffsets: make([]int, maxPackets),\n\t\tdata: make([]byte, maxBytes),\n\t}\n}",
"func newBlockBuffer(blockSize int64) *blockBuffer {\n\treturn &blockBuffer{\n\t\tblockSize: blockSize,\n\t\tsgl: glMem2.NewSGL(blockSize, blockSize),\n\t\tvalid: false,\n\t}\n}",
"func GetBytesBuffer4M() *bytes.Buffer {\n\tif b := getb4M(); b != nil {\n\t\treturn b\n\t}\n\tif p := get4M(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 4194304))\n}",
"func GetBytesBuffer512K() *bytes.Buffer {\n\tif b := getb512K(); b != nil {\n\t\treturn b\n\t}\n\tif p := get512K(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 524288))\n}",
"func NewMessageBuffer(buf []byte, l int) *Buffer {\n\treturn &Buffer{\n\t\tbuf: buf,\n\t\tl: l,\n\t\tpos: 0,\n\t}\n}",
"func newSafeBuffer() *safeBuffer {\n\treturn &safeBuffer{\n\t\tbuf: bytes.NewBuffer(nil),\n\t}\n}",
"func (b *Buf) Bytes() []byte { return b.b }",
"func New(w, h int) *Buffer {\n\tb := &Buffer{\n\t\tWidth: w,\n\t\tHeight: h,\n\t\tCursor: NewCursor(0, 0),\n\t\tTiles: make([]*Tile, w*h),\n\t}\n\tb.Resize(w, h)\n\treturn b\n}",
"func newSafeBuffer(bufsize int) ([]byte, error) {\n\t// Max BSON document size is 16MB.\n\t// https://docs.mongodb.com/manual/reference/limits/\n\t// For simplicity, bound buffer size at 32MB so that headers and so on fit\n\t// too.\n\t// TODO: Can you put multiple large documents in one insert or reply and\n\t// exceed this limit?\n\tif (bufsize < 0) || (bufsize > 32*1024*1024) {\n\t\treturn nil, fmt.Errorf(\"Invalid buffer size %d\", bufsize)\n\t}\n\treturn make([]byte, bufsize), nil\n}",
"func NewDownloadDestinationBuffer(length, pieceSize uint64) downloadDestinationBuffer {\n\t// Round length up to next multiple of SectorSize.\n\tif length%pieceSize != 0 {\n\t\tlength += pieceSize - length%pieceSize\n\t}\n\t// Create buffer\n\tddb := downloadDestinationBuffer{\n\t\tbuf: make([][]byte, 0, length/pieceSize),\n\t\tpieceSize: pieceSize,\n\t}\n\tfor length > 0 {\n\t\tddb.buf = append(ddb.buf, make([]byte, pieceSize))\n\t\tlength -= pieceSize\n\t}\n\treturn ddb\n}",
"func NewBufferPool(alloc int) *BufferPool {\n\treturn &BufferPool{\n\t\talloc: alloc,\n\t\tpool: &sync.Pool{\n\t\t\tNew: func() interface{} {\n\t\t\t\treturn bytes.NewBuffer(make([]byte, 0, alloc))\n\t\t\t},\n\t\t},\n\t}\n}",
"func NewBuffer(reader io.Reader, size int64, path string, cursorPosition []string) *Buffer {\n\tb := new(Buffer)\n\tb.LineArray = NewLineArray(size, reader)\n\n\tb.Settings = DefaultLocalSettings()\n\t//\tfor k, v := range globalSettings {\n\t//\t\tif _, ok := b.Settings[k]; ok {\n\t//\t\t\tb.Settings[k] = v\n\t//\t\t}\n\t//\t}\n\n\tif fileformat == 1 {\n\t\tb.Settings[\"fileformat\"] = \"unix\"\n\t} else if fileformat == 2 {\n\t\tb.Settings[\"fileformat\"] = \"dos\"\n\t}\n\n\tb.Path = path\n\n\tb.EventHandler = NewEventHandler(b)\n\n\tb.update()\n\n\tb.Cursor = Cursor{\n\t\tLoc: Loc{0, 0},\n\t\tbuf: b,\n\t}\n\n\t//InitLocalSettings(b)\n\n\tb.cursors = []*Cursor{&b.Cursor}\n\n\treturn b\n}"
] | [
"0.81802195",
"0.7357088",
"0.72746956",
"0.7250504",
"0.7194273",
"0.7021639",
"0.69860196",
"0.6953487",
"0.6906288",
"0.69045806",
"0.6828925",
"0.6827577",
"0.6809075",
"0.67834884",
"0.6779053",
"0.6775601",
"0.67704606",
"0.6725194",
"0.67095757",
"0.66958207",
"0.6691062",
"0.6684943",
"0.6684761",
"0.6659025",
"0.6651296",
"0.66442823",
"0.6627951",
"0.6584847",
"0.657895",
"0.6571579",
"0.65598017",
"0.6545102",
"0.6489962",
"0.64597857",
"0.6446425",
"0.64435434",
"0.64283895",
"0.6401097",
"0.63996536",
"0.6387383",
"0.6372626",
"0.63686967",
"0.6298527",
"0.6288857",
"0.62699795",
"0.6244727",
"0.6228084",
"0.62120754",
"0.6163779",
"0.6143618",
"0.61399776",
"0.6136768",
"0.6117838",
"0.6112444",
"0.6111166",
"0.6092884",
"0.6084956",
"0.607236",
"0.6048336",
"0.6041352",
"0.60333264",
"0.6022768",
"0.60172385",
"0.6014961",
"0.60046417",
"0.59907866",
"0.5980761",
"0.5976132",
"0.5976125",
"0.59737116",
"0.5957397",
"0.5956998",
"0.5951359",
"0.59509045",
"0.5921426",
"0.5914611",
"0.5893107",
"0.58734125",
"0.58697885",
"0.58470494",
"0.582368",
"0.5822665",
"0.5801679",
"0.579548",
"0.5772699",
"0.5764612",
"0.57625395",
"0.57519287",
"0.57514024",
"0.57473063",
"0.5743033",
"0.57425326",
"0.5740325",
"0.5722956",
"0.56951267",
"0.56828976",
"0.5679171",
"0.5672255",
"0.56716824",
"0.5666306"
] | 0.82989466 | 0 |
Read from the byte buffer | Чтение из буфера байтов | func (bb *BytesBuffer) Read(p []byte) (n int, err error) {
return bb.reader.Read(p)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Read(b []byte) { Reader.Read(b) }",
"func (cb *Buffer) Read(buf []byte) (int, error) {\n\tif buf == nil || len(buf) == 0 {\n\t\treturn 0, fmt.Errorf(\"Target buffer is null or empty\")\n\t}\n\n\ttoRead := min(len(buf), cb.ReadAvailability())\n\n\tlBytes := min(cb.rpos, toRead)\n\tcopy(buf[toRead-lBytes:toRead], cb.buffer[cb.rpos-lBytes:cb.rpos])\n\n\tif toRead > lBytes {\n\t\trBytes := toRead - lBytes\n\t\tcopy(buf[:rBytes], cb.buffer[len(cb.buffer)-rBytes:len(cb.buffer)])\n\t\tcb.rpos = len(cb.buffer) - rBytes\n\t} else {\n\t\tcb.rpos -= lBytes\n\t}\n\n\tcb.full = false\n\treturn toRead, nil\n}",
"func (b *Buffer) Read(reader io.Reader) (error) {\n\tif b.isCompacted {\n\t\tb.isCompacted = false\n\n\t\t// we want to read into the buffer from where it last was,\n\t\tvar slice = b.internal[b.index:]\n\t\tvar length, err = reader.Read(slice)\n\t\tb.index = 0 // start the index over, so reading starts from beginning again\n\t\tb.length += uint32(length) // increment the number of bytes read\n\t\treturn err\n\t}\n\tvar length, err = reader.Read(b.internal)\n\tb.index = 0\n\tb.length = uint32(length)\n\treturn err\n}",
"func ReadBytes(buffer []byte, offset int, size int) []byte {\n return buffer[offset:offset + size]\n}",
"func (r *binaryReader) readBuf(len int) ([]byte, error) {\n\tb := r.buf[:len]\n\tn, err := r.Read(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n != len {\n\t\treturn nil, errors.New(\"TODO failed to read enough bytes\")\n\t}\n\treturn b, nil\n}",
"func ReadByte(buffer []byte, offset int) byte {\n return buffer[offset]\n}",
"func (e *msgpackEncoder) Read(p []byte) (int, error) {\n\treturn e.buffer.Read(p)\n}",
"func (de *Decoder) Read(p []byte) (int, error) {\n\treturn de.buffer.Read(p)\n}",
"func (jbobject *JavaNioCharBuffer) Read(a JavaNioCharBufferInterface) (int, error) {\n\tconv_a := javabind.NewGoToJavaCallable()\n\tif err := conv_a.Convert(a); err != nil {\n\t\tpanic(err)\n\t}\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"read\", javabind.Int, conv_a.Value().Cast(\"java/nio/CharBuffer\"))\n\tif err != nil {\n\t\tvar zero int\n\t\treturn zero, err\n\t}\n\tconv_a.CleanUp()\n\treturn jret.(int), nil\n}",
"func (e *Encoder) Read(b []byte) (int, error) {\n\treturn e.buf.Read(b)\n}",
"func (r *MsgBuffer) ReadByte() (b byte, err error) {\r\n\tif r.i == r.l {\r\n\t\treturn 0, io.EOF\r\n\t}\r\n\tb = r.b[r.i]\r\n\tr.i++\r\n\treturn b, err\r\n}",
"func (b *Buffer) Read(p []byte) (n int, err error) {\n\tbuf := b.Bytes()\n\tif len(buf) == 0 {\n\t\tif len(p) == 0 {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn 0, io.EOF\n\t}\n\n\tn = copy(p, buf)\n\treturn n, nil\n}",
"func (s *safeBuffer) Read(p []byte) (int, error) {\n\ts.Lock()\n\tdefer s.Unlock()\n\treturn s.buf.Read(p)\n}",
"func (r *Reader) ReadBytes(length int) []byte {\n\tif len(r.buffer) <= r.index+length-1 {\n\t\tlog.Panic(\"Error reading []byte: buffer is too small!\")\n\t}\n\n\tvar data = r.buffer[r.index : r.index+length]\n\tr.index += length\n\n\treturn data\n}",
"func (s *Stream) Read(byteCount int) ([]byte, error) {\n\tdata := make([]byte, byteCount)\n\tif _, err := io.ReadFull(s.buffer, data); err != nil {\n\t\treturn []byte{}, err\n\t}\n\treturn data, nil\n}",
"func (b *Body) Read(p []byte) (int, error) {\n\treturn b.buffer.Read(p)\n}",
"func (r *bytesReader) Read(b []byte) (n int, err error) {\n\tif r.index >= int64(len(r.bs)) {\n\t\treturn 0, io.EOF\n\t}\n\tn = copy(b, r.bs[r.index:])\n\tr.index += int64(n)\n\treturn\n}",
"func (b *QueueBuffer) Read(p []byte) (int, error) {\n\tif x := len(*b) - len(p); x >= 0 {\n\t\tn := copy(p, (*b)[x:])\n\t\t*b = (*b)[:x]\n\t\treturn n, nil\n\t}\n\tn := copy(p, *b)\n\t*b = nil\n\treturn n, io.EOF\n}",
"func (b *Buffer) Read(p []byte) (n int, err error) {\n\tb.m.RLock()\n\tdefer b.m.RUnlock()\n\treturn b.b.Read(p)\n}",
"func (e *ObservableEditableBuffer) Read(q0 int, r []rune) (int, error) {\n\treturn e.f.Read(q0, r)\n}",
"func (r *Reader) ReadByte() byte {\n\tif len(r.buffer) <= r.index {\n\t\tlog.Panic(\"Error reading byte: buffer is too small!\")\n\t}\n\n\tvar data = r.buffer[r.index]\n\tr.index++\n\n\treturn data\n}",
"func (b *FixedBuffer) Read(p []byte) (n int, err error) {\n\tif b.r == b.w {\n\t\treturn 0, errReadEmpty\n\t}\n\tn = copy(p, b.buf[b.r:b.w])\n\tb.r += n\n\tif b.r == b.w {\n\t\tb.r = 0\n\t\tb.w = 0\n\t}\n\treturn n, nil\n}",
"func (b *Buffer) ReadByte() (byte, error) {\n\tif b.count == 0 { // no elements exist.\n\t\treturn ' ', errors.New(\"Buffer is empty\")\n\t}\n\tval := b.buf[b.head]\n\tb.count--\n\tb.head++\n\tb.head = b.head % b.size\n\treturn val, nil\n}",
"func (d *videoDecryptor) Read(buf []byte) (int, error) {\n\tn, err := d.Reader.Read(buf)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\txorBuff(n, d.Offset, buf, d.Key1, d.Key2)\n\td.Offset += n\n\treturn n, err\n}",
"func (sb *SeekableBuffer) Read(p []byte) (n int, err error) {\n\tdefer func() {\n\t\tif state := recover(); state != nil {\n\t\t\terr = state.(error)\n\t\t}\n\t}()\n\n\tif sb.position >= len64(sb.data) {\n\t\treturn 0, io.EOF\n\t}\n\n\tn = copy(p, sb.data[sb.position:])\n\tsb.position += int64(n)\n\n\treturn n, nil\n\n}",
"func (r *trackingreader) Read(b []byte) (int, error) {\n\tn, err := r.Reader.Read(b)\n\tr.pos += int64(n)\n\treturn n, err\n}",
"func (p *TBufferedReadTransport) Read(buf []byte) (int, error) {\n\tin, err := p.readBuf.Read(buf)\n\treturn in, thrift.NewTTransportExceptionFromError(err)\n}",
"func (b *SafeBuffer) Read(p []byte) (n int, err error) {\n\tb.m.RLock()\n\tdefer b.m.RUnlock()\n\treturn b.b.Read(p)\n}",
"func (f *FixedBuffer) ReadFrom() (int, error) {\n\treturn f.r.Read(f.buf)\n}",
"func (b *buffer) read(rd io.Reader) (err error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = fmt.Errorf(\"panic reading: %v\", r)\n\t\t\tb.err = err\n\t\t}\n\t}()\n\n\tvar n int\n\tbuf := b.buf[0:b.size]\n\tfor n < b.size {\n\t\tn2, err := rd.Read(buf)\n\t\tn += n2\n\t\tif err != nil {\n\t\t\tb.err = err\n\t\t\tbreak\n\t\t}\n\t\tbuf = buf[n2:]\n\t}\n\tb.buf = b.buf[0:n]\n\tb.offset = 0\n\treturn b.err\n}",
"func (framed *Reader) Read(buffer []byte) (n int, err error) {\n\tframed.mutex.Lock()\n\tdefer framed.mutex.Unlock()\n\n\tvar nb uint16\n\terr = binary.Read(framed.Stream, endianness, &nb)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tn = int(nb)\n\n\tbufferSize := len(buffer)\n\tif n > bufferSize {\n\t\treturn 0, fmt.Errorf(\"Buffer of size %d is too small to hold frame of size %d\", bufferSize, n)\n\t}\n\n\t// Read into buffer\n\tn, err = io.ReadFull(framed.Stream, buffer[:n])\n\treturn\n}",
"func (s *DownloadStream) Read(buf []uint8) (int, error) {\n\t// acquire mutex\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\t// check if closed\n\tif s.closed {\n\t\treturn 0, gridfs.ErrStreamClosed\n\t}\n\n\t// ensure file is loaded\n\terr := s.load()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// check position\n\tif s.position >= s.file.Length {\n\t\treturn 0, io.EOF\n\t}\n\n\t// fill buffer\n\tread := 0\n\tfor read < len(buf) {\n\t\t// check if buffer is empty\n\t\tif len(s.buffer) == 0 {\n\t\t\t// get next chunk\n\t\t\terr = s.next()\n\t\t\tif err == io.EOF {\n\t\t\t\t// only return EOF if no data has been read\n\t\t\t\tif read == 0 {\n\t\t\t\t\treturn 0, io.EOF\n\t\t\t\t}\n\n\t\t\t\treturn read, nil\n\t\t\t} else if err != nil {\n\t\t\t\treturn read, err\n\t\t\t}\n\t\t}\n\n\t\t// copy data\n\t\tn := copy(buf[read:], s.buffer)\n\n\t\t// resize buffer\n\t\ts.buffer = s.buffer[n:]\n\n\t\t// update position\n\t\ts.position += n\n\n\t\t// increment counter\n\t\tread += n\n\t}\n\n\treturn read, nil\n}",
"func (b *Buffer) ReadByte() (byte, error) {\n\tb.mux.Lock()\n\tdefer b.mux.Unlock()\n\n\tif b.dataSize == 0 {\n\t\treturn 0, errors.New(\"Read from empty buffer\")\n\t}\n\n\tresult := b.data[b.tail]\n\tb.advance(&b.tail)\n\tb.dataSize--\n\n\treturn result, nil\n}",
"func (b *Buffer) Read(out []byte) (n int, err error) {\n\tif b.readCursor >= b.Size() {\n\t\t// we read the entire buffer, let's loop back to the beginning\n\t\tb.readCursor = 0\n\t} else if b.readCursor+int64(len(out)) > b.Size() {\n\t\t// we don't have enough data in our buffer to fill the passed buffer\n\t\t// we need to do multiple passes\n\t\tn := copy(out, b.data[b.offset+b.readCursor:])\n\t\tb.readCursor += int64(n)\n\t\t// TMP check, should remove\n\t\tif b.readCursor != b.Size() {\n\t\t\tpanic(fmt.Sprintf(\"off by one much? %d - %d\", b.readCursor, b.Size()))\n\t\t}\n\t\tn2, _ := b.Read(out[n:])\n\t\tb.readCursor += int64(n2)\n\t\treturn int(n + n2), nil\n\t}\n\tn = copy(out, b.data[b.offset+b.readCursor:])\n\treturn\n}",
"func (r *Reader) Read(bs []byte) (int, error) {\n\treturn r.R(0).Read(bs)\n}",
"func (d *Decoder) Read(b []byte) (int, error) {\n\treturn d.r.Read(b)\n}",
"func (c *Conn) Read(b []byte) (int, error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tif len(c.recvRest) > 0 {\n\t\tl := copy(b, c.recvRest)\n\t\tc.recvRest = c.recvRest[l:]\n\t\treturn l, nil\n\t}\n\tp, err := c.recvBuf.Pop()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tl := copy(b, p)\n\tc.recvRest = p[l:]\n\treturn l, nil\n}",
"func (c *poolConn) ReadBuffer(size int) ([]byte, error) {\n\tif c.mustRead == true {\n\t\terr := c.ReadTcpBlock()\n\t\tif err != nil {\n\t\t\tc.err = err\n\t\t\treturn nil, err\n\t\t}\n\t\tc.buffer.index = 0\n\t\tc.mustRead = false\n\t}\n\n\t//if size < c.buffer.size-c.buffer.index, normal stitching\n\t//if c.buffer.size-c.buffer.index < size < c.buffer.capacity-c.buffer.size+c.buffer.index, move usable data in buffer to front\n\t//if size > c.buffer.capacity, directly read the specified size\n\tif size+2 <= c.buffer.size-c.buffer.index {\n\n\t\tif c.buffer.realBuffer[c.buffer.index+size] == '\\r' && c.buffer.realBuffer[c.buffer.index+size+1] == '\\n' {\n\t\t\tcpy_index := c.buffer.index\n\t\t\tc.buffer.index = c.buffer.index + size + 2\n\t\t\tif c.buffer.index >= c.buffer.size {\n\t\t\t\tc.mustRead = true\n\t\t\t}\n\t\t\treturn c.buffer.realBuffer[cpy_index: cpy_index+size], nil\n\t\t} else {\n\t\t\treturn nil, errors.New(\"ReadBuffer is read wrong!\")\n\t\t}\n\t} else if size+2 <= c.buffer.capacity-c.buffer.size+c.buffer.index {\n\t\tc.ReadUnsafeBuffer()\n\t\tif c.buffer.realBuffer[c.buffer.index+size] == '\\r' && c.buffer.realBuffer[c.buffer.index+size+1] == '\\n' {\n\t\t\tc.buffer.index = c.buffer.index + size + 2\n\t\t\tif c.buffer.index >= c.buffer.size {\n\t\t\t\tc.mustRead = true\n\t\t\t}\n\t\t\treturn c.buffer.realBuffer[0:size], nil\n\t\t} else {\n\t\t\treturn nil, errors.New(\"ReadBuffer is read wrong!\")\n\t\t}\n\n\t} else {\n\t\tvar err error\n\t\tbigBuffer := make([]byte, size+2)\n\t\tcopy(bigBuffer, c.buffer.realBuffer[c.buffer.index:])\n\n\t\t//Make the results right , when the BigSize < buffer.capacity\n\t\tif len(bigBuffer) > c.buffer.size-c.buffer.index {\n\t\t\tbigBuffer, err = c.ReadTcpBigBlockLink(bigBuffer)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\t//judge weather the bigBuffer is right\n\t\tif bigBuffer[size] == '\\r' && bigBuffer[size+1] == '\\n' {\n\t\t\tc.buffer.index = c.buffer.index + size + 2\n\t\t\tif 
c.buffer.index >= c.buffer.size {\n\t\t\t\tc.mustRead = true\n\t\t\t}\n\t\t\treturn bigBuffer[:size], nil\n\t\t} else {\n\t\t\treturn nil, errors.New(\"bigBuffer is read wrong!\")\n\t\t}\n\t}\n}",
"func (r *bytesReader) ReadAt(b []byte, offset int64) (n int, err error) {\n\tif offset < 0 {\n\t\treturn 0, errors.New(\"buffer.bytesReader.ReadAt: negative offset\")\n\t}\n\tif offset >= int64(len(r.bs)) {\n\t\treturn 0, io.EOF\n\t}\n\tn = copy(b, r.bs[offset:])\n\tif n < len(b) {\n\t\terr = io.EOF\n\t}\n\treturn\n}",
"func (q *queue) loadReadBuf(b []byte) {\n\tq.readBuf.Write(b)\n}",
"func (r *Reader) Read(p []byte) (n int, err error) {\n\tr.ResetBuf(p)\n\tn, err = r.srcR.Read(r.buf)\n\treturn\n}",
"func (p *atomReader) ReadBytes(b []byte) (int, error) {\n\treturn p.r.Read(b)\n}",
"func (d *Decoder) Read() uint64 {\n\tv := d.buf[d.i]\n\treturn v\n}",
"func (ite *ifdTagEnumerator) ReadBuffer(n int) (buf []byte, err error) {\n\tif n > len(ite.exifReader.rawBuffer) {\n\t\treturn nil, ErrDataLength\n\t}\n\t// Read from underlying exifReader io.ReaderAt interface\n\tn, err = ite.exifReader.ReadAt(ite.exifReader.rawBuffer[:n], int64(ite.offset+ite.ifdOffset))\n\n\tite.offset += uint32(n) // Update reader offset\n\n\treturn ite.exifReader.rawBuffer[:n], err\n}",
"func (p *Stream) ReadBytes() (Bytes, *base.Error) {\n\t// empty bytes\n\tv := p.readFrame[p.readIndex]\n\n\tif v == 192 {\n\t\tif p.CanRead() {\n\t\t\tp.gotoNextReadByteUnsafe()\n\t\t\treturn Bytes{}, nil\n\t\t}\n\t} else if v > 192 && v < 255 {\n\t\tbytesLen := int(v - 192)\n\t\tret := make(Bytes, bytesLen)\n\t\tif p.isSafetyReadNBytesInCurrentFrame(bytesLen + 1) {\n\t\t\tcopy(ret, p.readFrame[p.readIndex+1:])\n\t\t\tp.readIndex += bytesLen + 1\n\t\t\treturn ret, nil\n\t\t} else if p.hasNBytesToRead(bytesLen + 1) {\n\t\t\tcopyBytes := copy(ret, p.readFrame[p.readIndex+1:])\n\t\t\tp.readIndex += copyBytes + 1\n\t\t\tif p.readIndex == streamBlockSize {\n\t\t\t\tp.readSeg++\n\t\t\t\tp.readFrame = *(p.frames[p.readSeg])\n\t\t\t\tp.readIndex = copy(ret[copyBytes:], p.readFrame)\n\t\t\t}\n\t\t\treturn ret, nil\n\t\t}\n\t} else if v == 255 {\n\t\treadStart := p.GetReadPos()\n\t\tbytesLen := -1\n\t\tif p.isSafetyReadNBytesInCurrentFrame(5) {\n\t\t\tb := p.readFrame[p.readIndex:]\n\t\t\tbytesLen = int(uint32(b[1])|\n\t\t\t\t(uint32(b[2])<<8)|\n\t\t\t\t(uint32(b[3])<<16)|\n\t\t\t\t(uint32(b[4])<<24)) - 5\n\t\t\tp.readIndex += 5\n\t\t} else if p.hasNBytesToRead(5) {\n\t\t\tb := p.readNBytesCrossFrameUnsafe(5)\n\t\t\tbytesLen = int(uint32(b[1])|\n\t\t\t\t(uint32(b[2])<<8)|\n\t\t\t\t(uint32(b[3])<<16)|\n\t\t\t\t(uint32(b[4])<<24)) - 5\n\t\t}\n\n\t\tif bytesLen > 62 {\n\t\t\tif p.isSafetyReadNBytesInCurrentFrame(bytesLen) {\n\t\t\t\tret := make(Bytes, bytesLen)\n\t\t\t\tcopy(ret, p.readFrame[p.readIndex:])\n\t\t\t\tp.readIndex += bytesLen\n\t\t\t\treturn ret, nil\n\t\t\t} else if p.hasNBytesToRead(bytesLen) {\n\t\t\t\tret := make(Bytes, bytesLen)\n\t\t\t\treads := 0\n\t\t\t\tfor reads < bytesLen {\n\t\t\t\t\treadLen := copy(ret[reads:], p.readFrame[p.readIndex:])\n\t\t\t\t\treads += readLen\n\t\t\t\t\tp.readIndex += readLen\n\t\t\t\t\tif p.readIndex == streamBlockSize {\n\t\t\t\t\t\tp.gotoNextReadFrameUnsafe()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn ret, 
nil\n\t\t\t}\n\t\t}\n\t\tp.SetReadPos(readStart)\n\t}\n\treturn Bytes{}, base.ErrStream\n}",
"func (c *DecoderReadCloser) Read(b []byte) (int, error) {\n\treturn c.d.Read(b)\n}",
"func (b *Buffer) Read(data []byte, c Cursor) (n int, next Cursor, err error) {\n\tb.mu.RLock()\n\tdefer b.mu.RUnlock()\n\n\tseq, offset := c.seq, c.offset\n\n\tif seq >= b.nextSeq || offset > b.last {\n\t\treturn 0, next, ErrNotArrived\n\t}\n\n\tf := b.frame(offset)\n\tif f.size() == 0 || f.seq() != seq {\n\t\treturn b.readFirst(data)\n\t}\n\n\treturn b.readOffset(data, offset)\n}",
"func (p *Packet) Read(byteCount int) ([]byte, error) {\n\tstartPos := p.readPos\n\tnextPos := startPos + byteCount\n\tif nextPos > len(p.payload) {\n\t\treturn []byte{}, io.EOF\n\t}\n\tp.readPos = nextPos\n\treturn p.payload[startPos:nextPos], nil\n}",
"func (s *Session) Read(key *Key, offset uint64, size uint64) (b []byte, err error) {\n\t// TODO use reflect.SliceHeader and manage data ourselves?\n\tdata, dataSize, err := s.read(key, offset, size)\n\tif data == nil {\n\t\treturn\n\t}\n\tdefer C.free(data)\n\n\tb = C.GoBytes(unsafe.Pointer(uintptr(data)+readOffset), C.int(dataSize)-C.int(readOffset))\n\treturn\n}",
"func (r byteAtATimeReader) Read(out []byte) (int, error) {\n\treturn r.Reader.Read(out[:1])\n}",
"func (s *Stream) Read(b []byte) (int, error) {\n\tlogf(logTypeConnection, \"Reading from stream %v\", s.Id())\n\tif len(s.in) == 0 {\n\t\treturn 0, ErrorWouldBlock\n\t}\n\tif s.in[0].offset > s.readOffset {\n\t\treturn 0, ErrorWouldBlock\n\t}\n\tn := copy(b, s.in[0].data)\n\tif n == len(s.in[0].data) {\n\t\ts.in = s.in[1:]\n\t}\n\ts.readOffset += uint64(n)\n\treturn n, nil\n}",
"func (bc BufConn) Read(b []byte) (int, error) {\n\tif bc.IgnoreRead {\n\t\treturn len(b), nil\n\t}\n\tif bc.OnRead != nil {\n\t\treadBytes := bc.OnRead()\n\t\tcopy(b, readBytes)\n\t\treturn len(b), nil\n\t}\n\treturn bc.Buf.Read(b)\n}",
"func (b *Buffer) Bytes() []byte { return b.buf[:b.length] }",
"func (p *dataPacket) Read(r io.Reader, readBuffer []byte) error {\n\tif _, err := io.ReadFull(r, p.nonce[:]); err != nil {\n\t\treturn err\n\t}\n\n\tvar dataLen int64\n\tif err := binary.Read(r, binary.LittleEndian, &dataLen); err != nil {\n\t\treturn err\n\t}\n\n\t// Try to use the readBuffer where possible to avoid extra memory allocation.\n\tif int64(len(readBuffer)) >= dataLen {\n\t\tp.data = readBuffer[:dataLen]\n\t} else {\n\t\tp.data = make([]byte, dataLen)\n\t}\n\t_, err := io.ReadFull(r, p.data)\n\treturn err\n}",
"func readByte(r io.Reader) (uint8, error) {\n\ttmp := []uint8{0}\n\t_, e := r.Read(tmp)\n\treturn tmp[0], e\n}",
"func Read(b []byte) (n int, err error) {\n\treturn io.ReadFull(r, b)\n}",
"func (p *Port) Read(b []byte) (int, error) {\n\treturn p.f.Read(b)\n}",
"func (this *reader) ioRead(buffer []byte) (n int, err error) {\n\tn, err = this.ioReader.Read(buffer)\n\tif err != nil {\n\t\treturn\n\t}\n\tif n != len(buffer) {\n\t\terr = fmt.Errorf(\"Reading failed. Expected %v bytes but %v was read\",\n\t\t\tlen(buffer), n)\n\t}\n\treturn\n}",
"func (cb *Buffer) ReadByte() (byte, error) {\n\tbuf := make([]byte, 1)\n\tn, err := cb.Read(buf)\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif n == 0 {\n\t\treturn 0, fmt.Errorf(\"Buffer is empty\")\n\t}\n\n\treturn buf[0], nil\n}",
"func (f *messageBytePipe) Read(b []byte) (int, error) {\n\tif f.readEOF {\n\t\treturn 0, io.EOF\n\t}\n\tn, err := f.file.Read(b)\n\tif err == io.EOF {\n\t\t// If this was the result of a zero-byte read, then\n\t\t// it is possible that the read was due to a zero-size\n\t\t// message. Since we are simulating CloseWrite with a\n\t\t// zero-byte message, ensure that all future Read calls\n\t\t// also return EOF.\n\t\tf.readEOF = true\n\t} else if err == windows.ERROR_MORE_DATA {\n\t\t// ERROR_MORE_DATA indicates that the pipe's read mode is message mode\n\t\t// and the message still has more bytes. Treat this as a success, since\n\t\t// this package presents all named pipes as byte streams.\n\t\terr = nil\n\t}\n\treturn n, err\n}",
"func (d *Driver) read() ([]byte, error) {\n\tbuf := make([]byte, 8)\n\tn, err := d.device.Read(buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n != 8 {\n\t\treturn nil, errors.New(\"unexpected read size\")\n\t}\n\t// d.log(\"read\", time.Now(), buf)\n\treturn buf, nil\n}",
"func (w *WatchBuffer) Read(p []byte) (n int, err error) {\n\tif w.closed {\n\t\treturn 0, io.EOF\n\t}\n\tw.read <- p\n\tret := <-w.retc\n\treturn ret.n, ret.e\n}",
"func (r *copyReader) Read(b []byte) (int, error) {\n\tif r.rerr != nil {\n\t\treturn 0, r.rerr\n\t}\n\n\tr.once.Do(r.init)\n\treturn r.rbuf.Read(b)\n}",
"func (console *testConsole) Read(p []byte) (int, error) {\n\n\tif console.isClosed() {\n\t\treturn 0, io.EOF\n\t}\n\n\tconsole.bufMx.RLock()\n\tn := copy(p, console.buf)\n\tconsole.bufMx.RUnlock()\n\n\treturn n, nil\n}",
"func (b *ByteBuffer) GetReadBytes() int64 {\n\treturn int64(b.off)\n}",
"func Read(r io.Reader) ([]byte, error) {\n\tbuf := make([]byte, 4)\n\tif _, err := io.ReadFull(r, buf); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsize := binary.LittleEndian.Uint32(buf)\n\n\tmsg := make([]byte, size)\n\n\t_, err := io.ReadFull(r, msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn msg, err\n}",
"func ReadBuffer(src uint32) {\n\tsyscall.Syscall(gpReadBuffer, 1, uintptr(src), 0, 0)\n}",
"func (in *InBuffer) ReadBytes(n int) []byte {\n\tx := make([]byte, n)\n\tcopy(x, in.Slice(n))\n\treturn x\n}",
"func (rc *CryptoReadCloser) Read(b []byte) (int, error) {\n\tif rc.isClosed {\n\t\treturn 0, io.EOF\n\t}\n\treturn rc.Decrypter.Read(b)\n}",
"func (r *msgReader) readBytes(countI32 int32) []byte {\n\tif r.err != nil {\n\t\treturn nil\n\t}\n\n\tcount := int(countI32)\n\n\tif len(r.msgBody)-r.rp < count {\n\t\tr.fatal(errors.New(\"read past end of message\"))\n\t\treturn nil\n\t}\n\n\tb := r.msgBody[r.rp : r.rp+count]\n\tr.rp += count\n\n\tr.cr.KeepLast()\n\n\tif r.shouldLog(LogLevelTrace) {\n\t\tr.log(LogLevelTrace, \"msgReader.readBytes\", \"value\", b, r.msgType, \"rp\", r.rp)\n\t}\n\n\treturn b\n}",
"func (reader *embedFileReader) Read(b []byte) (int, error) {\n\trest := reader.length - reader.offset\n\tif rest <= 0 {\n\t\treturn 0, io.EOF\n\t}\n\n\tn, err := reader.source.ReadAt(b, reader.start+reader.offset)\n\n\tif rest < int64(n) {\n\t\treader.offset += int64(rest)\n\t\treturn int(rest), err\n\t} else {\n\t\treader.offset += int64(n)\n\t\treturn n, err\n\t}\n}",
"func (r *readRune) readByte() (b byte, err error) {\n\tif r.pending > 0 {\n\t\tb = r.pendBuf[0]\n\t\tcopy(r.pendBuf[0:], r.pendBuf[1:])\n\t\tr.pending--\n\t\treturn\n\t}\n\tn, err := io.ReadFull(r.reader, r.pendBuf[:1])\n\tif n != 1 {\n\t\treturn 0, err\n\t}\n\treturn r.pendBuf[0], err\n}",
"func (r *objReader) readByte() byte {\n\tif r.err != nil {\n\t\treturn 0\n\t}\n\tif r.offset >= r.limit {\n\t\tr.error(io.ErrUnexpectedEOF)\n\t\treturn 0\n\t}\n\tb, err := r.b.ReadByte()\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\terr = io.ErrUnexpectedEOF\n\t\t}\n\t\tr.error(err)\n\t\tb = 0\n\t} else {\n\t\tr.offset++\n\t}\n\treturn b\n}",
"func (r *objReader) readByte() byte {\n\tif r.err != nil {\n\t\treturn 0\n\t}\n\tif r.offset >= r.limit {\n\t\tr.error(io.ErrUnexpectedEOF)\n\t\treturn 0\n\t}\n\tb, err := r.b.ReadByte()\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\terr = io.ErrUnexpectedEOF\n\t\t}\n\t\tr.error(err)\n\t\tb = 0\n\t} else {\n\t\tr.offset++\n\t}\n\treturn b\n}",
"func (file *Remote) Read(buf []byte) (int, error) {\n\tfile.m.Lock()\n\tdefer file.m.Unlock()\n\n\tn, err := file.ReadAt(buf, int64(file.pos))\n\tfile.pos += uint64(n)\n\treturn n, err\n}",
"func (c *Conn) Read(p []byte) (n int, err error) {\n\treturn c.bufr.Read(p)\n}",
"func (d *Device) Read(buf []byte) (int, error) {\n\tvar (\n\t\tcbuflen = C.size_t(len(buf))\n\t\tcbuf = C.malloc(cbuflen)\n\t)\n\t// TODO(paultag): Need to check the RV here.\n\trv := C.ibrd(d.descriptor, cbuf, C.long(cbuflen))\n\tif err := status(rv).Err(); err != nil {\n\t\treturn 0, err\n\t}\n\n\tleng := C.ibcntl\n\ti := copy(buf, C.GoBytes(cbuf, C.int(leng)))\n\treturn i, nil\n}",
"func (s *Stream) readByte() (byte, error) {\n\t// since this is readByte functions, therefore, only willRead a byte each time\n\tif err := s.willRead(1); err != nil {\n\t\treturn 0, err\n\t}\n\n\t// pops out a byte from r and return it\n\tb, err := s.r.ReadByte()\n\tif err == io.EOF {\n\t\terr = io.ErrUnexpectedEOF\n\t}\n\treturn b, err\n}",
"func (this *Stats) ReadBytes() int { return int(this.ptr.i_read_bytes) }",
"func readByte(r io.Reader) (ret byte, err error) {\n\tvar be [1]byte\n\tvalBytes := be[0:1]\n\n\tif _, err = io.ReadFull(r, valBytes); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn valBytes[0], nil\n}",
"func (r *VarintReader) ReadByte() (c byte, err error) {\n\tn, err := r.Read(r.buf[:])\n\tif n > 0 {\n\t\tc = r.buf[0]\n\t\tr.bytesRead++\n\t}\n\treturn\n}",
"func (session *UDPMakeSession) Read(p []byte) (n int, err error) {\n\twc := cache{p, 0, make(chan int)}\n\tselect {\n\tcase session.recvChan <- wc:\n\t\tselect {\n\t\tcase n = <-wc.c:\n\t\tcase <-session.quitChan:\n\t\t\tn = -1\n\t\t}\n\tcase <-session.quitChan:\n\t\tn = -1\n\t}\n\t//log.Println(\"real recv\", l, string(b[:l]))\n\tif n == -1 {\n\t\treturn 0, errors.New(\"force quit for read error\")\n\t} else {\n\t\treturn n, nil\n\t}\n}",
"func (session *UDPMakeSession) Read(p []byte) (n int, err error) {\n\twc := cache{p, 0, make(chan int)}\n\tselect {\n\tcase session.recvChan <- wc:\n\t\tselect {\n\t\tcase n = <-wc.c:\n\t\tcase <-session.quitChan:\n\t\t\tn = -1\n\t\t}\n\tcase <-session.quitChan:\n\t\tn = -1\n\t}\n\t//log.Println(\"real recv\", l, string(b[:l]))\n\tif n == -1 {\n\t\treturn 0, errors.New(\"force quit for read error\")\n\t} else {\n\t\treturn n, nil\n\t}\n}",
"func (rwc *noPIReadWriteCloser) Read(p []byte) (n int, err error) {\n\tn, err = rwc.ReadWriteCloser.Read(rwc.rBuffer)\n\tif err == nil && n >= 4 {\n\t\tcopy(p, rwc.rBuffer[4:n])\n\t\tn -= 4\n\t}\n\treturn\n}",
"func (in *InBuffer) ReadBytes(n int) []byte {\n\tx := make([]byte, n, n)\n\tcopy(x, in.Slice(n))\n\treturn x\n}",
"func ReadBytes(r io.Reader, lenBuf []byte) (flag ControlFlag, m *Message, err error) {\n\t_, err = io.ReadAtLeast(r, lenBuf, 4)\n\tif err == io.EOF {\n\t\tflag = CloseChannel\n\t\treturn flag, NewMessage(CloseChannel, nil), err\n\t}\n\tsize := BytesToUint32(lenBuf)\n\tdata := make([]byte, int(size))\n\t_, err = io.ReadAtLeast(r, data, int(size))\n\tif err != nil || size == 0 {\n\t\treturn CloseChannel, NewMessage(CloseChannel, nil), err\n\t}\n\tmessage := LoadMessage(data)\n\t// println(\"read size:\", size, string(message.Data()), \".\")\n\treturn message.Flag(), message, nil\n}",
"func (e *jsonEncoder) Read(p []byte) (int, error) {\n\treturn e.buffer.Read(p)\n}",
"func (decoder *EbpfDecoder) ReadAmountBytes() int {\n\treturn decoder.cursor\n}",
"func (r *RingBuffer) ReadByte() (b byte, err error) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif r.wPos == r.rPos && !r.isFull {\n\t\treturn 0, ErrRingBufEmpty\n\t}\n\n\tb = r.buf[r.rPos]\n\tr.rPos++\n\tif r.rPos == r.size {\n\t\tr.rPos = 0\n\t}\n\n\tr.isFull = false\n\treturn b, nil\n}",
"func (c *RingBuffer) Read(p []byte) (int, error) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\tn, err := c.peek(p)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\treturn c.consume(n), nil\n}",
"func (b *Buffer) Data() []byte { return b.data }",
"func (bpr *binaryReader) Read(order binary.ByteOrder, data interface{}) {\n\tif bpr.err == nil {\n\t\tbpr.err = binary.Read(bpr.file, order, data)\n\t\treturn\n\t}\n}",
"func (r *EncReader) ReadByte() (byte, error) {\n\tif r.err != nil {\n\t\treturn 0, r.err\n\t}\n\tif r.firstRead {\n\t\tr.firstRead = false\n\t\tif _, err := r.readFragment(nil, 0); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tb := r.ciphertextBuffer[0]\n\t\tr.offset = 1\n\t\treturn b, nil\n\t}\n\n\tif r.offset > 0 && r.offset < len(r.ciphertextBuffer) {\n\t\tb := r.ciphertextBuffer[r.offset]\n\t\tr.offset++\n\t\treturn b, nil\n\t}\n\tif r.closed {\n\t\treturn 0, io.EOF\n\t}\n\n\tr.offset = 0\n\tif _, err := r.readFragment(nil, 1); err != nil {\n\t\treturn 0, err\n\t}\n\tb := r.ciphertextBuffer[0]\n\tr.offset = 1\n\treturn b, nil\n}",
"func (d *Device) Read(b []byte) (n int, err error) {\n\t// TODO Check threading iomplication here\n\tfor !d.DataAvailable {\n\t\ttime.Sleep(3 * time.Millisecond)\n\t}\n\td.readLock.Lock()\n\n\tll := d.ReadLength\n\t//\tfmt.Printf(\"RL - %d\\n\", d.ReadLength)\n\tfor i := 0; i < d.ReadLength; i++ {\n\t\tb[i] = d.ReadBuffer[d.ReadPosition]\n\t\td.ReadPosition++\n\t\tif d.ReadPosition >= 1024 {\n\t\t\td.ReadPosition = 0\n\t\t}\n\t}\n\td.ReadLength = 0\n\td.DataAvailable = false\n\td.readLock.Unlock()\n\treturn ll, nil\n\n}",
"func (gc *gcsCache) Read(b []byte) (int, error) {\n\tif gc.closed {\n\t\treturn 0, os.ErrClosed\n\t} else if gc.offset >= gc.size {\n\t\treturn 0, io.EOF\n\t}\n\n\tr, err := gc.oh.NewRangeReader(gc.ctx, gc.offset, -1)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer r.Close()\n\n\tn, err := r.Read(b)\n\tgc.offset += int64(n)\n\n\treturn n, err\n}",
"func readByte(r io.Reader) (byte, error) {\n\tif r, ok := r.(io.ByteReader); ok {\n\t\treturn r.ReadByte()\n\t}\n\tvar v [1]byte\n\t_, err := io.ReadFull(r, v[:])\n\treturn v[0], err\n}",
"func (sr *secureReader) Read(b []byte) (msgLen int, err error) {\n\tsr.mu.Lock()\n\tdefer sr.mu.Unlock()\n\n\tif sr.buf != nil {\n\t\tn, err := sr.buf.Read(b)\n\t\tif err != io.EOF {\n\t\t\treturn n, err\n\t\t}\n\t}\n\n\t// there is no more data in the buffer - read new message frame and create\n\t// new buffer\n\tvar header = make([]byte, 28) // nonce + message size\n\tif _, err := io.ReadFull(sr.r, header); err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar nonce [24]byte\n\tcopy(nonce[:], header[:24])\n\n\tmsgSize := endian.Uint32(header[24:])\n\tencr := make([]byte, msgSize)\n\n\tif _, err := io.ReadFull(sr.r, encr); err != nil {\n\t\treturn 0, err\n\t}\n\n\traw := make([]byte, 0, len(encr))\n\traw, ok := box.Open(raw, encr, &nonce, sr.pub, sr.priv)\n\tif !ok {\n\t\treturn 0, ErrCannotDecrypt\n\t}\n\tsr.buf = bytes.NewBuffer(raw)\n\treturn sr.buf.Read(b)\n}",
"func (conn *Conn) read(n int) ([]byte, error) {\n\tresult, err := conn.brw.Peek(n)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error while peeking read buffer\", err)\n\t\treturn result, err\n\t}\n\n\t_, err = conn.brw.Discard(n)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error while discarding read buffer\", err)\n\t}\n\n\treturn result, err\n}",
"func ReadInt8(buffer []byte, offset int) int8 {\n return int8(buffer[offset])\n}",
"func (ch *IsaChannel) Read(b []byte) (int, error) {\n\treturn 0, nil\n}"
] | [
"0.73192525",
"0.71558887",
"0.71507657",
"0.7125411",
"0.7039935",
"0.70364684",
"0.69876087",
"0.69863915",
"0.69758904",
"0.69751304",
"0.69681305",
"0.69678485",
"0.69638294",
"0.6963164",
"0.6959764",
"0.6959191",
"0.6940648",
"0.6894346",
"0.68853796",
"0.6876418",
"0.6853955",
"0.6834477",
"0.6830219",
"0.68041664",
"0.6802002",
"0.6800396",
"0.67810357",
"0.6778112",
"0.6744033",
"0.67147774",
"0.6699036",
"0.6675579",
"0.6660279",
"0.66576916",
"0.6634687",
"0.6625841",
"0.6587325",
"0.6567331",
"0.656664",
"0.65641993",
"0.65519965",
"0.6539365",
"0.6530928",
"0.6529162",
"0.64821965",
"0.64644796",
"0.6461456",
"0.6460247",
"0.6458302",
"0.64522606",
"0.6443896",
"0.6442053",
"0.6441388",
"0.64397436",
"0.64345366",
"0.64307624",
"0.6420471",
"0.6409642",
"0.64075315",
"0.64039266",
"0.6388982",
"0.63880926",
"0.6375696",
"0.6359575",
"0.6355941",
"0.6355314",
"0.6354012",
"0.63519436",
"0.63444686",
"0.63361293",
"0.6334087",
"0.6332642",
"0.63210726",
"0.63210726",
"0.6319289",
"0.6315657",
"0.63152456",
"0.6313261",
"0.63128847",
"0.6311598",
"0.6310596",
"0.6309267",
"0.6309267",
"0.6305879",
"0.6302962",
"0.6301136",
"0.6292395",
"0.628796",
"0.62874275",
"0.628506",
"0.62776923",
"0.6266359",
"0.6257971",
"0.6254263",
"0.6251801",
"0.62461066",
"0.62460274",
"0.62426245",
"0.62311876",
"0.62306863"
] | 0.75947374 | 0 |
Close the bytes buffer | Закройте буфер байтов | func (bb *BytesBuffer) Close() error {
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (b *bufCloser) Close() error { return nil }",
"func (b *Bytes) Close() error {\n\tif b.p != nil {\n\t\tPut(b.p)\n\t\tb.p = nil\n\t}\n\treturn nil\n}",
"func (bc BufConn) Close() error { return nil }",
"func (d Buf) Close() error {\n\treturn nil\n}",
"func (b *Buffer) Close() {\n\tb.length = 0\n\tb.pool.buffers <- b\n}",
"func (buf *Buffer) Close() error {\n\tbuf.Closed = true\n\treturn nil\n}",
"func (buf *logBuffer) close() {\n\tbuf.Flush()\n\tbuf.file.Close()\n\treturn\n}",
"func (b *Buffer) Close() {\n\tclose(b.in)\n\tb.Flush()\n}",
"func (b *Buffer) Close() error {\n\tb.Unmap()\n\treturn os.Remove(b.filename)\n}",
"func (p *bytesViewer) Close() error { return nil }",
"func (r *bytesReader) Close() error {\n\t_, err := r.Seek(0, io.SeekStart)\n\treturn err\n}",
"func (p *InMemoryExchangeBuffer) Close() int {\n\treturn 0\n}",
"func (b *Buffer) Close() {\n\tatomic.StoreInt32(&b.stop, stop)\n}",
"func (a *Allocator) Close() error {\n\tif err := a.flush(); err != nil {\n\t\treturn err\n\t}\n\n\tbuffer.Put(a.bufp)\n\treturn a.f.Close()\n}",
"func (bbw *Writer) Close() ([]byte, error) {\n\tif bbw.clsdPos >= 0 {\n\t\treturn bbw.buf[:bbw.clsdPos], nil\n\t}\n\tif len(bbw.buf)-bbw.offs < 4 {\n\t\tbbw.clsdPos = bbw.offs\n\t\tbbw.buf = bbw.buf[:bbw.clsdPos]\n\t\treturn bbw.buf, nil\n\t}\n\tbinary.BigEndian.PutUint32(bbw.buf[bbw.offs:], uint32(0xFFFFFFFF))\n\tbbw.clsdPos = bbw.offs\n\tbbw.offs = len(bbw.buf)\n\treturn bbw.buf[:bbw.clsdPos], nil\n}",
"func (s *BufferSink) Close() error {\n\ts.open = false\n\treturn nil\n}",
"func (m *pipeBuffer) Close(err error) {\n\tselect {\n\tcase <-m.done:\n\t\treturn\n\tdefault:\n\t}\n\tm.buf[0].Reset()\n\tm.buf[1].Reset()\n\tm.closeError = err\n\tclose(m.done)\n}",
"func (rr *Reader) Close() {\n\tif rr.Err == nil && len(rr.Bytes()) != 0 {\n\t\trr.Err = errors.New(\"excess bytes in buffer\")\n\t}\n}",
"func (w *ChunkWriter) Close() error {\n\tif w.buffer == nil {\n\t\treturn nil\n\t}\n\n\tw.c = NewChunk(w.buffer.Bytes())\n\tw.buffer = nil\n\treturn nil\n}",
"func (b *CompactableBuffer) Close() error {\n\tif atomic.CompareAndSwapInt32(&b.autoCompactionEnabled, 1, 0) {\n\t\tb.notification <- true\n\t\tb.compactionWaitGroup.Wait()\n\t}\n\treadable := b.readableBuffer()\n\twritable := b.writableBuffer()\n\terr := writable.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif readable != writable {\n\t\treturn readable.Close()\n\t}\n\n\treturn nil\n}",
"func (it *ContentLogBytes32Iterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (s *MemorySink) Close() error { return nil }",
"func (b *Backend) Close() error {\n\tb.clientCancel()\n\terr := b.buf.Close()\n\tif err != nil {\n\t\tb.Logger.Error(\"error closing buffer, continuing with closure of other resources...\", err)\n\t}\n\treturn b.svc.Close()\n}",
"func (rbl *RawBytesLog) Close() error {\n\treturn rbl.logFile.Close()\n}",
"func (p *TBufferedReadTransport) Close() error {\n\treturn nil\n}",
"func (b *BufferWriter) Close() error {\n\treturn b.W.Close()\n}",
"func (d *decompressor) Close() error {\n\tvar err error\n\tfor d.buf.Len() > 0 {\n\t\t_, err = d.writeUncompressed()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\td.closed = true\n\treturn nil\n}",
"func (p *JSONPkt) Close() error {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tp.jsonPkt.Data = base64.StdEncoding.EncodeToString(p.buff.Bytes())\n\treturn nil\n}",
"func (handler *TelnetHandler) Close() {\n\t_ = handler.buffer.Flush()\n\thandler.buffer = nil\n\thandler.telnet.Close()\n}",
"func (s *stream) Close() error {\n\treturn nil\n}",
"func (r *bodyReader) Close() error {\n\tswitch r.contentEncoding {\n\tcase \"\":\n\t\treturn nil\n\tcase \"gzip\":\n\t\treturn r.r.Close()\n\tdefault:\n\t\tpanic(\"Unreachable\")\n\t}\n}",
"func (ch *Channel) Close() {}",
"func (e *encoder) Close() error {\n\t// If there's anything left in the buffer, flush it out\n\tif e.err == nil && e.nbuf > 0 {\n\t\te.enc.Encode(e.out[0:], e.buf[0:e.nbuf])\n\t\tencodedLen := e.enc.EncodedLen(e.nbuf)\n\t\te.nbuf = 0\n\t\t_, e.err = e.w.Write(e.out[0:encodedLen])\n\t}\n\treturn e.err\n}",
"func (c *UDPChannel) Close() {\n\n}",
"func (s *Basememcached_protocolListener) ExitBytes(ctx *BytesContext) {}",
"func (b *profBuf) close() {\n\tif atomic.Load(&b.eof) > 0 {\n\t\tthrow(\"runtime: profBuf already closed\")\n\t}\n\tatomic.Store(&b.eof, 1)\n\tb.wakeupExtra()\n}",
"func (tb *TelemetryBuffer) close() {\n\tif tb.client != nil {\n\t\ttb.client.Close()\n\t}\n\n\tif tb.listener != nil {\n\t\ttb.listener.Close()\n\t}\n\n\tfor _, conn := range tb.connections {\n\t\tif conn != nil {\n\t\t\tconn.Close()\n\t\t}\n\t}\n}",
"func (tb *TelemetryBuffer) close() {\n\tif tb.client != nil {\n\t\ttb.client.Close()\n\t}\n\n\tif tb.listener != nil {\n\t\ttb.listener.Close()\n\t}\n\n\tfor _, conn := range tb.connections {\n\t\tif conn != nil {\n\t\t\tconn.Close()\n\t\t}\n\t}\n}",
"func (r *body) Close() error { return nil }",
"func (b *blockWriter) Close() error {\n\tif b.err != nil {\n\t\treturn b.err\n\t}\n\n\t// precondition: b.buf[0] != 255\n\tn := int(b.buf[0])\n\tif n == 0 {\n\t\tn++ // no short block needed, just terminate\n\t} else {\n\t\tb.buf[n+1] = 0 // append terminator\n\t\tn += 2\n\t}\n\n\tn2, err := b.w.Write(b.buf[0:n])\n\tif n2 < n && err == nil {\n\t\terr = io.ErrShortWrite\n\t}\n\tb.buf[0] = 0\n\tb.err = alreadyClosed\n\treturn err\n}",
"func (c *Conn) Close(b []byte) error {\n\treturn syscall.Close(c.fd)\n}",
"func (r *Reader) Close() error {\n\t//Recycle the buffer if it has been created\n\tif r.buf != nil {\n\t\tr.Session().BufioSource().RecycleReader(r.buf)\n\t\tr.buf = nil\n\t}\n\tvar err error\n\tif r.pipedBody != nil {\n\t\terr = r.pipedBody.Close()\n\t\tr.pipedBody = nil\n\t\tr.rawBody = nil\n\t} else if r.rawBody != nil {\n\t\terr = r.rawBody.Close()\n\t\tr.rawBody = nil\n\t}\n\treturn err\n}",
"func (c *Client) Close() { c.streamLayer.Close() }",
"func (e *encoder) Close() error {\n\t// If there's anything left in the buffer, flush it out\n\tif e.err == nil && e.nbuf > 0 {\n\t\tm := e.enc.Encode(e.out[0:], e.buf[0:e.nbuf])\n\t\t_, e.err = e.w.Write(e.out[0:m])\n\t\te.nbuf = 0\n\t}\n\treturn e.err\n}",
"func (t *transposedChunkWriter) Close() error { return nil }",
"func (s *SeekerWrapper) Close() error { return s.s.Close() }",
"func (v *DCHttpResponse) Close() {\n\tif !v.Raw.Close && v.Raw.Body != nil {\n\t\tv.Raw.Body.Close()\n\t}\n}",
"func ReadCloserClose(rc *zip.ReadCloser,) error",
"func (x *Writer) Close() error {\n\t// Flush any residual data\n\tx.Flush()\n\n\t// Build up an EOF record\n\tvar data = []interface{}{\n\t\tbyte(0), // byte count\n\t\tuint16(0), // standard 16-bit base address\n\t\tbyte(1), // record type (EOF)\n\t}\n\n\t// Write the EOF record; this will be the last\n\t// entity written to the stream.\n\treturn x.emitRecord(data)\n}",
"func (i *Iterator) Close() error {\n\ti.r.SetChunk(nil)\n\treturn i.Error()\n}",
"func (c FinalOutput) Close() {}",
"func (e *Encoder) Close() error {\n\tif e.p != 0 {\n\t\t_, err := e.w.Write(e.buf[:e.p])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\taudioBytes := e.n * int64(e.f.Bytes())\n\t_, err := e.w.Seek(4, os.SEEK_SET)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuf := make([]byte, 4)\n\tbinary.LittleEndian.PutUint32(buf, uint32(audioBytes)+uint32(e.f.chunkSize())+uint32(chunkHdrSize))\n\t_, err = e.w.Write(buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = e.w.Seek(hdrChunkSize+int64(e.f.chunkSize())+chunkHdrSize-4, os.SEEK_SET)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbinary.LittleEndian.PutUint32(buf, uint32(audioBytes))\n\t_, err = e.w.Write(buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn e.w.Close()\n}",
"func (r *Response) Close() error {\n\treleaseByteBuffer(r.payload)\n\tr.payload = nil\n\tresponsePool.Put(r)\n\treturn nil\n}",
"func (rc *CryptoReadCloser) Close() error {\n\trc.isClosed = true\n\treturn rc.Body.Close()\n}",
"func (fss *StreamingService) Close() error { return nil }",
"func (b *BamAt) Close() error {\n\tif b == nil {\n\t\treturn nil\n\t}\n\tif b.Reader != nil {\n\t\tb.Reader.Close()\n\t}\n\tif b.fh != nil {\n\t\treturn b.fh.Close()\n\t}\n\treturn nil\n}",
"func (dec *ZstdDecompressor) Close() {\n\tdec.decoder.Close()\n}",
"func (fwc *Crypto) Close() error {\n\tfwc.Stop()\n\n\tmust.Close(fwc.devM)\n\tmust.Close(fwc.devS)\n\tmust.Close(mempool.FromPtr(unsafe.Pointer(fwc.c.opPool)))\n\tmust.Close(ringbuffer.FromPtr(unsafe.Pointer(fwc.c.input)))\n\teal.Free(unsafe.Pointer(fwc.c))\n\treturn nil\n}",
"func (n *BufferView) Close() {\n\tn.watcher.Close()\n}",
"func (it *LvRecordingLogBytes32Iterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (w *ReadWriter) Close() error {\n\tif w.withErr != nil {\n\t\treturn w.withErr\n\t}\n\tw.b = w.b[0:0]\n\treturn nil\n}",
"func (z *Writer) Close() error {\n\tif !z.Header.done {\n\t\tif err := z.writeHeader(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := z.Flush(); err != nil {\n\t\treturn err\n\t}\n\tif err := z.close(); err != nil {\n\t\treturn err\n\t}\n\tz.freeBuffers()\n\n\tif debugFlag {\n\t\tdebug(\"writing last empty block\")\n\t}\n\tif err := z.writeUint32(0); err != nil {\n\t\treturn err\n\t}\n\tif z.NoChecksum {\n\t\treturn nil\n\t}\n\tchecksum := z.checksum.Sum32()\n\tif debugFlag {\n\t\tdebug(\"stream checksum %x\", checksum)\n\t}\n\treturn z.writeUint32(checksum)\n}",
"func (npw *Writer) Close() error {\n\tif npw.closed {\n\t\treturn nil\n\t}\n\n\tnpw.closed = true\n\n\tblockBufOffset := npw.offset % BigBlockSize\n\n\tif blockBufOffset > 0 {\n\t\tblockIndex := npw.offset / BigBlockSize\n\t\terr := npw.Pool.Downstream.Store(BlockLocation{FileIndex: npw.FileIndex, BlockIndex: blockIndex}, npw.blockBuf[:blockBufOffset])\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (b *ByteArray) Release() {\n\tb.Truncate(0)\n\tif b.rootChunk != emptyLocation {\n\t\treleaseChunk(b.rootChunk)\n\t\tb.rootChunk = emptyLocation\n\t}\n}",
"func FileClose(f *os.File,) error",
"func (c *DecoderReadCloser) Close() {\n\tc.p.Put(c.d)\n}",
"func (c *pbClientCodec) Close() error {\n\treturn c.rwc.Close()\n}",
"func (decoder *QpackDecoder) Close() error {\n\tclose(decoder.available)\n\treturn nil\n}",
"func (ec *Encrypter) Close() {}",
"func (it *LvRecordableStreamLogBytes32Iterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (w *BufferedFileWriter) Close() error {\n\tclose(w.stopChan)\n\tw.lock.Lock()\n\terr := w.buffer.Flush()\n\tw.buffer = nil\n\tif err == nil {\n\t\terr = w.file.Close()\n\t} else {\n\t\te := w.file.Close()\n\t\tif e != nil {\n\t\t\tlogError(e)\n\t\t}\n\t}\n\tw.file = nil\n\tw.lock.Unlock()\n\treturn err\n}",
"func (tb *TelemetryBuffer) Close() {\n\tif tb.client != nil {\n\t\ttb.client.Close()\n\t\ttb.client = nil\n\t}\n\n\tif tb.listener != nil {\n\t\tlog.Logf(\"server close\")\n\t\ttb.listener.Close()\n\t}\n\n\ttb.mutex.Lock()\n\tdefer tb.mutex.Unlock()\n\n\tfor _, conn := range tb.connections {\n\t\tif conn != nil {\n\t\t\tconn.Close()\n\t\t}\n\t}\n\n\ttb.connections = nil\n\ttb.connections = make([]net.Conn, 0)\n}",
"func (tb *TelemetryBuffer) Close() {\n\tif tb.client != nil {\n\t\ttb.client.Close()\n\t\ttb.client = nil\n\t}\n\n\tif tb.listener != nil {\n\t\tlog.Logf(\"server close\")\n\t\ttb.listener.Close()\n\t}\n\n\ttb.mutex.Lock()\n\tdefer tb.mutex.Unlock()\n\n\tfor _, conn := range tb.connections {\n\t\tif conn != nil {\n\t\t\tconn.Close()\n\t\t}\n\t}\n\n\ttb.connections = nil\n\ttb.connections = make([]net.Conn, 0)\n}",
"func (s *MockStream) Close() {\n\tclose(s.recv)\n\tclose(s.sent)\n}",
"func (b *bitWriter) close() error {\n\t// End mark\n\tb.addBits16Clean(1, 1)\n\t// flush until next byte.\n\tb.flushAlign()\n\treturn nil\n}",
"func (file *Remote) Close() error {\n\t_, err := file.client.Send(&Tclunk{\n\t\tFID: file.fid,\n\t})\n\treturn err\n}",
"func (l *Buffer) Close() error {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\tif l.closed {\n\t\treturn errAlreadyClosed\n\t}\n\tl.closed = true\n\treturn nil\n}",
"func (rb *recordBuilder) Close() error {\n\treturn rb.content.Close()\n}",
"func (bl *LogBuffer) Close() error {\n\tbl.ringBuffer.Close()\n\tfor _, msg := range bl.ringBuffer.Drain() {\n\t\tif err := bl.logger.WriteLogMessage(msg); err != nil {\n\t\t\tlogrus.Debugf(\"failed to write log %v when closing with log driver %s\", msg, bl.logger.Name())\n\t\t}\n\t}\n\n\treturn bl.logger.Close()\n}",
"func (i *Injector) close() error {\n\tif err := unix.Close(i.fd); err != nil {\n\t\treturn fmt.Errorf(\"can't close sniffer socket: %w\", err)\n\t}\n\ti.fd = -1\n\treturn nil\n}",
"func (kc *MessageBufferHandle) Close() error {\n\tkc.closeRenameFile()\n\tkc.allDone = true\n\tkc.provider.CloseProducer()\n\treturn nil\n\n}",
"func (it *ContentLogUint256Iterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (self *File_Client) Close() {\n\tself.cc.Close()\n}",
"func (c *CryptoStreamConn) Close() error {\n\treturn nil\n}",
"func (r *Receiver) Close() error { return nil }",
"func (fw *Writer) Close() {\n\tfb := fw.buf\n\tif fb.numRecords > 0 {\n\t\tlog.Debug.Printf(\"%v: Start flush (close)\", fb.label)\n\t\tfw.FlushBuf()\n\t} else {\n\t\tfw.bufFreePool.pool.Put(fb)\n\t\tfw.buf = nil\n\t}\n\tif fw.out != nil {\n\t\tfw.rio.Wait()\n\t\tindex := biopb.PAMFieldIndex{\n\t\t\tMagic: FieldIndexMagic,\n\t\t\tVersion: pamutil.DefaultVersion,\n\t\t\tBlocks: fw.blockIndexes,\n\t\t}\n\t\tlog.Debug.Printf(\"creating index with %d blocks\", len(index.Blocks))\n\t\tdata, err := index.Marshal()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfw.rio.SetTrailer(data)\n\t\tif err := fw.rio.Finish(); err != nil {\n\t\t\tfw.err.Set(err)\n\t\t}\n\t\tif err := fw.out.Close(vcontext.Background()); err != nil {\n\t\t\tfw.err.Set(errors.E(err, fmt.Sprintf(\"fieldio close %s\", fw.out.Name())))\n\t\t}\n\t}\n}",
"func (b *Blob) Close() {\n\tb.Call(\"close\")\n}",
"func (response *S3Response) Close() {\n if ! response.hasBeenClosed {\n response.httpResponse.Body.Close()\n response.hasBeenClosed = true\n }\n}",
"func (vec Vector) Close() error {\n\tif len(vec) > 0 {\n\t\tC.rte_pktmbuf_free_bulk(vec.ptr(), C.uint(len(vec)))\n\t}\n\treturn nil\n}",
"func (it *NodeLogBytes32Iterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}",
"func (w *Writer) Close() error {\n\tif w.lz4Stream != nil {\n\t\tC.LZ4_freeStream(w.lz4Stream)\n\t\tw.lz4Stream = nil\n\t}\n\treturn nil\n}",
"func (c *client) Close() error { return c.c.Close() }",
"func Close() {\n\tglobalBufferSize = 0\n\tglobalSoundBuffer.Release()\n\tglobalPrimarySoundBuffer.Release()\n\tglobalDirectSoundObject.Release()\n}",
"func (b *Backend) Close() error { return nil }",
"func (this *BufferedLog) Close() {\n\tthis.lock.Lock()\n\tdefer this.lock.Unlock()\n\n\tthis.enabled = 0\n\n\tthis.print(\"==== Close log ====\")\n\n\t// stop flush routine\n\tthis.chClose <- nil\n\t<-this.chClose\n\n\t// flush logs\n\tthis.flushLogs()\n\n\t// close file\n\tthis.file.Close()\n}",
"func (w *WriterInterceptor) Close() {\n\tw.mutex.Lock()\n\tif w.closed {\n\t\treturn\n\t}\n\tw.closed = true\n\tw.buf = nil\n\tw.response.Body.Close()\n\tw.mutex.Unlock()\n}",
"func (c *ClientCodec) Close() error {\n\treturn c.w.Close()\n}",
"func (c *Mika) Close() error {\n\tleakyBuf.Put(c.readBuf)\n\treturn c.Conn.Close()\n}",
"func (rbp *requestBodyProgress) Close() error {\n\tif c, ok := rbp.requestBody.(io.Closer); ok {\n\t\treturn c.Close()\n\t}\n\treturn nil\n}",
"func (ref *digest) Close(dst []byte, bits uint8, bcnt uint8) error {\n\tif ln := len(dst); HashSize > ln {\n\t\treturn fmt.Errorf(\"JH Close: dst min length: %d, got %d\", HashSize, ln)\n\t}\n\n\tvar ocnt uintptr\n\tvar buf [128]uint8\n\n\t{\n\t\toff := uint8(0x80) >> bcnt\n\t\tbuf[0] = uint8((bits & -off) | off)\n\t}\n\n\tif ref.ptr == 0 && bcnt == 0 {\n\t\tocnt = 47\n\t} else {\n\t\tocnt = 111 - ref.ptr\n\t}\n\n\tl0 := uint64(bcnt)\n\tl0 += uint64(ref.cnt << 9)\n\tl0 += uint64(ref.ptr << 3)\n\tl1 := uint64(ref.cnt >> 55)\n\n\tencUInt64be(buf[ocnt+1:], l1)\n\tencUInt64be(buf[ocnt+9:], l0)\n\n\tref.Write(buf[:ocnt+17])\n\n\tfor u := uintptr(0); u < 8; u++ {\n\t\tencUInt64le(dst[(u<<3):], ref.h[u+8])\n\t}\n\n\tref.Reset()\n\treturn nil\n}"
] | [
"0.7059667",
"0.68554896",
"0.67279226",
"0.6424046",
"0.6312021",
"0.62552226",
"0.62393683",
"0.6169757",
"0.6165596",
"0.60919654",
"0.60889745",
"0.60793084",
"0.60702556",
"0.5916911",
"0.5909902",
"0.58884627",
"0.5861613",
"0.5854654",
"0.5819335",
"0.58160126",
"0.5748464",
"0.5741663",
"0.5666286",
"0.5656439",
"0.5646195",
"0.5627487",
"0.56194425",
"0.56050897",
"0.560274",
"0.55943185",
"0.55878633",
"0.55797404",
"0.556687",
"0.55528635",
"0.55487865",
"0.554307",
"0.55226743",
"0.55226743",
"0.5520787",
"0.5510258",
"0.5507379",
"0.55014503",
"0.54576194",
"0.54526055",
"0.54467773",
"0.54400337",
"0.5433038",
"0.5431296",
"0.5425678",
"0.5417025",
"0.5402988",
"0.5396188",
"0.5394958",
"0.53743565",
"0.53595364",
"0.53575563",
"0.5340813",
"0.53308874",
"0.5324312",
"0.5323408",
"0.53211576",
"0.5316731",
"0.5316335",
"0.53083724",
"0.5308108",
"0.5307881",
"0.5304959",
"0.5303664",
"0.5302924",
"0.5301183",
"0.52894205",
"0.5279291",
"0.5279291",
"0.52778786",
"0.52733546",
"0.52697635",
"0.5265878",
"0.52616614",
"0.5254219",
"0.52518284",
"0.52463585",
"0.5238767",
"0.5236444",
"0.52213776",
"0.52211636",
"0.5220347",
"0.52168244",
"0.5208994",
"0.5208027",
"0.51984954",
"0.5195715",
"0.5191582",
"0.51897913",
"0.5185563",
"0.5183589",
"0.5175269",
"0.516092",
"0.5160478",
"0.5157224",
"0.514814"
] | 0.76870584 | 0 |
ToEd25519 converts the public key p into a ed25519 key. (x, y) = (sqrt(486664)u/v, (u1)/(u+1)) | ToEd25519 преобразует публичный ключ p в ключ ed25519. (x, y) = (sqrt(486664)u/v, (u1)/(u+1)) | func (p PublicKey) ToEd25519() (ed25519.PublicKey, error) {
a, err := convertMont(p)
if err != nil {
return nil, err
}
return a.Bytes(), nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Ed25519PublicKeyToCurve25519(pk ed25519.PublicKey) []byte {\n\t// ed25519.PublicKey is a little endian representation of the y-coordinate,\n\t// with the most significant bit set based on the sign of the x-coordinate.\n\tbigEndianY := make([]byte, ed25519.PublicKeySize)\n\tfor i, b := range pk {\n\t\tbigEndianY[ed25519.PublicKeySize-i-1] = b\n\t}\n\tbigEndianY[0] &= 0b0111_1111\n\n\t// The Montgomery u-coordinate is derived through the bilinear map\n\t//\n\t// u = (1 + y) / (1 - y)\n\t//\n\t// See https://blog.filippo.io/using-ed25519-keys-for-encryption.\n\ty := new(big.Int).SetBytes(bigEndianY)\n\tdenom := big.NewInt(1)\n\tdenom.ModInverse(denom.Sub(denom, y), curve25519P) // 1 / (1 - y)\n\tu := y.Mul(y.Add(y, big.NewInt(1)), denom)\n\tu.Mod(u, curve25519P)\n\n\tout := make([]byte, curve25519.PointSize)\n\tuBytes := u.Bytes()\n\tfor i, b := range uBytes {\n\t\tout[len(uBytes)-i-1] = b\n\t}\n\n\treturn out\n}",
"func Ed25519PublicKey(pk crypto.PublicKey) PublicKey {\n\treturn PublicKey{\n\t\tAlgorithm: SignatureAlgoEd25519,\n\t\tKey: pk[:],\n\t}\n}",
"func Ed25519PublicKeyToCurve25519(pk ed25519.PublicKey) []byte {\n\tbigEndianY := make([]byte, ed25519.PublicKeySize)\n\tfor i, b := range pk {\n\t\tbigEndianY[ed25519.PublicKeySize-i-1] = b\n\t}\n\tbigEndianY[0] &= 0b0111_1111\n\n\ty := new(big.Int).SetBytes(bigEndianY)\n\tdenom := big.NewInt(1)\n\tdenom.ModInverse(denom.Sub(denom, y), curve25519P)\n\tu := y.Mul(y.Add(y, big.NewInt(1)), denom)\n\tu.Mod(u, curve25519P)\n\n\tout := make([]byte, curve25519.PointSize)\n\tuBytes := u.Bytes()\n\tfor i, b := range uBytes {\n\t\tout[len(uBytes)-i-1] = b\n\t}\n\n\treturn out\n}",
"func (pubKey PubKeyEd25519) ToCurve25519() *[32]byte {\n\tkeyCurve25519, pubKeyBytes := new([32]byte), [32]byte(pubKey)\n\tok := extra25519.PublicKeyToCurve25519(keyCurve25519, &pubKeyBytes)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn keyCurve25519\n}",
"func (pubKey PubKeyEd25519) ToCurve25519() *[32]byte {\n\tkeyCurve25519, pubKeyBytes := new([32]byte), [32]byte(pubKey)\n\tok := extra25519.PublicKeyToCurve25519(keyCurve25519, &pubKeyBytes)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn keyCurve25519\n}",
"func Ed25519PrivateKeyToCurve25519(pk ed25519.PrivateKey) []byte {\n\th := sha512.New()\n\th.Write(pk.Seed())\n\tout := h.Sum(nil)\n\n\t// No idea why, copy-pasted from libsodium\n\tout[0] &= 248\n\tout[31] &= 127\n\tout[31] |= 64\n\n\treturn out[:curve25519.ScalarSize]\n}",
"func parseED25519Key(key ssh.PublicKey) (ed25519.PublicKey, error) {\n\tvar sshWire struct {\n\t\tName string\n\t\tKeyBytes []byte\n\t}\n\tif err := ssh.Unmarshal(key.Marshal(), &sshWire); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal key %v: %v\", key.Type(), err)\n\t}\n\treturn ed25519.PublicKey(sshWire.KeyBytes), nil\n}",
"func NewEdX25519PublicKey(b *[ed25519.PublicKeySize]byte) *EdX25519PublicKey {\n\treturn &EdX25519PublicKey{\n\t\tid: MustID(edx25519KeyHRP, b[:]),\n\t\tpublicKey: b,\n\t}\n}",
"func CURVE25519_convert_X_to_Ed(x []byte) ([]byte, error) {\n\tret := make([]byte, 32)\n\tx25519 := (*C.uchar)(unsafe.Pointer(&x[0]))\n\ted25519 := (*C.uchar)(unsafe.Pointer(&ret[0]))\n\tif C.CURVE25519_convert_X_to_Ed(ed25519, x25519) == 1 {\n\t\treturn ret, nil\n\t}\n\treturn nil, errors.New(\"Invalid x25519 point to convert!\")\n}",
"func (pk *ECPublicKey) toECDSA() *ecdsa.PublicKey {\n\tecdsaPub := new(ecdsa.PublicKey)\n\tecdsaPub.Curve = pk.Curve\n\tecdsaPub.X = pk.X\n\tecdsaPub.Y = pk.Y\n\n\treturn ecdsaPub\n}",
"func PrivateKeyToCurve25519(privateKey []byte) (curvePrivate []byte) {\n\th := sha512.New()\n\th.Write(privateKey)\n\tdigest := h.Sum(nil)\n\n\t// key clamping\n\tdigest[0] &= 248\n\tdigest[31] &= 127\n\tdigest[31] |= 64\n\n\treturn digest[:32]\n}",
"func (privKey *YubiHsmPrivateKey) exportEd25519Pubkey(keyData []byte) error {\n\tif len(keyData) != YubiEd25519PubKeySize {\n\t\treturn errors.New(\"Invalid ed25519 public key data size\")\n\t}\n\n\tprivKey.pubKeyBytes = make([]byte, YubiEd25519PubKeySize)\n\tcopy(privKey.pubKeyBytes[:], keyData[:])\n\n\treturn nil\n}",
"func NewEdX25519PublicKeyFromID(id ID) (*EdX25519PublicKey, error) {\n\tif id == \"\" {\n\t\treturn nil, errors.Errorf(\"empty id\")\n\t}\n\thrp, b, err := id.Decode()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hrp != edx25519KeyHRP {\n\t\treturn nil, errors.Errorf(\"invalid key type for edx25519\")\n\t}\n\tif len(b) != ed25519.PublicKeySize {\n\t\treturn nil, errors.Errorf(\"invalid ed25519 public key bytes\")\n\t}\n\treturn &EdX25519PublicKey{\n\t\tid: id,\n\t\tpublicKey: Bytes32(b),\n\t}, nil\n}",
"func GenerateEd25519Key(src io.Reader) (PrivKey, PubKey, error) {\n\tpub, priv, err := ed25519.GenerateKey(src)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &Ed25519PrivateKey{\n\t\t\tk: priv,\n\t\t},\n\t\t&Ed25519PublicKey{\n\t\t\tk: pub,\n\t\t},\n\t\tnil\n}",
"func CreatePublicKeyX25519FromBase64(publicKeyBase64 string) (*X25519.PublicKey, error) {\n publicKeyBytes, err := base64.StdEncoding.DecodeString(publicKeyBase64)\n if err != nil {\n return nil, err\n }\n return X25519.NewPublicKey(publicKeyBytes), nil\n}",
"func NewEdX25519KeyFromPrivateKey(privateKey *[ed25519.PrivateKeySize]byte) *EdX25519Key {\n\t// Derive public key from private key\n\tedpk := ed25519.PrivateKey(privateKey[:])\n\tpublicKey := edpk.Public().(ed25519.PublicKey)\n\tif len(publicKey) != ed25519.PublicKeySize {\n\t\tpanic(errors.Errorf(\"invalid public key bytes (len=%d)\", len(publicKey)))\n\t}\n\n\tvar privateKeyBytes [ed25519.PrivateKeySize]byte\n\tcopy(privateKeyBytes[:], privateKey[:ed25519.PrivateKeySize])\n\n\tvar publicKeyBytes [ed25519.PublicKeySize]byte\n\tcopy(publicKeyBytes[:], publicKey[:ed25519.PublicKeySize])\n\n\treturn &EdX25519Key{\n\t\tprivateKey: &privateKeyBytes,\n\t\tpublicKey: NewEdX25519PublicKey(&publicKeyBytes),\n\t}\n}",
"func NewX25519PublicKeyFromEdX25519ID(id ID) (*X25519PublicKey, error) {\n\tspk, err := NewEdX25519PublicKeyFromID(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn spk.X25519PublicKey(), nil\n}",
"func SignEd25519(message []byte, pubKeys []ed25519.PublicKey,\n\tpriKey1, priKey2 ed25519.PrivateKey) []byte {\n\n\t// Each cosigner first needs to produce a per-message commit.\n\tcommit1, secret1, _ := cosi.Commit(bytes.NewReader(SEED1))\n\tcommit2, secret2, _ := cosi.Commit(bytes.NewReader(SEED2))\n\tcommits := []cosi.Commitment{commit1, commit2}\n\t/* fmt.Println(\"Ed25519 Sign Secret1 = \", hex.EncodeToString(secret1.Reduced()))*/\n\t//fmt.Println(\"Ed25519 Sign Commit1 = \", hex.EncodeToString(commit1))\n\t//fmt.Println(\"Ed25519 Sign Secret2 = \", hex.EncodeToString(secret2.Reduced()))\n\t//fmt.Println(\"Ed25519 Sign Commit2 = \", hex.EncodeToString(commit2))\n\n\t// The leader then combines these into msg an aggregate commit.\n\tcosigners := cosi.NewCosigners(pubKeys, nil)\n\taggregatePublicKey := cosigners.AggregatePublicKey()\n\taggregateCommit := cosigners.AggregateCommit(commits)\n\t// The cosigners now produce their parts of the collective signature.\n\tfmt.Println(\"------------------ Cosign Ed25519 1 ------------\")\n\tsigPart1 := cosi.Cosign(priKey1, secret1, message, aggregatePublicKey, aggregateCommit)\n\tfmt.Println(\"------------------ Cosign Ed25519 2 ------------\")\n\tsigPart2 := cosi.Cosign(priKey2, secret2, message, aggregatePublicKey, aggregateCommit)\n\tsigParts := []cosi.SignaturePart{sigPart1, sigPart2}\n\tfmt.Println(\"------------------ Aggregate Ed25519 -------------\")\n\tfmt.Println(\"Ed25519 Sign Aggregate = \", hex.EncodeToString(aggregatePublicKey))\n\tfmt.Println(\"Ed25519 Sign AggCommit = \", hex.EncodeToString(aggregateCommit))\n\n\t// Finally, the leader combines the two signature parts\n\t// into a final collective signature.\n\tsig := cosigners.AggregateSignature(aggregateCommit, sigParts)\n\n\treturn sig\n}",
"func (ec *ECPoint) ToPublicKey() *ecdsa.PublicKey {\n\tres := new(ecdsa.PublicKey)\n\tres.X = ec.X\n\tres.Y = ec.Y\n\tres.Curve = ec.Curve\n\n\treturn res\n}",
"func _Ed25519PublicKeyFromString(s string) (*_Ed25519PublicKey, error) {\n\tbyt, err := hex.DecodeString(s)\n\tif err != nil {\n\t\treturn &_Ed25519PublicKey{}, err\n\t}\n\n\treturn _Ed25519PublicKeyFromBytes(byt)\n}",
"func (x *Ed25519Credentials) PublicKey() PublicKey {\n\n\treturn PublicKey{\n\t\tAlgorithm: AlgorithmEd25519,\n\t\tPublic: base64.URLEncoding.EncodeToString(x.Public[:]),\n\t}\n\n}",
"func ConvertKey(sk *PrivateKey, pk EllipticPoint) *ecdsa.PrivateKey {\n\tpubKey := ecdsa.PublicKey{\n\t\tCurve: pk.C,\n\t\tX: pk.x,\n\t\tY: pk.y,\n\t}\n\n\tvar D *big.Int\n\n\tif sk != nil {\n\t\tD = new(big.Int)\n\t\tD.SetBytes(*sk.d)\n\t}\n\n\tprivKey := ecdsa.PrivateKey{\n\t\tPublicKey: pubKey,\n\t\tD: D,\n\t}\n\n\treturn &privKey\n}",
"func PublicKeyFromPvk(privateKey []byte) []byte {\n\tvar A edwards25519.ExtendedGroupElement\n\tvar hBytes [32]byte\n\tcopy(hBytes[:], privateKey)\n\tedwards25519.GeScalarMultBase(&A, &hBytes)\n\tvar publicKeyBytes [32]byte\n\tA.ToBytes(&publicKeyBytes)\n\n\treturn publicKeyBytes[:]\n}",
"func convertPublicKey(pk []uint8) []uint8 {\n\tvar z = make([]uint8, 32)\n\tvar x = gf()\n\tvar a = gf()\n\tvar b = gf()\n\n\tunpack25519(x, pk)\n\n\tA(a, x, gf1)\n\tZ(b, x, gf1)\n\tinv25519(a, a)\n\tM(a, a, b)\n\n\tpack25519(z, a)\n\treturn z\n}",
"func (s *EdX25519PublicKey) X25519PublicKey() *X25519PublicKey {\n\tedpk := ed25519.PublicKey(s.publicKey[:])\n\tbpk := ed25519PublicKeyToCurve25519(edpk)\n\tif len(bpk) != 32 {\n\t\tpanic(\"unable to convert key: invalid public key bytes\")\n\t}\n\tkey := NewX25519PublicKey(Bytes32(bpk))\n\t// TODO: Copy metadata?\n\t// key.metadata = s.metadata\n\treturn key\n}",
"func (p PrivateKey) PublicKey() (PublicKey, error) {\n\tpub, err := curve25519.X25519(p, curve25519.Basepoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pub, nil\n}",
"func toECDSA(curveName string, d []byte, strict bool) (*ecdsa.PrivateKey, error) {\n\tpriv := new(ecdsa.PrivateKey)\n\n\tpriv.PublicKey.Curve = CurveType(curveName)\n\tif strict && 8*len(d) != priv.Params().BitSize {\n\t\treturn nil, fmt.Errorf(\"invalid length, need %d bits\", priv.Params().BitSize)\n\t}\n\tpriv.D = new(big.Int).SetBytes(d)\n\n\t// The priv.D must < N,secp256k1N\n\tif priv.D.Cmp(priv.PublicKey.Curve.Params().N) >= 0 {\n\t\treturn nil, fmt.Errorf(\"invalid private key, >=N\")\n\t}\n\t// The priv.D must not be zero or negative.\n\tif priv.D.Sign() <= 0 {\n\t\treturn nil, fmt.Errorf(\"invalid private key, zero or negative\")\n\t}\n\n\tpriv.PublicKey.X, priv.PublicKey.Y = priv.PublicKey.Curve.ScalarBaseMult(d)\n\tif priv.PublicKey.X == nil {\n\t\treturn nil, errors.New(\"invalid private key\")\n\t}\n\treturn priv, nil\n}",
"func toECDSA(d []byte, strict bool) (*ecdsa.PrivateKey, error) {\n\tpriv := new(ecdsa.PrivateKey)\n\tpriv.PublicKey.Curve = S256()\n\tif strict && 8*len(d) != priv.Params().BitSize {\n\t\treturn nil, fmt.Errorf(\"invalid length, need %d bits\", priv.Params().BitSize)\n\t}\n\tpriv.D = new(big.Int).SetBytes(d)\n\n\t// The priv.D must < N\n\tif priv.D.Cmp(secp256k1N) >= 0 {\n\t\treturn nil, fmt.Errorf(\"invalid private key, >=N\")\n\t}\n\t// The priv.D must not be zero or negative.\n\tif priv.D.Sign() <= 0 {\n\t\treturn nil, fmt.Errorf(\"invalid private key, zero or negative\")\n\t}\n\n\tpriv.PublicKey.X, priv.PublicKey.Y = priv.PublicKey.Curve.ScalarBaseMult(d)\n\tif priv.PublicKey.X == nil {\n\t\treturn nil, errors.New(\"invalid private key\")\n\t}\n\treturn priv, nil\n}",
"func (priv PrivateKey) Public() crypto.PublicKey {\n\tpub := ed25519.PrivateKey(priv).Public().(ed25519.PublicKey)\n\treturn PublicKey(pub)\n}",
"func CreatePrivateKeyED25519FromBase64(privateKeyBase64 string) (*ED25519.PrivateKey, error) {\n privateKeyBytes, err := base64.StdEncoding.DecodeString(privateKeyBase64)\n if err != nil {\n return nil, err\n }\n return ED25519.NewPrivateKey(privateKeyBytes), nil\n}",
"func fromED25512Key(key ssh.PublicKey) (security.PublicKey, error) {\n\tk, err := parseED25519Key(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn security.NewED25519PublicKey(k), nil\n}",
"func (n NodeID) Pubkey() (*ecdsa.PublicKey, error) {\n\tp := &ecdsa.PublicKey{Curve: crypto.S256(), X: new(big.Int), Y: new(big.Int)}\n\thalf := len(n) / 2\n\tp.X.SetBytes(n[:half])\n\tp.Y.SetBytes(n[half:])\n\tif !p.Curve.IsOnCurve(p.X, p.Y) {\n\t\treturn nil, errors.New(\"id is invalid secp256k1 curve point\")\n\t}\n\treturn p, nil\n}",
"func (k *EdX25519Key) X25519Key() *X25519Key {\n\tsecretKey := ed25519PrivateKeyToCurve25519(ed25519.PrivateKey(k.privateKey[:]))\n\tif len(secretKey) != 32 {\n\t\tpanic(\"failed to convert key: invalid secret key bytes\")\n\t}\n\treturn NewX25519KeyFromPrivateKey(Bytes32(secretKey))\n}",
"func ToEcdsa(key []byte) *ecdsa.PrivateKey {\n\tecdsaKey := new(ecdsa.PrivateKey)\n\tecdsaKey.PublicKey.Curve = elliptic.P256()\n\tecdsaKey.D = new(big.Int).SetBytes(key)\n\tecdsaKey.PublicKey.X, ecdsaKey.PublicKey.Y = ecdsaKey.PublicKey.Curve.ScalarBaseMult(key)\n\treturn ecdsaKey\n}",
"func marshalED25519PrivateKey(key ed25519.PrivateKey) []byte {\n\tmagic := append([]byte(\"openssh-key-v1\"), 0)\n\n\tvar w struct {\n\t\tCipherName string\n\t\tKdfName string\n\t\tKdfOpts string\n\t\tNumKeys uint32\n\t\tPubKey []byte\n\t\tPrivKeyBlock []byte\n\t}\n\n\tpk1 := struct {\n\t\tCheck1 uint32\n\t\tCheck2 uint32\n\t\tKeytype string\n\t\tPub []byte\n\t\tPriv []byte\n\t\tComment string\n\t\tPad []byte `ssh:\"rest\"`\n\t}{}\n\n\tci := rand.Uint32()\n\n\tpk1.Check1 = ci\n\tpk1.Check2 = ci\n\tpk1.Keytype = ssh.KeyAlgoED25519\n\n\tpk, ok := key.Public().(ed25519.PublicKey)\n\tif !ok {\n\t\t//fmt.Fprintln(os.Stderr, \"ed25519.PublicKey type assertion failed on an ed25519 public key. This should never ever happen.\")\n\t\treturn nil\n\t}\n\tpubKey := []byte(pk)\n\n\tpk1.Pub = pubKey\n\tpk1.Priv = key\n\tpk1.Comment = \"\"\n\n\tbs := 8\n\tblockLen := len(ssh.Marshal(pk1))\n\tpadLen := (bs - (blockLen % bs)) % bs\n\tpk1.Pad = make([]byte, padLen)\n\n\tfor i := 0; i < padLen; i++ {\n\t\tpk1.Pad[i] = byte(i + 1)\n\t}\n\n\tprefix := []byte{0x0, 0x0, 0x0, 0x0b}\n\tprefix = append(prefix, []byte(ssh.KeyAlgoED25519)...)\n\tprefix = append(prefix, []byte{0x0, 0x0, 0x0, 0x20}...)\n\n\tw.CipherName = \"none\"\n\tw.KdfName = \"none\"\n\tw.KdfOpts = \"\"\n\tw.NumKeys = 1\n\tw.PubKey = append(prefix, pubKey...)\n\tw.PrivKeyBlock = ssh.Marshal(pk1)\n\n\tmagic = append(magic, ssh.Marshal(w)...)\n\treturn magic\n}",
"func PubkeyToAddress(p ecdsa.PublicKey) common.Address {\n\treturn crypto.PubkeyToAddress(p)\n}",
"func NewAddressPubKeyEd25519(scriptVersion uint16, pubKey Ed25519PublicKey,\n\tparams AddressParams) (Address, error) {\n\n\tswitch scriptVersion {\n\tcase 0:\n\t\treturn NewAddressPubKeyEd25519V0(pubKey, params)\n\t}\n\n\tstr := fmt.Sprintf(\"pubkey addresses for version %d are not supported\",\n\t\tscriptVersion)\n\treturn nil, makeError(ErrUnsupportedScriptVersion, str)\n}",
"func GenerateEdX25519Key() *EdX25519Key {\n\tlogger.Infof(\"Generating EdX25519 key...\")\n\tseed := Rand32()\n\tkey := NewEdX25519KeyFromSeed(seed)\n\treturn key\n}",
"func NewAddressPubKeyHashEd25519(scriptVersion uint16, pkHash []byte,\n\tparams AddressParams) (Address, error) {\n\n\tswitch scriptVersion {\n\tcase 0:\n\t\treturn NewAddressPubKeyHashEd25519V0(pkHash, params)\n\t}\n\n\tstr := fmt.Sprintf(\"pubkey hash addresses for version %d are not \"+\n\t\t\"supported\", scriptVersion)\n\treturn nil, makeError(ErrUnsupportedScriptVersion, str)\n}",
"func GenPrivKeyFromSecret(secret []byte) PrivKeyEd25519 {\n\tprivKey32 := Sha256(secret) // Not Ripemd160 because we want 32 bytes.\n\tprivKey := new([64]byte)\n\tcopy(privKey[:32], privKey32)\n\t// ed25519.MakePublicKey(privKey) alters the last 32 bytes of privKey.\n\t// It places the pubkey in the last 32 bytes of privKey, and returns the\n\t// public key.\n\tMakePublicKey(privKey)\n\treturn PrivKeyEd25519(*privKey)\n}",
"func PubkeyToAddress(p ecdsa.PublicKey) common.Address {\n\tpubBytes := SM2PubBytes(&p)\n\tsm3digest := sm3.Hash(pubBytes)\n\treturn common.BytesToAddress(sm3digest[12:])\n}",
"func (priv ECDHPrivate) PublicKey() ECDHPublic {\n\ttoret := make([]byte, ECDHKeyLength)\n\tC.crypto_scalarmult_base((*C.uchar)(&toret[0]),\n\t\t(*C.uchar)(&priv[0]))\n\treturn toret\n}",
"func GetPublicKeyFromSecret(secret string) []byte {\n\tsecretHash := GetSHA256Hash(secret)\n\tpKey, _, _ := ed25519.GenerateKey(bytes.NewReader(secretHash[:sha256.Size]))\n\n\treturn pKey\n}",
"func ecdhAEADPublicKey(t *testing.T, c commonpb.EllipticCurveType, ptfmt commonpb.EcPointFormat, kt ecdhpb.KeyType,\n\tencT *tinkpb.KeyTemplate, x, y, cek []byte) *ecdhpb.EcdhAeadPublicKey {\n\tt.Helper()\n\n\treturn &ecdhpb.EcdhAeadPublicKey{\n\t\tVersion: 0,\n\t\tParams: &ecdhpb.EcdhAeadParams{\n\t\t\tKwParams: &ecdhpb.EcdhKwParams{\n\t\t\t\tCurveType: c,\n\t\t\t\tKeyType: kt,\n\t\t\t},\n\t\t\tEncParams: &ecdhpb.EcdhAeadEncParams{\n\t\t\t\tAeadEnc: encT,\n\t\t\t\tCEK: cek,\n\t\t\t},\n\t\t\tEcPointFormat: ptfmt,\n\t\t},\n\t\tX: x,\n\t\tY: y,\n\t}\n}",
"func (k *EdX25519Key) PublicKey() *EdX25519PublicKey {\n\treturn k.publicKey\n}",
"func (id NodesID) Pubkey() (*ecdsa.PublicKey, error) {\n\tp := &ecdsa.PublicKey{Curve: bgmcrypto.S256(), X: new(big.Int), Y: new(big.Int)}\n\thalf := len(id) / 2\n\tptr.X.SetBytes(id[:half])\n\tptr.Y.SetBytes(id[half:])\n\tif !ptr.Curve.IsOnCurve(ptr.X, ptr.Y) {\n\t\treturn nil, errors.New(\"id is invalid secp256k1 curve point\")\n\t}\n\treturn p, nil\n}",
"func PublicKey(private, p *big.Int, g int64) *big.Int {\n\n\t// calculate the public key based on the following formula\n\t// pubKey = g**privKey mod p\n\tG := big.NewInt(g)\n\tpubKey := G.Exp(G, private, p)\n\n\treturn pubKey\n}",
"func (priv *PrivateKey) derive() (pub *PublicKey) {\n\t/* See Certicom's SEC1 3.2.1, pg.23 */\n\n\t/* Derive public key from Q = d*G */\n\tQ := secp256k1.ScalarBaseMult(priv.D)\n\n\t/* Check that Q is on the curve */\n\tif !secp256k1.IsOnCurve(Q) {\n\t\tpanic(\"Catastrophic math logic failure in public key derivation.\")\n\t}\n\n\tpriv.X = Q.X\n\tpriv.Y = Q.Y\n\n\treturn &priv.PublicKey\n}",
"func NewEdX25519KeyFromSeed(seed *[ed25519.SeedSize]byte) *EdX25519Key {\n\tprivateKey := ed25519.NewKeyFromSeed(seed[:])\n\treturn NewEdX25519KeyFromPrivateKey(Bytes64(privateKey))\n}",
"func PublicKey(private, p *big.Int, g int64) *big.Int {\n\treturn new(big.Int).Exp(big.NewInt(g), private, p)\n}",
"func (curve *EdCurve) ToMontgomeryPointForm2(sqrtB *big.Int, p *EcPoint) (p1, p2 *EcPoint) {\n\tyAddOne := new(big.Int).Add(p.Y, ONE) // y+1\n\tySubOne := new(big.Int).Sub(p.Y, ONE) // y-1\n\tp1, p2 = NewPoint(), NewPoint()\n\tp1.X = ModFraction(yAddOne, ySubOne, curve.P) // (y+1)/(y-1)\n\tp1.Y = ModFraction(p1.X, p.X, curve.P) // u/x\n\tp1.Y.Mul(p1.Y, sqrtB) // sqrtB * u/x\n\tp1.Y.Mod(p1.Y, curve.P)\n\n\tp2.X = ModFraction(ySubOne, yAddOne, curve.P) // (y-1)/(y+1)\n\tp2.Y = ModFraction(p2.X, p.X, curve.P) // u/x\n\tp2.Y.Mul(p2.Y, sqrtB) // sqrtB * u/x\n\tp2.Y.Mod(p2.Y, curve.P)\n\treturn\n}",
"func init() {\n\tPubKeyMapper.RegisterImplementation(PubKeyEd25519{}, \"ed25519\", 0x1)\n}",
"func NewPublic(x,y []byte) (*ecdsa.PublicKey) {\n\treturn &ecdsa.PublicKey{ Curve: curve(len(x)), \n\t\t\t\t\t\t\t X:new(big.Int).SetBytes(x), \n\t\t\t\t\t\t\t Y:new(big.Int).SetBytes(y) }\n}",
"func (priv *PrivateKey) Public() (*PublicKey, error) {\n\tslice, err := curve25519.X25519(priv[:], curve25519.Basepoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp, _ := PublicKeyFromSlice(slice)\n\treturn p, nil\n}",
"func PublicKey(a *big.Int, p *big.Int, g int64) *big.Int {\n\tbigG := big.NewInt(g)\n\tbigG.Exp(bigG, a, p)\n\treturn bigG\n}",
"func stringToPublicKey(s string, curve elliptic.Curve) *ecdsa.PublicKey {\n\tnewPub := new(ecdsa.PublicKey)\n\tnewPub.X = new(big.Int)\n\tnewPub.Y = new(big.Int)\n\n\tnewPub.Curve = curve\n\tsplitS := strings.SplitN(s, \"|\", 2)\n\tif len(splitS) != 2 {\n\t\treturn nil\n\t}\n\t_, check := newPub.X.SetString(splitS[0], 16)\n\tif !check {\n\t\treturn nil\n\t}\n\t_, check = newPub.Y.SetString(splitS[1], 16)\n\tif !check {\n\t\treturn nil\n\t}\n\treturn newPub\n}",
"func EncodeX25519Recipient(pk *ecdh.PublicKey) (string, error) {\n\tif pk.Curve() != ecdh.X25519() {\n\t\treturn \"\", fmt.Errorf(\"wrong ecdh Curve\")\n\t}\n\treturn bech32.Encode(\"age\", pk.Bytes())\n}",
"func (x *X25519) PEMPublicKey() string {\n\treturn x.publicPEMKey\n}",
"func ECDH_ECPVP_DSA(sha int, W []byte, F []byte, C []byte, D []byte) int {\n\tres := 0\n\n\tB := core.GPhashit(core.MC_SHA2, sha, int(MODBYTES), 0, F, -1, nil )\n\n\tG := ECP_generator()\n\tr := NewBIGints(CURVE_Order)\n\n\tc := FromBytes(C)\n\td := FromBytes(D)\n\tf := FromBytes(B[:])\n\n\tif c.iszilch() || Comp(c, r) >= 0 || d.iszilch() || Comp(d, r) >= 0 {\n\t\tres = ERROR\n\t}\n\n\tif res == 0 {\n\t\td.Invmodp(r)\n\t\tf.copy(Modmul(f, d, r))\n\t\th2 := Modmul(c, d, r)\n\n\t\tWP := ECP_fromBytes(W)\n\t\tif WP.Is_infinity() {\n\t\t\tres = ERROR\n\t\t} else {\n\t\t\tP := NewECP()\n\t\t\tP.Copy(WP)\n\n\t\t\tP = P.Mul2(h2, G, f)\n\n\t\t\tif P.Is_infinity() {\n\t\t\t\tres = ERROR\n\t\t\t} else {\n\t\t\t\td = P.GetX()\n\t\t\t\td.Mod(r)\n\n\t\t\t\tif Comp(d, c) != 0 {\n\t\t\t\t\tres = ERROR\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn res\n}",
"func keyPairsToAesKeyIvLegacy(_ ed25519.PrivateKey, publicKey ed25519.PublicKey) (key, iv []byte, err error) {\n\tvar pri, pub mixin.Key\n\tcopy(pub[:], publicKey)\n\t// privateKeyToCurve25519(pri, privateKey)\n\n\tif !pub.CheckKey() {\n\t\terr = errors.New(\"public key is invalid\")\n\t\treturn\n\t}\n\n\tif !pri.CheckScalar() {\n\t\terr = errors.New(\"private key is invalid\")\n\t\treturn\n\t}\n\n\tvar point edwards25519.ExtendedGroupElement\n\tvar point2 edwards25519.ProjectiveGroupElement\n\n\ttmp := [32]byte(pub)\n\tpoint.FromBytes(&tmp)\n\ttmp = pri\n\tedwards25519.GeScalarMult(&point2, &tmp, &point)\n\n\tpoint2.ToBytes(&tmp)\n\treturn tmp[:16], tmp[16:], nil\n}",
"func parseECDSA(in []byte) (*ecdsa.PublicKey, error) {\n\tvar w struct {\n\t\tCurve string\n\t\tKeyBytes []byte\n\t\tRest []byte `ssh:\"rest\"`\n\t}\n\n\tif err := ssh.Unmarshal(in, &w); err != nil {\n\t\treturn nil, errors.Wrap(err, \"error unmarshaling public key\")\n\t}\n\n\tkey := new(ecdsa.PublicKey)\n\n\tswitch w.Curve {\n\tcase \"nistp256\":\n\t\tkey.Curve = elliptic.P256()\n\tcase \"nistp384\":\n\t\tkey.Curve = elliptic.P384()\n\tcase \"nistp521\":\n\t\tkey.Curve = elliptic.P521()\n\tdefault:\n\t\treturn nil, errors.Errorf(\"unsupported curve %s\", w.Curve)\n\t}\n\n\tkey.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes)\n\tif key.X == nil || key.Y == nil {\n\t\treturn nil, errors.New(\"invalid curve point\")\n\t}\n\n\treturn key, nil\n}",
"func keyPubAddr() (crypto.PrivKey, crypto.PubKey, sdk.AccAddress) {\n\tkeyCounter++\n\tseed := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(seed, keyCounter)\n\n\tkey := ed25519.GenPrivKeyFromSecret(seed)\n\tpub := key.PubKey()\n\taddr := sdk.AccAddress(pub.Address())\n\treturn key, pub, addr\n}",
"func CreatePrivateKeyX25519FromBase64(privateKeyBase64 string) (*X25519.PrivateKey, error) {\n privateKeyBytes, err := base64.StdEncoding.DecodeString(privateKeyBase64)\n if err != nil {\n return nil, err\n }\n return X25519.NewPrivateKey(privateKeyBytes), nil\n}",
"func (privKey PrivKeyEd25519) PubKey() PubKey {\n\tprivKeyBytes := [64]byte(privKey)\n\tinitialized := false\n\t// If the latter 32 bytes of the privkey are all zero, compute the pubkey\n\t// otherwise privkey is initialized and we can use the cached value inside\n\t// of the private key.\n\tfor _, v := range privKeyBytes[32:] {\n\t\tif v != 0 {\n\t\t\tinitialized = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif initialized {\n\t\tvar pubkeyBytes [32]byte\n\t\tcopy(pubkeyBytes[:], privKeyBytes[32:])\n\t\treturn PubKeyEd25519(pubkeyBytes)\n\t}\n\n\tpubBytes := *MakePublicKey(&privKeyBytes)\n\treturn PubKeyEd25519(pubBytes)\n}",
"func parseECDSAKey(key ssh.PublicKey) (*ecdsa.PublicKey, error) {\n\tvar sshWire struct {\n\t\tName string\n\t\tID string\n\t\tKey []byte\n\t}\n\tif err := ssh.Unmarshal(key.Marshal(), &sshWire); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal key type: %v: %v\", key.Type(), err)\n\t}\n\tpk := new(ecdsa.PublicKey)\n\tswitch sshWire.ID {\n\tcase \"nistp256\":\n\t\tpk.Curve = elliptic.P256()\n\tcase \"nistp384\":\n\t\tpk.Curve = elliptic.P384()\n\tcase \"nistp521\":\n\t\tpk.Curve = elliptic.P521()\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"uncrecognised ecdsa curve: %v\", sshWire.ID)\n\t}\n\tpk.X, pk.Y = elliptic.Unmarshal(pk.Curve, sshWire.Key)\n\tif pk.X == nil || pk.Y == nil {\n\t\treturn nil, fmt.Errorf(\"invalid curve point\")\n\t}\n\treturn pk, nil\n}",
"func (k *PrivateKey) PublicKey() *PublicKey {\n\tpubKeyG2Point := bls.G2AffineOne.MulFR(k.PrivKey.GetFRElement().ToRepr())\n\n\treturn &PublicKey{g2pubs.NewPublicKeyFromG2(pubKeyG2Point.ToAffine())}\n}",
"func (sk *PrivateKey) Public() crypto.PublicKey {\n\treturn &PublicKey{\n\t\tsk.e.Public().(ed25519.PublicKey),\n\t\t*sk.d.Public().(*mode2.PublicKey),\n\t}\n}",
"func (lib *PKCS11Lib) exportECDSAPublicKey(session pkcs11.SessionHandle, pubHandle pkcs11.ObjectHandle) (crypto.PublicKey, error) {\n\tvar err error\n\tvar attributes []*pkcs11.Attribute\n\tvar pub ecdsa.PublicKey\n\ttemplate := []*pkcs11.Attribute{\n\t\tpkcs11.NewAttribute(pkcs11.CKA_ECDSA_PARAMS, nil),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_EC_POINT, nil),\n\t}\n\tif attributes, err = lib.Ctx.GetAttributeValue(session, pubHandle, template); err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\tif pub.Curve, err = unmarshalEcParams(attributes[0].Value); err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\tif pub.X, pub.Y, err = unmarshalEcPoint(attributes[1].Value, pub.Curve); err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\treturn &pub, nil\n}",
"func convertMont(u PublicKey) (*edwards25519.Point, error) {\n\tum, err := (&field.Element{}).SetBytes(u)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// y = (u - 1)/(u + 1)\n\ta := new(field.Element).Subtract(um, one)\n\tb := new(field.Element).Add(um, one)\n\ty := new(field.Element).Multiply(a, b.Invert(b)).Bytes()\n\n\t// Set sign to 0\n\ty[31] &= 0x7F\n\n\treturn (&edwards25519.Point{}).SetBytes(y)\n}",
"func _Ed25519PublicKeyFromBytes(bytes []byte) (*_Ed25519PublicKey, error) {\n\tlength := len(bytes)\n\tswitch length {\n\tcase 32:\n\t\treturn _Ed25519PublicKeyFromBytesRaw(bytes)\n\tcase 44:\n\t\treturn _Ed25519PublicKeyFromBytesDer(bytes)\n\tdefault:\n\t\treturn &_Ed25519PublicKey{}, _NewErrBadKeyf(\"invalid public key length: %v bytes\", len(bytes))\n\t}\n}",
"func ImportPublicECDSA(c config.Reader, name string, curve string, public []byte) (KeyAPI, error) {\n\tif name == \"\" {\n\t\treturn nil, fmt.Errorf(\"name cannot be empty\")\n\t}\n\n\tif curve == \"\" {\n\t\treturn nil, fmt.Errorf(\"curve cannot be empty\")\n\t}\n\n\t_, ty, err := getCurve(curve)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpub, err := mar.DecodePublicKey(public)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpem, perr := enc.EncodePublic(pub)\n\tif perr != nil {\n\t\treturn nil, perr\n\t}\n\n\t// Resulting key will not be complete - create the key struct object anyways\n\tkey := &key{\n\t\tGID: api.GenerateUUID(),\n\t\tName: name,\n\t\tSlug: helpers.NewHaikunator().Haikunate(),\n\t\tKeyType: fmt.Sprintf(\"ecdsa.PublicKey <==> %s\", ty),\n\t\tStatus: api.StatusActive,\n\t\tPublicKeyB64: base64.StdEncoding.EncodeToString([]byte(pem)),\n\t\tPrivateKeyB64: \"\",\n\t\tFingerprintMD5: enc.FingerprintMD5(pub),\n\t\tFingerprintSHA: enc.FingerprintSHA256(pub),\n\t\tCreatedAt: time.Now(),\n\t}\n\n\t// Write the entire key object to FS\n\tif err := key.writeToFS(c, nil, pub); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn key, nil\n}",
"func seedFromEd25519PrivateKey(key crypto.PrivKey) ([]byte, error) {\n\t// Similar to (*ed25519).Seed()\n\tif key.Type() != pb.KeyType_Ed25519 {\n\t\treturn nil, errcode.ErrInvalidInput\n\t}\n\n\tr, err := key.Raw()\n\tif err != nil {\n\t\treturn nil, errcode.ErrSerialization.Wrap(err)\n\t}\n\n\tif len(r) != ed25519.PrivateKeySize {\n\t\treturn nil, errcode.ErrInvalidInput\n\t}\n\n\treturn r[:ed25519.PrivateKeySize-ed25519.PublicKeySize], nil\n}",
"func (curve *EdCurve) ToMontgomeryPointForm1(sqrtB *big.Int, p *EcPoint) (p1, p2 *EcPoint) {\n\toneSubY := new(big.Int).Sub(ONE, p.Y) // 1-y\n\toneAddY := new(big.Int).Add(ONE, p.Y) // 1+y\n\tp1, p2 = NewPoint(), NewPoint()\n\tp1.X = ModFraction(oneAddY, oneSubY, curve.P) // (1+y)/(1-y)\n\tp1.Y = ModFraction(p1.X, p.X, curve.P) // u/x\n\tp1.Y.Mul(p1.Y, sqrtB) // sqrtB * u/x\n\tp1.Y.Mod(p1.Y, curve.P)\n\n\tp2.X = ModFraction(oneSubY, oneAddY, curve.P) // (1-y)/(1+y)\n\tp2.Y = ModFraction(p2.X, p.X, curve.P) // u/x\n\tp2.Y.Mul(p2.Y, sqrtB) // sqrtB * u/x\n\tp2.Y.Mod(p2.Y, curve.P)\n\treturn\n}",
"func GetPublicKey() ed25519.PublicKey {\n\tkey, _ := DecodePublicKey(publicKey)\n\treturn key\n}",
"func NewE4PubKey(keyPath string) (E4Key, error) {\n\tkeyFile, err := os.Open(keyPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to open file %s: %v\", keyPath, err)\n\t}\n\tdefer keyFile.Close()\n\n\tkeyBytes, err := ioutil.ReadAll(keyFile)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read e4key from %s: %v\", keyPath, err)\n\t}\n\n\tif err := e4crypto.ValidateCurve25519PrivKey(keyBytes); err != nil {\n\t\treturn nil, err\n\t}\n\n\tpubKey, err := curve25519.X25519(keyBytes, curve25519.Basepoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &e4PubKey{\n\t\tc2PrivKey: keyBytes,\n\t\tc2PubKey: pubKey,\n\t\tkeyPath: keyPath,\n\t}, nil\n}",
"func (k *Ed25519PublicKey) Equals(o Key) bool {\n\tedk, ok := o.(*Ed25519PublicKey)\n\tif !ok {\n\t\treturn false\n\t}\n\n\treturn bytes.Equal(k.k, edk.k)\n}",
"func (p *PrivateKey) ToECDSA() *ecdsa.PrivateKey {\n\treturn (*ecdsa.PrivateKey)(p)\n}",
"func Sign(rand io.Reader, p PrivateKey, message []byte) (signature []byte, err error) {\n\tif l := len(p); l != PrivateKeySize {\n\t\tpanic(\"x25519: bad private key length: \" + strconv.Itoa(l))\n\t}\n\n\tpub, priv, err := p.calculateKeyPair()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trandom := make([]byte, 64)\n\tif _, err := io.ReadFull(rand, random); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Using same prefix in libsignal-protocol-c implementation, but can be any\n\t// 32 byte prefix. Golang's ed25519 implementation uses:\n\t//\n\t// ph := sha512.Sum512(a.Bytes())\n\t// prefix := ph[32:]\n\tprefix := [32]byte{\n\t\t0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t}\n\n\trh := sha512.New()\n\trh.Write(prefix[:])\n\trh.Write(priv.Bytes())\n\trh.Write(message)\n\trh.Write(random)\n\trDigest := make([]byte, 0, sha512.Size)\n\trDigest = rh.Sum(rDigest)\n\n\tr, err := edwards25519.NewScalar().SetUniformBytes(rDigest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tR := (&edwards25519.Point{}).ScalarBaseMult(r) //nolint:gocritic // variable names match crypto formulae docs\n\n\thh := sha512.New()\n\thh.Write(R.Bytes())\n\thh.Write(pub)\n\thh.Write(message)\n\thDigest := make([]byte, 0, sha512.Size)\n\thDigest = hh.Sum(hDigest)\n\th, err := edwards25519.NewScalar().SetUniformBytes(hDigest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := (&edwards25519.Scalar{}).Add(r, h.Multiply(h, priv))\n\n\tsig := make([]byte, 64)\n\tcopy(sig[:32], R.Bytes())\n\tcopy(sig[32:], s.Bytes())\n\treturn sig, nil\n}",
"func PrivateKeyPublic(priv *rsa.PrivateKey,) crypto.PublicKey",
"func (x *Ed25519Credentials) SetPublicKey(publickey PublicKey) error {\n\n\tif publickey.Algorithm != AlgorithmEd25519 {\n\t\treturn fmt.Errorf(\"Algorithm mismatch %v vs %v\", publickey.Algorithm, AlgorithmEd25519)\n\t}\n\n\tst, ok := publickey.Public.(string)\n\tif !ok {\n\t\treturn ErrInvalidPublicKeyType\n\t}\n\n\tif len(st) != base64.URLEncoding.EncodedLen(ed25519.PublicKeySize) {\n\t\treturn fmt.Errorf(\"Key data incorrect length\")\n\t}\n\n\tbytes, err := base64.URLEncoding.DecodeString(st)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tx.Public = bytes\n\n\treturn nil\n\n}",
"func PublicKeyStrToKey(pubKey string) (*ecdsa.PublicKey, error) {\n\tpubKeyAsBytes, err := hex.DecodeString(pubKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx, y := elliptic.Unmarshal(curve.CURVE, pubKeyAsBytes)\n\tkey := &ecdsa.PublicKey{\n\t\tCurve: curve.CURVE,\n\t\tX: x,\n\t\tY: y,\n\t}\n\treturn key, nil\n}",
"func (pubKey PubKeyEd25519) Address() []byte { return binary.BinaryRipemd160(pubKey) }",
"func (x *ed25519_t) New(public PublicKey, private PrivateKey) (Credentials, error) {\n\n\tvar credentials Ed25519Credentials\n\tvar err error\n\n\terr = credentials.SetPublicKey(public)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = credentials.SetPrivateKey(private)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &credentials, nil\n\n}",
"func (priv *PrivateKey) Public() crypto.PublicKey",
"func (priv *PrivateKey) Public() crypto.PublicKey",
"func fromECDSAKey(key ssh.PublicKey) (security.PublicKey, error) {\n\tk, err := parseECDSAKey(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn security.NewECDSAPublicKey(k), nil\n}",
"func (c *SFTPServer) generateED25519PrivateKey() error {\n\t_, priv, err := ed25519.GenerateKey(rand.Reader)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"sftp: failed to generate ED25519 private key\")\n\t}\n\tif err := os.MkdirAll(path.Dir(c.PrivateKeyPath()), 0o755); err != nil {\n\t\treturn errors.Wrap(err, \"sftp: could not create internal sftp data directory\")\n\t}\n\to, err := os.OpenFile(c.PrivateKeyPath(), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tdefer o.Close()\n\n\tb, err := x509.MarshalPKCS8PrivateKey(priv)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"sftp: failed to marshal private key into bytes\")\n\t}\n\tif err := pem.Encode(o, &pem.Block{Type: \"PRIVATE KEY\", Bytes: b}); err != nil {\n\t\treturn errors.Wrap(err, \"sftp: failed to write ED25519 private key to disk\")\n\t}\n\treturn nil\n}",
"func (a *ChoriaAuth) ed25519Verify(publicKey ed25519.PublicKey, message []byte, sig []byte) (bool, error) {\n\tif len(publicKey) != ed25519.PublicKeySize {\n\t\treturn false, fmt.Errorf(\"invalid public key length %d\", len(publicKey))\n\t}\n\n\treturn ed25519.Verify(publicKey, message, sig), nil\n}",
"func (kp *FromAddress) LibP2PPubKey() (*libp2pc.Ed25519PublicKey, error) {\n\tpmes := new(pb.PublicKey)\n\tpmes.Data = kp.publicKey()[:]\n\tpk, err := libp2pc.UnmarshalEd25519PublicKey(pmes.GetData())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tepk, ok := pk.(*libp2pc.Ed25519PublicKey)\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\treturn epk, nil\n}",
"func NewAddressPubKeyEd25519Raw(scriptVersion uint16, serializedPubKey []byte,\n\tparams AddressParams) (Address, error) {\n\n\tswitch scriptVersion {\n\tcase 0:\n\t\treturn NewAddressPubKeyEd25519V0Raw(serializedPubKey, params)\n\t}\n\n\tstr := fmt.Sprintf(\"pubkey addresses for version %d are not supported\",\n\t\tscriptVersion)\n\treturn nil, makeError(ErrUnsupportedScriptVersion, str)\n}",
"func ToECDSA(d []byte) (*ecdsa.PrivateKey, error) {\n\treturn toECDSA(d, true)\n}",
"func NewPubKeyFromHex(pk string) (res crypto.PubKey) {\n\tpkBytes, err := hex.DecodeString(pk)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tvar pkEd ed25519.PubKeyEd25519\n\tcopy(pkEd[:], pkBytes)\n\treturn pkEd\n}",
"func (k *Ed25519PrivateKey) GetPublic() PubKey {\n\treturn &Ed25519PublicKey{k: k.pubKeyBytes()}\n}",
"func ConvertToPPK(privateKey *rsa.PrivateKey, pub []byte) ([]byte, error) {\n\t// https://the.earth.li/~sgtatham/putty/0.76/htmldoc/AppendixC.html#ppk\n\t// RSA keys are stored using an algorithm-name of 'ssh-rsa'. (Keys stored like this are also used by the updated RSA signature schemes that use\n\t// hashes other than SHA-1. The public key data has already provided the key modulus and the public encoding exponent. The private data stores:\n\t// mpint: the private decoding exponent of the key.\n\t// mpint: one prime factor p of the key.\n\t// mpint: the other prime factor q of the key. (RSA keys stored in this format are expected to have exactly two prime factors.)\n\t// mpint: the multiplicative inverse of q modulo p.\n\tppkPrivateKey := new(bytes.Buffer)\n\n\t// mpint: the private decoding exponent of the key.\n\t// this is known as 'D'\n\tbinary.Write(ppkPrivateKey, binary.BigEndian, getRFC4251Mpint(privateKey.D))\n\n\t// mpint: one prime factor p of the key.\n\t// this is known as 'P'\n\t// the RSA standard dictates that P > Q\n\t// for some reason what PuTTY names 'P' is Primes[1] to Go, and what PuTTY names 'Q' is Primes[0] to Go\n\tP, Q := privateKey.Primes[1], privateKey.Primes[0]\n\tbinary.Write(ppkPrivateKey, binary.BigEndian, getRFC4251Mpint(P))\n\n\t// mpint: the other prime factor q of the key. 
(RSA keys stored in this format are expected to have exactly two prime factors.)\n\t// this is known as 'Q'\n\tbinary.Write(ppkPrivateKey, binary.BigEndian, getRFC4251Mpint(Q))\n\n\t// mpint: the multiplicative inverse of q modulo p.\n\t// this is known as 'iqmp'\n\tiqmp := new(big.Int).ModInverse(Q, P)\n\tbinary.Write(ppkPrivateKey, binary.BigEndian, getRFC4251Mpint(iqmp))\n\n\t// now we need to base64-encode the PPK-formatted private key which is made up of the above values\n\tppkPrivateKeyBase64 := make([]byte, base64.StdEncoding.EncodedLen(ppkPrivateKey.Len()))\n\tbase64.StdEncoding.Encode(ppkPrivateKeyBase64, ppkPrivateKey.Bytes())\n\n\t// read Teleport public key\n\t// fortunately, this is the one thing that's in exactly the same format that the PPK file uses, so we can just copy it verbatim\n\t// remove ssh-rsa plus additional space from beginning of string if present\n\tif !bytes.HasPrefix(pub, []byte(constants.SSHRSAType+\" \")) {\n\t\treturn nil, trace.BadParameter(\"pub does not appear to be an ssh-rsa public key\")\n\t}\n\tpub = bytes.TrimSuffix(bytes.TrimPrefix(pub, []byte(constants.SSHRSAType+\" \")), []byte(\"\\n\"))\n\n\t// the PPK file contains an anti-tampering MAC which is made up of various values which appear in the file.\n\t// copied from Section C.3 of https://the.earth.li/~sgtatham/putty/0.76/htmldoc/AppendixC.html#ppk:\n\t// hex-mac-data is a hexadecimal-encoded value, 64 digits long (i.e. 
32 bytes), generated using the HMAC-SHA-256 algorithm with the following binary data as input:\n\t// string: the algorithm-name header field.\n\t// string: the encryption-type header field.\n\t// string: the key-comment-string header field.\n\t// string: the binary public key data, as decoded from the base64 lines after the 'Public-Lines' header.\n\t// string: the plaintext of the binary private key data, as decoded from the base64 lines after the 'Private-Lines' header.\n\n\t// these values are also used in the MAC generation, so we declare them as variables\n\tkeyType := constants.SSHRSAType\n\tencryptionType := \"none\"\n\t// as work for the future, it'd be nice to get the proxy/user pair name in here to make the name more\n\t// of a unique identifier. this has to be done at generation time because the comment is part of the MAC\n\tfileComment := \"teleport-generated-ppk\"\n\n\t// string: the algorithm-name header field.\n\tmacKeyType := getRFC4251String([]byte(keyType))\n\t// create a buffer to hold the elements needed to generate the MAC\n\tmacInput := new(bytes.Buffer)\n\tbinary.Write(macInput, binary.LittleEndian, macKeyType)\n\n\t// string: the encryption-type header field.\n\tmacEncryptionType := getRFC4251String([]byte(encryptionType))\n\tbinary.Write(macInput, binary.BigEndian, macEncryptionType)\n\n\t// string: the key-comment-string header field.\n\tmacComment := getRFC4251String([]byte(fileComment))\n\tbinary.Write(macInput, binary.BigEndian, macComment)\n\n\t// base64-decode the Teleport public key, as we need its binary representation to generate the MAC\n\tdecoded := make([]byte, base64.StdEncoding.EncodedLen(len(pub)))\n\tn, err := base64.StdEncoding.Decode(decoded, pub)\n\tif err != nil {\n\t\treturn nil, trace.Errorf(\"could not base64-decode public key: %v, got %v bytes successfully\", err, n)\n\t}\n\tdecoded = decoded[:n]\n\t// append the decoded public key bytes to the MAC buffer\n\tmacPublicKeyData := 
getRFC4251String(decoded)\n\tbinary.Write(macInput, binary.BigEndian, macPublicKeyData)\n\n\t// append our PPK-formatted private key bytes to the MAC buffer\n\tmacPrivateKeyData := getRFC4251String(ppkPrivateKey.Bytes())\n\tbinary.Write(macInput, binary.BigEndian, macPrivateKeyData)\n\n\t// as per the PPK spec, the key for the MAC is blank when the PPK file is unencrypted.\n\t// therefore, the key is a zero-length byte slice.\n\thmacHash := hmac.New(sha256.New, []byte{})\n\t// generate the MAC using HMAC-SHA-256\n\thmacHash.Write(macInput.Bytes())\n\tmacString := hex.EncodeToString(hmacHash.Sum(nil))\n\n\t// build the string-formatted output PPK file\n\tppk := new(bytes.Buffer)\n\tfmt.Fprintf(ppk, \"PuTTY-User-Key-File-3: %v\\n\", keyType)\n\tfmt.Fprintf(ppk, \"Encryption: %v\\n\", encryptionType)\n\tfmt.Fprintf(ppk, \"Comment: %v\\n\", fileComment)\n\t// chunk the Teleport-formatted public key into 64-character length lines\n\tchunkedPublicKey := chunk(string(pub), 64)\n\tfmt.Fprintf(ppk, \"Public-Lines: %v\\n\", len(chunkedPublicKey))\n\tfor _, r := range chunkedPublicKey {\n\t\tfmt.Fprintf(ppk, \"%s\\n\", r)\n\t}\n\t// chunk the PPK-formatted private key into 64-character length lines\n\tchunkedPrivateKey := chunk(string(ppkPrivateKeyBase64), 64)\n\tfmt.Fprintf(ppk, \"Private-Lines: %v\\n\", len(chunkedPrivateKey))\n\tfor _, r := range chunkedPrivateKey {\n\t\tfmt.Fprintf(ppk, \"%s\\n\", r)\n\t}\n\tfmt.Fprintf(ppk, \"Private-MAC: %v\\n\", macString)\n\n\treturn ppk.Bytes(), nil\n}",
"func (k *KeyPairEd25519) GetPublicKey() PublicKey {\n\treturn PublicKey{\n\t\tType: ED25519,\n\t\tData: k.privateKey.Public().(ed25519.PublicKey),\n\t}\n}",
"func main() {\n\tp384 := elliptic.P384()\n\tpriv1, _ := ecdsa.GenerateKey(p384, rand.Reader)\n\n\tprivateKeyBytes, _ := x509.MarshalECPrivateKey(priv1)\n\n\tencodedBytes := hex.EncodeToString(privateKeyBytes)\n\tfmt.Println(\"Private key:\")\n\tfmt.Printf(\"%s\\n\", encodedBytes)\n\n\tprivateKeyBytesRestored, _ := hex.DecodeString(encodedBytes)\n\tpriv2, _ := x509.ParseECPrivateKey(privateKeyBytesRestored)\n\n\tpublicKeyBytes, _ := x509.MarshalPKIXPublicKey(&priv1.PublicKey)\n\tencodedPubBytes := hex.EncodeToString(publicKeyBytes)\n\tfmt.Println(\"Public key:\")\n\tfmt.Printf(\"%s\\n\", encodedPubBytes)\n\n\tdata := []byte(\"data\")\n\t// Signing by priv1\n\tr, s, _ := ecdsa.Sign(rand.Reader, priv1, data)\n\n\t// Verifying against priv2 (restored from priv1)\n\tif !ecdsa.Verify(&priv2.PublicKey, data, r, s) {\n\t\tfmt.Printf(\"Error\")\n\t\treturn\n\t}\n\n\tfmt.Printf(\"Key was restored from string successfully\\n\")\n}",
"func TestCompareEd25519(t *testing.T) {\n\ttest.TestCompareGroups(testSuite,\n\t\tnew(ExtendedCurve).Init(Param25519(), false),\n\t\tnew(ed25519.Curve))\n}",
"func (ec *ECPoint) SetFromPublicKey(other *ecdsa.PublicKey) *ECPoint {\n\tec.X = new(big.Int).Set(other.X)\n\tec.Y = new(big.Int).Set(other.Y)\n\tec.Curve = other.Curve\n\n\treturn ec\n}",
"func (*FactorySECP256K1R) ToPublicKey(b []byte) (PublicKey, error) {\n\tkey, err := secp256k1.ParsePubKey(b)\n\treturn &PublicKeySECP256K1R{\n\t\tpk: key,\n\t\tbytes: b,\n\t}, err\n}",
"func TestProjective25519(t *testing.T) {\n\ttest.TestGroup(new(ProjectiveCurve).Init(Param25519(), false))\n}"
] | [
"0.7344533",
"0.70818925",
"0.70533943",
"0.66119736",
"0.66119736",
"0.6594977",
"0.6541674",
"0.65109813",
"0.6496604",
"0.64243305",
"0.6326358",
"0.6308357",
"0.6301593",
"0.6285583",
"0.61747074",
"0.6142724",
"0.60759276",
"0.6024686",
"0.60096884",
"0.58517253",
"0.5849791",
"0.5830617",
"0.5807623",
"0.5799805",
"0.57617784",
"0.57580984",
"0.5754702",
"0.5714054",
"0.5712461",
"0.5705392",
"0.5703584",
"0.56948096",
"0.5690599",
"0.56482226",
"0.5633911",
"0.5620152",
"0.5609086",
"0.5605606",
"0.56001294",
"0.558186",
"0.55816996",
"0.55766827",
"0.5568691",
"0.55349094",
"0.55213565",
"0.5517731",
"0.55176157",
"0.5487977",
"0.5479546",
"0.54795253",
"0.5457817",
"0.54466355",
"0.5439599",
"0.54301417",
"0.5427515",
"0.5423583",
"0.5418379",
"0.5408623",
"0.54026854",
"0.5396345",
"0.53952676",
"0.53770757",
"0.53591776",
"0.53575355",
"0.53537774",
"0.5349568",
"0.5337782",
"0.53376025",
"0.5335953",
"0.5316497",
"0.5304468",
"0.529961",
"0.52879107",
"0.5286181",
"0.5280969",
"0.52797914",
"0.5274316",
"0.52660275",
"0.5264903",
"0.5260522",
"0.52300036",
"0.5217894",
"0.5216522",
"0.5203796",
"0.5203796",
"0.51956147",
"0.5188609",
"0.5179816",
"0.5178896",
"0.5170033",
"0.51482326",
"0.5143522",
"0.5139052",
"0.5136693",
"0.5130164",
"0.5127603",
"0.51167226",
"0.51128346",
"0.5106935",
"0.51067376"
] | 0.75875074 | 0 |
Sign signs the given message with the private key p and returns a signature. It implements the XEdDSA sign method defined in XEdDSA performs two passes over messages to be signed and therefore cannot handle prehashed messages. Thus opts.HashFunc() must return zero to indicate the message hasn't been hashed. This can be achieved by passing crypto.Hash(0) as the value for opts. | Sign подписывает заданный сообщение с помощью приватного ключа p и возвращает подпись. Он реализует метод подписи XEdDSA, определённый в XEdDSA. Метод выполняет два прохода по сообщениям, которые подлежат подписи, поэтому не может обрабатывать предварительно хэшированные сообщения. Таким образом, opts.HashFunc() должен возвращать ноль, чтобы указать, что сообщение не было хэшировано. Это можно достичь, передав crypto.Hash(0) в качестве значения для opts. | func (p PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
if opts.HashFunc() != crypto.Hash(0) {
return nil, errors.New("x25519: cannot sign hashed message")
}
return Sign(rand, p, message)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {\n\tif opts.HashFunc() != crypto.Hash(0) {\n\t\treturn nil, errors.New(\"sign: cannot sign hashed message\")\n\t}\n\n\tout := Sign(message, priv)\n\treturn out[:], nil\n}",
"func (sk *PrivateKey) Sign(\n\trand io.Reader, msg []byte, opts crypto.SignerOpts,\n) (signature []byte, err error) {\n\tvar sig [SignatureSize]byte\n\n\tif opts.HashFunc() != crypto.Hash(0) {\n\t\treturn nil, errors.New(\"eddilithium2: cannot sign hashed message\")\n\t}\n\n\tSignTo(sk, msg, sig[:])\n\treturn sig[:], nil\n}",
"func Sign(message, secretKey []byte) ([]byte, error) {\n\treturn defaultPH.cryptoSign(message, secretKey)\n}",
"func (sk PrivateKey) Sign(message []byte, hasher Hasher) ([]byte, error) {\n\treturn sk.privateKey.Sign(message, hasher)\n}",
"func (pk *PrivateKey) Sign(message []byte) *Signature {\n var signature Signature\n copy(signature[:], ed25519.Sign(pk[:], message)[:])\n return &signature\n}",
"func Sign(msg []byte, prv *ecdsa.PrivateKey) ([]byte, error) {\n\treturn crypto.Sign(msg, prv)\n}",
"func (pk PrivateKey) Sign(message []byte) Signature {\n\tvar signature Signature\n\tcopy(signature[:], ed25519.Sign(pk[:], message)[:])\n\treturn signature\n}",
"func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error)",
"func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error)",
"func (k *KeyPairEd25519) Sign(message []byte) ([]byte, error) {\n\tres, err := k.privateKey.Sign(nil, message, crypto.Hash(0))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"calling sign: %v\", err)\n\t}\n\treturn res, nil\n}",
"func (sk SecretKey) Sign(message []byte) (sig Signature, err error) {\n\tif message == nil {\n\t\terr = errors.New(\"cannot sign a nil message\")\n\t\treturn\n\t}\n\n\tsignedMessageBytes := make([]byte, len(message)+SignatureSize)\n\tsignedMessagePointer := (*C.uchar)(&signedMessageBytes[0])\n\n\tvar signatureLen uint64\n\tlenPointer := (*C.ulonglong)(&signatureLen)\n\n\tvar messagePointer *C.uchar\n\tif len(message) == 0 {\n\t\t// can't point to a slice of len 0\n\t\tmessagePointer = (*C.uchar)(nil)\n\t} else {\n\t\tmessageBytes := []byte(message)\n\t\tmessagePointer = (*C.uchar)(&messageBytes[0])\n\t}\n\n\tmessageLen := C.ulonglong(len(message))\n\tskPointer := (*C.uchar)(&sk[0])\n\n\tsignErr := C.crypto_sign(signedMessagePointer, lenPointer, messagePointer, messageLen, skPointer)\n\tif signErr != 0 {\n\t\terr = errors.New(\"call to crypto_sign failed\")\n\t\treturn\n\t}\n\n\tcopy(sig[:], signedMessageBytes)\n\treturn\n}",
"func Sign(s *big.Int, params *Params, key *PrivateKey, attrs AttributeList, message *big.Int) (*Signature, error) {\n\treturn SignPrecomputed(s, params, key, attrs, PrepareAttributeSet(params, attrs), message)\n}",
"func Sign(key *rsa.PrivateKey, message []byte) ([]byte, error) {\n\t// sha256 hash the message\n\thashed := sha256.Sum256(message)\n\t// sign the hash\n\tsignature, err := rsa.SignPKCS1v15(\n\t\trand.Reader, key, crypto.SHA256, hashed[:],\n\t)\n\t// handle error\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to sign message: \")\n\t}\n\treturn signature, nil\n}",
"func (p *KeyPair) Sign(message []byte) ([]byte, error) {\n\tprivateKey := p.ToEcdsa()\n\thash := sha256.Sum256(message)\n\tr, s, err := ecdsa.Sign(rand.Reader, privateKey, hash[:])\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparams := privateKey.Curve.Params()\n\tcurveOrderByteSize := params.P.BitLen() / 8\n\trBytes, sBytes := r.Bytes(), s.Bytes()\n\tsignature := make([]byte, curveOrderByteSize*2)\n\tcopy(signature[curveOrderByteSize-len(rBytes):], rBytes)\n\tcopy(signature[curveOrderByteSize*2-len(sBytes):], sBytes)\n\n\treturn signature, nil\n}",
"func Sign(hashedMessage []byte) ([]byte, error) {\n\tpk, err := privateKey()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Checking message signature.\n\tvar signature []byte\n\tif signature, err = rsa.SignPKCS1v15(rand.Reader, pk, crypto.SHA256, hashedMessage); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn signature, nil\n}",
"func (k *PrivateKeySECP256K1R) Sign(msg []byte) ([]byte, error) {\n\treturn k.SignHash(hashing.ComputeHash256(msg))\n}",
"func (k *onChainPrivateKey) Sign(msg []byte) (signature []byte, err error) {\n\tsig, err := crypto.Sign(onChainHash(msg), (*ecdsa.PrivateKey)(k))\n\treturn sig, err\n}",
"func (k *Ed25519PrivateKey) Sign(msg []byte) ([]byte, error) {\n\treturn ed25519.Sign(k.k, msg), nil\n}",
"func (_Ethdkg *EthdkgSession) Sign(message []byte, privK *big.Int) ([2]*big.Int, error) {\n\treturn _Ethdkg.Contract.Sign(&_Ethdkg.CallOpts, message, privK)\n}",
"func (k *OnchainPrivateKey) Sign(msg []byte) (signature []byte, err error) {\n\tsig, err := crypto.Sign(onChainHash(msg), (*ecdsa.PrivateKey)(k))\n\treturn sig, err\n}",
"func (_Ethdkg *EthdkgCallerSession) Sign(message []byte, privK *big.Int) ([2]*big.Int, error) {\n\treturn _Ethdkg.Contract.Sign(&_Ethdkg.CallOpts, message, privK)\n}",
"func (l LocalIdentity) Sign(message []byte) ed25519.Signature {\n\treturn l.privateKey.Sign(message)\n}",
"func (sk *PrivKey) Sign(msg []byte) ([]byte, error) {\n\tdigest := sha256.Sum256(msg)\n\treturn sk.PrivateKey.Sign(rand.Reader, digest[:], nil)\n}",
"func Sign(message string) (string, error) {\n\n\t// TODO check length on string\n\t// Sign\n\tvar h hash.Hash\n\th = sha256.New()\n\n\tio.WriteString(h, message)\n\tsignhash := h.Sum(nil)\n\n\trsaKey, err := loadPrivateKeyFromFile()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\trsaSignature, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, crypto.SHA256, signhash)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn \"\", err\n\t}\n\n\tsEnc := base64.StdEncoding.EncodeToString(rsaSignature)\n\treturn sEnc, nil\n}",
"func Sign(opriv, rpriv *btcec.PrivateKey, m []byte) []byte {\n\tR := rpriv.PubKey()\n\tk := rpriv.D\n\tv := opriv.D\n\n\t// h(R,m) * v\n\thv := new(big.Int).Mul(hash(R, m), v)\n\n\t// k - h(R,m) * v\n\ts := new(big.Int).Sub(k, hv)\n\n\t// s mod N\n\ts = new(big.Int).Mod(s, btcec.S256().N)\n\n\treturn s.Bytes()\n}",
"func (p *PGP) Sign(message []byte) ([]byte, error) {\n\twriter := new(bytes.Buffer)\n\treader := bytes.NewReader(message)\n\terr := openpgp.ArmoredDetachSign(writer, p.entity, reader, nil)\n\tif err != nil {\n\t\treturn []byte{}, fmt.Errorf(\"cannot sign message: %s\", err)\n\t}\n\treturn writer.Bytes(), nil\n}",
"func (privKey PrivKeyEd25519) Sign(msg []byte) ([]byte, error) {\n\tprivKeyBytes := [64]byte(privKey)\n\tsignatureBytes := Sign(&privKeyBytes, msg)\n\treturn signatureBytes[:], nil\n}",
"func SignPSS(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Hash, hashed []byte, opts *rsa.PSSOptions,) ([]byte, error)",
"func (_BondedECDSAKeep *BondedECDSAKeepTransactor) Sign(opts *bind.TransactOpts, _digest [32]byte) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.contract.Transact(opts, \"sign\", _digest)\n}",
"func sign(p *pkcs11.Ctx, session pkcs11.SessionHandle, objectHandle pkcs11.ObjectHandle, payload []byte, sigAlgorithm data.SigAlgorithm) ([]byte, error) {\n\n\tvar (\n\t\tmechanism *pkcs11.Mechanism\n\t\tdigest []byte\n\t)\n\n\tsha256Prefix := []byte{0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20}\n\n\thash := sha256.Sum256(payload)\n\n\tif sigAlgorithm == data.ECDSASignature {\n\t\tmechanism = pkcs11.NewMechanism(pkcs11.CKM_ECDSA, nil)\n\t\tdigest = hash[:]\n\t} else {\n\t\tmechanism = pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS, nil)\n\t\tdigest = append(sha256Prefix[:], hash[:]...)\n\t}\n\n\tvar sig []byte\n\terr := p.SignInit(\n\t\tsession, []*pkcs11.Mechanism{mechanism}, objectHandle)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsig, err = p.Sign(session, digest[:])\n\tif err != nil {\n\t\tlogrus.Debugf(\"Error while signing: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tif sig == nil {\n\t\treturn nil, errors.New(\"Failed to create signature\")\n\t}\n\treturn sig[:], nil\n}",
"func Sign(m string, kp *Keypair) *Signature {\n\treturn genSignature(m, kp.private)\n}",
"func Sign(suite suites.Suite, x kyber.Scalar, msg []byte) ([]byte, error) {\n\tHM := hashToPoint(suite, msg)\n\txHM := HM.Mul(x, HM)\n\ts, err := xHM.MarshalBinary()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s, nil\n}",
"func Sign(msg []byte, seckey []byte) ([]byte, error) {\n\treturn secp256k1.Sign(msg, seckey)\n}",
"func (signer *Signer) Sign(msg []byte) ([]byte, error) {\n\tif signer.privateKey == nil {\n\t\treturn nil, errors.New(\"private key not provided\")\n\t}\n\n\thasher := getHasher(signer.privateKey.Curve).New()\n\n\t_, err := hasher.Write(msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thashed := hasher.Sum(nil)\n\n\tr, s, err := ecdsa.Sign(rand.Reader, signer.privateKey, hashed)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcurveBits := signer.privateKey.Curve.Params().BitSize\n\n\tconst bitsInByte = 8\n\tkeyBytes := curveBits / bitsInByte\n\tif curveBits%bitsInByte > 0 {\n\t\tkeyBytes++\n\t}\n\n\treturn append(copyPadded(r.Bytes(), keyBytes), copyPadded(s.Bytes(), keyBytes)...), nil\n}",
"func (kb *Keybase) Sign(name, passphrase string, msg []byte) ([]byte, crypto.PubKey, error) {\n\thash := sha256.Sum256([]byte(name + \":\" + passphrase))\n\tkb.mx.Lock()\n\tpriv, ok := kb.privKeysCache[hash]\n\tif !ok {\n\t\tvar err error\n\t\tif priv, err = kb.kb.ExportPrivateKeyObject(name, passphrase); err != nil {\n\t\t\tkb.mx.Unlock()\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tkb.privKeysCache[hash] = priv\n\t}\n\tkb.mx.Unlock()\n\tsig, err := priv.Sign(msg)\n\treturn sig, priv.PubKey(), err\n}",
"func (p *ProtocolTECDSA) Sign(message *big.Int) Signature {\n\ttps := p.presig[0]\n\tp.presig = p.presig[1:]\n\n\tkKey, _ := tps.k.Exp()\n\tR, _ := kKey.RevealExp()\n\tr := hash(R)\n\n\ttau, _ := tps.tau.Reveal()\n\n\ta, b := message.Div(message, tau), r.Div(r, tau)\n\tsTDSecret := p.lin(a, tps.rho, b, tps.eta)\n\n\ts, _ := sTDSecret.Reveal()\n\n\treturn Signature{r, s}\n}",
"func Sign(message string, privateKeyString string, signature *Signed) error {\n\tprivateKey, err := PemDecodePrivate([]byte(privateKeyString))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch privateKey.(type) {\n\tcase *rsa.PrivateKey:\n\t\tsignature.Mode = SignatureModeSha256Rsa\n\tcase *ecdsa.PrivateKey:\n\t\tsignature.Mode = SignatureModeSha256Ecdsa\n\t}\n\tsig, err := SignMessage([]byte(message), privateKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsignature.Message = message\n\tsignature.Signature = string(Base64Encode(sig))\n\treturn nil\n}",
"func (p *PrivateKey) Sign(hash []byte) (*Signature, error) {\n\treturn signRFC6979(p, hash)\n}",
"func (m EncMessage) Sign(k []byte) error {\n\treturn errors.New(\"Sign method must be overridden\")\n}",
"func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) {\n\t// r, s, err := Sign(priv, msg)\n\tr, s, err := SM2Sign(priv, msg, nil)\n\tfmt.Println(\"msg:\",msg)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn asn1.Marshal(sm2Signature{r, s})\n}",
"func Sign(rand io.Reader, p PrivateKey, message []byte) (signature []byte, err error) {\n\tif l := len(p); l != PrivateKeySize {\n\t\tpanic(\"x25519: bad private key length: \" + strconv.Itoa(l))\n\t}\n\n\tpub, priv, err := p.calculateKeyPair()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trandom := make([]byte, 64)\n\tif _, err := io.ReadFull(rand, random); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Using same prefix in libsignal-protocol-c implementation, but can be any\n\t// 32 byte prefix. Golang's ed25519 implementation uses:\n\t//\n\t// ph := sha512.Sum512(a.Bytes())\n\t// prefix := ph[32:]\n\tprefix := [32]byte{\n\t\t0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t}\n\n\trh := sha512.New()\n\trh.Write(prefix[:])\n\trh.Write(priv.Bytes())\n\trh.Write(message)\n\trh.Write(random)\n\trDigest := make([]byte, 0, sha512.Size)\n\trDigest = rh.Sum(rDigest)\n\n\tr, err := edwards25519.NewScalar().SetUniformBytes(rDigest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tR := (&edwards25519.Point{}).ScalarBaseMult(r) //nolint:gocritic // variable names match crypto formulae docs\n\n\thh := sha512.New()\n\thh.Write(R.Bytes())\n\thh.Write(pub)\n\thh.Write(message)\n\thDigest := make([]byte, 0, sha512.Size)\n\thDigest = hh.Sum(hDigest)\n\th, err := edwards25519.NewScalar().SetUniformBytes(hDigest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := (&edwards25519.Scalar{}).Add(r, h.Multiply(h, priv))\n\n\tsig := make([]byte, 64)\n\tcopy(sig[:32], R.Bytes())\n\tcopy(sig[32:], s.Bytes())\n\treturn sig, nil\n}",
"func Sign(msg []byte, privkey []byte, sigType SigType) (*crypto.Signature, error) {\n\tsv, ok := sigs[sigType]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"cannot sign message with signature of unsupported type: %v\", sigType)\n\t}\n\n\tsb, err := sv.Sign(privkey, msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &crypto.Signature{\n\t\tType: sigType,\n\t\tData: sb,\n\t}, nil\n}",
"func (_Ethdkg *EthdkgCaller) Sign(opts *bind.CallOpts, message []byte, privK *big.Int) ([2]*big.Int, error) {\n\tvar (\n\t\tret0 = new([2]*big.Int)\n\t)\n\tout := ret0\n\terr := _Ethdkg.contract.Call(opts, out, \"Sign\", message, privK)\n\treturn *ret0, err\n}",
"func (sk *opensslPrivateKey) Sign(message []byte) ([]byte, error) {\n\treturn sk.key.SignPKCS1v15(openssl.SHA256_Method, message)\n}",
"func (p *PrivateKey) Sign(mesg string) string {\n\tvar enc, m big.Int\n\tsetBytesReverse(&m, []byte(mesg))\n\tenc.Exp(&m, p.keyD, p.keyN)\n\treturn intToBase64(&enc)\n}",
"func (b *BtcWallet) SignMessage(keyLoc keychain.KeyLocator,\n\tmsg []byte, doubleHash bool) (*ecdsa.Signature, error) {\n\n\t// First attempt to fetch the private key which corresponds to the\n\t// specified public key.\n\tprivKey, err := b.fetchPrivKey(&keychain.KeyDescriptor{\n\t\tKeyLocator: keyLoc,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Double hash and sign the data.\n\tvar msgDigest []byte\n\tif doubleHash {\n\t\tmsgDigest = chainhash.DoubleHashB(msg)\n\t} else {\n\t\tmsgDigest = chainhash.HashB(msg)\n\t}\n\treturn ecdsa.Sign(privKey, msgDigest), nil\n}",
"func (m *Message) Sign(privateKey *ecdsa.PrivateKey) error {\n\tif m == nil {\n\t\treturn errors.New(\"nil message\")\n\t}\n\trawData := getRawMessageBody(&m.Body)\n\tsignature, err := gw_common.SignData(privateKey, rawData...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm.Signature = utils.StringToHex(string(signature))\n\tm.Body.Sender = strings.ToLower(crypto.PubkeyToAddress(privateKey.PublicKey).Hex())\n\treturn nil\n}",
"func Sign(h hash.Hashable) []byte {\n\tbuf := io.NewBufBinWriter()\n\tfor i := 0; i < 3; i++ {\n\t\tpKey := PrivateKey(i)\n\t\tsig := pKey.SignHashable(uint32(Network()), h)\n\t\tif len(sig) != 64 {\n\t\t\tpanic(\"wrong signature length\")\n\t\t}\n\t\temit.Bytes(buf.BinWriter, sig)\n\t}\n\treturn buf.Bytes()\n}",
"func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error)",
"func SignMessage(privKey *ecdsa.PrivateKey, pack MessagePacker) []byte {\n\tdata := pack.Pack()\n\tsig, err := utils.SignData(privKey, data)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"SignMessage error %s\", err))\n\t}\n\treturn sig\n}",
"func (sn *Signer) Sign(d *ristretto255.Scalar, q *ristretto255.Element) ([]byte, error) {\n\tbuf := make([]byte, SignatureSize)\n\n\t// Add the signer's public key to the protocol.\n\tsn.schnorr.AD(q.Encode(nil))\n\n\t// Clone the protocol.\n\tclone := sn.schnorr.Clone()\n\n\t// Key the clone with a random key. This hedges against differential attacks against purely\n\t// deterministic signature algorithms.\n\tif err := clone.KEYRand(internal.UniformBytestringSize); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Key the clone with the sender's private key. This hedges against randomness failures. The\n\t// protocol's state is already dependent on the message, making the reuse of ephemeral values\n\t// across messages impossible.\n\tclone.KEY(d.Encode(buf[:0]))\n\n\t// Derive an ephemeral key pair from the clone.\n\tr := clone.PRFScalar()\n\tR := ristretto255.NewElement().ScalarBaseMult(r)\n\n\t// Hash the ephemeral public key.\n\tsn.schnorr.AD(R.Encode(buf[:0]))\n\n\t// Extract a challenge scalar from the protocol state.\n\tc := sn.schnorr.PRFScalar()\n\n\t// Calculate the signature scalar.\n\ts := ristretto255.NewScalar().Multiply(d, c)\n\ts = s.Add(s, r)\n\n\t// Return the challenge and signature scalars.\n\treturn s.Encode(c.Encode(buf[:0])), nil\n}",
"func Sign(privateKey, publicKey, message []byte) []byte {\n\n\tvar privateKeyA [32]byte\n\tcopy(privateKeyA[:], privateKey) // we need this in an array later\n\tvar messageDigest, hramDigest [64]byte\n\n\th := sha512.New()\n\th.Write(privateKey[32:])\n\th.Write(message)\n\th.Sum(messageDigest[:0])\n\n\tvar messageDigestReduced [32]byte\n\tedwards25519.ScReduce(&messageDigestReduced, &messageDigest)\n\tvar R edwards25519.ExtendedGroupElement\n\tedwards25519.GeScalarMultBase(&R, &messageDigestReduced)\n\n\tvar encodedR [32]byte\n\tR.ToBytes(&encodedR)\n\n\th.Reset()\n\th.Write(encodedR[:])\n\th.Write(publicKey)\n\th.Write(message)\n\th.Sum(hramDigest[:0])\n\tvar hramDigestReduced [32]byte\n\tedwards25519.ScReduce(&hramDigestReduced, &hramDigest)\n\n\tvar s [32]byte\n\tedwards25519.ScMulAdd(&s, &hramDigestReduced, &privateKeyA, &messageDigestReduced)\n\n\tsignature := make([]byte, 64)\n\tcopy(signature[:], encodedR[:])\n\tcopy(signature[32:], s[:])\n\n\treturn signature\n}",
"func TmSign(publicKey PublicKey, privateKey PrivateKey, digest Digest) Seal { panic(\"\") }",
"func Sign(privateKey *[PrivateKeySize]byte, message []byte) *[SignatureSize]byte {\n\th := sha512.New()\n\th.Write(privateKey[:32])\n\n\tvar digest1, messageDigest, hramDigest [64]byte\n\tvar expandedSecretKey [32]byte\n\th.Sum(digest1[:0])\n\tcopy(expandedSecretKey[:], digest1[:])\n\texpandedSecretKey[0] &= 248\n\texpandedSecretKey[31] &= 63\n\texpandedSecretKey[31] |= 64\n\n\th.Reset()\n\th.Write(digest1[32:])\n\th.Write(message)\n\th.Sum(messageDigest[:0])\n\n\tvar messageDigestReduced [32]byte\n\tedwards25519.ScReduce(&messageDigestReduced, &messageDigest)\n\tvar R edwards25519.ExtendedGroupElement\n\tedwards25519.GeScalarMultBase(&R, &messageDigestReduced)\n\n\tvar encodedR [32]byte\n\tR.ToBytes(&encodedR)\n\n\th.Reset()\n\th.Write(encodedR[:])\n\th.Write(privateKey[32:])\n\th.Write(message)\n\th.Sum(hramDigest[:0])\n\tvar hramDigestReduced [32]byte\n\tedwards25519.ScReduce(&hramDigestReduced, &hramDigest)\n\n\tvar s [32]byte\n\tedwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)\n\n\tsignature := new([64]byte)\n\tcopy(signature[:], encodedR[:])\n\tcopy(signature[32:], s[:])\n\treturn signature\n}",
"func SignMessage(\n\tm *Message,\n\tsubject, reply string,\n\tsuite *AlgorithmSuite,\n\tsenderPrivKeyBytes []byte,\n) (*Message, error) {\n\tif m.Sig != \"\" {\n\t\treturn nil, errors.New(\"message is already signed\")\n\t}\n\n\tauthData, err := m.sigAuthData(subject, reply)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsenderPrivKey, err := ecPrivKey(suite.CurveBitSize, senderPrivKeyBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsig, err := signBytes(suite.DigestBitSize, authData, senderPrivKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsignedMsg := *m\n\tsignedMsg.Sig = base64Encode(sig)\n\n\treturn &signedMsg, nil\n}",
"func (*noSignHash) SignHash() {\n}",
"func (h *HmacSha256) Sign(msg string, secret string) ([]byte, error) {\n\tmac := hmac.New(sha256.New, []byte(secret))\n\tif _, err := mac.Write([]byte(msg)); err != nil {\n\t\treturn nil, err\n\t}\n\treturn mac.Sum(nil), nil\n}",
"func (v *Vault) SignMessage(ctx context.Context, message []byte, key vault.StoredKey) (crypt.Signature, error) {\n\tdigest := crypt.DigestFunc(message)\n\tazureKey, ok := key.(*azureKey)\n\tif !ok {\n\t\treturn nil, errors.Wrap(fmt.Errorf(\"(Azure/%s): not a Azure key: %T\", v.config.Vault, key), http.StatusBadRequest)\n\t}\n\n\tvar req signRequest\n\tif req.Algorithm = algByCurve(azureKey.pub.Curve); req.Algorithm == \"\" {\n\t\treturn nil, errors.Wrap(fmt.Errorf(\"(Azure/%s): can't find corresponding signature algorithm for %s curve\", v.config.Vault, azureKey.bundle.Key.Curve), http.StatusBadRequest)\n\t}\n\treq.Value = base64.RawURLEncoding.EncodeToString(digest[:])\n\n\tu, err := v.makeURL(azureKey.bundle.Key.KeyID, \"/sign\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"(Azure/%s): %w\", v.config.Vault, err)\n\t}\n\n\tr, err := json.Marshal(&req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"(Azure/%s): %w\", v.config.Vault, err)\n\t}\n\n\tvar res keyOperationResult\n\tstatus, err := v.request(ctx, v.client, \"POST\", u, bytes.NewReader(r), &res)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"(Azure/%s): %w\", v.config.Vault, err)\n\t\tif status != 0 {\n\t\t\terr = errors.Wrap(err, status)\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tsig, err := base64.RawURLEncoding.DecodeString(res.Value)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"(Azure/%s): %w\", v.config.Vault, err)\n\t}\n\n\tbyteLen := (azureKey.pub.Params().BitSize + 7) >> 3\n\tif len(sig) != byteLen*2 {\n\t\treturn nil, fmt.Errorf(\"(Azure/%s): invalid signature size %d\", v.config.Vault, len(sig))\n\t}\n\treturn &crypt.ECDSASignature{\n\t\tR: new(big.Int).SetBytes(sig[:byteLen]),\n\t\tS: new(big.Int).SetBytes(sig[byteLen:]),\n\t\tCurve: azureKey.pub.Curve,\n\t}, nil\n}",
"func (k *PrivateKey) Sign(hash []byte) ([]byte, error) {\n\treturn Sign(hash, k.seckey)\n}",
"func PrivateKeySign(priv *rsa.PrivateKey, rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error)",
"func (kh *KeyHandler) Sign(buf []byte) ([]byte, cop.Error) {\n\treturn make([]byte, 0), nil\n}",
"func (k *Keypair) Sign(hash []byte) ([]byte, error) {\n\n\tprKeyDecoded, err := base58.DecodeToBig(k.Private)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpuKeyDecoded, _ := base58.DecodeToBig(k.Public)\n\n\tpub := splitBig(puKeyDecoded, 2)\n\tx, y := pub[0], pub[1]\n\n\tkey := ecdsa.PrivateKey{\n\t\tecdsa.PublicKey{\n\t\t\telliptic.P224(),\n\t\t\tx,\n\t\t\ty,\n\t\t},\n\t\tprKeyDecoded,\n\t}\n\n\tr, s, _ := ecdsa.Sign(rand.Reader, &key, hash)\n\n\treturn base58.EncodeBig([]byte{}, bigJoin(KEY_SIZE, r, s)), nil\n}",
"func Core_Sign(SIG []byte, M []byte, S []byte) int {\n\tD := bls_hash_to_point(M)\n\ts := FromBytes(S)\n\tD = G1mul(D, s)\n\tD.ToBytes(SIG, true)\n\treturn BLS_OK\n}",
"func (dcr *ExchangeWallet) SignMessage(coin asset.Coin, msg dex.Bytes) (pubkeys, sigs []dex.Bytes, err error) {\n\top, err := dcr.convertCoin(coin)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"error converting coin: %w\", err)\n\t}\n\n\t// First check if we have the funding coin cached. If so, grab the address\n\t// from there.\n\tdcr.fundingMtx.RLock()\n\tfCoin, found := dcr.fundingCoins[op.pt]\n\tdcr.fundingMtx.RUnlock()\n\tvar addr string\n\tif found {\n\t\taddr = fCoin.addr\n\t} else {\n\t\t// Check if we can get the address from wallet.UnspentOutput.\n\t\t// op.tree may be wire.TxTreeUnknown but wallet.UnspentOutput is\n\t\t// able to deal with that and find the actual tree.\n\t\ttxOut, err := dcr.wallet.UnspentOutput(dcr.ctx, op.txHash(), op.vout(), op.tree)\n\t\tif err != nil {\n\t\t\tdcr.log.Errorf(\"gettxout error for SignMessage coin %s: %v\", op, err)\n\t\t} else if txOut != nil {\n\t\t\tif len(txOut.Addresses) != 1 {\n\t\t\t\t// TODO: SignMessage is usually called for coins selected by\n\t\t\t\t// FundOrder. Should consider rejecting/ignoring multisig ops\n\t\t\t\t// in FundOrder to prevent this SignMessage error from killing\n\t\t\t\t// order placements.\n\t\t\t\treturn nil, nil, fmt.Errorf(\"multi-sig not supported\")\n\t\t\t}\n\t\t\taddr = txOut.Addresses[0]\n\t\t\tfound = true\n\t\t}\n\t}\n\t// Could also try the gettransaction endpoint, which is supposed to return\n\t// information about wallet transactions, but which (I think?) doesn't list\n\t// ssgen outputs.\n\tif !found {\n\t\treturn nil, nil, fmt.Errorf(\"did not locate coin %s. 
is this a coin returned from Fund?\", coin)\n\t}\n\taddress, err := stdaddr.DecodeAddress(addr, dcr.chainParams)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"error decoding address: %w\", err)\n\t}\n\tpriv, err := dcr.wallet.AddressPrivKey(dcr.ctx, address)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer priv.Zero()\n\thash := chainhash.HashB(msg) // legacy servers will not accept this signature!\n\tsignature := ecdsa.Sign(priv, hash)\n\tpubkeys = append(pubkeys, priv.PubKey().SerializeCompressed())\n\tsigs = append(sigs, signature.Serialize()) // DER format\n\treturn pubkeys, sigs, nil\n}",
"func Sign(msg Signable, key []byte) ([]byte, error) {\n\tmac := hmac.New(msg.HashFunc(), key)\n\tmsgBytes, err := msg.Message()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = mac.Write(msgBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn mac.Sum(nil), nil\n}",
"func Sign(text string, priv *ecdsa.PrivateKey) ([]byte, *big.Int, *big.Int, []byte) {\n\tvar h hash.Hash\n\th = md5.New()\n\tr := big.NewInt(0)\n\ts := big.NewInt(0)\n\n\tio.WriteString(h, text)\n\tsignhash := h.Sum(nil)\n\n\tr, s, err := ecdsa.Sign(rand.Reader, priv, signhash)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tsignature := r.Bytes()\n\tsignature = append(signature, s.Bytes()...)\n\treturn signature, r, s, signhash\n}",
"func (dcr *ExchangeWallet) SignMessage(coin asset.Coin, msg dex.Bytes) (pubkeys, sigs []dex.Bytes, err error) {\n\top, err := dcr.convertCoin(coin)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"error converting coin: %w\", err)\n\t}\n\n\t// First check if we have the funding coin cached. If so, grab the address\n\t// from there.\n\tdcr.fundingMtx.RLock()\n\tfCoin, found := dcr.fundingCoins[op.pt]\n\tdcr.fundingMtx.RUnlock()\n\tvar addr string\n\tif found {\n\t\taddr = fCoin.addr\n\t} else {\n\t\t// Check if we can get the address from gettxout.\n\t\ttxOut, err := dcr.node.GetTxOut(dcr.ctx, op.txHash(), op.vout(), true)\n\t\tif err == nil && txOut != nil {\n\t\t\taddrs := txOut.ScriptPubKey.Addresses\n\t\t\tif len(addrs) != 1 {\n\t\t\t\t// TODO: SignMessage is usually called for coins selected by\n\t\t\t\t// FundOrder. Should consider rejecting/ignoring multisig ops\n\t\t\t\t// in FundOrder to prevent this SignMessage error from killing\n\t\t\t\t// order placements.\n\t\t\t\treturn nil, nil, fmt.Errorf(\"multi-sig not supported\")\n\t\t\t}\n\t\t\taddr = addrs[0]\n\t\t\tfound = true\n\t\t}\n\t}\n\t// Could also try the gettransaction endpoint, which is supposed to return\n\t// information about wallet transactions, but which (I think?) doesn't list\n\t// ssgen outputs.\n\tif !found {\n\t\treturn nil, nil, fmt.Errorf(\"did not locate coin %s. is this a coin returned from Fund?\", coin)\n\t}\n\taddress, err := dcrutil.DecodeAddress(addr, chainParams)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"error decoding address: %w\", err)\n\t}\n\tpriv, pub, err := dcr.getKeys(address)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tsignature := ecdsa.Sign(priv, msg)\n\tpubkeys = append(pubkeys, pub.SerializeCompressed())\n\tsigs = append(sigs, signature.Serialize())\n\treturn pubkeys, sigs, nil\n}",
"func Sign(privateKey PrivateKey, message []byte) []byte {\n\tif l := len(privateKey); l != PrivateKeySize {\n\t\tpanic(\"ed25519: bad private key length: \" + strconv.Itoa(l))\n\t}\n\n\th := sha512.New()\n\th.Write(privateKey[:32])\n\n\tvar digest1, messageDigest, hramDigest [64]byte\n\tvar expandedSecretKey [32]byte\n\th.Sum(digest1[:0])\n\tcopy(expandedSecretKey[:], digest1[:])\n\texpandedSecretKey[0] &= 248\n\texpandedSecretKey[31] &= 63\n\texpandedSecretKey[31] |= 64\n\n\th.Reset()\n\th.Write(digest1[32:])\n\th.Write(message)\n\th.Sum(messageDigest[:0])\n\n\tvar messageDigestReduced [32]byte\n\tedwards25519.ScReduce(&messageDigestReduced, &messageDigest)\n\tvar R edwards25519.ExtendedGroupElement\n\tedwards25519.GeScalarMultBase(&R, &messageDigestReduced)\n\n\tvar encodedR [32]byte\n\tR.ToBytes(&encodedR)\n\n\th.Reset()\n\th.Write(encodedR[:])\n\th.Write(privateKey[32:])\n\th.Write(message)\n\th.Sum(hramDigest[:0])\n\tvar hramDigestReduced [32]byte\n\tedwards25519.ScReduce(&hramDigestReduced, &hramDigest)\n\n\tvar s [32]byte\n\tedwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)\n\n\tsignature := make([]byte, SignatureSize)\n\tcopy(signature[:], encodedR[:])\n\tcopy(signature[32:], s[:])\n\n\treturn signature\n}",
"func (d *identityManager) Sign(message []byte) ([]byte, error) {\n\treturn Sign(d.key.PrivateKey, message)\n}",
"func (_BondedECDSAKeep *BondedECDSAKeepTransactorSession) Sign(_digest [32]byte) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.Sign(&_BondedECDSAKeep.TransactOpts, _digest)\n}",
"func Sign(message, dir string) ([]byte, error) {\n\tvar signature []byte\n\n\tpemBytes, err := ReadPemFile(dir)\n\tif err != nil {\n\t\treturn signature, err\n\t}\n\n\tprivateKey, err := x509.ParsePKCS1PrivateKey(pemBytes)\n\tif err != nil {\n\t\treturn signature, err\n\t}\n\n\thashed := sha256.Sum256([]byte(message))\n\trng := rand.Reader\n\treturn rsa.SignPKCS1v15(rng, privateKey, crypto.SHA256, hashed[:])\n}",
"func (addr *Address) Sign(privKey *id.PrivKey) error {\n\tbuf := make([]byte, surge.SizeHintU8+surge.SizeHintString(addr.Value)+surge.SizeHintU64)\n\treturn addr.SignWithBuffer(privKey, buf)\n}",
"func Sign(message []byte, privateKey PrivateKey) []byte {\n\tsig := ed25519.Sign(ed25519.PrivateKey(privateKey), message)\n\tresponse := make([]byte, SignatureSize+len(message))\n\tcopy(response[:SignatureSize], sig)\n\tcopy(response[SignatureSize:], message)\n\treturn response\n}",
"func (p *privateKey) Sign(data []byte) ([]byte, error) {\n\treturn p.PrivateKey.Sign(data), nil\n}",
"func (t *Crypto) Sign(msg []byte, kh interface{}) ([]byte, error) {\n\tkeyHandle, ok := kh.(*keyset.Handle)\n\tif !ok {\n\t\treturn nil, errBadKeyHandleFormat\n\t}\n\n\tsigner, err := signature.NewSigner(keyHandle)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create new signer: %w\", err)\n\t}\n\n\ts, err := signer.Sign(msg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"sign msg: %w\", err)\n\t}\n\n\treturn s, nil\n}",
"func (r *RPCKeyRing) SignMessage(keyLoc keychain.KeyLocator,\n\tmsg []byte, doubleHash bool) (*btcec.Signature, error) {\n\n\tctxt, cancel := context.WithTimeout(context.Background(), r.rpcTimeout)\n\tdefer cancel()\n\n\tresp, err := r.signerClient.SignMessage(ctxt, &signrpc.SignMessageReq{\n\t\tMsg: msg,\n\t\tKeyLoc: &signrpc.KeyLocator{\n\t\t\tKeyFamily: int32(keyLoc.Family),\n\t\t\tKeyIndex: int32(keyLoc.Index),\n\t\t},\n\t\tDoubleHash: doubleHash,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\twireSig, err := lnwire.NewSigFromRawSignature(resp.Signature)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error parsing raw signature: %v\", err)\n\t}\n\treturn wireSig.ToSignature()\n}",
"func (validator *validatorImpl) Sign(msg []byte) ([]byte, error) {\n\treturn validator.signWithEnrollmentKey(msg)\n}",
"func (t *Crypto) Sign(msg []byte, kh interface{}) ([]byte, error) {\n\tkeyHandle, ok := kh.(*keyset.Handle)\n\tif !ok {\n\t\treturn nil, errors.New(\"bad key handle format\")\n\t}\n\n\tsigner, err := signature.NewSigner(keyHandle)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create new signer: %w\", err)\n\t}\n\n\ts, err := signer.Sign(msg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"sign msg: %w\", err)\n\t}\n\n\treturn s, nil\n}",
"func (c *CIDOffer) Sign(privKey *fcrcrypto.KeyPair, keyVer *fcrcrypto.KeyVersion) error {\n\traw, err := c.MarshalToSign()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsig, err := fcrcrypto.SignMessage(privKey, keyVer, raw)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.signature = sig\n\treturn nil\n}",
"func Sign(priv *ecdsa.PrivateKey, hash []byte) (r, s *big.Int, err error) {\n\treturn ecdsa.Sign(rand.Reader, priv, hash)\n}",
"func (ms *MemoizeSigner) Sign(msg []byte) ([]byte, error) {\n\tsig, isInMemory := ms.lookup(msg)\n\tif isInMemory {\n\t\treturn sig, nil\n\t}\n\tsig, err := ms.sign(msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tms.memorize(msg, sig)\n\treturn sig, nil\n}",
"func (_BondedECDSAKeep *BondedECDSAKeepSession) Sign(_digest [32]byte) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.Sign(&_BondedECDSAKeep.TransactOpts, _digest)\n}",
"func (r *RSA) Sign(msg string) (string, error) {\n\tif r.PrivateKey == nil {\n\t\treturn \"\", errors.New(\"missing private key\")\n\t}\n\n\trng := rand.Reader\n\thashed := sha256.Sum256([]byte(msg))\n\n\tsignature, err := rsa.SignPKCS1v15(rng, r.PrivateKey, crypto.SHA256, hashed[:])\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn r.encode(signature), err\n}",
"func (en *clearsignEncoder) signMessage(_ context.Context, w io.Writer, r io.Reader) (crypto.Hash, error) {\n\tplaintext, err := clearsign.Encode(w, en.e.PrivateKey, en.config)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer plaintext.Close()\n\n\t_, err = io.Copy(plaintext, r)\n\treturn en.config.Hash(), err\n}",
"func Sign(msg []byte, seckey []byte) []byte {\n\tif len(seckey) != 32 {\n\t\tlog.Panic(\"Sign, Invalid seckey length\")\n\t}\n\tif secp.SeckeyIsValid(seckey) != 1 {\n\t\tlog.Panic(\"Attempting to sign with invalid seckey\")\n\t}\n\tif len(msg) == 0 {\n\t\tlog.Panic(\"Sign, message nil\")\n\t}\n\tif len(msg) != 32 {\n\t\tlog.Panic(\"Sign, message must be 32 bytes\")\n\t}\n\n\tnonce := newSigningNonce()\n\tsig := make([]byte, 65)\n\tvar recid int // recovery byte, used to recover pubkey from sig\n\n\tvar cSig secp.Signature\n\n\tvar seckey1 secp.Number\n\tvar msg1 secp.Number\n\n\tseckey1.SetBytes(seckey)\n\tmsg1.SetBytes(msg)\n\n\tif msg1.Sign() == 0 {\n\t\tlog.Panic(\"Sign: message is 0\")\n\t}\n\n\tret := cSig.Sign(&seckey1, &msg1, &nonce, &recid)\n\n\tif ret != 1 {\n\t\tlog.Panic(\"Secp25k1-go, Sign, signature operation failed\")\n\t}\n\n\tsigBytes := cSig.Bytes()\n\tfor i := 0; i < 64; i++ {\n\t\tsig[i] = sigBytes[i]\n\t}\n\tif len(sigBytes) != 64 {\n\t\tlog.Panicf(\"Invalid signature byte count: %d\", len(sigBytes))\n\t}\n\tsig[64] = byte(recid)\n\n\tif recid > 4 {\n\t\tlog.Panic(\"invalid recovery id\")\n\t}\n\n\treturn sig\n}",
"func (kp *FromAddress) Sign(input []byte) ([]byte, error) {\n\treturn nil, ErrCannotSign\n}",
"func (p *PrivateKey) Sign(data []byte) []byte {\n\tvar digest = sha256.Sum256(data)\n\n\treturn p.SignHash(digest)\n}",
"func Sign(hash, privateKey []byte) (sig []byte, err error) {\n\tif len(hash) != 32 {\n\t\treturn nil, fmt.Errorf(\"hash is required to be exactly 32 bytes (%d)\", len(hash))\n\t}\n\tif len(privateKey) != 32 {\n\t\treturn nil, fmt.Errorf(\"hex private key is required to be exactly 64 bytes (%d)\", len(privateKey))\n\t}\n\tkey, err := ToSM2(privateKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpubBytes := SM2PubBytes(&key.PublicKey)\n\n\tr, s, err := SM2Sign(hash, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsig = make([]byte, 128)\n\tcopy(sig[32-len(r.Bytes()):], r.Bytes())\n\tcopy(sig[64-len(s.Bytes()):], s.Bytes())\n\tcopy(sig[128-len(pubBytes):], pubBytes)\n\n\treturn sig, nil\n}",
"func (bbs *BBSG2Pub) Sign(messages [][]byte, privKeyBytes []byte) ([]byte, error) {\n\tprivKey, err := UnmarshalPrivateKey(privKeyBytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshal private key: %w\", err)\n\t}\n\n\tif len(messages) == 0 {\n\t\treturn nil, errors.New(\"messages are not defined\")\n\t}\n\n\treturn bbs.SignWithKey(messages, privKey)\n}",
"func Sign(params Params, key string) string {\n\tsort.Sort(params)\n\tpreSignWithKey := params.ToQueryString() + \"&key=\" + key\n\treturn fmt.Sprintf(\"%X\", md5.Sum([]byte(preSignWithKey)))\n}",
"func Sign(params Params, key string) string {\n\tsort.Sort(params)\n\tpreSignWithKey := params.ToQueryString() + \"&key=\" + key\n\treturn fmt.Sprintf(\"%X\", md5.Sum([]byte(preSignWithKey)))\n}",
"func Sign(sk *ecdsa.PrivateKey, h []byte) (string, error) {\n\tsigB, err := ecdsa.SignASN1(rand.Reader, sk, h)\n\treturn hex.EncodeToString(sigB), err\n}",
"func (p *Payload) Sign(key []byte) (signature [32]byte) {\n\tsignature = sha256.Sum256(append(p.message[:], key[:]...))\n\tp.signature = signature\n\treturn\n}",
"func sign(privateKey *rsa.PrivateKey, data []byte) ([]byte, error) {\n\th := sha256.New()\n\th.Write(data)\n\td := h.Sum(nil)\n\treturn rsa.SignPKCS1v15(rand.Reader, privateKey, crypto.SHA256, d)\n}",
"func (k *EdX25519Key) Sign(b []byte) []byte {\n\treturn sign.Sign(nil, b, k.privateKey)\n}",
"func SignPrecomputed(s *big.Int, params *Params, key *PrivateKey, attrs AttributeList, precomputed *PreparedAttributeList, message *big.Int) (*Signature, error) {\n\tsignature := new(Signature)\n\n\t// Randomly choose s in Zp\n\tif s == nil {\n\t\tvar err error\n\t\ts, err = RandomInZp(rand.Reader)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tsignature.A0 = new(bn256.G1).ScalarMult(key.BSig, message)\n\tsignature.A0.Add(signature.A0, key.A0)\n\tsignature.A1 = new(bn256.G2).ScalarMult(params.G, s)\n\tsignature.A1.Add(signature.A1, key.A1)\n\n\tprodexp := new(bn256.G1).ScalarMult(params.HSig, message)\n\tprodexp.Add(prodexp, (*bn256.G1)(precomputed))\n\tsignature.A0.Add(signature.A0, new(bn256.G1).ScalarMult(prodexp, s))\n\n\t// In case the ATTRS parameter is more specialized than the provided key\n\tif attrs != nil {\n\t\tfor attrIndex, idx := range key.FreeMap {\n\t\t\tif attr, ok := attrs[attrIndex]; ok {\n\t\t\t\tif attr != nil {\n\t\t\t\t\tattrTerm := new(bn256.G1).Set(key.B[idx])\n\t\t\t\t\tattrTerm.ScalarMult(attrTerm, attr)\n\t\t\t\t\tsignature.A0.Add(signature.A0, attrTerm)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn signature, nil\n}",
"func (s NativeSigner) Sign(data []byte) ([]byte, error) {\n\tsignedData := bytes.NewBuffer(data)\n\tsignature := new(bytes.Buffer)\n\tif err := openpgp.DetachSign(signature, (*openpgp.Entity)(&s), signedData, nil); err != nil {\n\t\treturn nil, err\n\t}\n\treturn signature.Bytes(), nil\n}",
"func (s *SigningIdentity) Sign(reader io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) {\n\tswitch pk := s.PrivateKey.(type) {\n\tcase *ecdsa.PrivateKey:\n\t\trr, ss, err := ecdsa.Sign(reader, pk, digest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// ensure Low S signatures\n\t\tsig := toLowS(\n\t\t\tpk.PublicKey,\n\t\t\tecdsaSignature{\n\t\t\t\tR: rr,\n\t\t\t\tS: ss,\n\t\t\t},\n\t\t)\n\n\t\treturn asn1.Marshal(sig)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"signing with private key of type %T not supported\", pk)\n\t}\n}",
"func (btc *ExchangeWallet) SignMessage(coin asset.Coin, msg dex.Bytes) (pubkeys, sigs []dex.Bytes, err error) {\n\toutput, err := btc.convertCoin(coin)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"error converting coin: %v\", err)\n\t}\n\tbtc.fundingMtx.RLock()\n\tutxo := btc.fundingCoins[output.String()]\n\tbtc.fundingMtx.RUnlock()\n\tif utxo == nil {\n\t\treturn nil, nil, fmt.Errorf(\"no utxo found for %s\", output)\n\t}\n\tprivKey, err := btc.wallet.PrivKeyForAddress(utxo.address)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpk := privKey.PubKey()\n\tsig, err := privKey.Sign(msg)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpubkeys = append(pubkeys, pk.SerializeCompressed())\n\tsigs = append(sigs, sig.Serialize())\n\treturn\n}",
"func (s *Signer) Sign(msg []byte, nonce []byte) (t Token, err error) {\n\tif nonce == nil {\n\t\tif nonce, err = mknonce(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn s.sign(msg, nonce), nil\n}"
] | [
"0.67398196",
"0.6680818",
"0.6339215",
"0.62464464",
"0.619496",
"0.6178088",
"0.61637336",
"0.6148355",
"0.6148355",
"0.61142945",
"0.6048279",
"0.6040095",
"0.6037477",
"0.5980279",
"0.59378606",
"0.59160537",
"0.5905036",
"0.5871749",
"0.5864553",
"0.585822",
"0.585396",
"0.5841762",
"0.58379406",
"0.5833076",
"0.5811572",
"0.58110005",
"0.58092624",
"0.58081955",
"0.5805832",
"0.5798149",
"0.5788165",
"0.5778836",
"0.5691714",
"0.56669295",
"0.56666917",
"0.5652448",
"0.5627408",
"0.5616249",
"0.5595915",
"0.558828",
"0.554585",
"0.5543278",
"0.55410904",
"0.5526353",
"0.55122435",
"0.55014825",
"0.5488754",
"0.54883176",
"0.5479377",
"0.5478699",
"0.54727316",
"0.54635614",
"0.54591405",
"0.5457145",
"0.54482645",
"0.5436646",
"0.5404746",
"0.53785014",
"0.53711635",
"0.53705704",
"0.5368442",
"0.5360717",
"0.53588164",
"0.535524",
"0.5347626",
"0.53474385",
"0.5344602",
"0.5339314",
"0.533305",
"0.5323402",
"0.5322891",
"0.53225905",
"0.5316409",
"0.53110313",
"0.5307891",
"0.5304538",
"0.53006226",
"0.52810115",
"0.52619195",
"0.5248854",
"0.5227887",
"0.5225102",
"0.5224741",
"0.5219867",
"0.52145505",
"0.52118945",
"0.5209324",
"0.5203817",
"0.52024585",
"0.518654",
"0.518654",
"0.5173882",
"0.51719797",
"0.5170936",
"0.516516",
"0.51632935",
"0.51513994",
"0.513386",
"0.5110747",
"0.5095627"
] | 0.7261845 | 0 |
Verify reports whether sig is a valid signature of message by publicKey. It will panic if len(publicKey) is not PublicKeySize. It implements the XEdDSA verify method defined in xeddsa_verify(u, M, (R || s)): if u >= p or R.y >= 2|p| or s >= 2|q|: return false A = convert_mont(u) if not on_curve(A): return false h = hash(R || A || M) (mod q) Rcheck = sB hA if bytes_equal(R, Rcheck): return true return false | Проверяет, является ли sig допустимой подписью сообщения message с использованием publicKey. Вызовет panic, если длина publicKey не равна PublicKeySize. Реализует метод проверки XEdDSA, определённый в xeddsa_verify(u, M, (R || s)): если u >= p или R.y >= 2|p| или s >= 2|q|: вернуть false A = convert_mont(u) если A не лежит на кривой: вернуть false h = hash(R || A || M) (mod q) Rcheck = sB hA если bytes_equal(R, Rcheck): вернуть true вернуть false | func Verify(publicKey PublicKey, message, sig []byte) bool {
// The following code should be equivalent to:
//
// pub, err := publicKey.ToEd25519()
// if err != nil {
// return false
// }
// return ed25519.Verify(pub, message, sig)
if l := len(publicKey); l != PublicKeySize {
panic("x25519: bad public key length: " + strconv.Itoa(l))
}
if len(sig) != SignatureSize || sig[63]&0xE0 != 0 {
return false
}
a, err := convertMont(publicKey)
if err != nil {
return false
}
hh := sha512.New()
hh.Write(sig[:32])
hh.Write(a.Bytes())
hh.Write(message)
hDigest := make([]byte, 0, sha512.Size)
hDigest = hh.Sum(hDigest)
h, err := edwards25519.NewScalar().SetUniformBytes(hDigest)
if err != nil {
return false
}
s, err := edwards25519.NewScalar().SetCanonicalBytes(sig[32:])
if err != nil {
return false
}
minusA := (&edwards25519.Point{}).Negate(a)
r := (&edwards25519.Point{}).VarTimeDoubleScalarBaseMult(h, minusA, s)
return subtle.ConstantTimeCompare(sig[:32], r.Bytes()) == 1
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Verify(sig []byte, publicKey PublicKey) bool {\n\tif l := len(publicKey); l != PublicKeySize {\n\t\tpanic(\"sign: bad public key length: \" + strconv.Itoa(l))\n\t}\n\n\tif len(sig) < SignatureSize || sig[63]&224 != 0 {\n\t\treturn false\n\t}\n\tmsg := sig[SignatureSize:]\n\tsig = sig[:SignatureSize]\n\n\treturn ed25519.Verify(ed25519.PublicKey(publicKey), msg, sig)\n}",
"func Verify(publicKey ed25519.PublicKey, message, sig []byte) bool {\n\tif l := len(publicKey); l != ed25519.PublicKeySize {\n\t\treturn false\n\t}\n\n\tif len(sig) != ed25519.SignatureSize || sig[63]&224 != 0 {\n\t\treturn false\n\t}\n\n\t// ZIP215: this works because SetBytes does not check that encodings are canonical.\n\tA, err := new(edwards25519.Point).SetBytes(publicKey)\n\tif err != nil {\n\t\treturn false\n\t}\n\tA.Negate(A)\n\n\th := sha512.New()\n\th.Write(sig[:32])\n\th.Write(publicKey[:])\n\th.Write(message)\n\tvar digest [64]byte\n\th.Sum(digest[:0])\n\n\thReduced := new(edwards25519.Scalar).SetUniformBytes(digest[:])\n\n\t// ZIP215: this works because SetBytes does not check that encodings are canonical.\n\tcheckR, err := new(edwards25519.Point).SetBytes(sig[:32])\n\tif err != nil {\n\t\treturn false\n\t}\n\n\t// https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in\n\t// the range [0, order) in order to prevent signature malleability.\n\t// ZIP215: This is also required by ZIP215.\n\ts, err := new(edwards25519.Scalar).SetCanonicalBytes(sig[32:])\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tR := new(edwards25519.Point).VarTimeDoubleScalarBaseMult(hReduced, A, s)\n\n\t// ZIP215: We want to check [8](R - checkR) == 0\n\tp := new(edwards25519.Point).Subtract(R, checkR) // p = R - checkR\n\tp.MultByCofactor(p)\n\treturn p.Equal(edwards25519.NewIdentityPoint()) == 1 // p == 0\n}",
"func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool",
"func (sig *Signature) VerifySignature(publicKey interface{}, encoding string) bool {\n\tif sig.Data == nil {\n\t\tlog.Warn(\"sig does not contain signature data\", \"sig\", sig)\n\t\treturn false\n\t}\n\tif publicKey == nil {\n\t\tlog.Warn(\"PublicKey is nil\")\n\t\treturn false\n\t}\n\tencoding += sig.GetSignatureMetaData().String()\n\tdata := []byte(encoding)\n\tswitch sig.Algorithm {\n\tcase Ed25519:\n\t\tif pkey, ok := publicKey.(ed25519.PublicKey); ok {\n\t\t\treturn ed25519.Verify(pkey, data, sig.Data.([]byte))\n\t\t}\n\t\tlog.Warn(\"Could not assert type ed25519.PublicKey\", \"publicKeyType\", fmt.Sprintf(\"%T\", publicKey))\n\tcase Ed448:\n\t\tlog.Warn(\"Ed448 not yet Supported!\")\n\tcase Ecdsa256:\n\t\tif pkey, ok := publicKey.(*ecdsa.PublicKey); ok {\n\t\t\tif sig, ok := sig.Data.([]*big.Int); ok && len(sig) == 2 {\n\t\t\t\thash := sha256.Sum256(data)\n\t\t\t\treturn ecdsa.Verify(pkey, hash[:], sig[0], sig[1])\n\t\t\t}\n\t\t\tlog.Warn(\"Could not assert type []*big.Int\", \"signatureDataType\", fmt.Sprintf(\"%T\", sig.Data))\n\t\t\treturn false\n\t\t}\n\t\tlog.Warn(\"Could not assert type ecdsa.PublicKey\", \"publicKeyType\", fmt.Sprintf(\"%T\", publicKey))\n\tcase Ecdsa384:\n\t\tif pkey, ok := publicKey.(*ecdsa.PublicKey); ok {\n\t\t\tif sig, ok := sig.Data.([]*big.Int); ok && len(sig) == 2 {\n\t\t\t\thash := sha512.Sum384(data)\n\t\t\t\treturn ecdsa.Verify(pkey, hash[:], sig[0], sig[1])\n\t\t\t}\n\t\t\tlog.Warn(\"Could not assert type []*big.Int\", \"signature\", sig.Data)\n\t\t\treturn false\n\t\t}\n\t\tlog.Warn(\"Could not assert type ecdsa.PublicKey\", \"publicKeyType\", fmt.Sprintf(\"%T\", publicKey))\n\tdefault:\n\t\tlog.Warn(\"Signature algorithm type not supported\", \"type\", sig.Algorithm)\n\t}\n\treturn false\n}",
"func Verify(publicKey *ecdsa.PublicKey, sig []byte, message []byte) (valid bool, err error) {\n\tsignature := new(common.SchnorrSignature)\n\terr = json.Unmarshal(sig, signature)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"Failed unmashalling schnorr signature [%s]\", err)\n\t}\n\n\t// 1. compute h(m|| s * G - e * P)\n\t// 1.1 compute s * G\n\tcurve := publicKey.Curve\n\tx1, y1 := curve.ScalarBaseMult(signature.S.Bytes())\n\n\t// 1.2 compute e * P\n\tx2, y2 := curve.ScalarMult(publicKey.X, publicKey.Y, signature.E.Bytes())\n\n\t// 1.3 计算-(e * P),如果 e * P = (x,y),则 -(e * P) = (x, -y mod P)\n\tnegativeOne := big.NewInt(-1)\n\ty2 = new(big.Int).Mod(new(big.Int).Mul(negativeOne, y2), curve.Params().P)\n\n\t// 1.4 compute s * G - e * P\n\tx, y := curve.Add(x1, y1, x2, y2)\n\n\te := hash.HashUsingSha256(append(message, elliptic.Marshal(curve, x, y)...))\n\n\tintE := new(big.Int).SetBytes(e)\n\n\t// 2. check the equation\n\t//\treturn bytes.Equal(e, signature.E.Bytes()), nil\n\tif intE.Cmp(signature.E) != 0 {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}",
"func (vr *Verifier) Verify(q *ristretto255.Element, sig []byte) bool {\n\tbuf := make([]byte, internal.ElementSize)\n\n\t// Check signature length.\n\tif len(sig) != SignatureSize {\n\t\treturn false\n\t}\n\n\t// Decode the challenge scalar.\n\tc := ristretto255.NewScalar()\n\tif err := c.Decode(sig[:internal.ScalarSize]); err != nil {\n\t\treturn false\n\t}\n\n\t// Decode the signature scalar.\n\ts := ristretto255.NewScalar()\n\tif err := s.Decode(sig[internal.ScalarSize:]); err != nil {\n\t\treturn false\n\t}\n\n\t// Re-calculate the ephemeral public key.\n\tS := ristretto255.NewElement().ScalarBaseMult(s)\n\tQc := ristretto255.NewElement().ScalarMult(ristretto255.NewScalar().Negate(c), q)\n\tRp := ristretto255.NewElement().Add(S, Qc)\n\n\t// Add the signer's public key to the protocol.\n\tvr.schnorr.AD(q.Encode(buf[:0]))\n\n\t// Hash the ephemeral public key.\n\tvr.schnorr.AD(Rp.Encode(buf[:0]))\n\n\t// Extract a challenge scalar from the protocol state.\n\tcp := vr.schnorr.PRFScalar()\n\n\t// Compare the extracted challenge scalar to the received challenge scalar.\n\treturn c.Equal(cp) == 1\n}",
"func verify(publicKey *rsa.PublicKey, message []byte, sig []byte) error {\n\th := sha256.New()\n\th.Write(message)\n\td := h.Sum(nil)\n\treturn rsa.VerifyPKCS1v15(publicKey, crypto.SHA256, d, sig)\n}",
"func SignatureVerify(publicKey, sig, hash []byte) bool {\n\n\tbytesDecded, _ := base58.DecodeToBig(publicKey)\n\tpubl := splitBig(bytesDecded, 2)\n\tx, y := publ[0], publ[1]\n\n\tbytesDecded, _ = base58.DecodeToBig(sig)\n\tsigg := splitBig(bytesDecded, 2)\n\tr, s := sigg[0], sigg[1]\n\n\tpub := ecdsa.PublicKey{elliptic.P224(), x, y}\n\n\treturn ecdsa.Verify(&pub, hash, r, s)\n}",
"func VerifySignature(msg []byte, sig []byte, pubkey1 []byte) int {\n\tif msg == nil || sig == nil || pubkey1 == nil {\n\t\tlog.Panic(\"VerifySignature, ERROR: invalid input, nils\")\n\t}\n\tif len(sig) != 65 {\n\t\tlog.Panic(\"VerifySignature, invalid signature length\")\n\t}\n\tif len(pubkey1) != 33 {\n\t\tlog.Panic(\"VerifySignature, invalid pubkey length\")\n\t}\n\n\t//malleability check:\n\t//to enforce malleability, highest bit of S must be 1\n\t//S starts at 32nd byte\n\t//0x80 is 0b10000000 or 128 and masks highest bit\n\tif (sig[32] >> 7) == 1 {\n\t\treturn 0 //valid signature, but fails malleability\n\t}\n\n\tif sig[64] >= 4 {\n\t\treturn 0 //recover byte invalid\n\t}\n\n\tpubkey2 := RecoverPubkey(msg, sig) //if pubkey recovered, signature valid\n\n\tif pubkey2 == nil {\n\t\treturn 0\n\t}\n\n\tif len(pubkey2) != 33 {\n\t\tlog.Panic(\"recovered pubkey length invalid\")\n\t}\n\n\tif bytes.Equal(pubkey1, pubkey2) != true {\n\t\treturn 0 //pubkeys do not match\n\t}\n\n\treturn 1 //valid signature\n}",
"func VerifySignature(msg []byte, sig []byte, pubkey1 []byte) int {\n\tif msg == nil || len(sig) == 0 || len(pubkey1) == 0 {\n\t\tlog.Panic(\"VerifySignature, ERROR: invalid input, empty slices\")\n\t}\n\tif len(sig) != 65 {\n\t\tlog.Panic(\"VerifySignature, invalid signature length\")\n\t}\n\tif len(pubkey1) != 33 {\n\t\tlog.Panic(\"VerifySignature, invalid pubkey length\")\n\t}\n\n\tif len(msg) == 0 {\n\t\treturn 0 // empty message\n\t}\n\n\t// malleability check:\n\t// to enforce malleability, highest bit of S must be 1\n\t// S starts at 32nd byte\n\t// 0x80 is 0b10000000 or 128 and masks highest bit\n\tif (sig[32] >> 7) == 1 {\n\t\treturn 0 // valid signature, but fails malleability\n\t}\n\n\tif sig[64] >= 4 {\n\t\treturn 0 // recovery byte invalid\n\t}\n\n\tpubkey2 := RecoverPubkey(msg, sig)\n\tif pubkey2 == nil {\n\t\treturn 0 // pubkey could not be recovered, signature is invalid\n\t}\n\n\tif len(pubkey2) != 33 {\n\t\tlog.Panic(\"recovered pubkey length invalid\") // sanity check\n\t}\n\n\tif !bytes.Equal(pubkey1, pubkey2) {\n\t\treturn 0 // pubkeys do not match\n\t}\n\n\treturn 1 // valid signature\n}",
"func Verify(publicKey *[PublicKeySize]byte, message []byte, sig *[SignatureSize]byte) bool {\n\tif sig[63]&224 != 0 {\n\t\treturn false\n\t}\n\n\tvar A edwards25519.ExtendedGroupElement\n\tif !A.FromBytes(publicKey) {\n\t\treturn false\n\t}\n\tedwards25519.FeNeg(&A.X, &A.X)\n\tedwards25519.FeNeg(&A.T, &A.T)\n\n\th := sha512.New()\n\th.Write(sig[:32])\n\th.Write(publicKey[:])\n\th.Write(message)\n\tvar digest [64]byte\n\th.Sum(digest[:0])\n\n\tvar hReduced [32]byte\n\tedwards25519.ScReduce(&hReduced, &digest)\n\n\tvar R edwards25519.ProjectiveGroupElement\n\tvar b [32]byte\n\tcopy(b[:], sig[32:])\n\tedwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b)\n\n\tvar checkR [32]byte\n\tR.ToBytes(&checkR)\n\treturn subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1\n}",
"func (p publicKey) Verify(msg, sig []byte) error {\n\thash := sha256.Sum256(msg)\n\tif p.PublicKey.Verify(sig, hash[:]) {\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"error\")\n}",
"func VerifySignature(key Key, sig Signature, unverified []byte) error {\n\terr := validateKey(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsigBytes, err := hex.DecodeString(sig.Sig)\n\tif err != nil {\n\t\treturn err\n\t}\n\thashMapping := getHashMapping()\n\tswitch key.KeyType {\n\tcase rsaKeyType:\n\t\t// We do not need the pemData here, so we can throw it away via '_'\n\t\t_, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tparsedKey, ok := parsedKey.(*rsa.PublicKey)\n\t\tif !ok {\n\t\t\treturn ErrKeyKeyTypeMismatch\n\t\t}\n\t\tswitch key.Scheme {\n\t\tcase rsassapsssha256Scheme:\n\t\t\thashed := hashToHex(hashMapping[\"sha256\"](), unverified)\n\t\t\terr = rsa.VerifyPSS(parsedKey.(*rsa.PublicKey), crypto.SHA256, hashed, sigBytes, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256})\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%w: %s\", ErrInvalidSignature, err)\n\t\t\t}\n\t\tdefault:\n\t\t\t// supported key schemes will get checked in validateKey\n\t\t\tpanic(\"unexpected Error in VerifySignature function\")\n\t\t}\n\tcase ecdsaKeyType:\n\t\t// We do not need the pemData here, so we can throw it away via '_'\n\t\t_, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tparsedKey, ok := parsedKey.(*ecdsa.PublicKey)\n\t\tif !ok {\n\t\t\treturn ErrKeyKeyTypeMismatch\n\t\t}\n\t\tcurveSize := parsedKey.(*ecdsa.PublicKey).Curve.Params().BitSize\n\t\tvar hashed []byte\n\t\tif err := matchEcdsaScheme(curveSize, key.Scheme); err != nil {\n\t\t\treturn ErrCurveSizeSchemeMismatch\n\t\t}\n\t\t// implement https://tools.ietf.org/html/rfc5656#section-6.2.1\n\t\t// We determine the curve size and choose the correct hashing\n\t\t// method based on the curveSize\n\t\tswitch {\n\t\tcase curveSize <= 256:\n\t\t\thashed = hashToHex(hashMapping[\"sha256\"](), unverified)\n\t\tcase 256 < curveSize && curveSize <= 384:\n\t\t\thashed = 
hashToHex(hashMapping[\"sha384\"](), unverified)\n\t\tcase curveSize > 384:\n\t\t\thashed = hashToHex(hashMapping[\"sha512\"](), unverified)\n\t\tdefault:\n\t\t\tpanic(\"unexpected Error in VerifySignature function\")\n\t\t}\n\t\tif ok := ecdsa.VerifyASN1(parsedKey.(*ecdsa.PublicKey), hashed[:], sigBytes); !ok {\n\t\t\treturn ErrInvalidSignature\n\t\t}\n\tcase ed25519KeyType:\n\t\t// We do not need a scheme switch here, because ed25519\n\t\t// only consist of sha256 and curve25519.\n\t\tpubHex, err := hex.DecodeString(key.KeyVal.Public)\n\t\tif err != nil {\n\t\t\treturn ErrInvalidHexString\n\t\t}\n\t\tif ok := ed25519.Verify(pubHex, unverified, sigBytes); !ok {\n\t\t\treturn fmt.Errorf(\"%w: ed25519\", ErrInvalidSignature)\n\t\t}\n\tdefault:\n\t\t// We should never get here, because we call validateKey in the first\n\t\t// line of the function.\n\t\tpanic(\"unexpected Error in VerifySignature function\")\n\t}\n\treturn nil\n}",
"func Verify(mesg, testsig, publicKey string) bool {\n\tif len(mesg)*4 > len(publicKey)*3 {\n\t\treturn false\n\t}\n\tvar m, decrypted big.Int\n\tsetBytesReverse(&m, []byte(mesg))\n\tn := base64ToInt(publicKey)\n\tintSig := base64ToInt(testsig)\n\tdecrypted.Exp(intSig, rsaPublicE, n)\n\n\treturn decrypted.Cmp(&m) == 0\n}",
"func VerifySignature(message []byte, signature []byte, p *PublicKey) bool {\n\thash := sha256.Sum256(message)\n\tpublicKey := p.ecdsa()\n\n\tif p.X == nil || p.Y == nil {\n\t\treturn false\n\t}\n\trBytes := new(big.Int).SetBytes(signature[0:32])\n\tsBytes := new(big.Int).SetBytes(signature[32:64])\n\treturn ecdsa.Verify(publicKey, hash[:], rBytes, sBytes)\n}",
"func (p *Params) Verify(msg, signature, pubkey []byte) (bool, error) {\n\t// Ensure pubkey has correct size\n\tif len(pubkey) != PKSize {\n\t\treturn false, errWrongPubKeySize\n\t}\n\t// Decode signature\n\tpk := make([]byte, 0, PKSize)\n\tvar err error\n\tpk, err = p.Decode(pk, msg, signature)\n\t// Compare public key\n\treturn bytes.Equal(pk, pubkey), err\n}",
"func verifySig(sigStr []byte, publicKeyStr []byte, scriptPubKey []byte, tx *types.Transaction, txInIdx int) bool {\n\tsig, err := crypto.SigFromBytes(sigStr)\n\tif err != nil {\n\t\tlogger.Debugf(\"Deserialize signature failed\")\n\t\treturn false\n\t}\n\tpublicKey, err := crypto.PublicKeyFromBytes(publicKeyStr)\n\tif err != nil {\n\t\tlogger.Debugf(\"Deserialize public key failed\")\n\t\treturn false\n\t}\n\n\tsigHash, err := CalcTxHashForSig(scriptPubKey, tx, txInIdx)\n\tif err != nil {\n\t\tlogger.Debugf(\"Calculate signature hash failed\")\n\t\treturn false\n\t}\n\n\treturn sig.VerifySignature(publicKey, sigHash)\n}",
"func VerifySignature(message []byte, sign ECDSA, pk EllipticPoint) bool {\n\tprivateKey := ConvertKey(nil, pk)\n\thash := sha256.Sum256(message)\n\n\treturn ecdsa.Verify(&privateKey.PublicKey, hash[:], new(big.Int).SetBytes(sign.R), new(big.Int).SetBytes(sign.S))\n}",
"func RsaPublicKeyVerify(data string, publicKeyHexOrPem string, signatureHex string) error {\n\t// data is required\n\tif len(data) == 0 {\n\t\treturn errors.New(\"Data To Verify is Required\")\n\t}\n\n\t// get public key\n\tvar publicKey *rsa.PublicKey\n\tvar err error\n\n\tif util.Left(publicKeyHexOrPem, 26) == \"-----BEGIN PUBLIC KEY-----\" && util.Right(publicKeyHexOrPem, 24) == \"-----END PUBLIC KEY-----\" {\n\t\t// get public key from pem\n\t\tpublicKey, err = rsaPublicKeyFromPem(publicKeyHexOrPem)\n\t} else {\n\t\t// get public key from hex\n\t\tpublicKey, err = rsaPublicKeyFromHex(publicKeyHexOrPem)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// convert data to byte array\n\tmsg := []byte(data)\n\n\t// define hash\n\th := sha256.New()\n\th.Write(msg)\n\td := h.Sum(nil)\n\n\tsig, _ := util.HexToByte(signatureHex)\n\n\terr1 := rsa.VerifyPKCS1v15(publicKey, crypto.SHA256, d, sig)\n\n\tif err1 != nil {\n\t\treturn err1\n\t}\n\n\t// verified\n\treturn nil\n}",
"func VerifyPublicKeySize() bool {\n\tconfirmation := C.testPublicKeySize(C.int(PublicKeySize))\n\treturn confirmation != 0\n}",
"func VerifyPubkey(pubkey []byte) int {\n\tif len(pubkey) != 33 {\n\t\treturn -2\n\t}\n\n\tif secp.PubkeyIsValid(pubkey) != 1 {\n\t\treturn -1 // tests parse and validity\n\t}\n\n\treturn 1 //valid\n}",
"func verify(pub crypto.PublicKey, hasher crypto.Hash, data, sig []byte) error {\n\tif sig == nil {\n\t\treturn errors.New(\"signature is nil\")\n\t}\n\n\th := hasher.New()\n\tif _, err := h.Write(data); err != nil {\n\t\treturn errors.Wrap(err, \"write\")\n\t}\n\tdigest := h.Sum(nil)\n\n\tswitch pub := pub.(type) {\n\tcase *ecdsa.PublicKey:\n\t\tif !ecdsa.VerifyASN1(pub, digest, sig) {\n\t\t\treturn errors.New(\"verification failed\")\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown public key type: %T\", pub)\n\t}\n\treturn nil\n}",
"func (pk PublicKey) Verify(sig, message []byte, hasher Hasher) (bool, error) {\n\treturn pk.publicKey.Verify(sig, message, hasher)\n}",
"func (sig *Signature) Verify(msg []byte, pubKey *PublicKey) bool {\n\tif len(msg) == 0 || len(msg) > HashLen || pubKey == nil {\n\t\treturn false\n\t}\n\ts, err := sig.SerializeRSV()\n\tif err != nil {\n\t\treturn false\n\t}\n\tret := secp256k1.VerifySignature(msg, s, pubKey.bytes)\n\treturn ret != 0\n}",
"func Verify(pubkey *dsa.PublicKey, hash *[32]byte, sig *Signature) (valid bool) {\n\n\treturn dsa.Verify(pubkey, hash[:], sig.R, sig.S)\n}",
"func VerifySignature(d interface{}, signature string, keys []*rsa.PublicKey) error {\n\thash, err := calculateHash(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsg, err := base64.StdEncoding.DecodeString(signature)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvalid := false\n\tfor _, key := range keys {\n\t\terr = rsa.VerifyPKCS1v15(key, crypto.SHA256, hash[:], sg)\n\t\tif err == nil {\n\t\t\tvalid = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !valid {\n\t\treturn ErrInvalidSignature\n\t}\n\n\treturn nil\n}",
"func (r *rsaPublicKey) CheckSignature(message []byte, sig []byte) error {\r\n\th := sha256.New()\r\n\th.Write(message)\r\n\td := h.Sum(nil)\r\n\treturn rsa.VerifyPKCS1v15(r.PublicKey, crypto.SHA256, d, sig)\r\n}",
"func (k *RSAPubKey) VerifySignature(payload []byte, sig string) error {\n\tif k.key == nil {\n\t\treturn ErrorKeyUninitialized\n\t}\n\n\tsha256 := crypto.SHA256.New()\n\t_, err := sha256.Write(payload)\n\tif err != nil {\n\t\treturn errors.AddStack(err)\n\t}\n\n\thashed := sha256.Sum(nil)\n\n\tb64decSig, err := base64.StdEncoding.DecodeString(sig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn rsa.VerifyPSS(k.key, crypto.SHA256, hashed, b64decSig, nil)\n}",
"func (pk PublicKey) Verify(hash []byte, s *Sign) bool {\n\treturn secp256k1.VerifySignature(pk.Bytes(), hash, s.Bytes()[:64])\n}",
"func VerifySignWithPublicKey(src, signed []byte, hash crypto.Hash) (e error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tswitch x := r.(type) {\n\t\t\tcase string:\n\t\t\t\te = errors.New(x)\n\t\t\tcase error:\n\t\t\t\te = x\n\t\t\tdefault:\n\t\t\t\te = errors.New(\"Unknown panic\")\n\t\t\t}\n\t\t}\n\t}()\n\th := hash.New()\n\th.Write(src)\n\thashed := h.Sum(nil)\n\terr := rsa.VerifyPKCS1v15(publicKey, hash, hashed, signed)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (pk PublicKey) Verify(sig Signature, message []byte) bool {\n\tmessageBytes := make([]byte, len(message)+SignatureSize)\n\tmessagePointer := (*C.uchar)(&messageBytes[0])\n\n\tvar messageLen uint64\n\tlenPointer := (*C.ulonglong)(&messageLen)\n\n\tsignedMessageBytes := append(sig[:], message...)\n\tsignedMessagePointer := (*C.uchar)(&signedMessageBytes[0])\n\tsignedMessageLen := C.ulonglong(len(signedMessageBytes))\n\tpkPointer := (*C.uchar)(&pk[0])\n\n\terrorCode := C.crypto_sign_open(messagePointer, lenPointer, signedMessagePointer, signedMessageLen, pkPointer)\n\treturn errorCode == 0\n}",
"func VerifySignature(base64EncodedPublicKey string, data string, signature string) (err error) {\n\tpublicKeyByte, err := base64.StdEncoding.DecodeString(base64EncodedPublicKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpub, err := x509.ParsePKIXPublicKey(publicKeyByte)\n\tif err != nil {\n\t\treturn err\n\t}\n\thashed := sha256.Sum256([]byte(data))\n\tsignatureByte, err := base64.StdEncoding.DecodeString(signature)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn rsa.VerifyPKCS1v15(pub.(*rsa.PublicKey), crypto.SHA256, hashed[:], signatureByte)\n}",
"func Verify(pub *ecdsa.PublicKey, hash []byte, r, s *big.Int) bool {\n\treturn ecdsa.Verify(pub, hash, r, s)\n}",
"func (s *Signature) Verify(pub crypto.PubKey, data []byte) error {\n\tif err := s.MatchesPublicKey(pub); err != nil {\n\t\treturn err\n\t}\n\n\tok, err := pub.Verify(data, s.GetSignature())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !ok {\n\t\treturn errors.New(\"signature did not match\")\n\t}\n\n\treturn nil\n}",
"func (s *Signature) MatchesPublicKey(pub crypto.PubKey) error {\n\tpubData, err := pub.Bytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkeyMulti, err := mh.Decode(s.GetKeyMultihash())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tourMh, err := mh.Sum(pubData, keyMulti.Code, keyMulti.Length)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: find a better way to derive digest without encoding it.\n\tourMhDec, err := mh.Decode(ourMh)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif bytes.Compare(ourMhDec.Digest, keyMulti.Digest) != 0 {\n\t\tkeyMultiC, err := mh.Cast(s.GetKeyMultihash())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn errors.Errorf(\"hash mismatch: %s != %s\", ourMh.B58String(), keyMultiC.B58String())\n\t}\n\n\treturn nil\n}",
"func Verify(P *btcec.PublicKey, sign []byte) bool {\n\tsG := new(btcec.PublicKey)\n\tsG.X, sG.Y = btcec.S256().ScalarBaseMult(sign)\n\treturn P.IsEqual(sG)\n}",
"func (k *RSAPublicKeyData) Verify(data []byte, sig []byte) (bool, error) {\n\tpubkey := &rsa.PublicKey{\n\t\tN: big.NewInt(0).SetBytes(k.Modulus),\n\t\tE: int(uint(k.Exponent[2]) | uint(k.Exponent[1])<<8 | uint(k.Exponent[0])<<16),\n\t}\n\n\tf := HasherFromCOSEAlg(COSEAlgorithmIdentifier(k.PublicKeyData.Algorithm))\n\th := f()\n\th.Write(data)\n\n\tvar hash crypto.Hash\n\n\tswitch COSEAlgorithmIdentifier(k.PublicKeyData.Algorithm) {\n\tcase AlgRS1:\n\t\thash = crypto.SHA1\n\tcase AlgPS256, AlgRS256:\n\t\thash = crypto.SHA256\n\tcase AlgPS384, AlgRS384:\n\t\thash = crypto.SHA384\n\tcase AlgPS512, AlgRS512:\n\t\thash = crypto.SHA512\n\tdefault:\n\t\treturn false, ErrUnsupportedAlgorithm\n\t}\n\n\tswitch COSEAlgorithmIdentifier(k.PublicKeyData.Algorithm) {\n\tcase AlgPS256, AlgPS384, AlgPS512:\n\t\terr := rsa.VerifyPSS(pubkey, hash, h.Sum(nil), sig, nil)\n\n\t\treturn err == nil, err\n\tcase AlgRS1, AlgRS256, AlgRS384, AlgRS512:\n\t\terr := rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sig)\n\n\t\treturn err == nil, err\n\tdefault:\n\t\treturn false, ErrUnsupportedAlgorithm\n\t}\n}",
"func verifySignedData(data, sig, pub []byte) error {\n\thashed := sha256.Sum256(data)\n\n\tvar s ECDSASignature\n\t_, err := asn1.Unmarshal(sig, &s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpkey, err := x509.ParsePKIXPublicKey(pub)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tppkey, ok := pkey.(*ecdsa.PublicKey)\n\tif !ok {\n\t\treturn errors.New(\"Public key format for the server appears incorrect. Should be ecdsa.PublicKey but unable to cast as such.\")\n\t}\n\n\tif !ecdsa.Verify(ppkey, hashed[:], s.R, s.S) {\n\t\treturn errors.New(\"Verification of signed data failed.\")\n\t}\n\n\treturn nil\n}",
"func (sig *Signature) Verify(hash []byte, pubKey *PublicKey) bool {\n\treturn ecdsa.Verify(pubKey.ToECDSA(), hash, sig.R, sig.S)\n}",
"func Verify(pubKey []byte, hash []byte, sig []byte) (bool, error) {\n\tif len(sig) > SigLengthInBytes {\n\t\tsig = sig[:SigLengthInBytes]\n\t}\n\treturn crypto.VerifySignature(pubKey, hash, sig), nil\n}",
"func (sig Signature) Verify(X curve.Point, hash []byte) bool {\n\tgroup := X.Curve()\n\n\tm := curve.FromHash(group, hash)\n\tsInv := group.NewScalar().Set(sig.S).Invert()\n\tmG := m.ActOnBase()\n\tr := sig.R.XScalar()\n\trX := r.Act(X)\n\tR2 := mG.Add(rX)\n\tR2 = sInv.Act(R2)\n\treturn R2.Equal(sig.R)\n}",
"func VerifyPSS(pub *rsa.PublicKey, hash crypto.Hash, hashed []byte, sig []byte, opts *rsa.PSSOptions,) error",
"func (r *RsaPublicKey) Verify(message []byte, sig []byte) error {\n\th := sha256.New()\n\th.Write(message)\n\td := h.Sum(nil)\n\treturn rsa.VerifyPKCS1v15(r.PublicKey, crypto.SHA256, d, sig)\n}",
"func (pkv PublicKeyValidator) Validate(key string, value []byte) (err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"DHT: Received invalid value for key %s: %s\", key, err.Error())\n\t\t} else {\n\t\t\tlog.Infof(\"DHT: Received valid value for key %s\", key)\n\t\t}\n\t}()\n\n\tpeerID, err := pkv.getPeerID(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar publicKey pb.PublicKey\n\terr = proto.Unmarshal(value, &publicKey)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tsignatureKey, err := crypto.UnmarshalPublicKey(publicKey.SignatureKey)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tif !peerID.MatchesPublicKey(signatureKey) {\n\t\treturn errors.New(ErrInvalidSenderSignature)\n\t}\n\n\tsignature := publicKey.Signature\n\n\tpublicKey.SignatureKey = nil\n\tpublicKey.Signature = nil\n\n\tsignedBytes, err := proto.Marshal(&publicKey)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tok, err := signatureKey.Verify(signedBytes, signature)\n\tif err != nil {\n\t\treturn errors.Wrap(err, ErrInvalidSenderSignature)\n\t}\n\tif !ok {\n\t\treturn errors.New(ErrInvalidSenderSignature)\n\t}\n\n\t// No need to validate that the point is on the curve because we only use\n\t// curve25519 for now which has twist security.\n\t// If we support more elliptic curves, we might need to check here that the\n\t// public key received is a valid curve point.\n\n\treturn nil\n}",
"func VerifyPubkey(pubkey []byte) int {\n\tif len(pubkey) != 33 {\n\t\t//log.Printf(\"Seck256k1, VerifyPubkey, pubkey length invalid\")\n\t\treturn -1\n\t}\n\n\tif secp.PubkeyIsValid(pubkey) != 1 {\n\t\treturn -3 //tests parse and validity\n\t}\n\n\tvar pubkey1 secp.XY\n\tret := pubkey1.ParsePubkey(pubkey)\n\n\tif ret == false {\n\t\treturn -2 //invalid, parse fail\n\t}\n\t//fails for unknown reason\n\t//TODO: uncomment\n\tif pubkey1.IsValid() == false {\n\t\treturn -4 //invalid, validation fail\n\t}\n\treturn 1 //valid\n}",
"func (r *RSA) Verify(msg, sig string) (bool, error) {\n\tpub := r.PublicKey\n\tif pub == nil && r.PrivateKey != nil {\n\t\tpub = &r.PrivateKey.PublicKey\n\t}\n\tif pub == nil {\n\t\treturn false, errors.New(\"missing public key\")\n\t}\n\n\tsignature, err := r.decode(sig)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\thashed := sha256.Sum256([]byte(msg))\n\n\terr = rsa.VerifyPKCS1v15(pub, crypto.SHA256, hashed[:], signature)\n\treturn err == nil, nil\n}",
"func VerifySignature(pubkey, msg, signature []byte) bool {\n\treturn secp256k1.VerifySignature(pubkey, msg, signature)\n}",
"func (sig InsecureSignature) Verify(hashes [][]byte, publicKeys []PublicKey) bool {\n\tif (len(hashes) != len(publicKeys)) || len(hashes) == 0 {\n\t\t// panic(\"hashes and pubKeys vectors must be of same size and non-empty\")\n\t\treturn false\n\t}\n\n\t// Get a C pointer to an array of message hashes\n\tcNumHashes := C.size_t(len(hashes))\n\tcHashesPtr := C.AllocPtrArray(cNumHashes)\n\tdefer C.FreePtrArray(cHashesPtr)\n\t// Loop thru each message and add the key C ptr to the array of ptrs at index\n\tfor i, hash := range hashes {\n\t\tcBytesPtr := C.CBytes(hash)\n\t\tdefer C.free(cBytesPtr)\n\t\tC.SetPtrArray(cHashesPtr, cBytesPtr, C.int(i))\n\t}\n\n\t// Get a C pointer to an array of public keys\n\tcNumPublicKeys := C.size_t(len(publicKeys))\n\tcPublicKeysPtr := C.AllocPtrArray(cNumPublicKeys)\n\tdefer C.FreePtrArray(cPublicKeysPtr)\n\t// Loop thru each key and add the key C ptr to the array of ptrs at index\n\tfor i, key := range publicKeys {\n\t\tC.SetPtrArray(cPublicKeysPtr, unsafe.Pointer(key.pk), C.int(i))\n\t}\n\n\treturn bool(C.CInsecureSignatureVerify(sig.sig, cHashesPtr, cNumHashes,\n\t\tcPublicKeysPtr, cNumPublicKeys))\n}",
"func Verify(pk *PublicKey, msg []byte, signature []byte) bool {\n\tif !mode2.Verify(\n\t\t&pk.d,\n\t\tmsg,\n\t\tsignature[:mode2.SignatureSize],\n\t) {\n\t\treturn false\n\t}\n\tif !ed25519.Verify(\n\t\tpk.e,\n\t\tmsg,\n\t\tsignature[mode2.SignatureSize:],\n\t) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (cfg *Config) verifyPublicKey(host string, _ net.Addr, key ssh.PublicKey) error {\n\tactual := ssh.FingerprintSHA256(key)\n\n\tif actual != cfg.SSHfingerprint {\n\t\treturn fmt.Errorf(\"Bad HSM SSH public key. Host: %s Fingerprint: %s\", host, actual)\n\t}\n\n\treturn nil\n}",
"func Verify(pk ed25519.PublicKey, sm, f []byte) (msg []byte, err error) {\n\tif len(sm) <= 64 {\n\t\treturn nil, fmt.Errorf(\"invalid signed message length\")\n\t}\n\n\tmsg = sm[:len(sm)-64]\n\tsig := sm[len(sm)-64:]\n\tpieces := [][]byte{headerModePublic, msg, f}\n\n\tmsg2, err := pae(pieces)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !ed25519.Verify(pk, msg2, sig) {\n\t\treturn nil, fmt.Errorf(\"invalid message signature\")\n\t}\n\n\treturn msg, nil\n}",
"func (b *Backend) VerifySignature(msg []byte, sig wallet.Sig, a wallet.Address) (bool, error) {\n\taddr, ok := a.(*Address)\n\tif !ok {\n\t\tlog.Panic(\"Wrong address type passed to Backend.VerifySignature\")\n\t}\n\tpk := (*ecdsa.PublicKey)(addr)\n\n\tr, s, err := deserializeSignature(sig)\n\tif err != nil {\n\t\treturn false, errors.WithMessage(err, \"could not deserialize signature\")\n\t}\n\n\t// escda.Verify needs a digest as input\n\t// ref https://golang.org/pkg/crypto/ecdsa/#Verify\n\treturn ecdsa.Verify(pk, digest(msg), r, s), nil\n}",
"func (key PublicKey) Verify(signature []byte) bool {\n\tif len(signature) < SignatureSize || signature[63]&224 != 0 {\n\t\treturn false\n\t}\n\treturn Verify(signature, key)\n}",
"func (r *Reservation) SignatureVerify(pk string, sig []byte) error {\n\tkey, err := crypto.KeyFromHex(pk)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"invalid verification key\")\n\t}\n\n\tvar buf bytes.Buffer\n\tif _, err := buf.WriteString(fmt.Sprint(int64(r.ID))); err != nil {\n\t\treturn errors.Wrap(err, \"failed to write id to buffer\")\n\t}\n\n\tif _, err := buf.WriteString(r.Json); err != nil {\n\t\treturn errors.Wrap(err, \"failed to write json to buffer\")\n\t}\n\n\treturn crypto.Verify(key, buf.Bytes(), sig)\n}",
"func VerifySignature(publicKey crypto.PublicKey, keyType pubkey.KeyType, signature, clientDataJSON, authData []byte) (bool, error) {\n\t// Calculate the hash of the client data\n\tclientDataHash := sha256.Sum256(clientDataJSON)\n\n\t// Combine all the data that is included in the signature\n\thashInput := make([]byte, 0, len(authData)+len(clientDataHash))\n\thashInput = append(hashInput, authData...)\n\thashInput = append(hashInput, clientDataHash[:]...)\n\n\t// Check the signature\n\treturn pubkey.VerifySignature(\n\t\tpublicKey,\n\t\tkeyType.Hash(),\n\t\thashInput,\n\t\tsignature,\n\t)\n}",
"func (s Signature) Verify(key PublicKey, msg []byte) error {\n\treturn key.Verify(s.Hash, msg, s.Data)\n}",
"func Verify(key *rsa.PublicKey, signature, message []byte) error {\n\t// hash the message\n\thashed := sha256.Sum256(message)\n\t// verify the signature\n\terr := rsa.VerifyPKCS1v15(key, crypto.SHA256, hashed[:], signature)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to verify message: \")\n\t}\n\treturn nil\n}",
"func (s Signature) Verify(r io.Reader, k interface{}, opts ...sigsig.VerifyOption) error {\n\tif s.signature == nil {\n\t\treturn fmt.Errorf(\"ssh signature has not been initialized\")\n\t}\n\n\tkey, ok := k.(*PublicKey)\n\tif !ok {\n\t\treturn fmt.Errorf(\"invalid public key type for: %v\", k)\n\t}\n\n\tck, err := key.CanonicalValue()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcs, err := s.CanonicalValue()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn Verify(r, cs, ck)\n}",
"func (e Execution) VerifySig() error {\n\tsig, err := bls.SignatureFromBytes(e.Signature[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpub, err := bls.PublicKeyFromBytes(e.FromPubKey[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmsg := e.SignatureMessage()\n\n\tvalid := sig.Verify(pub, msg[:])\n\tif !valid {\n\t\treturn errors.New(\"invalid signature from execution call\")\n\t}\n\n\treturn nil\n}",
"func validateSignature(pubKey string, signature string, elements ...string) error {\n\tsig, err := util.ConvertSignature(signature)\n\tif err != nil {\n\t\treturn www.UserError{\n\t\t\tErrorCode: www.ErrorStatusInvalidSignature,\n\t\t}\n\t}\n\tb, err := hex.DecodeString(pubKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpk, err := identity.PublicIdentityFromBytes(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar msg string\n\tfor _, v := range elements {\n\t\tmsg += v\n\t}\n\tif !pk.VerifyMessage([]byte(msg), sig) {\n\t\treturn www.UserError{\n\t\t\tErrorCode: www.ErrorStatusInvalidSignature,\n\t\t}\n\t}\n\treturn nil\n}",
"func (signature Signature) Verify(message []byte, key PublicKey) bool {\n\treturn key.Verify(message, signature)\n}",
"func CryptoVerify(smsg, pk []byte) bool {\n\tsmsg_buff := NewBuffer(smsg)\n\tdefer smsg_buff.Free()\n\tpk_buff := NewBuffer(pk)\n\tdefer pk_buff.Free()\n\n\tif pk_buff.size != C.crypto_sign_publickeybytes() {\n\t\treturn false\n\t}\n\tmlen := C.ulonglong(0)\n\tmsg := malloc(C.size_t(len(smsg)))\n\tdefer msg.Free()\n\tsmlen := C.ulonglong(smsg_buff.size)\n\treturn C.crypto_sign_open(msg.uchar(), &mlen, smsg_buff.uchar(), smlen, pk_buff.uchar()) != -1\n}",
"func ValidatePublicKey(k *ecdsa.PublicKey) bool {\n\treturn k != nil && k.X != nil && k.Y != nil && k.X.Sign() != 0 && k.Y.Sign() != 0\n}",
"func validateSignature(transactionID string, transactionInputSignature string, unspentOutputAddress string) (bool, error) {\n\n\t// unspentOutputAddress is actually public key\n\t// first try to decode it to PEM block\n\tpemBlock, _ := pem.Decode([]byte(unspentOutputAddress))\n\tif pemBlock == nil {\n\t\treturn false, nil\n\t}\n\t// try to get the public key out of the PEM block\n\tpub, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// get the string value out of signature which is hex encoded\n\tdecodedTransactionInputSignature, err := hex.DecodeString(transactionInputSignature)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// hash the unsigned transactionID so we can use the value in signature verification\n\thashedID := sha256.Sum256([]byte(transactionID))\n\n\t// verify signed decoded transactionID to the hashed unsigned transactionID\n\tvar verificationError = rsa.VerifyPKCS1v15(pub.(*rsa.PublicKey), crypto.SHA256, hashedID[:], []byte(decodedTransactionInputSignature))\n\n\t// verification failed\n\tif verificationError != nil {\n\t\treturn false, verificationError\n\t}\n\n\t// verification was success if there is no error\n\treturn true, nil\n}",
"func (m *EnvelopMessage) verifySignature(data []byte) error {\n\tdataWithoutSignature := data[:len(data)-signatureLength]\n\tdatahash := utils.Sha3(dataWithoutSignature)\n\tdatatosign := m.signData(datahash)\n\t//should not change data's content,because its name is verify.\n\tvar signature = make([]byte, signatureLength)\n\tcopy(signature, data[len(data)-signatureLength:])\n\thash := utils.Sha3(datatosign)\n\tsignature[len(signature)-1] -= 27 //why?\n\tpubkey, err := crypto.Ecrecover(hash[:], signature)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm.Sender = utils.PubkeyToAddress(pubkey)\n\treturn nil\n\n}",
"func (s *Signature) Validate(masterPubKey ed25519.PublicKey, b []byte) error {\n\tif !ed25519.Verify(masterPubKey, []byte(*s.PublicKey), []byte(*s.Endorsement)) {\n\t\treturn &Error{Code: 401, Message: \"Request Public Key was not endorsed by Manifold\"}\n\t}\n\n\tlivePubKey := ed25519.PublicKey([]byte(*s.PublicKey))\n\tif !ed25519.Verify(livePubKey, b, []byte(*s.Value)) {\n\t\treturn &Error{Code: 401, Message: \"Request was not signed by included Public Key\"}\n\t}\n\n\treturn nil\n}",
"func ecdsaVerify(m []byte, D ecdsa.PublicKey, r big.Int, s big.Int) bool {\n\n\tcurve := crypto.S256()\n\n\te := new(big.Int).SetBytes(crypto.Keccak256(m))\n\n\tw := new(big.Int)\n\n\tu1 := new(big.Int)\n\n\tu2 := new(big.Int)\n\n\tw.ModInverse(&s, secp256k1_N)\n\n\tu1.Mul(e, w)\n\n\tu1.Mod(u1, secp256k1_N)\n\n\tu2.Mul(&r, w)\n\n\tu2.Mod(u2, secp256k1_N)\n\n\tA := new(ecdsa.PublicKey)\n\n\tB := new(ecdsa.PublicKey)\n\n\tC := new(ecdsa.PublicKey)\n\n\tA.X, A.Y = curve.ScalarBaseMult(u1.Bytes())\n\n\tB.X, B.Y = curve.ScalarMult(D.X, D.Y, u2.Bytes())\n\n\tC.X, C.Y = curve.Add(A.X, A.Y, B.X, B.Y)\n\n\t//to do: check whether C is infinite point of secp256k1\n\n\tC.X.Mod(C.X, secp256k1_N)\n\n\tif r.Cmp(C.X) == 0 {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func verifySignature(user *Transaction) bool {\n\tvar testString string\n\tvar strAmount string\n\tvar testStringHash []byte\n\tsignature := new(big.Int)\n\tsignature = stringToBigInt(user.Signature) // recieved signature converted to big.int\n\tIncPK := new(RSA.PublicKeyPair) \n\tstrAmount = strconv.Itoa(user.Amount) \n\ttestString = user.From + user.To + strAmount // creates the string to get hashed\n\ttestStringHash = []byte(testString) // created string from information\n\ttestHash := RSA.Hash(testStringHash) // hashes the string\n\t//fmt.Println(\"this is the testHash \", testHash)\n\tIncPK = sortKeyPair(user.From) // sorts the keyPair to compare with signature\n\tif RSA.Verify(signature, testHash, IncPK) {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}",
"func (p PubKey) VerifyBytes(msg []byte, sig Signature) bool {\n\treturn crypto.PubKeyEd25519(p).VerifyBytes(msg, crypto.SignatureEd25519(sig))\n}",
"func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig_ Signature) bool {\n\tsig, ok := sig_.(SignatureEd25519)\n\tif !ok {\n\t\treturn false\n\t}\n\tpubKeyBytes := [32]byte(pubKey)\n\tsigBytes := [64]byte(sig)\n\treturn ed25519.Verify(&pubKeyBytes, msg, &sigBytes)\n}",
"func (c *publicKey) Verify(signable Signable) (bool, error) {\n\tif c.ki == nil {\n\t\treturn false, ErrPublicKeyCannotBeNil()\n\t}\n\n\tif signable == nil {\n\t\treturn false, ErrSignableCannotBeNil()\n\t}\n\n\tsign := signable.GetSignature()\n\tif sign == nil {\n\t\treturn false, ErrSignatureCannotBeNil()\n\t}\n\n\tblob, err := sign.Raw()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\thash, err := signable.Hash()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn c.ki.Verify(hash[:], blob)\n}",
"func verifySignatures(sectionSender sectionWithSigSender) bool {\n\tsection := sectionSender.Section\n\tkeysNeeded := make(map[rainslib.SignatureMetaData]bool)\n\tsection.NeededKeys(keysNeeded)\n\tpublicKeys, missingKeys, ok := publicKeysPresent(section.GetSubjectZone(), section.GetContext(), keysNeeded)\n\tif ok {\n\t\tlog.Info(\"All public keys are present.\", \"msgSectionWithSig\", section)\n\t\taddZoneAndContextToContainedSections(section)\n\t\treturn validSignature(section, publicKeys)\n\t}\n\thandleMissingKeys(sectionSender, missingKeys)\n\treturn false\n}",
"func (v EcdsaVerifier) Verify(msg, sig []byte) bool {\n\th := sha256.Sum256(msg)\n\tif !ecdsa.VerifyASN1(v.PubKey, h[:], sig) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func checkSigSchnorr(msg, pkBytes, sigBytes []byte) error {\n\tpubKey, err := schnorr.ParsePubKey(pkBytes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error decoding schnorr PublicKey from bytes: %v\", err)\n\t}\n\tsignature, err := schnorr.ParseSignature(sigBytes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error decoding schnorr Signature from bytes: %v\", err)\n\t}\n\tif !signature.Verify(msg, pubKey) {\n\t\treturn fmt.Errorf(\"schnorr signature verification failed\")\n\t}\n\treturn nil\n}",
"func VerifySignatureValidity(sig []byte) int {\n\t//64+1\n\tif len(sig) != 65 {\n\t\tlog.Panic(\"VerifySignatureValidity: sig len is not 65 bytes\")\n\t\treturn 0\n\t}\n\t//malleability check:\n\t//highest bit of 32nd byte must be 1\n\t//0x7f is 126 or 0b01111111\n\tif (sig[32] >> 7) == 1 {\n\t\treturn 0 // signature is malleable\n\t}\n\t//recovery id check\n\tif sig[64] >= 4 {\n\t\treturn 0 // recovery id invalid\n\t}\n\treturn 1\n}",
"func (sig *Signature) Verify(key PublicKey, message []byte) bool {\n\tif message = messageDigest(sig.Hash, sig.Purpose, message, key); message == nil {\n\t\treturn false\n\t}\n\treturn key.verify(message, sig)\n}",
"func (s *NodeKeySignature) verifySignature(nodeKey key.NodePublic, verificationKey Key) error {\n\tif s.SigKind != SigCredential {\n\t\tnodeBytes, err := nodeKey.MarshalBinary()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"marshalling pubkey: %v\", err)\n\t\t}\n\t\tif !bytes.Equal(nodeBytes, s.Pubkey) {\n\t\t\treturn errors.New(\"signature does not authorize nodeKey\")\n\t\t}\n\t}\n\n\tsigHash := s.SigHash()\n\tswitch s.SigKind {\n\tcase SigRotation:\n\t\tif s.Nested == nil {\n\t\t\treturn errors.New(\"nested signatures must nest a signature\")\n\t\t}\n\n\t\t// Verify the signature using the nested rotation key.\n\t\tverifyPub, ok := s.Nested.wrappingPublic()\n\t\tif !ok {\n\t\t\treturn errors.New(\"missing rotation key\")\n\t\t}\n\t\tif len(verifyPub) != ed25519.PublicKeySize {\n\t\t\treturn fmt.Errorf(\"bad rotation key length: %d\", len(verifyPub))\n\t\t}\n\t\tif !ed25519.Verify(ed25519.PublicKey(verifyPub[:]), sigHash[:], s.Signature) {\n\t\t\treturn errors.New(\"invalid signature\")\n\t\t}\n\n\t\t// Recurse to verify the signature on the nested structure.\n\t\tvar nestedPub key.NodePublic\n\t\t// SigCredential signatures certify an indirection key rather than a node\n\t\t// key, so theres no need to check the node key.\n\t\tif s.Nested.SigKind != SigCredential {\n\t\t\tif err := nestedPub.UnmarshalBinary(s.Nested.Pubkey); err != nil {\n\t\t\t\treturn fmt.Errorf(\"nested pubkey: %v\", err)\n\t\t\t}\n\t\t}\n\t\tif err := s.Nested.verifySignature(nestedPub, verificationKey); err != nil {\n\t\t\treturn fmt.Errorf(\"nested: %v\", err)\n\t\t}\n\t\treturn nil\n\n\tcase SigDirect, SigCredential:\n\t\tif s.Nested != nil {\n\t\t\treturn fmt.Errorf(\"invalid signature: signatures of type %v cannot nest another signature\", s.SigKind)\n\t\t}\n\t\tswitch verificationKey.Kind {\n\t\tcase Key25519:\n\t\t\tif len(verificationKey.Public) != ed25519.PublicKeySize {\n\t\t\t\treturn fmt.Errorf(\"ed25519 key has wrong length: %d\", 
len(verificationKey.Public))\n\t\t\t}\n\t\t\tif ed25519consensus.Verify(ed25519.PublicKey(verificationKey.Public), sigHash[:], s.Signature) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn errors.New(\"invalid signature\")\n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unhandled key type: %v\", verificationKey.Kind)\n\t\t}\n\n\tdefault:\n\t\treturn fmt.Errorf(\"unhandled signature type: %v\", s.SigKind)\n\t}\n}",
"func (m *Message) VerifySignature() bool {\n\tmsgBytes := m.Bytes()\n\tsignature := m.Signature()\n\n\tcontentLength := len(msgBytes) - len(signature)\n\tcontent := msgBytes[:contentLength]\n\n\treturn m.issuerPublicKey.VerifySignature(content, signature)\n}",
"func (k *Ed25519PublicKey) Verify(data []byte, sig []byte) (bool, error) {\n\treturn ed25519.Verify(k.k, data, sig), nil\n}",
"func validatePubKey(publicKey string) error {\n\tpk, err := hex.DecodeString(publicKey)\n\tif err != nil {\n\t\tlog.Debugf(\"validatePubKey: decode hex string \"+\n\t\t\t\"failed for '%v': %v\", publicKey, err)\n\t\treturn www.UserError{\n\t\t\tErrorCode: www.ErrorStatusInvalidPublicKey,\n\t\t}\n\t}\n\n\tvar emptyPK [identity.PublicKeySize]byte\n\tswitch {\n\tcase len(pk) != len(emptyPK):\n\t\tlog.Debugf(\"validatePubKey: invalid size: %v\",\n\t\t\tpublicKey)\n\t\treturn www.UserError{\n\t\t\tErrorCode: www.ErrorStatusInvalidPublicKey,\n\t\t}\n\tcase bytes.Equal(pk, emptyPK[:]):\n\t\tlog.Debugf(\"validatePubKey: key is empty: %v\",\n\t\t\tpublicKey)\n\t\treturn www.UserError{\n\t\t\tErrorCode: www.ErrorStatusInvalidPublicKey,\n\t\t}\n\t}\n\n\treturn nil\n}",
"func Verification(pub ecdsa.PublicKey, hash []byte, r, s *big.Int) bool {\n\tverifystatus := ecdsa.Verify(&pub, hash, r, s)\n\treturn verifystatus\n}",
"func (k *OKPPublicKeyData) Verify(data []byte, sig []byte) (bool, error) {\n\tvar key ed25519.PublicKey = make([]byte, ed25519.PublicKeySize)\n\n\tcopy(key, k.XCoord)\n\n\treturn ed25519.Verify(key, data, sig), nil\n}",
"func strictSignatureCheck(pk PublicKey, signature ByteSlice) error {\n\tswitch pk.Algorithm {\n\tcase SignatureAlgoEd25519:\n\t\tif len(pk.Key) != crypto.PublicKeySize {\n\t\t\treturn errors.New(\"invalid public key size in transaction\")\n\t\t}\n\t\tif len(signature) != crypto.SignatureSize {\n\t\t\treturn errors.New(\"invalid signature size in transaction\")\n\t\t}\n\t\treturn nil\n\tdefault:\n\t\treturn errors.New(\"unrecognized public key type in transaction\")\n\t}\n}",
"func (k *PublicKeySECP256K1R) Verify(msg, sig []byte) bool {\n\treturn k.VerifyHash(hashing.ComputeHash256(msg), sig)\n}",
"func VerifySignature(transaction *model.Transaction) (bool, error) {\n\tkey, err := ecdsa.ParsePubKey(transaction.PubKey)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\thash, err := transaction.Hash()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tresult, err := ecdsa.Verify(key, hash[:], transaction.Signature)\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn result, nil\n}",
"func (g *Gossiper) RSAVerifyPMSignature(msg utils.PrivateMessage) bool {\n\thash := utils.HASH_ALGO.New()\n\n\tbytes, e := json.Marshal(msg)\n\tutils.HandleError(e)\n\thash.Write(bytes)\n\thashed := hash.Sum(nil)\n\n\tpubKeyBytes, e := hex.DecodeString(msg.Origin)\n\tutils.HandleError(e)\n\tpubKey, e := x509.ParsePKCS1PublicKey(pubKeyBytes)\n\tutils.HandleError(e)\n\n\te = rsa.VerifyPKCS1v15(pubKey, utils.HASH_ALGO, hashed, msg.Signature)\n\tutils.HandleError(e)\n\tif e == nil {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}",
"func (v *primitiveSetPublicKeyVerify) Verify(signature []byte, data []byte) error {\n\tif len(signature) < tink.NonRawPrefixSize {\n\t\treturn errInvalidSignature\n\t}\n\t// try non-raw keys\n\tprefix := signature[:tink.NonRawPrefixSize]\n\tsignatureNoPrefix := signature[tink.NonRawPrefixSize:]\n\tentries, err := v.ps.GetPrimitivesWithByteIdentifier(prefix)\n\tif err == nil {\n\t\tfor i := 0; i < len(entries); i++ {\n\t\t\tvar signedData []byte\n\t\t\tif entries[i].OutputPrefixType() == tinkpb.OutputPrefixType_LEGACY {\n\t\t\t\tsignedData = append(signedData, data...)\n\t\t\t\tsignedData = append(signedData, tink.LegacyStartByte)\n\t\t\t} else {\n\t\t\t\tsignedData = data\n\t\t\t}\n\t\t\tvar verifier = (entries[i].Primitive()).(tink.PublicKeyVerify)\n\t\t\tif err := verifier.Verify(signatureNoPrefix, signedData); err == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\t// try raw keys\n\tentries, err = v.ps.GetRawPrimitives()\n\tif err == nil {\n\t\tfor i := 0; i < len(entries); i++ {\n\t\t\tvar verifier = (entries[i].Primitive()).(tink.PublicKeyVerify)\n\t\t\tif err := verifier.Verify(signature, data); err == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\treturn errInvalidSignature\n}",
"func IsValidKey(publicKey *[PUBLICKEYBYTES]byte) bool {\n\tpublicKeyPtr := (*C.uchar)(unsafe.Pointer(publicKey))\n\treturn C.crypto_vrf_is_valid_key(publicKeyPtr) != 0\n}",
"func Verify(publicKey []byte, signature []byte, hash []byte) bool {\n\treturn btckey.Verify(publicKey, signature, hash)\n}",
"func ValidateSignatureValues(v byte, r, s *big.Int, homestead bool) bool {\n\tif r.Cmp(big.NewInt(1)) < 0 || s.Cmp(big.NewInt(1)) < 0 {\n\t\treturn false\n\t}\n\tcurve := DefaultCryptoType()\n\tcurve256N := curve.Params().N\n\tcurve256halfN := new(big.Int).Div(curve256N, big.NewInt(2))\n\tif homestead && s.Cmp(curve256halfN) > 0 {\n\t\treturn false\n\t}\n\t// Frontier: allow s to be in full N range\n\treturn r.Cmp(curve256N) < 0 && s.Cmp(curve256N) < 0 && (v == 0 || v == 1)\n}",
"func (pk *opensslPublicKey) Verify(data, sig []byte) (bool, error) {\n\terr := pk.key.VerifyPKCS1v15(openssl.SHA256_Method, data, sig)\n\treturn err == nil, err\n}",
"func (a *Ali) Verify(publicKey, sign []byte, req *NotifyReq) error {\n\tp, _ := pem.Decode(publicKey)\n\tif p == nil {\n\t\tpanic(\"Public key broken!\")\n\t}\n\tpub, err := x509.ParsePKIXPublicKey(p.Bytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\th := crypto.Hash.New(crypto.SHA1)\n\tm := apikit.Params(structs.Map(req))\n\tb := sortedParams(removeKeys(m, \"sign\", \"sign_type\"))\n\th.Write(removeQuote(b.Bytes()))\n\tsum := h.Sum(nil)\n\tif sign, err = base64.StdEncoding.DecodeString(string(sign)); err != nil {\n\t\treturn err\n\t}\n\treturn rsa.VerifyPKCS1v15(pub.(*rsa.PublicKey), crypto.SHA1, sum, sign)\n}",
"func (pk *PublicKey) Valid() bool {\n\t// TODO not implement\n\treturn true\n}",
"func verifySECP256K1RSignatureFormat(sig []byte) error {\n\tif len(sig) != SECP256K1RSigLen {\n\t\treturn errInvalidSigLen\n\t}\n\n\tvar s secp256k1.ModNScalar\n\ts.SetByteSlice(sig[32:64])\n\tif s.IsOverHalfOrder() {\n\t\treturn errMutatedSig\n\t}\n\treturn nil\n}",
"func (e *curveP256) Verify(publicKeyBytes, message, signatureBytes []byte) bool {\n\tpublicKey, err := btcec.ParsePubKey(publicKeyBytes, btcec.S256())\n\tsignature, err := btcec.ParseDERSignature(signatureBytes, btcec.S256())\n\tmessageHash := chainhash.DoubleHashB(message)\n\n\tif err != nil {\n\t\treturn false\n\t}\n\n\t// Verify the signature for the message using the public key.\n\treturn signature.Verify(messageHash, publicKey)\n}",
"func verifyHashUsingPublicKey(pk PublicKey, tx Transaction, sig []byte, extraObjects []interface{}) (err error) {\n\tswitch pk.Algorithm {\n\tcase SignatureAlgoEd25519:\n\t\t// Decode the public key and signature.\n\t\tvar (\n\t\t\tedPK crypto.PublicKey\n\t\t\tedSig crypto.Signature\n\t\t)\n\t\tcopy(edPK[:], pk.Key)\n\t\tcopy(edSig[:], sig)\n\t\tif edPK.IsNil() {\n\t\t\treturn crypto.ErrPublicNilKey\n\t\t}\n\t\tcryptoSig := crypto.Signature(edSig)\n\t\tvar sigHash crypto.Hash\n\t\tsigHash, err = tx.SignatureHash(extraObjects...)\n\t\tif err == nil {\n\t\t\terr = crypto.VerifyHash(sigHash, edPK, cryptoSig)\n\t\t}\n\n\tdefault:\n\t\terr = ErrUnknownSignAlgorithmType\n\t}\n\treturn\n}",
"func (ac *authenticatedConnection) verify(\n\texpectedSender, actualSender peer.ID,\n\tmessageBytes, signatureBytes []byte,\n) error {\n\tif expectedSender != actualSender {\n\t\treturn fmt.Errorf(\n\t\t\t\"pinned identity [%v] does not match sender identity [%v]\",\n\t\t\texpectedSender,\n\t\t\tactualSender,\n\t\t)\n\t}\n\n\tpubKey, err := actualSender.ExtractPublicKey()\n\tif err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"failed to extract public key from peer [%v]\",\n\t\t\tactualSender,\n\t\t)\n\t}\n\n\tok, err := pubKey.Verify(messageBytes, signatureBytes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"failed to verify signature [0x%v] for sender [%v]: [%v]\",\n\t\t\thex.EncodeToString(signatureBytes),\n\t\t\tactualSender.Pretty(),\n\t\t\terr,\n\t\t)\n\t}\n\n\tif !ok {\n\t\treturn fmt.Errorf(\n\t\t\t\"invalid signature [0x%v] on message from sender [%v]\",\n\t\t\thex.EncodeToString(signatureBytes),\n\t\t\tactualSender.Pretty(),\n\t\t)\n\t}\n\n\treturn nil\n}",
"func ValidateSignatureValues(v byte, r, s *big.Int, hubble bool) bool {\n\tif r.Cmp(common.Big1) < 0 || s.Cmp(common.Big1) < 0 {\n\t\treturn false\n\t}\n\t// reject upper range of s values (ECDSA malleability)\n\t// see discussion in secp256k1/libsecp256k1/include/secp256k1.h\n\tif hubble && s.Cmp(secp256k1halfN) > 0 {\n\t\treturn false\n\t}\n\t// Frontier: allow s to be in full N range\n\treturn r.Cmp(secp256k1N) < 0 && s.Cmp(secp256k1N) < 0 && (v == 0 || v == 1)\n}",
"func VerifySignature(addr, signature string) (err error) {\n\tt := time.Now().UTC()\n\tdata := []byte(t.Format(passwordFormat))\n\tsig, err := hex.DecodeString(signature)\n\tif err != nil {\n\t\treturn err\n\t}\n\thash := crypto.Keccak256Hash(data)\n\tpubkey, err := crypto.Ecrecover(hash[:], sig)\n\tif err != nil {\n\t\treturn\n\t}\n\tsender := utils.PubkeyToAddress(pubkey)\n\tif addr != sender.String() {\n\t\treturn errors.New(\"not match\")\n\t}\n\treturn nil\n}",
"func verify(json string, signature string, pubkeyPem string) bool {\n // hash := hash(json)\n\n return true\n}"
] | [
"0.73681545",
"0.7215541",
"0.71791583",
"0.71225846",
"0.71195",
"0.70969844",
"0.7055452",
"0.70434874",
"0.6897032",
"0.6861955",
"0.67977583",
"0.67892903",
"0.6772671",
"0.67163485",
"0.671264",
"0.668523",
"0.6668513",
"0.6657132",
"0.6614563",
"0.6543836",
"0.65239805",
"0.6513536",
"0.64798796",
"0.6451889",
"0.6426968",
"0.64257294",
"0.64232224",
"0.6416667",
"0.6403809",
"0.6385103",
"0.6379217",
"0.63718575",
"0.63157916",
"0.63144875",
"0.6286649",
"0.62616223",
"0.62538445",
"0.6247374",
"0.6238343",
"0.6223094",
"0.6205865",
"0.62028",
"0.6193924",
"0.6184639",
"0.6131004",
"0.6130102",
"0.6119237",
"0.61155",
"0.6111175",
"0.6106679",
"0.6089501",
"0.6086509",
"0.60752505",
"0.60751456",
"0.60723585",
"0.6059587",
"0.6039829",
"0.6030925",
"0.60000837",
"0.59959286",
"0.599425",
"0.5987624",
"0.59873",
"0.5980995",
"0.59786105",
"0.59734637",
"0.5968952",
"0.59616864",
"0.595462",
"0.5935251",
"0.5916523",
"0.59097576",
"0.589788",
"0.58864546",
"0.58810467",
"0.5880992",
"0.5867656",
"0.5866314",
"0.58652127",
"0.5851401",
"0.5843583",
"0.58433795",
"0.5843077",
"0.58408254",
"0.5809676",
"0.5801722",
"0.5800125",
"0.57640624",
"0.5741723",
"0.5739606",
"0.5713611",
"0.5698077",
"0.5692022",
"0.56820345",
"0.5681739",
"0.5677197",
"0.567458",
"0.56735206",
"0.5672486",
"0.56719047"
] | 0.7219678 | 1 |
calculateKeyPair converts a Montgomery private key k to a twisted Edwards public key and private key (A, a) as defined in calculate_key_pair(k): E = kB A.y = E.y A.s = 0 if E.s == 1: a = k (mod q) else: a = k (mod q) return A, a | calculateKeyPair преобразует монтгомеровский приватный ключ k в публичный и приватный ключи (A, a) в виде искаженной эдвардской кривой, как определено в calculate_key_pair(k): E = kB A.y = E.y A.s = 0 если E.s == 1: a = k (mod q) иначе: a = k (mod q) return A, a | func (p PrivateKey) calculateKeyPair() ([]byte, *edwards25519.Scalar, error) {
var pA edwards25519.Point
var sa edwards25519.Scalar
k, err := (&edwards25519.Scalar{}).SetBytesWithClamping(p)
if err != nil {
return nil, nil, err
}
pub := pA.ScalarBaseMult(k).Bytes()
signBit := (pub[31] & 0x80) >> 7
if signBit == 1 {
sa.Negate(k)
// Set sig bit to 0
pub[31] &= 0x7F
} else {
sa.Set(k)
}
return pub, &sa, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func GenerateKeyPair(group *schnorr.Group) (*SecKey, *PubKey) {\n\ts1 := common.GetRandomInt(group.Q)\n\ts2 := common.GetRandomInt(group.Q)\n\th1 := group.Exp(group.G, s1)\n\th2 := group.Exp(group.G, s2)\n\n\treturn NewSecKey(s1, s2), NewPubKey(h1, h2)\n}",
"func GenerateKeyPair(h func() hash.Hash, seed []byte) (*PublicKey, *PrivateKey, error) {\n\tif len(seed) != 0 && len(seed) != seedSize {\n\t\treturn nil, nil, errors.New(\"invalid size of seed\")\n\t}\n\n\tokm, err := generateOKM(seed, h)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tprivKeyFr, err := frFromOKM(okm)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"convert OKM to FR: %w\", err)\n\t}\n\n\tprivKey := &PrivateKey{PrivKey: g2pubs.NewSecretKeyFromFR(privKeyFr)}\n\tpubKey := privKey.PublicKey()\n\n\treturn pubKey, privKey, nil\n}",
"func GenerateKeyPair(h func() hash.Hash, seed []byte) (*PublicKey, *PrivateKey, error) {\n\tif len(seed) != 0 && len(seed) != seedSize {\n\t\treturn nil, nil, errors.New(\"invalid size of seed\")\n\t}\n\n\tokm, err := generateOKM(seed, h)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tprivKeyFr := frFromOKM(okm)\n\n\tprivKey := &PrivateKey{privKeyFr}\n\tpubKey := privKey.PublicKey()\n\n\treturn pubKey, privKey, nil\n}",
"func GenerateKeyPair(bits int) (keypair *KeyPair, err error) {\n\tkeypair = new(KeyPair)\n\tkeypair.PublicKey = new(PublicKey)\n\tkeypair.PrivateKey = new(PrivateKey)\n\n\tif bits == 0 {\n\t\terr = errors.New(\"RSA modulus size must not be zero.\")\n\t\treturn\n\t}\n\tif bits%8 != 0 {\n\t\terr = errors.New(\"RSA modulus size must be a multiple of 8.\")\n\t\treturn\n\t}\n\n\tfor limit := 0; limit < 1000; limit++ {\n\t\tvar tempKey *rsa.PrivateKey\n\t\ttempKey, err = rsa.GenerateKey(rand.Reader, bits)\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif len(tempKey.Primes) != 2 {\n\t\t\terr = errors.New(\"RSA package generated a weird set of primes (i.e. not two)\")\n\t\t\treturn\n\t\t}\n\n\t\tp := tempKey.Primes[0]\n\t\tq := tempKey.Primes[1]\n\n\t\tif p.Cmp(q) == 0 {\n\t\t\terr = errors.New(\"RSA keypair factors were equal. This is really unlikely dependent on the bitsize and it appears something horrible has happened.\")\n\t\t\treturn\n\t\t}\n\t\tif gcd := new(big.Int).GCD(nil, nil, p, q); gcd.Cmp(big.NewInt(1)) != 0 {\n\t\t\terr = errors.New(\"RSA primes were not relatively prime!\")\n\t\t\treturn\n\t\t}\n\n\t\tmodulus := new(big.Int).Mul(p, q)\n\n\t\tpublicExp := big.NewInt(3)\n\t\t//publicExp := big.NewInt(65537)\n\n\t\t//totient = (p-1) * (q-1)\n\t\ttotient := new(big.Int)\n\t\ttotient.Sub(p, big.NewInt(1))\n\t\ttotient.Mul(totient, new(big.Int).Sub(q, big.NewInt(1)))\n\n\t\tif gcd := new(big.Int).GCD(nil, nil, publicExp, totient); gcd.Cmp(big.NewInt(1)) != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tprivateExp := new(big.Int).ModInverse(publicExp, totient)\n\t\tkeypair.PublicKey.Modulus = modulus\n\t\tkeypair.PrivateKey.Modulus = modulus\n\t\tkeypair.PublicKey.PublicExp = publicExp\n\t\tkeypair.PrivateKey.PrivateExp = privateExp\n\t\treturn\n\t}\n\terr = errors.New(\"Failed to generate a within the limit!\")\n\treturn\n\n}",
"func NewKeyPair() (ecdsa.PrivateKey, []byte) {\n\tellipticCurve := EllipticCurve()\n\n\tprivateKey, err := ecdsa.GenerateKey(ellipticCurve, rand.Reader)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tX := privateKey.PublicKey.X.Bytes()\n\tY := privateKey.PublicKey.Y.Bytes()\n\t//fmt.Println(len(X), X)\n\t//fmt.Println(len(Y), Y)\n\tpublicKey := append(\n\t\tX, // 32 bytes (P256)\n\t\tY..., // 32 bytes (P256)\n\t) // 64 bytes => 64 * 8 bits = 512 bits (perchè usiamo P256 o secp256k)\n\treturn *privateKey, publicKey\n}",
"func NewKeyPair(suite suites.Suite, random cipher.Stream) (kyber.Scalar, kyber.Point) {\n\tx := suite.G2().Scalar().Pick(random)\n\tX := suite.G2().Point().Mul(x, nil)\n\treturn x, X\n}",
"func generateKeyPair() (publicKey, privateKey *[32]byte, err error) {\n\treturn box.GenerateKey(rand.Reader)\n}",
"func GenerateKeyPair() (*rsa.PrivateKey, *rsa.PublicKey, error) {\n\tprivKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn privKey, &privKey.PublicKey, nil\n}",
"func newKeyPair() (ecdsa.PrivateKey, []byte) {\n\t// ECC generate private key\n\tcurve := elliptic.P256()\n\tprivate, err := ecdsa.GenerateKey(curve, rand.Reader)\n\tlog.Println(\"--------\", private)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\t// private key generate public key\n\tpubKey := append(private.PublicKey.X.Bytes(), private.PublicKey.Y.Bytes()...)\n\treturn *private, pubKey\n}",
"func NewKeyPair() (*keyPair, error) {\n\tprivKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprivKey.Precompute()\n\n\tpubKey := &privKey.PublicKey\n\treturn &keyPair{Private: privKey, Public: pubKey}, nil\n}",
"func GenerateKeyPair() ([]byte, []byte) {\n\tconst seckeyLen = 32\n\tvar seckey []byte\n\tvar pubkey []byte\n\nnew_seckey:\n\tseckey = RandByte(seckeyLen)\n\tif secp.SeckeyIsValid(seckey) != 1 {\n\t\tgoto new_seckey // regen\n\t}\n\n\tpubkey = pubkeyFromSeckey(seckey)\n\tif pubkey == nil {\n\t\tlog.Panic(\"IMPOSSIBLE: pubkey invalid from valid seckey\")\n\t\tgoto new_seckey\n\t}\n\tif ret := secp.PubkeyIsValid(pubkey); ret != 1 {\n\t\tlog.Panicf(\"ERROR: Pubkey invalid, ret=%d\", ret)\n\t\tgoto new_seckey\n\t}\n\n\treturn pubkey, seckey\n}",
"func NewPair(p *big.Int, g int64) (*big.Int, *big.Int) {\n\tprivateKey := PrivateKey(p)\n\tpublicKey := PublicKey(privateKey, p, g)\n\treturn privateKey, publicKey\n}",
"func GenerateKeyPair() (pubkey, privkey []byte) {\n\tkey, err := ecdsa.GenerateKey(secp256k1.S256(), rand.Reader)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tpubkey = elliptic.Marshal(secp256k1.S256(), key.X, key.Y)\n\tprivkey = make([]byte, 32)\n\tblob := key.D.Bytes()\n\tcopy(privkey[32-len(blob):], blob)\n\treturn\n}",
"func generateKeyPair(bits int) (*rsa.PrivateKey, *rsa.PublicKey, error) {\n\tprivkey, err := rsa.GenerateKey(rand.Reader, bits)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn privkey, &privkey.PublicKey, nil\n}",
"func GetAccountKeyPairFor(name string) (string, string) {\n\n\tar := AccountsRepository()\n\tpk1, ok := ar.publicKey[name]\n\tvar puk, prk string\n\tif ok {\n\t\tpuk = pk1\n\t} else {\n\t\tpuk = \"\"\n\t}\n\tpk2, ok := ar.privateKey[name]\n\tif ok {\n\t\tprk = pk2\n\t} else {\n\t\tprk = \"\"\n\t}\n\treturn puk, prk\n}",
"func possibleK(pair messagePair, pub *dsa.PublicKey) *big.Int {\n\tz1 := new(big.Int).SetBytes(pair.fst.sum)\n\tz2 := new(big.Int).SetBytes(pair.snd.sum)\n\n\tz1.Sub(z1, z2)\n\tz2.Sub(pair.fst.s, pair.snd.s)\n\tz2.ModInverse(z2, pub.Q)\n\tk := z1.Mul(z1, z2)\n\n\treturn k.Mod(k, pub.Q)\n}",
"func newKeyPair() (ecdsa.PrivateKey, []byte) {\n\tcurve := elliptic.P256()\n\n\tpriKey, err := ecdsa.GenerateKey(curve, rand.Reader)\n\tif err != nil {\n\t\tlog.Panicln(err)\n\t}\n\tpubKey := append(priKey.PublicKey.X.Bytes(), priKey.PublicKey.Y.Bytes()...)\n\n\treturn *priKey, pubKey\n}",
"func (n *nauth) GenerateKeyPair(passphrase string) ([]byte, []byte, error) {\n\tpriv, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tprivDer := x509.MarshalPKCS1PrivateKey(priv)\n\tprivBlock := pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tHeaders: nil,\n\t\tBytes: privDer,\n\t}\n\tprivPem := pem.EncodeToMemory(&privBlock)\n\n\tpub, err := ssh.NewPublicKey(&priv.PublicKey)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpubBytes := ssh.MarshalAuthorizedKey(pub)\n\treturn privPem, pubBytes, nil\n}",
"func (c *Curve25519) GenerateKeyPair() (KeyPair, error) {\n\n\tvar priv [32]byte\n\n\t// fill private key\n\t_, err := c.randSource.Read(priv[:])\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tpriv[0] &= 248\n\tpriv[31] &= 127\n\tpriv[31] |= 64\n\n\tvar pubKey [32]byte\n\tcurve25519.ScalarBaseMult(&pubKey, &priv)\n\n\treturn KeyPair{\n\t\tPrivateKey: priv,\n\t\tPublicKey: pubKey,\n\t}, nil\n\n}",
"func (s Seed) deriveKeyPair(index uint64) (keypair [64]byte) {\n\tbuf := make([]byte, len(s.siadSeed)+8)\n\tn := copy(buf, s.siadSeed[:])\n\tbinary.LittleEndian.PutUint64(buf[n:], index)\n\tseed := blake2b.Sum256(buf)\n\tcopy(keypair[:], ed25519.NewKeyFromSeed(seed[:]))\n\treturn\n}",
"func generateKeyPair(algo string, ecCurve string) (privateKey interface{}, publicKey interface{}, err error) {\n\n // Make them case-insensitive\n switch strings.ToUpper(algo) {\n // If RSA, generate a pair of RSA keys\n case \"RSA\":\n // rsa.GenerateKey(): https://golang.org/pkg/crypto/rsa/#GenerateKey\n // Return value is of type *rsa.PrivateKey\n privateKey, err = rsa.GenerateKey(rand.Reader, 2048) // by default create a 2048 bit key\n\n // If ECDSA, use a provided curve\n case \"ECDSA\":\n // First check if ecCurve is provided\n if ecCurve == \"\" {\n return nil, nil, errors.New(\"ECDSA needs a curve\")\n }\n // Then generate the key based on the curve\n // Curves: https://golang.org/pkg/crypto/elliptic/#Curve\n // ecdsa.GenerateKey(): https://golang.org/pkg/crypto/ecdsa/#GenerateKey\n // Return value is of type *ecdsa.PrivateKey\n switch strings.ToUpper(ecCurve) {\n case \"P224\":\n privateKey, err = ecdsa.GenerateKey(elliptic.P224(), rand.Reader)\n case \"P256\":\n privateKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n case \"P384\":\n \tprivateKey, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader)\n case \"P521\":\n \tprivateKey, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader)\n\n // If the curve is invalid\n default:\n return nil, nil, errors.New(\"Unrecognized curve, valid values are P224, P256, P384 and P521\")\n }\n\n // If neither RSA nor ECDSA return an error\n default:\n return nil, nil, errors.New(\"Unrecognized algorithm, valid options are RSA and ECDSA\")\n }\n\n // If we get here, then input parameters have been valid\n // Check if key generation has been successful by checking err\n if err != nil {\n return nil, nil, err\n }\n\n // Exporting the public key (needed later)\n switch tempPrivKey:= privateKey.(type) {\n case *rsa.PrivateKey:\n publicKey = &tempPrivKey.PublicKey\n case *ecdsa.PrivateKey:\n publicKey = &tempPrivKey.PublicKey\n }\n\n return privateKey, publicKey, err // or just return\n}",
"func generateKeypair() ([]byte, []byte, error) {\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to generate SSH private key: %v\", err)\n\t}\n\tprivatePEM := pem.EncodeToMemory(&pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tHeaders: nil,\n\t\tBytes: x509.MarshalPKCS1PrivateKey(privateKey),\n\t})\n\tpublicKey, err := cssh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to generate SSH public key: %v\", err)\n\t}\n\tpublicPEM := cssh.MarshalAuthorizedKey(publicKey)\n\treturn privatePEM, publicPEM, nil\n}",
"func NewPair(p *big.Int, g int64) (private, public *big.Int) {\n\tprivKey := PrivateKey(p)\n\tpubKey := PublicKey(privKey, p, g)\n\treturn privKey, pubKey\n}",
"func GenerateKeyPair() (*ecdsa.PrivateKey, error ) {\n\tkey, err := ecdsa.GenerateKey(btcec.S256(), rand.Reader)\n\tif err != nil { return nil, err } \n\treturn key, nil\n}",
"func GenerateKeyPair() *rsa.PrivateKey {\n\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\n\tif err != nil {\n\t\tlog.Fatal(\"Error in generating key-value pair, error is\", err)\n\t}\n\treturn privateKey\n}",
"func GenerateRSAKeyPair(opts GenerateRSAOptions) (*RSAKeyPair, error) {\n\t//creates the private key\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, opts.Bits)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error generating private key: %s\\n\", err)\n\t}\n\n\t//validates the private key\n\terr = privateKey.Validate()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error validating private key: %s\\n\", err)\n\t}\n\n\t// sets up the PEM block for private key\n\tprivateKeyBlock := pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tHeaders: nil,\n\t\tBytes: x509.MarshalPKCS1PrivateKey(privateKey),\n\t}\n\n\t//check to see if we are applying encryption to this key\n\tif opts.Encryption != nil {\n\t\t//check to make sure we have a password specified\n\t\tpass := strings.TrimSpace(opts.Encryption.Password)\n\t\tif pass == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"%s\", \"need a password!\")\n\t\t}\n\t\t//check to make sure we're using a supported PEMCipher\n\t\tencCipher := opts.Encryption.PEMCipher\n\t\tif encCipher != x509.PEMCipherDES &&\n\t\t\tencCipher != x509.PEMCipher3DES &&\n\t\t\tencCipher != x509.PEMCipherAES128 &&\n\t\t\tencCipher != x509.PEMCipherAES192 &&\n\t\t\tencCipher != x509.PEMCipherAES256 {\n\t\t\treturn nil, fmt.Errorf(\"%s\", \"invalid PEMCipher\")\n\t\t}\n\t\t//encrypt the private key block\n\t\tencBlock, err := x509.EncryptPEMBlock(rand.Reader, \"RSA PRIVATE KEY\", privateKeyBlock.Bytes, []byte(pass), encCipher)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error encrypting pirvate key: %s\\n\", err)\n\t\t}\n\t\t//replaces the starting one with the one we encrypted\n\t\tprivateKeyBlock = *encBlock\n\t}\n\n\t// serializes the public key in a DER-encoded PKIX format (see docs for more)\n\tpublicKeyBytes, err := x509.MarshalPKIXPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error setting up public key: %s\\n\", err)\n\t}\n\n\t// sets up the PEM block for public key\n\tpublicKeyBlock := pem.Block{\n\t\tType: 
\"PUBLIC KEY\",\n\t\tHeaders: nil,\n\t\tBytes: publicKeyBytes,\n\t}\n\n\t//returns the created key pair\n\treturn &RSAKeyPair{\n\t\tPrivateKey: string(pem.EncodeToMemory(&privateKeyBlock)),\n\t\tPublicKey: string(pem.EncodeToMemory(&publicKeyBlock)),\n\t}, nil\n}",
"func ConvertToPPK(privateKey *rsa.PrivateKey, pub []byte) ([]byte, error) {\n\t// https://the.earth.li/~sgtatham/putty/0.76/htmldoc/AppendixC.html#ppk\n\t// RSA keys are stored using an algorithm-name of 'ssh-rsa'. (Keys stored like this are also used by the updated RSA signature schemes that use\n\t// hashes other than SHA-1. The public key data has already provided the key modulus and the public encoding exponent. The private data stores:\n\t// mpint: the private decoding exponent of the key.\n\t// mpint: one prime factor p of the key.\n\t// mpint: the other prime factor q of the key. (RSA keys stored in this format are expected to have exactly two prime factors.)\n\t// mpint: the multiplicative inverse of q modulo p.\n\tppkPrivateKey := new(bytes.Buffer)\n\n\t// mpint: the private decoding exponent of the key.\n\t// this is known as 'D'\n\tbinary.Write(ppkPrivateKey, binary.BigEndian, getRFC4251Mpint(privateKey.D))\n\n\t// mpint: one prime factor p of the key.\n\t// this is known as 'P'\n\t// the RSA standard dictates that P > Q\n\t// for some reason what PuTTY names 'P' is Primes[1] to Go, and what PuTTY names 'Q' is Primes[0] to Go\n\tP, Q := privateKey.Primes[1], privateKey.Primes[0]\n\tbinary.Write(ppkPrivateKey, binary.BigEndian, getRFC4251Mpint(P))\n\n\t// mpint: the other prime factor q of the key. 
(RSA keys stored in this format are expected to have exactly two prime factors.)\n\t// this is known as 'Q'\n\tbinary.Write(ppkPrivateKey, binary.BigEndian, getRFC4251Mpint(Q))\n\n\t// mpint: the multiplicative inverse of q modulo p.\n\t// this is known as 'iqmp'\n\tiqmp := new(big.Int).ModInverse(Q, P)\n\tbinary.Write(ppkPrivateKey, binary.BigEndian, getRFC4251Mpint(iqmp))\n\n\t// now we need to base64-encode the PPK-formatted private key which is made up of the above values\n\tppkPrivateKeyBase64 := make([]byte, base64.StdEncoding.EncodedLen(ppkPrivateKey.Len()))\n\tbase64.StdEncoding.Encode(ppkPrivateKeyBase64, ppkPrivateKey.Bytes())\n\n\t// read Teleport public key\n\t// fortunately, this is the one thing that's in exactly the same format that the PPK file uses, so we can just copy it verbatim\n\t// remove ssh-rsa plus additional space from beginning of string if present\n\tif !bytes.HasPrefix(pub, []byte(constants.SSHRSAType+\" \")) {\n\t\treturn nil, trace.BadParameter(\"pub does not appear to be an ssh-rsa public key\")\n\t}\n\tpub = bytes.TrimSuffix(bytes.TrimPrefix(pub, []byte(constants.SSHRSAType+\" \")), []byte(\"\\n\"))\n\n\t// the PPK file contains an anti-tampering MAC which is made up of various values which appear in the file.\n\t// copied from Section C.3 of https://the.earth.li/~sgtatham/putty/0.76/htmldoc/AppendixC.html#ppk:\n\t// hex-mac-data is a hexadecimal-encoded value, 64 digits long (i.e. 
32 bytes), generated using the HMAC-SHA-256 algorithm with the following binary data as input:\n\t// string: the algorithm-name header field.\n\t// string: the encryption-type header field.\n\t// string: the key-comment-string header field.\n\t// string: the binary public key data, as decoded from the base64 lines after the 'Public-Lines' header.\n\t// string: the plaintext of the binary private key data, as decoded from the base64 lines after the 'Private-Lines' header.\n\n\t// these values are also used in the MAC generation, so we declare them as variables\n\tkeyType := constants.SSHRSAType\n\tencryptionType := \"none\"\n\t// as work for the future, it'd be nice to get the proxy/user pair name in here to make the name more\n\t// of a unique identifier. this has to be done at generation time because the comment is part of the MAC\n\tfileComment := \"teleport-generated-ppk\"\n\n\t// string: the algorithm-name header field.\n\tmacKeyType := getRFC4251String([]byte(keyType))\n\t// create a buffer to hold the elements needed to generate the MAC\n\tmacInput := new(bytes.Buffer)\n\tbinary.Write(macInput, binary.LittleEndian, macKeyType)\n\n\t// string: the encryption-type header field.\n\tmacEncryptionType := getRFC4251String([]byte(encryptionType))\n\tbinary.Write(macInput, binary.BigEndian, macEncryptionType)\n\n\t// string: the key-comment-string header field.\n\tmacComment := getRFC4251String([]byte(fileComment))\n\tbinary.Write(macInput, binary.BigEndian, macComment)\n\n\t// base64-decode the Teleport public key, as we need its binary representation to generate the MAC\n\tdecoded := make([]byte, base64.StdEncoding.EncodedLen(len(pub)))\n\tn, err := base64.StdEncoding.Decode(decoded, pub)\n\tif err != nil {\n\t\treturn nil, trace.Errorf(\"could not base64-decode public key: %v, got %v bytes successfully\", err, n)\n\t}\n\tdecoded = decoded[:n]\n\t// append the decoded public key bytes to the MAC buffer\n\tmacPublicKeyData := 
getRFC4251String(decoded)\n\tbinary.Write(macInput, binary.BigEndian, macPublicKeyData)\n\n\t// append our PPK-formatted private key bytes to the MAC buffer\n\tmacPrivateKeyData := getRFC4251String(ppkPrivateKey.Bytes())\n\tbinary.Write(macInput, binary.BigEndian, macPrivateKeyData)\n\n\t// as per the PPK spec, the key for the MAC is blank when the PPK file is unencrypted.\n\t// therefore, the key is a zero-length byte slice.\n\thmacHash := hmac.New(sha256.New, []byte{})\n\t// generate the MAC using HMAC-SHA-256\n\thmacHash.Write(macInput.Bytes())\n\tmacString := hex.EncodeToString(hmacHash.Sum(nil))\n\n\t// build the string-formatted output PPK file\n\tppk := new(bytes.Buffer)\n\tfmt.Fprintf(ppk, \"PuTTY-User-Key-File-3: %v\\n\", keyType)\n\tfmt.Fprintf(ppk, \"Encryption: %v\\n\", encryptionType)\n\tfmt.Fprintf(ppk, \"Comment: %v\\n\", fileComment)\n\t// chunk the Teleport-formatted public key into 64-character length lines\n\tchunkedPublicKey := chunk(string(pub), 64)\n\tfmt.Fprintf(ppk, \"Public-Lines: %v\\n\", len(chunkedPublicKey))\n\tfor _, r := range chunkedPublicKey {\n\t\tfmt.Fprintf(ppk, \"%s\\n\", r)\n\t}\n\t// chunk the PPK-formatted private key into 64-character length lines\n\tchunkedPrivateKey := chunk(string(ppkPrivateKeyBase64), 64)\n\tfmt.Fprintf(ppk, \"Private-Lines: %v\\n\", len(chunkedPrivateKey))\n\tfor _, r := range chunkedPrivateKey {\n\t\tfmt.Fprintf(ppk, \"%s\\n\", r)\n\t}\n\tfmt.Fprintf(ppk, \"Private-MAC: %v\\n\", macString)\n\n\treturn ppk.Bytes(), nil\n}",
"func GenerateKeyPair(rand io.Reader) (*PublicKey, *PrivateKey, error) {\n\tvar seed [KeySeedSize]byte\n\tif rand == nil {\n\t\trand = cryptoRand.Reader\n\t}\n\t_, err := io.ReadFull(rand, seed[:])\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpk, sk := NewKeyFromSeed(seed[:])\n\treturn pk, sk, nil\n}",
"func generate_keys(key string, round_keys *([]string)) {\n\t// The PC1 table\n\tpc1 := [56]int{\n\t\t57, 49, 41, 33, 25, 17, 9,\n\t\t1, 58, 50, 42, 34, 26, 18,\n\t\t10, 2, 59, 51, 43, 35, 27,\n\t\t19, 11, 3, 60, 52, 44, 36,\n\t\t63, 55, 47, 39, 31, 23, 15,\n\t\t7, 62, 54, 46, 38, 30, 22,\n\t\t14, 6, 61, 53, 45, 37, 29,\n\t\t21, 13, 5, 28, 20, 12, 4,\n\t}\n\t// The PC2 table\n\tpc2 := [48]int{\n\t\t14, 17, 11, 24, 1, 5,\n\t\t3, 28, 15, 6, 21, 10,\n\t\t23, 19, 12, 4, 26, 8,\n\t\t16, 7, 27, 20, 13, 2,\n\t\t41, 52, 31, 37, 47, 55,\n\t\t30, 40, 51, 45, 33, 48,\n\t\t44, 49, 39, 56, 34, 53,\n\t\t46, 42, 50, 36, 29, 32,\n\t}\n\t// 1. Compressing the key using the PC1 table\n\tperm_key := \"\"\n\tfor i := 0; i < 56; i++ {\n\t\tperm_key += string(key[pc1[i]-1])\n\t}\n\t// 2. Dividing the key into two equal halves\n\t// left := perm_key.substr(0, 28)\n\tleft := perm_key[0:28]\n\tright := perm_key[28:56]\n\tfor i := 0; i < 16; i++ {\n\t\t// 3.1. For rounds 1, 2, 9, 16 the key_chunks\n\t\t// are shifted by one.\n\t\tif i == 0 || i == 1 || i == 8 || i == 15 {\n\t\t\tleft = shift_left_once(left)\n\t\t\tright = shift_left_once(right)\n\t\t} else {\n\t\t\t// 3.2. For other rounds, the key_chunks\n\t\t\t// are shifted by two\n\t\t\tleft = shift_left_twice(left)\n\t\t\tright = shift_left_twice(right)\n\t\t}\n\t\t// Combining the two chunks\n\t\tcombined_key := left + right\n\t\tround_key := \"\"\n\t\t// Finally, using the PC2 table to transpose the key bits\n\t\tfor i := 0; i < 48; i++ {\n\t\t\tround_key += string(combined_key[pc2[i]-1])\n\t\t}\n\t\t(*round_keys)[i] = round_key\n\t}\n\n}",
"func GenerateNewKeypair() *Keypair {\n\n\tpk, _ := ecdsa.GenerateKey(elliptic.P224(), rand.Reader)\n\n\tb := bigJoin(KEY_SIZE, pk.PublicKey.X, pk.PublicKey.Y)\n\n\tpublic := base58.EncodeBig([]byte{}, b)\n\tprivate := base58.EncodeBig([]byte{}, pk.D)\n\n\tkp := Keypair{Public: public, Private: private}\n\n\treturn &kp\n}",
"func (p *ph) KeyPair() (publicKey, privateKey []byte, err error) {\n\tsecretKey, err := randomBytes(p._SKLEN)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpublicKey, err = p.PubKey(secretKey)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn publicKey, secretKey, nil\n}",
"func (w *Whisper) NewKeyPair() (string, error) {\n\tkey, err := crypto.GenerateKey()\n\tif err != nil || !validatePrivateKey(key) {\n\t\tkey, err = crypto.GenerateKey() // retry once\n\t}\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif !validatePrivateKey(key) {\n\t\treturn \"\", fmt.Errorf(\"failed to generate valid key\")\n\t}\n\n\tid, err := GenerateRandomID()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to generate ID: %s\", err)\n\t}\n\n\tw.keyMu.Lock()\n\tdefer w.keyMu.Unlock()\n\n\tif w.privateKeys[id] != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to generate unique ID\")\n\t}\n\tw.privateKeys[id] = key\n\treturn id, nil\n}",
"func NewPair(p *big.Int, g int64) (private, public *big.Int) {\n\tprivate = PrivateKey(p)\n\tpublic = PublicKey(private, p, g)\n\treturn\n}",
"func NewKeyPair(rootKey RootKeyable, chainKey ChainKeyable) *KeyPair {\n\tkeyPair := KeyPair{\n\t\tRootKey: rootKey,\n\t\tChainKey: chainKey,\n\t}\n\n\treturn &keyPair\n}",
"func GenerateDeterministicKeyPair(seed []byte) ([]byte, []byte) {\n\t_, pubkey, seckey := DeterministicKeyPairIterator(seed)\n\treturn pubkey, seckey\n}",
"func GenerateDeterministicKeyPair(seed []byte) ([]byte, []byte) {\n\t_, pubkey, seckey := DeterministicKeyPairIterator(seed)\n\treturn pubkey, seckey\n}",
"func generateDeterministicKeyPair(seed []byte) ([]byte, []byte) {\n\tif seed == nil {\n\t\tlog.Panic()\n\t}\n\tif len(seed) != 32 {\n\t\tlog.Panic()\n\t}\n\n\tconst seckey_len = 32\n\tvar seckey []byte = make([]byte, seckey_len)\n\nnew_seckey:\n\tseed = SumSHA256(seed[0:32])\n\tcopy(seckey[0:32], seed[0:32])\n\n\tif bytes.Equal(seckey, seed) == false {\n\t\tlog.Panic()\n\t}\n\tif secp.SeckeyIsValid(seckey) != 1 {\n\t\tlog.Printf(\"generateDeterministicKeyPair, secp.SeckeyIsValid fail\")\n\t\tgoto new_seckey //regen\n\t}\n\n\tvar pubkey []byte = secp.GeneratePublicKey(seckey)\n\n\tif pubkey == nil {\n\t\tlog.Panic(\"ERROR: impossible, secp.BaseMultiply always returns true\")\n\t\tgoto new_seckey\n\t}\n\tif len(pubkey) != 33 {\n\t\tlog.Panic(\"ERROR: impossible, pubkey length wrong\")\n\t}\n\n\tif ret := secp.PubkeyIsValid(pubkey); ret != 1 {\n\t\tlog.Panic(\"ERROR: pubkey invalid, ret=%i\", ret)\n\t}\n\n\tif ret := VerifyPubkey(pubkey); ret != 1 {\n\t\tlog.Printf(\"seckey= %s\", hex.EncodeToString(seckey))\n\t\tlog.Printf(\"pubkey= %s\", hex.EncodeToString(pubkey))\n\n\t\tlog.Panic(\"ERROR: pubkey is invalid, for deterministic. ret=%i\", ret)\n\t\tgoto new_seckey\n\t}\n\n\treturn pubkey, seckey\n}",
"func (s *SkyСoinService) GenerateKeyPair() *KeysResponse {\n\tseed := getRand()\n\trand.Read(seed)\n\tpub, sec := cipher.GenerateDeterministicKeyPair(seed)\n\treturn &KeysResponse{\n\t\tPrivate: sec.Hex(),\n\t\tPublic: pub.Hex(),\n\t}\n}",
"func GenKeyPair() (string, string, error) {\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tprivateKeyPEM := &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}\n\tvar private bytes.Buffer\n\tif err := pem.Encode(&private, privateKeyPEM); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// generate public key\n\tpub, err := ssh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tpublic := ssh.MarshalAuthorizedKey(pub)\n\treturn string(public), private.String(), nil\n}",
"func GenKeyPair() (string, string, error) {\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tprivateKeyPEM := &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}\n\tvar private bytes.Buffer\n\tif err := pem.Encode(&private, privateKeyPEM); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// generate public key\n\tpub, err := ssh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tpublic := ssh.MarshalAuthorizedKey(pub)\n\treturn string(public), private.String(), nil\n}",
"func CreateKeyPair() (publicKeyBytes []byte, privateKeyBytes []byte, err error) {\n\tprivateKey, _ := rsa.GenerateKey(rand.Reader, 2048)\n\tpublicKey := privateKey.PublicKey\n\tpub, err := ssh.NewPublicKey(&publicKey)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tpublicKeyBytes = ssh.MarshalAuthorizedKey(pub)\n\n\tpriBytes := x509.MarshalPKCS1PrivateKey(privateKey)\n\tprivateKeyBytes = pem.EncodeToMemory(\n\t\t&pem.Block{\n\t\t\tType: \"RSA PRIVATE KEY\",\n\t\t\tBytes: priBytes,\n\t\t},\n\t)\n\treturn publicKeyBytes, privateKeyBytes, nil\n}",
"func generateKeyPairs(keysize int) ([]byte, []byte, []byte, error) {\n var (\n privateKey *rsa.PrivateKey = nil\n privDer, pubDer, sshBytes []byte = nil, nil, nil\n privBlock, pubBlock *pem.Block = nil, nil\n privPem, pubPem []byte = nil, nil\n sshPub ssh.PublicKey\n err error = nil\n )\n\n // check key size\n if keysize != rsaStrongKeySize && keysize != rsaWeakKeySize {\n return nil, nil, nil, fmt.Errorf(\"[ERR] RSA key size should be either 1024 or 2048. Current %d\", keysize)\n }\n\n // generate private key\n privateKey, err = rsa.GenerateKey(rand.Reader, keysize)\n if err != nil {\n return nil, nil, nil, err\n }\n // check the key generated\n err = privateKey.Validate()\n if err != nil {\n return nil, nil, nil, err\n }\n // build private key\n privDer = x509.MarshalPKCS1PrivateKey(privateKey)\n privBlock = &pem.Block{\n Type: \"RSA PRIVATE KEY\",\n Headers: nil,\n Bytes: privDer,\n }\n privPem = pem.EncodeToMemory(privBlock)\n\n // generate and public key\n pubDer, err = x509.MarshalPKIXPublicKey(privateKey.Public())\n if err != nil {\n return nil, nil, nil, err\n }\n pubBlock = &pem.Block{\n Type: \"PUBLIC KEY\",\n Headers: nil,\n Bytes: pubDer,\n }\n pubPem = pem.EncodeToMemory(pubBlock)\n\n // generate ssh key\n sshPub, err = ssh.NewPublicKey(privateKey.Public())\n if err != nil {\n return nil, nil, nil, err\n }\n sshBytes = ssh.MarshalAuthorizedKey(sshPub)\n return privPem, pubPem, sshBytes, err\n}",
"func deterministicKeyPairIteratorStep(seed []byte) ([]byte, []byte) {\n\tif len(seed) != 32 {\n\t\tlog.Panic(\"ERROR: deterministicKeyPairIteratorStep: seed must be 32 bytes\")\n\t}\n\n\tconst seckeyLen = 32\n\tseckey := make([]byte, seckeyLen)\n\nnew_seckey:\n\tseed = SumSHA256(seed)\n\tcopy(seckey, seed)\n\n\tif secp.SeckeyIsValid(seckey) != 1 {\n\t\tif DebugPrint {\n\t\t\tlog.Printf(\"deterministicKeyPairIteratorStep, secp.SeckeyIsValid fail\")\n\t\t}\n\t\tgoto new_seckey //regen\n\t}\n\n\tpubkey := secp.GeneratePublicKey(seckey)\n\tif pubkey == nil {\n\t\tlog.Panic(\"ERROR: deterministicKeyPairIteratorStep: GeneratePublicKey failed, impossible, secp.BaseMultiply always returns true\")\n\t\tgoto new_seckey\n\t}\n\n\tif len(pubkey) != 33 {\n\t\tlog.Panic(\"ERROR: deterministicKeyPairIteratorStep: impossible, pubkey length wrong\")\n\t}\n\n\tif ret := secp.PubkeyIsValid(pubkey); ret != 1 {\n\t\tlog.Panicf(\"ERROR: deterministicKeyPairIteratorStep: PubkeyIsValid failed, ret=%d\", ret)\n\t}\n\n\tif ret := VerifyPubkey(pubkey); ret != 1 {\n\t\tlog.Printf(\"seckey= %s\", hex.EncodeToString(seckey))\n\t\tlog.Printf(\"pubkey= %s\", hex.EncodeToString(pubkey))\n\n\t\tlog.Panicf(\"ERROR: deterministicKeyPairIteratorStep: VerifyPubkey failed, ret=%d\", ret)\n\t\tgoto new_seckey\n\t}\n\n\treturn pubkey, seckey\n}",
"func GenerateWeakKeyPair() ([]byte, []byte, []byte, error) {\n prv, pub, ssh, err := generateKeyPairs(rsaWeakKeySize)\n return pub, prv, ssh, err\n}",
"func KeyPairGenerate(IKM []byte, S []byte, W []byte) int {\n\tr := NewBIGints(CURVE_Order)\n\tL := ceil(3*ceil(r.nbits(),8),2)\n\tLEN:=core.InttoBytes(L, 2)\n\tAIKM:=make([]byte,len(IKM)+1) \n\tfor i:=0;i<len(IKM);i++ {\n\t\tAIKM[i]=IKM[i]\n\t}\n\tAIKM[len(IKM)]=0\n\n\tG := ECP2_generator()\n\tif G.Is_infinity() {\n\t\treturn BLS_FAIL\n\t}\n\tSALT := []byte(\"BLS-SIG-KEYGEN-SALT-\")\n\tPRK := core.HKDF_Extract(core.MC_SHA2,HASH_TYPE,SALT,AIKM)\n\tOKM := core.HKDF_Expand(core.MC_SHA2,HASH_TYPE,L,PRK,LEN)\n\n\tdx:= DBIG_fromBytes(OKM[:])\n\ts:= dx.Mod(r)\n\ts.ToBytes(S)\n// SkToPk\n\tG = G2mul(G, s)\n\tG.ToBytes(W,true)\n\treturn BLS_OK\n}",
"func KeyPair() (*[PUBLICKEYBYTES]byte, *[SECRETKEYBYTES]byte) {\n\tpublicKey := [PUBLICKEYBYTES]byte{}\n\tprivateKey := [SECRETKEYBYTES]byte{}\n\tpublicKeyPtr := (*C.uchar)(unsafe.Pointer(&publicKey))\n\tprivateKeyPtr := (*C.uchar)(unsafe.Pointer(&privateKey))\n\tC.crypto_vrf_keypair(publicKeyPtr, privateKeyPtr)\n\treturn &publicKey, &privateKey\n}",
"func NewKeyPair(pub crypto.PublicKey, privArmor string) KeyPair {\n\treturn KeyPair{\n\t\tPublicKey: pub,\n\t\tPrivKeyArmor: privArmor,\n\t}\n}",
"func GetKeyPair(version *avatar.Version) (avatar.KeyPair, error) {\n\t// zero out the Revision field since it is irrelevant to client keys:\n\tv := avatar.Version{version.Major, version.Minor, version.Patch, 0}\n\n\tif pair, ok := keys[v]; ok {\n\t\treturn pair, nil\n\t}\n\n\treturn *emptyPair, errors.New(\"unsupported version\")\n}",
"func Keypair(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, err error) {\n\tpublic, private, err := ed25519.GenerateKey(rand)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn PublicKey(public), PrivateKey(private), nil\n}",
"func (hd *HDWallet) KeyPair() (ed25519.PrivateKey, ed25519.PublicKey) {\n\n\tpath, err := bip32path.ParsePath(fmt.Sprintf(pathString, hd.index))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tcurve := eddsa.Ed25519()\n\tkey, err := slip10.DeriveKeyFromPath(hd.seed, curve, path)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tpubKey, privKey := key.Key.(eddsa.Seed).Ed25519Key()\n\n\treturn ed25519.PrivateKey(privKey), ed25519.PublicKey(pubKey)\n}",
"func GenLamportKeyPair() *Keypair {\n\tkp := Keypair{\n\t\tpublic: [256]*key{},\n\t\tprivate: [256]*key{},\n\t}\n\n\tpub, priv := genKeyPair()\n\tcopy(kp.public[:], pub)\n\tcopy(kp.private[:], priv)\n\treturn &kp\n}",
"func GenerateStrongKeyPair() ([]byte, []byte, []byte, error) {\n prv, pub, ssh, err := generateKeyPairs(rsaStrongKeySize)\n return pub, prv, ssh, err\n}",
"func SplitKey(privateKey *big.Int, publicKey *Key, n int) ([]*Trustee, []*big.Int, error) {\n\t// Choose n-1 random private keys and compute the nth as privateKey -\n\t// (key_1 + key_2 + ... + key_{n-1}). This computation must be\n\t// performed in the exponent group of g, which is\n\t// Z_{Key.ExponentPrime}.\n\ttrustees := make([]*Trustee, n)\n\tkeys := make([]*big.Int, n)\n\tsum := big.NewInt(0)\n\tvar err error\n\tfor i := 0; i < n-1; i++ {\n\t\tkeys[i], err = rand.Int(rand.Reader, publicKey.ExponentPrime)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\ttpk := &Key{\n\t\t\tGenerator: new(big.Int).Set(publicKey.Generator),\n\t\t\tPrime: new(big.Int).Set(publicKey.Prime),\n\t\t\tExponentPrime: new(big.Int).Set(publicKey.ExponentPrime),\n\t\t\tPublicValue: new(big.Int).Exp(publicKey.Generator, keys[i], publicKey.Prime),\n\t\t}\n\n\t\ttrustees[i] = &Trustee{PublicKey: tpk}\n\t\tsum.Add(sum, keys[i])\n\t\tsum.Mod(sum, publicKey.ExponentPrime)\n\t}\n\n\t// The choice of random private keys in the loop fully determines the\n\t// final key.\n\tkeys[n-1] = new(big.Int).Sub(privateKey, sum)\n\tkeys[n-1].Mod(keys[n-1], publicKey.ExponentPrime)\n\t//npok, err := NewSchnorrProof(keys[n-1], publicKey)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tntpk := &Key{\n\t\tGenerator: new(big.Int).Set(publicKey.Generator),\n\t\tPrime: new(big.Int).Set(publicKey.Prime),\n\t\tExponentPrime: new(big.Int).Set(publicKey.ExponentPrime),\n\t\tPublicValue: new(big.Int).Exp(publicKey.Generator, keys[n-1], publicKey.Prime),\n\t}\n\n\t//trustees[n-1] = &Trustee{PoK: npok, PublicKey: ntpk}\n\ttrustees[n-1] = &Trustee{PublicKey: ntpk}\n\n\treturn trustees, keys, nil\n}",
"func DeterministicKeyPairIterator(seed_in []byte) ([]byte, []byte, []byte) {\n\tseed1 := Secp256k1Hash(seed_in) //make it difficult to derive future seckeys from previous seckeys\n\tseed2 := SumSHA256(append(seed_in, seed1...))\n\tpubkey, seckey := generateDeterministicKeyPair(seed2) //this is our seckey\n\treturn seed1, pubkey, seckey\n}",
"func GenKeyPairs(bits int) (privateKey ,publicKey string,err error) {\n\tpriKey, err := rsa.GenerateKey(rand.Reader, bits)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tderStream := x509.MarshalPKCS1PrivateKey(priKey)\n\tblock := &pem.Block{\n\t\tType: \"private key\",\n\t\tBytes: derStream,\n\t}\n\tb := pem.EncodeToMemory(block)\n\tprivateKey = string(b)\n\n\tpubKey := &priKey.PublicKey\n\tderPkix, err := x509.MarshalPKIXPublicKey(pubKey)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tblock = &pem.Block{\n\t\tType: \"public key\",\n\t\tBytes: derPkix,\n\t}\n\tb = pem.EncodeToMemory(block)\n\tpublicKey = string(b)\n\treturn privateKey, publicKey, nil\n}",
"func GenerateKeypair() (privkey, pubkey []byte, err error) {\n\tpair, err := noise.DH25519.GenerateKeypair(rand.Reader)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// pair.Public is already filled in; assert here that PubkeyFromPrivkey\n\t// agrees with it.\n\tderivedPubkey := PubkeyFromPrivkey(pair.Private)\n\tif !bytes.Equal(derivedPubkey, pair.Public) {\n\t\tpanic(fmt.Sprintf(\"expected pubkey %x, got %x\", derivedPubkey, pair.Public))\n\t}\n\n\treturn pair.Private, pair.Public, nil\n}",
"func keypathpair(k0, k1 int) float64 {\n\tswitch p := math.Abs(float64(row(k0)-row(k1))) + math.Abs(float64(column(k0)-column(k1))); {\n\tcase finger(k0) == finger(k1):\n\t\treturn 2 * p\n\tcase (finger(k0) < finger(k1)) == (finger(k1) < 5):\n\t\treturn p - 0.5\n\tdefault:\n\t\treturn p\n\t}\n}",
"func KeyPair() (publicKey, privateKey []byte, err error) {\n\treturn defaultPH.KeyPair()\n}",
"func generateKasme(ck, ik, plmn, sqn, ak []byte) ([]byte, error) {\n\tconst fc = 16 // identifies the algorithm\n\tconst inputBytes = 14\n\n\tvar msg = make([]byte, inputBytes)\n\tmsg[0] = fc\n\tcopy(msg[1:], plmn)\n\tmsg[5] = ExpectedPlmnBytes\n\tcopy(msg[6:], xor(sqn, ak))\n\tmsg[13] = sqnMaxBytes\n\tkey := append(ck, ik...)\n\n\t// 3GPP Key Derivation Function defined in TS 33.220 to be hmac-sha256\n\thash := hmac.New(sha256.New, key)\n\t_, err := hash.Write(msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn hash.Sum(nil), nil\n}",
"func CreateKeyPair() (pubKey PublicKey, secKey SecretKey, err error) {\n\terrorCode := C.crypto_sign_keypair((*C.uchar)(&pubKey[0]), (*C.uchar)(&secKey[0]))\n\tif errorCode != 0 {\n\t\terr = errors.New(\"call to crypto_sign_keypair failed\")\n\t}\n\treturn\n}",
"func (vdb *VspDatabase) KeyPair() (ed25519.PrivateKey, ed25519.PublicKey, error) {\n\tvar seed []byte\n\terr := vdb.db.View(func(tx *bolt.Tx) error {\n\t\tvspBkt := tx.Bucket(vspBktK)\n\n\t\ts := vspBkt.Get(privateKeyK)\n\n\t\t// Byte slices returned from Bolt are only valid during a transaction.\n\t\t// Need to make a copy.\n\t\tseed = make([]byte, len(s))\n\t\tcopy(seed, s)\n\n\t\tif seed == nil {\n\t\t\t// should not happen\n\t\t\treturn fmt.Errorf(\"no private key found\")\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tsignKey := ed25519.NewKeyFromSeed(seed)\n\n\t// Derive pubKey from signKey\n\tpubKey, ok := signKey.Public().(ed25519.PublicKey)\n\tif !ok {\n\t\treturn nil, nil, fmt.Errorf(\"failed to cast signing key: %T\", pubKey)\n\t}\n\n\treturn signKey, pubKey, err\n}",
"func DeterministicKeyPairIterator(seedIn []byte) ([]byte, []byte, []byte) {\n\tseed1 := Secp256k1Hash(seedIn) // make it difficult to derive future seckeys from previous seckeys\n\tseed2 := SumSHA256(append(seedIn, seed1...))\n\tpubkey, seckey := deterministicKeyPairIteratorStep(seed2) // this is our seckey\n\treturn seed1, pubkey, seckey\n}",
"func computePubKey(pubA, pubR [33]byte, msg []byte) ([33]byte, error) {\n\tvar returnValue [33]byte\n\n\t// Hardcode curve\n\tcurve := btcec.S256()\n\n\tA, err := btcec.ParsePubKey(pubA[:], curve)\n\tif err != nil {\n\t\treturn returnValue, err\n\t}\n\n\tR, err := btcec.ParsePubKey(pubR[:], curve)\n\tif err != nil {\n\t\treturn returnValue, err\n\t}\n\n\t// e = Hash(messageType, oraclePubQ)\n\tvar hashInput []byte\n\thashInput = append(msg, R.X.Bytes()...)\n\te := chainhash.HashB(hashInput)\n\n\tbigE := new(big.Int).SetBytes(e)\n\n\tif bigE.Cmp(curve.N) >= 0 {\n\t\treturn returnValue, fmt.Errorf(\"hash of (msg, pubR) too big\")\n\t}\n\n\t// e * B\n\tA.X, A.Y = curve.ScalarMult(A.X, A.Y, e)\n\n\tA.Y.Neg(A.Y)\n\n\tA.Y.Mod(A.Y, curve.P)\n\n\tP := new(btcec.PublicKey)\n\n\t// add to R\n\tP.X, P.Y = curve.Add(A.X, A.Y, R.X, R.Y)\n\tcopy(returnValue[:], P.SerializeCompressed())\n\treturn returnValue, nil\n}",
"func DeriveKeyPair(scheme Scheme, uri string) (kp KeyPair, err error) {\n\tphrase, path, pwd, err := splitURI(uri)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif b, ok := DecodeHex(phrase); ok {\n\t\tkp, err = scheme.FromSeed(b)\n\t} else {\n\t\tkp, err = scheme.FromPhrase(phrase, pwd)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdjs, err := deriveJunctions(derivePath(path))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn scheme.Derive(kp, djs)\n}",
"func (session *Session) GenerateRSAKeyPair(tokenLabel string, tokenPersistent bool, expDate time.Time, bits int) (pkcs11.ObjectHandle, pkcs11.ObjectHandle, error) {\n\tif session == nil || session.Ctx == nil {\n\t\treturn 0, 0, fmt.Errorf(\"session not initialized\")\n\t}\n\ttoday := time.Now()\n\tpublicKeyTemplate := []*pkcs11.Attribute{\n\t\tpkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PUBLIC_KEY),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_LABEL, session.Label),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_ID, []byte(tokenLabel)),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_RSA),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_TOKEN, tokenPersistent),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_START_DATE, today),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_END_DATE, expDate),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_VERIFY, true),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_PUBLIC_EXPONENT, []byte{1, 0, 1}),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_MODULUS_BITS, bits),\n\t}\n\n\tprivateKeyTemplate := []*pkcs11.Attribute{\n\t\tpkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PRIVATE_KEY),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_LABEL, session.Label),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_ID, []byte(tokenLabel)),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_RSA),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_TOKEN, tokenPersistent),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_START_DATE, today),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_END_DATE, expDate),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_SIGN, true),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_SENSITIVE, true),\n\t}\n\n\tpubKey, privKey, err := session.Ctx.GenerateKeyPair(\n\t\tsession.Handle,\n\t\t[]*pkcs11.Mechanism{\n\t\t\tpkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_KEY_PAIR_GEN, nil),\n\t\t},\n\t\tpublicKeyTemplate,\n\t\tprivateKeyTemplate,\n\t)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\treturn pubKey, privKey, nil\n}",
"func GetKeyPair(file string) (string, string, error) {\n\t// read keys from file\n\t_, err := os.Stat(file)\n\tif err == nil {\n\t\tpriv, err := ioutil.ReadFile(file)\n\t\tif err != nil {\n\t\t\tlumber.Debug(\"Failed to read file - %s\", err)\n\t\t\tgoto genKeys\n\t\t}\n\t\tpub, err := ioutil.ReadFile(file + \".pub\")\n\t\tif err != nil {\n\t\t\tlumber.Debug(\"Failed to read pub file - %s\", err)\n\t\t\tgoto genKeys\n\t\t}\n\t\treturn string(pub), string(priv), nil\n\t}\n\n\t// generate keys and save to file\ngenKeys:\n\tpub, priv, err := GenKeyPair()\n\terr = ioutil.WriteFile(file, []byte(priv), 0600)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"Failed to write file - %s\", err)\n\t}\n\terr = ioutil.WriteFile(file+\".pub\", []byte(pub), 0644)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"Failed to write pub file - %s\", err)\n\t}\n\n\treturn pub, priv, nil\n}",
"func sharedKey(priv, peerPub key) key {\n\tk := newKey()\n\tbox.Precompute(k, peerPub, priv)\n\treturn k\n}",
"func NewKey(bitSize int, k, l uint16, args *KeyMetaArgs) (shares KeyShareList, meta *KeyMeta, err error) {\n\n\tif args == nil {\n\t\targs = &KeyMetaArgs{}\n\t}\n\n\t// Parameter checking\n\tif bitSize < minBitSize || bitSize > maxBitSize {\n\t\terr = fmt.Errorf(\"bit size should be between %d and %d, but it is %d\", minBitSize, maxBitSize, bitSize)\n\t\treturn\n\t}\n\tif l <= 1 {\n\t\terr = fmt.Errorf(\"l should be greater than 1, but it is %d\", l)\n\t\treturn\n\t}\n\tif k <= 0 {\n\t\terr = fmt.Errorf(\"k should be greater than 0, but it is %d\", k)\n\t\treturn\n\t}\n\tif k < (l/2+1) || k > l {\n\t\terr = fmt.Errorf(\"k should be between the %d and %d, but it is %d\", (l/2)+1, l, k)\n\t\treturn\n\t}\n\n\tpPrimeSize := (bitSize + 1) / 2\n\tqPrimeSize := bitSize - pPrimeSize - 1\n\n\tif args.P != nil && args.P.BitLen() != pPrimeSize {\n\t\terr = fmt.Errorf(\"P bit length is %d, but it should be %d\", args.P.BitLen(), pPrimeSize)\n\t\treturn\n\t}\n\tif args.Q != nil && args.Q.BitLen() != qPrimeSize {\n\t\terr = fmt.Errorf(\"Q bit length is %d, but it should be %d\", args.Q.BitLen(), qPrimeSize)\n\t\treturn\n\t}\n\n\tmeta = &KeyMeta{\n\t\tPublicKey: &rsa.PublicKey{},\n\t\tK: k,\n\t\tL: l,\n\t\tVerificationKey: NewVerificationKey(l),\n\t}\n\tshares = make(KeyShareList, meta.L)\n\n\tvar i uint16\n\tfor i = 0; i < meta.L; i++ {\n\t\tshares[i] = &KeyShare{}\n\t}\n\n\t// Init big numbers\n\tpr := new(big.Int)\n\tqr := new(big.Int)\n\tp := new(big.Int)\n\tq := new(big.Int)\n\td := new(big.Int)\n\te := new(big.Int)\n\tlBig := new(big.Int)\n\tm := new(big.Int)\n\tn := new(big.Int)\n\tdeltaInv := new(big.Int)\n\tdivisor := new(big.Int)\n\tr := new(big.Int)\n\tvkv := new(big.Int)\n\tvku := new(big.Int)\n\tvki := new(big.Int)\n\n\tif args.P != nil {\n\t\tif !args.P.ProbablyPrime(c) {\n\t\t\terr = fmt.Errorf(\"p should be prime, but it's not\")\n\t\t\treturn\n\t\t}\n\t\tp.Set(args.P)\n\t\tpr.Sub(p, big.NewInt(1)).Div(pr, big.NewInt(2))\n\t} else {\n\t\tif p, pr, err = 
generateSafePrimes(pPrimeSize, randomDev); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif args.Q != nil {\n\t\tif !args.Q.ProbablyPrime(c) {\n\t\t\terr = fmt.Errorf(\"q should be prime, but it's not\")\n\t\t\treturn\n\t\t}\n\t\tq.Set(args.Q)\n\t\tqr.Sub(q, big.NewInt(1)).Div(qr, big.NewInt(2))\n\t} else {\n\t\tif q, qr, err = generateSafePrimes(qPrimeSize, randomDev); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// n = p * q and m = p' * q'\n\tn.Mul(p, q)\n\tm.Mul(pr, qr)\n\n\tmeta.PublicKey.N = n\n\n\tlBig.SetUint64(uint64(l))\n\n\teSet := false\n\n\tif args.E != 0 {\n\t\tmeta.PublicKey.E = args.E\n\t\te = big.NewInt(int64(meta.PublicKey.E))\n\t\tif e.ProbablyPrime(c) && lBig.Cmp(e) < 0 {\n\t\t\teSet = true\n\t\t}\n\t}\n\tif !eSet {\n\t\tmeta.PublicKey.E = f4\n\t\te = big.NewInt(int64(meta.PublicKey.E))\n\t}\n\n\t// d = e^{-1} mod m\n\td.ModInverse(e, m)\n\n\t// generate v\n\tif args.R == nil {\n\t\tfor divisor.Cmp(big.NewInt(1)) != 0 {\n\t\t\tr, err = randomDev(n.BitLen())\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdivisor.GCD(nil, nil, r, n)\n\t\t}\n\t} else {\n\t\tdivisor.GCD(nil, nil, args.R, n)\n\t\tif divisor.Cmp(big.NewInt(1)) != 0 {\n\t\t\terr = fmt.Errorf(\"provided r value should be coprime with p*q (i.e., it should not be 0, 1, p or q)\")\n\t\t\treturn\n\t\t}\n\t\tr.Set(args.R)\n\t}\n\n\tvkv.Exp(r, big.NewInt(2), n)\n\n\tmeta.VerificationKey.V = vkv.Bytes()\n\n\t// generate u\n\tif args.U == nil {\n\t\tfor cond := true; cond; cond = big.Jacobi(vku, n) != -1 {\n\t\t\tvku, err = randomDev(n.BitLen())\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvku.Mod(vku, n)\n\t\t}\n\t} else {\n\t\tvku.Set(args.U)\n\t}\n\n\tmeta.VerificationKey.U = vku.Bytes()\n\n\t// Delta is fact(l)\n\tdeltaInv.MulRange(1, int64(l)).ModInverse(deltaInv, m)\n\n\t// Generate polynomial with random coefficients.\n\tvar poly polynomial\n\tpoly, err = createRandomPolynomial(int(k-1), d, m)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Calculate Key Shares for each i TC 
participant.\n\tfor i = 1; i <= meta.L; i++ {\n\t\tkeyShare := shares[i-1]\n\t\tkeyShare.Id = i\n\t\tsi := poly.eval(big.NewInt(int64(i)))\n\t\tsi.Mul(si, deltaInv)\n\t\tsi.Mod(si, m)\n\t\tkeyShare.Si = si.Bytes()\n\t\tvki.Exp(vkv, si, n)\n\n\t\tmeta.VerificationKey.I[i-1] = vki.Bytes()\n\t}\n\treturn\n}",
"func GetKeyPair(file string) (string, string, error) {\n\t// read keys from file\n\t_, err := os.Stat(file)\n\tif err == nil {\n\t\tpriv, err := ioutil.ReadFile(file)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failed to read file - %s\", err)\n\t\t\tgoto genKeys\n\t\t}\n\t\tpub, err := ioutil.ReadFile(file + \".pub\")\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failed to read pub file - %s\", err)\n\t\t\tgoto genKeys\n\t\t}\n\t\treturn string(pub), string(priv), nil\n\t}\n\n\t// generate keys and save to file\ngenKeys:\n\tpub, priv, err := GenKeyPair()\n\terr = ioutil.WriteFile(file, []byte(priv), 0600)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"Failed to write file - %s\", err)\n\t}\n\terr = ioutil.WriteFile(file+\".pub\", []byte(pub), 0644)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"Failed to write pub file - %s\", err)\n\t}\n\n\treturn pub, priv, nil\n}",
"func NewRSAKeyPair(name ndn.Name) (PrivateKey, PublicKey, error) {\n\tkeyName := ToKeyName(name)\n\tkey, e := rsa.GenerateKey(rand.Reader, 2048)\n\tif e != nil {\n\t\treturn nil, nil, e\n\t}\n\tpvt, e := NewRSAPrivateKey(keyName, key)\n\tif e != nil {\n\t\treturn nil, nil, e\n\t}\n\tpub, e := NewRSAPublicKey(keyName, &key.PublicKey)\n\tif e != nil {\n\t\treturn nil, nil, e\n\t}\n\treturn pvt, pub, e\n}",
"func sortKeyPair(pk string) *RSA.PublicKeyPair {\n\tu := new(RSA.PublicKeyPair)\n\tsplit := strings.Split(pk, \",\")\n\t//fmt.Println(pk)\n\tn2 := new(big.Int)\n\te2 := new(big.Int)\n\tn2 = stringToBigInt(split[0])\n\te2 = stringToBigInt(split[1])\n\t//fmt.Println(\"n1:\", n2)\n\t//fmt.Println(\"e1:\", e2)\n\tu.N = n2\n\tu.E = e2\n\tPKlist = append(PKlist, u)\n\t//fmt.Println(PKlist)\n\treturn u\n}",
"func newRsaKeyPair(config CreateKeyPairConfig) (KeyPair, error) {\n\tif config.Bits == 0 {\n\t\tconfig.Bits = defaultRsaBits\n\t}\n\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, config.Bits)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tsshPublicKey, err := gossh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tprivatePemBlock, err := rawPemBlock(&pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tHeaders: nil,\n\t\tBytes: x509.MarshalPKCS1PrivateKey(privateKey),\n\t})\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\treturn KeyPair{\n\t\tPrivateKeyPemBlock: privatePemBlock,\n\t\tPublicKeyAuthorizedKeysLine: authorizedKeysLine(sshPublicKey, config.Comment),\n\t\tComment: config.Comment,\n\t}, nil\n}",
"func GenerateKeypair() (*Keypair, error) {\n\tvar publicKey [32]byte\n\tvar privateKey [32]byte\n\t_, err := rand.Read(privateKey[:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcurve25519.ScalarBaseMult(&publicKey, &privateKey)\n\treturn &Keypair{publicKey, privateKey}, nil\n}",
"func Keygen(pub *BswabePub,msk *BswabeMsk, attrs []string) *BswabePrv {\n\t//attrs := strings.Split(attr, \" \")\n\n\tprv := new(BswabePrv)\n\tvar g_r, r, beta_inv *pbc.Element\n\tvar pairing *pbc.Pairing\n\n\t/* initialize */\n\tpairing = pub.p\n\tprv.d = pairing.NewG2()\n\tg_r = pairing.NewG2()\n\tr = pairing.NewZr()\n\tbeta_inv = pairing.NewZr()\n\n\t/* compute */\n\tr.Rand()\n\tprv.r = r.NewFieldElement().Set(r)\n\tg_r = pub.gp.NewFieldElement().Set(pub.gp)\n\t//g_r = pub.g.NewFieldElement().Set(pub.g)\n\tg_r.PowZn(g_r, r)\n\n\tprv.d = msk.g_alpha.NewFieldElement().Set(msk.g_alpha)\n\tprv.d.Mul(prv.d, g_r)\n\tbeta_inv = msk.beta.NewFieldElement().Set(msk.beta)\n\tbeta_inv.Invert(beta_inv)\n\tprv.d.PowZn(prv.d, beta_inv)\n\n\tlen := len(attrs)\n\tfor i := 0; i < len; i++ {\n\t\tcomp := new(BswabePrvComp)\n\t\tvar h_rp, rp *pbc.Element\n\n\t\tcomp.attr = attrs[i]\n\t\tcomp.d = pairing.NewG2()\n\t\tcomp.dp = pairing.NewG1()\n\t\th_rp = pairing.NewG2()\n\t\trp = pairing.NewZr()\n\n\t\telementFromString(h_rp, comp.attr)\n\t\trp.Rand()\n\n\t\th_rp.PowZn(h_rp, rp)\n\n\t\tcomp.d = g_r.NewFieldElement().Set(g_r)\n\t\tcomp.d.Mul(comp.d, h_rp)\n\t\tcomp.dp = pub.g.NewFieldElement().Set(pub.g)\n\t\tcomp.dp.PowZn(comp.dp, rp)\n\n\t\tprv.comps = append(prv.comps, comp)\n\t}\n\treturn prv\n}",
"func GenKey(ip, port string) (kyber.Scalar, kyber.Point) {\n\tpriKey := crypto.Ed25519Curve.Scalar().SetInt64(int64(GetUniqueIDFromIPPort(ip, port))) // TODO: figure out why using a random hash value doesn't work for private key (schnorr)\n\tpubKey := pki.GetPublicKeyFromScalar(priKey)\n\n\treturn priKey, pubKey\n}",
"func deriveKeys(forwardSecure bool, sharedSecret, nonces []byte, connID protocol.ConnectionID, chlo, scfg, cert, divNonce []byte, keyLen int, swap bool) ([]byte, []byte, []byte, []byte, error) {\n\tvar info bytes.Buffer\n\tif forwardSecure {\n\t\tinfo.Write([]byte(\"QUIC forward secure key expansion\\x00\"))\n\t} else {\n\t\tinfo.Write([]byte(\"QUIC key expansion\\x00\"))\n\t}\n\tinfo.Write(connID)\n\tinfo.Write(chlo)\n\tinfo.Write(scfg)\n\tinfo.Write(cert)\n\n\tr := hkdf.New(sha256.New, sharedSecret, nonces, info.Bytes())\n\n\ts := make([]byte, 2*keyLen+2*4)\n\tif _, err := io.ReadFull(r, s); err != nil {\n\t\treturn nil, nil, nil, nil, err\n\t}\n\n\tkey1 := s[:keyLen]\n\tkey2 := s[keyLen : 2*keyLen]\n\tiv1 := s[2*keyLen : 2*keyLen+4]\n\tiv2 := s[2*keyLen+4:]\n\n\tvar otherKey, myKey []byte\n\tvar otherIV, myIV []byte\n\n\tif !forwardSecure {\n\t\tif err := diversify(key2, iv2, divNonce); err != nil {\n\t\t\treturn nil, nil, nil, nil, err\n\t\t}\n\t}\n\n\tif swap {\n\t\totherKey = key2\n\t\tmyKey = key1\n\t\totherIV = iv2\n\t\tmyIV = iv1\n\t} else {\n\t\totherKey = key1\n\t\tmyKey = key2\n\t\totherIV = iv1\n\t\tmyIV = iv2\n\t}\n\n\treturn otherKey, myKey, otherIV, myIV, nil\n}",
"func RSAKeyPair2048(rng *Rand, e int32, priv RSAPrivateKey, pub RSAPublicKey, p *Octet, q *Octet) {\n\tC.RSA_2048_KEY_PAIR((*C.csprng)(rng), C.sign32(e), priv.(*C.rsa_private_key_2048), pub.(*C.rsa_public_key_2048), (*C.octet)(p), (*C.octet)(q))\n}",
"func GenKeyP2PRand() (p2p_crypto.PrivKey, p2p_crypto.PubKey, error) {\n\treturn p2p_crypto.GenerateKeyPair(p2p_crypto.RSA, 2048)\n}",
"func (lib *PKCS11Lib) GenerateRSAKeyPair(bits int, purpose KeyPurpose) (*PKCS11PrivateKeyRSA, error) {\n\treturn lib.GenerateRSAKeyPairOnSlot(lib.Slot.id, nil, nil, bits, purpose)\n}",
"func KeyGenerate_ec2(msgprex string,ch chan interface{},id int,cointype string) bool {\n if id < 0 || id >= RpcMaxWorker || id >= len(workers) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get worker id fail\",Err:GetRetErr(ErrGetWorkerIdError)}\n\tch <- res\n\treturn false\n }\n\n w := workers[id]\n GroupId := w.groupid \n fmt.Println(\"========KeyGenerate_ec2============\",\"GroupId\",GroupId)\n if GroupId == \"\" {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"get group id fail in req ec2 pubkey\",Err:fmt.Errorf(\"get group id fail.\")}\n\tch <- res\n\treturn false\n }\n \n ns,_ := GetGroup(GroupId)\n if ns != NodeCnt {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:the group is not ready\",Err:GetRetErr(ErrGroupNotReady)}\n\tch <- res\n\treturn false \n }\n\n //1. generate their own \"partial\" private key secretly\n u1 := GetRandomIntFromZn(secp256k1.S256().N)\n\n // 2. calculate \"partial\" public key, make \"pritial\" public key commiment to get (C,D)\n u1Gx, u1Gy := secp256k1.S256().ScalarBaseMult(u1.Bytes())\n commitU1G := new(ec2.Commitment).Commit(u1Gx, u1Gy)\n\n // 3. generate their own paillier public key and private key\n u1PaillierPk, u1PaillierSk := ec2.GenerateKeyPair(PaillierKeyLength)\n\n // 4. Broadcast\n // commitU1G.C, commitU2G.C, commitU3G.C, commitU4G.C, commitU5G.C\n // u1PaillierPk, u2PaillierPk, u3PaillierPk, u4PaillierPk, u5PaillierPk\n mp := []string{msgprex,cur_enode}\n enode := strings.Join(mp,\"-\")\n s0 := \"C1\"\n s1 := string(commitU1G.C.Bytes())\n s2 := u1PaillierPk.Length\n s3 := string(u1PaillierPk.N.Bytes()) \n s4 := string(u1PaillierPk.G.Bytes()) \n s5 := string(u1PaillierPk.N2.Bytes()) \n ss := enode + Sep + s0 + Sep + s1 + Sep + s2 + Sep + s3 + Sep + s4 + Sep + s5\n SendMsgToDcrmGroup(ss,GroupId)\n\n // 1. 
Receive Broadcast\n // commitU1G.C, commitU2G.C, commitU3G.C, commitU4G.C, commitU5G.C\n // u1PaillierPk, u2PaillierPk, u3PaillierPk, u4PaillierPk, u5PaillierPk\n _,tip,cherr := GetChannelValue(ch_t,w.bc1)\n if cherr != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:GetRetErr(ErrGetC1Timeout)}\n\tch <- res\n\treturn false \n }\n\n // 2. generate their vss to get shares which is a set\n // [notes]\n // all nodes has their own id, in practival, we can take it as double hash of public key of fusion\n\n ids := GetIds(cointype,GroupId)\n\n u1PolyG, _, u1Shares, err := ec2.Vss(u1, ids, ThresHold, NodeCnt)\n if err != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:generate vss fail\",Err:err}\n\tch <- res\n\treturn false \n }\n\n // 3. send the the proper share to proper node \n //example for u1:\n // Send u1Shares[0] to u1\n // Send u1Shares[1] to u2\n // Send u1Shares[2] to u3\n // Send u1Shares[3] to u4\n // Send u1Shares[4] to u5\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\n\tif enodes == \"\" {\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get enode by uid fail\",Err:GetRetErr(ErrGetEnodeByUIdFail)}\n\t ch <- res\n\t return false\n\t}\n\t\n\tif IsCurNode(enodes,cur_enode) {\n\t continue\n\t}\n\n\tfor _,v := range u1Shares {\n\t uid := ec2.GetSharesId(v)\n\t if uid.Cmp(id) == 0 {\n\t\tmp := []string{msgprex,cur_enode}\n\t\tenode := strings.Join(mp,\"-\")\n\t\ts0 := \"SHARE1\"\n\t\ts1 := strconv.Itoa(v.T) \n\t\ts2 := string(v.Id.Bytes()) \n\t\ts3 := string(v.Share.Bytes()) \n\t\tss := enode + Sep + s0 + Sep + s1 + Sep + s2 + Sep + s3\n\t\tSendMsgToPeer(enodes,ss)\n\t\tbreak\n\t }\n\t}\n }\n\n // 4. 
Broadcast\n // commitU1G.D, commitU2G.D, commitU3G.D, commitU4G.D, commitU5G.D\n // u1PolyG, u2PolyG, u3PolyG, u4PolyG, u5PolyG\n mp = []string{msgprex,cur_enode}\n enode = strings.Join(mp,\"-\")\n s0 = \"D1\"\n dlen := len(commitU1G.D)\n s1 = strconv.Itoa(dlen)\n\n ss = enode + Sep + s0 + Sep + s1 + Sep\n for _,d := range commitU1G.D {\n\tss += string(d.Bytes())\n\tss += Sep\n }\n\n s2 = strconv.Itoa(u1PolyG.T)\n s3 = strconv.Itoa(u1PolyG.N)\n ss = ss + s2 + Sep + s3 + Sep\n\n pglen := 2*(len(u1PolyG.PolyG))\n s4 = strconv.Itoa(pglen)\n\n ss = ss + s4 + Sep\n\n for _,p := range u1PolyG.PolyG {\n\tfor _,d := range p {\n\t ss += string(d.Bytes())\n\t ss += Sep\n\t}\n }\n ss = ss + \"NULL\"\n SendMsgToDcrmGroup(ss,GroupId)\n\n // 1. Receive Broadcast\n // commitU1G.D, commitU2G.D, commitU3G.D, commitU4G.D, commitU5G.D\n // u1PolyG, u2PolyG, u3PolyG, u4PolyG, u5PolyG\n _,tip,cherr = GetChannelValue(ch_t,w.bd1_1)\n if cherr != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:GetRetErr(ErrGetD1Timeout)}\n\tch <- res\n\treturn false \n }\n\n // 2. Receive Personal Data\n _,tip,cherr = GetChannelValue(ch_t,w.bshare1)\n if cherr != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:GetRetErr(ErrGetSHARE1Timeout)}\n\tch <- res\n\treturn false \n }\n\t \n shares := make([]string,NodeCnt-1)\n if w.msg_share1.Len() != (NodeCnt-1) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get all msg_share1 fail\",Err:GetRetErr(ErrGetAllSHARE1Fail)}\n\tch <- res\n\treturn false\n }\n itmp := 0\n iter := w.msg_share1.Front()\n for iter != nil {\n\tmdss := iter.Value.(string)\n\tshares[itmp] = mdss \n\titer = iter.Next()\n\titmp++\n }\n \n //var sstruct = make(map[string]*vss.ShareStruct)\n var sstruct = make(map[string]*ec2.ShareStruct)\n for _,v := range shares {\n\tmm := strings.Split(v, Sep)\n\t//bug\n\tif len(mm) < 5 {\n\t fmt.Println(\"===================!!! KeyGenerate_ec2,fill lib.ShareStruct map error. 
!!!==================\")\n\t res := RpcDcrmRes{Ret:\"\",Err:fmt.Errorf(\"fill lib.ShareStruct map error.\")}\n\t ch <- res\n\t return false\n\t}\n\t//\n\tt,_ := strconv.Atoi(mm[2])\n\tushare := &ec2.ShareStruct{T:t,Id:new(big.Int).SetBytes([]byte(mm[3])),Share:new(big.Int).SetBytes([]byte(mm[4]))}\n\tprex := mm[0]\n\tprexs := strings.Split(prex,\"-\")\n\tsstruct[prexs[len(prexs)-1]] = ushare\n }\n for _,v := range u1Shares {\n\tuid := ec2.GetSharesId(v)\n\tenodes := GetEnodesByUid(uid,cointype,GroupId)\n\tif IsCurNode(enodes,cur_enode) {\n\t sstruct[cur_enode] = v \n\t break\n\t}\n }\n\n ds := make([]string,NodeCnt-1)\n if w.msg_d1_1.Len() != (NodeCnt-1) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get all msg_d1_1 fail\",Err:GetRetErr(ErrGetAllD1Fail)}\n\tch <- res\n\treturn false\n }\n itmp = 0\n iter = w.msg_d1_1.Front()\n for iter != nil {\n\tmdss := iter.Value.(string)\n\tds[itmp] = mdss \n\titer = iter.Next()\n\titmp++\n }\n\n var upg = make(map[string]*ec2.PolyGStruct)\n for _,v := range ds {\n\tmm := strings.Split(v, Sep)\n\tdlen,_ := strconv.Atoi(mm[2])\n\tpglen,_ := strconv.Atoi(mm[3+dlen+2])\n\tpglen = (pglen/2)\n\tvar pgss = make([][]*big.Int, 0)\n\tl := 0\n\tfor j:=0;j<pglen;j++ {\n\t l++\n\t var gg = make([]*big.Int,0)\n\t gg = append(gg,new(big.Int).SetBytes([]byte(mm[5+dlen+l])))\n\t l++\n\t gg = append(gg,new(big.Int).SetBytes([]byte(mm[5+dlen+l])))\n\t pgss = append(pgss,gg)\n\t}\n\n\tt,_ := strconv.Atoi(mm[3+dlen])\n\tn,_ := strconv.Atoi(mm[4+dlen])\n\tps := &ec2.PolyGStruct{T:t,N:n,PolyG:pgss}\n\tprex := mm[0]\n\tprexs := strings.Split(prex,\"-\")\n\tupg[prexs[len(prexs)-1]] = ps\n }\n upg[cur_enode] = u1PolyG\n\n // 3. 
verify the share\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tif sstruct[en[0]].Verify(upg[en[0]]) == false {\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:verification share1 fail\",Err:GetRetErr(ErrVerifySHARE1Fail)}\n\t ch <- res\n\t return false\n\t}\n }\n\n // 4.verify and de-commitment to get uG\n // for all nodes, construct the commitment by the receiving C and D\n cs := make([]string,NodeCnt-1)\n if w.msg_c1.Len() != (NodeCnt-1) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get all msg_c1 fail\",Err:GetRetErr(ErrGetAllC1Fail)}\n\tch <- res\n\treturn false\n }\n itmp = 0\n iter = w.msg_c1.Front()\n for iter != nil {\n\tmdss := iter.Value.(string)\n\tcs[itmp] = mdss \n\titer = iter.Next()\n\titmp++\n }\n\n var udecom = make(map[string]*ec2.Commitment)\n for _,v := range cs {\n\tmm := strings.Split(v, Sep)\n\tprex := mm[0]\n\tprexs := strings.Split(prex,\"-\")\n\tfor _,vv := range ds {\n\t mmm := strings.Split(vv, Sep)\n\t prex2 := mmm[0]\n\t prexs2 := strings.Split(prex2,\"-\")\n\t if prexs[len(prexs)-1] == prexs2[len(prexs2)-1] {\n\t\tdlen,_ := strconv.Atoi(mmm[2])\n\t\tvar gg = make([]*big.Int,0)\n\t\tl := 0\n\t\tfor j:=0;j<dlen;j++ {\n\t\t l++\n\t\t gg = append(gg,new(big.Int).SetBytes([]byte(mmm[2+l])))\n\t\t}\n\t\tdeCommit := &ec2.Commitment{C:new(big.Int).SetBytes([]byte(mm[2])), D:gg}\n\t\tudecom[prexs[len(prexs)-1]] = deCommit\n\t\tbreak\n\t }\n\t}\n }\n deCommit_commitU1G := &ec2.Commitment{C: commitU1G.C, D: commitU1G.D}\n udecom[cur_enode] = deCommit_commitU1G\n\n // for all nodes, verify the commitment\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tif udecom[en[0]].Verify() == false {\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:verification commitment fail\",Err:GetRetErr(ErrKeyGenVerifyCommitFail)}\n\t ch <- res\n\t return 
false\n\t}\n }\n\n // for all nodes, de-commitment\n var ug = make(map[string][]*big.Int)\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\t_, u1G := udecom[en[0]].DeCommit()\n\tug[en[0]] = u1G\n }\n\n // for all nodes, calculate the public key\n var pkx *big.Int\n var pky *big.Int\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tpkx = (ug[en[0]])[0]\n\tpky = (ug[en[0]])[1]\n\tbreak\n }\n\n for k,id := range ids {\n\tif k == 0 {\n\t continue\n\t}\n\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tpkx, pky = secp256k1.S256().Add(pkx, pky, (ug[en[0]])[0],(ug[en[0]])[1])\n }\n w.pkx.PushBack(string(pkx.Bytes()))\n w.pky.PushBack(string(pky.Bytes()))\n\n // 5. calculate the share of private key\n var skU1 *big.Int\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tskU1 = sstruct[en[0]].Share\n\tbreak\n }\n\n for k,id := range ids {\n\tif k == 0 {\n\t continue\n\t}\n\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tskU1 = new(big.Int).Add(skU1,sstruct[en[0]].Share)\n }\n skU1 = new(big.Int).Mod(skU1, secp256k1.S256().N)\n\n //save skU1/u1PaillierSk/u1PaillierPk/...\n ss = string(skU1.Bytes())\n ss = ss + SepSave\n s1 = u1PaillierSk.Length\n s2 = string(u1PaillierSk.L.Bytes()) \n s3 = string(u1PaillierSk.U.Bytes())\n ss = ss + s1 + SepSave + s2 + SepSave + s3 + SepSave\n\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tif IsCurNode(enodes,cur_enode) {\n\t s1 = u1PaillierPk.Length\n\t s2 = string(u1PaillierPk.N.Bytes()) \n\t s3 = string(u1PaillierPk.G.Bytes()) \n\t s4 = string(u1PaillierPk.N2.Bytes()) \n\t ss = ss + s1 + SepSave + s2 + SepSave + s3 + SepSave + s4 + 
SepSave\n\t continue\n\t}\n\tfor _,v := range cs {\n\t mm := strings.Split(v, Sep)\n\t prex := mm[0]\n\t prexs := strings.Split(prex,\"-\")\n\t if prexs[len(prexs)-1] == en[0] {\n\t\ts1 = mm[3] \n\t\ts2 = mm[4] \n\t\ts3 = mm[5] \n\t\ts4 = mm[6] \n\t\tss = ss + s1 + SepSave + s2 + SepSave + s3 + SepSave + s4 + SepSave\n\t\tbreak\n\t }\n\t}\n }\n\n sstmp := ss //////\n tmp := ss\n\n ss = ss + \"NULL\"\n\n // 6. calculate the zk\n // ## add content: zk of paillier key, zk of u\n \n // zk of paillier key\n u1zkFactProof := u1PaillierSk.ZkFactProve()\n // zk of u\n //u1zkUProof := schnorrZK.ZkUProve(u1)\n u1zkUProof := ec2.ZkUProve(u1)\n\n // 7. Broadcast zk\n // u1zkFactProof, u2zkFactProof, u3zkFactProof, u4zkFactProof, u5zkFactProof\n mp = []string{msgprex,cur_enode}\n enode = strings.Join(mp,\"-\")\n s0 = \"ZKFACTPROOF\"\n s1 = string(u1zkFactProof.H1.Bytes())\n s2 = string(u1zkFactProof.H2.Bytes())\n s3 = string(u1zkFactProof.Y.Bytes())\n s4 = string(u1zkFactProof.E.Bytes())\n s5 = string(u1zkFactProof.N.Bytes())\n ss = enode + Sep + s0 + Sep + s1 + Sep + s2 + Sep + s3 + Sep + s4 + Sep + s5\n SendMsgToDcrmGroup(ss,GroupId)\n\n // 1. Receive Broadcast zk\n // u1zkFactProof, u2zkFactProof, u3zkFactProof, u4zkFactProof, u5zkFactProof\n _,tip,cherr = GetChannelValue(ch_t,w.bzkfact)\n if cherr != nil {\n//\tlogs.Debug(\"get w.bzkfact timeout in keygenerate.\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:GetRetErr(ErrGetZKFACTPROOFTimeout)}\n\tch <- res\n\treturn false \n }\n\n sstmp2 := s1 + SepSave + s2 + SepSave + s3 + SepSave + s4 + SepSave + s5\n\n // 8. Broadcast zk\n // u1zkUProof, u2zkUProof, u3zkUProof, u4zkUProof, u5zkUProof\n mp = []string{msgprex,cur_enode}\n enode = strings.Join(mp,\"-\")\n s0 = \"ZKUPROOF\"\n s1 = string(u1zkUProof.E.Bytes())\n s2 = string(u1zkUProof.S.Bytes())\n ss = enode + Sep + s0 + Sep + s1 + Sep + s2\n SendMsgToDcrmGroup(ss,GroupId)\n\n // 9. 
Receive Broadcast zk\n // u1zkUProof, u2zkUProof, u3zkUProof, u4zkUProof, u5zkUProof\n _,tip,cherr = GetChannelValue(ch_t,w.bzku)\n if cherr != nil {\n//\tlogs.Info(\"get w.bzku timeout in keygenerate.\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:GetRetErr(ErrGetZKUPROOFTimeout)}\n\tch <- res\n\treturn false \n }\n \n // 1. verify the zk\n // ## add content: verify zk of paillier key, zk of u\n\t\n // for all nodes, verify zk of paillier key\n zkfacts := make([]string,NodeCnt-1)\n if w.msg_zkfact.Len() != (NodeCnt-1) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get msg_zkface fail\",Err:GetRetErr(ErrGetAllZKFACTPROOFFail)}\n\tch <- res\n\treturn false\n }\n itmp = 0\n iter = w.msg_zkfact.Front()\n for iter != nil {\n\tmdss := iter.Value.(string)\n\tzkfacts[itmp] = mdss \n\titer = iter.Next()\n\titmp++\n }\n\n for k,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tif IsCurNode(enodes,cur_enode) { /////bug for save zkfact\n\t sstmp = sstmp + sstmp2 + SepSave\n\t continue\n\t}\n\n\tu1PaillierPk2 := GetPaillierPk(tmp,k)\n\tfor _,v := range zkfacts {\n\t mm := strings.Split(v, Sep)\n\t prex := mm[0]\n\t prexs := strings.Split(prex,\"-\")\n\t if prexs[len(prexs)-1] == en[0] {\n\t\th1 := new(big.Int).SetBytes([]byte(mm[2]))\n\t\th2 := new(big.Int).SetBytes([]byte(mm[3]))\n\t\ty := new(big.Int).SetBytes([]byte(mm[4]))\n\t\te := new(big.Int).SetBytes([]byte(mm[5]))\n\t\tn := new(big.Int).SetBytes([]byte(mm[6]))\n\t\tzkFactProof := &ec2.ZkFactProof{H1: h1, H2: h2, Y: y, E: e,N:n}\n\t\t///////\n\t\tsstmp = sstmp + mm[2] + SepSave + mm[3] + SepSave + mm[4] + SepSave + mm[5] + SepSave + mm[6] + SepSave ///for save zkfact\n\t\t//////\n\n\t\tif !u1PaillierPk2.ZkFactVerify(zkFactProof) {\n\t\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:zkfact verification fail\",Err:GetRetErr(ErrVerifyZKFACTPROOFFail)}\n\t\t ch <- res\n\t \n\t\t return false \n\t\t}\n\n\t\tbreak\n\t 
}\n\t}\n }\n\n fmt.Println(\"========AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA KeyGenerate_ec2, AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA ============\",\"GroupId\",GroupId)\n\n // for all nodes, verify zk of u\n zku := make([]string,NodeCnt-1)\n if w.msg_zku.Len() != (NodeCnt-1) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get all msg_zku fail\",Err:GetRetErr(ErrGetAllZKUPROOFFail)}\n\tch <- res\n\treturn false\n }\n itmp = 0\n iter = w.msg_zku.Front()\n for iter != nil {\n\tmdss := iter.Value.(string)\n\tzku[itmp] = mdss \n\titer = iter.Next()\n\titmp++\n }\n\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tfor _,v := range zku {\n\t mm := strings.Split(v, Sep)\n\t prex := mm[0]\n\t prexs := strings.Split(prex,\"-\")\n\t if prexs[len(prexs)-1] == en[0] {\n\t\te := new(big.Int).SetBytes([]byte(mm[2]))\n\t\ts := new(big.Int).SetBytes([]byte(mm[3]))\n\t\tzkUProof := &ec2.ZkUProof{E: e, S: s}\n\t\tif !ec2.ZkUVerify(ug[en[0]],zkUProof) {\n\t\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:zkuproof verification fail\",Err:GetRetErr(ErrVerifyZKUPROOFFail)}\n\t\t ch <- res\n\t\t return false \n\t\t}\n\n\t\tbreak\n\t }\n\t}\n } \n \n sstmp = sstmp + \"NULL\"\n //w.save <- sstmp\n //w.save: sku1:UiSK:U1PK:U2PK:U3PK:....:UnPK:U1H1:U1H2:U1Y:U1E:U1N:U2H1:U2H2:U2Y:U2E:U2N:U3H1:U3H2:U3Y:U3E:U3N:......:NULL\n w.save.PushBack(sstmp)\n return true\n}",
"func mitmDHGroupX(g, p *big.Int) bool {\n\t// This mitm function doesn't change anything, but I wanted to reuse code\n\t// from challenge 34.\n\tmitm := func(msg *dhMsg) dhMsg {\n\t\treturn msg.Copy()\n\t}\n\n\t// Open a channel to a simulated Bob.\n\tbobch := make(chan dhMsg)\n\tgo bob(bobch, mitm)\n\n\t// Perform the key exchange.\n\tkex := dhProtocol(bobch, g, p, mitm)\n\n\t// Encrypt the message.\n\tplaintext := []byte(\"hello\")\n\tciphertext, err := kex.Encrypt(plaintext)\n\tif err != nil {\n\t\tcryptopals.PrintError(err)\n\t\treturn false\n\t}\n\n\t// Because g = p-1, we know the subgroup it generates is {1, p-1}. Therefore\n\t// kex.X and kex.Y must be 1 or p-1 and the resulting secret g^(xy) must be\n\t// 1 or p-1. Since a man-in-the-middle can view X and Y on the wire, they\n\t// have enough information to determine which of those two values the key\n\t// actually is. So we \"forge\" a dhKeyExchange struct and set the secret\n\t// private key x to 1 so that we can craft Y to produce the correct session\n\t// key.\n\tmitmKex := dhKeyExchange{}\n\tmitmKex.Init(g, p)\n\tone := big.NewInt(1)\n\tmitmKex.x = one\n\tpp := big.NewInt(0).Add(p, big.NewInt(-1)) // pp = p-1\n\n\tswitch {\n\tcase kex.X.Cmp(one)+kex.Y.Cmp(one) < 2:\n\t\t// If either X or Y is 1, then the session key is 1.\n\t\tmitmKex.Y = one\n\tcase kex.X.Cmp(pp)+kex.Y.Cmp(pp) == 0:\n\t\t// If both X and Y are p-1, then the session key is p-1.\n\t\tmitmKex.Y = pp\n\tdefault:\n\t\tcryptopals.PrintError(errors.New(\"g is not valid for this attack\"))\n\t\treturn false\n\t}\n\n\t// The man-in-the-middle can now decrypt messages encrypted with the\n\t// diffie-hellman session key.\n\tourtext, err := mitmKex.Decrypt(ciphertext)\n\tif err != nil {\n\t\tcryptopals.PrintError(err)\n\t\treturn false\n\t}\n\n\t// Send the ciphertext to Bob.\n\tbobch <- mitm(&dhMsg{t: dhSendMsg, msg: ciphertext})\n\n\t// Receive Bob's response.\n\tans := <-bobch\n\tif !ans.ok {\n\t\tcryptopals.PrintError(ans.err)\n\t\treturn 
false\n\t}\n\n\t// The MITM can decrypt Bob's messages, too.\n\tbobtext, err := mitmKex.Decrypt(ans.msg)\n\tif err != nil {\n\t\tcryptopals.PrintError(err)\n\t\treturn false\n\t}\n\n\t// Test if the decryptions are correct.\n\tif bytes.Equal(bobtext, []byte(\"hi\")) && bytes.Equal(ourtext, []byte(\"hello\")) {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func KeypairFromSeed(seed string, index uint32) (ed25519.PublicKey, ed25519.PrivateKey, error) {\n\thash, err := blake2b.New(32, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tseed_data, err := hex.DecodeString(seed)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tbs := make([]byte, 4)\n\tbinary.BigEndian.PutUint32(bs, index)\n\n\thash.Write(seed_data)\n\thash.Write(bs)\n\n\tseed_bytes := hash.Sum(nil)\n\tpub, priv, err := ed25519.GenerateKey(bytes.NewReader(seed_bytes))\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn pub, priv, nil\n}",
"func GenerateNewKeyPair(bits int) (*rsa.PrivateKey, error) {\n\tprivKey, err := rsa.GenerateKey(rand.Reader, bits)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn privKey, err\n}",
"func deriveKeys(passphrase, salt []byte, logN, r, p int) (cipherKey, hmacKey []byte) {\n\tkeyLen := keySize + hashFunc.Size()\n\tkey, err := scrypt.Key(passphrase, salt, 1<<uint(logN), r, p, keyLen)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcipherKey, hmacKey = key[:keySize], key[keySize:]\n\treturn\n}",
"func ConvertKey(sk *PrivateKey, pk EllipticPoint) *ecdsa.PrivateKey {\n\tpubKey := ecdsa.PublicKey{\n\t\tCurve: pk.C,\n\t\tX: pk.x,\n\t\tY: pk.y,\n\t}\n\n\tvar D *big.Int\n\n\tif sk != nil {\n\t\tD = new(big.Int)\n\t\tD.SetBytes(*sk.d)\n\t}\n\n\tprivKey := ecdsa.PrivateKey{\n\t\tPublicKey: pubKey,\n\t\tD: D,\n\t}\n\n\treturn &privKey\n}",
"func newPrivateKey(pSeed, qSeed big.Int) (*PrivateKey, error) {\n\tq := &qSeed\n\tp := &pSeed\n\tvar tmp big.Int\n\ttest := big.NewInt(0x7743)\n\tvar q1, phi, keyD, keyN big.Int\n\tfor count := 0; count < rsaCreateGiveup; count++ {\n\t\tq = primize(q)\n\t\tq1.Add(q, tmp.SetInt64(-1))\n\t\tp = primize(p)\n\t\tphi.Add(p, tmp.SetInt64(-1))\n\t\tphi.Mul(&phi, &q1)\n\t\tkeyD.ModInverse(rsaPublicE, &phi)\n\t\tif keyD.Cmp(tmp.SetInt64(0)) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tkeyN.Mul(p, q)\n\t\ttmp.Exp(test, rsaPublicE, &keyN)\n\t\ttmp.Exp(&tmp, &keyD, &keyN)\n\t\tif tmp.Cmp(test) == 0 {\n\t\t\treturn &PrivateKey{&keyN, &keyD}, nil\n\t\t}\n\t\tp.Add(p, tmp.SetInt64(2))\n\t\tq.Add(q, tmp.SetInt64(2))\n\t}\n\terr := errors.New(\"cannot generate private key\")\n\tlog.Fatal(err)\n\treturn nil, err\n}",
"func createKeypair() *keypair.Full {\n\tpair, err := keypair.Random()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Println(\"Seed:\", pair.Seed())\n\tlog.Println(\"Address:\", pair.Address())\n\n\treturn pair\n}",
"func GenerateGroupKeys(initialMessage []byte, transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, privateCoefficients []*big.Int, encryptedShares [][]*big.Int, index int, participants ParticipantList, threshold int) (*big.Int, [4]*big.Int, [2]*big.Int, error) {\n\n\t// setup\n\tn := len(participants)\n\n\t// build portions of group secret key\n\tpublicKeyG1s := make([]*cloudflare.G1, n)\n\n\tfor idx := 0; idx < n; idx++ {\n\t\tpublicKeyG1, err := bn256.BigIntArrayToG1(participants[idx].PublicKey)\n\t\tif err != nil {\n\t\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error converting public key to g1: %v\", err)\n\t\t}\n\t\tpublicKeyG1s[idx] = publicKeyG1\n\t}\n\n\ttransportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey)\n\tif err != nil {\n\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error converting transport public key to g1: %v\", err)\n\t}\n\n\tsharedEncrypted, err := cloudflare.CondenseCommitments(transportPublicKeyG1, encryptedShares, publicKeyG1s)\n\tif err != nil {\n\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error condensing commitments: %v\", err)\n\t}\n\n\tsharedSecrets, err := cloudflare.GenerateDecryptedShares(transportPrivateKey, sharedEncrypted, publicKeyG1s)\n\tif err != nil {\n\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error generating decrypted shares: %v\", err)\n\t}\n\n\t// here's the final group secret\n\tgskj := cloudflare.PrivatePolyEval(privateCoefficients, 1+index)\n\tfor idx := 0; idx < len(sharedSecrets); idx++ {\n\t\tgskj.Add(gskj, sharedSecrets[idx])\n\t}\n\tgskj.Mod(gskj, cloudflare.Order)\n\n\t// here's the group public\n\tgpkj := new(cloudflare.G2).ScalarBaseMult(gskj)\n\tgpkjBig := bn256.G2ToBigIntArray(gpkj)\n\n\t// create sig\n\tsig, err := cloudflare.Sign(initialMessage, gskj, cloudflare.HashToG1)\n\tif err != nil {\n\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error signing message: %v\", err)\n\t}\n\tsigBig := bn256.G1ToBigIntArray(sig)\n\n\t// verify 
signature\n\tvalidSig, err := cloudflare.Verify(initialMessage, sig, gpkj, cloudflare.HashToG1)\n\tif err != nil {\n\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error verifying signature: %v\", err)\n\t}\n\n\tif !validSig {\n\t\treturn nil, empty4Big, empty2Big, errors.New(\"not a valid group signature\")\n\t}\n\n\treturn gskj, gpkjBig, sigBig, nil\n}",
"func GenerateKey(rand io.Reader) (*PrivateKey, error) {\n\n\tc := SM2P256()\n\n\tk, err := randFieldElement(c, rand)\n\tfmt.Println(k)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpriv := new(PrivateKey)\n\tpriv.PublicKey.Curve= c\n\tpriv.D = k\n\n\tpriv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(k.Bytes())\n\treturn priv, nil\n}",
"func ECDH_KEY_PAIR_GENERATE(RNG *core.RAND, S []byte, W []byte) int {\n\tres := 0\n\tvar s *BIG\n\tvar G *ECP\n\n\tG = ECP_generator()\n\n\tr := NewBIGints(CURVE_Order)\n\n\tif RNG == nil {\n\t\ts = FromBytes(S)\n\t\ts.Mod(r)\n\t} else {\n\t\ts = Randtrunc(r, 16*AESKEY, RNG)\n\t}\n\n\ts.ToBytes(S)\n\n\tWP := G.mul(s)\n\n\tWP.ToBytes(W, false) // To use point compression on public keys, change to true\n\n\treturn res\n}",
"func (d Dispatcher) KeyPair() (string, error) {\n\tpriv, pub := crypt.GenKeys()\n\ttemp := make(map[string]string)\n\ttemp[\"priv\"] = priv\n\ttemp[\"pub\"] = pub\n\tkeysBytes, err := helpers.Serialize(temp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(keysBytes), nil\n}",
"func getBootstrapPeerOperatorKey() (\n\t*operator.PrivateKey,\n\t*operator.PublicKey,\n) {\n\treturn getPeerOperatorKey(big.NewInt(128838122312))\n}",
"func KeyPairFromPrivateKey(config FromPrivateKeyConfig) (KeyPair, error) {\n\tprivateKey, err := gossh.ParseRawPrivateKey(config.RawPrivateKeyPemBlock)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tswitch pk := privateKey.(type) {\n\tcase crypto.Signer:\n\t\t// crypto.Signer is implemented by ecdsa.PrivateKey,\n\t\t// ed25519.PrivateKey, and rsa.PrivateKey - separate cases\n\t\t// for each PrivateKey type would be redundant.\n\t\tpublicKey, err := gossh.NewPublicKey(pk.Public())\n\t\tif err != nil {\n\t\t\treturn KeyPair{}, err\n\t\t}\n\t\treturn KeyPair{\n\t\t\tComment: config.Comment,\n\t\t\tPrivateKeyPemBlock: config.RawPrivateKeyPemBlock,\n\t\t\tPublicKeyAuthorizedKeysLine: authorizedKeysLine(publicKey, config.Comment),\n\t\t}, nil\n\tcase *dsa.PrivateKey:\n\t\tpublicKey, err := gossh.NewPublicKey(&pk.PublicKey)\n\t\tif err != nil {\n\t\t\treturn KeyPair{}, err\n\t\t}\n\t\treturn KeyPair{\n\t\t\tComment: config.Comment,\n\t\t\tPrivateKeyPemBlock: config.RawPrivateKeyPemBlock,\n\t\t\tPublicKeyAuthorizedKeysLine: authorizedKeysLine(publicKey, config.Comment),\n\t\t}, nil\n\t}\n\n\treturn KeyPair{}, fmt.Errorf(\"Cannot parse existing SSH key pair - unknown key pair type\")\n}",
"func KeyPairGenerateFA(rng *core.RAND, S []byte, W []byte) int {\r\n\tr := NewBIGints(CURVE_Order)\r\n\tG := ECP2_generator()\r\n\tif G.Is_infinity() {\r\n\t\treturn BLS_FAIL\r\n\t}\r\n\ts := Randomnum(r, rng)\r\n\ts.ToBytes(S)\r\n\t// SkToPk\r\n\tG = G2mul(G, s)\r\n\tG.ToBytes(W, true)\r\n\treturn BLS_OK\r\n}",
"func KeyGen(r *big.Int, params *Params, master *MasterKey, attrs AttributeList) (*PrivateKey, error) {\n\tkey := &PrivateKey{}\n\tk := len(attrs)\n\tl := len(params.H)\n\n\t// Randomly choose r in Zp.\n\tif r == nil {\n\t\tvar err error\n\t\tr, err = RandomInZp(rand.Reader)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tproduct := new(bn256.G1).Set(params.G3)\n\tkey.B = make([]*bn256.G1, l-k)\n\tkey.FreeMap = make(map[AttributeIndex]int)\n\tj := 0\n\tfor i, h := range params.H {\n\t\tattrIndex := AttributeIndex(i)\n\t\tif attr, ok := attrs[attrIndex]; ok {\n\t\t\tif attr != nil {\n\t\t\t\thi := new(bn256.G1).ScalarMult(h, attr)\n\t\t\t\tproduct.Add(product, hi)\n\t\t\t}\n\t\t} else {\n\t\t\tkey.B[j] = new(bn256.G1).ScalarMult(h, r)\n\t\t\tkey.FreeMap[attrIndex] = j\n\t\t\tj++\n\t\t}\n\t}\n\tif params.HSig != nil {\n\t\tkey.BSig = new(bn256.G1).ScalarMult(params.HSig, r)\n\t}\n\tproduct.ScalarMult(product, r)\n\n\tkey.A0 = new(bn256.G1).Add((*bn256.G1)(master), product)\n\tkey.A1 = new(bn256.G2).ScalarMult(params.G, r)\n\n\treturn key, nil\n}",
"func NewRSAKeyPair() (*RSAKeyPair, error) {\n\treader := rand.Reader\n\tprivateKey, err := rsa.GenerateKey(reader, bitSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = privateKey.Validate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newRSAKeyPair(privateKey, &privateKey.PublicKey)\n}",
"func newEcdsaKeyPair(config CreateKeyPairConfig) (KeyPair, error) {\n\tvar curve elliptic.Curve\n\n\tswitch config.Bits {\n\tcase 0:\n\t\tconfig.Bits = 521\n\t\tfallthrough\n\tcase 521:\n\t\tcurve = elliptic.P521()\n\tcase 384:\n\t\tcurve = elliptic.P384()\n\tcase 256:\n\t\tcurve = elliptic.P256()\n\tcase 224:\n\t\t// Not supported by \"golang.org/x/crypto/ssh\".\n\t\treturn KeyPair{}, fmt.Errorf(\"golang.org/x/crypto/ssh does not support %d bits\", config.Bits)\n\tdefault:\n\t\treturn KeyPair{}, fmt.Errorf(\"crypto/elliptic does not support %d bits\", config.Bits)\n\t}\n\n\tprivateKey, err := ecdsa.GenerateKey(curve, rand.Reader)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tsshPublicKey, err := gossh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tprivateRaw, err := x509.MarshalECPrivateKey(privateKey)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tprivatePem, err := rawPemBlock(&pem.Block{\n\t\tType: \"EC PRIVATE KEY\",\n\t\tHeaders: nil,\n\t\tBytes: privateRaw,\n\t})\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\treturn KeyPair{\n\t\tPrivateKeyPemBlock: privatePem,\n\t\tPublicKeyAuthorizedKeysLine: authorizedKeysLine(sshPublicKey, config.Comment),\n\t\tComment: config.Comment,\n\t}, nil\n}",
"func GenerateEncodedKeypair(passphrase string, bits int) (*EncodedKeypair, error) {\n\tkeypair, err := GenerateRSA(bits)\n\tif err != nil {\n\t\treturn nil, errs.Wrap(err, \"Could not generate RSA\")\n\t}\n\treturn EncodeKeypair(keypair, passphrase)\n}",
"func ParseKeyPair(r io.Reader) (*KeyPair, error) {\n\tvar s ssbSecret\n\tif err := json.NewDecoder(r).Decode(&s); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"ssb.Parse: JSON decoding failed\")\n\t}\n\n\tpublic, err := base64.StdEncoding.DecodeString(strings.TrimSuffix(s.Public, \".ed25519\"))\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"ssb.Parse: base64 decode of public part failed\")\n\t}\n\n\tprivate, err := base64.StdEncoding.DecodeString(strings.TrimSuffix(s.Private, \".ed25519\"))\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"ssb.Parse: base64 decode of private part failed\")\n\t}\n\n\tvar kp secrethandshake.EdKeyPair\n\tcopy(kp.Public[:], public)\n\tcopy(kp.Secret[:], private)\n\n\tssbkp := KeyPair{\n\t\tId: s.ID,\n\t\tPair: kp,\n\t}\n\treturn &ssbkp, nil\n}",
"func genKey() (peerid string, privatekey string, err error) {\n\t// generate private key\n\tpriv, _, err := crypto.GenerateKeyPairWithReader(crypto.Ed25519, -1, crand.Reader)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// convert to bytes\n\tkBytes, err := crypto.MarshalPrivateKey(priv)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// Obtain Peer ID from public key\n\tpid, err := libp2p_peer.IDFromPublicKey(priv.GetPublic())\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn pid.String(), base64.StdEncoding.EncodeToString(kBytes), nil\n}"
] | [
"0.69897085",
"0.68689436",
"0.68633217",
"0.6845767",
"0.6727594",
"0.6598774",
"0.65976435",
"0.6574991",
"0.65571785",
"0.6535598",
"0.6515089",
"0.6499443",
"0.6463625",
"0.64459366",
"0.6434493",
"0.64074236",
"0.63993114",
"0.6224574",
"0.62184876",
"0.61737037",
"0.61624074",
"0.61602837",
"0.61586404",
"0.6141591",
"0.61369413",
"0.61289483",
"0.6102342",
"0.6101767",
"0.609358",
"0.6090318",
"0.607254",
"0.607085",
"0.6054036",
"0.60476506",
"0.60298365",
"0.60298365",
"0.6015079",
"0.6000052",
"0.59985435",
"0.59985435",
"0.59916955",
"0.59852165",
"0.59809947",
"0.59788024",
"0.59282297",
"0.5928024",
"0.5892178",
"0.589091",
"0.5866779",
"0.58596355",
"0.5858928",
"0.5851495",
"0.5841",
"0.5835287",
"0.5833043",
"0.58284116",
"0.5824412",
"0.5806976",
"0.5806886",
"0.5786409",
"0.57723343",
"0.57684106",
"0.5766486",
"0.57305753",
"0.57305074",
"0.5725356",
"0.57105577",
"0.57102466",
"0.57077044",
"0.5667935",
"0.5666794",
"0.5666478",
"0.56367373",
"0.5609461",
"0.55830675",
"0.55792254",
"0.55646515",
"0.55405116",
"0.5539529",
"0.55374",
"0.55374",
"0.55329007",
"0.5517842",
"0.55076104",
"0.5501525",
"0.5498859",
"0.548621",
"0.54745454",
"0.5465133",
"0.5442755",
"0.5440845",
"0.5436092",
"0.5433556",
"0.54318637",
"0.54318625",
"0.54317206",
"0.5430272",
"0.54238963",
"0.5420849",
"0.54063374"
] | 0.7862833 | 0 |
convertMont converts from a Montgomery ucoordinate to a twisted Edwards point P, according to convert_mont(u): umasked = u (mod 2|p|) P.y = u_to_y(umasked) P.s = 0 return P | convertMont преобразует координату u в системе Монтгомери в точку P вида twisted Edwards, согласно convert_mont(u): umasked = u (mod 2|p|) P.y = u_to_y(umasked) P.s = 0 return P | func convertMont(u PublicKey) (*edwards25519.Point, error) {
um, err := (&field.Element{}).SetBytes(u)
if err != nil {
return nil, err
}
// y = (u - 1)/(u + 1)
a := new(field.Element).Subtract(um, one)
b := new(field.Element).Add(um, one)
y := new(field.Element).Multiply(a, b.Invert(b)).Bytes()
// Set sign to 0
y[31] &= 0x7F
return (&edwards25519.Point{}).SetBytes(y)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (p *Poly) toMont() {\n\tvar f int16 = int16((uint64(1) << 32) % uint64(q))\n\tfor i := 0; i < n; i++ {\n\t\tp[i] = montgomeryReduce(int32(p[i]) * int32(f))\n\t}\n}",
"func (z *Element22) ToMont() *Element22 {\n\tvar rSquare = Element22{\n\t\t11555390936043306539,\n\t\t958669060063230310,\n\t\t2580977272801836257,\n\t\t1403887552063632943,\n\t\t13867690507567207459,\n\t\t3907927833394869101,\n\t\t10840458828090788374,\n\t\t4883929514287350477,\n\t\t15550705002284641687,\n\t\t204726014467581413,\n\t\t9800326706814271754,\n\t\t15550253209695210297,\n\t\t6763495363949586021,\n\t\t12116376736443678463,\n\t\t15994432058116609212,\n\t\t9284907172179203497,\n\t\t12057222969833993383,\n\t\t7578266974200549103,\n\t\t1045705632585341962,\n\t\t16636233895911641002,\n\t\t10037290343882990384,\n\t\t7731486842628832948,\n\t}\n\treturn z.MulAssign(&rSquare)\n}",
"func (z *Element22) FromMont() *Element22 {\n\n\t// the following lines implement z = z * 1\n\t// with a modified CIOS montgomery multiplication\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, 
z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 
7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, 
z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 
2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 
11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 
952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = 
madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 
9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] 
= madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 
2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, 
z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 
366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], 
C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = 
madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\n\t// if z > q --> z -= q\n\tif !(z[21] < 7910025299994333900 || (z[21] == 7910025299994333900 && (z[20] < 1619463007483089584 || (z[20] == 1619463007483089584 && (z[19] < 17666595880400198649 || (z[19] == 17666595880400198649 && (z[18] < 17492726232193822651 || (z[18] == 17492726232193822651 && (z[17] < 200317109320159479 || (z[17] == 200317109320159479 && (z[16] < 243347369443125979 || (z[16] == 243347369443125979 && (z[15] < 7409464670784690235 || (z[15] == 7409464670784690235 && (z[14] < 123227702747754650 || (z[14] == 123227702747754650 && (z[13] < 18137346262305431037 || (z[13] == 18137346262305431037 && (z[12] < 11172154229712803058 || (z[12] == 11172154229712803058 && (z[11] < 8413249848292746549 || (z[11] == 8413249848292746549 && (z[10] < 8062699450825609015 || (z[10] == 8062699450825609015 && (z[9] < 14115032483094903896 || (z[9] == 14115032483094903896 && (z[8] < 2672987780203805083 || (z[8] == 2672987780203805083 && (z[7] < 366248478184989226 || (z[7] == 366248478184989226 && (z[6] < 6852504016717314360 || (z[6] == 6852504016717314360 && (z[5] < 2607080593922027197 || (z[5] == 2607080593922027197 && (z[4] < 17899356805776864267 || (z[4] == 17899356805776864267 && (z[3] < 9476693002504986527 || (z[3] == 9476693002504986527 && (z[2] < 
13987751354083916656 || (z[2] == 13987751354083916656 && (z[1] < 952425709649632109 || (z[1] == 952425709649632109 && (z[0] < 9062599614324828209))))))))))))))))))))))))))))))))))))))))))) {\n\t\tvar b uint64\n\t\tz[0], b = bits.Sub64(z[0], 9062599614324828209, 0)\n\t\tz[1], b = bits.Sub64(z[1], 952425709649632109, b)\n\t\tz[2], b = bits.Sub64(z[2], 13987751354083916656, b)\n\t\tz[3], b = bits.Sub64(z[3], 9476693002504986527, b)\n\t\tz[4], b = bits.Sub64(z[4], 17899356805776864267, b)\n\t\tz[5], b = bits.Sub64(z[5], 2607080593922027197, b)\n\t\tz[6], b = bits.Sub64(z[6], 6852504016717314360, b)\n\t\tz[7], b = bits.Sub64(z[7], 366248478184989226, b)\n\t\tz[8], b = bits.Sub64(z[8], 2672987780203805083, b)\n\t\tz[9], b = bits.Sub64(z[9], 14115032483094903896, b)\n\t\tz[10], b = bits.Sub64(z[10], 8062699450825609015, b)\n\t\tz[11], b = bits.Sub64(z[11], 8413249848292746549, b)\n\t\tz[12], b = bits.Sub64(z[12], 11172154229712803058, b)\n\t\tz[13], b = bits.Sub64(z[13], 18137346262305431037, b)\n\t\tz[14], b = bits.Sub64(z[14], 123227702747754650, b)\n\t\tz[15], b = bits.Sub64(z[15], 7409464670784690235, b)\n\t\tz[16], b = bits.Sub64(z[16], 243347369443125979, b)\n\t\tz[17], b = bits.Sub64(z[17], 200317109320159479, b)\n\t\tz[18], b = bits.Sub64(z[18], 17492726232193822651, b)\n\t\tz[19], b = bits.Sub64(z[19], 17666595880400198649, b)\n\t\tz[20], b = bits.Sub64(z[20], 1619463007483089584, b)\n\t\tz[21], _ = bits.Sub64(z[21], 7910025299994333900, b)\n\t}\n\treturn z\n}",
"func (curve *EdCurve) ToMontgomeryPointForm1(sqrtB *big.Int, p *EcPoint) (p1, p2 *EcPoint) {\n\toneSubY := new(big.Int).Sub(ONE, p.Y) // 1-y\n\toneAddY := new(big.Int).Add(ONE, p.Y) // 1+y\n\tp1, p2 = NewPoint(), NewPoint()\n\tp1.X = ModFraction(oneAddY, oneSubY, curve.P) // (1+y)/(1-y)\n\tp1.Y = ModFraction(p1.X, p.X, curve.P) // u/x\n\tp1.Y.Mul(p1.Y, sqrtB) // sqrtB * u/x\n\tp1.Y.Mod(p1.Y, curve.P)\n\n\tp2.X = ModFraction(oneSubY, oneAddY, curve.P) // (1-y)/(1+y)\n\tp2.Y = ModFraction(p2.X, p.X, curve.P) // u/x\n\tp2.Y.Mul(p2.Y, sqrtB) // sqrtB * u/x\n\tp2.Y.Mod(p2.Y, curve.P)\n\treturn\n}",
"func (z *E12) FromMont() *E12 {\n\tz.C0.FromMont()\n\tz.C1.FromMont()\n\treturn z\n}",
"func (curve *EdCurve) ToMontgomeryPointForm2(sqrtB *big.Int, p *EcPoint) (p1, p2 *EcPoint) {\n\tyAddOne := new(big.Int).Add(p.Y, ONE) // y+1\n\tySubOne := new(big.Int).Sub(p.Y, ONE) // y-1\n\tp1, p2 = NewPoint(), NewPoint()\n\tp1.X = ModFraction(yAddOne, ySubOne, curve.P) // (y+1)/(y-1)\n\tp1.Y = ModFraction(p1.X, p.X, curve.P) // u/x\n\tp1.Y.Mul(p1.Y, sqrtB) // sqrtB * u/x\n\tp1.Y.Mod(p1.Y, curve.P)\n\n\tp2.X = ModFraction(ySubOne, yAddOne, curve.P) // (y-1)/(y+1)\n\tp2.Y = ModFraction(p2.X, p.X, curve.P) // u/x\n\tp2.Y.Mul(p2.Y, sqrtB) // sqrtB * u/x\n\tp2.Y.Mod(p2.Y, curve.P)\n\treturn\n}",
"func (z *E12) ToMont() *E12 {\n\tz.C0.ToMont()\n\tz.C1.ToMont()\n\treturn z\n}",
"func (z *E6) FromMont() *E6 {\n\tz.B0.FromMont()\n\tz.B1.FromMont()\n\tz.B2.FromMont()\n\treturn z\n}",
"func ConvertCoord(target, camCenter Coordinate, n, i int, rp float64, right bool) Coordinate {\n\trx := getRadius(target, camCenter, rp)\n\n\tB := math.Atan( (target.Y - camCenter.Y) / (target.X - camCenter.X) )\n\n\tKx := 2.0 * math.Pi * float64(i) / float64(n)\n\tif right {\n\t\tKx += math.Pi\n\t\tKx *= -1.0\n\t}\n\t\n\tθ := 2.0 * math.Pi - Kx + B\n\n\treturn Coordinate {\n\t\tX: rx * math.Cos(θ),\n\t\tY: rx * math.Sin(θ),\n\t}\n}",
"func (z *E6) ToMont() *E6 {\n\tz.B0.ToMont()\n\tz.B1.ToMont()\n\tz.B2.ToMont()\n\treturn z\n}",
"func convertTemperature(fromUOM, toUOM string, value float64) float64 {\n\tfromUOM = resolveTemperatureSynonyms(fromUOM)\n\ttoUOM = resolveTemperatureSynonyms(toUOM)\n\tif fromUOM == toUOM {\n\t\treturn value\n\t}\n\t// convert to Kelvin\n\tswitch fromUOM {\n\tcase \"F\":\n\t\tvalue = (value-32)/1.8 + 273.15\n\tcase \"C\":\n\t\tvalue += 273.15\n\tcase \"Rank\":\n\t\tvalue /= 1.8\n\tcase \"Reau\":\n\t\tvalue = value*1.25 + 273.15\n\t}\n\t// convert from Kelvin\n\tswitch toUOM {\n\tcase \"F\":\n\t\tvalue = (value-273.15)*1.8 + 32\n\tcase \"C\":\n\t\tvalue -= 273.15\n\tcase \"Rank\":\n\t\tvalue *= 1.8\n\tcase \"Reau\":\n\t\tvalue = (value - 273.15) * 0.8\n\t}\n\treturn value\n}",
"func (f *Fpdf) UnitToPointConvert(u float64) (pt float64) {\n\treturn u * f.k\n}",
"func NotationToCoord(algebra string) Coord {\n\tif len(algebra) != 2 {\n\t\tpanic(\"Algebraic notation must be 2 characters precisely; got: '\" + algebra + \"'\")\n\t}\n\talgebra = strings.ToUpper(algebra)\n\n\tvar c Coord\n\tfile := algebra[0]\n\trank := algebra[1]\n\n\t// Remember, these are ASCII code points, not numbers\n\tif file < 65 || file > 72 || rank < 48 || rank > 57 {\n\t\tpanic(\"Bad position (\" + algebra + \")\")\n\t}\n\n\tc.Row = int(rank - 48 - 1)\n\tc.Col = int(file - 65)\n\n\treturn c\n}",
"func ASwissCoordToStruct(coord string) (*SwissCoord, error) {\n\n\tcompact := strings.ToUpper(strings.TrimSpace(coord))\n\tvar rights, heights string\n\tvar coordType, oldcoordType SwissCoordType\n\tvar right, height float64\n\tvar err error\n\nL1:\n\tfor i, index := 0, 0; i < 2; i++ {\n\t\tindex = strings.Index(compact, \" \")\n\t\tif index == -1 {\n\t\t\tindex = len(compact)\n\t\t}\n\n\t\tswitch compact[:2] {\n\t\tcase \"X:\":\n\t\t\tcoordType = LV03\n\t\t\theights = compact[2:index]\n\t\tcase \"Y:\":\n\t\t\tcoordType = LV03\n\t\t\trights = compact[2:index]\n\t\tcase \"E:\":\n\t\t\tcoordType = LV95\n\t\t\trights = compact[2:index]\n\t\tcase \"N:\":\n\t\t\tcoordType = LV95\n\t\t\theights = compact[2:index]\n\t\tdefault:\n\t\t\terr = cartconvert.ErrSyntax\n\t\t\tbreak L1\n\t\t}\n\n\t\tif oldcoordType != coordType {\n\t\t\terr = cartconvert.ErrSyntax\n\t\t\tbreak L1\n\t\t}\n\n\t\tif i == 1 {\n\t\t\tbreak L1\n\t\t}\n\t\tcompact = compact[index+len(\" \"):]\n\t\tcompact = strings.TrimLeft(compact, \" \")\n\t\toldcoordType = coordType\n\t}\n\n\tif err == nil {\n\n\t\tright, err = strconv.ParseFloat(rights, 64)\n\t\tif err == nil {\n\n\t\t\theight, err = strconv.ParseFloat(heights, 64)\n\t\t\tif err == nil {\n\t\t\t\treturn &SwissCoord{Easting: right, Northing: height, CoordType: coordType, El: cartconvert.Bessel1841Ellipsoid}, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil, err\n}",
"func ToCoordinate(l latlong.LatLonger) Coordinate {\n\trlat, rlon := rad(l.Lat()), rad(l.Lon())\n\n\treturn Coordinate{\n\t\tX: deg(math.Cos(rlat) * math.Cos(rlon)),\n\t\tY: deg(math.Cos(rlat) * math.Sin(rlon)),\n\t\tZ: deg(math.Sin(rlat)),\n\t}\n}",
"func (f *Fpdf) PointToUnitConvert(pt float64) (u float64) {\n\treturn pt / f.k\n}",
"func Convert_Merc_Point(point []float64) []float64 {\n\tx := float64(point[0]) / (math.Pi / 180.0) / 6378137.0\n\ty := 180.0 / math.Pi * (2.0*math.Atan(math.Exp((float64(point[1])/6378137.0))) - math.Pi/2.0)\n\treturn []float64{x, y}\n}",
"func (m *maps) cm(p0, p1 Point) (c Point) {\n\tvar mass, dm uint64\n\n\tx0 := int(p0.X >> fpmbits)\n\ty0 := int(p0.Y >> fpmbits)\n\tx1 := int(p1.X >> fpmbits)\n\ty1 := int(p1.Y >> fpmbits)\n\tfxp0 := (0x400 - (p0.X & 0x3FF)) & 0x400\n\tfyp0 := (0x400 - (p0.Y & 0x3FF)) & 0x400\n\tfxp1 := p1.X & 0x3FF\n\tfyp1 := p1.Y & 0x3FF\n\n\t// Finding WX. I hate FPM... Self-documenting in\n\t// an attempt keep overview of structure.\n\n\t// Leftmost column first.\n\n\t// Check if top-left corner is a fraction in y-axis\n\tif fyp0 != 0 {\n\t\tdm = fyp0 * m.dmap.ValueAt(x0, y0)\n\t} else {\n\t\tdm = m.dmap.ValueAt(x0, y0) << fpmbits\n\t}\n\n\t// Leftmost column without corners\n\tdm += (m.sumy.ValueAt(x0, y1) - m.sumy.ValueAt(x0, y0)) << fpmbits\n\n\t// check if bottom-left corner is a fraction in y-axis\n\tif fyp1 != 0 {\n\t\tdm += fyp1 * m.dmap.ValueAt(x0, y1)\n\t}\n\n\t// check is leftmost column + corners are a fraction\n\t// in the x axis, last correction, add everything to\n\t// c.X and mass\n\tif fxp0 != 0 {\n\t\tc.X = (uint64(x0) * fxp0 * dm) >> fpmbits\n\t\tmass = (fxp0 * dm) >> fpmbits\n\t} else {\n\t\tc.X = uint64(x0) * dm\n\t\tmass = dm\n\t}\n\n\t//Middle columns, without left and right columns or bottom row\n\n\t// Correct top-most row without corners, if a fraction\n\tif fyp0 != 0 {\n\t\tfor x := x0 + 1; x < x1; x++ {\n\t\t\tdm = m.dmap.ValueAt(x, y0) * fyp0\n\t\t\tc.X += uint64(x) * dm\n\t\t\tmass += dm\n\t\t}\n\t} else {\n\t\ty0--\n\t}\n\n\tfor x := x0 + 1; x < x1; x++ {\n\t\tdm = (m.sumy.ValueAt(x, y1) - m.sumy.ValueAt(x, y0))\n\t\tc.X += uint64(x) * dm\n\t\tmass += dm\n\t}\n\n\tif fyp0 == 0 {\n\t\ty0++\n\t}\n\n\t// Bottom row, excluding corners\n\tif fyp1 != 0 {\n\t\tfor x := x0 + 1; x < x1; x++ {\n\t\t\tdm = m.dmap.ValueAt(x, y1) * fyp1\n\t\t\tc.X += uint64(x) * dm\n\t\t\tmass += dm\n\t\t}\n\t}\n\n\t// Check if rightmost column is a fraction in the x-axis\n\tif fxp1 != 0 {\n\t\tif fyp0 != 0 {\n\t\t\tdm = fyp0 * m.dmap.ValueAt(x1, y0)\n\t\t}\n\n\t\t// Rightmost 
column without corners\n\t\tdm += (m.sumy.ValueAt(x1, y1) - m.sumy.ValueAt(x1, y0)) << fpmbits\n\n\t\t// check if bottom-right corner is a fraction in y-axis\n\t\tif fyp1 != 0 {\n\t\t\tdm += fyp1 * m.dmap.ValueAt(x1, y1)\n\t\t}\n\n\t\t// Correct for fraction, add to c.X and mass\n\t\tc.X = (uint64(x1) * fxp1 * dm) >> fpmbits\n\t\tmass = (fxp1 * dm) >> fpmbits\n\t}\n\n\t// Find WY. Similar procedure to WX, without the mass part.\n\n\t// Topmost row first.\n\n\t// Check if top-left corner is a fraction in x-axis\n\tif fxp0 != 0 {\n\t\tdm = fxp0 * m.dmap.ValueAt(x0, y0)\n\t} else {\n\t\tdm = m.dmap.ValueAt(x0, y0) << fpmbits\n\t}\n\n\t// Topmost row without corners\n\tdm += (m.sumx.ValueAt(x1, y0) - m.sumx.ValueAt(x0, y0)) << fpmbits\n\n\t// check if top-right corner is a fraction in x-axis\n\tif fxp1 != 0 {\n\t\tdm += fyp1 * m.dmap.ValueAt(x1, y0)\n\t}\n\n\t// check if topmost row + corners are a fraction\n\t// in the y axis, last correction, add everything to\n\t// c.Y and mass\n\tif fyp0 != 0 {\n\t\tc.Y = (uint64(y0) * fyp0 * dm) >> fpmbits\n\t} else {\n\t\tc.Y = uint64(y0) * dm\n\t}\n\n\t// Middle rows, without top and bottom rows and rightmost column\n\n\t// Correct left-most column without corners, if a fraction\n\tif fxp0 != 0 {\n\t\tfor y := y0 + 1; y < y1; y++ {\n\t\t\tc.Y += m.dmap.ValueAt(x0, y) * fxp0\n\t\t}\n\t} else {\n\t\tx0--\n\t}\n\n\tfor y := y0 + 1; y < y1; y++ {\n\t\tc.Y += uint64(y) * (m.sumx.ValueAt(x0, y) - m.sumx.ValueAt(x0, y))\n\t}\n\n\tif fxp0 == 0 {\n\t\tx0++\n\t}\n\n\t// Rightmost column, excluding corners\n\tif fxp1 != 0 {\n\t\tfor y := y0 + 1; y < y1; y++ {\n\t\t\tc.Y += m.dmap.ValueAt(x0, y) * fxp1\n\t\t}\n\t}\n\n\t// Check if bottom row is a fraction in the y-axis\n\tif fyp1 != 0 {\n\t\tif fxp0 != 0 {\n\t\t\tdm = fxp0 * m.dmap.ValueAt(x0, y1)\n\t\t}\n\n\t\t// Rightmost column without corners\n\t\tdm += (m.sumx.ValueAt(x1, y1) - m.sumx.ValueAt(x0, y1)) << fpmbits\n\n\t\t// check if bottom-right corner is a fraction in y-axis\n\t\tif 
fxp1 != 0 {\n\t\t\tdm += fxp1 * m.dmap.ValueAt(x1, y1)\n\t\t}\n\n\t\t// Correct for fraction, add to c.Y\n\t\tc.Y = (uint64(x1) * fyp1 * dm) >> fpmbits\n\t}\n\n\t// correct X and Y for mass\n\tc.X /= mass\n\tc.Y /= mass\n\treturn\n}",
"func newCoord(instruction string, xwards bool) {\n\tsteps, _ := strconv.Atoi(instruction[1:])\n\tif xwards {\n\t\tif string(instruction[0]) == \"R\" {\n\t\t\tfacing.x = facing.y\n\t\t} else {\n\t\t\tfacing.x = -facing.y\n\t\t}\n\t\twalk(steps, facing.x, coord, xwards)\n\t\tcoord.x += facing.x * steps\n\t} else {\n\t\tif string(instruction[0]) == \"R\" {\n\t\t\tfacing.y = -facing.x\n\t\t} else {\n\t\t\tfacing.y = facing.x\n\t\t}\n\t\twalk(steps, facing.y, coord, xwards)\n\t\tcoord.y += facing.y * steps\n\t}\n}",
"func ConvertPt(pt []float64) []int64 {\n\tnewpt := make([]int64, 2)\n\tnewpt[0] = int64(pt[0] * math.Pow(10.0, 7.0))\n\tnewpt[1] = int64(pt[1] * math.Pow(10.0, 7.0))\n\treturn newpt\n}",
"func PToXY(c ...float64) ([]float64, error) {\n\tif len(c) < 2 {\n\t\treturn c, ErrCoordsRequire2Values\n\t}\n\t// log.Println(\"Lon/Lat\", c)\n\t//x, y := PLonToX(c[0]), PLatToY(c[1])\n\n\tcrds := []float64{PLonToX(c[0]), PLatToY(c[1])}\n\tcrds = append(crds, c[2:]...)\n\treturn crds, nil\n}",
"func (s *OrganizationsService) ConvertMemberToOutsideCollaborator(ctx context.Context, org string, user string) (*Response, error) {\n\tu := fmt.Sprintf(\"orgs/%v/outside_collaborators/%v\", org, user)\n\treq, err := s.client.NewRequest(\"PUT\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.client.Do(ctx, req, nil)\n}",
"func unitCoordinates(rad float64) (p Point) {\n\tp.X = math.Sin(rad)\n\tp.Y = math.Cos(rad)\n\n\treturn\n}",
"func FromOctUV(e vector.Vector2) vector.Vector3 {\n\t// vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n\tv := vector.NewVector3(e.X(), e.Y(), 1.0-math.Abs(e.X())-math.Abs(e.Y()))\n\n\t// if (v.z < 0) v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);\n\tif v.Z() < 0 {\n\t\tn := multVect(vector.NewVector2(1.0-math.Abs(v.Y()), 1.0-math.Abs(v.X())), signNotZero(vector.NewVector2(v.X(), v.Y())))\n\t\tv = v.SetX(n.X()).SetY(n.Y())\n\t}\n\n\treturn v.Normalized()\n}",
"func Pt(x, y int) fixed.Point26_6 {\n\treturn fixed.Point26_6{\n\t\tX: fixed.Int26_6(x << 6),\n\t\tY: fixed.Int26_6(y << 6),\n\t}\n}",
"func ConvertActress(r dmm.Actress) (result Actress, err error) {\n\tvar bust, waist, hip, height int\n\tif r.Bust != \"\" {\n\t\tif bust, err = strconv.Atoi(r.Bust); err != nil {\n\t\t\terr = fmt.Errorf(\"bust is not numeric; %s; %v\", r.Bust, err)\n\t\t\treturn\n\t\t}\n\t}\n\tif r.Waist != \"\" {\n\t\tif waist, err = strconv.Atoi(r.Waist); err != nil {\n\t\t\terr = fmt.Errorf(\"waist is not numeric; %s; %v\", r.Waist, err)\n\t\t\treturn\n\t\t}\n\t}\n\tif r.Hip != \"\" {\n\t\tif hip, err = strconv.Atoi(r.Hip); err != nil {\n\t\t\terr = fmt.Errorf(\"hip is not numeric; %s; %v\", r.Hip, err)\n\t\t\treturn\n\t\t}\n\t}\n\tif r.Height != \"\" {\n\t\tif height, err = strconv.Atoi(r.Height); err != nil {\n\t\t\terr = fmt.Errorf(\"height is not numeric; %s; %v\", r.Height, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tresult = Actress{\n\t\tID: r.ID,\n\t\tName: r.Name,\n\t\tRuby: r.Ruby,\n\t\tBust: bust,\n\t\tCup: r.Cup,\n\t\tWaist: waist,\n\t\tHip: hip,\n\t\tHeight: height,\n\t\tBirthday: r.Birthday,\n\t\tBloodType: r.BloodType,\n\t\tHobby: r.Hobby,\n\t\tPrefecture: r.Prefectures,\n\t\tImageURL: r.ImageURL,\n\t\tListURL: r.ListURL,\n\t}\n\treturn\n}",
"func fixCoordSystem(p vertexType) vertexType {\n\treturn vertexType{\n\t\tp[0],\n\t\t-1.0 * p[2],\n\t\tp[1],\n\t}\n}",
"func (o OceanLaunchSpecSchedulingTaskTaskHeadroomOutput) GpuPerUnit() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v OceanLaunchSpecSchedulingTaskTaskHeadroom) *int { return v.GpuPerUnit }).(pulumi.IntPtrOutput)\n}",
"func GRS80LatLongToSwissCoord(gc *cartconvert.PolarCoord, coordType SwissCoordType) (*SwissCoord, error) {\n\n\tvar fn, fe float64\n\n\t// This sets the Ellipsoid to GRS80, regardless of the actual value set\n\tgc.El = cartconvert.GRS80Ellipsoid\n\n\tcart := cartconvert.PolarToCartesian(gc)\n\t// According to literature, the Granit87 parameters shall not be used in favour of\n\t// higher accuracy of the following shift values\n\n\t// pt := cartconvert.HelmertWGS84ToMGI.Transform(&cartconvert.Point3D{X: cart.X, Y: cart.Y, Z: cart.Z})\n\tpt := &cartconvert.Point3D{X: cart.X - 674.374, Y: cart.Y - 15.056, Z: cart.Z - 405.346}\n\tpolar := cartconvert.CartesianToPolar(&cartconvert.CartPoint{X: pt.X, Y: pt.Y, Z: pt.Z, El: cartconvert.Bessel1841Ellipsoid})\n\n\tswitch coordType {\n\tcase LV03:\n\t\tfe = 600000\n\t\tfn = 200000\n\tcase LV95:\n\t\tfe = -2600000\n\t\tfn = -1200000\n\tdefault:\n\t\treturn nil, cartconvert.ErrRange\n\t}\n\n\tgp := cartconvert.DirectTransverseMercator(\n\t\tpolar,\n\t\t46.952406, // lat0\n\t\t7.439583, // long0\n\t\t1,\n\t\tfe, // fe\n\t\tfn) // fn\n\n\treturn &SwissCoord{CoordType: coordType, Northing: gp.Y, Easting: gp.X, El: gp.El}, nil\n}",
"func Merc(this *SR) (forward, inverse Transformer, err error) {\n\tif math.IsNaN(this.Long0) {\n\t\tthis.Long0 = 0\n\t}\n\tvar con = this.B / this.A\n\tthis.Es = 1 - con*con\n\tif math.IsNaN(this.X0) {\n\t\tthis.X0 = 0\n\t}\n\tif math.IsNaN(this.Y0) {\n\t\tthis.Y0 = 0\n\t}\n\tthis.E = math.Sqrt(this.Es)\n\tif !math.IsNaN(this.LatTS) {\n\t\tif this.sphere {\n\t\t\tthis.K0 = math.Cos(this.LatTS)\n\t\t} else {\n\t\t\tthis.K0 = msfnz(this.E, math.Sin(this.LatTS), math.Cos(this.LatTS))\n\t\t}\n\t} else {\n\t\tif math.IsNaN(this.K0) {\n\t\t\tif !math.IsNaN(this.K) {\n\t\t\t\tthis.K0 = this.K\n\t\t\t} else {\n\t\t\t\tthis.K0 = 1\n\t\t\t}\n\t\t}\n\t}\n\n\t// Mercator forward equations--mapping lat,long to x,y\n\tforward = func(lon, lat float64) (x, y float64, err error) {\n\t\t// convert to radians\n\t\tif math.IsNaN(lat) || math.IsNaN(lon) || lat*r2d > 90 || lat*r2d < -90 || lon*r2d > 180 || lon*r2d < -180 {\n\t\t\terr = fmt.Errorf(\"in proj.Merc forward: invalid longitude (%g) or latitude (%g)\", lon, lat)\n\t\t\treturn\n\t\t}\n\n\t\tif math.Abs(math.Abs(lat)-halfPi) <= epsln {\n\t\t\terr = fmt.Errorf(\"in proj.Merc forward, abs(lat)==pi/2\")\n\t\t\treturn\n\t\t}\n\t\tif this.sphere {\n\t\t\tx = this.X0 + this.A*this.K0*adjust_lon(lon-this.Long0)\n\t\t\ty = this.Y0 + this.A*this.K0*math.Log(math.Tan(fortPi+0.5*lat))\n\t\t} else {\n\t\t\tvar sinphi = math.Sin(lat)\n\t\t\tvar ts = tsfnz(this.E, lat, sinphi)\n\t\t\tx = this.X0 + this.A*this.K0*adjust_lon(lon-this.Long0)\n\t\t\ty = this.Y0 - this.A*this.K0*math.Log(ts)\n\t\t}\n\t\treturn\n\t}\n\n\t// Mercator inverse equations--mapping x,y to lat/long\n\tinverse = func(x, y float64) (lon, lat float64, err error) {\n\t\tx -= this.X0\n\t\ty -= this.Y0\n\n\t\tif this.sphere {\n\t\t\tlat = halfPi - 2*math.Atan(math.Exp(-y/(this.A*this.K0)))\n\t\t} else {\n\t\t\tvar ts = math.Exp(-y / (this.A * this.K0))\n\t\t\tlat, err = phi2z(this.E, ts)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tlon = adjust_lon(this.Long0 + 
x/(this.A*this.K0))\n\t\treturn\n\t}\n\treturn\n}",
"func latLon2Grid(lat, lon float64, from eDatum, to gGrid) (int, int) {\n\t// Datum data for Lat/Lon to TM conversion\n\ta := Datum[from].a\n\te := Datum[from].e // sqrt(esq);\n\tb := Datum[from].b\n\n\t//===============\n\t// Lat/Lon -> TM\n\t//===============\n\tslat1 := math.Sin(lat)\n\tclat1 := math.Cos(lat)\n\tclat1sq := clat1 * clat1\n\ttanlat1sq := slat1 * slat1 / clat1sq\n\te2 := e * e\n\te4 := e2 * e2\n\te6 := e4 * e2\n\teg := (e * a / b)\n\teg2 := eg\n\tl1 := 1 - e2/4 - 3*e4/64 - 5*e6/256\n\tl2 := 3*e2/8 + 3*e4/32 + 45*e6/1024\n\tl3 := 15*e4/256 + 45*e6/1024\n\tl4 := 35 * e6 / 3072\n\tM := a * (l1*lat - l2*math.Sin(2*lat) + l3*math.Sin(4*lat) - l4*math.Sin(6*lat))\n\t//double rho = a*(1-e2) / pow((1-(e*slat1)*(e*slat1)),1.5);\n\tnu := a / math.Sqrt(1-(e*slat1)*(e*slat1))\n\tp := lon - grid[to].lon0\n\tk0 := grid[to].k0\n\t// y = northing = K1 + K2p2 + K3p4, where\n\tK1 := M * k0\n\tK2 := k0 * nu * slat1 * clat1 / 2\n\tK3 := (k0 * nu * slat1 * clat1 * clat1sq / 24) * (5 - tanlat1sq + 9*eg2*clat1sq + 4*eg2*eg2*clat1sq*clat1sq)\n\t// ING north\n\tY := K1 + K2*p*p + K3*p*p*p*p - grid[to].falseN\n\n\t// x = easting = K4p + K5p3, where\n\tK4 := k0 * nu * clat1\n\tK5 := (k0 * nu * clat1 * clat1sq / 6) * (1 - tanlat1sq + eg2*clat1*clat1)\n\t// ING east\n\tX := K4*p + K5*p*p*p + grid[to].falseE\n\n\t// final rounded results\n\tE := int(X + 0.5)\n\tN := int(Y + 0.5)\n\treturn E, N\n}",
"func UnitConvert() {\n\tvar nums []string\n\t\n\tif len(os.Args[1:]) == 0 {\n\t\tinput := bufio.NewScanner(os.Stdin)\n\t\tif input.Scan() {\n\t\t\tnums = strings.Split(input.Text(), \" \")\n\t\t}\n\t} else {\n\t\tnums = os.Args[1:]\n\t}\n\n\tfor _, n := range nums {\n\t\tt, err := strconv.ParseFloat(n, 64)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tc := Celsius(t)\n\t\tf := Fahrenheit(t)\n\t\tft := Feet(t)\n\t\tm := Meters(t)\n\t\tk := Kilograms(t)\n\t\tp := Pounds(t)\n\n\t\tfmt.Printf(\"%s = %s, %s = %s\\n\", c, CToF(c), f, FToC(f))\n\t\tfmt.Printf(\"%s = %s, %s = %s\\n\", ft, FToM(ft), m, MToF(m))\n\t\tfmt.Printf(\"%s = %s, %s = %s\\n\", k, KToP(k), p, PToK(p))\n\t}\n}",
"func (m *SubsystemMeasurement) ToPoint(p *data.Point, measurementName []byte, labels []LabeledDistributionMaker) {\n\tp.SetMeasurementName(measurementName)\n\tp.SetTimestamp(&m.Timestamp)\n\n\tfor i, d := range m.Distributions {\n\t\tp.AppendField(labels[i].Label, d.Get())\n\t}\n}",
"func UniformGridToCoord(in []int, xSize, ySize int) []float64 {\n\tm := float64(in[0])\n\tn := float64(in[1])\n\tN := float64(xSize * ySize)\n\ta := 4.0 * math.Pi / N\n\td := math.Sqrt(a)\n\tmTheta := math.Round(math.Pi / d)\n\tdTheta := math.Pi / mTheta\n\tdPhi := a / dTheta\n\ttheta := math.Pi * (m + 0.5) / mTheta\n\tmPhi := math.Round(2.0 * math.Pi * math.Sin(theta) / dPhi)\n\tphi := 2 * math.Pi * n / mPhi\n\treturn []float64{(theta/math.Pi)*180 - 90, (phi / math.Pi) * 180}\n}",
"func ConvertFromM(n float64, toUnit string) float64 {\n\ttoUnit = strings.TrimSpace(strings.ToLower(toUnit))\n\tif v, is := SPEED_UNITS[toUnit]; is {\n\t\treturn n / v\n\t}\n\tif v, is := Units[toUnit]; is {\n\t\treturn n / v\n\t}\n\treturn 0\n}",
"func MToF(m Meter) Foot { return Foot(m / 0.3048) }",
"func GoXy06(date1, date2 float64) (x, y float64) {\n\t// Maximum power of T in the polynomials for X and Y\n\tconst MAXPT = 5\n\n\t// Polynomial coefficients (arcsec, X then Y).\n\tvar xyp = [2][MAXPT + 1]float64{\n\n\t\t{-0.016617,\n\t\t\t2004.191898,\n\t\t\t-0.4297829,\n\t\t\t-0.19861834,\n\t\t\t0.000007578,\n\t\t\t0.0000059285,\n\t\t},\n\t\t{-0.006951,\n\t\t\t-0.025896,\n\t\t\t-22.4072747,\n\t\t\t0.00190059,\n\t\t\t0.001112526,\n\t\t\t0.0000001358,\n\t\t},\n\t}\n\n\t// Fundamental-argument multipliers: luni-solar terms\n\tvar mfals = [...][5]int{\n\n\t\t// 1-10\n\t\t{0, 0, 0, 0, 1},\n\t\t{0, 0, 2, -2, 2},\n\t\t{0, 0, 2, 0, 2},\n\t\t{0, 0, 0, 0, 2},\n\t\t{0, 1, 0, 0, 0},\n\t\t{0, 1, 2, -2, 2},\n\t\t{1, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 1},\n\t\t{1, 0, 2, 0, 2},\n\t\t{0, 1, -2, 2, -2},\n\n\t\t// 11-20\n\t\t{0, 0, 2, -2, 1},\n\t\t{1, 0, -2, 0, -2},\n\t\t{1, 0, 0, -2, 0},\n\t\t{1, 0, 0, 0, 1},\n\t\t{1, 0, 0, 0, -1},\n\t\t{1, 0, -2, -2, -2},\n\t\t{1, 0, 2, 0, 1},\n\t\t{2, 0, -2, 0, -1},\n\t\t{0, 0, 0, 2, 0},\n\t\t{0, 0, 2, 2, 2},\n\n\t\t// 21-30\n\t\t{2, 0, 0, -2, 0},\n\t\t{0, 2, -2, 2, -2},\n\t\t{2, 0, 2, 0, 2},\n\t\t{1, 0, 2, -2, 2},\n\t\t{1, 0, -2, 0, -1},\n\t\t{2, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 0},\n\t\t{0, 1, 0, 0, 1},\n\t\t{1, 0, 0, -2, -1},\n\t\t{0, 2, 2, -2, 2},\n\n\t\t// 31-40\n\t\t{0, 0, 2, -2, 0},\n\t\t{1, 0, 0, -2, 1},\n\t\t{0, 1, 0, 0, -1},\n\t\t{0, 2, 0, 0, 0},\n\t\t{1, 0, -2, -2, -1},\n\t\t{1, 0, 2, 2, 2},\n\t\t{0, 1, 2, 0, 2},\n\t\t{2, 0, -2, 0, 0},\n\t\t{0, 0, 2, 2, 1},\n\t\t{0, 1, -2, 0, -2},\n\n\t\t// 41-50\n\t\t{0, 0, 0, 2, 1},\n\t\t{1, 0, 2, -2, 1},\n\t\t{2, 0, 0, -2, -1},\n\t\t{2, 0, 2, -2, 2},\n\t\t{2, 0, 2, 0, 1},\n\t\t{0, 0, 0, 2, -1},\n\t\t{0, 1, -2, 2, -1},\n\t\t{1, 1, 0, -2, 0},\n\t\t{2, 0, 0, -2, 1},\n\t\t{1, 0, 0, 2, 0},\n\n\t\t// 51-60\n\t\t{0, 1, 2, -2, 1},\n\t\t{1, -1, 0, 0, 0},\n\t\t{0, 1, -1, 1, -1},\n\t\t{2, 0, -2, 0, -2},\n\t\t{0, 1, 0, -2, 0},\n\t\t{1, 0, 0, -1, 0},\n\t\t{3, 0, 2, 0, 2},\n\t\t{0, 0, 0, 1, 0},\n\t\t{1, -1, 2, 0, 
2},\n\t\t{1, 1, -2, -2, -2},\n\n\t\t// 61-70\n\t\t{1, 0, -2, 0, 0},\n\t\t{2, 0, 0, 0, -1},\n\t\t{0, 1, -2, -2, -2},\n\t\t{1, 1, 2, 0, 2},\n\t\t{2, 0, 0, 0, 1},\n\t\t{1, 1, 0, 0, 0},\n\t\t{1, 0, -2, 2, -1},\n\t\t{1, 0, 2, 0, 0},\n\t\t{1, -1, 0, -1, 0},\n\t\t{1, 0, 0, 0, 2},\n\n\t\t// 71-80\n\t\t{1, 0, -1, 0, -1},\n\t\t{0, 0, 2, 1, 2},\n\t\t{1, 0, -2, -4, -2},\n\t\t{1, -1, 0, -1, -1},\n\t\t{1, 0, 2, 2, 1},\n\t\t{0, 2, -2, 2, -1},\n\t\t{1, 0, 0, 0, -2},\n\t\t{2, 0, -2, -2, -2},\n\t\t{1, 1, 2, -2, 2},\n\t\t{2, 0, -2, -4, -2},\n\n\t\t// 81-90\n\t\t{1, 0, -4, 0, -2},\n\t\t{2, 0, 2, -2, 1},\n\t\t{1, 0, 0, -1, -1},\n\t\t{2, 0, 2, 2, 2},\n\t\t{3, 0, 0, 0, 0},\n\t\t{1, 0, 0, 2, 1},\n\t\t{0, 0, 2, -2, -1},\n\t\t{3, 0, 2, -2, 2},\n\t\t{0, 0, 4, -2, 2},\n\t\t{1, 0, 0, -4, 0},\n\n\t\t// 91-100\n\t\t{0, 1, 2, 0, 1},\n\t\t{2, 0, 0, -4, 0},\n\t\t{1, 1, 0, -2, -1},\n\t\t{2, 0, -2, 0, 1},\n\t\t{0, 0, 2, 0, -1},\n\t\t{0, 1, -2, 0, -1},\n\t\t{0, 1, 0, 0, 2},\n\t\t{0, 0, 2, -1, 2},\n\t\t{0, 0, 2, 4, 2},\n\t\t{2, 1, 0, -2, 0},\n\n\t\t// 101-110\n\t\t{1, 1, 0, -2, 1},\n\t\t{1, -1, 0, -2, 0},\n\t\t{1, -1, 0, -1, -2},\n\t\t{1, -1, 0, 0, 1},\n\t\t{0, 1, -2, 2, 0},\n\t\t{0, 1, 0, 0, -2},\n\t\t{1, -1, 2, 2, 2},\n\t\t{1, 0, 0, 2, -1},\n\t\t{1, -1, -2, -2, -2},\n\t\t{3, 0, 2, 0, 1},\n\n\t\t// 111-120\n\t\t{0, 1, 2, 2, 2},\n\t\t{1, 0, 2, -2, 0},\n\t\t{1, 1, -2, -2, -1},\n\t\t{1, 0, 2, -4, 1},\n\t\t{0, 1, -2, -2, -1},\n\t\t{2, -1, 2, 0, 2},\n\t\t{0, 0, 0, 2, 2},\n\t\t{1, -1, 2, 0, 1},\n\t\t{1, -1, -2, 0, -2},\n\t\t{0, 1, 0, 2, 0},\n\n\t\t// 121-130\n\t\t{0, 1, 2, -2, 0},\n\t\t{0, 0, 0, 1, 1},\n\t\t{1, 0, -2, -2, 0},\n\t\t{0, 3, 2, -2, 2},\n\t\t{2, 1, 2, 0, 2},\n\t\t{1, 1, 0, 0, 1},\n\t\t{2, 0, 0, 2, 0},\n\t\t{1, 1, 2, 0, 1},\n\t\t{1, 0, 0, -2, -2},\n\t\t{1, 0, -2, 2, 0},\n\n\t\t// 131-140\n\t\t{1, 0, -1, 0, -2},\n\t\t{0, 1, 0, -2, 1},\n\t\t{0, 1, 0, 1, 0},\n\t\t{0, 0, 0, 1, -1},\n\t\t{1, 0, -2, 2, -2},\n\t\t{1, -1, 0, 0, -1},\n\t\t{0, 0, 0, 4, 0},\n\t\t{1, -1, 0, 2, 0},\n\t\t{1, 0, 2, 1, 
2},\n\t\t{1, 0, 2, -1, 2},\n\n\t\t// 141-150\n\t\t{0, 0, 2, 1, 1},\n\t\t{1, 0, 0, -2, 2},\n\t\t{1, 0, -2, 0, 1},\n\t\t{1, 0, -2, -4, -1},\n\t\t{0, 0, 2, 2, 0},\n\t\t{1, 1, 2, -2, 1},\n\t\t{1, 0, -2, 1, -1},\n\t\t{0, 0, 1, 0, 1},\n\t\t{2, 0, -2, -2, -1},\n\t\t{4, 0, 2, 0, 2},\n\n\t\t// 151-160\n\t\t{2, -1, 0, 0, 0},\n\t\t{2, 1, 2, -2, 2},\n\t\t{0, 1, 2, 1, 2},\n\t\t{1, 0, 4, -2, 2},\n\t\t{1, 1, 0, 0, -1},\n\t\t{2, 0, 2, 0, 0},\n\t\t{2, 0, -2, -4, -1},\n\t\t{1, 0, -1, 0, 0},\n\t\t{1, 0, 0, 1, 0},\n\t\t{0, 1, 0, 2, 1},\n\n\t\t// 161-170\n\t\t{1, 0, -4, 0, -1},\n\t\t{1, 0, 0, -4, -1},\n\t\t{2, 0, 2, 2, 1},\n\t\t{2, 1, 0, 0, 0},\n\t\t{0, 0, 2, -3, 2},\n\t\t{1, 2, 0, -2, 0},\n\t\t{0, 3, 0, 0, 0},\n\t\t{0, 0, 4, 0, 2},\n\t\t{0, 0, 2, -4, 1},\n\t\t{2, 0, 0, -2, -2},\n\n\t\t// 171-180\n\t\t{1, 1, -2, -4, -2},\n\t\t{0, 1, 0, -2, -1},\n\t\t{0, 0, 0, 4, 1},\n\t\t{3, 0, 2, -2, 1},\n\t\t{1, 0, 2, 4, 2},\n\t\t{1, 1, -2, 0, -2},\n\t\t{0, 0, 4, -2, 1},\n\t\t{2, -2, 0, -2, 0},\n\t\t{2, 1, 0, -2, -1},\n\t\t{0, 2, 0, -2, 0},\n\n\t\t// 181-190\n\t\t{1, 0, 0, -1, 1},\n\t\t{1, 1, 2, 2, 2},\n\t\t{3, 0, 0, 0, -1},\n\t\t{2, 0, 0, -4, -1},\n\t\t{3, 0, 2, 2, 2},\n\t\t{0, 0, 2, 4, 1},\n\t\t{0, 2, -2, -2, -2},\n\t\t{1, -1, 0, -2, -1},\n\t\t{0, 0, 2, -1, 1},\n\t\t{2, 0, 0, 2, 1},\n\n\t\t// 191-200\n\t\t{1, -1, -2, 2, -1},\n\t\t{0, 0, 0, 2, -2},\n\t\t{2, 0, 0, -4, 1},\n\t\t{1, 0, 0, -4, 1},\n\t\t{2, 0, 2, -4, 1},\n\t\t{4, 0, 2, -2, 2},\n\t\t{2, 1, -2, 0, -1},\n\t\t{2, 1, -2, -4, -2},\n\t\t{3, 0, 0, -4, 0},\n\t\t{1, -1, 2, 2, 1},\n\n\t\t// 201-210\n\t\t{1, -1, -2, 0, -1},\n\t\t{0, 2, 0, 0, 1},\n\t\t{1, 2, -2, -2, -2},\n\t\t{1, 1, 0, -4, 0},\n\t\t{2, 0, 0, -2, 2},\n\t\t{0, 2, 2, -2, 1},\n\t\t{1, 0, 2, 0, -1},\n\t\t{2, 1, 0, -2, 1},\n\t\t{2, -1, -2, 0, -1},\n\t\t{1, -1, -2, -2, -1},\n\n\t\t// 211-220\n\t\t{0, 1, -2, 1, -2},\n\t\t{1, 0, -4, 2, -2},\n\t\t{0, 1, 2, 2, 1},\n\t\t{3, 0, 0, 0, 1},\n\t\t{2, -1, 2, 2, 2},\n\t\t{0, 1, -2, -4, -2},\n\t\t{1, 0, -2, -3, -2},\n\t\t{2, 0, 0, 0, 2},\n\t\t{1, -1, 0, 
-2, -2},\n\t\t{2, 0, -2, 2, -1},\n\n\t\t// 221-230\n\t\t{0, 2, -2, 0, -2},\n\t\t{3, 0, -2, 0, -1},\n\t\t{2, -1, 2, 0, 1},\n\t\t{1, 0, -2, -1, -2},\n\t\t{0, 0, 2, 0, 3},\n\t\t{2, 0, -4, 0, -2},\n\t\t{2, 1, 0, -4, 0},\n\t\t{1, 1, -2, 1, -1},\n\t\t{0, 2, 2, 0, 2},\n\t\t{1, -1, 2, -2, 2},\n\n\t\t// 231-240\n\t\t{1, -1, 0, -2, 1},\n\t\t{2, 1, 2, 0, 1},\n\t\t{1, 0, 2, -4, 2},\n\t\t{1, 1, -2, 0, -1},\n\t\t{1, 1, 0, 2, 0},\n\t\t{1, 0, 0, -3, 0},\n\t\t{2, 0, 2, -1, 2},\n\t\t{0, 2, 0, 0, -1},\n\t\t{2, -1, 0, -2, 0},\n\t\t{4, 0, 0, 0, 0},\n\n\t\t// 241-250\n\t\t{2, 1, -2, -2, -2},\n\t\t{0, 2, -2, 2, 0},\n\t\t{1, 0, 2, 1, 1},\n\t\t{1, 0, -1, 0, -3},\n\t\t{3, -1, 2, 0, 2},\n\t\t{2, 0, 2, -2, 0},\n\t\t{1, -2, 0, 0, 0},\n\t\t{2, 0, 0, 0, -2},\n\t\t{1, 0, 0, 4, 0},\n\t\t{0, 1, 0, 1, 1},\n\n\t\t// 251-260\n\t\t{1, 0, 2, 2, 0},\n\t\t{0, 1, 0, 2, -1},\n\t\t{0, 1, 0, 1, -1},\n\t\t{0, 0, 2, -2, 3},\n\t\t{3, 1, 2, 0, 2},\n\t\t{1, 1, 2, 1, 2},\n\t\t{1, 1, -2, 2, -1},\n\t\t{2, -1, 2, -2, 2},\n\t\t{1, -2, 2, 0, 2},\n\t\t{1, 0, 2, -4, 0},\n\n\t\t// 261-270\n\t\t{0, 0, 1, 0, 0},\n\t\t{1, 0, 2, -3, 1},\n\t\t{1, -2, 0, -2, 0},\n\t\t{2, 0, 0, 2, -1},\n\t\t{1, 1, 2, -4, 1},\n\t\t{4, 0, 2, 0, 1},\n\t\t{0, 1, 2, 1, 1},\n\t\t{1, 2, 2, -2, 2},\n\t\t{2, 0, 2, 1, 2},\n\t\t{2, 1, 2, -2, 1},\n\n\t\t// 271-280\n\t\t{1, 0, 2, -1, 1},\n\t\t{1, 0, 4, -2, 1},\n\t\t{1, -1, 2, -2, 1},\n\t\t{0, 1, 0, -4, 0},\n\t\t{3, 0, -2, -2, -2},\n\t\t{0, 0, 4, -4, 2},\n\t\t{2, 0, -4, -2, -2},\n\t\t{2, -2, 0, -2, -1},\n\t\t{1, 0, 2, -2, -1},\n\t\t{2, 0, -2, -6, -2},\n\n\t\t// 281-290\n\t\t{1, 0, -2, 1, -2},\n\t\t{1, 0, -2, 2, 1},\n\t\t{1, -1, 0, 2, -1},\n\t\t{1, 0, -2, 1, 0},\n\t\t{2, -1, 0, -2, 1},\n\t\t{1, -1, 0, 2, 1},\n\t\t{2, 0, -2, -2, 0},\n\t\t{1, 0, 2, -3, 2},\n\t\t{0, 0, 0, 4, -1},\n\t\t{2, -1, 0, 0, 1},\n\n\t\t// 291-300\n\t\t{2, 0, 4, -2, 2},\n\t\t{0, 0, 2, 3, 2},\n\t\t{0, 1, 4, -2, 2},\n\t\t{0, 1, -2, 2, 1},\n\t\t{1, 1, 0, 2, 1},\n\t\t{1, 0, 0, 4, 1},\n\t\t{0, 0, 4, 0, 1},\n\t\t{2, 0, 0, -3, 0},\n\t\t{1, 0, 0, 
-1, -2},\n\t\t{1, -2, -2, -2, -2},\n\n\t\t// 301-310\n\t\t{3, 0, 0, 2, 0},\n\t\t{2, 0, 2, -4, 2},\n\t\t{1, 1, -2, -4, -1},\n\t\t{1, 0, -2, -6, -2},\n\t\t{2, -1, 0, 0, -1},\n\t\t{2, -1, 0, 2, 0},\n\t\t{0, 1, 2, -2, -1},\n\t\t{1, 1, 0, 1, 0},\n\t\t{1, 2, 0, -2, -1},\n\t\t{1, 0, 0, 1, -1},\n\n\t\t// 311-320\n\t\t{0, 0, 1, 0, 2},\n\t\t{3, 1, 2, -2, 2},\n\t\t{1, 0, -4, -2, -2},\n\t\t{1, 0, 2, 4, 1},\n\t\t{1, -2, 2, 2, 2},\n\t\t{1, -1, -2, -4, -2},\n\t\t{0, 0, 2, -4, 2},\n\t\t{0, 0, 2, -3, 1},\n\t\t{2, 1, -2, 0, 0},\n\t\t{3, 0, -2, -2, -1},\n\n\t\t// 321-330\n\t\t{2, 0, 2, 4, 2},\n\t\t{0, 0, 0, 0, 3},\n\t\t{2, -1, -2, -2, -2},\n\t\t{2, 0, 0, -1, 0},\n\t\t{3, 0, 2, -4, 2},\n\t\t{2, 1, 2, 2, 2},\n\t\t{0, 0, 3, 0, 3},\n\t\t{1, 1, 2, 2, 1},\n\t\t{2, 1, 0, 0, -1},\n\t\t{1, 2, 0, -2, 1},\n\n\t\t// 331-340\n\t\t{3, 0, 2, 2, 1},\n\t\t{1, -1, -2, 2, -2},\n\t\t{1, 1, 0, -1, 0},\n\t\t{1, 2, 0, 0, 0},\n\t\t{1, 0, 4, 0, 2},\n\t\t{1, -1, 2, 4, 2},\n\t\t{2, 1, 0, 0, 1},\n\t\t{1, 0, 0, 2, 2},\n\t\t{1, -1, -2, 2, 0},\n\t\t{0, 2, -2, -2, -1},\n\n\t\t// 341-350\n\t\t{2, 0, -2, 0, 2},\n\t\t{5, 0, 2, 0, 2},\n\t\t{3, 0, -2, -6, -2},\n\t\t{1, -1, 2, -1, 2},\n\t\t{3, 0, 0, -4, -1},\n\t\t{1, 0, 0, 1, 1},\n\t\t{1, 0, -4, 2, -1},\n\t\t{0, 1, 2, -4, 1},\n\t\t{1, 2, 2, 0, 2},\n\t\t{0, 1, 0, -2, -2},\n\n\t\t// 351-360\n\t\t{0, 0, 2, -1, 0},\n\t\t{1, 0, 1, 0, 1},\n\t\t{0, 2, 0, -2, 1},\n\t\t{3, 0, 2, 0, 0},\n\t\t{1, 1, -2, 1, 0},\n\t\t{2, 1, -2, -4, -1},\n\t\t{3, -1, 0, 0, 0},\n\t\t{2, -1, -2, 0, 0},\n\t\t{4, 0, 2, -2, 1},\n\t\t{2, 0, -2, 2, 0},\n\n\t\t// 361-370\n\t\t{1, 1, 2, -2, 0},\n\t\t{1, 0, -2, 4, -1},\n\t\t{1, 0, -2, -2, 1},\n\t\t{2, 0, 2, -4, 0},\n\t\t{1, 1, 0, -2, -2},\n\t\t{1, 1, -2, -2, 0},\n\t\t{1, 0, 1, -2, 1},\n\t\t{2, -1, -2, -4, -2},\n\t\t{3, 0, -2, 0, -2},\n\t\t{0, 1, -2, -2, 0},\n\n\t\t// 371-380\n\t\t{3, 0, 0, -2, -1},\n\t\t{1, 0, -2, -3, -1},\n\t\t{0, 1, 0, -4, -1},\n\t\t{1, -2, 2, -2, 1},\n\t\t{0, 1, -2, 1, -1},\n\t\t{1, -1, 0, 0, 2},\n\t\t{2, 0, 0, 1, 0},\n\t\t{1, -2, 0, 2, 
0},\n\t\t{1, 2, -2, -2, -1},\n\t\t{0, 0, 4, -4, 1},\n\n\t\t// 381-390\n\t\t{0, 1, 2, 4, 2},\n\t\t{0, 1, -4, 2, -2},\n\t\t{3, 0, -2, 0, 0},\n\t\t{2, -1, 2, 2, 1},\n\t\t{0, 1, -2, -4, -1},\n\t\t{4, 0, 2, 2, 2},\n\t\t{2, 0, -2, -3, -2},\n\t\t{2, 0, 0, -6, 0},\n\t\t{1, 0, 2, 0, 3},\n\t\t{3, 1, 0, 0, 0},\n\n\t\t// 391-400\n\t\t{3, 0, 0, -4, 1},\n\t\t{1, -1, 2, 0, 0},\n\t\t{1, -1, 0, -4, 0},\n\t\t{2, 0, -2, 2, -2},\n\t\t{1, 1, 0, -2, 2},\n\t\t{4, 0, 0, -2, 0},\n\t\t{2, 2, 0, -2, 0},\n\t\t{0, 1, 2, 0, 0},\n\t\t{1, 1, 0, -4, 1},\n\t\t{1, 0, 0, -4, -2},\n\n\t\t// 401-410\n\t\t{0, 0, 0, 1, 2},\n\t\t{3, 0, 0, 2, 1},\n\t\t{1, 1, 0, -4, -1},\n\t\t{0, 0, 2, 2, -1},\n\t\t{1, 1, 2, 0, 0},\n\t\t{1, -1, 2, -4, 1},\n\t\t{1, 1, 0, 0, 2},\n\t\t{0, 0, 2, 6, 2},\n\t\t{4, 0, -2, -2, -1},\n\t\t{2, 1, 0, -4, -1},\n\n\t\t// 411-420\n\t\t{0, 0, 0, 3, 1},\n\t\t{1, -1, -2, 0, 0},\n\t\t{0, 0, 2, 1, 0},\n\t\t{1, 0, 0, 2, -2},\n\t\t{3, -1, 2, 2, 2},\n\t\t{3, -1, 2, -2, 2},\n\t\t{1, 0, 0, -1, 2},\n\t\t{1, -2, 2, -2, 2},\n\t\t{0, 1, 0, 2, 2},\n\t\t{0, 1, -2, -1, -2},\n\n\t\t// 421-430\n\t\t{1, 1, -2, 0, 0},\n\t\t{0, 2, 2, -2, 0},\n\t\t{3, -1, -2, -1, -2},\n\t\t{1, 0, 0, -6, 0},\n\t\t{1, 0, -2, -4, 0},\n\t\t{2, 1, 0, -4, 1},\n\t\t{2, 0, 2, 0, -1},\n\t\t{2, 0, -4, 0, -1},\n\t\t{0, 0, 3, 0, 2},\n\t\t{2, 1, -2, -2, -1},\n\n\t\t// 431-440\n\t\t{1, -2, 0, 0, 1},\n\t\t{2, -1, 0, -4, 0},\n\t\t{0, 0, 0, 3, 0},\n\t\t{5, 0, 2, -2, 2},\n\t\t{1, 2, -2, -4, -2},\n\t\t{1, 0, 4, -4, 2},\n\t\t{0, 0, 4, -1, 2},\n\t\t{3, 1, 0, -4, 0},\n\t\t{3, 0, 0, -6, 0},\n\t\t{2, 0, 0, 2, 2},\n\n\t\t// 441-450\n\t\t{2, -2, 2, 0, 2},\n\t\t{1, 0, 0, -3, 1},\n\t\t{1, -2, -2, 0, -2},\n\t\t{1, -1, -2, -3, -2},\n\t\t{0, 0, 2, -2, -2},\n\t\t{2, 0, -2, -4, 0},\n\t\t{1, 0, -4, 0, 0},\n\t\t{0, 1, 0, -1, 0},\n\t\t{4, 0, 0, 0, -1},\n\t\t{3, 0, 2, -1, 2},\n\n\t\t// 451-460\n\t\t{3, -1, 2, 0, 1},\n\t\t{2, 0, 2, -1, 1},\n\t\t{1, 2, 2, -2, 1},\n\t\t{1, 1, 0, 2, -1},\n\t\t{0, 2, 2, 0, 1},\n\t\t{3, 1, 2, 0, 1},\n\t\t{1, 1, 2, 1, 1},\n\t\t{1, 1, 0, 
-1, 1},\n\t\t{1, -2, 0, -2, -1},\n\t\t{4, 0, 0, -4, 0},\n\n\t\t// 461-470\n\t\t{2, 1, 0, 2, 0},\n\t\t{1, -1, 0, 4, 0},\n\t\t{0, 1, 0, -2, 2},\n\t\t{0, 0, 2, 0, -2},\n\t\t{1, 0, -1, 0, 1},\n\t\t{3, 0, 2, -2, 0},\n\t\t{2, 0, 2, 2, 0},\n\t\t{1, 2, 0, -4, 0},\n\t\t{1, -1, 0, -3, 0},\n\t\t{0, 1, 0, 4, 0},\n\n\t\t// 471 - 480\n\t\t{0, 1, -2, 0, 0},\n\t\t{2, 2, 2, -2, 2},\n\t\t{0, 0, 0, 1, -2},\n\t\t{0, 2, -2, 0, -1},\n\t\t{4, 0, 2, -4, 2},\n\t\t{2, 0, -4, 2, -2},\n\t\t{2, -1, -2, 0, -2},\n\t\t{1, 1, 4, -2, 2},\n\t\t{1, 1, 2, -4, 2},\n\t\t{1, 0, 2, 3, 2},\n\n\t\t// 481-490\n\t\t{1, 0, 0, 4, -1},\n\t\t{0, 0, 0, 4, 2},\n\t\t{2, 0, 0, 4, 0},\n\t\t{1, 1, -2, 2, 0},\n\t\t{2, 1, 2, 1, 2},\n\t\t{2, 1, 2, -4, 1},\n\t\t{2, 0, 2, 1, 1},\n\t\t{2, 0, -4, -2, -1},\n\t\t{2, 0, -2, -6, -1},\n\t\t{2, -1, 2, -1, 2},\n\n\t\t// 491-500\n\t\t{1, -2, 2, 0, 1},\n\t\t{1, -2, 0, -2, 1},\n\t\t{1, -1, 0, -4, -1},\n\t\t{0, 2, 2, 2, 2},\n\t\t{0, 2, -2, -4, -2},\n\t\t{0, 1, 2, 3, 2},\n\t\t{0, 1, 0, -4, 1},\n\t\t{3, 0, 0, -2, 1},\n\t\t{2, 1, -2, 0, 1},\n\t\t{2, 0, 4, -2, 1},\n\n\t\t// 501-510\n\t\t{2, 0, 0, -3, -1},\n\t\t{2, -2, 0, -2, 1},\n\t\t{2, -1, 2, -2, 1},\n\t\t{1, 0, 0, -6, -1},\n\t\t{1, -2, 0, 0, -1},\n\t\t{1, -2, -2, -2, -1},\n\t\t{0, 1, 4, -2, 1},\n\t\t{0, 0, 2, 3, 1},\n\t\t{2, -1, 0, -1, 0},\n\t\t{1, 3, 0, -2, 0},\n\n\t\t// 511-520\n\t\t{0, 3, 0, -2, 0},\n\t\t{2, -2, 2, -2, 2},\n\t\t{0, 0, 4, -2, 0},\n\t\t{4, -1, 2, 0, 2},\n\t\t{2, 2, -2, -4, -2},\n\t\t{4, 1, 2, 0, 2},\n\t\t{4, -1, -2, -2, -2},\n\t\t{2, 1, 0, -2, -2},\n\t\t{2, 1, -2, -6, -2},\n\t\t{2, 0, 0, -1, 1},\n\n\t\t// 521-530\n\t\t{2, -1, -2, 2, -1},\n\t\t{1, 1, -2, 2, -2},\n\t\t{1, 1, -2, -3, -2},\n\t\t{1, 0, 3, 0, 3},\n\t\t{1, 0, -2, 1, 1},\n\t\t{1, 0, -2, 0, 2},\n\t\t{1, -1, 2, 1, 2},\n\t\t{1, -1, 0, 0, -2},\n\t\t{1, -1, -4, 2, -2},\n\t\t{0, 3, -2, -2, -2},\n\n\t\t// 531-540\n\t\t{0, 1, 0, 4, 1},\n\t\t{0, 0, 4, 2, 2},\n\t\t{3, 0, -2, -2, 0},\n\t\t{2, -2, 0, 0, 0},\n\t\t{1, 1, 2, -4, 0},\n\t\t{1, 1, 0, -3, 0},\n\t\t{1, 0, 2, -3, 
0},\n\t\t{1, -1, 2, -2, 0},\n\t\t{0, 2, 0, 2, 0},\n\t\t{0, 0, 2, 4, 0},\n\n\t\t// 541-550\n\t\t{1, 0, 1, 0, 0},\n\t\t{3, 1, 2, -2, 1},\n\t\t{3, 0, 4, -2, 2},\n\t\t{3, 0, 2, 1, 2},\n\t\t{3, 0, 0, 2, -1},\n\t\t{3, 0, 0, 0, 2},\n\t\t{3, 0, -2, 2, -1},\n\t\t{2, 0, 4, -4, 2},\n\t\t{2, 0, 2, -3, 2},\n\t\t{2, 0, 0, 4, 1},\n\n\t\t// 551-560\n\t\t{2, 0, 0, -3, 1},\n\t\t{2, 0, -4, 2, -1},\n\t\t{2, 0, -2, -2, 1},\n\t\t{2, -2, 2, 2, 2},\n\t\t{2, -2, 0, -2, -2},\n\t\t{2, -1, 0, 2, 1},\n\t\t{2, -1, 0, 2, -1},\n\t\t{1, 1, 2, 4, 2},\n\t\t{1, 1, 0, 1, 1},\n\t\t{1, 1, 0, 1, -1},\n\n\t\t// 561-570\n\t\t{1, 1, -2, -6, -2},\n\t\t{1, 0, 0, -3, -1},\n\t\t{1, 0, -4, -2, -1},\n\t\t{1, 0, -2, -6, -1},\n\t\t{1, -2, 2, 2, 1},\n\t\t{1, -2, -2, 2, -1},\n\t\t{1, -1, -2, -4, -1},\n\t\t{0, 2, 0, 0, 2},\n\t\t{0, 1, 2, -4, 2},\n\t\t{0, 1, -2, 4, -1},\n\n\t\t// 571-580\n\t\t{5, 0, 0, 0, 0},\n\t\t{3, 0, 0, -3, 0},\n\t\t{2, 2, 0, -4, 0},\n\t\t{1, -1, 2, 2, 0},\n\t\t{0, 1, 0, 3, 0},\n\t\t{4, 0, -2, 0, -1},\n\t\t{3, 0, -2, -6, -1},\n\t\t{3, 0, -2, -1, -1},\n\t\t{2, 1, 2, 2, 1},\n\t\t{2, 1, 0, 2, 1},\n\n\t\t// 581-590\n\t\t{2, 0, 2, 4, 1},\n\t\t{2, 0, 2, -6, 1},\n\t\t{2, 0, 2, -2, -1},\n\t\t{2, 0, 0, -6, -1},\n\t\t{2, -1, -2, -2, -1},\n\t\t{1, 2, 2, 0, 1},\n\t\t{1, 2, 0, 0, 1},\n\t\t{1, 0, 4, 0, 1},\n\t\t{1, 0, 2, -6, 1},\n\t\t{1, 0, 2, -4, -1},\n\n\t\t// 591-600\n\t\t{1, 0, -1, -2, -1},\n\t\t{1, -1, 2, 4, 1},\n\t\t{1, -1, 2, -3, 1},\n\t\t{1, -1, 0, 4, 1},\n\t\t{1, -1, -2, 1, -1},\n\t\t{0, 1, 2, -2, 3},\n\t\t{3, 0, 0, -2, 0},\n\t\t{1, 0, 1, -2, 0},\n\t\t{0, 2, 0, -4, 0},\n\t\t{0, 0, 2, -4, 0},\n\n\t\t// 601-610\n\t\t{0, 0, 1, -1, 0},\n\t\t{0, 0, 0, 6, 0},\n\t\t{0, 2, 0, 0, -2},\n\t\t{0, 1, -2, 2, -3},\n\t\t{4, 0, 0, 2, 0},\n\t\t{3, 0, 0, -1, 0},\n\t\t{3, -1, 0, 2, 0},\n\t\t{2, 1, 0, 1, 0},\n\t\t{2, 1, 0, -6, 0},\n\t\t{2, -1, 2, 0, 0},\n\n\t\t// 611-620\n\t\t{1, 0, 2, -1, 0},\n\t\t{1, -1, 0, 1, 0},\n\t\t{1, -1, -2, -2, 0},\n\t\t{0, 1, 2, 2, 0},\n\t\t{0, 0, 2, -3, 0},\n\t\t{2, 2, 0, -2, -1},\n\t\t{2, -1, 
-2, 0, 1},\n\t\t{1, 2, 2, -4, 1},\n\t\t{0, 1, 4, -4, 2},\n\t\t{0, 0, 0, 3, 2},\n\n\t\t// 621-630\n\t\t{5, 0, 2, 0, 1},\n\t\t{4, 1, 2, -2, 2},\n\t\t{4, 0, -2, -2, 0},\n\t\t{3, 1, 2, 2, 2},\n\t\t{3, 1, 0, -2, 0},\n\t\t{3, 1, -2, -6, -2},\n\t\t{3, 0, 0, 0, -2},\n\t\t{3, 0, -2, -4, -2},\n\t\t{3, -1, 0, -3, 0},\n\t\t{3, -1, 0, -2, 0},\n\n\t\t// 631-640\n\t\t{2, 1, 2, 0, 0},\n\t\t{2, 1, 2, -4, 2},\n\t\t{2, 1, 2, -2, 0},\n\t\t{2, 1, 0, -3, 0},\n\t\t{2, 1, -2, 0, -2},\n\t\t{2, 0, 0, -4, 2},\n\t\t{2, 0, 0, -4, -2},\n\t\t{2, 0, -2, -5, -2},\n\t\t{2, -1, 2, 4, 2},\n\t\t{2, -1, 0, -2, 2},\n\n\t\t// 641-650\n\t\t{1, 3, -2, -2, -2},\n\t\t{1, 1, 0, 0, -2},\n\t\t{1, 1, 0, -6, 0},\n\t\t{1, 1, -2, 1, -2},\n\t\t{1, 1, -2, -1, -2},\n\t\t{1, 0, 2, 1, 0},\n\t\t{1, 0, 0, 3, 0},\n\t\t{1, 0, 0, -4, 2},\n\t\t{1, 0, -2, 4, -2},\n\t\t{1, -2, 0, -1, 0},\n\n\t\t// 651-NFLS\n\t\t{0, 1, -4, 2, -1},\n\t\t{1, 0, -2, 0, -3},\n\t\t{0, 0, 4, -4, 4},\n\t}\n\n\t// Number of frequencies: luni-solar\n\tconst NFLS = len(mfals)\n\n\t// Fundamental-argument multipliers: planetary terms\n\tvar mfapl = [...][14]float64{\n\n\t\t// 1-10\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, -2, 5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -5, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 3, -5, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 1, -1, 1, 0, -8, 12, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -8, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 8, -16, 4, 5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, -1, 2, 0, 0, 0, 0, 0},\n\n\t\t// 11-20\n\t\t{0, 0, 0, 0, 0, 0, 8, -13, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 2, -5, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, -5, 6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 4, -6, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, -1, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -8, 3, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 2, -4, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 
0, 0, 0, 0, 0, 6, -8, 3, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 2, -3, 0, 0, 0, 0, 0, 0},\n\n\t\t// 21-30\n\t\t{0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 1, 0, 0, -4, 8, -3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 4, -8, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 1, -1, 1, 0, 0, 0, -2, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, -1, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1},\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\n\t\t// 31-40\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 8, -13, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 5, -8, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -5, 0, 0, 1},\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 0, -1, 0, 0, 0},\n\n\t\t// 41-50\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -7, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 1, -1, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, 0, -2, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 8, -13, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 2, -1, 0, 0, 0, 0, 0, 2},\n\t\t{1, 0, 0, 0, 0, 0, -18, 16, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0, 2},\n\n\t\t// 51-60\n\t\t{0, 0, 1, -1, 1, 0, -5, 7, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, 0, 0, -10, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 0, 0, -5, 6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -1, 0, 0, 0, 2},\n\t\t{1, 0, 2, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, 
-2, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 1},\n\t\t{1, 0, -2, 0, -2, 0, 0, 4, -8, 3, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 0, 2, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\n\t\t// 61-70\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 8, -16, 4, 5, 0, 0, -2},\n\t\t{0, 0, 1, -1, 1, 0, 0, 3, -8, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 8, -11, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 8, -16, 4, 5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 4, -6, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -3, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -4, 0, 0, 0, 0, 0},\n\n\t\t// 71-80\n\t\t{0, 0, 0, 0, 0, 0, 6, -8, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 3, -2, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 8, -15, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 2, -5, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 1, -3, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, -2, 0, 0, 0, 2},\n\t\t{0, 0, 1, -1, 1, 0, 0, -5, 8, -3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -2, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 3, -5, 0, 0, 0, 0, 0, 0},\n\n\t\t// 81-90\n\t\t{2, 0, 0, -2, 1, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -8, 0, 0, 0, 0, 0, -1},\n\t\t{2, 0, 0, -2, 0, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 8, -13, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 0, 0, -2, 5, 0, 0, 0},\n\t\t{1, 0, 0, -1, 0, 0, -3, 4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2},\n\t\t{1, 0, 0, 0, -1, 0, -18, 16, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 0, 0, 2, -5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0},\n\n\t\t// 91-100\n\t\t{1, 0, 0, -2, 0, 0, 19, -21, 3, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, -8, 13, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 0, 1, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 7, -9, 0, 0, 
0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2},\n\t\t{1, 0, 0, 0, 1, 0, -18, 16, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 2, -4, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -16, 4, 5, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 4, -7, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 3, -7, 0, 0, 0, 0, 0, -2},\n\n\t\t// 101-110\n\t\t{0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1},\n\t\t{2, 0, 0, -2, 1, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, -2, 0, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, -1, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0, 0, 0, 2},\n\n\t\t// 111-120\n\t\t{0, 0, 0, 0, 1, 0, 0, 1, -2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2},\n\t\t{0, 0, 2, -2, 1, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -5, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 3, -3, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 4, -4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, -1, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, -6, 8, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -2, 2, 0, 0, 0, 0, 0},\n\n\t\t// 121-130\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -2, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -3, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -4, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 8, -10, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 1, -1, 1, 0, -3, 4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 6, -9, 0, 0, 0, 0, 0, -2},\n\t\t{1, 0, 0, -1, 1, 0, 0, -1, 0, 2, 0, 0, 0, 0},\n\n\t\t// 131-140\n\t\t{0, 0, 0, 0, 0, 0, 5, -7, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 5, -5, 0, 0, 0, 0, 
0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 3, -3, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, 0, -3, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 1, 0, 2, -3, 0, 0, 0, 0, 0, 0},\n\n\t\t// 141-150\n\t\t{1, 0, 0, -1, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, -3, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -4, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -4, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 9, -11, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 2, -3, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 8, -15, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, -4, 5, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 4, -6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, 0, -1, 0, 0, 0, 2},\n\n\t\t// 151-160\n\t\t{1, 0, 0, -1, 1, 0, -3, 4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, -4, 10, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 1, -1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, 0, -1, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -1, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -4, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -5, 0, 0, -2},\n\t\t{0, 0, 2, -2, 1, 0, -4, 4, 0, 0, 0, 0, 0, 0},\n\n\t\t// 161-170\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, 0, -1, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -3, 0, 0, 0, 0, 2},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 0, 0, 0, 2, 0},\n\t\t{0, 0, 0, 0, 0, 0, 4, -4, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -4, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 5, -8, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -2, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -9, 13, 0, 0, 0, 0, 0},\n\t\t{2, 0, 2, 0, 2, 0, 0, 2, 0, -3, 0, 0, 0, 0},\n\n\t\t// 
171-180\n\t\t{0, 0, 0, 0, 0, 0, 3, -6, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 1, -1, 2, 0, 0, -1, 0, 0, 2, 0, 0, 0},\n\t\t{1, 0, 0, -1, -1, 0, -3, 4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -6, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 6, -6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 1},\n\t\t{1, 0, 2, 0, 1, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{1, 0, -2, 0, -1, 0, 0, -1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, -2, 4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -5, 0, 0, 0, 0, 0},\n\n\t\t// 181-190\n\t\t{0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 2, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -8, 3, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 6, -10, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 7, -8, 3, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 1, 0, -3, 5, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, -1, 0, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, -5, 7, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -2, 0, 0, 0, 1},\n\n\t\t// 191-200\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 7, -10, 0, 0, 0, 0, 0, -2},\n\t\t{1, 0, 0, -2, 0, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 2, -5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 6, -8, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 1, -1, 1, 0, 0, -9, 15, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, -2, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, -1, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -6, 0, 0, 0, 0, 0},\n\n\t\t// 201-210\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -4, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, -1, 0, 0, 2},\n\t\t{2, 0, 0, -2, 1, 0, -6, 8, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -5, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 1, -1, 1, 0, 3, -6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, -2, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 8, -14, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 
0, 0, 1, 0, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\n\t\t// 211-220\n\t\t{0, 0, 0, 0, 1, 0, 0, 8, -15, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -6, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 7, -7, 0, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 1, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -1, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 1, 0, 0, 2},\n\t\t{2, 0, -1, -1, 0, 0, 0, 3, -7, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -7, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -3, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -3, 4, 0, 0, 0, 0, 0},\n\n\t\t// 221-230\n\t\t{2, 0, 0, -2, 0, 0, 0, -6, 8, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, 0, -5, 6, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 1, 0, 0, 1, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -9, 4, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -5, 0, 0, 0, 0, -2},\n\n\t\t// 231-240\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -4, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1},\n\t\t{0, 0, 0, 0, 0, 0, 7, -11, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 3, -5, 4, 0, 0, 0, 0, 2},\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, -1, 1, 0, 0, 0},\n\t\t{2, 0, 0, 0, 0, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 8, -15, 0, 0, 0, 0, -2},\n\t\t{0, 0, 1, -1, 2, 0, 0, -2, 2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 6, -6, 0, 0, 0, 0, 0, -1},\n\n\t\t// 241-250\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, -1, 1, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -7, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -8, 3, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 2, -4, 0, -3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 3, -5, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 
0, -3, 0, 0, 0, 2},\n\t\t{0, 0, 2, -2, 2, 0, -8, 11, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -8, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, -2, 0, 0, 0},\n\n\t\t// 251-260\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -9, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -5, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 7, -9, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 4, -7, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 2, -1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, -2, -2, -2, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -2, 5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 3, -3, 0, 0, 0, 0, 0, 1},\n\n\t\t// 261-270\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 2, -5, 0, 0, 2},\n\t\t{2, 0, 0, -2, -1, 0, 0, -2, 0, 0, 5, 0, 0, 0},\n\t\t{2, 0, 0, -2, -1, 0, -6, 8, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 0, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 8, -8, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, 2, -5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 1, 0, 3, -7, 4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, -2, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\n\t\t// 271-280\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, -2, 5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, -3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -1, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 2, -3, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -15, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, 1, 0, 0, 0, 2},\n\t\t{1, 0, 0, -1, 0, 0, 0, -3, 4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, -3, 7, -4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, 0, -2, 0, 0, 0, 2},\n\n\t\t// 281-290\n\t\t{0, 0, 0, 0, 0, 0, 3, -5, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 2, -2, 2, 0, -5, 6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 2, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 
0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 4, -4, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -8, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -5, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -7, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -11, 0, 0, 0, 0, -2},\n\n\t\t// 291-300\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -3, 0, 0, 0, 0, -2},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, -2, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 9, -12, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 4, -4, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 1, -1, 0, 0, -8, 12, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, -2, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 7, -7, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -6, 0, 0, 0, 0, -1},\n\n\t\t// 301-310\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -6, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 1, 0, -4, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 1, -1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 6, -9, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 1, -1, -1, 0, 0, 0, -2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -5, 0, 0, 0, 0, -2},\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 3, -1, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, -2, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -9, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -6, 0, 0, 0, 0, 0, 2},\n\n\t\t// 311-320\n\t\t{0, 0, 0, 0, 0, 0, 9, -9, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 0, 3, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 2, -4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -3, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1},\n\t\t{0, 0, 1, -1, 2, 0, 0, -1, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -9, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -3, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 2},\n\t\t{0, 0, 2, 0, 2, 0, 0, 4, -8, 3, 0, 0, 0, 0},\n\n\t\t// 321-330\n\t\t{0, 0, 2, 0, 2, 0, 0, -4, 8, -3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, 0, -3, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 
0},\n\t\t{2, 0, -1, -1, -1, 0, 0, -1, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 4, -3, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 4, -2, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 5, -10, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 8, -13, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 2, -2, 1, -1, 0, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 0, 0, 2, 0, 0},\n\n\t\t// 331-340\n\t\t{0, 0, 0, 0, 1, 0, 3, -5, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 0, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 0, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 9, -9, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 2, 0, 1, -1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -8, 11, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -2, 0, 0, 2, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, -1, 2, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -5, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 2, -6, 0, 0, 0, 0, 0, -2},\n\n\t\t// 341-350\n\t\t{0, 0, 0, 0, 0, 0, 0, 8, -15, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -2, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 7, -13, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, -2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 0, 0, 0, 2},\n\t\t{0, 0, 2, -2, 1, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 8, -8, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 8, -10, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 4, -2, 0, 0, 0, 0, 0, 1},\n\n\t\t// 351-360\n\t\t{0, 0, 0, 0, 0, 0, 3, -6, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 3, -4, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -4, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, -1, 0, 0, -5, 6, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -5, 0, 0, 0, 0, -2},\n\t\t{2, 0, -1, -1, -1, 0, 0, 3, -7, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -8, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 2, 0, -1, 1, 0, 0, 0, 0, 0, 0},\n\n\t\t// 361-370\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 4, -3, 0, 
0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -11, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 1, 0, 0, -6, 8, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -8, 1, 5, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -5, 0, 0, 0, 0, 2},\n\t\t{1, 0, -2, -2, -2, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 2, 0, 0, 0, -2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 2, 0, 0, 4, -8, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 2, 0, 0, -4, 8, -3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 1},\n\n\t\t// 371-380\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -7, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, 0, 0, -2, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, 0, -2, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -6, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 4, -5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 3, -5, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 7, -13, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -2, 0, 0, 0, 2},\n\n\t\t// 381-390\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, 0, 2, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, -8, 15, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, -2, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{2, 0, -1, -1, -1, 0, 0, -1, 0, 2, 0, 0, 0, 0},\n\t\t{1, 0, 2, -2, 2, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{1, 0, -1, 1, -1, 0, -18, 17, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 2, 0, 0, 1, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 2, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, -1, 0, -5, 6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 2, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\n\t\t// 391-400\n\t\t{0, 0, 0, 0, 1, 0, 2, -2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 8, -16, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2},\n\t\t{0, 0, 0, 0, 2, 0, 0, -1, 2, 0, 0, 0, 0, 0},\n\t\t{2, 0, -1, -1, -2, 0, 0, -1, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 6, -10, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, -2, 4, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 2},\n\t\t{2, 
0, 0, -2, -1, 0, 0, -2, 0, 4, -5, 0, 0, 0},\n\n\t\t// 401-410\n\t\t{2, 0, 0, -2, -1, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{2, 0, -1, -1, -1, 0, 0, -1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 1, -1, 1, 0, 0, -1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, -1, -1, 0, 0, -2, 2, 0, 0, 0, 0, 0},\n\t\t{1, 0, -1, -1, -1, 0, 20, -20, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 1, -2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, -2, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 5, -8, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0, 0},\n\n\t\t// 411-420\n\t\t{0, 0, 0, 0, 0, 0, 9, -11, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 5, -3, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -3, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 6, -7, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, -2, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 0, -2, 0, 0, 0},\n\t\t{0, 0, 1, -1, 2, 0, 0, -1, 0, -2, 5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -7, 0, 0, 0, 0, 0},\n\n\t\t// 421-430\n\t\t{0, 0, 0, 0, 0, 0, 1, -3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -8, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -6, 0, 0, 0, 0, -2},\n\t\t{1, 0, 0, -2, 0, 0, 20, -21, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 8, -12, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 2, 0, 0, -1, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 8, -12, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 9, -17, 0, 0, 0, 0, 0},\n\n\t\t// 431-440\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -6, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -8, 1, 5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -6, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -7, 0, 0, 0, 0, -2},\n\t\t{1, 0, 0, -1, 1, 0, 0, -3, 4, 0, 0, 0, 0, 0},\n\t\t{1, 0, -2, 0, -2, 0, -10, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, -9, 17, 0, 0, 0, 
0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, -4, 0, 0, 0, 0, 0, -2},\n\t\t{1, 0, -2, -2, -2, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{1, 0, -1, 1, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\n\t\t// 441-450\n\t\t{0, 0, 2, -2, 2, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 2, 0, 0, -1, 0, 0, 1, 0, 0, 0},\n\t\t{0, 0, 1, -1, 2, 0, -5, 7, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 2, -2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 4, -5, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 3, -4, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 2, -4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -10, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, 0, -4, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -5, 0, 0, 0, -2},\n\n\t\t// 451-460\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -5, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -2, 5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -2, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 2, -3, 0, 0, 0, 0, 0, 1},\n\t\t{1, 0, 0, -2, 0, 0, 0, 1, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -7, 4, 0, 0, 0, 0, 0},\n\t\t{2, 0, 2, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, -1, 0, 0, -1, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 1, 0, -2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -10, 0, 0, 0, 0, -2},\n\n\t\t// 461-470\n\t\t{1, 0, 0, -1, 1, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, 4, -8, 3, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, 1, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -4, 8, -3, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -3, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, -5, 5, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 1, -3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -4, 6, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 0, 0, -1, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, -5, 6, 0, 0, 0, 0, 0, 0},\n\n\t\t// 471-480\n\t\t{0, 0, 0, 0, 1, 0, 3, -4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, -2, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 7, -10, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 5, -5, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 4, -5, 0, 0, 
0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 3, -8, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 2, -5, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 1, -2, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 7, -9, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 7, -8, 0, 0, 0, 0, 2},\n\n\t\t// 481-490\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -8, 3, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, -2, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -4, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0, -1},\n\t\t{2, 0, 0, -2, -1, 0, 0, -6, 8, 0, 0, 0, 0, 0},\n\t\t{2, 0, -1, -1, 1, 0, 0, 3, -7, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -7, 9, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -5, 0, 0, 0, 0, -1},\n\n\t\t// 491-500\n\t\t{0, 0, 1, -1, 2, 0, -8, 12, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, 0, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 0, 0, 2, -2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 7, -8, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 1, 0, 0, -5, 6, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, -1, 0, 0, -2, 0, 3, -1, 0, 0, 0},\n\t\t{1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 1, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, -1, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\n\t\t// 501-510\n\t\t{1, 0, 0, -1, -1, 0, 0, -3, 4, 0, 0, 0, 0, 0},\n\t\t{1, 0, -1, 0, -1, 0, -3, 5, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -4, 4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, -8, 11, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 0, 0, 0, -9, 13, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, 1, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, 1, -4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 1, -3, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 7, -13, 0, 0, 0, 0, 0},\n\n\t\t// 511-520\n\t\t{0, 0, 0, 0, 1, 0, 0, 2, 0, -2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, -2, 2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, -3, 4, 
0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 1, 0, -4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 7, -11, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 6, -6, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 6, -4, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 5, -6, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 4, -2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -4, 0, 0, 0, 0, 0, 1},\n\n\t\t// 521-530\n\t\t{0, 0, 0, 0, 0, 0, 1, -4, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 9, -17, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 7, -7, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -8, 3, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -8, 3, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -8, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -7, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -4, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, 0, -4, 8, -3, 0, 0, 0, 0},\n\n\t\t// 531-540\n\t\t{2, 0, 0, -2, 0, 0, -2, 2, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, 0, 0, 0, 4, -8, 3, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, 0, 0, 0, -4, 8, -3, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, 0, 0, -1, 1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 0, 0, 17, -16, 0, -2, 0, 0, 0, 0},\n\t\t{1, 0, 0, -1, 0, 0, 0, -2, 2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 0, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -9, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, -4, 0, 0, 0, 0},\n\n\t\t// 541-550\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -2, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 2},\n\t\t{2, 0, 0, -2, 0, 0, 0, -4, 4, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 2, 2, 0, 0, 0},\n\t\t{1, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, 0, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 0, 0, 0, 4, -8, 3, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 0, 0, 0, -4, 8, -3, 0, 0, 0, 0},\n\n\t\t// 551-560\n\t\t{1, 0, 0, -2, 0, 0, -2, 2, 0, 
0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 0, 0, -4, 4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 3, -6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 0, -2, 2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, 0, 1, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, -4, 5, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, -3, 4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 2, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\n\t\t// 561-570\n\t\t{0, 0, 0, 0, 0, 0, 8, -9, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 3, -5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 0},\n\t\t{2, 0, -2, -2, -2, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, 1, 0, -10, 3, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, -1, 0, -10, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 2, 0, 2, -3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 2, 0, 2, -2, 0, 0, 0, 0, 0, 0},\n\n\t\t// 571-580\n\t\t{0, 0, 2, 0, 2, 0, -2, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 2, 0, -2, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, -1, 0, 2, 0, 0, 0, 0},\n\t\t{2, 0, 2, -2, 2, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{2, 0, 1, -3, 1, 0, -6, 7, 0, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, 2, -5, 0, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 5, -5, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 1, 5, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 0, 5, 0, 0, 0},\n\n\t\t// 581-590\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 0, 2, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, -4, 4, 0, 0, 0, 0, 0, 0},\n\t\t{2, 0, -2, 0, -2, 0, 0, 5, -9, 0, 0, 0, 0, 0},\n\t\t{2, 0, -1, -1, 0, 0, 0, -1, 0, 3, 0, 0, 0, 0},\n\t\t{1, 0, 2, 0, 2, 0, 1, -1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 2, 0, 2, 0, 0, 4, -8, 3, 0, 0, 0, 0},\n\t\t{1, 0, 2, 0, 2, 0, 0, -4, 8, -3, 0, 0, 0, 0},\n\t\t{1, 0, 2, 0, 2, 0, -1, 1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 2, -2, 2, 0, -3, 3, 0, 0, 0, 0, 0, 
0},\n\t\t{1, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0, 0},\n\n\t\t// 591-600\n\t\t{1, 0, 0, 0, 0, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 0, 0, 0, 2, 0, -2, 0, 0, 0, 0},\n\t\t{1, 0, -2, -2, -2, 0, 0, 1, 0, -1, 0, 0, 0, 0},\n\t\t{1, 0, -1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, -1, -1, 0, 0, 0, 8, -15, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, 2, 2, 0, 0, 2, 0, -2, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 1, -1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -2, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -10, 15, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 0, -1, 0, 2, 0, 0, 0, 0, 0, 0},\n\n\t\t// 601-610\n\t\t{0, 0, 1, -1, 2, 0, 0, -1, 0, 0, -1, 0, 0, 0},\n\t\t{0, 0, 1, -1, 2, 0, -3, 4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, -4, 6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, -1, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, 0, -2, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, -2, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, -1, 0, -5, 7, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 2, 0, 0, 0, 2, 0, -2, 0, 0, 0, 0},\n\n\t\t// 611-620\n\t\t{0, 0, 0, 2, 0, 0, -2, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 2, 0, -3, 5, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, -1, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 9, -13, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 8, -14, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 8, -11, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 6, -9, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 6, -8, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 6, -7, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 5, -6, 0, 0, 0, 0, 0, -2},\n\n\t\t// 621-630\n\t\t{0, 0, 0, 0, 0, 0, 5, -6, -4, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 5, -4, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 4, -8, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 4, -5, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -3, 0, 2, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 3, -1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 
0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 7, -12, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -9, 0, 0, 0, 0, -2},\n\n\t\t// 631-640\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -8, 1, 5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -4, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -10, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, 0, -4, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -9, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -8, 3, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -7, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -6, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -16, 4, 5, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -13, 0, 0, 0, 0, -2},\n\n\t\t// 641-650\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, -5, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -9, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -7, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 2, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, -3, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -8, 1, 5, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 1, -5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, -3, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -3, 5, 0, 0, 0},\n\n\t\t// 651-NFPL\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -3, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -6, 3, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -2, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0},\n\t}\n\n\t// Number of frequencies: planetary\n\tconst NFPL = len(mfapl)\n\n\t// Pointers into amplitudes array, one pointer per frequency\n\tvar nc = [...]int{\n\n\t\t// 1-100\n\t\t1, 21, 37, 51, 65, 79, 91, 103, 115, 127,\n\t\t139, 151, 163, 172, 184, 196, 207, 219, 231, 240,\n\t\t252, 261, 273, 285, 297, 309, 318, 327, 339, 351,\n\t\t363, 372, 384, 396, 405, 415, 423, 435, 444, 452,\n\t\t460, 467, 474, 482, 490, 498, 506, 513, 521, 528,\n\t\t536, 543, 551, 559, 566, 
574, 582, 590, 597, 605,\n\t\t613, 620, 628, 636, 644, 651, 658, 666, 674, 680,\n\t\t687, 695, 702, 710, 717, 725, 732, 739, 746, 753,\n\t\t760, 767, 774, 782, 790, 798, 805, 812, 819, 826,\n\t\t833, 840, 846, 853, 860, 867, 874, 881, 888, 895,\n\n\t\t// 101-200\n\t\t901, 908, 914, 921, 928, 934, 941, 948, 955, 962,\n\t\t969, 976, 982, 989, 996, 1003, 1010, 1017, 1024, 1031,\n\t\t1037, 1043, 1050, 1057, 1064, 1071, 1078, 1084, 1091, 1098,\n\t\t1104, 1112, 1118, 1124, 1131, 1138, 1145, 1151, 1157, 1164,\n\t\t1171, 1178, 1185, 1192, 1199, 1205, 1212, 1218, 1226, 1232,\n\t\t1239, 1245, 1252, 1259, 1266, 1272, 1278, 1284, 1292, 1298,\n\t\t1304, 1310, 1316, 1323, 1329, 1335, 1341, 1347, 1353, 1359,\n\t\t1365, 1371, 1377, 1383, 1389, 1396, 1402, 1408, 1414, 1420,\n\t\t1426, 1434, 1440, 1446, 1452, 1459, 1465, 1471, 1477, 1482,\n\t\t1488, 1493, 1499, 1504, 1509, 1514, 1520, 1527, 1532, 1538,\n\n\t\t// 201-300\n\t\t1543, 1548, 1553, 1558, 1564, 1569, 1574, 1579, 1584, 1589,\n\t\t1594, 1596, 1598, 1600, 1602, 1605, 1608, 1610, 1612, 1617,\n\t\t1619, 1623, 1625, 1627, 1629, 1632, 1634, 1640, 1642, 1644,\n\t\t1646, 1648, 1650, 1652, 1654, 1658, 1660, 1662, 1664, 1668,\n\t\t1670, 1672, 1673, 1675, 1679, 1681, 1683, 1684, 1686, 1688,\n\t\t1690, 1693, 1695, 1697, 1701, 1703, 1705, 1707, 1709, 1711,\n\t\t1712, 1715, 1717, 1721, 1723, 1725, 1727, 1729, 1731, 1733,\n\t\t1735, 1737, 1739, 1741, 1743, 1745, 1747, 1749, 1751, 1753,\n\t\t1755, 1757, 1759, 1761, 1762, 1764, 1766, 1768, 1769, 1771,\n\t\t1773, 1775, 1777, 1779, 1781, 1783, 1785, 1787, 1788, 1790,\n\n\t\t// 301-400\n\t\t1792, 1794, 1796, 1798, 1800, 1802, 1804, 1806, 1807, 1809,\n\t\t1811, 1815, 1817, 1819, 1821, 1823, 1825, 1827, 1829, 1831,\n\t\t1833, 1835, 1837, 1839, 1840, 1842, 1844, 1848, 1850, 1852,\n\t\t1854, 1856, 1858, 1859, 1860, 1862, 1864, 1866, 1868, 1869,\n\t\t1871, 1873, 1875, 1877, 1879, 1881, 1883, 1885, 1887, 1889,\n\t\t1891, 1892, 1896, 1898, 1900, 1901, 1903, 1905, 1907, 1909,\n\t\t1910, 1911, 1913, 
1915, 1919, 1921, 1923, 1927, 1929, 1931,\n\t\t1933, 1935, 1937, 1939, 1943, 1945, 1947, 1948, 1949, 1951,\n\t\t1953, 1955, 1957, 1958, 1960, 1962, 1964, 1966, 1968, 1970,\n\t\t1971, 1973, 1974, 1975, 1977, 1979, 1980, 1981, 1982, 1984,\n\n\t\t// 401-500\n\t\t1986, 1988, 1990, 1992, 1994, 1995, 1997, 1999, 2001, 2003,\n\t\t2005, 2007, 2008, 2009, 2011, 2013, 2015, 2017, 2019, 2021,\n\t\t2023, 2024, 2025, 2027, 2029, 2031, 2033, 2035, 2037, 2041,\n\t\t2043, 2045, 2046, 2047, 2049, 2051, 2053, 2055, 2056, 2057,\n\t\t2059, 2061, 2063, 2065, 2067, 2069, 2070, 2071, 2072, 2074,\n\t\t2076, 2078, 2080, 2082, 2084, 2086, 2088, 2090, 2092, 2094,\n\t\t2095, 2096, 2097, 2099, 2101, 2105, 2106, 2107, 2108, 2109,\n\t\t2110, 2111, 2113, 2115, 2119, 2121, 2123, 2125, 2127, 2129,\n\t\t2131, 2133, 2135, 2136, 2137, 2139, 2141, 2143, 2145, 2147,\n\t\t2149, 2151, 2153, 2155, 2157, 2159, 2161, 2163, 2165, 2167,\n\n\t\t// 501-600\n\t\t2169, 2171, 2173, 2175, 2177, 2179, 2181, 2183, 2185, 2186,\n\t\t2187, 2188, 2192, 2193, 2195, 2197, 2199, 2201, 2203, 2205,\n\t\t2207, 2209, 2211, 2213, 2217, 2219, 2221, 2223, 2225, 2227,\n\t\t2229, 2231, 2233, 2234, 2235, 2236, 2237, 2238, 2239, 2240,\n\t\t2241, 2244, 2246, 2248, 2250, 2252, 2254, 2256, 2258, 2260,\n\t\t2262, 2264, 2266, 2268, 2270, 2272, 2274, 2276, 2278, 2280,\n\t\t2282, 2284, 2286, 2288, 2290, 2292, 2294, 2296, 2298, 2300,\n\t\t2302, 2303, 2304, 2305, 2306, 2307, 2309, 2311, 2313, 2315,\n\t\t2317, 2319, 2321, 2323, 2325, 2327, 2329, 2331, 2333, 2335,\n\t\t2337, 2341, 2343, 2345, 2347, 2349, 2351, 2352, 2355, 2356,\n\n\t\t// 601-700\n\t\t2357, 2358, 2359, 2361, 2363, 2364, 2365, 2366, 2367, 2368,\n\t\t2369, 2370, 2371, 2372, 2373, 2374, 2376, 2378, 2380, 2382,\n\t\t2384, 2385, 2386, 2387, 2388, 2389, 2390, 2391, 2392, 2393,\n\t\t2394, 2395, 2396, 2397, 2398, 2399, 2400, 2401, 2402, 2403,\n\t\t2404, 2405, 2406, 2407, 2408, 2409, 2410, 2411, 2412, 2413,\n\t\t2414, 2415, 2417, 2418, 2430, 2438, 2445, 2453, 2460, 2468,\n\t\t2474, 2480, 
2488, 2496, 2504, 2512, 2520, 2527, 2535, 2543,\n\t\t2550, 2558, 2566, 2574, 2580, 2588, 2596, 2604, 2612, 2619,\n\t\t2627, 2634, 2642, 2648, 2656, 2664, 2671, 2679, 2685, 2693,\n\t\t2701, 2709, 2717, 2725, 2733, 2739, 2747, 2753, 2761, 2769,\n\n\t\t// 701-800\n\t\t2777, 2785, 2793, 2801, 2809, 2817, 2825, 2833, 2841, 2848,\n\t\t2856, 2864, 2872, 2878, 2884, 2892, 2898, 2906, 2914, 2922,\n\t\t2930, 2938, 2944, 2952, 2958, 2966, 2974, 2982, 2988, 2996,\n\t\t3001, 3009, 3017, 3025, 3032, 3039, 3045, 3052, 3059, 3067,\n\t\t3069, 3076, 3083, 3090, 3098, 3105, 3109, 3111, 3113, 3120,\n\t\t3124, 3128, 3132, 3136, 3140, 3144, 3146, 3150, 3158, 3161,\n\t\t3165, 3166, 3168, 3172, 3176, 3180, 3182, 3185, 3189, 3193,\n\t\t3194, 3197, 3200, 3204, 3208, 3212, 3216, 3219, 3221, 3222,\n\t\t3226, 3230, 3234, 3238, 3242, 3243, 3247, 3251, 3254, 3258,\n\t\t3262, 3266, 3270, 3274, 3275, 3279, 3283, 3287, 3289, 3293,\n\n\t\t// 801-900\n\t\t3296, 3300, 3303, 3307, 3311, 3315, 3319, 3321, 3324, 3327,\n\t\t3330, 3334, 3338, 3340, 3342, 3346, 3350, 3354, 3358, 3361,\n\t\t3365, 3369, 3373, 3377, 3381, 3385, 3389, 3393, 3394, 3398,\n\t\t3402, 3406, 3410, 3413, 3417, 3421, 3425, 3429, 3433, 3435,\n\t\t3439, 3443, 3446, 3450, 3453, 3457, 3458, 3461, 3464, 3468,\n\t\t3472, 3476, 3478, 3481, 3485, 3489, 3493, 3497, 3501, 3505,\n\t\t3507, 3511, 3514, 3517, 3521, 3524, 3525, 3527, 3529, 3533,\n\t\t3536, 3540, 3541, 3545, 3548, 3551, 3555, 3559, 3563, 3567,\n\t\t3569, 3570, 3574, 3576, 3578, 3582, 3586, 3590, 3593, 3596,\n\t\t3600, 3604, 3608, 3612, 3616, 3620, 3623, 3626, 3630, 3632,\n\n\t\t// 901-1000\n\t\t3636, 3640, 3643, 3646, 3648, 3652, 3656, 3660, 3664, 3667,\n\t\t3669, 3671, 3675, 3679, 3683, 3687, 3689, 3693, 3694, 3695,\n\t\t3699, 3703, 3705, 3707, 3710, 3713, 3717, 3721, 3725, 3729,\n\t\t3733, 3736, 3740, 3744, 3748, 3752, 3754, 3757, 3759, 3763,\n\t\t3767, 3770, 3773, 3777, 3779, 3783, 3786, 3790, 3794, 3798,\n\t\t3801, 3805, 3809, 3813, 3817, 3821, 3825, 3827, 3831, 3835,\n\t\t3836, 
3837, 3840, 3844, 3848, 3852, 3856, 3859, 3863, 3867,\n\t\t3869, 3871, 3875, 3879, 3883, 3887, 3890, 3894, 3898, 3901,\n\t\t3905, 3909, 3913, 3917, 3921, 3922, 3923, 3924, 3926, 3930,\n\t\t3932, 3936, 3938, 3940, 3944, 3948, 3952, 3956, 3959, 3963,\n\n\t\t// 1001-1100\n\t\t3965, 3969, 3973, 3977, 3979, 3981, 3982, 3986, 3989, 3993,\n\t\t3997, 4001, 4004, 4006, 4009, 4012, 4016, 4020, 4024, 4026,\n\t\t4028, 4032, 4036, 4040, 4044, 4046, 4050, 4054, 4058, 4060,\n\t\t4062, 4063, 4064, 4068, 4071, 4075, 4077, 4081, 4083, 4087,\n\t\t4089, 4091, 4095, 4099, 4101, 4103, 4105, 4107, 4111, 4115,\n\t\t4119, 4123, 4127, 4129, 4131, 4135, 4139, 4141, 4143, 4145,\n\t\t4149, 4153, 4157, 4161, 4165, 4169, 4173, 4177, 4180, 4183,\n\t\t4187, 4191, 4195, 4198, 4201, 4205, 4209, 4212, 4213, 4216,\n\t\t4217, 4221, 4223, 4226, 4230, 4234, 4236, 4240, 4244, 4248,\n\t\t4252, 4256, 4258, 4262, 4264, 4266, 4268, 4270, 4272, 4276,\n\n\t\t// 1101-1200\n\t\t4279, 4283, 4285, 4287, 4289, 4293, 4295, 4299, 4300, 4301,\n\t\t4305, 4309, 4313, 4317, 4319, 4323, 4325, 4329, 4331, 4333,\n\t\t4335, 4337, 4341, 4345, 4349, 4351, 4353, 4357, 4361, 4365,\n\t\t4367, 4369, 4373, 4377, 4381, 4383, 4387, 4389, 4391, 4395,\n\t\t4399, 4403, 4407, 4411, 4413, 4414, 4415, 4418, 4419, 4421,\n\t\t4423, 4427, 4429, 4431, 4433, 4435, 4437, 4439, 4443, 4446,\n\t\t4450, 4452, 4456, 4458, 4460, 4462, 4466, 4469, 4473, 4477,\n\t\t4481, 4483, 4487, 4489, 4491, 4493, 4497, 4499, 4501, 4504,\n\t\t4506, 4510, 4513, 4514, 4515, 4518, 4521, 4522, 4525, 4526,\n\t\t4527, 4530, 4533, 4534, 4537, 4541, 4542, 4543, 4544, 4545,\n\n\t\t// 1201-1300\n\t\t4546, 4547, 4550, 4553, 4554, 4555, 4558, 4561, 4564, 4567,\n\t\t4568, 4571, 4574, 4575, 4578, 4581, 4582, 4585, 4586, 4588,\n\t\t4590, 4592, 4596, 4598, 4602, 4604, 4608, 4612, 4613, 4616,\n\t\t4619, 4622, 4623, 4624, 4625, 4626, 4629, 4632, 4633, 4636,\n\t\t4639, 4640, 4641, 4642, 4643, 4644, 4645, 4648, 4649, 4650,\n\t\t4651, 4652, 4653, 4656, 4657, 4660, 4661, 4664, 4667, 
4670,\n\t\t4671, 4674, 4675, 4676, 4677, 4678, 4681, 4682, 4683, 4684,\n\t\t4687, 4688, 4689, 4692, 4693, 4696, 4697, 4700, 4701, 4702,\n\t\t4703, 4704, 4707, 4708, 4711, 4712, 4715, 4716, 4717, 4718,\n\t\t4719, 4720, 4721, 4722, 4723, 4726, 4729, 4730, 4733, 4736,\n\n\t\t// 1301-(NFLS+NFPL)\n\t\t4737, 4740, 4741, 4742, 4745, 4746, 4749, 4752, 4753,\n\t}\n\n\t// Amplitude coefficients (microarcsec); indexed using the nc\n\t// array.\n\tvar a = [...]float64{\n\n\t\t// 1-105\n\t\t-6844318.44, 9205236.26, 1328.67, 1538.18, 205833.11,\n\t\t153041.79, -3309.73, 853.32, 2037.98, -2301.27,\n\t\t81.46, 120.56, -20.39, -15.22, 1.73, -1.61, -0.10, 0.11,\n\t\t-0.02, -0.02, -523908.04, 573033.42, -544.75, -458.66,\n\t\t12814.01, 11714.49, 198.97, -290.91, 155.74, -143.27,\n\t\t-2.75, -1.03, -1.27, -1.16, 0.00, -0.01, -90552.22,\n\t\t97846.69, 111.23, 137.41, 2187.91, 2024.68, 41.44, -51.26,\n\t\t26.92, -24.46, -0.46, -0.28, -0.22, -0.20, 82168.76,\n\t\t-89618.24, -27.64, -29.05, -2004.36, -1837.32,\n\t\t-36.07, 48.00, -24.43, 22.41, 0.47, 0.24, 0.20, 0.18,\n\t\t58707.02, 7387.02, 470.05, -192.40, 164.33, -1312.21,\n\t\t-179.73, -28.93, -17.36, -1.83, -0.50, 3.57, 0.00, 0.13,\n\t\t-20557.78, 22438.42, -20.84, -17.40, 501.82, 459.68,\n\t\t59.20, -67.30, 6.08, -5.61, -1.36, -1.19, 28288.28,\n\t\t-674.99, -34.69, 35.80, -15.07, -632.54, -11.19, 0.78, -8.41,\n\t\t0.17, 0.01, 0.07, -15406.85, 20069.50, 15.12,\n\n\t\t// 106-219\n\t\t31.80, 448.76, 344.50, -5.77, 1.41, 4.59, -5.02, 0.17,\n\t\t0.24, -11991.74, 12902.66, 32.46, 36.70, 288.49,\n\t\t268.14, 5.70, -7.06, 3.57, -3.23, -0.06, -0.04,\n\t\t-8584.95, -9592.72, 4.42, -13.20, -214.50, 192.06,\n\t\t23.87, 29.83, 2.54, 2.40, 0.60, -0.48, 5095.50,\n\t\t-6918.22, 7.19, 3.92, -154.91, -113.94, 2.86, -1.04,\n\t\t-1.52, 1.73, -0.07, -0.10, -4910.93, -5331.13,\n\t\t0.76, 0.40, -119.21, 109.81, 2.16, 3.20, 1.46, 1.33,\n\t\t0.04, -0.02, -6245.02, -123.48, -6.68, -8.20, -2.76,\n\t\t139.64, 2.71, 0.15, 1.86, 2511.85, -3323.89, 
1.07,\n\t\t-0.90, -74.33, -56.17, 1.16, -0.01, -0.75, 0.83, -0.02,\n\t\t-0.04, 2307.58, 3143.98, -7.52, 7.50, 70.31, -51.60, 1.46,\n\t\t0.16, -0.69, -0.79, 0.02, -0.05, 2372.58, 2554.51, 5.93,\n\t\t-6.60, 57.12, -53.05, -0.96, -1.24, -0.71, -0.64, -0.01,\n\t\t-2053.16, 2636.13, 5.13, 7.80, 58.94, 45.91, -0.42,\n\t\t-0.12, 0.61, -0.66, 0.02, 0.03, -1825.49,\n\n\t\t// 220-339\n\t\t-2423.59, 1.23, -2.00, -54.19, 40.82, -1.07, -1.02,\n\t\t0.54, 0.61, -0.04, 0.04, 2521.07, -122.28, -5.97, 2.90,\n\t\t-2.73, -56.37, -0.82, 0.13, -0.75, -1534.09, 1645.01,\n\t\t6.29, 6.80, 36.78, 34.30, 0.92, -1.25, 0.46, -0.41,\n\t\t-0.02, -0.01, 1898.27, 47.70, -0.72, 2.50, 1.07, -42.45,\n\t\t-0.94, 0.02, -0.56, -1292.02, -1387.00, 0.00,\n\t\t0.00, -31.01, 28.89, 0.68, 0.00, 0.38, 0.35, -0.01,\n\t\t-0.01, -1234.96, 1323.81, 5.21, 5.90, 29.60, 27.61,\n\t\t0.74, -1.22, 0.37, -0.33, -0.02, -0.01, 1137.48,\n\t\t-1233.89, -0.04, -0.30, -27.59, -25.43, -0.61, 1.00,\n\t\t-0.34, 0.31, 0.01, 0.01, -813.13, -1075.60, 0.40,\n\t\t0.30, -24.05, 18.18, -0.40, -0.01, 0.24, 0.27, -0.01,\n\t\t0.01, 1163.22, -60.90, -2.94, 1.30, -1.36, -26.01, -0.58,\n\t\t0.07, -0.35, 1029.70, -55.55, -2.63, 1.10, -1.25, -23.02,\n\t\t-0.52, 0.06, -0.31, -556.26, 852.85, 3.16, -4.48, 19.06,\n\t\t12.44, -0.81, -0.27, 0.17, -0.21, 0.00, 0.02, -603.52,\n\n\t\t// 340-467\n\t\t-800.34, 0.44, 0.10, -17.90, 13.49, -0.08, -0.01, 0.18,\n\t\t0.20, -0.01, 0.01, -628.24, 684.99, -0.64, -0.50, 15.32,\n\t\t14.05, 3.18, -4.19, 0.19, -0.17, -0.09, -0.07, -866.48,\n\t\t-16.26, 0.52, -1.30, -0.36, 19.37, 0.43, -0.01, 0.26,\n\t\t-512.37, 695.54, -1.47, -1.40, 15.55, 11.46, -0.16, 0.03,\n\t\t0.15, -0.17, 0.01, 0.01, 506.65, 643.75, 2.54, -2.62,\n\t\t14.40, -11.33, -0.77, -0.06, -0.15, -0.16, 0.00, 0.01,\n\t\t664.57, 16.81, -0.40, 1.00, 0.38, -14.86, -3.71, -0.09,\n\t\t-0.20, 405.91, 522.11, 0.99, -1.50, 11.67, -9.08, -0.25,\n\t\t-0.02, -0.12, -0.13, -305.78, 326.60, 1.75, 1.90, 7.30,\n\t\t6.84, 0.20, -0.04, 300.99, -325.03, -0.44, -0.50, 
-7.27,\n\t\t-6.73, -1.01, 0.01, 0.00, 0.08, 0.00, 0.02, 438.51,\n\t\t10.47, -0.56, -0.20, 0.24, -9.81, -0.24, 0.01, -0.13,\n\t\t-264.02, 335.24, 0.99, 1.40, 7.49, 5.90, -0.27, -0.02,\n\t\t284.09, 307.03, 0.32, -0.40, 6.87, -6.35, -0.99, -0.01,\n\t\t-250.54, 327.11, 0.08, 0.40, 7.31, 5.60, -0.30, 230.72,\n\n\t\t// 468-595\n\t\t-304.46, 0.08, -0.10, -6.81, -5.16, 0.27, 229.78, 304.17,\n\t\t-0.60, 0.50, 6.80, -5.14, 0.33, 0.01, 256.30, -276.81,\n\t\t-0.28, -0.40, -6.19, -5.73, -0.14, 0.01, -212.82, 269.45,\n\t\t0.84, 1.20, 6.02, 4.76, 0.14, -0.02, 196.64, 272.05,\n\t\t-0.84, 0.90, 6.08, -4.40, 0.35, 0.02, 188.95, 272.22,\n\t\t-0.12, 0.30, 6.09, -4.22, 0.34, -292.37, -5.10, -0.32,\n\t\t-0.40, -0.11, 6.54, 0.14, 0.01, 161.79, -220.67, 0.24,\n\t\t0.10, -4.93, -3.62, -0.08, 261.54, -19.94, -0.95, 0.20,\n\t\t-0.45, -5.85, -0.13, 0.02, 142.16, -190.79, 0.20, 0.10,\n\t\t-4.27, -3.18, -0.07, 187.95, -4.11, -0.24, 0.30, -0.09,\n\t\t-4.20, -0.09, 0.01, 0.00, 0.00, -79.08, 167.90, 0.04,\n\t\t0.00, 3.75, 1.77, 121.98, 131.04, -0.08, 0.10, 2.93,\n\t\t-2.73, -0.06, -172.95, -8.11, -0.40, -0.20, -0.18, 3.87,\n\t\t0.09, 0.01, -160.15, -55.30, -14.04, 13.90, -1.23, 3.58,\n\t\t0.40, 0.31, -115.40, 123.20, 0.60, 0.70, 2.75, 2.58,\n\t\t0.08, -0.01, -168.26, -2.00, 0.20, -0.20, -0.04, 3.76,\n\n\t\t// 596-723\n\t\t0.08, -114.49, 123.20, 0.32, 0.40, 2.75, 2.56, 0.07,\n\t\t-0.01, 112.14, 120.70, 0.28, -0.30, 2.70, -2.51, -0.07,\n\t\t-0.01, 161.34, 4.03, 0.20, 0.20, 0.09, -3.61, -0.08,\n\t\t91.31, 126.64, -0.40, 0.40, 2.83, -2.04, -0.04, 0.01,\n\t\t105.29, 112.90, 0.44, -0.50, 2.52, -2.35, -0.07, -0.01,\n\t\t98.69, -106.20, -0.28, -0.30, -2.37, -2.21, -0.06, 0.01,\n\t\t86.74, -112.94, -0.08, -0.20, -2.53, -1.94, -0.05, -134.81,\n\t\t3.51, 0.20, -0.20, 0.08, 3.01, 0.07, 79.03, 107.31,\n\t\t-0.24, 0.20, 2.40, -1.77, -0.04, 0.01, 132.81, -10.77,\n\t\t-0.52, 0.10, -0.24, -2.97, -0.07, 0.01, -130.31, -0.90,\n\t\t0.04, 0.00, 0.00, 2.91, -78.56, 85.32, 0.00, 0.00,\n\t\t1.91, 1.76, 0.04, 0.00, 0.00, 
-41.53, 89.10, 0.02,\n\t\t0.00, 1.99, 0.93, 66.03, -71.00, -0.20, -0.20, -1.59,\n\t\t-1.48, -0.04, 60.50, 64.70, 0.36, -0.40, 1.45, -1.35,\n\t\t-0.04, -0.01, -52.27, -70.01, 0.00, 0.00, -1.57, 1.17,\n\t\t0.03, -52.95, 66.29, 0.32, 0.40, 1.48, 1.18, 0.04,\n\n\t\t// 724-851\n\t\t-0.01, 51.02, 67.25, 0.00, 0.00, 1.50, -1.14, -0.03,\n\t\t-55.66, -60.92, 0.16, -0.20, -1.36, 1.24, 0.03, -54.81,\n\t\t-59.20, -0.08, 0.20, -1.32, 1.23, 0.03, 51.32, -55.60,\n\t\t0.00, 0.00, -1.24, -1.15, -0.03, 48.29, 51.80, 0.20,\n\t\t-0.20, 1.16, -1.08, -0.03, -45.59, -49.00, -0.12, 0.10,\n\t\t-1.10, 1.02, 0.03, 40.54, -52.69, -0.04, -0.10, -1.18,\n\t\t-0.91, -0.02, -40.58, -49.51, -1.00, 1.00, -1.11, 0.91,\n\t\t0.04, 0.02, -43.76, 46.50, 0.36, 0.40, 1.04, 0.98,\n\t\t0.03, -0.01, 62.65, -5.00, -0.24, 0.00, -0.11, -1.40,\n\t\t-0.03, 0.01, -38.57, 49.59, 0.08, 0.10, 1.11, 0.86,\n\t\t0.02, -33.22, -44.04, 0.08, -0.10, -0.98, 0.74, 0.02,\n\t\t37.15, -39.90, -0.12, -0.10, -0.89, -0.83, -0.02, 36.68,\n\t\t-39.50, -0.04, -0.10, -0.88, -0.82, -0.02, -53.22, -3.91,\n\t\t-0.20, 0.00, -0.09, 1.19, 0.03, 32.43, -42.19, -0.04,\n\t\t-0.10, -0.94, -0.73, -0.02, -51.00, -2.30, -0.12, -0.10,\n\t\t0.00, 1.14, -29.53, -39.11, 0.04, 0.00, -0.87, 0.66,\n\n\t\t// 852-979\n\t\t0.02, 28.50, -38.92, -0.08, -0.10, -0.87, -0.64, -0.02,\n\t\t26.54, 36.95, -0.12, 0.10, 0.83, -0.59, -0.01, 26.54,\n\t\t34.59, 0.04, -0.10, 0.77, -0.59, -0.02, 28.35, -32.55,\n\t\t-0.16, 0.20, -0.73, -0.63, -0.01, -28.00, 30.40, 0.00,\n\t\t0.00, 0.68, 0.63, 0.01, -27.61, 29.40, 0.20, 0.20,\n\t\t0.66, 0.62, 0.02, 40.33, 0.40, -0.04, 0.10, 0.00,\n\t\t-0.90, -23.28, 31.61, -0.08, -0.10, 0.71, 0.52, 0.01,\n\t\t37.75, 0.80, 0.04, 0.10, 0.00, -0.84, 23.66, 25.80,\n\t\t0.00, 0.00, 0.58, -0.53, -0.01, 21.01, -27.91, 0.00,\n\t\t0.00, -0.62, -0.47, -0.01, -34.81, 2.89, 0.04, 0.00,\n\t\t0.00, 0.78, -23.49, -25.31, 0.00, 0.00, -0.57, 0.53,\n\t\t0.01, -23.47, 25.20, 0.16, 0.20, 0.56, 0.52, 0.02,\n\t\t19.58, 27.50, -0.12, 0.10, 0.62, -0.44, -0.01, 
-22.67,\n\t\t-24.40, -0.08, 0.10, -0.55, 0.51, 0.01, -19.97, 25.00,\n\t\t0.12, 0.20, 0.56, 0.45, 0.01, 21.28, -22.80, -0.08,\n\t\t-0.10, -0.51, -0.48, -0.01, -30.47, 0.91, 0.04, 0.00,\n\n\t\t// 980-1107\n\t\t0.00, 0.68, 18.58, 24.00, 0.04, -0.10, 0.54, -0.42,\n\t\t-0.01, -18.02, 24.40, -0.04, -0.10, 0.55, 0.40, 0.01,\n\t\t17.74, 22.50, 0.08, -0.10, 0.50, -0.40, -0.01, -19.41,\n\t\t20.70, 0.08, 0.10, 0.46, 0.43, 0.01, -18.64, 20.11,\n\t\t0.00, 0.00, 0.45, 0.42, 0.01, -16.75, 21.60, 0.04,\n\t\t0.10, 0.48, 0.37, 0.01, -18.42, -20.00, 0.00, 0.00,\n\t\t-0.45, 0.41, 0.01, -26.77, 1.41, 0.08, 0.00, 0.00,\n\t\t0.60, -26.17, -0.19, 0.00, 0.00, 0.00, 0.59, -15.52,\n\t\t20.51, 0.00, 0.00, 0.46, 0.35, 0.01, -25.42, -1.91,\n\t\t-0.08, 0.00, -0.04, 0.57, 0.45, -17.42, 18.10, 0.00,\n\t\t0.00, 0.40, 0.39, 0.01, 16.39, -17.60, -0.08, -0.10,\n\t\t-0.39, -0.37, -0.01, -14.37, 18.91, 0.00, 0.00, 0.42,\n\t\t0.32, 0.01, 23.39, -2.40, -0.12, 0.00, 0.00, -0.52,\n\t\t14.32, -18.50, -0.04, -0.10, -0.41, -0.32, -0.01, 15.69,\n\t\t17.08, 0.00, 0.00, 0.38, -0.35, -0.01, -22.99, 0.50,\n\t\t0.04, 0.00, 0.00, 0.51, 0.00, 0.00, 14.47, -17.60,\n\n\t\t// 1108-1235\n\t\t-0.01, 0.00, -0.39, -0.32, -13.33, 18.40, -0.04, -0.10,\n\t\t0.41, 0.30, 22.47, -0.60, -0.04, 0.00, 0.00, -0.50,\n\t\t-12.78, -17.41, 0.04, 0.00, -0.39, 0.29, 0.01, -14.10,\n\t\t-15.31, 0.04, 0.00, -0.34, 0.32, 0.01, 11.98, 16.21,\n\t\t-0.04, 0.00, 0.36, -0.27, -0.01, 19.65, -1.90, -0.08,\n\t\t0.00, 0.00, -0.44, 19.61, -1.50, -0.08, 0.00, 0.00,\n\t\t-0.44, 13.41, -14.30, -0.04, -0.10, -0.32, -0.30, -0.01,\n\t\t-13.29, 14.40, 0.00, 0.00, 0.32, 0.30, 0.01, 11.14,\n\t\t-14.40, -0.04, 0.00, -0.32, -0.25, -0.01, 12.24, -13.38,\n\t\t0.04, 0.00, -0.30, -0.27, -0.01, 10.07, -13.81, 0.04,\n\t\t0.00, -0.31, -0.23, -0.01, 10.46, 13.10, 0.08, -0.10,\n\t\t0.29, -0.23, -0.01, 16.55, -1.71, -0.08, 0.00, 0.00,\n\t\t-0.37, 9.75, -12.80, 0.00, 0.00, -0.29, -0.22, -0.01,\n\t\t9.11, 12.80, 0.00, 0.00, 0.29, -0.20, 0.00, 0.00,\n\t\t-6.44, -13.80, 0.00, 
0.00, -0.31, 0.14, -9.19, -12.00,\n\t\t0.00, 0.00, -0.27, 0.21, -10.30, 10.90, 0.08, 0.10,\n\n\t\t// 1236-1363\n\t\t0.24, 0.23, 0.01, 14.92, -0.80, -0.04, 0.00, 0.00,\n\t\t-0.33, 10.02, -10.80, 0.00, 0.00, -0.24, -0.22, -0.01,\n\t\t-9.75, 10.40, 0.04, 0.00, 0.23, 0.22, 0.01, 9.67,\n\t\t-10.40, -0.04, 0.00, -0.23, -0.22, -0.01, -8.28, -11.20,\n\t\t0.04, 0.00, -0.25, 0.19, 13.32, -1.41, -0.08, 0.00,\n\t\t0.00, -0.30, 8.27, 10.50, 0.04, 0.00, 0.23, -0.19,\n\t\t0.00, 0.00, 13.13, 0.00, 0.00, 0.00, 0.00, -0.29,\n\t\t-12.93, 0.70, 0.04, 0.00, 0.00, 0.29, 7.91, -10.20,\n\t\t0.00, 0.00, -0.23, -0.18, -7.84, -10.00, -0.04, 0.00,\n\t\t-0.22, 0.18, 7.44, 9.60, 0.00, 0.00, 0.21, -0.17,\n\t\t-7.64, 9.40, 0.08, 0.10, 0.21, 0.17, 0.01, -11.38,\n\t\t0.60, 0.04, 0.00, 0.00, 0.25, -7.48, 8.30, 0.00,\n\t\t0.00, 0.19, 0.17, -10.98, -0.20, 0.00, 0.00, 0.00,\n\t\t0.25, 10.98, 0.20, 0.00, 0.00, 0.00, -0.25, 7.40,\n\t\t-7.90, -0.04, 0.00, -0.18, -0.17, -6.09, 8.40, -0.04,\n\t\t0.00, 0.19, 0.14, -6.94, -7.49, 0.00, 0.00, -0.17,\n\n\t\t// 1364-1491\n\t\t0.16, 6.92, 7.50, 0.04, 0.00, 0.17, -0.15, 6.20,\n\t\t8.09, 0.00, 0.00, 0.18, -0.14, -6.12, 7.80, 0.04,\n\t\t0.00, 0.17, 0.14, 5.85, -7.50, 0.00, 0.00, -0.17,\n\t\t-0.13, -6.48, 6.90, 0.08, 0.10, 0.15, 0.14, 0.01,\n\t\t6.32, 6.90, 0.00, 0.00, 0.15, -0.14, 5.61, -7.20,\n\t\t0.00, 0.00, -0.16, -0.13, 9.07, 0.00, 0.00, 0.00,\n\t\t0.00, -0.20, 5.25, 6.90, 0.00, 0.00, 0.15, -0.12,\n\t\t-8.47, -0.40, 0.00, 0.00, 0.00, 0.19, 6.32, -5.39,\n\t\t-1.11, 1.10, -0.12, -0.14, 0.02, 0.02, 5.73, -6.10,\n\t\t-0.04, 0.00, -0.14, -0.13, 4.70, 6.60, -0.04, 0.00,\n\t\t0.15, -0.11, -4.90, -6.40, 0.00, 0.00, -0.14, 0.11,\n\t\t-5.33, 5.60, 0.04, 0.10, 0.13, 0.12, 0.01, -4.81,\n\t\t6.00, 0.04, 0.00, 0.13, 0.11, 5.13, 5.50, 0.04,\n\t\t0.00, 0.12, -0.11, 4.50, 5.90, 0.00, 0.00, 0.13,\n\t\t-0.10, -4.22, 6.10, 0.00, 0.00, 0.14, -4.53, 5.70,\n\t\t0.00, 0.00, 0.13, 0.10, 4.18, 5.70, 0.00, 0.00,\n\n\t\t// 1492-1619\n\t\t0.13, -4.75, -5.19, 0.00, 0.00, -0.12, 0.11, 
-4.06,\n\t\t5.60, 0.00, 0.00, 0.13, -3.98, 5.60, -0.04, 0.00,\n\t\t0.13, 4.02, -5.40, 0.00, 0.00, -0.12, 4.49, -4.90,\n\t\t-0.04, 0.00, -0.11, -0.10, -3.62, -5.40, -0.16, 0.20,\n\t\t-0.12, 0.00, 0.01, 4.38, 4.80, 0.00, 0.00, 0.11,\n\t\t-6.40, -0.10, 0.00, 0.00, 0.00, 0.14, -3.98, 5.00,\n\t\t0.04, 0.00, 0.11, -3.82, -5.00, 0.00, 0.00, -0.11,\n\t\t-3.71, 5.07, 0.00, 0.00, 0.11, 4.14, 4.40, 0.00,\n\t\t0.00, 0.10, -6.01, -0.50, -0.04, 0.00, 0.00, 0.13,\n\t\t-4.04, 4.39, 0.00, 0.00, 0.10, 3.45, -4.72, 0.00,\n\t\t0.00, -0.11, 3.31, 4.71, 0.00, 0.00, 0.11, 3.26,\n\t\t-4.50, 0.00, 0.00, -0.10, -3.26, -4.50, 0.00, 0.00,\n\t\t-0.10, -3.34, -4.40, 0.00, 0.00, -0.10, -3.74, -4.00,\n\t\t3.70, 4.00, 3.34, -4.30, 3.30, -4.30, -3.66, 3.90,\n\t\t0.04, 3.66, 3.90, 0.04, -3.62, -3.90, -3.61, 3.90,\n\t\t-0.20, 5.30, 0.00, 0.00, 0.12, 3.06, 4.30, 3.30,\n\n\t\t// 1620-1747\n\t\t4.00, 0.40, 0.20, 3.10, 4.10, -3.06, 3.90, -3.30,\n\t\t-3.60, -3.30, 3.36, 0.01, 3.14, 3.40, -4.57, -0.20,\n\t\t0.00, 0.00, 0.00, 0.10, -2.70, -3.60, 2.94, -3.20,\n\t\t-2.90, 3.20, 2.47, -3.40, 2.55, -3.30, 2.80, -3.08,\n\t\t2.51, 3.30, -4.10, 0.30, -0.12, -0.10, 4.10, 0.20,\n\t\t-2.74, 3.00, 2.46, 3.23, -3.66, 1.20, -0.20, 0.20,\n\t\t3.74, -0.40, -2.51, -2.80, -3.74, 2.27, -2.90, 0.00,\n\t\t0.00, -2.50, 2.70, -2.51, 2.60, -3.50, 0.20, 3.38,\n\t\t-2.22, -2.50, 3.26, -0.40, 1.95, -2.60, 3.22, -0.40,\n\t\t-0.04, -1.79, -2.60, 1.91, 2.50, 0.74, 3.05, -0.04,\n\t\t0.08, 2.11, -2.30, -2.11, 2.20, -1.87, -2.40, 2.03,\n\t\t-2.20, -2.03, 2.20, 2.98, 0.00, 0.00, 2.98, -1.71,\n\t\t2.40, 2.94, -0.10, -0.12, 0.10, 1.67, 2.40, -1.79,\n\t\t2.30, -1.79, 2.20, -1.67, 2.20, 1.79, -2.00, 1.87,\n\t\t-1.90, 1.63, -2.10, -1.59, 2.10, 1.55, -2.10, -1.55,\n\t\t2.10, -2.59, -0.20, -1.75, -1.90, -1.75, 1.90, -1.83,\n\n\t\t// 1748-1875\n\t\t-1.80, 1.51, 2.00, -1.51, -2.00, 1.71, 1.80, 1.31,\n\t\t2.10, -1.43, 2.00, 1.43, 2.00, -2.43, -1.51, 1.90,\n\t\t-1.47, 1.90, 2.39, 0.20, -2.39, 1.39, 1.90, 1.39,\n\t\t-1.80, 1.47, -1.60, 1.47, -1.60, 
1.43, -1.50, -1.31,\n\t\t1.60, 1.27, -1.60, -1.27, 1.60, 1.27, -1.60, 2.03,\n\t\t1.35, 1.50, -1.39, -1.40, 1.95, -0.20, -1.27, 1.49,\n\t\t1.19, 1.50, 1.27, 1.40, 1.15, 1.50, 1.87, -0.10,\n\t\t-1.12, -1.50, 1.87, -1.11, -1.50, -1.11, -1.50, 0.00,\n\t\t0.00, 1.19, 1.40, 1.27, -1.30, -1.27, -1.30, -1.15,\n\t\t1.40, -1.23, 1.30, -1.23, -1.30, 1.22, -1.29, 1.07,\n\t\t-1.40, 1.75, -0.20, -1.03, -1.40, -1.07, 1.20, -1.03,\n\t\t1.15, 1.07, 1.10, 1.51, -1.03, 1.10, 1.03, -1.10,\n\t\t0.00, 0.00, -1.03, -1.10, 0.91, -1.20, -0.88, -1.20,\n\t\t-0.88, 1.20, -0.95, 1.10, -0.95, -1.10, 1.43, -1.39,\n\t\t0.95, -1.00, -0.95, 1.00, -0.80, 1.10, 0.91, -1.00,\n\t\t-1.35, 0.88, 1.00, -0.83, 1.00, -0.91, 0.90, 0.91,\n\n\t\t// 1876-2003\n\t\t0.90, 0.88, -0.90, -0.76, -1.00, -0.76, 1.00, 0.76,\n\t\t1.00, -0.72, 1.00, 0.84, -0.90, 0.84, 0.90, 1.23,\n\t\t0.00, 0.00, -0.52, -1.10, -0.68, 1.00, 1.19, -0.20,\n\t\t1.19, 0.76, 0.90, 1.15, -0.10, 1.15, -0.10, 0.72,\n\t\t-0.90, -1.15, -1.15, 0.68, 0.90, -0.68, 0.90, -1.11,\n\t\t0.00, 0.00, 0.20, 0.79, 0.80, -1.11, -0.10, 0.00,\n\t\t0.00, -0.48, -1.00, -0.76, -0.80, -0.72, -0.80, -1.07,\n\t\t-0.10, 0.64, 0.80, -0.64, -0.80, 0.64, 0.80, 0.40,\n\t\t0.60, 0.52, -0.50, -0.60, -0.80, -0.71, 0.70, -0.99,\n\t\t0.99, 0.56, 0.80, -0.56, 0.80, 0.68, -0.70, 0.68,\n\t\t0.70, -0.95, -0.64, 0.70, 0.64, 0.70, -0.60, 0.70,\n\t\t-0.60, -0.70, -0.91, -0.10, -0.51, 0.76, -0.91, -0.56,\n\t\t0.70, 0.88, 0.88, -0.63, -0.60, 0.55, -0.60, -0.80,\n\t\t0.80, -0.80, -0.52, 0.60, 0.52, 0.60, 0.52, -0.60,\n\t\t-0.48, 0.60, 0.48, 0.60, 0.48, 0.60, -0.76, 0.44,\n\t\t-0.60, 0.52, -0.50, -0.52, 0.50, 0.40, 0.60, -0.40,\n\n\t\t// 2004-2131\n\t\t-0.60, 0.40, -0.60, 0.72, -0.72, -0.51, -0.50, -0.48,\n\t\t0.50, 0.48, -0.50, -0.48, 0.50, -0.48, 0.50, 0.48,\n\t\t-0.50, -0.48, -0.50, -0.68, -0.68, 0.44, 0.50, -0.64,\n\t\t-0.10, -0.64, -0.10, -0.40, 0.50, 0.40, 0.50, 0.40,\n\t\t0.50, 0.00, 0.00, -0.40, -0.50, -0.36, -0.50, 0.36,\n\t\t-0.50, 0.60, -0.60, 0.40, -0.40, 0.40, 0.40, 
-0.40,\n\t\t0.40, -0.40, 0.40, -0.56, -0.56, 0.36, -0.40, -0.36,\n\t\t0.40, 0.36, -0.40, -0.36, -0.40, 0.36, 0.40, 0.36,\n\t\t0.40, -0.52, 0.52, 0.52, 0.32, 0.40, -0.32, 0.40,\n\t\t-0.32, 0.40, -0.32, 0.40, 0.32, -0.40, -0.32, -0.40,\n\t\t0.32, -0.40, 0.28, -0.40, -0.28, 0.40, 0.28, -0.40,\n\t\t0.28, 0.40, 0.48, -0.48, 0.48, 0.36, -0.30, -0.36,\n\t\t-0.30, 0.00, 0.00, 0.20, 0.40, -0.44, 0.44, -0.44,\n\t\t-0.44, -0.44, -0.44, 0.32, -0.30, 0.32, 0.30, 0.24,\n\t\t0.30, -0.12, -0.10, -0.28, 0.30, 0.28, 0.30, 0.28,\n\t\t0.30, 0.28, -0.30, 0.28, -0.30, 0.28, -0.30, 0.28,\n\n\t\t// 2132-2259\n\t\t0.30, -0.28, 0.30, 0.40, 0.40, -0.24, 0.30, 0.24,\n\t\t-0.30, 0.24, -0.30, -0.24, -0.30, 0.24, 0.30, 0.24,\n\t\t-0.30, -0.24, 0.30, 0.24, -0.30, -0.24, -0.30, 0.24,\n\t\t-0.30, 0.24, 0.30, -0.24, 0.30, -0.24, 0.30, 0.20,\n\t\t-0.30, 0.20, -0.30, 0.20, -0.30, 0.20, 0.30, 0.20,\n\t\t-0.30, 0.20, -0.30, 0.20, 0.30, 0.20, 0.30, -0.20,\n\t\t-0.30, 0.20, -0.30, 0.20, -0.30, -0.36, -0.36, -0.36,\n\t\t-0.04, 0.30, 0.12, -0.10, -0.32, -0.24, 0.20, 0.24,\n\t\t0.20, 0.20, -0.20, -0.20, -0.20, -0.20, -0.20, 0.20,\n\t\t0.20, 0.20, -0.20, 0.20, 0.20, 0.20, 0.20, -0.20,\n\t\t-0.20, 0.00, 0.00, -0.20, -0.20, -0.20, 0.20, -0.20,\n\t\t0.20, 0.20, -0.20, -0.20, -0.20, 0.20, 0.20, 0.20,\n\t\t0.20, 0.20, -0.20, 0.20, -0.20, 0.28, 0.28, 0.28,\n\t\t0.28, 0.28, 0.28, -0.28, 0.28, 0.12, 0.00, 0.24,\n\t\t0.16, -0.20, 0.16, -0.20, 0.16, -0.20, 0.16, 0.20,\n\t\t-0.16, 0.20, 0.16, 0.20, -0.16, 0.20, -0.16, 0.20,\n\n\t\t// 2260-2387\n\t\t-0.16, 0.20, 0.16, -0.20, 0.16, 0.20, 0.16, -0.20,\n\t\t-0.16, 0.20, -0.16, -0.20, -0.16, 0.20, 0.16, 0.20,\n\t\t0.16, -0.20, 0.16, -0.20, 0.16, 0.20, 0.16, 0.20,\n\t\t0.16, 0.20, -0.16, -0.20, 0.16, 0.20, -0.16, 0.20,\n\t\t0.16, 0.20, -0.16, -0.20, 0.16, -0.20, 0.16, -0.20,\n\t\t-0.16, -0.20, 0.24, -0.24, -0.24, 0.24, 0.24, 0.12,\n\t\t0.20, 0.12, 0.20, -0.12, -0.20, 0.12, -0.20, 0.12,\n\t\t-0.20, -0.12, 0.20, -0.12, 0.20, -0.12, -0.20, 0.12,\n\t\t0.20, 0.12, 0.20, 0.12, 
-0.20, -0.12, 0.20, 0.12,\n\t\t-0.20, -0.12, 0.20, 0.12, 0.20, 0.00, 0.00, -0.12,\n\t\t0.20, -0.12, 0.20, 0.12, -0.20, -0.12, 0.20, 0.12,\n\t\t0.20, 0.00, -0.21, -0.20, 0.00, 0.00, 0.20, -0.20,\n\t\t-0.20, -0.20, 0.20, -0.16, -0.10, 0.00, 0.17, 0.16,\n\t\t0.16, 0.16, 0.16, -0.16, 0.16, 0.16, -0.16, 0.16,\n\t\t-0.16, 0.16, 0.12, 0.10, 0.12, -0.10, -0.12, 0.10,\n\t\t-0.12, 0.10, 0.12, -0.10, -0.12, 0.12, -0.12, 0.12,\n\n\t\t// 2388-2515\n\t\t-0.12, 0.12, -0.12, -0.12, -0.12, -0.12, -0.12, -0.12,\n\t\t-0.12, 0.12, 0.12, 0.12, 0.12, -0.12, -0.12, 0.12,\n\t\t0.12, 0.12, -0.12, 0.12, -0.12, -0.12, -0.12, 0.12,\n\t\t-0.12, -0.12, 0.12, 0.00, 0.11, 0.11, -122.67, 164.70,\n\t\t203.78, 273.50, 3.58, 2.74, 6.18, -4.56, 0.00, -0.04,\n\t\t0.00, -0.07, 57.44, -77.10, 95.82, 128.60, -1.77, -1.28,\n\t\t2.85, -2.14, 82.14, 89.50, 0.00, 0.00, 2.00, -1.84,\n\t\t-0.04, 47.73, -64.10, 23.79, 31.90, -1.45, -1.07, 0.69,\n\t\t-0.53, -46.38, 50.50, 0.00, 0.00, 1.13, 1.04, 0.02,\n\t\t-18.38, 0.00, 63.80, 0.00, 0.00, 0.41, 0.00, -1.43,\n\t\t59.07, 0.00, 0.00, 0.00, 0.00, -1.32, 57.28, 0.00,\n\t\t0.00, 0.00, 0.00, -1.28, -48.65, 0.00, -1.15, 0.00,\n\t\t0.00, 1.09, 0.00, 0.03, -18.30, 24.60, -17.30, -23.20,\n\t\t0.56, 0.41, -0.51, 0.39, -16.91, 26.90, 8.43, 13.30,\n\t\t0.60, 0.38, 0.31, -0.19, 1.23, -1.70, -19.13, -25.70,\n\t\t-0.03, -0.03, -0.58, 0.43, -0.72, 0.90, -17.34, -23.30,\n\n\t\t// 2516-2643\n\t\t0.03, 0.02, -0.52, 0.39, -19.49, -21.30, 0.00, 0.00,\n\t\t-0.48, 0.44, 0.01, 20.57, -20.10, 0.64, 0.70, -0.45,\n\t\t-0.46, 0.00, -0.01, 4.89, 5.90, -16.55, 19.90, 0.14,\n\t\t-0.11, 0.44, 0.37, 18.22, 19.80, 0.00, 0.00, 0.44,\n\t\t-0.41, -0.01, 4.89, -5.30, -16.51, -18.00, -0.11, -0.11,\n\t\t-0.41, 0.37, -17.86, 0.00, 17.10, 0.00, 0.00, 0.40,\n\t\t0.00, -0.38, 0.32, 0.00, 24.42, 0.00, 0.00, -0.01,\n\t\t0.00, -0.55, -23.79, 0.00, 0.00, 0.00, 0.00, 0.53,\n\t\t14.72, -16.00, -0.32, 0.00, -0.36, -0.33, -0.01, 0.01,\n\t\t3.34, -4.50, 11.86, 15.90, -0.11, -0.07, 0.35, -0.27,\n\t\t-3.26, 4.40, 
11.62, 15.60, 0.09, 0.07, 0.35, -0.26,\n\t\t-19.53, 0.00, 5.09, 0.00, 0.00, 0.44, 0.00, -0.11,\n\t\t-13.48, 14.70, 0.00, 0.00, 0.33, 0.30, 0.01, 10.86,\n\t\t-14.60, 3.18, 4.30, -0.33, -0.24, 0.09, -0.07, -11.30,\n\t\t-15.10, 0.00, 0.00, -0.34, 0.25, 0.01, 2.03, -2.70,\n\t\t10.82, 14.50, -0.07, -0.05, 0.32, -0.24, 17.46, 0.00,\n\n\t\t// 2644-2771\n\t\t0.00, 0.00, 0.00, -0.39, 16.43, 0.00, 0.52, 0.00,\n\t\t0.00, -0.37, 0.00, -0.01, 9.35, 0.00, 13.29, 0.00,\n\t\t0.00, -0.21, 0.00, -0.30, -10.42, 11.40, 0.00, 0.00,\n\t\t0.25, 0.23, 0.01, 0.44, 0.50, -10.38, 11.30, 0.02,\n\t\t-0.01, 0.25, 0.23, -14.64, 0.00, 0.00, 0.00, 0.00,\n\t\t0.33, 0.56, 0.80, -8.67, 11.70, 0.02, -0.01, 0.26,\n\t\t0.19, 13.88, 0.00, -2.47, 0.00, 0.00, -0.31, 0.00,\n\t\t0.06, -1.99, 2.70, 7.72, 10.30, 0.06, 0.04, 0.23,\n\t\t-0.17, -0.20, 0.00, 13.05, 0.00, 0.00, 0.00, 0.00,\n\t\t-0.29, 6.92, -9.30, 3.34, 4.50, -0.21, -0.15, 0.10,\n\t\t-0.07, -6.60, 0.00, 10.70, 0.00, 0.00, 0.15, 0.00,\n\t\t-0.24, -8.04, -8.70, 0.00, 0.00, -0.19, 0.18, -10.58,\n\t\t0.00, -3.10, 0.00, 0.00, 0.24, 0.00, 0.07, -7.32,\n\t\t8.00, -0.12, -0.10, 0.18, 0.16, 1.63, 1.70, 6.96,\n\t\t-7.60, 0.03, -0.04, -0.17, -0.16, -3.62, 0.00, 9.86,\n\t\t0.00, 0.00, 0.08, 0.00, -0.22, 0.20, -0.20, -6.88,\n\n\t\t// 2772-2899\n\t\t-7.50, 0.00, 0.00, -0.17, 0.15, -8.99, 0.00, 4.02,\n\t\t0.00, 0.00, 0.20, 0.00, -0.09, -1.07, 1.40, -5.69,\n\t\t-7.70, 0.03, 0.02, -0.17, 0.13, 6.48, -7.20, -0.48,\n\t\t-0.50, -0.16, -0.14, -0.01, 0.01, 5.57, -7.50, 1.07,\n\t\t1.40, -0.17, -0.12, 0.03, -0.02, 8.71, 0.00, 3.54,\n\t\t0.00, 0.00, -0.19, 0.00, -0.08, 0.40, 0.00, 9.27,\n\t\t0.00, 0.00, -0.01, 0.00, -0.21, -6.13, 6.70, -1.19,\n\t\t-1.30, 0.15, 0.14, -0.03, 0.03, 5.21, -5.70, -2.51,\n\t\t-2.60, -0.13, -0.12, -0.06, 0.06, 5.69, -6.20, -0.12,\n\t\t-0.10, -0.14, -0.13, -0.01, 2.03, -2.70, 4.53, 6.10,\n\t\t-0.06, -0.05, 0.14, -0.10, 5.01, 5.50, -2.51, 2.70,\n\t\t0.12, -0.11, 0.06, 0.06, -1.91, 2.60, -4.38, -5.90,\n\t\t0.06, 0.04, -0.13, 0.10, 4.65, -6.30, 0.00, 
0.00,\n\t\t-0.14, -0.10, -5.29, 5.70, 0.00, 0.00, 0.13, 0.12,\n\t\t-2.23, -4.00, -4.65, 4.20, -0.09, 0.05, 0.10, 0.10,\n\t\t-4.53, 6.10, 0.00, 0.00, 0.14, 0.10, 2.47, 2.70,\n\n\t\t// 2900-3027\n\t\t-4.46, 4.90, 0.06, -0.06, 0.11, 0.10, -5.05, 5.50,\n\t\t0.84, 0.90, 0.12, 0.11, 0.02, -0.02, 4.97, -5.40,\n\t\t-1.71, 0.00, -0.12, -0.11, 0.00, 0.04, -0.99, -1.30,\n\t\t4.22, -5.70, -0.03, 0.02, -0.13, -0.09, 0.99, 1.40,\n\t\t4.22, -5.60, 0.03, -0.02, -0.13, -0.09, -4.69, -5.20,\n\t\t0.00, 0.00, -0.12, 0.10, -3.42, 0.00, 6.09, 0.00,\n\t\t0.00, 0.08, 0.00, -0.14, -4.65, -5.10, 0.00, 0.00,\n\t\t-0.11, 0.10, 0.00, 0.00, -4.53, -5.00, 0.00, 0.00,\n\t\t-0.11, 0.10, -2.43, -2.70, -3.82, 4.20, -0.06, 0.05,\n\t\t0.10, 0.09, 0.00, 0.00, -4.53, 4.90, 0.00, 0.00,\n\t\t0.11, 0.10, -4.49, -4.90, 0.00, 0.00, -0.11, 0.10,\n\t\t2.67, -2.90, -3.62, -3.90, -0.06, -0.06, -0.09, 0.08,\n\t\t3.94, -5.30, 0.00, 0.00, -0.12, -3.38, 3.70, -2.78,\n\t\t-3.10, 0.08, 0.08, -0.07, 0.06, 3.18, -3.50, -2.82,\n\t\t-3.10, -0.08, -0.07, -0.07, 0.06, -5.77, 0.00, 1.87,\n\t\t0.00, 0.00, 0.13, 0.00, -0.04, 3.54, -4.80, -0.64,\n\n\t\t// 3028-3155\n\t\t-0.90, -0.11, 0.00, -0.02, -3.50, -4.70, 0.68, -0.90,\n\t\t-0.11, 0.00, -0.02, 5.49, 0.00, 0.00, 0.00, 0.00,\n\t\t-0.12, 1.83, -2.50, 2.63, 3.50, -0.06, 0.00, 0.08,\n\t\t3.02, -4.10, 0.68, 0.90, -0.09, 0.00, 0.02, 0.00,\n\t\t0.00, 5.21, 0.00, 0.00, 0.00, 0.00, -0.12, -3.54,\n\t\t3.80, 2.70, 3.60, -1.35, 1.80, 0.08, 0.00, 0.04,\n\t\t-2.90, 3.90, 0.68, 0.90, 0.09, 0.00, 0.02, 0.80,\n\t\t-1.10, -2.78, -3.70, -0.02, 0.00, -0.08, 4.10, 0.00,\n\t\t-2.39, 0.00, 0.00, -0.09, 0.00, 0.05, -1.59, 2.10,\n\t\t2.27, 3.00, 0.05, 0.00, 0.07, -2.63, 3.50, -0.48,\n\t\t-0.60, -2.94, -3.20, -2.94, 3.20, 2.27, -3.00, -1.11,\n\t\t-1.50, -0.07, 0.00, -0.03, -0.56, -0.80, -2.35, 3.10,\n\t\t0.00, -0.60, -3.42, 1.90, -0.12, -0.10, 2.63, -2.90,\n\t\t2.51, 2.80, -0.64, 0.70, -0.48, -0.60, 2.19, -2.90,\n\t\t0.24, -0.30, 2.15, 2.90, 2.15, -2.90, 0.52, 0.70,\n\t\t2.07, -2.80, -3.10, 0.00, 
1.79, 0.00, 0.00, 0.07,\n\n\t\t// 3156-3283\n\t\t0.00, -0.04, 0.88, 0.00, -3.46, 2.11, 2.80, -0.36,\n\t\t0.50, 3.54, -0.20, -3.50, -1.39, 1.50, -1.91, -2.10,\n\t\t-1.47, 2.00, 1.39, 1.90, 2.07, -2.30, 0.91, 1.00,\n\t\t1.99, -2.70, 3.30, 0.00, 0.60, -0.44, -0.70, -1.95,\n\t\t2.60, 2.15, -2.40, -0.60, -0.70, 3.30, 0.84, 0.00,\n\t\t-3.10, -3.10, 0.00, -0.72, -0.32, 0.40, -1.87, -2.50,\n\t\t1.87, -2.50, 0.32, 0.40, -0.24, 0.30, -1.87, -2.50,\n\t\t-0.24, -0.30, 1.87, -2.50, -2.70, 0.00, 1.55, 2.03,\n\t\t2.20, -2.98, -1.99, -2.20, 0.12, -0.10, -0.40, 0.50,\n\t\t1.59, 2.10, 0.00, 0.00, -1.79, 2.00, -1.03, 1.40,\n\t\t-1.15, -1.60, 0.32, 0.50, 1.39, -1.90, 2.35, -1.27,\n\t\t1.70, 0.60, 0.80, -0.32, -0.40, 1.35, -1.80, 0.44,\n\t\t0.00, 2.23, -0.84, 0.90, -1.27, -1.40, -1.47, 1.60,\n\t\t-0.28, -0.30, -0.28, 0.40, -1.27, -1.70, 0.28, -0.40,\n\t\t-1.43, -1.50, 0.00, 0.00, -1.27, -1.70, 2.11, -0.32,\n\t\t-0.40, -1.23, 1.60, 1.19, -1.30, -0.72, -0.80, 0.72,\n\n\t\t// 3284-3411\n\t\t-0.80, -1.15, -1.30, -1.35, -1.50, -1.19, -1.60, -0.12,\n\t\t0.20, 1.79, 0.00, -0.88, -0.28, 0.40, 1.11, 1.50,\n\t\t-1.83, 0.00, 0.56, -0.12, 0.10, -1.27, -1.40, 0.00,\n\t\t0.00, 1.15, 1.50, -0.12, 0.20, 1.11, 1.50, 0.36,\n\t\t-0.50, -1.07, -1.40, -1.11, 1.50, 1.67, 0.00, 0.80,\n\t\t-1.11, 0.00, 1.43, 1.23, -1.30, -0.24, -1.19, -1.30,\n\t\t-0.24, 0.20, -0.44, -0.90, -0.95, 1.10, 1.07, -1.40,\n\t\t1.15, -1.30, 1.03, -1.10, -0.56, -0.60, -0.68, 0.90,\n\t\t-0.76, -1.00, -0.24, -0.30, 0.95, -1.30, 0.56, 0.70,\n\t\t0.84, -1.10, -0.56, 0.00, -1.55, 0.91, -1.30, 0.28,\n\t\t0.30, 0.16, -0.20, 0.95, 1.30, 0.40, -0.50, -0.88,\n\t\t-1.20, 0.95, -1.10, -0.48, -0.50, 0.00, 0.00, -1.07,\n\t\t1.20, 0.44, -0.50, 0.95, 1.10, 0.00, 0.00, 0.92,\n\t\t-1.30, 0.95, 1.00, -0.52, 0.60, 1.59, 0.24, -0.40,\n\t\t0.91, 1.20, 0.84, -1.10, -0.44, -0.60, 0.84, 1.10,\n\t\t-0.44, 0.60, -0.44, 0.60, -0.84, -1.10, -0.80, 0.00,\n\n\t\t// 3412-3539\n\t\t1.35, 0.76, 0.20, -0.91, -1.00, 0.20, -0.30, -0.91,\n\t\t-1.20, -0.95, 1.00, -0.48, 
-0.50, 0.88, 1.00, 0.48,\n\t\t-0.50, -0.95, -1.10, 0.20, -0.20, -0.99, 1.10, -0.84,\n\t\t1.10, -0.24, -0.30, 0.20, -0.30, 0.84, 1.10, -1.39,\n\t\t0.00, -0.28, -0.16, 0.20, 0.84, 1.10, 0.00, 0.00,\n\t\t1.39, 0.00, 0.00, -0.95, 1.00, 1.35, -0.99, 0.00,\n\t\t0.88, -0.52, 0.00, -1.19, 0.20, 0.20, 0.76, -1.00,\n\t\t0.00, 0.00, 0.76, 1.00, 0.00, 0.00, 0.76, 1.00,\n\t\t-0.76, 1.00, 0.00, 0.00, 1.23, 0.76, 0.80, -0.32,\n\t\t0.40, -0.72, 0.80, -0.40, -0.40, 0.00, 0.00, -0.80,\n\t\t-0.90, -0.68, 0.90, -0.16, -0.20, -0.16, -0.20, 0.68,\n\t\t-0.90, -0.36, 0.50, -0.56, -0.80, 0.72, -0.90, 0.44,\n\t\t-0.60, -0.48, -0.70, -0.16, 0.00, -1.11, 0.32, 0.00,\n\t\t-1.07, 0.60, -0.80, -0.28, -0.40, -0.64, 0.00, 0.91,\n\t\t1.11, 0.64, -0.90, 0.76, -0.80, 0.00, 0.00, -0.76,\n\t\t-0.80, 1.03, 0.00, -0.36, -0.64, -0.70, 0.36, -0.40,\n\n\t\t// 3540-3667\n\t\t1.07, 0.36, -0.50, -0.52, -0.70, 0.60, 0.00, 0.88,\n\t\t0.95, 0.00, 0.48, 0.16, -0.20, 0.60, 0.80, 0.16,\n\t\t-0.20, -0.60, -0.80, 0.00, -1.00, 0.12, 0.20, 0.16,\n\t\t-0.20, 0.68, 0.70, 0.59, -0.80, -0.99, -0.56, -0.60,\n\t\t0.36, -0.40, -0.68, -0.70, -0.68, -0.70, -0.36, -0.50,\n\t\t-0.44, 0.60, 0.64, 0.70, -0.12, 0.10, -0.52, 0.60,\n\t\t0.36, 0.40, 0.00, 0.00, 0.95, -0.84, 0.00, 0.44,\n\t\t0.56, 0.60, 0.32, -0.30, 0.00, 0.00, 0.60, 0.70,\n\t\t0.00, 0.00, 0.60, 0.70, -0.12, -0.20, 0.52, -0.70,\n\t\t0.00, 0.00, 0.56, 0.70, -0.12, 0.10, -0.52, -0.70,\n\t\t0.00, 0.00, 0.88, -0.76, 0.00, -0.44, 0.00, 0.00,\n\t\t-0.52, -0.70, 0.52, -0.70, 0.36, -0.40, -0.44, -0.50,\n\t\t0.00, 0.00, 0.60, 0.60, 0.84, 0.00, 0.12, -0.24,\n\t\t0.00, 0.80, -0.56, 0.60, -0.32, -0.30, 0.48, -0.50,\n\t\t0.28, -0.30, -0.48, -0.50, 0.12, 0.20, 0.48, -0.60,\n\t\t0.48, 0.60, -0.12, 0.20, 0.24, 0.00, 0.76, -0.52,\n\n\t\t// 3668-3795\n\t\t-0.60, -0.52, 0.60, 0.48, -0.50, -0.24, -0.30, 0.12,\n\t\t-0.10, 0.48, 0.60, 0.52, -0.20, 0.36, 0.40, -0.44,\n\t\t0.50, -0.24, -0.30, -0.48, -0.60, -0.44, -0.60, -0.12,\n\t\t0.10, 0.76, 0.76, 0.20, -0.20, 0.48, 0.50, 0.40,\n\t\t-0.50, 
-0.24, -0.30, 0.44, -0.60, 0.44, -0.60, 0.36,\n\t\t0.00, -0.64, 0.72, 0.00, -0.12, 0.00, -0.10, -0.40,\n\t\t-0.60, -0.20, -0.20, -0.44, 0.50, -0.44, 0.50, 0.20,\n\t\t0.20, -0.44, -0.50, 0.20, -0.20, -0.20, 0.20, -0.44,\n\t\t-0.50, 0.64, 0.00, 0.32, -0.36, 0.50, -0.20, -0.30,\n\t\t0.12, -0.10, 0.48, 0.50, -0.12, 0.30, -0.36, -0.50,\n\t\t0.00, 0.00, 0.48, 0.50, -0.48, 0.50, 0.68, 0.00,\n\t\t-0.12, 0.56, -0.40, 0.44, -0.50, -0.12, -0.10, 0.24,\n\t\t0.30, -0.40, 0.40, 0.64, 0.00, -0.24, 0.64, 0.00,\n\t\t-0.20, 0.00, 0.00, 0.44, -0.50, 0.44, 0.50, -0.12,\n\t\t0.20, -0.36, -0.50, 0.12, 0.00, 0.64, -0.40, 0.50,\n\t\t0.00, 0.10, 0.00, 0.00, -0.40, 0.50, 0.00, 0.00,\n\n\t\t// 3796-3923\n\t\t-0.40, -0.50, 0.56, 0.00, 0.28, 0.00, 0.10, 0.36,\n\t\t0.50, 0.00, -0.10, 0.36, -0.50, 0.36, 0.50, 0.00,\n\t\t-0.10, 0.24, -0.20, -0.36, -0.40, 0.16, 0.20, 0.40,\n\t\t-0.40, 0.00, 0.00, -0.36, -0.50, -0.36, -0.50, -0.32,\n\t\t-0.50, -0.12, 0.10, 0.20, 0.20, -0.36, 0.40, -0.60,\n\t\t0.60, 0.28, 0.00, 0.52, 0.12, -0.10, 0.40, 0.40,\n\t\t0.00, -0.50, 0.20, -0.20, -0.32, 0.40, 0.16, 0.20,\n\t\t-0.16, 0.20, 0.32, 0.40, 0.56, 0.00, -0.12, 0.32,\n\t\t-0.40, -0.16, -0.20, 0.00, 0.00, 0.40, 0.40, -0.40,\n\t\t-0.40, -0.40, 0.40, -0.36, 0.40, 0.12, 0.10, 0.00,\n\t\t0.10, 0.36, 0.40, 0.00, -0.10, 0.36, 0.40, -0.36,\n\t\t0.40, 0.00, 0.10, 0.32, 0.00, 0.44, 0.12, 0.20,\n\t\t0.28, -0.40, 0.00, 0.00, 0.36, 0.40, 0.32, -0.40,\n\t\t-0.16, 0.12, 0.10, 0.32, -0.40, 0.20, 0.30, -0.24,\n\t\t0.30, 0.00, 0.10, 0.32, 0.40, 0.00, -0.10, -0.32,\n\t\t-0.40, -0.32, 0.40, 0.00, 0.10, -0.52, -0.52, 0.52,\n\n\t\t// 3924-4051\n\t\t0.32, -0.40, 0.00, 0.00, 0.32, 0.40, 0.32, -0.40,\n\t\t0.00, 0.00, -0.32, -0.40, -0.32, 0.40, 0.32, 0.40,\n\t\t0.00, 0.00, 0.32, 0.40, 0.00, 0.00, -0.32, -0.40,\n\t\t0.00, 0.00, 0.32, 0.40, 0.16, 0.20, 0.32, -0.30,\n\t\t-0.16, 0.00, -0.48, -0.20, 0.20, -0.28, -0.30, 0.28,\n\t\t-0.40, 0.00, 0.00, 0.28, -0.40, 0.00, 0.00, 0.28,\n\t\t-0.40, 0.00, 0.00, -0.28, -0.40, 0.28, 0.40, -0.28,\n\t\t-0.40, 
-0.48, -0.20, 0.20, 0.24, 0.30, 0.44, 0.00,\n\t\t0.16, 0.24, 0.30, 0.16, -0.20, 0.24, 0.30, -0.12,\n\t\t0.20, 0.20, 0.30, -0.16, 0.20, 0.00, 0.00, 0.44,\n\t\t-0.32, 0.30, 0.24, 0.00, -0.36, 0.36, 0.00, 0.24,\n\t\t0.12, -0.20, 0.20, 0.30, -0.12, 0.00, -0.28, 0.30,\n\t\t-0.24, 0.30, 0.12, 0.10, -0.28, -0.30, -0.28, 0.30,\n\t\t0.00, 0.00, -0.28, -0.30, 0.00, 0.00, -0.28, -0.30,\n\t\t0.00, 0.00, 0.28, 0.30, 0.00, 0.00, -0.28, -0.30,\n\t\t-0.28, 0.30, 0.00, 0.00, -0.28, -0.30, 0.00, 0.00,\n\n\t\t// 4052-4179\n\t\t0.28, 0.30, 0.00, 0.00, -0.28, 0.30, 0.28, -0.30,\n\t\t-0.28, 0.30, 0.40, 0.40, -0.24, 0.30, 0.00, -0.10,\n\t\t0.16, 0.00, 0.36, -0.20, 0.30, -0.12, -0.10, -0.24,\n\t\t-0.30, 0.00, 0.00, -0.24, 0.30, -0.24, 0.30, 0.00,\n\t\t0.00, -0.24, 0.30, -0.24, 0.30, 0.24, -0.30, 0.00,\n\t\t0.00, 0.24, -0.30, 0.00, 0.00, 0.24, 0.30, 0.24,\n\t\t-0.30, 0.24, 0.30, -0.24, 0.30, -0.24, 0.30, -0.20,\n\t\t0.20, -0.16, -0.20, 0.00, 0.00, -0.32, 0.20, 0.00,\n\t\t0.10, 0.20, -0.30, 0.20, -0.20, 0.12, 0.20, -0.16,\n\t\t0.20, 0.16, 0.20, 0.20, 0.30, 0.20, 0.30, 0.00,\n\t\t0.00, -0.20, 0.30, 0.00, 0.00, 0.20, 0.30, -0.20,\n\t\t-0.30, -0.20, -0.30, 0.20, -0.30, 0.00, 0.00, 0.20,\n\t\t0.30, 0.00, 0.00, 0.20, 0.30, 0.00, 0.00, 0.20,\n\t\t0.30, 0.00, 0.00, 0.20, 0.30, 0.00, 0.00, 0.20,\n\t\t-0.30, 0.00, 0.00, -0.20, -0.30, 0.00, 0.00, -0.20,\n\t\t0.30, 0.00, 0.00, -0.20, 0.30, 0.00, 0.00, 0.36,\n\n\t\t// 4180-4307\n\t\t0.00, 0.00, 0.36, 0.12, 0.10, -0.24, 0.20, 0.12,\n\t\t-0.20, -0.16, -0.20, -0.13, 0.10, 0.22, 0.21, 0.20,\n\t\t0.00, -0.28, 0.32, 0.00, -0.12, -0.20, -0.20, 0.12,\n\t\t-0.10, 0.12, 0.10, -0.20, 0.20, 0.00, 0.00, -0.32,\n\t\t0.32, 0.00, 0.00, 0.32, 0.32, 0.00, 0.00, -0.24,\n\t\t-0.20, 0.24, 0.20, 0.20, 0.00, -0.24, 0.00, 0.00,\n\t\t-0.24, -0.20, 0.00, 0.00, 0.24, 0.20, -0.24, -0.20,\n\t\t0.00, 0.00, -0.24, 0.20, 0.16, -0.20, 0.12, 0.10,\n\t\t0.20, 0.20, 0.00, -0.10, -0.12, 0.10, -0.16, -0.20,\n\t\t-0.12, -0.10, -0.16, 0.20, 0.20, 0.20, 0.00, 0.00,\n\t\t-0.20, 0.20, -0.20, 
0.20, -0.20, 0.20, -0.20, 0.20,\n\t\t0.20, -0.20, -0.20, -0.20, 0.00, 0.00, -0.20, 0.20,\n\t\t0.20, 0.00, -0.20, 0.00, 0.00, -0.20, 0.20, -0.20,\n\t\t0.20, -0.20, -0.20, -0.20, -0.20, 0.00, 0.00, 0.20,\n\t\t0.20, 0.20, 0.20, 0.12, -0.20, -0.12, -0.10, 0.28,\n\t\t-0.28, 0.16, -0.20, 0.00, -0.10, 0.00, 0.10, -0.16,\n\n\t\t// 4308-4435\n\t\t0.20, 0.00, -0.10, -0.16, -0.20, 0.00, -0.10, 0.16,\n\t\t-0.20, 0.16, -0.20, 0.00, 0.00, 0.16, 0.20, -0.16,\n\t\t0.20, 0.00, 0.00, 0.16, 0.20, 0.16, -0.20, 0.16,\n\t\t-0.20, -0.16, 0.20, 0.16, -0.20, 0.00, 0.00, 0.16,\n\t\t0.20, 0.00, 0.00, 0.16, 0.20, 0.00, 0.00, -0.16,\n\t\t-0.20, 0.16, -0.20, -0.16, -0.20, 0.00, 0.00, -0.16,\n\t\t-0.20, 0.00, 0.00, -0.16, 0.20, 0.00, 0.00, 0.16,\n\t\t-0.20, 0.16, 0.20, 0.16, 0.20, 0.00, 0.00, -0.16,\n\t\t-0.20, 0.00, 0.00, -0.16, -0.20, 0.00, 0.00, 0.16,\n\t\t0.20, 0.16, 0.20, 0.00, 0.00, 0.16, 0.20, 0.16,\n\t\t-0.20, 0.16, 0.20, 0.00, 0.00, -0.16, 0.20, 0.00,\n\t\t0.10, 0.12, -0.20, 0.12, -0.20, 0.00, -0.10, 0.00,\n\t\t-0.10, 0.12, 0.20, 0.00, -0.10, -0.12, 0.20, -0.15,\n\t\t0.20, -0.24, 0.24, 0.00, 0.00, 0.24, 0.24, 0.12,\n\t\t-0.20, -0.12, -0.20, 0.00, 0.00, 0.12, 0.20, 0.12,\n\t\t-0.20, 0.12, 0.20, 0.12, 0.20, 0.12, 0.20, 0.12,\n\n\t\t// 4436-4563\n\t\t-0.20, -0.12, 0.20, 0.00, 0.00, 0.12, 0.20, 0.12,\n\t\t0.00, -0.20, 0.00, 0.00, -0.12, -0.20, 0.12, -0.20,\n\t\t0.00, 0.00, 0.12, 0.20, -0.12, 0.20, -0.12, 0.20,\n\t\t0.12, -0.20, 0.00, 0.00, 0.12, 0.20, 0.20, 0.00,\n\t\t0.12, 0.00, 0.00, -0.12, 0.20, 0.00, 0.00, -0.12,\n\t\t-0.20, 0.00, 0.00, -0.12, -0.20, -0.12, -0.20, 0.00,\n\t\t0.00, 0.12, -0.20, 0.12, -0.20, 0.12, 0.20, -0.12,\n\t\t-0.20, 0.00, 0.00, 0.12, -0.20, 0.12, -0.20, 0.12,\n\t\t0.20, 0.12, 0.00, 0.20, -0.12, -0.20, 0.00, 0.00,\n\t\t0.12, 0.20, -0.16, 0.00, 0.16, -0.20, 0.20, 0.00,\n\t\t0.00, -0.20, 0.00, 0.00, -0.20, 0.20, 0.00, 0.00,\n\t\t0.20, 0.20, -0.20, 0.00, 0.00, -0.20, 0.12, 0.00,\n\t\t-0.16, 0.20, 0.00, 0.00, 0.20, 0.12, -0.10, 0.00,\n\t\t0.10, 0.16, -0.16, -0.16, -0.16, 
-0.16, -0.16, 0.00,\n\t\t0.00, -0.16, 0.00, 0.00, -0.16, -0.16, -0.16, 0.00,\n\t\t0.00, -0.16, 0.00, 0.00, 0.16, 0.00, 0.00, 0.16,\n\n\t\t// 4564-4691\n\t\t0.00, 0.00, 0.16, 0.16, 0.00, 0.00, -0.16, 0.00,\n\t\t0.00, -0.16, -0.16, 0.00, 0.00, 0.16, 0.00, 0.00,\n\t\t-0.16, -0.16, 0.00, 0.00, -0.16, -0.16, 0.12, 0.10,\n\t\t0.12, -0.10, 0.12, 0.10, 0.00, 0.00, 0.12, 0.10,\n\t\t-0.12, 0.10, 0.00, 0.00, 0.12, 0.10, 0.12, -0.10,\n\t\t0.00, 0.00, -0.12, -0.10, 0.00, 0.00, 0.12, 0.10,\n\t\t0.12, 0.00, 0.00, 0.12, 0.00, 0.00, -0.12, 0.00,\n\t\t0.00, 0.12, 0.12, 0.12, 0.12, 0.12, 0.00, 0.00,\n\t\t0.12, 0.00, 0.00, 0.12, 0.12, 0.00, 0.00, 0.12,\n\t\t0.00, 0.00, 0.12, -0.12, -0.12, 0.12, 0.12, -0.12,\n\t\t-0.12, 0.00, 0.00, 0.12, -0.12, 0.12, 0.12, -0.12,\n\t\t-0.12, 0.00, 0.00, -0.12, -0.12, 0.00, 0.00, -0.12,\n\t\t0.12, 0.00, 0.00, 0.12, 0.00, 0.00, 0.12, 0.00,\n\t\t0.00, 0.12, -0.12, 0.00, 0.00, -0.12, 0.12, -0.12,\n\t\t-0.12, 0.12, 0.00, 0.00, 0.12, 0.12, 0.12, -0.12,\n\t\t0.00, 0.00, -0.12, -0.12, -0.12, 0.00, 0.00, -0.12,\n\n\t\t// 4692-NA\n\t\t-0.12, 0.00, 0.00, 0.12, 0.12, 0.00, 0.00, -0.12,\n\t\t-0.12, -0.12, -0.12, 0.12, 0.00, 0.00, 0.12, -0.12,\n\t\t0.00, 0.00, -0.12, -0.12, 0.00, 0.00, 0.12, -0.12,\n\t\t-0.12, -0.12, -0.12, 0.12, 0.12, -0.12, -0.12, 0.00,\n\t\t0.00, -0.12, 0.00, 0.00, -0.12, 0.12, 0.00, 0.00,\n\t\t0.12, 0.00, 0.00, -0.12, -0.12, 0.00, 0.00, -0.12,\n\t\t-0.12, 0.12, 0.00, 0.00, 0.12, 0.12, 0.00, 0.00,\n\t\t0.12, 0.00, 0.00, 0.12, 0.12, 0.08, 0.00, 0.04,\n\t}\n\n\t// Number of amplitude coefficients\n\tconst NA = len(a)\n\n\t// Amplitude usage: X or Y, sin or cos, power of T.\n\tvar jaxy = [...]int{0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1}\n\tvar jasc = [...]int{0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0}\n\tvar japt = [...]int{0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4}\n\n\t// Miscellaneous\n\tvar fa [14]float64\n\tvar pt [MAXPT + 1]float64\n\tvar sc, xypr, xypl, xyls [2]float64\n\tvar t, w, arg 
float64\n\tvar jpt, i, j, jxy, ialast, ifreq, m, ia, jsc int\n\n\t// -------------------------------------------------------------\n\n\t// Interval between fundamental date J2000.0 and given date\n\t// (JC).\n\tt = ((date1 - DJ00) + date2) / DJC\n\n\t// Powers of T.\n\tw = 1.0\n\tfor jpt = 0; jpt <= MAXPT; jpt++ {\n\t\tpt[jpt] = w\n\t\tw *= t\n\t}\n\n\t// Initialize totals in X and Y: polynomial, luni-solar,\n\t// planetary.\n\tfor jxy = 0; jxy < 2; jxy++ {\n\t\txypr[jxy] = 0.0\n\t\txyls[jxy] = 0.0\n\t\txypl[jxy] = 0.0\n\t}\n\n\t// ---------------------------------\n\t// Fundamental arguments (IERS 2003)\n\t// ---------------------------------\n\n\t// Mean anomaly of the Moon.\n\tfa[0] = GoFal03(t)\n\n\t// Mean anomaly of the Sun.\n\tfa[1] = GoFalp03(t)\n\n\t// Mean argument of the latitude of the Moon.\n\tfa[2] = GoFaf03(t)\n\n\t// Mean elongation of the Moon from the Sun.\n\tfa[3] = GoFad03(t)\n\n\t// Mean longitude of the ascending node of the Moon.\n\tfa[4] = GoFaom03(t)\n\n\t// Planetary longitudes, Mercury through Neptune.\n\tfa[5] = GoFame03(t)\n\tfa[6] = GoFave03(t)\n\tfa[7] = GoFae03(t)\n\tfa[8] = GoFama03(t)\n\tfa[9] = GoFaju03(t)\n\tfa[10] = GoFasa03(t)\n\tfa[11] = GoFaur03(t)\n\tfa[12] = GoFane03(t)\n\n\t// General accumulated precession in longitude.\n\tfa[13] = GoFapa03(t)\n\n\t// --------------------------------------\n\t// Polynomial part of precession-nutation\n\t// --------------------------------------\n\n\tfor jxy = 0; jxy < 2; jxy++ {\n\t\tfor j = MAXPT; j >= 0; j-- {\n\t\t\txypr[jxy] += xyp[jxy][j] * pt[j]\n\t\t}\n\t}\n\n\t// ----------------------------------\n\t// Nutation periodic terms, planetary\n\t// ----------------------------------\n\n\t// Work backwards through the coefficients per frequency list.\n\tialast = NA\n\tfor ifreq = NFPL - 1; ifreq >= 0; ifreq-- {\n\n\t\t// Obtain the argument functions.\n\t\targ = 0.0\n\t\tfor i = 0; i < 14; i++ {\n\t\t\tm = int(mfapl[ifreq][i])\n\t\t\tif m != 0 {\n\t\t\t\targ += float64(m) * 
fa[i]\n\t\t\t}\n\t\t}\n\t\tsc[0] = math.Sin(arg)\n\t\tsc[1] = math.Cos(arg)\n\n\t\t// Work backwards through the amplitudes at this\n\t\t// frequency.\n\t\tia = nc[ifreq+NFLS]\n\t\tfor i = ialast; i >= ia; i-- {\n\n\t\t\t// Coefficient number (0 = 1st).\n\t\t\tj = i - ia\n\n\t\t\t// X or Y.\n\t\t\tjxy = jaxy[j]\n\n\t\t\t// Sin or cos.\n\t\t\tjsc = jasc[j]\n\n\t\t\t// Power of T.\n\t\t\tjpt = japt[j]\n\n\t\t\t// Accumulate the component.\n\t\t\txypl[jxy] += a[i-1] * sc[jsc] * pt[jpt]\n\t\t}\n\t\tialast = ia - 1\n\t}\n\n\t// -----------------------------------\n\t// Nutation periodic terms, luni-solar\n\t// -----------------------------------\n\n\t// Continue working backwards through the number of coefficients\n\t// list.\n\tfor ifreq = NFLS - 1; ifreq >= 0; ifreq-- {\n\n\t\t// Obtain the argument functions.\n\t\targ = 0.0\n\t\tfor i = 0; i < 5; i++ {\n\t\t\tm = mfals[ifreq][i]\n\t\t\tif m != 0 {\n\t\t\t\targ += float64(m) * fa[i]\n\t\t\t}\n\t\t}\n\t\tsc[0] = math.Sin(arg)\n\t\tsc[1] = math.Cos(arg)\n\n\t\t// Work backwards through the amplitudes at this\n\t\t// frequency.\n\t\tia = nc[ifreq]\n\t\tfor i = ialast; i >= ia; i-- {\n\n\t\t\t// Coefficient number (0 = 1st).\n\t\t\tj = i - ia\n\n\t\t\t// X or Y.\n\t\t\tjxy = jaxy[j]\n\n\t\t\t// Sin or cos.\n\t\t\tjsc = jasc[j]\n\n\t\t\t// Power of T.\n\t\t\tjpt = japt[j]\n\n\t\t\t// Accumulate the component.\n\t\t\txyls[jxy] += a[i-1] * sc[jsc] * pt[jpt]\n\t\t}\n\t\tialast = ia - 1\n\t}\n\n\t// ------------------------------------\n\t// Results: CIP unit vector components\n\t// ------------------------------------\n\n\tx = DAS2R * (xypr[0] + (xyls[0]+xypl[0])/1e6)\n\ty = DAS2R * (xypr[1] + (xyls[1]+xypl[1])/1e6)\n\n\treturn\n}",
"func (i *image) converMapCoord(coord, center int) (int, int) {\n\tretx, rety := 0, 0\n\tcoord *= Size\n\tmax := center * 2\n\tif coord < 0 {\n\t\tretx = center + coord\n\t} else {\n\t\tretx = center + coord\n\t}\n\n\tif retx < 0 {\n\t\trety = retx + Size\n\t\tretx = 0\n\t} else if retx > max {\n\t\trety = max\n\t} else {\n\t\trety = retx + Size\n\t}\n\treturn retx, rety\n}",
"func Grid2LatLon(N, E float64, from gGrid, to eDatum) (float64, float64) {\n\t//================\n\t// GRID -> Lat/Lon\n\t//================\n\ty := N + grid[from].falseN\n\tx := E - grid[from].falseE\n\tM := y / grid[from].k0\n\ta := Datum[to].a\n\tb := Datum[to].b\n\te := Datum[to].e\n\tesq := Datum[to].esq\n\tmu := M / (a * (1 - e*e/4 - 3*math.Pow(e, 4)/64 - 5*math.Pow(e, 6)/256))\n\n\tee := math.Sqrt(1 - esq)\n\te1 := (1 - ee) / (1 + ee)\n\tj1 := 3*e1/2 - 27*e1*e1*e1/32\n\tj2 := 21*e1*e1/16 - 55*e1*e1*e1*e1/32\n\tj3 := 151 * e1 * e1 * e1 / 96\n\tj4 := 1097 * e1 * e1 * e1 * e1 / 512\n\t// Footprint Latitude\n\tfp := mu + j1*math.Sin(2*mu) + j2*math.Sin(4*mu) + j3*math.Sin(6*mu) + j4*math.Sin(8*mu)\n\n\tsinfp := math.Sin(fp)\n\tcosfp := math.Cos(fp)\n\ttanfp := sinfp / cosfp\n\teg := (e * a / b)\n\teg2 := eg * eg\n\tC1 := eg2 * cosfp * cosfp\n\tT1 := tanfp * tanfp\n\tR1 := a * (1 - e*e) / math.Pow(1-(e*sinfp)*(e*sinfp), 1.5)\n\tN1 := a / math.Sqrt(1-(e*sinfp)*(e*sinfp))\n\tD := x / (N1 * grid[from].k0)\n\n\tQ1 := N1 * tanfp / R1\n\tQ2 := D * D / 2\n\tQ3 := (5 + 3*T1 + 10*C1 - 4*C1*C1 - 9*eg2*eg2) * (D * D * D * D) / 24\n\tQ4 := (61 + 90*T1 + 298*C1 + 45*T1*T1 - 3*C1*C1 - 252*eg2*eg2) * (D * D * D * D * D * D) / 720\n\t// result lat\n\tlat := fp - Q1*(Q2-Q3+Q4)\n\n\tQ5 := D\n\tQ6 := (1 + 2*T1 + C1) * (D * D * D) / 6\n\tQ7 := (5 - 2*C1 + 28*T1 - 3*C1*C1 + 8*eg2*eg2 + 24*T1*T1) * (D * D * D * D * D) / 120\n\t// result lon\n\tlon := grid[from].lon0 + (Q5-Q6+Q7)/cosfp\n\treturn lat, lon\n}",
"func (s *Service) convertFromPMUser(mu meta.User) (User, error) {\n\tu := User{\n\t\tName: mu.Name,\n\t\tHash: []byte(mu.Hash),\n\t\tPrivileges: convertPMPermissions(mu.Permissions),\n\t}\n\treturn u, nil\n}",
"func toDegrees(input float64) float64 {\n\treturn input * 180 / math.Pi\n}",
"func (converter *Point_Convert) Convert_XY(xy []int) []float64 {\n\tmerc_point := []float64{float64(xy[0])/4096.0*converter.DeltaX + converter.Bds.W, (4096.0-float64(xy[1]))/4096.0*converter.DeltaY + converter.Bds.S}\n\treturn Convert_Merc_Point(merc_point)\n}",
"func toAtoms(v float64) uint64 {\n\treturn uint64(math.Round(v * conventionalConversionFactor))\n}",
"func (crs LambertConformalConic2SP) ToLonLat(east, north float64, gs GeodeticSpheroid) (lon, lat float64) {\n\ts := spheroid(gs, crs.GeodeticDatum)\n\tρi := math.Sqrt(math.Pow(east-crs.Eastf, 2) + math.Pow(crs._ρ(radian(crs.Latf), s)-(north-crs.Northf), 2))\n\tif crs._n(s) < 0 {\n\t\tρi = -ρi\n\t}\n\tti := math.Pow(ρi/(s.MajorAxis()*crs._F(s)), 1/crs._n(s))\n\tφ := math.Pi/2 - 2*math.Atan(ti)\n\tfor i := 0; i < 5; i++ {\n\t\tφ = math.Pi/2 - 2*math.Atan(ti*math.Pow((1-s.E()*math.Sin(φ))/(1+s.E()*math.Sin(φ)), s.E()/2))\n\t}\n\tλ := math.Atan((east-crs.Eastf)/(crs._ρ(radian(crs.Latf), s)-(north-crs.Northf)))/crs._n(s) + radian(crs.Lonf)\n\treturn degree(λ), degree(φ)\n}",
"func makeMoveFromCoords(board *Board, move string, useChess960Castling bool) uint16 {\n\tfromPos := CoordinateToPos(move[0:2])\n\ttoPos := CoordinateToPos(move[2:4])\n\tmovePieceType := GetPieceType(board.Pieces[fromPos])\n\tvar moveType int\n\n\tmoveLen := len(move)\n\tif moveLen == 5 {\n\t\tif move[moveLen-1] == 'n' {\n\t\t\tmoveType = KnightPromotion\n\t\t} else if move[moveLen-1] == 'b' {\n\t\t\tmoveType = BishopPromotion\n\t\t} else if move[moveLen-1] == 'r' {\n\t\t\tmoveType = RookPromotion\n\t\t} else if move[moveLen-1] == 'q' {\n\t\t\tmoveType = QueenPromotion\n\t\t}\n\t} else if move == \"e1g1\" && movePieceType == KingBB && !useChess960Castling {\n\t\tmoveType = CastleWKS\n\t} else if move == \"e1c1\" && movePieceType == KingBB && !useChess960Castling {\n\t\tmoveType = CastleWQS\n\t} else if move == \"e8g8\" && movePieceType == KingBB && !useChess960Castling {\n\t\tmoveType = CastleBKS\n\t} else if move == \"e8c8\" && movePieceType == KingBB && !useChess960Castling {\n\t\tmoveType = CastleBQS\n\t} else if move == \"e1h1\" && movePieceType == KingBB && useChess960Castling {\n\t\tmoveType = CastleWKS\n\t} else if move == \"e1a1\" && movePieceType == KingBB && useChess960Castling {\n\t\tmoveType = CastleWQS\n\t} else if move == \"e8h8\" && movePieceType == KingBB && useChess960Castling {\n\t\tmoveType = CastleBKS\n\t} else if move == \"e8a8\" && movePieceType == KingBB && useChess960Castling {\n\t\tmoveType = CastleBQS\n\t} else if toPos == board.EPSquare {\n\t\tmoveType = AttackEP\n\t} else {\n\t\tcapturePiece := board.Pieces[toPos]\n\t\tif capturePiece == NoPiece {\n\t\t\tmoveType = Quiet\n\t\t} else {\n\t\t\tmoveType = Attack\n\t\t}\n\t}\n\treturn MakeMove(fromPos, toPos, moveType)\n}",
"func moonCoords(d float64) moonCoordinates { // geocentric ecliptic coordinates of the moon\n\tL := rad * (218.316 + 13.176396*d) // ecliptic longitude\n\tM := rad * (134.963 + 13.064993*d) // mean anomaly\n\tF := rad * (93.272 + 13.229350*d) // mean distance\n\n\tl := L + rad*6.289*math.Sin(M) // longitude\n\tb := rad * 5.128 * math.Sin(F) // latitude\n\tdt := 385001 - 20905*math.Cos(M) // distance to the moon in km\n\n\treturn moonCoordinates{\n\t\trightAscension(l, b),\n\t\tdeclination(l, b),\n\t\tdt,\n\t}\n}",
"func MToF(m Meter) Feet { return Feet(m / 0.3048) }",
"func Convertir(cantidad float64, desde string, hacia string, relaciones ...RelacionUM) (out float64, err error) {\n\n\t// Chequeo que existan las unidades de medida\n\td, ok := unidades[desde]\n\tif !ok {\n\t\treturn 0, errors.Errorf(\"No existe la unidad de medida %v\", desde)\n\t}\n\th, ok := unidades[hacia]\n\tif !ok {\n\t\treturn 0, errors.Errorf(\"No existe la unidad de medida %v\", hacia)\n\t}\n\n\t// Si son iguales no hago nada\n\tif desde == hacia {\n\t\treturn cantidad, nil\n\t}\n\n\tif d.Tipo != h.Tipo {\n\n\t\t// Empiezo a analizar las relaciones ingresadas\n\t\tfor _, v := range relaciones {\n\n\t\t\t// Quiero convertir 600ml a kg. La relación me dice que 1L = 0.92kg\n\t\t\t// Busco si la relación me convierte los tipos que estoy buscando.\n\t\t\tif unidades[v.Un].Tipo == d.Tipo && unidades[v.De].Tipo == h.Tipo {\n\t\t\t\t// Primero convierto 600 ml => L = 0.6\n\t\t\t\tnuevaUnidadConsistenteConDesde, err := Convertir(cantidad, desde, v.Un)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 0, errors.Wrap(err, \"error interno derecho\")\n\t\t\t\t}\n\n\t\t\t\t// Convierto 0.6 L => Kg\n\t\t\t\tnuevaUnidadFinal, err := Convertir(nuevaUnidadConsistenteConDesde, v.De, hacia)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 0, errors.Wrap(err, \"error interno\")\n\t\t\t\t}\n\n\t\t\t\treturn nuevaUnidadFinal * v.EquivaleA, nil\n\t\t\t}\n\n\t\t\t// Si tengo la relación inversa:\n\t\t\t// Quiero convertir 600ml a kg. La relación me dice que 1kg = 0.92L\n\t\t\tif unidades[v.Un].Tipo == h.Tipo && unidades[v.De].Tipo == d.Tipo {\n\t\t\t\trelacionInvertida := RelacionUM{\n\t\t\t\t\tUn: v.De,\n\t\t\t\t\tEquivaleA: 1 / v.EquivaleA,\n\t\t\t\t\tDe: v.Un,\n\t\t\t\t}\n\t\t\t\treturn Convertir(cantidad, desde, hacia, relacionInvertida)\n\n\t\t\t}\n\n\t\t}\n\n\t\t// No eran convertibles y tampoco se ingresó una conversión\n\t\treturn 0, errors.Errorf(\"No se puede convertir %v hacia %v. 
La primera es una medida de %v, la segunda en cambio es de %v.\",\n\t\t\td.Nombre, h.Nombre, d.Tipo, h.Tipo)\n\t}\n\n\t// Son convertibles\n\treturn cantidad * d.factor / h.factor, nil\n}",
"func (m mathUtil) DegreesToCompass(deg float64) float64 {\n\treturn m.DegreesAdd(deg, -90.0)\n}",
"func (crs TransverseMercator) ToLonLat(east, north float64, gs GeodeticSpheroid) (lon, lat float64) {\n\ts := spheroid(gs, crs.GeodeticDatum)\n\teast -= crs.Eastf\n\tnorth -= crs.Northf\n\tMi := crs._M(radian(crs.Latf), s) + north/crs.Scale\n\tμ := Mi / (s.MajorAxis() * (1 - s.E2()/4 - 3*s.E4()/64 - 5*s.E6()/256))\n\tφ1 := μ + (3*s.Ei()/2-27*s.Ei3()/32)*math.Sin(2*μ) +\n\t\t(21*s.Ei2()/16-55*s.Ei4()/32)*math.Sin(4*μ) +\n\t\t(151*s.Ei3()/96)*math.Sin(6*μ) +\n\t\t(1097*s.Ei4()/512)*math.Sin(8*μ)\n\tR1 := s.MajorAxis() * (1 - s.E2()) / math.Pow(1-s.E2()*sin2(φ1), 3/2)\n\tD := east / (crs._N(φ1, s) * crs.Scale)\n\tφ := φ1 - (crs._N(φ1, s)*math.Tan(φ1)/R1)*(D*D/2-(5+3*crs._T(φ1)+10*\n\t\tcrs._C(φ1, s)-4*crs._C(φ1, s)*crs._C(φ1, s)-9*s.Ei2())*\n\t\tmath.Pow(D, 4)/24+(61+90*crs._T(φ1)+298*crs._C(φ1, s)+45*crs._T(φ1)*\n\t\tcrs._T(φ1)-252*s.Ei2()-3*crs._C(φ1, s)*crs._C(φ1, s))*\n\t\tmath.Pow(D, 6)/720)\n\tλ := radian(crs.Lonf) + (D-(1+2*crs._T(φ1)+crs._C(φ1, s))*D*D*D/6+(5-2*crs._C(φ1, s)+\n\t\t28*crs._T(φ1)-3*crs._C(φ1, s)*crs._C(φ1, s)+8*s.Ei2()+24*crs._T(φ1)*crs._T(φ1))*\n\t\tmath.Pow(D, 5)/120)/math.Cos(φ1)\n\treturn degree(λ), degree(φ)\n}",
"func MOVSS(mx, mx1 operand.Op) { ctx.MOVSS(mx, mx1) }",
"func SwissCoordToGRS80LatLong(coord *SwissCoord) (*cartconvert.PolarCoord, error) {\n\n\tvar fn, fe float64\n\n\tswitch coord.CoordType {\n\tcase LV03:\n\t\tfe = 600000\n\t\tfn = 200000\n\tcase LV95:\n\t\tfe = -2600000\n\t\tfn = -1200000\n\tdefault:\n\t\treturn nil, cartconvert.ErrRange\n\t}\n\n\tgc := cartconvert.InverseTransverseMercator(\n\t\t&cartconvert.GeoPoint{Y: coord.Northing, X: coord.Easting, El: coord.El},\n\t\t46.952406, // lat0\n\t\t7.439583, // long0\n\t\t1,\n\t\tfe, // fe\n\t\tfn) // fn\n\n\tcart := cartconvert.PolarToCartesian(gc)\n\t// According to literature, the Granit87 parameters shall not be used in favour of\n\t// higher accuracy of the following shift values\n\n\t// pt := cartconvert.HelmertLV03ToWGS84Granit87.Transform(&cartconvert.Point3D{X: cart.X, Y: cart.Y, Z: cart.Z})\n\tpt := &cartconvert.Point3D{X: cart.X + 674.374, Y: cart.Y + 15.056, Z: cart.Z + 405.346}\n\n\treturn cartconvert.CartesianToPolar(&cartconvert.CartPoint{X: pt.X, Y: pt.Y, Z: pt.Z, El: cartconvert.GRS80Ellipsoid}), nil\n}",
"func (v GeodeticPoint) WGS84ToGCJ02() GeodeticPoint {\n\tif v.outsideChina() {\n\t\treturn v\n\t}\n\tlat := lat(v.Longitude-105, v.Latitude-35)\n\tlon := lon(v.Longitude-105, v.Latitude-35)\n\tradLat := v.Latitude / 180 * pi\n\tmagic := math.Sin(radLat)\n\tmagic = 1 - ee*magic*magic\n\tsqrtMagic := math.Sqrt(magic)\n\tlat = (lat * 180) / ((a * (1 - ee)) / (magic * sqrtMagic) * pi)\n\tlon = (lon * 180) / (a / sqrtMagic * math.Cos(radLat) * pi)\n\treturn GeodeticPoint{v.Latitude + lat, v.Longitude + lon}\n}",
"func molodensky(ilat, ilon float64, from, to eDatum) (float64, float64) {\n\t// from->WGS84 - to->WGS84 = from->WGS84 + WGS84->to = from->to\n\tdX := Datum[from].dX - Datum[to].dX\n\tdY := Datum[from].dY - Datum[to].dY\n\tdZ := Datum[from].dZ - Datum[to].dZ\n\tslat := math.Sin(ilat)\n\tclat := math.Cos(ilat)\n\tslon := math.Sin(ilon)\n\tclon := math.Cos(ilon)\n\tssqlat := slat * slat\n\n\t//dlat = ((-dx * slat * clon - dy * slat * slon + dz * clat)\n\t// + (da * rn * fromEsq * slat * clat / fromA)\n\t// + (df * (rm * adb + rn / adb )* slat * clat))\n\t// / (rm + from.h);\n\n\tfromF := Datum[from].f\n\tdf := Datum[to].f - fromF\n\tfromA := Datum[from].a\n\tda := Datum[to].a - fromA\n\tfromEsq := Datum[from].esq\n\tadb := 1.0 / (1.0 - fromF)\n\trn := fromA / math.Sqrt(1-fromEsq*ssqlat)\n\trm := fromA * (1 - fromEsq) / math.Pow((1-fromEsq*ssqlat), 1.5)\n\tfromH := 0.0 // we're flat!\n\tdlat := (-dX*slat*clon - dY*slat*slon + dZ*clat + da*rn*fromEsq*slat*clat/fromA +\n\t\t+df*(rm*adb+rn/adb)*slat*clat) /\n\t\t(rm + fromH)\n\n\t// result lat (radians)\n\tolat := ilat + dlat\n\n\t// dlon = (-dx * slon + dy * clon) / ((rn + from.h) * clat);\n\tdlon := (-dX*slon + dY*clon) / ((rn + fromH) * clat)\n\t// result lon (radians)\n\tolon := ilon + dlon\n\treturn olat, olon\n}",
"func (f *Fpdf) PointConvert(pt float64) (u float64) {\n\treturn pt / f.k\n}",
"func NewCoordinate(res CoordResolution) (c Coordinates) {\n\tc.Resolution = res\n\tc.Sector.MoveTo(coord_SECTOR_MAX/2, coord_SECTOR_MAX/2)\n\tc.SubSector.MoveTo(coord_SUBSECTOR_MAX/2, coord_SUBSECTOR_MAX/2)\n\tc.StarCoord.MoveTo(coord_STARSYSTEM_MAX/2, coord_STARSYSTEM_MAX/2)\n\tc.Local.Set(coord_LOCAL_MAX/2, coord_LOCAL_MAX/2)\n\n\treturn\n}",
"func NewConversion(from, to Unit, formula string) {\n\texpr, err := govaluate.NewEvaluableExpression(formula)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// create conversion function\n\tfn := func(x float64) float64 {\n\t\tparams := make(map[string]interface{})\n\t\tparams[\"x\"] = x\n\n\t\tres, err := expr.Evaluate(params)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn res.(float64)\n\t}\n\n\tNewConversionFromFn(from, to, fn, formula)\n}",
"func (m *mgr) convert(currentUser *userv1beta1.UserId, s *collaboration.Share) *collaboration.ReceivedShare {\n\trs := &collaboration.ReceivedShare{\n\t\tShare: s,\n\t\tState: collaboration.ShareState_SHARE_STATE_PENDING,\n\t}\n\tif v, ok := m.model.State[currentUser.String()]; ok {\n\t\tif state, ok := v[s.Id.String()]; ok {\n\t\t\trs.State = state\n\t\t}\n\t}\n\tif v, ok := m.model.MountPoint[currentUser.String()]; ok {\n\t\tif mp, ok := v[s.Id.String()]; ok {\n\t\t\trs.MountPoint = mp\n\t\t}\n\t}\n\treturn rs\n}",
"func Turn(start, dest, vel float64) float64 {\n\td := dest - start\n\tif math.Abs(d) < vel {\n\t\treturn dest\n\t}\n\n\tif d > 0 {\n\t\tif d > math.Pi {\n\t\t\tstart -= vel\n\t\t} else {\n\t\t\tstart += vel\n\t\t}\n\t} else {\n\t\tif d < -math.Pi {\n\t\t\tstart += vel\n\t\t} else {\n\t\t\tstart -= vel\n\t\t}\n\t}\n\n\treturn start\n}",
"func (t *Transform) Convert(c CoordConv) lmath.Mat4 {\n\tswitch c {\n\tcase LocalToWorld:\n\t\tt.access.Lock()\n\t\tt.build()\n\t\tltw := *t.localToWorld\n\t\tt.access.Unlock()\n\t\treturn ltw\n\n\tcase WorldToLocal:\n\t\tt.access.Lock()\n\t\tt.build()\n\t\twtl := *t.worldToLocal\n\t\tt.access.Unlock()\n\t\treturn wtl\n\n\tcase ParentToWorld:\n\t\tt.access.Lock()\n\t\tt.build()\n\t\tltw := *t.localToWorld\n\t\tlocal := *t.built\n\t\tt.access.Unlock()\n\n\t\t// Reverse the local transform:\n\t\tlocalInv, _ := local.Inverse()\n\t\treturn localInv.Mul(ltw)\n\n\tcase WorldToParent:\n\t\tt.access.Lock()\n\t\tt.build()\n\t\twtl := *t.worldToLocal\n\t\tlocal := *t.built\n\t\tt.access.Unlock()\n\t\treturn local.Mul(wtl)\n\t}\n\tpanic(\"Convert(): invalid conversion\")\n}",
"func Part2(shipMap ShipMap) string {\n\tminutes := 0\n\toxygenatedPoints := []Point{shipMap.grid[shipMap.osY][shipMap.osX]}\n\n\tfor shipMap.Unoxygenated() > 0 {\n\t\tfor _, point := range oxygenatedPoints {\n\t\t\tneighbors := shipMap.Neighbors(point)\n\t\t\tfor idx := 0; idx < len(neighbors); idx++ {\n\t\t\t\tneighbor := neighbors[idx]\n\t\t\t\tshipMap.grid[neighbor.y][neighbor.x].oxygenated = true\n\t\t\t\tif !containsPoint(oxygenatedPoints, neighbor) {\n\t\t\t\t\toxygenatedPoints = append(oxygenatedPoints, neighbor)\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t\tminutes++\n\t}\n\n\treturn \"Answer: \" + strconv.Itoa(minutes)\n}",
"func makeFloatFromMandE(negative bool, e int, m []byte, tmp []byte) float64 {\n\t// ±.dddde±dd.\n\tb := tmp[:0]\n\tif n := len(m)*2 + 6; cap(b) < n {\n\t\tb = make([]byte, 0, n)\n\t}\n\tif negative {\n\t\tb = append(b, '-')\n\t}\n\tb = append(b, '.')\n\tfor i, v := range m {\n\t\tt := int(v)\n\t\tif i == len(m) {\n\t\t\tt--\n\t\t}\n\t\tt /= 2\n\t\tb = append(b, byte(t/10)+'0', byte(t%10)+'0')\n\t}\n\tb = append(b, 'e')\n\te = 2 * e\n\tif e < 0 {\n\t\tb = append(b, '-')\n\t\te = -e\n\t} else {\n\t\tb = append(b, '+')\n\t}\n\n\tvar buf [3]byte\n\ti := len(buf)\n\tfor e >= 10 {\n\t\ti--\n\t\tbuf[i] = byte(e%10 + '0')\n\t\te /= 10\n\t}\n\ti--\n\tbuf[i] = byte(e + '0')\n\n\tb = append(b, buf[i:]...)\n\n\t// We unsafely convert the []byte to a string to avoid the usual allocation\n\t// when converting to a string.\n\tf, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&b)), 64)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn f\n}",
"func FTtoM(d Feet) Meter {\n\treturn Meter(d * 3.28084)\n}",
"func DToXY(n, d int) (x, y int) {\n\tt := d\n\tfor s:=1; s<n; s*=2 {\n\t\trx := 1 & (t/2)\n\t\try := 1 & (t ^ rx)\n\t\trot(s, rx, ry, &x, &y)\n\t\tx += s * rx\n\t\ty += s * ry\n\t\tt /= 4\n\t}\n\treturn\n}",
"func Project(zone int, south bool, latitude, longitude float64) (float64, float64) {\n\n\t// False northing\n\tfn := 0.\n\tif south {\n\t\tfn = utmSouthernHemisphereFalseNorthing\n\t}\n\n\th1 := n/2 - n2*2/3 + n3*5/16 + n4*41/180\n\th2 := n2*13/48 - n3*3/5 + n4*557/1440\n\th3 := n3*61/240 - n4*103/140\n\th4 := n4 * 49561 / 161280\n\n\tq := math.Asinh(math.Tan(latitude)) - e*math.Atanh(e*math.Sin(latitude))\n\tβ := math.Atan(math.Sinh(q))\n\n\tη0 := math.Atanh(math.Cos(β) * math.Sin(longitude-λO(zone)))\n\tξ0 := math.Asin(math.Sin(β) * math.Cosh(η0))\n\n\tη1 := h1 * math.Cos(2*ξ0) * math.Sinh(2*η0)\n\tη2 := h2 * math.Cos(4*ξ0) * math.Sinh(4*η0)\n\tη3 := h3 * math.Cos(6*ξ0) * math.Sinh(6*η0)\n\tη4 := h4 * math.Cos(8*ξ0) * math.Sinh(8*η0)\n\n\tξ1 := h1 * math.Sin(2*ξ0) * math.Cosh(2*η0)\n\tξ2 := h2 * math.Sin(4*ξ0) * math.Cosh(4*η0)\n\tξ3 := h3 * math.Sin(6*ξ0) * math.Cosh(6*η0)\n\tξ4 := h4 * math.Sin(8*ξ0) * math.Cosh(8*η0)\n\n\tξ := ξ0 + ξ1 + ξ2 + ξ3 + ξ4\n\tη := η0 + η1 + η2 + η3 + η4\n\n\te := fe + kO*b*η\n\tn := fn + kO*b*ξ\n\treturn e, n\n}",
"func (c *Coord) M() float64 { return c[3] }",
"func (src *DOCluster) ConvertTo(dstRaw conversion.Hub) error { // nolint\n\tdst := dstRaw.(*infrav1alpha4.DOCluster)\n\tif err := Convert_v1alpha3_DOCluster_To_v1alpha4_DOCluster(src, dst, nil); err != nil {\n\t\treturn err\n\t}\n\n\t// Manually restore data from annotations\n\trestored := &infrav1alpha4.DOCluster{}\n\tif ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func moveFromCoord(pos *Position, move string) Move {\n\tfrom := CoordinateToPos(move[0:2])\n\tto := CoordinateToPos(move[2:4])\n\tmoved := pos.Squares[from].Type\n\n\tvar moveType uint8\n\tflag := NoFlag\n\n\tmoveLen := len(move)\n\tif moveLen == 5 {\n\t\tmoveType = Promotion\n\t\tif move[moveLen-1] == 'n' {\n\t\t\tflag = KnightPromotion\n\t\t} else if move[moveLen-1] == 'b' {\n\t\t\tflag = BishopPromotion\n\t\t} else if move[moveLen-1] == 'r' {\n\t\t\tflag = RookPromotion\n\t\t} else if move[moveLen-1] == 'q' {\n\t\t\tflag = QueenPromotion\n\t\t}\n\t} else if move == \"e1g1\" && moved == King {\n\t\tmoveType = Castle\n\t} else if move == \"e1c1\" && moved == King {\n\t\tmoveType = Castle\n\t} else if move == \"e8g8\" && moved == King {\n\t\tmoveType = Castle\n\t} else if move == \"e8c8\" && moved == King {\n\t\tmoveType = Castle\n\t} else if to == pos.EPSq && moved == Pawn {\n\t\tmoveType = Attack\n\t\tflag = AttackEP\n\t} else {\n\t\tcaptured := pos.Squares[to]\n\t\tif captured.Type == NoType {\n\t\t\tmoveType = Quiet\n\t\t} else {\n\t\t\tmoveType = Attack\n\t\t}\n\t}\n\treturn NewMove(from, to, moveType, flag)\n}",
"func (p Point2D) ToPoint() Point {\n\treturn Point{p.X, p.Y, 0}\n}",
"func moment(data []float64, c float64, p float64, N int) float64 {\n\n\tsum := 0.0\n\tfor i := 0; i < N; i++ {\n\t\tsum += math.Pow(data[i]-c, p)\n\t}\n\n\treturn sum / float64(N)\n}",
"func Inverse(lat1, lon1, lat2, lon2 float64) (s12, azi1, azi2 float64) {\n\tlon12 := angNormalize(lon2 - lon1)\n\tlon12 = angRound(lon12)\n\t// Make longitude difference positive.\n\tlonsign := sg(lon12 >= 0)\n\tlon12 *= lonsign\n\tif lon12 == math.Pi {\n\t\tlonsign = 1\n\t}\n\n\t// If really close to the equator, treat as on equator.\n\tlat1 = angRound(lat1)\n\tlat2 = angRound(lat2)\n\n\t// Swap points so that point with higher (abs) latitude is point 1\n\tswapp := sg(math.Abs(lat1) >= math.Abs(lat2))\n\tif swapp < 0 {\n\t\tlonsign *= -1\n\t\tlat1, lat2 = lat2, lat1\n\t}\n\n\t// Make lat1 <= 0\n\tlatsign := sg(lat1 < 0)\n\tlat1 *= latsign\n\tlat2 *= latsign\n\n\t// Now we have\n\t//\n\t// 0 <= lon12 <= 180\n\t// -90 <= lat1 <= 0\n\t// lat1 <= lat2 <= -lat1\n\t//\n\t// lonsign, swapp, latsign register the transformation to bring the\n\t// coordinates to this canonical form. In all cases, false means no change was\n\t// made. We make these transformations so that there are few cases to\n\t// check, e.g., on verifying quadrants in atan2. In addition, this\n\t// enforces some symmetries in the results returned.\n\n\tvar phi, sbet1, cbet1, sbet2, cbet2, s12x, m12x float64\n\n\tphi = lat1\n\t// Ensure cbet1 = +epsilon at poles\n\tsbet1, cbet1 = math.Sincos(phi)\n\tsbet1 *= _f1\n\tif cbet1 == 0. && lat1 < 0 {\n\t\tcbet1 = _tiny\n\t}\n\tsbet1, cbet1 = sinCosNorm(sbet1, cbet1)\n\n\tphi = lat2\n\t// Ensure cbet2 = +epsilon at poles\n\tsbet2, cbet2 = math.Sincos(phi)\n\tsbet2 *= _f1\n\tif cbet2 == 0. {\n\t\tcbet2 = _tiny\n\t}\n\tsbet2, cbet2 = sinCosNorm(sbet2, cbet2)\n\n\t// If cbet1 < -sbet1, then cbet2 - cbet1 is a sensitive measure of the\n\t// |bet1| - |bet2|. Alternatively (cbet1 >= -sbet1), abs(sbet2) + sbet1 is\n\t// a better measure. This logic is used in assigning calp2 in Lambda12.\n\t// Sometimes these quantities vanish and in that case we force bet2 = +/-\n\t// bet1 exactly. 
An example where is is necessary is the inverse problem\n\t// 48.522876735459 0 -48.52287673545898293 179.599720456223079643\n\t// which failed with Visual Studio 10 (Release and Debug)\n\tif cbet1 < -sbet1 {\n\t\tif cbet2 == cbet1 {\n\t\t\tif sbet2 < 0 {\n\t\t\t\tsbet2 = sbet1\n\t\t\t} else {\n\t\t\t\tsbet2 = -sbet1\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif math.Abs(sbet2) == -sbet1 {\n\t\t\tcbet2 = cbet1\n\t\t}\n\t}\n\n\tlam12 := lon12\n\tslam12, clam12 := math.Sincos(lam12) // lon12 == 90 isn't interesting\n\n\tvar sig12, calp1, salp1, calp2, salp2, omg12 float64\n\t// index zero elements of these arrays are unused\n\tvar (\n\t\tC1a [_nC1 + 1]float64\n\t\tC2a [_nC2 + 1]float64\n\t\tC3a [_nC3]float64\n\t)\n\n\tmeridian := lat1 == -math.Pi/2 || slam12 == 0.0\n\n\tif meridian {\n\n\t\t// Endpoints are on a single full meridian, so the geodesic might lie on\n\t\t// a meridian.\n\n\t\tcalp1, salp2 = clam12, slam12 // Head to the target longitude\n\t\tcalp2, salp2 = 1, 0 // At the target we're heading north\n\n\t\t// tan(bet) = tan(sig) * cos(alp)\n\t\tssig1, csig1 := sbet1, calp1*cbet1\n\t\tssig2, csig2 := sbet2, calp2*cbet2\n\n\t\t// sig12 = sig2 - sig1\n\t\tsig12 = math.Atan2(max(csig1*ssig2-ssig1*csig2, 0), csig1*csig2+ssig1*ssig2)\n\n\t\ts12x, m12x, _ = lengths(_n, sig12, ssig1, csig1, ssig2, csig2, cbet1, cbet2, C1a[:], C2a[:])\n\n\t\t// Add the check for sig12 since zero length geodesics might yield m12 < 0. 
Test case was\n\t\t//\n\t\t// echo 20.001 0 20.001 0 | Geod -i\n\t\t//\n\t\t// In fact, we will have sig12 > pi/2 for meridional geodesic which is\n\t\t// not a shortest path.\n\t\tif sig12 < 1 || m12x >= 0 {\n\t\t\tm12x *= _a\n\t\t\ts12x *= _b\n\t\t} else {\n\t\t\t// m12 < 0, i.e., prolate and too close to anti-podal\n\t\t\tmeridian = false\n\t\t}\n\n\t}\n\n\tif !meridian && sbet1 == 0 && (_f <= 0 || lam12 <= math.Pi-_f*math.Pi) {\n\n\t\t// Geodesic runs along equator\n\t\tcalp1, salp1, calp2, salp2 = 0, 1, 0, 1\n\t\ts12x = _a * lam12\n\t\tm12x = _b * math.Sin(lam12/_f1)\n\t\tomg12 = lam12 / _f1\n\t\tsig12 = omg12\n\n\t} else if !meridian {\n\n\t\t// Now point1 and point2 belong within a hemisphere bounded by a\n\t\t// meridian and geodesic is neither meridional or equatorial.\n\n\t\t// Figure a starting point for Newton's method\n\t\tsig12, salp1, calp1, salp2, calp2 = inverseStart(sbet1, cbet1, sbet2, cbet2, lam12, salp2, calp2, C1a[:], C2a[:])\n\n\t\tif sig12 >= 0 {\n\n\t\t\t// Short lines (InverseStart sets salp2, calp2)\n\t\t\tw1 := math.Sqrt(1 - _e2*cbet1*cbet1)\n\t\t\ts12x = sig12 * _a * w1\n\t\t\tm12x = w1 * w1 * _a / _f1 * math.Sin(sig12*_f1/w1)\n\t\t\tomg12 = lam12 / w1\n\n\t\t} else {\n\n\t\t\t// Newton's method\n\t\t\tvar ssig1, csig1, ssig2, csig2, eps, ov float64\n\t\t\tnumit := 0\n\t\t\tfor trip := 0; numit < _maxit; numit++ {\n\t\t\t\tvar v, dv float64\n\n\t\t\t\tv, salp2, calp2, sig12, ssig1, csig1, ssig2, csig2, eps, omg12, dv = \n\t\t\t\t\tlambda12(sbet1, cbet1, sbet2, cbet2, salp1, calp1, trip < 1, C1a[:], C2a[:], C3a[:])\n\t\t\t\tv -= lam12\n\n\t\t\t\tif !(math.Abs(v) > _tiny) || !(trip < 1) {\n\t\t\t\t\tif !(math.Abs(v) <= max(_tol1, ov)) {\n\t\t\t\t\t\tnumit = _maxit\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tdalp1 := -v / dv\n\n\t\t\t\tsdalp1, cdalp1 := math.Sincos(dalp1)\n\t\t\t\tnsalp1 := salp1*cdalp1 + calp1*sdalp1\n\t\t\t\tcalp1 = calp1*cdalp1 - salp1*sdalp1\n\t\t\t\tsalp1 = max(0, nsalp1)\n\t\t\t\tsalp1, calp1 = 
sinCosNorm(salp1, calp1)\n\n\t\t\t\tif !(math.Abs(v) >= _tol1 && v*v >= ov*_tol0) {\n\t\t\t\t\ttrip++\n\t\t\t\t}\n\t\t\t\tov = math.Abs(v)\n\t\t\t}\n\n\t\t\tif numit >= _maxit {\n\t\t\t\treturn math.NaN(), math.NaN(), math.NaN() // Signal failure.\n\t\t\t}\n\n\t\t\ts12x, m12x, _ = lengths(eps, sig12, ssig1, csig1, ssig2, csig2, cbet1, cbet2, C1a[:], C2a[:])\n\n\t\t\tm12x *= _a\n\t\t\ts12x *= _b\n\t\t\tomg12 = lam12 - omg12\n\t\t}\n\t}\n\n\ts12 = 0 + s12x // Convert -0 to 0\n\n\t// Convert calp, salp to azimuth accounting for lonsign, swapp, latsign.\n\tif swapp < 0 {\n\t\tsalp1, salp2 = salp2, salp1\n\t\tcalp1, calp2 = calp2, calp1\n\t}\n\n\tsalp1 *= swapp * lonsign; calp1 *= swapp * latsign;\n\tsalp2 *= swapp * lonsign; calp2 *= swapp * latsign;\n\n\t// minus signs give range [-180, 180). 0- converts -0 to +0.\n\tazi1 = 0 - math.Atan2(-salp1, calp1)\n\tazi2 = 0 - math.Atan2(salp2, -calp2) // make it point backwards\n\n\treturn\n}",
"func convertTaskToResult(testID string, task *swarmingAPI.SwarmingRpcsTaskResult, req *pb.DeriveChromiumInvocationRequest) (*pb.TestResult, error) {\n\tresultStatus := getTaskResultStatus(task)\n\tret := &pb.TestResult{\n\t\t// Use ninja target as test_id.\n\t\tTestId: testID,\n\t\tExpected: resultStatus == pb.TestStatus_PASS,\n\t\tStatus: resultStatus,\n\t}\n\n\t// Add the swarming task's url and state to summaryHTML.\n\tbuf := &strings.Builder{}\n\terr := summaryTmpl.Execute(buf, map[string]interface{}{\n\t\t\"url\": fmt.Sprintf(\"https://%s/task?id=%s\", req.SwarmingTask.Hostname, req.SwarmingTask.Id),\n\t\t\"state\": task.State,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret.SummaryHtml = buf.String()\n\n\treturn ret, nil\n}",
"func (p *Point) Unit() *Point {\n\tlength := p.Dist(&Point{})\n\treturn &Point{p.X / length, p.Y / length}\n}",
"func geoToMercator(longitude, latitude float64) (float64, float64) {\n\t// bound to world coordinates\n\tif latitude > 80 {\n\t\tlatitude = 80\n\t} else if latitude < -80 {\n\t\tlatitude = -80\n\t}\n\n\torigin := 6378137 * math.Pi // 6378137 is WGS84 semi-major axis\n\tx := longitude * origin / 180\n\ty := math.Log(math.Tan((90+latitude)*math.Pi/360)) / (math.Pi / 180) * (origin / 180)\n\n\treturn x, y\n}",
"func main() {\n\n\tcvr := Converter{}\n\t// fmt.Println(cvr)\n\n\tft := fmt.Sprintf(\"%f\", float64(cvr.CentimeterToFeet(1)))\n\tfmt.Println(ft + \" ft\")\n\tcm := fmt.Sprintf(\"%f\", float64(cvr.FeetToCentimeter(1)))\n\tfmt.Println(cm + \" cm\")\n\n\tmins := fmt.Sprintf(\"%f\", float64(cvr.SecondsToMinutes(1)))\n\tfmt.Println(mins + \" minutes\")\n\tsecs := fmt.Sprintf(\"%f\", float64(cvr.MinutesToSeconds(1)))\n\tfmt.Println(secs + \" seeconds\")\n\n\tsec := fmt.Sprintf(\"%f\", float64(cvr.MillisecondsToSeconds(1)))\n\tfmt.Println(sec + \" seconds\")\n\tmsec := fmt.Sprintf(\"%f\", float64(cvr.SecondsToMilliseconds(1)))\n\tfmt.Println(msec + \" milliseconds\")\n\n\tfeh := fmt.Sprintf(\"%f\", float64(cvr.CelsiusToFahrenheit(0)))\n\tfmt.Println(feh + \" F\")\n\tcel := fmt.Sprintf(\"%f\", float64(cvr.FahrenheitToCelsius(32)))\n\tfmt.Println(cel + \" C\")\n\n\tdeg := fmt.Sprintf(\"%f\", float64(cvr.RadianToDegree(1)))\n\tfmt.Println(deg + \" degree\")\n\trad := fmt.Sprintf(\"%f\", float64(cvr.DegreeToRadian(1)))\n\tfmt.Println(rad + \" radian\")\n\n\tlbs := fmt.Sprintf(\"%f\", float64(cvr.KilogramToPounds(1)))\n\tfmt.Println(lbs + \" pounds\")\n\tkg := fmt.Sprintf(\"%f\", float64(cvr.PoundsToKilogram(1)))\n\tfmt.Println(kg + \" Kg\")\n\n\tlit := fmt.Sprintf(\"%f\", float64(cvr.GallonsToLiters(1)))\n\tfmt.Println(lit + \" L\")\n\tgal := fmt.Sprintf(\"%f\", float64(cvr.LitersToGallons(1)))\n\tfmt.Println(gal + \" gal\")\n\n}",
"func (p *G2Jac) ToProjFromJac() *G2Jac {\n\t// memalloc\n\tvar buf e2\n\tbuf.Square(&p.Z)\n\n\tp.X.Mul(&p.X, &p.Z)\n\tp.Z.Mul(&p.Z, &buf)\n\n\treturn p\n}",
"func Point_from_uniform(data []byte) (Point,error) { // TODO:check if it return valid point in test\n\tfor i, j := 0, len(data)-1; i < j; i, j = i+1, j-1 { // reversal of bytes\n\t\tdata[i], data[j] = data[j], data[i]\n\t}\n\tfor len(data)<32 { // TODO: Ouput error on len< 32 or add zeros\n\t\tdata = append(data,0)\n\t}\n\ttemp := Raw_point()\n\tif C.crypto_core_ed25519_from_uniform((*C.uchar)(&temp.Val[0]), (*C.uchar)(&data[0])) == 0 {\n\t\treturn temp,nil\n\t}\n\treturn temp,errors.New(\"from uniform op not working\")\n \n}",
"func (p *G2Jac) ToAffineFromJac(res *G2Affine) *G2Affine {\n\n\tvar bufs [3]e2\n\n\tif p.Z.IsZero() {\n\t\tres.X.SetZero()\n\t\tres.Y.SetZero()\n\t\treturn res\n\t}\n\n\tbufs[0].Inverse(&p.Z)\n\tbufs[2].Square(&bufs[0])\n\tbufs[1].Mul(&bufs[2], &bufs[0])\n\n\tres.Y.Mul(&p.Y, &bufs[1])\n\tres.X.Mul(&p.X, &bufs[2])\n\n\treturn res\n}",
"func (bc *SwissCoord) String() (fs string) {\n\n\tvar next float64\n\n\tif bc == nil {\n\t\treturn\n\t}\n\tfor i := 0; i < 2; i++ {\n\t\tfs += coordliterals[bc.CoordType][i]\n\t\tswitch i {\n\t\tcase 0:\n\t\t\tnext = bc.Easting\n\t\tcase 1:\n\t\t\tnext = bc.Northing\n\t\t}\n\n\t\ttmp := fmt.Sprintf(\"%f\", next)\n\t\tn := len(tmp)\n\t\tfor n > 0 && tmp[n-1] == '0' {\n\t\t\tn--\n\t\t}\n\t\tif n > 0 && tmp[n-1] == '.' {\n\t\t\tn--\n\t\t}\n\t\tfs = fs + tmp[:n]\n\t}\n\treturn\n}",
"func MeterToFeet(m Meter) Foot { return Foot(m / 3) }",
"func MapToOctUV(v vector.Vector3) vector.Vector2 {\n\t// Project the sphere onto the octahedron, and then onto the xy plane\n\t// vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n\tp := vector.\n\t\tNewVector2(v.X(), v.Y()).\n\t\tMultByConstant(1.0 / (math.Abs(v.X()) + math.Abs(v.Y()) + math.Abs(v.Z())))\n\tif v.Z() > 0 {\n\t\treturn p\n\t}\n\n\t// Reflect the folds of the lower hemisphere over the diagonals\n\t// return ((1.0 - math.Abs(p.yx)) * signNotZero(p))\n\treturn multVect(signNotZero(p), vector.NewVector2(1.0-math.Abs(p.Y()), 1.0-math.Abs(p.X())))\n}",
"func (b *Board) MovePiece(from, to Coord) (replaced Piece, err error) {\n\tif from.Col < 0 || from.Col >= Size || to.Col < 0 || to.Col >= Size {\n\t\treturn replaced, errors.New(\"Coordinate out of bounds\")\n\t}\n\n\tif b.Spaces[from.Row][from.Col].Rank == Empty {\n\t\treturn replaced, fmt.Errorf(\"No piece to move at row,col (%d,%d)\", from.Row, from.Col)\n\t}\n\n\treplaced = b.Spaces[to.Row][to.Col]\n\tb.Spaces[to.Row][to.Col] = b.Spaces[from.Row][from.Col]\n\tb.Spaces[from.Row][from.Col].Rank = Empty\n\n\t// Reset the en passant flags for all pieces (pawns) of this color\n\tfor i := 0; i < Size; i++ {\n\t\tfor j := 0; j < Size; j++ {\n\t\t\tif b.Spaces[i][j].Color == b.Spaces[to.Row][to.Col].Color {\n\t\t\t\tb.Spaces[i][j].EnPassantable = false\n\t\t\t}\n\t\t}\n\t}\n\n\t// If this piece is a pawn, see if the opponent could use\n\t// en passant on their next turn and set the flag.\n\tif b.Spaces[to.Row][to.Col].Rank == Pawn &&\n\t\t(to.Row-from.Row == 2 || to.Row-from.Row == -2) {\n\t\tb.Spaces[to.Row][to.Col].EnPassantable = true\n\t}\n\n\treturn\n}",
"func (p PointI) ToPoint2DCentered() Point2D {\n\treturn Point2D{float32(p.X) + 0.5, float32(p.Y) + 0.5}\n}",
"func (m mathUtil) DegreesToRadians(degrees float64) float64 {\n\treturn degrees * _d2r\n}",
"func (u utxo) convert() *bitcoin.UnspentTransactionOutput {\n\ttransactionHash, err := bitcoin.NewHashFromString(\n\t\tu.Outpoint.TransactionHash,\n\t\tbitcoin.ReversedByteOrder,\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn &bitcoin.UnspentTransactionOutput{\n\t\tOutpoint: &bitcoin.TransactionOutpoint{\n\t\t\tTransactionHash: transactionHash,\n\t\t\tOutputIndex: u.Outpoint.OutputIndex,\n\t\t},\n\t\tValue: u.Value,\n\t}\n}",
"func FToM(f Foot) Meter { return Meter(f * 0.3048) }",
"func MToF(m Meters) Feet {\n\treturn Feet(m * 3.2808)\n}",
"func PathingToEngo(p Point) engo.Point {\n\tx := p.X\n\ty := p.Y\n\treturn engo.Point{X: (float32(x) * discreteStep) + 4, Y: (float32(y) * discreteStep) + 4}\n}",
"func CVTPL2PD(mx, x operand.Op) { ctx.CVTPL2PD(mx, x) }",
"func saturationVapourPressure(tC float64) float64 { // [Pa]\n\t// August-Roche-Magnus approximation (from pg.38 of Lu, N. and J.W. Godt, 2013. Hillslope Hydrology and Stability. Cambridge University Press. 437pp.)\n\treturn 610.49 * math.Exp(17.625*tC/(tC+243.04)) // [Pa=N/m²] R²=1 for -30°C =< T =< 50°C\n}",
"func ConvertToECSPlacementStrategy(ecsParams *ECSParams) ([]*ecs.PlacementStrategy, error) {\n\tif ecsParams == nil {\n\t\treturn nil, nil\n\t}\n\tstrategies := ecsParams.RunParams.TaskPlacement.Strategies\n\n\toutput := []*ecs.PlacementStrategy{}\n\tfor _, strategy := range strategies {\n\t\tecsStrategy := &ecs.PlacementStrategy{\n\t\t\tType: aws.String(strategy.Type),\n\t\t}\n\t\tif strategy.Field != \"\" {\n\t\t\tecsStrategy.Field = aws.String(strategy.Field)\n\t\t}\n\t\toutput = append(output, ecsStrategy)\n\t}\n\n\treturn output, nil\n}",
"func latlon2coord(latlon string) (float64, float64) {\n\tslots := strings.Split(latlon, \",\")\n\tlat, err := strconv.ParseFloat(slots[0], 64)\n\tif err != nil {\n\t\tfmt.Println(\"Error converting latitude to float for:\", latlon)\n\t}\n\tlon, err := strconv.ParseFloat(slots[1], 64)\n\tif err != nil {\n\t\tfmt.Println(\"Error converting longitude to float for:\", latlon)\n\t}\n\treturn lat, lon\n}",
"func (pacif pacificTimeZones) Port_Moresby() string {return \"Pacific/Port_Moresby\" }",
"func grayToY(m *image.Gray, p image.Point, yBlock *block) {\n\tb := m.Bounds()\n\txmax := b.Max.X - 1\n\tymax := b.Max.Y - 1\n\tpix := m.Pix\n\tfor j := 0; j < 8; j++ {\n\t\tfor i := 0; i < 8; i++ {\n\t\t\tidx := m.PixOffset(min(p.X+i, xmax), min(p.Y+j, ymax))\n\t\t\tyBlock[8*j+i] = int32(pix[idx])\n\t\t}\n\t}\n}",
"func MOVUPS(mx, mx1 operand.Op) { ctx.MOVUPS(mx, mx1) }",
"func toFahrenheit(t Celsius) Fahrenheit {\n\n\tvar temp Fahrenheit\n\tvar tt float32\n\ttt = (float32(t) * 1.8) + float32(32)\n\ttemp = Fahrenheit(tt)\n\treturn temp\n\n}",
"func toRadians(input float64) float64 {\n\treturn input * math.Pi / 180\n}",
"func ToCodepoint(s string) (int64, error) {\n\ts = strings.ToUpper(s)\n\tvar base = 16\n\tswitch {\n\tcase strings.HasPrefix(s, \"0X\"), strings.HasPrefix(s, \"U+\"):\n\t\ts = s[2:]\n\tcase strings.HasPrefix(s, \"U\"):\n\t\ts = s[1:]\n\tcase strings.HasPrefix(s, \"0O\"):\n\t\ts = s[2:]\n\t\tbase = 8\n\tcase strings.HasPrefix(s, \"0B\"):\n\t\ts = s[2:]\n\t\tbase = 2\n\t}\n\treturn strconv.ParseInt(s, base, 64)\n}",
"func Fwd(proj *Proj, long, lat float64) (x, y float64, err error) {\n\tif !proj.opened {\n\t\treturn math.NaN(), math.NaN(), errors.New(\"projection is closed\")\n\t}\n\tx1 := C.double(long)\n\ty1 := C.double(lat)\n\te := C.fwd(proj.pj, &x1, &y1)\n\tif e != nil {\n\t\treturn math.NaN(), math.NaN(), errors.New(C.GoString(e))\n\t}\n\treturn float64(x1), float64(y1), nil\n}",
"func (crs LambertConformalConic2SP) ToXYZ(a, b, c float64, gs GeodeticSpheroid) (x, y, z float64) {\n\ts := spheroid(gs, crs.GeodeticDatum)\n\treturn Projection{\n\t\tGeodeticDatum: crs.GeodeticDatum,\n\t\tCoordinateProjection: crs,\n\t}.ToXYZ(a, b, c, s)\n}"
] | [
"0.58092505",
"0.575109",
"0.56481266",
"0.5609317",
"0.5605688",
"0.56055814",
"0.56046015",
"0.54018146",
"0.53192925",
"0.5307692",
"0.5004337",
"0.49508178",
"0.49239808",
"0.48059493",
"0.46658686",
"0.46323827",
"0.4604539",
"0.4589097",
"0.45698458",
"0.45200107",
"0.44337445",
"0.44183862",
"0.44155145",
"0.4399423",
"0.43968463",
"0.43808222",
"0.43762723",
"0.43747625",
"0.43557146",
"0.43342033",
"0.43263054",
"0.43022573",
"0.42878276",
"0.42869902",
"0.42866227",
"0.4283977",
"0.42702183",
"0.42685673",
"0.42475557",
"0.42253587",
"0.42148396",
"0.416444",
"0.4150967",
"0.41442463",
"0.41272098",
"0.41242933",
"0.4116554",
"0.40987828",
"0.4093577",
"0.40717757",
"0.40699244",
"0.40573102",
"0.40503708",
"0.4045565",
"0.4044787",
"0.40409178",
"0.40401956",
"0.4038626",
"0.4038123",
"0.40338382",
"0.40220395",
"0.40137622",
"0.4009941",
"0.39991823",
"0.39966062",
"0.398669",
"0.39830035",
"0.39800048",
"0.39777726",
"0.3971435",
"0.39658517",
"0.39626715",
"0.39597037",
"0.39571708",
"0.39509225",
"0.39454606",
"0.39440024",
"0.3943945",
"0.39417917",
"0.39398965",
"0.39375544",
"0.39360425",
"0.39356178",
"0.39331245",
"0.39223942",
"0.39213637",
"0.3919082",
"0.3913519",
"0.3911809",
"0.3908359",
"0.39031032",
"0.39027834",
"0.38971686",
"0.38968337",
"0.38875538",
"0.38851678",
"0.3882788",
"0.38702628",
"0.38652068",
"0.38643044"
] | 0.70762175 | 0 |
Read is a shortcut function to read and parse projects | Read — это функция-сокращение для чтения и парсинга проектов | func Read(path string) (*Project, error) {
var error error
var data []byte
data, error = ioutil.ReadFile(path)
if error != nil {
return nil, error
}
var project = &Project{}
error = json.Unmarshal(data, project)
if error != nil {
return nil, error
}
return project, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (a *Client) ReadProject(params *ReadProjectParams) (*ReadProjectOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewReadProjectParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"readProject\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/projects/{uuid}\",\n\t\tProducesMediaTypes: []string{\"application/release-manager.v1+json\"},\n\t\tConsumesMediaTypes: []string{\"application/release-manager.v1+json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &ReadProjectReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ReadProjectOK), nil\n\n}",
"func (c *RollbarAPIClient) ReadProject(projectID int) (*Project, error) {\n\tu := c.BaseURL + pathProjectRead\n\n\tl := log.With().\n\t\tInt(\"projectID\", projectID).\n\t\tLogger()\n\tl.Debug().Msg(\"Reading project from API\")\n\n\tresp, err := c.Resty.R().\n\t\tSetResult(projectResponse{}).\n\t\tSetError(ErrorResult{}).\n\t\tSetPathParams(map[string]string{\n\t\t\t\"projectID\": strconv.Itoa(projectID),\n\t\t}).\n\t\tGet(u)\n\tif err != nil {\n\t\tl.Err(err).Msg(\"Error reading project\")\n\t\treturn nil, err\n\t}\n\terr = errorFromResponse(resp)\n\tif err != nil {\n\t\tl.Err(err).Send()\n\t\treturn nil, err\n\t}\n\tpr := resp.Result().(*projectResponse)\n\t// FIXME: This is a workaround for a known bug in the API\n\t// https://github.com/rollbar/terraform-provider-rollbar/issues/23\n\tif pr.Result.Name == \"\" {\n\t\tl.Warn().Msg(\"Project not found\")\n\t\treturn nil, ErrNotFound\n\t}\n\tl.Debug().Msg(\"Project successfully read\")\n\treturn &pr.Result, nil\n\n}",
"func TestRead(t *testing.T) {\n\tgoodConfig := Read(\"./test/good-project\")\n\tif goodConfig.ContextRoot != \"something/else\" {\n\t\tt.Log(\"good project config is incorrect: \" + goodConfig.ContextRoot)\n\t\tt.Fail()\n\t}\n\tbadConfig := Read(\"./test/bad-project\")\n\tif badConfig.ContextRoot != \"./test/bad-project\" {\n\t\tt.Log(\"bad project config is incorrect: \" + badConfig.ContextRoot)\n\t\tt.Fail()\n\t}\n\tmissingConfig := Read(\"./test/missing-project\")\n\tif missingConfig.ContextRoot != \"./test/missing-project\" {\n\t\tt.Log(\"missing project config is incorrect: \" + missingConfig.ContextRoot)\n\t\tt.Fail()\n\t}\n}",
"func readProjectConfig(c context.Context, task proto.Message) error {\n\tprojectID := task.(*internal.ReadProjectConfigTask).ProjectId\n\n\tctx, cancel := context.WithTimeout(c, 150*time.Second)\n\tdefer cancel()\n\n\tjobs, err := globalCatalog.GetProjectJobs(ctx, projectID)\n\tif err != nil {\n\t\tlogging.WithError(err).Errorf(c, \"Failed to query for a list of jobs\")\n\t\treturn err\n\t}\n\n\tif err := globalEngine.UpdateProjectJobs(ctx, projectID, jobs); err != nil {\n\t\tlogging.WithError(err).Errorf(c, \"Failed to update some jobs\")\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (w WatWorkspace) Read(name string) ([]byte, error) {\n\tpath := filepath.Join(w.root, kWatDirName, name)\n\tcontents, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ioutil.ReadFile: %v (while reading file '%s')\", err, name)\n\t}\n\treturn contents, nil\n}",
"func NewReadProjectOK() *ReadProjectOK {\n\n\treturn &ReadProjectOK{}\n}",
"func (g *projectGateway) ReadProjectAction(params project.ReadProjectParams) middleware.Responder {\n\treadRsp, err := g.projectClient.Read(context.TODO(), &proto.ReadRequest{\n\t\tUuid: string(params.UUID),\n\t})\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn project.NewReadProjectInternalServerError()\n\t}\n\n\tif uint32(codes.OK) == readRsp.Status {\n\t\tfmt.Println(fmt.Sprintf(\"project.client read: ok. Id = %v\", params.UUID))\n\t} else if uint32(codes.NotFound) == readRsp.Status {\n\t\treturn project.NewReadProjectNotFound()\n\t}\n\n\tpr := &models.Project{\n\t\tUUID: strfmt.UUID(readRsp.Project.Uuid),\n\t\tName: readRsp.Project.Name,\n\t\tDescription: readRsp.Project.Description,\n\t}\n\n\treturn project.NewReadProjectOK().WithPayload(pr)\n}",
"func ReadProjectConfig(projPath string) (*ThrapConfig, error) {\n\tfilename := filepath.Join(projPath, consts.WorkDir, consts.ConfigFile)\n\treturn ReadThrapConfig(filename)\n}",
"func (s *workspaces) Read(ctx context.Context, organization, workspace string) (*Workspace, error) {\n\treturn s.ReadWithOptions(ctx, organization, workspace, nil)\n}",
"func Parse(r io.Reader) (*Project, error) {\n\terrMsg := fmt.Sprintf(\"Cannot read manifest %q\", manifestFile)\n\n\tdec := yaml.NewDecoder(r)\n\tp := &Project{}\n\tif err := dec.Decode(p); err != nil {\n\t\treturn nil, errors.Wrap(err, errMsg)\n\t}\n\n\tif err := p.Validate(); err != nil {\n\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"%s validation\", manifestFile))\n\t}\n\n\treturn p, nil\n}",
"func NewReadProjectNotFound() *ReadProjectNotFound {\n\n\treturn &ReadProjectNotFound{}\n}",
"func LoadProjects() error {\n\tprojects = make(map[string]Project)\n\tfile, err := os.Open(projectFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdecoder := json.NewDecoder(file)\n\tconfigs := []projectConfig{}\n\tif err = decoder.Decode(&configs); err != nil {\n\t\treturn err\n\t}\n\tfor _, config := range configs {\n\t\tprojects[config.Name] = &project{config, NewQueue()}\n\t}\n\tif len(projects) == 0 {\n\t\treturn errors.New(\"no projects defined\")\n\t}\n\treturn nil\n}",
"func readBUILD(ctx context.Context, workspaceRoot, buildFilePath string) (*build.File, error) {\n\tnormalizedG3Path, err := getAbsoluteBUILDPath(workspaceRoot, buildFilePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to resolve workspace relative path: %s\", err)\n\t}\n\tdata, err := platform.ReadFile(ctx, buildFilePath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn &build.File{Path: normalizedG3Path, Type: build.TypeBuild}, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"reading %q: %s\", buildFilePath, err)\n\t}\n\treturn build.ParseBuild(normalizedG3Path, data)\n}",
"func ReadAll() (p *Page, err error) {\n\tdCmn := config.SourceDir + sep + \"pages\" + sep + \"common\" + sep\n\tdOs := config.SourceDir + sep + \"pages\" + sep + config.OSName() + sep\n\tpaths := []string{dCmn, dOs}\n\tp = &Page{Name: \"Search All\"}\n\tp.Tips = make([]*Tip, 0)\n\tfor _, pt := range paths {\n\t\tfiles, err := ioutil.ReadDir(pt)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tfor _, f := range files {\n\t\t\tif strings.HasSuffix(f.Name(), \".md\") {\n\t\t\t\tpage, err := Read([]string{f.Name()[:len(f.Name())-3]})\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tp.Tips = append(p.Tips, page.Tips...)\n\t\t\t}\n\t\t}\n\t}\n\treturn p, nil\n}",
"func Read(file []string) ComposeFile {\n\tresult := ComposeFile{\n\t\tFile: file,\n\t}\n\tresult.Read()\n\treturn result\n}",
"func (p *broStructure) ParseProject(filter func(info os.FileInfo) bool) {\n\tfor _, dir := range p.listDirs() {\n\t\tfileset := token.NewFileSet()\n\t\tmapped, _ := parser.ParseDir(fileset, dir, filter, parser.AllErrors|parser.ParseComments)\n\t\tfor key, val := range mapped {\n\t\t\tp.packageFiles[key] = val\n\t\t}\n\t}\n}",
"func Test_Read(t *testing.T) {\n\tctx := context.Background()\n\tdatabase, err := db.ConnectDB(\"\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tProjectService := NewProjectServiceServer(database)\n\treq := &v1.ReadRequest{\n\t\tApi: apiVersion,\n\t\tId: 2,\n\t}\n\tres, _ := ProjectService.Read(ctx, req)\n\tfmt.Println(res)\n\tt.Log(\"Done\")\n\n}",
"func Read(path string) (*Package, error) {\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif fi.IsDir() {\n\t\treturn ReadDir(path)\n\t}\n\treturn ReadFile(path)\n}",
"func Read() (*Config, error) {\n\tcfg := &Config{}\n\n\tif err := env.Parse(cfg); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cfg, nil\n}",
"func (provider *FileProvider) ReadVersion(project string) (version model.Version, err error) {\n\tfilename := path.Join(provider.basePath, project)\n\n\tif _, err = os.Stat(provider.basePath); os.IsNotExist(err) {\n\t\treturn version, errors.Wrapf(err, \"Base directory %v does not exist\", provider.basePath)\n\t}\n\n\tif _, err := os.Stat(filename); os.IsNotExist(err) {\n\t\treturn version, errors.Wrapf(err, \"File %v does not exist\", filename)\n\t}\n\n\tversionData, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn version, errors.Wrapf(err, \"Failed to read version from file %v\", filename)\n\t}\n\n\tversion, err = model.FromVersionString(string(versionData))\n\tif err != nil {\n\t\treturn version, errors.Wrapf(err, \"Failed to convert version\")\n\t}\n\n\treturn\n}",
"func TestProject_ProjectRead_UsesNameIfIdNotSet(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tcoreClient := azdosdkmocks.NewMockCoreClient(ctrl)\n\tclients := &client.AggregatedClient{\n\t\tCoreClient: coreClient,\n\t\tCtx: context.Background(),\n\t}\n\n\tid := \"\"\n\tname := \"name\"\n\n\tcoreClient.\n\t\tEXPECT().\n\t\tGetProject(clients.Ctx, core.GetProjectArgs{\n\t\t\tProjectId: &name,\n\t\t\tIncludeCapabilities: converter.Bool(true),\n\t\t\tIncludeHistory: converter.Bool(false),\n\t\t}).\n\t\tTimes(1)\n\n\t_, _ = projectRead(clients, id, name)\n}",
"func (conf *BuildConfig) Read(path string) error {\n\tf, err := os.Open(filepath.Clean(path))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tcloseErr := f.Close()\n\t\tif closeErr != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}()\n\tval, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(val, &conf.Config)\n\treturn err\n}",
"func (c *GithubTokenController) Read(ctx *app.ReadGithubTokenContext) error {\n\t// GithubTokenController_Read: start_implement\n\n\t// Put your logic here\n\tbytes, err := ioutil.ReadFile(\"./.deploy/github_api.txt\")\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tvar token string = string(bytes)\n\n\t// GithubTokenController_Read: end_implement\n\tres := &app.GithubtokenMt{&token}\n\treturn ctx.OK(res)\n}",
"func Read() (*map[string]string, error) {\n\tuser, err := user.Current()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif _, err := os.Stat(user.HomeDir + \"/.testtrack/assignments.yml\"); os.IsNotExist(err) {\n\t\terr := os.MkdirAll(user.HomeDir+\"/.testtrack\", 0755)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terr = ioutil.WriteFile(user.HomeDir+\"/.testtrack/assignments.yml\", []byte(\"{}\"), 0644)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tassignmentsBytes, err := ioutil.ReadFile(user.HomeDir + \"/.testtrack/assignments.yml\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar assignments map[string]string\n\terr = yaml.Unmarshal(assignmentsBytes, &assignments)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &assignments, nil\n}",
"func Read(s beam.Scope, resourcePaths beam.PCollection) (beam.PCollection, beam.PCollection) {\n\ts = s.Scope(\"fhirio.Read\")\n\treturn read(s, resourcePaths, nil)\n}",
"func (s *workspaces) Readme(ctx context.Context, workspaceID string) (io.Reader, error) {\n\tif !validStringID(&workspaceID) {\n\t\treturn nil, ErrInvalidWorkspaceID\n\t}\n\n\tu := fmt.Sprintf(\"workspaces/%s?include=readme\", url.QueryEscape(workspaceID))\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := &workspaceWithReadme{}\n\terr = req.Do(ctx, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif r.Readme == nil {\n\t\treturn nil, nil\n\t}\n\n\treturn strings.NewReader(r.Readme.RawMarkdown), nil\n}",
"func Read(filename string) (Env, error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\treturn strictParse(f, false)\n}",
"func ReadBuildTree(r io.Reader, variableMap map[string]string, variableFiles []string) (*BuildTree, error) {\n\tfileContent, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn nil, stacktrace.Propagate(err, \"Cannot read build content\")\n\t}\n\treturn readBuildTree(\"\", fileContent, variableMap, variableFiles)\n}",
"func ReadPuzzle() (*Puzzle, string) {\n\tvar k *Puzzle\n\tvar title string\n\tvar err error\n\tflag.Parse()\n\targs := flag.Args()\n\tswitch len(args) {\n\tcase 0:\n\t\tk, title, err = sgtPuzzle()\n\tcase 1:\n\t\tfilename := args[0]\n\t\tvar f *os.File\n\t\tf, err = os.Open(filename)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdefer f.Close()\n\t\ttitle = path.Base(filename)\n\t\tk, err = Read(f)\n\tdefault:\n\t\tlog.Fatalf(\"Usage: %s [options] [file]\", os.Args[0])\n\t}\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn k, title\n}",
"func Read(filePath string) {\n\tif filePath == \"\" {\n\t\t_conf.VolonsPlatform = getEnv(\"VOLONS_PLATFORM\", _conf.VolonsPlatform)\n\t\t_conf.HTTPAddr = getEnv(\"VOLONS_HTTP\", _conf.HTTPAddr)\n\t\t_conf.Database = getEnv(\"VOLONS_DATABASE\", _conf.Database)\n\t\treturn\n\t}\n\n\tfile, err := os.Open(filePath)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tdecoder := json.NewDecoder(file)\n\tif err = decoder.Decode(&_conf); err != nil {\n\t\tpanic(err.Error())\n\t}\n}",
"func Read(filename string) (deps DependencyMap, err error) {\n\tdeps.Map = make(map[string]*Dependency)\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(data, &deps.Map)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// traverse map and look for empty version fields - provide a default if such found\n\tfor key := range deps.Map {\n\t\tval := deps.Map[key]\n\t\tif val.Version == \"\" {\n\t\t\tswitch val.Type {\n\t\t\tcase TypeGit, TypeGitClone:\n\t\t\t\tval.Version = \"master\"\n\t\t\tcase TypeHg:\n\t\t\t\tval.Version = \"tip\"\n\t\t\tcase TypeBzr:\n\t\t\t\tval.Version = \"trunk\"\n\t\t\tdefault:\n\t\t\t\tval.Version = \"\"\n\t\t\t}\n\t\t\tdeps.Map[key] = val\n\t\t}\n\t}\n\n\tfor name, d := range deps.Map {\n\t\terr := d.SetupVCS(name)\n\t\tif err != nil {\n\t\t\tdelete(deps.Map, name)\n\t\t}\n\n\t}\n\n\tdeps.Path = filename\n\n\treturn\n}",
"func (composeFile *ComposeFile) Read() {\n\tconsole.Debug(\"running docker-compose config [%s]\", composeFile.File)\n\n\t//Load Docker Compose yaml\n\tdcy, err := loadCompose(composeFile.File)\n\tif err != nil {\n\t\tconsole.ErrorExit(err, \"error loading docker-compose yaml files\")\n\t}\n\n\tconsole.Info(string(dcy))\n\t//unmarshal the yaml\n\tvar compose DockerCompose\n\terr = yaml.Unmarshal(dcy, &compose)\n\tif err != nil {\n\t\tconsole.ErrorExit(err, \"error unmarshalling docker-compose.yml\")\n\t}\n\n\tcomposeFile.Data = compose\n}",
"func Read(filename string) error {\n\treturn cfg.Read(filename)\n}",
"func Read(conf *Config, queue *task.Queue) []error {\n\tr := &configReader{queue: queue}\n\tr.read(conf)\n\treturn r.errors\n}",
"func Read(seq []string) (p *Page, err error) {\n\tpage := \"\"\n\tfor i, l := range seq {\n\t\tif len(seq)-1 == i {\n\t\t\tpage = page + l\n\t\t\tbreak\n\t\t} else {\n\t\t\tpage = page + l + \"-\"\n\t\t}\n\t}\n\t// Common pages are more, so we have better luck there\n\tp, err = queryCommon(page)\n\tif err != nil {\n\t\tp, err = queryOS(page)\n\t\tif err != nil {\n\t\t\treturn p, errors.New(\"This page (\" + page + \") doesn't exist yet!\\n\" +\n\t\t\t\t\"Submit new pages here: https://github.com/tldr-pages/tldr\")\n\t\t}\n\t}\n\treturn p, nil\n}",
"func (d *Domains) Read(rootDomain string) {\n\t// start disk.Read for sourceDisk and sourceAuto\n\tif d.configSource != sourceGitlab {\n\t\td.disk.Read(rootDomain)\n\t}\n}",
"func ReadAndParse(filepath string) (TrackData, error) {\n\tvar lines []string\n\tlines, _ = readLocalFile(filepath)\n\ttrackData, err := parseIGC(lines)\n\treturn trackData, err\n}",
"func Read(filename string) ([]Config, error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\treturn nil, nil\n}",
"func (p Project) GetData() (Project, error) {\n\tvar project Project\n\terr := DB.QueryRow(\"SELECT id, name, url, path, environment, branch, after_pull_script, after_deploy_script, rsync_option, create_time, update_time FROM project WHERE id = ?\", p.ID).Scan(&project.ID, &project.Name, &project.URL, &project.Path, &project.Environment, &project.Branch, &project.AfterPullScript, &project.AfterDeployScript, &project.RsyncOption, &project.CreateTime, &project.UpdateTime)\n\tif err != nil {\n\t\treturn project, errors.New(\"数据查询失败\")\n\t}\n\treturn project, nil\n}",
"func LoadProject(name string) (*Project, error) {\n p := new(Project)\n err := Mongo.GetOne(\"project\", bson.M{\"name\": name}, p)\n return p, err\n}",
"func ReadSolution(s dbr.SessionRunner, taskIDs, authorIDs []int64, approvedOnly bool, order SolutionOrder) ([]*SolutionModel, error) {\n\tq := s.Select(\n\t\t\"p.id AS id\", \"p.text AS text\", \"p.rating AS rating\",\n\t\t\"p.created_at AS created_at \", \"p.updated_at AS updated_at\",\n\t\t\"p.employee_id AS employee_id\",\n\t\t\"s.task_id AS task_id\", \"s.is_approved AS is_approved\",\n\t).From(dbr.I(\"employee_post\").As(\"p\"))\n\tq.Join(dbr.I(\"solution\").As(\"s\"), \"p.id=s.post_id\")\n\tq.Where(dbr.Neq(\"p.status\", PostStatusDeleted))\n\tif len(taskIDs) != 0 {\n\t\tq.Where(dbr.Eq(\"s.task_id\", taskIDs))\n\t}\n\tif len(authorIDs) != 0 {\n\t\tq.Where(dbr.Eq(\"p.employee_id\", authorIDs))\n\t}\n\tif approvedOnly {\n\t\tq.Where(dbr.Eq(\"s.is_approved\", true))\n\t}\n\n\tif order == ApprovedNewestSolutionOrder {\n\t\tq.OrderDesc(\"s.is_approved\")\n\t}\n\tq.OrderDesc(\"p.created_at\")\n\n\tres := make([]*SolutionModel, 0)\n\t_, err := q.Load(&res)\n\treturn res, err\n}",
"func Read(ctx context.Context, path string, urls URLs, cb Callback) error {\n\teg, ctx := errgroup.WithContext(ctx)\n\tread(ctx, eg, path, urls, cb)\n\n\treturn eg.Wait()\n}",
"func Read(file string) (*Config, error) {\n\treturn readKNFFile(file)\n}",
"func (f *Finding) ReadFinding(m *pubsub.Message) error {\n\tif err := json.Unmarshal(m.Data, &f.sd); err != nil {\n\t\tlog.Println(\"failed to read stackdriver finding\")\n\t\treturn ErrUnmarshal\n\t}\n\n\tif f.sd.LogName == \"\" {\n\t\treturn ErrParsing\n\t}\n\n\tif !strings.HasSuffix(f.sd.LogName, etdFindingSuffix) {\n\t\treturn ErrParsing\n\t}\n\n\tif err := json.Unmarshal(m.Data, &f.etd); err != nil {\n\t\treturn ErrUnmarshal\n\t}\n\n\tswitch f.etd.JSONPayload.DetectionCategory.SubRuleName {\n\t// case for external user granted as project editor.\n\tcase \"external_member_added_to_policy\":\n\t\tif err := json.Unmarshal(m.Data, &f.ext); err != nil {\n\t\t\tlog.Println(\"failed to read ext\")\n\t\t\treturn ErrUnmarshal\n\t\t}\n\t// case for external user granted as project owner.\n\tcase \"external_member_invited_to_policy\":\n\t\tif err := json.Unmarshal(m.Data, &f.ext); err != nil {\n\t\t\tfmt.Println(\"fil2\")\n\t\t\treturn ErrUnmarshal\n\t\t}\n\t}\n\n\tswitch f.etd.JSONPayload.DetectionCategory.RuleName {\n\tcase \"bad_ip\":\n\t\tfallthrough\n\tcase \"bad_domain\":\n\t\tif err := json.Unmarshal(m.Data, &f.badNetwork); err != nil {\n\t\t\treturn ErrUnmarshal\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (m *CargoMetadata) Read() error {\n\tcmd := exec.Command(\"cargo\", \"metadata\", \"--quiet\", \"--format-version\", \"1\")\n\tstdoutStderr, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tif len(stdoutStderr) > 0 {\n\t\t\treturn fmt.Errorf(\"%s\", strings.TrimSpace(string(stdoutStderr)))\n\t\t}\n\t\treturn err\n\t}\n\tr := bytes.NewReader(stdoutStderr)\n\tif err := json.NewDecoder(r).Decode(&m); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func parseSlnFile(slnFile string) ([]string, error) {\n\tvar err error\n\tif projectRegExp == nil {\n\t\tprojectRegExp, err = utils.GetRegExp(`Project\\(\"(.*\\..*proj)`)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tcontent, err := os.ReadFile(slnFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tprojects := projectRegExp.FindAllString(string(content), -1)\n\treturn projects, nil\n}",
"func Read(c *gin.Context) {\n\tvar (\n\t\tp getEnvironments\n\t)\n\tid := c.Params.ByName(\"environmentId\")\n\tif id == \"\" {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": \"annotationId is missing in uri\"})\n\t\treturn\n\t}\n\tvID, err := strconv.Atoi(id)\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"Error occured while converting string to int\")\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"error\": \"Internal Server Error\"})\n\t\treturn\n\t}\n\n\tp.EnvironmentID = vID\n\n\tresult, err := p.read()\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"Error occured while performing db query\")\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"error\": \"Internal Server Error\"})\n\t\treturn\n\t}\n\n\tif len(result) == 0 {\n\t\tc.AbortWithStatus(204)\n\t} else {\n\t\tc.JSON(http.StatusOK, result)\n\t}\n}",
"func (r *Reader) Read() (*TaskList, error) {\n\n\trawTask, err := r.buffer.ReadString('\\n')\n\tif err == io.EOF {\n\t\treturn &r.tasks, err\n\t}\n\tutils.Check(err)\n\n\t// Set the split function for a Scanner that returns each line of text,\n\t// stripped of any trailing end-of-line marker\n\tscanner := bufio.NewScanner(strings.NewReader(rawTask))\n\tscanner.Split(bufio.ScanLines)\n\n\tfor scanner.Scan() {\n\t\trawTask = scanner.Text()\n\t\t// skip blank lines and comments\n\t\tif rawTask == \"\" || (r.Comment != 0 && strings.HasPrefix(rawTask, \"#\")) {\n\t\t\tfmt.Println(\"****\")\n\t\t\tbreak\n\t\t}\n\t\t//fmt.Printf(\"task: %s (test)\\n\", rawTask)\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"reading input:\", err)\n\t}\n\n\treturn &r.tasks, nil\n}",
"func readConfig(projectHome string) (map[string]string, error) {\n\tvars := make(map[string]string)\n\tsep := \"/\"\n\tif strings.HasSuffix(projectHome, \"/\") {\n\t\tsep = \"\"\n\t}\n\tfileName := projectHome + sep + \"config.yaml\"\n\tconfigFile, err := os.Open(fileName)\n\tif err != nil {\n\t\tprojectHome = \".\"\n\t\tlog.Printf(\"config.readConfig: setting projectHome to: '%s'\\n\",\n\t\t\tprojectHome)\n\t\tfileName = projectHome + \"/config.yaml\"\n\t\tconfigFile, err = os.Open(fileName)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"error opening config.yaml: %v\", err)\n\t\t\treturn map[string]string{}, err\n\t\t}\n\t}\n\tdefer configFile.Close()\n\treader := bufio.NewReader(configFile)\n\teof := false\n\tfor !eof {\n\t\tvar line string\n\t\tline, err = reader.ReadString('\\n')\n\t\tif err == io.EOF {\n\t\t\terr = nil\n\t\t\teof = true\n\t\t} else if err != nil {\n\t\t\terr := fmt.Errorf(\"error reading config file: %v\", err)\n\t\t\treturn map[string]string{}, err\n\t\t}\n\t\t// Ignore comments\n\t\tif strings.HasPrefix(line, \"#\") {\n\t\t\tcontinue\n\t\t}\n\t\ti := strings.Index(line, \":\")\n\t\tif i > 0 {\n\t\t\tvarName := line[:i]\n\t\t\tval := strings.TrimSpace(line[i+1:])\n\t\t\tvars[varName] = val\n\t\t}\n\t}\n\treturn vars, nil\n}",
"func Read(vm *jsonnet.VM, path string) ([]runtime.Object, error) {\n\text := filepath.Ext(path)\n\tif ext == \".json\" {\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer f.Close()\n\t\treturn jsonReader(f)\n\t} else if ext == \".yaml\" {\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer f.Close()\n\t\treturn yamlReader(f)\n\t} else if ext == \".jsonnet\" {\n\t\treturn jsonnetReader(vm, path)\n\t}\n\n\treturn nil, fmt.Errorf(\"Unknown file extension: %s\", path)\n}",
"func GetProject(w http.ResponseWriter, r *http.Request) {\n\t// Get item params\n\t// Perform get, db n' stuff.\n\t// render.JSON(w, r)\n}",
"func (s *Site) read() error {\n\n\t// Lists of templates (_layouts, _includes) that we find that\n\t// will need to be compiled\n\tlayouts := []string{}\n\n\t// func to walk the jekyll directory structure\n\twalker := func(fn string, fi os.FileInfo, err error) error {\n\t\trel, _ := filepath.Rel(s.Src, fn)\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\treturn nil\n\n\t\tcase fi.IsDir() && isHiddenOrTemp(fn):\n\t\t\treturn filepath.SkipDir\n\n\t\t// Ignore directories\n\t\tcase fi.IsDir():\n\t\t\treturn nil\n\n\t\t// Ignore Hidden or Temp files\n\t\t// (starting with . or ending with ~)\n\t\tcase isHiddenOrTemp(rel):\n\t\t\treturn nil\n\n\t\t// Parse Templates\n\t\tcase isTemplate(rel):\n\t\t\tlayouts = append(layouts, fn)\n\n\t\t// Parse Posts\n\t\tcase isPost(rel):\n\t\t\tlogf(MsgParsingPost, rel)\n\t\t\tpermalink := s.Conf.GetString(\"permalink\")\n\t\t\tif permalink == \"\" {\n\t\t\t\t// According to Jekyll documentation 'date' is the\n\t\t\t\t// default permalink\n\t\t\t\tpermalink = \"date\"\n\t\t\t}\n\n\t\t\tpost, err := ParsePost(rel, permalink)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// TODO: this is a hack to get the posts in rev chronological order\n\t\t\ts.posts = append([]Page{post}, s.posts...) //s.posts, post)\n\n\t\t// Parse Pages\n\t\tcase isPage(rel):\n\t\t\tlogf(MsgParsingPage, rel)\n\t\t\tpage, err := ParsePage(rel)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ts.pages = append(s.pages, page)\n\n\t\t// Move static files, no processing required\n\t\tcase isStatic(rel):\n\t\t\ts.files = append(s.files, rel)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Walk the diretory recursively to get a list of all posts,\n\t// pages, templates and static files.\n\terr := filepath.Walk(s.Src, walker)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Compile all templates found, if any.\n\tif len(layouts) > 0 {\n\t\ts.templ, err = template.New(\"layouts\").Funcs(funcMap).ParseFiles(layouts...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Add the posts, timestamp, etc to the Site Params\n\ts.Conf.Set(\"posts\", s.posts)\n\ts.Conf.Set(\"time\", time.Now())\n\ts.calculateTags()\n\ts.calculateCategories()\n\n\treturn nil\n}",
"func Read(n string, skip bool) ([][]string, error) {\n\tp, err := filepath.Abs(n)\n\tif err != nil {\n\t\treturn nil, &Error{Op: ReadOp, File: n, Err: ErrPathNotExists}\n\t}\n\tf, err := excelize.OpenFile(p)\n\tif err != nil {\n\t\treturn nil, &Error{Op: ReadOp, File: n, Err: ErrFileNotExists}\n\t}\n\ts := f.GetSheetName(1)\n\tif s == \"\" {\n\t\treturn nil, &Error{Op: ReadOp, File: n, Err: ErrSheetNotExists}\n\t}\n\trows, err := f.Rows(s)\n\tif err != nil {\n\t\treturn nil, &Error{Op: ReadOp, File: n, Err: ErrRows}\n\t}\n\t// Skip heading\n\tif skip {\n\t\trows.Next()\n\t}\n\tdata := make([][]string, 0)\n\tfor rows.Next() {\n\t\tdata = append(data, rows.Columns())\n\t}\n\treturn data, nil\n}",
"func (c *Config) Read(path string) error {\n\tdata, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn errors.New(\"reading config \" + path + \", \" + err.Error())\n\t}\n\n\terr = json.Unmarshal(data, c)\n\tif err != nil {\n\t\treturn errors.New(\"parsing config \" + path + \", \" + err.Error())\n\t}\n\n\tabsolutePath, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn errors.New(\"error in get absolute path\")\n\t}\n\n\tparentDir := filepath.Dir(absolutePath)\n\tc.DeploymentTemplatePath = parentDir + \"/\" + c.DeploymentTemplatePath\n\n\tdata, err = ioutil.ReadFile(c.DeploymentTemplatePath)\n\tif err != nil {\n\t\treturn errors.New(\"reading deployment template \" + c.DeploymentTemplatePath + \", \" + err.Error())\n\t}\n\tc.DeploymentTemplate = string(data)\n\t//TODO validate\n\tlogger.Infof(\"config listing\")\n\tlogger.Infof(\"deployment template path: %s\", c.DeploymentTemplatePath)\n\tlogger.Infof(\"wait for creating timeout: %d\", c.WaitForCreatingTimeout)\n\tlogger.Infof(\"pod lifetime %d\", c.PodLifetime)\n\tlogger.Infof(\"listen: %s\", c.Listen)\n\tlogger.Infof(\"namespace: %s\", c.Namespace)\n\treturn nil\n}",
"func GetProject(path string) (*cfg.Project, error) {\n\traw, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, errors.New(\"project config file doesn't exist - try running 'inertia init'\")\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tvar project cfg.Project\n\tif err = toml.Unmarshal(raw, &project); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &project, nil\n}",
"func Read() Config {\n\tfile, err := os.Open(\"config.json\")\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not read config: %v\", err)\n\t}\n\tdefer file.Close()\n\tdecoder := json.NewDecoder(file)\n\tconfig := Config{}\n\terr = decoder.Decode(&config)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not read config: %v\", err)\n\t}\n\treturn config\n}",
"func Read(config *config.Config, path string) (*Batch, error) {\n\treturn ReadFs(config, path, afero.NewOsFs())\n}",
"func LoadProject(wd string) (*Project, error) {\n\tfile, err := os.OpenFile(\n\t\tfilepath.Join(wd, projectFileName),\n\t\tos.O_RDONLY,\n\t\tprojectFilePerm)\n\n\tif os.IsNotExist(err) {\n\t\treturn nil, ErrNoProject\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\tvar out Project\n\tif err := yaml.NewDecoder(file).Decode(&out); err != nil {\n\t\treturn nil, err\n\t}\n\tout.Location = wd\n\n\tout.Mod, err = deps.ParseModule(filepath.Join(wd, \"go.mod\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &out, nil\n}",
"func Read() []string {\n\treturn content.Items\n}",
"func (c *ConfigDB) Read() {\n\tif _, err := toml.DecodeFile(\"config.toml\", &c); err != nil {\n\t\tlog.Fatal(\"[ERROR CONNECTION]\", err)\n\t}\n}",
"func (i *Index) Get(tags []string, all bool) ([]string, error) {\n\tswitch {\n\tcase all:\n\t\terr := i.clean()\n\t\treturn i.projects(), err\n\tcase len(tags) > 0:\n\t\tif err := i.clean(); err != nil {\n\t\t\treturn []string{}, err\n\t\t}\n\t\tprojectsWithTags := []string{}\n\t\tfor _, p := range i.projects() {\n\t\t\tfound, err := i.hasTags(p, tags)\n\t\t\tif err != nil {\n\t\t\t\treturn []string{}, nil\n\t\t\t}\n\t\t\tif found {\n\t\t\t\tprojectsWithTags = append(projectsWithTags, p)\n\t\t\t}\n\t\t}\n\t\tsort.Strings(projectsWithTags)\n\t\treturn projectsWithTags, nil\n\tdefault:\n\t\tcurProjPath, _, err := Paths()\n\t\tif err != nil {\n\t\t\treturn []string{}, err\n\t\t}\n\t\tif _, ok := i.Projects[curProjPath]; !ok {\n\t\t\ti.add(curProjPath)\n\t\t\tif err := i.save(); err != nil {\n\t\t\t\treturn []string{}, err\n\t\t\t}\n\t\t}\n\t\treturn []string{curProjPath}, nil\n\t}\n}",
"func (f *File) Read() error {\n\tf2, err := os.Open(f.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f2.Close()\n\tif err := json.NewDecoder(f2).Decode(&f.Groups); err != nil {\n\t\treturn err\n\t}\n\tfor _, g := range f.Groups {\n\t\tif err := json.Unmarshal(g.RawSchema, &g.Schema); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func Test_ReadAll(t *testing.T) {\n\tctx := context.Background()\n\tdatabase, err := db.ConnectDB(\"\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tProjectService := NewProjectServiceServer(database)\n\treq := &v1.ReadAllRequest{\n\t\tApi: apiVersion,\n\t}\n\tres, _ := ProjectService.ReadAll(ctx, req)\n\tfmt.Println(res)\n\tt.Log(\"Done\")\n\n}",
"func (p Project) GetData() (Project, error) {\n\tvar project Project\n\terr := sq.\n\t\tSelect(\"id, group_id, name, url, path, environment, branch, after_pull_script, after_deploy_script, rsync_option, create_time, update_time\").\n\t\tFrom(projectTable).\n\t\tWhere(sq.Eq{\"id\": p.ID}).\n\t\tOrderBy(\"id DESC\").\n\t\tRunWith(DB).\n\t\tQueryRow().\n\t\tScan(\n\t\t\t&project.ID,\n\t\t\t&project.GroupID,\n\t\t\t&project.Name,\n\t\t\t&project.URL,\n\t\t\t&project.Path,\n\t\t\t&project.Environment,\n\t\t\t&project.Branch,\n\t\t\t&project.AfterPullScript,\n\t\t\t&project.AfterDeployScript,\n\t\t\t&project.RsyncOption,\n\t\t\t&project.CreateTime,\n\t\t\t&project.UpdateTime)\n\tif err != nil {\n\t\treturn project, err\n\t}\n\treturn project, nil\n}",
"func (c *watchImpl) Read(data interface{}) error {\n\tcontent, err := ioutil.ReadFile(c.filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch path.Ext(c.filename) {\n\tcase \".json\":\n\t\terr = json.Unmarshal(content, data)\n\tcase \".yaml\", \".yml\":\n\t\tfallthrough\n\tdefault:\n\t\treturn yaml.Unmarshal(content, data)\n\t}\n\n\treturn err\n}",
"func Full(name string) (*Project, error) {\n\tp, err := FromName(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = p.readReadme()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = p.readDeployEnvs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp.Lock = locks.Check(p.Name, time.Now())\n\n\tdefaultBranch, err := p.GetCachedDefaultBranch()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp.DefaultBranch = defaultBranch\n\n\treturn p, nil\n}",
"func GetProjects(w http.ResponseWriter, r *http.Request, auth string) []Project {\n\tvar projects []Project\n\tprojectFileName := auth + globals.PROJIDFILE\n\t//First see if project already exist\n\tstatus, filepro := caching.ShouldFileCache(projectFileName, globals.PROJIDDIR)\n\tdefer filepro.Close()\n\tif status == globals.Error || status == globals.DirFail {\n\t\thttp.Error(w, \"Failed to create a file\", http.StatusInternalServerError)\n\t\treturn nil\n\t}\n\tif status == globals.Exist {\n\t\t//The file exist\n\t\t//We read from file\n\t\terr := caching.ReadFile(filepro, &projects)\n\t\tif err != nil {\n\t\t\terrmsg := \"The Failed Reading from file with error\" + err.Error()\n\t\t\thttp.Error(w, errmsg, http.StatusInternalServerError)\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\t//Else we need to query to get it\n\t\tfor i := 0; i < globals.MAXPAGE; i++ {\n\t\t\tvar subProj []Project\n\t\t\tquery := globals.GITAPI + globals.PROJQ + globals.PAGEQ + strconv.Itoa(i+1)\n\t\t\terr := apiGetCall(w, query, auth, &subProj)\n\t\t\tif err != nil {\n\t\t\t\t//The API call has failed\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t//When it's empty we no longer need to do calls\n\t\t\tif len(subProj) == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tprojects = append(projects, subProj...)\n\t\t}\n\t\tcaching.CacheStruct(filepro, projects)\n\n\t}\n\treturn projects\n}",
"func ReadImports(proj *report.Project, filePath string) error {\n\timports := make(map[string]int)\n\n\terr := filepath.Walk(filePath,\n\t\tfunc(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif info.IsDir() {\n\t\t\t\tfset := token.NewFileSet()\n\n\t\t\t\tprojAST, err := parser.ParseDir(fset, path, nil, parser.ImportsOnly)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(\"Something went wrong\")\n\t\t\t\t}\n\n\t\t\t\tfor _, v := range projAST {\n\t\t\t\t\tfor _, vv := range v.Files {\n\t\t\t\t\t\tfor _, i := range vv.Imports {\n\t\t\t\t\t\t\ti.Path.Value = strings.Replace(i.Path.Value, \"\\\"\", \"\", -1)\n\t\t\t\t\t\t\timports[i.Path.Value] = 1 // save in map, to skip duplicates\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tfor in := range imports {\n\t\tproj.InsertImport(in, \"n/a\", \"\", \"\", true)\n\t}\n\treturn nil\n}",
"func Read(name string, lpkgs []*pkg.LocalPackage, cfg *syscfg.Cfg,\n\tgetMapCb GetMapFn) ExtCmdCfg {\n\n\tecfg := ExtCmdCfg{\n\t\tName: name,\n\t}\n\n\tfor _, lpkg := range lpkgs {\n\t\tecfg.readOnePkg(lpkg, cfg, getMapCb)\n\t}\n\n\tstage.SortStageFuncs(ecfg.StageFuncs, ecfg.Name)\n\n\treturn ecfg\n}",
"func (t Task) Read(extension string) (string, error) {\n\ttask, err := ioutil.ReadFile(t.Path + extension)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(task), nil\n}",
"func OpenAndRead(fileName string, data models.MainPages)[]string{\n\tfile,err:=os.Open(fileName)\n\tif err!=nil{\n\t\t_ = fmt.Errorf(\"Something went wrong %s\\n\", err)\n\t}\n\tdefer file.Close()\n\tbyteData,_:=ioutil.ReadAll(file)\n\tbyteData=byteData[:len(byteData)-2]\n\tbyteData = append(byteData, 93)\n\t_ = json.Unmarshal(byteData, &data)\n\turlsSlice:=make([]string,0)\n\tfor _,i:=range data{\n\t\tfor _,j:=range i.Links{\n\t\t\turlsSlice = append(urlsSlice, j)\n\t\t}\n\t}\n\treturn urlsSlice\n}",
"func (feeder *FileFeed) Read(files []string) ([]entity.Input, error) {\n\tinputs := make([]entity.Input, len(files))\n\tfor i, file := range files {\n\t\tlogger.Info(fmt.Sprintf(\"reading fixture: %s\", file))\n\t\tf, err := os.Open(file)\n\t\tif err != nil {\n\t\t\treturn inputs, err\n\t\t}\n\t\text := filepath.Ext(file)\n\t\tinput := entity.Input{\n\t\t\tFilename: extractFilename(file),\n\t\t\tType: ext,\n\t\t\tData: f,\n\t\t}\n\t\tinputs[i] = input\n\t}\n\treturn inputs, nil\n}",
"func (p *Project) load() (err error) {\n\tpPtr, err := readProjectWithId(p.Id)\n\tif err != nil {\n\t\tlog.Debug(err)\n\t}\n\t*p = *pPtr\n\treturn\n}",
"func ReadDetailsFile(readme, masterKey string) helpers.FolderDetailsJSON {\n\tlog.Debug(\"Readme exists\")\n\tvar resultData helpers.FolderDetailsJSON\n\tfile, err := os.Open(readme)\n\thelpers.Check(err, true, \"Reading readme\", helpers.Trace())\n\tbyteValue, _ := ioutil.ReadAll(file)\n\tjson.Unmarshal([]byte(byteValue), &resultData)\n\t//TODO need to validate some of these fields\n\tvar data helpers.FolderDetailsJSON\n\tdata.Title, err = auth.Decrypt(resultData.Title, masterKey, true)\n\tif err != nil {\n\t\t//could not decrypt\n\t\tdata.Title = \"Could not decrypt\"\n\t}\n\tdata.Description, err = auth.Decrypt(resultData.Description, masterKey, true)\n\tif err != nil {\n\t\t//could not decrypt\n\t\tdata.Description = \"Could not decrypt - need to be regenerated\"\n\t}\n\tdata.LastModified = resultData.LastModified\n\t//TODO need to account for file sha later\n\treturn data\n}",
"func ReadConfig() Info {\n\treturn databases\n}",
"func ReadProgram(filePath string) (interface{}, error) {\n\text := strings.ToLower(path.Ext(filePath))\n\n\t// ZIP archive\n\tif ext == \".zip\" {\n\t\treturn readZIP(filePath)\n\t}\n\n\tdata, err := ioutil.ReadFile(filePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar format *FormatInfo\n\tformat, err = detectFormat(filePath, ENCAPSULATION_NONE, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif format.Format == FORMAT_TAP {\n\t\treturn NewTAP(data)\n\t}\n\n\treturn SnapshotData(data).Decode(format.Format)\n}",
"func Read() {\n\tlog.Info(InProgress, \"Reading Settings...\")\n\tjsonstring, err := ioutil.ReadFile(DataPath + \"/settings.json\")\n\tif err == nil {\n\t\tdata := make(map[string]interface{})\n\t\terr := json.Unmarshal(jsonstring, &data)\n\t\tif err == nil {\n\t\t\tRemoteAddress, _ = data[\"RemoteAddress\"].(string)\n\n\t\t\tLocalAddress, _ = data[\"LocalAddress\"].(string)\n\n\t\t\ttmp, ok := data[\"DiskSpace\"].(float64)\n\t\t\tif ok {\n\t\t\t\tDiskSpace = int(tmp)\n\t\t\t}\n\n\t\t\ttmp, ok = data[\"MaxWorkers\"].(float64)\n\t\t\tif ok {\n\t\t\t\tMaxWorkers = int(tmp)\n\t\t\t}\n\n\t\t\ttmp, ok = data[\"QueueMaxLength\"].(float64)\n\t\t\tif ok {\n\t\t\t\tQueueMaxLength = int(tmp)\n\t\t\t}\n\n\t\t\ttmp, ok = data[\"MessageMaxSize\"].(float64)\n\t\t\tif ok {\n\t\t\t\tMessageMaxSize = int(tmp)\n\t\t\t}\n\n\t\t\ttmp, ok = data[\"MessageMinCheckDelay\"].(float64)\n\t\t\tif ok {\n\t\t\t\tMessageMinCheckDelay = int(tmp)\n\t\t\t}\n\n\t\t\ttmp, ok = data[\"MessageMaxStoreTime\"].(float64)\n\t\t\tif ok {\n\t\t\t\tMessageMaxStoreTime = int(tmp)\n\t\t\t}\n\n\t\t\tColorizedLogs, _ = data[\"ColorizedLogs\"].(bool)\n\t\t} else {\n\t\t\tlog.Warn(SettingsReadError, \"Failed to read settings from file (\"+err.Error()+\"). Falling back to defaults or using command line arguments...\")\n\t\t}\n\t}\n\n\tparseCommandLineArgs()\n\tlogger.ColorizedLogs = ColorizedLogs\n\tlog.Info(OK, \"Successfully read Settings.\")\n\tWrite()\n}",
"func Read(t *testing.T, paths ...string) []byte {\n\tt.Helper()\n\n\tpath := filepath.Join(paths...)\n\tfile, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tt.Fatalf(\"cannot read %v: %v\", path, err)\n\t}\n\treturn file\n}",
"func Read(name string) (map[string]interface{}, error) {\n\tpth := \"./data/\" + name + \".json\"\n\tfile, err := ioutil.ReadFile(pth)\n\tif err != nil {\n\t\tlog.Printf(\"Read() %s err: %v\\n\", name, err)\n\t\treturn nil, err\n\t}\n\tdata := map[string]interface{}{}\n\terr = json.Unmarshal(file, &data)\n\tif err != nil {\n\t\tlog.Printf(\"Read() %s err: %v\\n\", name, err)\n\t\treturn nil, err\n\t}\n\treturn data, nil\n}",
"func read(fileName string) (*Configuration, error) {\n\tif fileName == \"\" {\n\t\treturn Config, fmt.Errorf(\"Empty file name\")\n\t}\n\tfile, err := os.Open(fileName)\n\tif err != nil {\n\t\treturn Config, err\n\t}\n\tdecoder := json.NewDecoder(file)\n\terr = decoder.Decode(Config)\n\tif err == nil {\n\t\tlog.Infof(\"Read config: %s\", fileName)\n\t} else {\n\t\tlog.Fatal(\"Cannot read config file:\", fileName, err)\n\t}\n\tif err := Config.postReadAdjustments(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn Config, err\n}",
"func (m *OrganizationManager) Read(id string, opts ...RequestOption) (o *Organization, err error) {\n\terr = m.Request(\"GET\", m.URI(\"organizations\", id), &o, opts...)\n\treturn\n}",
"func (r *Repo) ReadNames(h errs.Handler, pkgnames ...string) (pacman.Packages, error) {\n\terrs.Init(&h)\n\tif len(pkgnames) == 0 {\n\t\treturn r.ReadDir(h)\n\t}\n\n\tpkgs, err := pacman.ReadNames(h, r.Directory, pkgnames...)\n\tr.MakeAbs(pkgs)\n\treturn pkgs, err\n}",
"func ReadJPNSoftwareMap(dir string, filename string) (v JPNSoftwareMap, err error) {\n\terr = readJSONFile(dir, filename, &v)\n\treturn v, err\n}",
"func (r Reader) Read(spec *v1alpha1.OCIBuilderSpec, overlayPath string, filepaths ...string) error {\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfilepath := strings.Join(filepaths[:], \"/\")\n\tif filepath != \"\" {\n\t\tdir = filepath\n\t}\n\tr.Logger.WithField(\"filepath\", dir+\"/ocibuilder.yaml\").Debugln(\"looking for ocibuilder.yaml\")\n\tfile, err := ioutil.ReadFile(dir + \"/ocibuilder.yaml\")\n\tif err != nil {\n\t\tr.Logger.Infoln(\"ocibuilder.yaml file not found, looking for individual specifications...\")\n\t\tif err := r.readIndividualSpecs(spec, dir); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to read individual specs\")\n\t\t}\n\t}\n\n\tif overlayPath != \"\" {\n\t\tr.Logger.WithField(\"overlayPath\", overlayPath).Debugln(\"overlay path not empty - looking for overlay file\")\n\t\tfile, err = applyOverlay(file, overlayPath)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to apply overlay to spec at path\")\n\t\t}\n\t}\n\n\tif err = yaml.Unmarshal(file, spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to unmarshal spec at directory\")\n\t}\n\n\tif err := validate.Validate(spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate spec at directory\")\n\t}\n\n\tif err = yaml.Unmarshal(file, spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to unmarshal spec at directory\")\n\t}\n\n\tif err := validate.Validate(spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate spec at directory\")\n\t}\n\n\tif err = yaml.Unmarshal(file, spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to unmarshal spec at directory\")\n\t}\n\n\tif err := validate.Validate(spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate spec at directory\")\n\t}\n\n\tif spec.Params != nil {\n\t\tif err = r.applyParams(file, spec); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to apply params to spec\")\n\t\t}\n\t}\n\n\treturn nil\n}",
"func uploadRead(collection string, filename string) (contents []byte, err error) {\n\thomedir := os.Getenv(\"HOME\")\n\tdirectory := homedir + filePathCollections + collection\n\tfile := directory + \"/\" + filename\n\tcontents, err = ioutil.ReadFile(file)\n\treturn\n}",
"func (p *Building) Read(iprot thrift.TProtocol) (err thrift.TProtocolException) {\n\t//++ read code that /knows/ the struct's metadata\n\t// calls readField_Name, readField_Height, readField_DoorOpen, optionally: readField_BackDoorOpen, optionally: readField_BackWindow\n}",
"func ReadBuildArtifacts(path string) (*fintpb.BuildArtifacts, error) {\n\tbytes, err := os.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar message fintpb.BuildArtifacts\n\tif err := prototext.Unmarshal(bytes, &message); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &message, nil\n}",
"func Read(s beam.Scope, project, topic string, opts *ReadOptions) beam.PCollection {\n\ts = s.Scope(\"pubsubio.Read\")\n\n\tpayload := &pipepb.PubSubReadPayload{\n\t\tTopic: pubsubx.MakeQualifiedTopicName(project, topic),\n\t}\n\tif opts != nil {\n\t\tpayload.IdAttribute = opts.IDAttribute\n\t\tpayload.TimestampAttribute = opts.TimestampAttribute\n\t\tif opts.Subscription != \"\" {\n\t\t\tpayload.Subscription = pubsubx.MakeQualifiedSubscriptionName(project, opts.Subscription)\n\t\t}\n\t\tpayload.WithAttributes = opts.WithAttributes\n\t}\n\n\tout := beam.External(s, readURN, protox.MustEncode(payload), nil, []beam.FullType{typex.New(reflectx.ByteSlice)}, false)\n\tif opts != nil && opts.WithAttributes {\n\t\treturn beam.ParDo(s, unmarshalMessageFn, out[0])\n\t}\n\treturn out[0]\n}",
"func readConfigCron(c *router.Context) {\n\trc := requestContext(*c)\n\tprojectsToVisit := map[string]bool{}\n\n\t// Visit all projects in the catalog.\n\tctx, cancel := context.WithTimeout(rc.Context, 150*time.Second)\n\tdefer cancel()\n\tprojects, err := globalCatalog.GetAllProjects(ctx)\n\tif err != nil {\n\t\trc.err(err, \"Failed to grab a list of project IDs from catalog\")\n\t\treturn\n\t}\n\tfor _, id := range projects {\n\t\tprojectsToVisit[id] = true\n\t}\n\n\t// Also visit all registered projects that do not show up in the catalog\n\t// listing anymore. It will unregister all jobs belonging to them.\n\texisting, err := globalEngine.GetAllProjects(rc.Context)\n\tif err != nil {\n\t\trc.err(err, \"Failed to grab a list of project IDs from datastore\")\n\t\treturn\n\t}\n\tfor _, id := range existing {\n\t\tprojectsToVisit[id] = true\n\t}\n\n\t// Handle each project in its own task to avoid \"bad\" projects (e.g. ones with\n\t// lots of jobs) to slow down \"good\" ones.\n\ttasks := make([]*tq.Task, 0, len(projectsToVisit))\n\tfor projectID := range projectsToVisit {\n\t\ttasks = append(tasks, &tq.Task{\n\t\t\tPayload: &internal.ReadProjectConfigTask{ProjectId: projectID},\n\t\t})\n\t}\n\tif err = globalDispatcher.AddTask(rc.Context, tasks...); err != nil {\n\t\trc.err(err, \"Failed to add tasks to task queue\")\n\t} else {\n\t\trc.ok()\n\t}\n}",
"func readConfig() Configuration {\n\tfmt.Println(\"Reading configuration file\")\n\n\tdir, _ := filepath.Abs(filepath.Dir(os.Args[0]))\n\tfilepath := []string{dir, \"config.json\"}\n\n\tfile, _ := os.Open(strings.Join(filepath, \"\\\\\"))\n\tdecoder := json.NewDecoder(file)\n\tconfiguration := Configuration{}\n\terr := decoder.Decode(&configuration)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn configuration\n}",
"func (p *SourceProvider) Read() error {\n\tif !p.Config.Enabled {\n\t\treturn nil\n\t}\n\tif p.Connection.KAPI == nil {\n\t\tif err := p.Connection.Connect(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tvar t []provider.Task\n\tif err := p.Connection.ReadAll(&t); err != nil {\n\t\treturn err\n\t}\n\tfor _, task := range t {\n\t\tp.TaskFlow <- task\n\t}\n\tgo p.Connection.WatchTasks(func(t *provider.Task) {\n\t\tp.TaskFlow <- *t\n\t})\n\treturn nil\n}",
"func Read(config io.Reader) (UserCollection, error) {\n\tusers := make(UserCollection, 0)\n\tdecoder := gob.NewDecoder(config)\n\terr := decoder.Decode(&users)\n\treturn users, err\n}",
"func ReadAll(path string) (string, error) {\n\tf, err := os.Open(path)\n\tdefer f.Close()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcontents, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(contents), nil\n}",
"func Read(v *viper.Viper) error {\n\tif err := v.ReadInConfig(); err != nil {\n\t\tif errors.As(err, &viper.ConfigFileNotFoundError{}) {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"read config file: %w\", err)\n\t}\n\treturn nil\n}",
"func ReadTasks(path string) ([]Task, error) {\n\tvar tasks []Task\n\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn tasks, err\n\t}\n\tdefer file.Close()\n\n\tif err != nil {\n\t\treturn tasks, err\n\t}\n\n\t// Read file and construct struct\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tsplits := strings.Split(line, \",\")\n\t\tif len(splits) == 3 {\n\t\t\tdate := strings.TrimSpace(splits[0])\n\t\t\tname := strings.TrimSpace(splits[1])\n\t\t\tdone := strings.TrimSpace(splits[2])\n\n\t\t\t// handle done\n\t\t\tif done != \"x\" {\n\t\t\t\tdone = \" \"\n\t\t\t}\n\n\t\t\ttask := Task{\n\t\t\t\tDate: date,\n\t\t\t\tName: name,\n\t\t\t\tDone: done,\n\t\t\t}\n\t\t\ttasks = append(tasks, task)\n\t\t} else {\n\t\t\t// continue\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn tasks, err\n\t}\n\n\treturn tasks, nil\n}",
"func readLines(path string) chan string {\n\tch := make(chan string)\n\tgo func() {\n\t\tinputFile, err := os.Open(config.Git_projects_file)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to read the projects file\")\n\t\t\treturn\n\t\t}\n\t\tscanner := bufio.NewScanner(inputFile)\n\t\tfor scanner.Scan() {\n\t\t\tch <- scanner.Text()\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn ch\n}",
"func New(p *models.Project) task.Task {\n\treturn &Readme{\n\t\tp: p,\n\t}\n}",
"func(c*Config) Read( ){\n\tif _, err := toml.DecodeFile(\"config.toml\", &c); err != nil{\n\t\tlog.Fatal(err)\n\t}\n}",
"func read(s beam.Scope, resourcePaths beam.PCollection, client fhirStoreClient) (beam.PCollection, beam.PCollection) {\n\treturn beam.ParDo2(s, &readResourceFn{fnCommonVariables: fnCommonVariables{client: client}}, resourcePaths)\n}",
"func (t *Trellis) LoadProject() error {\n\tif t.Path != \"\" {\n\t\tos.Chdir(t.Path)\n\t\treturn nil\n\t}\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tpath, ok := t.Detect(wd)\n\n\tif !ok {\n\t\treturn errors.New(\"No Trellis project detected in the current directory or any of its parent directories.\")\n\t}\n\n\tt.Path = path\n\tt.ConfigPath = filepath.Join(path, ConfigDir)\n\tt.Virtualenv = NewVirtualenv(t.ConfigPath)\n\n\tos.Chdir(t.Path)\n\n\tif os.Getenv(\"TRELLIS_VENV\") != \"false\" {\n\t\tif t.Virtualenv.Initialized() {\n\t\t\tt.Virtualenv.Activate()\n\t\t}\n\t}\n\n\tconfigPaths, _ := filepath.Glob(\"group_vars/*/wordpress_sites.yml\")\n\n\tenvs := make([]string, len(configPaths))\n\tt.Environments = make(map[string]*Config, len(configPaths)-1)\n\n\tfor i, p := range configPaths {\n\t\tparts := strings.Split(p, string(os.PathSeparator))\n\t\tenvName := parts[1]\n\t\tenvs[i] = envName\n\n\t\tt.Environments[envName] = t.ParseConfig(p)\n\t}\n\n\treturn nil\n}"
] | [
"0.7127253",
"0.67041546",
"0.63716996",
"0.62035036",
"0.61625195",
"0.61137533",
"0.6112978",
"0.606727",
"0.59411407",
"0.5883763",
"0.5815111",
"0.5776463",
"0.5734309",
"0.5713775",
"0.5671496",
"0.5671328",
"0.5665366",
"0.5647636",
"0.56434774",
"0.5642749",
"0.5633865",
"0.56306696",
"0.5621478",
"0.5579692",
"0.55649203",
"0.55603594",
"0.5546399",
"0.5540173",
"0.54830194",
"0.547427",
"0.5471327",
"0.5464846",
"0.5451853",
"0.54432213",
"0.5437912",
"0.54101974",
"0.5410102",
"0.5405312",
"0.54013926",
"0.53950113",
"0.53889006",
"0.53830725",
"0.53505784",
"0.53501725",
"0.53493315",
"0.53483754",
"0.53483415",
"0.53384686",
"0.53288805",
"0.5326279",
"0.5326258",
"0.53218687",
"0.53177243",
"0.5310507",
"0.53074944",
"0.53019035",
"0.5297654",
"0.52923965",
"0.5290325",
"0.5288904",
"0.5283479",
"0.52801067",
"0.5272562",
"0.5258648",
"0.52574986",
"0.52364033",
"0.5233446",
"0.5229365",
"0.5224857",
"0.52241504",
"0.5223063",
"0.5219078",
"0.52152455",
"0.5208623",
"0.5204207",
"0.5194497",
"0.5193608",
"0.5181029",
"0.51724374",
"0.51723176",
"0.5169314",
"0.5167819",
"0.5164714",
"0.516407",
"0.5163167",
"0.5157766",
"0.51446676",
"0.5144397",
"0.5141068",
"0.51400506",
"0.5126007",
"0.5120998",
"0.5109137",
"0.51084286",
"0.51081634",
"0.5107091",
"0.51052254",
"0.5102859",
"0.50977397",
"0.5097068"
] | 0.78015107 | 0 |
Clone wraps html/template.Clone to also clone the name | Clone оборачивает html/template.Clone, чтобы также клонировать имя | func (t *Template) Clone() (*Template, error) {
var tmpl, err = t.Template.Clone()
return &Template{tmpl, t.Name}, err
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (tc *STemplateController) Clone(clone_name string, recursive bool) (*srv_tmpl.ServiceTemplate, error) {\n\turl := urlTemplateAction(tc.ID)\n\taction := make(map[string]interface{})\n\n\taction[\"action\"] = map[string]interface{}{\n\t\t\"perform\": \"clone\",\n\t\t\"params\": map[string]interface{}{\n\t\t\t\"name\": clone_name,\n\t\t\t\"recursive\": recursive,\n\t\t},\n\t}\n\n\t//Get response\n\tresponse, err := tc.c.ClientFlow.HTTPMethod(\"POST\", url, action)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !response.status {\n\t\treturn nil, errors.New(response.body)\n\t}\n\n\t//Build Service from response\n\tstemplate := &srv_tmpl.ServiceTemplate{}\n\tstemplate_str, err := json.Marshal(response.BodyMap()[\"DOCUMENT\"])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = json.Unmarshal(stemplate_str, stemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn stemplate, nil\n}",
"func (b *Buildtemplate) Clone(source buildv1alpha1.BuildTemplate, clientset *client.ConfigSet) (*buildv1alpha1.BuildTemplate, error) {\n\tsource.SetName(\"\")\n\tsource.SetGenerateName(b.Name + \"-\")\n\tsource.SetNamespace(b.Namespace)\n\tsource.SetOwnerReferences([]metav1.OwnerReference{})\n\tsource.SetResourceVersion(\"\")\n\tsource.Kind = \"BuildTemplate\"\n\tif len(clientset.Registry.Secret) != 0 {\n\t\taddSecretVolume(clientset.Registry.Secret, &source)\n\t\tsetEnvConfig(clientset.Registry.Secret, &source)\n\t}\n\treturn createBuildTemplate(source, clientset)\n}",
"func execmTemplateClone(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret, ret1 := args[0].(*template.Template).Clone()\n\tp.Ret(1, ret, ret1)\n}",
"func (t *TRoot) Clone() *TRoot {\n\tvar clone, _ = t.template.Clone()\n\treturn &TRoot{clone, t.Path}\n}",
"func (i *IContainer) Clone(w http.ResponseWriter, r *http.Request) *IClone {\n\treturn &IClone{\n\t\tIContainer: i,\n\t\tw: w,\n\t\tr: r,\n\t\tmutex: &sync.RWMutex{},\n\t\tthreadData: make(map[string]interface{}),\n\t}\n}",
"func (z *zfsctl) Clone(ctx context.Context, name string, properties map[string]string, source string) *execute {\n\targs := []string{\"clone\", \"-p\"}\n\tif properties != nil {\n\t\tkv := \"-o \"\n\t\tfor k, v := range properties {\n\t\t\tkv += fmt.Sprintf(\"%s=%s \", k, v)\n\t\t}\n\t\targs = append(args, kv)\n\t}\n\targs = append(args, source, name)\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}",
"func (s *CreateViewStatement) Clone() *CreateViewStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\t// other.Columns = cloneIdents(s.Columns)\n\tother.Select = s.Select.Clone()\n\treturn &other\n}",
"func (s *DropViewStatement) Clone() *DropViewStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}",
"func (llrb *LLRB) Clone(name string) *LLRB {\n\tif !llrb.lock() {\n\t\treturn nil\n\t}\n\n\tnewllrb := NewLLRB(llrb.name, llrb.setts)\n\tnewllrb.llrbstats = llrb.llrbstats\n\tnewllrb.h_upsertdepth = llrb.h_upsertdepth.Clone()\n\tnewllrb.seqno = llrb.seqno\n\n\tnewllrb.setroot(newllrb.clonetree(llrb.getroot()))\n\n\tllrb.unlock()\n\treturn newllrb\n}",
"func (this *Selection) AppendClones(template *html.Node) *Selection {\n\tfor _, parent := range this.Nodes {\n\t\tparent.AppendChild(cloneNode(template))\n\t}\n\treturn this\n}",
"func Clone(url string) {\n\thg(\"clone %s\", url)\n}",
"func (c *ServiceCreate) clone(destination string) error {\n\t_, err := git.PlainClone(destination, false, &git.CloneOptions{\n\t\tURL: \"https://github.com/RobyFerro/go-web.git\",\n\t\tProgress: nil,\n\t})\n\n\treturn err\n}",
"func CloneTemplates() {\n\t_, err := git.PlainClone(\".templates\", false, &git.CloneOptions{\n\t\tURL: \"http://10.1.38.31/afougerouse/templates.git\",\n\t\tProgress: os.Stdout,\n\t})\n\tif err != nil {\n\t\tfmt.Errorf(\"Impossible de récupérer les templates\")\n\t\tos.Exit(1)\n\t}\n}",
"func (s *CreateDatabaseStatement) Clone() *CreateDatabaseStatement {\n\tif s == nil {\n\t\treturn s\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}",
"func (s *DropIndexStatement) Clone() *DropIndexStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}",
"func (s *AlterViewStatement) Clone() *AlterViewStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\t// other.Columns = cloneIdents(s.Columns)\n\tother.Select = s.Select.Clone()\n\treturn &other\n}",
"func (w *Window) Clone() *Window {\n\tif w == nil {\n\t\treturn nil\n\t}\n\tother := *w\n\tother.Name = w.Name.Clone()\n\tother.Definition = w.Definition.Clone()\n\treturn &other\n}",
"func (p *PKGBUILD) Clone() *PKGBUILD {\n\tc := New()\n\tc.atoms = p.atoms.Clone()\n\tc.RecomputeInfos(true)\n\treturn c\n}",
"func (r *View) Clone() *View {\n\treturn r.CloneLimit(r.size)\n}",
"func cloneNode(node *html.Node) *html.Node {\n\tclone := &html.Node{\n\t\tType: node.Type,\n\t\tDataAtom: node.DataAtom,\n\t\tData: node.Data,\n\t\tAttr: make([]html.Attribute, len(node.Attr)),\n\t}\n\n\tcopy(clone.Attr, node.Attr)\n\n\tfor c := node.FirstChild; c != nil; c = c.NextSibling {\n\t\tclone.AppendChild(cloneNode(c))\n\t}\n\n\treturn clone\n}",
"func (c *Curl) Clone() SpongeFunction {\n\treturn &Curl{\n\t\tstate: c.state,\n\t\trounds: c.rounds,\n\t\tdirection: c.direction,\n\t}\n}",
"func cloneNode(n *html.Node) *html.Node {\n\tnn := &html.Node{\n\t\tType: n.Type,\n\t\tDataAtom: n.DataAtom,\n\t\tData: n.Data,\n\t\tAttr: make([]html.Attribute, len(n.Attr)),\n\t}\n\n\tcopy(nn.Attr, n.Attr)\n\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\tnn.AppendChild(cloneNode(c))\n\t}\n\n\treturn nn\n}",
"func (h *Header) Clone() *Header {\n\thc := &Header{slice: make([]string, len(h.slice))}\n\tcopy(hc.slice, h.slice)\n\treturn hc\n}",
"func (n *Nodes) Clone() data.Clonable {\n\treturn newNodes().Replace(n)\n}",
"func (t Header) Clone() Header {\n\tt.Key = append([]KeyField{}, t.Key...)\n\tt.Data = append([]Field{}, t.Data...)\n\treturn t\n}",
"func (h *PrometheusInstrumentHandler) Clone() model.Part {\n\th0 := *h\n\treturn &h0\n}",
"func (o File) Clone() File {\n\to.Meta = o.Meta.Clone()\n\treturn o\n}",
"func (c OSClientBuildClonerClient) Clone(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error) {\n\treturn c.Client.Builds(namespace).Clone(request)\n}",
"func (s *CreateFunctionStatement) Clone() *CreateFunctionStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\tother.Body = cloneStatements(s.Body)\n\treturn &other\n}",
"func (m *Mocker) Clone(t *testing.T) (clone *Mocker) {\n\tm.Close()\n\n\tclone = New(t)\n\n\tclone.handlers = m.deepCopyHandlers()\n\n\treturn\n}",
"func (w *Wrapper) Clone() *Wrapper {\n\treturn w.cloning(false)\n}",
"func (s *DropFunctionStatement) Clone() *DropFunctionStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}",
"func (s *DropTableStatement) Clone() *DropTableStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}",
"func (l LabelDef) Clone() (cL config.RextLabelDef, err error) {\n\tvar cNs config.RextNodeSolver\n\tif l.GetNodeSolver() != nil {\n\t\tif cNs, err = l.GetNodeSolver().Clone(); err != nil {\n\t\t\tlog.WithError(err).Errorln(\"can not clone node solver in label\")\n\t\t\treturn cL, err\n\t\t}\n\t}\n\tcL = NewLabelDef(l.name, cNs)\n\treturn cL, err\n}",
"func CloneRefOfCreateView(n *CreateView) *CreateView {\n\tif n == nil {\n\t\treturn nil\n\t}\n\tout := *n\n\tout.ViewName = CloneTableName(n.ViewName)\n\tout.Columns = CloneColumns(n.Columns)\n\tout.Select = CloneSelectStatement(n.Select)\n\treturn &out\n}",
"func (s *ReleaseStatement) Clone() *ReleaseStatement {\n\tif s == nil {\n\t\treturn s\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}",
"func (b *Builder) Clone(index int) {\n\tsidx := len(b.stack) - 1 - index\n\t// Change ownership of the top stack value to the clone instruction.\n\tb.stack[sidx].idx = len(b.instructions)\n\tb.pushStack(b.stack[sidx].ty)\n\tb.instructions = append(b.instructions, asm.Clone{\n\t\tIndex: index,\n\t})\n}",
"func Clone(origin string, name string, props *DatasetProps) error {\n\tvar cloneReq struct {\n\t\tOrigin string `nvlist:\"origin\"`\n\t\tProps *DatasetProps `nvlist:\"props\"`\n\t}\n\tcloneReq.Origin = origin\n\tcloneReq.Props = props\n\terrList := make(map[string]int32)\n\tcmd := &Cmd{}\n\treturn NvlistIoctl(zfsHandle.Fd(), ZFS_IOC_CLONE, name, cmd, cloneReq, errList, nil)\n\t// TODO: Partial failures using errList\n}",
"func NewClone() *Clone {\n\treturn &Clone{\n\t\tkeys: make(map[string][]byte),\n\t}\n}",
"func (l *universalLister) Clone() *universalLister {\n\tvar clonedLister universalLister\n\n\tclonedLister.resourceType = l.resourceType\n\tclonedLister.tableName = l.tableName\n\tclonedLister.selectedColumns = l.selectedColumns\n\tclonedLister.tenantColumn = l.tenantColumn\n\tclonedLister.orderByParams = append(clonedLister.orderByParams, l.orderByParams...)\n\n\treturn &clonedLister\n}",
"func (v String) Clone() Node {\n\treturn v\n}",
"func (s *DropDatabaseStatement) Clone() *DropDatabaseStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}",
"func (d *BulkInsertMapDefinition) Clone() *BulkInsertMapDefinition {\n\tif d == nil {\n\t\treturn d\n\t}\n\tother := *d\n\tother.Name = d.Name.Clone()\n\tother.Type = d.Type.Clone()\n\t//other.MapExpr = d.MapExpr.Clone()\n\treturn &other\n}",
"func (s *CreateIndexStatement) Clone() *CreateIndexStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\tother.Table = s.Table.Clone()\n\tother.Columns = cloneIndexedColumns(s.Columns)\n\tother.WhereExpr = CloneExpr(s.WhereExpr)\n\treturn &other\n}",
"func (c *Client) Clone() (*Client, error) {\n\treturn c.clone(c.config.CloneHeaders)\n}",
"func clone(t *kernel.Task, flags int, stack hostarch.Addr, parentTID hostarch.Addr, childTID hostarch.Addr, tls hostarch.Addr) (uintptr, *kernel.SyscallControl, error) {\n\targs := linux.CloneArgs{\n\t\tFlags: uint64(uint32(flags) &^ linux.CSIGNAL),\n\t\tChildTID: uint64(childTID),\n\t\tParentTID: uint64(parentTID),\n\t\tExitSignal: uint64(flags & linux.CSIGNAL),\n\t\tStack: uint64(stack),\n\t\tTLS: uint64(tls),\n\t}\n\tntid, ctrl, err := t.Clone(&args)\n\treturn uintptr(ntid), ctrl, err\n}",
"func (s *Action) Clone(ctx context.Context, c *cli.Context) error {\n\tif len(c.Args()) < 1 {\n\t\treturn errors.Errorf(\"Usage: %s clone repo [mount]\", s.Name)\n\t}\n\n\trepo := c.Args()[0]\n\tmount := \"\"\n\tif len(c.Args()) > 1 {\n\t\tmount = c.Args()[1]\n\t}\n\n\tpath := c.String(\"path\")\n\n\treturn s.clone(ctx, repo, mount, path)\n}",
"func (p *Pie) Clone(generateNewID bool) *Pie {\n\tcloned := *p\n\tif generateNewID {\n\t\tcloned.Id = bson.NewObjectId()\n\t}\n\tcloned.Slices = make([]Slice, len(p.Slices))\n\tcopy(cloned.Slices, p.Slices)\n\treturn &cloned\n}",
"func (ts *STableSpec) Clone(name string, autoIncOffset int64) *STableSpec {\n\tnts, _ := ts.CloneWithSyncColumnOrder(name, autoIncOffset, false)\n\treturn nts\n}",
"func (s *SavepointStatement) Clone() *SavepointStatement {\n\tif s == nil {\n\t\treturn s\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}",
"func (s *AlterDatabaseStatement) Clone() *AlterDatabaseStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = other.Name.Clone()\n\treturn &other\n}",
"func (r *Request) Clone() (fiber.Request, error) {\n\tbodyReader := bytes.NewReader(r.Payload())\n\n\tproxyRequest, err := http.NewRequest(r.Method, r.URL.String(), bodyReader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tproxyRequest.GetBody = func() (io.ReadCloser, error) {\n\t\treturn ioutil.NopCloser(bodyReader), nil\n\t}\n\n\tproxyRequest.Header = r.Header()\n\n\treturn &Request{CachedPayload: r.CachedPayload, Request: proxyRequest}, nil\n}",
"func Clone(node parlex.ParseNode) *PN {\n\tpn := &PN{\n\t\tLexeme: &lexeme.Lexeme{\n\t\t\tK: node.Kind(),\n\t\t\tV: node.Value(),\n\t\t},\n\t\tC: make([]*PN, node.Children()),\n\t}\n\tfor i := 0; i < node.Children(); i++ {\n\t\tc := Clone(node.Child(i))\n\t\tc.P = pn\n\t\tpn.C[i] = c\n\t}\n\treturn pn\n}",
"func (s *Spec) Clone() *Spec {\n\tres := &Spec{Target: make(map[string]string)}\n\tfor k, v := range s.Target {\n\t\tres.Target[k] = v\n\t}\n\tfor _, app := range s.Apps {\n\t\tres.Apps = append(res.Apps, app.Clone())\n\t}\n\treturn res\n}",
"func Clone(url string) (string, error) {\n\tpath := fmt.Sprintf(\"%s/git/%s\", tmpPath, RandString(10))\n\tif err := os.MkdirAll(path, os.ModePerm); err != nil {\n\t\treturn \"\", err\n\t}\n\t_, err := git.PlainClone(path, false, &git.CloneOptions{\n\t\tURL: url,\n\t})\n\treturn path, err\n}",
"func (c *Cmd) Clone() *Cmd {\n\tres := &Cmd{Cmd: c.Cmd.Clone(), sh: c.sh}\n\tinitSession(c.sh.tb, res)\n\treturn res\n}",
"func (op RollupOp) Clone() RollupOp {\n\tidClone := make([]byte, len(op.ID))\n\tcopy(idClone, op.ID)\n\treturn RollupOp{ID: idClone, AggregationID: op.AggregationID}\n}",
"func (n *Network) Clone() data.Clonable {\n\treturn newNetwork().Replace(n)\n}",
"func (s *VMStorage) Clone() *VMStorage {\n\tns := &VMStorage{\n\t\tc: s.c,\n\t\tauthCfg: s.authCfg,\n\t\tdatasourceURL: s.datasourceURL,\n\t\tappendTypePrefix: s.appendTypePrefix,\n\t\tlookBack: s.lookBack,\n\t\tqueryStep: s.queryStep,\n\n\t\tdataSourceType: s.dataSourceType,\n\t\tevaluationInterval: s.evaluationInterval,\n\n\t\t// init map so it can be populated below\n\t\textraParams: url.Values{},\n\n\t\tdebug: s.debug,\n\t}\n\tif len(s.extraHeaders) > 0 {\n\t\tns.extraHeaders = make([]keyValue, len(s.extraHeaders))\n\t\tcopy(ns.extraHeaders, s.extraHeaders)\n\t}\n\tfor k, v := range s.extraParams {\n\t\tns.extraParams[k] = v\n\t}\n\n\treturn ns\n}",
"func (lit *StringLit) Clone() *StringLit {\n\tif lit == nil {\n\t\treturn nil\n\t}\n\tother := *lit\n\treturn &other\n}",
"func (project *ProjectV1) Clone() *ProjectV1 {\n\tif core.IsNil(project) {\n\t\treturn nil\n\t}\n\tclone := *project\n\tclone.Service = project.Service.Clone()\n\treturn &clone\n}",
"func CloneDefault() *Controller { return defaultCtrl.Clone() }",
"func (r *Helm) Copy() *Helm {\n\treturn &Helm{\n\t\tID: r.ID,\n\t\t//ProjectName: r.ProjectName,\n\t\tType: r.Type,\n\t\tName: r.Name,\n\t\tAddress: r.Address,\n\t\tUsername: r.Username,\n\t\tPrefix: r.Prefix,\n\t}\n}",
"func (t TestRepo) Clone() TestRepo {\n\tpath, err := ioutil.TempDir(\"\", \"gtm\")\n\tCheckFatal(t.test, err)\n\n\tr, err := git.Clone(t.repo.Path(), path, &git.CloneOptions{})\n\tCheckFatal(t.test, err)\n\n\treturn TestRepo{repo: r, test: t.test}\n}",
"func (i *interactor) Clone(from string) error {\n\treturn i.CloneWithRepoOpts(from, RepoOpts{})\n}",
"func (p Page) Clone() Page {\n\tclone := make([]Section, len(p))\n\tfor i, section := range p {\n\t\tclone[i] = section.Clone()\n\t}\n\treturn clone\n}",
"func (s Sequence) Clone() Sequence {\n\tv := Sequence{s.Title, make([]Token, len(s.Tokens))}\n\tcopy(v.Tokens, s.Tokens)\n\treturn v\n}",
"func rawClone(secrets configure.SecretsOutline, repo api.Repo, path string) {\n\terr := os.MkdirAll(path, 0777)\n\tif err != nil {\n\t\tstatuser.Error(\"Failed to create folder at \"+path, err, 1)\n\t}\n\n\tspin := spinner.New(utils.SpinnerCharSet, utils.SpinnerSpeed)\n\tspin.Suffix = fmt.Sprintf(\" Cloning %v/%v\", repo.Owner, repo.Name)\n\tspin.Start()\n\n\t_, err = git.PlainClone(path, false, &git.CloneOptions{\n\t\tURL: fmt.Sprintf(\"https://github.com/%v/%v.git\", repo.Owner, repo.Name),\n\t\tAuth: &http.BasicAuth{\n\t\t\tUsername: secrets.Username,\n\t\t\tPassword: secrets.PAT,\n\t\t},\n\t})\n\n\tspin.Stop()\n\tif err != nil {\n\t\tstatuser.Error(\"Failed to clone repo\", err, 1)\n\t}\n}",
"func (p *portfolio) clone() (*portfolio, error) {\n\n\tc := &portfolio{\n\t\tname: p.name + \"[cloned]\",\n\t\tisLive: false, // clones are never live\n\t\tbalances: make(map[SymbolType]*BalanceAs, 0),\n\t}\n\n\tfor symbol, balance := range p.balances {\n\t\t// clone balance\n\t\tcb := *balance\n\t\tcb.BuyStrategy = nil\n\t\tcb.SellStrategy = nil\n\t\tc.balances[symbol] = &cb\n\t}\n\n\treturn c, nil\n}",
"func (rd *ReferenceDef) Clone() *ReferenceDef {\n\tcnames := make([]*IndexColName, 0, len(rd.IndexColNames))\n\tfor _, idxColName := range rd.IndexColNames {\n\t\tt := *idxColName\n\t\tcnames = append(cnames, &t)\n\t}\n\treturn &ReferenceDef{TableIdent: rd.TableIdent, IndexColNames: cnames}\n}",
"func clone(s *Scroller) *Scroller {\n\tclone := &Scroller{\n\t\tpos: s.pos,\n\t\tline: s.line,\n\t\toffset: s.offset,\n\t\tdir: s.dir,\n\t\tscrolled: s.scrolled,\n\t\teditor: s.editor,\n\t\tctrl: s.ctrl,\n\t}\n\tfor _, h := range s.scrolled {\n\t\tclone.scrolled = append(clone.scrolled, h)\n\t}\n\treturn clone\n}",
"func (l *Localizer) Clone() CloneableLocalizer {\n\tclone := &Localizer{\n\t\ti18nStorage: l.i18nStorage,\n\t\tTranslationsFS: l.TranslationsFS,\n\t\tLocaleMatcher: l.LocaleMatcher,\n\t\tLanguageTag: l.LanguageTag,\n\t\tTranslationsPath: l.TranslationsPath,\n\t\tloadMutex: l.loadMutex,\n\t}\n\tclone.SetLanguage(DefaultLanguage)\n\n\treturn clone\n}",
"func (t *TaskBox[T, U, C, CT, TF]) Clone() *TaskBox[T, U, C, CT, TF] {\n\tnewBox := NewTaskBox[T, U, C, CT, TF](t.constArgs, t.contextFunc, t.wg, t.task, t.resultCh, t.taskID)\n\treturn &newBox\n}",
"func (c *NotNullConstraint) Clone() *NotNullConstraint {\n\tif c == nil {\n\t\treturn c\n\t}\n\tother := *c\n\tother.Name = c.Name.Clone()\n\treturn &other\n}",
"func (o RenderTemplatesList) Copy() elemental.Identifiables {\n\n\tcopy := append(RenderTemplatesList{}, o...)\n\treturn ©\n}",
"func CloneRefOfRenameIndex(n *RenameIndex) *RenameIndex {\n\tif n == nil {\n\t\treturn nil\n\t}\n\tout := *n\n\treturn &out\n}",
"func (m *TestObj) Clone(interface{}) (interface{}, error) { return nil, nil }",
"func isClone(c *yaml.Container) bool {\n\treturn c.Name == \"clone\"\n}",
"func (lit *DateLit) Clone() *DateLit {\n\tif lit == nil {\n\t\treturn nil\n\t}\n\tother := *lit\n\treturn &other\n}",
"func (g *GitLocal) Clone(url string, dir string) error {\n\treturn g.GitFake.Clone(url, dir)\n}",
"func (f *Feature) Clone() *Feature {\n\treturn NewFeature(f.chr, f.element, f.location)\n}",
"func (g *GitCredential) Clone() GitCredential {\n\tclone := GitCredential{}\n\n\tvalue := reflect.ValueOf(g).Elem()\n\ttypeOfT := value.Type()\n\tfor i := 0; i < value.NumField(); i++ {\n\t\tfield := value.Field(i)\n\t\tvalue := field.String()\n\t\tv := reflect.ValueOf(&clone).Elem().FieldByName(typeOfT.Field(i).Name)\n\t\tv.SetString(value)\n\t}\n\n\treturn clone\n}",
"func (w *WebGLRenderTarget) Clone() *WebGLRenderTarget {\n\tw.p.Call(\"clone\")\n\treturn w\n}",
"func (msg *Message) Clone(message *Message) *Message {\n\tmsgID := uuid.New().String()\n\treturn NewRawMessage().BuildHeader(msgID, message.GetParentID(), message.GetTimestamp()).\n\t\tBuildRouter(message.GetSource(), message.GetGroup(), message.GetResource(), message.GetOperation()).\n\t\tFillBody(message.GetContent())\n}",
"func Clone(url, dir, githubToken string) error {\n\t_, err := git.PlainClone(dir, false, &git.CloneOptions{\n\t\tURL: url,\n\t\tAuth: &http.BasicAuth{\n\t\t\tUsername: \"dummy\", // anything except an empty string\n\t\t\tPassword: githubToken,\n\t\t},\n\t\tSingleBranch: true,\n\t})\n\treturn err\n}",
"func cloneTask(t *Task) *Task {\n c := *t\n return &c\n}",
"func (c *credsImpl) Clone() credentials.TransportCredentials {\n\tclone := *c\n\treturn &clone\n}",
"func Cloner(h Handler) Handler {\n\treturn func(page Page) error {\n\t\treturn h(page.Clone())\n\t}\n}",
"func (this *Selection) Clone() *Selection {\n\tresults := newEmptySelection(this.document)\n\tthis.Each(func(_ int, sel *Selection) {\n\t\tresults = results.AddNodes(cloneNode(sel.Node()))\n\t})\n\treturn results\n}",
"func (c *Client) clone(cloneHeaders bool) (*Client, error) {\n\tc.modifyLock.RLock()\n\tdefer c.modifyLock.RUnlock()\n\n\tconfig := c.config\n\tconfig.modifyLock.RLock()\n\tdefer config.modifyLock.RUnlock()\n\n\tnewConfig := &Config{\n\t\tAddress: config.Address,\n\t\tHttpClient: config.HttpClient,\n\t\tMinRetryWait: config.MinRetryWait,\n\t\tMaxRetryWait: config.MaxRetryWait,\n\t\tMaxRetries: config.MaxRetries,\n\t\tTimeout: config.Timeout,\n\t\tBackoff: config.Backoff,\n\t\tCheckRetry: config.CheckRetry,\n\t\tLogger: config.Logger,\n\t\tLimiter: config.Limiter,\n\t\tAgentAddress: config.AgentAddress,\n\t\tSRVLookup: config.SRVLookup,\n\t\tCloneHeaders: config.CloneHeaders,\n\t\tCloneToken: config.CloneToken,\n\t\tReadYourWrites: config.ReadYourWrites,\n\t}\n\tclient, err := NewClient(newConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif cloneHeaders {\n\t\tclient.SetHeaders(c.Headers().Clone())\n\t}\n\n\tif config.CloneToken {\n\t\tclient.SetToken(c.token)\n\t}\n\n\tclient.replicationStateStore = c.replicationStateStore\n\n\treturn client, nil\n}",
"func (e *ExpressionAtom) Clone(cloneTable *pkg.CloneTable) *ExpressionAtom {\n\tclone := &ExpressionAtom{\n\t\tAstID: uuid.New().String(),\n\t\tGrlText: e.GrlText,\n\t}\n\n\tif e.Variable != nil {\n\t\tif cloneTable.IsCloned(e.Variable.AstID) {\n\t\t\tclone.Variable = cloneTable.Records[e.Variable.AstID].CloneInstance.(*Variable)\n\t\t} else {\n\t\t\tcloned := e.Variable.Clone(cloneTable)\n\t\t\tclone.Variable = cloned\n\t\t\tcloneTable.MarkCloned(e.Variable.AstID, cloned.AstID, e.Variable, cloned)\n\t\t}\n\t}\n\n\tif e.FunctionCall != nil {\n\t\tif cloneTable.IsCloned(e.FunctionCall.AstID) {\n\t\t\tclone.FunctionCall = cloneTable.Records[e.FunctionCall.AstID].CloneInstance.(*FunctionCall)\n\t\t} else {\n\t\t\tcloned := e.FunctionCall.Clone(cloneTable)\n\t\t\tclone.FunctionCall = cloned\n\t\t\tcloneTable.MarkCloned(e.FunctionCall.AstID, cloned.AstID, e.FunctionCall, cloned)\n\t\t}\n\t}\n\n\treturn clone\n}",
"func (atc *AtomicTransactionComposer) Clone() AtomicTransactionComposer {\n\tnewTxContexts := make([]transactionContext, len(atc.txContexts))\n\tcopy(newTxContexts, atc.txContexts)\n\tfor i := range newTxContexts {\n\t\tnewTxContexts[i].txn.Group = types.Digest{}\n\t}\n\n\tif len(newTxContexts) == 0 {\n\t\tnewTxContexts = nil\n\t}\n\n\treturn AtomicTransactionComposer{\n\t\tstatus: BUILDING,\n\t\ttxContexts: newTxContexts,\n\t}\n}",
"func (s *Stack) Clone() *Stack {\n\t//return deepcopy.Copy(s);\n\tcopyOfUnderlying := CopyOf(s.Stack).(*cfn.Stack)\n\tcopy := Stack{\n\t\tsrv: s.srv,\n\t\tStack: copyOfUnderlying,\n\t}\n\treturn ©\n}",
"func (n *node) clone() *node {\n\treturn &node{\n\t\tvalue: n.value,\n\t\tchildren: n.cloneChildren(),\n\t}\n}",
"func Clone(c Configuration, owner, name string) (Git, filesystem.Filesystem, error) {\n\tfs := memfs.New()\n\n\trepo, err := git.Clone(memory.NewStorage(), fs, &git.CloneOptions{\n\t\tURL: fmt.Sprintf(\n\t\t\t\"https://%s:%s@github.com/%s/%s.git\",\n\t\t\tc.GithubUsername(),\n\t\t\tc.GithubToken(),\n\t\t\towner,\n\t\t\tname,\n\t\t),\n\t\tReferenceName: plumbing.ReferenceName(fmt.Sprintf(\"refs/heads/%s\", c.BaseBranch())),\n\t})\n\n\tif err != nil {\n\t\treturn nil, nil, errors.Errorf(`failed to git clone because \"%s\"`, err)\n\t}\n\n\thead, err := repo.Head()\n\tif err != nil {\n\t\treturn nil, nil, errors.Errorf(`failed to retrieve git head because \"%s\"`, err)\n\t}\n\n\treturn &DefaultGitClient{\n\t\tc: c,\n\t\trepo: repo,\n\t\tbase: head,\n\t}, filesystem.NewMemory(fs), nil\n}",
"func (p *PersistentVolume) Clone() Resource {\n\treturn copyResource(p, &PersistentVolume{})\n}",
"func (b *Bzr) Clone(d *Dependency) (err error) {\n\tif !util.Exists(d.Path()) {\n\t\terr = util.RunCommand(\"go get -u \" + d.Repo)\n\t}\n\treturn\n}",
"func cloneRequest(r *http.Request) *http.Request {\n\t// shallow copy of the struct\n\tr2 := new(http.Request)\n\t*r2 = *r\n\t// deep copy of the Header\n\tr2.Header = make(http.Header)\n\tfor k, s := range r.Header {\n\t\tr2.Header[k] = s\n\t}\n\treturn r2\n}",
"func cloneRequest(r *http.Request) *http.Request {\n\t// shallow copy of the struct\n\tr2 := new(http.Request)\n\t*r2 = *r\n\t// deep copy of the Header\n\tr2.Header = make(http.Header)\n\tfor k, s := range r.Header {\n\t\tr2.Header[k] = s\n\t}\n\treturn r2\n}",
"func cloneRequest(r *http.Request) *http.Request {\n\t// shallow copy of the struct\n\tr2 := new(http.Request)\n\t*r2 = *r\n\t// deep copy of the Header\n\tr2.Header = make(http.Header)\n\tfor k, s := range r.Header {\n\t\tr2.Header[k] = s\n\t}\n\treturn r2\n}"
] | [
"0.7064204",
"0.6950689",
"0.68847734",
"0.6830195",
"0.6234559",
"0.6180015",
"0.61048836",
"0.6068922",
"0.59161884",
"0.58908933",
"0.58908623",
"0.58654845",
"0.58292323",
"0.56611645",
"0.5652772",
"0.56419474",
"0.56299615",
"0.5595035",
"0.55785984",
"0.5573975",
"0.5573531",
"0.55653024",
"0.55572015",
"0.55491114",
"0.55402976",
"0.55376935",
"0.5515249",
"0.55078304",
"0.5497945",
"0.54857093",
"0.547454",
"0.54732776",
"0.5464805",
"0.5452658",
"0.5444036",
"0.54413706",
"0.543367",
"0.5423948",
"0.54180753",
"0.5408492",
"0.5403817",
"0.5402017",
"0.53943497",
"0.5385201",
"0.53738594",
"0.5367252",
"0.53525317",
"0.53407663",
"0.5336136",
"0.5330488",
"0.53269976",
"0.530715",
"0.53042036",
"0.5302983",
"0.53027403",
"0.5294799",
"0.52890897",
"0.52790135",
"0.5278501",
"0.52715516",
"0.5269376",
"0.5258282",
"0.5254007",
"0.52539784",
"0.52415127",
"0.5238041",
"0.5233641",
"0.5230879",
"0.52308637",
"0.5228876",
"0.52276635",
"0.522387",
"0.5222036",
"0.52202195",
"0.5218338",
"0.52153444",
"0.52092487",
"0.5208231",
"0.52062976",
"0.52053624",
"0.5202253",
"0.51991516",
"0.51880383",
"0.5170799",
"0.5163424",
"0.5159808",
"0.5157861",
"0.5154792",
"0.5142336",
"0.51419586",
"0.51391536",
"0.51391083",
"0.5138001",
"0.51354635",
"0.51346415",
"0.51343626",
"0.5130859",
"0.5121956",
"0.5121956",
"0.5121956"
] | 0.76719445 | 0 |
Root creates a new TRoot for use in spawning templates. The name should match the main layout's name (as defined in the layout template) so execution of templates doesn't require a template.Lookup call, which can be somewhat error prone. | Root создает новый TRoot для использования при спауне шаблонов. Имя должно совпадать с именем основного макета (как определено в шаблоне макета), чтобы выполнение шаблонов не требовало вызова template.Lookup, что может быть довольно подвержено ошибкам. | func Root(name, path string) *TRoot {
var tmpl = &Template{template.New(name), name}
var t = &TRoot{tmpl, path}
return t
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (tp *Template) Root(name string) *Template {\n\ttp.root = name\n\treturn tp\n}",
"func (t *TRoot) Name() string {\n\treturn t.template.Name\n}",
"func (*Root) Name() (name string) { return \"/\" }",
"func (r *Root) Name() string { return \"\" }",
"func (a *App) getRoot(w http.ResponseWriter, r *http.Request) {\n\n\tdata := struct {\n\t\tTitle string\n\t\tActive string\n\t\tHeaderTitle string\n\t\tUserRole string\n\t}{\n\t\tTitle: \"Dashboard\",\n\t\tActive: \"Dashboard\",\n\t\tHeaderTitle: \"Project Dashboard\",\n\t\tUserRole: a.getUserRole(r),\n\t}\n\n\tgetTemplate(w, r, \"dashboard\", data)\n\n}",
"func NewRoot(name string, invalidState int) *DSSRoot {\n\troot := &DSSRoot{Name: name}\n\troot.bottom = newDSSNode(invalidState, &pseudo{\"bottom\"})\n\troot.bottom.pathcnt = 1\n\troot.stacks = make([]*Stack, 0, 10)\n\troot.reservoir = ssl.New()\n\treturn root\n}",
"func RootRouter(responseWriter http.ResponseWriter, request *http.Request) {\n\tTemplateInput := getTemplateInputFromRequest(responseWriter, request)\n\treplyWithTemplate(\"indextemplate.html\", TemplateInput, responseWriter, request)\n}",
"func RootHandler(w http.ResponseWriter, req *http.Request) {\n\tw.Header().Set(\"Content-type\", \"text/html\")\n\tif err := req.ParseForm(); err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"error parsing url %s\", err), http.StatusInternalServerError)\n\t}\n\tpath := mux.Vars(req)[\"path\"]\n\tif path == \"\" || path == \"/\" {\n\t\tpath = \"index.tpl\"\n\t}\n\tif !strings.HasSuffix(path, \".tpl\") {\n\t\tpath += \".tpl\"\n\t}\n\tif _, ok := registeredTpl[path]; !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"Not found\")\n\t\treturn\n\t}\n\tif err := templates.ExecuteTemplate(w, path, Page{\n\t\tTitle: \"Home\",\n\t}); err != nil {\n\t\tlog.Printf(\"Error executing template: %s\", err)\n\t\thttp.Error(w, fmt.Sprintf(\"error parsing template: %s\", err), http.StatusInternalServerError)\n\t}\n}",
"func (r *Root) Root() (fs.Node, error) {\n\treturn newDir(nil, r.registry), nil\n}",
"func rootHandler(w http.ResponseWriter, r *http.Request) {\n\t//fmt.Fprintf(w, \"<h1>Hello All</h1>\")\n\tt,_ := template.ParseFiles(\"root.html\")\n\tt.Execute(w, nil)\n\n}",
"func (tree *DNFTree) CreateRoot(phi br.ClauseSet, isFinal bool) int {\n\treturn tree.CreateNodeEntry(phi, 0, isFinal)\n}",
"func rootHandler(w http.ResponseWriter, r *http.Request, title string) {\n\tp, err := loadRoot(title)\n\n\tp.Body = template.HTML(blackfriday.MarkdownCommon([]byte(p.Body)))\n\tp.Body = template.HTML(convertWikiMarkup([]byte(p.Body)))\n\n\terr = templates.ExecuteTemplate(w, \"root.html\", p)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}",
"func (a *App) root(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Add(\"content-type\", \"text/html; charset=utf-8\")\n\tcolor := r.URL.Query().Get(\"color\")\n\tif color == \"\" {\n\t\tcolor = \"black\"\n\t}\n\tvar Deets []Deet\n\ta.DB.Find(&Deets)\n\tdata := Home{\n\t\tPath: html.EscapeString(r.URL.Path),\n\t\tName: \"Grunde\",\n\t\tColor: color,\n\t\tDeets: Deets,\n\t}\n\tt, _ := template.ParseFiles(\"templates/home.html\")\n\tt.Execute(w, data)\n\treturn\n}",
"func New(root, tmplName string) (Template, error) {\n\tvar dirs, files []string\n\tfilename := os.Getenv(\"GOPS_SCHEMA\") + tmplName + ext\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\tfmt.Println(\"Error opening file: \", err)\n\t\treturn Template{}, err\n\t}\n\tdefer file.Close()\n\n\t// Use bufio scanner, the default Scan method is by line\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := fixLine(scanner.Text())\n\t\tif len(line) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tdir, file := splitFilename(line)\n\t\tif len(dir) != 0 {\n\t\t\tdirs = append(dirs, dir)\n\t\t}\n\t\tif len(file) != 0 {\n\t\t\tfiles = append(files, line)\n\t\t}\n\t}\n\treturn Template{dirs, files, root, tmplName}, nil\n}",
"func NewRoot() *Root {\n\tr := new(Root)\n\tr.objects.init(8)\n\tr.idCache = make(map[int]libFldDoc)\n\tr.missing = make(map[int]libFldDoc)\n\treturn r\n}",
"func NewRoot(repo restic.Repository, cfg Config) *Root {\n\tdebug.Log(\"NewRoot(), config %v\", cfg)\n\n\troot := &Root{\n\t\trepo: repo,\n\t\tcfg: cfg,\n\t\tblobCache: bloblru.New(blobCacheSize),\n\t}\n\n\tif !cfg.OwnerIsRoot {\n\t\troot.uid = uint32(os.Getuid())\n\t\troot.gid = uint32(os.Getgid())\n\t}\n\n\t// set defaults, if PathTemplates is not set\n\tif len(cfg.PathTemplates) == 0 {\n\t\tcfg.PathTemplates = []string{\n\t\t\t\"ids/%i\",\n\t\t\t\"snapshots/%T\",\n\t\t\t\"hosts/%h/%T\",\n\t\t\t\"tags/%t/%T\",\n\t\t}\n\t}\n\n\troot.SnapshotsDir = NewSnapshotsDir(root, rootInode, rootInode, NewSnapshotsDirStructure(root, cfg.PathTemplates, cfg.TimeTemplate), \"\")\n\n\treturn root\n}",
"func NewRoot() *Root {\n\treturn ExtendRoot(nil)\n}",
"func (t *TRoot) Template() *Template {\n\treturn t.Clone().template\n}",
"func rootHandler(w http.ResponseWriter, r *http.Request) {\r\n\t// Parsea la plantilla root.html \r\n\tif t, err := template.ParseFiles(filepath.Join(templates, \"root.html\")); err != nil {\r\n\t\t// Se ha presentado un error\r\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\r\n\t} else {\r\n\t\t// retorna la respuesta al cliente por medio de t.Execute\r\n\t\tt.Execute(w, nil)\r\n\t}\r\n}",
"func (s *SVFS) Root() (fs.Node, error) {\n\t// Mount a specific container\n\tif TargetContainer != \"\" {\n\t\tbaseContainer, _, err := SwiftConnection.Container(TargetContainer)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Find segment container too\n\t\tsegmentContainerName := TargetContainer + SegmentContainerSuffix\n\t\tsegmentContainer, _, err := SwiftConnection.Container(segmentContainerName)\n\n\t\t// Create it if missing\n\t\tif err == swift.ContainerNotFound {\n\t\t\tvar container *swift.Container\n\t\t\tcontainer, err = createContainer(segmentContainerName)\n\t\t\tsegmentContainer = *container\n\t\t}\n\t\tif err != nil && err != swift.ContainerNotFound {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn &Container{\n\t\t\tDirectory: &Directory{\n\t\t\t\tapex: true,\n\t\t\t\tc: &baseContainer,\n\t\t\t\tcs: &segmentContainer,\n\t\t\t},\n\t\t}, nil\n\t}\n\n\t// Mount all containers within an account\n\treturn &Root{\n\t\tDirectory: &Directory{\n\t\t\tapex: true,\n\t\t},\n\t}, nil\n}",
"func (fs *FS) Root() (fspkg.Node, error) {\n\tte, ok := fs.r.Lookup(\"\")\n\tif !ok {\n\t\treturn nil, errors.New(\"failed to find root in stargz\")\n\t}\n\treturn &node{fs, te}, nil\n}",
"func (b *Bucket) root(w http.ResponseWriter, r *http.Request) {\n\tb.executeTemplate(w, \"bucket.html\", \"\", b)\n}",
"func (td *WebUI) RootPage(w http.ResponseWriter, r *http.Request) {\n\ttd.templateDataMtx.RLock()\n\t// Execute template to a string instead of directly to the\n\t// http.ResponseWriter so that execute errors can be handled first. This can\n\t// avoid partial writes of the page to the client.\n\tchainHeight := td.ExplorerSource.GetHeight()\n\tpageSize := 6\n\tif chainHeight < pageSize {\n\t\tpageSize = chainHeight\n\t}\n\tinitialBlocks := make([]*hcjson.GetBlockVerboseResult, 0, pageSize)\n\tfor i := chainHeight; i > chainHeight-pageSize; i-- {\n\t\tdata := td.ExplorerSource.GetBlockVerbose(i, false)\n\t\tinitialBlocks = append(initialBlocks, data)\n\t}\n\t// hashrate_h_s := initialBlocks[1].Difficulty * (math.Pow(2, 32)) / 150 // h/s\n\thashrate_th_s := td.ExplorerSource.GetNetWorkHashRate()/math.Pow(10,12) // Th/s\n\tstr, err := TemplateExecToString(td.templ, \"home\", struct {\n\t\tInitialData []*hcjson.GetBlockVerboseResult\n\t\tData WebTemplateData\n\t\tStakeDiffWindowSize int64\n\t\tHashRate float64\n\t}{\n\t\tinitialBlocks,\n\t\ttd.TemplateData,\n\t\ttd.params.StakeDiffWindowSize,\n\t\thashrate_th_s,\n\t})\n\ttd.templateDataMtx.RUnlock()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to execute template: %v\", err)\n\t\thttp.Error(w, \"template execute failure\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\tw.WriteHeader(http.StatusOK)\n\tio.WriteString(w, str)\n}",
"func RootHandler(w http.ResponseWriter, rq *http.Request) {\n\terr := tmpl.Execute(w, *page)\n\tif nil != err {\n\t\tpanic(err)\n\t}\n}",
"func (nf *NavigationFactory) Root() string {\n\treturn nf.rootPath\n}",
"func (l *loaderImpl) Root() string {\n\treturn l.root\n}",
"func (r *Root) Root() (fs.Node, error) {\n\tdebug.Log(\"Root()\")\n\treturn r, nil\n}",
"func (fs HgmFs) Root() (fs.Node, error) {\n\treturn &HgmDir{hgmFs: fs, localDir: \"/\"}, nil\n}",
"func NewRoot() *Root {\n\t// Create a new Root that describes our base query set up. In this\n\t// example we have a user query that takes one argument called ID\n\troot := Root{\n\t\tQuery: queryType,\n\t\tMutation: mutationType,\n\t}\n\n\treturn &root\n}",
"func (p applicationPackager) defaultTemplate(templateName string, data map[string]interface{}) (template.HTML, error) {\n\n\tfmap := p.templateFMap()\n\treturn p.xmlTemplateWithFuncs(templateName, data, fmap)\n}",
"func NewRoot(buffer int) *Root {\n\n\tr := &Root{\n\t\toutputdrivers: make([]OutputDriver, 0, 1),\n\t\terrorlisteners: make([]ErrorListener, 0),\n\t\tevents: make(chan *Event, buffer),\n\t}\n\n\tr.wg.Add(1)\n\tgo r.run()\n\n\treturn r\n}",
"func NewEngine(rootDir string, tset *TemplateSet) *Engine {\n\ttm := make(templateMap)\n\n\treturn &Engine{\n\t\trootDir: rootDir,\n\t\ttset: tset,\n\t\ttmap: &tm,\n\t}\n}",
"func NewRoot(db *db.DB) *Root {\n\n\t// Create reslver for holding our database.\n\t// More on resolver https://graphql.org/learn/execution/#root-fields-resolvers\n\tresolver := Resolver{db: db}\n\n\t// Create a new Root\n\troot := Root{\n\t\tQuery: graphql.NewObject(\n\t\t\tgraphql.ObjectConfig{\n\t\t\t\tName: \"Query\",\n\t\t\t\tFields: graphql.Fields{\n\t\t\t\t\t\"doctor\": doctorQuery(&resolver),\n\t\t\t\t\t\"doctors\": doctorsQuery(&resolver),\n\t\t\t\t\t\"illness\": illnessQuery(&resolver),\n\t\t\t\t\t\"illnesses\": illnessesQuery(&resolver),\n\t\t\t\t\t\"user\": userQuery(&resolver),\n\t\t\t\t\t\"users\": usersQuery(&resolver),\n\t\t\t\t\t\"userByEmail\": userByEmailQuery(&resolver),\n\t\t\t\t},\n\t\t\t},\n\t\t),\n\t}\n\n\treturn &root\n}",
"func (e *Engine) Layout(key string) *Engine {\n\te.LayoutName = key\n\treturn e\n}",
"func ExtendRoot(overrides RootIface) *Root {\n\tjsiiID, err := jsii.GlobalRuntime.Client().Create(\n\t\t\"jsii$cdk$0.0.0.Root\",\n\t\t[]interface{}{},\n\t\tnil,\n\t)\n\tif err != nil {\n\t\tpanic(\"how are error handled?\" + err.Error())\n\t}\n\treturn &Root{\n\t\tbase: jsii.Base{ID: jsiiID},\n\t\tConstruct: InternalNewConstructAsBaseClass(jsiiID),\n\t}\n}",
"func (v *VaultFS) Root() (fs.Node, error) {\n\tlogrus.Debug(\"returning root\")\n\treturn NewRoot(v.root, v.Logical()), nil\n}",
"func newRoot(view *View, leafAllocation int64) *root {\n\tif leafAllocation < 10 {\n\t\tleafAllocation = 10\n\t}\n\tleafNum := 3 - ((leafAllocation - 1) % 3) + leafAllocation\n\tnodeNum := (leafNum - 1) / 3\n\tr := new(root)\n\tr.leaves = make([]leaf, leafNum, leafNum)\n\tfor i := 0; i < len(r.leaves)-2; i++ {\n\t\tr.leaves[i].nextFree = &r.leaves[i+1]\n\t}\n\tr.nodes = make([]node, nodeNum, nodeNum)\n\tfor i := 0; i < len(r.nodes)-2; i++ {\n\t\tr.nodes[i].nextFree = &r.nodes[i+1]\n\t}\n\tr.freeNode = &r.nodes[0]\n\tr.freeLeaf = &r.leaves[0]\n\trootNode := r.newNode(view)\n\tr.rootNode = rootNode\n\treturn r\n}",
"func (templateManager *TemplateManager) UseLayoutTemplate(layoutTemplateName string) *LayoutTemplateManager {\n\treturn getLayoutTemplateManager(templateManager, layoutTemplateName)\n}",
"func DefaultTmpl() *template.Template {\n\ttmpl, err := template.New(\"sidecar\").Parse(sidecarContainer)\n\tif err != nil {\n\t\topenlogging.Error(\"get default template failed: \" + err.Error())\n\t}\n\treturn tmpl\n}",
"func rootHandler(c appengine.Context, w http.ResponseWriter, r *http.Request) *appError {\n\tlogoutURL, err := user.LogoutURL(c, \"/\")\n\tif err != nil {\n\t\tc.Warningf(\"creating logout URL: %v\", err)\n\t\tlogoutURL = \"/\"\n\t}\n\tuploadURL, err := blobstore.UploadURL(c, \"/upload\", nil)\n\tif err != nil {\n\t\treturn appErrorf(err, \"could not create blobstore upload url\")\n\t}\n\tusername := \"none\"\n\tif u := user.Current(c); u != nil {\n\t\tusername = u.String()\n\t}\n\terr = rootTemplate.Execute(w, &rootTemplateData{\n\t\tLogoutURL: logoutURL,\n\t\tUploadURL: uploadURL.String(),\n\t\tUser: username,\n\t})\n\tif err != nil {\n\t\treturn appErrorf(err, \"could not write template\")\n\t}\n\treturn nil\n}",
"func NewRootModule(table ...module.ModuleInitHandle) types.Module {\n\treturn module.NewModuleFor(\n\t\tutils.Lazy(NewRootContainer).(func() types.Container),\n\t\tImport(event.EventModuleFor(\"root\")),\n\t\tJoin(table...),\n\t\tEvent(Emit(\"ready\"), Emit(\"init\")),\n\t\tBootstrap(func(listener types.EventListener) {\n\t\t\tlistener.Emit(\"exit\")\n\t\t}),\n\t)\n}",
"func (obj *language) Root() string {\n\treturn obj.root\n}",
"func (w *RootWalker) Root() *Root {\n\treturn w.r\n}",
"func RootSymbol(name data.Name) data.Symbol {\n\treturn data.NewQualifiedSymbol(name, RootDomain)\n}",
"func (a *Application) SetRoot(root Primitive, fullscreen bool) *Application {\n\ta.Lock()\n\ta.root = root\n\ta.rootFullscreen = fullscreen\n\tif a.screen != nil {\n\t\ta.screen.Clear()\n\t}\n\ta.Unlock()\n\n\ta.SetFocus(root)\n\n\treturn a\n}",
"func TemplateRootDir() (string, error) {\n\tconfig, err := os.UserConfigDir()\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to get UserConfigDir\")\n\t}\n\n\ttmplPath := filepath.Join(config, \"suborbital\", \"templates\")\n\n\tif os.Stat(tmplPath); err != nil {\n\t\tif errors.Is(err, os.ErrNotExist) {\n\t\t\tif err := os.MkdirAll(tmplPath, os.ModePerm); err != nil {\n\t\t\t\treturn \"\", errors.Wrap(err, \"failed to MkdirAll template directory\")\n\t\t\t}\n\t\t} else {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to Stat template directory\")\n\t\t}\n\t}\n\n\treturn tmplPath, nil\n}",
"func (l *Loader) Root() *ecsgen.Root {\n\treturn l.root\n}",
"func (l *fileLoader) Root() string {\n\treturn l.root\n}",
"func (_ Def) RootElement(\n\t// use GreetingsElement\n\tgreetingsElem GreetingsElement,\n) domui.RootElement {\n\treturn Div(\n\t\tgreetingsElem,\n\t)\n}",
"func (a *API) getRoot(w http.ResponseWriter, r *http.Request) {\n\tout := map[string]string{\n\t\t\"apiName\": \"rutte-api\",\n\t\t\"apiDescription\": \"API's for voting platform\",\n\t\t\"apiVersion\": \"v0.0\",\n\t\t\"appVersion\": version.String(),\n\t}\n\trender.JSON(w, r, out)\n}",
"func (layout Layout) rootLevel() int {\n\treturn layout.numLevels() - 1\n}",
"func T(name string) *template.Template {\n\treturn t(\"_base.html\", name)\n}",
"func (f *Fs) Root() string {\n\treturn f.root\n}",
"func (f *Fs) Root() string {\n\treturn f.root\n}",
"func (f *Fs) Root() string {\n\treturn f.root\n}",
"func (f *Fs) Root() string {\n\treturn f.root\n}",
"func (f *Fs) Root() string {\n\treturn f.root\n}",
"func (f *Fs) Root() string {\n\treturn f.root\n}",
"func (f *Fs) Root() string {\n\treturn f.root\n}",
"func (fs *fsMutable) initRoot() (err error) {\n\t_, found := fs.lookupTree.Get(formKey(fuseops.RootInodeID))\n\tif found {\n\t\treturn\n\t}\n\terr = fs.createNode(\n\t\tformLookupKey(fuseops.RootInodeID, rootPath),\n\t\tfuseops.RootInodeID,\n\t\trootPath,\n\t\tnil,\n\t\tfuseutil.DT_Directory,\n\t\ttrue)\n\treturn\n}",
"func MakeRoot() [SzRoot]byte {\n\tvar buf [SzRoot]byte\n\tqu := castQueueRootPage(buf[:])\n\tqu.version.Set(queueVersion)\n\treturn buf\n}",
"func (*Root) Sys() interface{} { return nil }",
"func Root(w io.Writer) io.Writer {\n\tswitch x := w.(type) {\n\tcase tree:\n\t\treturn coalesceWriters(x.Root(), w)\n\tcase node:\n\t\treturn coalesceWriters(Root(x.Parent()), w)\n\tcase decorator:\n\t\treturn coalesceWriters(Root(x.Base()), w)\n\tdefault:\n\t\treturn w\n\t}\n}",
"func Layout(db *h.DagBuilderHelper) (ipld.Node, error) {\n\tnewRoot := db.NewFSNodeOverDag(ft.TFile)\n\troot, _, err := fillTrickleRec(db, newRoot, -1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn root, db.Add(root)\n}",
"func (a Generator) Run(root string, data makr.Data) error {\n\tg := makr.New()\n\n\tif a.AsAPI {\n\t\tdefer os.RemoveAll(filepath.Join(a.Root, \"templates\"))\n\t\tdefer os.RemoveAll(filepath.Join(a.Root, \"locales\"))\n\t\tdefer os.RemoveAll(filepath.Join(a.Root, \"public\"))\n\t}\n\tif a.Force {\n\t\tos.RemoveAll(a.Root)\n\t}\n\n\tg.Add(makr.NewCommand(makr.GoGet(\"golang.org/x/tools/cmd/goimports\", \"-u\")))\n\tif a.WithDep {\n\t\tg.Add(makr.NewCommand(makr.GoGet(\"github.com/golang/dep/cmd/dep\", \"-u\")))\n\t}\n\n\tfiles, err := generators.FindByBox(packr.NewBox(\"../newapp/templates\"))\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tfor _, f := range files {\n\t\tif !a.AsAPI {\n\t\t\tg.Add(makr.NewFile(f.WritePath, f.Body))\n\t\t\tcontinue\n\t\t}\n\n\t\tif strings.Contains(f.WritePath, \"locales\") || strings.Contains(f.WritePath, \"templates\") || strings.Contains(f.WritePath, \"public\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tg.Add(makr.NewFile(f.WritePath, f.Body))\n\t}\n\n\tdata[\"name\"] = a.Name\n\tif err := refresh.Run(root, data); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\ta.setupCI(g, data)\n\n\tif err := a.setupWebpack(root, data); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tif err := a.setupPop(root, data); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tif err := a.setupDocker(root, data); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tg.Add(makr.NewCommand(a.goGet()))\n\n\tg.Add(makr.Func{\n\t\tRunner: func(root string, data makr.Data) error {\n\t\t\tg.Fmt(root)\n\t\t\treturn nil\n\t\t},\n\t})\n\n\ta.setupVCS(g)\n\n\tdata[\"opts\"] = a\n\treturn g.Run(root, data)\n}",
"func (log Logger) Root(root Data) Logger {\n\tnewRoot := Data{}\n\tfor k, v := range log.root {\n\t\tnewRoot[k] = v\n\t}\n\tfor k, v := range root {\n\t\tnewRoot[k] = v\n\t}\n\tlog.root = newRoot\n\treturn log\n}",
"func (dfs *DaosFileSystem) Root() *DaosNode {\n\treturn dfs.root\n}",
"func (t *TRoot) Clone() *TRoot {\n\tvar clone, _ = t.template.Clone()\n\treturn &TRoot{clone, t.Path}\n}",
"func (o GetAppTemplateContainerVolumeMountOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerVolumeMount) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (adder *Adder) PinRoot(root ipld.Node) error {\n\tif !adder.Pin {\n\t\treturn nil\n\t}\n\n\trnk := root.Cid()\n\n\terr := adder.dagService.Add(adder.ctx, root)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif adder.tempRoot.Defined() {\n\t\terr := adder.pinning.Unpin(adder.ctx, adder.tempRoot, true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tadder.tempRoot = rnk\n\t}\n\n\tdur, err := pin.ExpiresAtWithUnitAndCount(pin.DefaultDurationUnit, adder.PinDuration)\n\tif err != nil {\n\t\treturn err\n\t}\n\tadder.pinning.PinWithMode(rnk, dur, pin.Recursive)\n\treturn adder.pinning.Flush(adder.ctx)\n}",
"func (p *Project) Root() string {\n\treturn p.root\n}",
"func NewSolutionsRoot()(*SolutionsRoot) {\n m := &SolutionsRoot{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}",
"func (d *DriveDB) createRoot() error {\n\tlaunch, _ := time.Unix(1335225600, 0).MarshalText()\n\tfile := &gdrive.File{\n\t\tId: d.rootId,\n\t\tTitle: \"/\",\n\t\tMimeType: driveFolderMimeType,\n\t\tLastViewedByMeDate: string(launch),\n\t\tModifiedDate: string(launch),\n\t\tCreatedDate: string(launch),\n\t}\n\t// Inode allocation special-cases the rootId, so we can let the usual\n\t// code paths do all the work\n\t_, err := d.UpdateFile(nil, file)\n\treturn err\n}",
"func (hr *GinRendererS) Instance(name string, data interface{}) render.Render {\n\tif simplateViewPathTemplates[name] == nil {\n\t\tsugar.Warnf(\"no template of name: %s\", name)\n\t}\n\n\tlayoutFile := defaultLayoutFile\n\n\t// body\n\tvar buf bytes.Buffer\n\tExecuteViewPathTemplate(&buf, name, data)\n\tdataT := make(gin.H)\n\tdataMap, ok := data.(gin.H)\n\tif ok {\n\t\tdataMap[\"LayoutContent\"] = template.HTML(buf.String())\n\t\tdataT = dataMap\n\t\t// custom layout\n\t\tif layout, ok := dataMap[\"layout\"]; ok {\n\t\t\tlayoutFile = layout.(string)\n\t\t}\n\t} else {\n\t\tdataT[\"LayoutContent\"] = template.HTML(buf.String())\n\t}\n\treturn render.HTML{\n\t\tTemplate: simplateViewPathTemplates[layoutFile],\n\t\tData: dataT,\n\t}\n}",
"func (t *Variable) Root() Type {\n\tt.instanceMu.Lock()\n\tdefer t.instanceMu.Unlock()\n\tif t.Instance == nil {\n\t\treturn t\n\t}\n\tr := t.Instance.Root()\n\tt.Instance = r\n\treturn r\n}",
"func (d *Document) Root() Node {\n\treturn Node{0, d.rev, d}\n}",
"func (t *TraceWrapper) NewRootSpan(name string) *SpanWrapper {\n\ts := t.newSpan(name)\n\ts.IsRoot = true\n\treturn s\n}",
"func (t *Tree) UpdateRoot(p *Pos, model ModelInterface) {\n\tif t.p == nil || t.p.Hash() != p.Hash() {\n\t\tt.p = p\n\t\tt.root = t.NewTreeNode(nil, 0, false, 1, true)\n\t\tt.root.rootify(p, model)\n\t}\n}",
"func NewRoot(db *sql.Db) (*QRoot, *MRoot) {\n\tqueryResolver := QueryResolver{db: db}\n\tmutationResolver := MutationResolver{db: db}\n\n\tqueryRoot := QRoot{\n\t\tQuery: graphql.NewObject(\n\t\t\tgraphql.ObjectConfig{\n\t\t\t\tName: \"RootQuery\",\n\t\t\t\tFields: graphql.Fields{\n\t\t\t\t\t\"GetPages\": &graphql.Field{\n\t\t\t\t\t\tType: graphql.NewList(WikiPage),\n\t\t\t\t\t\tArgs: graphql.FieldConfigArgument{\n\t\t\t\t\t\t\t\"Title\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.String,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"Tags\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.String,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResolve: queryResolver.GetPagesResolver,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t),\n\t}\n\tmutRoot := MRoot{\n\t\tMutation: graphql.NewObject(\n\t\t\tgraphql.ObjectConfig{\n\t\t\t\tName: \"RootMutation\",\n\t\t\t\tFields: graphql.Fields{\n\t\t\t\t\t\"SavePage\": &graphql.Field{\n\t\t\t\t\t\tType: WikiPage,\n\t\t\t\t\t\tArgs: graphql.FieldConfigArgument{\n\t\t\t\t\t\t\t\"Title\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.String),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"Tags\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.String),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"Ingress\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.String),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"MainText\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.String),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"SideBarInfo\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.String),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"ProfileImagePath\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.String),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"BodyImagePath\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.String),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"Visible\": 
&graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.Boolean),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"Author\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.String),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResolve: mutationResolver.SavePageResolver,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t),\n\t}\n\treturn &queryRoot, &mutRoot\n}",
"func TmpfsRoot(l *LinuxFactory) error {\n\tmounted, err := mount.Mounted(l.Root)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !mounted {\n\t\tif err := syscall.Mount(\"tmpfs\", l.Root, \"tmpfs\", 0, \"\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (r *RPCClient) Root() (t *RPCClientRoot) {\n\treturn &RPCClientRoot{r}\n}",
"func (t *TRoot) Build(path string) (*Template, error) {\n\tvar tNew, err = t.template.Clone()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = tNew.ParseFiles(filepath.Join(t.Path, path))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttNew.Name = path\n\treturn tNew, nil\n}",
"func (r *router) Root() *OpenAPI {\n\treturn r.root\n}",
"func (o ClusterTemplateOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ClusterTemplate) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func pivotRoot() error {\n\n\tnewRoot, err := os.Getwd()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Get pwd error %v\\n\", err)\n\t}\n\n\t// 声明新的mount namespace独立\n\tif err := unix.Mount(\"\", \"/\", \"\", unix.MS_PRIVATE|unix.MS_REC, \"\"); err != nil {\n\t\treturn err\n\t}\n\n\t// bind mount new_root to itself - this is a slight hack needed to satisfy requirement (2)\n\tif err := unix.Mount(newRoot, newRoot, \"bind\", unix.MS_BIND|unix.MS_REC, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"mount newRoot %s to itself error: %v\", newRoot, err)\n\t}\n\n\t// create putOld directory\n\tputOld := filepath.Join(newRoot, \"/.pivot_root\")\n\tif err := os.MkdirAll(putOld, 0777); err != nil {\n\t\treturn fmt.Errorf(\"creating putOld directory %v\", err)\n\t}\n\n\t// The following restrictions apply to new_root and put_old:\n\t// 1. They must be directories.\n\t// 2. new_root and put_old must not be on the same filesystem as the current root.\n\t// 3. put_old must be underneath new_root, that is, adding a nonzero number of /.. to the string pointed to by put_old must yield the same directory as new_root.\n\t// 4. No other filesystem may be mounted on put_old.\n\t// see https://man7.org/linux/man-pages/man2/pivot_root.2.html\n\n\tif err := unix.PivotRoot(newRoot, putOld); err != nil {\n\t\treturn fmt.Errorf(\"syscalling PivotRoot %v\", err)\n\t}\n\n\t// Note that this also applies to the calling process: pivotRoot() may\n\t// or may not affect its current working directory. 
It is therefore\n\t// recommended to call chdir(\"/\") immediately after pivotRoot().\n\tif err := os.Chdir(\"/\"); err != nil {\n\t\treturn fmt.Errorf(\"while Chdir %v\", err)\n\t}\n\n\t// umount putOld, which now lives at .pivot_root\n\tputOld = \"/.pivot_root\"\n\tif err := unix.Unmount(putOld, unix.MNT_DETACH); err != nil {\n\t\treturn fmt.Errorf(\"while unmount putOld %v\", err)\n\t}\n\n\t// remove put_old\n\tif err := os.RemoveAll(putOld); err != nil {\n\t\treturn fmt.Errorf(\"while remove putOld %v\", err)\n\t}\n\n\t//graphdriver.NewWorkSpace()\n\treturn nil\n}",
"func CreateRootObject(ctx context.Context, object *models.Device) (rep string, err error) {\n\tobjectType := labelsType(object.Labels).getType()\n\tif objectType == \"\" {\n\t\treturn \"Khong biet loai doi tuong\", fmt.Errorf(\"Khong biet loai doi tuong\")\n\t}\n\n\tif id := convertNameId(object.Name); object.Name == \"\" || id != \"\" {\n\t\treturn \"Ten doi tuong da ton tai\", fmt.Errorf(\"Ten doi tuong da ton tai\")\n\t}\n\trootId := uuid.New().String()\n\tobject.Id = rootId\n\n\tif objectType == DEVICETYPE {\n\t\tobject.Labels = make([]string, 5)\n\t\tobject.Labels[0] = objectType\n\t\tobject.Labels[1] = ROOTOBJECT\n\t\tobject.Labels[2] = SUBOBJECT\n\t\tobject.Labels[3] = rootId\n\t\tobject.Labels[4] = UNINITIALIZIED\n\t} else {\n\t\tobject.Labels = make([]string, 3)\n\t\tobject.Labels[0] = objectType\n\t\tobject.Labels[1] = ROOTOBJECT\n\t\tobject.Labels[2] = INITIALIZIED // truong hop nay tam cho = da dc khoi tao\n\t}\n\n\tp := make(map[string]models.ProtocolProperties)\n\tp[PROTOCOLSNETWORKNAME] = object.Protocols[PROTOCOLSNETWORKNAME]\n\tobject.Protocols = p\n\n\tdsName := object.Service.Name\n\n\t// rootObject se thuoc ve deviceSerivce: manager-service\n\tobject.Service = models.DeviceService{\n\t\tName: DSManagerName,\n\t}\n\tif objectType == DEVICETYPE {\n\t\tobject.Service.Name = dsName\n\t}\n\n\trep, err = clientMetaDevice.Add(object, ctx)\n\tif err != nil {\n\t\tLoggingClient.Error(err.Error())\n\t\treturn rep, err\n\t}\n\n\tisInit := false\n\tvar newObject models.Device\n\tfor count := 0; (isInit == false) && (count <= CountRetryConst); count++ {\n\t\tnewObject, err = clientMetaDevice.Device(rootId, ctx)\n\t\tif labelsType(newObject.Labels).isInitializied() {\n\t\t\tisInit = true\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(TimeStepRetryCont * time.Millisecond)\n\t}\n\tif isInit == false {\n\t\tLoggingClient.Warn(\"Object:\" + newObject.Name + \"chua duoc khoi tao boi Device Service:\" + newObject.Service.Name)\n\t}\n\n\tcacheAddUpdateRoot(newObject)\n\n\tif 
objectType == DEVICETYPE {\n\t\t// doi voi Device, root va sub hop la 1\n\t\tcacheAddSubId(rootId, dsName, rootId)\n\t\tcacheAddMapNameId(object.Name, rootId)\n\t\tcacheUpdateMapMaster(dsName)\n\t}\n\tif isInit == false {\n\t\trep = rep + \"\\nBut Object is uninitialized by DS:\" + newObject.Service.Name\n\t}\n\treturn rep, err\n}",
"func (f *FS) Root() (fs.Node, error) {\n\treturn &Node{fs: f}, nil\n}",
"func (x *Indexer) Root() string {\n\treturn x.config.IndexRoot\n}",
"func (d *dataUsageCache) root() *dataUsageEntry {\n\treturn d.find(d.Info.Name)\n}",
"func (e *GoViewEngine) Init(fs *vfs.VFS, appCfg *config.Config, baseDir string) error {\n\tif e.EngineBase == nil {\n\t\te.EngineBase = new(EngineBase)\n\t}\n\n\tif err := e.EngineBase.Init(fs, appCfg, baseDir, \"go\", \".html\"); err != nil {\n\t\treturn err\n\t}\n\n\t// Add template func\n\tAddTemplateFunc(template.FuncMap{\n\t\t\"safeHTML\": e.tmplSafeHTML,\n\t\t\"import\": e.tmplInclude,\n\t\t\"include\": e.tmplInclude, // alias for import\n\t})\n\n\t// load common templates\n\tif err := e.loadCommonTemplates(); err != nil {\n\t\treturn err\n\t}\n\n\t// collect all layouts\n\tlayouts, err := e.LayoutFiles()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// load layout templates\n\tif err = e.loadLayoutTemplates(layouts); err != nil {\n\t\treturn err\n\t}\n\n\tif !e.IsLayoutEnabled {\n\t\t// since pages directory processed above, no error expected here\n\t\t_ = e.loadNonLayoutTemplates(\"pages\")\n\t}\n\n\tif e.VFS.IsExists(filepath.Join(e.BaseDir, \"errors\")) {\n\t\tif err = e.loadNonLayoutTemplates(\"errors\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (c Control) ServeRoot(w http.ResponseWriter, r *http.Request) {\n\ttemplate := map[string]interface{}{}\n\tc.Config.RLock()\n\tobjects := make([]map[string]string, len(c.Config.Tasks))\n\tfor i, task := range c.Config.Tasks {\n\t\tstatus := task.Status()\n\t\tstatusStr := []string{\"stopped\", \"running\", \"restarting\"}[status]\n\t\taction := []string{\"start\", \"stop\", \"stop\"}[status]\n\t\tactionName := []string{\"Start\", \"Stop\", \"Restarting\"}[status]\n\t\targs := \"[\" + filepath.Base(task.Dir) + \"] \" + strings.Join(task.Args, \" \")\n\t\tobjects[i] = map[string]string{\"action\": action, \"status\": statusStr, \"args\": args,\n\t\t\t\"actionName\": actionName, \"id\": strconv.FormatInt(task.ID, 10)}\n\t}\n\ttemplate[\"tasks\"] = objects\n\tc.Config.RUnlock()\n\n\tserveTemplate(w, r, \"tasks\", template)\n}",
"func (fs *FS) Root() (fs.Node, error) {\n\tfs.μ.RLock()\n\tdefer fs.μ.RUnlock()\n\treturn fs.rnode, nil\n}",
"func PivotRoot(newroot string) error {\n\tputold := filepath.Join(newroot, \"/.pivot_root\")\n\n\t// bind mount newroot to itself - this is a slight hack needed to satisfy the\n\t// pivot_root requirement that newroot and putold must not be on the same\n\t// filesystem as the current root\n\tif err := syscall.Mount(newroot, newroot, \"\", syscall.MS_BIND|syscall.MS_REC, \"\"); err != nil {\n\t\treturn err\n\t}\n\n\t// create putold directory\n\tif err := os.MkdirAll(putold, 0700); err != nil {\n\t\treturn err\n\t}\n\n\t// call pivot_root\n\tif err := syscall.PivotRoot(newroot, putold); err != nil {\n\t\treturn err\n\t}\n\n\t// ensure current working directory is set to new root\n\tif err := os.Chdir(\"/\"); err != nil {\n\t\treturn err\n\t}\n\n\t// umount putold, which now lives at /.pivot_root\n\tputold = \"/.pivot_root\"\n\tif err := syscall.Unmount(putold, syscall.MNT_DETACH); err != nil {\n\t\treturn err\n\t}\n\n\t// remove putold\n\tif err := os.RemoveAll(putold); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (s *DjangoEngine) RootDir(root string) *DjangoEngine {\n\tif s.fs != nil && root != \"\" && root != \"/\" && root != \".\" && root != s.rootDir {\n\t\tsub, err := fs.Sub(s.fs, s.rootDir)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\ts.fs = sub // here so the \"middleware\" can work.\n\t}\n\n\ts.rootDir = filepath.ToSlash(root)\n\treturn s\n}",
"func (p *PrecompiledTemplate) Name() TemplateName {\n\treturn p.name\n}",
"func (o GetAppTemplateContainerOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainer) string { return v.Name }).(pulumi.StringOutput)\n}",
"func New(viewsRootBox *rice.Box) *gintemplate.TemplateEngine {\n\treturn NewWithConfig(viewsRootBox, gintemplate.DefaultConfig)\n}",
"func (o AppTemplateContainerOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AppTemplateContainer) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (h *Header) Root() tview.Primitive {\n\treturn h.Title\n}",
"func newRootDir(t *testing.T) (string, error) {\n\tdir := filepath.Join(os.TempDir(), \"siadirs\", t.Name())\n\terr := os.RemoveAll(dir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn dir, nil\n}"
] | [
"0.7597663",
"0.6376488",
"0.5993522",
"0.56988734",
"0.5487304",
"0.5406119",
"0.53739834",
"0.53517014",
"0.5342579",
"0.5333323",
"0.531819",
"0.5317198",
"0.531694",
"0.5315867",
"0.5252057",
"0.5209373",
"0.5184296",
"0.51761675",
"0.5142933",
"0.50949264",
"0.50891984",
"0.50867033",
"0.50426966",
"0.5007543",
"0.4993895",
"0.4983141",
"0.4965593",
"0.4955867",
"0.4924029",
"0.48988584",
"0.48931003",
"0.48905867",
"0.4879961",
"0.48607707",
"0.48474193",
"0.4799199",
"0.47986636",
"0.4793196",
"0.47701746",
"0.47573155",
"0.47505507",
"0.47480547",
"0.47399166",
"0.47283563",
"0.47208825",
"0.4716239",
"0.47154376",
"0.4710307",
"0.47068053",
"0.46995333",
"0.46888158",
"0.46887434",
"0.46791112",
"0.46791112",
"0.46791112",
"0.46791112",
"0.46791112",
"0.46791112",
"0.46791112",
"0.4670568",
"0.4670201",
"0.4669716",
"0.46536106",
"0.46520787",
"0.464968",
"0.46309862",
"0.46274748",
"0.46272066",
"0.46207917",
"0.46118388",
"0.46067166",
"0.4597768",
"0.45894706",
"0.45776272",
"0.4576774",
"0.4568939",
"0.4560809",
"0.45606235",
"0.4555624",
"0.4546457",
"0.45449397",
"0.45410758",
"0.45407683",
"0.4539045",
"0.45369947",
"0.45319352",
"0.45273465",
"0.45174327",
"0.45117167",
"0.4504748",
"0.45025828",
"0.44999987",
"0.44992474",
"0.4498746",
"0.44976404",
"0.44937658",
"0.44908887",
"0.44884482",
"0.44727764",
"0.44698247"
] | 0.7620937 | 0 |
Funcs allows adding template function maps to TRoots; this should be done before creating any templates, or else previously created templates won't get the newest function maps | Funcs позволяет добавлять отображения шаблонных функций в TRoots; это должно быть выполнено перед созданием любых шаблонов, иначе ранее созданные шаблоны не получат самые последние отображения функций | func (t *TRoot) Funcs(fnList FuncMap) *TRoot {
t.template.Funcs(template.FuncMap(fnList))
return t
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (t *Tmpl) Funcs(funcMap template.FuncMap) {\n\tt.Template = t.Template.Funcs(funcMap)\n\tt.funcs = funcMap\n}",
"func (app *App) TemplateFuncs(funcs ...template.FuncMap) *App {\n\tapp.templateFuncs = append(app.templateFuncs, funcs...)\n\treturn app\n}",
"func (templateManager *TemplateManager) Funcs(funcs template.FuncMap) {\n\tfor _, rootTemplate := range templateManager.rootTemplates {\n\t\trootTemplate.Funcs(funcs)\n\t}\n\tfor name, function := range funcs {\n\t\ttemplateManager.funcs[name] = function\n\t}\n}",
"func (tp *Template) Funcs(funcs ...template.FuncMap) *Template {\n\ttp.funcs = append(tp.funcs, funcs...)\n\treturn tp\n}",
"func NewFuncMap() template.FuncMap {\n\treturn map[string]any{\n\t\t\"ctx\": func() any { return nil }, // template context function\n\n\t\t\"DumpVar\": dumpVar,\n\n\t\t// -----------------------------------------------------------------\n\t\t// html/template related functions\n\t\t\"dict\": dict, // it's lowercase because this name has been widely used. Our other functions should have uppercase names.\n\t\t\"Eval\": Eval,\n\t\t\"Safe\": Safe,\n\t\t\"Escape\": html.EscapeString,\n\t\t\"QueryEscape\": url.QueryEscape,\n\t\t\"JSEscape\": template.JSEscapeString,\n\t\t\"Str2html\": Str2html, // TODO: rename it to SanitizeHTML\n\t\t\"URLJoin\": util.URLJoin,\n\t\t\"DotEscape\": DotEscape,\n\n\t\t\"PathEscape\": url.PathEscape,\n\t\t\"PathEscapeSegments\": util.PathEscapeSegments,\n\n\t\t// utils\n\t\t\"StringUtils\": NewStringUtils,\n\t\t\"SliceUtils\": NewSliceUtils,\n\t\t\"JsonUtils\": NewJsonUtils,\n\n\t\t// -----------------------------------------------------------------\n\t\t// svg / avatar / icon\n\t\t\"svg\": svg.RenderHTML,\n\t\t\"EntryIcon\": base.EntryIcon,\n\t\t\"MigrationIcon\": MigrationIcon,\n\t\t\"ActionIcon\": ActionIcon,\n\n\t\t\"SortArrow\": SortArrow,\n\n\t\t// -----------------------------------------------------------------\n\t\t// time / number / format\n\t\t\"FileSize\": base.FileSize,\n\t\t\"CountFmt\": base.FormatNumberSI,\n\t\t\"TimeSince\": timeutil.TimeSince,\n\t\t\"TimeSinceUnix\": timeutil.TimeSinceUnix,\n\t\t\"DateTime\": timeutil.DateTime,\n\t\t\"Sec2Time\": util.SecToTime,\n\t\t\"LoadTimes\": func(startTime time.Time) string {\n\t\t\treturn fmt.Sprint(time.Since(startTime).Nanoseconds()/1e6) + \"ms\"\n\t\t},\n\n\t\t// -----------------------------------------------------------------\n\t\t// setting\n\t\t\"AppName\": func() string {\n\t\t\treturn setting.AppName\n\t\t},\n\t\t\"AppSubUrl\": func() string {\n\t\t\treturn setting.AppSubURL\n\t\t},\n\t\t\"AssetUrlPrefix\": func() string {\n\t\t\treturn setting.StaticURLPrefix + 
\"/assets\"\n\t\t},\n\t\t\"AppUrl\": func() string {\n\t\t\t// The usage of AppUrl should be avoided as much as possible,\n\t\t\t// because the AppURL(ROOT_URL) may not match user's visiting site and the ROOT_URL in app.ini may be incorrect.\n\t\t\t// And it's difficult for Gitea to guess absolute URL correctly with zero configuration,\n\t\t\t// because Gitea doesn't know whether the scheme is HTTP or HTTPS unless the reverse proxy could tell Gitea.\n\t\t\treturn setting.AppURL\n\t\t},\n\t\t\"AppVer\": func() string {\n\t\t\treturn setting.AppVer\n\t\t},\n\t\t\"AppDomain\": func() string { // documented in mail-templates.md\n\t\t\treturn setting.Domain\n\t\t},\n\t\t\"AssetVersion\": func() string {\n\t\t\treturn setting.AssetVersion\n\t\t},\n\t\t\"DefaultShowFullName\": func() bool {\n\t\t\treturn setting.UI.DefaultShowFullName\n\t\t},\n\t\t\"ShowFooterTemplateLoadTime\": func() bool {\n\t\t\treturn setting.Other.ShowFooterTemplateLoadTime\n\t\t},\n\t\t\"AllowedReactions\": func() []string {\n\t\t\treturn setting.UI.Reactions\n\t\t},\n\t\t\"CustomEmojis\": func() map[string]string {\n\t\t\treturn setting.UI.CustomEmojisMap\n\t\t},\n\t\t\"MetaAuthor\": func() string {\n\t\t\treturn setting.UI.Meta.Author\n\t\t},\n\t\t\"MetaDescription\": func() string {\n\t\t\treturn setting.UI.Meta.Description\n\t\t},\n\t\t\"MetaKeywords\": func() string {\n\t\t\treturn setting.UI.Meta.Keywords\n\t\t},\n\t\t\"EnableTimetracking\": func() bool {\n\t\t\treturn setting.Service.EnableTimetracking\n\t\t},\n\t\t\"DisableGitHooks\": func() bool {\n\t\t\treturn setting.DisableGitHooks\n\t\t},\n\t\t\"DisableWebhooks\": func() bool {\n\t\t\treturn setting.DisableWebhooks\n\t\t},\n\t\t\"DisableImportLocal\": func() bool {\n\t\t\treturn !setting.ImportLocalPaths\n\t\t},\n\t\t\"DefaultTheme\": func() string {\n\t\t\treturn setting.UI.DefaultTheme\n\t\t},\n\t\t\"NotificationSettings\": func() map[string]any {\n\t\t\treturn map[string]any{\n\t\t\t\t\"MinTimeout\": 
int(setting.UI.Notification.MinTimeout / time.Millisecond),\n\t\t\t\t\"TimeoutStep\": int(setting.UI.Notification.TimeoutStep / time.Millisecond),\n\t\t\t\t\"MaxTimeout\": int(setting.UI.Notification.MaxTimeout / time.Millisecond),\n\t\t\t\t\"EventSourceUpdateTime\": int(setting.UI.Notification.EventSourceUpdateTime / time.Millisecond),\n\t\t\t}\n\t\t},\n\t\t\"MermaidMaxSourceCharacters\": func() int {\n\t\t\treturn setting.MermaidMaxSourceCharacters\n\t\t},\n\n\t\t// -----------------------------------------------------------------\n\t\t// render\n\t\t\"RenderCommitMessage\": RenderCommitMessage,\n\t\t\"RenderCommitMessageLinkSubject\": RenderCommitMessageLinkSubject,\n\n\t\t\"RenderCommitBody\": RenderCommitBody,\n\t\t\"RenderCodeBlock\": RenderCodeBlock,\n\t\t\"RenderIssueTitle\": RenderIssueTitle,\n\t\t\"RenderEmoji\": RenderEmoji,\n\t\t\"RenderEmojiPlain\": emoji.ReplaceAliases,\n\t\t\"ReactionToEmoji\": ReactionToEmoji,\n\t\t\"RenderNote\": RenderNote,\n\n\t\t\"RenderMarkdownToHtml\": RenderMarkdownToHtml,\n\t\t\"RenderLabel\": RenderLabel,\n\t\t\"RenderLabels\": RenderLabels,\n\n\t\t// -----------------------------------------------------------------\n\t\t// misc\n\t\t\"ShortSha\": base.ShortSha,\n\t\t\"ActionContent2Commits\": ActionContent2Commits,\n\t\t\"IsMultilineCommitMessage\": IsMultilineCommitMessage,\n\t\t\"CommentMustAsDiff\": gitdiff.CommentMustAsDiff,\n\t\t\"MirrorRemoteAddress\": mirrorRemoteAddress,\n\n\t\t\"FilenameIsImage\": FilenameIsImage,\n\t\t\"TabSizeClass\": TabSizeClass,\n\t}\n}",
"func (t *Tmpl) FuncMap(f template.FuncMap) {\n\t// Lock mutex\n\tt.rw.Lock()\n\tdefer t.rw.Unlock()\n\n\tt.Tmpl.Funcs(f)\n}",
"func (t *Tmpl) FuncMap(f template.FuncMap) {\n\t// Lock mutex\n\tt.rw.Lock()\n\tdefer t.rw.Unlock()\n\n\tt.Tmpl.Funcs(f)\n}",
"func AddFuncMap(key string, fn interface{}) error {\n\tbeegoTplFuncMap[key] = fn\n\treturn nil\n}",
"func AddFuncMap(key string, fn interface{}) error {\n\ttplFuncMap[key] = fn\n\treturn nil\n}",
"func (g *Group) Funcs(f template.FuncMap) {\n\tg.mu.Lock()\n\tdefer g.mu.Unlock()\n\tif g.tmpls != nil {\n\t\tfor _, t := range g.tmpls.Templates() {\n\t\t\tt.Funcs(f)\n\t\t}\n\t}\n\tif g.funcs == nil {\n\t\tg.funcs = template.FuncMap{}\n\t}\n\tfor k, v := range f {\n\t\tg.funcs[k] = v\n\t}\n}",
"func TmplFunctionsMap() template.FuncMap {\n\tfuncMap := template.FuncMap{\n\t\t\"envOrDef\": envOrDefault,\n\t\t\"env\": env,\n\t\t\"fileMD5\": fileMD5,\n\t\t\"Iterate\": Iterate,\n\t}\n\treturn funcMap\n}",
"func WithFuncs(funcs gotemplate.FuncMap) Opt {\n\treturn func(t *gotemplate.Template) (*gotemplate.Template, error) {\n\t\treturn t.Funcs(funcs), nil\n\t}\n}",
"func AddTemplateFuncsNamespace(ns func(d *deps.Deps) *TemplateFuncsNamespace) {\n\tTemplateFuncsNamespaceRegistry = append(TemplateFuncsNamespaceRegistry, ns)\n}",
"func (f *tmplFuncs) funcMap() template.FuncMap {\n\treturn map[string]interface{}{\n\t\t\"cleanLabel\": f.cleanLabel,\n\t\t\"cleanType\": f.cleanType,\n\t\t\"fieldType\": f.fieldType,\n\t\t\"dict\": f.dict,\n\t\t\"ext\": filepath.Ext,\n\t\t\"dir\": func(s string) string {\n\t\t\tdir, _ := path.Split(s)\n\t\t\treturn dir\n\t\t},\n\t\t\"trimExt\": stripExt,\n\t\t\"slug\": slug,\n\t\t\"comments\": comments,\n\t\t\"sub\": f.sub,\n\t\t\"filepath\": f.filepath,\n\t\t\"gatewayMethod\": f.gatewayMethod,\n\t\t\"gatewayPath\": f.gatewayPath,\n\t\t\"urlToType\": f.urlToType,\n\t\t\"jsonMessage\": f.jsonMessage,\n\t\t\"location\": f.location,\n\t\t\"AllMessages\": func(fixNames bool) []*descriptor.DescriptorProto {\n\t\t\treturn util.AllMessages(f.f, fixNames)\n\t\t},\n\t\t\"AllEnums\": func(fixNames bool) []*descriptor.EnumDescriptorProto {\n\t\t\treturn util.AllEnums(f.f, fixNames)\n\t\t},\n\t}\n}",
"func addtoFuncmap(propfnmap map[string]template.FuncMap, propList []string, name string,\n\tfn interface{}) {\n\n\tfor _, prop := range propList {\n\t\tif _, ok := propfnmap[prop]; !ok {\n\t\t\tpropfnmap[prop] = make(template.FuncMap)\n\t\t}\n\t\tpropfnmap[prop][name] = fn\n\t}\n}",
"func AllCustomFuncs() template.FuncMap {\n\tf := sprig.TxtFuncMap()\n\trt := runtaskFuncs()\n\tfor k, v := range rt {\n\t\tf[k] = v\n\t}\n\trc := runCommandFuncs()\n\tfor k, v := range rc {\n\t\tf[k] = v\n\t}\n\tver := kubever.TemplateFunctions()\n\tfor k, v := range ver {\n\t\tf[k] = v\n\t}\n\tsp := csp.TemplateFunctions()\n\tfor k, v := range sp {\n\t\tf[k] = v\n\t}\n\tps := poolselection.TemplateFunctions()\n\tfor k, v := range ps {\n\t\tf[k] = v\n\t}\n\tur := result.TemplateFunctions()\n\tfor k, v := range ur {\n\t\tf[k] = v\n\t}\n\tnod := node.TemplateFunctions()\n\tfor k, v := range nod {\n\t\tf[k] = v\n\t}\n\n\treturn f\n}",
"func FuncMap() template.FuncMap {\n\treturn template.FuncMap(map[string]interface{}{\n\t\t\"requiredEnvs\": RequiredEnvs,\n\t\t\"requiredVals\": RequiredVals,\n\t\t\"requiredFiles\": RequiredFiles,\n\t\t\"sh\": Sh,\n\t})\n}",
"func CreateGoFuncMaps(auth authModule) template.FuncMap {\n\tm := template.FuncMap{\n\t\t\"hash\": utils.HashString,\n\t\t\"add\": func(a, b int) int { return a + b },\n\t\t\"generateId\": func() string { return ksuid.New().String() },\n\t\t\"marshalJSON\": func(a interface{}) (string, error) {\n\t\t\tdata, err := json.Marshal(a)\n\t\t\treturn string(data), err\n\t\t},\n\t}\n\tif auth != nil {\n\t\tm[\"encrypt\"] = auth.Encrypt\n\t}\n\n\treturn m\n}",
"func (f JSXFuncs) MakeMap() template.FuncMap { return MakeMap(f) }",
"func funcMap() template.FuncMap {\n\tr := sprig.TxtFuncMap()\n\n\tl := template.FuncMap{\n\t\t\"fileContents\": fileContents(),\n\t}\n\n\tfor k, v := range l {\n\t\tif _, ok := r[k]; ok {\n\t\t\tk = \"c_\" + k\n\t\t}\n\t\tr[k] = v\n\t}\n\n\treturn r\n}",
"func execmTemplateFuncs(_ int, p *gop.Context) {\n\targs := p.GetArgs(2)\n\tret := args[0].(*template.Template).Funcs(args[1].(template.FuncMap))\n\tp.Ret(2, ret)\n}",
"func (f *Funcs) FuncMap() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"coldef\": f.coldef,\n\t\t\"viewdef\": f.viewdef,\n\t\t\"procdef\": f.procdef,\n\t\t\"driver\": f.driverfn,\n\t\t\"constraint\": f.constraintfn,\n\t\t\"esc\": f.escType,\n\t\t\"fields\": f.fields,\n\t\t\"engine\": f.enginefn,\n\t\t\"literal\": f.literal,\n\t\t\"comma\": f.comma,\n\t\t\"isEndConstraint\": f.isEndConstraint,\n\t}\n}",
"func (f HTMLFuncs) MakeMap() template.FuncMap { return MakeMap(f) }",
"func runtaskFuncs() (f template.FuncMap) {\n\treturn template.FuncMap{\n\t\t\"pickContains\": pickContains,\n\t\t\"pickSuffix\": pickSuffix,\n\t\t\"pickPrefix\": pickPrefix,\n\t\t\"toYaml\": ToYaml,\n\t\t\"fromYaml\": fromYaml,\n\t\t\"jsonpath\": jsonPath,\n\t\t\"saveAs\": saveAs,\n\t\t\"saveas\": saveAs,\n\t\t\"saveIf\": saveIf,\n\t\t\"saveif\": saveIf,\n\t\t\"addTo\": addTo,\n\t\t\"noop\": noop,\n\t\t\"notFoundErr\": notFoundErr,\n\t\t\"verifyErr\": verifyErr,\n\t\t\"versionMismatchErr\": versionMismatchErr,\n\t\t\"isLen\": isLen,\n\t\t\"nestedKeyMap\": nestedKeyMap,\n\t\t\"keyMap\": keyMap,\n\t\t\"splitKeyMap\": splitKeyMap,\n\t\t\"splitListTrim\": splitListTrim,\n\t\t\"splitListLen\": splitListLen,\n\t\t\"randomize\": randomize,\n\t\t\"IfNotNil\": ifNotNil,\n\t\t\"debugf\": debugf,\n\t\t\"getMapofString\": util.GetNestedMap,\n\t}\n}",
"func AddFileFuncs(f map[string]interface{}) {\n\tfor k, v := range CreateFileFuncs(context.Background()) {\n\t\tf[k] = v\n\t}\n}",
"func GenericFuncMap() template.FuncMap {\n\tgfm := make(template.FuncMap, len(genericMap))\n\tfor k, v := range genericMap {\n\t\tgfm[k] = v\n\t}\n\treturn gfm\n}",
"func funcMap(i *funcMapInput) template.FuncMap {\n\tvar scratch Scratch\n\n\tr := template.FuncMap{\n\t\t// API functions\n\t\t\"datacenters\": datacentersFunc(i.brain, i.used, i.missing),\n\t\t\"file\": fileFunc(i.brain, i.used, i.missing, i.sandboxPath),\n\t\t\"key\": keyFunc(i.brain, i.used, i.missing),\n\t\t\"keyExists\": keyExistsFunc(i.brain, i.used, i.missing),\n\t\t\"keyOrDefault\": keyWithDefaultFunc(i.brain, i.used, i.missing),\n\t\t\"ls\": lsFunc(i.brain, i.used, i.missing, true),\n\t\t\"safeLs\": safeLsFunc(i.brain, i.used, i.missing),\n\t\t\"node\": nodeFunc(i.brain, i.used, i.missing),\n\t\t\"nodes\": nodesFunc(i.brain, i.used, i.missing),\n\t\t\"secret\": secretFunc(i.brain, i.used, i.missing),\n\t\t\"secrets\": secretsFunc(i.brain, i.used, i.missing),\n\t\t\"service\": serviceFunc(i.brain, i.used, i.missing),\n\t\t\"services\": servicesFunc(i.brain, i.used, i.missing),\n\t\t\"tree\": treeFunc(i.brain, i.used, i.missing, true),\n\t\t\"safeTree\": safeTreeFunc(i.brain, i.used, i.missing),\n\n\t\t// Scratch\n\t\t\"scratch\": func() *Scratch { return &scratch },\n\n\t\t// Helper functions\n\t\t\"base64Decode\": base64Decode,\n\t\t\"base64Encode\": base64Encode,\n\t\t\"base64URLDecode\": base64URLDecode,\n\t\t\"base64URLEncode\": base64URLEncode,\n\t\t\"byKey\": byKey,\n\t\t\"byTag\": byTag,\n\t\t\"contains\": contains,\n\t\t\"containsAll\": containsSomeFunc(true, true),\n\t\t\"containsAny\": containsSomeFunc(false, false),\n\t\t\"containsNone\": containsSomeFunc(true, false),\n\t\t\"containsNotAll\": containsSomeFunc(false, true),\n\t\t\"env\": envFunc(i.env),\n\t\t\"executeTemplate\": executeTemplateFunc(i.t),\n\t\t\"explode\": explode,\n\t\t\"explodeMap\": explodeMap,\n\t\t\"in\": in,\n\t\t\"indent\": indent,\n\t\t\"loop\": loop,\n\t\t\"join\": join,\n\t\t\"trimSpace\": trimSpace,\n\t\t\"parseBool\": parseBool,\n\t\t\"parseFloat\": parseFloat,\n\t\t\"parseInt\": parseInt,\n\t\t\"parseJSON\": parseJSON,\n\t\t\"parseUint\": parseUint,\n\t\t\"plugin\": 
plugin,\n\t\t\"regexReplaceAll\": regexReplaceAll,\n\t\t\"regexMatch\": regexMatch,\n\t\t\"replaceAll\": replaceAll,\n\t\t\"timestamp\": timestamp,\n\t\t\"toLower\": toLower,\n\t\t\"toJSON\": toJSON,\n\t\t\"toJSONPretty\": toJSONPretty,\n\t\t\"toTitle\": toTitle,\n\t\t\"toTOML\": toTOML,\n\t\t\"toUpper\": toUpper,\n\t\t\"toYAML\": toYAML,\n\t\t\"split\": split,\n\t\t\"byMeta\": byMeta,\n\t\t\"sockaddr\": sockaddr,\n\t\t// Math functions\n\t\t\"add\": add,\n\t\t\"subtract\": subtract,\n\t\t\"multiply\": multiply,\n\t\t\"divide\": divide,\n\t\t\"modulo\": modulo,\n\t}\n\n\tfor _, bf := range i.functionBlacklist {\n\t\tif _, ok := r[bf]; ok {\n\t\t\tr[bf] = blacklisted\n\t\t}\n\t}\n\n\treturn r\n}",
"func getAllFuncs() template.FuncMap {\n\treturn template.FuncMap{\"markDown\": markDowner, \"date\": dater.FriendlyDater, \"holder\": holder}\n}",
"func (e *Engine) AddFuncMap(m map[string]interface{}) *Engine {\n\te.Mutex.Lock()\n\tfor name, fn := range m {\n\t\te.Funcmap[name] = fn\n\t}\n\te.Mutex.Unlock()\n\treturn e\n}",
"func TemplateFunctions() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"kubeVersionCompare\": Compare,\n\t\t\"kubeVersionEq\": Equals,\n\t\t\"kubeVersionGt\": GreaterThan,\n\t\t\"kubeVersionGte\": GreaterThanOrEquals,\n\t\t\"kubeVersionLt\": LessThan,\n\t\t\"kubeVersionLte\": LessThanOrEquals,\n\t}\n}",
"func (g *Generator) AddFuncs(fm map[string]interface{}) {\n\tfor name, f := range fm {\n\t\tg.funcs[name] = f\n\t}\n}",
"func FuncMap() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"errors\": ErrorsStub,\n\t}\n}",
"func TemplateFunctions() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"createCSPListFromUIDs\": newListFromUIDs,\n\t\t\"createCSPListFromUIDNodeMap\": newListFromUIDNode,\n\t}\n}",
"func tmplFuncs() template.FuncMap {\n\treturn map[string]interface{}{\n\t\t\"qr\": barcoder(),\n\t}\n}",
"func NewFunctionMap() template.FuncMap {\n\tfuncMap := engine.FuncMap()\n\tfuncMap[\"hashPassword\"] = util.HashPassword\n\tfuncMap[\"removeScheme\"] = util.RemoveScheme\n\treturn funcMap\n}",
"func AddRandomFuncs(f map[string]interface{}) {\n\tfor k, v := range CreateRandomFuncs(context.Background()) {\n\t\tf[k] = v\n\t}\n}",
"func (p applicationPackager) xmlTemplateWithFuncs(name string, data map[string]interface{}, fmap template.FuncMap) (template.HTML, error) {\n\tpath := path.Join(p.xmlService.templatePath, name)\n\ttmpl := template.Must(template.New(name).Funcs(fmap).ParseFiles(path))\n\tvar output bytes.Buffer\n\tif err := tmpl.Execute(&output, data); err != nil {\n\t\treturn template.HTML(\"\"), err\n\t}\n\treturn template.HTML(applyBulkFixes(output.String())), nil\n}",
"func (app *App) AddTemplateFunction(name string, f interface{}) {\n\tapp.templates.templatesMutex.Lock()\n\tdefer app.templates.templatesMutex.Unlock()\n\tapp.templates.funcMap[name] = f\n}",
"func TxtFuncMap(ctx *generatorContext) template.FuncMap {\n\tfuncMap := sprig.TxtFuncMap()\n\n\tfuncMap[\"protoComment\"] = proto.Comment\n\tfuncMap[\"protoFullName\"] = proto.FullName\n\n\tfuncMap[\"phpNamespace\"] = php.Namespace\n\tfuncMap[\"phpServiceName\"] = php.ServiceName\n\tfuncMap[\"phpMessageName\"] = func(t string) (string, error) {\n\t\tmsg := ctx.registry.MessageDefinition(t)\n\t\tif msg == nil {\n\t\t\treturn \"\", errors.New(\"message definition not found for \" + t)\n\t\t}\n\n\t\treturn php.MessageName(msg), nil\n\t}\n\n\treturn funcMap\n}",
"func (widgets *Widgets) FuncMap(enableInlineEdit bool) template.FuncMap {\n\tfuncMap := template.FuncMap{}\n\n\tfuncMap[\"render_widget\"] = func(widgetName, widgetKey string, context *Context) template.HTML {\n\t\treturn widgets.Render(widgetName, widgetKey)\n\t}\n\n\treturn funcMap\n}",
"func (t *TemplateConfig) addFunctions(s *ServerlessConfig) {\n\tif len(t.Resources) == 0 {\n\t\tt.Resources = map[string]SAMFunction{}\n\t}\n\n\tfor n, f := range s.Functions {\n\t\tfName := flect.New(n).Camelize().String() + \"Function\"\n\t\t// ensure to add only http event functions\n\t\tev := f.Events[0].HTTP\n\t\tif ev != nil {\n\t\t\tt.Resources[fName] = SAMFunction{\n\t\t\t\tType: \"AWS::Serverless::Function\",\n\t\t\t\tProperties: SAMFnProp{\n\t\t\t\t\tRuntime: \"go1.x\",\n\t\t\t\t\tHandler: strings.TrimPrefix(f.Handler, \"bin/\"),\n\t\t\t\t\tCodeURI: \"debug\",\n\t\t\t\t\tEvents: map[string]SAMEvent{\n\t\t\t\t\t\t\"http\": SAMEvent{\n\t\t\t\t\t\t\tType: \"Api\",\n\t\t\t\t\t\t\tProperties: SAMProp{\n\t\t\t\t\t\t\t\tPath: \"/\" + ev.Path,\n\t\t\t\t\t\t\t\tMethod: ev.Method,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\t}\n}",
"func (b *Builder) FuncMap() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"inputs_for\": b.Inputs,\n\t\t\"inputs_and_errors_for\": func(v interface{}, errs []error) (template.HTML, error) {\n\t\t\treturn b.Inputs(v, errs...)\n\t\t},\n\t}\n}",
"func (info Terminfo) FuncMap() map[string]string {\n\tr := make(map[string]string, maxFuncs)\n\tr[\"EnterCA\"] = info.Funcs[FuncEnterCA]\n\tr[\"ExitCA\"] = info.Funcs[FuncExitCA]\n\tr[\"ShowCursor\"] = info.Funcs[FuncShowCursor]\n\tr[\"HideCursor\"] = info.Funcs[FuncHideCursor]\n\tr[\"ClearScreen\"] = info.Funcs[FuncClearScreen]\n\tr[\"SGR0\"] = info.Funcs[FuncSGR0]\n\tr[\"Underline\"] = info.Funcs[FuncUnderline]\n\tr[\"Bold\"] = info.Funcs[FuncBold]\n\tr[\"Blink\"] = info.Funcs[FuncBlink]\n\tr[\"Reverse\"] = info.Funcs[FuncReverse]\n\tr[\"EnterKeypad\"] = info.Funcs[FuncEnterKeypad]\n\tr[\"ExitKeypad\"] = info.Funcs[FuncExitKeypad]\n\tr[\"EnterMouse\"] = info.Funcs[FuncEnterMouse]\n\tr[\"ExitMouse\"] = info.Funcs[FuncExitMouse]\n\treturn r\n}",
"func (t *T) AddFunctions(functions ...string) {\n\tt.functionPackages = append(t.functionPackages, functions...)\n}",
"func TemplateFunctions() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"cspGetPolicies\": GetPolicies,\n\t\t\"cspFilterPoolIDs\": FilterPoolIDs,\n\t\t\"cspAntiAffinity\": AntiAffinityLabel,\n\t\t\"cspPreferAntiAffinity\": PreferAntiAffinityLabel,\n\t\t\"preferScheduleOnHost\": PreferScheduleOnHostAnnotation,\n\t\t\"capacityAwareProvisioning\": CapacityAwareProvisioning,\n\t}\n}",
"func AddEnvFuncs(f map[string]interface{}) {\n\tfor k, v := range CreateEnvFuncs(context.Background()) {\n\t\tf[k] = v\n\t}\n}",
"func (e *Engine) WithFuncMap(fm template.FuncMap) *Engine {\n\treturn &Engine{\n\t\trootDir: e.rootDir,\n\t\ttset: e.tset,\n\t\ttmap: e.tmap,\n\t\tfmap: fm,\n\t\tglobalContext: e.globalContext,\n\t\teveryload: e.everyload,\n\t}\n}",
"func (p *Generator) GetFuncMap() template.FuncMap {\n\tf := template.FuncMap{\n\t\t\"CreateIntegrationDiagram\": p.CreateIntegrationDiagram,\n\t\t\"CreateSequenceDiagram\": p.CreateSequenceDiagram,\n\t\t\"CreateParamDataModel\": p.CreateParamDataModel,\n\t\t\"CreateReturnDataModel\": p.CreateReturnDataModel,\n\t\t\"CreateTypeDiagram\": p.CreateTypeDiagram,\n\t\t\"CreateRedoc\": p.CreateRedoc,\n\t\t\"GenerateDataModel\": p.GenerateDataModel,\n\t\t\"GetParamType\": p.GetParamType,\n\t\t\"GetReturnType\": p.GetReturnType,\n\t\t\"SourcePath\": p.SourcePath,\n\t\t\"Packages\": p.Packages,\n\t\t\"MacroPackages\": p.MacroPackages,\n\t\t\"hasPattern\": syslutil.HasPattern,\n\t\t\"ModuleAsPackages\": p.ModuleAsPackages,\n\t\t\"ModulePackageName\": ModulePackageName,\n\t\t\"SortedKeys\": SortedKeys,\n\t\t\"Attribute\": Attribute,\n\t\t\"ServiceMetadata\": ServiceMetadata,\n\t\t\"Fields\": Fields,\n\t\t\"FieldType\": FieldType,\n\t\t\"SanitiseOutputName\": SanitiseOutputName,\n\t\t\"ToLower\": strings.ToLower,\n\t\t\"ToCamel\": strcase.ToCamel,\n\t\t\"Remove\": Remove,\n\t\t\"ToTitle\": strings.ToTitle,\n\t\t\"Base\": filepath.Base,\n\t\t\"Last\": Last,\n\t}\n\tfor name, function := range sprig.FuncMap() {\n\t\tf[name] = function\n\t}\n\treturn f\n}",
"func (ctx versionCtx) FuncMap() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"Sequence\": ctx.sequence,\n\t\t\"Cursor\": ctx.cursor,\n\t\t\"ChannelName\": ctx.channelName,\n\t\t\"VersionLabel\": ctx.versionLabel,\n\t\t\"ReleaseNotes\": ctx.releaseNotes,\n\t\t\"IsAirgap\": ctx.isAirgap,\n\t}\n}",
"func PopulateTemplateWithFuncMap(templateTitle string, templateContent string,\n\taMap map[string]interface{}, funcMap template.FuncMap) string {\n\n\t// Populate the template\n\tvar err error\n\tt := template.New(templateTitle)\n\n\tt = t.Funcs(funcMap)\n\n\tt, err = t.Parse(templateContent)\n\tcheckError(err)\n\n\tbuff := bytes.NewBufferString(\"\")\n\tt.Execute(buff, aMap)\n\treturn buff.String()\n}",
"func registeTemplateFunc(t *template.Template) *template.Template {\n\treturn t.Funcs(template.FuncMap{\"unescaped\": unescaped})\n\t//TODO:add more func\n}",
"func AddCryptoFuncs(f map[string]interface{}) {\n\tfor k, v := range CreateCryptoFuncs(context.Background()) {\n\t\tf[k] = v\n\t}\n}",
"func (e *Engine) FuncMap() map[string]interface{} {\n\treturn e.Funcmap\n}",
"func (p *Generator) GetFuncMap() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"CreateIntegrationDiagram\": p.CreateIntegrationDiagram,\n\t\t\"CreateSequenceDiagram\": p.CreateSequenceDiagram,\n\t\t\"CreateParamDataModel\": p.CreateParamDataModel,\n\t\t\"CreateReturnDataModel\": p.CreateReturnDataModel,\n\t\t\"CreateTypeDiagram\": p.CreateTypeDiagram,\n\t\t\"GenerateDataModel\": p.GenerateDataModel,\n\t\t\"CreateQueryParamDataModel\": p.CreateQueryParamDataModel,\n\t\t\"CreatePathParamDataModel\": p.CreatePathParamDataModel,\n\t\t\"GetParamType\": p.GetParamType,\n\t\t\"GetRows\": p.GetRows,\n\t\t\"GetReturnType\": p.GetReturnType,\n\t\t\"hasPattern\": syslutil.HasPattern,\n\t\t\"ModuleAsPackages\": p.ModuleAsPackages,\n\t\t\"ModulePackageName\": ModulePackageName,\n\t\t\"SortedKeys\": SortedKeys,\n\t\t\"Attribute\": Attribute,\n\t\t\"SanitiseOutputName\": SanitiseOutputName,\n\t\t\"ToLower\": strings.ToLower,\n\t\t\"Base\": filepath.Base,\n\t}\n}",
"func (tp *Template) Func(name string, f interface{}) *Template {\n\treturn tp.Funcs(template.FuncMap{name: f})\n}",
"func Funcs(init Handler, fns []ContractFunctionInterface) map[coretypes.Hname]ContractFunctionInterface {\n\tret := map[coretypes.Hname]ContractFunctionInterface{\n\t\tcoretypes.EntryPointInit: Func(\"init\", init),\n\t}\n\tfor _, f := range fns {\n\t\thname := f.Hname()\n\t\tif _, ok := ret[hname]; ok {\n\t\t\tpanic(fmt.Sprintf(\"Duplicate function: %s\", f.Name))\n\t\t}\n\n\t\thandlers := 0\n\t\tif f.Handler != nil {\n\t\t\thandlers += 1\n\t\t}\n\t\tif f.ViewHandler != nil {\n\t\t\thandlers += 1\n\t\t}\n\t\tif handlers != 1 {\n\t\t\tpanic(\"Exactly one of Handler, ViewHandler must be set\")\n\t\t}\n\n\t\tret[hname] = f\n\t}\n\treturn ret\n}",
"func templateHelpers(fs *token.FileSet) template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"ast\": func(n ast.Node) string {\n\t\t\treturn nodeToString(fs, n)\n\t\t},\n\t\t\"join\": strings.Join,\n\t\t\"params\": func(f *Func) []string {\n\t\t\treturn f.Params(fs)\n\t\t},\n\t\t\"fields\": func(f *Func) []string {\n\t\t\treturn f.Fields(fs)\n\t\t},\n\t\t\"results\": func(f *Func) []string {\n\t\t\treturn f.Results(fs)\n\t\t},\n\t\t\"receiver\": func(f *Func) string {\n\t\t\tif f.ReceiverType() == nil {\n\t\t\t\treturn \"\"\n\t\t\t}\n\n\t\t\treturn strings.Replace(nodeToString(fs, f.ReceiverType()), \"*\", \"\", -1) + \".\"\n\t\t},\n\t\t\"want\": func(s string) string { return strings.Replace(s, \"got\", \"want\", 1) },\n\t}\n}",
"func getFuncMap(failMessage *string) template.FuncMap {\n\tm := sprig.TxtFuncMap()\n\tm[\"fail\"] = func(msg string) (string, error) {\n\t\t*failMessage = msg\n\t\treturn \"\", errors.New(msg)\n\t}\n\treturn m\n}",
"func (tmplts *Templates) Add(tmplt *Template) error {\n\t//match the hot reload to contained templates\n\t//if hot reload is enabled\n\tif tmplts.HotReload {\n\t\ttmplt.HotReload = true\n\t}\n\n\t//initialize template if not yet initialized\n\tif !tmplt.initialized {\n\t\terr := tmplt.Init()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t//use template name as map key\n\ttmplts.templates[tmplt.name] = tmplt\n\n\treturn nil\n}",
"func initBuiltinFuncs(builtin *types.Package) {\n\tfns := [...]struct {\n\t\tname string\n\t\ttparams []typeTParam\n\t\tparams []typeXParam\n\t\tresult xType\n\t}{\n\t\t{\"copy\", []typeTParam{{\"Type\", any}}, []typeXParam{{\"dst\", xtSlice}, {\"src\", xtSlice}}, types.Typ[types.Int]},\n\t\t// func [Type any] copy(dst, src []Type) int\n\n\t\t{\"close\", []typeTParam{{\"Type\", any}}, []typeXParam{{\"c\", xtChanIn}}, nil},\n\t\t// func [Type any] close(c chan<- Type)\n\n\t\t{\"append\", []typeTParam{{\"Type\", any}}, []typeXParam{{\"slice\", xtSlice}, {\"elems\", xtEllipsis}}, xtSlice},\n\t\t// func [Type any] append(slice []Type, elems ...Type) []Type\n\n\t\t{\"delete\", []typeTParam{{\"Key\", comparable}, {\"Elem\", any}}, []typeXParam{{\"m\", xtMap}, {\"key\", 0}}, nil},\n\t\t// func [Key comparable, Elem any] delete(m map[Key]Elem, key Key)\n\t}\n\tgbl := builtin.Scope()\n\tfor _, fn := range fns {\n\t\ttparams := newTParams(fn.tparams)\n\t\tn := len(fn.params)\n\t\tparams := make([]*types.Var, n)\n\t\tfor i, param := range fn.params {\n\t\t\ttyp := newXParamType(tparams, param.typ)\n\t\t\tparams[i] = types.NewParam(token.NoPos, builtin, param.name, typ)\n\t\t}\n\t\tvar ellipsis bool\n\t\tif tidx, ok := fn.params[n-1].typ.(int); ok && (tidx&xtEllipsis) != 0 {\n\t\t\tellipsis = true\n\t\t}\n\t\tvar results *types.Tuple\n\t\tif fn.result != nil {\n\t\t\ttyp := newXParamType(tparams, fn.result)\n\t\t\tresults = types.NewTuple(types.NewParam(token.NoPos, builtin, \"\", typ))\n\t\t}\n\t\ttsig := NewTemplateSignature(tparams, nil, types.NewTuple(params...), results, ellipsis, tokFlagApproxType)\n\t\tvar tfn types.Object = NewTemplateFunc(token.NoPos, builtin, fn.name, tsig)\n\t\tif fn.name == \"append\" { // append is a special case\n\t\t\tappendString := NewInstruction(token.NoPos, builtin, \"append\", appendStringInstr{})\n\t\t\ttfn = NewOverloadFunc(token.NoPos, builtin, \"append\", appendString, tfn)\n\t\t} else if fn.name == \"copy\" {\n\t\t\t// func [S string] 
copy(dst []byte, src S) int\n\t\t\ttparams := newTParams([]typeTParam{{\"S\", tstring}})\n\t\t\tdst := types.NewParam(token.NoPos, builtin, \"dst\", types.NewSlice(types.Typ[types.Byte]))\n\t\t\tsrc := types.NewParam(token.NoPos, builtin, \"src\", tparams[0])\n\t\t\tret := types.NewParam(token.NoPos, builtin, \"\", types.Typ[types.Int])\n\t\t\ttsig := NewTemplateSignature(tparams, nil, types.NewTuple(dst, src), types.NewTuple(ret), false)\n\t\t\tcopyString := NewTemplateFunc(token.NoPos, builtin, \"copy\", tsig)\n\t\t\ttfn = NewOverloadFunc(token.NoPos, builtin, \"copy\", copyString, tfn)\n\t\t}\n\t\tgbl.Insert(tfn)\n\t}\n\toverloads := [...]struct {\n\t\tname string\n\t\tfns [3]typeBFunc\n\t}{\n\t\t{\"complex\", [...]typeBFunc{\n\t\t\t{[]typeBParam{{\"r\", types.UntypedFloat}, {\"i\", types.UntypedFloat}}, types.UntypedComplex},\n\t\t\t{[]typeBParam{{\"r\", types.Float32}, {\"i\", types.Float32}}, types.Complex64},\n\t\t\t{[]typeBParam{{\"r\", types.Float64}, {\"i\", types.Float64}}, types.Complex128},\n\t\t}},\n\t\t// func complex(r, i untyped_float) untyped_complex\n\t\t// func complex(r, i float32) complex64\n\t\t// func complex(r, i float64) complex128\n\n\t\t{\"real\", [...]typeBFunc{\n\t\t\t{[]typeBParam{{\"c\", types.UntypedComplex}}, types.UntypedFloat},\n\t\t\t{[]typeBParam{{\"c\", types.Complex64}}, types.Float32},\n\t\t\t{[]typeBParam{{\"c\", types.Complex128}}, types.Float64},\n\t\t}},\n\t\t// func real(c untyped_complex) untyped_float\n\t\t// func real(c complex64) float32\n\t\t// func real(c complex128) float64\n\n\t\t{\"imag\", [...]typeBFunc{\n\t\t\t{[]typeBParam{{\"c\", types.UntypedComplex}}, types.UntypedFloat},\n\t\t\t{[]typeBParam{{\"c\", types.Complex64}}, types.Float32},\n\t\t\t{[]typeBParam{{\"c\", types.Complex128}}, types.Float64},\n\t\t}},\n\t\t// func imag(c untyped_complex) untyped_float\n\t\t// func imag(c complex64) float32\n\t\t// func imag(c complex128) float64\n\t}\n\tfor _, overload := range overloads {\n\t\tfns := 
[]types.Object{\n\t\t\tnewBFunc(builtin, overload.name, overload.fns[0]),\n\t\t\tnewBFunc(builtin, overload.name, overload.fns[1]),\n\t\t\tnewBFunc(builtin, overload.name, overload.fns[2]),\n\t\t}\n\t\tgbl.Insert(NewOverloadFunc(token.NoPos, builtin, overload.name, fns...))\n\t}\n\t// func panic(v interface{})\n\t// func recover() interface{}\n\t// func print(args ...interface{})\n\t// func println(args ...interface{})\n\temptyIntfVar := types.NewVar(token.NoPos, builtin, \"v\", TyEmptyInterface)\n\temptyIntfTuple := types.NewTuple(emptyIntfVar)\n\temptyIntfSlice := types.NewSlice(TyEmptyInterface)\n\temptyIntfSliceVar := types.NewVar(token.NoPos, builtin, \"args\", emptyIntfSlice)\n\temptyIntfSliceTuple := types.NewTuple(emptyIntfSliceVar)\n\tgbl.Insert(types.NewFunc(token.NoPos, builtin, \"panic\", types.NewSignature(nil, emptyIntfTuple, nil, false)))\n\tgbl.Insert(types.NewFunc(token.NoPos, builtin, \"recover\", types.NewSignature(nil, nil, emptyIntfTuple, false)))\n\tgbl.Insert(types.NewFunc(token.NoPos, builtin, \"print\", types.NewSignature(nil, emptyIntfSliceTuple, nil, true)))\n\tgbl.Insert(types.NewFunc(token.NoPos, builtin, \"println\", types.NewSignature(nil, emptyIntfSliceTuple, nil, true)))\n\n\t// new & make are special cases, they require to pass a type.\n\tgbl.Insert(NewInstruction(token.NoPos, builtin, \"new\", newInstr{}))\n\tgbl.Insert(NewInstruction(token.NoPos, builtin, \"make\", makeInstr{}))\n\n\t// len & cap are special cases, because they may return a constant value.\n\tgbl.Insert(NewInstruction(token.NoPos, builtin, \"len\", lenInstr{}))\n\tgbl.Insert(NewInstruction(token.NoPos, builtin, \"cap\", capInstr{}))\n\n\t// unsafe\n\tgbl.Insert(NewInstruction(token.NoPos, types.Unsafe, \"Sizeof\", unsafeSizeofInstr{}))\n\tgbl.Insert(NewInstruction(token.NoPos, types.Unsafe, \"Alignof\", unsafeAlignofInstr{}))\n\tgbl.Insert(NewInstruction(token.NoPos, types.Unsafe, \"Offsetof\", unsafeOffsetofInstr{}))\n\tgbl.Insert(NewInstruction(token.NoPos, 
types.Unsafe, \"Add\", unsafeAddInstr{}))\n\tgbl.Insert(NewInstruction(token.NoPos, types.Unsafe, \"Slice\", unsafeSliceInstr{}))\n}",
"func (t *AxispointChaincode) initFunctionMaps() {\n\tt.tableMap = make(map[string]int)\n\tt.funcMap = make(map[string]InvokeFunc)\n\tt.funcMap[\"addRoyaltyStatements\"] = addRoyaltyStatements\n\tt.funcMap[\"generateExploitationReports\"] = generateExploitationReports\n\tt.funcMap[\"updateExploitationReports\"] = updateExploitationReports\n\tt.funcMap[\"getExploitationReports\"] = getExploitationReports\n\tt.funcMap[\"getRoyaltyStatements\"] = getRoyaltyStatements\n\tt.funcMap[\"resetLedger\"] = resetLedger\n\tt.funcMap[\"ping\"] = ping\n\tt.funcMap[\"addCopyrightDataReports\"] = addCopyrightDataReports\n\tt.funcMap[\"getCopyrightDataReportByID\"] = getCopyrightDataReportByID\n\tt.funcMap[\"deleteCopyrightDataReportByIDs\"] = deleteCopyrightDataReportByIDs\n\tt.funcMap[\"updateCopyrightDataReports\"] = updateCopyrightDataReports\n\tt.funcMap[\"searchForCopyrightDataReportWithParameters\"] = searchForCopyrightDataReportWithParameters\n\tt.funcMap[\"getAllCopyrightDataReports\"] = getAllCopyrightDataReports\n\tt.funcMap[\"deleteAsset\"] = deleteAsset\n\tt.funcMap[\"deleteAssetByUUID\"] = deleteAssetByUUID\n\tt.funcMap[\"getAssetByUUID\"] = getAssetByUUID\n\tt.funcMap[\"getRoyaltyStatementsByUUIDs\"] = getRoyaltyStatementsByUUIDs\n\tt.funcMap[\"updateRoyaltyStatements\"] = updateRoyaltyStatements\n\tt.funcMap[\"insertExploitationReports\"] = insertExploitationReports\n\tt.funcMap[\"addCollectionRights\"] = addCollectionRights\n\tt.funcMap[\"getCollectionRights\"] = getCollectionRights\n\tt.funcMap[\"updateCollectionRights\"] = updateCollectionRights\n\tt.funcMap[\"addIpiOrg\"] = addIpiOrg\n\tt.funcMap[\"updateIpiOrg\"] = updateIpiOrg\n\tt.funcMap[\"getIpiOrgByUUID\"] = getIpiOrgByUUID\n\tt.funcMap[\"getAllIpiOrgs\"] = getAllIpiOrgs\n\tt.funcMap[\"deleteIpiOrgByUUID\"] = deleteIpiOrgByUUID\n\tt.funcMap[\"generateCollectionStatement\"] = generateCollectionStatement\n\tt.funcMap[\"addRoyaltyStatementAndEvent\"] = addRoyaltyStatementAndEvent\n\n}",
"func SetTemplateFSFunc(fnt templateFSFunc) {\n\tbeeTemplateFS = fnt\n}",
"func NewTemplate(templateFuncs template.FuncMap) *Template {\n\tt := &Template{}\n\n\t// Default functions are defined and available for all templates being rendered.\n\t// These base function help with provided basic formatting so don't have to use javascript/jquery,\n\t// transformation happens server-side instead of client-side to provide base-level consistency.\n\t// Any defined function below will be overwritten if a matching function key is included.\n\tt.Funcs = template.FuncMap{\n\t\t// probably could provide examples of each of these\n\t\t\"Minus\": func(a, b int) int {\n\t\t\treturn a - b\n\t\t},\n\t\t\"Add\": func(a, b int) int {\n\t\t\treturn a + b\n\t\t},\n\t\t\"Mod\": func(a, b int) int {\n\t\t\treturn int(math.Mod(float64(a), float64(b)))\n\t\t},\n\t\t\"AssetUrl\": func(p string) string {\n\t\t\tif !strings.HasPrefix(p, \"/\") {\n\t\t\t\tp = \"/\" + p\n\t\t\t}\n\t\t\treturn p\n\t\t},\n\t\t\"AppAssetUrl\": func(p string) string {\n\t\t\tif !strings.HasPrefix(p, \"/\") {\n\t\t\t\tp = \"/\" + p\n\t\t\t}\n\t\t\treturn p\n\t\t},\n\t\t\"SiteS3Url\": func(p string) string {\n\t\t\treturn p\n\t\t},\n\t\t\"S3Url\": func(p string) string {\n\t\t\treturn p\n\t\t},\n\t\t\"AppBaseUrl\": func(p string) string {\n\t\t\treturn p\n\t\t},\n\t\t\"Http2Https\": func(u string) string {\n\t\t\treturn strings.Replace(u, \"http:\", \"https:\", 1)\n\t\t},\n\t\t\"StringHasPrefix\": func(str, match string) bool {\n\t\t\tif strings.HasPrefix(str, match) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\t\"StringHasSuffix\": func(str, match string) bool {\n\t\t\tif strings.HasSuffix(str, match) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\t\"StringContains\": func(str, match string) bool {\n\t\t\tif strings.Contains(str, match) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\t\"NavPageClass\": func(uri, uriMatch, uriClass string) string {\n\t\t\tu, err := url.Parse(uri)\n\t\t\tif err != nil {\n\t\t\t\treturn 
\"?\"\n\t\t\t}\n\t\t\tif strings.HasPrefix(u.Path, uriMatch) {\n\t\t\t\treturn uriClass\n\t\t\t}\n\t\t\treturn \"\"\n\t\t},\n\t\t\"UrlEncode\": func(k string) string {\n\t\t\treturn url.QueryEscape(k)\n\t\t},\n\t\t\"html\": func(value interface{}) template.HTML {\n\t\t\treturn template.HTML(fmt.Sprint(value))\n\t\t},\n\t\t\"HasAuth\": func(ctx context.Context) bool {\n\t\t\tclaims, err := auth.ClaimsFromContext(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn claims.HasAuth()\n\t\t},\n\t\t\"HasRole\": func(ctx context.Context, roles ...string) bool {\n\t\t\tclaims, err := auth.ClaimsFromContext(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn claims.HasRole(roles...)\n\t\t},\n\n\t\t\"CmpString\": func(str1 string, str2Ptr *string) bool {\n\t\t\tvar str2 string\n\t\t\tif str2Ptr != nil {\n\t\t\t\tstr2 = *str2Ptr\n\t\t\t}\n\t\t\tif str1 == str2 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\t\"HasField\": func(v interface{}, name string) bool {\n\t\t\trv := reflect.ValueOf(v)\n\t\t\tif rv.Kind() == reflect.Ptr {\n\t\t\t\trv = rv.Elem()\n\t\t\t}\n\t\t\tif rv.Kind() != reflect.Struct {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn rv.FieldByName(name).IsValid()\n\t\t},\n\t\t\"dict\": func(values ...interface{}) (map[string]interface{}, error) {\n\t\t\tif len(values) == 0 {\n\t\t\t\treturn nil, errors.New(\"invalid dict call\")\n\t\t\t}\n\n\t\t\tdict := make(map[string]interface{})\n\n\t\t\tfor i := 0; i < len(values); i++ {\n\t\t\t\tkey, isset := values[i].(string)\n\t\t\t\tif !isset {\n\t\t\t\t\tif reflect.TypeOf(values[i]).Kind() == reflect.Map {\n\t\t\t\t\t\tm := values[i].(map[string]interface{})\n\t\t\t\t\t\tfor i, v := range m {\n\t\t\t\t\t\t\tdict[i] = v\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn nil, errors.New(\"dict values must be maps\")\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\ti++\n\t\t\t\t\tif i == len(values) {\n\t\t\t\t\t\treturn nil, errors.New(\"specify the key for non array 
values\")\n\t\t\t\t\t}\n\t\t\t\t\tdict[key] = values[i]\n\t\t\t\t}\n\n\t\t\t}\n\t\t\treturn dict, nil\n\t\t},\n\t}\n\tfor fn, f := range templateFuncs {\n\t\tt.Funcs[fn] = f\n\t}\n\n\treturn t\n}",
"func loadTestFuncs(ptest *build.Package) (*testFuncs, error) {\n\tt := &testFuncs{\n\t\tPackage: ptest,\n\t}\n\tlog.Debugf(\"loadTestFuncs: %v, %v\", ptest.TestGoFiles, ptest.XTestGoFiles)\n\tfor _, file := range ptest.TestGoFiles {\n\t\tif err := t.load(filepath.Join(ptest.Dir, file), \"_test\", &t.ImportTest, &t.NeedTest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tfor _, file := range ptest.XTestGoFiles {\n\t\tif err := t.load(filepath.Join(ptest.Dir, file), \"_xtest\", &t.ImportXtest, &t.NeedXtest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn t, nil\n}",
"func (m *Basic) FuncMap(_ *model.Card, _ int) template.FuncMap {\n\treturn defaultFuncMap\n}",
"func (p *Plugin) GetFuncs() map[string]app.Func {\n\n\treturn map[string]app.Func{\n\n\t\t\"send\": func(c app.Context) (interface{}, error) {\n\n\t\t\tif c.Has(\"message\") == false {\n\t\t\t\treturn nil, errors.New(\"message param required!\")\n\t\t\t}\n\n\t\t\treturn nil, beeep.Notify(\n\t\t\t\tc.GetOr(\"title\", c.App.Name).(string),\n\t\t\t\tc.Get(\"message\").(string),\n\t\t\t\tc.App.Path(\"icon.png\"),\n\t\t\t)\n\t\t},\n\t}\n}",
"func TestFuncMaps(t *testing.T) {\n\n\t// Test FuncValue map\n\tfor fName, fValue := range goHamlib.FuncValue {\n\t\t_, ok := goHamlib.FuncName[fValue]\n\t\tif !ok {\n\t\t\tt.Fatalf(\"Func %d does not exist in FuncName map\", fValue)\n\t\t}\n\t\tif fName != goHamlib.FuncName[fValue] {\n\t\t\tt.Fatalf(\"Name of Func inconsisted: %s\", fName)\n\t\t}\n\t}\n\n\t// Test FuncName map\n\tfor fValue, fName := range goHamlib.FuncName {\n\t\t_, ok := goHamlib.FuncValue[fName]\n\t\tif !ok {\n\t\t\tt.Fatalf(\"Func %s does not exist in FuncValue map\", fName)\n\t\t}\n\t\tif fValue != goHamlib.FuncValue[fName] {\n\t\t\tt.Fatalf(\"Value of Func inconsisted: %s\", fName)\n\t\t}\n\t}\n}",
"func (info *fileInfo) addFuncPtrDecls() {\n\tgen := &ast.GenDecl{\n\t\tTokPos: info.importCPos,\n\t\tTok: token.VAR,\n\t\tLparen: info.importCPos,\n\t\tRparen: info.importCPos,\n\t}\n\tnames := make([]string, 0, len(info.functions))\n\tfor name := range info.functions {\n\t\tnames = append(names, name)\n\t}\n\tsort.Strings(names)\n\tfor _, name := range names {\n\t\tobj := &ast.Object{\n\t\t\tKind: ast.Typ,\n\t\t\tName: \"C.\" + name + \"$funcaddr\",\n\t\t}\n\t\tvalueSpec := &ast.ValueSpec{\n\t\t\tNames: []*ast.Ident{&ast.Ident{\n\t\t\t\tNamePos: info.importCPos,\n\t\t\t\tName: \"C.\" + name + \"$funcaddr\",\n\t\t\t\tObj: obj,\n\t\t\t}},\n\t\t\tType: &ast.SelectorExpr{\n\t\t\t\tX: &ast.Ident{\n\t\t\t\t\tNamePos: info.importCPos,\n\t\t\t\t\tName: \"unsafe\",\n\t\t\t\t},\n\t\t\t\tSel: &ast.Ident{\n\t\t\t\t\tNamePos: info.importCPos,\n\t\t\t\t\tName: \"Pointer\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tobj.Decl = valueSpec\n\t\tgen.Specs = append(gen.Specs, valueSpec)\n\t}\n\tinfo.Decls = append(info.Decls, gen)\n}",
"func Helpers() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"orderedPrefixSearch\": func(argName string, columnName string) string {\n\t\t\treturn fmt.Sprintf(\"lower(%s) ~ :~%s\", columnName, argName)\n\t\t},\n\t\t\"textSearch\": func(argName string, columnNames ...string) string {\n\t\t\tvar buf strings.Builder\n\t\t\tbuf.WriteRune('(')\n\t\t\tfor i, columnName := range columnNames {\n\t\t\t\tif i > 0 {\n\t\t\t\t\tbuf.WriteString(\" OR \")\n\t\t\t\t}\n\t\t\t\tbuf.WriteString(fmt.Sprintf(\"to_tsvector('english', replace(lower(%s), '.', ' ')) @@ plainto_tsquery('english', replace(lower(:%s),'.',' '))\", columnName, argName))\n\t\t\t}\n\n\t\t\tbuf.WriteRune(')')\n\t\t\treturn buf.String()\n\t\t},\n\t}\n}",
"func MakeMap(f Functor) template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"rowsset\": func(interface{}) string { return \"\" }, // empty pipeline\n\t\t// acepp overrides rowsset and adds setrows\n\n\t\t\"class\": f.Class,\n\t\t\"colspan\": f.Colspan,\n\t\t\"jsxClose\": f.JSXClose,\n\t}\n}",
"func (r *Router) UseFunc(middlewareFuncs ...MiddlewareFunc) {\n\tfor _, fn := range middlewareFuncs {\n\t\tr.Use(MiddlewareFunc(fn))\n\t}\n}",
"func NewMapFunc(t mockConstructorTestingTNewMapFunc) *MapFunc {\n\tmock := &MapFunc{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func addCloudformationLambdaFunctions(template *cloudformation.Template, functions map[string]cloudformation.AWSServerlessFunction) {\n\t// convert all lambda functions to serverless functions so that invoke works for them\n\tfor n, f := range template.GetAllAWSLambdaFunctionResources() {\n\t\tif _, found := functions[n]; !found {\n\t\t\tfunctions[n] = lambdaToServerless(f)\n\t\t}\n\t}\n}",
"func generateGoTypesValidateFuncs(idx *jsonschema.Index) ([]byte, error) {\n\tw := bytes.NewBufferString(\"\\n\")\n\tfor _, k := range sortedMapKeysbyName(idx) {\n\t\tt, err := generateGoTypeValidateFunc((*idx)[k], idx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif string(t) != \"\" {\n\t\t\tfmt.Fprintf(w, \"%s\\n\", t)\n\t\t}\n\t}\n\n\treturn format.Source(w.Bytes())\n}",
"func AWSFuncs(f map[string]interface{}) {\n\tf2 := CreateAWSFuncs(context.Background())\n\tfor k, v := range f2 {\n\t\tf[k] = v\n\t}\n}",
"func (app *App) TemplateFunc(name string, f interface{}) *App {\n\treturn app.TemplateFuncs(template.FuncMap{name: f})\n}",
"func (ac *Config) LuaFunctionMap(w http.ResponseWriter, req *http.Request, luadata []byte, filename string) (template.FuncMap, error) {\n\tac.pongomutex.Lock()\n\tdefer ac.pongomutex.Unlock()\n\n\t// Retrieve a Lua state\n\tL := ac.luapool.Get()\n\tdefer ac.luapool.Put(L)\n\n\t// Prepare an empty map of functions (and variables)\n\tfuncs := make(template.FuncMap)\n\n\t// Give no filename (an empty string will be handled correctly by the function).\n\tac.LoadCommonFunctions(w, req, filename, L, nil, nil)\n\n\t// Run the script\n\tif err := L.DoString(string(luadata)); err != nil {\n\t\t// Close the Lua state\n\t\tL.Close()\n\n\t\t// Logging and/or HTTP response is handled elsewhere\n\t\treturn funcs, err\n\t}\n\n\t// Extract the available functions from the Lua state\n\tglobalTable := L.G.Global\n\tglobalTable.ForEach(func(key, value lua.LValue) {\n\t\t// Check if the current value is a string variable\n\t\tif luaString, ok := value.(lua.LString); ok {\n\t\t\t// Store the variable in the same map as the functions (string -> interface)\n\t\t\t// for ease of use together with templates.\n\t\t\tfuncs[key.String()] = luaString.String()\n\t\t} else if luaTable, ok := value.(*lua.LTable); ok {\n\n\t\t\t// Convert the table to a map and save it.\n\t\t\t// Ignore values of a different type.\n\t\t\tmapinterface, _ := convert.Table2map(luaTable, false)\n\t\t\tswitch m := mapinterface.(type) {\n\t\t\tcase map[string]string:\n\t\t\t\tfuncs[key.String()] = map[string]string(m)\n\t\t\tcase map[string]int:\n\t\t\t\tfuncs[key.String()] = map[string]int(m)\n\t\t\tcase map[int]string:\n\t\t\t\tfuncs[key.String()] = map[int]string(m)\n\t\t\tcase map[int]int:\n\t\t\t\tfuncs[key.String()] = map[int]int(m)\n\t\t\t}\n\n\t\t\t// Check if the current value is a function\n\t\t} else if luaFunc, ok := value.(*lua.LFunction); ok {\n\t\t\t// Only export the functions defined in the given Lua code,\n\t\t\t// not all the global functions. 
IsG is true if the function is global.\n\t\t\tif !luaFunc.IsG {\n\n\t\t\t\tfunctionName := key.String()\n\n\t\t\t\t// Register the function, with a variable number of string arguments\n\t\t\t\t// Functions returning (string, error) are supported by html.template\n\t\t\t\tfuncs[functionName] = func(args ...string) (any, error) {\n\t\t\t\t\t// Create a brand new Lua state\n\t\t\t\t\tL2 := ac.luapool.New()\n\t\t\t\t\tdefer L2.Close()\n\n\t\t\t\t\t// Set up a new Lua state with the current http.ResponseWriter and *http.Request\n\t\t\t\t\tac.LoadCommonFunctions(w, req, filename, L2, nil, nil)\n\n\t\t\t\t\t// Push the Lua function to run\n\t\t\t\t\tL2.Push(luaFunc)\n\n\t\t\t\t\t// Push the given arguments\n\t\t\t\t\tfor _, arg := range args {\n\t\t\t\t\t\tL2.Push(lua.LString(arg))\n\t\t\t\t\t}\n\n\t\t\t\t\t// Run the Lua function\n\t\t\t\t\terr := L2.PCall(len(args), lua.MultRet, nil)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t// If calling the function did not work out, return the infostring and error\n\t\t\t\t\t\treturn utils.Infostring(functionName, args), err\n\t\t\t\t\t}\n\n\t\t\t\t\t// Empty return value if no values were returned\n\t\t\t\t\tvar retval any\n\n\t\t\t\t\t// Return the first of the returned arguments, as a string\n\t\t\t\t\tif L2.GetTop() >= 1 {\n\t\t\t\t\t\tlv := L2.Get(-1)\n\t\t\t\t\t\ttbl, isTable := lv.(*lua.LTable)\n\t\t\t\t\t\tswitch {\n\t\t\t\t\t\tcase isTable:\n\t\t\t\t\t\t\t// lv was a Lua Table\n\t\t\t\t\t\t\tretval = gluamapper.ToGoValue(tbl, gluamapper.Option{\n\t\t\t\t\t\t\t\tNameFunc: func(s string) string {\n\t\t\t\t\t\t\t\t\treturn s\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\tif ac.debugMode && ac.verboseMode {\n\t\t\t\t\t\t\t\tlog.Info(utils.Infostring(functionName, args) + \" -> (map)\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tcase lv.Type() == lua.LTString:\n\t\t\t\t\t\t\t// lv is a Lua String\n\t\t\t\t\t\t\tretstr := L2.ToString(1)\n\t\t\t\t\t\t\tretval = retstr\n\t\t\t\t\t\t\tif ac.debugMode && ac.verboseMode 
{\n\t\t\t\t\t\t\t\tlog.Info(utils.Infostring(functionName, args) + \" -> \\\"\" + retstr + \"\\\"\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tretval = \"\"\n\t\t\t\t\t\t\tlog.Warn(\"The return type of \" + utils.Infostring(functionName, args) + \" can't be converted\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// No return value, return an empty string and nil\n\t\t\t\t\treturn retval, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\t// Return the map of functions\n\treturn funcs, nil\n}",
"func (r *nodeRederFuncs) RegisterFuncs(reg NodeRendererFuncRegisterer) {\n\n\t// blocks\n\treg.Register(ast.KindDocument, r.renderDocument)\n\treg.Register(ast.KindHeading, r.renderHeading)\n\treg.Register(ast.KindBlockquote, r.renderBlockquote)\n\treg.Register(ast.KindCodeBlock, r.renderCodeBlock)\n\treg.Register(ast.KindFencedCodeBlock, r.renderCodeBlock)\n\treg.Register(ast.KindHTMLBlock, r.renderHTMLBlock)\n\treg.Register(ast.KindList, r.renderList)\n\treg.Register(ast.KindListItem, r.renderListItem)\n\treg.Register(ast.KindParagraph, r.renderParagraph)\n\treg.Register(ast.KindTextBlock, r.renderTextBlock)\n\treg.Register(ast.KindThematicBreak, r.renderThematicBreak)\n\n\t// inlines\n\treg.Register(ast.KindAutoLink, r.renderAutoLink)\n\treg.Register(ast.KindCodeSpan, r.renderCodeSpan)\n\treg.Register(ast.KindEmphasis, r.renderEmphasis)\n\treg.Register(ast.KindImage, r.renderImage)\n\treg.Register(ast.KindLink, r.renderLink)\n\t// m[ast.KindRawHTML] = r.renderRawHTML // Not applicable to PDF\n\treg.Register(ast.KindText, r.renderText)\n\treg.Register(ast.KindString, r.renderText)\n\n\t// GFM Extensions\n\t// Tables\n\treg.Register(east.KindTable, r.renderTable)\n\treg.Register(east.KindTableHeader, r.renderTableHeader)\n\treg.Register(east.KindTableRow, r.renderTableRow)\n\treg.Register(east.KindTableCell, r.renderTableCell)\n\t// Strikethrough\n\treg.Register(east.KindStrikethrough, r.renderStrikethrough)\n\t// Checkbox\n\treg.Register(east.KindTaskCheckBox, r.renderTaskCheckBox)\n}",
"func registerTemplateAPIs(ws *restful.WebService) {\n\n\terr := filepath.Walk(DockerfilePath, walkDockerfiles)\n\n\tif err != nil {\n\t\tlog.WarnWithFields(\"error occur when walk dockerfile path, \", log.Fields{\"path\": DockerfilePath, \"err\": err})\n\t}\n\n\terr = filepath.Walk(YamlPath, walkYamlfiles)\n\n\tif err != nil {\n\t\tlog.WarnWithFields(\"error occur when walk yamlfile path, \", log.Fields{\"path\": YamlPath, \"err\": err})\n\t}\n\n\tws.Route(ws.GET(\"/templates/yamls\").\n\t\tTo(listYamlfiles).\n\t\tDoc(\"list all yaml templates\"))\n\n\tws.Route(ws.GET(\"/templates/yamls/{yamlfile}\").\n\t\tTo(getYamlfile).\n\t\tDoc(\"get one yaml template\").\n\t\tParam(ws.PathParameter(\"yamlfile\", \"yaml file name\").DataType(\"string\")))\n\n\tws.Route(ws.GET(\"/templates/dockerfiles\").\n\t\tTo(listDockerfiles).\n\t\tDoc(\"list all docekrfile templates\"))\n\n\tws.Route(ws.GET(\"/templates/dockerfiles/{dockerfile}\").\n\t\tTo(getDockerfile).\n\t\tDoc(\"get one docekrfile template\").\n\t\tParam(ws.PathParameter(\"dockerfile\", \"dockerfile name\").DataType(\"string\")))\n\n}",
"func TestAnalyzeFunctions(t *testing.T) {\n\tvar tests = []analyzeTest{\n\t\t{\n\t\t\tname: \"unknown function gives unknown usage\",\n\t\t\ttemplates: map[string]string{\n\t\t\t\t\"test.soy\": `\n\t\t\t\t{namespace test}\n\t\t\t\t/**\n\t\t\t\t* @param a\n\t\t\t\t*/\n\t\t\t\t{template .main}\n\t\t\t\t\t{myFunc($a.b)}\n\t\t\t\t{/template}\n\t\t\t`,\n\t\t\t},\n\t\t\ttemplateName: \"test.main\",\n\t\t\texpected: map[string]interface{}{\n\t\t\t\t\"a\": map[string]interface{}{\n\t\t\t\t\t\"b\": \"?\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"length does not affect usage\",\n\t\t\ttemplates: map[string]string{\n\t\t\t\t\"test.soy\": `\n\t\t\t\t{namespace test}\n\t\t\t\t/**\n\t\t\t\t* @param a\n\t\t\t\t*/\n\t\t\t\t{template .main}\n\t\t\t\t\t{if length($a) > 0}\n\t\t\t\t\t\t{$a[0].b}\n\t\t\t\t\t{/if}\n\t\t\t\t{/template}\n\t\t\t`,\n\t\t\t},\n\t\t\ttemplateName: \"test.main\",\n\t\t\texpected: map[string]interface{}{\n\t\t\t\t\"a\": map[string]interface{}{\n\t\t\t\t\t\"b\": \"*\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"augmentMap adds to both maps\",\n\t\t\ttemplates: map[string]string{\n\t\t\t\t\"test.soy\": `\n\t\t\t\t{namespace test}\n\t\t\t\t/**\n\t\t\t\t* @param a\n\t\t\t\t* @param b\n\t\t\t\t*/\n\t\t\t\t{template .main}\n\t\t\t\t\t{let $c: augmentMap($a,$b)/}\n\t\t\t\t\t{$c.d}\n\t\t\t\t{/template}\n\t\t\t`,\n\t\t\t},\n\t\t\ttemplateName: \"test.main\",\n\t\t\texpected: map[string]interface{}{\n\t\t\t\t\"a\": map[string]interface{}{\n\t\t\t\t\t\"d\": \"*\",\n\t\t\t\t},\n\t\t\t\t\"b\": map[string]interface{}{\n\t\t\t\t\t\"d\": \"*\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"augmentMap and quoteKeysIfJs do not affect structure\",\n\t\t\ttemplates: map[string]string{\n\t\t\t\t\"test.soy\": `\n\t\t\t\t{namespace test}\n\t\t\t\t/**\n\t\t\t\t* @param a\n\t\t\t\t* @param b\n\t\t\t\t*/\n\t\t\t\t{template .main}\n\t\t\t\t\t{let $x: augmentMap($a,$b)/}\n\t\t\t\t\t{let $y: 
quoteKeysIfJs($a)/}\n\t\t\t\t\t{$x.c}\n\t\t\t\t\t{$y.d}\n\t\t\t\t{/template}\n\t\t\t`,\n\t\t\t},\n\t\t\ttemplateName: \"test.main\",\n\t\t\texpected: map[string]interface{}{\n\t\t\t\t\"a\": map[string]interface{}{\n\t\t\t\t\t\"c\": \"*\",\n\t\t\t\t\t\"d\": \"*\",\n\t\t\t\t},\n\t\t\t\t\"b\": map[string]interface{}{\n\t\t\t\t\t\"c\": \"*\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\ttestAnalyze(t, tests)\n}",
"func exportFuncs(wasmExportsMap map[string]js.Func) {\n\n\tfor k, v := range wasmExportsMap {\n\t\tjs.Global().Set(k, v) // set function definition on js 'window' object\n\t}\n}",
"func (t *TemplateFuncsNamespace) AddMethodMapping(m interface{}, aliases []string, examples [][2]string) {\n\tif t.MethodMappings == nil {\n\t\tt.MethodMappings = make(map[string]TemplateFuncMethodMapping)\n\t}\n\n\tname := methodToName(m)\n\n\t// sanity check\n\tfor _, e := range examples {\n\t\tif e[0] == \"\" {\n\t\t\tpanic(t.Name + \": Empty example for \" + name)\n\t\t}\n\t}\n\tfor _, a := range aliases {\n\t\tif a == \"\" {\n\t\t\tpanic(t.Name + \": Empty alias for \" + name)\n\t\t}\n\t}\n\n\tt.MethodMappings[name] = TemplateFuncMethodMapping{\n\t\tMethod: m,\n\t\tAliases: aliases,\n\t\tExamples: examples,\n\t}\n\n}",
"func BeeFuncMap() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"trim\": strings.TrimSpace,\n\t\t\"bold\": colors.Bold,\n\t\t\"headline\": colors.MagentaBold,\n\t\t\"foldername\": colors.RedBold,\n\t\t\"endline\": EndLine,\n\t\t\"tmpltostr\": TmplToString,\n\t}\n}",
"func wrapHelpers(fs template.FuncMap) template.FuncMap {\n\twrappedHelpers := make(template.FuncMap, len(fs))\n\tfor key, helper := range fs {\n\t\thelperV := reflect.ValueOf(helper)\n\n\t\t// ignore if current helper is not a func\n\t\tif helperV.Kind() != reflect.Func {\n\t\t\tcontinue\n\t\t}\n\n\t\thelperT := helperV.Type()\n\t\tparamsCount := helperT.NumIn()\n\t\tparamsTypes := make([]string, paramsCount)\n\t\tfor i := 0; i < paramsCount; i++ {\n\t\t\tparamsTypes[i] = helperT.In(i).Name()\n\t\t}\n\n\t\t// create the wrapper func\n\t\twrappedHelpers[key] = func(ps ...interface{}) interface{} {\n\t\t\t// if the helper func need more params than ps length, throw an error\n\t\t\tif len(ps) < paramsCount {\n\t\t\t\t// panic will be catched be text/template executor\n\t\t\t\tpanic(fmt.Sprintf(\"missing params (expected: %s)\", strings.Join(paramsTypes, \", \")))\n\t\t\t}\n\n\t\t\t// for all helper's params, forward values from wrapper\n\t\t\tvalues := make([]reflect.Value, len(ps))\n\t\t\tfor i := 0; i < len(ps); i++ {\n\t\t\t\tif value, ok := ps[i].(*val); ok {\n\t\t\t\t\t// if the value is a pointer to val, we should return its internal value\n\t\t\t\t\tvalues[i] = reflect.ValueOf((*value)[\"_\"])\n\t\t\t\t} else if value, ok := ps[i].(val); ok {\n\t\t\t\t\t// if the value is a val, we should return its internal value\n\t\t\t\t\tvalues[i] = reflect.ValueOf(value[\"_\"])\n\t\t\t\t} else if v := reflect.ValueOf(ps[i]); v.IsValid() {\n\t\t\t\t\t// for all params that are not val (string, integer...) use it directly\n\t\t\t\t\tvalues[i] = v\n\t\t\t\t} else {\n\t\t\t\t\t// if the value is not valid (means that given value is nil with unknown type), convert to nil void pointer\n\t\t\t\t\tvar v *void\n\t\t\t\t\tvalues[i] = reflect.ValueOf(v)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tresults := helperV.Call(values)\n\t\t\tif len(results) == 0 {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\treturn results[0].Interface()\n\t\t}\n\t}\n\treturn wrappedHelpers\n}",
"func (dumbRouter *DumbRouter) AddFunctionMapping(funcURL string, function func(req *http.Request, res http.ResponseWriter)) {\n\tdumbRouter.routes[funcURL] = function\n}",
"func Register(funcName string, backend TemplateFunc, buildFlags FlagsFunc) {\n\tlock.Lock()\n\tdefer lock.Unlock()\n\tbackends[funcName] = backend\n\tflags[funcName] = buildFlags\n}",
"func templateFunctionList(elements ...interface{}) []interface{} {\n\treturn elements\n}",
"func (ctx *Context) UseFunc(fns ...MiddlewareFunc) {\n\tif ctx.middlewares == nil {\n\t\tctx.middlewares = make([]Middleware, 0)\n\t}\n\tfor _, fn := range fns {\n\t\tctx.middlewares = append(ctx.middlewares, fn)\n\t}\n}",
"func (ft *FeatureTemplate) ToMap() (map[string]interface{}, error) {\n\tftAttrMap := make(map[string]interface{})\n\n\tif ft.TemplateName != \"\" {\n\t\tftAttrMap[\"templateName\"] = ft.TemplateName\n\t}\n\n\tif ft.TemplateDescription != \"\" {\n\t\tftAttrMap[\"templateDescription\"] = ft.TemplateDescription\n\t}\n\n\tif ft.TemplateType != \"\" {\n\t\tftAttrMap[\"templateType\"] = ft.TemplateType\n\t}\n\n\tftAttrMap[\"deviceType\"] = ft.DeviceType\n\n\tif ft.TemplateMinVersion != \"\" {\n\t\tftAttrMap[\"templateMinVersion\"] = ft.TemplateMinVersion\n\t}\n\n\tftAttrMap[\"factoryDefault\"] = ft.FactoryDefault\n\n\tftAttrMap[\"templateDefinition\"] = ft.TemplateDefinition\n\n\treturn ftAttrMap, nil\n}",
"func (t *CleanupTasks) AddFunc(f func()) {\n\t*t = append(*t, f)\n}",
"func VerifyKernelFuncs(requiredKernelFuncs ...string) (map[string]struct{}, error) {\n\treturn funcCache.verifyKernelFuncs(requiredKernelFuncs)\n}",
"func (t *dynamicTemplateHandler) AddDataMap(m map[string]interface{}) templateHandler {\n\tif len(t.data) == 0 {\n\t\tt.data = m\n\t} else {\n\t\tfor k, v := range m {\n\t\t\tt.data[k] = v\n\t\t}\n\t}\n\treturn t\n}",
"func createUsageFuncMap() *usageFuncMap {\n\tusageFuncs := make(usageFuncMap)\n\n\tusageFuncs[\"L-D64F1F14\"] = getUsageFunc(getUsageApplicationVersions)\n\tusageFuncs[\"L-1CEABD17\"] = getUsageFunc(getUsageApplications)\n\tusageFuncs[\"L-8EFC1C51\"] = getUsageFunc(getUsageEnvironments)\n\n\treturn &usageFuncs\n}",
"func DefaultFuncMap() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"go\": ToGo,\n\t\t\"goPrivate\": ToGoPrivate,\n\t\t\"lcFirst\": LcFirst,\n\t\t\"ucFirst\": UcFirst,\n\t}\n}",
"func (t *staticTemplateHandler) AddDataMap(m map[string]interface{}) templateHandler {\n\tif len(t.data) == 0 {\n\t\tt.data = m\n\t} else {\n\t\tfor k, v := range m {\n\t\t\tt.data[k] = v\n\t\t}\n\t}\n\treturn t\n}",
"func WithFuncMap(fm map[string]interface{}) Option {\n\treturn optFuncMap(fm)\n}",
"func servicesFunc(ctx *TemplateContext) func(...string) (interface{}, error) {\n\treturn func(s ...string) (interface{}, error) {\n\t\treturn ctx.GetServices(s...)\n\t}\n}",
"func parseTemplates() (){\n templates = make(map[string]*template.Template)\n if files, err := ioutil.ReadDir(CONFIG.TemplatesDir) ; err != nil {\n msg := \"Error reading templates directory: \" + err.Error()\n log.Fatal(msg)\n } else {\n for _, f := range files {\n fmt.Println(f.Name())\n err = nil\n\n tpl, tplErr := template.New(f.Name()).Funcs(template.FuncMap{\n \"humanDate\": humanDate,\n \"humanSize\": humanSize,}).ParseFiles(CONFIG.TemplatesDir + \"/\" + f.Name())\n if tplErr != nil {\n log.Fatal(\"Error parsing template: \" + tplErr.Error())\n } else {\n templates[f.Name()] = tpl\n }\n }\n }\n return\n}",
"func NewTextFormatterWithFuncs(s string, funcMap template.FuncMap) *TextFormatter {\n\treturn &TextFormatter{\n\t\td: newTextFormatterTemplate(s, ansi.Reset, funcMap),\n\t\tfuncMap: funcMap,\n\t\tnewline: []byte(textFormatterNewline),\n\t}\n}",
"func (p Parser[T]) MakerFuncs() map[string][]string {\n\tmakerFuncs := make(map[string][]string)\n\n\tfor k, mi := range p.makers {\n\t\tmakerFuncs[k] = mi.Args\n\t}\n\n\treturn makerFuncs\n}"
] | [
"0.717656",
"0.7118898",
"0.7076246",
"0.6852086",
"0.6742249",
"0.66769844",
"0.66769844",
"0.66100323",
"0.6607756",
"0.65820014",
"0.65328634",
"0.6508341",
"0.6488062",
"0.64860225",
"0.6327243",
"0.6289445",
"0.62823504",
"0.6241177",
"0.6225048",
"0.61612254",
"0.61488265",
"0.6148699",
"0.61347926",
"0.6123938",
"0.61072356",
"0.6088767",
"0.607822",
"0.6007276",
"0.5990527",
"0.59717596",
"0.5970036",
"0.5965837",
"0.5963748",
"0.5929307",
"0.5914741",
"0.59101117",
"0.5892639",
"0.5849835",
"0.5779633",
"0.5765674",
"0.57436496",
"0.57342625",
"0.5686728",
"0.561268",
"0.5583465",
"0.5562426",
"0.55254734",
"0.55128944",
"0.5454626",
"0.54527634",
"0.541912",
"0.54059374",
"0.5384006",
"0.53823256",
"0.5366316",
"0.5351334",
"0.5348236",
"0.5334538",
"0.5308448",
"0.5299905",
"0.5169403",
"0.51526153",
"0.5148066",
"0.5113138",
"0.5091994",
"0.5083098",
"0.5082569",
"0.50747645",
"0.50635123",
"0.50535154",
"0.4986208",
"0.4960462",
"0.49404758",
"0.49317297",
"0.4926387",
"0.49218342",
"0.4900608",
"0.48992777",
"0.48931074",
"0.48909193",
"0.48777875",
"0.48761857",
"0.4870326",
"0.48597503",
"0.48565537",
"0.48561093",
"0.48470026",
"0.48101425",
"0.48058653",
"0.48049405",
"0.4786097",
"0.478153",
"0.47774488",
"0.47708803",
"0.47631213",
"0.47557652",
"0.47527704",
"0.47491264",
"0.4743886",
"0.47363737"
] | 0.72019356 | 0 |
Clone creates a copy of the TRoot for ease of creating sublayouts. Since TRoots cannot be executed externally, we don't have the possibility of returning an error. | Clone создаёт копию TRoot для удобства создания подрасположений. Поскольку TRootы не могут выполняться внешним образом, у нас нет возможности возвращать ошибку. | func (t *TRoot) Clone() *TRoot {
var clone, _ = t.template.Clone()
return &TRoot{clone, t.Path}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (t *Template) Clone() (*Template, error) {\n\tvar tmpl, err = t.Template.Clone()\n\treturn &Template{tmpl, t.Name}, err\n}",
"func (w *WebGLRenderTarget) Clone() *WebGLRenderTarget {\n\tw.p.Call(\"clone\")\n\treturn w\n}",
"func (b *Buildtemplate) Clone(source buildv1alpha1.BuildTemplate, clientset *client.ConfigSet) (*buildv1alpha1.BuildTemplate, error) {\n\tsource.SetName(\"\")\n\tsource.SetGenerateName(b.Name + \"-\")\n\tsource.SetNamespace(b.Namespace)\n\tsource.SetOwnerReferences([]metav1.OwnerReference{})\n\tsource.SetResourceVersion(\"\")\n\tsource.Kind = \"BuildTemplate\"\n\tif len(clientset.Registry.Secret) != 0 {\n\t\taddSecretVolume(clientset.Registry.Secret, &source)\n\t\tsetEnvConfig(clientset.Registry.Secret, &source)\n\t}\n\treturn createBuildTemplate(source, clientset)\n}",
"func (t *TaskBox[T, U, C, CT, TF]) Clone() *TaskBox[T, U, C, CT, TF] {\n\tnewBox := NewTaskBox[T, U, C, CT, TF](t.constArgs, t.contextFunc, t.wg, t.task, t.resultCh, t.taskID)\n\treturn &newBox\n}",
"func (r *View) Clone() *View {\n\treturn r.CloneLimit(r.size)\n}",
"func (c *Container) Clone() *Container {\n\tinterf, err := copystructure.Copy(c)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tc2, ok := interf.(*Container)\n\tif !ok {\n\t\tpanic(\"copystructure.Copy(*tlc.Container) did not return a *tlc.Container\")\n\t}\n\n\treturn c2\n}",
"func clone(t *kernel.Task, flags int, stack hostarch.Addr, parentTID hostarch.Addr, childTID hostarch.Addr, tls hostarch.Addr) (uintptr, *kernel.SyscallControl, error) {\n\targs := linux.CloneArgs{\n\t\tFlags: uint64(uint32(flags) &^ linux.CSIGNAL),\n\t\tChildTID: uint64(childTID),\n\t\tParentTID: uint64(parentTID),\n\t\tExitSignal: uint64(flags & linux.CSIGNAL),\n\t\tStack: uint64(stack),\n\t\tTLS: uint64(tls),\n\t}\n\tntid, ctrl, err := t.Clone(&args)\n\treturn uintptr(ntid), ctrl, err\n}",
"func (t *FaultDomainTree) Copy() *FaultDomainTree {\n\tif t == nil {\n\t\treturn nil\n\t}\n\n\ttCopy := NewFaultDomainTree().\n\t\tWithNodeDomain(t.Domain).\n\t\tWithID(t.ID)\n\tfor _, c := range t.Children {\n\t\ttCopy.Children = append(tCopy.Children, c.Copy())\n\t}\n\n\treturn tCopy\n}",
"func (i *IContainer) Clone(w http.ResponseWriter, r *http.Request) *IClone {\n\treturn &IClone{\n\t\tIContainer: i,\n\t\tw: w,\n\t\tr: r,\n\t\tmutex: &sync.RWMutex{},\n\t\tthreadData: make(map[string]interface{}),\n\t}\n}",
"func (w *Wrapper) Clone() *Wrapper {\n\treturn w.cloning(false)\n}",
"func execmTemplateClone(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret, ret1 := args[0].(*template.Template).Clone()\n\tp.Ret(1, ret, ret1)\n}",
"func (t *TRoot) Template() *Template {\n\treturn t.Clone().template\n}",
"func (cte *CTE) Clone() *CTE {\n\tif cte == nil {\n\t\treturn nil\n\t}\n\tother := *cte\n\tother.TableName = cte.TableName.Clone()\n\tother.Columns = cloneIdents(cte.Columns)\n\tother.Select = cte.Select.Clone()\n\treturn &other\n}",
"func (m *Mocker) Clone(t *testing.T) (clone *Mocker) {\n\tm.Close()\n\n\tclone = New(t)\n\n\tclone.handlers = m.deepCopyHandlers()\n\n\treturn\n}",
"func (lt *PhysicalTopN) Clone() (PhysicalPlan, error) {\n\tcloned := new(PhysicalTopN)\n\t*cloned = *lt\n\tbase, err := lt.basePhysicalPlan.cloneWithSelf(cloned)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcloned.basePhysicalPlan = *base\n\tcloned.ByItems = make([]*util.ByItems, 0, len(lt.ByItems))\n\tfor _, it := range lt.ByItems {\n\t\tcloned.ByItems = append(cloned.ByItems, it.Clone())\n\t}\n\tcloned.PartitionBy = make([]property.SortItem, 0, len(lt.PartitionBy))\n\tfor _, it := range lt.PartitionBy {\n\t\tcloned.PartitionBy = append(cloned.PartitionBy, it.Clone())\n\t}\n\treturn cloned, nil\n}",
"func (tree *Tree) GetCopy() *Tree {\n\tnewTree := &Tree{\n\t\tmaxEntry: tree.maxEntry,\n\t\tminEntry: tree.minEntry,\n\t\tdistCalc: tree.distCalc,\n\t\tObjectCount: tree.ObjectCount,\n\t\tsplitMecha: tree.splitMecha,\n\t}\n\n\tvar newRoot node\n\troot := tree.root\n\tnewEntryList := copyEntryList(tree.root)\n\tif root.isLeaf() {\n\t\tnewRoot = &leaf{\n\t\t\tradius: root.getRadius(),\n\t\t\tcentroidObject: root.getCentroidObject(),\n\t\t\tentryList: newEntryList,\n\t\t}\n\t} else {\n\t\tnewRoot = &branch{\n\t\t\tradius: root.getRadius(),\n\t\t\tcentroidObject: root.getCentroidObject(),\n\t\t\tentryList: newEntryList,\n\t\t}\n\t}\n\tfor idx := range newEntryList {\n\t\tnewEntryList[idx].setParent(newRoot)\n\t}\n\tnewTree.root = newRoot\n\treturn newTree\n}",
"func (t Topology) Copy() Topology {\n\treturn Topology{\n\t\tNodes: t.Nodes.Copy(),\n\t}\n}",
"func (llrb *LLRB) Clone(name string) *LLRB {\n\tif !llrb.lock() {\n\t\treturn nil\n\t}\n\n\tnewllrb := NewLLRB(llrb.name, llrb.setts)\n\tnewllrb.llrbstats = llrb.llrbstats\n\tnewllrb.h_upsertdepth = llrb.h_upsertdepth.Clone()\n\tnewllrb.seqno = llrb.seqno\n\n\tnewllrb.setroot(newllrb.clonetree(llrb.getroot()))\n\n\tllrb.unlock()\n\treturn newllrb\n}",
"func (tc *STemplateController) Clone(clone_name string, recursive bool) (*srv_tmpl.ServiceTemplate, error) {\n\turl := urlTemplateAction(tc.ID)\n\taction := make(map[string]interface{})\n\n\taction[\"action\"] = map[string]interface{}{\n\t\t\"perform\": \"clone\",\n\t\t\"params\": map[string]interface{}{\n\t\t\t\"name\": clone_name,\n\t\t\t\"recursive\": recursive,\n\t\t},\n\t}\n\n\t//Get response\n\tresponse, err := tc.c.ClientFlow.HTTPMethod(\"POST\", url, action)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !response.status {\n\t\treturn nil, errors.New(response.body)\n\t}\n\n\t//Build Service from response\n\tstemplate := &srv_tmpl.ServiceTemplate{}\n\tstemplate_str, err := json.Marshal(response.BodyMap()[\"DOCUMENT\"])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = json.Unmarshal(stemplate_str, stemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn stemplate, nil\n}",
"func (t *Tree) Copy() *Tree {\n\tif t == nil {\n\t\treturn nil\n\t}\n\treturn &Tree{\n\t\tName: t.Name,\n\t\tRoot: t.Root.CopyStatement(),\n\t\ttext: t.text,\n\t}\n}",
"func (p *PhysicalCTEStorage) Clone() (PhysicalPlan, error) {\n\tcloned, err := (*PhysicalCTE)(p).Clone()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn (*PhysicalCTEStorage)(cloned.(*PhysicalCTE)), nil\n}",
"func (bt *BinarySearchTree) Clone() *BinarySearchTree {\n\tt := &TreeNode{Val: bt.root.Val}\n\tclone(bt.root, t)\n\treturn &BinarySearchTree{root: t}\n}",
"func (p *PKGBUILD) Clone() *PKGBUILD {\n\tc := New()\n\tc.atoms = p.atoms.Clone()\n\tc.RecomputeInfos(true)\n\treturn c\n}",
"func (self *Rectangle) Clone() *Rectangle{\n return &Rectangle{self.Object.Call(\"clone\")}\n}",
"func (m *TestObj) Clone(interface{}) (interface{}, error) { return nil, nil }",
"func (tri *Triangle) Clone() *Triangle {\n\tnewTri := NewTriangle(tri.Mesh)\n\tfor _, vertex := range tri.Vertices {\n\t\tnewTri.SetVertices(vertex.Clone())\n\t}\n\tnewTri.RecalculateCenter()\n\treturn newTri\n}",
"func (p Page) Clone() Page {\n\tclone := make([]Section, len(p))\n\tfor i, section := range p {\n\t\tclone[i] = section.Clone()\n\t}\n\treturn clone\n}",
"func (p *Partitions) clone() *Partitions {\n\treplicas := make([][]*Node, len(p.Replicas))\n\n\tfor i := range p.Replicas {\n\t\tr := make([]*Node, len(p.Replicas[i]))\n\t\tcopy(r, p.Replicas[i])\n\t\treplicas[i] = r\n\t}\n\n\tregimes := make([]int, len(p.regimes))\n\tcopy(regimes, p.regimes)\n\n\treturn &Partitions{\n\t\tReplicas: replicas,\n\t\tSCMode: p.SCMode,\n\t\tregimes: regimes,\n\t}\n}",
"func (m *MerkleTree) Clone() *MerkleTree {\n\treturn &MerkleTree{\n\t\tnonce: m.nonce,\n\t\troot: m.root.clone(nil).(*interiorNode),\n\t\thash: append([]byte{}, m.hash...),\n\t}\n}",
"func (EmptyNode) Clone() Node { return EmptyNode{} }",
"func (w *Window) Clone() *Window {\n\tif w == nil {\n\t\treturn nil\n\t}\n\tother := *w\n\tother.Name = w.Name.Clone()\n\tother.Definition = w.Definition.Clone()\n\treturn &other\n}",
"func clone(s *Scroller) *Scroller {\n\tclone := &Scroller{\n\t\tpos: s.pos,\n\t\tline: s.line,\n\t\toffset: s.offset,\n\t\tdir: s.dir,\n\t\tscrolled: s.scrolled,\n\t\teditor: s.editor,\n\t\tctrl: s.ctrl,\n\t}\n\tfor _, h := range s.scrolled {\n\t\tclone.scrolled = append(clone.scrolled, h)\n\t}\n\treturn clone\n}",
"func (tq *TenantQuery) Clone() *TenantQuery {\n\tif tq == nil {\n\t\treturn nil\n\t}\n\treturn &TenantQuery{\n\t\tconfig: tq.config,\n\t\tctx: tq.ctx.Clone(),\n\t\torder: append([]OrderFunc{}, tq.order...),\n\t\tinters: append([]Interceptor{}, tq.inters...),\n\t\tpredicates: append([]predicate.Tenant{}, tq.predicates...),\n\t\t// clone intermediate query.\n\t\tsql: tq.sql.Clone(),\n\t\tpath: tq.path,\n\t}\n}",
"func (this *Selection) Clone() *Selection {\n\tresults := newEmptySelection(this.document)\n\tthis.Each(func(_ int, sel *Selection) {\n\t\tresults = results.AddNodes(cloneNode(sel.Node()))\n\t})\n\treturn results\n}",
"func (this *Selection) AppendClones(template *html.Node) *Selection {\n\tfor _, parent := range this.Nodes {\n\t\tparent.AppendChild(cloneNode(template))\n\t}\n\treturn this\n}",
"func (mesh *Mesh) Clone() *Mesh {\n\tnewMesh := NewMesh(mesh.Name)\n\tfor _, t := range mesh.Triangles {\n\t\tnewTri := t.Clone()\n\t\tnewMesh.Triangles = append(newMesh.Triangles, newTri)\n\t\tnewTri.Mesh = mesh\n\t}\n\treturn newMesh\n}",
"func (rp *routeTree) clone() queryTree {\n\tresult := *rp\n\tresult.vindexPreds = make([]*vindexPlusPredicates, len(rp.vindexPreds))\n\tfor i, pred := range rp.vindexPreds {\n\t\t// we do this to create a copy of the struct\n\t\tp := *pred\n\t\tresult.vindexPreds[i] = &p\n\t}\n\treturn &result\n}",
"func CloneMPT(mpt MerklePatriciaTrieI) *MerklePatriciaTrie {\n\tclone := NewMerklePatriciaTrie(mpt.GetNodeDB(), mpt.GetVersion(), mpt.GetRoot())\n\treturn clone\n}",
"func (n *NodeInfo) Clone() *NodeInfo {\n\tclone := &NodeInfo{\n\t\tnode: n.node,\n\t\tGeneration: n.Generation,\n\t}\n\tif len(n.Pods) > 0 {\n\t\tclone.Pods = append([]*PodInfo(nil), n.Pods...)\n\t}\n\n\tif len(n.PodsWithAffinity) > 0 {\n\t\tclone.PodsWithAffinity = append([]*PodInfo(nil), n.PodsWithAffinity...)\n\t}\n\tif len(n.PodsWithRequiredAntiAffinity) > 0 {\n\t\tclone.PodsWithRequiredAntiAffinity = append([]*PodInfo(nil), n.PodsWithRequiredAntiAffinity...)\n\t}\n\treturn clone\n}",
"func (s *CreateViewStatement) Clone() *CreateViewStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\t// other.Columns = cloneIdents(s.Columns)\n\tother.Select = s.Select.Clone()\n\treturn &other\n}",
"func (atc *AtomicTransactionComposer) Clone() AtomicTransactionComposer {\n\tnewTxContexts := make([]transactionContext, len(atc.txContexts))\n\tcopy(newTxContexts, atc.txContexts)\n\tfor i := range newTxContexts {\n\t\tnewTxContexts[i].txn.Group = types.Digest{}\n\t}\n\n\tif len(newTxContexts) == 0 {\n\t\tnewTxContexts = nil\n\t}\n\n\treturn AtomicTransactionComposer{\n\t\tstatus: BUILDING,\n\t\ttxContexts: newTxContexts,\n\t}\n}",
"func (l *universalLister) Clone() *universalLister {\n\tvar clonedLister universalLister\n\n\tclonedLister.resourceType = l.resourceType\n\tclonedLister.tableName = l.tableName\n\tclonedLister.selectedColumns = l.selectedColumns\n\tclonedLister.tenantColumn = l.tenantColumn\n\tclonedLister.orderByParams = append(clonedLister.orderByParams, l.orderByParams...)\n\n\treturn &clonedLister\n}",
"func (p *PersistentVolume) Clone() Resource {\n\treturn copyResource(p, &PersistentVolume{})\n}",
"func (v Uint) Clone() Node {\n\treturn v\n}",
"func (p *PhysicalWindow) Clone() (PhysicalPlan, error) {\n\tcloned := new(PhysicalWindow)\n\t*cloned = *p\n\tbase, err := p.physicalSchemaProducer.cloneWithSelf(cloned)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcloned.physicalSchemaProducer = *base\n\tcloned.PartitionBy = make([]property.SortItem, 0, len(p.PartitionBy))\n\tfor _, it := range p.PartitionBy {\n\t\tcloned.PartitionBy = append(cloned.PartitionBy, it.Clone())\n\t}\n\tcloned.OrderBy = make([]property.SortItem, 0, len(p.OrderBy))\n\tfor _, it := range p.OrderBy {\n\t\tcloned.OrderBy = append(cloned.OrderBy, it.Clone())\n\t}\n\tcloned.WindowFuncDescs = make([]*aggregation.WindowFuncDesc, 0, len(p.WindowFuncDescs))\n\tfor _, it := range p.WindowFuncDescs {\n\t\tcloned.WindowFuncDescs = append(cloned.WindowFuncDescs, it.Clone())\n\t}\n\tif p.Frame != nil {\n\t\tcloned.Frame = p.Frame.Clone()\n\t}\n\n\treturn cloned, nil\n}",
"func (b BoundingBox) Clone() BoundingBoxer {\n\treturn b\n}",
"func (s *Spec) Clone() *Spec {\n\tres := &Spec{Target: make(map[string]string)}\n\tfor k, v := range s.Target {\n\t\tres.Target[k] = v\n\t}\n\tfor _, app := range s.Apps {\n\t\tres.Apps = append(res.Apps, app.Clone())\n\t}\n\treturn res\n}",
"func (n *Nodes) Clone() data.Clonable {\n\treturn newNodes().Replace(n)\n}",
"func (c *Cmd) Clone() *Cmd {\n\tres := &Cmd{Cmd: c.Cmd.Clone(), sh: c.sh}\n\tinitSession(c.sh.tb, res)\n\treturn res\n}",
"func (mock *MockWorldMapWithGrid) Clone() world.WorldMap {\n\targs := mock.Called()\n\treturn args.Get(0).(world.WorldMap)\n}",
"func (bt *Tree) Copy() *Tree {\n\tcp := &Tree{bytes: bt.bytes, length: bt.length, root: &node{}}\n\tnodes := make([]*node, 0, bt.Length())\n\tnodeCopies := make([]*node, 0, bt.Length())\n\tnodes = append(nodes, bt.root)\n\tnodeCopies = append(nodeCopies, cp.root)\n\n\tfor {\n\t\tif len(nodes) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tn := nodes[0]\n\t\tcpn := nodeCopies[0]\n\t\tnodes = nodes[1:]\n\t\tnodeCopies = nodeCopies[1:]\n\t\tfor _, e := range n.edges {\n\t\t\tcpt := &node{key: e.target.key, data: e.target.data}\n\t\t\tcpn.edges = append(cpn.edges, &edge{label: e.label, target: cpt})\n\t\t\tnodes = append(nodes, e.target)\n\t\t\tnodeCopies = append(nodeCopies, cpt)\n\t\t}\n\t}\n\n\treturn cp\n}",
"func (cp *ControlPlane) Clone() data.Clonable {\n\treturn newControlPlane().Replace(cp)\n}",
"func (ls *PhysicalSort) Clone() (PhysicalPlan, error) {\n\tcloned := new(PhysicalSort)\n\tcloned.IsPartialSort = ls.IsPartialSort\n\tbase, err := ls.basePhysicalPlan.cloneWithSelf(cloned)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcloned.basePhysicalPlan = *base\n\tfor _, it := range ls.ByItems {\n\t\tcloned.ByItems = append(cloned.ByItems, it.Clone())\n\t}\n\treturn cloned, nil\n}",
"func (d *WindowDefinition) Clone() *WindowDefinition {\n\tif d == nil {\n\t\treturn nil\n\t}\n\tother := *d\n\tother.Base = d.Base.Clone()\n\tother.Partitions = cloneExprs(d.Partitions)\n\tother.OrderingTerms = cloneOrderingTerms(d.OrderingTerms)\n\tother.Frame = d.Frame.Clone()\n\treturn &other\n}",
"func (mock *MockWorldMap) Clone() world.WorldMap {\n\targs := mock.Called()\n\treturn args.Get(0).(world.WorldMap)\n}",
"func (pm partitionMap) clone() partitionMap {\n\t// Make deep copy of map.\n\tpmap := make(partitionMap, len(pm))\n\tfor ns := range pm {\n\t\tpmap[ns] = pm[ns].clone()\n\t}\n\treturn pmap\n}",
"func (t TestRepo) Clone() TestRepo {\n\tpath, err := ioutil.TempDir(\"\", \"gtm\")\n\tCheckFatal(t.test, err)\n\n\tr, err := git.Clone(t.repo.Path(), path, &git.CloneOptions{})\n\tCheckFatal(t.test, err)\n\n\treturn TestRepo{repo: r, test: t.test}\n}",
"func (w *Wrapper) Copy() *Wrapper {\n\treturn w.cloning(true)\n}",
"func (s *Selection) Clone() *Selection {\n\tns := newEmptySelection(s.document)\n\tns.Nodes = cloneNodes(s.Nodes)\n\treturn ns\n}",
"func (cur *sequenceCursor) clone() *sequenceCursor {\n\tvar parent *sequenceCursor\n\tif cur.parent != nil {\n\t\tparent = cur.parent.clone()\n\t}\n\tcl := newSequenceCursor(parent, cur.seq, cur.idx)\n\treturn cl\n}",
"func (v *Values) Clone() *Values {\n\tv.lock.RLock()\n\tdefer v.lock.RUnlock()\n\n\treturn newValues(v.root)\n}",
"func (pm *Map) Clone() *Map {\n\treturn &Map{\n\t\tless: pm.less,\n\t\troot: pm.root.incref(),\n\t}\n}",
"func (v Bool) Clone() Node {\n\treturn v\n}",
"func (self *Rectangle) Clone1O(output *Rectangle) *Rectangle{\n return &Rectangle{self.Object.Call(\"clone\", output)}\n}",
"func (entry *UtxoEntry) Clone() *UtxoEntry {\n\tif entry == nil {\n\t\treturn nil\n\t}\n\n\tnewEntry := &UtxoEntry{\n\t\tamount: entry.amount,\n\t\tpkScript: entry.pkScript,\n\t\tticketMinOuts: entry.ticketMinOuts,\n\t\tblockHeight: entry.blockHeight,\n\t\tblockIndex: entry.blockIndex,\n\t\tscriptVersion: entry.scriptVersion,\n\t\tstate: entry.state,\n\t\tpackedFlags: entry.packedFlags,\n\t}\n\n\treturn newEntry\n}",
"func (c OSClientBuildClonerClient) Clone(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error) {\n\treturn c.Client.Builds(namespace).Clone(request)\n}",
"func (z *zfsctl) Clone(ctx context.Context, name string, properties map[string]string, source string) *execute {\n\targs := []string{\"clone\", \"-p\"}\n\tif properties != nil {\n\t\tkv := \"-o \"\n\t\tfor k, v := range properties {\n\t\t\tkv += fmt.Sprintf(\"%s=%s \", k, v)\n\t\t}\n\t\targs = append(args, kv)\n\t}\n\targs = append(args, source, name)\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}",
"func cloneTask(t *Task) *Task {\n c := *t\n return &c\n}",
"func (c *Compound) Copy() Modifiable {\n\tnewC := new(Compound)\n\tnewC.LayeredPoint = c.LayeredPoint.Copy()\n\tnewSubRenderables := make(map[string]Modifiable)\n\tc.lock.RLock()\n\tfor k, v := range c.subRenderables {\n\t\tnewSubRenderables[k] = v.Copy()\n\t}\n\tc.lock.RUnlock()\n\tnewC.subRenderables = newSubRenderables\n\tnewC.curRenderable = c.curRenderable\n\tnewC.lock = sync.RWMutex{}\n\treturn newC\n}",
"func (this *Context) Clone() *Context {\n\tvar clone = *this\n\tclone.Parent = this\n\treturn &clone\n}",
"func (this *DynMap) Clone() *DynMap {\n\tmp := New()\n\tfor k, v := range(this.Map) {\n\t\tsubmp, ok := ToDynMap(this.Map[k])\n\t\tif ok {\n\t\t\tv = submp.Clone()\n\t\t}\n\t\tmp.Put(k, v)\n\t}\n\treturn mp\n}",
"func (s *VMStorage) Clone() *VMStorage {\n\tns := &VMStorage{\n\t\tc: s.c,\n\t\tauthCfg: s.authCfg,\n\t\tdatasourceURL: s.datasourceURL,\n\t\tappendTypePrefix: s.appendTypePrefix,\n\t\tlookBack: s.lookBack,\n\t\tqueryStep: s.queryStep,\n\n\t\tdataSourceType: s.dataSourceType,\n\t\tevaluationInterval: s.evaluationInterval,\n\n\t\t// init map so it can be populated below\n\t\textraParams: url.Values{},\n\n\t\tdebug: s.debug,\n\t}\n\tif len(s.extraHeaders) > 0 {\n\t\tns.extraHeaders = make([]keyValue, len(s.extraHeaders))\n\t\tcopy(ns.extraHeaders, s.extraHeaders)\n\t}\n\tfor k, v := range s.extraParams {\n\t\tns.extraParams[k] = v\n\t}\n\n\treturn ns\n}",
"func (ns NodeSolver) Clone() (cNs config.RextNodeSolver, err error) {\n\tvar cOpts config.RextKeyValueStore\n\tif cOpts, err = ns.GetOptions().Clone(); err != nil {\n\t\tlog.WithError(err).Errorln(\"can not clone options in node solver\")\n\t\treturn cNs, err\n\t}\n\tcNs = NewNodeSolver(ns.MType, ns.nodePath, cOpts)\n\treturn cNs, err\n}",
"func (w *WorldMapImpl) Clone() WorldMap {\n\tif len(w.Grid) > 0 {\n\t\tgrid := make([][]int, len(w.Grid))\n\t\tfor i := 0; i < len(w.Grid); i++ {\n\t\t\tgrid[i] = make([]int, len(w.Grid[i]))\n\t\t\tfor j := 0; j < len(w.Grid[i]); j++ {\n\t\t\t\tgrid[i][j] = w.Grid[i][j]\n\n\t\t\t}\n\t\t}\n\t\treturn &WorldMapImpl{\n\t\t\tGrid: grid,\n\t\t}\n\t} else {\n\t\treturn &WorldMapImpl{\n\t\t\tGrid: make([][]int, 0),\n\t\t}\n\t}\n}",
"func (siq *SubItemQuery) Clone() *SubItemQuery {\n\tif siq == nil {\n\t\treturn nil\n\t}\n\treturn &SubItemQuery{\n\t\tconfig: siq.config,\n\t\tlimit: siq.limit,\n\t\toffset: siq.offset,\n\t\torder: append([]OrderFunc{}, siq.order...),\n\t\tpredicates: append([]predicate.SubItem{}, siq.predicates...),\n\t\twithParent: siq.withParent.Clone(),\n\t\t// clone intermediate query.\n\t\tsql: siq.sql.Clone(),\n\t\tpath: siq.path,\n\t}\n}",
"func (tq *TeamQuery) Clone() *TeamQuery {\n\tif tq == nil {\n\t\treturn nil\n\t}\n\treturn &TeamQuery{\n\t\tconfig: tq.config,\n\t\tctx: tq.ctx.Clone(),\n\t\torder: append([]OrderFunc{}, tq.order...),\n\t\tinters: append([]Interceptor{}, tq.inters...),\n\t\tpredicates: append([]predicate.Team{}, tq.predicates...),\n\t\twithTasks: tq.withTasks.Clone(),\n\t\twithUsers: tq.withUsers.Clone(),\n\t\t// clone intermediate query.\n\t\tsql: tq.sql.Clone(),\n\t\tpath: tq.path,\n\t}\n}",
"func (t *OrderingTerm) Clone() *OrderingTerm {\n\tif t == nil {\n\t\treturn nil\n\t}\n\tother := *t\n\tother.X = CloneExpr(t.X)\n\treturn &other\n}",
"func (t *Transaction) Clone() *Transaction {\n\tclone := *t\n\tclone.res = t.res.Clone()\n\treturn &clone\n}",
"func (t *Texture) Clone() *Texture {\n\tt.p.Call(\"clone\")\n\treturn t\n}",
"func (v Int) Clone() Node {\n\treturn v\n}",
"func (c Container) Clone() Container {\n\tif n := len(c); n > 0 {\n\t\tvalues := make(Container, n, n)\n\t\tcopy(values, c)\n\t\treturn values\n\t}\n\treturn NewContainer()\n}",
"func (conf *ThrapConfig) Clone() *ThrapConfig {\n\tif conf == nil {\n\t\treturn nil\n\t}\n\n\tc := &ThrapConfig{\n\t\tVCS: make(map[string]*VCSConfig, len(conf.VCS)),\n\t\tOrchestrator: make(map[string]*OrchestratorConfig, len(conf.Orchestrator)),\n\t\tRegistry: make(map[string]*RegistryConfig, len(conf.Registry)),\n\t\tSecrets: make(map[string]*SecretsConfig, len(conf.Secrets)),\n\t}\n\n\tfor k, v := range conf.VCS {\n\t\tc.VCS[k] = v.Clone()\n\t}\n\tfor k, v := range conf.Orchestrator {\n\t\tc.Orchestrator[k] = v.Clone()\n\t}\n\tfor k, v := range conf.Registry {\n\t\tc.Registry[k] = v.Clone()\n\t}\n\tfor k, v := range conf.Secrets {\n\t\tc.Secrets[k] = v.Clone()\n\t}\n\n\treturn conf\n}",
"func (v Posit8x4) Clone() Posit8x4 {\n\tout := Posit8x4{impl: make([]Posit8, 4)}\n\tfor i, posit := range v.impl {\n\t\tout.impl[i] = posit.Clone()\n\t}\n\treturn out\n}",
"func (s *AlterViewStatement) Clone() *AlterViewStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\t// other.Columns = cloneIdents(s.Columns)\n\tother.Select = s.Select.Clone()\n\treturn &other\n}",
"func (s *DropViewStatement) Clone() *DropViewStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}",
"func (l LabelDef) Clone() (cL config.RextLabelDef, err error) {\n\tvar cNs config.RextNodeSolver\n\tif l.GetNodeSolver() != nil {\n\t\tif cNs, err = l.GetNodeSolver().Clone(); err != nil {\n\t\t\tlog.WithError(err).Errorln(\"can not clone node solver in label\")\n\t\t\treturn cL, err\n\t\t}\n\t}\n\tcL = NewLabelDef(l.name, cNs)\n\treturn cL, err\n}",
"func (env *Environment) Clone() ext.Environment {\n\tclone := NewEnvironment()\n\tclone.VM = env.VM.Clone()\n\tclone.VM.StopChan = make(chan func(), 1)\n\tclone.timeLimit = env.timeLimit\n\tclone.timeLimits = env.timeLimits\n\treturn clone\n}",
"func SafeClone(v PhysicalPlan) (_ PhysicalPlan, err error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = errors.Errorf(\"%v\", r)\n\t\t}\n\t}()\n\treturn v.Clone()\n}",
"func (v *VersionVector) Clone() *VersionVector {\n\tdots := make(Dots)\n\n\tv.l.RLock()\n\tfor actor, t := range v.dots {\n\t\tdots[actor] = t\n\t}\n\tv.l.RUnlock()\n\n\treturn &VersionVector{\n\t\tdots: dots,\n\t}\n}",
"func (b *Builder) Clone(index int) {\n\tsidx := len(b.stack) - 1 - index\n\t// Change ownership of the top stack value to the clone instruction.\n\tb.stack[sidx].idx = len(b.instructions)\n\tb.pushStack(b.stack[sidx].ty)\n\tb.instructions = append(b.instructions, asm.Clone{\n\t\tIndex: index,\n\t})\n}",
"func (p *PhysicalSelection) Clone() (PhysicalPlan, error) {\n\tcloned := new(PhysicalSelection)\n\tbase, err := p.basePhysicalPlan.cloneWithSelf(cloned)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcloned.basePhysicalPlan = *base\n\tcloned.Conditions = util.CloneExprs(p.Conditions)\n\treturn cloned, nil\n}",
"func (t *Transform) Copy() *Transform {\n\tt.access.RLock()\n\tcpy := &Transform{\n\t\tparent: t.parent,\n\t\tpos: t.pos,\n\t\trot: t.rot,\n\t\tscale: t.scale,\n\t\tshear: t.shear,\n\t}\n\tif t.built != nil {\n\t\tbuiltCpy := *t.built\n\t\tcpy.built = &builtCpy\n\t}\n\tif t.localToWorld != nil {\n\t\tltwCpy := *t.localToWorld\n\t\tcpy.localToWorld = <wCpy\n\t}\n\tif t.worldToLocal != nil {\n\t\twtlCpy := *t.worldToLocal\n\t\tcpy.worldToLocal = &wtlCpy\n\t}\n\tif t.quat != nil {\n\t\tquatCpy := *t.quat\n\t\tcpy.quat = &quatCpy\n\t}\n\tt.access.RUnlock()\n\treturn cpy\n}",
"func (lm *LevelMetadata) clone() LevelMetadata {\n\treturn LevelMetadata{\n\t\tlevel: lm.level,\n\t\ttree: lm.tree.Clone(),\n\t}\n}",
"func (wq *WidgetQuery) Clone() *WidgetQuery {\n\tif wq == nil {\n\t\treturn nil\n\t}\n\treturn &WidgetQuery{\n\t\tconfig: wq.config,\n\t\tlimit: wq.limit,\n\t\toffset: wq.offset,\n\t\torder: append([]OrderFunc{}, wq.order...),\n\t\tpredicates: append([]predicate.Widget{}, wq.predicates...),\n\t\twithType: wq.withType.Clone(),\n\t\t// clone intermediate query.\n\t\tsql: wq.sql.Clone(),\n\t\tpath: wq.path,\n\t}\n}",
"func (t *Analysis) Clone() *Analysis {\n\tshadow := new(Analysis)\n\n\tshadow.Status = t.Status\n\n\tshadow.Living = make([]life.Location, len(t.Living))\n\tcopy(shadow.Living, t.Living)\n\n\tshadow.Changes = make([]changedLocation, len(t.Changes))\n\tcopy(shadow.Changes, t.Changes)\n\n\treturn shadow\n}",
"func (w *XPubWallet) Clone() Wallet {\n\txpub, err := parseXPub(w.Meta.XPub())\n\tif err != nil {\n\t\tlogger.WithError(err).Panic(\"Clone parseXPub failed\")\n\t}\n\n\treturn &XPubWallet{\n\t\tMeta: w.Meta.clone(),\n\t\tEntries: w.Entries.clone(),\n\t\txpub: xpub,\n\t}\n}",
"func (m *VirtualRouter) Clone(into interface{}) (interface{}, error) {\n\tvar out *VirtualRouter\n\tvar ok bool\n\tif into == nil {\n\t\tout = &VirtualRouter{}\n\t} else {\n\t\tout, ok = into.(*VirtualRouter)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"mismatched object types\")\n\t\t}\n\t}\n\t*out = *(ref.DeepCopy(m).(*VirtualRouter))\n\treturn out, nil\n}",
"func (h *PrometheusInstrumentHandler) Clone() model.Part {\n\th0 := *h\n\treturn &h0\n}",
"func (s Section) Clone() Section {\n\tclone := s\n\tclone.Content = make([]Item, len(s.Content))\n\tfor i, item := range s.Content {\n\t\tclone.Content[i] = item\n\t\tif child, isPage := item.Value.(Page); isPage {\n\t\t\tclone.Content[i].Value = child.Clone()\n\t\t}\n\t}\n\treturn clone\n}",
"func (m Menu) Clone() Menu {\n\treturn append(Menu(nil), m...)\n}"
] | [
"0.63205683",
"0.59134096",
"0.5870232",
"0.57930756",
"0.57629037",
"0.57532215",
"0.5749004",
"0.5738241",
"0.57212216",
"0.56898284",
"0.5618096",
"0.56123704",
"0.5608078",
"0.5585105",
"0.5582553",
"0.55766046",
"0.5572645",
"0.5569114",
"0.5555808",
"0.5512746",
"0.5509048",
"0.54857713",
"0.54754907",
"0.5447611",
"0.5428428",
"0.5398798",
"0.53897285",
"0.5380573",
"0.53725076",
"0.5332701",
"0.53304625",
"0.5320563",
"0.5301875",
"0.5286824",
"0.5273401",
"0.5272623",
"0.5257006",
"0.5254104",
"0.5241664",
"0.52212536",
"0.52099985",
"0.5208984",
"0.52045536",
"0.52022195",
"0.5193276",
"0.5191461",
"0.5184856",
"0.51821196",
"0.51766616",
"0.5154336",
"0.51510584",
"0.5145258",
"0.5126335",
"0.51242685",
"0.5123656",
"0.51187396",
"0.51179475",
"0.5114202",
"0.5111775",
"0.5106948",
"0.510596",
"0.50990003",
"0.5085309",
"0.50837755",
"0.5082292",
"0.50682247",
"0.5062879",
"0.50573826",
"0.50562644",
"0.5051051",
"0.5042378",
"0.50390154",
"0.5033749",
"0.50299466",
"0.50278413",
"0.5014897",
"0.5013278",
"0.50080884",
"0.49944496",
"0.49915272",
"0.49907395",
"0.49822733",
"0.49821818",
"0.49788922",
"0.4970108",
"0.4954307",
"0.49461117",
"0.4944665",
"0.49442548",
"0.49411273",
"0.49385718",
"0.49315497",
"0.49294412",
"0.49273285",
"0.4926956",
"0.4924103",
"0.49214095",
"0.49154657",
"0.49086714",
"0.48978373"
] | 0.7969468 | 0 |
ReadPartials parses the given files into the TRoot instance for gathering things like the toplevel layout, navigation elements, etc. The list of files is relative to the TRoot's Path. Returns on the first error encountered, if any. | ReadPartials парсит заданные файлы в экземпляр TRoot для сбора элементов вроде верхнего уровня разметки, навигационных элементов и т.д. Список файлов относится к пути TRoot. Возвращает при первом возникшем ошибке, если таковая есть. | func (t *TRoot) ReadPartials(files ...string) error {
for _, file := range files {
var _, err = t.template.ParseFiles(filepath.Join(t.Path, file))
if err != nil {
return err
}
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (t *TRoot) MustReadPartials(files ...string) {\n\tvar err = t.ReadPartials(files...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}",
"func PartialLatexFiles(path string) {\n\t//read all files in directory sections\n\t// this must be settleable and discoverable\n\tvar counter int\n\n\ttype Content struct {\n\t\tfileName string\n\t\tContents string\n\t}\n\n\tvar contentsList map[string]string\n\tcontentsList = make(map[string]string)\n\n\tList := new(Content)\n\n\t//append(s []T, x ...T)\n\tfiles, _ := ioutil.ReadDir(\"./sections\")\n\tfor _, f := range files {\n\t\tif f.Name() == \"main.tex\" {\n\t\t\tfmt.Println(\"We found a main file\")\n\t\t}\n\t\tfmt.Println(f.Name())\n\t\tS1, err := ioutil.ReadFile(\"./sections/\" + f.Name())\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\n\t\tList.fileName = f.Name()\n\t\tList.Contents = string(S1)\n\t\tfmt.Println(string(S1))\n\n\t\tcontentsList[f.Name()] = string(S1)\n\t\tcounter++\n\n\t}\n\tfmt.Println(\"TEST\", contentsList[\"main.tex\"])\n\t//inFile, _ := ioutil.ReadFile(path)\n\t//fmt.Println(\"CONCATENATION:\", Y.contents)\n\t//fmt.Printf(\"Found %v files\", counter)\n}",
"func loadPartials() (map[string]string, error) {\n\tg := make(map[string]string)\n\t//load resources from paths\n\tfor key, path := range paths {\n\t\tbody, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tg[key] = string(body)\n\t}\n\treturn g, nil\n}",
"func Read(files []string) (documents []Document) {\n\n\tfor _, fp := range files {\n\t\tf, err := ioutil.ReadFile(fp)\n\n\t\tif err != nil {\n\t\t\tfmt.Println(\"There was an error reading the file\", err)\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\tyamlDocumentsInFile := bytes.SplitN(f, []byte(\"---\\n\"), -1)\n\t\t//fmt.Printf(\"%q\\n\", yamlDocumentsInFile)\n\n\t\tif (len(yamlDocumentsInFile) % 2) != 0 {\n\t\t\tfmt.Println(\"File \", fp, \" has an odd number of documents. File must consist of pairs of preamble and template documents, in order.\")\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\tfor i := 0; i < len(yamlDocumentsInFile); i += 2 {\n\n\t\t\tdoc := Document{}\n\t\t\terr = yaml.Unmarshal(yamlDocumentsInFile[i], &doc.Preamble)\n\t\t\tdoc.Template = string(yamlDocumentsInFile[i+1])\n\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"There was an error unmarshaling yaml\", err)\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\n\t\t\t//fmt.Printf(\"%+v\\n\", doc)\n\n\t\t\t// Perform type conversions to handle lists of maps or single map\n\t\t\tswitch p := doc.Preamble.ReadParams.(type) {\n\t\t\tcase []interface{}:\n\t\t\t\tfor _, params := range p {\n\n\t\t\t\t\t// We cannot derive a map[string]inteface{} from interface{} directly\n\t\t\t\t\tparamsMap, _ := params.(map[interface{}]interface{})\n\n\t\t\t\t\ttParams := typeCastMap(paramsMap)\n\n\t\t\t\t\tdocument := Document{}\n\t\t\t\t\tdocument.Preamble.Params = tParams\n\t\t\t\t\tdocument.Template = doc.Template\n\n\t\t\t\t\tdocuments = append(documents, document)\n\t\t\t\t}\n\t\t\tcase interface{}:\n\t\t\t\t// We cannot derive a map[string]inteface{} from interface{} directly\n\t\t\t\ttParams := p.(map[interface{}]interface{})\n\n\t\t\t\tdoc.Preamble.Params = typeCastMap(tParams)\n\n\t\t\t\tdocuments = append(documents, doc)\n\t\t\tdefault:\n\t\t\t\tfmt.Printf(\"I don't know how to deal with type %T %+v!\\n\", p, p)\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\treturn\n}",
"func ReadPartialReport(scope beam.Scope, partialReportFile string) beam.PCollection {\n\tallFiles := ioutils.AddStrInPath(partialReportFile, \"*\")\n\tlines := textio.ReadSdf(scope, allFiles)\n\treturn beam.ParDo(scope, &parseEncryptedPartialReportFn{}, lines)\n}",
"func LoadTemplates(relativePath string, pOpt *ParseOptions) {\n\t// Initializes the template map\n\ttemplates = make(map[string]*template.Template)\n\n\t// Save Path to Base file\n\tpOpt.BasePath = relativePath\n\n\t// Check if every option is set\n\tif pOpt.BaseName == \"\" {\n\t\tpOpt.BaseName = DefaultParseOptions.BaseName\n\t}\n\n\tif pOpt.Delimiter == \"\" {\n\t\tpOpt.Delimiter = DefaultParseOptions.Delimiter\n\t}\n\n\tif pOpt.Ext == \"\" {\n\t\tpOpt.Ext = DefaultParseOptions.Ext\n\t}\n\n\tif pOpt.NonBaseFolder == \"\" {\n\t\tpOpt.NonBaseFolder = DefaultParseOptions.NonBaseFolder\n\t}\n\n\t// Start checking the main dir of the views\n\tcheckDir(relativePath, pOpt, false)\n}",
"func (s *server) loadTemplates() error {\n includePath := \"templates/\"\n layoutPath := \"templates/layout/\"\n\n if s.templates == nil {\n s.templates = make(map[string]*template.Template)\n }\n\n layoutFiles, err := filepath.Glob(layoutPath + \"*.tmpl\")\n if err != nil {\n log.Println(\"failed to get included templates\")\n return err\n }\n\n includeFiles, err := filepath.Glob(includePath + \"*.tmpl\")\n if err != nil {\n log.Println(\"failed to get layout templates\")\n return err\n }\n\n mainTemplate := template.New(\"main\")\n mainTemplate, err = mainTemplate.Parse(mainTmpl)\n if err != nil {\n log.Println(\"failed to parse main template\")\n return err\n }\n\n for _, file := range includeFiles {\n fileName := filepath.Base(file)\n files := append(layoutFiles, file)\n s.templates[fileName], err = mainTemplate.Clone()\n if err != nil {\n return err\n }\n s.templates[fileName] = template.Must(\n s.templates[fileName].ParseFiles(files...))\n }\n\n s.bufpool = bpool.NewBufferPool(64)\n return nil\n}",
"func Load(pathPrefix string) {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttpl = template.New(\"index\").Funcs(funcs)\n\ttpl = template.Must(tpl.ParseGlob(filepath.Join(cwd, pathPrefix, templatePath, \"*html\")))\n\ttpl = template.Must(tpl.ParseGlob(filepath.Join(cwd, pathPrefix, partialPath, \"*.html\")))\n}",
"func (conf *Config) ReadSpecs() error {\n\tconf.specs = make([]*tomlSpec, 0, len(conf.specFiles))\n\tconf.indexes = make(map[string]*indexSpec, len(conf.specFiles))\n\tfor _, path := range conf.specFiles {\n\t\tspec, err := ReadSpec(path)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"couldn't read spec '%s': %v\", path, err)\n\t\t}\n\t\t// here is where we put overrides like setting the prefix\n\t\t// from command-line parameters before doing more validation and\n\t\t// populating inferred fields.\n\t\tif spec.Prefix == \"\" {\n\t\t\tspec.Prefix = conf.Prefix\n\t\t}\n\t\terr = spec.CleanupIndexes(conf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconf.specs = append(conf.specs, spec)\n\t}\n\tfor _, spec := range conf.specs {\n\t\terr := spec.CleanupWorkloads(conf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (p *Parse) ParseFiles() (e error) {\n\tvar wg sync.WaitGroup\n\tfor _, fname := range p.Files {\n\t\twg.Add(1)\n\t\tgo func(fname string) {\n\t\t\tdefer wg.Done()\n\t\t\tfset := token.NewFileSet() // positions are relative to fset\n\n\t\t\t// Parse the file given in arguments\n\t\t\tf, err := parser.ParseFile(fset, fname, nil, parser.ParseComments)\n\t\t\tif err != nil {\n\t\t\t\te = err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbs, err := ioutil.ReadFile(fname)\n\t\t\tif err != nil {\n\t\t\t\te = err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstructMap, baseMap := p.parseTypes(f)\n\t\t\t// Parse structs\n\t\t\tstructKeys := make([]string, 0, len(structMap))\n\t\t\tfor k := range structMap {\n\t\t\t\tstructKeys = append(structKeys, k)\n\t\t\t}\n\t\t\tsort.Strings(structKeys)\n\t\t\tp.Lock()\n\t\t\tfor _, structName := range structKeys {\n\t\t\t\tp.mappings[structName] = p.parseStruct(structMap[structName], structName, bs)\n\t\t\t}\n\t\t\tp.Unlock()\n\t\t\tbaseKeys := make([]string, 0, len(baseMap))\n\t\t\tfor k := range baseMap {\n\t\t\t\tbaseKeys = append(baseKeys, k)\n\t\t\t}\n\t\t\tsort.Strings(baseKeys)\n\t\t\tp.Lock()\n\t\t\tfor _, baseName := range baseKeys {\n\t\t\t\tp.baseMappings[baseName] = field{\n\t\t\t\t\ttyp: baseMap[baseName],\n\t\t\t\t\tname: baseName,\n\t\t\t\t}\n\t\t\t}\n\t\t\tp.Unlock()\n\t\t}(fname)\n\t}\n\twg.Wait()\n\treturn nil\n}",
"func (contrl *MailController) LoadTemplateFiles(filenames ...string) {\n\tcontrl.HTMLTemplate = template.Must(template.ParseFiles(filenames...))\n}",
"func ReadAll() (p *Page, err error) {\n\tdCmn := config.SourceDir + sep + \"pages\" + sep + \"common\" + sep\n\tdOs := config.SourceDir + sep + \"pages\" + sep + config.OSName() + sep\n\tpaths := []string{dCmn, dOs}\n\tp = &Page{Name: \"Search All\"}\n\tp.Tips = make([]*Tip, 0)\n\tfor _, pt := range paths {\n\t\tfiles, err := ioutil.ReadDir(pt)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tfor _, f := range files {\n\t\t\tif strings.HasSuffix(f.Name(), \".md\") {\n\t\t\t\tpage, err := Read([]string{f.Name()[:len(f.Name())-3]})\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tp.Tips = append(p.Tips, page.Tips...)\n\t\t\t}\n\t\t}\n\t}\n\treturn p, nil\n}",
"func (t *Tmpl) LoadTemplates(dir string) error {\n\t// Lock mutex\n\tt.rw.Lock()\n\tdefer t.rw.Unlock()\n\n\t// Walk over the views directory\n\terr := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {\n\n\t\t// Check if file has .html extension\n\t\tif strings.HasSuffix(info.Name(), \".html\") {\n\t\t\tif t.Tmpl, err = t.Tmpl.ParseFiles(path); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn err\n}",
"func (t *Tmpl) LoadTemplates(dir string) error {\n\t// Lock mutex\n\tt.rw.Lock()\n\tdefer t.rw.Unlock()\n\n\t// Walk over the views directory\n\terr := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {\n\n\t\t// Check for walking error\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Check if file has .html extension\n\t\tif strings.HasSuffix(info.Name(), \".html\") {\n\t\t\tif t.Tmpl, err = t.Tmpl.ParseFiles(path); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn err\n}",
"func loadTemplates() {\n\n\tfmt.Println(\"About to load templates\")\n\n\t// get layouts\n\tlayouts, err := filepath.Glob(\"templates/layouts/*.layout\")\n\tpanicOnError(err)\n\n\t// get list of main pages\n\tpages, err := filepath.Glob(\"templates/pages/*.html\")\n\tpanicOnError(err)\n\n\tfor _, page := range pages {\n\t\tfiles := append(layouts, page)\n\t\ttemplateName := filepath.Base(page)\n\n\t\tnewTemplate := template.Must(template.ParseFiles(files...))\n\t\tnewTemplate.Option(\"missingkey=default\")\n\n\t\tappTemplates[templateName] = newTemplate\n\t}\n\n\t// loaded templates\n\tfor file, _ := range appTemplates {\n\t\tfmt.Printf(\"Loaded Template: %s\\n\", file)\n\t\tfmt.Printf(\"loaded: %s\\n\", file)\n\t}\n\n}",
"func ParseTemplates(path string) *template.Template {\n\treturn template.Must(template.ParseFiles(\n\t\t\"web/templates/partial/head.html\",\n\t\t\"web/templates/partial/header.html\",\n\t\t\"web/templates/partial/footer.html\",\n\t\tpath,\n\t\t\"web/templates/base.html\",\n\t))\n}",
"func loadInitialFiles(t *testing.T, data dataSection) int32 {\n\tfilesDocs := make([]interface{}, 0, len(data.Files))\n\tchunksDocs := make([]interface{}, 0, len(data.Chunks))\n\tvar chunkSize int32\n\n\tfor _, v := range data.Files {\n\t\tdocBytes, err := v.MarshalJSON()\n\t\ttesthelpers.RequireNil(t, err, \"error converting raw message to bytes: %s\", err)\n\t\tdoc := bsonx.Doc{}\n\t\terr = bson.UnmarshalExtJSON(docBytes, false, &doc)\n\t\ttesthelpers.RequireNil(t, err, \"error creating file document: %s\", err)\n\n\t\t// convert length from int32 to int64\n\t\tif length, err := doc.LookupErr(\"length\"); err == nil {\n\t\t\tdoc = doc.Delete(\"length\")\n\t\t\tdoc = doc.Append(\"length\", bsonx.Int64(int64(length.Int32())))\n\t\t}\n\t\tif cs, err := doc.LookupErr(\"chunkSize\"); err == nil {\n\t\t\tchunkSize = cs.Int32()\n\t\t}\n\n\t\tfilesDocs = append(filesDocs, doc)\n\t}\n\n\tfor _, v := range data.Chunks {\n\t\tdocBytes, err := v.MarshalJSON()\n\t\ttesthelpers.RequireNil(t, err, \"error converting raw message to bytes: %s\", err)\n\t\tdoc := bsonx.Doc{}\n\t\terr = bson.UnmarshalExtJSON(docBytes, false, &doc)\n\t\ttesthelpers.RequireNil(t, err, \"error creating file document: %s\", err)\n\n\t\t// convert data $hex to binary value\n\t\tif hexStr, err := doc.LookupErr(\"data\", \"$hex\"); err == nil {\n\t\t\thexBytes := convertHexToBytes(t, hexStr.StringValue())\n\t\t\tdoc = doc.Delete(\"data\")\n\t\t\tdoc = append(doc, bsonx.Elem{\"data\", bsonx.Binary(0x00, hexBytes)})\n\t\t}\n\n\t\t// convert n from int64 to int32\n\t\tif n, err := doc.LookupErr(\"n\"); err == nil {\n\t\t\tdoc = doc.Delete(\"n\")\n\t\t\tdoc = append(doc, bsonx.Elem{\"n\", bsonx.Int32(n.Int32())})\n\t\t}\n\n\t\tchunksDocs = append(chunksDocs, doc)\n\t}\n\n\tif len(filesDocs) > 0 {\n\t\t_, err := files.InsertMany(ctx, filesDocs)\n\t\ttesthelpers.RequireNil(t, err, \"error inserting into files: %s\", err)\n\t\t_, err = expectedFiles.InsertMany(ctx, filesDocs)\n\t\ttesthelpers.RequireNil(t, err, 
\"error inserting into expected files: %s\", err)\n\t}\n\n\tif len(chunksDocs) > 0 {\n\t\t_, err := chunks.InsertMany(ctx, chunksDocs)\n\t\ttesthelpers.RequireNil(t, err, \"error inserting into chunks: %s\", err)\n\t\t_, err = expectedChunks.InsertMany(ctx, chunksDocs)\n\t\ttesthelpers.RequireNil(t, err, \"error inserting into expected chunks: %s\", err)\n\t}\n\n\treturn chunkSize\n}",
"func fileTests() map[string]struct {\n\tsrc string\n\ttree *ast.Tree\n} {\n\tvar render = ast.NewRender(p(3, 7, 29, 61), \"/partial2.html\")\n\trender.Tree = ast.NewTree(\"\", []ast.Node{\n\t\tast.NewText(p(1, 1, 0, 4), []byte(\"<div>\"), ast.Cut{}),\n\t\tast.NewShow(p(1, 6, 5, 17), []ast.Expression{ast.NewIdentifier(p(1, 9, 8, 14), \"content\")}, ast.ContextHTML),\n\t\tast.NewText(p(1, 19, 18, 23), []byte(\"</div>\"), ast.Cut{}),\n\t}, ast.FormatHTML)\n\treturn map[string]struct {\n\t\tsrc string\n\t\ttree *ast.Tree\n\t}{\n\t\t\"/simple.html\": {\n\t\t\t\"<!DOCTYPE html>\\n<html>\\n<head><title>{{ title }}</title></head>\\n<body>{{ content }}</body>\\n</html>\",\n\t\t\tast.NewTree(\"\", []ast.Node{\n\t\t\t\tast.NewText(p(1, 1, 0, 35), []byte(\"<!DOCTYPE html>\\n<html>\\n<head><title>\"), ast.Cut{}),\n\t\t\t\tast.NewShow(p(3, 14, 36, 46), []ast.Expression{ast.NewIdentifier(p(3, 17, 39, 43), \"title\")}, ast.ContextHTML),\n\t\t\t\tast.NewText(p(3, 25, 47, 68), []byte(\"</title></head>\\n<body>\"), ast.Cut{}),\n\t\t\t\tast.NewShow(p(4, 7, 69, 81), []ast.Expression{ast.NewIdentifier(p(4, 10, 72, 78), \"content\")}, ast.ContextHTML),\n\t\t\t\tast.NewText(p(4, 20, 82, 96), []byte(\"</body>\\n</html>\"), ast.Cut{}),\n\t\t\t}, ast.FormatHTML),\n\t\t},\n\t\t\"/simple2.html\": {\n\t\t\t\"<!DOCTYPE html>\\n<html>\\n<body>{{ render \\\"/partial2.html\\\" }}</body>\\n</html>\",\n\t\t\tast.NewTree(\"\", []ast.Node{\n\t\t\t\tast.NewText(p(1, 1, 0, 28), []byte(\"<!DOCTYPE html>\\n<html>\\n<body>\"), ast.Cut{}),\n\t\t\t\trender,\n\t\t\t\tast.NewText(p(3, 37, 59, 73), []byte(\"</body>\\n</html>\"), ast.Cut{}),\n\t\t\t}, ast.FormatHTML),\n\t\t},\n\t\t\"/partial2.html\": {\n\t\t\t\"<div>{{ content }}</div>\",\n\t\t\tnil,\n\t\t},\n\t}\n}",
"func (t *Pongo2Engine) Load() (err error) {\n\n\terr = recoverTemplateNotFound()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// time point\n\tt.loadedAt = time.Now()\n\n\t// unnamed root template\n\t//var root = template.New(\"\")\n\n\tvar walkFunc = func(path string, info os.FileInfo, err error) (_ error) {\n\n\t\t// handle walking error if any\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// skip all except regular files\n\t\t// TODO (kostyarin): follow symlinks\n\t\tif !info.Mode().IsRegular() {\n\t\t\treturn\n\t\t}\n\n\t\t// filter by extension\n\t\tif filepath.Ext(path) != t.opts.ext {\n\t\t\treturn\n\t\t}\n\n\t\t// get relative path\n\t\tvar rel string\n\t\tif rel, err = filepath.Rel(t.opts.templateDir, path); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// name of a template is its relative path\n\t\t// without extension\n\t\trel = strings.TrimSuffix(rel, t.opts.ext)\n\t\ttplExample := pongo2.Must(pongo2.FromFile(path))\n\t\tt.tmplMap[rel] = tplExample\n\t\treturn err\n\t}\n\n\tif err = filepath.Walk(t.opts.templateDir, walkFunc); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}",
"func (ui *GUI) loadTemplates() error {\n\tvar templates []string\n\tfindTemplate := func(path string, f os.FileInfo, err error) error {\n\t\t// If path doesn't exist, or other error with path, return error so\n\t\t// that Walk will quit and return the error to the caller.\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !f.IsDir() && strings.HasSuffix(f.Name(), \".html\") {\n\t\t\ttemplates = append(templates, path)\n\t\t}\n\t\treturn nil\n\t}\n\n\terr := filepath.Walk(ui.cfg.GUIDir, findTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thttpTemplates := template.New(\"template\").Funcs(template.FuncMap{\n\t\t\"hashString\": util.HashString,\n\t\t\"upper\": strings.ToUpper,\n\t\t\"percentString\": util.PercentString,\n\t})\n\n\t// Since template.Must panics with non-nil error, it is much more\n\t// informative to pass the error to the caller to log it and exit\n\t// gracefully.\n\thttpTemplates, err = httpTemplates.ParseFiles(templates...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tui.templates = template.Must(httpTemplates, nil)\n\treturn nil\n}",
"func (graph *Graph) ReadPages(count int, pathToTemplates string) ([]goquery.Document, error) {\n\tvar docs []goquery.Document\n\terr := filepath.Walk(pathToTemplates,\n\t\tfunc(path string, info os.FileInfo, err error) error {\n\t\t\tif info.IsDir() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tdoc, err := graph.createDocument(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdocs = append(docs, *doc)\n\t\t\treturn err\n\t\t})\n\tdocs = graph.correctDocsLength(count, docs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn docs, nil\n}",
"func loadRelTemplates(ps []string) (*template.Template, error) {\n\tif len(ps) == 0 {\n\t\treturn nil, errors.New(\"muta-template: At least one path is required\")\n\t}\n\n\t// TODO: Intelligently assign base to the most base path\n\tbase := filepath.Dir(ps[0])\n\n\tvar t *template.Template\n\tvar lt *template.Template\n\tfor _, tmpl := range ps {\n\t\ttmplName, err := filepath.Rel(base, tmpl)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif t == nil {\n\t\t\tt = template.New(tmplName)\n\t\t\tlt = t\n\t\t} else {\n\t\t\tlt = t.New(tmplName)\n\t\t}\n\n\t\tb, err := ioutil.ReadFile(tmpl)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t_, err = lt.Parse(string(b))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn t, nil\n}",
"func (st *Stemplate) load() error {\n\n\ttemplates, terr := filepath.Glob(st.templatesDir + \"*.tmpl\")\n\tif terr != nil {\n\t\treturn terr\n\t}\n\n\tcontents, err := filepath.Glob(st.templatesDir + \"*.html\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, c := range contents {\n\t\tcurrent := append(templates, c)\n\t\tst.templates[filepath.Base(c)] = template.Must(template.ParseFiles(current...))\n\t}\n\n\treturn nil\n\n}",
"func (t *Template) ParseFiles() (*Template, error) {\n\t_, err := t.Template.ParseFiles(t.parseFiles...)\n\treturn t, err\n}",
"func (v *VTemplates) Load(name string, ext string, fileList, delims []string) (*template.Template, error) {\n\tif len(fileList) == 0 {\n\t\treturn nil, fmt.Errorf(\"Empty File Lists\")\n\t}\n\n\tvar tl *template.Template\n\tvar ok bool\n\n\tv.rw.RLock()\n\ttl, ok = v.loaded[name]\n\tv.rw.RUnlock()\n\n\tif ok {\n\t\tif !v.Debug {\n\t\t\treturn tl, nil\n\t\t}\n\t}\n\n\tvar tree = template.New(name)\n\n\t//check if the delimiter array has content if so,set them\n\tif len(delims) > 0 && len(delims) >= 2 {\n\t\ttree.Delims(delims[0], delims[1])\n\t}\n\n\tfor _, fp := range fileList {\n\t\t//is it a file ? if no error then use it else try a directory\n\t\tvf, err := v.VDir.GetFile(fp)\n\n\t\tif err == nil {\n\t\t\t_, err = LoadVirtualTemplateFile(vf, tree)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t} else {\n\t\t\tvd, err := v.VDir.GetDir(fp)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\terr = LoadVirtualTemplateDir(tree, vd, name, ext)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tv.rw.Lock()\n\tv.loaded[name] = tree\n\tv.rw.Unlock()\n\n\treturn tree, nil\n}",
"func (t *Template) ParseFiles() (*Template, error) {\n\n\tif err := genParseFileList(t); err != nil {\n\t\treturn t, err\n\t}\n\t_, err := t.Template.ParseFiles(t.parseFiles...)\n\n\treturn t, err\n}",
"func (t *TemplMerger) LoadTemplates(files []string) error {\n\tm := make(map[string][]byte)\n\tfor _, file := range files {\n\t\t// check the file is a template\n\t\tif !(filepath.Ext(file) == \".tem\" || filepath.Ext(file) == \".art\") {\n\t\t\treturn fmt.Errorf(\"file '%s' is not a template file, artisan templates are either .tem or .art files\\n\", file)\n\t\t}\n\t\t// ensure the template path is absolute\n\t\tpath, err := core.AbsPath(file)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"path '%s' cannot be converted to absolute path: %s\\n\", file, err)\n\t\t}\n\t\t// read the file content\n\t\tbytes, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot read file %s: %s\\n\", file, err)\n\t\t}\n\t\tm[path] = t.transpileOperators(bytes)\n\t}\n\tt.template = m\n\treturn nil\n}",
"func (feeder *FileFeed) Read(files []string) ([]entity.Input, error) {\n\tinputs := make([]entity.Input, len(files))\n\tfor i, file := range files {\n\t\tlogger.Info(fmt.Sprintf(\"reading fixture: %s\", file))\n\t\tf, err := os.Open(file)\n\t\tif err != nil {\n\t\t\treturn inputs, err\n\t\t}\n\t\text := filepath.Ext(file)\n\t\tinput := entity.Input{\n\t\t\tFilename: extractFilename(file),\n\t\t\tType: ext,\n\t\t\tData: f,\n\t\t}\n\t\tinputs[i] = input\n\t}\n\treturn inputs, nil\n}",
"func (f *FileStore) Load(collection string) ([]string, error) {\n\titems := []string{}\n\tbase := filepath.Join(f.Base, collection)\n\tbase, err := filepath.Abs(base)\n\tif err != nil {\n\t\tlog.Println(\"Error getting abs path to\", collection, err)\n\t\treturn nil, err\n\t}\n\tlog.Println(\"Loading collection\", collection, \"from\", base)\n\tfilepath.Walk(base, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\t// Ignore\n\t\t\tlog.Println(\"store ignoring walk error\", err)\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\t/*\n\t\t\tif info.IsDir() {\n\t\t\t\tlog.Println(\"skipping dir\", path)\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t*/\n\t\tif filepath.Ext(path) == \".txt\" {\n\t\t\tlog.Println(\"loading item\", path)\n\t\t\ttext, err := ioutil.ReadFile(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\titems = append(items, string(text))\n\t\t} else {\n\t\t\tlog.Println(\"skipping non item\", path, filepath.Ext(path))\n\t\t}\n\t\treturn nil\n\t})\n\treturn items, nil\n\n}",
"func FindTemplates(root, base string) (map[string]TemplateLoader, error) {\n\ttemplates := make(map[string]TemplateLoader)\n\trootBase := filepath.Join(root, base)\n\terr := filepath.Walk(rootBase, func(path string, fi os.FileInfo, err error) error {\n\t\tif fi.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\text := filepath.Ext(path)\n\t\tif ext != \".tpl\" {\n\t\t\treturn nil\n\t\t}\n\n\t\trelative, err := filepath.Rel(root, path)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"could not find relative path to base root: %s\", rootBase)\n\t\t}\n\n\t\trelative = strings.TrimLeft(relative, string(os.PathSeparator))\n\t\ttemplates[relative] = FileLoader(path)\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn templates, nil\n}",
"func (r *TemplateFileSearcher) Search(precise bool, terms ...string) (ComponentMatches, []error) {\n\tmatches := ComponentMatches{}\n\tvar errs []error\n\tfor _, term := range terms {\n\t\tif term == \"__templatefile_fail\" {\n\t\t\terrs = append(errs, fmt.Errorf(\"unable to find the specified template file: %s\", term))\n\t\t\tcontinue\n\t\t}\n\n\t\tvar isSingleItemImplied bool\n\t\tobj, err := r.Builder.\n\t\t\tWithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).\n\t\t\tNamespaceParam(r.Namespace).RequireNamespace().\n\t\t\tFilenameParam(false, &resource.FilenameOptions{Recursive: false, Filenames: terms}).\n\t\t\tDo().\n\t\t\tIntoSingleItemImplied(&isSingleItemImplied).\n\t\t\tObject()\n\n\t\tif err != nil {\n\t\t\tswitch {\n\t\t\t// FIXME: remove below condition as soon as we land https://github.com/kubernetes/kubernetes/pull/109488\n\t\t\tcase strings.Contains(err.Error(), \"is not valid: no match\") && strings.Contains(err.Error(), \"pattern\"):\n\t\t\t\tcontinue\n\t\t\tcase strings.Contains(err.Error(), \"does not exist\") && strings.Contains(err.Error(), \"the path\"):\n\t\t\t\tcontinue\n\t\t\tcase strings.Contains(err.Error(), \"not a directory\") && strings.Contains(err.Error(), \"the path\"):\n\t\t\t\tcontinue\n\t\t\tdefault:\n\t\t\t\tif syntaxErr, ok := err.(*json.SyntaxError); ok {\n\t\t\t\t\terr = fmt.Errorf(\"at offset %d: %v\", syntaxErr.Offset, err)\n\t\t\t\t}\n\t\t\t\terrs = append(errs, fmt.Errorf(\"unable to load template file %q: %v\", term, err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif list, isList := obj.(*corev1.List); isList && !isSingleItemImplied {\n\t\t\tif len(list.Items) == 1 {\n\t\t\t\tobj = list.Items[0].Object\n\t\t\t\tisSingleItemImplied = true\n\t\t\t}\n\t\t}\n\n\t\tif !isSingleItemImplied {\n\t\t\terrs = append(errs, fmt.Errorf(\"there is more than one object in %q\", term))\n\t\t\tcontinue\n\t\t}\n\n\t\ttemplate, ok := obj.(*templatev1.Template)\n\t\tif !ok {\n\t\t\terrs = append(errs, 
fmt.Errorf(\"object in %q is not a template\", term))\n\t\t\tcontinue\n\t\t}\n\n\t\tmatches = append(matches, &ComponentMatch{\n\t\t\tValue: term,\n\t\t\tArgument: fmt.Sprintf(\"--file=%q\", template.Name),\n\t\t\tName: template.Name,\n\t\t\tDescription: fmt.Sprintf(\"Template file %s\", term),\n\t\t\tScore: 0,\n\t\t\tTemplate: template,\n\t\t})\n\t}\n\n\treturn matches, errs\n}",
"func templatesPartialsSpinnerTmpl() (*asset, error) {\n\tpath := \"/Users/ravipradhan/Documents/personal-projects/test-modules/render/assets/templates/partials/spinner.tmpl\"\n\tname := \"templates/partials/spinner.tmpl\"\n\tbytes, err := bindataRead(path, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error reading asset info %s at %s: %w\", name, path, err)\n\t}\n\n\ta := &asset{bytes: bytes, info: fi}\n\treturn a, err\n}",
"func readFromFile() error {\n\tvar errlist []error\n\tif err := readByJSON(userPath, &userList); err != nil {\n\t\terrlist = append(errlist, err)\n\t}\n\tif err := readByJSON(meetingPath, &meetingList); err != nil {\n\t\terrlist = append(errlist, err)\n\t}\n\tif err := readByJSON(curUserPath, &curUser); err != nil {\n\t\terrlist = append(errlist, err)\n\t}\n\tswitch len(errlist) {\n\tcase 1:\n\t\treturn errlist[0]\n\tcase 2:\n\t\treturn errors.New(errlist[0].Error() + \"\\n\" + errlist[1].Error())\n\tcase 3:\n\t\treturn errors.New(errlist[0].Error() + \"\\n\" + errlist[1].Error() + \"\\n\" + errlist[2].Error())\n\tdefault:\n\t\treturn nil\n\t}\n}",
"func DecryptPartials(decInfo *DecInfo, p kyber.Scalar) *AuthInfo {\n\tfmt.Println(\"All partials received, start decrypting\")\n\tshares := decInfo.shares\n\tC := decInfo.C\n\tA := decInfo.A\n\tt := int(math.Ceil(float64(len(shares)/2) + 1))\n\tR, err := share.RecoverCommit(suite, shares, t, len(shares))\n\tif err != nil {\n\t\tLog(err)\n\t\treturn nil\n\t}\n\tdecPoint := suite.Point().Sub(\n\t\tC,\n\t\tsuite.Point().Sub(\n\t\t\tR,\n\t\t\tsuite.Point().Mul(p, A),\n\t\t),\n\t)\n\tdecKey, err := decPoint.Data()\n\t//fmt.Println(\"Recovered patient key: \", decKey[:24])\n\tif err != nil {\n\t\tLog(err)\n\t\treturn nil\n\t}\n\tauthInfo := new(AuthInfo)\n\t//fmt.Println(\"Decrypting info with iv \", decInfo.iv)\n\tdata := DecryptInfo(decKey[:24], decInfo.iv, decInfo.encInfo)\n\tjson.Unmarshal(data, authInfo)\n\tfmt.Printf(\"Recovered file info: %v\\n\", authInfo)\n\treturn authInfo\n}",
"func LoadTemplates(dir string, basefile string) (*Templates, error) {\n\tt := &Templates{BaseDir: dir, BaseFile: basefile}\n\treturn t, t.Scan()\n}",
"func (t *Templates) Parse(dir string) (*Templates, error) {\n\tt.Dir = dir\n\tif err := filepath.Walk(dir, t.parseFile); err != nil {\n\t\treturn t, err\n\t}\n\n\tif len(t.Views) == 0 {\n\t\treturn t, fmt.Errorf(\"no views were found\")\n\t}\n\n\t// create view templates\n\tfor name, tmpl := range t.Views {\n\t\tvar err error\n\t\tt.Templates[name], err = template.New(name).Parse(tmpl)\n\t\tif err != nil {\n\t\t\treturn t, err\n\t\t}\n\t}\n\n\t// add partials to the view templates\n\tfor _, baseTmpl := range t.Templates {\n\t\tfor name, tmpl := range t.Partials {\n\t\t\tvar err error\n\t\t\tbaseTmpl, err = baseTmpl.New(name).Parse(tmpl)\n\t\t\tif err != nil {\n\t\t\t\treturn t, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn t, nil\n}",
"func ParseFiles(log logr.Logger, files map[string]string) ([]*unstructured.Unstructured, error) {\n\tobjects := make([]*unstructured.Unstructured, 0)\n\tfor name, content := range files {\n\t\tif _, file := filepath.Split(name); file == \"NOTES.txt\" {\n\t\t\tcontinue\n\t\t}\n\t\tdecodedObjects, err := DecodeObjects(log, name, []byte(content))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to decode files for %q: %w\", name, err)\n\t\t}\n\t\tobjects = append(objects, decodedObjects...)\n\t}\n\treturn objects, nil\n}",
"func (f *factory) loadPages() error {\n\tfileNames, err := listDirFunc(f.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(fileNames) == 0 {\n\t\t// page file not exist\n\t\treturn nil\n\t}\n\n\tfor _, fn := range fileNames {\n\t\tseqNumStr := fn[0 : strings.Index(fn, pageSuffix)-1]\n\t\tseq, err := strconv.ParseInt(seqNumStr, 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = f.AcquirePage(seq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (application *Application) LoadTemplates() error {\n\tvar templates []string\n\n\t// Create function to collect our template files\n\tfn := func(path string, f os.FileInfo, err error) error {\n\t\tif f.IsDir() != true && strings.HasSuffix(f.Name(), \".html\") {\n\t\t\ttemplates = append(templates, path)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Look for all the template files\n\terr := filepath.Walk(application.Configuration.TemplatePath, fn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Make sure we can parse all the template files\n\tapplication.Template = template.Must(template.ParseFiles(templates...))\n\treturn nil\n}",
"func (g *Group) Files(files ...string) error {\n\tg.mu.Lock()\n\tdefer g.mu.Unlock()\n\tfor _, f := range files {\n\t\tf = filepath.Join(g.dir, f)\n\t\terr := g.load(f)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func ParseTemplateFiles(filenames ...string) (t *template.Template) {\n\tvar files []string\n\tt = template.New(\"layout\")\n\tfor _, file := range filenames {\n\t\tfiles = append(files, fmt.Sprintf(\"templates/%s.html\", file))\n\t}\n\tt = template.Must(t.ParseFiles(files...))\n\treturn\n}",
"func (f *FileDir) GetFilesContents(file []byte, reply *FileDir) error {\n\n\t//line contains full path of the file\n\ttime.Sleep(5 * time.Second)\n\tfilePath := string(file) //taking the path of the file from the byte variable\n\n\tcontent, err := ioutil.ReadFile(filePath) //reading the contents of the file\n\tif err != nil {\n\t\tfmt.Println(\"File reading error\", err)\n\t\treturn nil\n\t}\n\n\tdata := string(content) //converting the contents of the file to string\n\t*reply = FileDir{data} //referencing the content to the sent to the client\n\treadBlocked = false\n\treturn nil\n}",
"func templatesPartialsJsonLdBaseTmpl() (*asset, error) {\n\tpath := \"/Users/ravipradhan/Documents/personal-projects/test-modules/render/assets/templates/partials/json-ld/base.tmpl\"\n\tname := \"templates/partials/json-ld/base.tmpl\"\n\tbytes, err := bindataRead(path, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error reading asset info %s at %s: %w\", name, path, err)\n\t}\n\n\ta := &asset{bytes: bytes, info: fi}\n\treturn a, err\n}",
"func (g *Glob) PartialMatch(start int, elems []string) (matched bool, exact bool, remainder *Glob) {\n\tg = g.Split(start)\n\tallExact := true\n\tfor i := 0; i < len(elems); i++ {\n\t\tvar matched, exact bool\n\t\tif matched, exact, g = g.MatchInitialSegment(elems[i]); !matched {\n\t\t\treturn false, false, nil\n\t\t} else if !exact {\n\t\t\tallExact = false\n\t\t}\n\t}\n\treturn true, allExact, g\n}",
"func LoadTemplates(rootTemp string, childTemps []string) {\n\trootTemplate = rootTemp\n\tchildTemplates = childTemps\n}",
"func readFiles(files []string) *Collection {\n\tc := Collection{Stats: make(map[BenchKey]*Benchstat)}\n\tfor _, file := range files {\n\t\treadFile(file, &c)\n\t}\n\treturn &c\n}",
"func (bsr *blockStreamReader) MustInitFromFilePart(path string) {\n\tbsr.reset()\n\n\t// Files in the part are always read without OS cache pollution,\n\t// since they are usually deleted after the merge.\n\tconst nocache = true\n\n\tmetaindexPath := filepath.Join(path, metaindexFilename)\n\tindexPath := filepath.Join(path, indexFilename)\n\tcolumnsHeaderPath := filepath.Join(path, columnsHeaderFilename)\n\ttimestampsPath := filepath.Join(path, timestampsFilename)\n\tfieldValuesPath := filepath.Join(path, fieldValuesFilename)\n\tfieldBloomFilterPath := filepath.Join(path, fieldBloomFilename)\n\tmessageValuesPath := filepath.Join(path, messageValuesFilename)\n\tmessageBloomFilterPath := filepath.Join(path, messageBloomFilename)\n\n\tbsr.ph.mustReadMetadata(path)\n\n\t// Open data readers\n\tmetaindexReader := filestream.MustOpen(metaindexPath, nocache)\n\tindexReader := filestream.MustOpen(indexPath, nocache)\n\tcolumnsHeaderReader := filestream.MustOpen(columnsHeaderPath, nocache)\n\ttimestampsReader := filestream.MustOpen(timestampsPath, nocache)\n\tfieldValuesReader := filestream.MustOpen(fieldValuesPath, nocache)\n\tfieldBloomFilterReader := filestream.MustOpen(fieldBloomFilterPath, nocache)\n\tmessageValuesReader := filestream.MustOpen(messageValuesPath, nocache)\n\tmessageBloomFilterReader := filestream.MustOpen(messageBloomFilterPath, nocache)\n\n\t// Initialize streamReaders\n\tbsr.streamReaders.init(metaindexReader, indexReader, columnsHeaderReader, timestampsReader,\n\t\tfieldValuesReader, fieldBloomFilterReader, messageValuesReader, messageBloomFilterReader)\n\n\t// Read metaindex data\n\tbsr.indexBlockHeaders = mustReadIndexBlockHeaders(bsr.indexBlockHeaders[:0], &bsr.streamReaders.metaindexReader)\n}",
"func parseTemplateFiles(filenames ...string) (t *template.Template) {\n\tvar files []string\n\tt = template.New(\"layout\")\n\tfor _, file := range filenames {\n\t\tfiles = append(files, fmt.Sprintf(\"templates/%s.html\", file))\n\t}\n\tt = template.Must(t.ParseFiles(files...))\n\treturn\n}",
"func templatesPartialsBannersCensusTmpl() (*asset, error) {\n\tpath := \"/Users/ravipradhan/Documents/personal-projects/test-modules/render/assets/templates/partials/banners/census.tmpl\"\n\tname := \"templates/partials/banners/census.tmpl\"\n\tbytes, err := bindataRead(path, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error reading asset info %s at %s: %w\", name, path, err)\n\t}\n\n\ta := &asset{bytes: bytes, info: fi}\n\treturn a, err\n}",
"func (x *Indexer) readfiles() error {\n\tdocPath := x.config.FnamesPath()\n\tf, e := os.OpenFile(docPath, os.O_RDONLY, 0644)\n\tif e != nil {\n\t\treturn e\n\t}\n\tdefer f.Close()\n\t//r := bufio.NewReader(f)\n\tfnames, err := readFnames(f)\n\tx.fnames = fnames\n\treturn err\n}",
"func (p *Project) LoadRcFiles() error {\n\tvar files []string\n\tpath := p.LaunchPath\n\tfor {\n\t\tfiles = append(files, filepath.Join(path, RcFile))\n\t\tif path == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tdir := filepath.Dir(path)\n\t\tif dir == \".\" {\n\t\t\tpath = \"\"\n\t\t} else {\n\t\t\tpath = dir\n\t\t}\n\t}\n\n\terrs := &errors.AggregatedError{}\n\tfor i := len(files) - 1; i >= 0; i-- {\n\t\t_, err := p.Load(files[i])\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\terrs.Add(err)\n\t\t}\n\t}\n\treturn errs.Aggregate()\n}",
"func templatesPartialsJsonLdHomepageTmpl() (*asset, error) {\n\tpath := \"/Users/ravipradhan/Documents/personal-projects/test-modules/render/assets/templates/partials/json-ld/homepage.tmpl\"\n\tname := \"templates/partials/json-ld/homepage.tmpl\"\n\tbytes, err := bindataRead(path, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error reading asset info %s at %s: %w\", name, path, err)\n\t}\n\n\ta := &asset{bytes: bytes, info: fi}\n\treturn a, err\n}",
"func ReadPaths(filePaths []string) ([]ParsedTektonResource, error) {\n\tparsedResources := []ParsedTektonResource{}\n\n\tfor _, filePath := range filePaths {\n\t\t// Check both the existence of the file and if it is a directory.\n\t\tinfo, err := os.Stat(filePath)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"No such file or directory: %s\", filePath)\n\t\t}\n\n\t\t// If this is a directory, recursively read the subpaths.\n\t\tif info.IsDir() {\n\t\t\tfiles, err := ioutil.ReadDir(filePath)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"Unable to read dir %s\", filePath)\n\t\t\t}\n\n\t\t\tsubpaths := make([]string, 0, len(files))\n\t\t\tfor _, file := range files {\n\t\t\t\tsubpaths = append(subpaths, path.Join(filePath, file.Name()))\n\t\t\t}\n\n\t\t\t// Recursively call this function with the sub-paths of this directory.\n\t\t\tresources, err := ReadPaths(subpaths)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tparsedResources = append(parsedResources, resources...)\n\t\t\tcontinue\n\t\t}\n\n\t\t// This path points to a single file. Read it and append the parsed resource.\n\t\tresource, err := readPath(filePath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparsedResources = append(parsedResources, resource...)\n\t}\n\n\treturn parsedResources, nil\n}",
"func ReadInfoFiles(\n\tfilePathPrefix string,\n\tnamespace ident.ID,\n\tshard uint32,\n\treaderBufferSize int,\n\tdecodingOpts msgpack.DecodingOptions,\n) []schema.IndexInfo {\n\tvar indexEntries []schema.IndexInfo\n\tdecoder := msgpack.NewDecoder(decodingOpts)\n\tforEachInfoFile(filePathPrefix, namespace, shard, readerBufferSize, func(_ string, data []byte) {\n\t\tdecoder.Reset(msgpack.NewDecoderStream(data))\n\t\tinfo, err := decoder.DecodeIndexInfo()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tindexEntries = append(indexEntries, info)\n\t})\n\treturn indexEntries\n}",
"func (t *Tmpl) Load() (err error) {\n\t// time point\n\tt.loadedAt = time.Now()\n\n\t// unnamed root template\n\tvar root = template.New(\"\")\n\n\tvar walkFunc = func(path string, info os.FileInfo, err error) (_ error) {\n\t\t// handle walking error if any\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// skip all except regular files\n\t\tif !info.Mode().IsRegular() {\n\t\t\treturn\n\t\t}\n\n\t\t// filter by extension\n\t\tif filepath.Ext(path) != t.ext {\n\t\t\treturn\n\t\t}\n\n\t\t// get relative path\n\t\tvar rel string\n\t\tif rel, err = filepath.Rel(t.dir, path); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// name of a template is its relative path\n\t\t// without extension\n\t\trel = strings.TrimSuffix(rel, t.ext)\n\n\t\t// load or reload\n\t\tvar (\n\t\t\tnt = root.New(rel)\n\t\t\tb []byte\n\t\t)\n\n\t\tif b, err = ioutil.ReadFile(path); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = nt.Parse(string(b))\n\t\treturn err\n\t}\n\n\tif err = filepath.Walk(t.dir, walkFunc); err != nil {\n\t\treturn\n\t}\n\n\t// necessary for reloading\n\tif t.funcs != nil {\n\t\troot = root.Funcs(t.funcs)\n\t}\n\n\tt.Template = root // set or replace\n\treturn\n}",
"func setupTemplates(folder string) error {\n\n\tcontents, err := ioutil.ReadDir(folder)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar files []string\n\n\tfor _, file := range contents {\n\t\tfull_name := file.Name()\n\t\tfiles = append(files, filepath.Join(folder, full_name))\n\t}\n\n\tvar temperr error\n\n\ttemplates, temperr = ParseFiles(files...)\n\n\tif temperr != nil {\n\t\treturn temperr\n\t}\n\n\treturn nil\n}",
"func load(filenames ...string) *template.Template {\n\treturn template.Must(template.ParseFiles(joinTemplateDir(filenames...)...)).Lookup(\"root\")\n}",
"func LoadAllTemplates() (Templates, error) {\n\ttemplateMap := make(map[string]*template.Template)\n\n\t// We walk through every file in the templates folder.\n\terr := filepath.Walk(\"./assets/templates\", func(path string, info fs.FileInfo, err error) error {\n\t\t// We only care about files which are actual templates and are not of special use (like \"_base\")\n\t\tif !strings.HasPrefix(info.Name(), \"_\") && strings.HasSuffix(path, \".template.html\") {\n\t\t\t// We bundle the templates together with the base template so that we can render them together later.\n\t\t\tt, err := template.ParseFiles(\"./assets/templates/_base.template.html\", path)\n\t\t\tif err == nil {\n\t\t\t\t// We keep the parsed template in the templateMap by it's filename.\n\t\t\t\ttemplateMap[info.Name()] = t\n\t\t\t}\n\t\t}\n\n\t\treturn err\n\t})\n\n\tif err != nil {\n\t\treturn Templates{}, err\n\t}\n\n\treturn Templates{\n\t\ttemplateMap: templateMap,\n\t}, nil\n}",
"func readPartialAttributeList(bytes *Bytes) (ret PartialAttributeList, err error) {\n\tret = PartialAttributeList(make([]PartialAttribute, 0, 10))\n\terr = bytes.ReadSubBytes(classUniversal, tagSequence, ret.readComponents)\n\tif err != nil {\n\t\terr = LdapError{fmt.Sprintf(\"readPartialAttributeList:\\n%s\", err.Error())}\n\t\treturn\n\t}\n\treturn\n}",
"func (s *server) parseTemplates(w http.ResponseWriter, files ...string) (tpl *template.Template, err error) {\n\t// Automatically adds app layout\n\tfiles = append(files, filepath.Join(\"layouts\", \"app\"))\n\tfor i, v := range files {\n\t\tfiles[i] = filepath.Join(\"client\", \"templates\", v) + \".tpl\"\n\t}\n\t// Automatically adds components folder\n comps, err := ioutil.ReadDir(\".\" + filepath.Join(\"client\", \"templates\", \"components\"))\n if err != nil {\n s.logErr(\"failed to read components\", err)\n return nil, err\n }\n\tfor _, v := range comps {\n files = append(files, v.Name())\n }\n\n\ttpl, err = template.New(\"\").Funcs(template.FuncMap{\n\t\t\"echo\": func(input string) string {\n\t\t\treturn input\n\t\t},\n\t\t\"isMarkdown\": func(data interface{}) bool {\n\t\t\tswitch data.(type) {\n\t\t\tcase markdown:\n\t\t\t\treturn true\n\t\t\tdefault:\n\t\t\t\treturn false\n\t\t\t}\n\t\t},\n\t}).ParseFiles(files...)\n\tif err != nil {\n\t\ts.logErr(\"Error parsing template file\", err)\n\t\treturn nil, err\n\t}\n\treturn tpl, nil\n}",
"func (w *WaysMapping) Load(filesPath []string) error {\n\n\tstartTime := time.Now()\n\n\tfor _, f := range filesPath {\n\t\terr := w.loadFromSingleFile(f)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tglog.Infof(\"Loaded way2patterns mapping count %d, takes %f seconds\", w.Count(), time.Now().Sub(startTime).Seconds())\n\treturn nil\n}",
"func LoadSpecFiles(filesPath string) (Specs, error) {\n\tspecs := Specs{SpecsByName: make(map[string]SpecDef)}\n\tvar files []string\n\n\tfilesInPath, err := ioutil.ReadDir(filesPath)\n\tif err != nil {\n\t\treturn specs, err\n\t}\n\tfor _, f := range filesInPath {\n\t\tif ok, _ := regexp.MatchString(fileNameMatcher, f.Name()); ok {\n\t\t\tfiles = append(files, path.Join(filesPath, f.Name()))\n\t\t}\n\t}\n\n\tfor _, file := range files {\n\t\tf, err := ioutil.ReadFile(file)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"fail to read service spec file %s: %s \", file, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar sd SpecDef\n\t\terr = yaml.Unmarshal(f, &sd)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"fail parse service spec file %s: %s\", file, err)\n\t\t\tcontinue\n\t\t}\n\t\tlogrus.Debugf(\"spec file loaded for service:%s\", sd.Service)\n\t\tspecs.SpecsByName[sd.Service] = sd\n\t}\n\n\treturn specs, nil\n}",
"func readFiles(files []string, numReaders int) {\n\t// Init a concurrency limiter\n\tblocker := make(chan int, numReaders)\n\tfor i := 0; i < numReaders; i++ {\n\t\tlog.Println(\"Starting reader\", i)\n\t\tblocker <- 1\n\t}\n\n\t// Init three more chans to communicate with the routines\n\tfinished := make(chan chan int)\n\n\tgo keepScore(\"Total\", countTotal, finished)\n\tgo keepScore(\"Errors\", countErrors, finished)\n\n\t// Loop over the files in reverse order\n\t// We start with the biggest files\n\t// Should give a more equal finishing time\n\tfor i := len(files) - 1; i >= 0; i-- {\n\t\t// This blocks\n\t\t<-blocker\n\n\t\tgo readFile(files[i], blocker)\n\t}\n\n\t// Block until everything finished.\n\tfor i := 0; i < numReaders; i++ {\n\t\t<-blocker\n\t\tlog.Println(\"Terminated reader\", i, \"-> no more files to read\")\n\t}\n\n\tfor i := 0; i < 2; i++ {\n\t\tw := make(chan int)\n\t\tfinished <- w\n\n\t\t// Wait for it to print, then continue\n\t\t<-w\n\t}\n}",
"func (p *FileMap) loadFilesRecursively(cwd string) error {\n\tfileList, err := ioutil.ReadDir(cwd)\n\n\tif err != nil {\n\t\terr = StringError{s: \"ERROR: Can't open \\\"\" + cwd + \"\\\" directory!\"}\n\t\treturn err\n\t}\n\n\tfor _, f := range fileList {\n\t\tfileName := f.Name()\n\n\t\tif f.IsDir() {\n\t\t\terr := p.loadFilesRecursively(cwd + fileName + \"/\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\tbaseName, ext := getBaseAndExt(fileName)\n\n\t\t\t_, err := p.load(cwd+baseName, ext)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfmt.Println(\"INFO: Loaded file: \" + cwd + filepath.Base(fileName))\n\t\t}\n\t}\n\treturn nil\n}",
"func Partial(dst, src *os.File, dstOffset, srcOffset, n int64, fallback bool) error {\n\terr := reflinkRangeInternal(dst, src, dstOffset, srcOffset, n)\n\tif (err != nil) && fallback {\n\t\t_, err = copyFileRange(dst, src, dstOffset, srcOffset, n)\n\t}\n\n\tif (err != nil) && fallback {\n\t\t// seek both src & dst\n\t\treader := io.NewSectionReader(src, srcOffset, n)\n\t\twriter := §ionWriter{w: dst, base: dstOffset}\n\t\t_, err = io.CopyN(writer, reader, n)\n\t}\n\treturn err\n}",
"func LoadAllTemplates() (*template.Template, error) {\n\treturn FindAndParseTemplates(templateDir, \".tmpl\", template.FuncMap{\n\t\t\"DeducePosX\": DeducePosX,\n\t\t\"DeducePosY\": DeducePosY,\n\t\t\"ItemRarity\": ItemRarity,\n\t\t\"ItemRarityType\": ItemRarityType,\n\t\t\"ItemRarityHeight\": ItemRarityHeight,\n\t\t\"InfluenceName\": InfluenceName,\n\t\t\"GenSpecialBackground\": GenSpecialBackground,\n\t\t\"ColorType\": ColorType,\n\t\t\"AugmentedType\": AugmentedType,\n\t\t\"WordWrap\": WordWrap,\n\t\t\"ConvToCssProgress\": ConvToCssProgress,\n\t\t\"ReplacePoEMarkup\": ReplacePoEMarkup,\n\t\t\"PoEMarkup\": PoEMarkup,\n\t\t\"PoEMarkupLinesOnly\": PoEMarkupLinesOnly,\n\t\t\"ColorToSocketClass\": ColorToSocketClass,\n\t\t\"SocketRight\": SocketRight,\n\t\t\"SocketedClass\": SocketedClass,\n\t\t\"SocketedId\": SocketedId,\n\t\t\"AltWeaponImage\": AltWeaponImage,\n\t\t\"SellDescription\": SellDescription,\n\t\t\"XpToNextLevel\": models.XpToNextLevel,\n\t\t\"CurrentXp\": models.CurrentXp,\n\t\t\"XpNeeded\": models.XpNeeded,\n\t\t\"PrettyPrint\": models.PrettyPrint,\n\t\t\"ContainsPattern\": ContainsPattern,\n\t\t\"GenProperties\": GenProperties,\n\t\t\"SearchItem\": SearchItem,\n\t\t\"GenNaiveSearchIndex\": GenNaiveSearchIndex,\n\t\t\"ItemCategory\": ItemCategory,\n\t\t\"Version\": func() string {\n\t\t\treturn misc.Version\n\t\t},\n\t\t\"attr\": func(s string) template.HTMLAttr {\n\t\t\treturn template.HTMLAttr(s)\n\t\t},\n\t\t\"ieq\": func(a, b string) bool {\n\t\t\treturn strings.EqualFold(a, b)\n\t\t},\n\t\t\"safe\": func(s string) template.HTML {\n\t\t\treturn template.HTML(s)\n\t\t},\n\t\t\"add\": func(a, b int) int {\n\t\t\treturn a + b\n\t\t},\n\t\t\"percentage\": func(a, b int) float64 {\n\t\t\tif b == 0 {\n\t\t\t\treturn 0\n\t\t\t}\n\t\t\treturn (float64(a) / float64(b)) * 100\n\t\t},\n\t\t\"squeeze\": func(s string) string {\n\t\t\treturn strings.Map(\n\t\t\t\tfunc(r rune) rune {\n\t\t\t\t\tif unicode.IsLetter(r) {\n\t\t\t\t\t\treturn 
r\n\t\t\t\t\t}\n\t\t\t\t\treturn -1\n\t\t\t\t},\n\t\t\t\ts,\n\t\t\t)\n\t\t},\n\t\t\"dict\": func(values ...interface{}) (map[string]interface{}, error) {\n\t\t\tif len(values) == 0 {\n\t\t\t\treturn nil, errors.New(\"invalid dict call\")\n\t\t\t}\n\t\t\tdict := make(map[string]interface{})\n\t\t\tfor i := 0; i < len(values); i++ {\n\t\t\t\tkey, isset := values[i].(string)\n\t\t\t\tif !isset {\n\t\t\t\t\tif reflect.TypeOf(values[i]).Kind() == reflect.Map {\n\t\t\t\t\t\tm := values[i].(map[string]interface{})\n\t\t\t\t\t\tfor i, v := range m {\n\t\t\t\t\t\t\tdict[i] = v\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn nil, errors.New(\"dict values must be maps\")\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\ti++\n\t\t\t\t\tif i == len(values) {\n\t\t\t\t\t\treturn nil, errors.New(\"specify the key for non array values\")\n\t\t\t\t\t}\n\t\t\t\t\tdict[key] = values[i]\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn dict, nil\n\t\t},\n\t\t\"nl2br\": func(line string) string {\n\t\t\treturn strings.Replace(line, \"\\n\", \"<br />\", -1)\n\t\t},\n\t\t\"PrettyDate\": func() string {\n\t\t\treturn time.Now().Format(\"2006-01-02 15:04:05\")\n\t\t},\n\t\t\"DateFormat\": func(d time.Time) string {\n\t\t\treturn d.Format(\"2006-01-02\")\n\t\t},\n\t})\n}",
"func templatesPartialsBannersSurveyTmpl() (*asset, error) {\n\tpath := \"/Users/ravipradhan/Documents/personal-projects/test-modules/render/assets/templates/partials/banners/survey.tmpl\"\n\tname := \"templates/partials/banners/survey.tmpl\"\n\tbytes, err := bindataRead(path, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error reading asset info %s at %s: %w\", name, path, err)\n\t}\n\n\ta := &asset{bytes: bytes, info: fi}\n\treturn a, err\n}",
"func ReadRentalAgreementTemplates(rows *sql.Rows, a *RentalAgreementTemplate) error {\n\treturn rows.Scan(&a.RATID, &a.BID, &a.RATemplateName, &a.CreateTS, &a.CreateBy, &a.LastModTime, &a.LastModBy)\n}",
"func (d *galleryDocument) LoadTemplates(t *template.Template) error {\n\treturn nil\n}",
"func parseTemplates() (){\n templates = make(map[string]*template.Template)\n if files, err := ioutil.ReadDir(CONFIG.TemplatesDir) ; err != nil {\n msg := \"Error reading templates directory: \" + err.Error()\n log.Fatal(msg)\n } else {\n for _, f := range files {\n fmt.Println(f.Name())\n err = nil\n\n tpl, tplErr := template.New(f.Name()).Funcs(template.FuncMap{\n \"humanDate\": humanDate,\n \"humanSize\": humanSize,}).ParseFiles(CONFIG.TemplatesDir + \"/\" + f.Name())\n if tplErr != nil {\n log.Fatal(\"Error parsing template: \" + tplErr.Error())\n } else {\n templates[f.Name()] = tpl\n }\n }\n }\n return\n}",
"func loadTranslations(trPath string) error {\n\tfiles, _ := filepath.Glob(trPath + \"/*.json\")\n\n\tif len(files) == 0 {\n\t\treturn errors.New(\"no translations found\")\n\t}\n\n\tfor _, file := range files {\n\t\terr := loadFileToMap(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func templatesPartialsFeedbackTmpl() (*asset, error) {\n\tpath := \"/Users/ravipradhan/Documents/personal-projects/test-modules/render/assets/templates/partials/feedback.tmpl\"\n\tname := \"templates/partials/feedback.tmpl\"\n\tbytes, err := bindataRead(path, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error reading asset info %s at %s: %w\", name, path, err)\n\t}\n\n\ta := &asset{bytes: bytes, info: fi}\n\treturn a, err\n}",
"func (ts *TranslationService) loadFiles() {\n\tfor _, fileName := range ts.translationFiles {\n\t\terr := ts.i18bundle.LoadTranslationFile(fileName)\n\t\tif err != nil {\n\t\t\tts.logger.Warn(fmt.Sprintf(\"loading of translationfile %s failed: %s\", fileName, err))\n\t\t}\n\t}\n\n\tts.lastReload = time.Now()\n}",
"func FindAndParseTemplates(rootDir, ext string, funcMap template.FuncMap) (*template.Template, error) {\n\tcleanRoot := filepath.Clean(rootDir)\n\tpfx := len(cleanRoot) + 1\n\troot := template.New(\"\")\n\n\terr := filepath.Walk(cleanRoot, func(path string, info os.FileInfo, e1 error) error {\n\t\tif !info.IsDir() && strings.HasSuffix(path, ext) {\n\t\t\tif e1 != nil {\n\t\t\t\treturn e1\n\t\t\t}\n\n\t\t\tb, e2 := ioutil.ReadFile(path)\n\t\t\tif e2 != nil {\n\t\t\t\treturn e2\n\t\t\t}\n\n\t\t\tname := path[pfx:]\n\t\t\tt := root.New(name).Funcs(funcMap)\n\t\t\t_, e2 = t.Parse(string(b))\n\t\t\tif e2 != nil {\n\t\t\t\treturn e2\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn root, err\n}",
"func (manager *Manager) OrderedLoadSchemasFromFiles(filePaths []string) error {\n\tMaxDepth := 8 // maximum number of nested schemas\n\tfor i := MaxDepth; i > 0 && len(filePaths) > 0; i-- {\n\t\trest := make([]string, 0)\n\t\tfor _, filePath := range filePaths {\n\t\t\tif filePath == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr := manager.LoadSchemaFromFile(filePath)\n\t\t\tif err != nil && err.Error() != \"data isn't map\" {\n\t\t\t\tif i == 1 {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\trest = append(rest, filePath)\n\t\t\t}\n\t\t}\n\t\tfilePaths = rest\n\t}\n\treturn nil\n}",
"func parseAllFiles() {\n\tbasePath := \"/home/andrea/infos/\" // TODO change this\n\tfiles, _ := ioutil.ReadDir(basePath)\n\tfor _, f := range files {\n\t\terr, f := model.FromJSON(basePath + f.Name())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tfilms = append(films, f)\n\t}\n}",
"func parseFiles(p string, files []os.FileInfo) {\n\tfor _, f := range files {\n\t\tnewP := path.Join(p, f.Name())\n\t\tif f.IsDir() {\n\t\t\tnewFiles, err := ioutil.ReadDir(newP)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tparseFiles(newP, newFiles)\n\t\t}\n\t\tparseFile(newP)\n\t}\n}",
"func loadDocuments(paths []string) (map[string]string, error) {\n\tignoreFileExtensions := func(abspath string, info os.FileInfo, depth int) bool {\n\t\treturn !contains([]string{\".yaml\", \".yml\", \".json\"}, filepath.Ext(info.Name()))\n\t}\n\n\tdocumentPaths, err := loader.FilteredPaths(paths, ignoreFileExtensions)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"filter data paths: %w\", err)\n\t}\n\n\tdocuments := make(map[string]string)\n\tfor _, documentPath := range documentPaths {\n\t\tcontents, err := ioutil.ReadFile(documentPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"read file: %w\", err)\n\t\t}\n\n\t\tdocuments[documentPath] = string(contents)\n\t}\n\n\treturn documents, nil\n}",
"func (r *FileRepository) ReadFileSlice(path string) ([]string, error) {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"fail to open file: %s\", path)\n\t}\n\tdefer file.Close()\n\tdata := make([]string, 0)\n\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tdata = append(data, scanner.Text())\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"fail to scan file\")\n\t}\n\treturn data, nil\n}",
"func Templates(basepath string) (messages []Message) {\n\tmessages = []Message{}\n\tpath := filepath.Join(basepath, \"templates\")\n\tif fi, err := os.Stat(path); err != nil {\n\t\tmessages = append(messages, Message{Severity: WarningSev, Text: \"No templates\"})\n\t\treturn\n\t} else if !fi.IsDir() {\n\t\tmessages = append(messages, Message{Severity: ErrorSev, Text: \"'templates' is not a directory\"})\n\t\treturn\n\t}\n\n\ttpl := template.New(\"tpl\").Funcs(sprig.TxtFuncMap())\n\n\terr := filepath.Walk(basepath, func(name string, fi os.FileInfo, e error) error {\n\t\t// If an error is returned, we fail. Non-fatal errors should just be\n\t\t// added directly to messages.\n\t\tif e != nil {\n\t\t\treturn e\n\t\t}\n\t\tif fi.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\tdata, err := ioutil.ReadFile(name)\n\t\tif err != nil {\n\t\t\tmessages = append(messages, Message{\n\t\t\t\tSeverity: ErrorSev,\n\t\t\t\tText: fmt.Sprintf(\"cannot read %s: %s\", name, err),\n\t\t\t})\n\t\t\treturn nil\n\t\t}\n\n\t\t// An error rendering a file should emit a warning.\n\t\tnewtpl, err := tpl.Parse(string(data))\n\t\tif err != nil {\n\t\t\tmessages = append(messages, Message{\n\t\t\t\tSeverity: ErrorSev,\n\t\t\t\tText: fmt.Sprintf(\"error processing %s: %s\", name, err),\n\t\t\t})\n\t\t\treturn nil\n\t\t}\n\t\ttpl = newtpl\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tmessages = append(messages, Message{Severity: ErrorSev, Text: err.Error()})\n\t}\n\n\treturn\n}",
"func (c *MixinSpec) MixinFiles(primaryFile string, mixinFiles []string, w io.Writer) ([]string, error) {\n\n\tprimaryDoc, err := loads.Spec(primaryFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tprimary := primaryDoc.Spec()\n\n\tvar mixins []*spec.Swagger\n\tfor _, mixinFile := range mixinFiles {\n\t\tif c.KeepSpecOrder {\n\t\t\tmixinFile = generator.WithAutoXOrder(mixinFile)\n\t\t}\n\t\tmixin, lerr := loads.Spec(mixinFile)\n\t\tif lerr != nil {\n\t\t\treturn nil, lerr\n\t\t}\n\t\tmixins = append(mixins, mixin.Spec())\n\t}\n\n\tcollisions := analysis.Mixin(primary, mixins...)\n\tanalysis.FixEmptyResponseDescriptions(primary)\n\n\treturn collisions, writeToFile(primary, !c.Compact, c.Format, string(c.Output))\n}",
"func compareFiles(t *testing.T) {\n\tactualCursor, err := files.Find(ctx, emptyDoc)\n\ttesthelpers.RequireNil(t, err, \"error running Find for files: %s\", err)\n\texpectedCursor, err := expectedFiles.Find(ctx, emptyDoc)\n\ttesthelpers.RequireNil(t, err, \"error running Find for expected files: %s\", err)\n\n\tfor expectedCursor.Next(ctx) {\n\t\tif !actualCursor.Next(ctx) {\n\t\t\tt.Fatalf(\"files has fewer documents than expectedFiles\")\n\t\t}\n\n\t\tvar actualFile bsonx.Doc\n\t\tvar expectedFile bsonx.Doc\n\n\t\terr = actualCursor.Decode(&actualFile)\n\t\ttesthelpers.RequireNil(t, err, \"error decoding actual file: %s\", err)\n\t\terr = expectedCursor.Decode(&expectedFile)\n\t\ttesthelpers.RequireNil(t, err, \"error decoding expected file: %s\", err)\n\n\t\tcompareGfsDoc(t, expectedFile, actualFile, primitive.ObjectID{})\n\t}\n}",
"func getFilesFromIndex(p string, r io.Reader) ([]*FileInfo, Paragraph, error) {\n\treturn getFilesFromRelease(p, r)\n}",
"func (f *File) Read() error {\n\tf2, err := os.Open(f.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f2.Close()\n\tif err := json.NewDecoder(f2).Decode(&f.Groups); err != nil {\n\t\treturn err\n\t}\n\tfor _, g := range f.Groups {\n\t\tif err := json.Unmarshal(g.RawSchema, &g.Schema); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (r Reader) Read(spec *v1alpha1.OCIBuilderSpec, overlayPath string, filepaths ...string) error {\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfilepath := strings.Join(filepaths[:], \"/\")\n\tif filepath != \"\" {\n\t\tdir = filepath\n\t}\n\tr.Logger.WithField(\"filepath\", dir+\"/ocibuilder.yaml\").Debugln(\"looking for ocibuilder.yaml\")\n\tfile, err := ioutil.ReadFile(dir + \"/ocibuilder.yaml\")\n\tif err != nil {\n\t\tr.Logger.Infoln(\"ocibuilder.yaml file not found, looking for individual specifications...\")\n\t\tif err := r.readIndividualSpecs(spec, dir); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to read individual specs\")\n\t\t}\n\t}\n\n\tif overlayPath != \"\" {\n\t\tr.Logger.WithField(\"overlayPath\", overlayPath).Debugln(\"overlay path not empty - looking for overlay file\")\n\t\tfile, err = applyOverlay(file, overlayPath)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to apply overlay to spec at path\")\n\t\t}\n\t}\n\n\tif err = yaml.Unmarshal(file, spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to unmarshal spec at directory\")\n\t}\n\n\tif err := validate.Validate(spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate spec at directory\")\n\t}\n\n\tif err = yaml.Unmarshal(file, spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to unmarshal spec at directory\")\n\t}\n\n\tif err := validate.Validate(spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate spec at directory\")\n\t}\n\n\tif err = yaml.Unmarshal(file, spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to unmarshal spec at directory\")\n\t}\n\n\tif err := validate.Validate(spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate spec at directory\")\n\t}\n\n\tif spec.Params != nil {\n\t\tif err = r.applyParams(file, spec); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to apply params to spec\")\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (ref Collections) Read(start collection.SeqNum, p []collection.Data) (n int, err error) {\n\tdrv, ok := ref.repo.drives[ref.drive]\n\tif !ok {\n\t\treturn 0, collection.NotFound{Drive: ref.drive, Collection: start}\n\t}\n\tlength := collection.SeqNum(len(drv.Collections))\n\tif start >= length {\n\t\treturn 0, collection.NotFound{Drive: ref.drive, Collection: start}\n\t}\n\tfor n < len(p) && start+collection.SeqNum(n) < length {\n\t\tp[n] = drv.Collections[start+collection.SeqNum(n)].Data\n\t\tn++\n\t}\n\treturn n, nil\n}",
"func Initialize(basePath string) error {\n log(dtalog.DBG, \"Initialize(%q) called\", basePath)\n dir, err := os.Open(basePath)\n if err != nil {\n log(dtalog.ERR, \"Initialize(%q): error opening directory for read: %s\",\n basePath, err)\n return err\n }\n \n filez, err := dir.Readdirnames(0)\n if err != nil {\n log(dtalog.ERR, \"Initialize(%q): error reading from directory: %s\",\n basePath, err)\n dir.Close()\n return err\n }\n dir.Close()\n \n Paths = make([]string, 0, len(filez))\n Limbo = make(map[string]*string)\n \n for _, fname := range filez {\n pth := filepath.Join(basePath, fname)\n f, err := os.Open(pth)\n if err != nil {\n log(dtalog.WRN, \"Initialize(): error opening file %q: %s\", pth, err)\n continue\n }\n defer f.Close()\n log(dtalog.DBG, \"Initialize(): reading file %q\", pth)\n \n Paths = append(Paths, pth)\n cur_ptr := &(Paths[len(Paths)-1])\n \n dcdr := json.NewDecoder(f)\n var raw interface{}\n var raw_slice []interface{}\n var i ref.Interface\n var idx string\n for dcdr.More() {\n err = dcdr.Decode(&raw)\n if err != nil {\n log(dtalog.WRN, \"Initialize(): error decoding from file %q: %s\",\n pth, err)\n continue\n }\n raw_slice = raw.([]interface{})\n if len(raw_slice) < 2 {\n log(dtalog.WRN, \"Initialize(): in file %q: slice too short: %q\",\n pth, raw_slice)\n continue\n }\n \n idx = raw_slice[0].(string)\n i = ref.Deref(idx)\n if i == nil {\n Limbo[idx] = cur_ptr\n } else {\n i.(Interface).SetDescPage(cur_ptr)\n }\n }\n }\n return nil\n}",
"func templatesPartialsHeaderTmpl() (*asset, error) {\n\tpath := \"/Users/ravipradhan/Documents/personal-projects/test-modules/render/assets/templates/partials/header.tmpl\"\n\tname := \"templates/partials/header.tmpl\"\n\tbytes, err := bindataRead(path, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error reading asset info %s at %s: %w\", name, path, err)\n\t}\n\n\ta := &asset{bytes: bytes, info: fi}\n\treturn a, err\n}",
"func LoadTranslations() error {\r\n\tbox := packr.NewBox(translationsPath)\r\n\tfor _, translationFilePath := range box.List() {\r\n\t\ttranslationFile, err := box.Open(translationFilePath)\r\n\t\tif err != nil {\r\n\t\t\treturn err\r\n\t\t}\r\n\t\tdefer translationFile.Close()\r\n\r\n\t\tbyteValue, _ := ioutil.ReadAll(translationFile)\r\n\r\n\t\tvar translation translationItem\r\n\t\terr = json.Unmarshal(byteValue, &translation)\r\n\t\tif err != nil {\r\n\t\t\treturn err\r\n\t\t}\r\n\t\ttranslations = append(translations, translation)\r\n\t}\r\n\r\n\treturn nil\r\n}",
"func ParseSnpFiles() []byte {\n\tcwd, _ := os.Getwd()\n\n\tsnippet := make(map[string]snippetItem)\n\terr := filepath.WalkDir(cwd, func(filePath string, d os.DirEntry, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif strings.HasSuffix(filePath, \".txt\") {\n\t\t\tstripCwd := filePath[len(cwd)+1:]\n\n\t\t\t// scope\n\t\t\tscopeAttr := \"\"\n\t\t\tidx := strings.Index(stripCwd, string(os.PathSeparator))\n\t\t\tif idx != -1 {\n\t\t\t\tscopeAttr = stripCwd[:idx]\n\t\t\t}\n\t\t\tif scopeAttr == \"global\" { //file in global folder is use for all language\n\t\t\t\tscopeAttr = \"\"\n\t\t\t}\n\n\t\t\t// prefix\n\t\t\tprefixWithTxt := strings.ReplaceAll(stripCwd, string(os.PathSeparator), \" \")\n\t\t\tprefixWithScope := prefixWithTxt[:len(prefixWithTxt)-4]\n\t\t\tprefix := prefixWithScope[len(scopeAttr)+1:]\n\n\t\t\t// description\n\t\t\tdescription := prefix\n\n\t\t\t// body\n\t\t\trawContent, readFileErr := ioutil.ReadFile(filePath)\n\t\t\tif readFileErr != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tstringContent := strings.TrimRight(strings.TrimLeft(string(rawContent), \"\\n\"), \"\\n\")\n\t\t\tbody := strings.Split(stringContent, \"\\n\")\n\n\t\t\t// item\n\t\t\titem := snippetItem{scopeAttr, description, body, prefix}\n\n\t\t\tsnippet[prefix] = item\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tjson, jsonEncodingErr := jsonMarshal(snippet)\n\n\tif jsonEncodingErr != nil {\n\t\tlog.Fatal(jsonEncodingErr)\n\t}\n\n\treturn json\n}",
"func (c *Client) LoadAndParseAll() ([]*RouteInfo, error) {\n\tresponse, err := c.etcd.Get(c.routesRoot, false, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata, etcdIndex := c.iterateDefs(response.Node, 0)\n\tif response.EtcdIndex > etcdIndex {\n\t\tetcdIndex = response.EtcdIndex\n\t}\n\n\tc.etcdIndex = etcdIndex\n\treturn parseRoutes(data), nil\n}",
"func (s *Site) read() error {\n\n\t// Lists of templates (_layouts, _includes) that we find that\n\t// will need to be compiled\n\tlayouts := []string{}\n\n\t// func to walk the jekyll directory structure\n\twalker := func(fn string, fi os.FileInfo, err error) error {\n\t\trel, _ := filepath.Rel(s.Src, fn)\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\treturn nil\n\n\t\tcase fi.IsDir() && isHiddenOrTemp(fn):\n\t\t\treturn filepath.SkipDir\n\n\t\t// Ignore directories\n\t\tcase fi.IsDir():\n\t\t\treturn nil\n\n\t\t// Ignore Hidden or Temp files\n\t\t// (starting with . or ending with ~)\n\t\tcase isHiddenOrTemp(rel):\n\t\t\treturn nil\n\n\t\t// Parse Templates\n\t\tcase isTemplate(rel):\n\t\t\tlayouts = append(layouts, fn)\n\n\t\t// Parse Posts\n\t\tcase isPost(rel):\n\t\t\tlogf(MsgParsingPost, rel)\n\t\t\tpermalink := s.Conf.GetString(\"permalink\")\n\t\t\tif permalink == \"\" {\n\t\t\t\t// According to Jekyll documentation 'date' is the\n\t\t\t\t// default permalink\n\t\t\t\tpermalink = \"date\"\n\t\t\t}\n\n\t\t\tpost, err := ParsePost(rel, permalink)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// TODO: this is a hack to get the posts in rev chronological order\n\t\t\ts.posts = append([]Page{post}, s.posts...) 
//s.posts, post)\n\n\t\t// Parse Pages\n\t\tcase isPage(rel):\n\t\t\tlogf(MsgParsingPage, rel)\n\t\t\tpage, err := ParsePage(rel)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ts.pages = append(s.pages, page)\n\n\t\t// Move static files, no processing required\n\t\tcase isStatic(rel):\n\t\t\ts.files = append(s.files, rel)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Walk the diretory recursively to get a list of all posts,\n\t// pages, templates and static files.\n\terr := filepath.Walk(s.Src, walker)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Compile all templates found, if any.\n\tif len(layouts) > 0 {\n\t\ts.templ, err = template.New(\"layouts\").Funcs(funcMap).ParseFiles(layouts...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Add the posts, timestamp, etc to the Site Params\n\ts.Conf.Set(\"posts\", s.posts)\n\ts.Conf.Set(\"time\", time.Now())\n\ts.calculateTags()\n\ts.calculateCategories()\n\n\treturn nil\n}",
"func ReadFiles(path string) ([]string, error) {\n\tfiles, err := getPolicyFiles(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"search rego files: %w\", err)\n\t}\n\n\treturn files, nil\n}",
"func (r *Atomustache) loadLayouts() error {\n\n\tfiles, err := ioutil.ReadDir(r.LayoutsFolder)\n\tif err != nil {\n\t\treturn errors.New(\"Error reading layouts folder: \" + err.Error())\n\t}\n\n\tfor _, file := range files {\n\t\tif strings.HasSuffix(file.Name(), r.Ext) {\n\n\t\t\tk := noExt(file.Name())\n\t\t\tpath := r.LayoutsFolder + \"/\" + file.Name()\n\n\t\t\tv, fErr := ioutil.ReadFile(path)\n\t\t\tif fErr != nil {\n\t\t\t\treturn errors.New(\"Error reading file (\" + path + \"): \" + fErr.Error())\n\t\t\t}\n\n\t\t\tt, mErr := ParseString(string(v), nil)\n\t\t\tif mErr != nil {\n\t\t\t\treturn errors.New(\"Error parsing string for file (\" + path + \"): \" + mErr.Error())\n\t\t\t}\n\n\t\t\tr.Layouts[k] = t\n\t\t}\n\t}\n\n\treturn nil\n}",
"func TemplateFromFiles(paths ...string) (*template.Template, error) {\n\tvar ps []string\n\tfor _, p := range paths {\n\t\tps = append(ps, srcutil.Path(p))\n\t}\n\n\tt, err := template.ParseFiles(ps...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"template.ParseFiles() failed: %v\", err)\n\t}\n\n\treturn t, nil\n}",
"func LoadPartialStrain() error {\n\tbin, err := runner.LookUp()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := env.CheckWithoutDB(); err != nil {\n\t\treturn fmt.Errorf(\"error in checking for env vars %s\", err)\n\t}\n\tmg.SerialDeps(\n\t\tmg.F(strain, bin),\n\t\tmg.F(characteristics, bin),\n\t\tmg.F(strainProp, bin),\n\t\tmg.F(strainSyn, bin),\n\t\tmg.F(strainInv, bin),\n\t\tmg.F(phenotype, bin),\n\t\tmg.F(genotype, bin),\n\t\t// mg.F(Gwdi, bin),\n\t)\n\treturn nil\n}",
"func readFirstFile(groupDir string, filenames []string) (string, error) {\n\tvar errors *multierror.Error\n\t// If reading all the files fails, return list of read errors.\n\tfor _, filename := range filenames {\n\t\tcontent, err := Blkio.Group(groupDir).Read(filename)\n\t\tif err == nil {\n\t\t\treturn content, nil\n\t\t}\n\t\terrors = multierror.Append(errors, err)\n\t}\n\terr := errors.ErrorOrNil()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not read any of files %q: %w\", filenames, err)\n\t}\n\treturn \"\", nil\n}",
"func layoutFiles() []string {\n\tfiles, err := filepath.Glob(layoutDir + \"*\" + templateExt)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn files\n}",
"func (fc finderClient) FullReads(ctx context.Context,\n\thost, index, shard string,\n\tids []strfmt.UUID,\n) ([]objects.Replica, error) {\n\tn := len(ids)\n\trs, err := fc.cl.FetchObjects(ctx, host, index, shard, ids)\n\tif m := len(rs); err == nil && n != m {\n\t\terr = fmt.Errorf(\"malformed full read response: length expected %d got %d\", n, m)\n\t}\n\treturn rs, err\n}",
"func layoutFiles() []string {\n\tfiles, err := filepath.Glob(LayoutDir + \"*\" + TemplateExt)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn files\n\n}"
] | [
"0.7431444",
"0.6224712",
"0.59201527",
"0.5268718",
"0.5262331",
"0.49429238",
"0.48908126",
"0.4883328",
"0.48543856",
"0.48400208",
"0.48294452",
"0.4807043",
"0.47279018",
"0.4716206",
"0.46896315",
"0.46685848",
"0.4665916",
"0.46120083",
"0.459389",
"0.45938268",
"0.45871404",
"0.4541784",
"0.45270562",
"0.45091152",
"0.45059425",
"0.4501014",
"0.4492821",
"0.4446799",
"0.44453302",
"0.44176167",
"0.4417381",
"0.44113466",
"0.44065922",
"0.43891612",
"0.43873656",
"0.43621662",
"0.4360776",
"0.43600437",
"0.43535838",
"0.43449432",
"0.43215057",
"0.43195644",
"0.43163866",
"0.4313737",
"0.43122166",
"0.43068168",
"0.42649832",
"0.42482495",
"0.42420903",
"0.42401147",
"0.4229452",
"0.42293945",
"0.42290333",
"0.42216733",
"0.42182526",
"0.42121673",
"0.4206539",
"0.41946155",
"0.4187105",
"0.4170945",
"0.41695198",
"0.416057",
"0.41521892",
"0.41438296",
"0.41289818",
"0.41272125",
"0.41253906",
"0.41237497",
"0.41041315",
"0.41037583",
"0.41034436",
"0.40929705",
"0.40886405",
"0.4085789",
"0.4085546",
"0.40836304",
"0.40801147",
"0.407531",
"0.40705058",
"0.40666047",
"0.4059801",
"0.4053407",
"0.4052831",
"0.4051648",
"0.40502256",
"0.40410423",
"0.40407407",
"0.40390825",
"0.40328193",
"0.40296334",
"0.40234235",
"0.40188706",
"0.40159386",
"0.40132284",
"0.4009592",
"0.40025985",
"0.40011102",
"0.3988371",
"0.39854783",
"0.39839083"
] | 0.86572236 | 0 |
MustReadPartials calls ReadPartials and panics on any error | MustReadPartials вызывает ReadPartials и вызывает панику при любой ошибке | func (t *TRoot) MustReadPartials(files ...string) {
var err = t.ReadPartials(files...)
if err != nil {
panic(err)
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (t *TRoot) ReadPartials(files ...string) error {\n\tfor _, file := range files {\n\t\tvar _, err = t.template.ParseFiles(filepath.Join(t.Path, file))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func TestExtractPartialRead(t *testing.T) {\n\trc := mutate.Extract(invalidImage{})\n\tif _, err := io.Copy(io.Discard, io.LimitReader(rc, 1)); err != nil {\n\t\tt.Errorf(\"Could not read one byte from reader\")\n\t}\n\tif err := rc.Close(); err != nil {\n\t\tt.Errorf(\"rc.Close: %v\", err)\n\t}\n}",
"func (r *readerWithStats) MustReadFull(data []byte) {\n\tfs.MustReadData(r.r, data)\n\tr.bytesRead += uint64(len(data))\n}",
"func (r *ReaderAt) MustReadAt(p []byte, off int64) {\n\tif len(p) == 0 {\n\t\treturn\n\t}\n\tif off < 0 {\n\t\tlogger.Panicf(\"off=%d cannot be negative\", off)\n\t}\n\tend := off + int64(len(p))\n\tif len(r.mmapData) == 0 || (len(p) > 8*1024 && !r.isInPageCache(off, end)) {\n\t\t// Read big blocks directly from file.\n\t\t// This could be faster than reading these blocks from mmap,\n\t\t// since it triggers less page faults.\n\t\tn, err := r.f.ReadAt(p, off)\n\t\tif err != nil {\n\t\t\tlogger.Panicf(\"FATAL: cannot read %d bytes at offset %d of file %q: %s\", len(p), off, r.f.Name(), err)\n\t\t}\n\t\tif n != len(p) {\n\t\t\tlogger.Panicf(\"FATAL: unexpected number of bytes read; got %d; want %d\", n, len(p))\n\t\t}\n\t\tif len(r.mmapData) > 0 {\n\t\t\tr.markInPageCache(off, end)\n\t\t}\n\t} else {\n\t\tif off > int64(len(r.mmapData)-len(p)) {\n\t\t\tlogger.Panicf(\"off=%d is out of allowed range [0...%d] for len(p)=%d\", off, len(r.mmapData)-len(p), len(p))\n\t\t}\n\t\tsrc := r.mmapData[off:]\n\t\tif r.isInPageCache(off, end) {\n\t\t\t// It is safe copying the data with copy(), since it is likely it is in the page cache.\n\t\t\t// This is up to 4x faster than copyMmap() below.\n\t\t\tcopy(p, src)\n\t\t} else {\n\t\t\t// The data may be missing in the page cache, so it is better to copy it via cgo trick\n\t\t\t// in order to avoid P stalls in Go runtime.\n\t\t\t// See https://medium.com/@valyala/mmap-in-go-considered-harmful-d92a25cb161d for details.\n\t\t\tcopyMmap(p, src)\n\t\t\tr.markInPageCache(off, end)\n\t\t}\n\t}\n\treadCalls.Inc()\n\treadBytes.Add(len(p))\n}",
"func ReadPartialReport(scope beam.Scope, partialReportFile string) beam.PCollection {\n\tallFiles := ioutils.AddStrInPath(partialReportFile, \"*\")\n\tlines := textio.ReadSdf(scope, allFiles)\n\treturn beam.ParDo(scope, &parseEncryptedPartialReportFn{}, lines)\n}",
"func (e PartialContent) IsPartialContent() {}",
"func (bsr *blockStreamReader) MustInitFromFilePart(path string) {\n\tbsr.reset()\n\n\t// Files in the part are always read without OS cache pollution,\n\t// since they are usually deleted after the merge.\n\tconst nocache = true\n\n\tmetaindexPath := filepath.Join(path, metaindexFilename)\n\tindexPath := filepath.Join(path, indexFilename)\n\tcolumnsHeaderPath := filepath.Join(path, columnsHeaderFilename)\n\ttimestampsPath := filepath.Join(path, timestampsFilename)\n\tfieldValuesPath := filepath.Join(path, fieldValuesFilename)\n\tfieldBloomFilterPath := filepath.Join(path, fieldBloomFilename)\n\tmessageValuesPath := filepath.Join(path, messageValuesFilename)\n\tmessageBloomFilterPath := filepath.Join(path, messageBloomFilename)\n\n\tbsr.ph.mustReadMetadata(path)\n\n\t// Open data readers\n\tmetaindexReader := filestream.MustOpen(metaindexPath, nocache)\n\tindexReader := filestream.MustOpen(indexPath, nocache)\n\tcolumnsHeaderReader := filestream.MustOpen(columnsHeaderPath, nocache)\n\ttimestampsReader := filestream.MustOpen(timestampsPath, nocache)\n\tfieldValuesReader := filestream.MustOpen(fieldValuesPath, nocache)\n\tfieldBloomFilterReader := filestream.MustOpen(fieldBloomFilterPath, nocache)\n\tmessageValuesReader := filestream.MustOpen(messageValuesPath, nocache)\n\tmessageBloomFilterReader := filestream.MustOpen(messageBloomFilterPath, nocache)\n\n\t// Initialize streamReaders\n\tbsr.streamReaders.init(metaindexReader, indexReader, columnsHeaderReader, timestampsReader,\n\t\tfieldValuesReader, fieldBloomFilterReader, messageValuesReader, messageBloomFilterReader)\n\n\t// Read metaindex data\n\tbsr.indexBlockHeaders = mustReadIndexBlockHeaders(bsr.indexBlockHeaders[:0], &bsr.streamReaders.metaindexReader)\n}",
"func (bsr *blockStreamReader) MustInitFromInmemoryPart(mp *inmemoryPart) {\n\tbsr.reset()\n\n\tbsr.ph = mp.ph\n\n\t// Initialize streamReaders\n\tmetaindexReader := mp.metaindex.NewReader()\n\tindexReader := mp.index.NewReader()\n\tcolumnsHeaderReader := mp.columnsHeader.NewReader()\n\ttimestampsReader := mp.timestamps.NewReader()\n\tfieldValuesReader := mp.fieldValues.NewReader()\n\tfieldBloomFilterReader := mp.fieldBloomFilter.NewReader()\n\tmessageValuesReader := mp.messageValues.NewReader()\n\tmessageBloomFilterReader := mp.messageBloomFilter.NewReader()\n\n\tbsr.streamReaders.init(metaindexReader, indexReader, columnsHeaderReader, timestampsReader,\n\t\tfieldValuesReader, fieldBloomFilterReader, messageValuesReader, messageBloomFilterReader)\n\n\t// Read metaindex data\n\tbsr.indexBlockHeaders = mustReadIndexBlockHeaders(bsr.indexBlockHeaders[:0], &bsr.streamReaders.metaindexReader)\n}",
"func MustReadInt(r io.Reader) int {\n\tvar res int\n\t_, err := fmt.Fscanf(r, \"%d\", &res)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to read int: %v\", err))\n\t}\n\treturn res\n}",
"func (f *FakelogicalReader) ReadNotCalled() bool {\n\treturn len(f.ReadCalls) == 0\n}",
"func (_this *StreamingReadBuffer) RequireAndRetry(position int, requiredByteCount int, operation func(positionOffset int)) {\n\toffset := _this.RequireBytes(position, requiredByteCount)\n\toperation(position + offset)\n}",
"func (rm *resourceManager) requiredFieldsMissingFromReadOneInput(\n\tr *resource,\n) bool {\n\treturn rm.customCheckRequiredFieldsMissingMethod(r)\n}",
"func requestBodyRemains(rc io.ReadCloser) bool {\n\tif rc == NoBody {\n\t\treturn false\n\t}\n\tswitch v := rc.(type) {\n\tcase *expectContinueReader:\n\t\treturn requestBodyRemains(v.readCloser)\n\tcase *body:\n\t\treturn v.bodyRemains()\n\tdefault:\n\t\tpanic(\"unexpected type \" + fmt.Sprintf(\"%T\", rc))\n\t}\n}",
"func MustReadAll(r io.Reader) []byte {\n\tall, err := io.ReadAll(r)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn all\n}",
"func (rm *resourceManager) requiredFieldsMissingFromReadOneInput(\n\tr *resource,\n) bool {\n\treturn r.ko.Spec.StageName == nil || r.ko.Spec.APIID == nil\n\n}",
"func (c *Client) GetPartial(bucket, key string, offset, length int64) (rc io.ReadCloser, err error) {\n\tif offset < 0 {\n\t\treturn nil, errors.New(\"invalid negative offset\")\n\t}\n\n\treq := newReq(c.keyURL(bucket, key))\n\tif length >= 0 {\n\t\treq.Header.Set(\"Range\", fmt.Sprintf(\"bytes=%d-%d\", offset, offset+length-1))\n\t} else {\n\t\treq.Header.Set(\"Range\", fmt.Sprintf(\"bytes=%d-\", offset))\n\t}\n\tc.Auth.SignRequest(req)\n\n\tres, err := c.transport().RoundTrip(req)\n\tif err != nil {\n\t\treturn\n\t}\n\tswitch res.StatusCode {\n\tcase http.StatusOK, http.StatusPartialContent:\n\t\treturn res.Body, nil\n\tcase http.StatusNotFound:\n\t\tres.Body.Close()\n\t\treturn nil, os.ErrNotExist\n\tcase http.StatusRequestedRangeNotSatisfiable:\n\t\tres.Body.Close()\n\t\treturn nil, blob.ErrOutOfRangeOffsetSubFetch\n\tdefault:\n\t\tres.Body.Close()\n\t\treturn nil, fmt.Errorf(\"Amazon HTTP error on GET: %d\", res.StatusCode)\n\t}\n}",
"func (_this *StreamingReadBuffer) RequireBytes(position int, byteCount int) (positionOffset int) {\n\tpositionOffset = _this.RequestBytes(position, byteCount)\n\tif byteCount+position+positionOffset > len(_this.Buffer) {\n\t\tpanic(UnexpectedEOD)\n\t}\n\treturn\n}",
"func TestNonFatalRead(t *testing.T) {\n\t// Limit runtime in case of deadlocks\n\tlim := test.TimeOut(time.Second * 20)\n\tdefer lim.Stop()\n\n\texpectedData := []byte(\"expectedData\")\n\n\t// In memory pipe\n\tca, cb := net.Pipe()\n\trequire.NoError(t, cb.Close())\n\n\tconn := &muxErrorConn{ca, []muxErrorConnReadResult{\n\t\t// Non-fatal timeout error\n\t\t{packetio.ErrTimeout, nil},\n\t\t{nil, expectedData},\n\t\t{io.ErrShortBuffer, nil},\n\t\t{nil, expectedData},\n\t\t{io.EOF, nil},\n\t}}\n\n\tm := NewMux(Config{\n\t\tConn: conn,\n\t\tBufferSize: testPipeBufferSize,\n\t\tLoggerFactory: logging.NewDefaultLoggerFactory(),\n\t})\n\n\te := m.NewEndpoint(MatchAll)\n\n\tbuff := make([]byte, testPipeBufferSize)\n\tn, err := e.Read(buff)\n\trequire.NoError(t, err)\n\trequire.Equal(t, buff[:n], expectedData)\n\n\tn, err = e.Read(buff)\n\trequire.NoError(t, err)\n\trequire.Equal(t, buff[:n], expectedData)\n\n\t<-m.closedCh\n\trequire.NoError(t, m.Close())\n\trequire.NoError(t, ca.Close())\n}",
"func (rm *resourceManager) requiredFieldsMissingFromReadOneInput(\n\tr *resource,\n) bool {\n\treturn r.ko.Spec.JobDefinitionName == nil\n\n}",
"func TestMap_GettersPanic(t *testing.T) {\n\tschema := config.Schema{\n\t\t\"foo\": {},\n\t\t\"bar\": {Type: config.Bool},\n\t}\n\n\tm, err := config.Load(schema, nil)\n\tassert.NoError(t, err)\n\n\tassert.Panics(t, func() { m.GetRaw(\"egg\") })\n\tassert.Panics(t, func() { m.GetString(\"bar\") })\n\tassert.Panics(t, func() { m.GetBool(\"foo\") })\n\tassert.Panics(t, func() { m.GetInt64(\"foo\") })\n}",
"func TestPartitionReader__Lazy(t *testing.T) {\n\tengine, _ := open(nil)\n\tpart, _ := initPartition(\"test.partition\", engine)\n\n\tb := make([]byte, 100)\n\n\t// Fill to differentiate it from zero-allocated slices.\n\tfor i := 0; i < len(b); i++ {\n\t\tb[i] = 1\n\t}\n\n\tpart.Write(1, b)\n\tpart.Write(2, b)\n\tpart.Write(3, b)\n\n\t// Clear local cache\n\tpart.segments = make(map[uint64]*segment)\n\n\tr := part.Reader(0, 0)\n\tbuf := make([]byte, 10)\n\n\t// Track iterations\n\ti := 0\n\n\tfor {\n\t\tn, err := r.Read(buf)\n\n\t\t// One segment should be loaded every 10 iterations for this\n\t\t// size buffer.\n\t\tif i%10 == 0 {\n\t\t\tassert.Equal(t, i/10+1, len(part.segments))\n\t\t}\n\n\t\t// Nothing expected for this iteration\n\t\tif i == 30 && (err != io.EOF || n != 0) {\n\t\t\tt.Fatal(\"expected an EOF with zero bytes\")\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\n\t\tif n != len(buf) {\n\t\t\tt.Fatalf(\"expected only 10 bytes to be read, got %d\", n)\n\t\t}\n\n\t\ti += 1\n\t}\n}",
"func (ctx *serverRequestContextImpl) TryReadBody(body interface{}) (bool, error) {\n\tbuf, err := ctx.ReadBodyBytes()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tempty := len(buf) == 0\n\tif !empty {\n\t\terr = json.Unmarshal(buf, body)\n\t\tif err != nil {\n\t\t\treturn true, caerrors.NewHTTPErr(400, caerrors.ErrBadReqBody, \"Invalid request body: %s; body=%s\",\n\t\t\t\terr, string(buf))\n\t\t}\n\t}\n\treturn empty, nil\n}",
"func testFailingRead(c *testContext, flow testFlow) {\n\tc.t.Helper()\n\ttestReadInternal(c, flow, true /* packetShouldBeDropped */)\n}",
"func (f *Find) AllowPartialResults(allowPartialResults bool) *Find {\n\tif f == nil {\n\t\tf = new(Find)\n\t}\n\n\tf.allowPartialResults = &allowPartialResults\n\treturn f\n}",
"func TestReloadWithReadLock_PartialRegisterFailure(t *testing.T) {\n\trequire := require.New(t)\n\n\tresources := initVMRegistryTest(t)\n\n\tfactory1 := vms.NewMockFactory(resources.ctrl)\n\tfactory2 := vms.NewMockFactory(resources.ctrl)\n\tfactory3 := vms.NewMockFactory(resources.ctrl)\n\tfactory4 := vms.NewMockFactory(resources.ctrl)\n\n\tregisteredVms := map[ids.ID]vms.Factory{\n\t\tid1: factory1,\n\t\tid2: factory2,\n\t}\n\n\tunregisteredVms := map[ids.ID]vms.Factory{\n\t\tid3: factory3,\n\t\tid4: factory4,\n\t}\n\n\tresources.mockVMGetter.EXPECT().\n\t\tGet().\n\t\tTimes(1).\n\t\tReturn(registeredVms, unregisteredVms, nil)\n\tresources.mockVMRegisterer.EXPECT().\n\t\tRegisterWithReadLock(gomock.Any(), id3, factory3).\n\t\tTimes(1).\n\t\tReturn(errTest)\n\tresources.mockVMRegisterer.EXPECT().\n\t\tRegisterWithReadLock(gomock.Any(), id4, factory4).\n\t\tTimes(1).\n\t\tReturn(nil)\n\n\tinstalledVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background())\n\trequire.NoError(err)\n\trequire.Len(failedVMs, 1)\n\trequire.ErrorIs(failedVMs[id3], errTest)\n\trequire.Len(installedVMs, 1)\n\trequire.Equal(id4, installedVMs[0])\n}",
"func TestCheckRequiredTemplate_Read_DoesNotSwallowError(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tr := ResourceCheckRequiredTemplate()\n\tresourceData := schema.TestResourceDataRaw(t, r.Schema, nil)\n\tflattenErr := flattenCheckRequiredTemplate(resourceData, &requiredTemplateCheckTest, requiredTemplateCheckProjectID)\n\n\tpipelinesChecksClient := azdosdkmocks.NewMockPipelineschecksextrasClient(ctrl)\n\tclients := &client.AggregatedClient{PipelinesChecksClientExtras: pipelinesChecksClient, Ctx: context.Background()}\n\n\texpectedArgs := pipelineschecksextras.GetCheckConfigurationArgs{\n\t\tId: requiredTemplateCheckTest.Id,\n\t\tProject: &requiredTemplateCheckProjectID,\n\t\tExpand: converter.ToPtr(pipelineschecksextras.CheckConfigurationExpandParameterValues.Settings),\n\t}\n\n\tpipelinesChecksClient.\n\t\tEXPECT().\n\t\tGetCheckConfiguration(clients.Ctx, expectedArgs).\n\t\tReturn(nil, errors.New(\"GetServiceEndpoint() Failed\")).\n\t\tTimes(1)\n\n\terr := r.Read(resourceData, clients)\n\trequire.Contains(t, err.Error(), \"GetServiceEndpoint() Failed\")\n\trequire.Nil(t, flattenErr)\n}",
"func Partial(dst, src *os.File, dstOffset, srcOffset, n int64, fallback bool) error {\n\terr := reflinkRangeInternal(dst, src, dstOffset, srcOffset, n)\n\tif (err != nil) && fallback {\n\t\t_, err = copyFileRange(dst, src, dstOffset, srcOffset, n)\n\t}\n\n\tif (err != nil) && fallback {\n\t\t// seek both src & dst\n\t\treader := io.NewSectionReader(src, srcOffset, n)\n\t\twriter := §ionWriter{w: dst, base: dstOffset}\n\t\t_, err = io.CopyN(writer, reader, n)\n\t}\n\treturn err\n}",
"func ExampleMustAbsorbTrytes() {}",
"func MustReadIn() string {\n\tfor i := 0; i < 3; i++ {\n\t\ttext, err := tryReadStdIn()\n\t\tif err == nil {\n\t\t\treturn strings.TrimSpace(text)\n\t\t}\n\t\t// TODO Theme\n\t\tfmt.Printf(\"Error while reading stdin:: %s. Failed %d/3\\n\", err, i)\n\t}\n\tfmt.Println(\"Could not read stdin. Aborting...\")\n\tos.Exit(1)\n\treturn \"\"\n}",
"func SKIPPEDTestAccessAfterUnmap(t *testing.T) {\n\ttmpDir, _ := ioutil.TempDir(\"\", \"mossMMap\")\n\tdefer os.RemoveAll(tmpDir)\n\n\tf, err := os.Create(tmpDir + string(os.PathSeparator) + \"test.file\")\n\tif err != nil {\n\t\tt.Errorf(\"expected open file to work, err: %v\", err)\n\t}\n\n\tdefer f.Close()\n\n\toffset := 1024 * 1024 * 1024 // 1 GB.\n\n\tf.WriteAt([]byte(\"hello\"), int64(offset))\n\n\tvar mm mmap.MMap\n\n\tmm, err = mmap.Map(f, mmap.RDONLY, 0)\n\tif err != nil {\n\t\tt.Errorf(\"expected mmap to work, err: %v\", err)\n\t}\n\n\tx := mm[offset : offset+5]\n\n\tif string(x) != \"hello\" {\n\t\tt.Errorf(\"expected hello\")\n\t}\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Println(\"Recovered in f\", r)\n\t\t} else {\n\t\t\tt.Errorf(\"expected recover from panic\")\n\t\t}\n\t}()\n\n\tmm.Unmap()\n\n\t/*\n\t\t\tThe following access of x results in a segfault, like...\n\n\t\t\t\tunexpected fault address 0x4060c000\n\t\t\t\tfatal error: fault\n\t\t\t\t[signal 0xb code=0x1 addr=0x4060c000 pc=0xb193f]\n\n\t\t The recover() machinery doesn't handle this situation, however,\n\t\t as it's not a normal kind of panic()\n\t*/\n\tif x[0] != 'h' {\n\t\tt.Errorf(\"expected h, but actually expected a segfault\")\n\t}\n\n\tt.Errorf(\"expected segfault, but instead unmmapped mem access worked\")\n}",
"func (c *readConverter) ensureLeftover() error {\n\tif len(c.leftover) > 0 {\n\t\treturn nil\n\t}\n\tpayload, err := c.cr.ReadChunk()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.leftover = payload\n\treturn nil\n}",
"func (*offsetPageInfoImpl) PartialCount(p graphql.ResolveParams) (bool, error) {\n\tpage := p.Source.(offsetPageInfo)\n\treturn page.partialCount, nil\n}",
"func FuzzReadSubTreesNoProof(data []byte) int {\n\tbuildAndCompareTreesFromFuzz(data, math.MaxUint64)\n\tif len(data) > 2 {\n\t\treturn 1\n\t}\n\treturn 0\n}",
"func MustReadFile(path string) []byte {\n\tdata, err := openData(path)\n\tif err != nil {\n\t\tlog.Panicf(\"reading %q: %v\", path, err)\n\t}\n\treturn data\n}",
"func anyNonRead(source *Source) bool {\n\tfor _, entry := range source.Entries {\n\t\tif !entry.Read {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (se *StateEngine) Partial(addr string) error {\n\tpoints, err := se.prepare(addr)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn se.trajectory(points, true)\n}",
"func TestMultipartStreamReadahead(t *testing.T) {\n\ttestBody1 := `\nThis is a multi-part message. This line is ignored.\n--MyBoundary\nfoo-bar: baz\n\nBody\n--MyBoundary\n`\n\ttestBody2 := `foo-bar: bop\n\nBody 2\n--MyBoundary--\n`\n\tdone1 := make(chan struct{})\n\treader := NewReader(\n\t\tio.MultiReader(\n\t\t\tstrings.NewReader(testBody1),\n\t\t\t&sentinelReader{done1},\n\t\t\tstrings.NewReader(testBody2)),\n\t\t\"MyBoundary\")\n\n\tvar i int\n\treadPart := func(hdr textproto.MIMEHeader, body string) {\n\t\tpart, err := reader.NextPart()\n\t\tif part == nil || err != nil {\n\t\t\tt.Fatalf(\"Part %d: NextPart failed: %v\", i, err)\n\t\t}\n\n\t\tif !reflect.DeepEqual(part.Header, hdr) {\n\t\t\tt.Errorf(\"Part %d: part.Header = %v, want %v\", i, part.Header, hdr)\n\t\t}\n\t\tdata, err := io.ReadAll(part)\n\t\texpectEq(t, body, string(data), fmt.Sprintf(\"Part %d body\", i))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Part %d: ReadAll failed: %v\", i, err)\n\t\t}\n\t\ti++\n\t}\n\n\treadPart(textproto.MIMEHeader{\"Foo-Bar\": {\"baz\"}}, \"Body\")\n\n\tselect {\n\tcase <-done1:\n\t\tt.Errorf(\"Reader read past second boundary\")\n\tdefault:\n\t}\n\n\treadPart(textproto.MIMEHeader{\"Foo-Bar\": {\"bop\"}}, \"Body 2\")\n}",
"func (s *LoaderSuite) TestLoadRubbish() {\n\tif testing.Short() {\n\t\ts.T().Skip()\n\t}\n\t_, err := s.loader.Load(context.TODO(), \"ajklfjkjva\")\n\ts.NotNil(err)\n}",
"func loadPartials() (map[string]string, error) {\n\tg := make(map[string]string)\n\t//load resources from paths\n\tfor key, path := range paths {\n\t\tbody, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tg[key] = string(body)\n\t}\n\treturn g, nil\n}",
"func (lds *LeakyDataStore) SetFirstTimeViewCustomPartialError(val bool) {\n\tlds.config.FirstTimeViewCustomPartialError = val\n}",
"func (p *SafePast) SafeReadSpec(name string) ([]*messages.RumorMessage, bool) {\n\tp.mux.RLock()\n\tdefer p.mux.RUnlock()\n\n\tres, ok := p.messagesList[name]\n\treturn res, ok\n}",
"func (m *DigestHolderMock) MinimockReadInspect() {\n\tfor _, e := range m.ReadMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Errorf(\"Expected call to DigestHolderMock.Read with params: %#v\", *e.params)\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.ReadMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterReadCounter) < 1 {\n\t\tif m.ReadMock.defaultExpectation.params == nil {\n\t\t\tm.t.Error(\"Expected call to DigestHolderMock.Read\")\n\t\t} else {\n\t\t\tm.t.Errorf(\"Expected call to DigestHolderMock.Read with params: %#v\", *m.ReadMock.defaultExpectation.params)\n\t\t}\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcRead != nil && mm_atomic.LoadUint64(&m.afterReadCounter) < 1 {\n\t\tm.t.Error(\"Expected call to DigestHolderMock.Read\")\n\t}\n}",
"func TestReadEmptyAtEOF(t *testing.T) {\n\tb := new(Builder)\n\tslice := make([]byte, 0)\n\tn, err := b.Read(slice)\n\tif err != nil {\n\t\tt.Errorf(\"read error: %v\", err)\n\t}\n\tif n != 0 {\n\t\tt.Errorf(\"wrong count; got %d want 0\", n)\n\t}\n}",
"func (tr *Reader) skipUnread() {\n\tnr := tr.nb + tr.pad // number of bytes to skip\n\ttr.nb, tr.pad = 0, 0\n\tif sr, ok := tr.r.(io.Seeker); ok {\n\t\tif _, err := sr.Seek(nr, os.SEEK_CUR); err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\t_, tr.err = io.CopyN(ioutil.Discard, tr.r, nr)\n}",
"func (b *RawBackend) handleRawRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {\n\tpath := data.Get(\"path\").(string)\n\n\tif b.recoveryMode {\n\t\tb.logger.Info(\"reading\", \"path\", path)\n\t}\n\n\t// Prevent access of protected paths\n\tfor _, p := range protectedPaths {\n\t\tif strings.HasPrefix(path, p) {\n\t\t\terr := fmt.Sprintf(\"cannot read '%s'\", path)\n\t\t\treturn logical.ErrorResponse(err), logical.ErrInvalidRequest\n\t\t}\n\t}\n\n\t// Run additional checks if needed\n\tif err := b.checkRaw(path); err != nil {\n\t\tb.logger.Warn(err.Error(), \"path\", path)\n\t\treturn logical.ErrorResponse(\"cannot read '%s'\", path), logical.ErrInvalidRequest\n\t}\n\n\tentry, err := b.barrier.Get(ctx, path)\n\tif err != nil {\n\t\treturn handleErrorNoReadOnlyForward(err)\n\t}\n\tif entry == nil {\n\t\treturn nil, nil\n\t}\n\n\t// Run this through the decompression helper to see if it's been compressed.\n\t// If the input contained the compression canary, `outputBytes` will hold\n\t// the decompressed data. If the input was not compressed, then `outputBytes`\n\t// will be nil.\n\toutputBytes, _, err := compressutil.Decompress(entry.Value)\n\tif err != nil {\n\t\treturn handleErrorNoReadOnlyForward(err)\n\t}\n\n\t// `outputBytes` is nil if the input is uncompressed. In that case set it to the original input.\n\tif outputBytes == nil {\n\t\toutputBytes = entry.Value\n\t}\n\n\tresp := &logical.Response{\n\t\tData: map[string]interface{}{\n\t\t\t\"value\": string(outputBytes),\n\t\t},\n\t}\n\treturn resp, nil\n}",
"func (s *ss) mustReadRune() (r rune) {\n\tr = s.getRune()\n\tif r == eof {\n\t\ts.error(io.ErrUnexpectedEOF)\n\t}\n\treturn\n}",
"func (r *Search) AllowPartialSearchResults(allowpartialsearchresults bool) *Search {\n\tr.values.Set(\"allow_partial_search_results\", strconv.FormatBool(allowpartialsearchresults))\n\n\treturn r\n}",
"func (fi *File) CtxReadFull(ctx context.Context, b []byte) (int, error) {\n\tfi.Lock()\n\tdefer fi.Unlock()\n\treturn fi.mod.CtxReadFull(ctx, b)\n}",
"func (suite *IntPartTestSuite) TestReadToZeroLengthBuffer() {\n\tpart, _ := newIntPartFromString(\"9\")\n\tbuff := make([]byte, 0, 0)\n\tcount, _ := part.Read(buff)\n\tsuite.Equal(0, count)\n}",
"func (s *Server) consistentRead() error {\n\tdefer metrics.MeasureSince([]string{\"rpc\", \"consistentRead\"}, time.Now())\n\tfuture := s.raft.VerifyLeader()\n\tif err := future.Error(); err != nil {\n\t\treturn err //fail fast if leader verification fails\n\t}\n\t// poll consistent read readiness, wait for up to RPCHoldTimeout milliseconds\n\tif s.isReadyForConsistentReads() {\n\t\treturn nil\n\t}\n\tjitter := lib.RandomStagger(s.config.RPCHoldTimeout / jitterFraction)\n\tdeadline := time.Now().Add(s.config.RPCHoldTimeout)\n\n\tfor time.Now().Before(deadline) {\n\n\t\tselect {\n\t\tcase <-time.After(jitter):\n\t\t\t// Drop through and check before we loop again.\n\n\t\tcase <-s.shutdownCh:\n\t\t\treturn fmt.Errorf(\"shutdown waiting for leader\")\n\t\t}\n\n\t\tif s.isReadyForConsistentReads() {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn structs.ErrNotReadyForConsistentReads\n}",
"func (m *MockIOInterface) ReadFull(r io.Reader, buf []byte) (int, error) {\n\tret := m.ctrl.Call(m, \"ReadFull\", r, buf)\n\tret0, _ := ret[0].(int)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (f *FakelogicalReader) ReadCalledN(n int) bool {\n\treturn len(f.ReadCalls) >= n\n}",
"func CheckGetRaw(raw *Raw, fileLength int64) error {\n\t// if raw.Length < 0 ,read All data\n\tif raw.Offset < 0 {\n\t\treturn errors.Wrapf(cdnerrors.ErrInvalidValue, \"the offset: %d is a negative integer\", raw.Offset)\n\t}\n\tif raw.Length < 0 {\n\t\treturn errors.Wrapf(cdnerrors.ErrInvalidValue, \"the length: %d is a negative integer\", raw.Length)\n\t}\n\tif fileLength < raw.Offset {\n\t\treturn errors.Wrapf(cdnerrors.ErrInvalidValue, \"the offset: %d is lager than the file length: %d\", raw.Offset, fileLength)\n\t}\n\n\tif fileLength < (raw.Offset + raw.Length) {\n\t\treturn errors.Wrapf(cdnerrors.ErrInvalidValue, \"the offset: %d and length: %d is lager than the file length: %d\", raw.Offset, raw.Length, fileLength)\n\t}\n\treturn nil\n}",
"func (mr *MockIOInterfaceMockRecorder) ReadFull(r, buf interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ReadFull\", reflect.TypeOf((*MockIOInterface)(nil).ReadFull), r, buf)\n}",
"func (f *FailingReader) Read(p []byte) (n int, err error) {\n\treturn 0, errors.New(\"Simulated read error\")\n}",
"func canRetry(args interface{}, err error) bool {\n\t// No leader errors are always safe to retry since no state could have\n\t// been changed.\n\tif structs.IsErrNoLeader(err) {\n\t\treturn true\n\t}\n\n\t// Reads are safe to retry for stream errors, such as if a server was\n\t// being shut down.\n\tinfo, ok := args.(structs.RPCInfo)\n\tif ok && info.IsRead() && lib.IsErrEOF(err) {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func testRead(c *testContext, flow testFlow) {\n\tc.t.Helper()\n\ttestReadInternal(c, flow, false /* packetShouldBeDropped */)\n}",
"func isAllowedPartialIndexColType(columnTableDef *tree.ColumnTableDef) bool {\n\tswitch fam := columnTableDef.Type.(*types.T).Family(); fam {\n\tcase types.BoolFamily, types.IntFamily, types.FloatFamily, types.DecimalFamily,\n\t\ttypes.StringFamily, types.DateFamily, types.TimeFamily, types.TimeTZFamily,\n\t\ttypes.TimestampFamily, types.TimestampTZFamily, types.BytesFamily:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}",
"func TestMultiReaderCopy(t *testing.T) {\n\tslice := []Reader{strings.NewReader(\"hello world\")}\n\tr := MultiReader(slice...)\n\tslice[0] = nil\n\tdata, err := ReadAll(r)\n\tif err != nil || string(data) != \"hello world\" {\n\t\tt.Errorf(\"ReadAll() = %q, %v, want %q, nil\", data, err, \"hello world\")\n\t}\n}",
"func (c *poolConn) ReadPartSafe() ([]byte, error) {\n\ti := 0\n\tsign := 0\n\tresult := make([]byte, 0)\n\tfor sign != 2 {\n\t\tb, err := c.ReadOneBuffer()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif c.buffer.index >= c.buffer.size {\n\t\t\tc.mustRead = true\n\t\t}\n\n\t\t//judge the end is \"\\r\\n\"\n\t\tif sign == 0 {\n\t\t\tif b == '\\r' {\n\t\t\t\tsign++\n\t\t\t}\n\t\t} else if sign == 1 {\n\t\t\tif b == '\\n' {\n\t\t\t\tsign++\n\t\t\t} else {\n\t\t\t\tsign = 0\n\t\t\t}\n\t\t}\n\t\tresult = append(result, b)\n\t\ti++\n\t}\n\treturn result[0: len(result)-2], nil\n}",
"func Must(err error) bool {\n\tif err != nil {\n\t\tif panicOnErrorMode {\n\t\t\tpanic(err)\n\t\t} else {\n\t\t\tklog.Errorf(\"%s\", err)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (c *DChSectionReader) TrySectionReader() (dat *io.SectionReader, open bool) {\n\tc.req <- struct{}{}\n\tdat, open = <-c.dat\n\treturn dat, open\n}",
"func checkReader(t *testing.T, r zbuf.Reader, checkReads bool) {\n\tfor expect := 3; expect <= 6; expect++ {\n\t\trec, err := r.Read()\n\t\trequire.NoError(t, err)\n\n\t\tv, err := rec.AccessInt(\"value\")\n\t\trequire.NoError(t, err)\n\n\t\trequire.Equal(t, int64(expect), v, \"Got expected record value\")\n\t}\n\n\trec, err := r.Read()\n\trequire.NoError(t, err)\n\trequire.Nil(t, rec, \"Reached eof after last record in time span\")\n\n\tif checkReads {\n\t\trr, ok := r.(*rangeReader)\n\t\trequire.True(t, ok, \"Can get read stats from index reader\")\n\t\trequire.LessOrEqual(t, rr.reads(), uint64(6), \"Indexed reader did not read the entire file\")\n\t}\n}",
"func MustReadLines(filename string) []string {\n\ts, err := ReadLines(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn s\n}",
"func tryReadEvent() {\n\talldone := false\n\t// procn := 0\n\tfor !alldone {\n\t\tdone1 := tryReadUifnEvent()\n\n\t\tif !baseInfoGot {\n\t\t\tlog.Println(\"baseInfoGot is not set, not need other works.\")\n\t\t\treturn\n\t\t}\n\n\t\tdone2 := tryReadContactEvent()\n\t\tdone3 := tryReadMessageEvent()\n\t\tdone4 := tryRecvIntentMessageEvent()\n\n\t\talldone = done1 && done2 && done3 && done4\n\t}\n}",
"func (recBuf *recBuf) bumpRepeatedLoadErr(err error) {\n\trecBuf.mu.Lock()\n\tdefer recBuf.mu.Unlock()\n\tif len(recBuf.batches) == 0 {\n\t\treturn\n\t}\n\tbatch0 := recBuf.batches[0]\n\tbatch0.tries++\n\tif batch0.tries > recBuf.cl.cfg.retries {\n\t\trecBuf.lockedFailAllRecords(err)\n\t}\n}",
"func (reqParams *ReqParams) readAny(resp *http.Response, out any) (err error) {\n\tdebug.Assert(out != nil)\n\tif err = reqParams.checkResp(resp); err != nil || resp.StatusCode != http.StatusOK {\n\t\treturn\n\t}\n\t// decode response\n\tif resp.Header.Get(cos.HdrContentType) == cos.ContentMsgPack {\n\t\tdebug.Assert(cap(reqParams.buf) > cos.KiB) // caller must allocate\n\t\tr := msgp.NewReaderBuf(resp.Body, reqParams.buf)\n\t\terr = out.(msgp.Decodable).DecodeMsg(r)\n\t} else {\n\t\terr = jsoniter.NewDecoder(resp.Body).Decode(out)\n\t}\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to decode response: %v -> %T\", err, out)\n\t}\n\treturn\n}",
"func (p *combined) NotReady(err error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tfor _, probe := range p.probes {\n\t\tprobe.NotReady(err)\n\t}\n}",
"func (s *Stream) willRead(n uint64) error {\n\ts.kind = -1 // rearm / re-initialize Kind\n\tif len(s.stack) > 0 {\n\t\ttos := s.stack[len(s.stack)-1]\n\t\t// read size cannot greater than the size of the list\n\t\tif n > tos.size-tos.pos {\n\t\t\treturn ErrElemTooLarge\n\t\t}\n\t\t// change the list position\n\t\ts.stack[len(s.stack)-1].pos += n\n\t}\n\tif s.limited {\n\n\t\tif n > s.remaining {\n\t\t\treturn ErrValueTooLarge\n\t\t}\n\t\ts.remaining -= n\n\t}\n\treturn nil\n}",
"func Must(err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\tpanic(err)\n}",
"func (fr *fieldReader) maybeReadNextBlock() bool {\n\tif fr.fb.remaining > 0 {\n\t\treturn true\n\t}\n\tif fr.field == gbam.FieldCoord {\n\t\tvlog.Fatal(\"use maybeReadNextCoordBlock instead\")\n\t}\n\treturn fr.readNextBlock()\n}",
"func mustReadFile(filename string) []byte {\n\tb, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn b\n}",
"func MustLoadTemplate(filename string) *raymond.Template {\n\ttpl, err := raymond.ParseFile(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn tpl\n}",
"func (fc finderClient) FullReads(ctx context.Context,\n\thost, index, shard string,\n\tids []strfmt.UUID,\n) ([]objects.Replica, error) {\n\tn := len(ids)\n\trs, err := fc.cl.FetchObjects(ctx, host, index, shard, ids)\n\tif m := len(rs); err == nil && n != m {\n\t\terr = fmt.Errorf(\"malformed full read response: length expected %d got %d\", n, m)\n\t}\n\treturn rs, err\n}",
"func TestReadHeaderError(t *testing.T) {\n\tmockTr := new(mockTTransport)\n\tmockTr.readError = errors.New(\"error\")\n\ttr := NewTFramedTransport(mockTr)\n\n\tbuff := make([]byte, len(frame)-4)\n\tn, err := tr.Read(buff)\n\n\tassert.Equal(t, mockTr.readError, err)\n\tassert.Equal(t, 0, n)\n}",
"func MustReadFile(path string, doGzip bool) string {\n body, err := ReadFile(path, doGzip)\n if err != nil {\n panic(err)\n }\n \n return body\n}",
"func (_f8 *FakelogicalReader) ReadCalledOnceWith(path string) bool {\n\tvar count int\n\tfor _, call := range _f8.ReadCalls {\n\t\tif reflect.DeepEqual(call.Parameters.Path, path) {\n\t\t\tcount++\n\t\t}\n\t}\n\n\treturn count == 1\n}",
"func TestHandlerReadingNilBodySuccess(t *testing.T) {\n\th := otelhttp.NewHandler(\n\t\thttp.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tif r.Body != nil {\n\t\t\t\t_, err := ioutil.ReadAll(r.Body)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t}), \"test_handler\",\n\t)\n\n\tr, err := http.NewRequest(http.MethodGet, \"http://localhost/\", nil)\n\trequire.NoError(t, err)\n\n\trr := httptest.NewRecorder()\n\th.ServeHTTP(rr, r)\n\tassert.Equal(t, 200, rr.Result().StatusCode)\n}",
"func (s *smlReader) readPreliminary() error {\n\tfor {\n\t\t_, rc, err := s.readRune()\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\treturn err\n\t\tcase rc == rcEOF:\n\t\t\treturn fmt.Errorf(\"unexpected end of file while reading preliminary\")\n\t\tcase rc == rcOpen:\n\t\t\treturn nil\n\t\t}\n\t}\n\t// Unreachable.\n\tpanic(\"unreachable\")\n}",
"func (p *parallelReader) canDecode(buf [][]byte) bool {\n\tbufCount := 0\n\tfor _, b := range buf {\n\t\tif b != nil {\n\t\t\tbufCount++\n\t\t}\n\t}\n\treturn bufCount >= p.dataBlocks\n}",
"func TestGetDataFromUrlBodyReadError(t *testing.T) {\n\tdefer gock.Off()\n\n\tapiUrl := \"http://example.com\"\n\tapiPath := \"status\"\n\n\tgock.New(apiUrl).\n\t\tGet(apiPath).\n\t\tReply(200).\n\t\tBodyString(\"\")\n\n\t_, err := getDataFromURL(apiUrl+\"/\"+apiPath, func(r io.Reader) ([]byte, error) {\n\t\treturn nil, errors.New(\"IO Reader error occurred\")\n\t})\n\n\tassert.Error(t, err)\n}",
"func SuccessfulReadPreloginRequest(io.ReadWriteCloser) (map[uint8][]byte, error) {\n\treturn nil, nil\n}",
"func TestReloadWithReadLock_GetNewVMsFails(t *testing.T) {\n\trequire := require.New(t)\n\n\tresources := initVMRegistryTest(t)\n\n\tresources.mockVMGetter.EXPECT().Get().Times(1).Return(nil, nil, errTest)\n\n\tinstalledVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background())\n\trequire.ErrorIs(err, errTest)\n\trequire.Empty(installedVMs)\n\trequire.Empty(failedVMs)\n}",
"func (b brokenReader) Read(p []byte) (n int, err error) {\n\treturn 0, errors.New(\"brokenReader is always broken.\")\n}",
"func readFull(r io.Reader, buf []byte) (n int, err error) {\n\tfor n < len(buf) && err == nil {\n\t\tvar nn int\n\t\tnn, err = r.Read(buf[n:])\n\t\tn += nn\n\t}\n\tif n == len(buf) {\n\t\terr = nil\n\t}\n\treturn\n}",
"func (m *MigrateManager) mayRecover() error {\n\t// It may be not need to do anything now.\n\treturn nil\n}",
"func (r *Responder) PartialContent() { r.write(http.StatusPartialContent) }",
"func TestPartialNetworkPartition2(t *testing.T) {\n\tservers := 10\n\tcfg := makeConfig(t, servers, false)\n\tdefer cfg.cleanup()\n\n\t// XPaxos servers (ID = 2, 4, 6) fail to send RPCs 75%, 50%, and 25% of the time\n\tcfg.net.SetFaultRate(2, 75)\n\tcfg.net.SetFaultRate(4, 50)\n\tcfg.net.SetFaultRate(6, 25)\n\n\tfmt.Println(\"Test: Partial Network Partition - Single Partial Failure (t>1)\")\n\n\titers := 3\n\tfor i := 0; i < iters; i++ {\n\t\tcfg.client.Propose(nil)\n\t\tcomparePrepareSeqNums(cfg)\n\t\tcompareExecuteSeqNums(cfg)\n\t\tcomparePrepareLogEntries(cfg)\n\t\tcompareCommitLogEntries(cfg)\n\t}\n}",
"func RenderPartial(name string, w http.ResponseWriter, data interface{}) error {\n\tt, err := ParseTemplate(name, true)\n\tif err != nil {\n\t\tlog.Error().Str(\"module\", \"web\").Str(\"path\", name).Err(err).\n\t\t\tMsg(\"Error in template\")\n\t\treturn err\n\t}\n\tw.Header().Set(\"Expires\", \"-1\")\n\treturn t.Execute(w, data)\n}",
"func (requestManager *RequestManager) MustView(chain *solo.Chain, contractName string,\n\tfunctionName string, params ...interface{}) dict.Dict {\n\tresponse, err := chain.CallView(contractName, functionName, params...)\n\trequire.NoError(requestManager.env.T, err)\n\treturn response\n}",
"func Must(err error) {\n\tif err != nil {\n\t\tDie(err)\n\t}\n}",
"func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}",
"func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}",
"func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}",
"func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}",
"func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}",
"func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}",
"func ReadFull(f io.Reader, buf []byte) int {\n\tn, err := io.ReadFull(f, buf)\n\tAbortIf(err)\n\treturn n\n}",
"func MustLoad(filename string) []byte {\n\tfs, err := Assets.Open(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tres, err := ioutil.ReadAll(fs)\n\tdefer fs.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn res\n}",
"func (c *Client) RenterStreamPartialGet(siaPath string, start, end uint64) (resp []byte, err error) {\n\tsiaPath = strings.TrimPrefix(siaPath, \"/\")\n\tresp, err = c.getRawPartialResponse(\"/renter/stream/\"+siaPath, start, end)\n\treturn\n}"
] | [
"0.60439926",
"0.5645817",
"0.54828453",
"0.5442675",
"0.5392826",
"0.52395606",
"0.52249944",
"0.51955503",
"0.5156704",
"0.51566005",
"0.5084639",
"0.50834095",
"0.5021623",
"0.49996892",
"0.49993837",
"0.499132",
"0.49418923",
"0.48929805",
"0.4889833",
"0.48810267",
"0.48584732",
"0.4793955",
"0.47936738",
"0.47854337",
"0.4781429",
"0.47709447",
"0.47600365",
"0.4747836",
"0.47014168",
"0.46904",
"0.46735048",
"0.46710396",
"0.46658245",
"0.46637648",
"0.46420336",
"0.4633891",
"0.4585558",
"0.45847473",
"0.45781246",
"0.45709994",
"0.45660377",
"0.45506236",
"0.45470586",
"0.4532117",
"0.45302874",
"0.45236892",
"0.45107448",
"0.4510669",
"0.45102766",
"0.4505071",
"0.45018974",
"0.44991195",
"0.44973442",
"0.4497264",
"0.44864106",
"0.44799656",
"0.4475338",
"0.4473738",
"0.44684204",
"0.4466889",
"0.44652545",
"0.44529223",
"0.44521552",
"0.44476104",
"0.44441757",
"0.44406378",
"0.4436293",
"0.44360167",
"0.4436009",
"0.44358024",
"0.4434769",
"0.44326913",
"0.4432604",
"0.44109473",
"0.4410246",
"0.44101736",
"0.44072515",
"0.44008407",
"0.43925732",
"0.43919775",
"0.43910816",
"0.43892115",
"0.43792304",
"0.4377913",
"0.43693456",
"0.4365525",
"0.43607473",
"0.43587846",
"0.43581024",
"0.43545407",
"0.4351016",
"0.43486026",
"0.43486026",
"0.43486026",
"0.43486026",
"0.43486026",
"0.43486026",
"0.4347951",
"0.4341351",
"0.43401727"
] | 0.7883434 | 0 |
NewEnvironment creates new type environment with current analysis pass. | NewEnvironment создает новую среду типов с текущим проходом анализа. | func NewEnvironment(pass *analysis.Pass) *Environment {
return &Environment{
//TODO make it private and reject object named __val
ExplicitRefinementMap: map[types.Object]types.Type{},
ImplicitRefinementMap: map[types.Object]types.Type{},
funArgRefinementMap: map[string]types.Type{},
Scope: nil,
Pos: token.NoPos,
Pass: pass,
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func NewEnvironment(name string, parent *Environment) *Environment {\n\treturn &Environment{\n\t\tname: name,\n\t\tparent: parent,\n\t\tdict: make(map[string]*Symbol),\n\t}\n}",
"func NewEnvironment() *Environment {\n\ts := make(map[string]Object)\n\treturn &Environment{store: s, outer: nil}\n}",
"func NewEnvironment(ctx *pulumi.Context,\n\tname string, args *EnvironmentArgs, opts ...pulumi.ResourceOption) (*Environment, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.EnvironmentId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'EnvironmentId'\")\n\t}\n\tif args.InfrastructureSpec == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'InfrastructureSpec'\")\n\t}\n\tif args.LakeId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'LakeId'\")\n\t}\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"environmentId\",\n\t\t\"lakeId\",\n\t\t\"location\",\n\t\t\"project\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Environment\n\terr := ctx.RegisterResource(\"google-native:dataplex/v1:Environment\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func NewEnvironment() *Environment {\n\ts := make(map[string]Object)\n\treturn &Environment{store: s}\n}",
"func NewEnvironment() Environment {\n\tvalues := make(map[string]interface{})\n\treturn Environment{Values: values}\n}",
"func NewEnvironment(input Environment) *Environment {\n\treturn &input\n}",
"func NewEnvironment() *Environment {\n\ts := make(map[string]Variable)\n\treturn &Environment{store: s, outer: nil}\n}",
"func NewEnvironment() *Environment {\n\tptr := C.m3_NewEnvironment()\n\treturn &Environment{\n\t\tptr: (EnvironmentT)(ptr),\n\t}\n}",
"func NewEnvironment() *Environment {\n\tptr := C.m3_NewEnvironment()\n\treturn &Environment{\n\t\tptr: (EnvironmentT)(ptr),\n\t}\n}",
"func (e *Env) NewEnv() *Env {\n\treturn &Env{\n\t\tenv: make(map[string]interface{}),\n\t\tparent: e,\n\t\tbuiltin: e.builtin,\n\t\tglobal: e.global,\n\t\tfuncArg: make(map[string]interface{}),\n\t\t//importFunc: e.importFunc,\n\t\tfileInfo: e.fileInfo,\n\t}\n}",
"func NewEnvironment() *Environment {\n\tvar env Environment\n\tenv = Environment{\n\t\tvars{\n\t\t\t\"+\": add,\n\t\t\t\"-\": sub,\n\t\t\t\"*\": mult,\n\t\t\t\"/\": div,\n\t\t\t\"<=\": lteq,\n\t\t\t\"equal?\": eq,\n\t\t\t\"not\": not,\n\t\t\t\"and\": and,\n\t\t\t\"or\": or,\n\t\t\t\"cons\": construct,\n\t\t\t\"car\": head,\n\t\t\t\"cdr\": tail,\n\t\t\t\"list\": Eval(parser.Parse(\"(lambda z z)\"), &env),\n\t\t},\n\t\tnil}\n\treturn &env\n}",
"func NewEnvironment() *Environment {\n\treturn &Environment{\n\t\tdata: map[data.Name]Namespace{},\n\t}\n}",
"func NewEnvironment(env ...string) *Environment {\n\te := Environment{\n\t\tHidden: &Environment{},\n\t}\n\tfor _, keyvalue := range env {\n\t\tpair := strings.SplitN(keyvalue, \"=\", 2)\n\t\te.Add(pair[0], pair[1])\n\t}\n\n\treturn &e\n}",
"func NewEnv(context *libcoap.Context) *Env {\n return &Env{\n context,\n nil,\n make(chan Event, 32),\n nil,\n }\n}",
"func NewEnvironment(name string) *Environment {\n\tthis := Environment{}\n\tthis.Name = name\n\treturn &this\n}",
"func newEnv(envPath, provider, name, tfStatePath string, logger hclog.Logger) (*environment, error) {\n\t// Make sure terraform is on the PATH\n\ttf, err := exec.LookPath(\"terraform\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to lookup terraform binary: %v\", err)\n\t}\n\n\tlogger = logger.Named(\"provision\").With(\"provider\", provider, \"name\", name)\n\n\t// set the path to the terraform module\n\ttfPath := path.Join(envPath, provider, name)\n\tlogger.Debug(\"using tf path\", \"path\", tfPath)\n\tif _, err := os.Stat(tfPath); os.IsNotExist(err) {\n\t\treturn nil, fmt.Errorf(\"failed to lookup terraform configuration dir %s: %v\", tfPath, err)\n\t}\n\n\t// set the path to state file\n\ttfState := path.Join(tfStatePath, fmt.Sprintf(\"e2e.%s.%s.tfstate\", provider, name))\n\n\tenv := &environment{\n\t\tprovider: provider,\n\t\tname: name,\n\t\ttf: tf,\n\t\ttfPath: tfPath,\n\t\ttfState: tfState,\n\t\tlogger: logger,\n\t}\n\treturn env, nil\n}",
"func NewEnvironment(jsonData string) (*Environment, error) {\n\t// initialize env with input data\n\tenv := new(Environment)\n\terr := serialize.CopyFromJSON(jsonData, env)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn env, nil\n}",
"func NewEnv(t *testing.T) *Env {\n\treturn &Env{t, make(chan struct{}), sync.Mutex{}, make([]string, 0)}\n}",
"func New(env *Environment) *Environment {\n\treturn NewSized(env, 0)\n}",
"func NewEnvironment() *Environment {\n\tenv := &Environment{}\n\tenv.SetUp()\n\treturn env\n}",
"func NewEnvironment() *Environment {\n\treturn &Environment{\n\t\tprotos: make(map[string]Fetcher),\n\t\torigins: make(map[string]Fetcher),\n\t}\n}",
"func NewEnvironment() *Environment {\n\tvm := NewVM()\n\tenv := &Environment{\n\t\tVM: vm,\n\t}\n\treturn env\n}",
"func NewEnv(files []string) *Env {\n\tglobal := make(map[string]interface{})\n\tglobal[\"ENVIRON\"] = getEnvVars()\n\n\treturn &Env{\n\t\tenv: make(map[string]interface{}),\n\t\tparent: nil,\n\t\tbuiltin: newBuiltIn(files),\n\t\tglobal: global,\n\t\tfuncArg: make(map[string]interface{}),\n\t\t//importFunc: make(map[string]func(*Env) (reflect.Value, error)),\n\t\tfileInfo: &FileInfo{\n\t\t\tfiles: files,\n\t\t\tcurFileIndex: 0,\n\t\t\treadCloser: make(map[string]*io.ReadCloser),\n\t\t\tscanner: make(map[string]*bufio.Scanner),\n\t\t},\n\t}\n}",
"func NewEnvironment() *Environment {\n\tenv := Environment{\n\t\tstate: lua.NewState(),\n\t}\n\tluajson.Preload(env.state)\n\n\treturn &env\n}",
"func newTestEnv(ctx context.Context, t *testing.T, pipelineInfo *pps.PipelineInfo, realEnv *realenv.RealEnv) *testEnv {\n\tlogger := logs.New(pctx.Child(ctx, t.Name()))\n\tworkerDir := filepath.Join(realEnv.Directory, \"worker\")\n\tdriver, err := driver.NewDriver(\n\t\tctx,\n\t\trealEnv.ServiceEnv,\n\t\trealEnv.PachClient,\n\t\tpipelineInfo,\n\t\tworkerDir,\n\t)\n\trequire.NoError(t, err)\n\n\tctx, cancel := pctx.WithCancel(realEnv.PachClient.Ctx())\n\tt.Cleanup(cancel)\n\tdriver = driver.WithContext(ctx)\n\n\treturn &testEnv{\n\t\tRealEnv: realEnv,\n\t\tlogger: logger,\n\t\tdriver: &testDriver{driver},\n\t}\n}",
"func NewEnv(loader *ScriptLoader, debugging bool) *Env {\n\tenv := &Env{\n\t\tvm: otto.New(),\n\t\tloader: loader,\n\t\tdebugging: debugging,\n\t\tbuiltinModules: make(map[string]otto.Value),\n\t\tbuiltinModuleFactories: make(map[string]BuiltinModuleFactory),\n\t}\n\tenv.vm.Set(\"__getModulePath\", func(call otto.FunctionCall) otto.Value {\n\t\tvm := call.Otto\n\t\tvar cwd, moduleID string\n\t\tvar err error\n\t\tif cwd, err = call.Argument(0).ToString(); err != nil {\n\t\t\tpanic(vm.MakeCustomError(\"__getModulePath error\", err.Error()))\n\t\t}\n\t\tif moduleID, err = call.Argument(1).ToString(); err != nil {\n\t\t\tpanic(vm.MakeCustomError(\"__getModulePath error\", err.Error()))\n\t\t}\n\t\tif _, found := env.builtinModules[moduleID]; found {\n\t\t\tret, _ := otto.ToValue(moduleID)\n\t\t\treturn ret\n\t\t}\n\t\tif _, found := env.builtinModuleFactories[moduleID]; found {\n\t\t\tret, _ := otto.ToValue(moduleID)\n\t\t\treturn ret\n\t\t}\n\t\tif ap, err := env.loader.GetModuleAbs(cwd, moduleID); err != nil {\n\t\t\tpanic(vm.MakeCustomError(\"__getModulePath error\", err.Error()))\n\t\t} else {\n\t\t\tret, _ := otto.ToValue(ap)\n\t\t\treturn ret\n\t\t}\n\t})\n\tvar requireSrc string\n\tif env.debugging {\n\t\trequireSrc = debugRequireSrc\n\t} else {\n\t\trequireSrc = releaseRequireSrc\n\t}\n\tenv.vm.Set(\"__loadSource\", func(call otto.FunctionCall) otto.Value {\n\t\tvar mp string\n\t\tvar err error\n\t\tvm := call.Otto\n\t\t// reading arguments\n\t\tif mp, err = call.Argument(0).ToString(); err != nil {\n\t\t\tpanic(vm.MakeCustomError(\"__loadSource error\", err.Error()))\n\t\t}\n\t\t// finding built builtin modules\n\t\tif mod, found := env.builtinModules[mp]; found {\n\t\t\tretObj, _ := vm.Object(\"({isBuiltin: true})\")\n\t\t\tretObj.Set(\"builtin\", mod)\n\t\t\treturn retObj.Value()\n\t\t}\n\t\t// finding unbuilt builtin modules\n\t\tif mf, found := env.builtinModuleFactories[mp]; found {\n\t\t\tretObj, _ := vm.Object(\"({isBuiltin: true})\")\n\t\t\tmod := 
mf.CreateModule(vm)\n\t\t\tretObj.Set(\"builtin\", mod)\n\t\t\tenv.builtinModules[mp] = mod\n\t\t\treturn retObj.Value()\n\t\t}\n\t\t// loading module on file system\n\t\tsrc, err := env.loader.LoadScript(mp)\n\t\tif err != nil {\n\t\t\tpanic(vm.MakeCustomError(\"__loadSource error\", err.Error()))\n\t\t}\n\t\tscript, err := vm.Compile(mp, src)\n\t\tif err != nil {\n\t\t\tpanic(vm.MakeCustomError(\"__loadSource error\", err.Error()))\n\t\t}\n\t\tmodValue, err := vm.Run(script)\n\t\tif err != nil {\n\t\t\tpanic(vm.MakeCustomError(\"__loadSource error\", err.Error()))\n\t\t}\n\t\tretObj, _ := vm.Object(\"({})\")\n\t\tretObj.Set(\"src\", modValue)\n\t\tretObj.Set(\"filename\", path.Base(mp))\n\t\tretObj.Set(\"dirname\", path.Dir(mp))\n\t\treturn retObj.Value()\n\t})\n\t_, err := env.vm.Run(requireSrc)\n\tif err != nil {\n\t\tswitch err.(type) {\n\t\tcase *otto.Error:\n\t\t\tpanic(err.(otto.Error).String())\n\t\tdefault:\n\t\t\tpanic(err)\n\t\t}\n\t}\n\treturn env\n}",
"func NewEnvironment(testKeys []string) Environment {\n\tvars := make(map[string]string)\n\tfor _, key := range testKeys {\n\t\tvars[key] = os.Getenv(key)\n\t}\n\treturn Environment{backup: vars}\n}",
"func NewEnvironment(options ...Option) Environment {\n\te := &environment{\n\t\tdefaultScheme: DefaultScheme,\n\t\taccessorFactory: DefaultAccessorFactory,\n\t\tcloser: NopCloser,\n\t\tclosed: make(chan struct{}),\n\t}\n\n\tfor _, o := range options {\n\t\to(e)\n\t}\n\n\treturn e\n}",
"func NewEnv(ps ...*Env) *Env {\n\treturn &Env{\n\t\tBindings: Bindings{},\n\t\tDocs: Docs{},\n\n\t\t// XXX(hack): allocate a slice to prevent comparing w/ nil in tests\n\t\tParents: append([]*Env{}, ps...),\n\t}\n}",
"func NewEnv(name string, gp *pool.GoroutinePool) adapter.Env {\n\treturn env{\n\t\t// logger: newLogger(name),\n\t\tgp: gp,\n\t\tdaemons: new(int64),\n\t\tworkers: new(int64),\n\t}\n}",
"func New(mgr manager.Manager, namespace, buildprefix string) (*Environment, error) {\n\treturn &Environment{mgr.GetClient(), mgr.GetScheme(), namespace, buildprefix}, nil\n}",
"func NewEnv() Env {\n\tenv := Env{}\n\tenv.LoadEnv()\n\treturn env\n}",
"func NewGlobal() *Environment {\n\treturn New(nil)\n}",
"func NewEnvironment(maxMemoryPages int, maxTableSize int, maxValueSlots int, maxCallStackDepth int, defaultMemoryPages int, defaultTableSize int, gasLimit uint64, disableFloatingPoint bool, returnOnGasLimitExceeded bool) *Environment {\n\treturn &Environment{ // Return initialized vm config\n\t\tMaxMemoryPages: maxMemoryPages,\n\t\tMaxTableSize: maxTableSize,\n\t\tMaxValueSlots: maxValueSlots,\n\t\tMaxCallStackDepth: maxCallStackDepth,\n\t\tDefaultMemoryPages: defaultMemoryPages,\n\t\tDefaultTableSize: defaultTableSize,\n\t\tGasLimit: gasLimit,\n\t\tDisableFloatingPoint: disableFloatingPoint,\n\t\tReturnOnGasLimitExceeded: returnOnGasLimitExceeded,\n\t}\n}",
"func newTestEnvironment(eventJSONFilePath string, shipyardPath string, jobConfigPath string) (*testEnvironment, error) {\n\n\t// Create K8s clientset\n\tclientset, err := NewK8sClient()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to build k8s clientset: %w\", err)\n\t}\n\n\t// Create a new Keptn api for the use of the E2E test\n\tkeptnAPI, err := NewKeptnAPI(readKeptnConnectionDetailsFromEnv())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create the keptn API: %w\", err)\n\t}\n\n\t// Read the event we want to trigger and extract the project, service and stage\n\tkeptnEvent, err := readKeptnContextExtendedCE(eventJSONFilePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable parse JSON event file: %w\", err)\n\t}\n\n\teventData, err := parseKeptnEventData(keptnEvent)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable parse event data of the JSON event: %w\", err)\n\t}\n\n\t// Load shipyard file and create the project in Keptn\n\tshipyardFile, err := ioutil.ReadFile(shipyardPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to read the shipyard file: %w\", err)\n\t}\n\n\t// Load the job configuration for the E2E test\n\tjobConfigYaml, err := ioutil.ReadFile(jobConfigPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to read the job configuration file: %w\", err)\n\t}\n\n\treturn &testEnvironment{\n\t\tK8s: clientset,\n\t\tAPI: keptnAPI,\n\t\tEventData: eventData,\n\t\tEvent: keptnEvent,\n\t\tNamespace: \"keptn\",\n\t\tshipyard: shipyardFile,\n\t\tjobConfig: jobConfigYaml,\n\t}, nil\n}",
"func NewProgramEnv() *ProgramEnv {\n\tretval := ProgramEnv{}\n\n\t// all done\n\treturn &retval\n}",
"func NewEnvironment() Environment {\n\treturn &environ{\n\t\tuid: uuid.New().String(),\n\t}\n}",
"func NewEnvironment(kubeConfig *rest.Config) *Environment {\n\tatomic.AddInt32(&namespaceCounter, portPerTest)\n\tnamespaceID := gomegaConfig.GinkgoConfig.ParallelNode*testPerNode + int(namespaceCounter)\n\t// the single namespace used by this test\n\tns := utils.GetNamespaceName(namespaceID)\n\n\tenv := &Environment{\n\t\tEnvironment: &utils.Environment{\n\t\t\tID: namespaceID,\n\t\t\tNamespace: ns,\n\t\t\tKubeConfig: kubeConfig,\n\t\t\tConfig: &config.Config{\n\t\t\t\tCtxTimeOut: 10 * time.Second,\n\t\t\t\tMeltdownDuration: defaultTestMeltdownDuration * time.Second,\n\t\t\t\tMeltdownRequeueAfter: defaultTestMeltdownRequeueAfter * time.Second,\n\t\t\t\tMonitoredID: ns,\n\t\t\t\tOperatorNamespace: ns,\n\t\t\t\tFs: afero.NewOsFs(),\n\t\t\t},\n\t\t},\n\t\tMachine: Machine{\n\t\t\tMachine: machine.NewMachine(),\n\t\t},\n\t}\n\tgomega.SetDefaultEventuallyTimeout(env.PollTimeout)\n\tgomega.SetDefaultEventuallyPollingInterval(env.PollInterval)\n\n\treturn env\n}",
"func NewEnv() (*Env, error) {\n\tctx := context.Background()\n\tdomainID := fmt.Sprintf(\"domain %d\", rand.Int()) // nolint: gas\n\tdb, err := testdb.New(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"env: failed to open database: %v\", err)\n\t}\n\n\t// Map server\n\tmapEnv, err := maptest.NewMapEnv(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"env: failed to create trillian map server: %v\", err)\n\t}\n\n\ttlog := fake.NewTrillianLogClient()\n\n\t// Configure domain, which creates new map and log trees.\n\tdomainStorage, err := domain.NewStorage(db)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"env: failed to create domain storage: %v\", err)\n\t}\n\tadminSvr := adminserver.New(tlog, mapEnv.Map, mapEnv.Admin, mapEnv.Admin, domainStorage, vrfKeyGen)\n\tdomainPB, err := adminSvr.CreateDomain(ctx, &pb.CreateDomainRequest{\n\t\tDomainId: domainID,\n\t\tMinInterval: ptypes.DurationProto(1 * time.Second),\n\t\tMaxInterval: ptypes.DurationProto(5 * time.Second),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"env: CreateDomain(): %v\", err)\n\t}\n\n\tmapID := domainPB.Map.TreeId\n\tlogID := domainPB.Log.TreeId\n\tmapPubKey, err := der.UnmarshalPublicKey(domainPB.Map.GetPublicKey().GetDer())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"env: Failed to load signing keypair: %v\", err)\n\t}\n\tvrfPub, err := p256.NewVRFVerifierFromRawKey(domainPB.Vrf.GetDer())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"env: Failed to load vrf pubkey: %v\", err)\n\t}\n\n\t// Common data structures.\n\tmutations, err := mutationstorage.New(db)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"env: Failed to create mutations object: %v\", err)\n\t}\n\tauth := authentication.NewFake()\n\tauthz := authorization.New()\n\n\tqueue := mutator.MutationQueue(mutations)\n\tserver := keyserver.New(tlog, mapEnv.Map, mapEnv.Admin, mapEnv.Admin,\n\t\tentry.New(), auth, authz, domainStorage, queue, mutations)\n\tgsvr := 
grpc.NewServer()\n\tpb.RegisterKeyTransparencyServer(gsvr, server)\n\n\t// Sequencer\n\tseq := sequencer.New(tlog, mapEnv.Map, entry.New(), domainStorage, mutations, queue)\n\t// Only sequence when explicitly asked with receiver.Flush()\n\td := &domaindef.Domain{\n\t\tDomainID: domainID,\n\t\tLogID: logID,\n\t\tMapID: mapID,\n\t}\n\treceiver := seq.NewReceiver(ctx, d, 60*time.Hour, 60*time.Hour)\n\treceiver.Flush(ctx)\n\n\taddr, lis, err := Listen()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgo gsvr.Serve(lis)\n\n\t// Client\n\tcc, err := grpc.Dial(addr, grpc.WithInsecure())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Dial(%v) = %v\", addr, err)\n\t}\n\tktClient := pb.NewKeyTransparencyClient(cc)\n\tclient := grpcc.New(ktClient, domainID, vrfPub, mapPubKey, coniks.Default, fake.NewFakeTrillianLogVerifier())\n\tclient.RetryCount = 0\n\n\treturn &Env{\n\t\tEnv: &integration.Env{\n\t\t\tClient: client,\n\t\t\tCli: ktClient,\n\t\t\tDomain: domainPB,\n\t\t\tReceiver: receiver,\n\t\t},\n\t\tmapEnv: mapEnv,\n\t\tgrpcServer: gsvr,\n\t\tgrpcCC: cc,\n\t\tdb: db,\n\t}, nil\n}",
"func newRuntimeContext(env string, logger *zap.Logger, scope tally.Scope, service workflowserviceclient.Interface) *RuntimeContext {\n\treturn &RuntimeContext{\n\t\tEnv: env,\n\t\tlogger: logger,\n\t\tmetrics: scope,\n\t\tservice: service,\n\t}\n}",
"func newTestEnvironment() *environment {\n\tdir, err := ioutil.TempDir(\"\", uuid.NewUUID().String())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\ttestAccounts := account.GenAccounts(usersNumber)\n\towner := testAccounts[0]\n\n\tserver := backend.NewSimulatedBackend(account.Addresses(testAccounts))\n\n\tmAddr, _, err := mediator.Deploy(owner.TransactOpts, server)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tmSession, err := mediator.NewMediatorSession(*owner.TransactOpts,\n\t\tmAddr, server)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\trAddr, err := mSession.RootChain()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tparsed, err := abi.JSON(strings.NewReader(rootchain.RootChainABI))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tmParsed, err := abi.JSON(strings.NewReader(mediator.MediatorABI))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\ttAddr, _ := deployToken(owner.TransactOpts, server)\n\n\ttOwnerSession := tokenSession(owner.TransactOpts,\n\t\ttAddr, server)\n\n\tvar users []*user\n\n\t// mint and approval to Mediator contract.\n\tfor _, acc := range testAccounts {\n\t\tmint(tOwnerSession, acc.From, supply, server)\n\n\t\ttSession := tokenSession(acc.TransactOpts,\n\t\t\ttAddr, server)\n\t\tincreaseApproval(tSession, mAddr, supply, server)\n\n\t\tusers = append(users, &user{acc: acc})\n\t}\n\n\t// get free port\n\tport := getPort()\n\n\t// creates new Plasma server.\n\tsrv := newServer(\n\t\t*testAccounts[0], rAddr, mAddr, parsed,\n\t\tmParsed, server, dir, port)\n\tgo srv.ListenAndServe()\n\n\ttime.Sleep(time.Microsecond * 200)\n\n\treturn &environment{\n\t\tdir: dir,\n\t\taccounts: users,\n\t\tmediatorAddress: mAddr,\n\t\trootChainAddress: rAddr,\n\t\ttokenAddress: tAddr,\n\t\tmediatorABI: mParsed,\n\t\trootChainABI: parsed,\n\t\tbackend: server,\n\t\tserver: srv,\n\t\tport: port,\n\t}\n}",
"func (e *Environment) New(n data.Name) Namespace {\n\treturn &namespace{\n\t\tenvironment: e,\n\t\tentries: entries{},\n\t\tdomain: n,\n\t}\n}",
"func CreateTestEnv(t *testing.T) TestInput {\n\tt.Helper()\n\n\t// Initialize store keys\n\tkeyGravity := sdk.NewKVStoreKey(gravitytypes.StoreKey)\n\tkeyAcc := sdk.NewKVStoreKey(authtypes.StoreKey)\n\tkeyStaking := sdk.NewKVStoreKey(stakingtypes.StoreKey)\n\tkeyBank := sdk.NewKVStoreKey(banktypes.StoreKey)\n\tkeyDistro := sdk.NewKVStoreKey(distrtypes.StoreKey)\n\tkeyParams := sdk.NewKVStoreKey(paramstypes.StoreKey)\n\ttkeyParams := sdk.NewTransientStoreKey(paramstypes.TStoreKey)\n\tkeyGov := sdk.NewKVStoreKey(govtypes.StoreKey)\n\tkeySlashing := sdk.NewKVStoreKey(slashingtypes.StoreKey)\n\tkeyAllocation := sdk.NewKVStoreKey(types.StoreKey)\n\n\t// Initialize memory database and mount stores on it\n\tdb := dbm.NewMemDB()\n\tms := store.NewCommitMultiStore(db)\n\tms.MountStoreWithDB(keyGravity, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyAcc, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyParams, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyStaking, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyBank, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyDistro, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(tkeyParams, sdk.StoreTypeTransient, db)\n\tms.MountStoreWithDB(keyGov, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keySlashing, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyAllocation, sdk.StoreTypeIAVL, db)\n\terr := ms.LoadLatestVersion()\n\trequire.Nil(t, err)\n\n\t// Create sdk.Context\n\tctx := sdk.NewContext(ms, tmproto.Header{\n\t\tHeight: 1234567,\n\t\tTime: time.Date(2020, time.April, 22, 12, 0, 0, 0, time.UTC),\n\t}, false, log.TestingLogger())\n\n\tcdc := MakeTestCodec()\n\tmarshaler := MakeTestMarshaler()\n\n\tparamsKeeper := paramskeeper.NewKeeper(marshaler, cdc, keyParams, 
tkeyParams)\n\tparamsKeeper.Subspace(authtypes.ModuleName)\n\tparamsKeeper.Subspace(banktypes.ModuleName)\n\tparamsKeeper.Subspace(stakingtypes.ModuleName)\n\tparamsKeeper.Subspace(distrtypes.ModuleName)\n\tparamsKeeper.Subspace(govtypes.ModuleName)\n\tparamsKeeper.Subspace(types.DefaultParamspace)\n\tparamsKeeper.Subspace(slashingtypes.ModuleName)\n\tparamsKeeper.Subspace(gravitytypes.ModuleName)\n\n\t// this is also used to initialize module accounts for all the map keys\n\tmaccPerms := map[string][]string{\n\t\tauthtypes.FeeCollectorName: nil,\n\t\tdistrtypes.ModuleName: nil,\n\t\tstakingtypes.BondedPoolName: {authtypes.Burner, authtypes.Staking},\n\t\tstakingtypes.NotBondedPoolName: {authtypes.Burner, authtypes.Staking},\n\t\tgovtypes.ModuleName: {authtypes.Burner},\n\t\ttypes.ModuleName: {authtypes.Minter, authtypes.Burner},\n\t}\n\n\taccountKeeper := authkeeper.NewAccountKeeper(\n\t\tmarshaler,\n\t\tkeyAcc, // target store\n\t\tgetSubspace(paramsKeeper, authtypes.ModuleName),\n\t\tauthtypes.ProtoBaseAccount, // prototype\n\t\tmaccPerms,\n\t)\n\n\tblockedAddr := make(map[string]bool, len(maccPerms))\n\tfor acc := range maccPerms {\n\t\tblockedAddr[authtypes.NewModuleAddress(acc).String()] = true\n\t}\n\n\tbankKeeper := bankkeeper.NewBaseKeeper(\n\t\tmarshaler,\n\t\tkeyBank,\n\t\taccountKeeper,\n\t\tgetSubspace(paramsKeeper, banktypes.ModuleName),\n\t\tblockedAddr,\n\t)\n\tbankKeeper.SetParams(ctx, banktypes.Params{DefaultSendEnabled: true})\n\n\tstakingKeeper := stakingkeeper.NewKeeper(marshaler, keyStaking, accountKeeper, bankKeeper, getSubspace(paramsKeeper, stakingtypes.ModuleName))\n\tstakingKeeper.SetParams(ctx, TestingStakeParams)\n\n\tdistKeeper := distrkeeper.NewKeeper(marshaler, keyDistro, getSubspace(paramsKeeper, distrtypes.ModuleName), accountKeeper, bankKeeper, stakingKeeper, authtypes.FeeCollectorName, nil)\n\tdistKeeper.SetParams(ctx, distrtypes.DefaultParams())\n\n\t// set genesis items required for distribution\n\tdistKeeper.SetFeePool(ctx, 
distrtypes.InitialFeePool())\n\n\t// total supply to track this\n\ttotalSupply := sdk.NewCoins(sdk.NewInt64Coin(\"stake\", 100000000))\n\n\t// set up initial accounts\n\tfor name, perms := range maccPerms {\n\t\tmod := authtypes.NewEmptyModuleAccount(name, perms...)\n\t\tif name == stakingtypes.NotBondedPoolName {\n\t\t\trequire.NoError(t, fundModAccount(ctx, bankKeeper, mod.GetName(), totalSupply))\n\t\t} else if name == distrtypes.ModuleName {\n\t\t\t// some big pot to pay out\n\t\t\tamt := sdk.NewCoins(sdk.NewInt64Coin(\"stake\", 500000))\n\t\t\trequire.NoError(t, fundModAccount(ctx, bankKeeper, mod.GetName(), amt))\n\t\t}\n\n\t\taccountKeeper.SetModuleAccount(ctx, mod)\n\t}\n\n\tstakeAddr := authtypes.NewModuleAddress(stakingtypes.BondedPoolName)\n\tmoduleAcct := accountKeeper.GetAccount(ctx, stakeAddr)\n\trequire.NotNil(t, moduleAcct)\n\n\trouter := baseapp.NewRouter()\n\trouter.AddRoute(bank.AppModule{}.Route())\n\trouter.AddRoute(staking.AppModule{}.Route())\n\trouter.AddRoute(distribution.AppModule{}.Route())\n\n\t// Load default wasm config\n\n\tgovRouter := govtypes.NewRouter().\n\t\tAddRoute(paramsproposal.RouterKey, params.NewParamChangeProposalHandler(paramsKeeper)).\n\t\tAddRoute(govtypes.RouterKey, govtypes.ProposalHandler)\n\n\tgovKeeper := govkeeper.NewKeeper(\n\t\tmarshaler, keyGov, getSubspace(paramsKeeper, govtypes.ModuleName).WithKeyTable(govtypes.ParamKeyTable()), accountKeeper, bankKeeper, stakingKeeper, govRouter,\n\t)\n\n\tgovKeeper.SetProposalID(ctx, govtypes.DefaultStartingProposalID)\n\tgovKeeper.SetDepositParams(ctx, govtypes.DefaultDepositParams())\n\tgovKeeper.SetVotingParams(ctx, govtypes.DefaultVotingParams())\n\tgovKeeper.SetTallyParams(ctx, govtypes.DefaultTallyParams())\n\n\tslashingKeeper := slashingkeeper.NewKeeper(\n\t\tmarshaler,\n\t\tkeySlashing,\n\t\t&stakingKeeper,\n\t\tgetSubspace(paramsKeeper, slashingtypes.ModuleName).WithKeyTable(slashingtypes.ParamKeyTable()),\n\t)\n\n\tgravityKeeper := 
gravitykeeper.NewKeeper(\n\t\tmarshaler,\n\t\tkeyGravity,\n\t\tgetSubspace(paramsKeeper, gravitytypes.DefaultParamspace),\n\t\taccountKeeper,\n\t\tstakingKeeper,\n\t\tbankKeeper,\n\t\tslashingKeeper,\n\t\tsdk.DefaultPowerReduction,\n\t)\n\n\tstakingKeeper = *stakingKeeper.SetHooks(\n\t\tstakingtypes.NewMultiStakingHooks(\n\t\t\tdistKeeper.Hooks(),\n\t\t\tslashingKeeper.Hooks(),\n\t\t\tgravityKeeper.Hooks(),\n\t\t),\n\t)\n\n\tk := NewKeeper(\n\t\tmarshaler,\n\t\tkeyAllocation,\n\t\tgetSubspace(paramsKeeper, types.DefaultParamspace),\n\t\tstakingKeeper,\n\t\tgravityKeeper,\n\t)\n\n\tk.setParams(ctx, TestingAllocationParams)\n\n\treturn TestInput{\n\t\tAllocationKeeper: k,\n\t\tGravityKeeper: gravityKeeper,\n\t\tAccountKeeper: accountKeeper,\n\t\tBankKeeper: bankKeeper,\n\t\tStakingKeeper: stakingKeeper,\n\t\tSlashingKeeper: slashingKeeper,\n\t\tDistKeeper: distKeeper,\n\t\tGovKeeper: govKeeper,\n\t\tContext: ctx,\n\t\tMarshaler: marshaler,\n\t\tLegacyAmino: cdc,\n\t}\n}",
"func MakeEnv(outer *Env) Env {\n\tframe := map[Symbol]LangType{}\n\treturn Env{\n\t\touter: outer,\n\t\tframe: frame,\n\t}\n}",
"func NewEnvironmentViewer(ctx *common.Context, format string, environmentName string, viewTasks bool, writer io.Writer) Executor {\n\n\tworkflow := new(environmentWorkflow)\n\n\tvar environmentViewer func() error\n\tif format == JSON {\n\t\tenvironmentViewer = workflow.environmentViewerJSON(ctx.Config.Namespace, environmentName, ctx.StackManager, ctx.StackManager, ctx.ClusterManager, writer)\n\t} else if format == SHELL {\n\t\tenvironmentViewer = workflow.environmentViewerSHELL(ctx.Config.Namespace, environmentName, ctx.StackManager, ctx.StackManager, ctx.ClusterManager, writer)\n\t} else {\n\t\tenvironmentViewer = workflow.environmentViewerCli(ctx.Config.Namespace, environmentName, ctx.StackManager, ctx.StackManager, ctx.ClusterManager, ctx.InstanceManager, ctx.TaskManager, viewTasks, writer)\n\t}\n\n\treturn newPipelineExecutor(\n\t\tenvironmentViewer,\n\t)\n}",
"func NewEnv() *Env {\n\tenv := new(Env)\n\t// env.EnvParams.DefaultEnv()\n\t// env.EnvParams.Initialize()\n\treturn env\n}",
"func New() {\n\ttypeOfProject()\n}",
"func NewEnv(r io.Reader, varReader tpl.VariableReader, parser tpl.Parser) (EnvSource, error) {\n\tenv, err := parseEnvironment(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsecretTemplates := make([]envvarTpls, len(env))\n\tfor i, envvar := range env {\n\t\tkeyTpl, err := parser.Parse(envvar.key, envvar.lineNumber, envvar.columnNumberKey)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = validation.ValidateEnvarName(envvar.key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvalTpl, err := parser.Parse(envvar.value, envvar.lineNumber, envvar.columnNumberValue)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsecretTemplates[i] = envvarTpls{\n\t\t\tkey: keyTpl,\n\t\t\tvalue: valTpl,\n\t\t\tlineNo: envvar.lineNumber,\n\t\t}\n\t}\n\n\treturn envTemplate{\n\t\tenvVars: secretTemplates,\n\t\ttemplateVarReader: varReader,\n\t}, nil\n}",
"func CreateEnv(testchain *model.Testchain) (*model.Response, error) {\n\turl := helpers.BuildURL(connectionString(), \"environments\", \"start\")\n\tjson, err := testchain.ToReader()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn client.Post(url, json)\n}",
"func NewFromCurrentEnvironment() (p *Project, err error) {\n\tvar found bool\n\tp = &Project{}\n\n\tif p.GoRootPath, found = os.LookupEnv(\"GOROOT\"); !found {\n\t\treturn nil, fmt.Errorf(\"GOROOT is not set\")\n\t}\n\n\tif p.GoPath, found = os.LookupEnv(\"GOPATH\"); !found {\n\t\treturn nil, fmt.Errorf(\"GOPATH is not set\")\n\t}\n\n\tif p.Name, found = os.LookupEnv(\"GOVENV_PROJECT\"); !found {\n\t\treturn nil, fmt.Errorf(\"GOVENV_PROJECT is not set\")\n\t}\n\n\tif p.ManagementDirPath, found = os.LookupEnv(\"GOVENV_MANAGEMENT_DIR\"); !found {\n\t\treturn nil, fmt.Errorf(\"GOVENV_MANAGEMET_DIR is not set\")\n\t}\n\n\treturn p, nil\n}",
"func NewEnvironmentService(sling *sling.Sling, uriTemplate string, sortOrderPath string, summaryPath string) *EnvironmentService {\n\treturn &EnvironmentService{\n\t\tsortOrderPath: sortOrderPath,\n\t\tsummaryPath: summaryPath,\n\t\tCanDeleteService: services.CanDeleteService{\n\t\t\tService: services.NewService(constants.ServiceEnvironmentService, sling, uriTemplate),\n\t\t},\n\t}\n}",
"func NewSized(env *Environment, size int) *Environment {\n\treturn &Environment{values: make(map[string]interface{}), enclosing: env, indexedValues: make([]interface{}, size)}\n}",
"func New(name string) (*ChefEnvironment, util.Gerror) {\n\tif !util.ValidateEnvName(name) {\n\t\terr := util.Errorf(\"Field 'name' invalid\")\n\t\terr.SetStatus(http.StatusBadRequest)\n\t\treturn nil, err\n\t}\n\n\tvar found bool\n\tif config.UsingDB() {\n\t\tvar eerr error\n\t\tfound, eerr = checkForEnvironmentSQL(datastore.Dbh, name)\n\t\tif eerr != nil {\n\t\t\terr := util.CastErr(eerr)\n\t\t\terr.SetStatus(http.StatusInternalServerError)\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tds := datastore.New()\n\t\t_, found = ds.Get(\"env\", name)\n\t}\n\tif found || name == \"_default\" {\n\t\terr := util.Errorf(\"Environment already exists\")\n\t\treturn nil, err\n\t}\n\n\tenv := &ChefEnvironment{\n\t\tName: name,\n\t\tChefType: \"environment\",\n\t\tJSONClass: \"Chef::Environment\",\n\t\tDefault: map[string]interface{}{},\n\t\tOverride: map[string]interface{}{},\n\t\tCookbookVersions: map[string]string{},\n\t}\n\treturn env, nil\n}",
"func (client *Client) CreateEnvironment(req *Request) (*Response, error) {\n\treturn client.Execute(&Request{\n\t\tMethod: \"POST\",\n\t\tPath: EnvironmentsPath,\n\t\tQueryParams: req.QueryParams,\n\t\tBody: req.Body,\n\t\tResult: &CreateEnvironmentResult{},\n\t})\n}",
"func CreateEnvironment(host string, verifyTLS bool, apiKey string, project string, name string, slug string) (models.EnvironmentInfo, Error) {\n\tpostBody := map[string]string{\"project\": project, \"name\": name, \"slug\": slug}\n\tbody, err := json.Marshal(postBody)\n\tif err != nil {\n\t\treturn models.EnvironmentInfo{}, Error{Err: err, Message: \"Invalid environment info\"}\n\t}\n\n\turl, err := generateURL(host, \"/v3/environments\", nil)\n\tif err != nil {\n\t\treturn models.EnvironmentInfo{}, Error{Err: err, Message: \"Unable to generate url\"}\n\t}\n\n\tstatusCode, _, response, err := PostRequest(url, verifyTLS, apiKeyHeader(apiKey), body)\n\tif err != nil {\n\t\treturn models.EnvironmentInfo{}, Error{Err: err, Message: \"Unable to create environment\", Code: statusCode}\n\t}\n\n\tvar result map[string]interface{}\n\terr = json.Unmarshal(response, &result)\n\tif err != nil {\n\t\treturn models.EnvironmentInfo{}, Error{Err: err, Message: \"Unable to parse API response\", Code: statusCode}\n\t}\n\n\tenvironmentInfo, ok := result[\"environment\"].(map[string]interface{})\n\tif !ok {\n\t\treturn models.EnvironmentInfo{}, Error{Err: fmt.Errorf(\"Unexpected type parsing environment, expected map[string]interface{}, got %T\", result[\"environment\"]), Message: \"Unable to parse API response\", Code: statusCode}\n\t}\n\n\tinfo := models.ParseEnvironmentInfo(environmentInfo)\n\n\treturn info, Error{}\n}",
"func NewContext(env map[string]string) *ContextImpl {\n\tcntx := &ContextImpl{\n\t\tenv: env,\n\t\ttest: make(map[string]string),\n\t\ttestNumber: 0,\n\t\tcorrelationId: \"\",\n\t}\n\tif cntx.env == nil {\n\t\tcntx.env = make(map[string]string)\n\t}\n\treturn cntx\n}",
"func (t *DeploymentEnvironment_DeploymentEnvironment) NewDeploymentEnvironment(Id string) (*DeploymentEnvironment_DeploymentEnvironment_DeploymentEnvironment, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.DeploymentEnvironment == nil {\n\t\tt.DeploymentEnvironment = make(map[string]*DeploymentEnvironment_DeploymentEnvironment_DeploymentEnvironment)\n\t}\n\n\tkey := Id\n\n\t// Ensure that this key has not already been used in the\n\t// list. Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.DeploymentEnvironment[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list DeploymentEnvironment\", key)\n\t}\n\n\tt.DeploymentEnvironment[key] = &DeploymentEnvironment_DeploymentEnvironment_DeploymentEnvironment{\n\t\tId: &Id,\n\t}\n\n\treturn t.DeploymentEnvironment[key], nil\n}",
"func NewSimpleEnv() *Env {\n\tenv := new(Env)\n\t// env.EnvParams.DefaultEnv()\n\t// env.EnvParams.Initialize()\n\treturn env\n}",
"func (client *Client) NewTestEnvironment(bucketKey string, testID string, environment Environment) (Environment, error) {\n\tvar newEnvironment = Environment{}\n\n\tpath := fmt.Sprintf(\"buckets/%s/tests/%s/environments\", bucketKey, testID)\n\tdata, err := json.Marshal(&environment)\n\tif err != nil {\n\t\treturn newEnvironment, err\n\t}\n\n\tcontent, err := client.Post(path, data)\n\tif err != nil {\n\t\treturn newEnvironment, err\n\t}\n\n\terr = unmarshal(content, &newEnvironment)\n\treturn newEnvironment, err\n}",
"func CreateMainTestEnv(opts *CreateMainTestEnvOpts) (env *EnvT, tearDown func()) {\n\tglobalMutex.Lock()\n\tpackageLevelVirtualTest := newVirtualTest(opts)\n\tglobalMutex.Unlock()\n\n\tenv = NewEnv(packageLevelVirtualTest) // register global test for env\n\treturn env, packageLevelVirtualTest.cleanup\n}",
"func NewEnclosedEnvironment(outer *Environment) *Environment {\n\tenv := NewEnvironment()\n\tenv.outer = outer\n\treturn env\n}",
"func NewEnclosedEnvironment(outer *Environment) *Environment {\n\tenv := NewEnvironment()\n\tenv.outer = outer\n\treturn env\n}",
"func (s *Store) CreateEnvironment(environment *archer.Environment) error {\n\tif _, err := s.GetProject(environment.Project); err != nil {\n\t\treturn err\n\t}\n\n\tenvironmentPath := fmt.Sprintf(fmtEnvParamPath, environment.Project, environment.Name)\n\tdata, err := marshal(environment)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"serializing environment %s: %w\", environment.Name, err)\n\t}\n\n\t_, err = s.ssmClient.PutParameter(&ssm.PutParameterInput{\n\t\tName: aws.String(environmentPath),\n\t\tDescription: aws.String(fmt.Sprintf(\"The %s deployment stage\", environment.Name)),\n\t\tType: aws.String(ssm.ParameterTypeString),\n\t\tValue: aws.String(data),\n\t})\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase ssm.ErrCodeParameterAlreadyExists:\n\t\t\t\treturn &ErrEnvironmentAlreadyExists{\n\t\t\t\t\tEnvironmentName: environment.Name,\n\t\t\t\t\tProjectName: environment.Project}\n\t\t\t}\n\t\t}\n\t\treturn fmt.Errorf(\"create environment %s in project %s: %w\", environment.Name, environment.Project, err)\n\t}\n\treturn nil\n}",
"func (client LabClient) CreateEnvironmentPreparer(ctx context.Context, resourceGroupName string, name string, labVirtualMachine LabVirtualMachine) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"name\": autorest.Encode(\"path\", name),\n\t\t\"resourceGroupName\": autorest.Encode(\"path\", resourceGroupName),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t}\n\n\tconst APIVersion = \"2015-05-21-preview\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsContentType(\"application/json; charset=utf-8\"),\n\t\tautorest.AsPost(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{name}/createEnvironment\", pathParameters),\n\t\tautorest.WithJSON(labVirtualMachine),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}",
"func NewEnclosedEnvironment(outer *Environment) *Environment {\n\tenv := NewEnvironment()\n\tenv.outer = outer\n\n\treturn env\n}",
"func InitEnvironment(e *Environment) {\n\te.SetCommand(\"print\", _stdPrint)\n\te.SetCommand(\"set\", _stdSet)\n\te.SetCommand(\"get\", _stdGet)\n\te.SetCommand(\"math\", _stdMath)\n\te.SetCommand(\"push\", _stdPush)\n\te.SetCommand(\"pop\", _stdPop)\n\n\t// ================\n\t// Special Commands\n\t// ================\n\n\t// Function creation\n\t// ===================\n\tvar functioner *Functioner\n\tvar functionerName string\n\te.SetCommand(\"func\", func(e *Environment, args string) int64 {\n\t\tif args == \"\" {\n\t\t\treturn 0\n\t\t}\n\t\tfunctionerName = args\n\t\tfunctioner = NewFunctioner(e)\n\t\treturn 1\n\t})\n\te.SetCommand(\"+\", func(e *Environment, args string) int64 {\n\t\tif args == \"\" {\n\t\t\treturn 0\n\t\t}\n\t\tfunctioner.Append(args)\n\t\treturn 1\n\t})\n\te.SetCommand(\"endfunc\", func(e *Environment, args string) int64 {\n\t\tif functioner == nil || functionerName == \"\" {\n\t\t\treturn 0\n\t\t}\n\t\tf := functioner.GetFunction()\n\t\te.SetCommand(functionerName, f)\n\n\t\tfunctioner = nil\n\t\tfunctionerName = \"\"\n\t\treturn 1\n\t})\n}",
"func NewExecuteEnv(req Request, reports []Report) *ExecuteEnv {\n\tenvReports := make(map[string]map[ExternalID]RawReport)\n\tfor _, report := range reports {\n\t\tvalReports := make(map[ExternalID]RawReport)\n\t\tfor _, each := range report.RawReports {\n\t\t\tvalReports[each.ExternalID] = each\n\t\t}\n\t\tenvReports[report.Validator.String()] = valReports\n\t}\n\treturn &ExecuteEnv{\n\t\tBaseEnv: BaseEnv{\n\t\t\trequest: req,\n\t\t},\n\t\treports: envReports,\n\t}\n}",
"func newScope(ast *parser.Thrift) *Scope {\n\treturn &Scope{\n\t\tast: ast,\n\t\timports: newImportManager(),\n\t\tglobals: namespace.NewNamespace(namespace.UnderscoreSuffix),\n\t\tnamespace: ast.GetNamespaceOrReferenceName(\"go\"),\n\t}\n}",
"func NewEnv(dsn string) (*Env, error) {\n\tdb, err := sql.Open(\"postgres\", dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = db.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Env{db}, nil\n}",
"func newContext(config *Config) (*Context, error) {\n\tctx := &Context{Env: make(map[string]string)}\n\n\tfor _, envVarName := range config.Envvars {\n\t\tvalue := os.Getenv(envVarName)\n\t\tif value != \"\" {\n\t\t\t//log.Printf(\"Env var %s found with value '%s'\", envVarName, value)\n\t\t\tctx.Env[envVarName] = value\n\t\t} else {\n\t\t\treturn nil, errors.New(fmt.Sprintf(\"Env var %s not defined!\", envVarName))\n\t\t}\n\t}\n\n\treturn ctx, nil\n}",
"func (c *Client) CreateTemporaryEnvironment(runDescription string, urlString string, webhook string) (*Environment, error) {\n\tname := \"temporary-env-for-custom-url-via-CLI\"\n\tif runDescription != \"\" {\n\t\tif len(runDescription) > 241 {\n\t\t\trunDescription = runDescription[:241]\n\t\t}\n\t\tname = fmt.Sprintf(\"%v-temporary-env\", runDescription)\n\t}\n\n\tbody := EnvironmentParams{\n\t\tName: name,\n\t\tURL: urlString,\n\t\tIsTemporary: true,\n\t\tWebhook: webhook,\n\t\tWebhookEnabled: webhook != \"\",\n\t}\n\treq, err := c.NewRequest(\"POST\", \"environments\", &body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar env Environment\n\t_, err = c.Do(req, &env)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &env, nil\n}",
"func NewEnv(rctx context.Context, t testing.TB) *Env {\n\t// Use an error group with a cancelable context to supervise every component\n\t// and cancel everything if one fails\n\tctx, cancel := pctx.WithCancel(rctx)\n\teg, ctx := errgroup.WithContext(ctx)\n\tt.Cleanup(func() {\n\t\trequire.NoError(t, eg.Wait())\n\t})\n\tt.Cleanup(cancel)\n\n\tenv := &Env{Context: ctx, Directory: t.TempDir()}\n\n\t// NOTE: this is changing a GLOBAL variable in etcd. This function should not\n\t// be run in the same process as production code where this may affect\n\t// performance (but there should be little risk of that as this is only for\n\t// test code).\n\tetcdwal.SegmentSizeBytes = 1 * 1000 * 1000 // 1 MB\n\n\tetcdConfig := embed.NewConfig()\n\tetcdConfig.MaxTxnOps = 10000\n\n\t// Create test dirs for etcd data\n\tetcdConfig.Dir = path.Join(env.Directory, \"etcd_data\")\n\tetcdConfig.WalDir = path.Join(env.Directory, \"etcd_wal\")\n\n\t// Speed up initial election, hopefully this has no other impact since there\n\t// is only one etcd instance\n\tetcdConfig.InitialElectionTickAdvance = false\n\tetcdConfig.TickMs = 10\n\tetcdConfig.ElectionMs = 50\n\n\t// Log to the test log.\n\tlevel := log.AddLoggerToEtcdServer(ctx, etcdConfig)\n\t// We want to assign a random unused port to etcd, but etcd doesn't give us a\n\t// way to read it back out later. We can work around this by creating our own\n\t// listener on a random port, find out which port was used, close that\n\t// listener, and pass that port down for etcd to use. 
There is a small race\n\t// condition here where someone else can steal the port between these steps,\n\t// but it should be fairly minimal and depends on how the OS assigns\n\t// unallocated ports.\n\tlistener, err := net.Listen(\"tcp\", \"localhost:0\")\n\trequire.NoError(t, err)\n\trequire.NoError(t, listener.Close())\n\n\tclientURL, err := url.Parse(fmt.Sprintf(\"http://%s\", listener.Addr().String()))\n\trequire.NoError(t, err)\n\n\tetcdConfig.ListenPeerUrls = []url.URL{}\n\tetcdConfig.ListenClientUrls = []url.URL{*clientURL}\n\n\t// Throw away noisy messages from etcd - comment these out if you need to debug\n\t// a failed start\n\tlevel.SetLevel(zapcore.ErrorLevel)\n\n\tenv.Etcd, err = embed.StartEtcd(etcdConfig)\n\trequire.NoError(t, err)\n\tt.Cleanup(env.Etcd.Close)\n\n\teg.Go(func() error {\n\t\treturn errorWait(ctx, env.Etcd.Err())\n\t})\n\n\t// Wait for the server to become ready, then restore the log level.\n\tselect {\n\tcase <-env.Etcd.Server.ReadyNotify():\n\t\t// This used to be DebugLevel. It's a lot of noise to sort through and I'm not sure\n\t\t// anyone has ever wanted to read them. Feel free to change this back to DebugLevel\n\t\t// if it helps you, though.\n\t\tlevel.SetLevel(zapcore.InfoLevel)\n\tcase <-time.After(30 * time.Second):\n\t\tt.Fatal(\"etcd did not start after 30 seconds\")\n\t}\n\n\tcfg := log.GetEtcdClientConfig(env.Context)\n\tcfg.Endpoints = []string{clientURL.String()}\n\tcfg.DialOptions = client.DefaultDialOptions()\n\tenv.EtcdClient, err = etcd.New(cfg)\n\trequire.NoError(t, err)\n\tt.Cleanup(func() {\n\t\trequire.NoError(t, env.EtcdClient.Close())\n\t})\n\n\t// TODO: supervise the EtcdClient connection and error the errgroup if they\n\t// go down\n\n\treturn env\n}",
"func NewEnv(host string) (Env, error) {\n\n\tconfig := strings.Split(host, \"-\")\n\n\tif len(config) != 3 {\n\t\tlog.Errorf(\"Host %s format is invalid\", host)\n\t\treturn Env{}, fmt.Errorf(service.EnvNotAvailable)\n\t}\n\n\tif strings.Index(config[2], \"env\") > -1 {\n\t\t//check container id and expose port\n\t\tport, err := service.GetEndpoint(config[0], config[1])\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn Env{}, fmt.Errorf(service.EnvNotAvailable)\n\t\t}\n\t\treturn Env{ContainerID: config[0], Port: port.HostPort, PrivateIP: port.HostIP, Region: config[2]}, nil\n\t} else {\n\t\tlog.Errorf(\"Host %s format is invalid\", host)\n\t\treturn Env{}, fmt.Errorf(service.EnvNotAvailable)\n\t}\n\n}",
"func TestNew(t *testing.T) {\n\te := newTestEnvr()\n\tif e.Name != envName {\n\t\tt.Errorf(\".Name = %s, want %s\", e.Name, envName)\n\t}\n\tfor i, v := range testVars {\n\t\tif e.RequiredVars[i] != v {\n\t\t\tt.Errorf(\"RequiredVars[%d] = %s, want %s\", i, e.RequiredVars[i], v)\n\t\t}\n\t}\n}",
"func newClassWithEnv(\n\tname string,\n\tsuperClass RubyClass,\n\tinstanceMethods,\n\tclassMethods map[string]RubyMethod,\n\tbuilder func(RubyClassObject, ...RubyObject) (RubyObject, error),\n\tenv Environment,\n) *class {\n\tvar superclassClass RubyClass = classClass\n\tif superClass != nil {\n\t\tsuperclassClass = superClass.(RubyClassObject).Class()\n\t}\n\treturn &class{\n\t\tname: name,\n\t\tsuperClass: superClass,\n\t\tinstanceMethods: NewMethodSet(instanceMethods),\n\t\tclass: newEigenclass(superclassClass, classMethods),\n\t\tbuilder: builder,\n\t\tEnvironment: NewEnclosedEnvironment(env),\n\t}\n}",
"func NewWorkspace(name string, environment map[string]string, columns map[string]map[string][]string, inheritEnv bool) *Workspace {\n\tif environment == nil {\n\t\tenvironment = make(map[string]string)\n\t}\n\tws := &Workspace{\n\t\tName: name,\n\t\tEnvironment: environment,\n\t\tTasks: make(map[string]*Task),\n\t\tFunctions: make(map[string]*Function),\n\t\tColumns: columns,\n\t\tInheritEnvironment: inheritEnv,\n\t}\n\tif _, ok := ws.Environment[\"WORKSPACE\"]; !ok {\n\t\tws.Environment[\"WORKSPACE\"] = name\n\t}\n\treturn ws\n}",
"func (env *Environment) New(testFn TestFunc) *Fixture {\n\treturn &Fixture{\n\t\tname: runtime.FuncForPC(reflect.ValueOf(testFn).Pointer()).Name(),\n\t\ttestFn: testFn,\n\t\tenv: env,\n\t}\n}",
"func NewFinalEnvironment(env *Environment, Ds *HoppingEV) *FinalEnvironment {\n\tDco := Ds.Dco(env)\n\tFreeEnergy := env.FreeEnergy(Ds)\n\tfenv := FinalEnvironment{*env, Dco, FreeEnergy}\n\treturn &fenv\n}",
"func NewEnv() (*Env, error) {\n\tvar _env *C.MDB_env\n\tret := C.mdb_env_create(&_env)\n\tif ret != SUCCESS {\n\t\treturn nil, Errno(ret)\n\t}\n\treturn &Env{_env}, nil\n}",
"func NewEnvProvider() envProvider {\n\treturn envProvider{}\n}",
"func NewLongTermEnvironment(ctx *pulumi.Context,\n\tname string, args *LongTermEnvironmentArgs, opts ...pulumi.ResourceOption) (*LongTermEnvironment, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Kind == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Kind'\")\n\t}\n\tif args.ResourceGroupName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ResourceGroupName'\")\n\t}\n\tif args.Sku == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Sku'\")\n\t}\n\tif args.StorageConfiguration == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'StorageConfiguration'\")\n\t}\n\tif args.TimeSeriesIdProperties == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'TimeSeriesIdProperties'\")\n\t}\n\targs.Kind = pulumi.String(\"LongTerm\")\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:timeseriesinsights/v20180815preview:LongTermEnvironment\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:timeseriesinsights:LongTermEnvironment\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:timeseriesinsights:LongTermEnvironment\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:timeseriesinsights/v20170228preview:LongTermEnvironment\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:timeseriesinsights/v20170228preview:LongTermEnvironment\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:timeseriesinsights/v20171115:LongTermEnvironment\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:timeseriesinsights/v20171115:LongTermEnvironment\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:timeseriesinsights/v20200515:LongTermEnvironment\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:timeseriesinsights/v20200515:LongTermEnvironment\"),\n\t\t},\n\t})\n\topts = append(opts, 
aliases)\n\tvar resource LongTermEnvironment\n\terr := ctx.RegisterResource(\"azure-native:timeseriesinsights/v20180815preview:LongTermEnvironment\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func newTestGlobals() *router.ApplicationGlobals {\n\treturn &router.ApplicationGlobals{\n\t\tStorage: newTestModel(),\n\t\tFetcher: newTestFetcher(),\n\t\tWriter: newResponseWriter(),\n\t}\n}",
"func (code *InterpCode) NewContext(cfg *ContextConfig) (ictx Context, err error) {\n\tdefer func() {\n\t\tierr := recover()\n\t\tif ierr == nil {\n\t\t\treturn\n\t\t}\n\t\tif v, ok := ierr.(error); ok {\n\t\t\terr = v\n\t\t}\n\t\terr = fmt.Errorf(\"%s\", ierr)\n\t}()\n\tdefer CaptureTrap(&err)\n\tvm, err := exec.NewVM(code.module,\n\t\texec.WithLazyCompile(true),\n\t\texec.WithGasMapper(new(GasMapper)),\n\t\texec.WithGasLimit(cfg.GasLimit))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvm.RecoverPanic = true\n\tctx := &wagonContext{\n\t\tmodule: code.module,\n\t\tvm: vm,\n\t\tuserData: make(map[string]interface{}),\n\t}\n\tvm.UserData = ctx\n\tictx = ctx\n\treturn\n}",
"func newBuildPipeline(t gaia.PipelineType) BuildPipeline {\n\tvar bP BuildPipeline\n\n\t// Create build pipeline for given pipeline type\n\tswitch t {\n\tcase gaia.PTypeGolang:\n\t\tbP = &BuildPipelineGolang{\n\t\t\tType: t,\n\t\t}\n\tcase gaia.PTypeJava:\n\t\tbP = &BuildPipelineJava{\n\t\t\tType: t,\n\t\t}\n\tcase gaia.PTypePython:\n\t\tbP = &BuildPipelinePython{\n\t\t\tType: t,\n\t\t}\n\tcase gaia.PTypeCpp:\n\t\tbP = &BuildPipelineCpp{\n\t\t\tType: t,\n\t\t}\n\tcase gaia.PTypeRuby:\n\t\tbP = &BuildPipelineRuby{\n\t\t\tType: t,\n\t\t}\n\tcase gaia.PTypeNodeJS:\n\t\tbP = &BuildPipelineNodeJS{\n\t\t\tType: t,\n\t\t}\n\t}\n\n\treturn bP\n}",
"func (cfg *Config) makeEnv(c context.Context, e *vpython.Environment) (*Env, error) {\n\t// We MUST have a package loader.\n\tif cfg.Loader == nil {\n\t\treturn nil, errors.New(\"no package loader provided\")\n\t}\n\n\t// Resolve our base directory, if one is not supplied.\n\tif cfg.BaseDir == \"\" {\n\t\t// Use one in a temporary directory.\n\t\tcfg.BaseDir = filepath.Join(os.TempDir(), \"vpython\")\n\t\tlogging.Debugf(c, \"Using tempdir-relative environment root: %s\", cfg.BaseDir)\n\t}\n\tif err := filesystem.AbsPath(&cfg.BaseDir); err != nil {\n\t\treturn nil, errors.Annotate(err, \"failed to resolve absolute path of base directory\").Err()\n\t}\n\n\t// Enforce maximum path length.\n\tif cfg.MaxScriptPathLen > 0 {\n\t\tif longestPath := longestGeneratedScriptPath(cfg.BaseDir); longestPath != \"\" {\n\t\t\tlongestPathLen := utf8.RuneCountInString(longestPath)\n\t\t\tif longestPathLen > cfg.MaxScriptPathLen {\n\t\t\t\treturn nil, errors.Reason(\n\t\t\t\t\t\"expected deepest path length (%d) exceeds threshold (%d)\",\n\t\t\t\t\tlongestPathLen, cfg.MaxScriptPathLen,\n\t\t\t\t).InternalReason(\"longestPath(%q)\", longestPath).Err()\n\t\t\t}\n\t\t}\n\t}\n\n\t// Construct a new, independent Environment for this Env.\n\te = e.Clone()\n\tif cfg.Spec != nil {\n\t\te.Spec = cfg.Spec.Clone()\n\t}\n\tif err := spec.NormalizeEnvironment(e); err != nil {\n\t\treturn nil, errors.Annotate(err, \"invalid environment\").Err()\n\t}\n\n\t// If the environment doesn't specify a VirtualEnv package (expected), use\n\t// our default.\n\tif e.Spec.Virtualenv == nil {\n\t\te.Spec.Virtualenv = &cfg.Package\n\t}\n\n\tif err := cfg.Loader.Resolve(c, e); err != nil {\n\t\treturn nil, errors.Annotate(err, \"failed to resolve packages\").Err()\n\t}\n\n\tif err := cfg.resolvePythonInterpreter(c, e.Spec); err != nil {\n\t\treturn nil, errors.Annotate(err, \"failed to resolve system Python interpreter\").Err()\n\t}\n\te.Runtime.Path = cfg.si.Python\n\te.Runtime.Version = 
e.Spec.PythonVersion\n\n\tvar err error\n\tif e.Runtime.Hash, err = cfg.si.Hash(); err != nil {\n\t\treturn nil, err\n\t}\n\tlogging.Debugf(c, \"Resolved system Python runtime (%s @ %s): %s\",\n\t\te.Runtime.Version, e.Runtime.Hash, e.Runtime.Path)\n\n\t// Ensure that our base directory exists.\n\tif err := filesystem.MakeDirs(cfg.BaseDir); err != nil {\n\t\treturn nil, errors.Annotate(err, \"could not create environment root: %s\", cfg.BaseDir).Err()\n\t}\n\n\t// Generate our environment name based on the deterministic hash of its\n\t// fully-resolved specification.\n\tenvName := cfg.OverrideName\n\tif envName == \"\" {\n\t\tenvName = cfg.envNameForSpec(e.Spec, e.Runtime)\n\t}\n\tenv := cfg.envForName(envName, e)\n\treturn env, nil\n}",
"func newTestEnvr() *Envr {\n\tfor _, v := range testVars {\n\t\tif err := os.Unsetenv(v); err != nil {\n\t\t\tlog.Fatalf(\"os.Unsetenv() err = %s\", err)\n\t\t}\n\t}\n\treturn New(envName, testVars)\n}",
"func NewEnv() *Env {\n\treturn &Env{\n\t\tos.Getenv(clusterRoot),\n\t\tos.Getenv(kubeConfig),\n\t}\n}",
"func makeTestEnv(t *testing.T, name string) testEnv {\n\tctx := context.Background()\n\n\tlocalRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption)\n\tif err != nil {\n\t\tt.Fatalf(\"error creating registry: %v\", err)\n\t}\n\tlocalRepo, err := localRegistry.Repository(ctx, name)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error getting repo: %v\", err)\n\t}\n\n\ttruthRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()))\n\tif err != nil {\n\t\tt.Fatalf(\"error creating registry: %v\", err)\n\t}\n\ttruthRepo, err := truthRegistry.Repository(ctx, name)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error getting repo: %v\", err)\n\t}\n\n\ttruthBlobs := statsBlobStore{\n\t\tstats: make(map[string]int),\n\t\tblobs: truthRepo.Blobs(ctx),\n\t}\n\n\tlocalBlobs := statsBlobStore{\n\t\tstats: make(map[string]int),\n\t\tblobs: localRepo.Blobs(ctx),\n\t}\n\n\ts := scheduler.New(ctx, inmemory.New(), \"/scheduler-state.json\")\n\n\tproxyBlobStore := proxyBlobStore{\n\t\tremoteStore: truthBlobs,\n\t\tlocalStore: localBlobs,\n\t\tscheduler: s,\n\t}\n\n\tte := testEnv{\n\t\tstore: proxyBlobStore,\n\t\tctx: ctx,\n\t}\n\treturn te\n}",
"func New(t *testing.T, cfg Config) *Environment {\n\te := &Environment{\n\t\thelmPath: \"../kubernetes_helm/helm\",\n\t\tsynkPath: \"src/go/cmd/synk/synk_/synk\",\n\t\tt: t,\n\t\tcfg: cfg,\n\t\tscheme: k8sruntime.NewScheme(),\n\t\tclusters: map[string]*cluster{},\n\t}\n\tif cfg.SchemeFunc != nil {\n\t\tcfg.SchemeFunc(e.scheme)\n\t}\n\tscheme.AddToScheme(e.scheme)\n\n\tvar g errgroup.Group\n\t// Setup cluster concurrently.\n\tfor _, cfg := range cfg.Clusters {\n\t\t// Make name unique to avoid collisions across parallel tests.\n\t\tuniqName := fmt.Sprintf(\"%s-%x\", cfg.Name, time.Now().UnixNano())\n\t\tt.Logf(\"Assigned unique name %q to cluster %q\", uniqName, cfg.Name)\n\n\t\tcluster := &cluster{\n\t\t\tgenName: uniqName,\n\t\t\tcfg: cfg,\n\t\t}\n\t\te.clusters[cfg.Name] = cluster\n\n\t\tg.Go(func() error {\n\t\t\tif err := setupCluster(e.synkPath, cluster); err != nil {\n\t\t\t\t// If cluster has already been created, delete it.\n\t\t\t\tif cluster.kind != nil && os.Getenv(\"NO_TEARDOWN\") == \"\" {\n\t\t\t\t\tcluster.kind.Delete(cfg.Name, \"\")\n\t\t\t\t\tif cluster.kubeConfigPath != \"\" {\n\t\t\t\t\t\tos.Remove(cluster.kubeConfigPath)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn errors.Wrapf(err, \"Create cluster %q\", cfg.Name)\n\t\t\t}\n\t\t\tlog.Printf(\"Created cluster %q\", cfg.Name)\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err := g.Wait(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn e\n}",
"func NewTestContext(t *testing.T) *TestContext {\n\t// Provide a way for tests to provide and capture stdin and stdout\n\t// Copy output to the test log simultaneously, use go test -v to see the output\n\terr := &bytes.Buffer{}\n\taggErr := io.MultiWriter(err, test.Logger{T: t})\n\tout := &bytes.Buffer{}\n\taggOut := io.MultiWriter(out, test.Logger{T: t})\n\n\tinnerContext := New()\n\tinnerContext.correlationId = \"0\"\n\tinnerContext.timestampLogs = false\n\tinnerContext.environ = getEnviron()\n\tinnerContext.FileSystem = aferox.NewAferox(\"/\", afero.NewMemMapFs())\n\tinnerContext.In = &bytes.Buffer{}\n\tinnerContext.Out = aggOut\n\tinnerContext.Err = aggErr\n\tinnerContext.ConfigureLogging(context.Background(), LogConfiguration{\n\t\tLogLevel: zapcore.DebugLevel,\n\t\tVerbosity: zapcore.DebugLevel,\n\t})\n\tinnerContext.PlugInDebugContext = &PluginDebugContext{\n\t\tDebuggerPort: \"2735\",\n\t\tRunPlugInInDebugger: \"\",\n\t\tPlugInWorkingDirectory: \"\",\n\t}\n\n\tc := &TestContext{\n\t\tContext: innerContext,\n\t\tcapturedOut: out,\n\t\tcapturedErr: err,\n\t\tT: t,\n\t}\n\n\tc.NewCommand = c.NewTestCommand\n\n\treturn c\n}",
"func NewEnvironmentProvider() *EnvironmentProvider {\n\treturn &EnvironmentProvider{\n\t\tlookup: os.LookupEnv,\n\t}\n}",
"func (client *Client) NewSharedEnvironment(bucketKey string, environment Environment) (Environment, error) {\n\tvar newEnvironment = Environment{}\n\n\tpath := fmt.Sprintf(\"buckets/%s/environments\", bucketKey)\n\tdata, err := json.Marshal(&environment)\n\tif err != nil {\n\t\treturn newEnvironment, err\n\t}\n\n\tcontent, err := client.Post(path, data)\n\tif err != nil {\n\t\treturn newEnvironment, err\n\t}\n\n\terr = unmarshal(content, &newEnvironment)\n\treturn newEnvironment, err\n}",
"func newScope(parent *scope) scope {\n\treturn scope{objects: map[string]objectAndSource{}, parent: parent}\n}",
"func (env *Environment) Clone() ext.Environment {\n\tclone := NewEnvironment()\n\tclone.goCallbacks = env.goCallbacks\n\treturn clone\n}",
"func execNewScope(_ int, p *gop.Context) {\n\targs := p.GetArgs(4)\n\tret := types.NewScope(args[0].(*types.Scope), token.Pos(args[1].(int)), token.Pos(args[2].(int)), args[3].(string))\n\tp.Ret(4, ret)\n}",
"func NewEnvStorage(prefix string, uppercase bool) *EnvStorage {\n\tes := &EnvStorage{prefix, uppercase}\n\tstorage = es\n\treturn es\n}",
"func (client LabClient) CreateEnvironment(ctx context.Context, resourceGroupName string, name string, labVirtualMachine LabVirtualMachine) (result LabCreateEnvironmentFuture, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/LabClient.CreateEnvironment\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response() != nil {\n\t\t\t\tsc = result.Response().StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\treq, err := client.CreateEnvironmentPreparer(ctx, resourceGroupName, name, labVirtualMachine)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"dtl.LabClient\", \"CreateEnvironment\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresult, err = client.CreateEnvironmentSender(req)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"dtl.LabClient\", \"CreateEnvironment\", result.Response(), \"Failure sending request\")\n\t\treturn\n\t}\n\n\treturn\n}",
"func NewType() Type {}",
"func NewEnvProvider(pub *ecdsa.PublicKey, session conf.SessionInterface, client v1auth.TokenClientInterface) *EnvProvider {\n\treturn &EnvProvider{\n\t\tProviderBasis: &ProviderBasis{\n\t\t\tExpireWindow: DefaultExpireWindow,\n\t\t\tPub: pub,\n\t\t},\n\t\tSession: session,\n\t\tClient: client,\n\t}\n}",
"func NewInterpreter(lox *Lox, global env) Interpreter {\n\tinterpreter := Interpreter{\n\t\tlox,\n\t\tglobal,\n\t\tglobal,\n\t\tmake(map[Expr]int, 0),\n\t}\n\n\tinterpreter.init()\n\treturn interpreter\n}"
] | [
"0.6802027",
"0.6662787",
"0.6633159",
"0.6615684",
"0.6584132",
"0.657267",
"0.6483676",
"0.6449514",
"0.6449514",
"0.6340193",
"0.62803227",
"0.6262485",
"0.6236817",
"0.6187867",
"0.61719394",
"0.61681217",
"0.61587524",
"0.61014456",
"0.60950303",
"0.6028156",
"0.6007955",
"0.598239",
"0.5940723",
"0.5926037",
"0.58576983",
"0.5851898",
"0.58388686",
"0.58104336",
"0.5809",
"0.57580715",
"0.5735858",
"0.56955254",
"0.56705415",
"0.56609523",
"0.5650344",
"0.5643218",
"0.559436",
"0.5481446",
"0.54764366",
"0.5461393",
"0.54553026",
"0.54057974",
"0.5385194",
"0.53761625",
"0.53344643",
"0.5317075",
"0.53095263",
"0.5210989",
"0.5185879",
"0.5114672",
"0.5107421",
"0.51028216",
"0.509882",
"0.50582486",
"0.50445503",
"0.5043563",
"0.5041337",
"0.5035036",
"0.49907923",
"0.4979247",
"0.4974648",
"0.4974648",
"0.49704933",
"0.49640328",
"0.49542436",
"0.4939402",
"0.4930497",
"0.49216104",
"0.49195525",
"0.4916684",
"0.49119532",
"0.49058124",
"0.48884198",
"0.48872113",
"0.48851854",
"0.48557413",
"0.48542297",
"0.48530164",
"0.4847718",
"0.48386425",
"0.4834988",
"0.48337156",
"0.48288724",
"0.48269606",
"0.48245615",
"0.48208556",
"0.4808768",
"0.48065257",
"0.47979945",
"0.4794312",
"0.4790062",
"0.47896102",
"0.47895336",
"0.47837305",
"0.478256",
"0.47699955",
"0.47683263",
"0.4768155",
"0.4755569",
"0.47555262"
] | 0.727 | 0 |
EncodeTo encodes the response to []byte by bencode and write the result into w. w may be http.ResponseWriter. | EncodeTo кодирует ответ в []byte с использованием bencode и записывает результат в w. w может быть http.ResponseWriter. | func (sr ScrapeResponse) EncodeTo(w io.Writer) (err error) {
return bencode.NewEncoder(w).Encode(sr)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (resp *Response) WriteTo(w http.ResponseWriter) {\n\tif resp.buf != nil {\n\t\tresp.Body = resp.buf.Bytes()\n\t\tresp.buf = nil\n\t}\n\n\tif w != nil {\n\t\t// Write the headers\n\t\tfor k, vs := range resp.Header {\n\t\t\t// Reset existing values\n\t\t\tw.Header().Del(k)\n\t\t\tif len(vs) == 1 {\n\t\t\t\tw.Header().Set(k, resp.Header.Get(k))\n\t\t\t}\n\t\t\tif len(vs) > 1 {\n\t\t\t\tfor _, v := range vs {\n\t\t\t\t\tw.Header().Add(k, v)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif resp.redirect != \"\" {\n\t\t\thttp.Redirect(w, resp.req, resp.redirect, resp.StatusCode)\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(resp.StatusCode)\n\t\tw.Write(resp.Body)\n\t}\n}",
"func (res Responder) WriteTo(w io.Writer) (int64, error) {\n\treturn res.b.WriteTo(w)\n}",
"func (w responseWriter) Write(b []byte) (int, error) {\n\t// 向一个bytes.buffer中写一份数据来为获取body使用\n\tw.b.Write(b)\n\t// 完成http.ResponseWriter.Write()原有功能\n\treturn w.ResponseWriter.Write(b)\n}",
"func (p *Poll) EncodeToByte() []byte {\n\tb, _ := json.Marshal(p)\n\treturn b\n}",
"func writeHTTPResponseInWriter(httpRes http.ResponseWriter, httpReq *http.Request, nobelPrizeWinnersResponse []byte, err error) {\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(httpRes, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlog.Printf(\"Request %s Succesfully Completed\", httpReq.RequestURI)\n\thttpRes.Header().Set(\"Content-Type\", \"application/json\")\n\thttpRes.Write(nobelPrizeWinnersResponse)\n}",
"func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tresp := response.(*common.XmidtResponse)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Header().Set(common.HeaderWPATID, ctx.Value(common.ContextKeyRequestTID).(string))\n\tcommon.ForwardHeadersByPrefix(\"\", resp.ForwardedHeaders, w.Header())\n\n\tw.WriteHeader(resp.Code)\n\t_, err = w.Write(resp.Body)\n\treturn\n}",
"func encodeResponse(resp *plugin.CodeGeneratorResponse, w io.Writer) {\n\toutBytes, err := proto.Marshal(resp)\n\tif err != nil {\n\t\tlog.Fatal(\"unable to marshal response to protobuf: \" + err.Error())\n\t}\n\n\tif _, err := w.Write(outBytes); err != nil {\n\t\tlog.Fatal(\"unable to write protobuf to stdout: \" + err.Error())\n\t}\n}",
"func encodeGetUserResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}",
"func (r *Response) Write(w io.Writer) error",
"func (e *RegisterRequest) WriteTo(w http.ResponseWriter) error {\n\tb, err := json.Marshal(e)\n\tif err != nil {\n\t\treturn err\n\t}\n\tw.Header().Set(\"content-type\", \"application/json\")\n\tw.Write(b)\n\treturn nil\n}",
"func encodeGetResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}",
"func encodeGetResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}",
"func (w *WriterInterceptor) Write(b []byte) (int, error) {\n\tlength := w.response.Header.Get(\"Content-Length\")\n\tif length == \"\" || length == \"0\" {\n\t\tw.buf = b\n\t\treturn w.DoWrite()\n\t}\n\n\tw.response.ContentLength += int64(len(b))\n\tw.buf = append(w.buf, b...)\n\n\t// If not EOF\n\tif cl, _ := strconv.Atoi(length); w.response.ContentLength != int64(cl) {\n\t\treturn len(b), nil\n\t}\n\n\tw.response.Body = ioutil.NopCloser(bytes.NewReader(w.buf))\n\tresm := NewResponseModifier(w.response.Request, w.response)\n\tw.modifier(resm)\n\treturn w.DoWrite()\n}",
"func Encode(w http.ResponseWriter, r *http.Request, status int, v interface{}) error {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"encode json\")\n\t}\n\tvar out io.Writer = w\n\tif strings.Contains(r.Header.Get(\"Accept-Encoding\"), \"gzip\") {\n\t\tw.Header().Set(\"Content-Encoding\", \"gzip\")\n\t\tgzw := gzip.NewWriter(w)\n\t\tout = gzw\n\t\tdefer gzw.Close()\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tw.WriteHeader(status)\n\tif _, err := out.Write(b); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func EncodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {\n return json.NewEncoder(w).Encode(response)\n}",
"func (res *pbResponse) MarshalTo(buf []byte) (int, error) {\n\tvar size = uint64(res.Size())\n\tif uint64(cap(buf)) >= size {\n\t\tbuf = buf[:size]\n\t} else {\n\t\treturn 0, fmt.Errorf(\"proto: pbResponse: buf is too short\")\n\t}\n\tvar offset uint64\n\tvar n uint64\n\tif res.Seq != 0 {\n\t\tbuf[offset] = 1<<3 | 0\n\t\toffset++\n\t\t//n = code.EncodeVarint(buf[offset:], res.Seq)\n\t\t{\n\t\t\tvar t = res.Seq\n\t\t\tvar size = code.SizeofVarint(t)\n\t\t\tfor i := uint64(0); i < size-1; i++ {\n\t\t\t\tbuf[offset+i] = byte(t) | 0x80\n\t\t\t\tt >>= 7\n\t\t\t}\n\t\t\tbuf[offset+size-1] = byte(t)\n\t\t\tn = size\n\t\t}\n\t\toffset += n\n\t}\n\tif len(res.Error) > 0 {\n\t\tbuf[offset] = 2<<3 | 2\n\t\toffset++\n\t\t//n = code.EncodeString(buf[offset:], res.Error)\n\t\t{\n\t\t\tvar length = uint64(len(res.Error))\n\t\t\tvar lengthSize = code.SizeofVarint(length)\n\t\t\tvar s = lengthSize + length\n\t\t\tt := length\n\t\t\tfor i := uint64(0); i < lengthSize-1; i++ {\n\t\t\t\tbuf[offset+i] = byte(t) | 0x80\n\t\t\t\tt >>= 7\n\t\t\t}\n\t\t\tbuf[offset+lengthSize-1] = byte(t)\n\t\t\tcopy(buf[offset+lengthSize:], res.Error)\n\t\t\tn = s\n\t\t}\n\t\toffset += n\n\t}\n\tif len(res.Reply) > 0 {\n\t\tbuf[offset] = 3<<3 | 2\n\t\toffset++\n\t\t//n = code.EncodeBytes(buf[offset:], res.Reply)\n\t\t{\n\t\t\tvar length = uint64(len(res.Reply))\n\t\t\tvar lengthSize = code.SizeofVarint(length)\n\t\t\tvar s = lengthSize + length\n\t\t\tt := length\n\t\t\tfor i := uint64(0); i < lengthSize-1; i++ {\n\t\t\t\tbuf[offset+i] = byte(t) | 0x80\n\t\t\t\tt >>= 7\n\t\t\t}\n\t\t\tbuf[offset+lengthSize-1] = byte(t)\n\t\t\tcopy(buf[offset+lengthSize:], res.Reply)\n\t\t\tn = s\n\t\t}\n\t\toffset += n\n\t}\n\treturn int(offset), nil\n}",
"func (bs endecBytes) WriteTo(w io.Writer) (int64, error) {\n\tn, err := w.Write(bs)\n\treturn int64(n), err\n}",
"func writeResponse(body []byte, w *http.ResponseWriter) {\n\t(*w).Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t_, err := (*w).Write(body)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\t(*w).WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n}",
"func writeResponse(data interface{}, w http.ResponseWriter) error {\n\tvar (\n\t\tenc []byte\n\t\terr error\n\t)\n\tenc, err = json.Marshal(data)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to marshal, err = %s\", err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tn, err := w.Write(enc)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to write, err = %s\", err)\n\t}\n\tif n != len(enc) {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Short write sent = %d, wrote = %d\", len(enc), n)\n\t}\n\treturn nil\n}",
"func (w *ResponseWriterTee) Write(b []byte) (int, error) {\n\tw.Buffer.Write(b)\n\treturn w.w.Write(b)\n}",
"func encodeByteSlice(w io.Writer, bz []byte) (err error) {\n\terr = encodeVarint(w, int64(len(bz)))\n\tif err != nil {\n\t\treturn\n\t}\n\t_, err = w.Write(bz)\n\treturn\n}",
"func EncodeDoublySecureResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, any) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v any) error {\n\t\tres, _ := v.(string)\n\t\tenc := encoder(ctx, w)\n\t\tbody := res\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func encode(i interface{}, w http.ResponseWriter) {\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\n\tenc := json.NewEncoder(w)\n\terr := enc.Encode(i)\n\t// Problems encoding\n\tif err != nil {\n\t\tLoggingClient.Error(\"Error encoding the data: \" + err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}",
"func encodeGetUserDealByStateResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}",
"func (o *SearchTournamentsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make([]*models.Tournament, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}",
"func encodeGetDealByStateResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}",
"func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {\n\tif e, ok := response.(errorer); ok && e.error() != nil {\n\t\t// Not a Go kit transport error, but a business-logic error.\n\t\t// Provide those as HTTP errors.\n\t\tencodeError(ctx, e.error(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", ContentType)\n\treturn json.NewEncoder(w).Encode(response)\n}",
"func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {\n\tif e, ok := response.(errorer); ok && e.error() != nil {\n\t\tEncodeError(ctx, e.error(), w)\n\t\treturn nil\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\treturn nil\n}",
"func WriteBytes(w http.ResponseWriter, status int, text []byte) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\tw.Write(text)\n}",
"func (DefaultDispatcher) Write(rw http.ResponseWriter, resp Response) error {\n\tswitch x := resp.(type) {\n\tcase JSONResponse:\n\t\trw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\t\tio.WriteString(rw, \")]}',\\n\") // Break parsing of JavaScript in order to prevent XSSI.\n\t\treturn json.NewEncoder(rw).Encode(x.Data)\n\tcase *TemplateResponse:\n\t\tt, ok := (x.Template).(*template.Template)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"%T is not a safe template and it cannot be parsed and written\", t)\n\t\t}\n\t\trw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\t\tif len(x.FuncMap) == 0 {\n\t\t\tif x.Name == \"\" {\n\t\t\t\treturn t.Execute(rw, x.Data)\n\t\t\t}\n\t\t\treturn t.ExecuteTemplate(rw, x.Name, x.Data)\n\t\t}\n\t\tcloned, err := t.Clone()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcloned = cloned.Funcs(x.FuncMap)\n\t\tif x.Name == \"\" {\n\t\t\treturn cloned.Execute(rw, x.Data)\n\t\t}\n\t\treturn cloned.ExecuteTemplate(rw, x.Name, x.Data)\n\tcase safehtml.HTML:\n\t\trw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\t\t_, err := io.WriteString(rw, x.String())\n\t\treturn err\n\tcase FileServerResponse:\n\t\trw.Header().Set(\"Content-Type\", x.ContentType())\n\t\t// The http package will take care of writing the file body.\n\t\treturn nil\n\tcase RedirectResponse:\n\t\thttp.Redirect(rw, x.Request.req, x.Location, int(x.Code))\n\t\treturn nil\n\tcase NoContentResponse:\n\t\trw.WriteHeader(int(StatusNoContent))\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"%T is not a safe response type and it cannot be written\", resp)\n\t}\n}",
"func (j *JSendWriterBuffer) Write(b []byte) (int, error) {\n\treturn j.responseWriter.Write(b)\n}",
"func (w *customResponseWriter) Write(b []byte) (int, error) {\n\tif w.status == 0 {\n\t\tw.status = http.StatusOK\n\t}\n\tn, err := w.ResponseWriter.Write(b)\n\tw.length += n\n\treturn n, err\n}",
"func EncodeProcessingResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, any) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v any) error {\n\t\tres, _ := v.([]string)\n\t\tenc := encoder(ctx, w)\n\t\tbody := res\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func (r tokenResponseWriter) Write(b []byte) (int, error) {\n\treturn r.w.Write(b) // pass it to the original ResponseWriter\n}",
"func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {\n\treturn json.NewEncoder(w).Encode(response)\n}",
"func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {\n\treturn json.NewEncoder(w).Encode(response)\n}",
"func JsonEncodeAndWriteResponse(response http.ResponseWriter, value interface{}) error {\n\n\tif value == nil {\n\t\treturn NewStackError(\"Nil value passed\")\n\t}\n\n\trawJson, err := json.Marshal(value)\n\tif err != nil {\n\t\thttp.Error(response, \"Error\", 500)\n\t\treturn NewStackError(\"Unable to marshal json: %v\", err)\n\t}\n\n\tresponse.Header().Set(ContentTypeHeader, ContentTypeJson)\n\n\twritten, err := response.Write(rawJson)\n\tif err != nil {\n\t\treturn NewStackError(\"Unable to write response: %v\", err)\n\t}\n\n\tif written != len(rawJson) {\n\t\treturn NewStackError(\"Unable to write full response - wrote: %d - expected: %d\", written, len(rawJson))\n\t}\n\n\treturn nil\n}",
"func ToBytes(inter interface{}) []byte {\n\treqBodyBytes := new(bytes.Buffer)\n\tjson.NewEncoder(reqBodyBytes).Encode(inter)\n\tfmt.Println(reqBodyBytes.Bytes()) // this is the []byte\n\tfmt.Println(string(reqBodyBytes.Bytes())) // converted back to show it's your original object\n\treturn reqBodyBytes.Bytes()\n}",
"func EncodeAlsoDoublySecureResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, any) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v any) error {\n\t\tres, _ := v.(string)\n\t\tenc := encoder(ctx, w)\n\t\tbody := res\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {\n\t// Set JSON type\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\n\t// Check error\n\tif e, ok := response.(errorer); ok {\n\t\t// This is a errorer class, now check for error\n\t\tif err := e.error(); err != nil {\n\t\t\tencodeError(ctx, e.error(), w)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// cast to dataHolder to get Data, otherwise just encode the resposne\n\tif holder, ok := response.(dataHolder); ok {\n\t\treturn json.NewEncoder(w).Encode(holder.getData())\n\t} else {\n\t\treturn json.NewEncoder(w).Encode(response)\n\t}\n}",
"func encodeGetByCreteriaResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}",
"func (o *GetBackendsBackendIDTestOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}",
"func encodeTextResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {\n\tif r, ok := response.(io.Reader); ok {\n\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, err := io.Copy(w, r)\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (handler Handler) Write(w http.ResponseWriter, b []byte) (int, error) {\n\treturn w.Write(b)\n}",
"func (request *RequestResponseFrame) WriteTo(w io.Writer) (wrote int64, err error) {\n\tif wrote, err = request.Header.WriteTo(w); err != nil {\n\t\treturn\n\t}\n\n\tvar n int64\n\n\tif request.HasMetadata() {\n\t\tif n, err = request.Metadata.WriteTo(w); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\twrote += n\n\t}\n\n\tif n, err = writeExact(w, []byte(request.Data)); err != nil {\n\t\treturn\n\t}\n\n\twrote += n\n\n\treturn\n}",
"func (e *RegistryV2Error) WriteAsRegistryV2ResponseTo(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tfor k, v := range e.Headers {\n\t\tw.Header()[k] = v\n\t}\n\tif e.Status == 0 {\n\t\tw.WriteHeader(apiErrorStatusCodes[e.Code])\n\t} else {\n\t\tw.WriteHeader(e.Status)\n\t}\n\tif r.Method != http.MethodHead {\n\t\tbuf, _ := json.Marshal(struct {\n\t\t\tErrors []*RegistryV2Error `json:\"errors\"`\n\t\t}{\n\t\t\tErrors: []*RegistryV2Error{e},\n\t\t})\n\t\tw.Write(append(buf, '\\n'))\n\t}\n}",
"func (w *multiWriter) Write(b []byte) (int, error) {\n\tvar resp logrus.Fields\n\tif w.isJSONResponse() {\n\t\tif err := json.Unmarshal(b, &resp); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tw.ctx.Set(\"response\", resp)\n\t} else {\n\t\tw.ctx.Set(\"response\", b)\n\t}\n\treturn w.ResponseWriter.Write(b)\n}",
"func writeResponse(w http.ResponseWriter, code int, object interface{}) {\n\tfmt.Println(\"writing response:\", code, object)\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tw.Header().Set(\"content-type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}",
"func EncodeGetResponse(_ context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\te := json.NewEncoder(w)\n\te.SetIndent(\"\", \"\\t\")\n\terr = e.Encode(response)\n\treturn err\n}",
"func (res *AddResponse) Encode(_ context.Context, w ResponseWriter) error {\n\treturn w.WriteError(ldaputil.ApplicationAddResponse, errors.New(\"not implemented\"))\n}",
"func (o *GetWhaleTranfersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\t// return empty array\n\t\tpayload = make([]*models.OperationsRow, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}",
"func encodeTransactionOutputsToBuffer(buf []byte, obj *transactionOutputs) error {\n\tif uint64(len(buf)) < encodeSizeTransactionOutputs(obj) {\n\t\treturn encoder.ErrBufferUnderflow\n\t}\n\n\te := &encoder.Encoder{\n\t\tBuffer: buf[:],\n\t}\n\n\t// obj.Out maxlen check\n\tif len(obj.Out) > 65535 {\n\t\treturn encoder.ErrMaxLenExceeded\n\t}\n\n\t// obj.Out length check\n\tif uint64(len(obj.Out)) > math.MaxUint32 {\n\t\treturn errors.New(\"obj.Out length exceeds math.MaxUint32\")\n\t}\n\n\t// obj.Out length\n\te.Uint32(uint32(len(obj.Out)))\n\n\t// obj.Out\n\tfor _, x := range obj.Out {\n\n\t\t// x.Address.Version\n\t\te.Uint8(x.Address.Version)\n\n\t\t// x.Address.Key\n\t\te.CopyBytes(x.Address.Key[:])\n\n\t\t// x.Coins\n\t\te.Uint64(x.Coins)\n\n\t\t// x.Hours\n\t\te.Uint64(x.Hours)\n\n\t}\n\n\treturn nil\n}",
"func (w *responseWriter) Write(b []byte) (int, error) {\n\tif w.Status == 0 {\n\t\tw.Status = 200\n\t}\n\tn, err := w.ResponseWriter.Write(b)\n\tw.Length += n\n\treturn n, err\n}",
"func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {\n\tif e, ok := response.(errorer); ok && e.error() != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\t\tw.WriteHeader(codeFrom(e.error()))\n\t\treturn marshalStructWithError(response, w)\n\t}\n\n\t// Used for pagination\n\tif e, ok := response.(counter); ok {\n\t\tw.Header().Set(\"X-Total-Count\", strconv.Itoa(e.count()))\n\t}\n\n\t// Don't overwrite a header (i.e. called from encodeTextResponse)\n\tif v := w.Header().Get(\"Content-Type\"); v == \"\" {\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\t\t// Only write json body if we're setting response as json\n\t\treturn json.NewEncoder(w).Encode(response)\n\t}\n\treturn nil\n}",
"func (r *TotalBalanceStateContext) EncodeToEth(resp *iotexapi.ReadStateResponse) (string, error) {\n\ttotal, ok := new(big.Int).SetString(string(resp.Data), 10)\n\tif !ok {\n\t\treturn \"\", errConvertBigNumber\n\t}\n\n\tdata, err := _totalBalanceMethod.Outputs.Pack(total)\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\n\treturn hex.EncodeToString(data), nil\n}",
"func (r *Tracker) Write(b []byte) (int, error) {\n\tif !r.Written() {\n\t\tr.WriteHeader(http.StatusOK)\n\t}\n\tif !r.Response.Discard {\n\t\tr.Response.Body.Write(b)\n\t}\n\treturn r.ResponseWriter.Write(b)\n}",
"func (r *ResponseReverter) Write(buf []byte) (int, error) {\n\tn, err := r.ResponseWriter.Write(buf)\n\treturn n, err\n}",
"func (b ByteArray) Encode(w io.Writer) error {\n\terr := util.WriteVarInt(w, len(b))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = w.Write(b)\n\treturn err\n}",
"func EncodeCreateResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*inventoryviews.Inventory)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewCreateResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func WriteResponse(w http.ResponseWriter, code int, object interface{}) {\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}",
"func (c *Context) WriteResponse(obj interface{}, statusCode int) os.Error {\n\n\tvar error os.Error\n\n\t// get the formatter\n\tformatter, error := GetFormatter(c.Format)\n\n\tif error != nil {\n\t\tc.writeInternalServerError(error, http.StatusNotFound)\n\t\treturn error\n\t} else {\n\n\t\t// set the content type\n\t\tc.ResponseWriter.Header()[\"Content-Type\"] = []string{formatter.ContentType()}\n\n\t\t// format the output\n\t\toutput, error := formatter.Format(obj)\n\n\t\tif error != nil {\n\t\t\tc.writeInternalServerError(error, http.StatusInternalServerError)\n\t\t\treturn error\n\t\t} else {\n\n\t\t\toutputString := string(output)\n\n\t\t\t/*\n\t\t\t\tJSONP\n\t\t\t*/\n\t\t\tcallback := c.GetCallback()\n\t\t\tif callback != \"\" {\n\n\t\t\t\t// wrap with function call\n\n\t\t\t\trequestContext := c.GetRequestContext()\n\n\t\t\t\toutputString = callback + \"(\" + outputString\n\n\t\t\t\tif requestContext != \"\" {\n\t\t\t\t\toutputString = outputString + \", \\\"\" + requestContext + \"\\\")\"\n\t\t\t\t} else {\n\t\t\t\t\toutputString = outputString + \")\"\n\t\t\t\t}\n\n\t\t\t\t// set the new content type\n\t\t\t\tc.ResponseWriter.Header()[\"Content-Type\"] = []string{JSONP_CONTENT_TYPE}\n\n\t\t\t}\n\n\t\t\t// write the status code\n\t\t\tif strings.Index(c.Request.URL.Raw, REQUEST_ALWAYS200_PARAMETER) > -1 {\n\n\t\t\t\t// \"always200\"\n\t\t\t\t// write a fake 200 status code (regardless of what the actual code was)\n\t\t\t\tc.ResponseWriter.WriteHeader(http.StatusOK)\n\n\t\t\t} else {\n\n\t\t\t\t// write the actual status code\n\t\t\t\tc.ResponseWriter.WriteHeader(statusCode)\n\n\t\t\t}\n\n\t\t\t// write the output\n\t\t\tc.ResponseWriter.Write([]uint8(outputString))\n\n\t\t}\n\n\t}\n\n\t// success - no errors\n\treturn nil\n\n}",
"func encodeStringResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {\n\tw.Header().Set(\"Content-Type\", \"application/json;charset=utf-8\")\n\treturn json.NewEncoder(w).Encode(response)\n}",
"func EncodeChangeResponse(_ context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\te := json.NewEncoder(w)\n\te.SetIndent(\"\", \"\\t\")\n\terr = e.Encode(response)\n\treturn err\n}",
"func responseToCLient(w http.ResponseWriter, str string) {\n\tw.Write([]byte(str)) // writing back to response writer\n}",
"func encodeResponse(w http.ResponseWriter, resp interface{}) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(resp)\n}",
"func encodeResponse(ctx context.Context, responseWriter http.ResponseWriter, response interface{}) error {\n\tif err, ok := response.(errorer); ok && err.error() != nil {\n\t\t// Not a Go kit transport error, but a business-logic error.\n\t\t// Provide those as HTTP errors.\n\t\tencodeError(ctx, err.error(), responseWriter)\n\t\treturn nil\n\t}\n\tresponseWriter.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\treturn json.NewEncoder(responseWriter).Encode(response)\n}",
"func EncodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {\n\n\tif e, ok := response.(Errorer); ok {\n\t\t// Not a Go kit transport error, but a business-logic error.\n\t\t// Provide those as HTTP errors.\n\t\tEncodeError(ctx, e.Error, w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tw.WriteHeader(http.StatusOK)\n\treturn json.NewEncoder(w).Encode(response)\n}",
"func encode(i interface{}, w http.ResponseWriter) {\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\n\tenc := json.NewEncoder(w)\n\terr := enc.Encode(i)\n\n\tif err != nil {\n\t\tLoggingClient.Error(\"Error encoding the data: \" + err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n}",
"func (ti TypeItem) ToBytes() []byte {\n\treqBodyBytes := new(bytes.Buffer)\n\tjson.NewEncoder(reqBodyBytes).Encode(ti)\n\treturn reqBodyBytes.Bytes()\n}",
"func (o *GetTradesByAccountOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetTradesByAccountOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}",
"func (e *Encoder) WriteTo(w io.Writer, data interface{}) error {\n\te.Reset(w)\n\treturn e.WriteObject(data)\n}",
"func (a *Authorization) WriteTo(w io.Writer) (int64, error) {\n\tvar n int64\n\tvar err error\n\twr := base64.NewEncoder(base64.StdEncoding, w)\n\tdefer wr.Close()\n\tts := a.timestampBytes()\n\twritten, err := wr.Write(append(ts, '|'))\n\tif err != nil {\n\t\treturn n, err\n\t}\n\tn += int64(written)\n\tfor _, binBuf := range [][]byte{a.salt, a.signature, a.rawMsg} {\n\t\twritten, err = wr.Write(binBuf)\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tn += int64(written)\n\t}\n\treturn n, err\n}",
"func (request *RequestFireAndForgetFrame) WriteTo(w io.Writer) (wrote int64, err error) {\n\tif wrote, err = request.Header.WriteTo(w); err != nil {\n\t\treturn\n\t}\n\n\tvar n int64\n\n\tif request.HasMetadata() {\n\t\tif n, err = request.Metadata.WriteTo(w); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\twrote += n\n\t}\n\n\tif n, err = writeExact(w, []byte(request.Data)); err != nil {\n\t\treturn\n\t}\n\n\twrote += n\n\n\treturn\n}",
"func encodeGetDealByDIDResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}",
"func (bf *BloomFilter) ToBytes(binBuf *bytes.Buffer) error {\n\n\tbinary.Write(binBuf, binary.LittleEndian, bf.errorRate)\n\tbinary.Write(binBuf, binary.LittleEndian, uint64(bf.numSlices))\n\tbinary.Write(binBuf, binary.LittleEndian, uint64(bf.bitsPerSlice))\n\tbinary.Write(binBuf, binary.LittleEndian, uint64(bf.capacity))\n\tbinary.Write(binBuf, binary.LittleEndian, uint64(bf.count))\n\n\treturn bf.bitarray.ToBytes(binBuf)\n}",
"func EncodeSecureResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, any) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v any) error {\n\t\tres, _ := v.(string)\n\t\tenc := encoder(ctx, w)\n\t\tbody := res\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}",
"func Write(w http.ResponseWriter, btes []byte, status int, contentType string) error {\n\tw.Header().Add(\"Content-Type\", contentType)\n\tw.Header().Add(\"Content-Length\", fmt.Sprintf(\"%d\", len(btes)))\n\tWriteProcessTime(w)\n\tw.WriteHeader(status)\n\t_, err := w.Write(btes)\n\treturn err\n}",
"func (r *responseInfoRecorder) Write(b []byte) (int, error) {\n\tr.ContentLength += int64(len(b))\n\tif r.statusCode == 0 {\n\t\tr.statusCode = http.StatusOK\n\t}\n\treturn r.ResponseWriter.Write(b)\n}",
"func encodeUploadResponse(ctx context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\tif err != nil {\n\t\t\tlogrus.Warn(err.Error())\n\t\t}\n\treturn\n}",
"func (o WebhookOutput) EncodeAs() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Webhook) pulumi.StringOutput { return v.EncodeAs }).(pulumi.StringOutput)\n}",
"func BindToResponse(result reflect.Value, header http.Header, scope string, acceptLanguage string) (output []byte, ge gomerr.Gomerr) {\n\ttc := structs.ToolContextWithScope(scope).Put(headersKey, header).Put(AcceptLanguageKey, acceptLanguage)\n\n\toutBodyBinding := hasOutBodyBinding[result.Type().String()]\n\tif !outBodyBinding {\n\t\ttc.Put(bind2.OutKey, make(map[string]interface{}))\n\t}\n\n\tif ge = structs.ApplyTools(result, tc, DefaultBindToResponseTool); ge != nil {\n\t\treturn nil, ge\n\t}\n\n\tif outBodyBinding {\n\t\treturn tc.Get(bodyBytesKey).([]byte), nil\n\t} else {\n\t\t// based on content type, and the absence of any \"body\" attributes use the proper marshaler to put the\n\t\t// data into the response bytes\n\t\t// TODO:p3 Allow applications to provide alternative means to choose a marshaler\n\t\tcontentType := header.Get(AcceptsHeader) // TODO:p4 support multi-options\n\t\tmarshal, ok := responseConfig.perContentTypeMarshalFunctions[contentType]\n\t\tif !ok {\n\t\t\tif responseConfig.defaultMarshalFunction == nil {\n\t\t\t\treturn nil, gomerr.Marshal(\"Unsupported Accepts content type\", contentType)\n\t\t\t}\n\t\t\tmarshal = responseConfig.defaultMarshalFunction\n\t\t\tcontentType = DefaultContentType\n\t\t}\n\n\t\toutMap := tc.Get(bind2.OutKey).(map[string]interface{})\n\t\tif len(outMap) == 0 && responseConfig.EmptyValueHandlingDefault == OmitEmpty {\n\t\t\treturn nil, ge\n\t\t}\n\n\t\tbytes, err := marshal(outMap)\n\t\tif err != nil {\n\t\t\treturn nil, gomerr.Marshal(\"Unable to marshal data\", outMap).AddAttribute(\"ContentType\", contentType).Wrap(err)\n\t\t}\n\t\theader.Set(ContentTypeHeader, contentType)\n\n\t\treturn bytes, nil\n\t}\n}",
"func writeResponse(data []byte, size int64, ctype string, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", ctype)\n\tw.Header().Set(\"Content-Length\", fmt.Sprintf(\"%d\", size))\n\tw.Header().Set(\"Cache-Control\", \"no-transform,public,max-age=86400,s-maxage=2592000\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(data)\n}",
"func WriteResponse(w http.ResponseWriter, code int, err error, data interface{}, t0 time.Time) {\n\tw.WriteHeader(code)\n\tresp := &Response{Data: data, Dur: fmt.Sprint(time.Since(t0)), OK: false}\n\tif code < 300 {\n\t\tresp.OK = true\n\t}\n\tif err != nil {\n\t\tresp.Err = err.Error()\n\t}\n\terr = json.NewEncoder(w).Encode(resp)\n\tif err != nil {\n\t\tlog.Infof(\"failed to json encode response: %v\", err)\n\t\tif _, err = w.Write([]byte(spew.Sdump(resp))); err != nil {\n\t\t\tlog.Infof(\"failed to write dump of response: %v\", err)\n\t\t}\n\t}\n}",
"func (o *GetBackendOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Configuration-Version\n\n\tconfigurationVersion := o.ConfigurationVersion\n\tif configurationVersion != \"\" {\n\t\trw.Header().Set(\"Configuration-Version\", configurationVersion)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}",
"func (h BaseHandler) SendMarshalResponse(w http.ResponseWriter, v interface{}) {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\th.SendInternalError(w, err, \"parsing output data failed\")\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tif _, err = w.Write(b); err != nil {\n\t\th.Log.Errorf(\"Error while sending response: %v\", err)\n\t}\n}",
"func (r *response) Write(b []byte) (n int, err error) {\n\tif !r.headersSend {\n\t\tif r.status == 0 {\n\t\t\tr.status = http.StatusOK\n\t\t}\n\t\tr.WriteHeader(r.status)\n\t}\n\tn, err = r.ResponseWriter.Write(b)\n\tr.size += int64(n)\n\treturn\n}",
"func (o *GetIBAServerOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}",
"func MarshalToWriter(v Marshaler, w io.Writer) (written int, err error) {\n\tif isNilInterface(v) {\n\t\treturn w.Write(nullBytes)\n\t}\n\n\tjw := jwriter.Writer{}\n\tv.MarshalTinyJSON(&jw)\n\treturn jw.DumpTo(w)\n}",
"func (w *Writer) Encode(data interface{}) []byte {\n\tb, _ := w.encode(data)\n\n\treturn b\n}",
"func (cbcr *CardBinCheckResponse) WriteResponse(w io.Writer, c ContentType) error {\n\tvar err error\n\tswitch c {\n\tcase JSONContentType:\n\t\t_, err = io.WriteString(w, cbcr.toJSON())\n\tcase TextContentType:\n\t\t_, err = io.WriteString(w, cbcr.toText())\n\tdefault:\n\t\terr = errors.New(\"No supporting content type\")\n\t}\n\treturn err\n}",
"func NewHttpResponseEncodeWriter(w http.ResponseWriter, opts ...ResponseWriterOption) func(error) *httpResponseEncoder {\n\treturn func(gRPCErr error) *httpResponseEncoder {\n\t\treturn &httpResponseEncoder{\n\t\t\tgRPCErr: gRPCErr,\n\t\t\tw: w,\n\t\t\topts: opts,\n\t\t}\n\t}\n}",
"func (o *GetTournamentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}",
"func encodeGenericResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\treturn json.NewEncoder(w).Encode(response)\n}",
"func encodeAddResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}",
"func encodeAddResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}",
"func ServerEncodeResponseBody(timeLayout string, format wrp.Format) gokithttp.EncodeResponseFunc {\n\treturn func(ctx context.Context, httpResponse http.ResponseWriter, value interface{}) error {\n\t\tvar (\n\t\t\twrpResponse = value.(wrpendpoint.Response)\n\t\t\toutput bytes.Buffer\n\t\t)\n\n\t\ttracinghttp.HeadersForSpans(timeLayout, httpResponse.Header(), wrpResponse.Spans()...)\n\n\t\tif err := wrpResponse.Encode(&output, format); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\thttpResponse.Header().Set(\"Content-Type\", format.ContentType())\n\t\t_, err := output.WriteTo(httpResponse)\n\t\treturn err\n\t}\n}",
"func writeResponse(w http.ResponseWriter, h int, p interface{}) {\n\t// I set the content type...\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t// ... I write the specified status code...\n\tw.WriteHeader(h)\n\t// ... and I write the response\n\tb, _ := json.Marshal(p)\n\tw.Write(b)\n}",
"func (b *Bytes) WriteTo(w io.Writer) (int64, error) {\n\tn, err := w.Write(b.Bytes())\n\treturn int64(n), err\n}",
"func EncodeInto(buf *[]byte, val interface{}, opts Options) error {\n err := encodeInto(buf, val, opts)\n if err != nil {\n return err\n }\n *buf = encodeFinish(*buf, opts)\n return err\n}",
"func (e *RegistryV2Error) WriteAsTextTo(w http.ResponseWriter) {\n\tfor k, v := range e.Headers {\n\t\tw.Header()[k] = v\n\t}\n\tif e.Status == 0 {\n\t\tw.WriteHeader(apiErrorStatusCodes[e.Code])\n\t} else {\n\t\tw.WriteHeader(e.Status)\n\t}\n\tw.Write([]byte(e.Error() + \"\\n\"))\n}"
] | [
"0.5900893",
"0.5624768",
"0.55791545",
"0.55571955",
"0.5534204",
"0.5395386",
"0.5389386",
"0.53571355",
"0.5337786",
"0.5306685",
"0.53040093",
"0.53040093",
"0.5280645",
"0.5271151",
"0.52391887",
"0.52389556",
"0.52290684",
"0.5228518",
"0.5228017",
"0.5222941",
"0.52040625",
"0.5184524",
"0.5161609",
"0.5151505",
"0.5143536",
"0.51308846",
"0.5127131",
"0.51243466",
"0.5121752",
"0.5121217",
"0.5117821",
"0.51145566",
"0.51098806",
"0.510693",
"0.50986546",
"0.50986546",
"0.50977683",
"0.50927156",
"0.50898856",
"0.5087563",
"0.5087424",
"0.5076182",
"0.5057497",
"0.50531393",
"0.50520444",
"0.50509095",
"0.5049908",
"0.5042409",
"0.5041549",
"0.5035074",
"0.50177705",
"0.50120425",
"0.5004887",
"0.4998467",
"0.49973452",
"0.49827343",
"0.49753347",
"0.4972825",
"0.49615037",
"0.4955579",
"0.49536756",
"0.49515578",
"0.49504453",
"0.49486277",
"0.4945848",
"0.49453077",
"0.4942654",
"0.4939615",
"0.49346673",
"0.4927198",
"0.49263903",
"0.49145964",
"0.49105346",
"0.49080187",
"0.48904613",
"0.48885065",
"0.48879188",
"0.48832357",
"0.48832023",
"0.48801583",
"0.48738685",
"0.48704308",
"0.4868418",
"0.48539102",
"0.48526874",
"0.4850809",
"0.48507416",
"0.484555",
"0.4843058",
"0.48420173",
"0.48411793",
"0.48400533",
"0.48385903",
"0.48274988",
"0.48274988",
"0.48262972",
"0.4824994",
"0.48220414",
"0.48142797",
"0.48139644"
] | 0.7092946 | 0 |
NewClient returns a new HTTPClient. scrapeURL may be empty, which will replace the "announce" in announceURL with "scrape" to generate the scrapeURL. | NewClient возвращает новый HTTPClient. scrapeURL может быть пустым, что заменит "announce" в announceURL на "scrape", чтобы сгенерировать scrapeURL. | func NewClient(announceURL, scrapeURL string) *Client {
if scrapeURL == "" {
scrapeURL = strings.Replace(announceURL, "announce", "scrape", -1)
}
id := metainfo.NewRandomHash()
return &Client{AnnounceURL: announceURL, ScrapeURL: scrapeURL, ID: id}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func New(url string) *Client {\n\treturn &Client{&http.Client{}, url, func(r *http.Request) *http.Request { return r }}\n}",
"func New(url string) *Client {\n\treturn &Client{url: url, httpC: http.DefaultClient}\n}",
"func New() (crawl *Crawl) {\n\tc := &http.Client{\n\t\tTransport: http.DefaultTransport,\n\t}\n\tc.Jar, _ = cookiejar.New(nil)\n\n\tcrawl = &Crawl{\n\t\tClient: c,\n\t\tmutex: new(sync.RWMutex),\n\t\thandlers: make(map[interface{}]Handler),\n\t\tcloseCh: make(chan bool, 1),\n\t\tdoneCh: make(chan bool, 1),\n\t}\n\tcrawl.SetOptions(DefaultOptions)\n\treturn\n}",
"func New() *Scraper {\n\treturn &Scraper{\n\t\tclient: &http.Client{Timeout: 10 * time.Second},\n\t}\n}",
"func New(url string) *Client {\n\treturn &Client{\n\t\tclient: http2.NewClient(nil),\n\t\turl: url,\n\t}\n}",
"func NewClient(url string) *Client {\n\n\thttpClient := http.DefaultClient\n\tbaseURL := fmt.Sprintf(\"%s%s/\", url, APIVersion)\n\treturn &Client{\n\t\tsling: sling.New().Client(httpClient).Base(baseURL),\n\t}\n}",
"func NewClient() *Client {\n baseURL, _ := url.Parse(defaultBaseURL)\n return &Client{client: http.DefaultClient, BaseURL: baseURL, UserAgent: userAgent}\n}",
"func NewClient(url string) *Client {\n\treturn &Client{URL: url, Default: true}\n}",
"func NewClient(baseurl string) (cl *Client, err error) {\n\tcl = new(Client)\n\t_, err = url.Parse(baseurl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcl.BaseURL = baseurl\n\treturn cl, nil\n}",
"func New(url string) Client {\n\treturn &client{\n\t\tbaseURL: url,\n\t}\n}",
"func NewClient(u string) *Client {\n\treturn &Client{URL: u}\n}",
"func New(url string) *Client {\n\treturn NewWithHTTP(url, http.DefaultClient)\n}",
"func NewClient(url string) *Client {\n\treturn &Client{&http.Client{}, url}\n}",
"func NewClient(url string) *Client {\n\treturn &Client{\n\t\thttpClient: &http.Client{Timeout: time.Minute},\n\t\turl: url,\n\t\tminVersion: minVersion,\n\t}\n}",
"func NewClient(url, token string) *Client {\n\treturn &Client{\n\t\turl: strings.TrimSuffix(url, \"/\"),\n\t\tclient: &http.Client{},\n\t\ttoken: token,\n\t}\n}",
"func NewClient(baseurl string) *Client {\n\treturn &Client{\n\t\tbaseurl: baseurl,\n\t\tclient: &http.Client{Timeout: 20 * time.Second},\n\t}\n}",
"func NewClient(apiKey string, options ...OptionFunc) *Client {\n\tbaseURL, _ := url.Parse(defaultBaseURL)\n\tc := &Client{\n\t\tclient: &http.Client{\n\t\t\tTimeout: time.Second * 10,\n\t\t},\n\t\tbaseURL: baseURL,\n\t\tapiKey: apiKey,\n\t\tuserAgent: \"github.com/barthr/newsapi\",\n\t}\n\n\tfor _, opt := range options {\n\t\topt(c)\n\t}\n\treturn c\n}",
"func New(url string, httpClient *http.Client, customHeaders http.Header) *Client {\n\tif httpClient == nil {\n\t\thttpClient = &http.Client{\n\t\t\tTimeout: defaultHTTPTimeout,\n\t\t}\n\t}\n\n\treturn &Client{\n\t\turl: url,\n\t\thttpClient: httpClient,\n\t\tcustomHeaders: customHeaders,\n\t}\n}",
"func NewClient(httpClient *http.Client) *Client {\n\tu, _ := url.Parse(BaseURL)\n\treturn &Client{\n\t\tBaseURL: u,\n\t\tHTTPClient: httpClient,\n\t}\n}",
"func NewClient(url string) *Client {\n\treturn &Client{url: url}\n}",
"func NewClient() (*Client, error) {\n\tvar seedTickers = []string{\"AAPL\", \"GOOG\", \"MSFT\"}\n\tjar, err := cookiejar.New(nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thttpClient := &http.Client{Jar: jar}\n\tc := Client{httpClient: httpClient}\n\n\ti := rand.Intn(len(seedTickers))\n\tticker := seedTickers[i]\n\tcrumb, err := getCrumb(c.httpClient, ticker)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.crumb = crumb\n\n\treturn &c, nil\n\n}",
"func New() *Scraper {\n\tseedURL, _ := env.GetCrawlerVars(env.SeedURL)\n\treturn &Scraper{\n\t\tlock: &sync.RWMutex{},\n\t\tvisitsCount: 0,\n\t\tseedURL: seedURL.(string),\n\t\trequests: make(scrapingRequests, 0),\n\t\tacquiredProducts: make(item.Items, 0),\n\t}\n}",
"func NewClient(url string) *Client {\n\treturn &Client{\n\t\tURL: url,\n\t\tclient: http.DefaultClient,\n\t}\n}",
"func NewClient(baseURL string) (*Client, error) {\n\tu, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tu.Path = path.Join(u.Path, \"/\"+version)\n\n\treturn &Client{\n\t\turl: u,\n\t\thttp: &http.Client{\n\t\t\tTimeout: 5 * time.Second,\n\t\t},\n\t}, nil\n}",
"func NewClient(url string) *Client {\n\ttr := http.DefaultTransport\n\thttp := &http.Client{Transport: tr}\n\tclient := &Client{http: http, url: url}\n\treturn client\n}",
"func NewScrape(cfg *domain.Config) *Scrape {\n\treturn &Scrape{\n\t\tcfg: cfg,\n\t}\n}",
"func NewClient(with ...ClientOption) *Client {\n\ttimeout := DefaultTimeout\n\n\tclient := &Client{\n\t\tclient: &http.Client{\n\t\t\tTimeout: timeout,\n\t\t},\n\t\tbase: getBaseURL(url.URL{\n\t\t\tScheme: \"https\",\n\t\t\tHost: \"api.secrethub.io\",\n\t\t}),\n\t\tuserAgent: DefaultUserAgent,\n\t}\n\tclient.Options(with...)\n\treturn client\n}",
"func NewClient(URL, token string) *Client {\n\tc := &Client{\n\t\tToken: token,\n\t\tclient: http.DefaultClient,\n\t}\n\n\tif !strings.HasSuffix(URL, \"/\") {\n\t\tURL += \"/\"\n\t}\n\n\tvar err error\n\tc.URL, err = url.Parse(URL)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn c\n}",
"func NewClient(url string, token oauth2.Token, insecure bool) Client {\n\tclient := &http.Client{}\n\n\tif insecure {\n\t\tclient.Transport = &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t}\n\t}\n\n\treturn Client{url, token, client}\n}",
"func NewClient(u string) (*Client, error) {\n\tif len(u) == 0 {\n\t\treturn nil, fmt.Errorf(\"client: missing url\")\n\t}\n\n\tparsedURL, err := url.Parse(u)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Client{\n\t\tURL: parsedURL,\n\t\tDefaultHeader: make(http.Header),\n\t}\n\n\tif err := c.init(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}",
"func NewClient(httpClient *http.Client, URL string, Token string, Source string, SourceType string, Index string) (*Client) {\n\t// Create a new client\n\tif httpClient == nil {\n\t\ttr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}} // turn off certificate checking\n\t\thttpClient = &http.Client{Timeout: time.Second * 20, Transport: tr}\n\t}\n\n\tc := &Client{HTTPClient: httpClient, URL: URL, Token: Token, Source: Source, SourceType: SourceType, Index: Index}\n\n\treturn c\n}",
"func New(endpoint *url.URL, client *http.Client) *Client {\n\tif client == nil {\n\t\tclient = httpClient\n\t}\n\n\tif len(endpoint.Path) > 0 && !strings.HasSuffix(endpoint.Path, \"/\") {\n\t\tendpoint.Path = endpoint.Path + \"/\"\n\t}\n\n\treturn &Client{client, endpoint, make(http.Header), endpoint.Query()}\n}",
"func NewClient(url, token string) *Client {\n\treturn &Client{\n\t\turl: strings.TrimSuffix(url, \"/\"),\n\t\taccessToken: token,\n\t\tclient: &http.Client{},\n\t}\n}",
"func NewFromURL(base *url.URL) *Client {\n\n\tif baseStr := base.String(); len(baseStr) > 0 {\n\n\t\tLogger.Debug(\"Creating Marathon Client from url.URL = %s\", base.String())\n\t\tbaseURL, err := url.Parse(baseStr)\n\t\tif err != nil {\n\t\t\tLogger.Debug(\"Invalid baseURL\")\n\t\t\treturn nil\n\t\t}\n\n\t\t_client := &Client{}\n\t\treturn _client.New(baseURL)\n\t}\n\treturn nil\n}",
"func NewClient(owner string, url string) *Client {\n\n\tclient := &Client{\n\t\turl: url,\n\t\towner: owner,\n\t}\n\n\treturn client\n}",
"func NewClient(address string) *Client {\n\t// bootstrap the config\n\tc := &Client{\n\t\tAddress: address,\n\t\tScheme: \"http\",\n\t}\n\n\t// Make sure IPAM connection is alive, with retries\n\tfor i := 0; i < 5; i++ {\n\t\t_, err := c.IndexPools()\n\t\tif err == nil {\n\t\t\treturn c\n\t\t}\n\t\tlog.Println(\"Could not connect to IPAM, retrying in 5 Seconds...\")\n\t\ttime.Sleep(5 * time.Second)\n\t}\n\n\treturn nil\n}",
"func NewClient(casterURL string) (client *Client, err error) {\n\tu, err := url.Parse(casterURL)\n\tclient = &Client{\n\t\tRequest: &http.Request{\n\t\t\tURL: u,\n\t\t\tMethod: \"GET\",\n\t\t\tProtoMajor: 1,\n\t\t\tProtoMinor: 1,\n\t\t\tHeader: make(map[string][]string),\n\t\t},\n\t}\n\tclient.Header.Set(\"User-Agent\", \"NTRIP GoClient\")\n\tclient.Header.Set(\"Ntrip-Version\", \"Ntrip/2.0\")\n\treturn client, err\n}",
"func NewClient(httpClient *http.Client, baseURL string) (*Client, error) {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tbase, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Could not parse base URL\")\n\t}\n\n\tc := &Client{client: httpClient, baseURL: base}\n\treturn c, nil\n}",
"func NewScraper(opts ...Option) *Scraper {\n\tm := &Scraper{\n\t\turl: \"\",\n\t\texpectedStatusCode: http.StatusOK,\n\t\ttargetPrice: DefaultTargetPrice,\n\t\tselector: DefaultSelector,\n\t\tfindText: DefaultFindText,\n\t\tmaxRetries: DefaultMaxRetries,\n\t\tretrySeconds: DefaultRetrySeconds,\n\t\tLogger: &utils.DefaultLogger{},\n\t\tclient: new(http.Client),\n\t}\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\treturn m\n}",
"func New(logger *log.Logger, cfg Config) (*Scraper, error) {\n\tvar errs []error\n\n\tu, err := url.Parse(cfg.URL)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t}\n\n\tincludes, err := compileRegexps(cfg.Includes)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t}\n\n\texcludes, err := compileRegexps(cfg.Excludes)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t}\n\n\tproxyURL, err := url.Parse(cfg.Proxy)\n\tif err != nil {\n\t\terrs = append(errs, err)\n\t}\n\n\tif errs != nil {\n\t\treturn nil, errors.Join(errs...)\n\t}\n\n\tif u.Scheme == \"\" {\n\t\tu.Scheme = \"http\" // if no URL scheme was given default to http\n\t}\n\n\tif cfg.UserAgent == \"\" {\n\t\tcfg.UserAgent = agent.GoogleBot()\n\t}\n\n\tb := surf.NewBrowser()\n\tb.SetUserAgent(cfg.UserAgent)\n\tb.SetTimeout(time.Duration(cfg.Timeout) * time.Second)\n\n\tif cfg.Proxy != \"\" {\n\t\tdialer, err := proxy.FromURL(proxyURL, proxy.Direct)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tb.SetTransport(&http.Transport{\n\t\t\tDial: dialer.Dial,\n\t\t})\n\t}\n\n\ts := &Scraper{\n\t\tconfig: cfg,\n\n\t\tbrowser: b,\n\t\tlogger: logger,\n\t\tprocessed: make(map[string]struct{}),\n\t\tURL: u,\n\t\tcssURLRe: regexp.MustCompile(`^url\\(['\"]?(.*?)['\"]?\\)$`),\n\t\tincludes: includes,\n\t\texcludes: excludes,\n\t}\n\treturn s, nil\n}",
"func New(httpClient *http.Client, config Config) (*Client, error) {\n\tc := NewClient(httpClient)\n\tc.Config = config\n\n\tbaseURL, err := url.Parse(\"https://\" + config.Host)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.BaseURL = baseURL\n\treturn c, nil\n}",
"func NewClient(address string, httpService httpservice.HTTPService) (*Client, error) {\n\tvar httpClient *http.Client\n\taddressURL, err := url.Parse(address)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to parse marketplace address\")\n\t}\n\tif addressURL.Hostname() == \"localhost\" || addressURL.Hostname() == \"127.0.0.1\" {\n\t\thttpClient = httpService.MakeClient(true)\n\t} else {\n\t\thttpClient = httpService.MakeClient(false)\n\t}\n\n\treturn &Client{\n\t\taddress: address,\n\t\thttpClient: httpClient,\n\t}, nil\n}",
"func newClientWithURL(url string, apiKey string, apiSecret string) (Client, error) {\n\treturn newClientWithURLs(apiKey, apiSecret, url, url, url, url)\n}",
"func New(base string) *Client {\n\n\tLogger.Debug(\"Creating Marathon Client with baseURL = %s\", base)\n\tbaseURL, err := url.Parse(base)\n\tif len(base) == 0 || err != nil {\n\t\tLogger.Debug(\"Invalid baseURL\")\n\t\treturn nil\n\t}\n\n\t_client := &Client{}\n\treturn _client.New(baseURL)\n}",
"func NewClient() (c *Client) {\n\tvar (\n\t\tcookie *cookiejar.Jar\n\t)\n\n\tcookie, _ = cookiejar.New(nil)\n\n\tc = &Client{\n\t\tClient: &http.Client{\n\t\t\tJar: cookie,\n\t\t},\n\t\tUserAgent: \"Sbss-Client\",\n\t}\n\n\treturn\n}",
"func NewClient(apiKey string) *Client {\n\tu, _ := url.ParseRequestURI(DefaultBaseURL)\n\treturn &Client{\n\t\tclient: &http.Client{},\n\t\tapiKey: apiKey,\n\t\tbaseURL: u,\n\t}\n}",
"func NewClient(profURL *url.URL) Client {\n\tc := &client{\n\t\tc: &http.Client{},\n\t\tprofURL: profURL,\n\t}\n\n\treturn c\n}",
"func (mc *Client) New(base *url.URL) *Client {\n\n\tmarathon := mc\n\tmarathon.Session = requist.New(base.String())\n\n\tif marathon.Session != nil {\n\t\trequist.Logger = Logger\n\t\tmarathon.baseURL = base.String()\n\t\tmarathon.info = &data.Info{}\n\t\tmarathon.fail = &data.FailureMessage{}\n\n\t\tif base.User.String() != \"\" {\n\t\t\tif pass, check := base.User.Password(); check {\n\t\t\t\tmarathon.Session.SetBasicAuth(base.User.Username(), pass)\n\t\t\t}\n\t\t\tmarathon.auth = marathon.Session.GetBasicAuth()\n\t\t}\n\t\tmarathon.SetTimeout(DeploymentTimeout)\n\t\tmarathon.Session.Accept(\"application/json\")\n\t\tmarathon.Session.SetHeader(\"Cache-Control\", \"no-cache\")\n\t\tmarathon.Session.SetHeader(\"Accept-Encoding\", \"identity\")\n\n\t\tLogger.Debug(\"Marathon Client = %+v\", marathon)\n\t\treturn marathon\n\t}\n\treturn nil\n}",
"func NewClient(c Configuration) (Client, error) {\n\tcli := Client{\n\t\tName: \"splunk-http-collector-client\",\n\t}\n\tif err := cli.Configure(c.Collector.Proto, c.Collector.Host, c.Collector.Port); err != nil {\n\t\treturn cli, err\n\t}\n\tlog.Debugf(\"%s: proto=%s\", cli.Name, c.Collector.Proto)\n\tlog.Debugf(\"%s: host=%s\", cli.Name, c.Collector.Host)\n\tlog.Debugf(\"%s: port=%d\", cli.Name, c.Collector.Port)\n\tlog.Debugf(\"%s: token=%s\", cli.Name, c.Collector.Token)\n\tlog.Debugf(\"%s: timeout=%d\", cli.Name, c.Collector.Timeout)\n\tlog.Debugf(\"%s: endpoint.health=%s\", cli.Name, cli.Endpoints.Health)\n\tlog.Debugf(\"%s: endpoint.event=%s\", cli.Name, cli.Endpoints.Event)\n\tlog.Debugf(\"%s: endpoint.raw=%s\", cli.Name, cli.Endpoints.Raw)\n\tt := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: true,\n\t\t},\n\t}\n\tcli.client = &http.Client{\n\t\tTimeout: time.Duration(c.Collector.Timeout) * time.Second,\n\t\tTransport: t,\n\t}\n\tcli.Token = c.Collector.Token\n\tif err := cli.HealthCheck(); err != nil {\n\t\treturn cli, err\n\t}\n\treturn cli, nil\n}",
"func NewClient(apiKey string) *Client {\n\treturn &Client{\n\t\tC: http.Client{\n\t\t\tTimeout: 5 * time.Second,\n\t\t},\n\t\tService: \"https://saucenao.com\",\n\t\tAPIKey: apiKey,\n\t}\n}",
"func New(url string, client *http.Client) *Rietveld {\n\turl = strings.TrimRight(url, \"/\")\n\tif client == nil {\n\t\tclient = http.DefaultClient\n\t}\n\treturn &Rietveld{\n\t\turl: url,\n\t\tclient: client,\n\t}\n}",
"func NewClient(cacher Cacher) *Client {\n\tvar newClient Client\n\n\tnewClient.BaseURL = DefaultBaseURL\n\tnewClient.Retries = 5\n\tnewClient.cacher = cacher\n\tnewClient.maxIdleConns = 2\n\n\t// Also sets up our initial http client\n\tnewClient.SetTimeout(60 * time.Second)\n\n\tif client == nil {\n\t\tclient = &newClient\n\t}\n\treturn &newClient\n}",
"func NewClient(connectionstring, pooluser string) (sc GenericClient) {\n\tif strings.HasPrefix(connectionstring, \"stratum+tcp://\") {\n\t\tsc = &StratumClient{\n\t\t\tconnectionstring: strings.TrimPrefix(connectionstring, \"stratum+tcp://\"),\n\t\t\tUser: pooluser,\n\t\t}\n\t} else {\n\t\ts := SiadClient{}\n\t\ts.siadurl = \"http://\" + connectionstring + \"/miner/header\"\n\t\tsc = &s\n\t}\n\treturn\n}",
"func NewClient(baseURL string, defaultHeaders map[string]string) *Client {\n\turl, _ := url.Parse(baseURL)\n\tif defaultHeaders == nil {\n\t\tdefaultHeaders = make(map[string]string)\n\t}\n\treturn &Client{httpClient: &http.Client{}, baseURL: url, defaultHeaders: defaultHeaders}\n}",
"func New() (Scraper, error) {\n\treturn &scraper{}, nil\n}",
"func NewClient(httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tbaseURL, _ := url.Parse(defaultBaseURL)\n\n\tc := &Client{\n\t\tclient: httpClient,\n\t\tBaseURL: baseURL,\n\t}\n\n\tc.Draws = &drawsService{\n\t\tclient: c,\n\t\tEndpoint: defaultDrawsEndpoint,\n\t}\n\treturn c\n}",
"func NewClient(baseURL string, httpClient *http.Client) (*Client, error) {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tbaseEndpoint, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !strings.HasSuffix(baseEndpoint.Path, \"/\") {\n\t\tbaseEndpoint.Path += \"/\"\n\t}\n\n\tc := &Client{\n\t\tclient: httpClient,\n\t\tBaseURL: baseEndpoint,\n\t}\n\tc.common.client = c\n\tc.Boards = (*BoardsService)(&c.common)\n\tc.Epics = (*EpicsService)(&c.common)\n\tc.Issues = (*IssuesService)(&c.common)\n\tc.Sprints = (*SprintsService)(&c.common)\n\tc.Backlog = (*BacklogService)(&c.common)\n\n\treturn c, nil\n}",
"func NewClient(c *http.Client, baseURL *url.URL) *client {\n\treturn &client{\n\t\tbaseURL: baseURL,\n\t\tclient: c,\n\t}\n}",
"func New(startURL string, host string) *Crawler {\n\treturn &Crawler{\n\t\tRequester: request.HTTPRequest{},\n\t\tStartURL: startURL,\n\t\tLinks: make(PageLinks),\n\t\thost: host,\n\t\tmaxGoRoutines: 20,\n\t}\n}",
"func NewClient(config *ClientConfig) *Client {\n\tvar client *Client\n\n\thttpClient := &fasthttp.Client{\n\t\tName: \"Gocursive\",\n\t\tMaxConnsPerHost: 10240,\n\t}\n\n\tif !strings.HasSuffix(config.url.Path, \"/\") {\n\t\tconfig.url.Path += \"/\"\n\t}\n\n\tconfig.outputDir, _ = filepath.Abs(config.outputDir)\n\n\tclient = &Client{\n\t\tconfig: config,\n\t\thttpClient: httpClient,\n\t\tdirectories: []string{},\n\t\tfiles: []*url.URL{},\n\t\tbytesTotal: 0,\n\t\tbytesRecv: 0,\n\t}\n\n\treturn client\n}",
"func NewClient(url, apiKey string) *Client {\n\treturn &Client{\n\t\turl: url,\n\t\tapiKey: apiKey,\n\t}\n}",
"func NewClient(addr string, insecure bool) *Client {\n\ttransport := &http.Transport{}\n\tif insecure {\n\t\ttransport = &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t}\n\t}\n\n\thttpClient := &http.Client{Transport: transport}\n\n\treturn &Client{\n\t\taddr: addr,\n\t\thttpClient: httpClient,\n\t}\n}",
"func New(baseURL *url.URL) *Client {\n\treturn &Client{\n\t\tu: baseURL,\n\t}\n}",
"func (c *baseClient) New() *baseClient {\n\t// Copy headers\n\theader := make(http.Header)\n\tfor k, v := range c.header {\n\t\theader[k] = v\n\t}\n\n\treturn &baseClient{\n\t\thttpClient: c.httpClient,\n\t\tmethod: c.method,\n\t\turl: c.url,\n\t\theader: header,\n\t}\n}",
"func NewClient(accessToken string, version string, httpClient *http.Client) *Client {\n\ttype Params struct {\n\t\tAccessToken string `url:\"access_token,omitempty\"`\n\t\tLocale string `url:\"locale,omitempty\"`\n\t}\n\n\tparams := &Params{AccessToken: accessToken, Locale: \"*\"}\n\n\tclient := &Client{\n\t\tAccessToken: accessToken,\n\t\tsling: sling.New().Client(httpClient).Base(baseURL).\n\t\t\tSet(\"Content-Type\", contentTypeHeader(version)).\n\t\t\tQueryStruct(params),\n\t}\n\n\tclient.rl = rate.New(10, time.Second*1)\n\n\treturn client\n}",
"func NewClient(addr *url.URL) (Client, error) {\n\tif addr == nil || addr.Host == \"\" {\n\t\tlogging.Info(\"Using nop metricd client.\")\n\t\treturn newNopClient(), nil\n\t}\n\tlogging.Infof(\"Using metricd at: %s\", addr.Host)\n\treturn newRealClient(addr, nil)\n}",
"func New(h, o string, s *State) (*Crawler, error) {\n\tm, err := url.Parse(h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif m.Host == \"\" {\n\t\treturn nil, errors.New(\"empty main host\")\n\t}\n\treturn &Crawler{\n\t\tendpoint: h,\n\t\tmainURL: m,\n\t\toutput: o,\n\t\tuploadPageCh: make(chan string, 1024),\n\t\tuploadAssetCh: make(chan string, 1024),\n\t\tsaveCh: make(chan File, 128),\n\t\tUploadWorkers: DefaultWorkersCount,\n\t\tSaveWorkers: DefaultWorkersCount,\n\t\tIncludeSubDomains: false,\n\t\tEnableGzip: true,\n\t\tstate: s,\n\t\thttpClient: &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tDial: (&net.Dialer{\n\t\t\t\t\tTimeout: 15 * time.Second,\n\t\t\t\t\tKeepAlive: 180 * time.Second,\n\t\t\t\t}).Dial,\n\t\t\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\t\t\tResponseHeaderTimeout: 10 * time.Second,\n\t\t\t\tExpectContinueTimeout: 1 * time.Second,\n\t\t\t},\n\t\t},\n\t}, nil\n}",
"func NewClient(endpoint string, cli *http.Client) (*Client, error) {\n\tu, err := url.ParseRequestURI(endpoint)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"gaurun: failed to parse url - endpoint = %s: %w\", endpoint, err)\n\t}\n\n\tif cli == nil {\n\t\tcli = http.DefaultClient\n\t}\n\n\treturn &Client{\n\t\tEndpoint: u,\n\t\tHTTPClient: cli,\n\t}, nil\n}",
"func NewClient(host, version, userAgent string) (*Client, error) {\n\tbaseURL, err := url.Parse(host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif baseURL.Path == \"\" {\n\t\tbaseURL.Path = \"/\"\n\t}\n\tunix2HTTP(baseURL)\n\thClient := getHTTPClient(host)\n\tif hClient == nil {\n\t\treturn nil, fmt.Errorf(\"Unable to parse provided url: %v\", host)\n\t}\n\tc := &Client{\n\t\tbase: baseURL,\n\t\tversion: version,\n\t\thttpClient: hClient,\n\t\tauthstring: \"\",\n\t\taccesstoken: \"\",\n\t\tuserAgent: fmt.Sprintf(\"%v/%v\", userAgent, version),\n\t}\n\treturn c, nil\n}",
"func New(credhubURL string, hc HTTPClient) (*Client, error) {\n\tc := &Client{\n\t\turl: credhubURL,\n\t\thc: hc,\n\t}\n\n\tc.Log = log.New(os.Stderr, log.Prefix(), log.Flags())\n\n\terr := c.setVersion()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}",
"func NewClient(accessToken string, version string, httpClient *http.Client) *Client {\n\tclient := &Client{\n\t\tAccessToken: accessToken,\n\t\tsling: sling.New().Client(httpClient).Base(baseURL).\n\t\t\tSet(\"Content-Type\", contentTypeHeader(version)).\n\t\t\tSet(\"Authorization\", authorizationHeader(accessToken)),\n\t}\n\n\tclient.rl = rate.New(10, time.Second*1)\n\n\treturn client\n}",
"func NewClient(key, url string) *Client {\n\treturn &Client{\n\t\turl: url,\n\t\theader: req.Header{\n\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t\"Accept\": \"application/json\",\n\t\t\t\"X-Api-Key\": key,\n\t\t},\n\t}\n}",
"func New(p policy) (*retryablehttp.Client, error) {\n\tlogger := p.CreateLogger()\n\n\tinnerHTTPClient, err := createInnerHTTPClient(logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thttpClient := retryablehttp.NewClient()\n\tp.ConfigureHTTP(httpClient)\n\n\thttpClient.Logger = loggerAdapter{logger}\n\thttpClient.HTTPClient = innerHTTPClient\n\n\treturn httpClient, nil\n}",
"func newClient(apiKey string) *Client {\n\tvar url *url.URL\n\turl, _ = url.Parse(\"https://vulners.com/api/v3\")\n\treturn &Client{baseURL: url, apiKey: apiKey}\n}",
"func New(client *http.Client, req *http.Request, check RespCheck, urls []*url.URL) *FastestURL {\n\tif client == nil {\n\t\tclient = http.DefaultClient\n\t}\n\tif req == nil {\n\t\treq = &http.Request{}\n\t}\n\tif check == nil {\n\t\tcheck = func(resp *http.Response) bool {\n\t\t\treturn resp.StatusCode == http.StatusOK\n\t\t}\n\t}\n\treturn &FastestURL{\n\t\tClient: client,\n\t\tURLs: urls,\n\t\tRequest: req,\n\t\tRespCheck: check,\n\t}\n}",
"func New(context *contexter.Context) (*Client) {\n return &Client {\n urlBaseIndex: 0,\n\t\tcontext: context,\n }\n}",
"func NewHTTPScraper() *HTTPScraper {\n\treturn &HTTPScraper{client: &http.Client{}}\n}",
"func NewClient(httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\tbaseURL, _ := url.Parse(defaultBaseURL)\n\tc := Client{\n\t\tBaseURL: baseURL,\n\t\tclient: httpClient,\n\t\tUserAgent: userAgent,\n\t}\n\treturn &c\n}",
"func (_ *_Crawler) New(_ *cli.Context, client http.Client, logger glog.Log) (crawler.Crawler, error) {\n\tc := _Crawler{\n\t\thttpClient: client,\n\t\t// this regular used to match category page url path\n\t\tcategoryPathMatcher: regexp.MustCompile(`^/en-us/shop(.*)`),\n\t\t// this regular used to match product page url path\n\t\tproductPathMatcher: regexp.MustCompile(`^(/en-us/p(.*)(&lvrid=_p)(.*)) | (/en-us/p(.*))$`),\n\t\tlogger: logger.New(\"_Crawler\"),\n\t}\n\treturn &c, nil\n}",
"func NewClient(httpClient *http.Client, username string, password string) *Client {\n\tbase := sling.New().Client(httpClient).Base(msfUrl)\n\tbase.SetBasicAuth(username, password)\n\treturn &Client{\n\t\tsling: base,\n\t\tNBA: newNBAService(base.New()),\n\t}\n}",
"func New(addr string) (*Client, error) {\n\treturn &Client{\n\t\taddr: addr,\n\t\thttpClient: &http.Client{},\n\t}, nil\n}",
"func New(addr string) (*Client, error) {\n\treturn &Client{\n\t\taddr: addr,\n\t\thttpClient: &http.Client{},\n\t}, nil\n}",
"func NewClient(url, token string) (*Client, chan error, error) {\n\tsocket, err := dialar.Dial(url, http.Header{})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tc := &Client{\n\t\tDone: make(chan struct{}),\n\t\ttoken: token,\n\t\tsocket: socket,\n\t\ttasks: make(chan client.Task, buffSize),\n\t\tprogress: make(chan []byte, buffSize),\n\t\tresult: make(chan []byte, buffSize),\n\t\tfinish: make(chan client.JudgeResult, buffSize),\n\t\trequest: make(chan struct{}, 1),\n\t\tack: make(chan ack, 1),\n\t\tencoder: parser.NewEncoder(socket),\n\t\tdecoder: parser.NewDecoder(socket),\n\t\terrCh: make(chan error),\n\t}\n\n\tgo c.readLoop()\n\tgo c.writeLoop()\n\n\treturn c, c.errCh, nil\n}",
"func (_ *_Crawler) New(_ *cli.Context, client http.Client, logger glog.Log) (crawler.Crawler, error) {\n\tc := _Crawler{\n\t\thttpClient: client,\n\t\t// this regular used to match category page url path\n\t\tcategoryPathMatcher: regexp.MustCompile(`^/collections(/[a-zA-Z0-9_-]+){1,6}$`),\n\t\t// this regular used to match product page url path\n\t\tproductPathMatcher: regexp.MustCompile(`^(/[/a-zA-Z0-9_-]+)?/products(/[a-zA-Z0-9_-]+){1,3}$`),\n\t\tlogger: logger.New(\"_Crawler\"),\n\t}\n\treturn &c, nil\n}",
"func New(u url.URL, ignoreRobotsTxt bool, maxWorkers int, userAgent string) *Crawler {\n\tu.Path = \"/\"\n\treturn &Crawler{\n\t\turl: u,\n\t\tignoreRobotsTxt: ignoreRobotsTxt,\n\t\tqueued: sync.Map{},\n\t\tpool: worker.NewPool(maxWorkers),\n\t\tpagesWithErr: make(map[string]bool),\n\t\tSiteMap: make(map[string]*Page),\n\t\tuserAgent: userAgent,\n\t}\n}",
"func New(addr string) *Client {\n\treturn &Client{\n\t\taddr: addr,\n\t\tHTTPClient: &http.Client{\n\t\t\tTimeout: 1 * time.Minute,\n\t\t},\n\t}\n}",
"func NewClient(cfg *Config) (*Client, error) {\r\n\tBaseURL := new(url.URL)\r\n\tvar err error\r\n\r\n\tviper.SetEnvPrefix(\"TS\")\r\n\tviper.BindEnv(\"LOG\")\r\n\r\n\tswitch l := viper.Get(\"LOG\"); l {\r\n\tcase \"trace\":\r\n\t\tlog.SetLevel(log.TraceLevel)\r\n\tcase \"debug\":\r\n\t\tlog.SetLevel(log.DebugLevel)\r\n\tcase \"info\":\r\n\t\tlog.SetLevel(log.InfoLevel)\r\n\tcase \"warn\":\r\n\t\tlog.SetLevel(log.WarnLevel)\r\n\tcase \"fatal\":\r\n\t\tlog.SetLevel(log.FatalLevel)\r\n\tcase \"panic\":\r\n\t\tlog.SetLevel(log.PanicLevel)\r\n\t}\r\n\r\n\tif cfg.BaseURL != \"\" {\r\n\t\tBaseURL, err = url.Parse(cfg.BaseURL)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t} else {\r\n\t\tBaseURL, err = url.Parse(defaultBaseURL)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t}\r\n\r\n\tnewClient := &Client{\r\n\t\tBaseURL: BaseURL,\r\n\t\tclient: http.DefaultClient,\r\n\t\tcreds: &Credentials{\r\n\t\t\tAPIKey: cfg.APIKey,\r\n\t\t\tOrganizationID: cfg.OrganizationID,\r\n\t\t\tUserID: cfg.UserID,\r\n\t\t},\r\n\t}\r\n\r\n\tnewClient.Rulesets = &RulesetService{newClient}\r\n\tnewClient.Rules = &RuleService{newClient}\r\n\r\n\treturn newClient, nil\r\n}",
"func New(opts ...Option) *Client {\n\tbaseURL, _ := url.Parse(defaultBaseURL)\n\tc := &Client{\n\t\tHTTPClient: &http.Client{Timeout: time.Second * 5},\n\t\tBaseURL: *baseURL,\n\t\tUserAgent: userAgent,\n\t}\n\tfor _, opt := range opts {\n\t\topt(c)\n\t}\n\treturn c\n}",
"func NewClient(httpClient *http.Client) (*Client, error) {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tc := &Client{}\n\n\tbaseURL, _ := url.Parse(DefaultBaseURL)\n\tvsspsBaseURL, _ := url.Parse(DefaultVsspsBaseURL)\n\tvsaexBaseURL, _ := url.Parse(DefaultVsaexBaseURL)\n\n\tc.client = httpClient\n\tc.BaseURL = *baseURL\n\tc.VsspsBaseURL = *vsspsBaseURL\n\tc.VsaexBaseURL = *vsaexBaseURL\n\tc.UserAgent = userAgent\n\n\tc.Boards = &BoardsService{client: c}\n\tc.BuildDefinitions = &BuildDefinitionsService{client: c}\n\tc.Builds = &BuildsService{client: c}\n\tc.DeliveryPlans = &DeliveryPlansService{client: c}\n\tc.Favourites = &FavouritesService{client: c}\n\tc.Git = &GitService{client: c}\n\tc.Iterations = &IterationsService{client: c}\n\tc.PolicyEvaluations = &PolicyEvaluationsService{client: c}\n\tc.PullRequests = &PullRequestsService{client: c}\n\tc.Teams = &TeamsService{client: c}\n\tc.Tests = &TestsService{client: c}\n\tc.Users = &UsersService{client: c}\n\tc.UserEntitlements = &UserEntitlementsService{client: c}\n\tc.WorkItems = &WorkItemsService{client: c}\n\n\treturn c, nil\n}",
"func NewClient() *Client {\n\tbaseURL, _ := url.Parse(defaultBaseURL)\n\n\tc := &Client{client: http.DefaultClient, BaseURL: baseURL, UserAgent: userAgent}\n\tc.Schedule = &ScheduleServiceOp{client: c}\n\tc.Time = &TimeServiceOp{client: c}\n\n\treturn c\n}",
"func NewClient(baseURL string) (*Client, error) {\n\turl, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Client{\n\t\tclient: &http.Client{\n\t\t\tTimeout: time.Second * 30, // Max of 30 secs\n\t\t},\n\t\tBaseURL: url,\n\t\tUserAgent: userAgent,\n\t}\n\n\tc.Games = &GameService{client: c}\n\tc.Teams = &TeamService{client: c}\n\tc.Players = &PlayerService{client: c}\n\tc.Stats = &StatService{client: c}\n\n\treturn c, nil\n}",
"func NewClient(config *Config) (*Client, error) {\n\n\tconfig = DefaultConfig().Merge(config)\n\n\tif !strings.HasPrefix(config.Address, \"http\") {\n\t\tconfig.Address = \"http://\" + config.Address\n\t}\n\n\tif err := config.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient := &Client{\n\t\tconfig: *config,\n\t\theaders: map[string]string{},\n\t\thttpClient: cleanhttp.DefaultClient(),\n\t}\n\n\treturn client, nil\n}",
"func newScraper(u *url.URL, timeout int) (*scraper, error) {\n\tvar title string\n\tvar language string\n\tvar author string\n\tvar description string\n\tvar generator string\n\tvar feed string\n\tcharset := \"utf-8\"\n\tlinks := make([]string, 0)\n\timages := make([]string, 0)\n\tkeywords := make([]string, 0)\n\tcompatibility := make(map[string]string)\n\n\tscrpr := func(n *html.Node) {\n\t\tswitch n.Data {\n\t\tcase \"html\":\n\t\t\tlanguage = findAttribute(n, \"lang\")\n\t\tcase \"title\":\n\t\t\ttitle = n.FirstChild.Data\n\t\tcase \"a\":\n\t\t\tlinks = addElement(links, u, n, \"href\")\n\t\tcase \"img\":\n\t\t\timages = addElement(images, u, n, \"src\")\n\t\tcase \"link\":\n\t\t\ttyp := findAttribute(n, \"type\")\n\t\t\tswitch typ {\n\t\t\tcase \"application/rss+xml\":\n\t\t\t\tfeed = findAttribute(n, \"href\")\n\t\t\t}\n\t\tcase \"meta\":\n\t\t\tname := findAttribute(n, \"name\")\n\t\t\tswitch name {\n\t\t\tcase \"author\":\n\t\t\t\tauthor = findAttribute(n, \"content\")\n\t\t\tcase \"keywords\":\n\t\t\t\tkeywords = strings.Split(findAttribute(n, \"content\"), \", \")\n\t\t\tcase \"description\":\n\t\t\t\tdescription = findAttribute(n, \"content\")\n\t\t\tcase \"generator\":\n\t\t\t\tgenerator = findAttribute(n, \"content\")\n\t\t\t}\n\n\t\t\thttpEquiv := findAttribute(n, \"http-equiv\")\n\t\t\tswitch httpEquiv {\n\t\t\tcase \"Content-Type\":\n\t\t\t\tcharset = findCharset(findAttribute(n, \"content\"))\n\t\t\tcase \"X-UA-Compatible\":\n\t\t\t\tcompatibility = mapifyStr(findAttribute(n, \"content\"))\n\t\t\t}\n\t\t}\n\t}\n\n\tcl := http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: timeoutDialer(timeout),\n\t\t},\n\t}\n\n\tresp, err := cl.Get(u.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\ttree, err := h5.New(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttree.Walk(scrpr)\n\n\treturn 
&scraper{title,\n\t\tlanguage,\n\t\tauthor,\n\t\tdescription,\n\t\tgenerator,\n\t\tfeed,\n\t\tcharset,\n\t\tlinks,\n\t\timages,\n\t\tkeywords,\n\t\tcompatibility}, nil\n}",
"func New(filepath string, opts ...Option) (*Client, error) {\n\tvar options Options\n\tfor _, o := range opts {\n\t\to(&options)\n\t}\n\n\tc := &Client{filepath: filepath}\n\tif strings.HasPrefix(filepath, \"http://\") || strings.HasPrefix(filepath, \"https://\") {\n\t\tc.isURL = true\n\t\tc.httpClient = http.Client{\n\t\t\tTimeout: 5 * time.Second,\n\t\t\tTransport: &transport{\n\t\t\t\tHeaders: options.Headers,\n\t\t\t},\n\t\t}\n\t}\n\treturn c, nil\n}",
"func NewClient(registryURL string) *Client {\n\treturn &Client{\n\t\turl: registryURL + \"/sgulreg/services\",\n\t\thttpClient: http.DefaultClient,\n\t\treqMux: &sync.RWMutex{},\n\t\tregistered: false,\n\t}\n}",
"func NewClient(httpClient *http.Client, atlasSubdomain string) (*Client, error) {\n\tif httpClient == nil {\n\t\thttpClient = &http.Client{}\n\t}\n\n\tvar baseURLStr strings.Builder\n\tbaseURLStr.WriteString(\"https://\")\n\tbaseURLStr.WriteString(atlasSubdomain)\n\tbaseURLStr.WriteString(\".\")\n\tbaseURLStr.WriteString(defaultBaseURL)\n\n\tbaseURL, err := url.Parse(baseURLStr.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Client{client: httpClient, BaseURL: baseURL}\n\tc.common.client = c\n\tc.ApplicationRole = (*ApplicationRoleService)(&c.common)\n\tc.AuditRecords = (*AuditRecordsService)(&c.common)\n\tc.AvatarsService = (*AvatarsService)(&c.common)\n\treturn c, nil\n}",
"func NewClient(sugar *zap.SugaredLogger, url, signingKey string) (*Client, error) {\n\tconst timeout = time.Minute\n\tclient := &http.Client{Timeout: timeout}\n\treturn &Client{sugar: sugar, url: url, client: client, signingKey: signingKey}, nil\n}",
"func NewClient(url, usr, pwd string, mods ...func(*Client)) (Client, error) {\n\n\t// Normalize the URL\n\tif !strings.HasPrefix(url, \"http://\") && !strings.HasPrefix(url, \"https://\") {\n\t\turl = \"https://\" + url\n\t}\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\n\tcookieJar, _ := cookiejar.New(nil)\n\thttpClient := http.Client{\n\t\tTimeout: 60 * time.Second,\n\t\tTransport: tr,\n\t\tJar: cookieJar,\n\t}\n\n\tclient := Client{\n\t\tHttpClient: &httpClient,\n\t\tUrl: url,\n\t\tUsr: usr,\n\t\tPwd: pwd,\n\t}\n\tfor _, mod := range mods {\n\t\tmod(&client)\n\t}\n\treturn client, nil\n}",
"func NewClient(host string) *Client {\n\tc := &Client{}\n\tc.URL.Host = host\n\treturn c\n}",
"func newTestClient(fn RoundTripFunc) *http.Client {\n\treturn &http.Client{\n\t\tTransport: fn,\n\t}\n}"
] | [
"0.6308598",
"0.6222202",
"0.6097972",
"0.6060751",
"0.6043418",
"0.604116",
"0.5992147",
"0.5986665",
"0.5968273",
"0.59570366",
"0.5919546",
"0.59144855",
"0.59128547",
"0.5904674",
"0.5874914",
"0.58727384",
"0.58473456",
"0.5841087",
"0.58391356",
"0.5830302",
"0.5823845",
"0.5815095",
"0.5804614",
"0.5800183",
"0.5796186",
"0.5795588",
"0.57711905",
"0.5763741",
"0.5753981",
"0.57287747",
"0.5712433",
"0.57012725",
"0.56978256",
"0.5692709",
"0.56791",
"0.56674045",
"0.565989",
"0.564957",
"0.56408155",
"0.5632043",
"0.5631362",
"0.56286025",
"0.5625796",
"0.5624589",
"0.5599554",
"0.55965966",
"0.5592429",
"0.55880785",
"0.55741847",
"0.5560923",
"0.5559902",
"0.55595577",
"0.5537816",
"0.55329883",
"0.55228436",
"0.55169463",
"0.55051345",
"0.5503144",
"0.548703",
"0.5482654",
"0.54718596",
"0.5468862",
"0.54663694",
"0.5466146",
"0.54604673",
"0.5457545",
"0.54533696",
"0.54507905",
"0.54492724",
"0.54453605",
"0.5443859",
"0.5435001",
"0.54324573",
"0.5432445",
"0.54248315",
"0.54221255",
"0.54093087",
"0.54060847",
"0.54058945",
"0.5403997",
"0.5401059",
"0.5401059",
"0.53993934",
"0.5399327",
"0.5398481",
"0.5398267",
"0.53973943",
"0.5395777",
"0.5394296",
"0.5392406",
"0.53896695",
"0.538825",
"0.5388043",
"0.53799886",
"0.5373203",
"0.53694224",
"0.53693706",
"0.53581595",
"0.53557223",
"0.5348011"
] | 0.76326865 | 0 |
Announce sends a Announce request to the tracker. | Announce отправляет запрос Announce на трекер. | func (t *Client) Announce(c context.Context, req AnnounceRequest) (
resp AnnounceResponse, err error) {
if req.PeerID.IsZero() {
if t.ID.IsZero() {
req.PeerID = metainfo.NewRandomHash()
} else {
req.PeerID = t.ID
}
}
err = t.send(c, t.AnnounceURL, req.ToQuery(), &resp)
return
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (b *Builder) Announce() (err error) {\n\targs := &rpc.AnnounceArgs{\n\t\tGOOS: b.b.GOOS(),\n\t\tGOARCH: b.b.GOARCH(),\n\t\tType: \"Builder\",\n\t\tURL: b.base,\n\t}\n\treply := new(rpc.AnnounceReply)\n\tif err = b.tcl.Call(\"Tracker.Announce\", args, reply); err != nil {\n\t\treturn\n\t}\n\tb.key = reply.Key\n\treturn\n}",
"func (r *Runner) Announce() (err error) {\n\targs := &rpc.AnnounceArgs{\n\t\tGOOS: runtime.GOOS,\n\t\tGOARCH: runtime.GOARCH,\n\t\tType: \"Runner\",\n\t\tURL: r.base,\n\t}\n\treply := new(rpc.AnnounceReply)\n\tif err = r.tcl.Call(\"Tracker.Announce\", args, reply); err != nil {\n\t\treturn\n\t}\n\tr.key = reply.Key\n\treturn\n}",
"func (cc *ClientConn) Announce(u *base.URL, tracks Tracks) (*base.Response, error) {\n\terr := cc.checkState(map[clientConnState]struct{}{\n\t\tclientConnStateInitial: {},\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// in case of ANNOUNCE, the base URL doesn't have a trailing slash.\n\t// (tested with ffmpeg and gstreamer)\n\tbaseURL := u.Clone()\n\n\t// set id, base url and control attribute on tracks\n\tfor i, t := range tracks {\n\t\tt.ID = i\n\t\tt.BaseURL = baseURL\n\t\tt.Media.Attributes = append(t.Media.Attributes, psdp.Attribute{\n\t\t\tKey: \"control\",\n\t\t\tValue: \"trackID=\" + strconv.FormatInt(int64(i), 10),\n\t\t})\n\t}\n\n\tres, err := cc.Do(&base.Request{\n\t\tMethod: base.Announce,\n\t\tURL: u,\n\t\tHeader: base.Header{\n\t\t\t\"Content-Type\": base.HeaderValue{\"application/sdp\"},\n\t\t},\n\t\tBody: tracks.Write(),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif res.StatusCode != base.StatusOK {\n\t\treturn nil, liberrors.ErrClientWrongStatusCode{\n\t\t\tCode: res.StatusCode, Message: res.StatusMessage}\n\t}\n\n\tcc.streamBaseURL = baseURL\n\tcc.state = clientConnStatePreRecord\n\n\treturn res, nil\n}",
"func SendAnnounce() {\n\tvar pkt MeshPkt\n\tvar annc AnnouncePkt\n\n\tpkt.PktPayload = TypeAnnounce\n\tpkt.SenderID = MySID\n\n\tannc.NumServices = 3\n\tannc.Services = [32]int8{ServiceLookup, ServicePubChat, ServicePrivMsg}\n\n\tvar buffer bytes.Buffer\n\n\tbinary.Write(&buffer, binary.BigEndian, &pkt)\n\tbinary.Write(&buffer, binary.BigEndian, &annc)\n\n\tsendPkt(\"255.255.255.255\", 8032, buffer.Bytes())\n}",
"func NewAnnounce(c *gin.Context) (*AnnounceRequest, error) {\n\tq, err := QueryStringParser(c.Request.RequestURI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcompact := q.Params[\"compact\"] != \"0\"\n\n\tevent := ANNOUNCE\n\tevent_name, _ := q.Params[\"event\"]\n\tswitch event_name {\n\tcase \"started\":\n\t\tevent = STARTED\n\tcase \"stopped\":\n\t\tevent = STOPPED\n\tcase \"completed\":\n\t\tevent = COMPLETED\n\t}\n\n\tnumWant := getNumWant(q, 30)\n\n\tinfo_hash, exists := q.Params[\"info_hash\"]\n\tif !exists {\n\t\treturn nil, errors.New(\"Info hash not supplied\")\n\t}\n\n\tpeerID, exists := q.Params[\"peer_id\"]\n\tif !exists {\n\t\treturn nil, errors.New(\"Peer id not supplied\")\n\t}\n\n\tipv4, err := getIP(q.Params[\"ip\"])\n\tif err != nil {\n\t\t// Look for forwarded ip in header then default to remote address\n\t\tforwarded_ip := c.Request.Header.Get(\"X-Forwarded-For\")\n\t\tif forwarded_ip != \"\" {\n\t\t\tipv4_new, err := getIP(forwarded_ip)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"NewAnnounce: Failed to parse header supplied IP\", err)\n\t\t\t\treturn nil, errors.New(\"Invalid ip header\")\n\t\t\t}\n\t\t\tipv4 = ipv4_new\n\t\t} else {\n\t\t\ts := strings.Split(c.Request.RemoteAddr, \":\")\n\t\t\tip_req, _ := s[0], s[1]\n\t\t\tipv4_new, err := getIP(ip_req)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"NewAnnounce: Failed to parse detected IP\", err)\n\t\t\t\treturn nil, errors.New(\"Invalid ip hash\")\n\t\t\t}\n\t\t\tipv4 = ipv4_new\n\t\t}\n\t}\n\n\tport, err := q.Uint64(\"port\")\n\tif err != nil || port < 1024 || port > 65535 {\n\t\treturn nil, errors.New(\"Invalid port, must be between 1024 and 65535\")\n\t}\n\n\tleft, err := q.Uint64(\"left\")\n\tif err != nil {\n\t\treturn nil, errors.New(\"No left value\")\n\t} else {\n\t\tleft = util.UMax(0, left)\n\t}\n\n\tdownloaded, err := q.Uint64(\"downloaded\")\n\tif err != nil {\n\t\tdownloaded = 0\n\t} else {\n\t\tdownloaded = util.UMax(0, downloaded)\n\t}\n\n\tuploaded, err := 
q.Uint64(\"uploaded\")\n\tif err != nil {\n\t\tuploaded = 0\n\t} else {\n\t\tuploaded = util.UMax(0, uploaded)\n\t}\n\n\tcorrupt, err := q.Uint64(\"corrupt\")\n\tif err != nil {\n\t\t// Assume we just don't have the param\n\t\tcorrupt = 0\n\t} else {\n\t\tcorrupt = util.UMax(0, corrupt)\n\t}\n\n\treturn &AnnounceRequest{\n\t\tCompact: compact,\n\t\tCorrupt: corrupt,\n\t\tDownloaded: downloaded,\n\t\tEvent: event,\n\t\tIPv4: ipv4,\n\t\tInfoHash: info_hash,\n\t\tLeft: left,\n\t\tNumWant: numWant,\n\t\tPeerID: peerID,\n\t\tPort: port,\n\t\tUploaded: uploaded,\n\t}, nil\n}",
"func (tracker *Tracker) HandleAnnounce(ctx *gin.Context) {\n\tstats.RegisterEvent(stats.EV_ANNOUNCE)\n\tr := db.Pool.Get()\n\tdefer r.Close()\n\tif r.Err() != nil {\n\t\tstats.RegisterEvent(stats.EV_ANNOUNCE_FAIL)\n\t\tctx.Error(r.Err()).SetMeta(errMeta(\n\t\t\tMSG_GENERIC_ERROR,\n\t\t\t\"Internal error, HALP\",\n\t\t\tlog.Fields{\"fn\": \"HandleAnnounce\"},\n\t\t\tlog.ErrorLevel,\n\t\t))\n\t\treturn\n\t}\n\n\tlog.Debugln(ctx.Request.RequestURI)\n\tann, err := NewAnnounce(ctx)\n\tif err != nil {\n\t\tstats.RegisterEvent(stats.EV_ANNOUNCE_FAIL)\n\t\tctx.Error(err).SetMeta(errMeta(\n\t\t\tMSG_QUERY_PARSE_FAIL,\n\t\t\t\"Failed to parse announce\",\n\t\t\tlog.Fields{\n\t\t\t\t\"fn\": \"HandleAnnounce\",\n\t\t\t\t\"remote_ip\": ctx.Request.RemoteAddr,\n\t\t\t\t\"uri\": ctx.Request.RequestURI,\n\t\t\t},\n\t\t\tlog.ErrorLevel,\n\t\t))\n\t\treturn\n\t}\n\n\tinfo_hash_hex := fmt.Sprintf(\"%x\", ann.InfoHash)\n\tlog.WithFields(log.Fields{\n\t\t\"ih\": info_hash_hex,\n\t\t\"ip\": ann.IPv4,\n\t\t\"port\": ann.Port,\n\t\t\"up\": util.Bytes(ann.Uploaded),\n\t\t\"dn\": util.Bytes(ann.Downloaded),\n\t\t\"left\": util.Bytes(ann.Left),\n\t\t\"event\": ann.Event,\n\t}).Debug(\"Announce event\")\n\n\tpasskey := ctx.Param(\"passkey\")\n\n\tuser_id := tracker.findUserID(passkey)\n\tif user_id == 0 {\n\t\tstats.RegisterEvent(stats.EV_INVALID_PASSKEY)\n\t\tctx.Error(errors.New(\"Invalid passkey\")).SetMeta(errMeta(\n\t\t\tMSG_INVALID_AUTH,\n\t\t\t\"Invalid passkey supplied\",\n\t\t\tlog.Fields{\"fn\": \"HandleAnnounce\", \"passkey\": passkey},\n\t\t\tlog.ErrorLevel,\n\t\t))\n\t\treturn\n\t}\n\n\tuser := tracker.FindUserByID(user_id)\n\tif !user.CanLeech && ann.Left > 0 {\n\t\tctx.Error(errors.New(\"Leech disabled for user\")).SetMeta(errMeta(\n\t\t\tMSG_GENERIC_ERROR,\n\t\t\t\"Leeching not allowed for user\",\n\t\t\tlog.Fields{\"fn\": \"HandleAnnounce\", \"passkey\": passkey},\n\t\t\tlog.ErrorLevel,\n\t\t))\n\t\treturn\n\t}\n\tif !user.Enabled {\n\t\tctx.Error(errors.New(\"Disabled 
user\")).SetMeta(errMeta(\n\t\t\tMSG_GENERIC_ERROR,\n\t\t\t\"User disabled\",\n\t\t\tlog.Fields{\"fn\": \"HandleAnnounce\", \"passkey\": passkey},\n\t\t\tlog.ErrorLevel,\n\t\t))\n\t\treturn\n\t}\n\n\tif !tracker.IsValidClient(ann.PeerID) {\n\t\tstats.RegisterEvent(stats.EV_INVALID_CLIENT)\n\t\tctx.Error(errors.New(\"Banned client\")).SetMeta(errMeta(\n\t\t\tMSG_GENERIC_ERROR,\n\t\t\t\"Banned client, check wiki for whitelisted clients\",\n\t\t\tlog.Fields{\n\t\t\t\t\"fn\": \"HandleAnnounce\",\n\t\t\t\t\"user_id\": user.UserID,\n\t\t\t\t\"user_name\": user.Username,\n\t\t\t\t\"peer_id\": ann.PeerID[0:8],\n\t\t\t},\n\t\t\tlog.ErrorLevel,\n\t\t))\n\t\treturn\n\t}\n\n\ttorrent := tracker.FindTorrentByInfoHash(info_hash_hex)\n\tif torrent == nil {\n\t\tstats.RegisterEvent(stats.EV_INVALID_INFOHASH)\n\t\tctx.Error(errors.New(\"Invalid info hash\")).SetMeta(errMeta(\n\t\t\tMSG_INFO_HASH_NOT_FOUND,\n\t\t\t\"Torrent not found, try TPB\",\n\t\t\tlog.Fields{\n\t\t\t\t\"fn\": \"HandleAnnounce\",\n\t\t\t\t\"user_id\": user.UserID,\n\t\t\t\t\"user_name\": user.Username,\n\t\t\t\t\"info_hash\": info_hash_hex,\n\t\t\t},\n\t\t\tlog.WarnLevel,\n\t\t))\n\t\treturn\n\t} else if !torrent.Enabled {\n\t\tstats.RegisterEvent(stats.EV_INVALID_INFOHASH)\n\t\tctx.Error(errors.New(\"Torrent not enabled\")).SetMeta(errMeta(\n\t\t\tMSG_INFO_HASH_NOT_FOUND,\n\t\t\ttorrent.DelReason(),\n\t\t\tlog.Fields{\n\t\t\t\t\"fn\": \"HandleAnnounce\",\n\t\t\t\t\"user_id\": user.UserID,\n\t\t\t\t\"user_name\": user.Username,\n\t\t\t\t\"info_hash\": info_hash_hex,\n\t\t\t},\n\t\t\tlog.WarnLevel,\n\t\t))\n\t\treturn\n\t}\n\n\tpeer := torrent.findPeer(ann.PeerID)\n\tif peer == nil {\n\t\tlog.Debug(\"No existing peer found\")\n\t\tpeer = NewPeer(ann.PeerID, ann.IPv4.String(), ann.Port, torrent, user)\n\t\t// torrent.AddPeer(r, peer)\n\t}\n\n\tpeer_diff := PeerDiff{User: user, Torrent: torrent}\n\t// user update MUST happen after peer update since we rely on the old dl/ul values\n\tpeer.Update(ann, &peer_diff, 
torrent.Seeders)\n\ttorrent.Update(ann)\n\tuser.Update(ann, &peer_diff, torrent.MultiUp, torrent.MultiDn)\n\n\tif ann.Event == STOPPED {\n\t\tlog.Debug(\"Removing peer due to stop announce\")\n\t\ttorrent.DelPeer(r, peer)\n\t} else {\n\t\tif !torrent.HasPeer(peer) {\n\t\t\ttorrent.AddPeer(r, peer)\n\t\t}\n\t}\n\n\tif ann.Event == STOPPED {\n\t\t// Remove from torrents active peer set\n\t\tr.Send(\"SREM\", torrent.TorrentPeersKey, ann.PeerID)\n\n\t\tr.Send(\"SREM\", user.KeyActive, torrent.TorrentID)\n\n\t\t// Mark the peer as inactive\n\t\tr.Send(\"HSET\", peer.KeyPeer, \"active\", 0)\n\n\t\tr.Send(\"DEL\", peer.KeyTimer)\n\n\t\tif peer.IsHNR() {\n\t\t\tuser.AddHNR(r, torrent.TorrentID)\n\t\t}\n\t} else if ann.Event == COMPLETED {\n\n\t\t// Remove the torrent from the users incomplete set\n\t\tr.Send(\"SREM\", user.KeyIncomplete, torrent.TorrentID)\n\n\t\t// Remove the torrent from the users incomplete set\n\t\tr.Send(\"SADD\", user.KeyComplete, torrent.TorrentID)\n\n\t\t// Remove from the users hnr list if it exists\n\t\tr.Send(\"SREM\", user.KeyHNR, torrent.TorrentID)\n\n\t} else if ann.Event == STARTED {\n\t\t// Make sure we account for a user completing a torrent outside of\n\t\t// our view, or resuming from previously completions\n\t\tif peer.IsSeeder() {\n\t\t\tr.Send(\"SREM\", user.KeyHNR, torrent.TorrentID)\n\t\t\tr.Send(\"SREM\", user.KeyIncomplete, torrent.TorrentID)\n\t\t\tr.Send(\"SADD\", user.KeyComplete, torrent.TorrentID)\n\t\t} else {\n\t\t\tr.Send(\"SREM\", user.KeyComplete, torrent.TorrentID)\n\t\t\tr.Send(\"SADD\", user.KeyIncomplete, torrent.TorrentID)\n\t\t}\n\t}\n\n\tif ann.Event != STOPPED {\n\n\t\t// Add peer to torrent active peers\n\t\tr.Send(\"SADD\", torrent.TorrentPeersKey, ann.PeerID)\n\n\t\t// Add to users active torrent set\n\t\tr.Send(\"SADD\", user.KeyActive, torrent.TorrentID)\n\n\t\t// Refresh the peers expiration timer\n\t\t// If this expires, the peer reaper takes over and removes the\n\t\t// peer from torrents in the case of a 
non-clean client shutdown\n\t\tr.Send(\"SETEX\", peer.KeyTimer, conf.Config.ReapInterval, 1)\n\t}\n\tr.Flush()\n\n\tSyncEntityC <- torrent\n\tSyncEntityC <- user\n\n\tdict := bencode.Dict{\n\t\t\"complete\": torrent.Seeders,\n\t\t\"incomplete\": torrent.Leechers,\n\t\t\"interval\": conf.Config.AnnInterval,\n\t\t\"min interval\": conf.Config.AnnIntervalMin,\n\t}\n\n\tpeers := torrent.GetPeers(ann.NumWant, peer.GetCoord())\n\tif peers != nil {\n\t\tdict[\"peers\"] = MakeCompactPeers(peers, ann.PeerID)\n\t} else {\n\t\tdict[\"peers\"] = []byte{}\n\t}\n\tvar out_bytes bytes.Buffer\n\tencoder := bencode.NewEncoder(&out_bytes)\n\n\ter_msg_encoded := encoder.Encode(dict)\n\tif er_msg_encoded != nil {\n\t\tstats.RegisterEvent(stats.EV_ANNOUNCE_FAIL)\n\t\tctx.Error(er_msg_encoded).SetMeta(errMeta(\n\t\t\tMSG_GENERIC_ERROR,\n\t\t\t\"Internal error\",\n\t\t\tlog.Fields{\n\t\t\t\t\"fn\": \"HandleAnnounce\",\n\t\t\t\t\"user_id\": user.UserID,\n\t\t\t\t\"user_name\": user.Username,\n\t\t\t\t\"info_hash\": info_hash_hex,\n\t\t\t},\n\t\t\tlog.DebugLevel,\n\t\t))\n\t\treturn\n\t}\n\n\tctx.String(MSG_OK, out_bytes.String())\n}",
"func Announce(url string) (*AnnounceResponse, error) {\n\tres, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(res.Body)\n\tannRes := new(AnnounceResponse)\n\terr = bencode.Unmarshal(buf, annRes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn annRes, nil\n}",
"func (u UDPTracker) Announce(query url.Values, file data.FileRecord) []byte {\n\t// Create UDP announce response\n\tannounce := udp.AnnounceResponse{\n\t\tAction: 1,\n\t\tTransID: u.TransID,\n\t\tInterval: uint32(common.Static.Config.Interval),\n\t\tLeechers: uint32(file.Leechers()),\n\t\tSeeders: uint32(file.Seeders()),\n\t}\n\n\t// Convert to UDP byte buffer\n\tannounceBuf, err := announce.MarshalBinary()\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn u.Error(\"Could not create UDP announce response\")\n\t}\n\n\t// Numwant\n\tnumwant, err := strconv.Atoi(query.Get(\"numwant\"))\n\tif err != nil {\n\t\tnumwant = 50\n\t}\n\n\t// Add compact peer list\n\tres := bytes.NewBuffer(announceBuf)\n\terr = binary.Write(res, binary.BigEndian, file.PeerList(query.Get(\"ip\"), numwant))\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn u.Error(\"Could not create UDP announce response\")\n\t}\n\n\treturn res.Bytes()\n}",
"func (c *Conn) Announce(msg string) {\n\tc.announce(msg, c.username)\n}",
"func (m *DHTModule) Announce(key string) {\n\n\tif m.IsAttached() {\n\t\tif err := m.Client.DHT().Announce(key); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn\n\t}\n\n\tm.dht.Announce(announcer.ObjTypeAny, \"\", []byte(key), nil)\n}",
"func (t *UDPTracker) Announce(hash meta.Hash, id []byte, ip net.IP, port int, event int, status *DownloadStatus) ([]string, error) {\n\tif t.conn == nil {\n\t\terr := t.connect()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdata, err := t.announce(hash, event, status)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpeers := convAddrs(data)\n\tif peers == nil {\n\t\treturn nil, errUDPResp\n\t}\n\treturn peers, nil\n}",
"func (teamID TeamID) SendAnnounce(message string) error {\n\t// for each agent on the team\n\t// determine which messaging protocols are enabled for gid\n\t// pick optimal\n\n\t// ok, err := SendMessage(gid, message)\n\treturn nil\n}",
"func (n *Interface) StartAnnounce() {\n\tn.ad.StartAnnounceDaemon()\n}",
"func (c *chatRoom) announce(msg string) {\n\tc.messages <- \"* \" + msg + \" *\"\n}",
"func (g *GateKeeper) announce() error {\n\tm, err := json.Marshal(g.Meta)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debug().Msg(\"Starting to announce API to etcd\")\n\t_, err = (*g.etcd).Set(context.Background(), \"/meta/gatekeeper\", string(m), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Info().Msg(\"Gatekeeper registered in etcd.\")\n\treturn nil\n}",
"func (tracker *UDP) Announce(ctx context.Context, hash []byte, myid []byte,\n\twant int, size int64, port4, port6 int, proxy string,\n\tf func(net.IP, int) bool) error {\n\tok := tracker.tryLock()\n\tif !ok {\n\t\treturn ErrNotReady\n\t}\n\tdefer tracker.unlock()\n\n\tif !tracker.ready() {\n\t\treturn ErrNotReady\n\t}\n\n\turl, err := nurl.Parse(tracker.url)\n\tif err != nil {\n\t\ttracker.updateInterval(0, err)\n\t\treturn err\n\t}\n\n\ttracker.time = time.Now()\n\n\tvar i4, i6 time.Duration\n\tvar e4, e6 error\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\tgo func() {\n\t\ti4, e4 = announceUDP(ctx, \"udp4\", f,\n\t\t\turl, hash, myid, want, size, port4, proxy)\n\t\twg.Done()\n\t}()\n\tgo func() {\n\t\ti6, e6 = announceUDP(ctx, \"udp6\", f,\n\t\t\turl, hash, myid, want, size, port6, proxy)\n\t\twg.Done()\n\t}()\n\twg.Wait()\n\tif e4 != nil && e6 != nil {\n\t\terr = e4\n\t}\n\tinterval := i4\n\tif interval < i6 {\n\t\tinterval = i6\n\t}\n\n\ttracker.updateInterval(interval, err)\n\treturn err\n}",
"func (a *Announcer) Announce(\n\tctx context.Context,\n\tmemberIndex group.MemberIndex,\n\tsessionID string,\n) (\n\t[]group.MemberIndex,\n\terror,\n) {\n\tmessagesChan := make(chan net.Message, announceReceiveBuffer)\n\n\ta.broadcastChannel.Recv(ctx, func(message net.Message) {\n\t\tmessagesChan <- message\n\t})\n\n\terr := a.broadcastChannel.Send(ctx, &announcementMessage{\n\t\tsenderID: memberIndex,\n\t\tprotocolID: a.protocolID,\n\t\tsessionID: sessionID,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot send announcement message: [%w]\", err)\n\t}\n\n\treadyMembersIndexesSet := make(map[group.MemberIndex]bool)\n\t// Mark itself as ready.\n\treadyMembersIndexesSet[memberIndex] = true\n\nloop:\n\tfor {\n\t\tselect {\n\t\tcase netMessage := <-messagesChan:\n\t\t\tannouncement, ok := netMessage.Payload().(*announcementMessage)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif announcement.senderID == memberIndex {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !a.membershipValidator.IsValidMembership(\n\t\t\t\tannouncement.senderID,\n\t\t\t\tnetMessage.SenderPublicKey(),\n\t\t\t) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif announcement.protocolID != a.protocolID {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif announcement.sessionID != sessionID {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treadyMembersIndexesSet[announcement.senderID] = true\n\t\tcase <-ctx.Done():\n\t\t\tbreak loop\n\t\t}\n\t}\n\n\treadyMembersIndexes := make([]group.MemberIndex, 0)\n\tfor memberIndex := range readyMembersIndexesSet {\n\t\treadyMembersIndexes = append(readyMembersIndexes, memberIndex)\n\t}\n\n\tsort.Slice(readyMembersIndexes, func(i, j int) bool {\n\t\treturn readyMembersIndexes[i] < readyMembersIndexes[j]\n\t})\n\n\treturn readyMembersIndexes, nil\n}",
"func (bft *ProtocolBFTCoSi) startAnnouncement(t RoundType) error {\n\tbft.announceChan <- announceChan{Announce: Announce{TYPE: t, Timeout: bft.Timeout}}\n\treturn nil\n}",
"func (c *Caller) Announce() (Card, error) {\n\t// XXX Must have at least one player to start the game.\n\tif c.gameFinished {\n\t\treturn blankCard, fmt.Errorf(\"game already finished\")\n\t}\n\n\tc.gameStarted = true\n\tcard, err := c.deck.Select()\n\tif err != nil {\n\t\tc.gameFinished = true\n\t\treturn card, err\n\t}\n\n\t// We update our internal boards to use them later in `Loteria` for\n\t// confirming the player really won.\n\tfor name, board := range c.players {\n\t\tif board.Mark(card) == nil {\n\t\t\tc.players[name] = board\n\t\t}\n\t}\n\n\treturn card, nil\n}",
"func (inn *LocalNode) consulAnnounce(conf *Config) (err error) {\n\tcheckID := inn.ID() + \"_ttl\"\n\n\taddrParts := strings.Split(conf.RPCAddress, \":\")\n\tif len(addrParts) < 2 {\n\t\treturn errors.New(\"address format should be HOST:PORT\")\n\t}\n\tport, err := strconv.ParseInt(addrParts[1], 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcheckInterval, err := time.ParseDuration(conf.Health.CheckInterval)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcheckTimeout, err := time.ParseDuration(conf.Health.CheckTimeout)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create heartbeat check\n\tacc := consulapi.AgentServiceCheck{\n\t\tCheckID: checkID,\n\t\tName: checkID,\n\t\tStatus: consulapi.HealthCritical,\n\t\tDeregisterCriticalServiceAfter: conf.Health.DeregisterCriticalServiceAfter,\n\t\tTTL: (checkInterval + checkTimeout).String(),\n\t}\n\n\tservice := &consulapi.AgentServiceRegistration{\n\t\tID: conf.Consul.Service,\n\t\tName: conf.Consul.Service,\n\t\t//Tags: nil,\n\t\tPort: int(port),\n\t\tAddress: addrParts[0],\n\t\tCheck: &acc,\n\t\tNamespace: conf.Consul.Namespace,\n\t}\n\n\tif err := inn.consul.Agent().ServiceRegister(service); err != nil {\n\t\treturn err\n\t}\n\n\t// Run TTL updater\n\tgo inn.updateTTLConsul(checkInterval, checkID)\n\n\treturn nil\n}",
"func announce() {\n\tif isCoordinator {\n\t\tlogrus.Info(\"Coordinator: \", isCoordinator)\n\t\trevealIP()\n\n\t\tlogrus.Info(\"Running on port: \", karaiPort)\n\t} else {\n\t\tlogrus.Debug(\"launching as normal user on port: \", karaiPort)\n\t}\n}",
"func TestHeartbeatAnnounce(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tmode HeartbeatMode\n\t\tkind string\n\t}{\n\t\t{mode: HeartbeatModeProxy, kind: types.KindProxy},\n\t\t{mode: HeartbeatModeAuth, kind: types.KindAuthServer},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.mode.String(), func(t *testing.T) {\n\t\t\tctx, cancel := context.WithCancel(context.Background())\n\t\t\tdefer cancel()\n\t\t\tclock := clockwork.NewFakeClock()\n\n\t\t\tannouncer := newFakeAnnouncer(ctx)\n\t\t\thb, err := NewHeartbeat(HeartbeatConfig{\n\t\t\t\tContext: ctx,\n\t\t\t\tMode: tt.mode,\n\t\t\t\tComponent: \"test\",\n\t\t\t\tAnnouncer: announcer,\n\t\t\t\tCheckPeriod: time.Second,\n\t\t\t\tAnnouncePeriod: 60 * time.Second,\n\t\t\t\tKeepAlivePeriod: 10 * time.Second,\n\t\t\t\tServerTTL: 600 * time.Second,\n\t\t\t\tClock: clock,\n\t\t\t\tGetServerInfo: func() (types.Resource, error) {\n\t\t\t\t\tsrv := &types.ServerV2{\n\t\t\t\t\t\tKind: tt.kind,\n\t\t\t\t\t\tVersion: types.V2,\n\t\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\t\tNamespace: apidefaults.Namespace,\n\t\t\t\t\t\t\tName: \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tSpec: types.ServerSpecV2{\n\t\t\t\t\t\t\tAddr: \"127.0.0.1:1234\",\n\t\t\t\t\t\t\tHostname: \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\tsrv.SetExpiry(clock.Now().UTC().Add(apidefaults.ServerAnnounceTTL))\n\t\t\t\t\treturn srv, nil\n\t\t\t\t},\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateInit)\n\n\t\t\t// on the first run, heartbeat will move to announce state,\n\t\t\t// will call announce right away\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounce)\n\n\t\t\terr = hb.announce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, announcer.upsertCalls[hb.Mode], 1)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\t\t\trequire.Equal(t, hb.nextAnnounce, clock.Now().UTC().Add(hb.AnnouncePeriod))\n\n\t\t\t// next call will not 
move to announce, because time is not up yet\n\t\t\terr = hb.fetchAndAnnounce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\n\t\t\t// advance time, and heartbeat will move to announce\n\t\t\tclock.Advance(hb.AnnouncePeriod + time.Second)\n\t\t\terr = hb.fetch()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounce)\n\t\t\terr = hb.announce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, announcer.upsertCalls[hb.Mode], 2)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\t\t\trequire.Equal(t, hb.nextAnnounce, clock.Now().UTC().Add(hb.AnnouncePeriod))\n\n\t\t\t// in case of error, system will move to announce wait state,\n\t\t\t// with next attempt scheduled on the next keep alive period\n\t\t\tannouncer.err = trace.ConnectionProblem(nil, \"boom\")\n\t\t\tclock.Advance(hb.AnnouncePeriod + time.Second)\n\t\t\terr = hb.fetchAndAnnounce()\n\t\t\trequire.Error(t, err)\n\t\t\trequire.True(t, trace.IsConnectionProblem(err))\n\t\t\trequire.Equal(t, announcer.upsertCalls[hb.Mode], 3)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\t\t\trequire.Equal(t, hb.nextAnnounce, clock.Now().UTC().Add(hb.KeepAlivePeriod))\n\n\t\t\t// once announce is successful, next announce is set on schedule\n\t\t\tannouncer.err = nil\n\t\t\tclock.Advance(hb.KeepAlivePeriod + time.Second)\n\t\t\terr = hb.fetchAndAnnounce()\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, announcer.upsertCalls[hb.Mode], 4)\n\t\t\trequire.Equal(t, hb.state, HeartbeatStateAnnounceWait)\n\t\t\trequire.Equal(t, hb.nextAnnounce, clock.Now().UTC().Add(hb.AnnouncePeriod))\n\t\t})\n\t}\n}",
"func multicastAnnounce(addr string) error {\n\tif addr == \"\" {\n\t\taddr = guessMulticastAddress()\n\t}\n\n\tfullAddr := addr + \":\" + strconv.FormatInt(int64(GetMulticastPort()), 10)\n\n\tlogInfo(\"Announcing presence on\", fullAddr)\n\n\taddress, err := net.ResolveUDPAddr(\"udp\", fullAddr)\n\tif err != nil {\n\t\tlogError(err)\n\t\treturn err\n\t}\n\tladdr := &net.UDPAddr{\n\t\tIP: GetListenIP(),\n\t\tPort: 0,\n\t}\n\tfor {\n\t\tc, err := net.DialUDP(\"udp\", laddr, address)\n\t\tif err != nil {\n\t\t\tlogError(err)\n\t\t\treturn err\n\t\t}\n\t\t// Compose and send the multicast announcement\n\t\tmsgBytes := encodeMulticastAnnounceBytes()\n\t\t_, err = c.Write(msgBytes)\n\t\tif err != nil {\n\t\t\tlogError(err)\n\t\t\treturn err\n\t\t}\n\n\t\tlogfTrace(\"Sent announcement multicast from %v to %v\", laddr, fullAddr)\n\n\t\tif GetMulticastAnnounceIntervalSeconds() > 0 {\n\t\t\ttime.Sleep(time.Second * time.Duration(GetMulticastAnnounceIntervalSeconds()))\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t}\n}",
"func (p *Provider) AnnounceDealToIndexer(ctx context.Context, proposalCid cid.Cid) error {\n\tvar deal storagemarket.MinerDeal\n\tif err := p.deals.Get(proposalCid).Get(&deal); err != nil {\n\t\treturn xerrors.Errorf(\"failed getting deal %s: %w\", proposalCid, err)\n\t}\n\n\tif err := p.meshCreator.Connect(ctx); err != nil {\n\t\treturn fmt.Errorf(\"cannot publish index record as indexer host failed to connect to the full node: %w\", err)\n\t}\n\n\tannCid, err := p.indexProvider.NotifyPut(ctx, nil, deal.ProposalCid.Bytes(), p.metadataForDeal(deal))\n\tif err == nil {\n\t\tlog.Infow(\"deal announcement sent to index provider\", \"advertisementCid\", annCid, \"shard-key\", deal.Proposal.PieceCID,\n\t\t\t\"proposalCid\", deal.ProposalCid)\n\t}\n\treturn err\n}",
"func AnnounceTournament(ctx AppContext) error {\n\tform := new(Announce)\n\tif err := ctx.Bind(form); err != nil {\n\t\treturn ctx.JSON(http.StatusBadRequest, Error(err))\n\t}\n\n\tservice := services.NewTournamentService(\n\t\tdb.NewTournamentRepo(ctx.Session),\n\t\tdb.NewPlayerRepo(ctx.Session),\n\t)\n\n\ttournament, err := service.Announce(form.TournamentID, int64(form.Deposit))\n\tif err != nil {\n\t\treturn ctx.JSON(http.StatusBadRequest, Error(err))\n\t}\n\n\treturn ctx.JSON(http.StatusOK, tournament)\n}",
"func (p *Note) Announcement(mt, msg, buid string, out interface{}) error {\n\tctx, cancel := context.WithTimeout(context.Background(), p.Timeout)\n\tdefer cancel()\n\treturn p.client.Do(p.announce(ctx, mt, msg, buid, 1), out)\n}",
"func (a *PeriodicalAnnouncer) Run() {\n\tdefer close(a.doneC)\n\ta.backoff.Reset()\n\n\ttimer := time.NewTimer(math.MaxInt64)\n\tdefer timer.Stop()\n\n\tresetTimer := func(interval time.Duration) {\n\t\ttimer.Reset(interval)\n\t\tif interval < 0 {\n\t\t\ta.nextAnnounce = time.Now()\n\t\t} else {\n\t\t\ta.nextAnnounce = time.Now().Add(interval)\n\t\t}\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\n\t// BEP 0003: No completed is sent if the file was complete when started.\n\tselect {\n\tcase <-a.completedC:\n\t\ta.completedC = nil\n\tdefault:\n\t}\n\n\ta.doAnnounce(ctx, tracker.EventStarted, a.numWant)\n\tfor {\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\tif a.status == Contacting {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ta.doAnnounce(ctx, tracker.EventNone, a.numWant)\n\t\tcase resp := <-a.responseC:\n\t\t\ta.status = Working\n\t\t\ta.seeders = int(resp.Seeders)\n\t\t\ta.leechers = int(resp.Leechers)\n\t\t\ta.warningMsg = resp.WarningMessage\n\t\t\tif a.warningMsg != \"\" {\n\t\t\t\ta.log.Debugln(\"announce warning:\", a.warningMsg)\n\t\t\t}\n\t\t\ta.interval = resp.Interval\n\t\t\tif resp.MinInterval > 0 {\n\t\t\t\ta.minInterval = resp.MinInterval\n\t\t\t}\n\t\t\ta.HasAnnounced = true\n\t\t\ta.lastError = nil\n\t\t\ta.backoff.Reset()\n\t\t\tinterval := a.getNextInterval()\n\t\t\tresetTimer(interval)\n\t\t\tgo func() {\n\t\t\t\tselect {\n\t\t\t\tcase a.newPeers <- resp.Peers:\n\t\t\t\tcase <-a.closeC:\n\t\t\t\t}\n\t\t\t}()\n\t\tcase err := <-a.errC:\n\t\t\ta.status = NotWorking\n\t\t\t// Give more friendly error to the user\n\t\t\ta.lastError = a.newAnnounceError(err)\n\t\t\tif a.lastError.Unknown {\n\t\t\t\ta.log.Errorln(\"announce error:\", a.lastError.ErrorWithType())\n\t\t\t} else {\n\t\t\t\ta.log.Debugln(\"announce error:\", a.lastError.Err.Error())\n\t\t\t}\n\t\t\tinterval := a.getNextIntervalFromError(a.lastError)\n\t\t\tresetTimer(interval)\n\t\tcase <-a.needMorePeersC:\n\t\t\tif a.status == Contacting || a.status == NotWorking 
{\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tinterval := time.Until(a.lastAnnounce.Add(a.getNextInterval()))\n\t\t\tresetTimer(interval)\n\t\tcase <-a.completedC:\n\t\t\tif a.status == Contacting {\n\t\t\t\tcancel()\n\t\t\t\tctx, cancel = context.WithCancel(context.Background())\n\t\t\t}\n\t\t\ta.doAnnounce(ctx, tracker.EventCompleted, 0)\n\t\t\ta.completedC = nil // do not send more than one \"completed\" event\n\t\tcase req := <-a.statsCommandC:\n\t\t\treq.Response <- a.stats()\n\t\tcase <-a.closeC:\n\t\t\tcancel()\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (s *swiper) announce(ctx context.Context, topic, seed []byte) (<-chan bool, <-chan error) {\n\tannounces := make(chan bool)\n\terrs := make(chan error)\n\n\tgo func() {\n\t\tdefer close(announces)\n\t\tdefer close(errs)\n\t\tdefer func() { errs <- io.EOF }()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tperiodAnnounces, periodErrs := s.announceForPeriod(ctx, topic, seed, time.Now())\n\n\t\t\t\tselect {\n\t\t\t\tcase announce := <-periodAnnounces:\n\t\t\t\t\tannounces <- announce\n\t\t\t\t\tbreak\n\n\t\t\t\tcase err := <-periodErrs:\n\t\t\t\t\terrs <- err\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn announces, errs\n}",
"func TestTrackerAnnouncer(t *testing.T) {\n\tcfg := testingConfig()\n\tcfg.DisableTrackers = false\n\tcfg.BaseDir = \".testdata/utopia\"\n\tcl, err := NewClient(cfg)\n\trequire.NoError(t, err)\n\tdt := &dummyTracker{\n\t\tmyAddr: localhost + \":8081\",\n\t\tt: t,\n\t\t//tracker will always respond with the client's ip/port pair\n\t\tpeer: tracker.Peer{\n\t\t\tID: cl.ID(),\n\t\t\tIP: []byte(getOutboundIP().String()),\n\t\t\tPort: uint16(cl.port),\n\t\t},\n\t}\n\tdt.serve()\n\ttr, err := cl.AddFromFile(helloWorldTorrentFile)\n\trequire.NoError(t, err)\n\ttr.mi.Announce = dt.addr()\n\trequire.NoError(t, tr.StartDataTransfer())\n\t//we want to announce multiple times so sleep for a bit\n\ttime.Sleep(4 * time.Second)\n\tdefer cl.Close()\n\t//Assert that we filtered duplicate ip/port pairs\n\t//We should have established only 2 connections (in essence 1 but actually, because we\n\t//have connected to ourselves there are 2 - one becaused we dialed in `client.connectToPeer` and that\n\t//triggered us to accept another one in `client.handleConn`.)\n\tassert.Equal(t, 2, len(tr.conns))\n}",
"func (s *swiper) announceForPeriod(ctx context.Context, topic, seed []byte, t time.Time) (<-chan bool, <-chan error) {\n\tannounces := make(chan bool)\n\terrs := make(chan error)\n\n\troundedTime := roundTimePeriod(t, s.interval)\n\ttopicForTime := generateRendezvousPointForPeriod(topic, seed, roundedTime)\n\n\tnextStart := nextTimePeriod(roundedTime, s.interval)\n\n\tgo func() {\n\t\tctx, cancel := context.WithDeadline(ctx, nextStart)\n\t\tdefer cancel()\n\n\t\tdefer close(errs)\n\t\tdefer close(announces)\n\t\tdefer func() { errs <- io.EOF }()\n\n\t\tfor {\n\t\t\tduration, err := s.tinder.Advertise(ctx, string(topicForTime))\n\t\t\tif err != nil {\n\t\t\t\terrs <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tannounces <- true\n\n\t\t\tif ctx.Err() != nil || time.Now().Add(duration).UnixNano() > nextStart.UnixNano() {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn announces, errs\n}",
"func (o *objStore) EmitEventAnnounce(event *EventAnnounce) {\n\tif event.Type == cluster.EventStopAnnounce {\n\t\treturn\n\t}\n\to.outboundPump <- event\n}",
"func (t *Torrent) GetAnnounceURL() string {\n\tv := url.Values{}\n\tv.Add(\"peer_id\", t.PeerID)\n\tv.Add(\"port\", t.LocalPort[1:]) // TODO: This is a bad solution...\n\tv.Add(\"event\", t.Event)\n\tv.Add(\"info_hash\", t.MetaInfo.InfoHash)\n\t// These are int64s so we have to use FormatInt.\n\tdownloaded := strconv.FormatInt(t.Downloaded, 10)\n\tuploaded := strconv.FormatInt(t.Uploaded, 10)\n\tleft := strconv.FormatInt(t.Left, 10)\n\tv.Add(\"downloaded\", downloaded)\n\tv.Add(\"uploaded\", uploaded)\n\tv.Add(\"left\", left)\n\tv.Add(\"numwant\", strconv.Itoa(5))\n\tv.Add(\"compact\", \"1\") // We will always make compact requests.\n\n\treturn fmt.Sprintf(\"%s?%s\", t.AnnounceURL, v.Encode())\n}",
"func (f *lightFetcher) announce(p *peer, head *announceData) {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\tp.Log().Debug(\"Received new announcement\", \"number\", head.Number, \"hash\", head.Hash, \"reorg\", head.ReorgDepth)\n\n\tfp := f.peers[p]\n\tif fp == nil {\n\t\tp.Log().Debug(\"Announcement from unknown peer\")\n\t\treturn\n\t}\n\n\tif fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {\n\t\t// announced tds should be strictly monotonic\n\t\tp.Log().Debug(\"Received non-monotonic td\", \"current\", head.Td, \"previous\", fp.lastAnnounced.td)\n\t\tgo f.handler.removePeer(p.id)\n\t\treturn\n\t}\n\n\tn := fp.lastAnnounced\n\tfor i := uint64(0); i < head.ReorgDepth; i++ {\n\t\tif n == nil {\n\t\t\tbreak\n\t\t}\n\t\tn = n.parent\n\t}\n\t// n is now the reorg common ancestor, add a new branch of nodes\n\tif n != nil && (head.Number >= n.number+maxNodeCount || head.Number <= n.number) {\n\t\t// if announced head block height is lower or same as n or too far from it to add\n\t\t// intermediate nodes then discard previous announcement info and trigger a resync\n\t\tn = nil\n\t\tfp.nodeCnt = 0\n\t\tfp.nodeByHash = make(map[common.Hash]*fetcherTreeNode)\n\t}\n\t// check if the node count is too high to add new nodes, discard oldest ones if necessary\n\tif n != nil {\n\t\t// n is now the reorg common ancestor, add a new branch of nodes\n\t\t// check if the node count is too high to add new nodes\n\t\tlocked := false\n\t\tfor uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {\n\t\t\tif !locked {\n\t\t\t\tf.chain.LockChain()\n\t\t\t\tdefer f.chain.UnlockChain()\n\t\t\t\tlocked = true\n\t\t\t}\n\t\t\t// if one of root's children is canonical, keep it, delete other branches and root itself\n\t\t\tvar newRoot *fetcherTreeNode\n\t\t\tfor i, nn := range fp.root.children {\n\t\t\t\tif rawdb.ReadCanonicalHash(f.handler.backend.chainDb, nn.number) == nn.hash {\n\t\t\t\t\tfp.root.children = append(fp.root.children[:i], 
fp.root.children[i+1:]...)\n\t\t\t\t\tnn.parent = nil\n\t\t\t\t\tnewRoot = nn\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tfp.deleteNode(fp.root)\n\t\t\tif n == fp.root {\n\t\t\t\tn = newRoot\n\t\t\t}\n\t\t\tfp.root = newRoot\n\t\t\tif newRoot == nil || !f.checkKnownNode(p, newRoot) {\n\t\t\t\tfp.bestConfirmed = nil\n\t\t\t\tfp.confirmedTd = nil\n\t\t\t}\n\n\t\t\tif n == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif n != nil {\n\t\t\tfor n.number < head.Number {\n\t\t\t\tnn := &fetcherTreeNode{number: n.number + 1, parent: n}\n\t\t\t\tn.children = append(n.children, nn)\n\t\t\t\tn = nn\n\t\t\t\tfp.nodeCnt++\n\t\t\t}\n\t\t\tn.hash = head.Hash\n\t\t\tn.td = head.Td\n\t\t\tfp.nodeByHash[n.hash] = n\n\t\t}\n\t}\n\n\tif n == nil {\n\t\t// could not find reorg common ancestor or had to delete entire tree, a new root and a resync is needed\n\t\tif fp.root != nil {\n\t\t\tfp.deleteNode(fp.root)\n\t\t}\n\t\tn = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td}\n\t\tfp.root = n\n\t\tfp.nodeCnt++\n\t\tfp.nodeByHash[n.hash] = n\n\t\tfp.bestConfirmed = nil\n\t\tfp.confirmedTd = nil\n\t}\n\n\tf.checkKnownNode(p, n)\n\tp.lock.Lock()\n\tp.headInfo = head\n\tfp.lastAnnounced = n\n\tp.lock.Unlock()\n\tf.checkUpdateStats(p, nil)\n\tif !f.requestTriggered {\n\t\tf.requestTriggered = true\n\t\tf.requestTrigger <- struct{}{}\n\t}\n}",
"func (herald *Herald) AnnounceSamples() error {\n\therald.Lock()\n\tdefer herald.Unlock()\n\tif herald.announcementQueue.Len() == 0 {\n\t\treturn fmt.Errorf(\"announcement queue is empty\")\n\t}\n\n\t// iterate once over the queue and process all the runs first\n\tfor request := herald.announcementQueue.Front(); request != nil; request = request.Next() {\n\t\tswitch v := request.Value.(type) {\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unexpected type in queue: %T\", v)\n\t\tcase *records.Sample:\n\t\t\tcontinue\n\t\tcase *records.Run:\n\t\t\t// make the service requests\n\t\t\tfor tag, complete := range v.Metadata.GetTags() {\n\n\t\t\t\t// check it's not been completed already\n\t\t\t\tif complete {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// get the service and submit the request\n\t\t\t\tservice := services.ServiceRegister[tag]\n\t\t\t\tif service.CheckAccess() == false {\n\t\t\t\t\treturn fmt.Errorf(\"%v: %v\", ErrServiceOffline, tag)\n\t\t\t\t}\n\t\t\t\tif err := service.SendRequest(v); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// set the status to announced\n\t\t\tv.Metadata.SetStatus(records.Status_announced)\n\n\t\t\t// dequeue the sample\n\t\t\tv.Metadata.AddComment(\"run announced.\")\n\t\t\tv.Metadata.SetStatus(records.Status_announced)\n\t\t\tif err := herald.updateRecord(v); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\therald.announcementQueue.Remove(request)\n\t\t}\n\t}\n\n\t// process the remaining queue (should just be samples now)\n\tfor request := herald.announcementQueue.Front(); request != nil; request = request.Next() {\n\n\t\t// grab the sample that is first in the queue\n\t\tsample := request.Value.(*records.Sample)\n\n\t\t// TODO:\n\t\t// evalute the sample\n\t\t// update fields and propogate to linked data\n\t\t// decide if it should be dequeued\n\n\t\t// make the service requests\n\t\tfor tag, complete := range sample.Metadata.GetTags() {\n\n\t\t\t// check it's not been completed already\n\t\t\tif complete 
{\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// get the service and submit the request\n\t\t\tservice := services.ServiceRegister[tag]\n\t\t\tif service.CheckAccess() == false {\n\t\t\t\treturn fmt.Errorf(\"%v: %v\", ErrServiceOffline, tag)\n\t\t\t}\n\t\t\tif err := service.SendRequest(sample); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// update the status of the sample and dequeue it\n\t\tsample.Metadata.AddComment(\"sample announced.\")\n\t\tsample.Metadata.SetStatus(records.Status_announced)\n\t\tif err := herald.updateRecord(sample); err != nil {\n\t\t\treturn err\n\t\t}\n\t\therald.announcementQueue.Remove(request)\n\t}\n\n\tif herald.announcementQueue.Len() != 0 {\n\t\treturn fmt.Errorf(\"announcements sent but queue still contains %d requests\", herald.announcementQueue.Len())\n\t}\n\treturn nil\n}",
"func AnnounceServeAndAllocate(etcd EtcdContext, srv ServerContext, state *allocator.State, spec memberSpec) {\n\tMust(spec.Validate(), \"member specification validation error\")\n\n\tvar ann = allocator.Announce(etcd.Etcd, state.LocalKey, spec.MarshalString(), etcd.Session.Lease())\n\tMust(state.KS.Load(context.Background(), etcd.Etcd, ann.Revision), \"failed to load KeySpace\")\n\n\t// Register a signal handler which zeros our advertised limit in Etcd.\n\t// Upon seeing this, Allocator will work to discharge all of our assigned\n\t// items, and Allocate will exit gracefully when none remain.\n\tvar signalCh = make(chan os.Signal, 1)\n\tsignal.Notify(signalCh, syscall.SIGTERM, syscall.SIGINT)\n\n\tgo func() {\n\t\tvar sig = <-signalCh\n\t\tlog.WithField(\"signal\", sig).Info(\"caught signal\")\n\n\t\tspec.ZeroLimit()\n\t\tMust(ann.Update(spec.MarshalString()), \"failed to update member announcement\", \"key\", state.LocalKey)\n\t}()\n\n\t// Now that the KeySpace has been loaded, we can begin serving requests.\n\tgo srv.Serve()\n\tgo func() { Must(state.KS.Watch(context.Background(), etcd.Etcd), \"keyspace Watch failed\") }()\n\n\tMust(allocator.Allocate(allocator.AllocateArgs{\n\t\tContext: context.Background(),\n\t\tEtcd: etcd.Etcd,\n\t\tState: state,\n\t}), \"Allocate failed\")\n\n\t// Close our session to remove our member key. If we were leader,\n\t// this notifies peers that we are no longer allocating.\n\tetcd.Session.Close()\n\tsrv.GracefulStop()\n}",
"func (d *Domain) Announce(service string, handler func([]byte, uint64) ([]byte, uint64)) error {\n\tlis, err := net.Listen(\"tcp\", \":8000\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t//\n\t// TODO: Optimize with batching like the client side, with two goroutines\n\t// The define callback can do whatever the user wants, to send the\n\t// request to a worker pool through a channel o handle it directly on the\n\t// goroutine that receive the messages.\n\t//\n\n\tgo func() {\n\t\tfor {\n\t\t\tnetconn, err := lis.Accept()\n\t\t\tif err != nil {\n\t\t\t\tlog.Debug(err)\n\t\t\t}\n\t\t\tlog.Debug(\"Connection accepted from\", netconn.RemoteAddr())\n\n\t\t\tconn := newConn(netconn)\n\t\t\tgo func() {\n\t\t\t\treqs := make([]*Request, 0, maxBatchLen)\n\t\t\t\tfor {\n\t\t\t\t\treqs = reqs[:0:cap(reqs)]\n\t\t\t\t\tfor i := 0; i < maxBatchLen; i++ {\n\t\t\t\t\t\tpayld, seq, err := conn.recv()\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Debug(err)\n\t\t\t\t\t\t\tExit()\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlog.Debug(\"RECV(\", seq, \") \", string(payld))\n\n\t\t\t\t\t\tpayld, seq = handler(payld, seq)\n\n\t\t\t\t\t\treq := newRequest(payld, seq)\n\n\t\t\t\t\t\treqs = append(reqs, req)\n\t\t\t\t\t}\n\n\t\t\t\t\tif err := conn.sendBatch(reqs); err != nil {\n\t\t\t\t\t\tlog.Debug(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}()\n\n\treturn nil\n}",
"func (s *Server) sendJetStreamAPIAuditAdvisory(ci *ClientInfo, acc *Account, subject, request, response string) {\n\ts.publishAdvisory(acc, JSAuditAdvisory, JSAPIAudit{\n\t\tTypedEvent: TypedEvent{\n\t\t\tType: JSAPIAuditType,\n\t\t\tID: nuid.Next(),\n\t\t\tTime: time.Now().UTC(),\n\t\t},\n\t\tServer: s.Name(),\n\t\tClient: ci,\n\t\tSubject: subject,\n\t\tRequest: request,\n\t\tResponse: response,\n\t\tDomain: s.getOpts().JetStreamDomain,\n\t})\n}",
"func (s *PeerStore) AnnouncePeers(infoHash bittorrent.InfoHash, seeder bool, numWant int, announcingPeer bittorrent.Peer) ([]bittorrent.Peer, error) {\n\tselect {\n\tcase <-s.closed:\n\t\tpanic(\"attempted to interact with closed store\")\n\tdefault:\n\t}\n\n\tif announcingPeer.IP.AddressFamily != bittorrent.IPv4 && announcingPeer.IP.AddressFamily != bittorrent.IPv6 {\n\t\treturn nil, ErrInvalidIP\n\t}\n\n\tih := infohash(infoHash)\n\ts0, s1 := deriveEntropyFromRequest(infoHash, announcingPeer)\n\n\tp := &peer{}\n\tp.setPort(announcingPeer.Port)\n\tp.setIP(announcingPeer.IP.To16())\n\treturn s.announceSingleStack(ih, seeder, numWant, p, announcingPeer.IP.AddressFamily, s0, s1)\n}",
"func (c *ChannelAnnouncement) Encode(w io.Writer, pver uint32) error {\n\terr := writeElements(w,\n\t\tc.FirstNodeSig,\n\t\tc.SecondNodeSig,\n\t\tc.ChannelID,\n\t\tc.FirstBitcoinSig,\n\t\tc.SecondBitcoinSig,\n\t\tc.FirstNodeID,\n\t\tc.SecondNodeID,\n\t\tc.FirstBitcoinKey,\n\t\tc.SecondBitcoinKey,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (mr *MockDHTKeeperMockRecorder) AddToAnnounceList(key, repo, objType, announceTime interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddToAnnounceList\", reflect.TypeOf((*MockDHTKeeper)(nil).AddToAnnounceList), key, repo, objType, announceTime)\n}",
"func (consensus *Consensus) constructAnnounceMessage() []byte {\n\tmessage := &msg_pb.Message{\n\t\tServiceType: msg_pb.ServiceType_CONSENSUS,\n\t\tType: msg_pb.MessageType_ANNOUNCE,\n\t\tRequest: &msg_pb.Message_Consensus{\n\t\t\tConsensus: &msg_pb.ConsensusRequest{},\n\t\t},\n\t}\n\tconsensusMsg := message.GetConsensus()\n\tconsensus.populateMessageFields(consensusMsg)\n\tconsensusMsg.Payload = consensus.blockHeader\n\n\tmarshaledMessage, err := consensus.signAndMarshalConsensusMessage(message)\n\tif err != nil {\n\t\tutils.Logger().Error().Err(err).Msg(\"Failed to sign and marshal the Announce message\")\n\t}\n\treturn proto.ConstructConsensusMessage(marshaledMessage)\n}",
"func (a *Client) StartCallAnnouncement(params *StartCallAnnouncementParams, authInfo runtime.ClientAuthInfoWriter) (*StartCallAnnouncementNoContent, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewStartCallAnnouncementParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"startCallAnnouncement\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/calls/{callId}/announcements\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &StartCallAnnouncementReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*StartCallAnnouncementNoContent)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for startCallAnnouncement: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (a *Add) Tracker(announce string, addresses []string, dryRun bool) error {\n\tvar tracker *Tracker\n\tvar probe *Probe\n\tvar reacheableAddresses []string\n\tvar err error\n\n\tlog.Printf(\"Adding Tracker '%s' with addresses '[%s]'\", announce, strings.Join(addresses, \", \"))\n\n\tif tracker, err = NewTracker(announce); err != nil {\n\t\treturn err\n\t}\n\n\tprobe = NewProbe(tracker, a.config.Probe.Timeout)\n\n\tif len(addresses) == 0 {\n\t\tif err = probe.LookupAddresses(); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tprobe.SetAddresses(addresses)\n\t}\n\n\tif reacheableAddresses, err = probe.ReachableAddresses(); err != nil {\n\t\treturn err\n\t}\n\n\tif len(reacheableAddresses) == 0 {\n\t\tlog.Printf(\"[WARN] No reachable addresses found!\")\n\t\ttracker.Addresses = []string{\"0.0.0.0\"}\n\t\ttracker.Status = 99\n\t} else {\n\t\tlog.Printf(\"Reachable addresses: '[%s]'\", strings.Join(reacheableAddresses, \", \"))\n\t\ttracker.Addresses = reacheableAddresses\n\t\ttracker.Status = 3\n\t}\n\n\treturn a.storage.Write([]*Tracker{tracker})\n}",
"func (h *Handler) Alive(w http.ResponseWriter, r *http.Request) {\n\th.srv.Writer().Write(w, r, &healthStatus{\n\t\tStatus: \"ok\",\n\t})\n}",
"func (ntmgr *NotifyMgr) AnnounceNewTransactions(newTxs []*types.TxDesc, filters []peer.ID) {\n\tif len(newTxs) <= 0 {\n\t\treturn\n\t}\n\tfor _, tx := range newTxs {\n\t\tlog.Trace(fmt.Sprintf(\"Announce new transaction :hash=%s height=%d add=%s\", tx.Tx.Hash().String(), tx.Height, tx.Added.String()))\n\t}\n\t// reply to p2p\n\tfor _, tx := range newTxs {\n\t\tntmgr.RelayInventory(tx, filters)\n\t}\n\n\tif ntmgr.RpcServer != nil {\n\t\tntmgr.RpcServer.NotifyNewTransactions(newTxs)\n\t}\n}",
"func (r *Redis) AnnounceDeath(node string) error {\n\treturn r.c.Publish(string(keys.NodeDeaths), node).Err()\n}",
"func (s *Server) announceText(service *ServiceEntry) {\n\tresp := new(dns.Msg)\n\tresp.MsgHdr.Response = true\n\n\ttxt := &dns.TXT{\n\t\tHdr: dns.RR_Header{\n\t\t\tName: service.ServiceInstanceName(),\n\t\t\tRrtype: dns.TypeTXT,\n\t\t\tClass: dns.ClassINET | 1<<15,\n\t\t\tTtl: s.ttl,\n\t\t},\n\t\tTxt: service.Text,\n\t}\n\n\tresp.Answer = []dns.RR{txt}\n\ts.multicastResponse(resp)\n}",
"func (buf *Buffer) ACK(leased *Batch) {\n\tbuf.removeLease(leased)\n}",
"func (o *objStore) ReceiveEventAnnounce(event *EventAnnounce) {\n\tif event.Type == cluster.EventStopAnnounce {\n\t\treturn\n\t}\n\to.inboundPump <- event\n}",
"func New(\n\tprotocolID string,\n\tbroadcastChannel net.BroadcastChannel,\n\tmembershipValidator *group.MembershipValidator,\n) *Announcer {\n\tbroadcastChannel.SetUnmarshaler(func() net.TaggedUnmarshaler {\n\t\treturn &announcementMessage{}\n\t})\n\n\treturn &Announcer{\n\t\tprotocolID: protocolID,\n\t\tbroadcastChannel: broadcastChannel,\n\t\tmembershipValidator: membershipValidator,\n\t}\n}",
"func (mr *MockWatcherMockRecorder) AnnounceInterval() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AnnounceInterval\", reflect.TypeOf((*MockWatcher)(nil).AnnounceInterval))\n}",
"func (p2p *P2P) Advertise(service string) {\n\t// Advertise the availabilty of the service on this node\n\tttl, err := p2p.Discovery.Advertise(p2p.Ctx, service)\n\tif err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Fatalln(\"Failed to Announce Service CID!\")\n\t}\n\t// Debug log\n\tlogrus.Debugln(\"Advertised:\", service)\n\tlogrus.Debugln(\"TTL:\", ttl)\n}",
"func (ann *announcer) Run(args interface{}, shutdown <-chan struct{}) {\n\n\tlog := ann.log\n\n\tlog.Info(\"starting…\")\n\n\tdelay := time.After(announceInitial)\nloop:\n\tfor {\n\t\tlog.Info(\"waiting…\")\n\t\tselect {\n\t\tcase <-shutdown:\n\t\t\tbreak loop\n\t\tcase <-delay:\n\t\t\tdelay = time.After(announceInterval)\n\t\t\tann.process()\n\t\t}\n\t}\n}",
"func (cli *OpsGenieAlertV2Client) Acknowledge(req alertsv2.AcknowledgeRequest) (*AsyncRequestResponse, error) {\n\treturn cli.sendAsyncPostRequest(&req)\n}",
"func (t *BeatTracker) Do(input *SimpleBuffer) {\n\tif t.o == nil {\n\t\treturn\n\t}\n\tC.aubio_beattracking_do(t.o, input.vec, t.buf.vec)\n}",
"func (s *BaseSpeaker) DecideAnnouncement(ruleMatrix rules.RuleMatrix, result bool) shared.SpeakerReturnContent {\n\treturn shared.SpeakerReturnContent{\n\t\tContentType: shared.SpeakerAnnouncement,\n\t\tRuleMatrix: ruleMatrix,\n\t\tVotingResult: result,\n\t\tActionTaken: true,\n\t}\n}",
"func NewPeriodicalAnnouncer(trk tracker.Tracker, numWant int, minInterval time.Duration, getTorrent func() tracker.Torrent, completedC chan struct{}, newPeers chan []*net.TCPAddr, l logger.Logger) *PeriodicalAnnouncer {\n\treturn &PeriodicalAnnouncer{\n\t\tTracker: trk,\n\t\tstatus: NotContactedYet,\n\t\tstatsCommandC: make(chan statsRequest),\n\t\tnumWant: numWant,\n\t\tminInterval: minInterval,\n\t\tlog: l,\n\t\tcompletedC: completedC,\n\t\tnewPeers: newPeers,\n\t\tgetTorrent: getTorrent,\n\t\tneedMorePeersC: make(chan struct{}, 1),\n\t\tresponseC: make(chan *tracker.AnnounceResponse),\n\t\terrC: make(chan error),\n\t\tcloseC: make(chan struct{}),\n\t\tdoneC: make(chan struct{}),\n\t\tbackoff: &backoff.ExponentialBackOff{\n\t\t\tInitialInterval: 5 * time.Second,\n\t\t\tRandomizationFactor: 0.5,\n\t\t\tMultiplier: 2,\n\t\t\tMaxInterval: 30 * time.Minute,\n\t\t\tMaxElapsedTime: 0, // never stop\n\t\t\tClock: backoff.SystemClock,\n\t\t},\n\t}\n}",
"func (c *localClient) announcementPkt(instanceID int64, msg []byte) ([]byte, bool) {\n\taddrs := c.addrList.AllAddresses()\n\tif len(addrs) == 0 {\n\t\t// Nothing to announce\n\t\treturn msg, false\n\t}\n\n\tif cap(msg) >= 4 {\n\t\tmsg = msg[:4]\n\t} else {\n\t\tmsg = make([]byte, 4)\n\t}\n\tbinary.BigEndian.PutUint32(msg, Magic)\n\n\tpkt := Announce{\n\t\tID: c.myID,\n\t\tAddresses: addrs,\n\t\tInstanceID: instanceID,\n\t}\n\tbs, _ := pkt.Marshal()\n\tmsg = append(msg, bs...)\n\n\treturn msg, true\n}",
"func (r *Relay) HostAnnouncement(pubkey hostdb.HostPublicKey) ([]byte, bool) {\n\tr.mu.Lock()\n\tann, ok := r.hosts[pubkey]\n\tr.mu.Unlock()\n\treturn ann, ok\n}",
"func (m MarkerID) Acknowledge(o *Operation, gid GoogleID) error {\n\tvar ns sql.NullString\n\terr := db.QueryRow(\"SELECT gid FROM marker WHERE ID = ? and opID = ?\", m, o.ID).Scan(&ns)\n\tif err != nil && err != sql.ErrNoRows {\n\t\tLog.Notice(err)\n\t\treturn err\n\t}\n\tif err != nil && err == sql.ErrNoRows {\n\t\terr = fmt.Errorf(\"no such marker\")\n\t\tLog.Error(err)\n\t\treturn err\n\t}\n\tif !ns.Valid {\n\t\terr = fmt.Errorf(\"marker not assigned\")\n\t\tLog.Error(err)\n\t\treturn err\n\t}\n\tmarkerGid := GoogleID(ns.String)\n\tif gid != markerGid {\n\t\terr = fmt.Errorf(\"marker assigned to someone else\")\n\t\tLog.Error(err)\n\t\treturn err\n\t}\n\t_, err = db.Exec(\"UPDATE marker SET state = ? WHERE ID = ? AND opID = ?\", \"acknowledged\", m, o.ID)\n\tif err != nil {\n\t\tLog.Error(err)\n\t\treturn err\n\t}\n\tif err = o.Touch(); err != nil {\n\t\tLog.Error(err)\n\t}\n\n\to.firebaseMarkerStatus(m, \"acknowledged\")\n\treturn nil\n}",
"func (m *MysqlDriver) AddAnnouncement(announcement *model.Announcement) error {\n\tvar (\n\t\terr error\n\t\taffected int64\n\t)\n\tif announcement.ClassId == 0 {\n\t\taffected, err = m.conn.Omit(\"class_id\").Insert(announcement)\n\t} else {\n\t\taffected, err = m.conn.Insert(announcement)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tif affected == 0 {\n\t\treturn ErrNoRowsAffected\n\t}\n\treturn nil\n}",
"func NewAnnouncer(relay *Relay, wg sync.WaitGroup) (Announcer, error) {\n\tbusConfig := relay.Config.Cog\n\tannouncer := &relayAnnouncer{\n\t\tid: relay.Config.ID,\n\t\treceiptTopic: fmt.Sprintf(\"bot/relays/%s/announcer\", relay.Config.ID),\n\t\trelay: relay,\n\t\toptions: mqtt.NewClientOptions(),\n\t\tstate: relayAnnouncerStoppedState,\n\t\tcontrol: make(chan relayAnnouncerCommand, 2),\n\t\tcoordinator: wg,\n\t\tannouncementPending: false,\n\t}\n\tannouncer.options.SetAutoReconnect(false)\n\tannouncer.options.SetKeepAlive(time.Duration(60) * time.Second)\n\tannouncer.options.SetPingTimeout(time.Duration(15) * time.Second)\n\tannouncer.options.SetCleanSession(true)\n\tannouncer.options.SetClientID(fmt.Sprintf(\"%s-a\", announcer.id))\n\tannouncer.options.SetUsername(announcer.id)\n\tannouncer.options.SetPassword(busConfig.Token)\n\tannouncer.options.AddBroker(busConfig.URL())\n\tif busConfig.SSLEnabled == true {\n\t\tannouncer.options.TLSConfig = tls.Config{\n\t\t\tServerName: busConfig.Host,\n\t\t\tSessionTicketsDisabled: true,\n\t\t\tInsecureSkipVerify: false,\n\t\t}\n\t}\n\n\treturn announcer, nil\n}",
"func NewWeaviatePeersAnnounceOK() *WeaviatePeersAnnounceOK {\n\treturn &WeaviatePeersAnnounceOK{}\n}",
"func (s *speaker) DecideAnnouncement(ruleMatrix rules.RuleMatrix, result bool) shared.SpeakerReturnContent {\n\t//(there are more important things to do) or (there is no result to announce)\n\tif s.getSpeakerBudget() < s.getHigherPriorityActionsCost(\"AnnounceVotingResult\") || ruleMatrix.RuleMatrixIsEmpty() {\n\t\treturn shared.SpeakerReturnContent{\n\t\t\tActionTaken: false,\n\t\t}\n\t}\n\treturn shared.SpeakerReturnContent{\n\t\tContentType: shared.SpeakerAnnouncement,\n\t\tRuleMatrix: ruleMatrix,\n\t\tVotingResult: result,\n\t\tActionTaken: true,\n\t}\n}",
"func (mr *MockDHTKeeperMockRecorder) IterateAnnounceList(it interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"IterateAnnounceList\", reflect.TypeOf((*MockDHTKeeper)(nil).IterateAnnounceList), it)\n}",
"func (ec *executionContext) _Announcement(ctx context.Context, sel []query.Selection, obj *model.Announcement) graphql.Marshaler {\n\tfields := graphql.CollectFields(ec.Doc, sel, announcementImplementors, ec.Variables)\n\n\tout := graphql.NewOrderedMap(len(fields))\n\tfor i, field := range fields {\n\t\tout.Keys[i] = field.Alias\n\n\t\tswitch field.Name {\n\t\tcase \"__typename\":\n\t\t\tout.Values[i] = graphql.MarshalString(\"Announcement\")\n\t\tcase \"id\":\n\t\t\tout.Values[i] = ec._Announcement_id(ctx, field, obj)\n\t\tcase \"user\":\n\t\t\tout.Values[i] = ec._Announcement_user(ctx, field, obj)\n\t\tcase \"card\":\n\t\t\tout.Values[i] = ec._Announcement_card(ctx, field, obj)\n\t\tcase \"message\":\n\t\t\tout.Values[i] = ec._Announcement_message(ctx, field, obj)\n\t\tcase \"createdAt\":\n\t\t\tout.Values[i] = ec._Announcement_createdAt(ctx, field, obj)\n\t\tcase \"updatedAt\":\n\t\t\tout.Values[i] = ec._Announcement_updatedAt(ctx, field, obj)\n\t\tdefault:\n\t\t\tpanic(\"unknown field \" + strconv.Quote(field.Name))\n\t\t}\n\t}\n\n\treturn out\n}",
"func (s *EventsService) Acknowledge(event *Event) (*EventResponse, error) {\n\treturn s.postEvent(event, EventTypeAcknowledge)\n}",
"func (bft *ProtocolBFTCoSi) handleAnnouncement(msg announceChan) error {\n\tann := msg.Announce\n\tif bft.isClosing() {\n\t\tlog.Lvl3(\"Closing\")\n\t\treturn nil\n\t}\n\tif bft.IsLeaf() {\n\t\tbft.Timeout = ann.Timeout\n\t\treturn bft.startCommitment(ann.TYPE)\n\t}\n\treturn bft.sendToChildren(&ann)\n}",
"func Alive(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, \"alive\")\n}",
"func (b *Builder) PublishActivationTx(ctx context.Context) error {\n\tb.discardChallengeIfStale()\n\tif b.challenge != nil {\n\t\tb.log.With().Info(\"using existing atx challenge\", b.currentEpoch())\n\t} else {\n\t\tb.log.With().Info(\"building new atx challenge\", b.currentEpoch())\n\t\terr := b.buildNipstChallenge(ctx)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to build new atx challenge: %w\", err)\n\t\t}\n\t}\n\n\tif b.pendingATX == nil {\n\t\tvar err error\n\t\tb.pendingATX, err = b.createAtx(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tatx := b.pendingATX\n\tatxReceived := b.db.AwaitAtx(atx.ID())\n\tdefer b.db.UnsubscribeAtx(atx.ID())\n\tsize, err := b.signAndBroadcast(ctx, atx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb.log.Event().Info(\"atx published\", atx.Fields(size)...)\n\tevents.ReportAtxCreated(true, uint64(b.currentEpoch()), atx.ShortString())\n\n\tselect {\n\tcase <-atxReceived:\n\t\tb.log.With().Info(\"received atx in db\", atx.ID())\n\tcase <-b.layerClock.AwaitLayer((atx.TargetEpoch() + 1).FirstLayer()):\n\t\tsyncedCh := make(chan struct{})\n\t\tb.syncer.RegisterChForSynced(ctx, syncedCh)\n\t\tselect {\n\t\tcase <-atxReceived:\n\t\t\tb.log.With().Info(\"received atx in db (in the last moment)\", atx.ID())\n\t\tcase <-syncedCh: // ensure we've seen all blocks before concluding that the ATX was lost\n\t\t\tb.discardChallenge()\n\t\t\treturn fmt.Errorf(\"%w: target epoch has passed\", ErrATXChallengeExpired)\n\t\tcase <-ctx.Done():\n\t\t\treturn ErrStopRequested\n\t\t}\n\tcase <-ctx.Done():\n\t\treturn ErrStopRequested\n\t}\n\tb.discardChallenge()\n\treturn nil\n}",
"func (p *protocol) Acknowledge(nonce *string, sequence uint32) error {\n\tlog.Debugf(\"[R %s > %s] Sending acknowledgement for nonce %x with sequence %d\", p.conn.LocalAddr().String(), p.conn.RemoteAddr().String(), *nonce, sequence)\n\treturn p.conn.SendMessage(&protocolACKN{nonce: nonce, sequence: sequence})\n}",
"func (o *InlineResponse20014Projects) SetShowAnnouncement(v bool) {\n\to.ShowAnnouncement = &v\n}",
"func notify(ctx context.Context, report string) error {\n\t_, err := sendRequest(ctx, \"POST\", notifyAddr, report)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (p *DiscoveryProtocol) onNotify() {\n\tlog.Println(\" pending requests: \", p.pendingReq)\n\tfor req := range p.pendingReq {\n\t\tif !p.requestExpired(req) {\n\t\t\tlog.Println(\"Request not expired, trying to send response\")\n\t\t\tif p.createSendResponse(req) {\n\t\t\t\tdelete(p.pendingReq, req)\n\t\t\t}\n\t\t}\n\t}\n}",
"func Notify(err interface{}, req *http.Request) error {\n\tif Airbrake != nil {\n\t\treturn Airbrake.Notify(err, req)\n\t}\n\tlog.Printf(\"[AIRBRAKE] %v\", err)\n\treturn nil\n}",
"func (coord *Coordinator) AcknowledgePacket(\n\tsource, counterparty *TestChain,\n\tcounterpartyClient string,\n\tpacket packettypes.Packet, ack []byte,\n) error {\n\t// get proof of acknowledgement on counterparty\n\tpacketKey := host.PacketAcknowledgementKey(packet.GetSourceChain(), packet.GetDestChain(), packet.GetSequence())\n\tproof, proofHeight := counterparty.QueryProof(packetKey)\n\n\t// Increment time and commit block so that 5 second delay period passes between send and receive\n\tcoord.IncrementTime()\n\tcoord.CommitBlock(source, counterparty)\n\n\tackMsg := packettypes.NewMsgAcknowledgement(packet, ack, proof, proofHeight, source.SenderAccount.GetAddress())\n\treturn coord.SendMsgs(source, counterparty, counterpartyClient, []sdk.Msg{ackMsg})\n}",
"func (o *InlineResponse20014Projects) SetAnnouncement(v string) {\n\to.Announcement = &v\n}",
"func (q *Queue) ACK(n uint) error {\n\treturn q.getAcker().handle(n)\n}",
"func (as *ASService) OnAddrRequest(ctx context.Context, req *requests.AddrRequest) {\n\tif r := as.sys.Cache().AddrSearch(req.Address); r != nil {\n\t\tas.Graph.InsertInfrastructure(r.ASN, r.Description, r.Address, r.Prefix, r.Source, r.Tag, as.uuid)\n\t\treturn\n\t}\n\n\tfor _, src := range as.sys.DataSources() {\n\t\tsrc.ASNRequest(ctx, &requests.ASNRequest{Address: req.Address})\n\t}\n\n\tfor i := 0; i < 30; i++ {\n\t\tif as.sys.Cache().AddrSearch(req.Address) != nil {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n\n\tif r := as.sys.Cache().AddrSearch(req.Address); r != nil && as.Graph != nil && as.uuid != \"\" {\n\t\tgo as.Graph.InsertInfrastructure(r.ASN, r.Description, r.Address, r.Prefix, r.Source, r.Tag, as.uuid)\n\t}\n}",
"func (n *Node) AnnounceSignerPresence(\n\tctx context.Context,\n\toperatorPublicKey *operator.PublicKey,\n\tkeepAddress common.Address,\n\tkeepMembersAddresses []common.Address,\n) ([]tss.MemberID, error) {\n\tbroadcastChannel, err := n.networkProvider.BroadcastChannelFor(keepAddress.Hex())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to initialize broadcast channel: [%v]\", err)\n\t}\n\n\ttss.RegisterUnmarshalers(broadcastChannel)\n\n\tif err := broadcastChannel.SetFilter(\n\t\tcreateAddressFilter(keepMembersAddresses),\n\t); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to set broadcast channel filter: [%v]\", err)\n\t}\n\n\t// TODO: Use generic type for representing address in node.go instead of\n\t// common.Address from go-ethereum. The current implementation is not\n\t// host-chain implementation agnostic.\n\t// To do not propagate ethereum-specific types any further, we convert\n\t// addresses to strings before passing them to AnnounceProtocol.\n\tkeepAddressString := keepAddress.Hex()\n\tkeepMembersAddressesStrings := make([]string, len(keepMembersAddresses))\n\tfor i, address := range keepMembersAddresses {\n\t\tkeepMembersAddressesStrings[i] = address.Hex()\n\t}\n\treturn tss.AnnounceProtocol(\n\t\tctx,\n\t\toperatorPublicKey,\n\t\tkeepAddressString,\n\t\tkeepMembersAddressesStrings,\n\t\tbroadcastChannel,\n\t\tn.ethereumChain.Signing().PublicKeyToAddress,\n\t)\n}",
"func (s *Server) Article(ctx context.Context, in *pb.ArticleRequest) (*pb.ArticleReply, error) {\n\ta, err := s.db.Get(ctx, in.Id)\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, fmt.Sprintf(\"failed to get article: %v\", err))\n\t}\n\treturn &pb.ArticleReply{Article: a}, nil\n}",
"func (auo *AnnouncementUpdateOne) Save(ctx context.Context) (*Announcement, error) {\n\tif v, ok := auo.mutation.Title(); ok {\n\t\tif err := announcement.TitleValidator(v); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ent: validator failed for field \\\"title\\\": %v\", err)\n\t\t}\n\t}\n\tif v, ok := auo.mutation.Description(); ok {\n\t\tif err := announcement.DescriptionValidator(v); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ent: validator failed for field \\\"description\\\": %v\", err)\n\t\t}\n\t}\n\tif v, ok := auo.mutation.Time(); ok {\n\t\tif err := announcement.TimeValidator(v); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ent: validator failed for field \\\"time\\\": %v\", err)\n\t\t}\n\t}\n\n\tvar (\n\t\terr error\n\t\tnode *Announcement\n\t)\n\tif len(auo.hooks) == 0 {\n\t\tnode, err = auo.sqlSave(ctx)\n\t} else {\n\t\tvar mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {\n\t\t\tmutation, ok := m.(*AnnouncementMutation)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected mutation type %T\", m)\n\t\t\t}\n\t\t\tauo.mutation = mutation\n\t\t\tnode, err = auo.sqlSave(ctx)\n\t\t\treturn node, err\n\t\t})\n\t\tfor i := len(auo.hooks) - 1; i >= 0; i-- {\n\t\t\tmut = auo.hooks[i](mut)\n\t\t}\n\t\tif _, err := mut.Mutate(ctx, auo.mutation); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn node, err\n}",
"func (cli *FakeDatabaseClient) BounceListener(ctx context.Context, in *dbdpb.BounceListenerRequest, opts ...grpc.CallOption) (*dbdpb.BounceListenerResponse, error) {\n\tpanic(\"implement me\")\n}",
"func (a acknowledger) Ack(multiple bool) (err error) {\n\tastilog.Debugf(\"astiamqp: ack %v with multiple %v\", a.deliveryTag, multiple)\n\tif err = a.acknowledger.Ack(a.deliveryTag, multiple); err != nil {\n\t\terr = errors.Wrapf(err, \"astiamqp: ack %v with multiple %v failed\", a.deliveryTag, multiple)\n\t\treturn\n\t}\n\treturn\n}",
"func (pipeline *ReceivePipeline) Enqueue(response announced.Response) {\n\tpipeline.head <- response\n}",
"func (s *raftServer) sendHeartBeat() {\n\tae := &AppendEntry{Term: s.Term(), LeaderId: s.server.Pid()}\n\tae.LeaderCommit = s.commitIndex.Get()\n\te := &cluster.Envelope{Pid: cluster.BROADCAST, Msg: ae}\n\ts.server.Outbox() <- e\n}",
"func (c *Chain) advertiseBlock(b block.Block) error {\n\tmsg := &peermsg.Inv{}\n\tmsg.AddItem(peermsg.InvTypeBlock, b.Header.Hash)\n\n\tbuf := new(bytes.Buffer)\n\tif err := msg.Encode(buf); err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tif err := topics.Prepend(buf, topics.Inv); err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tm := message.New(topics.Inv, *buf)\n\tc.eventBus.Publish(topics.Gossip, m)\n\treturn nil\n}",
"func (nh *NodeHost) RequestAddObserver(clusterID uint64,\n\tnodeID uint64, address string, configChangeIndex uint64,\n\ttimeout time.Duration) (*RequestState, error) {\n\tv, ok := nh.getCluster(clusterID)\n\tif !ok {\n\t\treturn nil, ErrClusterNotFound\n\t}\n\treq, err := v.requestAddObserverWithOrderID(nodeID,\n\t\taddress, configChangeIndex, timeout)\n\tnh.execEngine.setNodeReady(clusterID)\n\treturn req, err\n}",
"func (m *MockDHTKeeper) AddToAnnounceList(key []byte, repo string, objType int, announceTime int64) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AddToAnnounceList\", key, repo, objType, announceTime)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}",
"func (c *client) Activate(u *model.User, r *model.Repo, link string) error {\n\tconfig := map[string]string{\n\t\t\"url\": link,\n\t\t\"secret\": r.Hash,\n\t\t\"content_type\": \"json\",\n\t}\n\thook := gitea.CreateHookOption{\n\t\tType: \"gitea\",\n\t\tConfig: config,\n\t\tEvents: []string{\"push\", \"create\", \"pull_request\"},\n\t\tActive: true,\n\t}\n\n\tclient := c.newClientToken(u.Token)\n\t_, err := client.CreateRepoHook(r.Owner, r.Name, hook)\n\treturn err\n}",
"func (incident *Incident) Send(cfg *CachetMonitor) error {\n\tswitch incident.Status {\n\t\tcase 1, 2, 3:\n\t\t\t// partial outage\n\t\t\tincident.ComponentStatus = 3\n\n\t\t\tcompInfo := cfg.API.GetComponentData(incident.ComponentID)\n\t\t\tif compInfo.Status == 3 {\n\t\t\t\t// major outage\n\t\t\t\tincident.ComponentStatus = 4\n\t\t\t}\n\t\tcase 4:\n\t\t\t// fixed\n\t\t\tincident.ComponentStatus = 1\n\t}\n\n\trequestType := \"POST\"\n\trequestURL := \"/incidents\"\n\tif incident.ID > 0 {\n\t\trequestType = \"PUT\"\n\t\trequestURL += \"/\" + strconv.Itoa(incident.ID)\n\t}\n\n\tjsonBytes, _ := json.Marshal(incident)\n\n\tresp, body, err := cfg.API.NewRequest(requestType, requestURL, jsonBytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar data struct {\n\t\tID int `json:\"id\"`\n\t}\n\tif err := json.Unmarshal(body.Data, &data); err != nil {\n\t\treturn fmt.Errorf(\"Cannot parse incident body: %v, %v\", err, string(body.Data))\n\t}\n\n\tincident.ID = data.ID\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"Could not create/update incident!\")\n\t}\n\n\treturn nil\n}",
"func (p *PacketCryptAnn) GetAnnounceHeader() []byte {\n\treturn p.Header[:PcAnnHeaderLen]\n}",
"func (au *AnnouncementUpdate) Exec(ctx context.Context) error {\n\t_, err := au.Save(ctx)\n\treturn err\n}",
"func (r *SubscriptionsService) Acknowledge(acknowledgerequest *AcknowledgeRequest) *SubscriptionsAcknowledgeCall {\n\tc := &SubscriptionsAcknowledgeCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.acknowledgerequest = acknowledgerequest\n\treturn c\n}",
"func (u *Umbrella) OnASNRequest(ctx context.Context, req *requests.ASNRequest) {\n\tbus := ctx.Value(requests.ContextEventBus).(*eventbus.EventBus)\n\tif bus == nil {\n\t\treturn\n\t}\n\n\tif u.API == nil || u.API.Key == \"\" {\n\t\treturn\n\t}\n\n\tif req.Address == \"\" && req.ASN == 0 {\n\t\treturn\n\t}\n\n\tu.CheckRateLimit()\n\tbus.Publish(requests.SetActiveTopic, u.String())\n\n\tif req.Address != \"\" {\n\t\tu.executeASNAddrQuery(ctx, req)\n\t\treturn\n\t}\n\n\tu.executeASNQuery(ctx, req)\n}",
"func (a *Announcement) Dial(p *pool.Pool) (*grpc.ClientConn, error) {\n\tif p == nil {\n\t\tp = pool.Global\n\t}\n\tif a.NetAddress == \"\" {\n\t\treturn nil, errors.New(\"No address known for this component\")\n\t}\n\tnetAddress := strings.Split(a.NetAddress, \",\")[0]\n\tif a.Certificate == \"\" {\n\t\treturn p.DialInsecure(netAddress)\n\t}\n\ttlsConfig, _ := a.TLSConfig()\n\treturn p.DialSecure(netAddress, credentials.NewTLS(tlsConfig))\n}",
"func solicit(w dhcp6server.ResponseSender, r *dhcp6server.Request) {\n\tw.Send(dhcp6.MessageTypeAdvertise)\n}",
"func (o *os) RequestAttention() {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.RequestAttention()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"request_attention\")\n\n\t// Call the parent method.\n\t// void\n\tretPtr := gdnative.NewEmptyVoid()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n}",
"func (s *server) Approve(ctx context.Context, request *event.ApproveParam) (*event.Response, error) {\n\treturn &event.Response{Status: int32(200), Message: string(\"Approve\"), Data: []*event.Deposit{}}, nil\n}",
"func (info *Info) Publish(aconf *config.AmqpConfig) error {\n\tblob, err := info.Marshal()\n\tif err != nil {\n\t\treturn err\n\t}\n\tmsg := amqp.Publishing{\n\t\tDeliveryMode: amqp.Persistent,\n\t\tTimestamp: time.Now(),\n\t\tContentType: \"application/json\",\n\t\tBody: blob,\n\t}\n\tconn, err := Connect(aconf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\tch, err := conn.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer ch.Close()\n\tif err := ch.ExchangeDeclare(aconf.ExchangeName(), amqp.ExchangeFanout, true, false, false, false, nil); err != nil {\n\t\treturn err\n\t}\n\tif err := ch.Confirm(false); err != nil {\n\t\treturn err\n\t}\n\tnotify := ch.NotifyPublish(make(chan amqp.Confirmation, 1))\n\tif err := ch.Publish(aconf.ExchangeName(), aconf.RoutingKey(), false, false, msg); err != nil {\n\t\treturn err\n\t}\n\tconfirm := <-notify\n\tif !confirm.Ack {\n\t\treturn errors.New(\"message was NACKed\")\n\t}\n\treturn nil\n}"
] | [
"0.7591315",
"0.7542612",
"0.7309203",
"0.73045576",
"0.70813364",
"0.69591624",
"0.6934339",
"0.6784179",
"0.6711855",
"0.6598236",
"0.62005067",
"0.62001413",
"0.6174527",
"0.61571336",
"0.60925037",
"0.6053757",
"0.59826213",
"0.58398366",
"0.58314586",
"0.5807263",
"0.5769014",
"0.5679035",
"0.5677883",
"0.55432546",
"0.5483456",
"0.54690355",
"0.545733",
"0.5435622",
"0.54049134",
"0.5387384",
"0.53676146",
"0.52061415",
"0.5154094",
"0.51468045",
"0.51374036",
"0.51116014",
"0.50264746",
"0.49962726",
"0.4955779",
"0.4921116",
"0.48973036",
"0.48640665",
"0.4816884",
"0.47856435",
"0.47344035",
"0.46511728",
"0.4649638",
"0.45649105",
"0.4529858",
"0.45230997",
"0.44898707",
"0.44857168",
"0.44742575",
"0.4470449",
"0.4444967",
"0.44409084",
"0.44406727",
"0.44277054",
"0.44209912",
"0.4402811",
"0.44026414",
"0.43855524",
"0.43716735",
"0.43627167",
"0.4339651",
"0.43348467",
"0.42505905",
"0.42419755",
"0.42277637",
"0.4218835",
"0.42109603",
"0.42108628",
"0.42056227",
"0.4203666",
"0.41895732",
"0.4174102",
"0.41655934",
"0.4159386",
"0.41435334",
"0.41410056",
"0.41400093",
"0.41321713",
"0.41290787",
"0.40966254",
"0.40866238",
"0.40839007",
"0.40696397",
"0.40559402",
"0.40524468",
"0.40452683",
"0.40382823",
"0.40360516",
"0.40313375",
"0.40218168",
"0.4020831",
"0.39836273",
"0.39834684",
"0.39767784",
"0.39757335",
"0.39729798"
] | 0.7839598 | 0 |
build state function that will check if a job responsible for building function image succeeded or failed; if a job is not running start one | Постройте функцию состояния, которая проверит, успешно ли завершил работу задача, отвечающая за построение образа функции; если задача не запущена, запустите её | func buildStateFnCheckImageJob(expectedJob batchv1.Job) stateFn {
return func(ctx context.Context, r *reconciler, s *systemState) (stateFn, error) {
labels := s.internalFunctionLabels()
err := r.client.ListByLabel(ctx, s.instance.GetNamespace(), labels, &s.jobs)
if err != nil {
return nil, errors.Wrap(err, "while listing jobs")
}
jobLen := len(s.jobs.Items)
if jobLen == 0 {
return buildStateFnInlineCreateJob(expectedJob), nil
}
jobFailed := s.jobFailed(backoffLimitExceeded)
conditionStatus := getConditionStatus(
s.instance.Status.Conditions,
serverlessv1alpha2.ConditionBuildReady,
)
if jobFailed && conditionStatus == corev1.ConditionFalse {
return stateFnInlineDeleteJobs, nil
}
if jobFailed {
r.result = ctrl.Result{
RequeueAfter: time.Minute * 5,
Requeue: true,
}
condition := serverlessv1alpha2.Condition{
Type: serverlessv1alpha2.ConditionBuildReady,
Status: corev1.ConditionFalse,
LastTransitionTime: metav1.Now(),
Reason: serverlessv1alpha2.ConditionReasonJobFailed,
Message: fmt.Sprintf("Job %s failed, it will be re-run", s.jobs.Items[0].Name),
}
return buildStatusUpdateStateFnWithCondition(condition), nil
}
s.image = s.buildImageAddress(r.cfg.docker.PullAddress)
jobChanged := s.fnJobChanged(expectedJob)
if !jobChanged {
return stateFnCheckDeployments, nil
}
if jobLen > 1 || !equalJobs(s.jobs.Items[0], expectedJob) {
return stateFnInlineDeleteJobs, nil
}
expectedLabels := expectedJob.GetLabels()
if !mapsEqual(s.jobs.Items[0].GetLabels(), expectedLabels) {
return buildStateFnInlineUpdateJobLabels(expectedLabels), nil
}
return stateFnUpdateJobStatus, nil
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (st *buildStatus) start() {\n\tsetStatus(st.BuilderRev, st)\n\tgo func() {\n\t\terr := st.build()\n\t\tif err == errSkipBuildDueToDeps {\n\t\t\tst.setDone(true)\n\t\t} else {\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(st, \"\\n\\nError: %v\\n\", err)\n\t\t\t\tlog.Println(st.BuilderRev, \"failed:\", err)\n\t\t\t}\n\t\t\tst.setDone(err == nil)\n\t\t\tpool.CoordinatorProcess().PutBuildRecord(st.buildRecord())\n\t\t}\n\t\tmarkDone(st.BuilderRev)\n\t}()\n}",
"func checkForJobSuccess(org, repo string, targetBuildNum int, client *circleci.Client) (err error) {\n\tcheckAttempts := 0\n\tcheckLimit := 60\n\tcheckInterval := 5 * time.Second\n\tlogger.Infof(\"Polling CircleCI for status of build: %d\", targetBuildNum)\n\tfor {\n\t\tvar build *circleci.Build\n\t\tif build, err = client.GetBuild(org, repo, targetBuildNum); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif build.Status == \"success\" {\n\t\t\tlogger.Infof(\"Detected success of CircleCI build: %d\", targetBuildNum)\n\t\t\tbreak\n\t\t} else if build.Status == \"failed\" {\n\t\t\treturn fmt.Errorf(\"CircleCI job: %d has failed\", targetBuildNum)\n\t\t}\n\t\tcheckAttempts++\n\t\tif checkAttempts == checkLimit {\n\t\t\treturn fmt.Errorf(\"Unable to verify CircleCI job was a success: https://circleci.com/gh/%s/%s/%d\",\n\t\t\t\torg, repo, targetBuildNum)\n\t\t}\n\t\ttime.Sleep(checkInterval)\n\t}\n\treturn\n}",
"func tryBuild(funcBinary string, runtime string, template string, builderImg string) error {\n\tscript := fmt.Sprintf(`set -ex\ncd $(mktemp -d)\n%[1]s create fn%[2]s%[3]s --runtime %[2]s --template %[3]s\ncd fn%[2]s%[3]s\n%[1]s build --builder %[4]s -v`, funcBinary, runtime, template, builderImg)\n\treturn runBash(script)\n}",
"func CfnJob_IsConstruct(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"aws-cdk-lib.aws_glue.CfnJob\",\n\t\t\"isConstruct\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}",
"func Check(conf *config.Config, queue *Queue, running *Running, manager manager.Driver) error {\n\tlog.Println(\"Checking for a job to run\")\n\tlog.Println(\"Queue: \", *queue)\n\trunning.Watch(&manager)\n\tnext := queue.Pop(running, conf.Server.MaxBuilds)\n\tif next != nil {\n\t\tlog.Println(\"About to build: \", next.Project, next.Branch)\n\t\tfor i := range conf.Projects {\n\t\t\tif next.Project == conf.Projects[i].Name {\n\t\t\t\tshouldDeploy := false\n\t\t\t\tlog.Println(\"Found a job to run\")\n\t\t\t\tfor j := range conf.Projects[i].DeployBranches {\n\t\t\t\t\tif next.Branch == conf.Projects[i].DeployBranches[j] {\n\t\t\t\t\t\tlog.Println(\"Will Deploy\")\n\t\t\t\t\t\tshouldDeploy = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tconfPath := conf.Projects[i].MaestroConfPath\n\t\t\t\tlog.Println(\"Running build\")\n\t\t\t\trunErr := manager.Run(\n\t\t\t\t\tfmt.Sprintf(\"%s-%s-%s\", next.Project, next.Branch, next.CurrCommit),\n\t\t\t\t\tconfDir(confPath),\n\t\t\t\t\tconfDir(confPath),\n\t\t\t\t\t[]string{\n\t\t\t\t\t\t\"maestro\",\n\t\t\t\t\t\tfmt.Sprintf(\"--branch=%s\", next.Branch),\n\t\t\t\t\t\tfmt.Sprintf(\"--deploy=%v\", shouldDeploy),\n\t\t\t\t\t\tfmt.Sprintf(\"--prev-commit=%s\", next.PrevCommit),\n\t\t\t\t\t\tfmt.Sprintf(\"--curr-commit=%s\", next.CurrCommit),\n\t\t\t\t\t\tfmt.Sprintf(\"--config=%s\", confPath),\n\t\t\t\t\t\tfmt.Sprintf(\"--clone-path=%s\", conf.Server.WorkspaceDir),\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t\treturn runErr\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (builder *Layered) Build(config *api.Config) (*api.Result, error) {\n\tbuildResult := &api.Result{}\n\n\tif config.HasOnBuild && config.BlockOnBuild {\n\t\tbuildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(\n\t\t\tutilstatus.ReasonOnBuildForbidden,\n\t\t\tutilstatus.ReasonMessageOnBuildForbidden,\n\t\t)\n\t\treturn buildResult, errors.New(\"builder image uses ONBUILD instructions but ONBUILD is not allowed\")\n\t}\n\n\tif config.BuilderImage == \"\" {\n\t\tbuildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(\n\t\t\tutilstatus.ReasonGenericS2IBuildFailed,\n\t\t\tutilstatus.ReasonMessageGenericS2iBuildFailed,\n\t\t)\n\t\treturn buildResult, errors.New(\"builder image name cannot be empty\")\n\t}\n\n\tif err := builder.CreateDockerfile(config); err != nil {\n\t\tbuildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(\n\t\t\tutilstatus.ReasonDockerfileCreateFailed,\n\t\t\tutilstatus.ReasonMessageDockerfileCreateFailed,\n\t\t)\n\t\treturn buildResult, err\n\t}\n\n\tglog.V(2).Info(\"Creating application source code image\")\n\ttarStream := builder.tar.CreateTarStreamReader(filepath.Join(config.WorkingDir, \"upload\"), false)\n\tdefer tarStream.Close()\n\n\tnewBuilderImage := fmt.Sprintf(\"s2i-layered-temp-image-%d\", time.Now().UnixNano())\n\n\toutReader, outWriter := io.Pipe()\n\topts := docker.BuildImageOptions{\n\t\tName: newBuilderImage,\n\t\tStdin: tarStream,\n\t\tStdout: outWriter,\n\t\tCGroupLimits: config.CGroupLimits,\n\t}\n\tdocker.StreamContainerIO(outReader, nil, func(s string) { glog.V(2).Info(s) })\n\n\tglog.V(2).Infof(\"Building new image %s with scripts and sources already inside\", newBuilderImage)\n\tstartTime := time.Now()\n\terr := builder.docker.BuildImage(opts)\n\tbuildResult.BuildInfo.Stages = api.RecordStageAndStepInfo(buildResult.BuildInfo.Stages, api.StageBuild, api.StepBuildDockerImage, startTime, time.Now())\n\tif err != nil {\n\t\tbuildResult.BuildInfo.FailureReason = 
utilstatus.NewFailureReason(\n\t\t\tutilstatus.ReasonDockerImageBuildFailed,\n\t\t\tutilstatus.ReasonMessageDockerImageBuildFailed,\n\t\t)\n\t\treturn buildResult, err\n\t}\n\n\t// upon successful build we need to modify current config\n\tbuilder.config.LayeredBuild = true\n\t// new image name\n\tbuilder.config.BuilderImage = newBuilderImage\n\t// see CreateDockerfile, conditional copy, location of scripts\n\tscriptsIncluded := checkValidDirWithContents(path.Join(config.WorkingDir, constants.UploadScripts))\n\tglog.V(2).Infof(\"Scripts dir has contents %v\", scriptsIncluded)\n\tif scriptsIncluded {\n\t\tbuilder.config.ScriptsURL = \"image://\" + path.Join(getDestination(config), \"scripts\")\n\t} else {\n\t\tvar err error\n\t\tbuilder.config.ScriptsURL, err = builder.docker.GetScriptsURL(newBuilderImage)\n\t\tif err != nil {\n\t\t\tbuildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(\n\t\t\t\tutilstatus.ReasonGenericS2IBuildFailed,\n\t\t\t\tutilstatus.ReasonMessageGenericS2iBuildFailed,\n\t\t\t)\n\t\t\treturn buildResult, err\n\t\t}\n\t}\n\n\tglog.V(2).Infof(\"Building %s using sti-enabled image\", builder.config.Tag)\n\tstartTime = time.Now()\n\terr = builder.scripts.Execute(constants.Assemble, config.AssembleUser, builder.config)\n\tbuildResult.BuildInfo.Stages = api.RecordStageAndStepInfo(buildResult.BuildInfo.Stages, api.StageAssemble, api.StepAssembleBuildScripts, startTime, time.Now())\n\tif err != nil {\n\t\tbuildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(\n\t\t\tutilstatus.ReasonAssembleFailed,\n\t\t\tutilstatus.ReasonMessageAssembleFailed,\n\t\t)\n\t\tswitch e := err.(type) {\n\t\tcase s2ierr.ContainerError:\n\t\t\treturn buildResult, s2ierr.NewAssembleError(builder.config.Tag, e.Output, e)\n\t\tdefault:\n\t\t\treturn buildResult, err\n\t\t}\n\t}\n\tbuildResult.Success = true\n\n\treturn buildResult, nil\n}",
"func (st *buildStatus) start() {\n\tsetStatus(st.builderRev, st)\n\tgo func() {\n\t\terr := st.build()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(st, \"\\n\\nError: %v\\n\", err)\n\t\t\tlog.Println(st.builderRev, \"failed:\", err)\n\t\t}\n\t\tst.setDone(err == nil)\n\t\tst.buildRecord().put()\n\t\tmarkDone(st.builderRev)\n\t}()\n}",
"func (pkgw *packageWatcher) build(buildCache *cache.Cache, srcpkg *crd.Package) {\n\n\t// Ignore non-pending state packages.\n\tif srcpkg.Status.BuildStatus != fission.BuildStatusPending {\n\t\treturn\n\t}\n\n\t// Ignore duplicate build requests\n\tkey := fmt.Sprintf(\"%v-%v\", srcpkg.Metadata.Name, srcpkg.Metadata.ResourceVersion)\n\terr, _ := buildCache.Set(key, srcpkg)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer buildCache.Delete(key)\n\n\tlog.Printf(\"Start build for package %v with resource version %v\", srcpkg.Metadata.Name, srcpkg.Metadata.ResourceVersion)\n\n\tpkg, err := updatePackage(pkgw.fissionClient, srcpkg, fission.BuildStatusRunning, \"\", nil)\n\tif err != nil {\n\t\te := fmt.Sprintf(\"Error setting package pending state: %v\", err)\n\t\tlog.Println(e)\n\t\tupdatePackage(pkgw.fissionClient, srcpkg, fission.BuildStatusFailed, e, nil)\n\t\treturn\n\t}\n\n\tenv, err := pkgw.fissionClient.Environments(pkg.Spec.Environment.Namespace).Get(pkg.Spec.Environment.Name)\n\tif errors.IsNotFound(err) {\n\t\tupdatePackage(pkgw.fissionClient, pkg,\n\t\t\tfission.BuildStatusFailed, \"Environment not existed\", nil)\n\t\treturn\n\t}\n\n\t// Do health check for environment builder pod\n\tfor i := 0; i < 15; i++ {\n\t\t// Informer store is not able to use label to find the pod,\n\t\t// iterate all available environment builders.\n\t\titems := pkgw.podStore.List()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error retrieving pod information for env %v: %v\", err, env.Metadata.Name)\n\t\t\treturn\n\t\t}\n\n\t\tif len(items) == 0 {\n\t\t\tlog.Printf(\"Environment \\\"%v\\\" builder pod is not existed yet, retry again later.\", pkg.Spec.Environment.Name)\n\t\t\ttime.Sleep(time.Duration(i*1) * time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, item := range items {\n\t\t\tpod := item.(*apiv1.Pod)\n\n\t\t\t// In order to support backward compatibility, for all builder images created in default env,\n\t\t\t// the pods will be created in fission-builder 
namespace\n\t\t\tbuilderNs := pkgw.builderNamespace\n\t\t\tif env.Metadata.Namespace != metav1.NamespaceDefault {\n\t\t\t\tbuilderNs = env.Metadata.Namespace\n\t\t\t}\n\n\t\t\t// Filter non-matching pods\n\t\t\tif pod.ObjectMeta.Labels[LABEL_ENV_NAME] != env.Metadata.Name ||\n\t\t\t\tpod.ObjectMeta.Labels[LABEL_ENV_NAMESPACE] != builderNs ||\n\t\t\t\tpod.ObjectMeta.Labels[LABEL_ENV_RESOURCEVERSION] != env.Metadata.ResourceVersion {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Pod may become \"Running\" state but still failed at health check, so use\n\t\t\t// pod.Status.ContainerStatuses instead of pod.Status.Phase to check pod readiness states.\n\t\t\tpodIsReady := true\n\n\t\t\tfor _, cStatus := range pod.Status.ContainerStatuses {\n\t\t\t\tpodIsReady = podIsReady && cStatus.Ready\n\t\t\t}\n\n\t\t\tif !podIsReady {\n\t\t\t\tlog.Printf(\"Environment \\\"%v\\\" builder pod is not ready, retry again later.\", pkg.Spec.Environment.Name)\n\t\t\t\ttime.Sleep(time.Duration(i*1) * time.Second)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// Add the package getter rolebinding to builder sa\n\t\t\t// we continue here if role binding was not setup succeesffully. 
this is because without this, the fetcher wont be able to fetch the source pkg into the container and\n\t\t\t// the build will fail eventually\n\t\t\terr := fission.SetupRoleBinding(pkgw.k8sClient, fission.PackageGetterRB, pkg.Metadata.Namespace, fission.PackageGetterCR, fission.ClusterRole, fission.FissionBuilderSA, builderNs)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error : %v in setting up the role binding %s for pkg : %s.%s\", err, fission.PackageGetterRB, pkg.Metadata.Name, pkg.Metadata.Namespace)\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Setup rolebinding for sa : %s.%s for pkg : %s.%s\", fission.FissionBuilderSA, builderNs, pkg.Metadata.Name, pkg.Metadata.Namespace)\n\t\t\t}\n\n\t\t\tctx := context.Background()\n\t\t\tuploadResp, buildLogs, err := buildPackage(ctx, pkgw.fissionClient, builderNs, pkgw.storageSvcUrl, pkg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error building package %v: %v\", pkg.Metadata.Name, err)\n\t\t\t\tupdatePackage(pkgw.fissionClient, pkg, fission.BuildStatusFailed, buildLogs, nil)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlog.Printf(\"Start updating info of package: %v\", pkg.Metadata.Name)\n\n\t\t\tfnList, err := pkgw.fissionClient.\n\t\t\t\tFunctions(metav1.NamespaceAll).List(metav1.ListOptions{})\n\t\t\tif err != nil {\n\t\t\t\te := fmt.Sprintf(\"Error getting function list: %v\", err)\n\t\t\t\tlog.Println(e)\n\t\t\t\tbuildLogs += fmt.Sprintf(\"%v\\n\", e)\n\t\t\t\tupdatePackage(pkgw.fissionClient, pkg, fission.BuildStatusFailed, buildLogs, nil)\n\t\t\t}\n\n\t\t\t// A package may be used by multiple functions. 
Update\n\t\t\t// functions with old package resource version\n\t\t\tfor _, fn := range fnList.Items {\n\t\t\t\tif fn.Spec.Package.PackageRef.Name == pkg.Metadata.Name &&\n\t\t\t\t\tfn.Spec.Package.PackageRef.Namespace == pkg.Metadata.Namespace &&\n\t\t\t\t\tfn.Spec.Package.PackageRef.ResourceVersion != pkg.Metadata.ResourceVersion {\n\t\t\t\t\tfn.Spec.Package.PackageRef.ResourceVersion = pkg.Metadata.ResourceVersion\n\t\t\t\t\t// update CRD\n\t\t\t\t\t_, err = pkgw.fissionClient.Functions(fn.Metadata.Namespace).Update(&fn)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\te := fmt.Sprintf(\"Error updating function package resource version: %v\", err)\n\t\t\t\t\t\tlog.Println(e)\n\t\t\t\t\t\tbuildLogs += fmt.Sprintf(\"%v\\n\", e)\n\t\t\t\t\t\tupdatePackage(pkgw.fissionClient, pkg, fission.BuildStatusFailed, buildLogs, nil)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t_, err = updatePackage(pkgw.fissionClient, pkg,\n\t\t\t\tfission.BuildStatusSucceeded, buildLogs, uploadResp)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error update package info: %v\", err)\n\t\t\t\tupdatePackage(pkgw.fissionClient, pkg, fission.BuildStatusFailed, buildLogs, nil)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlog.Printf(\"Completed build request for package: %v\", pkg.Metadata.Name)\n\t\t\treturn\n\t\t}\n\t}\n\t// build timeout\n\tupdatePackage(pkgw.fissionClient, pkg,\n\t\tfission.BuildStatusFailed, \"Build timeout due to environment builder not ready\", nil)\n\n\tlog.Printf(\"Max retries exceeded in building the source pkg : %s.%s, timeout due to environment builder not ready\",\n\t\tpkg.Metadata.Name, pkg.Metadata.Namespace)\n\n\treturn\n}",
"func (a *App) BuildImage(ctx context.Context, name, dockerfile string, tags []string) error {\n\ta.scheduler.mu.Lock()\n\tvar worker *Worker\n\t// TODO: find more available worker, not just first one.\n\tfor _, w := range a.scheduler.workers {\n\t\tworker = w\n\t\tbreak\n\t}\n\ta.scheduler.mu.Unlock()\n\n\tif worker == nil {\n\t\treturn fmt.Errorf(\"no worker available\")\n\t}\n\n\treturn worker.buildImage(ctx, name, dockerfile, tags)\n}",
"func validateBuildRunToSucceed(testBuild *utils.TestBuild, testBuildRun *buildv1alpha1.BuildRun) {\n\ttrueCondition := corev1.ConditionTrue\n\tfalseCondition := corev1.ConditionFalse\n\n\t// Ensure the BuildRun has been created\n\terr := testBuild.CreateBR(testBuildRun)\n\tExpect(err).ToNot(HaveOccurred(), \"Failed to create BuildRun\")\n\n\t// Ensure a BuildRun eventually moves to a succeeded TRUE status\n\tnextStatusLog := time.Now().Add(60 * time.Second)\n\tEventually(func() corev1.ConditionStatus {\n\t\ttestBuildRun, err = testBuild.LookupBuildRun(types.NamespacedName{Name: testBuildRun.Name, Namespace: testBuild.Namespace})\n\t\tExpect(err).ToNot(HaveOccurred(), \"Error retrieving a buildRun\")\n\n\t\tif testBuildRun.Status.GetCondition(buildv1alpha1.Succeeded) == nil {\n\t\t\treturn corev1.ConditionUnknown\n\t\t}\n\n\t\tExpect(testBuildRun.Status.GetCondition(buildv1alpha1.Succeeded).Status).ToNot(Equal(falseCondition), \"BuildRun status doesn't move to Succeeded\")\n\n\t\tnow := time.Now()\n\t\tif now.After(nextStatusLog) {\n\t\t\tLogf(\"Still waiting for build run '%s' to succeed.\", testBuildRun.Name)\n\t\t\tnextStatusLog = time.Now().Add(60 * time.Second)\n\t\t}\n\n\t\treturn testBuildRun.Status.GetCondition(buildv1alpha1.Succeeded).Status\n\n\t}, time.Duration(1100*getTimeoutMultiplier())*time.Second, 5*time.Second).Should(Equal(trueCondition), \"BuildRun did not succeed\")\n\n\t// Verify that the BuildSpec is still available in the status\n\tExpect(testBuildRun.Status.BuildSpec).ToNot(BeNil(), \"BuildSpec is not available in the status\")\n\n\tLogf(\"Test build '%s' is completed after %v !\", testBuildRun.GetName(), testBuildRun.Status.CompletionTime.Time.Sub(testBuildRun.Status.StartTime.Time))\n}",
"func (s *githubHook) buildStatus(eventType, commit string, payload []byte, proj *brigade.Project, status *github.RepoStatus) {\n\t// If we need an SSH key, set it here\n\tif proj.Repo.SSHKey != \"\" {\n\t\tkey, err := ioutil.TempFile(\"\", \"\")\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error creating ssh key cache: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tkeyfile := key.Name()\n\t\tdefer os.Remove(keyfile)\n\t\tif _, err := key.WriteString(proj.Repo.SSHKey); err != nil {\n\t\t\tlog.Printf(\"error writing ssh key cache: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tos.Setenv(\"BRIGADE_REPO_KEY\", keyfile)\n\t\tdefer os.Unsetenv(\"BRIGADE_REPO_KEY\") // purely defensive... not really necessary\n\t}\n\n\tmsg := \"Building\"\n\tsvc := StatusContext\n\tstatus.State = &StatePending\n\tstatus.Description = &msg\n\tstatus.Context = &svc\n\tif err := s.createStatus(commit, proj, status); err != nil {\n\t\t// For this one, we just log an error and continue.\n\t\tlog.Printf(\"Error setting status to %s: %s\", *status.State, err)\n\t}\n\tif err := s.build(eventType, commit, payload, proj); err != nil {\n\t\tlog.Printf(\"Build failed: %s\", err)\n\t\tmsg = truncAt(err.Error(), 140)\n\t\tstatus.State = &StateFailure\n\t\tstatus.Description = &msg\n\t} else {\n\t\tmsg = \"Brigade build passed\"\n\t\tstatus.State = &StateSuccess\n\t\tstatus.Description = &msg\n\t}\n\tif err := s.createStatus(commit, proj, status); err != nil {\n\t\t// For this one, we just log an error and continue.\n\t\tlog.Printf(\"After build, error setting status to %s: %s\", *status.State, err)\n\t}\n}",
"func (st *buildStatus) isTry() bool { return st.trySet != nil }",
"func (r *ClusterInstallationReconciler) checkUpdateJob(\n\tmattermost *mattermostv1alpha1.ClusterInstallation,\n\tdesired *appsv1.Deployment,\n\treqLogger logr.Logger,\n) (*batchv1.Job, error) {\n\treqLogger.Info(fmt.Sprintf(\"Running Mattermost update image job check for image %s\", mattermost.GetMattermostAppContainerFromDeployment(desired).Image))\n\tjob, err := r.Resources.FetchMattermostUpdateJob(mattermost.Namespace)\n\tif err != nil {\n\t\t// Unable to fetch job\n\t\tif k8sErrors.IsNotFound(err) {\n\t\t\t// Job is not running, let's launch\n\t\t\treqLogger.Info(\"Launching update image job\")\n\t\t\tif err = r.Resources.LaunchMattermostUpdateJob(mattermost, mattermost.Namespace, desired, reqLogger, nil); err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, \"Launching update image job failed\")\n\t\t\t}\n\t\t\treturn nil, errors.New(\"Began update image job\")\n\t\t}\n\n\t\treturn nil, errors.Wrap(err, \"failed to determine if an update image job is already running\")\n\t}\n\n\t// Job is either running or completed\n\n\t// If desired deployment image does not match the one used by update job, restart it.\n\tisSameImage, err := r.isMainContainerImageSame(\n\t\tmattermost,\n\t\tdesired.Spec.Template.Spec.Containers,\n\t\tjob.Spec.Template.Spec.Containers,\n\t)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to compare image of update job and desired deployment\")\n\t}\n\tif !isSameImage {\n\t\treqLogger.Info(\"Mattermost image changed, restarting update job\")\n\t\terr := r.Resources.RestartMattermostUpdateJob(mattermost, job, desired, reqLogger, nil)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to restart update job\")\n\t\t}\n\n\t\treturn nil, errors.New(\"Restarted update image job\")\n\t}\n\n\tif job.Status.CompletionTime == nil {\n\t\treturn nil, errors.New(\"update image job still running\")\n\t}\n\n\t// Job is completed, can check completion status\n\n\tif job.Status.Failed > 0 {\n\t\treturn job, errors.New(\"update 
image job failed\")\n\t}\n\n\treqLogger.Info(\"Update image job ran successfully\")\n\n\treturn job, nil\n}",
"func checkJobStatus(jobQueue *jobqueue.Client, t *jobqueue.Task) (bool, error) {\n\tif t.Job.Status != jobqueue.JobStatusNew {\n\t\treturn true, fmt.Errorf(\"bad job status: %s\", t.Job.Status)\n\t}\n\tif t.Job.Action != \"select-hypervisor\" {\n\t\treturn true, fmt.Errorf(\"bad action: %s\", t.Job.Action)\n\t}\n\treturn false, nil\n}",
"func jobCompleted(job JobType) bool {\n\t// Call numerxData server to check the status of this job\n\t// return true if we get:\n\t// \t\t[“step”=”metaindexstatus”, “status”=”success”]\n\t//\tor [“step”=“eventindexstatus”, “status” = “success”]\n\t/*\n\t\t[\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"metaindexstatus\",\"Status\":\"success\",\"Timestamp\":1465589455508,\"Notes\":\"\"},\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"parsedmeta\",\"Status\":\"success\",\"Timestamp\":1465588843502,\"Notes\":\"\"},\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"rawmeta\",\"Status\":\"success\",\"Timestamp\":1465588543502,\"Notes\":\"\"}\n\t\t]\n\t*/\n\t// uri string, resource string, params map[string]string\n\tvar params map[string]string = make(map[string]string)\n\tparams[\"id\"] = job.JobId\n\trequest, err := fileUploadStatusRequest(baseUrl, \"/status\", params)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn false // let the caller func to handle retries\n\t}\n\n\tif verbose {\n\t\tlog.Println(\"RQ URL: \", request.URL)\n\t\tlog.Println(\"RQ Headers: \", request.Header)\n\t\tlog.Println(\"RQ Body: \", request)\n\t}\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(request)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn false // let the caller func to handle retries\n\t} else {\n\t\t/* JSON\n\t\t[\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"metaindexstatus\",\"Status\":\"success\",\"Timestamp\":1465589455508,\"Notes\":\"\"},\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"parsedmeta\",\"Status\":\"success\",\"Timestamp\":1465588843502,\"Notes\":\"\"},\n\t\t\t{\"ID\":\"0.0.LqO~iOvJV3sdUOd8\",\"Step\":\"rawmeta\",\"Status\":\"success\",\"Timestamp\":1465588543502,\"Notes\":\"\"}\n\t\t]\n\t\t*/\n\t\tdefer resp.Body.Close()\n\n\t\tvar bodyContent []byte\n\t\tif verbose {\n\t\t\tlog.Println(\"Status RS Status: \", resp.StatusCode)\n\t\t\tlog.Println(\"Status RS Headers: \", resp.Header)\n\t\t}\n\n\t\tbodyContent, err := 
ioutil.ReadAll(resp.Body)\n\n\t\tif verbose {\n\t\t\tlog.Println(\"Status RS Content: error? :\", err)\n\t\t\tlog.Println(\"Status RS Content: body: bytes: \", bodyContent)\n\t\t\tlog.Println(\"Status RS Content: body: string: \", string(bodyContent))\n\t\t}\n\t\tif resp.StatusCode == 200 {\n\t\t\t// Check the step's status\n\t\t\tstatus, err := getStatusResponse(bodyContent)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error %v while checking status for %v, file: %v \\n\", err, job.JobId, job.Filename)\n\t\t\t\treturn false // let the caller func to handle retries\n\t\t\t} else {\n\t\t\t\tswitch requestType {\n\t\t\t\tcase RQ_Viewership:\n\t\t\t\t\tfor _, entry := range status {\n\t\t\t\t\t\tswitch entry.Step {\n\t\t\t\t\t\tcase string(IndexEventData): // \"eventindexstatus\":\n\t\t\t\t\t\t\tswitch entry.Status {\n\t\t\t\t\t\t\tcase string(Success): // \"success\":\n\t\t\t\t\t\t\t\tif verbose {\n\t\t\t\t\t\t\t\t\tlog.Printf(\"Complete for: %s, file: %s\\n\", job.JobId, job.Filename)\n\t\t\t\t\t\t\t\t\tlog.Println(\"Current state: \", status)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\tcase string(Failure): // \"failure\":\n\t\t\t\t\t\t\t\tfailedJobsChan <- job\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tcase string(ParsedEventData), string(RawEventData):\n\t\t\t\t\t\t\tif entry.Status == string(Failure) {\n\t\t\t\t\t\t\t\tfailedJobsChan <- job\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tlog.Printf(\"Not yet: %s, file: %s\\n\", job.JobId, job.Filename)\n\t\t\t\t\t\tlog.Println(\"Current state: \", status)\n\t\t\t\t\t}\n\n\t\t\t\t//\t(actually the new struct with file-name, id, and retry-number)\n\t\t\t\tcase RQ_MetaBilling, RQ_MetaProgram, RQ_MetaChanMap, RQ_MetaEventMap:\n\t\t\t\t\tfor _, entry := range status {\n\t\t\t\t\t\tswitch entry.Step {\n\t\t\t\t\t\tcase string(IndexMetaData): // \"metaindexstatus\":\n\t\t\t\t\t\t\tswitch entry.Status 
{\n\t\t\t\t\t\t\tcase string(Success): // \"success\":\n\t\t\t\t\t\t\t\tif verbose {\n\t\t\t\t\t\t\t\t\tlog.Printf(\"Complete for: %s, file: %s\\n\", job.JobId, job.Filename)\n\t\t\t\t\t\t\t\t\tlog.Println(\"Current state: \", status)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\tcase string(Failure): // \"failure\":\n\t\t\t\t\t\t\t\tfailedJobsChan <- job\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tcase string(ParsedMetaData), string(RawMetaData):\n\t\t\t\t\t\t\tif entry.Status == string(Failure) {\n\t\t\t\t\t\t\t\tfailedJobsChan <- job\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tlog.Printf(\"Not yet: %s, file: %s\\n\", job.JobId, job.Filename)\n\t\t\t\t\t\tlog.Println(\"Current state: \", status)\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Println(\"Error Status %v while checking status for %v, file: %s \\n\", err, job.JobId, job.Filename)\n\t\t\tfailedJobsChan <- job\n\t\t\tif verbose {\n\t\t\t\tlog.Println(\"Error Status %v while checking status for %v, file: %s \\n\", err, job.JobId, job.Filename)\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (cc *ConfigsController) needsBuild(ctx context.Context, st store.RStore) (*ctrltiltfile.BuildEntry, bool) {\n\tstate := st.RLockState()\n\tdefer st.RUnlockState()\n\n\t// Don't start the next build until the previous action has been recorded,\n\t// so that we don't accidentally repeat the same build.\n\tif cc.loadStartedCount != state.StartedTiltfileLoadCount {\n\t\treturn nil, false\n\t}\n\n\t// Don't start the next build if the last completion hasn't been recorded yet.\n\tfor _, ms := range state.TiltfileStates {\n\t\tisRunning := !ms.CurrentBuild.StartTime.IsZero()\n\t\tif isRunning {\n\t\t\treturn nil, false\n\t\t}\n\t}\n\n\tfor _, name := range state.TiltfileDefinitionOrder {\n\t\ttf, ok := state.Tiltfiles[name.String()]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\ttfState, ok := state.TiltfileStates[name]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar reason model.BuildReason\n\t\tlastStartTime := tfState.LastBuild().StartTime\n\t\tif !tfState.StartedFirstBuild() {\n\t\t\treason = reason.With(model.BuildReasonFlagInit)\n\t\t}\n\n\t\thasPendingChanges, _ := tfState.HasPendingChanges()\n\t\tif hasPendingChanges {\n\t\t\treason = reason.With(model.BuildReasonFlagChangedFiles)\n\t\t}\n\n\t\tif state.UserConfigState.ArgsChangeTime.After(lastStartTime) {\n\t\t\treason = reason.With(model.BuildReasonFlagTiltfileArgs)\n\t\t}\n\n\t\tif state.ManifestInTriggerQueue(name) {\n\t\t\treason = reason.With(tfState.TriggerReason)\n\t\t}\n\n\t\tif reason == model.BuildReasonNone {\n\t\t\tcontinue\n\t\t}\n\n\t\tfilesChanged := []string{}\n\t\tfor _, st := range tfState.BuildStatuses {\n\t\t\tfor k := range st.PendingFileChanges {\n\t\t\t\tfilesChanged = append(filesChanged, k)\n\t\t\t}\n\t\t}\n\t\tfilesChanged = sliceutils.DedupedAndSorted(filesChanged)\n\n\t\treturn &ctrltiltfile.BuildEntry{\n\t\t\tName: name,\n\t\t\tFilesChanged: filesChanged,\n\t\t\tBuildReason: reason,\n\t\t\tUserConfigState: state.UserConfigState,\n\t\t\tTiltfilePath: 
tf.Spec.Path,\n\t\t\tCheckpointAtExecStart: state.LogStore.Checkpoint(),\n\t\t}, true\n\t}\n\n\treturn nil, false\n}",
"func build() bool {\n\tlog.Println(okColor(\"Running build commands!\"))\n\n\tfor _, c := range flagBuildCommandList.commands {\n\t\terr := runBuildCommand(c)\n\t\tif err != nil {\n\t\t\tlog.Println(failColor(\"Command failed: \"), failColor(c))\n\t\t\treturn false\n\t\t}\n\t}\n\tlog.Println(okColor(\"Build ok.\"))\n\n\treturn true\n}",
"func TestCannotExecuteStatusImage(t *testing.T) {\n\tbuf := setLogBuffer()\n\tdefer func() {\n\t\tif t.Failed() {\n\t\t\tt.Log(buf.String())\n\t\t}\n\t}()\n\n\tif StatusImage == \"\" {\n\t\tt.Skip(\"no status image defined\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tapp := &models.App{Name: id.New().String()}\n\tapp = ensureApp(t, app)\n\n\tfn := &models.Fn{\n\t\tAppID: app.ID,\n\t\tName: id.New().String(),\n\t\tImage: StatusImage,\n\t\tResourceConfig: models.ResourceConfig{\n\t\t\tMemory: memory,\n\t\t},\n\t}\n\tfn = ensureFn(t, fn)\n\n\tlb, err := LB()\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\tu := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: lb,\n\t}\n\tu.Path = path.Join(u.Path, \"invoke\", fn.ID)\n\n\tcontent := bytes.NewBuffer([]byte(`status`))\n\toutput := &bytes.Buffer{}\n\n\tresp, err := callFN(ctx, u.String(), content, output, models.TypeSync)\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\n\tif resp.StatusCode != http.StatusBadRequest {\n\t\tt.Fatalf(\"StatusCode check failed on %v\", resp.StatusCode)\n\t}\n}",
"func getCIJobStatus(outputFile, branch string, htmlize bool) error {\n\tvar result error\n\tlog.Print(\"Getting CI job status (this may take a while)...\")\n\n\tred := \"<span style=\\\"color:red\\\">\"\n\tgreen := \"<span style=\\\"color:green\\\">\"\n\toff := \"</span>\"\n\n\tif htmlize {\n\t\tred = \"<FONT COLOR=RED>\"\n\t\tgreen = \"<FONT COLOR=GREEN>\"\n\t\toff = \"</FONT>\"\n\t}\n\n\tvar extraFlag string\n\n\tif strings.Contains(branch, \"release-\") {\n\t\t// If working on a release branch assume --official for the purpose of displaying\n\t\t// find_green_build output\n\t\textraFlag = \"--official\"\n\t} else {\n\t\t// For master branch, limit the analysis to 30 primary ci jobs. This is necessary\n\t\t// due to the recently expanded blocking test list for master. The expanded test\n\t\t// list is often unable to find a complete passing set and find_green_build runs\n\t\t// unbounded for hours\n\t\textraFlag = \"--limit=30\"\n\t}\n\n\tf, err := os.OpenFile(outputFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err = f.Close(); err != nil {\n\t\t\tresult = fmt.Errorf(\"failed to close file %s, %v\", outputFile, err)\n\t\t}\n\t}()\n\n\tf.WriteString(fmt.Sprintf(\"## State of %s branch\\n\", branch))\n\n\t// Call script find_green_build to get CI job status\n\tcontent, err := u.Shell(os.Getenv(\"GOPATH\")+\"/src/k8s.io/release/find_green_build\", \"-v\", extraFlag, branch)\n\tif err == nil {\n\t\tf.WriteString(fmt.Sprintf(\"%sGOOD TO GO!%s\\n\\n\", green, off))\n\t} else {\n\t\tf.WriteString(fmt.Sprintf(\"%sNOT READY%s\\n\\n\", red, off))\n\t}\n\n\tf.WriteString(\"### Details\\n```\\n\")\n\tf.WriteString(content)\n\tf.WriteString(\"```\\n\")\n\n\tlog.Print(\"CI job status fetched.\")\n\treturn result\n}",
"func onRunIsResolved(target *api.Container, run *api.Container) bool {\n\tif target.DesiredStatus >= api.ContainerCreated {\n\t\treturn run.KnownStatus >= api.ContainerRunning\n\t}\n\treturn false\n}",
"func (c *client) startNewJob(ctx context.Context, opts launcher.LaunchOptions, jobInterface v12.JobInterface, ns string, safeName string, safeSha string) ([]runtime.Object, error) {\n\tlog.Logger().Infof(\"about to create a new job for name %s and sha %s\", safeName, safeSha)\n\n\t// lets see if we are using a version stream to store the git operator configuration\n\tfolder := filepath.Join(opts.Dir, \"versionStream\", \"git-operator\")\n\texists, err := files.DirExists(folder)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to check if folder exists %s\", folder)\n\t}\n\tif !exists {\n\t\t// lets try the original location\n\t\tfolder = filepath.Join(opts.Dir, \".jx\", \"git-operator\")\n\t}\n\n\tjobFileName := \"job.yaml\"\n\n\tfileNamePath := filepath.Join(opts.Dir, \".jx\", \"git-operator\", \"filename.txt\")\n\texists, err = files.FileExists(fileNamePath)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to check for file %s\", fileNamePath)\n\t}\n\tif exists {\n\t\tdata, err := ioutil.ReadFile(fileNamePath)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to load file %s\", fileNamePath)\n\t\t}\n\t\tjobFileName = strings.TrimSpace(string(data))\n\t\tif jobFileName == \"\" {\n\t\t\treturn nil, errors.Errorf(\"the job name file %s is empty\", fileNamePath)\n\t\t}\n\t}\n\n\tfileName := filepath.Join(folder, jobFileName)\n\texists, err = files.FileExists(fileName)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to find file %s in repository %s\", fileName, safeName)\n\t}\n\tif !exists {\n\t\treturn nil, errors.Errorf(\"repository %s does not have a Job file: %s\", safeName, fileName)\n\t}\n\n\tresource := &v1.Job{}\n\terr = yamls.LoadFile(fileName, resource)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to load Job file %s in repository %s\", fileName, safeName)\n\t}\n\n\tif !opts.NoResourceApply {\n\t\t// now lets check if there is a resources dir\n\t\tresourcesDir := 
filepath.Join(folder, \"resources\")\n\t\texists, err = files.DirExists(resourcesDir)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to check if resources directory %s exists in repository %s\", resourcesDir, safeName)\n\t\t}\n\t\tif exists {\n\t\t\tabsDir, err := filepath.Abs(resourcesDir)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to get absolute resources dir %s\", resourcesDir)\n\t\t\t}\n\n\t\t\tcmd := &cmdrunner.Command{\n\t\t\t\tName: \"kubectl\",\n\t\t\t\tArgs: []string{\"apply\", \"-f\", absDir},\n\t\t\t}\n\t\t\tlog.Logger().Infof(\"running command: %s\", cmd.CLI())\n\t\t\t_, err = c.runner(cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to apply resources in dir %s\", absDir)\n\t\t\t}\n\t\t}\n\t}\n\n\t// lets try use a maximum of 31 characters and a minimum of 10 for the sha\n\tnamePrefix := trimLength(safeName, 20)\n\n\tid := uuid.New().String()\n\tresourceName := namePrefix + \"-\" + id\n\n\tresource.Name = resourceName\n\n\tif resource.Labels == nil {\n\t\tresource.Labels = map[string]string{}\n\t}\n\tresource.Labels[constants.DefaultSelectorKey] = constants.DefaultSelectorValue\n\tresource.Labels[launcher.RepositoryLabelKey] = safeName\n\tresource.Labels[launcher.CommitShaLabelKey] = safeSha\n\n\tr2, err := jobInterface.Create(ctx, resource, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create Job %s in namespace %s\", resourceName, ns)\n\t}\n\tlog.Logger().Infof(\"created Job %s in namespace %s\", resourceName, ns)\n\treturn []runtime.Object{r2}, nil\n}",
"func (fc *FederatedController) syncFLJob(key string) (bool, error) {\n\tstartTime := time.Now()\n\tdefer func() {\n\t\tklog.V(4).Infof(\"Finished syncing federatedlearning job %q (%v)\", key, time.Since(startTime))\n\t}()\n\n\tns, name, err := cache.SplitMetaNamespaceKey(key)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif len(ns) == 0 || len(name) == 0 {\n\t\treturn false, fmt.Errorf(\"invalid federatedlearning job key %q: either namespace or name is missing\", key)\n\t}\n\tsharedFLJob, err := fc.jobLister.FederatedLearningJobs(ns).Get(name)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tklog.V(4).Infof(\"FLJob has been deleted: %v\", key)\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, err\n\t}\n\tflJob := *sharedFLJob\n\t// set kind for flJob in case that the kind is None\n\tflJob.SetGroupVersionKind(neptunev1.SchemeGroupVersion.WithKind(\"FederatedLearningJob\"))\n\t// if flJob was finished previously, we don't want to redo the termination\n\tif IsFLJobFinished(&flJob) {\n\t\treturn true, nil\n\t}\n\tselector, _ := GenerateSelector(&flJob)\n\tpods, err := fc.podStore.Pods(flJob.Namespace).List(selector)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tactivePods := k8scontroller.FilterActivePods(pods)\n\tactive := int32(len(activePods))\n\tsucceeded, failed := getStatus(pods)\n\tconditions := len(flJob.Status.Conditions)\n\t// flJob first start\n\tif flJob.Status.StartTime == nil {\n\t\tnow := metav1.Now()\n\t\tflJob.Status.StartTime = &now\n\t}\n\n\tvar manageJobErr error\n\tjobFailed := false\n\tvar failureReason string\n\tvar failureMessage string\n\tphase := flJob.Status.Phase\n\n\tif failed > 0 {\n\t\tjobFailed = true\n\t\tfailureReason = \"workerFailed\"\n\t\tfailureMessage = \"the worker of FLJob failed\"\n\t}\n\n\tif jobFailed {\n\t\tflJob.Status.Conditions = append(flJob.Status.Conditions, NewFLJobCondition(neptunev1.FLJobCondFailed, failureReason, failureMessage))\n\t\tflJob.Status.Phase = 
neptunev1.FLJobFailed\n\t\tfc.recorder.Event(&flJob, v1.EventTypeWarning, failureReason, failureMessage)\n\t} else {\n\t\t// in the First time, we create the pods\n\t\tif len(pods) == 0 {\n\t\t\tactive, manageJobErr = fc.createPod(&flJob)\n\t\t}\n\t\tcomplete := false\n\t\tif succeeded > 0 && active == 0 {\n\t\t\tcomplete = true\n\t\t}\n\t\tif complete {\n\t\t\tflJob.Status.Conditions = append(flJob.Status.Conditions, NewFLJobCondition(neptunev1.FLJobCondComplete, \"\", \"\"))\n\t\t\tnow := metav1.Now()\n\t\t\tflJob.Status.CompletionTime = &now\n\t\t\tfc.recorder.Event(&flJob, v1.EventTypeNormal, \"Completed\", \"FLJob completed\")\n\t\t\tflJob.Status.Phase = neptunev1.FLJobSucceeded\n\t\t} else {\n\t\t\tflJob.Status.Phase = neptunev1.FLJobRunning\n\t\t}\n\t}\n\n\tforget := false\n\t// Check if the number of jobs succeeded increased since the last check. If yes \"forget\" should be true\n\t// This logic is linked to the issue: https://github.com/kubernetes/kubernetes/issues/56853 that aims to\n\t// improve the FLJob backoff policy when parallelism > 1 and few FLJobs failed but others succeed.\n\t// In this case, we should clear the backoff delay.\n\tif flJob.Status.Succeeded < succeeded {\n\t\tforget = true\n\t}\n\n\t// no need to update the flJob if the status hasn't changed since last time\n\tif flJob.Status.Active != active || flJob.Status.Succeeded != succeeded || flJob.Status.Failed != failed || len(flJob.Status.Conditions) != conditions || flJob.Status.Phase != phase {\n\t\tflJob.Status.Active = active\n\t\tflJob.Status.Succeeded = succeeded\n\t\tflJob.Status.Failed = failed\n\n\t\tif jobFailed && !IsFLJobFinished(&flJob) {\n\t\t\t// returning an error will re-enqueue FLJob after the backoff period\n\t\t\treturn forget, fmt.Errorf(\"failed pod(s) detected for flJob key %q\", key)\n\t\t}\n\n\t\tforget = true\n\t}\n\n\treturn forget, manageJobErr\n}",
"func (bc *OktetoBuilder) isImageBuilt(tags []string) (string, error) {\n\tfor _, tag := range tags {\n\t\timageWithDigest, err := bc.Registry.GetImageTagWithDigest(tag)\n\n\t\tif err != nil {\n\t\t\tif oktetoErrors.IsNotFound(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// return error if the registry doesn't send a not found error\n\t\t\treturn \"\", fmt.Errorf(\"error checking image at registry %s: %v\", tag, err)\n\t\t}\n\t\treturn imageWithDigest, nil\n\t}\n\treturn \"\", fmt.Errorf(\"not found\")\n}",
"func (scw *JobFuncWrapper) ensureNooneElseRunning(job *que.Job, tx *pgx.Tx, key string) (bool, error) {\n\tvar lastCompleted time.Time\n\tvar nextScheduled time.Time\n\terr := tx.QueryRow(\"SELECT last_completed, next_scheduled FROM cron_metadata WHERE id = $1 FOR UPDATE\", key).Scan(&lastCompleted, &nextScheduled)\n\tif err != nil {\n\t\tif err == pgx.ErrNoRows {\n\t\t\t_, err = tx.Exec(\"INSERT INTO cron_metadata (id) VALUES ($1)\", key)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\treturn false, ErrImmediateReschedule\n\t\t}\n\t\treturn false, err\n\t}\n\n\tif time.Now().Before(nextScheduled) {\n\t\tvar futureJobs int\n\t\t// make sure we don't regard ourself as a future job. Sometimes clock skew makes us think we can't run yet.\n\t\terr = tx.QueryRow(\"SELECT count(*) FROM que_jobs WHERE job_class = $1 AND args::jsonb = $2::jsonb AND run_at >= $3 AND job_id != $4\", job.Type, job.Args, nextScheduled, job.ID).Scan(&futureJobs)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tif futureJobs > 0 {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn false, scw.QC.EnqueueInTx(&que.Job{\n\t\t\tType: job.Type,\n\t\t\tArgs: job.Args,\n\t\t\tRunAt: nextScheduled,\n\t\t}, tx)\n\t}\n\n\t// Continue\n\treturn true, nil\n}",
"func checkState() {\n\tlist := listContainers()\n\tallup := true\n\tfor _, cfgCon := range clusterConfig {\n\t\tfound := false\n\t\tfor _, con := range list {\n\t\t\tif con.Name == cfgCon.Name {\n\t\t\t\tif !con.State.Running {\n\t\t\t\t\tallup = false\n\t\t\t\t}\n\t\t\t\tcheckContainerState(con, cfgCon)\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tlog.Println(\"No container found for\", cfgCon.Name)\n\t\t\tcreateContainer(cfgCon)\n\t\t\tupdateConfig()\n\t\t}\n\t}\n\tif allup {\n\t\tlog.Println(\"All containers are up\")\n\t}\n}",
"func isRunning(on_node, operation, rcCode string) bool {\n\treturn on_node != \"\" && (operation == \"start\" || (operation == \"monitor\" && rcCode == \"0\"))\n}",
"func newGoldTryjobProcessor(vcs vcsinfo.VCS, config *sharedconfig.IngesterConfig, ignoreClient *http.Client, eventBus eventbus.EventBus) (ingestion.Processor, error) {\n\tgerritURL := config.ExtraParams[CONFIG_GERRIT_CODE_REVIEW_URL]\n\tif strings.TrimSpace(gerritURL) == \"\" {\n\t\treturn nil, fmt.Errorf(\"Missing URL for the Gerrit code review systems. Got value: '%s'\", gerritURL)\n\t}\n\n\t// Get the config options.\n\tsvcAccountFile := config.ExtraParams[CONFIG_SERVICE_ACCOUNT_FILE]\n\tsklog.Infof(\"Got service account file '%s'\", svcAccountFile)\n\n\tpollInterval, err := parseDuration(config.ExtraParams[CONFIG_BUILD_BUCKET_POLL_INTERVAL], bbstate.DefaultPollInterval)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttimeWindow, err := parseDuration(config.ExtraParams[CONFIG_BUILD_BUCKET_TIME_WINDOW], bbstate.DefaultTimeWindow)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuildBucketURL := config.ExtraParams[CONFIG_BUILD_BUCKET_URL]\n\tbuildBucketName := config.ExtraParams[CONFIG_BUILD_BUCKET_NAME]\n\tif (buildBucketURL == \"\") || (buildBucketName == \"\") {\n\t\treturn nil, fmt.Errorf(\"BuildBucketName and BuildBucketURL must not be empty.\")\n\t}\n\n\tbuilderRegExp := config.ExtraParams[CONFIG_BUILDER_REGEX]\n\tif builderRegExp == \"\" {\n\t\tbuilderRegExp = bbstate.DefaultTestBuilderRegex\n\t}\n\n\t// Get the config file in the repo that should be parsed to determine whether a\n\t// bot uploads results. 
Currently only applies to the Skia repo.\n\tcfgFile := config.ExtraParams[CONFIG_JOB_CFG_FILE]\n\n\t_, expStoreFactory, err := ds_expstore.New(ds.DS, eventBus)\n\tif err != nil {\n\t\treturn nil, sklog.FmtErrorf(\"Unable to create cloud expectations store: %s\", err)\n\t}\n\tsklog.Infof(\"Cloud Expectations Store created\")\n\n\t// Create the cloud tryjob store.\n\ttryjobStore, err := tryjobstore.NewCloudTryjobStore(ds.DS, expStoreFactory, eventBus)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating tryjob store: %s\", err)\n\t}\n\tsklog.Infof(\"Cloud Tryjobstore Store created\")\n\n\t// Instantiate the Gerrit API client.\n\tts, err := auth.NewJWTServiceAccountTokenSource(\"\", svcAccountFile, gstorage.CloudPlatformScope, \"https://www.googleapis.com/auth/userinfo.email\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to authenticate service account: %s\", err)\n\t}\n\tclient := httputils.DefaultClientConfig().WithTokenSource(ts).With2xxOnly().Client()\n\tsklog.Infof(\"HTTP client instantiated\")\n\n\tgerritReview, err := gerrit.NewGerrit(gerritURL, \"\", client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsklog.Infof(\"Gerrit client instantiated\")\n\n\tbbConf := &bbstate.Config{\n\t\tBuildBucketURL: buildBucketURL,\n\t\tBuildBucketName: buildBucketName,\n\t\tClient: client,\n\t\tTryjobStore: tryjobStore,\n\t\tGerritClient: gerritReview,\n\t\tPollInterval: pollInterval,\n\t\tTimeWindow: timeWindow,\n\t\tBuilderRegexp: builderRegExp,\n\t}\n\n\tbbGerritClient, err := bbstate.NewBuildBucketState(bbConf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsklog.Infof(\"BuildBucketState created\")\n\n\tret := &goldTryjobProcessor{\n\t\tbuildIssueSync: bbGerritClient,\n\t\ttryjobStore: tryjobStore,\n\t\tvcs: vcs,\n\t\tcfgFile: cfgFile,\n\n\t\t// The argument to NewCondMonitor is 1 because we always want exactly one go-routine per unique\n\t\t// issue ID to enter the critical section. 
See the syncIssueAndTryjob function.\n\t\tsyncMonitor: util.NewCondMonitor(1),\n\t}\n\teventBus.SubscribeAsync(tryjobstore.EV_TRYJOB_UPDATED, ret.tryjobUpdatedHandler)\n\n\treturn ret, nil\n}",
"func verifyCircleCIJobSuccess(orgRepo, gitHash, circleCIDeployJobName, circleCIAPIToken string) (err error) {\n\tclient := &circleci.Client{Token: circleCIAPIToken}\n\tsplitOrgRepo := strings.Split(orgRepo, \"/\")\n\torg := splitOrgRepo[0]\n\trepo := splitOrgRepo[1]\n\tvar targetBuildNum int\n\tif targetBuildNum, err = obtainBuildNum(org, repo, gitHash, circleCIDeployJobName,\n\t\tclient); err != nil {\n\t\treturn\n\t}\n\treturn checkForJobSuccess(org, repo, targetBuildNum, client)\n}",
"func OfflineBuild(request *http.Request) (string, interface{}) {\n\tdecoder := json.NewDecoder(request.Body)\n\tbuilder := &build.Build{}\n\terr := decoder.Decode(builder)\n\tif err != nil {\n\t\tlog.Errorf(\"decode the request body err:%v\", err)\n\t\treturn r.StatusBadRequest, \"json format error\"\n\t}\n\n\tdockerfile := fmt.Sprintf(\"%s/%s/%s\", TARBALL_ROOT_DIR, builder.UserId, \"Dockerfile\")\n\tif !file.FileExsit(dockerfile) {\n\t\t//TODO generate the Dockerfile by Dockerfile template\n\t}\n\tprojects := fmt.Sprintf(\"%s/%s/%s\", TARBALL_ROOT_DIR, builder.UserId, builder.Tarball)\n\timgBuildTar := fmt.Sprintf(\"%s/%s/%s\", BUILD_IMAGE_TAR_DIR, builder.UserId, builder.Tarball)\n\tif err = file.Tar(imgBuildTar, true, dockerfile, projects); err != nil {\n\t\treturn r.StatusInternalServerError, err\n\t}\n\tbuildContext, err := os.Open(imgBuildTar)\n\tif err != nil {\n\t\treturn r.StatusInternalServerError, err\n\t}\n\tdefer buildContext.Close()\n\timage_repo := fmt.Sprintf(\"%s/%s:%s\", DEFAULT_REGISTRY, builder.AppName, builder.Version)\n\toptions := types.ImageBuildOptions{\n\t\tTags: []string{image_repo},\n\t\tDockerfile: \"Dockerfile\",\n\t}\n\n\tbuildResponse, err := client.DockerClient.ImageBuild(context.Background(), buildContext, options)\n\tif err != nil {\n\t\tlog.Errorf(\"build image err: %v\", err)\n\t\treturn r.StatusInternalServerError, err.Error()\n\t}\n\tres, err := ioutil.ReadAll(buildResponse.Body)\n\tif err != nil {\n\t\tlog.Errorf(\"read the build image response err: %v\", err)\n\t\treturn r.StatusInternalServerError, err.Error()\n\t}\n\n\tbuilder.Image = image_repo\n\tbuilder.Status = build.BUILD_SUCCESS\n\tbuilder.BuildLog = string(res)\n\tif err = builder.Insert(); err != nil {\n\t\tlog.Errorf(\"insert the build to db err: %v\", err)\n\t}\n\n\tpushRes, err := pushImage(image_repo)\n\tif err != nil {\n\t\treturn r.StatusInternalServerError, \"build image successed,but push image to registry err :\" + err.Error()\n\t}\n\tlog.Debugf(\"push 
result ==%v\", pushRes)\n\treturn r.StatusCreated, string(res)\n}",
"func (sia *statefulsetInitAwaiter) checkAndLogStatus() bool {\n\tif sia.replicasReady && sia.revisionReady {\n\t\tsia.config.logStatus(diag.Info,\n\t\t\tfmt.Sprintf(\"%sStatefulSet initialization complete\", cmdutil.EmojiOr(\"✅ \", \"\")))\n\t\treturn true\n\t}\n\n\tisInitialDeployment := sia.currentGeneration <= 1\n\n\t// For initial generation, the revision doesn't need to be updated, so skip that step in the log.\n\tif isInitialDeployment {\n\t\tsia.config.logStatus(diag.Info, fmt.Sprintf(\"[1/2] Waiting for StatefulSet to create Pods (%d/%d Pods ready)\",\n\t\t\tsia.currentReplicas, sia.targetReplicas))\n\t} else {\n\t\tswitch {\n\t\tcase !sia.replicasReady:\n\t\t\tsia.config.logStatus(diag.Info, fmt.Sprintf(\"[1/3] Waiting for StatefulSet update to roll out (%d/%d Pods ready)\",\n\t\t\t\tsia.currentReplicas, sia.targetReplicas))\n\t\tcase !sia.revisionReady:\n\t\t\tsia.config.logStatus(diag.Info,\n\t\t\t\t\"[2/3] Waiting for StatefulSet to update .status.currentRevision\")\n\t\t}\n\t}\n\n\treturn false\n}",
"func main() {\n jsv.Run(true, job_verification_function, jsv_on_start_function)\n}",
"func launchCondition(logger *zap.Logger, client *elastic.Client) func(context.Context, *lifecycle.Event) (bool, error) {\n\treturn func(ctx context.Context, event *lifecycle.Event) (bool, error) {\n\t\t// Check cluster health\n\t\tresp, err := client.ClusterHealth().Do(ctx)\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\treturn false, err\n\t\tcase resp.TimedOut:\n\t\t\treturn false, errors.New(\"request to cluster master node timed out\")\n\t\tcase resp.RelocatingShards > 0:\n\t\t\t// Shards are being relocated. This is the activity lifecycler was designed to protect.\n\t\t\tlogger.Info(\"condition failed: RelocatingShards > 0\")\n\t\t\treturn false, nil\n\t\t}\n\n\t\tlogger.Info(\"condition passed\")\n\t\treturn true, nil\n\t}\n}",
"func (s *Worker) prepare(jobName string, f Job) func() {\n\treturn func() {\n\t\ts.logger.Info(fmt.Sprintf(\"job '%s' is started\", jobName))\n\t\terr := f()\n\t\tif err == nil {\n\t\t\ts.logger.Info(fmt.Sprintf(\"job '%s' is finished\", jobName))\n\t\t\treturn\n\t\t}\n\n\t\ts.logger.Error(\"cannot execute job\", \"job\", jobName, \"err\", err)\n\t}\n}",
"func (v *johndictTasker) Status() common.Job {\n\tlog.WithField(\"task\", v.job.UUID).Debug(\"Gathering task status\")\n\tv.mux.Lock()\n\tdefer v.mux.Unlock()\n\n\t// Run john --status command\n\tstatusExec := exec.Command(config.BinPath, \"--status=\"+v.job.UUID)\n\tstatusExec.Dir = v.wd\n\tstatus, err := statusExec.CombinedOutput()\n\tif err != nil {\n\t\tv.job.Error = err.Error()\n\t\tlog.WithField(\"Error\", err.Error()).Debug(\"Error running john status command.\")\n\t\t// Do not return the job now. Keep running the check so the hashes will be parsed \n\t\t// even if the job has quit before a status was pulled.\n\t\t// return v.job \n\t}\n\n\tlog.WithField(\"StatusStdout\", string(status)).Debug(\"Stdout status return of john call\")\n\n\ttimestamp := fmt.Sprintf(\"%d\", time.Now().Unix())\n\n\tmatch := regStatusLine.FindStringSubmatch(string(status))\n\tlog.WithField(\"StatusMatch\", match).Debug(\"Regex match of john status call\")\n\n\tif len(match) == 7 {\n\t\t// Get # of cracked hashes\n\t\tcrackedHashes, err := strconv.ParseInt(match[1], 10, 64)\n\t\tif err == nil {\n\t\t\tv.job.CrackedHashes = crackedHashes\n\t\t}\n\n\t\t// Get % complete\n\t\tprogress, err := strconv.ParseFloat(match[3], 64)\n\t\tif err == nil {\n\t\t\tv.job.Progress = progress\n\t\t}\n\n\t\t// Get ETA\n\t\teta, err := parseJohnETA(match[4])\n\t\tif err == nil {\n\t\t\tv.job.ETC = printTimeUntil(eta)\n\t\t} else {\n\t\t\tv.job.ETC = \"Not Available\"\n\t\t}\n\n\t\t// Get guesses / second\n\t\tif v.job.PerformanceTitle == \"\" {\n\t\t\t// We need to set the units the first time\n\t\t\tv.job.PerformanceTitle = match[6]\n\t\t}\n\n\t\tvar mag float64\n\t\tswitch v.job.PerformanceTitle {\n\t\tcase \"C/s\":\n\t\t\tmag = speedMagH[match[6]]\n\t\tcase \"KC/s\":\n\t\t\tmag = speedMagK[match[6]]\n\t\tcase \"MC/s\":\n\t\t\tmag = speedMagM[match[6]]\n\t\tcase \"GC/s\":\n\t\t\tmag = speedMagG[match[6]]\n\t\t}\n\n\t\t// convert our string into a float\n\t\tspeed, err := strconv.ParseFloat(match[5], 
64)\n\t\tif err == nil {\n\t\t\tv.job.PerformanceData[timestamp] = fmt.Sprintf(\"%f\", speed*mag)\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"speed\": speed,\n\t\t\t\t\"mag\": mag,\n\t\t\t}).Debug(\"Speed calculated.\")\n\t\t}\n\n\t} else {\n\t\tlog.WithField(\"MatchCount\", len(match)).Debug(\"Did not match enough items in the status\")\n\t}\n\n\t// Now get any hashes we might have cracked. Because of how John works we will\n\t// need to read in all the hashes provied and then search the .pot file in this\n\t// working directory to find any cracked passwords.\n\tvar hash2D [][]string\n\n\t// Read in hashes.txt file & pot file\n\thashFile, err := ioutil.ReadFile(filepath.Join(v.wd, \"hashes.txt\"))\n\tpotFile, err := ioutil.ReadFile(filepath.Join(v.wd, v.job.UUID+\".pot\"))\n\tpotHashes := strings.Split(string(potFile), \"\\n\")\n\tif err == nil {\n\t\thashes := strings.Split(string(hashFile), \"\\n\")\n\t\tfor _, hash := range hashes {\n\t\t\t// Check for existence in potHashes\n\t\t\tfor _, potHash := range potHashes {\n\t\t\t\tif strings.Contains(potHash, strings.ToLower(hash)) {\n\t\t\t\t\t// We have a hash match so let's add it to our output\n\t\t\t\t\thashIndex := strings.Index(potHash, strings.ToLower(hash))\n\t\t\t\t\tvar hashKeyPair []string\n\t\t\t\t\thashKeyPair = append(hashKeyPair, potHash[hashIndex+1:])\n\t\t\t\t\thashKeyPair = append(hashKeyPair, hash)\n\n\t\t\t\t\thash2D = append(hash2D, hashKeyPair)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tv.job.OutputData = hash2D\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"task\": v.job.UUID,\n\t\t\"status\": v.job.Status,\n\t}).Info(\"Ongoing task status\")\n\n\treturn v.job\n}",
"func (s ScheduledJob) validate() error {\n\tvar err error\n\tif err = s.ScheduledJobConfig.validate(); err != nil {\n\t\treturn err\n\t}\n\tif err = s.Workload.validate(); err != nil {\n\t\treturn err\n\t}\n\tif err = validateContainerDeps(validateDependenciesOpts{\n\t\tsidecarConfig: s.Sidecars,\n\t\timageConfig: s.ImageConfig.Image,\n\t\tmainContainerName: aws.StringValue(s.Name),\n\t\tlogging: s.Logging,\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"validate container dependencies: %w\", err)\n\t}\n\tif err = validateExposedPorts(validateExposedPortsOpts{\n\t\tsidecarConfig: s.Sidecars,\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"validate unique exposed ports: %w\", err)\n\t}\n\treturn nil\n}",
"func builder(jobs <-chan string, buildStarted chan<- string, buildDone chan<- bool) {\n\tcreateThreshold := func() <-chan time.Time {\n\t\treturn time.After(time.Duration(WorkDelay * time.Millisecond))\n\t}\n\n\tthreshold := createThreshold()\n\teventPath := \"\"\n\n\tfor {\n\t\tselect {\n\t\tcase eventPath = <-jobs:\n\t\t\tthreshold = createThreshold()\n\t\tcase <-threshold:\n\t\t\tbuildStarted <- eventPath\n\t\t\tbuildDone <- build()\n\t\t}\n\t}\n}",
"func (t *Tileset) CheckJobStatus() error {\n\tfmt.Println(\"Awaiting job completion. This may take some time...\")\n\tfor {\n\t\tstatusResponse := &StatusResponse{}\n\t\tres, err := t.base.SimpleGET(t.postURL() + \"/status\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tjson.Unmarshal(res, statusResponse)\n\t\tif statusResponse.Status == \"failed\" {\n\t\t\tfmt.Println(\"Job failed\")\n\t\t\treturn nil\n\t\t}\n\t\tif statusResponse.Status == \"success\" {\n\t\t\tfmt.Println(\"Job complete\")\n\t\t\treturn nil\n\t\t}\n\t\tfmt.Println(statusResponse.Status)\n\t\ttime.Sleep(5 * time.Second)\n\n\t}\n\n}",
"func CfnJobTemplate_IsConstruct(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_mediaconvert.CfnJobTemplate\",\n\t\t\"isConstruct\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}",
"func makeJob(c context.Context, j *engine.Job) *schedulerJob {\n\ttraits, err := presentation.GetJobTraits(c, config(c).Catalog, j)\n\tif err != nil {\n\t\tlogging.WithError(err).Warningf(c, \"Failed to get task traits for %s\", j.JobID)\n\t}\n\n\tnow := clock.Now(c).UTC()\n\tnextRun := \"\"\n\tswitch ts := j.State.TickTime; {\n\tcase ts == schedule.DistantFuture:\n\t\tnextRun = \"-\"\n\tcase !ts.IsZero():\n\t\tnextRun = humanize.RelTime(ts, now, \"ago\", \"from now\")\n\tdefault:\n\t\tnextRun = \"not scheduled yet\"\n\t}\n\n\t// Internal state names aren't very user friendly. Introduce some aliases.\n\tstate := presentation.GetPublicStateKind(j, traits)\n\tlabelClass := stateToLabelClass[state]\n\tif j.State.State == engine.JobStateSlowQueue {\n\t\t// Job invocation is still in the task queue, but new invocation should be\n\t\t// starting now (so the queue is lagging for some reason).\n\t\tlabelClass = \"label-warning\"\n\t}\n\t// Put triggers after regular jobs.\n\tsortGroup := \"A\"\n\tif j.Flavor == catalog.JobFlavorTrigger {\n\t\tsortGroup = \"B\"\n\t}\n\n\treturn &schedulerJob{\n\t\tProjectID: j.ProjectID,\n\t\tJobName: j.GetJobName(),\n\t\tSchedule: j.Schedule,\n\t\tDefinition: taskToText(j.Task),\n\t\tRevision: j.Revision,\n\t\tRevisionURL: j.RevisionURL,\n\t\tState: string(state),\n\t\tOverruns: j.State.Overruns,\n\t\tNextRun: nextRun,\n\t\tPaused: j.Paused,\n\t\tLabelClass: labelClass,\n\t\tJobFlavorIcon: flavorToIconClass[j.Flavor],\n\t\tJobFlavorTitle: flavorToTitle[j.Flavor],\n\n\t\tsortGroup: sortGroup,\n\t\tnow: now,\n\t\ttraits: traits,\n\t}\n}",
"func buildImage(t *testing.T, projectID string, workingPackerDir string) {\n\t// Variables to pass to our Packer build using -var options\n\tPKR_VAR_project := os.Getenv(\"PKR_VAR_project\")\n\tPKR_VAR_zone := os.Getenv(\"PKR_VAR_zone\")\n\tPKR_VAR_airbyte_build_script := os.Getenv(\"PKR_VAR_airbyte_build_script\")\n\tairbyte_build_script := workingPackerDir + PKR_VAR_airbyte_build_script\n\n\tpackerOptions := &packer.Options{\n\t\t// The path to where the Packer template is located\n\t\tTemplate: workingPackerDir + \"airbyte_gce_image.pkr.hcl\",\n\n\t\tVars: map[string]string{\n\t\t\t\"project\": PKR_VAR_project,\n\t\t\t\"zone\": PKR_VAR_zone,\n\t\t\t\"airbyte_build_script\": airbyte_build_script,\n\t\t},\n\n\t\t// Configure retries for intermittent errors\n\t\tRetryableErrors: DefaultRetryablePackerErrors,\n\t\tTimeBetweenRetries: DefaultTimeBetweenPackerRetries,\n\t\tMaxRetries: DefaultMaxPackerRetries,\n\t}\n\n\t// Save the Packer Options so future test stages can use them\n\ttest_structure.SavePackerOptions(t, workingPackerDir, packerOptions)\n\n\t// Make sure the Packer build completes successfully\n\timageName := packer.BuildArtifact(t, packerOptions)\n\n\t// Save the imageName as a string so future test stages can use them\n\ttest_structure.SaveString(t, workingPackerDir, \"imageName\", imageName)\n}",
"func (this *Job) State() int {\n\tanyRunningTasks := false\n\tfor _, task := range this.Tasks {\n\t\tif task.IsRunning() {\n\t\t\tanyRunningTasks = true\n\t\t\tbreak\n\t\t}\n\t}\n\treturn JobState(this.Started, this.Finished, this.Suspended, this.Error, anyRunningTasks)\n}",
"func (c *chain) JobIsReady(jobId string) bool {\n\tisReady := true\n\tfor _, job := range c.PreviousJobs(jobId) {\n\t\tif job.State != proto.STATE_COMPLETE {\n\t\t\tisReady = false\n\t\t}\n\t}\n\treturn isReady\n}",
"func isValidCreateState(s pb.Invocation_State) bool {\n\tswitch s {\n\tdefault:\n\t\treturn false\n\tcase pb.Invocation_STATE_UNSPECIFIED:\n\tcase pb.Invocation_ACTIVE:\n\tcase pb.Invocation_FINALIZING:\n\t}\n\treturn true\n}",
"func (s *InitialisationStep) checkRuntimeStatus(operation internal.UpgradeKymaOperation, instance *internal.Instance, log logrus.FieldLogger) (internal.UpgradeKymaOperation, time.Duration, error) {\n\tif time.Since(operation.UpdatedAt) > CheckStatusTimeout {\n\t\tlog.Infof(\"operation has reached the time limit: updated operation time: %s\", operation.UpdatedAt)\n\t\treturn s.operationManager.OperationFailed(operation, fmt.Sprintf(\"operation has reached the time limit: %s\", CheckStatusTimeout))\n\t}\n\n\tstatus, err := s.provisionerClient.RuntimeOperationStatus(instance.GlobalAccountID, operation.ProvisionerOperationID)\n\tif err != nil {\n\t\treturn operation, s.timeSchedule.StatusCheck, nil\n\t}\n\tlog.Infof(\"call to provisioner returned %s status\", status.State.String())\n\n\tvar msg string\n\tif status.Message != nil {\n\t\tmsg = *status.Message\n\t}\n\n\t// do required steps on init\n\toperation, delay, err := s.performRuntimeTasks(UpgradeInitSteps, operation, instance, log)\n\tif delay != 0 || err != nil {\n\t\treturn operation, delay, err\n\t}\n\n\t// wait for operation completion\n\tswitch status.State {\n\tcase gqlschema.OperationStateInProgress, gqlschema.OperationStatePending:\n\t\treturn operation, s.timeSchedule.StatusCheck, nil\n\tcase gqlschema.OperationStateSucceeded, gqlschema.OperationStateFailed:\n\t\t// Set post-upgrade description which also reset UpdatedAt for operation retries to work properly\n\t\tif operation.Description != postUpgradeDescription {\n\t\t\toperation.Description = postUpgradeDescription\n\t\t\toperation, delay = s.operationManager.UpdateOperation(operation)\n\t\t\tif delay != 0 {\n\t\t\t\treturn operation, delay, nil\n\t\t\t}\n\t\t}\n\t}\n\n\t// do required steps on finish\n\toperation, delay, err = s.performRuntimeTasks(UpgradeFinishSteps, operation, instance, log)\n\tif delay != 0 || err != nil {\n\t\treturn operation, delay, err\n\t}\n\n\t// handle operation completion\n\tswitch status.State {\n\tcase 
gqlschema.OperationStateSucceeded:\n\t\treturn s.operationManager.OperationSucceeded(operation, msg)\n\tcase gqlschema.OperationStateFailed:\n\t\treturn s.operationManager.OperationFailed(operation, fmt.Sprintf(\"provisioner client returns failed status: %s\", msg))\n\t}\n\n\treturn s.operationManager.OperationFailed(operation, fmt.Sprintf(\"unsupported provisioner client status: %s\", status.State.String()))\n}",
"func ttrBuildImageAsserting(ctx context.Context, t *testing.T, client apiclient.APIClient, image string) func() {\n\tsource := fakecontext.New(t, \"\")\n\tdefer source.Close()\n\n\terr := copy.DirCopy(\"../../integration/testdata/delta/\", source.Dir, copy.Content, false)\n\tassert.Assert(t, err)\n\n\tresp, err := client.ImageBuild(ctx, source.AsTarReader(t),\n\t\ttypes.ImageBuildOptions{\n\t\t\tTags: []string{ttrImageName(image)},\n\t\t\tDockerfile: image + \".Dockerfile\",\n\t\t},\n\t)\n\tassert.Assert(t, err)\n\n\tif resp.Body != nil {\n\t\tbody, err := readAllAndClose(resp.Body)\n\t\tassert.Assert(t, err)\n\t\tassert.Assert(t, strings.Contains(body, \"Successfully built\"))\n\t\tassert.Assert(t, strings.Contains(body, \"Successfully tagged\"))\n\t}\n\n\treturn func() {\n\t\tttrRemoveImageAsserting(ctx, t, client, image)\n\t}\n}",
"func (s *service) Build(ctx *shared.Context) (err error) {\n\tif !s.IsOptimized() {\n\t\treturn nil\n\t}\n\tif err = s.buildBatched(ctx); err == nil {\n\t\terr = s.buildIndividual(ctx)\n\t}\n\treturn err\n}",
"func CfnStreamingImage_IsConstruct(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_nimblestudio.CfnStreamingImage\",\n\t\t\"isConstruct\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}",
"func (r *RecipeInfo) initStep(step int) (bool, error) {\n\t// Check if the recipe is done\n\tif step == r.TotalSteps || r.ID.ValueOrZero() == -1 {\n\t\treturn true, r.clear()\n\t}\n\n\t// Check if we are given a valid step in our recipe\n\tif step < 0 || step > r.TotalSteps {\n\t\tlog.Errorf(\"invalid step (%d) when there are only (%d) steps\", step, r.TotalSteps)\n\t\treturn false, errors.New(\"invalid step\")\n\t}\n\n\t// Clear past step jobs\n\tvar err error\n\t_, err = CurrentRecipe.clearJobs()\n\tCurrentRecipe.JobIDs = make([]int64, 0)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t}\n\n\t// Setup triggers/jobs\n\tjobs := make([]*JobPayload, 0)\n\n\t// Construct each JobPayload from each TriggerGroup\n\tfor _, triggerGroup := range r.recipe.Steps[step].TriggerGroups {\n\t\tvar job JobPayload\n\n\t\tif triggerGroup.ActionParams.Valid {\n\t\t\tjob.ActionParams = triggerGroup.ActionParams\n\t\t}\n\n\t\tif triggerGroup.ActionKey.Valid {\n\t\t\tjob.ActionKey = triggerGroup.ActionKey\n\t\t}\n\n\t\tif triggerGroup.Service.Valid {\n\t\t\tjob.Service = triggerGroup.Service.String\n\t\t}\n\n\t\t// Loop through all the triggers in the trigger group\n\t\tfor _, trigger := range triggerGroup.Triggers {\n\t\t\tl, err := strconv.Atoi(trigger.TriggerParams.String)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tjob.TriggerParams = append(job.TriggerParams, l)\n\t\t\tjob.TriggerKeys = append(job.TriggerKeys, trigger.TriggerType.Key.String)\n\t\t}\n\t\tjobs = append(jobs, &job)\n\t}\n\n\t// Send the jobs to the trigger-queue\n\tfor _, j := range jobs {\n\t\tvar id int64\n\t\tvar err error\n\t\tid, err = j.SendJob()\n\t\tif err != nil {\n\t\t\tlog.Error(\"error sending job %+v\", j)\n\t\t\tlog.Error(err.Error())\n\t\t} else {\n\t\t\tr.JobIDs = append(r.JobIDs, id)\n\t\t}\n\t}\n\n\t// Update self\n\tif step > 0 {\n\t\tr.PrevStep = r.recipe.Steps[step-1]\n\t} else {\n\t\tr.PrevStep = nil\n\t}\n\n\tr.CurrentStep = 
r.recipe.Steps[step]\n\n\tif step+1 < r.TotalSteps {\n\t\tr.NextStep = r.recipe.Steps[step+1]\n\t} else {\n\t\tr.NextStep = nil\n\t}\n\treturn false, nil\n}",
"func (a *Amoeba) buildImage(buildContext io.Reader) error {\n\topts := types.ImageBuildOptions{\n\t\tTags: []string{a.bid + \":latest\"},\n\t\tRemove: true,\n\t\tForceRemove: true,\n\t\tLabels: map[string]string{amoebaBuild: a.bid},\n\t}\n\n\tres, err := a.cli.ImageBuild(a.ctx, buildContext, opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\t_, err = ioutil.ReadAll(res.Body) // Blocks until built\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.Result, error) {\n\tif len(opt.Options.Outputs) > 1 {\n\t\treturn nil, errors.Errorf(\"multiple outputs not supported\")\n\t}\n\n\trc := opt.Source\n\tif buildID := opt.Options.BuildID; buildID != \"\" {\n\t\tb.mu.Lock()\n\n\t\tupload := false\n\t\tif strings.HasPrefix(buildID, \"upload-request:\") {\n\t\t\tupload = true\n\t\t\tbuildID = strings.TrimPrefix(buildID, \"upload-request:\")\n\t\t}\n\n\t\tif _, ok := b.jobs[buildID]; !ok {\n\t\t\tb.jobs[buildID] = newBuildJob()\n\t\t}\n\t\tj := b.jobs[buildID]\n\t\tvar cancel func()\n\t\tctx, cancel = context.WithCancel(ctx)\n\t\tj.cancel = cancel\n\t\tb.mu.Unlock()\n\n\t\tif upload {\n\t\t\tctx2, cancel := context.WithTimeout(ctx, 5*time.Second)\n\t\t\tdefer cancel()\n\t\t\terr := j.SetUpload(ctx2, rc)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif remoteContext := opt.Options.RemoteContext; remoteContext == \"upload-request\" {\n\t\t\tctx2, cancel := context.WithTimeout(ctx, 5*time.Second)\n\t\t\tdefer cancel()\n\t\t\tvar err error\n\t\t\trc, err = j.WaitUpload(ctx2)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\topt.Options.RemoteContext = \"\"\n\t\t}\n\n\t\tdefer func() {\n\t\t\tb.mu.Lock()\n\t\t\tdelete(b.jobs, buildID)\n\t\t\tb.mu.Unlock()\n\t\t}()\n\t}\n\n\tvar out builder.Result\n\n\tid := identity.NewID()\n\n\tfrontendAttrs := map[string]string{}\n\n\tif opt.Options.Target != \"\" {\n\t\tfrontendAttrs[\"target\"] = opt.Options.Target\n\t}\n\n\tif opt.Options.Dockerfile != \"\" && opt.Options.Dockerfile != \".\" {\n\t\tfrontendAttrs[\"filename\"] = opt.Options.Dockerfile\n\t}\n\n\tif opt.Options.RemoteContext != \"\" {\n\t\tif opt.Options.RemoteContext != \"client-session\" {\n\t\t\tfrontendAttrs[\"context\"] = opt.Options.RemoteContext\n\t\t}\n\t} else {\n\t\turl, cancel := b.reqBodyHandler.newRequest(rc)\n\t\tdefer cancel()\n\t\tfrontendAttrs[\"context\"] = url\n\t}\n\n\tcacheFrom := append([]string{}, 
opt.Options.CacheFrom...)\n\n\tfrontendAttrs[\"cache-from\"] = strings.Join(cacheFrom, \",\")\n\n\tfor k, v := range opt.Options.BuildArgs {\n\t\tif v == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfrontendAttrs[\"build-arg:\"+k] = *v\n\t}\n\n\tfor k, v := range opt.Options.Labels {\n\t\tfrontendAttrs[\"label:\"+k] = v\n\t}\n\n\tif opt.Options.NoCache {\n\t\tfrontendAttrs[\"no-cache\"] = \"\"\n\t}\n\n\tif opt.Options.PullParent {\n\t\tfrontendAttrs[\"image-resolve-mode\"] = \"pull\"\n\t} else {\n\t\tfrontendAttrs[\"image-resolve-mode\"] = \"default\"\n\t}\n\n\tif opt.Options.Platform != \"\" {\n\t\t// same as in newBuilder in builder/dockerfile.builder.go\n\t\t// TODO: remove once opt.Options.Platform is of type specs.Platform\n\t\t_, err := platforms.Parse(opt.Options.Platform)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfrontendAttrs[\"platform\"] = opt.Options.Platform\n\t}\n\n\tswitch opt.Options.NetworkMode {\n\tcase \"host\", \"none\":\n\t\tfrontendAttrs[\"force-network-mode\"] = opt.Options.NetworkMode\n\tcase \"\", \"default\":\n\tdefault:\n\t\treturn nil, errors.Errorf(\"network mode %q not supported by buildkit\", opt.Options.NetworkMode)\n\t}\n\n\textraHosts, err := toBuildkitExtraHosts(opt.Options.ExtraHosts, b.dnsconfig.HostGatewayIP)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfrontendAttrs[\"add-hosts\"] = extraHosts\n\n\tif opt.Options.ShmSize > 0 {\n\t\tfrontendAttrs[\"shm-size\"] = strconv.FormatInt(opt.Options.ShmSize, 10)\n\t}\n\n\tulimits, err := toBuildkitUlimits(opt.Options.Ulimits)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if len(ulimits) > 0 {\n\t\tfrontendAttrs[\"ulimit\"] = ulimits\n\t}\n\n\texporterName := \"\"\n\texporterAttrs := map[string]string{}\n\tif len(opt.Options.Outputs) == 0 {\n\t\texporterName = exporter.Moby\n\t} else {\n\t\t// cacheonly is a special type for triggering skipping all exporters\n\t\tif opt.Options.Outputs[0].Type != \"cacheonly\" {\n\t\t\texporterName = 
opt.Options.Outputs[0].Type\n\t\t\texporterAttrs = opt.Options.Outputs[0].Attrs\n\t\t}\n\t}\n\n\tif (exporterName == client.ExporterImage || exporterName == exporter.Moby) && len(opt.Options.Tags) > 0 {\n\t\tnameAttr, err := overrides.SanitizeRepoAndTags(opt.Options.Tags)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif exporterAttrs == nil {\n\t\t\texporterAttrs = make(map[string]string)\n\t\t}\n\t\texporterAttrs[\"name\"] = strings.Join(nameAttr, \",\")\n\t}\n\n\tcache := controlapi.CacheOptions{}\n\tif inlineCache := opt.Options.BuildArgs[\"BUILDKIT_INLINE_CACHE\"]; inlineCache != nil {\n\t\tif b, err := strconv.ParseBool(*inlineCache); err == nil && b {\n\t\t\tcache.Exports = append(cache.Exports, &controlapi.CacheOptionsEntry{\n\t\t\t\tType: \"inline\",\n\t\t\t})\n\t\t}\n\t}\n\n\treq := &controlapi.SolveRequest{\n\t\tRef: id,\n\t\tExporter: exporterName,\n\t\tExporterAttrs: exporterAttrs,\n\t\tFrontend: \"dockerfile.v0\",\n\t\tFrontendAttrs: frontendAttrs,\n\t\tSession: opt.Options.SessionID,\n\t\tCache: cache,\n\t}\n\n\tif opt.Options.NetworkMode == \"host\" {\n\t\treq.Entitlements = append(req.Entitlements, entitlements.EntitlementNetworkHost)\n\t}\n\n\taux := streamformatter.AuxFormatter{Writer: opt.ProgressWriter.Output}\n\n\teg, ctx := errgroup.WithContext(ctx)\n\n\teg.Go(func() error {\n\t\tresp, err := b.controller.Solve(ctx, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif exporterName != exporter.Moby && exporterName != client.ExporterImage {\n\t\t\treturn nil\n\t\t}\n\t\tid, ok := resp.ExporterResponse[\"containerimage.digest\"]\n\t\tif !ok {\n\t\t\treturn errors.Errorf(\"missing image id\")\n\t\t}\n\t\tout.ImageID = id\n\t\treturn aux.Emit(\"moby.image.id\", types.BuildResult{ID: id})\n\t})\n\n\tch := make(chan *controlapi.StatusResponse)\n\n\teg.Go(func() error {\n\t\tdefer close(ch)\n\t\t// streamProxy.ctx is not set to ctx because when request is cancelled,\n\t\t// only the build request has to be cancelled, not the status 
request.\n\t\tstream := &statusProxy{streamProxy: streamProxy{ctx: context.TODO()}, ch: ch}\n\t\treturn b.controller.Status(&controlapi.StatusRequest{Ref: id}, stream)\n\t})\n\n\teg.Go(func() error {\n\t\tfor sr := range ch {\n\t\t\tdt, err := sr.Marshal()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := aux.Emit(\"moby.buildkit.trace\", dt); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err := eg.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &out, nil\n}",
"func StateConnectorCall(blockTime *big.Int, checkRet []byte, availableLedger uint64) bool {\n\tif len(checkRet) != 96 {\n\t\treturn false\n\t}\n\tinstructions := ParseInstructions(checkRet, availableLedger)\n\tif instructions.InitialCommit {\n\t\tgo func() {\n\t\t\tacceptedPath, rejectedPath := GetVerificationPaths(checkRet[8:])\n\t\t\t_, errACCEPTED := os.Stat(acceptedPath)\n\t\t\tif errACCEPTED != nil {\n\t\t\t\tif ReadChainWithRetries(blockTime, instructions) {\n\t\t\t\t\tverificationHashStore, err := os.Create(acceptedPath)\n\t\t\t\t\tverificationHashStore.Close()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t// Permissions problem\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tverificationHashStore, err := os.Create(rejectedPath)\n\t\t\t\t\tverificationHashStore.Close()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t// Permissions problem\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\treturn true\n\t} else {\n\t\tacceptedPath, rejectedPath := GetVerificationPaths(checkRet[8:])\n\t\t_, errACCEPTED := os.Stat(acceptedPath)\n\t\t_, errREJECTED := os.Stat(rejectedPath)\n\t\tif errACCEPTED != nil && errREJECTED != nil {\n\t\t\tfor i := 0; i < 2*apiRetries; i++ {\n\t\t\t\t_, errACCEPTED = os.Stat(acceptedPath)\n\t\t\t\t_, errREJECTED = os.Stat(rejectedPath)\n\t\t\t\tif errACCEPTED == nil || errREJECTED == nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\ttime.Sleep(apiRetryDelay)\n\t\t\t}\n\t\t}\n\t\treturn errACCEPTED == nil\n\t}\n}",
"func BuildImage(ctx context.Context, ow *rpc.OutputWriter, cli *client.Client, opts *docker.BuildImageOpts) Fixer {\n\treturn func() (string, error) {\n\t\tcreated, err := docker.EnsureImage(ctx, ow, cli, opts)\n\t\tif err != nil {\n\t\t\treturn \"failed to create custom image.\", err\n\t\t}\n\t\tif created {\n\t\t\treturn \"custom image already existed.\", nil\n\t\t}\n\t\treturn \"custom image created successfully.\", nil\n\t}\n}",
"func (t *task) validateState(newRuntime *pbtask.RuntimeInfo) bool {\n\tcurrentRuntime := t.runtime\n\n\tif newRuntime == nil {\n\t\t// no runtime is invalid\n\t\treturn false\n\t}\n\n\t// if current goal state is deleted, it cannot be overwritten\n\t// till the desired configuration version also changes\n\tif currentRuntime.GetGoalState() == pbtask.TaskState_DELETED &&\n\t\tnewRuntime.GetGoalState() != currentRuntime.GetGoalState() {\n\t\tif currentRuntime.GetDesiredConfigVersion() == newRuntime.GetDesiredConfigVersion() {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif newRuntime.GetMesosTaskId() != nil {\n\t\tif currentRuntime.GetMesosTaskId().GetValue() !=\n\t\t\tnewRuntime.GetMesosTaskId().GetValue() {\n\t\t\t// Validate post migration, new runid is greater than previous one\n\t\t\tif !validateMesosTaskID(newRuntime.GetMesosTaskId().GetValue(),\n\t\t\t\tcurrentRuntime.GetMesosTaskId().GetValue()) {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\t// mesos task id has changed\n\t\t\tif newRuntime.GetState() == pbtask.TaskState_INITIALIZED {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// desired mesos task id should not have runID decrease at\n\t// any time\n\tif newRuntime.GetDesiredMesosTaskId() != nil &&\n\t\t!validateMesosTaskID(newRuntime.GetDesiredMesosTaskId().GetValue(),\n\t\t\tcurrentRuntime.GetDesiredMesosTaskId().GetValue()) {\n\t\treturn false\n\t}\n\n\t// if state update is not requested, then return true\n\tif newRuntime.GetState() == currentRuntime.GetState() {\n\t\treturn true\n\t}\n\n\t//TBD replace if's with more structured checks\n\n\tif util.IsPelotonStateTerminal(currentRuntime.GetState()) {\n\t\t// cannot overwrite terminal state without changing the mesos task id\n\t\treturn false\n\t}\n\n\tif IsMesosOwnedState(newRuntime.GetState()) {\n\t\t// update from mesos eventstream is ok from mesos states, resource manager states\n\t\t// and from INITIALIZED and LAUNCHED states.\n\t\tif IsMesosOwnedState(currentRuntime.GetState()) || 
IsResMgrOwnedState(currentRuntime.GetState()) {\n\t\t\treturn true\n\t\t}\n\n\t\tif currentRuntime.GetState() == pbtask.TaskState_INITIALIZED || currentRuntime.GetState() == pbtask.TaskState_LAUNCHED {\n\t\t\treturn true\n\t\t}\n\n\t\t// Update from KILLING state to only terminal states is allowed\n\t\tif util.IsPelotonStateTerminal(newRuntime.GetState()) && currentRuntime.GetState() == pbtask.TaskState_KILLING {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tif IsResMgrOwnedState(newRuntime.GetState()) {\n\t\t// update from resource manager evenstream is ok from resource manager states or INITIALIZED state\n\t\tif IsResMgrOwnedState(currentRuntime.GetState()) {\n\t\t\treturn true\n\t\t}\n\n\t\tif currentRuntime.GetState() == pbtask.TaskState_INITIALIZED {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tif newRuntime.GetState() == pbtask.TaskState_LAUNCHED {\n\t\t// update to LAUNCHED state from resource manager states and INITIALIZED state is ok\n\t\tif IsResMgrOwnedState(currentRuntime.GetState()) {\n\t\t\treturn true\n\t\t}\n\t\tif currentRuntime.GetState() == pbtask.TaskState_INITIALIZED {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tif newRuntime.GetState() == pbtask.TaskState_KILLING {\n\t\t// update to KILLING state from any non-terminal state is ok\n\t\treturn true\n\t}\n\n\t// any other state transition is invalid\n\treturn false\n}",
"func execute(fn GraphiteReturner, job *Job, cache *lru.Cache) error {\n\tkey := fmt.Sprintf(\"%d-%d\", job.MonitorId, job.LastPointTs.Unix())\n\n\tpreConsider := time.Now()\n\n\tif time.Now().Sub(job.GeneratedAt) > time.Minute*time.Duration(10) {\n\t\texecutorNumTooOld.Inc(1)\n\t\treturn nil\n\t}\n\n\tif found, _ := cache.ContainsOrAdd(key, true); found {\n\t\t//log.Debug(\"T %s already done\", key)\n\t\texecutorNumAlreadyDone.Inc(1)\n\t\texecutorConsiderJobAlreadyDone.Value(time.Since(preConsider))\n\t\treturn nil\n\t}\n\n\t//log.Debug(\"T %s doing\", key)\n\texecutorNumOriginalTodo.Inc(1)\n\texecutorConsiderJobOriginalTodo.Value(time.Since(preConsider))\n\tgr, err := fn(job.OrgId)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"fatal: job %q: %q\", job, err)\n\t}\n\tif gr, ok := gr.(*graphite.GraphiteContext); ok {\n\t\tgr.AssertMinSeries = job.AssertMinSeries\n\t\tgr.AssertStart = job.AssertStart\n\t\tgr.AssertStep = job.AssertStep\n\t\tgr.AssertSteps = job.AssertSteps\n\t}\n\n\tpreExec := time.Now()\n\texecutorJobExecDelay.Value(preExec.Sub(job.LastPointTs))\n\tevaluator, err := NewGraphiteCheckEvaluator(gr, job.Definition)\n\tif err != nil {\n\t\t// expressions should be validated before they are stored in the db!\n\t\treturn fmt.Errorf(\"fatal: job %q: invalid check definition %q: %q\", job, job.Definition, err)\n\t}\n\n\tres, err := evaluator.Eval(job.LastPointTs)\n\tdurationExec := time.Since(preExec)\n\tlog.Debug(\"job results - job:%v err:%v res:%v\", job, err, res)\n\n\t// the bosun api abstracts parsing, execution and graphite querying for us via 1 call.\n\t// we want to have some individual times\n\tif gr, ok := gr.(*graphite.GraphiteContext); ok {\n\t\texecutorJobQueryGraphite.Value(gr.Dur)\n\t\texecutorJobParseAndEval.Value(durationExec - gr.Dur)\n\t\tif gr.MissingVals > 0 {\n\t\t\texecutorGraphiteMissingVals.Value(int64(gr.MissingVals))\n\t\t}\n\t\tif gr.EmptyResp != 0 {\n\t\t\texecutorGraphiteEmptyResponse.Inc(int64(gr.EmptyResp))\n\t\t}\n\t\tif 
gr.IncompleteResp != 0 {\n\t\t\texecutorGraphiteIncompleteResponse.Inc(int64(gr.IncompleteResp))\n\t\t}\n\t\tif gr.BadStart != 0 {\n\t\t\texecutorGraphiteBadStart.Inc(int64(gr.BadStart))\n\t\t}\n\t\tif gr.BadStep != 0 {\n\t\t\texecutorGraphiteBadStep.Inc(int64(gr.BadStep))\n\t\t}\n\t\tif gr.BadSteps != 0 {\n\t\t\texecutorGraphiteBadSteps.Inc(int64(gr.BadSteps))\n\t\t}\n\t}\n\n\tif err != nil {\n\t\texecutorAlertOutcomesErr.Inc(1)\n\t\treturn fmt.Errorf(\"Eval failed for job %q : %s\", job, err.Error())\n\t}\n\n\tupdateMonitorStateCmd := m.UpdateMonitorStateCommand{\n\t\tId: job.MonitorId,\n\t\tState: res,\n\t\tUpdated: job.LastPointTs, // this protects against jobs running out of order.\n\t\tChecked: preExec,\n\t}\n\tif err := bus.Dispatch(&updateMonitorStateCmd); err != nil {\n\t\t//check if we failed due to deadlock.\n\t\tif err.Error() == \"Error 1213: Deadlock found when trying to get lock; try restarting transaction\" {\n\t\t\terr = bus.Dispatch(&updateMonitorStateCmd)\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"non-fatal: failed to update monitor state: %q\", err)\n\t}\n\tif gr, ok := gr.(*graphite.GraphiteContext); ok {\n\t\trequests := \"\"\n\t\tfor _, trace := range gr.Traces {\n\t\t\tr := trace.Request\n\t\t\t// mangle trace.Response to keep the dumped out graphite\n\t\t\t// responses from crashing logstash\n\t\t\tresp := bytes.Replace(trace.Response, []byte(\"\\n\"), []byte(\"\\n> \"), -1)\n\t\t\trequests += fmt.Sprintf(\"\\ntargets: %s\\nfrom:%s\\nto:%s\\nresponse:%s\\n\", r.Targets, r.Start, r.End, resp)\n\t\t}\n\t\tlog.Debug(\"Job %s state_change=%t request traces: %s\", job, updateMonitorStateCmd.Affected > 0, requests)\n\t}\n\tif updateMonitorStateCmd.Affected > 0 && res != m.EvalResultUnknown {\n\t\t//emit a state change event.\n\t\tif job.Notifications.Enabled {\n\t\t\temails := strings.Split(job.Notifications.Addresses, \",\")\n\t\t\tif len(emails) < 1 {\n\t\t\t\tlog.Debug(\"no email addresses provided. 
OrgId: %d monitorId: %d\", job.OrgId, job.MonitorId)\n\t\t\t} else {\n\t\t\t\tfor _, email := range emails {\n\t\t\t\t\tlog.Info(\"sending email. addr=%s, orgId=%d, monitorId=%d, endpointSlug=%s, state=%s\", email, job.OrgId, job.MonitorId, job.EndpointSlug, res.String())\n\t\t\t\t}\n\t\t\t\tsendCmd := m.SendEmailCommand{\n\t\t\t\t\tTo: emails,\n\t\t\t\t\tTemplate: \"alerting_notification.html\",\n\t\t\t\t\tData: map[string]interface{}{\n\t\t\t\t\t\t\"EndpointId\": job.EndpointId,\n\t\t\t\t\t\t\"EndpointName\": job.EndpointName,\n\t\t\t\t\t\t\"EndpointSlug\": job.EndpointSlug,\n\t\t\t\t\t\t\"Settings\": job.Settings,\n\t\t\t\t\t\t\"CheckType\": job.MonitorTypeName,\n\t\t\t\t\t\t\"State\": res.String(),\n\t\t\t\t\t\t\"TimeLastData\": job.LastPointTs, // timestamp of the most recent data used\n\t\t\t\t\t\t\"TimeExec\": preExec, // when we executed the alerting rule and made the determination\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tif err := bus.Dispatch(&sendCmd); err != nil {\n\t\t\t\t\tlog.Error(0, \"failed to send email to %s. OrgId: %d monitorId: %d due to: %s\", emails, job.OrgId, job.MonitorId, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t//store the result in graphite.\n\tjob.StoreResult(res)\n\n\tswitch res {\n\tcase m.EvalResultOK:\n\t\texecutorAlertOutcomesOk.Inc(1)\n\tcase m.EvalResultWarn:\n\t\texecutorAlertOutcomesWarn.Inc(1)\n\tcase m.EvalResultCrit:\n\t\texecutorAlertOutcomesCrit.Inc(1)\n\tcase m.EvalResultUnknown:\n\t\texecutorAlertOutcomesUnkn.Inc(1)\n\t}\n\n\treturn nil\n}",
"func TakePipelineBuildJob(db gorp.SqlExecutor, store cache.Store, pbJobID int64, model string, workerName string, infos []sdk.SpawnInfo) (*sdk.PipelineBuildJob, error) {\n\tpbJob, err := GetPipelineBuildJobForUpdate(db, store, pbJobID)\n\tif err != nil {\n\t\treturn nil, sdk.WrapError(err, \"TakePipelineBuildJob> Cannot load pipeline build job\")\n\t}\n\tif pbJob.Status != sdk.StatusWaiting.String() {\n\t\tk := keyBookJob(pbJobID)\n\t\th := sdk.Hatchery{}\n\t\tif store.Get(k, &h) {\n\t\t\treturn nil, sdk.WrapError(sdk.ErrAlreadyTaken, \"TakePipelineBuildJob> job %d is not waiting status and was booked by hatchery %d. Current status:%s\", pbJobID, h.ID, pbJob.Status)\n\t\t}\n\t\treturn nil, sdk.WrapError(sdk.ErrAlreadyTaken, \"TakePipelineBuildJob> job %d is not waiting status. Current status:%s\", pbJobID, pbJob.Status)\n\t}\n\n\tpbJob.Model = model\n\tpbJob.Job.WorkerName = workerName\n\tpbJob.Start = time.Now()\n\tpbJob.Status = sdk.StatusBuilding.String()\n\n\tif err := prepareSpawnInfos(pbJob, infos); err != nil {\n\t\treturn nil, sdk.WrapError(err, \"TakePipelineBuildJob> Cannot prepare spawn infos\")\n\t}\n\n\tif err := UpdatePipelineBuildJob(db, pbJob); err != nil {\n\t\treturn nil, sdk.WrapError(err, \"TakePipelineBuildJob>Cannot update model on pipeline build job\")\n\t}\n\treturn pbJob, nil\n}",
"func createThumbnail(\n\tctx context.Context,\n\tsrc types.Path,\n\timg image.Image,\n\tconfig types.ThumbnailSize,\n\tmediaMetadata *types.MediaMetadata,\n\tactiveThumbnailGeneration *types.ActiveThumbnailGeneration,\n\tmaxThumbnailGenerators int,\n\tdb storage.Database,\n\tlogger *log.Entry,\n) (busy bool, errorReturn error) {\n\tlogger = logger.WithFields(log.Fields{\n\t\t\"Width\": config.Width,\n\t\t\"Height\": config.Height,\n\t\t\"ResizeMethod\": config.ResizeMethod,\n\t})\n\n\t// Check if request is larger than original\n\tif config.Width >= img.Bounds().Dx() && config.Height >= img.Bounds().Dy() {\n\t\treturn false, nil\n\t}\n\n\tdst := GetThumbnailPath(src, config)\n\n\t// Note: getActiveThumbnailGeneration uses mutexes and conditions from activeThumbnailGeneration\n\tisActive, busy, err := getActiveThumbnailGeneration(dst, config, activeThumbnailGeneration, maxThumbnailGenerators, logger)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif busy {\n\t\treturn true, nil\n\t}\n\n\tif isActive {\n\t\t// Note: This is an active request that MUST broadcastGeneration to wake up waiting goroutines!\n\t\t// Note: broadcastGeneration uses mutexes and conditions from activeThumbnailGeneration\n\t\tdefer func() {\n\t\t\t// Note: errorReturn is the named return variable so we wrap this in a closure to re-evaluate the arguments at defer-time\n\t\t\t// if err := recover(); err != nil {\n\t\t\t// \tbroadcastGeneration(dst, activeThumbnailGeneration, config, err.(error), logger)\n\t\t\t// \tpanic(err)\n\t\t\t// }\n\t\t\tbroadcastGeneration(dst, activeThumbnailGeneration, config, errorReturn, logger)\n\t\t}()\n\t}\n\n\texists, err := isThumbnailExists(ctx, dst, config, mediaMetadata, db, logger)\n\tif err != nil || exists {\n\t\treturn false, err\n\t}\n\n\tstart := time.Now()\n\twidth, height, err := adjustSize(dst, img, config.Width, config.Height, config.ResizeMethod == types.Crop, logger)\n\tif err != nil {\n\t\treturn false, 
err\n\t}\n\tlogger.WithFields(log.Fields{\n\t\t\"ActualWidth\": width,\n\t\t\"ActualHeight\": height,\n\t\t\"processTime\": time.Since(start),\n\t}).Info(\"Generated thumbnail\")\n\n\tstat, err := os.Stat(string(dst))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tthumbnailMetadata := &types.ThumbnailMetadata{\n\t\tMediaMetadata: &types.MediaMetadata{\n\t\t\tMediaID: mediaMetadata.MediaID,\n\t\t\tOrigin: mediaMetadata.Origin,\n\t\t\t// Note: the code currently always creates a JPEG thumbnail\n\t\t\tContentType: types.ContentType(\"image/jpeg\"),\n\t\t\tFileSizeBytes: types.FileSizeBytes(stat.Size()),\n\t\t},\n\t\tThumbnailSize: types.ThumbnailSize{\n\t\t\tWidth: config.Width,\n\t\t\tHeight: config.Height,\n\t\t\tResizeMethod: config.ResizeMethod,\n\t\t},\n\t}\n\n\terr = db.StoreThumbnail(ctx, thumbnailMetadata)\n\tif err != nil {\n\t\tlogger.WithError(err).WithFields(log.Fields{\n\t\t\t\"ActualWidth\": width,\n\t\t\t\"ActualHeight\": height,\n\t\t}).Error(\"Failed to store thumbnail metadata in database.\")\n\t\treturn false, err\n\t}\n\n\treturn false, nil\n}",
"func StartAndCheckJobBatchSuccess(testName string) {\n\tnspMetrics.AddStartAndCheckJobBatchSuccess()\n\tmetrics.AddTestOne(testName, nspMetrics.Success)\n\tmetrics.AddTestZero(testName, nspMetrics.Errors)\n\tlogger.Infof(\"Test %s: SUCCESS\", testName)\n}",
"func DockerBuild(w http.ResponseWriter, req *http.Request) (int, string) {\n\t// TODO: check content type\n\n\tbody, err := ioutil.ReadAll(req.Body)\n\tdefer req.Body.Close()\n\tif err != nil {\n\t\treturn 400, \"400 bad request\"\n\t}\n\n\tspec, err := job.NewSpec(body)\n\tif err != nil {\n\t\treturn 400, \"400 bad request\"\n\t}\n\n\treturn processJobHelper(spec, w, req)\n}",
"func (restorer *APTRestorer) buildState(message *nsq.Message) (*models.RestoreState, error) {\n\trestoreState := models.NewRestoreState(message)\n\trestorer.Context.MessageLog.Info(\"Asking Pharos for WorkItem %s\", string(message.Body))\n\tworkItem, err := GetWorkItem(message, restorer.Context)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trestoreState.WorkItem = workItem\n\trestorer.Context.MessageLog.Info(\"Got WorkItem %d\", workItem.Id)\n\n\t// Get the saved state of this item, if there is one.\n\tif workItem.WorkItemStateId != nil {\n\t\trestorer.Context.MessageLog.Info(\"Asking Pharos for WorkItemState %d\", *workItem.WorkItemStateId)\n\t\tresp := restorer.Context.PharosClient.WorkItemStateGet(*workItem.WorkItemStateId)\n\t\tif resp.Error != nil {\n\t\t\trestorer.Context.MessageLog.Warning(\"Could not retrieve WorkItemState with id %d: %v\",\n\t\t\t\t*workItem.WorkItemStateId, resp.Error)\n\t\t} else {\n\t\t\tworkItemState := resp.WorkItemState()\n\t\t\tsavedState := &models.RestoreState{}\n\t\t\terr = json.Unmarshal([]byte(workItemState.State), savedState)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Could not unmarshal WorkItemState.State: %v\", err)\n\t\t\t}\n\t\t\trestoreState.PackageSummary = savedState.PackageSummary\n\t\t\trestoreState.ValidateSummary = savedState.ValidateSummary\n\t\t\trestoreState.CopySummary = savedState.CopySummary\n\t\t\trestoreState.RecordSummary = savedState.RecordSummary\n\t\t\trestoreState.LocalBagDir = savedState.LocalBagDir\n\t\t\trestoreState.LocalTarFile = savedState.LocalTarFile\n\t\t\trestoreState.RestoredToUrl = savedState.RestoredToUrl\n\t\t\trestoreState.CopiedToRestorationAt = savedState.CopiedToRestorationAt\n\t\t\trestorer.Context.MessageLog.Info(\"Got WorkItemState %d\", *workItem.WorkItemStateId)\n\t\t}\n\t}\n\n\t// Get the intellectual object. 
This should not have changed\n\t// during the processing of this request, because Pharos does\n\t// not permit delete operations while a restore is pending.\n\trestorer.Context.MessageLog.Info(\"Asking Pharos for IntellectualObject %s\",\n\t\trestoreState.WorkItem.ObjectIdentifier)\n\tresponse := restorer.Context.PharosClient.IntellectualObjectGet(\n\t\trestoreState.WorkItem.ObjectIdentifier, true, false)\n\tif response.Error != nil {\n\t\treturn nil, fmt.Errorf(\"Error retrieving IntellectualObject %s from Pharos: %v\", restoreState.WorkItem.ObjectIdentifier, response.Error)\n\t}\n\trestoreState.IntellectualObject = response.IntellectualObject()\n\trestorer.Context.MessageLog.Info(\"Got IntellectualObject %s\",\n\t\trestoreState.WorkItem.ObjectIdentifier)\n\n\t// LocalBagDir will not be set if we were unable to retrieve\n\t// WorkItemState above.\n\tif restoreState.LocalBagDir == \"\" {\n\t\trestoreState.LocalBagDir = filepath.Join(\n\t\t\trestorer.Context.Config.RestoreDirectory,\n\t\t\trestoreState.IntellectualObject.Identifier)\n\t}\n\trestorer.Context.MessageLog.Info(\"Set local bag dir to %s\", restoreState.LocalBagDir)\n\treturn restoreState, nil\n}",
"func (fsm *DeployFSMContext) checkServiceReady() (bool, error) {\n\truntime := fsm.Runtime\n\t// do not check if nil for compatibility\n\tif fsm.Deployment.Extra.ServicePhaseStartAt != nil {\n\t\tstartCheckPoint := fsm.Deployment.Extra.ServicePhaseStartAt.Add(30 * time.Second)\n\t\tif time.Now().Before(startCheckPoint) {\n\t\t\tfsm.pushLog(fmt.Sprintf(\"checking too early, delay to: %s\", startCheckPoint.String()))\n\t\t\t// too early to check\n\t\t\treturn false, nil\n\t\t}\n\t}\n\tisReplicasZero := false\n\tfor _, s := range fsm.Spec.Services {\n\t\tif s.Deployments.Replicas == 0 {\n\t\t\tisReplicasZero = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif isReplicasZero {\n\t\tfsm.pushLog(\"checking status by inspect\")\n\t\t// we do double check to prevent `fake Healthy`\n\t\t// runtime.ScheduleName must have\n\t\tsg, err := fsm.getServiceGroup()\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn sg.Status == \"Ready\" || sg.Status == \"Healthy\", nil\n\t}\n\n\t// 获取addon状态\n\tserviceGroup, err := fsm.getServiceGroup()\n\tif err != nil {\n\t\tfsm.pushLog(fmt.Sprintf(\"获取service状态失败,%s\", err.Error()))\n\t\treturn false, nil\n\t}\n\tfsm.pushLog(fmt.Sprintf(\"checking status: %s, servicegroup: %v\", serviceGroup.Status, runtime.ScheduleName))\n\t// 如果状态是failed,说明服务或者job运行失败\n\tif serviceGroup.Status == apistructs.StatusFailed {\n\t\treturn false, errors.New(serviceGroup.LastMessage)\n\t}\n\t// 如果状态是ready或者healthy,说明服务已经发起来了\n\truntimeStatus := apistructs.RuntimeStatusUnHealthy\n\tif serviceGroup.Status == apistructs.StatusReady || serviceGroup.Status == apistructs.StatusHealthy {\n\t\truntimeStatus = apistructs.RuntimeStatusHealthy\n\t}\n\truntimeItem := fsm.Runtime\n\tif runtimeItem.Status != runtimeStatus {\n\t\truntimeItem.Status = runtimeStatus\n\t\tif err := fsm.db.UpdateRuntime(runtime); err != nil {\n\t\t\tlogrus.Errorf(\"failed to update runtime status changed, runtime: %v, err: %v\", runtime.ID, err.Error())\n\t\t\treturn false, nil\n\t\t}\n\t}\n\tif 
runtimeStatus == apistructs.RuntimeStatusHealthy {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}",
"func (e *dockerExec) create(ctx context.Context) (execState, error) {\n\tif _, err := e.client.ContainerInspect(ctx, e.containerName()); err == nil {\n\t\treturn execCreated, nil\n\t} else if !docker.IsErrNotFound(err) {\n\t\treturn execInit, errors.E(\"ContainerInspect\", e.containerName(), kind(err), err)\n\t}\n\tif err := e.Executor.ensureImage(ctx, e.Config.Image); err != nil {\n\t\te.Log.Errorf(\"error ensuring image %s: %v\", e.Config.Image, err)\n\t\treturn execInit, errors.E(\"ensureimage\", e.Config.Image, err)\n\t}\n\t// Map the products to input arguments and volume bindings for\n\t// the container. Currently we map the whole repository (named by\n\t// the digest) and then include the cut in the arguments passed to\n\t// the job.\n\targs := make([]interface{}, len(e.Config.Args))\n\tfor i, iv := range e.Config.Args {\n\t\tif iv.Out {\n\t\t\twhich := strconv.Itoa(iv.Index)\n\t\t\targs[i] = path.Join(\"/return\", which)\n\t\t} else {\n\t\t\tflat := iv.Fileset.Flatten()\n\t\t\targv := make([]string, len(flat))\n\t\t\tfor j, jv := range flat {\n\t\t\t\targPath := fmt.Sprintf(\"arg/%d/%d\", i, j)\n\t\t\t\tbinds := map[string]digest.Digest{}\n\t\t\t\tfor path, file := range jv.Map {\n\t\t\t\t\tbinds[path] = file.ID\n\t\t\t\t}\n\t\t\t\tif err := e.repo.Materialize(e.path(argPath), binds); err != nil {\n\t\t\t\t\treturn execInit, err\n\t\t\t\t}\n\t\t\t\targv[j] = \"/\" + argPath\n\t\t\t}\n\t\t\targs[i] = strings.Join(argv, \" \")\n\t\t}\n\t}\n\t// Set up temporary directory.\n\tos.MkdirAll(e.path(\"tmp\"), 0777)\n\tos.MkdirAll(e.path(\"return\"), 0777)\n\thostConfig := &container.HostConfig{\n\t\tBinds: []string{\n\t\t\te.hostPath(\"arg\") + \":/arg\",\n\t\t\te.hostPath(\"tmp\") + \":/tmp\",\n\t\t\te.hostPath(\"return\") + \":/return\",\n\t\t},\n\t\tNetworkMode: container.NetworkMode(\"host\"),\n\t\t// Try to ensure that jobs we control get killed before the reflowlet,\n\t\t// so that we don't lose adjacent tasks unnecessarily and so that\n\t\t// errors are 
more sensible to the user.\n\t\tOomScoreAdj: 1000,\n\t}\n\tif e.Config.NeedDockerAccess {\n\t\thostConfig.Binds = append(hostConfig.Binds, \"/var/run/docker.sock:/var/run/docker.sock\")\n\t}\n\n\t// Restrict docker memory usage if specified by the user.\n\t// If the docker container memory limit (the cgroup limit) is exceeded\n\t// before the OOM Killer kills the process, the following message\n\t// is recorded in /dev/kmsg:\n\t// Memory cgroup out of memory: Kill process <pid>\n\tif mem := e.Config.Resources[\"mem\"]; mem > 0 && e.Executor.HardMemLimit {\n\t\thostConfig.Resources.Memory = int64(mem)\n\t\thostConfig.Resources.MemorySwap = int64(mem) + int64(hardLimitSwapMem)\n\t}\n\n\tenv := []string{\n\t\t\"tmp=/tmp\",\n\t\t\"TMPDIR=/tmp\",\n\t\t\"HOME=/tmp\",\n\t}\n\tif outputs := e.Config.OutputIsDir; outputs != nil {\n\t\tfor i, isdir := range outputs {\n\t\t\tif isdir {\n\t\t\t\tos.MkdirAll(e.path(\"return\", strconv.Itoa(i)), 0777)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tenv = append(env, \"out=/return/default\")\n\t}\n\t// TODO(marius): this is a hack for Earl to use the AWS tool.\n\tif e.Config.NeedAWSCreds {\n\t\tcreds, err := e.Executor.AWSCreds.Get()\n\t\tif err != nil {\n\t\t\t// We mark this as temporary, because most of the time it is.\n\t\t\t// TODO(marius): can we get better error classification from\n\t\t\t// the AWS SDK?\n\t\t\treturn execInit, errors.E(\"run\", e.id, errors.Temporary, err)\n\t\t}\n\t\t// TODO(marius): region?\n\t\tenv = append(env, \"AWS_ACCESS_KEY_ID=\"+creds.AccessKeyID)\n\t\tenv = append(env, \"AWS_SECRET_ACCESS_KEY=\"+creds.SecretAccessKey)\n\t\tenv = append(env, \"AWS_SESSION_TOKEN=\"+creds.SessionToken)\n\t}\n\tconfig := &container.Config{\n\t\tImage: e.Config.Image,\n\t\t// We use a login shell here as many Docker images are configured\n\t\t// with /root/.profile, etc.\n\t\tEntrypoint: []string{\"/bin/bash\", \"-e\", \"-l\", \"-o\", \"pipefail\", \"-c\", fmt.Sprintf(e.Config.Cmd, args...)},\n\t\tCmd: []string{},\n\t\tEnv: 
env,\n\t\tLabels: map[string]string{\"reflow-id\": e.id.Hex()},\n\t\tUser: dockerUser,\n\t}\n\tnetworkingConfig := &network.NetworkingConfig{}\n\tif _, err := e.client.ContainerCreate(ctx, config, hostConfig, networkingConfig, e.containerName()); err != nil {\n\t\treturn execInit, errors.E(\n\t\t\t\"ContainerCreate\",\n\t\t\tkind(err),\n\t\t\te.containerName(),\n\t\t\tfmt.Sprint(config), fmt.Sprint(hostConfig), fmt.Sprint(networkingConfig),\n\t\t\terr,\n\t\t)\n\t}\n\treturn execCreated, nil\n}",
"func TestStartJob(t *testing.T) {\n\tc := &kc{}\n\tbr := BuildRequest{\n\t\tOrg: \"owner\",\n\t\tRepo: \"kube\",\n\t\tBaseRef: \"master\",\n\t\tBaseSHA: \"abc\",\n\t\tPulls: []Pull{\n\t\t\t{\n\t\t\t\tNumber: 5,\n\t\t\t\tAuthor: \"a\",\n\t\t\t\tSHA: \"123\",\n\t\t\t},\n\t\t},\n\t}\n\tif _, err := startJob(c, \"job-name\", \"Context\", br); err != nil {\n\t\tt.Fatalf(\"Didn't expect error starting job: %v\", err)\n\t}\n\tlabels := c.job.Metadata.Labels\n\tif labels[\"jenkins-job-name\"] != \"job-name\" {\n\t\tt.Errorf(\"Jenkins job name label incorrect: %s\", labels[\"jenkins-job-name\"])\n\t}\n\tif labels[\"owner\"] != \"owner\" {\n\t\tt.Errorf(\"Owner label incorrect: %s\", labels[\"owner\"])\n\t}\n\tif labels[\"repo\"] != \"kube\" {\n\t\tt.Errorf(\"Repo label incorrect: %s\", labels[\"kube\"])\n\t}\n\tif labels[\"pr\"] != \"5\" {\n\t\tt.Errorf(\"PR label incorrect: %s\", labels[\"pr\"])\n\t}\n}",
"func (b *Build) IsPushImageSuccess() bool {\n\tif b.event.Version.Operation == api.DeployOperation {\n\t\treturn true\n\t}\n\treturn (b.status & pushImageSuccess) != 0\n}",
"func isRetryableTerminationState(s *v1.ContainerStateTerminated) bool {\n\t// TODO(jlewi): Need to match logic in\n\t// https://cs.corp.google.com/piper///depot/google3/cloud/ml/beta/job/training_job_state_util.cc?l=88\n\tif s.Reason == \"OOMKilled\" {\n\t\t// If the user's process causes an OOM and Docker kills the container,\n\t\t// the termination reason of ContainerState will be specified to\n\t\t// 'OOMKilled'. In this case, we can't assume this to be a retryable error.\n\t\t//\n\t\t// This check should happen before checking the termination log, since\n\t\t// if the container terminated with an OOM, the termination log may not\n\t\t// be written.\n\t\treturn false\n\t}\n\n\tif s.Message == \"\" {\n\t\t// launcher.sh should produce a termination log message. So if Kubernetes\n\t\t// doesn't report a termmination message then we can infer that\n\t\t// launcher.sh didn't exit cleanly. For example, the container might\n\t\t// have failed to start. We consider this a retryable error regardless\n\t\t// of the actual exit code.\n\t\treturn true\n\t}\n\n\t// TODO(jlewi): Should we use the exit code reported in the termination\n\t// log message and not the ExitCode reported by the container.\n\n\tif s.ExitCode >= 0 && s.ExitCode <= 127 {\n\t\t// For the exit_code in [0, 127]:\n\t\t// 0 means success,\n\t\t// 1 - 127 corresponds to permanent user errors.\n\t\t// We don't want to retry for both cases.\n\t\t// More info about exit status can be found in:\n\t\t// https://www.gnu.org/software/bash/manual/html_node/Exit-Status.html\n\t\treturn false\n\t}\n\n\t// For the remaining cases that exit_code from workers that doesn't\n\t// fall into [0, 127]. They can be:\n\t// 137 corresponds to SIGKILL,\n\t// 143 corresponds to SIGTERM,\n\t// other values that have undefined behavior.\n\t// We treat them as internal errors for now and all the internal errors\n\t// will be retired.\n\treturn true\n}",
"func updateTaskState(task *api.Task) api.TaskStatus {\n\t//The task is the minimum status of all its essential containers unless the\n\t//status is terminal in which case it's that status\n\tlog.Debug(\"Updating task\", \"task\", task)\n\n\t// minContainerStatus is the minimum status of all essential containers\n\tminContainerStatus := api.ContainerDead + 1\n\t// minContainerStatus is the minimum status of all containers to be used in\n\t// the edge case of no essential containers\n\tabsoluteMinContainerStatus := minContainerStatus\n\tfor _, cont := range task.Containers {\n\t\tlog.Debug(\"On container\", \"cont\", cont)\n\t\tif cont.KnownStatus < absoluteMinContainerStatus {\n\t\t\tabsoluteMinContainerStatus = cont.KnownStatus\n\t\t}\n\t\tif !cont.Essential {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Terminal states\n\t\tif cont.KnownStatus == api.ContainerStopped {\n\t\t\tif task.KnownStatus < api.TaskStopped {\n\t\t\t\ttask.KnownStatus = api.TaskStopped\n\t\t\t\treturn task.KnownStatus\n\t\t\t}\n\t\t} else if cont.KnownStatus == api.ContainerDead {\n\t\t\tif task.KnownStatus < api.TaskDead {\n\t\t\t\ttask.KnownStatus = api.TaskDead\n\t\t\t\treturn task.KnownStatus\n\t\t\t}\n\t\t}\n\t\t// Non-terminal\n\t\tif cont.KnownStatus < minContainerStatus {\n\t\t\tminContainerStatus = cont.KnownStatus\n\t\t}\n\t}\n\n\tif minContainerStatus == api.ContainerDead+1 {\n\t\tlog.Warn(\"Task with no essential containers; all properly formed tasks should have at least one essential container\", \"task\", task)\n\n\t\t// If there's no essential containers, let's just assume the container\n\t\t// with the earliest status is essential and proceed.\n\t\tminContainerStatus = absoluteMinContainerStatus\n\t}\n\n\tlog.Info(\"MinContainerStatus is \" + minContainerStatus.String())\n\n\tif minContainerStatus == api.ContainerCreated {\n\t\tif task.KnownStatus < api.TaskCreated {\n\t\t\ttask.KnownStatus = api.TaskCreated\n\t\t\treturn task.KnownStatus\n\t\t}\n\t} else if minContainerStatus == 
api.ContainerRunning {\n\t\tif task.KnownStatus < api.TaskRunning {\n\t\t\ttask.KnownStatus = api.TaskRunning\n\t\t\treturn task.KnownStatus\n\t\t}\n\t} else if minContainerStatus == api.ContainerStopped {\n\t\tif task.KnownStatus < api.TaskStopped {\n\t\t\ttask.KnownStatus = api.TaskStopped\n\t\t\treturn task.KnownStatus\n\t\t}\n\t} else if minContainerStatus == api.ContainerDead {\n\t\tif task.KnownStatus < api.TaskDead {\n\t\t\ttask.KnownStatus = api.TaskDead\n\t\t\treturn task.KnownStatus\n\t\t}\n\t}\n\treturn api.TaskStatusNone\n}",
"func WorkflowKeyAvailability(key string) ([]string, []string) {\n\tswitch key {\n\tcase \"jobs.<job_id>.outputs.<output_id>\":\n\t\treturn []string{\"env\", \"github\", \"inputs\", \"job\", \"matrix\", \"needs\", \"runner\", \"secrets\", \"steps\", \"strategy\", \"vars\"}, []string{}\n\tcase \"jobs.<job_id>.steps.continue-on-error\", \"jobs.<job_id>.steps.env\", \"jobs.<job_id>.steps.name\", \"jobs.<job_id>.steps.run\", \"jobs.<job_id>.steps.timeout-minutes\", \"jobs.<job_id>.steps.with\", \"jobs.<job_id>.steps.working-directory\":\n\t\treturn []string{\"env\", \"github\", \"inputs\", \"job\", \"matrix\", \"needs\", \"runner\", \"secrets\", \"steps\", \"strategy\", \"vars\"}, []string{\"hashfiles\"}\n\tcase \"jobs.<job_id>.container.env.<env_id>\", \"jobs.<job_id>.services.<service_id>.env.<env_id>\":\n\t\treturn []string{\"env\", \"github\", \"inputs\", \"job\", \"matrix\", \"needs\", \"runner\", \"secrets\", \"strategy\", \"vars\"}, []string{}\n\tcase \"jobs.<job_id>.environment.url\":\n\t\treturn []string{\"env\", \"github\", \"inputs\", \"job\", \"matrix\", \"needs\", \"runner\", \"steps\", \"strategy\", \"vars\"}, []string{}\n\tcase \"jobs.<job_id>.steps.if\":\n\t\treturn []string{\"env\", \"github\", \"inputs\", \"job\", \"matrix\", \"needs\", \"runner\", \"steps\", \"strategy\", \"vars\"}, []string{\"always\", \"cancelled\", \"failure\", \"hashfiles\", \"success\"}\n\tcase \"jobs.<job_id>.container.credentials\", \"jobs.<job_id>.services.<service_id>.credentials\":\n\t\treturn []string{\"env\", \"github\", \"inputs\", \"matrix\", \"needs\", \"secrets\", \"strategy\", \"vars\"}, []string{}\n\tcase \"jobs.<job_id>.defaults.run\":\n\t\treturn []string{\"env\", \"github\", \"inputs\", \"matrix\", \"needs\", \"strategy\", \"vars\"}, []string{}\n\tcase \"on.workflow_call.outputs.<output_id>.value\":\n\t\treturn []string{\"github\", \"inputs\", \"jobs\", \"vars\"}, []string{}\n\tcase \"jobs.<job_id>.env\", \"jobs.<job_id>.secrets.<secrets_id>\":\n\t\treturn 
[]string{\"github\", \"inputs\", \"matrix\", \"needs\", \"secrets\", \"strategy\", \"vars\"}, []string{}\n\tcase \"jobs.<job_id>.concurrency\", \"jobs.<job_id>.container\", \"jobs.<job_id>.container.image\", \"jobs.<job_id>.continue-on-error\", \"jobs.<job_id>.environment\", \"jobs.<job_id>.name\", \"jobs.<job_id>.runs-on\", \"jobs.<job_id>.services\", \"jobs.<job_id>.timeout-minutes\", \"jobs.<job_id>.with.<with_id>\":\n\t\treturn []string{\"github\", \"inputs\", \"matrix\", \"needs\", \"strategy\", \"vars\"}, []string{}\n\tcase \"jobs.<job_id>.strategy\":\n\t\treturn []string{\"github\", \"inputs\", \"needs\", \"vars\"}, []string{}\n\tcase \"jobs.<job_id>.if\":\n\t\treturn []string{\"github\", \"inputs\", \"needs\", \"vars\"}, []string{\"always\", \"cancelled\", \"failure\", \"success\"}\n\tcase \"env\":\n\t\treturn []string{\"github\", \"inputs\", \"secrets\", \"vars\"}, []string{}\n\tcase \"concurrency\", \"on.workflow_call.inputs.<inputs_id>.default\", \"run-name\":\n\t\treturn []string{\"github\", \"inputs\", \"vars\"}, []string{}\n\tdefault:\n\t\treturn nil, nil\n\t}\n}",
"func (b *Build) IsRunning() bool {\n\treturn (b.Status == StatusStarted || b.Status == StatusEnqueue)\n}",
"func (manager *Manager) Start(projectUpdateID string) {\n\tswitch manager.state {\n\tcase notRunningState:\n\t\t// Start elasticsearch update job async and return the job ID\n\t\tesJobIDs, err := manager.startProjectTagUpdater()\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"Failed to start Elasticsearch Project rule update job projectUpdateID: %q\",\n\t\t\t\tprojectUpdateID)\n\t\t\tmanager.sendFailedEvent(fmt.Sprintf(\n\t\t\t\t\"Failed to start Elasticsearch Project rule update job projectUpdateID: %q\", projectUpdateID),\n\t\t\t\tprojectUpdateID)\n\t\t\treturn\n\t\t}\n\t\tmanager.esJobIDs = esJobIDs\n\n\t\tmanager.projectUpdateID = projectUpdateID\n\t\tmanager.state = runningState\n\t\tgo manager.waitingForJobToComplete()\n\tcase runningState:\n\t\tif manager.projectUpdateID == projectUpdateID {\n\t\t\t// Do nothing. The job has ready started\n\t\t} else {\n\t\t\tmanager.sendFailedEvent(fmt.Sprintf(\n\t\t\t\t\"Can not start another project update %q is running\", manager.projectUpdateID),\n\t\t\t\tprojectUpdateID)\n\t\t}\n\tdefault:\n\t\t// error state not found\n\t\tmanager.sendFailedEvent(fmt.Sprintf(\n\t\t\t\"Internal error state %q eventID %q\", manager.state, manager.projectUpdateID),\n\t\t\tprojectUpdateID)\n\t}\n}",
"func launchStatusCheck(conf *Config) error {\n\t// verify we can connect to message bus\n\tconn, err := Connect(conf)\n\tdefer conn.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Info().Msg(\"OK ... AMQP Connection\")\n\n\t// verify we get a message channel\n\tch, err := GetAMQPChannel(conn)\n\tdefer ch.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Info().Msg(\"OK ... AMQP Channel\")\n\n\t// verify the pre-configured exchange exists\n\tif err := CheckExchangeExists(ch, conf.Publisher.Exchange); err != nil {\n\t\treturn err\n\t}\n\tlog.Info().\n\t\tStr(\"exchange\", conf.Publisher.Exchange.Name).\n\t\tMsg(\"OK ... AMQP Exchange\")\n\n\treturn nil\n}",
"func checkInstallStatus(cs kube.Cluster) error {\n\tscopes.Framework.Infof(\"checking IstioOperator CR status\")\n\tgvr := schema.GroupVersionResource{\n\t\tGroup: \"install.istio.io\",\n\t\tVersion: \"v1alpha1\",\n\t\tResource: \"istiooperators\",\n\t}\n\n\tvar unhealthyCN []string\n\tretryFunc := func() error {\n\t\tus, err := cs.GetUnstructured(gvr, IstioNamespace, \"test-istiocontrolplane\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to get istioOperator resource: %v\", err)\n\t\t}\n\t\tusIOPStatus := us.UnstructuredContent()[\"status\"]\n\t\tif usIOPStatus == nil {\n\t\t\tif _, err := cs.CoreV1().Services(OperatorNamespace).Get(context.TODO(), \"istio-operator\",\n\t\t\t\tkubeApiMeta.GetOptions{}); err != nil {\n\t\t\t\treturn fmt.Errorf(\"istio operator svc is not ready: %v\", err)\n\t\t\t}\n\t\t\tif _, err := cs.CheckPodsAreReady(kube2.NewPodFetch(cs.Accessor, OperatorNamespace)); err != nil {\n\t\t\t\treturn fmt.Errorf(\"istio operator pod is not ready: %v\", err)\n\t\t\t}\n\n\t\t\treturn fmt.Errorf(\"status not found from the istioOperator resource\")\n\t\t}\n\t\tusIOPStatus = usIOPStatus.(map[string]interface{})\n\t\tiopStatusString, err := json.Marshal(usIOPStatus)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to marshal istioOperator status: %v\", err)\n\t\t}\n\t\tstatus := &api.InstallStatus{}\n\t\tjspb := jsonpb.Unmarshaler{AllowUnknownFields: true}\n\t\tif err := jspb.Unmarshal(bytes.NewReader(iopStatusString), status); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal istioOperator status: %v\", err)\n\t\t}\n\t\terrs := util.Errors{}\n\t\tunhealthyCN = []string{}\n\t\tif status.Status != api.InstallStatus_HEALTHY {\n\t\t\terrs = util.AppendErr(errs, fmt.Errorf(\"got IstioOperator status: %v\", status.Status))\n\t\t}\n\n\t\tfor cn, cnstatus := range status.ComponentStatus {\n\t\t\tif cnstatus.Status != api.InstallStatus_HEALTHY {\n\t\t\t\tunhealthyCN = append(unhealthyCN, cn)\n\t\t\t\terrs = util.AppendErr(errs, 
fmt.Errorf(\"got component: %s status: %v\", cn, cnstatus.Status))\n\t\t\t}\n\t\t}\n\t\treturn errs.ToError()\n\t}\n\terr := retry.UntilSuccess(retryFunc, retry.Timeout(retryTimeOut), retry.Delay(retryDelay))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"istioOperator status is not healthy: %v\", err)\n\t}\n\treturn nil\n}",
"func UpdatePipelineBuildJobStatus(db gorp.SqlExecutor, pbJob *sdk.PipelineBuildJob, status sdk.Status) error {\n\tvar query string\n\tquery = `SELECT status FROM pipeline_build_job WHERE id = $1 FOR UPDATE`\n\tvar currentStatus string\n\tif err := db.QueryRow(query, pbJob.ID).Scan(¤tStatus); err != nil {\n\t\treturn sdk.WrapError(err, \"UpdatePipelineBuildJobStatus> Cannot lock pipeline build job %d\", pbJob.ID)\n\t}\n\n\tswitch status {\n\tcase sdk.StatusBuilding:\n\t\tif currentStatus != sdk.StatusWaiting.String() {\n\t\t\treturn fmt.Errorf(\"UpdatePipelineBuildJobStatus> Cannot update status of PipelineBuildJob %d to %s, expected current status %s, got %s\",\n\t\t\t\tpbJob.ID, status, sdk.StatusWaiting, currentStatus)\n\t\t}\n\t\tpbJob.Start = time.Now()\n\t\tpbJob.Status = status.String()\n\n\tcase sdk.StatusFail, sdk.StatusSuccess, sdk.StatusDisabled, sdk.StatusSkipped, sdk.StatusStopped:\n\t\tif currentStatus != string(sdk.StatusWaiting) && currentStatus != string(sdk.StatusBuilding) && status != sdk.StatusDisabled && status != sdk.StatusSkipped {\n\t\t\tlog.Debug(\"UpdatePipelineBuildJobStatus> Status is %s, cannot update %d to %s\", currentStatus, pbJob.ID, status)\n\t\t\t// too late, Nate\n\t\t\treturn nil\n\t\t}\n\t\tpbJob.Done = time.Now()\n\t\tpbJob.Status = status.String()\n\tdefault:\n\t\treturn fmt.Errorf(\"UpdatePipelineBuildJobStatus> Cannot update PipelineBuildJob %d to status %v\", pbJob.ID, status.String())\n\t}\n\n\tif err := UpdatePipelineBuildJob(db, pbJob); err != nil {\n\t\treturn sdk.WrapError(err, \"UpdatePipelineBuildJobStatus> Cannot update pipeline build job %d\", pbJob.ID)\n\t}\n\n\tpb, errLoad := LoadPipelineBuildByID(db, pbJob.PipelineBuildID)\n\tif errLoad != nil {\n\t\treturn sdk.WrapError(errLoad, \"UpdatePipelineBuildJobStatus> Cannot load pipeline build %d: %s\", pbJob.PipelineBuildID, errLoad)\n\t}\n\n\tevent.PublishActionBuild(pb, pbJob)\n\treturn nil\n}",
"func TestDaemon_JobStatusWithNoCache(t *testing.T) {\n\td, start, clean, _, _, restart := mockDaemon(t)\n\tstart()\n\tdefer clean()\n\tw := newWait(t)\n\n\tctx := context.Background()\n\t// Perform update\n\tid := updatePolicy(ctx, t, d)\n\n\t// Make sure the job finishes first\n\tw.ForJobSucceeded(d, id)\n\n\t// Clear the cache like we've just restarted\n\trestart(func() {\n\t\td.JobStatusCache = &job.StatusCache{Size: 100}\n\t})\n\n\t// Now check if we can get the job status from the commit\n\tw.ForJobSucceeded(d, id)\n}",
"func (p Pipeline) BuildImages(force bool) error {\n\tif Verbose {\n\t\tpipelineLogger.Printf(\"Build Images:\")\n\t}\n\tcount, elapsedTime, totalElapsedTime, err := p.runCommand(runConfig{\n\t\tselection: func(step Step) bool {\n\t\t\treturn step.IsBuildable()\n\t\t},\n\t\trun: func(runner Runner, step Step) func() error {\n\t\t\treturn runner.ImageBuilder(step, force)\n\t\t},\n\t})\n\tif Verbose {\n\t\tpipelineLogger.Printf(\"Build %d images in %s\", count, elapsedTime)\n\t\tpipelineLogger.Printf(\"Total time spent building images: %s\", totalElapsedTime)\n\t}\n\treturn err\n}",
"func Application(cfg config.Config, suiteName string) error {\n\tlogger = log.WithFields(log.Fields{\"Suite\": suiteName})\n\n\t// Trigger build via web hook\n\terr := httpUtils.TriggerWebhookPush(cfg, defaults.App2BranchToBuildFrom, defaults.App2CommitID, defaults.App2SSHRepository, defaults.App2SharedSecret, logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogger.Infof(\"First job was triggered\")\n\n\t// Get job\n\tjobSummary, err := test.WaitForCheckFuncWithValueOrTimeout(cfg, func(cfg config.Config) (*models.JobSummary, error) {\n\t\treturn job.GetLastPipelineJobWithStatus(cfg, defaults.App2Name, \"Running\", logger)\n\t}, logger)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tjobName := jobSummary.Name\n\tlogger.Infof(\"First job name: %s\", jobName)\n\n\t// Another build should cause second job to queue up\n\t// Trigger another build via web hook\n\ttime.Sleep(1 * time.Second)\n\terr = httpUtils.TriggerWebhookPush(cfg, defaults.App2BranchToBuildFrom, defaults.App2CommitID, defaults.App2SSHRepository, defaults.App2SharedSecret, logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogger.Infof(\"Second job was triggered\")\n\n\terr = test.WaitForCheckFuncOrTimeout(cfg, func(cfg config.Config) error {\n\t\t_, err := job.GetLastPipelineJobWithStatus(cfg, defaults.App2Name, \"Queued\", logger)\n\t\treturn err\n\t}, logger)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Info(\"Second job was queued\")\n\tjobStatus, err := test.WaitForCheckFuncWithValueOrTimeout(cfg, func(cfg config.Config) (string, error) {\n\t\treturn job.IsDone(cfg, defaults.App2Name, jobName, logger)\n\t}, logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif jobStatus != \"Succeeded\" {\n\t\treturn fmt.Errorf(\"expected job status was Success, but got %s\", jobStatus)\n\t}\n\tlogger.Info(\"First job was completed\")\n\tsteps := job.GetSteps(cfg, defaults.App2Name, jobName)\n\n\texpectedSteps := []expectedStep{\n\t\t{name: \"clone-config\", components: []string{}},\n\t\t{name: 
\"prepare-pipelines\", components: []string{}},\n\t\t{name: \"radix-pipeline\", components: []string{}},\n\t\t{name: \"clone\", components: []string{}},\n\t\t{name: \"build-app\", components: []string{\"app\"}},\n\t\t{name: \"build-redis\", components: []string{\"redis\"}},\n\t}\n\n\tif len(steps) != len(expectedSteps) {\n\t\treturn errors.New(\"number of pipeline steps was not as expected\")\n\t}\n\n\tfor index, step := range steps {\n\t\tif !strings.EqualFold(step.Name, expectedSteps[index].name) {\n\t\t\treturn fmt.Errorf(\"expeced step %s, but got %s\", expectedSteps[index].name, step.Name)\n\t\t}\n\n\t\tif !array.EqualElements(step.Components, expectedSteps[index].components) {\n\t\t\treturn fmt.Errorf(\"expeced components %s, but got %s\", expectedSteps[index].components, step.Components)\n\t\t}\n\t}\n\n\tstepLog := job.GetLogForStep(cfg, defaults.App2Name, jobName, \"build-app\", logger)\n\t// Validate if Dockerfile build output contains SHA256 hash of build secrets:\n\t// https://github.com/equinor/radix-canarycicd-test-2/blob/master/Dockerfile#L9\n\tif !strings.Contains(stepLog, Secret1ValueSha256) || !strings.Contains(stepLog, Secret2ValueSha256) {\n\t\treturn errors.New(\"build secrets are not contained in build log\")\n\t}\n\n\tjobSummary, err = test.WaitForCheckFuncWithValueOrTimeout(cfg, func(cfg config.Config) (*models.JobSummary, error) {\n\t\treturn job.GetLastPipelineJobWithStatus(cfg, defaults.App2Name, \"Running\", logger)\n\t}, logger)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Stop job and verify that it has been stopped\n\tjobName = jobSummary.Name\n\tlogger.Infof(\"Second job name: %s\", jobName)\n\terr = job.Stop(cfg, defaults.App2Name, jobName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = test.WaitForCheckFuncOrTimeout(cfg, func(cfg config.Config) error {\n\t\t_, err := job.GetLastPipelineJobWithStatus(cfg, defaults.App2Name, \"Stopped\", logger)\n\t\treturn err\n\t}, logger)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tlogger.Info(\"Second job was stopped\")\n\treturn nil\n}",
"func buildImage(id, baseDir, stageDir string, buildEnv map[string]string) (CmdResult, error) {\n\tlog.Printf(\"Building image for %s\", id)\n\n\tdir := filepath.Join(baseDir, stageDir)\n\tvar b strings.Builder\n\tfor k, v := range buildEnv {\n\t\tfmt.Fprintf(&b, \"--build-arg %s=%s \", k, fmt.Sprintf(`\"%s\"`, v))\n\t}\n\tenv := b.String()\n\n\tcmdStr := fmt.Sprintf(\"docker build %s-t %s .\", env, id)\n\t// sh -c is a workaround that allow us to have double quotes around environment variable values.\n\t// Those are needed when the environment variables have whitespaces, for instance a NAME, like in\n\t// TREPB.\n\tcmd := exec.Command(\"bash\", \"-c\", cmdStr)\n\tcmd.Dir = dir\n\tvar outb, errb bytes.Buffer\n\tcmd.Stdout = &outb\n\tcmd.Stderr = &errb\n\n\tlog.Printf(\"$ %s\", cmdStr)\n\terr := cmd.Run()\n\tswitch err.(type) {\n\tcase *exec.Error:\n\t\tcmdResultError := CmdResult{\n\t\t\tExitStatus: statusCode(err),\n\t\t\tCmd: cmdStr,\n\t\t}\n\t\treturn cmdResultError, fmt.Errorf(\"command was not executed correctly: %s\", err)\n\t}\n\n\tcmdResult := CmdResult{\n\t\tStdout: outb.String(),\n\t\tStderr: errb.String(),\n\t\tCmd: cmdStr,\n\t\tCmdDir: dir,\n\t\tExitStatus: statusCode(err),\n\t\tEnv: os.Environ(),\n\t}\n\n\treturn cmdResult, err\n}",
"func (this *Task) image(zkc zk.ZK) (string, string, string, error) {\n\tif this.ImagePath == \"\" {\n\n\t\tdefaultReleaseWatchPath, _, err := RegistryKeyValue(KReleaseWatch, map[string]interface{}{\n\t\t\t\"Domain\": this.domain,\n\t\t\t\"Service\": this.service,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", \"\", err\n\t\t}\n\n\t\treleaseNode, err := zkc.Get(defaultReleaseWatchPath)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", \"\", err\n\t\t}\n\n\t\tthis.ImagePath = releaseNode.GetValueString()\n\t\tglog.Infoln(\"ImagePath defaults to\", this.ImagePath, \"for job\", *this)\n\t}\n\n\tglog.Infoln(\"Container image from image path\", this.ImagePath)\n\tdocker_info, err := zkc.Get(this.ImagePath)\n\tif err != nil {\n\t\treturn \"\", \"\", \"\", err\n\t}\n\timage := docker_info.GetValueString()\n\tversion := image[strings.LastIndex(image, \":\")+1:]\n\treturn fmt.Sprintf(\"/%s/%s/%s\", this.domain, this.service, version), version, image, nil\n}",
"func TestJobSuccess(t *testing.T) {\n\tif !isTravis() {\n\t\tt.Skip(\"skipping integration test; it will only run on travis\")\n\t\treturn\n\t}\n\n\tjobDone := make(chan struct{}, 1)\n\n\tst, err := NewStore(testDBURL)\n\tif err != nil {\n\t\tt.Fatalf(\"NewStore returned %v\", err)\n\t}\n\tdefer dropDatabase(t, testDBURL)\n\n\tm := jobqueue.New(jobqueue.SetStore(st))\n\n\tf := func(args ...interface{}) error {\n\t\tif len(args) != 1 {\n\t\t\treturn fmt.Errorf(\"expected len(args) == 1, have %d\", len(args))\n\t\t}\n\t\ts, ok := args[0].(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"expected type of 1st arg == string, have %T\", args[0])\n\t\t}\n\t\tif have, want := s, \"Hello\"; have != want {\n\t\t\treturn fmt.Errorf(\"expected 1st arg = %q, have %q\", want, have)\n\t\t}\n\t\tjobDone <- struct{}{}\n\t\treturn nil\n\t}\n\terr = m.Register(\"topic\", f)\n\tif err != nil {\n\t\tt.Fatalf(\"Register failed with %v\", err)\n\t}\n\terr = m.Start()\n\tif err != nil {\n\t\tt.Fatalf(\"Start failed with %v\", err)\n\t}\n\tjob := &jobqueue.Job{Topic: \"topic\", Args: []interface{}{\"Hello\"}}\n\terr = m.Add(job)\n\tif err != nil {\n\t\tt.Fatalf(\"Add failed with %v\", err)\n\t}\n\tif job.ID == \"\" {\n\t\tt.Fatalf(\"Job ID = %q\", job.ID)\n\t}\n\ttimeout := 2 * time.Second\n\tselect {\n\tcase <-jobDone:\n\tcase <-time.After(timeout):\n\t\tt.Fatal(\"Processor func timed out\")\n\t}\n}",
"func IsTraining(projectID string, location string) (bool, error) {\n\tctx := context.Background()\n\tclient, err := automl.NewClient(ctx)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"NewClient: %v\", err)\n\t}\n\tdefer client.Close()\n\n\treq := &lropb.ListOperationsRequest{\n\t\tName: fmt.Sprintf(\"projects/%s/locations/%s\", projectID, location),\n\t}\n\n\tit := client.LROClient.ListOperations(ctx, req)\n\n\tfor {\n\t\top, err := it.Next()\n\t\tif err == iterator.Done {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"ListOperations.Next: %v\", err)\n\t\t}\n\n\t\tif op.GetDone() {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Since the OP is still running check if its an ImportJob or model training.\n\t\topMeta := &automlpb.OperationMetadata{}\n\n\t\terr = op.GetMetadata().UnmarshalTo(opMeta)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Could not unmarshal metadata for: %v; error: %v\", op.GetName(), err)\n\t\t}\n\n\t\t// Creating a dataset is not actually a long running operation; when we create a dataset we are just\n\t\t// defining the name essentionally.\n\t\tif opMeta.GetCreateDatasetDetails() != nil {\n\t\t\tdetails := opMeta.GetCreateDatasetDetails()\n\n\t\t\tif details != nil {\n\t\t\t\tlog.Infof(\"Found running op dataset pp: %v\\n%v\",op.Name, toString(details))\n\t\t\t}\n\t\t\treturn true, nil\n\t\t}\n\n\t\t// ImportData is a long running operation can take hours\n\t\t// TODO(jlewi): How do we associate a running ImportData with its dataset? 
Neither Metadata, nor details appears\n\t\t// to contain the dataset id.\n\t\tif opMeta.GetImportDataDetails() != nil {\n\t\t\tlog.Infof(\"ImportData Op: %v; metadata:\\n%v\", op.Name, toString(opMeta))\n\t\t\tdetails := opMeta.GetImportDataDetails()\n\n\t\t\tif details != nil {\n\t\t\t\tlog.Infof(\"ImportDataDetails Op: %v; Details:\\n%v\",op.Name, toString(details))\n\t\t\t}\n\n\t\t\treturn true, nil\n\t\t}\n\n\t\tif opMeta.GetCreateModelDetails() != nil {\n\t\t\tdetails := opMeta.GetCreateModelDetails()\n\n\t\t\tif details != nil {\n\t\t\t\tlog.Infof(\"Create Model Op: %v; Details:\\n%v\",op.Name, toString(details))\n\t\t\t}\n\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}",
"func check(err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\n\tbuildMU.Lock()\n\tdefer buildMU.Unlock()\n\tbuild.Status = pb.Status_INFRA_FAILURE\n\tbuild.SummaryMarkdown = fmt.Sprintf(\"run_annotations failure: `%s`\", err)\n\tclient.WriteBuild(build)\n\tfmt.Fprintln(os.Stderr, err)\n\tos.Exit(1)\n}",
"func (m *MinikubeRunner) EnsureRunning(opts ...string) {\n\ts, _, err := m.Status()\n\tif err != nil {\n\t\tm.T.Errorf(\"error getting status for ensure running: %v\", err)\n\t}\n\tif s != state.Running.String() {\n\t\tstdout, stderr, err := m.start(opts...)\n\t\tif err != nil {\n\t\t\tm.T.Errorf(\"error starting while running EnsureRunning : %v , stdout %s stderr %s\", err, stdout, stderr)\n\t\t}\n\t}\n\tm.CheckStatus(state.Running.String())\n}",
"func (c *Client) Build(ctx context.Context, opts BuildOptions) error {\n\timageRef, err := c.parseTagReference(opts.Image)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"invalid image name '%s'\", opts.Image)\n\t}\n\n\tappPath, err := c.processAppPath(opts.AppPath)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"invalid app path '%s'\", opts.AppPath)\n\t}\n\n\tproxyConfig := c.processProxyConfig(opts.ProxyConfig)\n\n\tbuilderRef, err := c.processBuilderName(opts.Builder)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"invalid builder '%s'\", opts.Builder)\n\t}\n\n\trawBuilderImage, err := c.imageFetcher.Fetch(ctx, builderRef.Name(), image.FetchOptions{Daemon: true, PullPolicy: opts.PullPolicy})\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to fetch builder image '%s'\", builderRef.Name())\n\t}\n\n\tbldr, err := c.getBuilder(rawBuilderImage)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"invalid builder %s\", style.Symbol(opts.Builder))\n\t}\n\n\trunImageName := c.resolveRunImage(opts.RunImage, imageRef.Context().RegistryStr(), builderRef.Context().RegistryStr(), bldr.Stack(), opts.AdditionalMirrors, opts.Publish)\n\trunImage, err := c.validateRunImage(ctx, runImageName, opts.PullPolicy, opts.Publish, bldr.StackID)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"invalid run-image '%s'\", runImageName)\n\t}\n\n\tvar runMixins []string\n\tif _, err := dist.GetLabel(runImage, stack.MixinsLabel, &runMixins); err != nil {\n\t\treturn err\n\t}\n\n\tfetchedBPs, order, err := c.processBuildpacks(ctx, bldr.Image(), bldr.Buildpacks(), bldr.Order(), bldr.StackID, opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.validateMixins(fetchedBPs, bldr, runImageName, runMixins); err != nil {\n\t\treturn errors.Wrap(err, \"validating stack mixins\")\n\t}\n\n\tbuildEnvs := map[string]string{}\n\tfor _, envVar := range opts.ProjectDescriptor.Build.Env {\n\t\tbuildEnvs[envVar.Name] = envVar.Value\n\t}\n\n\tfor k, v := range opts.Env 
{\n\t\tbuildEnvs[k] = v\n\t}\n\n\tephemeralBuilder, err := c.createEphemeralBuilder(rawBuilderImage, buildEnvs, order, fetchedBPs)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer c.docker.ImageRemove(context.Background(), ephemeralBuilder.Name(), types.ImageRemoveOptions{Force: true})\n\n\tvar builderPlatformAPIs builder.APISet\n\tbuilderPlatformAPIs = append(builderPlatformAPIs, ephemeralBuilder.LifecycleDescriptor().APIs.Platform.Deprecated...)\n\tbuilderPlatformAPIs = append(builderPlatformAPIs, ephemeralBuilder.LifecycleDescriptor().APIs.Platform.Supported...)\n\n\tif !supportsPlatformAPI(builderPlatformAPIs) {\n\t\tc.logger.Debugf(\"pack %s supports Platform API(s): %s\", c.version, strings.Join(build.SupportedPlatformAPIVersions.AsStrings(), \", \"))\n\t\tc.logger.Debugf(\"Builder %s supports Platform API(s): %s\", style.Symbol(opts.Builder), strings.Join(builderPlatformAPIs.AsStrings(), \", \"))\n\t\treturn errors.Errorf(\"Builder %s is incompatible with this version of pack\", style.Symbol(opts.Builder))\n\t}\n\n\timgOS, err := rawBuilderImage.OS()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"getting builder OS\")\n\t}\n\n\tprocessedVolumes, warnings, err := processVolumes(imgOS, opts.ContainerConfig.Volumes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, warning := range warnings {\n\t\tc.logger.Warn(warning)\n\t}\n\n\tfileFilter, err := getFileFilter(opts.ProjectDescriptor)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trunImageName, err = pname.TranslateRegistry(runImageName, c.registryMirrors, c.logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprojectMetadata := platform.ProjectMetadata{}\n\tif c.experimental {\n\t\tversion := opts.ProjectDescriptor.Project.Version\n\t\tsourceURL := opts.ProjectDescriptor.Project.SourceURL\n\t\tif version != \"\" || sourceURL != \"\" {\n\t\t\tprojectMetadata.Source = &platform.ProjectSource{\n\t\t\t\tType: \"project\",\n\t\t\t\tVersion: map[string]interface{}{\"declared\": version},\n\t\t\t\tMetadata: 
map[string]interface{}{\"url\": sourceURL},\n\t\t\t}\n\t\t}\n\t}\n\n\t// Default mode: if the TrustBuilder option is not set, trust the suggested builders.\n\tif opts.TrustBuilder == nil {\n\t\topts.TrustBuilder = IsSuggestedBuilderFunc\n\t}\n\n\tlifecycleOpts := build.LifecycleOptions{\n\t\tAppPath: appPath,\n\t\tImage: imageRef,\n\t\tBuilder: ephemeralBuilder,\n\t\tLifecycleImage: ephemeralBuilder.Name(),\n\t\tRunImage: runImageName,\n\t\tProjectMetadata: projectMetadata,\n\t\tClearCache: opts.ClearCache,\n\t\tPublish: opts.Publish,\n\t\tTrustBuilder: opts.TrustBuilder(opts.Builder),\n\t\tUseCreator: false,\n\t\tDockerHost: opts.DockerHost,\n\t\tCacheImage: opts.CacheImage,\n\t\tHTTPProxy: proxyConfig.HTTPProxy,\n\t\tHTTPSProxy: proxyConfig.HTTPSProxy,\n\t\tNoProxy: proxyConfig.NoProxy,\n\t\tNetwork: opts.ContainerConfig.Network,\n\t\tAdditionalTags: opts.AdditionalTags,\n\t\tVolumes: processedVolumes,\n\t\tDefaultProcessType: opts.DefaultProcessType,\n\t\tFileFilter: fileFilter,\n\t\tWorkspace: opts.Workspace,\n\t\tGID: opts.GroupID,\n\t\tPreviousImage: opts.PreviousImage,\n\t\tInteractive: opts.Interactive,\n\t\tTermui: termui.NewTermui(imageRef.Name(), ephemeralBuilder, runImageName),\n\t\tSBOMDestinationDir: opts.SBOMDestinationDir,\n\t}\n\n\tlifecycleVersion := ephemeralBuilder.LifecycleDescriptor().Info.Version\n\t// Technically the creator is supported as of platform API version 0.3 (lifecycle version 0.7.0+) but earlier versions\n\t// have bugs that make using the creator problematic.\n\tlifecycleSupportsCreator := !lifecycleVersion.LessThan(semver.MustParse(minLifecycleVersionSupportingCreator))\n\n\tif lifecycleSupportsCreator && opts.TrustBuilder(opts.Builder) {\n\t\tlifecycleOpts.UseCreator = true\n\t\t// no need to fetch a lifecycle image, it won't be used\n\t\tif err := c.lifecycleExecutor.Execute(ctx, lifecycleOpts); err != nil {\n\t\t\treturn errors.Wrap(err, \"executing lifecycle\")\n\t\t}\n\n\t\treturn c.logImageNameAndSha(ctx, opts.Publish, 
imageRef)\n\t}\n\n\tif !opts.TrustBuilder(opts.Builder) {\n\t\tif lifecycleImageSupported(imgOS, lifecycleVersion) {\n\t\t\tlifecycleImageName := opts.LifecycleImage\n\t\t\tif lifecycleImageName == \"\" {\n\t\t\t\tlifecycleImageName = fmt.Sprintf(\"%s:%s\", internalConfig.DefaultLifecycleImageRepo, lifecycleVersion.String())\n\t\t\t}\n\n\t\t\timgArch, err := rawBuilderImage.Architecture()\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"getting builder architecture\")\n\t\t\t}\n\n\t\t\tlifecycleImage, err := c.imageFetcher.Fetch(\n\t\t\t\tctx,\n\t\t\t\tlifecycleImageName,\n\t\t\t\timage.FetchOptions{Daemon: true, PullPolicy: opts.PullPolicy, Platform: fmt.Sprintf(\"%s/%s\", imgOS, imgArch)},\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"fetching lifecycle image\")\n\t\t\t}\n\n\t\t\tlifecycleOpts.LifecycleImage = lifecycleImage.Name()\n\t\t} else {\n\t\t\treturn errors.Errorf(\"Lifecycle %s does not have an associated lifecycle image. Builder must be trusted.\", lifecycleVersion.String())\n\t\t}\n\t}\n\n\tif err := c.lifecycleExecutor.Execute(ctx, lifecycleOpts); err != nil {\n\t\treturn errors.Wrap(err, \"executing lifecycle. This may be the result of using an untrusted builder\")\n\t}\n\n\treturn c.logImageNameAndSha(ctx, opts.Publish, imageRef)\n}",
"func getJobStatus(row chunk.Row) (JobStatus, string, error) {\n\t// ending status has the highest priority\n\tendTimeIsNull := row.IsNull(2)\n\tif !endTimeIsNull {\n\t\tresultMsgIsNull := row.IsNull(3)\n\t\tif !resultMsgIsNull {\n\t\t\tresultMessage := row.GetString(3)\n\t\t\treturn JobFinished, resultMessage, nil\n\t\t}\n\t\terrorMessage := row.GetString(4)\n\t\treturn JobFailed, errorMessage, nil\n\t}\n\n\tisAlive := row.GetInt64(1) == 1\n\tstartTimeIsNull := row.IsNull(5)\n\texpectedStatus := row.GetEnum(0).String()\n\n\tswitch expectedStatus {\n\tcase \"canceled\":\n\t\treturn JobCanceled, \"\", nil\n\tcase \"paused\":\n\t\tif startTimeIsNull || isAlive {\n\t\t\treturn JobPaused, \"\", nil\n\t\t}\n\t\treturn JobFailed, \"job expected paused but the node is timeout\", nil\n\tcase \"running\":\n\t\tif startTimeIsNull {\n\t\t\treturn JobPending, \"\", nil\n\t\t}\n\t\tif isAlive {\n\t\t\treturn JobRunning, \"\", nil\n\t\t}\n\t\treturn JobFailed, \"job expected running but the node is timeout\", nil\n\tdefault:\n\t\treturn JobFailed, fmt.Sprintf(\"unexpected job status %s\", expectedStatus), nil\n\t}\n}",
"func CfnWorkflow_IsConstruct(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"aws-cdk-lib.aws_glue.CfnWorkflow\",\n\t\t\"isConstruct\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}",
"func runBuilding(args []string) int {\n\tsuccess := false\n\tw, wf, r, rf, err := prepare(args, buildingOpt.Overwrite)\n\tif wf != nil {\n\t\tdefer wf(&success, buildingOpt.Backup)\n\t}\n\tif rf != nil {\n\t\tdefer rf()\n\t}\n\tif err != nil {\n\t\treturn handleError(err)\n\t}\n\n\terr = csvutil.Building(r, w, buildingOpt.BuildingOption)\n\tif err != nil {\n\t\treturn handleError(err)\n\t}\n\n\tsuccess = true\n\treturn 0\n}",
"func (spec *SourceSpec) IsContainerBuild() bool {\n\treturn spec.ContainerImage.Image != \"\"\n}",
"func (runner Runner) RequiresBuild() bool {\n\treturn true\n}",
"func IsFinished(job *batchv1.Job) bool {\n\tfor _, condition := range job.Status.Conditions {\n\t\tswitch condition.Type {\n\t\tcase batchv1.JobComplete:\n\t\t\treturn true\n\t\tcase batchv1.JobFailed:\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (twrkr *twerk) status() Status {\n\tlive := twrkr.liveWorkersNum.Get()\n\tworking := twrkr.currentlyWorkingNum.Get()\n\tinQueue := len(twrkr.jobListener)\n\n\treturn Status{\n\t\tlive: live,\n\t\tworking: working,\n\t\tjobsInQueue: inQueue,\n\t}\n}",
"func running() bool {\n\treturn runCalled.Load() != 0\n}",
"func (s *StateStrategy) StartScript(){\n util.Info(\"*****************Begin Start up Script ********************* \")\n if s.bapplySuccess == false {\n s.RollBackStart()\n return\n }\n\n ipCnt, err_code := strconv.Atoi(s.ipCnt)\n slotCnt, err_code1 := strconv.Atoi(s.slotCnt)\n if nil != err_code || nil != err_code1 {\n util.Error(fmt.Sprintf(\"Start Params Wrong, ipCnt %s, slotCnt %s\", s.ipCnt,s.slotCnt))\n }\n\n // compute np\n np := ipCnt*slotCnt\n shellPath := fmt.Sprintf(\"./test.sh %d %s %s %s %s %s %s %v %v %v\",np,s.algType,s.epochs,s.batchsize,s.learningrate,s.trainPath,s.savePath,s.taskId,env.REPORT_ADDR,env.REPORT_TOKEN)\n util.Infof(\"shellpath = %v\",shellPath)\n cmd := exec.Command(\"/bin/bash\",\"-c\",shellPath) //Cmd init\n util.Infof(\"Start Exec Cmd Arg: %v , Process PID: %v \",cmd.Args,s.pid)\n\n // get local env\n cmd.Env = s.GetCmdEnv()\n\n // Run starts the specified command and waits for it to complete\n var out bytes.Buffer\n var stderr bytes.Buffer\n cmd.Stdout = &out\n cmd.Stderr = &stderr\n err := cmd.Run()\n if nil != err {\n util.Error(fmt.Sprintf(\"Cmd Go Wrong, err : %v\",stderr.String()))\n util.Error(fmt.Sprint(err) + \": \" + stderr.String())\n s.RollBackStart()\n return\n }\n\n s.pid = cmd.Process.Pid\n util.Infof(\"Cmd Arg: %v , Process PID: %v \",cmd.Args,s.pid)\n\n if out.String() == \"\" {\n util.Info(\"*****************Start up Script Success********************* \")\n }else{\n util.Error(\"*****************Start up Script Unknow Wrong : \" + out.String())\n s.RollBackStart()\n return\n }\n\n s.UpdateState()\n}",
"func (j *JobJob) Valid() bool {\n\treturn j.TargetJob != nil\n}",
"func bootstrapAppImageBuild(c *cli.Context) error {\n\n\t// check if the number of arguments are stictly 1, if not\n\t// return\n\tif c.NArg() != 1 {\n\t\tlog.Fatal(\"Please specify the path to the AppDir which you would like to aid.\")\n\n\t}\n\tfileToAppDir := c.Args().Get(0)\n\n\t// does the file exist? if not early-exit\n\tif ! helpers.CheckIfFileOrFolderExists(fileToAppDir) {\n\t\tlog.Fatal(\"The specified directory does not exist\")\n\t}\n\n\t// Add the location of the executable to the $PATH\n\thelpers.AddHereToPath()\n\n\n\t// Check for needed files on $PATH\n\ttools := []string{\"file\", \"mksquashfs\", \"desktop-file-validate\", \"uploadtool\", \"patchelf\", \"desktop-file-validate\", \"patchelf\"} // \"sh\", \"strings\", \"grep\" no longer needed?; \"curl\" is needed for uploading only, \"glib-compile-schemas\" is needed in some cases only\n\t// curl is needed by uploadtool; TODO: Replace uploadtool with native Go code\n\t// \"sh\", \"strings\", \"grep\" are needed by appdirtool to parse qt_prfxpath; TODO: Replace with native Go code\n\tfor _, t := range tools {\n\t\t_, err := exec.LookPath(t)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Required helper tool\", t, \"missing\")\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t// Check whether we have a sufficient version of mksquashfs for -offset\n\tif helpers.CheckIfSquashfsVersionSufficient(\"mksquashfs\") == false {\n\t\tos.Exit(1)\n\t}\n\n\t// Check if is directory, then assume we want to convert an AppDir into an AppImage\n\tfileToAppDir, _ = filepath.EvalSymlinks(fileToAppDir)\n\tif info, err := os.Stat(fileToAppDir); err == nil && info.IsDir() {\n\t\tGenerateAppImage(fileToAppDir)\n\t} else {\n\t\t// TODO: If it is a file, then check if it is an AppImage and if yes, extract it\n\t\tlog.Fatal(\"Supplied argument is not a directory \\n\" +\n\t\t\t\"To extract an AppImage, run it with --appimage-extract \\n\")\n\n\t}\n\treturn nil\n}",
"func checkArtStates(ctx context.Context, invID invocations.ID, arts []*artifactCreationRequest) (reqs []*artifactCreationRequest, realm string, err error) {\n\tvar invState pb.Invocation_State\n\n\teg, ctx := errgroup.WithContext(ctx)\n\teg.Go(func() error {\n\t\treturn invocations.ReadColumns(ctx, invID, map[string]any{\n\t\t\t\"State\": &invState, \"Realm\": &realm,\n\t\t})\n\t})\n\n\teg.Go(func() (err error) {\n\t\treqs, err = findNewArtifacts(ctx, invID, arts)\n\t\treturn\n\t})\n\n\tswitch err := eg.Wait(); {\n\tcase err != nil:\n\t\treturn nil, \"\", err\n\tcase invState != pb.Invocation_ACTIVE:\n\t\treturn nil, \"\", appstatus.Errorf(codes.FailedPrecondition, \"%s is not active\", invID.Name())\n\t}\n\treturn reqs, realm, nil\n}",
"func (self *Manager) checkProgramState(program *Program, checkLock *sync.WaitGroup) {\n\tvar isStopping = self.stopping\n\tdefer checkLock.Done()\n\n\tif isStopping {\n\t\treturn\n\t}\n\n\tswitch program.GetState() {\n\tcase ProgramStopped:\n\t\t// first-time start for autostart programs\n\t\tif program.AutoStart && !program.HasEverBeenStarted() {\n\t\t\tlog.Debugf(\"[%s] Starting program for the first time\", program.Name)\n\t\t\tprogram.ShouldAutoRestart() // do this here to \"seed\" the scheduler with the first schedule time\n\t\t\tprogram.Start()\n\t\t}\n\n\tcase ProgramExited:\n\t\t// automatic restart of cleanly-exited programs\n\t\tif program.ShouldAutoRestart() {\n\t\t\tlog.Debugf(\"[%s] Automatically restarting cleanly-exited program\", program.Name)\n\t\t\tprogram.Start()\n\t\t}\n\n\tcase ProgramBackoff:\n\t\tif program.ShouldAutoRestart() {\n\t\t\tlog.Debugf(\"[%s] Automatically restarting program after backoff (retry %d/%d)\",\n\t\t\t\tprogram.Name,\n\t\t\t\tprogram.processRetryCount,\n\t\t\t\tprogram.StartRetries)\n\t\t\tprogram.Start()\n\t\t} else {\n\t\t\tlog.Debugf(\"[%s] Marking program fatal after %d/%d retries\",\n\t\t\t\tprogram.Name,\n\t\t\t\tprogram.processRetryCount,\n\t\t\t\tprogram.StartRetries)\n\t\t\tprogram.StopFatal()\n\t\t}\n\t}\n}",
"func ProcessStateSuccess(p *os.ProcessState,) bool",
"func NewBuildState(config *Configuration) *BuildState {\n\tgraph := NewGraph()\n\tstate := &BuildState{\n\t\tGraph: graph,\n\t\tpendingParses: make(chan ParseTask, 10000),\n\t\tpendingActions: make(chan Task, 1000),\n\t\thashers: map[string]*fs.PathHasher{\n\t\t\t// For compatibility reasons the sha1 hasher has no suffix.\n\t\t\t\"sha1\": fs.NewPathHasher(RepoRoot, config.Build.Xattrs, sha1.New, \"sha1\"),\n\t\t\t\"sha256\": fs.NewPathHasher(RepoRoot, config.Build.Xattrs, sha256.New, \"sha256\"),\n\t\t\t\"crc32\": fs.NewPathHasher(RepoRoot, config.Build.Xattrs, newCRC32, \"crc32\"),\n\t\t\t\"crc64\": fs.NewPathHasher(RepoRoot, config.Build.Xattrs, newCRC64, \"crc64\"),\n\t\t\t\"blake3\": fs.NewPathHasher(RepoRoot, config.Build.Xattrs, newBlake3, \"blake3\"),\n\t\t\t\"xxhash\": fs.NewPathHasher(RepoRoot, config.Build.Xattrs, newXXHash, \"xxhash\"),\n\t\t},\n\t\tProcessExecutor: executorFromConfig(config),\n\t\tStartTime: startTime,\n\t\tConfig: config,\n\t\tRepoConfig: config,\n\t\tVerifyHashes: true,\n\t\tNeedBuild: true,\n\t\tXattrsSupported: config.Build.Xattrs,\n\t\tCoverage: TestCoverage{Files: map[string][]LineCoverage{}},\n\t\tTargetArch: config.Build.Arch,\n\t\tArch: cli.HostArch(),\n\t\tstats: &lockedStats{},\n\t\tprogress: &stateProgress{\n\t\t\tnumActive: 1, // One for the initial target adding on the main thread.\n\t\t\tnumPending: 1,\n\t\t\tpendingTargets: cmap.New[BuildLabel, chan struct{}](cmap.DefaultShardCount, hashBuildLabel),\n\t\t\tpendingPackages: cmap.New[packageKey, chan struct{}](cmap.DefaultShardCount, hashPackageKey),\n\t\t\tpackageWaits: cmap.New[packageKey, chan struct{}](cmap.DefaultShardCount, hashPackageKey),\n\t\t\tinternalResults: make(chan *BuildResult, 1000),\n\t\t\tcycleDetector: cycleDetector{graph: graph},\n\t\t},\n\t\tinitOnce: new(sync.Once),\n\t\tpreloadDownloadOnce: new(sync.Once),\n\t}\n\n\tstate.PathHasher = state.Hasher(config.Build.HashFunction)\n\tstate.progress.allStates = []*BuildState{state}\n\tstate.Hashes.Config = 
config.Hash()\n\tfor _, exp := range config.Parse.ExperimentalDir {\n\t\tstate.experimentalLabels = append(state.experimentalLabels, BuildLabel{PackageName: exp, Name: \"...\"})\n\t}\n\tgo state.forwardResults()\n\treturn state\n}",
"func (i *invocation) isActive() bool {\n\treturn i.queuedOperations.Len() > 0 || i.executingWorkersCount > 0\n}",
"func (p *PodmanTestIntegration) BuildImage(dockerfile, imageName string, layers string) {\n\t// TODO\n}",
"func (o *initJobOpts) askDockerfile() (isDfSelected bool, err error) {\n\tif o.dockerfilePath != \"\" || o.image != \"\" {\n\t\treturn true, nil\n\t}\n\tif err = o.dockerEngine.CheckDockerEngineRunning(); err != nil {\n\t\tvar errDaemon *dockerengine.ErrDockerDaemonNotResponsive\n\t\tswitch {\n\t\tcase errors.Is(err, dockerengine.ErrDockerCommandNotFound):\n\t\t\tlog.Info(\"Docker command is not found; Copilot won't build from a Dockerfile.\\n\")\n\t\t\treturn false, nil\n\t\tcase errors.As(err, &errDaemon):\n\t\t\tlog.Info(\"Docker daemon is not responsive; Copilot won't build from a Dockerfile.\\n\")\n\t\t\treturn false, nil\n\t\tdefault:\n\t\t\treturn false, fmt.Errorf(\"check if docker engine is running: %w\", err)\n\t\t}\n\t}\n\tdf, err := o.dockerfileSel.Dockerfile(\n\t\tfmt.Sprintf(fmtWkldInitDockerfilePrompt, color.HighlightUserInput(o.name)),\n\t\tfmt.Sprintf(fmtWkldInitDockerfilePathPrompt, color.HighlightUserInput(o.name)),\n\t\twkldInitDockerfileHelpPrompt,\n\t\twkldInitDockerfilePathHelpPrompt,\n\t\tfunc(v interface{}) error {\n\t\t\treturn validatePath(afero.NewOsFs(), v)\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"select Dockerfile: %w\", err)\n\t}\n\tif df == selector.DockerfilePromptUseImage {\n\t\treturn false, nil\n\t}\n\to.dockerfilePath = df\n\treturn true, nil\n}",
"func (bldr *stackBuilder) Succeeded() bool {\n\treturn !bldr.failed\n}"
] | [
"0.5804885",
"0.576292",
"0.57217044",
"0.56923383",
"0.5667544",
"0.5643866",
"0.56260103",
"0.5621047",
"0.55758095",
"0.5512292",
"0.5495707",
"0.54381084",
"0.5428398",
"0.5407359",
"0.5348316",
"0.5330184",
"0.53044367",
"0.5302426",
"0.5284066",
"0.52470475",
"0.5225209",
"0.52098966",
"0.5205371",
"0.5205035",
"0.51705945",
"0.51440954",
"0.5134171",
"0.51336575",
"0.51315695",
"0.51306",
"0.5098524",
"0.5092978",
"0.5076843",
"0.5076279",
"0.5051923",
"0.5050195",
"0.50448465",
"0.50355947",
"0.50315595",
"0.50297403",
"0.501612",
"0.5007842",
"0.500766",
"0.4972332",
"0.4946587",
"0.49447295",
"0.49426648",
"0.4939377",
"0.4917301",
"0.4913231",
"0.4909241",
"0.49083453",
"0.4902477",
"0.49011776",
"0.48989165",
"0.4896408",
"0.48963448",
"0.4875938",
"0.4872168",
"0.48610657",
"0.48565295",
"0.48561996",
"0.48480493",
"0.48472258",
"0.4844314",
"0.4830866",
"0.48306665",
"0.4829727",
"0.4828304",
"0.48213112",
"0.48204434",
"0.48176962",
"0.48096645",
"0.48075503",
"0.4806411",
"0.4806084",
"0.48038182",
"0.4800449",
"0.47984862",
"0.47898617",
"0.47893274",
"0.4788031",
"0.47878042",
"0.47785848",
"0.47695833",
"0.4762726",
"0.47617772",
"0.4761582",
"0.47534326",
"0.47475305",
"0.47439232",
"0.47410676",
"0.4737177",
"0.47353894",
"0.47339278",
"0.47322416",
"0.47296995",
"0.47290686",
"0.47279134",
"0.4727279"
] | 0.7990586 | 0 |
Save changes the configuration of the hostonly network. | Сохраните изменения конфигурации сети hostonly. | func (n *hostOnlyNetwork) Save(vbox VBoxManager) error {
if err := n.SaveIPv4(vbox); err != nil {
return err
}
if n.DHCP {
vbox.vbm("hostonlyif", "ipconfig", n.Name, "--dhcp") // not implemented as of VirtualBox 4.3
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (ctx *BoltDbContext) SaveHost(host Host) error {\n\treturn ctx.db.Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(hostsBucketName))\n\t\terr := b.Put([]byte(host.Address), host.GobEncode())\n\t\treturn err\n\t})\n}",
"func (hc *Hailconfig) Save() error {\n\terr := hc.f.Reset()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to reset\")\n\t}\n\treturn toml.NewEncoder(hc.f).Encode(&hc.config)\n}",
"func (smCfg *smConfiguration) Save(clientCfg *smclient.ClientConfig) error {\n\tsmCfg.viperEnv.Set(\"url\", clientCfg.URL)\n\tsmCfg.viperEnv.Set(\"user\", clientCfg.User)\n\tsmCfg.viperEnv.Set(\"ssl_disabled\", clientCfg.SSLDisabled)\n\n\tsmCfg.viperEnv.Set(\"access_token\", clientCfg.AccessToken)\n\tsmCfg.viperEnv.Set(\"refresh_token\", clientCfg.RefreshToken)\n\tsmCfg.viperEnv.Set(\"expiry\", clientCfg.ExpiresIn.Format(time.RFC1123Z))\n\n\tsmCfg.viperEnv.Set(\"client_id\", clientCfg.ClientID)\n\tsmCfg.viperEnv.Set(\"client_secret\", clientCfg.ClientSecret)\n\tsmCfg.viperEnv.Set(\"issuer_url\", clientCfg.IssuerURL)\n\tsmCfg.viperEnv.Set(\"token_url\", clientCfg.TokenEndpoint)\n\tsmCfg.viperEnv.Set(\"auth_url\", clientCfg.AuthorizationEndpoint)\n\n\treturn smCfg.viperEnv.WriteConfig()\n}",
"func (n *NetworkBuilder) Save(writer io.Writer) error {\n\terr := json.NewEncoder(writer).Encode(n.Network)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn err\n}",
"func (c *Config) save() {\n\tconst file = \"access.json\"\n\n\tc.logger.Printf(\"Save file %s\\n\", file)\n\n\tcfg := conf{\n\t\tIP: c.GetString(\"ip\"),\n\t\tPort: c.GetString(\"port\"),\n\t\tToken: c.GetString(\"token\"),\n\t\tWait: c.GetBool(\"wait\"),\n\t}\n\n\tb, err := json.Marshal(cfg)\n\tif err != nil {\n\t\tc.logger.Error(err)\n\t}\n\n\tif err = ioutil.WriteFile(file, b, 0644); err != nil {\n\t\tc.logger.Error(err)\n\t}\n}",
"func (n *hostOnlyNetwork) SaveIPv4(vbox VBoxManager) error {\n\tif n.IPv4.IP != nil && n.IPv4.Mask != nil {\n\t\tif err := vbox.vbm(\"hostonlyif\", \"ipconfig\", n.Name, \"--ip\", n.IPv4.IP.String(), \"--netmask\", net.IP(n.IPv4.Mask).String()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (a *Network) Save(path string) {\n\tioutil.WriteFile(path, []byte(a.outputFormat()), 0666)\n}",
"func hostsetconfigcmd(totalstorage, maxfilesize, mintolerance, maxduration, price, burn string) {\n\terr := callAPI(fmt.Sprintf(\"/host/setconfig?totalstorage=%s&maxfilesize=%s&mintolerance=%s\"+\n\t\t\"&maxduration=%s&price=%s&burn=%s\", totalstorage, maxfilesize, mintolerance, maxduration, price, burn))\n\tif err != nil {\n\t\tfmt.Println(\"Could not update host settings:\", err)\n\t\treturn\n\t}\n\tfmt.Println(\"Host settings updated. You have been announced as a host on the network.\")\n}",
"func (d donut) SaveConfig() error {\n\tnodeDiskMap := make(map[string][]string)\n\tfor hostname, node := range d.nodes {\n\t\tdisks, err := node.ListDisks()\n\t\tif err != nil {\n\t\t\treturn iodine.New(err, nil)\n\t\t}\n\t\tfor order, disk := range disks {\n\t\t\tdonutConfigPath := filepath.Join(d.name, donutConfig)\n\t\t\tdonutConfigWriter, err := disk.CreateFile(donutConfigPath)\n\t\t\tdefer donutConfigWriter.Close()\n\t\t\tif err != nil {\n\t\t\t\treturn iodine.New(err, nil)\n\t\t\t}\n\t\t\tnodeDiskMap[hostname][order] = disk.GetPath()\n\t\t\tjenc := json.NewEncoder(donutConfigWriter)\n\t\t\tif err := jenc.Encode(nodeDiskMap); err != nil {\n\t\t\t\treturn iodine.New(err, nil)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func Save() {\n\tc := Config{viper.GetString(\"email\"), viper.GetString(\"platform\"), viper.GetDuration(\"timeout\")}\n\tdata, err := json.MarshalIndent(c, \"\", \" \")\n\tif err != nil {\n\t\treturn\n\t}\n\n\t_ = ioutil.WriteFile(viper.GetString(\"file_config\"), data, 0600)\n}",
"func (viperConfig *Configurator) Save(sbConfig *config.SBConfiguration) error {\n\tviperConfig.viper.Set(\"url\", sbConfig.URL)\n\tviperConfig.viper.Set(\"authorization\", sbConfig.Authorization)\n\tviperConfig.viper.Set(\"room\", sbConfig.Room)\n\n\tif err := viperConfig.viper.WriteConfig(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (cfg *Config) Save(fpath string) error {\n\t// create a copy\n\tconfig2 := cfg\n\t// clear, setup setting\n\tconfig2.Password = \"\"\n\tconfig2.Iterations = ConfigHashIterations\n\n\t// save to file\n\tbyteDat2, err := json.MarshalIndent(config2, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(fpath, byteDat2, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (s *store) RegisterHost(host Host) error {\n\tconfig := host.Config()\n\tdir, err := host.Dir()\n\tif err != nil {\n\t\treturn err\n\t}\n\tbytes, err := json.Marshal(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tjsonPath := filepath.Join(dir, HostConfigJSONFile)\n\treturn ioutil.WriteFile(jsonPath, bytes, 0644)\n}",
"func saveRemoteConfig(conf *types.NetConf,\n\targs *skel.CmdArgs,\n\tipResult *current.Result,\n\tconnData connectionData) error {\n\n\tvar err error\n\n\t// Populate the configData with input data, which will be written to container.\n\tconfigData, err := populateUserspaceConfigData(conf, args, ipResult)\n\tif err != nil {\n\t\tlogging.Errorf(\"ERROR: saveRemoteConfig: Failure to retrieve pod - %v\", err)\n\t\treturn err\n\t}\n\n\t// Wrtie configData to the annotations, which will be read by container.\n\tconnData.pod, err = annotations.WritePodAnnotation(connData.kubeClient, connData.pod, configData)\n\tif err != nil {\n\t\tlogging.Errorf(\"ERROR: saveRemoteConfig: Failure to write annotations - %v\", err)\n\t\treturn err\n\t}\n\n\treturn err\n}",
"func Save() error {\n\treturn defaultConfig.Save()\n}",
"func (smCfg *smConfiguration) Save(settings *Settings) error {\n\tsmCfg.viperEnv.Set(\"url\", settings.URL)\n\tsmCfg.viperEnv.Set(\"user\", settings.User)\n\tsmCfg.viperEnv.Set(\"ssl_disabled\", settings.SSLDisabled)\n\tsmCfg.viperEnv.Set(\"token_basic_auth\", settings.TokenBasicAuth)\n\n\tsmCfg.viperEnv.Set(\"access_token\", settings.AccessToken)\n\tsmCfg.viperEnv.Set(\"refresh_token\", settings.RefreshToken)\n\tsmCfg.viperEnv.Set(\"expiry\", settings.ExpiresIn.Format(time.RFC1123Z))\n\n\tsmCfg.viperEnv.Set(\"client_id\", settings.ClientID)\n\tsmCfg.viperEnv.Set(\"client_secret\", settings.ClientSecret)\n\tsmCfg.viperEnv.Set(\"issuer_url\", settings.IssuerURL)\n\tsmCfg.viperEnv.Set(\"token_url\", settings.TokenEndpoint)\n\tsmCfg.viperEnv.Set(\"auth_url\", settings.AuthorizationEndpoint)\n\tsmCfg.viperEnv.Set(\"auth_flow\", string(settings.AuthFlow))\n\n\tcfgFile := smCfg.viperEnv.ConfigFileUsed()\n\tif err := smCfg.viperEnv.WriteConfig(); err != nil {\n\t\treturn fmt.Errorf(\"could not save config file %s: %s\", cfgFile, err)\n\t}\n\tconst ownerAccessOnly = 0600\n\tif err := os.Chmod(cfgFile, ownerAccessOnly); err != nil {\n\t\treturn fmt.Errorf(\"could not set access rights of config file %s: %s\", cfgFile, err)\n\t}\n\treturn nil\n}",
"func GhostConfigSave(config *GhostConfig) error {\n\tbucket, err := db.GetBucket(ghostBucketName)\n\tif err != nil {\n\t\treturn err\n\t}\n\trawConfig, err := json.Marshal(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tstorageLog.Infof(\"Saved config for '%s'\", config.Name)\n\treturn bucket.Set(fmt.Sprintf(\"%s.%s\", ghostConfigNamespace, config.Name), rawConfig)\n}",
"func (f *HostFilter) SaveAuto(path string) {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\tfile, err := os.Create(path)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\tw := bufio.NewWriter(file)\n\n\thosts := make(map[string]HostEntry)\n\tfor host, entry := range f.hosts {\n\t\tif entry.Type.IsAuto() {\n\t\t\thosts[host] = entry\n\t\t}\n\t}\n\n\tyaml.NewEncoder(w).Encode(hosts)\n\n\tw.Flush()\n\tfile.Close()\n}",
"func (a *AppConf) Save(c LocalConf) (err error) {\n\terr = a.cc.Save(&c)\n\treturn\n}",
"func (o Iperf3SpecServerConfigurationOutput) HostNetwork() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v Iperf3SpecServerConfiguration) *bool { return v.HostNetwork }).(pulumi.BoolPtrOutput)\n}",
"func SaveToEgressCache(egressConfigFromPilot map[string][]*config.EgressRule) {\n\t{\n\t\tvar egressconfig []control.EgressConfig\n\t\tfor _, v := range egressConfigFromPilot {\n\t\t\tfor _, v1 := range v {\n\t\t\t\tvar Ports []*control.EgressPort\n\t\t\t\tfor _, v2 := range v1.Ports {\n\t\t\t\t\tp := control.EgressPort{\n\t\t\t\t\t\tPort: (*v2).Port,\n\t\t\t\t\t\tProtocol: (*v2).Protocol,\n\t\t\t\t\t}\n\t\t\t\t\tPorts = append(Ports, &p)\n\t\t\t\t}\n\t\t\t\tc := control.EgressConfig{\n\t\t\t\t\tHosts: v1.Hosts,\n\t\t\t\t\tPorts: Ports,\n\t\t\t\t}\n\n\t\t\t\tegressconfig = append(egressconfig, c)\n\t\t\t}\n\t\t}\n\t\tEgressConfigCache.Set(\"\", egressconfig, 0)\n\t}\n}",
"func (c Config) Save() error {\n\td, err := yaml.Marshal(&c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tos.Remove(\"handshake.yaml\")\n\treturn ioutil.WriteFile(\"handshake.yaml\", d, 0644)\n}",
"func (c *Config) Save() error {\r\n\tlog.Debug().Msg(\"[Config] Saving configuration...\")\r\n\tc.Validate()\r\n\r\n\treturn c.SaveFile(EnvManagerConfigFile)\r\n}",
"func (o Iperf3SpecClientConfigurationOutput) HostNetwork() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v Iperf3SpecClientConfiguration) *bool { return v.HostNetwork }).(pulumi.BoolPtrOutput)\n}",
"func (b *BlockCreator) save() error {\n\treturn persist.SaveJSON(settingsMetadata, b.persist, filepath.Join(b.persistDir, settingsFile))\n}",
"func (k *Kluster) Save() error {\n\t// we load the nil defaults so that future versions\n\t// don't have to deal with the backwards compatibility with omitted values\n\tif k.Config != nil {\n\t\tk.Config.LoadNilDefault()\n\t}\n\n\tdir := k.Dir()\n\tif _, err := os.Stat(dir); os.IsNotExist(err) {\n\t\treturn fmt.Errorf(\"base directory %s does not exists\", dir)\n\t\t// Or?:\n\t\t// os.MkdirAll(dir, 0755)\n\t}\n\n\tformat := k.format()\n\tvar data []byte\n\tvar err error\n\n\t// Get platform configurations\n\n\t// Update configuration\n\t// pConfig := make(map[string]interface{}, len(k.Platforms))\n\tname := k.Platform()\n\tif p, ok := k.provisioner[name]; ok {\n\t\tplatform := p\n\t\tk.Platforms[name] = platform.Config()\n\t\t// c := platform.Config()\n\t\t// // TODO: Workaround to not save the vSphere credentials, cannot have metadata to '-' because the Configurator takes the credentials from there.\n\t\t// if name == \"vsphere\" {\n\t\t// \tcVsphere := c.(*vsphere.Config)\n\t\t// \tcVsphere.VspherePassword = \"\"\n\t\t// \tcVsphere.VsphereUsername = \"\"\n\t\t// \tcVsphere.VsphereServer = \"\"\n\t\t// \tk.Platforms[name] = cVsphere\n\t\t// } else {\n\t\t// \tk.Platforms[name] = c\n\t\t// }\n\n\t\terr := k.LoadState()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tk.UpdateState(name)\n\n\t\tk.ui.Log.Debugf(\"update state for %s: %v\", name, k.State[name])\n\t}\n\n\t// k.Platforms = pConfig\n\n\t// Do not use String() because:\n\t// (1) returns string and []byte is needed, and\n\t// (2) pretty print (pp=true) is needed with JSON format\n\tswitch format {\n\tcase \"yaml\":\n\t\tdata, err = k.YAML()\n\tcase \"json\":\n\t\tdata, err = k.JSON(true)\n\tcase \"toml\":\n\t\tdata, err = k.TOML()\n\tdefault:\n\t\terr = fmt.Errorf(\"can't stringify the Kluster, unknown format %q\", format)\n\t}\n\n\tlock, err := lockFile(k.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer lock.Unlock()\n\n\tk.ui.Log.Debugf(\"updating cluster configuration file %s\", 
k.path)\n\treturn ioutil.WriteFile(k.path, data, 0644)\n}",
"func saveHostMetadata(metadata Metadata) error {\n\tdataBytes, err := json.Marshal(metadata)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"[Telemetry] marshal data failed with err %+v\", err)\n\t}\n\n\tif err = ioutil.WriteFile(metadataFile, dataBytes, 0644); err != nil {\n\t\ttelemetryLogger.Printf(\"[Telemetry] Writing metadata to file failed: %v\", err)\n\t}\n\n\treturn err\n}",
"func (in *Database) SaveNetwork(netw *types.Network) error {\n\tif netw.ID == \"\" {\n\t\tid := stringid.GenerateRandomID()\n\t\tnetw.ID = id\n\t\tnetw.ShortID = stringid.TruncateID(id)\n\t\tnetw.Created = time.Now()\n\t}\n\treturn in.save(\"network\", netw)\n}",
"func saveHostMetadata(metadata Metadata) error {\n\tdataBytes, err := json.Marshal(metadata)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"[Telemetry] marshal data failed with err %+v\", err)\n\t}\n\n\tif err = ioutil.WriteFile(metadataFile, dataBytes, 0644); err != nil {\n\t\tlog.Logf(\"[Telemetry] Writing metadata to file failed: %v\", err)\n\t}\n\n\treturn err\n}",
"func (c Configuration) Save() {\n\tbuf := new(bytes.Buffer)\n\tif err := toml.NewEncoder(buf).Encode(c); err != nil {\n\t\tlog.Fatalln(\"Failed to encode config\", err)\n\t}\n\tf, err := os.Create(configFile)\n\tif err != nil {\n\t\tlog.Fatalln(\"Failed to create file\", err)\n\t\treturn\n\t}\n\n\tw := bufio.NewWriter(f)\n\tbuf.WriteTo(w)\n\tw.Flush()\n}",
"func saveDummyConfig() {\n\tif file, err := os.Create(\"dummyconfig.json\"); err == nil {\n\t\tencoder := json.NewEncoder(file)\n\t\tvar config Config\n\t\tconfig.Cluster = make([]ClusterConfig, 0)\n\t\tvar def, cluster ClusterConfig\n\t\tdef.Name = \"default\"\n\t\tdef.Address = \"http://localhost:8888/\"\n\t\tdef.ProtocolVersion = \"v1\"\n\t\tcluster.Name = \"cluster1\"\n\t\tcluster.Address = \"http://localhost:8282/\"\n\t\tcluster.ProtocolVersion = \"v1\"\n\t\tconfig.Cluster = append(config.Cluster, def)\n\t\tconfig.Cluster = append(config.Cluster, cluster)\n\t\tencoder.Encode(config)\n\t\tfile.Close()\n\t}\n}",
"func (c *Config) Save(name, key, value string) (err error) {\n\treturn c.SaveGlobal(name, key, value)\n}",
"func (defaultStorage) Save() error {\n\tpanic(noConfigStorage)\n}",
"func save() {\n\tnaksuIniPath := getIniFilePath()\n\n\terr := cfg.SaveTo(naksuIniPath)\n\tif err != nil {\n\t\tlog.Error(\"%s save failed: %v\", naksuIniPath, err)\n\t}\n}",
"func setEdisonInterfaces(i config.Interfaces, ip string) error {\n\n\tif dialogs.YesNoDialog(\"Would you like to assign static IP wlan address for your board?\") {\n\n\t\t// assign static ip\n\t\tfmt.Println(\"[+] ********NOTE: ADJUST THESE VALUES ACCORDING TO YOUR LOCAL NETWORK CONFIGURATION********\")\n\n\t\tfor {\n\t\t\tfmt.Printf(\"[+] Current values are:\\n \\t[+] Address:%s\\n\\t[+] Gateway:%s\\n\\t[+] Netmask:%s\\n\\t[+] DNS:%s\\n\",\n\t\t\t\ti.Address, i.Gateway, i.Netmask, i.DNS)\n\n\t\t\tif dialogs.YesNoDialog(\"Change values?\") {\n\t\t\t\tconfig.AskInterfaceParams(&i)\n\t\t\t}\n\n\t\t\tfmt.Println(\"[+] NOTE: You might need to enter your Edison board password\")\n\n\t\t\targs1 := []string{\n\t\t\t\t\"root@\" + ip,\n\t\t\t\t\"-t\",\n\t\t\t\tfmt.Sprintf(\"sed -i.bak -e '53 s/.*/ifconfig $IFNAME %s netmask %s/g' /etc/wpa_supplicant/wpa_cli-actions.sh\",\n\t\t\t\t\ti.Address, i.Netmask),\n\t\t\t}\n\n\t\t\targs2 := []string{\n\t\t\t\t\"root@\" + ip,\n\t\t\t\t\"-t\",\n\t\t\t\tfmt.Sprintf(\"sed -i -e '54i route add default gw %s' /etc/wpa_supplicant/wpa_cli-actions.sh\",\n\t\t\t\t\ti.Gateway),\n\t\t\t}\n\n\t\t\targs3 := []string{\n\t\t\t\t\"root@\" + ip,\n\t\t\t\t\"-t\",\n\t\t\t\tfmt.Sprintf(\"echo nameserver %s > /etc/resolv.conf\", i.DNS),\n\t\t\t}\n\t\t\tifaceDown := []string{\n\t\t\t\t\"root@\" + ip,\n\t\t\t\t\"-t\",\n\t\t\t\tfmt.Sprint(\"ifconfig wlan0 down\"),\n\t\t\t}\n\n\t\t\tifaceUp := []string{\n\t\t\t\t\"-o\",\n\t\t\t\t\"StrictHostKeyChecking=no\",\n\t\t\t\t\"root@\" + ip,\n\t\t\t\t\"-t\",\n\t\t\t\tfmt.Sprint(\"ifconfig wlan0 up\"),\n\t\t\t}\n\t\t\tfmt.Println(\"[+] Updating network configuration\")\n\t\t\tif err := help.ExecStandardStd(\"ssh\", args1...); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfmt.Println(\"[+] Updating gateway settings\")\n\t\t\tif err := help.ExecStandardStd(\"ssh\", args2...); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfmt.Println(\"[+] Adding custom nameserver\")\n\t\t\tif err := help.ExecStandardStd(\"ssh\", 
args3...); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfmt.Println(\"[+] Reloading interface settings\")\n\t\t\tif err := help.ExecStandardStd(\"ssh\", ifaceDown...); err != nil {\n\t\t\t\tfmt.Println(\"[-] Error shutting down wlan0 interface: \", err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tif err := help.ExecStandardStd(\"ssh\", ifaceUp...); err != nil {\n\t\t\t\tfmt.Println(\"[-] Error starting wlan0 interface: \", err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n\n}",
"func Save(pushURL string, tart config.Tart) {\n\tif config.All().Tarts == nil {\n\t\tconfig.All().Tarts = map[string]config.Tart{}\n\t}\n\tconfig.All().Tarts[pushURL] = tart\n\tconfig.Flush()\n}",
"func (w *Hostworker) SaveToFile() bool {\n\tos.Mkdir(\"/etc/lantern/hosts\", 0777)\n\tfile, err := os.Create(\"/etc/lantern/hosts/host_\" + w.Host + \".txt\")\n\tif err != nil {\n\t\tw.crawler.cfg.LogError(err)\n\t\treturn false\n\t}\n\tdefer func() {\n\t\tfile.Close()\n\t}()\n\n\twriter := bufio.NewWriter(file)\n\tw.SaveToWriter(writer)\n\twriter.Flush()\n\treturn true\n}",
"func (m *Machine) Save(n string) error {\n\tf, err := os.Create(n)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create vm config file\")\n\t}\n\tdefer f.Close()\n\n\tif err := json.NewEncoder(f).Encode(m); err != nil {\n\t\treturn errors.Wrap(err, \"failed to serialize machine object\")\n\t}\n\n\treturn nil\n}",
"func (c *Config) Save(filename string) (err error) {\n\tlog.Println(\"[DEBUG] Save\", filename)\n\n\tbody, err := json.MarshalIndent(c, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = c.writeFile(filename, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (s *ServiceState) save() {\n\tlog.Lvl3(\"Saving service\")\n\tb, err := network.Marshal(s.Storage)\n\tif err != nil {\n\t\tlog.Error(\"Couldn't marshal service:\", err)\n\t} else {\n\t\terr = ioutil.WriteFile(s.path+\"/prifi.bin\", b, 0660)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Couldn't save file:\", err)\n\t\t}\n\t}\n}",
"func (device *BlockDevice) Save() config.DeviceState {\n\tds := device.GenericDevice.Save()\n\tds.Type = string(device.DeviceType())\n\n\tds.BlockDrive = device.BlockDrive\n\n\treturn ds\n}",
"func (c *Config) Save() (err error) {\n\tc.init()\n\tlog.Infof(\"save config to %v\", c.Filename)\n\tdir := filepath.Dir(c.Filename)\n\tif _, e := os.Stat(dir); os.IsNotExist(e) {\n\t\terr = os.MkdirAll(dir, os.ModePerm)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"save config to %v fail with %v\", c.Filename, err)\n\t\t\treturn\n\t\t}\n\t}\n\terr = marshal(c.Filename, c)\n\tif err == nil {\n\t\tlog.Infof(\"save config to %v success\", c.Filename)\n\t} else {\n\t\tlog.Errorf(\"save config to %v fail with %v\", c.Filename, err)\n\t}\n\treturn\n}",
"func (o QperfSpecServerConfigurationOutput) HostNetwork() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v QperfSpecServerConfiguration) *bool { return v.HostNetwork }).(pulumi.BoolPtrOutput)\n}",
"func Persist() error {\n\tJSON, err := json.MarshalIndent(instance, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = os.WriteFile(filepath.Join(GetConfigFolder(), configName), JSON, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (o QperfSpecClientConfigurationOutput) HostNetwork() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v QperfSpecClientConfiguration) *bool { return v.HostNetwork }).(pulumi.BoolPtrOutput)\n}",
"func (cfg *Configuration) Save() error {\n\tcfg.locker.Lock()\n\tdefer cfg.locker.Unlock()\n\tif cfg.FilePath == \"\" {\n\t\treturn errors.New(\"Configuration.FilePath was not set\")\n\t}\n\treturn gonfig.Write(cfg, true)\n}",
"func (c *Passward) Save() error {\n\n\tif !util.DirectoryExists(c.Path) {\n\t\tif err := os.MkdirAll(c.Path, 0700); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfile, err := os.Create(c.configPath())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\tif err := toml.NewEncoder(file).Encode(c); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (self *JsonConfig) Save() (err error) {\n\tb, err := json.Marshal(self.Configurable.All())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := ioutil.WriteFile(self.Path, b, 0600); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func Write(tx Transport, host string, data, info []string, options ...TransportOption) error {\n\t// the Kind should configure the transport parameters before\n\n\terr := tx.Connect(host, options...)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s: %s\", host, err)\n\t}\n\n\tdefer tx.Close()\n\n\tfor i1, d1 := range data {\n\t\terr := tx.Write(&d1, &info[i1])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not write config %s: %s\", d1, err)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (o Iperf3SpecServerConfigurationPtrOutput) HostNetwork() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *Iperf3SpecServerConfiguration) *bool {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.HostNetwork\n\t}).(pulumi.BoolPtrOutput)\n}",
"func (m *AospDeviceOwnerDeviceConfiguration) SetWifiBlockEditConfigurations(value *bool)() {\n err := m.GetBackingStore().Set(\"wifiBlockEditConfigurations\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (cfg OpenVpnCfg) Save() error {\n\tcfgPath := getCfgPath(cfg.Name)\n\tkeyPath := getKeyPath(cfg.Name)\n\n\tcfgFile, err := os.OpenFile(cfgPath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tcfgFile.Close()\n\t\tif err != nil {\n\t\t\tos.Remove(cfgPath)\n\t\t}\n\t}()\n\tkeyFile, err := os.OpenFile(keyPath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tkeyFile.Close()\n\t\tif err != nil {\n\t\t\tos.Remove(keyPath)\n\t\t}\n\t}()\n\targ := templateArg{\n\t\tOpenVpnCfg: cfg,\n\t\tLibexecdir: staticconfig.Libexecdir,\n\t}\n\tif err = openVpnCfgTpl.Execute(cfgFile, arg); err != nil {\n\t\treturn err\n\t}\n\t_, err = keyFile.Write([]byte(cfg.Key))\n\treturn err\n}",
"func (c *Configuration) Save(filename string) error {\n\tb, err := json.MarshalIndent(c, \"\", \"\\t\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\t_, err = f.Write([]byte(configHeader))\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = f.Write(b)\n\treturn err\n}",
"func (c *Direct) SetHostinfo(hi *tailcfg.Hostinfo) bool {\n\tif hi == nil {\n\t\tpanic(\"nil Hostinfo\")\n\t}\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif hi.Equal(c.hostinfo) {\n\t\treturn false\n\t}\n\tc.hostinfo = hi.Clone()\n\tj, _ := json.Marshal(c.hostinfo)\n\tc.logf(\"HostInfo: %s\", j)\n\treturn true\n}",
"func SaveConfig(path string) error {\n\t// TODO: Implement\n\n\treturn nil\n}",
"func (ws *WalletStore) Save() {\n\tvar buffer bytes.Buffer\n\tgob.Register(elliptic.P256())\n\tencoder := gob.NewEncoder(&buffer)\n\terr := encoder.Encode(ws.Wallets)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfile := ws.Config.GetWalletStoreFile(ws.NodeID)\n\terr = ioutil.WriteFile(file, buffer.Bytes(), 0644)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}",
"func (b *BlockCreator) saveSync() error {\n\treturn persist.SaveJSON(settingsMetadata, b.persist, filepath.Join(b.persistDir, settingsFile))\n}",
"func SaveConfig(conf ClientConfig) error {\n configFilePath, err := getConfigFilePath()\n if err != nil {\n return err\n }\n\n d, err := yaml.Marshal(&conf)\n if err != nil {\n return err\n }\n writeErr := os.WriteFile(configFilePath, d, 0666)\n if writeErr != nil {\n return writeErr\n }\n\n return nil\n}",
"func (o Iperf3SpecClientConfigurationPtrOutput) HostNetwork() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *Iperf3SpecClientConfiguration) *bool {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.HostNetwork\n\t}).(pulumi.BoolPtrOutput)\n}",
"func (c Config) Save(writer io.Writer) error {\n\tlog.Printf(\"Saving config\")\n\tcontent, err := json.MarshalIndent(c, \"\", \" \")\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\t_, err = writer.Write(content)\n\treturn errors.WithStack(err)\n}",
"func (api *configurationsnapshotAPI) Save(obj *cluster.ConfigurationSnapshotRequest) (*cluster.ConfigurationSnapshot, error) {\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. Err: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn apicl.ClusterV1().ConfigurationSnapshot().Save(context.Background(), obj)\n\t}\n\tif api.localSaveHandler != nil {\n\t\treturn api.localSaveHandler(obj)\n\t}\n\treturn nil, fmt.Errorf(\"Action not implemented for local operation\")\n}",
"func (c *Config) Save(path string) error {\n\tconfigBytes, err := yaml.Marshal(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(path, configBytes, 0600)\n}",
"func (c *Config) Save(filename string) {\n\tconfigFile, err := os.Create(filename)\n\tif err != nil {\n\t\tlogrus.Error(\"creating config file\", err.Error())\n\t}\n\n\tlogrus.Info(\"Save config: \", c)\n\tvar out bytes.Buffer\n\tb, err := json.MarshalIndent(c, \"\", \"\\t\")\n\tif err != nil {\n\t\tlogrus.Error(\"error marshal json\", err)\n\t}\n\tjson.Indent(&out, b, \"\", \"\\t\")\n\tout.WriteTo(configFile)\n}",
"func (c *FwGeneral) Set(e Config) error {\n var err error\n _, fn := c.versioning()\n c.con.LogAction(\"(set) general settings\")\n\n path := c.xpath()\n path = path[:len(path) - 1]\n\n _, err = c.con.Set(path, fn(e), nil, nil)\n return err\n}",
"func (o *PersistConfig) Persist(storage endpoint.ConfigStorage) error {\n\treturn nil\n}",
"func (c *Config) Save(filename string) error {\n\tvar b bytes.Buffer\n\tenc := json.NewEncoder(&b)\n\tenc.SetIndent(\"\", \"\\t\")\n\tif err := enc.Encode(c); err != nil {\n\t\treturn fmt.Errorf(\"error encoding configuration: %w\", err)\n\t}\n\tif err := os.WriteFile(filename, b.Bytes(), 0600); err != nil {\n\t\treturn fmt.Errorf(\"error writing %q: %w\", filename, err)\n\t}\n\treturn nil\n}",
"func (hdb *HostDB) insertBlockchainHost(host modules.HostDBEntry) {\n\t// Remove garbage hosts and local hosts (but allow local hosts in testing).\n\tif err := host.NetAddress.IsValid(); err != nil {\n\t\thdb.staticLog.Debugf(\"WARN: host '%v' has an invalid NetAddress: %v\", host.NetAddress, err)\n\t\treturn\n\t}\n\t// Ignore all local hosts announced through the blockchain.\n\tif build.Release == \"standard\" && host.NetAddress.IsLocal() {\n\t\treturn\n\t}\n\n\t// Make sure the host gets into the host tree so it does not get dropped if\n\t// shutdown occurs before a scan can be performed.\n\toldEntry, exists := hdb.staticHostTree.Select(host.PublicKey)\n\tif exists {\n\t\t// Replace the netaddress with the most recently announced netaddress.\n\t\t// Also replace the FirstSeen value with the current block height if\n\t\t// the first seen value has been set to zero (no hosts actually have a\n\t\t// first seen height of zero, but due to rescans hosts can end up with\n\t\t// a zero-value FirstSeen field.\n\t\toldEntry.NetAddress = host.NetAddress\n\t\tif oldEntry.FirstSeen == 0 {\n\t\t\toldEntry.FirstSeen = hdb.blockHeight\n\t\t}\n\t\t// Resolve the host's used subnets and update the timestamp if they\n\t\t// changed. 
We only update the timestamp if resolving the ipNets was\n\t\t// successful.\n\t\tipNets, err := hdb.staticLookupIPNets(oldEntry.NetAddress)\n\t\tif err == nil && !equalIPNets(ipNets, oldEntry.IPNets) {\n\t\t\toldEntry.IPNets = ipNets\n\t\t\toldEntry.LastIPNetChange = time.Now()\n\t\t}\n\t\t// Modify hosttree\n\t\terr = hdb.modify(oldEntry)\n\t\tif err != nil {\n\t\t\thdb.staticLog.Println(\"ERROR: unable to modify host entry of host tree after a blockchain scan:\", err)\n\t\t}\n\t} else {\n\t\thost.FirstSeen = hdb.blockHeight\n\t\t// Insert into hosttree\n\t\terr := hdb.insert(host)\n\t\tif err != nil {\n\t\t\thdb.staticLog.Println(\"ERROR: unable to insert host entry into host tree after a blockchain scan:\", err)\n\t\t}\n\t}\n\n\t// Add the host to the scan queue.\n\thdb.queueScan(host)\n}",
"func (c *Config) Save() error {\n\tdir, err := getConfigDir()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfile, err := json.MarshalIndent(c, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp := path.Join(dir, configFile)\n\treturn ioutil.WriteFile(p, file, 0644)\n}",
"func (cfg *Config) SaveConfig() error {\n\t// Find home directory.\n\thome, err := homedir.Dir()\n\tif err != nil {\n\t\tlog.Printf(\"Error when fetching home directory\\n%v\", err)\n\t\treturn err\n\t}\n\n\tviper.SetConfigName(\".alfred\")\n\tviper.AddConfigPath(home)\n\tviper.Set(\"output_format\", cfg.OutputFormat)\n\tviper.Set(\"slack_token\", cfg.SlackToken)\n\tviper.Set(\"todoist_token\", cfg.TodoistToken)\n\treturn viper.WriteConfig()\n}",
"func saveStore(s dhtStore) {\n\tif s.path == \"\" {\n\t\treturn\n\t}\n\ttmp, err := ioutil.TempFile(s.path, \"marconi\")\n\tif err != nil {\n\t\tlog.Println(\"saveStore tempfile:\", err)\n\t\treturn\n\t}\n\terr = json.NewEncoder(tmp).Encode(s)\n\t// The file has to be closed already otherwise it can't be renamed on\n\t// Windows.\n\ttmp.Close()\n\tif err != nil {\n\t\tlog.Println(\"saveStore json encoding:\", err)\n\t\treturn\n\t}\n\n\t// Write worked, so replace the existing file. That's atomic in Linux, but\n\t// not on Windows.\n\tp := fmt.Sprintf(\"%v-%v\", s.path+\"/dht\", s.Port)\n\tif err := os.Rename(tmp.Name(), p); err != nil {\n\t\t// if os.IsExist(err) {\n\t\t// Not working for Windows:\n\t\t// http://code.google.com/p/go/issues/detail?id=3828\n\n\t\t// It's not possible to atomically rename files on Windows, so I\n\t\t// have to delete it and try again. If the program crashes between\n\t\t// the unlink and the rename operation, it loses the configuration,\n\t\t// unfortunately.\n\t\tif err := os.Remove(p); err != nil {\n\t\t\tlog.Println(\"saveStore failed to remove the existing config:\", err)\n\t\t\treturn\n\t\t}\n\t\tif err := os.Rename(tmp.Name(), p); err != nil {\n\t\t\tlog.Println(\"saveStore failed to rename file after deleting the original config:\", err)\n\t\t\treturn\n\t\t}\n\t\t// } else {\n\t\t// \tlog.Println(\"saveStore failed when replacing existing config:\", err)\n\t\t// }\n\t} else {\n\t\t// log.Println(\"Saved DHT routing table to the filesystem.\")\n\t}\n}",
"func (cr *ConnectionRegistry) Save(c *Connection) error {\n\tif cr.list[c.ID] != nil {\n\t\treturn errors.New(\"id already in use\")\n\t}\n\tcr.list[c.ID] = c\n\tlog.Println(\"registering:\", c.ID, \" # of connections: \", len(cr.list))\n\treturn nil\n}",
"func (c *Config) Save(cfgPath string) error {\n\tcfgFile, err := yaml.Marshal(c)\n\tif err == nil {\n\t\terr = ioutil.WriteFile(cfgPath, cfgFile, 0600)\n\t}\n\treturn err\n}",
"func (epc *EquipmentPortCreate) Save(ctx context.Context) (*EquipmentPort, error) {\n\tif epc.create_time == nil {\n\t\tv := equipmentport.DefaultCreateTime()\n\t\tepc.create_time = &v\n\t}\n\tif epc.update_time == nil {\n\t\tv := equipmentport.DefaultUpdateTime()\n\t\tepc.update_time = &v\n\t}\n\tif len(epc.definition) > 1 {\n\t\treturn nil, errors.New(\"ent: multiple assignments on a unique edge \\\"definition\\\"\")\n\t}\n\tif epc.definition == nil {\n\t\treturn nil, errors.New(\"ent: missing required edge \\\"definition\\\"\")\n\t}\n\tif len(epc.parent) > 1 {\n\t\treturn nil, errors.New(\"ent: multiple assignments on a unique edge \\\"parent\\\"\")\n\t}\n\tif len(epc.link) > 1 {\n\t\treturn nil, errors.New(\"ent: multiple assignments on a unique edge \\\"link\\\"\")\n\t}\n\treturn epc.sqlSave(ctx)\n}",
"func (connection *Connection) Save(path string) error {\n\tjson, err := json.Marshal(connection)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(path, json, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (cfg *Config) Save(filename string) error {\n\tfilename_ := C.CString(filename)\n\tdefer freeString(filename_)\n\tok := bool(C.al_save_config_file(filename_, (*C.ALLEGRO_CONFIG)(cfg)))\n\tif !ok {\n\t\treturn fmt.Errorf(\"failed to save config file to '%s'\", filename)\n\t}\n\treturn nil\n}",
"func (wAPI WalletAPI) Save() error {\n\t_, _, err := wAPI.sendRequest(\n\t\t\"PUT\",\n\t\twAPI.Host+\":\"+wAPI.Port+\"/save\",\n\t\t\"\",\n\t)\n\n\treturn err\n}",
"func (c *Config) Save(file *os.File) error {\n\tif file == nil && c.file != nil {\n\t\tfile = c.file\n\t}\n\n\tif err := file.Truncate(0); err != nil {\n\t\treturn err\n\t}\n\tif _, err := file.Seek(0, 0); err != nil {\n\t\treturn err\n\t}\n\tif err := yaml.NewEncoder(file).Encode(c); err != nil {\n\t\treturn err\n\t}\n\n\treturn file.Sync()\n}",
"func (config *Config) Save(file string) error {\n\tbts, err := json.Marshal(*config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar out bytes.Buffer\n\tjson.Indent(&out, bts, \"\", \"\\t\")\n\n\treturn ioutil.WriteFile(file, out.Bytes(), 0600)\n}",
"func (c *Config) Save() error {\n\tpath := filepath.Dir(c.filePath)\n\terr := os.MkdirAll(path, directoryPermissions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\traw, err := json.MarshalIndent(c, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(c.filePath, raw, filePermissions)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func Save(c Config) error {\n\tb, err := toml.Marshal(c)\n\tif err != nil {\n\t\treturn errors.Wrap(errReadConfigFile, err)\n\t}\n\tfile := dfltFile\n\tif c.File != \"\" {\n\t\tfile = c.File\n\t}\n\tif err := os.WriteFile(file, b, 0644); err != nil {\n\t\treturn errors.Wrap(errWritingConfigFile, err)\n\t}\n\n\treturn nil\n}",
"func (s *slaveContext) SaveConfiguration() error {\n // master pubkey\n if mpubkey, err := s.GetMasterPublicKey(); err != nil {\n return errors.WithStack(err)\n } else {\n s.config.SaveMasterPublicKey(mpubkey)\n }\n // save slave node name to hostname\n if err := s.config.SaveHostname(); err != nil {\n return errors.WithStack(err)\n }\n // update hosts\n if err := s.config.UpdateHostsFile(); err != nil {\n return errors.WithStack(err)\n }\n/*\n // slave network interface\n TODO : (2017-05-15) we'll re-evaluate this option later. For not, this is none critical\n if err = s.config.SaveFixedNetworkInterface(); err != nil {\n return errors.WithStack(err)\n }\n*/\n // update linux hostname with systemd\n if err := exec.Command(\"/usr/bin/hostnamectl\", \"set-hostname\", s.config.SlaveSection.SlaveNodeName).Run(); err != nil {\n return errors.WithStack(err)\n }\n // save slave config into yaml\n return s.config.SaveSlaveConfig()\n}",
"func SaveRemote(remote *cfg.Remote) error {\n\tremotes, err := GetRemotes()\n\tif err != nil {\n\t\treturn err\n\t}\n\tremotes.SetRemote(*remote)\n\treturn Write(InertiaRemotesPath(), remotes)\n}",
"func (o *NSPortInfo) Save() *bambou.Error {\n\n\treturn bambou.CurrentSession().SaveEntity(o)\n}",
"func saveClusterIP(current, overlay *unstructured.Unstructured) error {\n\t// Save the value of spec.clusterIP set by the cluster\n\tif clusterIP, found, err := unstructured.NestedString(current.Object, \"spec\",\n\t\t\"clusterIP\"); err != nil {\n\t\treturn err\n\t} else if found {\n\t\tif err := unstructured.SetNestedField(overlay.Object, clusterIP, \"spec\",\n\t\t\t\"clusterIP\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (c *MetaConfig) Save() error {\n\tdata, err := json.Marshal(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(MetaConfigPath, data, 0644)\n}",
"func SaveHvacConfig(db Database, cfg dhvac.HvacSetup) error {\n\tcriteria := make(map[string]interface{})\n\tcriteria[\"Mac\"] = cfg.Mac\n\treturn SaveOnUpdateObject(db, cfg, pconst.DbConfig, pconst.TbHvacs, criteria)\n}",
"func (c *ConfigManager) Save() error {\n\tlogger.V(1).Info(\"saving ConfigMap\")\n\n\tvar tmpOptions pomeriumconfig.Options\n\n\ttmpOptions, err := c.GetCurrentConfig()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not render current config: %w\", err)\n\t}\n\n\t// Make sure we can load the target configmap\n\tconfigObj := &corev1.ConfigMap{}\n\tif err := c.client.Get(context.Background(), types.NamespacedName{Name: c.configMap, Namespace: c.namespace}, configObj); err != nil {\n\t\terr = fmt.Errorf(\"output configmap not found: %w\", err)\n\t\treturn err\n\t}\n\n\tconfigBytes, err := yaml.Marshal(tmpOptions)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not serialize config: %w\", err)\n\t}\n\n\tconfigObj.Data = map[string]string{configKey: string(configBytes)}\n\n\t// TODO set deadline?\n\t// TODO use context from save?\n\terr = c.client.Update(context.Background(), configObj)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to update configmap: %w\", err)\n\t}\n\n\tlogger.Info(\"successfully saved ConfigMap\")\n\tc.pendingSave = false\n\treturn nil\n}",
"func (service *HTTPRestService) setNetworkInfo(networkName string, networkInfo *networkInfo) {\n\tservice.lock.Lock()\n\tdefer service.lock.Unlock()\n\tservice.state.Networks[networkName] = networkInfo\n\n\treturn\n}",
"func (s *Server) SaveConfig() (err error) {\n\t// TODO: Switch to an atomic implementation like renameio. Consider what\n\t// happens if Config.Save() panics: we'll have truncated the file\n\t// on disk and the hub will be unable to recover. For now, since we normally\n\t// only save the configuration during initialize and any configuration\n\t// errors could be fixed by reinitializing, the risk seems small.\n\tfile, err := utils.System.Create(upgrade.GetConfigFile())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif cerr := file.Close(); cerr != nil {\n\t\t\tcerr = xerrors.Errorf(\"closing hub configuration: %w\", cerr)\n\t\t\terr = multierror.Append(err, cerr).ErrorOrNil()\n\t\t}\n\t}()\n\n\terr = s.Config.Save(file)\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"saving hub configuration: %w\", err)\n\t}\n\n\treturn nil\n}",
"func (c *ConfigurationFile) Save() error {\n\tcontent, err := json.Marshal(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(c.location.get(), content, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func SetHost(v string) {\n\traw.Host = v\n}",
"func (set *HostSet) AddHost(c renter.Contract) {\n\tlh := new(lockedHost)\n\t// lazy connection function\n\tvar lastSeen time.Time\n\tlh.reconnect = func() error {\n\t\tif lh.s != nil && !lh.s.IsClosed() {\n\t\t\t// if it hasn't been long since the last reconnect, assume the\n\t\t\t// connection is still open\n\t\t\tif time.Since(lastSeen) < 2*time.Minute {\n\t\t\t\tlastSeen = time.Now()\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// otherwise, the connection *might* still be open; test by sending\n\t\t\t// a \"ping\" RPC\n\t\t\t//\n\t\t\t// NOTE: this is somewhat inefficient; it means we might incur an\n\t\t\t// extra roundtrip when we don't need to. Better would be for the\n\t\t\t// caller to handle the reconnection logic after calling whatever\n\t\t\t// RPC it wants to call; that way, we only do extra work if the host\n\t\t\t// has actually disconnected. But that feels too burdensome.\n\t\t\tif _, err := lh.s.Settings(); err == nil {\n\t\t\t\tlastSeen = time.Now()\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// connection timed out, or some other error occurred; close our\n\t\t\t// end (just in case) and fallthrough to the reconnection logic\n\t\t\tlh.s.Close()\n\t\t}\n\t\thostIP, err := set.hkr.ResolveHostKey(c.HostKey)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not resolve host key: %w\", err)\n\t\t}\n\t\t// create and lock the session manually so that we can use our custom\n\t\t// lock timeout\n\t\tlh.s, err = proto.NewUnlockedSession(hostIP, c.HostKey, set.currentHeight)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := lh.s.Lock(c.ID, c.RenterKey, set.lockTimeout); err != nil {\n\t\t\tlh.s.Close()\n\t\t\treturn err\n\t\t} else if _, err := lh.s.Settings(); err != nil {\n\t\t\tlh.s.Close()\n\t\t\treturn err\n\t\t}\n\t\tset.onConnect(lh.s)\n\t\tlastSeen = time.Now()\n\t\treturn nil\n\t}\n\tset.sessions[c.HostKey] = lh\n}",
"func Host(host string) func(*Config) error {\n\treturn func(c *Config) error {\n\t\tc.Host = host\n\t\treturn nil\n\t}\n}",
"func (f *freeClientPool) saveToDb() {\n\tnow := f.clock.Now()\n\tstorage := freeClientPoolStorage{\n\t\tLogOffset: uint64(f.logOffset(now)),\n\t\tList: make([]*freeClientPoolEntry, len(f.addressMap)),\n\t}\n\ti := 0\n\tfor _, e := range f.addressMap {\n\t\tif e.connected {\n\t\t\tf.calcLogUsage(e, now)\n\t\t}\n\t\tstorage.List[i] = e\n\t\ti++\n\t}\n\tenc, err := rlp.EncodeToBytes(storage)\n\tif err != nil {\n\t\tlog.Error(\"Failed to encode client list\", \"err\", err)\n\t} else {\n\t\tf.db.Put([]byte(\"freeClientPool\"), enc)\n\t}\n}",
"func (c *ChainPing) SavePing() bool {\n\tc.mux.Lock()\n\tdefer c.mux.Unlock()\n\tlog.Printf(\"writing Ping chain of length %d\", len(c.Chain))\n\tbytes := parser.ParseToJSONPing(c.Chain)\n\te := saveToHDD(c.Path, bytes)\n\tif e != nil {\n\t\tpanic(e)\n\t}\n\treturn true\n}",
"func (ps *PlatformService) SaveConfig(newCfg *model.Config, sendConfigChangeClusterMessage bool) (*model.Config, *model.Config, *model.AppError) {\n\toldCfg, newCfg, err := ps.configStore.Set(newCfg)\n\tif errors.Is(err, config.ErrReadOnlyConfiguration) {\n\t\treturn nil, nil, model.NewAppError(\"saveConfig\", \"ent.cluster.save_config.error\", nil, \"\", http.StatusForbidden).Wrap(err)\n\t} else if err != nil {\n\t\treturn nil, nil, model.NewAppError(\"saveConfig\", \"app.save_config.app_error\", nil, \"\", http.StatusInternalServerError).Wrap(err)\n\t}\n\n\tif ps.startMetrics && *ps.Config().MetricsSettings.Enable {\n\t\tps.RestartMetrics()\n\t} else {\n\t\tps.ShutdownMetrics()\n\t}\n\n\tif ps.clusterIFace != nil {\n\t\terr := ps.clusterIFace.ConfigChanged(ps.configStore.RemoveEnvironmentOverrides(oldCfg),\n\t\t\tps.configStore.RemoveEnvironmentOverrides(newCfg), sendConfigChangeClusterMessage)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\treturn oldCfg, newCfg, nil\n}",
"func (vm *vmQemu) fillNetworkDevice(name string, m deviceConfig.Device) (deviceConfig.Device, error) {\n\tnewDevice := m.Clone()\n\tupdateKey := func(key string, value string) error {\n\t\ttx, err := vm.state.Cluster.Begin()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = db.ContainerConfigInsert(tx, vm.id, map[string]string{key: value})\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\treturn err\n\t\t}\n\n\t\terr = db.TxCommit(tx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t// Fill in the MAC address\n\tif !shared.StringInSlice(m[\"nictype\"], []string{\"physical\", \"ipvlan\", \"sriov\"}) && m[\"hwaddr\"] == \"\" {\n\t\tconfigKey := fmt.Sprintf(\"volatile.%s.hwaddr\", name)\n\t\tvolatileHwaddr := vm.localConfig[configKey]\n\t\tif volatileHwaddr == \"\" {\n\t\t\t// Generate a new MAC address\n\t\t\tvolatileHwaddr, err := deviceNextInterfaceHWAddr()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// Update the database\n\t\t\terr = query.Retry(func() error {\n\t\t\t\terr := updateKey(configKey, volatileHwaddr)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// Check if something else filled it in behind our back\n\t\t\t\t\tvalue, err1 := vm.state.Cluster.ContainerConfigGet(vm.id, configKey)\n\t\t\t\t\tif err1 != nil || value == \"\" {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\tvm.localConfig[configKey] = value\n\t\t\t\t\tvm.expandedConfig[configKey] = value\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tvm.localConfig[configKey] = volatileHwaddr\n\t\t\t\tvm.expandedConfig[configKey] = volatileHwaddr\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tnewDevice[\"hwaddr\"] = volatileHwaddr\n\t}\n\n\treturn newDevice, nil\n}",
"func SaveCloudsConfig(data []byte) error {\n\thomedir, err := homedir.Dir()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcfgPath := filepath.Join(homedir, DevSpaceCloudConfigPath)\n\terr = os.MkdirAll(filepath.Dir(cfgPath), 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ioutil.WriteFile(cfgPath, data, 0600)\n}",
"func (s *Syncthing) SaveConfig(dev *model.Dev) error {\n\tmarshalled, err := yaml.Marshal(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsyncthingInfoFile := getInfoFile(dev.Namespace, dev.Name)\n\tif err := os.WriteFile(syncthingInfoFile, marshalled, 0600); err != nil {\n\t\treturn fmt.Errorf(\"failed to write syncthing info file: %w\", err)\n\t}\n\n\treturn nil\n}",
"func Save(app *cli.Context) error {\n\tif err := cmds.InitLogging(); err != nil {\n\t\treturn err\n\t}\n\treturn save(app, &cmds.ServerConfig)\n}"
] | [
"0.57114166",
"0.5705443",
"0.55867183",
"0.55641735",
"0.5545801",
"0.5540158",
"0.55362916",
"0.5509479",
"0.5506129",
"0.5503006",
"0.5464419",
"0.5461447",
"0.5456357",
"0.54298675",
"0.5423216",
"0.5402514",
"0.5287204",
"0.5237437",
"0.5235597",
"0.52300316",
"0.522529",
"0.52008885",
"0.51970625",
"0.5190391",
"0.51560247",
"0.51429504",
"0.51167125",
"0.510831",
"0.5104182",
"0.50794",
"0.506037",
"0.5048555",
"0.503774",
"0.50306135",
"0.50082564",
"0.50077134",
"0.50053287",
"0.5000288",
"0.49943665",
"0.49883357",
"0.49755937",
"0.49525356",
"0.49459347",
"0.49404284",
"0.49333116",
"0.4918979",
"0.49127597",
"0.49054378",
"0.4899529",
"0.48971173",
"0.48877898",
"0.4886714",
"0.4886425",
"0.48856658",
"0.48692548",
"0.48646855",
"0.48599562",
"0.4852498",
"0.4848781",
"0.482615",
"0.48205963",
"0.48134598",
"0.48101157",
"0.4806775",
"0.48050693",
"0.480375",
"0.4800942",
"0.48000008",
"0.47892252",
"0.47799033",
"0.4775741",
"0.47687295",
"0.47679403",
"0.47660974",
"0.4751455",
"0.47444054",
"0.4742978",
"0.4726026",
"0.4723586",
"0.47196552",
"0.47161388",
"0.47155112",
"0.4704033",
"0.4694132",
"0.46923465",
"0.46918106",
"0.468867",
"0.46873245",
"0.4682927",
"0.46772492",
"0.4675236",
"0.46662387",
"0.46656907",
"0.46656182",
"0.4654023",
"0.46494576",
"0.46424916",
"0.46402735",
"0.46376544",
"0.46361384"
] | 0.76610893 | 0 |
SaveIPv4 changes the ipv4 configuration of the hostonly network. | SaveIPv4 изменяет конфигурацию ipv4 сети hostonly. | func (n *hostOnlyNetwork) SaveIPv4(vbox VBoxManager) error {
if n.IPv4.IP != nil && n.IPv4.Mask != nil {
if err := vbox.vbm("hostonlyif", "ipconfig", n.Name, "--ip", n.IPv4.IP.String(), "--netmask", net.IP(n.IPv4.Mask).String()); err != nil {
return err
}
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (f *FSEIDFields) SetIPv4Flag() {\n\tf.Flags |= 0x02\n}",
"func (internet Internet) IPv4(v reflect.Value) (interface{}, error) {\n\treturn internet.ipv4(), nil\n}",
"func IPv4(a, b, c, d uint8) IP {\n\treturn IP{\n\t\tlo: 0xffff00000000 | uint64(a)<<24 | uint64(b)<<16 | uint64(c)<<8 | uint64(d),\n\t\tz: z4,\n\t}\n}",
"func (n *hostOnlyNetwork) Save(vbox VBoxManager) error {\n\tif err := n.SaveIPv4(vbox); err != nil {\n\t\treturn err\n\t}\n\n\tif n.DHCP {\n\t\tvbox.vbm(\"hostonlyif\", \"ipconfig\", n.Name, \"--dhcp\") // not implemented as of VirtualBox 4.3\n\t}\n\n\treturn nil\n}",
"func IPv4(opts ...options.OptionFunc) string {\n\treturn singleFakeData(IPV4Tag, func() interface{} {\n\t\topt := options.BuildOptions(opts)\n\t\ti := Internet{fakerOption: *opt}\n\t\treturn i.ipv4()\n\t}, opts...).(string)\n}",
"func (i Internet) Ipv4() string {\n\tips := make([]string, 0, 4)\n\n\tips = append(ips, strconv.Itoa(i.Faker.IntBetween(1, 255)))\n\tfor j := 0; j < 3; j++ {\n\t\tips = append(ips, strconv.Itoa(i.Faker.IntBetween(0, 255)))\n\t}\n\n\treturn strings.Join(ips, \".\")\n}",
"func IPv4() (string, error) {\n\tconn, err := net.Dial(\"udp\", \"8.8.8.8:80\")\n\tif err != nil {\n\t\treturn \"\", errors.New(\"Failed to determine your IP\")\n\t}\n\tlocalAddr := conn.LocalAddr().(*net.UDPAddr)\n\tmyIP := localAddr.IP.String()\n\tconn.Close()\n\treturn myIP, nil\n}",
"func IPv4(name string) (string, error) {\n\ti, err := net.InterfaceByName(name)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\taddrs, err := i.Addrs()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, a := range addrs {\n\t\tif ipn, ok := a.(*net.IPNet); ok {\n\t\t\tif ipn.IP.To4() != nil {\n\t\t\t\treturn ipn.IP.String(), nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\", fmt.Errorf(\"no IPv4 found for interface: %q\", name)\n}",
"func Ipv4(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldIpv4), v))\n\t})\n}",
"func NewIPv4(value string) (IPv4, error) {\n\tvar IP = IPv4{value: value}\n\n\tif !IP.validate() {\n\t\treturn IPv4{}, ErrInvalidIPv4\n\t}\n\n\treturn IP, nil\n}",
"func IpV4Address() string {\n\tblocks := []string{}\n\tfor i := 0; i < 4; i++ {\n\t\tnumber := seedAndReturnRandom(255)\n\t\tblocks = append(blocks, strconv.Itoa(number))\n\t}\n\n\treturn strings.Join(blocks, \".\")\n}",
"func ipv4only(addr IPAddr) bool {\n\treturn supportsIPv4 && addr.IP.To4() != nil\n}",
"func stringIPv4(n uint32) string {\n\tip := make(net.IP, 4)\n\tbinary.BigEndian.PutUint32(ip, n)\n\treturn ip.String()\n}",
"func (internet *Internet) IPv4Address() string {\n\tvar parts []string\n\tfor i := 0; i < 4; i++ {\n\t\tparts = append(parts, fmt.Sprintf(\"%d\", internet.faker.random.Intn(253)+2))\n\t}\n\treturn strings.Join(parts, \".\")\n}",
"func (p *PeerToPeer) handlePacketIPv4(contents []byte, proto int) {\n\tLog(Trace, \"Handling IPv4 Packet\")\n\tf := new(ethernet.Frame)\n\tif err := f.UnmarshalBinary(contents); err != nil {\n\t\tLog(Error, \"Failed to unmarshal IPv4 packet\")\n\t}\n\n\tif f.EtherType != ethernet.EtherTypeIPv4 {\n\t\treturn\n\t}\n\tmsg := CreateNencP2PMessage(p.Crypter, contents, uint16(proto), 1, 1, 1)\n\tp.SendTo(f.Destination, msg)\n}",
"func (i *InstanceServiceHandler) CreateIPv4(ctx context.Context, instanceID string, reboot *bool) (*IPv4, error) {\n\turi := fmt.Sprintf(\"%s/%s/ipv4\", instancePath, instanceID)\n\n\tbody := RequestBody{\"reboot\": reboot}\n\n\treq, err := i.client.NewRequest(ctx, http.MethodPost, uri, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tip := new(ipv4Base)\n\tif err = i.client.DoWithContext(ctx, req, ip); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ip.IPv4, nil\n}",
"func uint32ToIPv4(intIP uint32) net.IP {\n\tip := make(net.IP, 4)\n\tbinary.BigEndian.PutUint32(ip, intIP)\n\treturn ip\n}",
"func IPv4(str string) bool {\n\tip := net.ParseIP(str)\n\treturn ip != nil && strings.Contains(str, \".\")\n}",
"func (ipSet *IPSet) IsIPv4() bool {\n\treturn govalidator.IsIPv4(ipSet.IPv4)\n}",
"func TestGetSetIP4(t *testing.T) {\n\tip := IP{192, 168, 0, 3}\n\tvar r Record\n\tr.Set(&ip)\n\n\tvar ip2 IP\n\trequire.NoError(t, r.Load(&ip2))\n\tassert.Equal(t, ip, ip2)\n}",
"func isIPv4(fl FieldLevel) bool {\n\tip := net.ParseIP(fl.Field().String())\n\n\treturn ip != nil && ip.To4() != nil\n}",
"func (impl *IPv4Pool) AllocateIPv4Address(id string, key string) (string, base.ModelInterface, *base.ErrorResponse) {\n\tvar (\n\t\tc = impl.TemplateImpl.GetConnection()\n\t\trecord = new(entity.IPv4Pool)\n\t)\n\n\ttx := c.Begin()\n\tif err := tx.Error; err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"id\": id,\n\t\t\t\"key\": key,\n\t\t\t\"error\": err,\n\t\t}).Warn(\"Allocate IPv4 failed, start transaction failed.\")\n\t\treturn \"\", nil, base.NewErrorResponseTransactionError()\n\t}\n\texist, err := impl.GetInternal(tx, id, record)\n\tif !exist {\n\t\ttx.Rollback()\n\t\treturn \"\", nil, base.NewErrorResponseNotExist()\n\t}\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn \"\", nil, base.NewErrorResponseTransactionError()\n\t}\n\n\tfoundKey := false\n\t// If try to find the address with the same key.\n\tif key != \"\" {\n\t\tfor i := range record.Ranges {\n\t\t\tfor j := range record.Ranges[i].Addresses {\n\t\t\t\tif record.Ranges[i].Addresses[j].Key == key {\n\t\t\t\t\tfoundKey = true\n\t\t\t\t\tif record.Ranges[i].Addresses[j].Allocated == true {\n\t\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\t\"id\": record.ID,\n\t\t\t\t\t\t\t\"key\": key,\n\t\t\t\t\t\t\t\"address\": record.Ranges[i].Addresses[j].Address,\n\t\t\t\t\t\t}).Info(\"Allocate IPv4, found address with key but already allocated.\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\trecord.Ranges[i].Addresses[j].Allocated = true\n\t\t\t\t\t\tif record.Ranges[i].Free > 0 {\n\t\t\t\t\t\t\trecord.Ranges[i].Free--\n\t\t\t\t\t\t}\n\t\t\t\t\t\trecord.Ranges[i].Allocatable--\n\t\t\t\t\t\tif commited, err := impl.SaveAndCommit(tx, record); commited && err == nil {\n\t\t\t\t\t\t\treturn record.Ranges[i].Addresses[j].Address, record.ToModel(), nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn \"\", nil, base.NewErrorResponseTransactionError()\n\t\t\t\t\t}\n\t\t\t\t\t// found the address with the key, but in already allocated.\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif foundKey {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// 
if the key == nil, we don't have to find the address with the key.\n\tfor i := range record.Ranges {\n\t\tif record.Ranges[i].Free > 0 {\n\t\t\tfor j := range record.Ranges[i].Addresses {\n\t\t\t\tif record.Ranges[i].Addresses[j].Key != \"\" || record.Ranges[i].Addresses[j].Allocated == true {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\trecord.Ranges[i].Addresses[j].Allocated = true\n\t\t\t\trecord.Ranges[i].Addresses[j].Key = key\n\t\t\t\tif record.Ranges[i].Free > 0 {\n\t\t\t\t\trecord.Ranges[i].Free--\n\t\t\t\t}\n\t\t\t\trecord.Ranges[i].Allocatable--\n\t\t\t\tcommited, err := impl.SaveAndCommit(tx, record)\n\t\t\t\tif commited && err == nil {\n\t\t\t\t\treturn record.Ranges[i].Addresses[j].Address, record.ToModel(), nil\n\t\t\t\t}\n\t\t\t\treturn \"\", nil, base.NewErrorResponseTransactionError()\n\t\t\t}\n\t\t}\n\t}\n\t// So no free address, try to use the allocatable address.\n\tfor i := range record.Ranges {\n\t\tif record.Ranges[i].Allocatable > 0 {\n\t\t\tfor j := range record.Ranges[i].Addresses {\n\t\t\t\tif record.Ranges[i].Addresses[j].Allocated == true {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\trecord.Ranges[i].Addresses[j].Allocated = true\n\t\t\t\trecord.Ranges[i].Addresses[j].Key = key\n\t\t\t\tif record.Ranges[i].Free > 0 {\n\t\t\t\t\trecord.Ranges[i].Free--\n\t\t\t\t}\n\t\t\t\trecord.Ranges[i].Allocatable--\n\t\t\t\tcommited, err := impl.SaveAndCommit(tx, record)\n\t\t\t\tif commited && err == nil {\n\t\t\t\t\treturn record.Ranges[i].Addresses[j].Address, record.ToModel(), nil\n\t\t\t\t}\n\t\t\t\treturn \"\", nil, base.NewErrorResponseTransactionError()\n\t\t\t}\n\t\t}\n\t}\n\t// So no address can allocate.\n\ttx.Rollback()\n\tlog.WithFields(log.Fields{\n\t\t\"id\": id,\n\t\t\"key\": key,\n\t}).Info(\"Allocate IPv4 failed, no allocatable address.\")\n\n\treturn \"\", nil, errorResp.NewErrorResponseIPv4PoolEmpty()\n}",
"func (impl *IPv4Pool) FreeIPv4Address(id string, address string) (base.ModelInterface, *base.ErrorResponse) {\n\tvar (\n\t\tc = impl.TemplateImpl.GetConnection()\n\t\trecord = new(entity.IPv4Pool)\n\t)\n\n\ttx := c.Begin()\n\tif err := tx.Error; err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"id\": id,\n\t\t\t\"address\": address,\n\t\t\t\"error\": err,\n\t\t}).Warn(\"Free IPv4 failed, start transaction failed.\")\n\t\treturn nil, base.NewErrorResponseTransactionError()\n\t}\n\texist, err := impl.GetInternal(tx, id, record)\n\tif !exist {\n\t\ttx.Rollback()\n\t\treturn nil, base.NewErrorResponseNotExist()\n\t}\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn nil, base.NewErrorResponseTransactionError()\n\t}\n\tfor i := range record.Ranges {\n\t\tif !base.IPStringBetween(record.Ranges[i].Start, record.Ranges[i].End, address) {\n\t\t\tcontinue\n\t\t}\n\t\tfor j := range record.Ranges[i].Addresses {\n\t\t\tif record.Ranges[i].Addresses[j].Address == address {\n\t\t\t\tif !record.Ranges[i].Addresses[j].Allocated {\n\t\t\t\t\ttx.Rollback()\n\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\"id\": id,\n\t\t\t\t\t\t\"address\": address,\n\t\t\t\t\t}).Warn(\"Free IPv4 failed, the address didn't allocate, transaction rollback.\")\n\t\t\t\t\treturn nil, errorResp.NewErrorResponseIPv4NotAllocatedError()\n\t\t\t\t}\n\t\t\t\trecord.Ranges[i].Addresses[j].Allocated = false\n\t\t\t\trecord.Ranges[i].Allocatable++\n\t\t\t\tif record.Ranges[i].Addresses[j].Key == \"\" {\n\t\t\t\t\trecord.Ranges[i].Free++\n\t\t\t\t}\n\t\t\t\tcommited, err := impl.SaveAndCommit(tx, record)\n\t\t\t\tif commited && err == nil {\n\t\t\t\t\treturn record.ToModel(), nil\n\t\t\t\t}\n\t\t\t\treturn nil, base.NewErrorResponseTransactionError()\n\t\t\t}\n\t\t}\n\t\tbreak\n\t}\n\t// Can't find the address in pool.\n\ttx.Rollback()\n\treturn nil, errorResp.NewErrorResponseIPv4AddressNotExistError()\n}",
"func uint32ToIPV4(addr uint32) net.IP {\n\tip := make([]byte, net.IPv4len)\n\tbinary.BigEndian.PutUint32(ip, addr)\n\treturn ip\n}",
"func (o *StorageHitachiPortAllOf) SetIpv4Address(v string) {\n\to.Ipv4Address = &v\n}",
"func PublicIpv4EqualFold(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.EqualFold(s.C(FieldPublicIpv4), v))\n\t})\n}",
"func PublicIpv4(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldPublicIpv4), v))\n\t})\n}",
"func (f *FakeInstance) DeleteIPv4(_ context.Context, _, _ string) error {\n\tpanic(\"implement me\")\n}",
"func (ip IP) As4() [4]byte {\n\tif ip.z == z4 || ip.Is4in6() {\n\t\tvar ret [4]byte\n\t\tbinary.BigEndian.PutUint32(ret[:], uint32(ip.lo))\n\t\treturn ret\n\t}\n\tif ip.z == z0 {\n\t\tpanic(\"As4 called on IP zero value\")\n\t}\n\tpanic(\"As4 called on IPv6 address\")\n}",
"func (instanceKey *InstanceKey) IsIPv4() bool {\n\treturn ipv4Regexp.MatchString(instanceKey.Hostname)\n}",
"func (u *IPv4) DeepCopy() *IPv4 {\n\tif u == nil {\n\t\treturn nil\n\t}\n\tout := new(IPv4)\n\tu.DeepCopyInto(out)\n\treturn out\n}",
"func IPv4ClassfulNetwork(address net.IP) *net.IPNet {\n\tif address.To4() != nil {\n\t\tvar newIP net.IP\n\t\tvar newMask net.IPMask\n\t\tswitch {\n\t\tcase uint8(address[0]) < 128:\n\t\t\tnewIP = net.IPv4(uint8(address[0]), 0, 0, 0)\n\t\t\tnewMask = net.IPv4Mask(255, 0, 0, 0)\n\t\tcase uint8(address[0]) < 192:\n\t\t\tnewIP = net.IPv4(uint8(address[0]), uint8(address[1]), 0, 0)\n\t\t\tnewMask = net.IPv4Mask(255, 255, 0, 0)\n\t\tcase uint8(address[0]) < 224:\n\t\t\tnewIP = net.IPv4(uint8(address[0]), uint8(address[1]), uint8(address[2]), 0)\n\t\t\tnewMask = net.IPv4Mask(255, 255, 255, 0)\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t\treturn &net.IPNet{IP: newIP, Mask: newMask}\n\t}\n\treturn nil\n}",
"func IsIPv4(value string) bool {\n\tip := net.ParseIP(value)\n\tif ip == nil {\n\t\treturn false\n\t}\n\treturn ip.To4() != nil\n}",
"func IsValidIP4(ipAddress string) bool {\n\tipAddress = strings.Trim(ipAddress, \" \")\n\tif !regexp.MustCompile(`^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$`).\n\t\tMatchString(ipAddress) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func ResolveIPv4(host string) (net.IP, error) {\n\tif node := DefaultHosts.Search(host); node != nil {\n\t\tif ip := node.Data.(net.IP).To4(); ip != nil {\n\t\t\treturn ip, nil\n\t\t}\n\t}\n\n\tip := net.ParseIP(host)\n\tif ip != nil {\n\t\tif !strings.Contains(host, \":\") {\n\t\t\treturn ip, nil\n\t\t}\n\t\treturn nil, errIPVersion\n\t}\n\n\tif DefaultResolver != nil {\n\t\treturn DefaultResolver.ResolveIPv4(host)\n\t}\n\n\tipAddrs, err := net.LookupIP(host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, ip := range ipAddrs {\n\t\tif ip4 := ip.To4(); ip4 != nil {\n\t\t\treturn ip4, nil\n\t\t}\n\t}\n\n\treturn nil, errIPNotFound\n}",
"func (c *Client) PublicIPv4() (net.IP, error) {\n\tresp, err := c.get(\"/public-ipv4\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn net.ParseIP(resp), nil\n}",
"func (f *FloatingIP) IPv4Net() (*net.IPNet, error) {\n\tvar ip net.IP\n\tif ip = net.ParseIP(f.IP); ip == nil {\n\t\treturn nil, fmt.Errorf(\"error parsing IPv4Address '%s'\", f.IP)\n\t}\n\treturn &net.IPNet{\n\t\tIP: ip,\n\t\tMask: net.CIDRMask(32, 32),\n\t}, nil\n}",
"func IsIPv4(dots string) bool {\n\tip := net.ParseIP(dots)\n\tif ip == nil {\n\t\treturn false\n\t}\n\treturn ip.To4() != nil\n}",
"func WithIPv4Mask(mask net.IPMask) Option {\n\treturn func(o *Options) {\n\t\to.IPv4Mask = mask\n\t}\n}",
"func ToVppIP4Address(addr net.IP) ip_types.IP4Address {\n\tip := [4]uint8{}\n\tcopy(ip[:], addr.To4())\n\treturn ip\n}",
"func (n *NetworkAssociation) IPv4Net() (*net.IPNet, error) {\n\tvar ip net.IP\n\tif ip = net.ParseIP(n.ServerIP); ip == nil {\n\t\treturn nil, fmt.Errorf(\"error parsing ServerIP '%s'\", ip)\n\t}\n\treturn &net.IPNet{\n\t\tIP: ip,\n\t\tMask: net.CIDRMask(24, 32),\n\t}, nil\n}",
"func GetIPv4Addr(ifAdds map[string]string, ipAdds *map[string]string) {\n\ttempAdds := *ipAdds\n\tfor k, v := range ifAdds {\n\t\tif runtime.GOOS == \"linux\" {\n\t\t\tif (strings.HasPrefix(k, \"local\") || strings.HasPrefix(k, \"en\") || strings.HasPrefix(k, \"eth\")) && strings.HasSuffix(k, \"_0\") {\n\t\t\t\ttempAdds[\"eth_ipv4\"] = v\n\t\t\t} else if strings.HasPrefix(k, \"wireless\") && strings.HasSuffix(k, \"_0\") {\n\t\t\t\ttempAdds[\"wireless_ipv4\"] = v\n\t\t\t}\n\t\t} else {\n\t\t\tif (strings.HasPrefix(k, \"local\") || strings.HasPrefix(k, \"en\") || strings.HasPrefix(k, \"eth\")) && strings.HasSuffix(k, \"_1\") {\n\t\t\t\ttempAdds[\"eth_ipv4\"] = v\n\t\t\t} else if strings.HasPrefix(k, \"wireless\") && strings.HasSuffix(k, \"_1\") {\n\t\t\t\ttempAdds[\"wireless_ipv4\"] = v\n\t\t\t}\n\t\t}\n\n\t}\n}",
"func (i Internet) LocalIpv4() string {\n\tips := make([]string, 0, 4)\n\tips = append(ips, i.Faker.RandomStringElement([]string{\"10\", \"172\", \"192\"}))\n\n\tif ips[0] == \"10\" {\n\t\tfor j := 0; j < 3; j++ {\n\t\t\tips = append(ips, strconv.Itoa(i.Faker.IntBetween(0, 255)))\n\t\t}\n\t}\n\n\tif ips[0] == \"172\" {\n\t\tips = append(ips, strconv.Itoa(i.Faker.IntBetween(16, 31)))\n\n\t\tfor j := 0; j < 2; j++ {\n\t\t\tips = append(ips, strconv.Itoa(i.Faker.IntBetween(0, 255)))\n\t\t}\n\t}\n\n\tif ips[0] == \"192\" {\n\t\tips = append(ips, \"168\")\n\n\t\tfor j := 0; j < 2; j++ {\n\t\t\tips = append(ips, strconv.Itoa(i.Faker.IntBetween(0, 255)))\n\t\t}\n\t}\n\n\treturn strings.Join(ips, \".\")\n}",
"func (s *Server) IPv4Net() (*net.IPNet, error) {\n\tvar ip net.IP\n\tif ip = net.ParseIP(s.IPv4Address); ip == nil {\n\t\treturn nil, fmt.Errorf(\"error parsing IPv4Address '%s'\", s.IPv4Address)\n\t}\n\treturn &net.IPNet{\n\t\tIP: ip,\n\t\tMask: net.CIDRMask(32, 32),\n\t}, nil\n}",
"func (o *LocalDatabaseProvider) SetIpPoolV4(v string) {\n\to.IpPoolV4 = &v\n}",
"func (f *FakeInstance) CreateIPv4(_ context.Context, _ string, _ *bool) (*govultr.IPv4, *http.Response, error) {\n\tpanic(\"implement me\")\n}",
"func Ipv4EqualFold(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.EqualFold(s.C(FieldIpv4), v))\n\t})\n}",
"func (in *Ipv4) DeepCopy() *Ipv4 {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Ipv4)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (o NodeBalancerOutput) Ipv4() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *NodeBalancer) pulumi.StringOutput { return v.Ipv4 }).(pulumi.StringOutput)\n}",
"func PublicIpv4EQ(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldPublicIpv4), v))\n\t})\n}",
"func DetectHostIPv4() (string, error) {\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\treturn \"\", errors.WithStack(err)\n\t}\n\tfor _, a := range addrs {\n\t\tif ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {\n\t\t\tif ipnet.IP.To4() == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn ipnet.IP.String(), nil\n\t\t}\n\t}\n\treturn \"\", errors.New(\"cannot detect host IPv4 address\")\n}",
"func (o *IppoolPoolMember) SetIpV4Address(v string) {\n\to.IpV4Address = &v\n}",
"func isIP4AddrResolvable(fl FieldLevel) bool {\n\tif !isIPv4(fl) {\n\t\treturn false\n\t}\n\n\t_, err := net.ResolveIPAddr(\"ip4\", fl.Field().String())\n\n\treturn err == nil\n}",
"func (c *Client) GetIPv4() string {\n\treturn c.ip.String()\n}",
"func (o *StorageHitachiPortAllOf) GetIpv4Address() string {\n\tif o == nil || o.Ipv4Address == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Ipv4Address\n}",
"func (f *FSEIDFields) HasIPv4() bool {\n\treturn has2ndBit(f.Flags)\n}",
"func indexAsIPv4(i uint32, baseSlashEight int) string {\n\tip := make(net.IP, 4)\n\tbinary.BigEndian.PutUint32(ip, uint32(i)+uint32(baseSlashEight*16777216))\n\treturn ip.String()\n}",
"func (af AddressFamily) IsIPv4() bool {\n\treturn af == AddressFamilyIPv4\n}",
"func (i *InstanceServiceHandler) DefaultReverseIPv4(ctx context.Context, instanceID, ip string) error {\n\turi := fmt.Sprintf(\"%s/%s/ipv4/reverse/default\", instancePath, instanceID)\n\treqBody := RequestBody{\"ip\": ip}\n\n\treq, err := i.client.NewRequest(ctx, http.MethodPost, uri, reqBody)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn i.client.DoWithContext(ctx, req, nil)\n}",
"func IsIPv4(ip *net.IP) bool {\n\treturn ip.To4() != nil\n}",
"func (a *Lisp_address) lisp_is_ipv4() bool {\n\treturn((len(a.address) == 4))\n}",
"func ParsePublicIPV4(ip string) (*l5dNetPb.IPAddress, error) {\n\tnetIP := net.ParseIP(ip)\n\tif netIP != nil {\n\t\toBigInt := IPToInt(netIP.To4())\n\t\tnetIPAddress := &l5dNetPb.IPAddress{\n\t\t\tIp: &l5dNetPb.IPAddress_Ipv4{\n\t\t\t\tIpv4: uint32(oBigInt.Uint64()),\n\t\t\t},\n\t\t}\n\t\treturn netIPAddress, nil\n\t}\n\treturn nil, fmt.Errorf(\"Invalid IP address: %s\", ip)\n}",
"func IntToIPv4(intip *big.Int) net.IP {\n\tipByte := make([]byte, net.IPv4len)\n\tuint32IP := intip.Uint64()\n\tbinary.BigEndian.PutUint32(ipByte, uint32(uint32IP))\n\treturn net.IP(ipByte)\n}",
"func NewIPv4Address(address uint32, length uint) IPv4Address {\n\treturn IPv4Address{\n\t\tAddress: address,\n\t\tLength: length,\n\t}\n}",
"func EnsureIPv4(ipOrHost string) (string, error) {\n\tip := net.ParseIP(ipOrHost)\n\tif ip != nil {\n\t\tif ip.To4() == nil {\n\t\t\treturn \"\", fmt.Errorf(\"%s is IPv6 address\", ipOrHost)\n\t\t}\n\t\treturn ipOrHost, nil\n\t}\n\taddrs, err := net.LookupHost(ipOrHost)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tips := make([]string, 0)\n\tfor _, addr := range addrs {\n\t\tif ip := net.ParseIP(addr); ip != nil {\n\t\t\tif ip.To4() == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tips = append(ips, addr)\n\t\t}\n\t}\n\tif len(ips) == 0 {\n\t\treturn \"\", errors.New(\"no IPv4 address found\")\n\t}\n\trand.Seed(time.Now().UnixNano())\n\treturn ips[rand.Intn(len(ips))], nil\n}",
"func network4(addr uint32, prefix uint) uint32 {\n\treturn addr & netmask(prefix)\n}",
"func getLocalIPV4() string {\n\taddrList, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\tlogrus.Panicf(\"net.InterfaceAddrs error : %v\", err)\n\t}\n\n\tfor _, addr := range addrList {\n\t\tif ip, ok := addr.(*net.IPNet); ok && !ip.IP.IsLoopback() {\n\t\t\tif ip.IP.To4() == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn ip.IP.String()\n\t\t}\n\t}\n\treturn \"127.0.0.1\"\n}",
"func IsIPv4(ip string) bool {\n\treturn net.ParseIP(ip).To4() != nil\n}",
"func (f *FakeInstance) DefaultReverseIPv4(_ context.Context, _, _ string) error {\n\tpanic(\"implement me\")\n}",
"func IsIPv4(s string) bool {\n\tif s == \"\" {\n\t\treturn false\n\t}\n\n\tip := net.ParseIP(s)\n\treturn ip != nil && ip.To4() != nil\n}",
"func (s IPV4) String() string {\n\treturn fmt.Sprintf(\"%v.%v.%v.%v\", s[0], s[1], s[2], s[3])\n}",
"func IsIPv4(s string) bool {\n\tif s == \"\" {\n\t\treturn false\n\t}\n\n\tip := net.ParseIP(s)\n\treturn ip != nil && strings.Contains(s, \".\") // && ip.To4() != nil\n}",
"func (m *InterfaceProtocolConfigIPV4) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateDhcp(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMethod(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStatic(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *NetworkElementSummaryAllOf) SetIpv4Address(v string) {\n\to.Ipv4Address = &v\n}",
"func PublicIpv4ContainsFold(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldPublicIpv4), v))\n\t})\n}",
"func (ds *DataStore) AssignPodIPv4Address(k8sPod *k8sapi.K8SPodInfo) (string, int, error) {\n\tds.lock.Lock()\n\tdefer ds.lock.Unlock()\n\n\tklog.V(2).Infof(\"AssignIPv4Address: IP address pool stats: total: %d, assigned %d\", ds.total, ds.assigned)\n\tpodKey := PodKey{\n\t\tname: k8sPod.Name,\n\t\tnamespace: k8sPod.Namespace,\n\t\tcontainer: k8sPod.Container,\n\t}\n\tipAddr, ok := ds.podsIP[podKey]\n\tif ok {\n\t\tif ipAddr.IP == k8sPod.IP && k8sPod.IP != \"\" {\n\t\t\t// The caller invoke multiple times to assign(PodName/NameSpace --> same IPAddress). It is not a error, but not very efficient.\n\t\t\tklog.V(1).Infof(\"AssignPodIPv4Address: duplicate pod assign for IP %s, name %s, namespace %s, container %s\",\n\t\t\t\tk8sPod.IP, k8sPod.Name, k8sPod.Namespace, k8sPod.Container)\n\t\t\treturn ipAddr.IP, ipAddr.DeviceNumber, nil\n\t\t}\n\t\t// TODO Handle this bug assert? May need to add a counter here, if counter is too high, need to mark node as unhealthy...\n\t\t// This is a bug that the caller invokes multiple times to assign(PodName/NameSpace -> a different IP address).\n\t\tklog.Errorf(\"AssignPodIPv4Address: current IP %s is changed to IP %s for pod(name %s, namespace %s, container %s)\",\n\t\t\tipAddr.IP, k8sPod.IP, k8sPod.Name, k8sPod.Namespace, k8sPod.Container)\n\t\treturn \"\", 0, errors.New(\"AssignPodIPv4Address: invalid pod with multiple IP addresses\")\n\t}\n\treturn ds.assignPodIPv4AddressUnsafe(k8sPod)\n}",
"func (ipSet *IPSet) HasIPv4() bool {\n\treturn ipSet.IPv4 != \"\"\n}",
"func IsIP4(val interface{}) bool {\n\treturn isMatch(ip4, val)\n}",
"func (o *IPPrefixesSynthetics) SetPrefixesIpv4(v []string) {\n\to.PrefixesIpv4 = v\n}",
"func (ip IPv4) Equals(value Value) bool {\n\to, ok := value.(IPv4)\n\treturn ok && ip.value == o.value\n}",
"func NewV4(pubkey *ecdsa.PublicKey, ip net.IP, tcp, udp int) *Node {\n\tvar r qnr.Record\n\tif len(ip) > 0 {\n\t\tr.Set(qnr.IP(ip))\n\t}\n\tif udp != 0 {\n\t\tr.Set(qnr.UDP(udp))\n\t}\n\tif tcp != 0 {\n\t\tr.Set(qnr.TCP(tcp))\n\t}\n\tsignV4Compat(&r, pubkey)\n\tn, err := New(v4CompatID{}, &r)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn n\n}",
"func (p *IPv4) Swap() {\n\tp.src, p.dst = p.dst, p.src\n}",
"func AllLocalIP4() ([]net.IP, error) {\n\tdevices, err := net.Interfaces()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tret := []net.IP{}\n\tfor _, dev := range devices {\n\t\tif dev.Flags&net.FlagUp != 0 {\n\t\t\taddrs, err := dev.Addrs()\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor i := range addrs {\n\t\t\t\tif ip, ok := addrs[i].(*net.IPNet); ok {\n\t\t\t\t\tif ip.IP.To4() != nil {\n\t\t\t\t\t\tret = append(ret, ip.IP)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn ret, nil\n}",
"func IPv4Address(ctx context.Context, client *docker.Client, containerID string) (net.IP, error) {\n\tc, err := client.InspectContainerWithContext(containerID, ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"find container %s address: %w\", containerID, err)\n\t}\n\treturn ipv4Address(c)\n}",
"func FilterIPV4(ips []net.IP) []string {\n\tvar ret = make([]string, 0)\n\tfor _, ip := range ips {\n\t\tif ip.To4() != nil {\n\t\t\tret = append(ret, ip.String())\n\t\t}\n\t}\n\treturn ret\n}",
"func GetIPv4(c *fluent.GRIBIClient, wantACK fluent.ProgrammingResult, t testing.TB, _ ...TestOpt) {\n\tops := []func(){\n\t\tfunc() {\n\t\t\tc.Modify().AddEntry(t,\n\t\t\t\tfluent.NextHopEntry().\n\t\t\t\t\tWithNetworkInstance(server.DefaultNetworkInstanceName).\n\t\t\t\t\tWithIndex(1).\n\t\t\t\t\tWithIPAddress(\"1.1.1.1\"))\n\t\t},\n\t\tfunc() {\n\t\t\tc.Modify().AddEntry(t,\n\t\t\t\tfluent.NextHopGroupEntry().\n\t\t\t\t\tWithNetworkInstance(server.DefaultNetworkInstanceName).\n\t\t\t\t\tWithID(1).\n\t\t\t\t\tAddNextHop(1, 1))\n\t\t},\n\t\tfunc() {\n\t\t\tc.Modify().AddEntry(t,\n\t\t\t\tfluent.IPv4Entry().\n\t\t\t\t\tWithNetworkInstance(server.DefaultNetworkInstanceName).\n\t\t\t\t\tWithNextHopGroup(1).\n\t\t\t\t\tWithPrefix(\"42.42.42.42/32\"))\n\t\t},\n\t}\n\n\tres := doModifyOps(c, t, ops, wantACK, false)\n\n\tchk.HasResult(t, res,\n\t\tfluent.OperationResult().\n\t\t\tWithNextHopOperation(1).\n\t\t\tWithOperationType(constants.Add).\n\t\t\tWithProgrammingResult(wantACK).\n\t\t\tAsResult(),\n\t\tchk.IgnoreOperationID(),\n\t)\n\n\tchk.HasResult(t, res,\n\t\tfluent.OperationResult().\n\t\t\tWithNextHopGroupOperation(1).\n\t\t\tWithOperationType(constants.Add).\n\t\t\tWithProgrammingResult(wantACK).\n\t\t\tAsResult(),\n\t\tchk.IgnoreOperationID(),\n\t)\n\n\tchk.HasResult(t, res,\n\t\tfluent.OperationResult().\n\t\t\tWithIPv4Operation(\"42.42.42.42/32\").\n\t\t\tWithOperationType(constants.Add).\n\t\t\tWithProgrammingResult(wantACK).\n\t\t\tAsResult(),\n\t\tchk.IgnoreOperationID(),\n\t)\n\n\tctx := context.Background()\n\tc.Start(ctx, t)\n\tdefer c.Stop(t)\n\tgr, err := c.Get().\n\t\tWithNetworkInstance(server.DefaultNetworkInstanceName).\n\t\tWithAFT(fluent.IPv4).\n\t\tSend()\n\n\tif err != nil {\n\t\tt.Fatalf(\"got unexpected error from get, got: %v\", err)\n\t}\n\n\tchk.GetResponseHasEntries(t, gr,\n\t\tfluent.IPv4Entry().\n\t\t\tWithNetworkInstance(server.DefaultNetworkInstanceName).\n\t\t\tWithNextHopGroup(1).\n\t\t\tWithPrefix(\"42.42.42.42/32\"),\n\t)\n}",
"func extractIPv4(ptr string) string {\n\ts := strings.Replace(ptr, \".in-addr.arpa\", \"\", 1)\n\twords := strings.Split(s, \".\")\n\tfor i, j := 0, len(words)-1; i < j; i, j = i+1, j-1 {\n\t\twords[i], words[j] = words[j], words[i]\n\t}\n\treturn strings.Join(words, \".\")\n}",
"func (l4netlb *L4NetLB) ensureIPv4Resources(result *L4NetLBSyncResult, nodeNames []string, bsLink string) {\n\tfr, ipAddrType, err := l4netlb.ensureIPv4ForwardingRule(bsLink)\n\tif err != nil {\n\t\t// User can misconfigure the forwarding rule if Network Tier will not match service level Network Tier.\n\t\tresult.GCEResourceInError = annotations.ForwardingRuleResource\n\t\tresult.Error = fmt.Errorf(\"failed to ensure forwarding rule - %w\", err)\n\t\tresult.MetricsLegacyState.IsUserError = utils.IsUserError(err)\n\t\treturn\n\t}\n\tif fr.IPProtocol == string(corev1.ProtocolTCP) {\n\t\tresult.Annotations[annotations.TCPForwardingRuleKey] = fr.Name\n\t} else {\n\t\tresult.Annotations[annotations.UDPForwardingRuleKey] = fr.Name\n\t}\n\tresult.MetricsLegacyState.IsManagedIP = ipAddrType == IPAddrManaged\n\tresult.MetricsLegacyState.IsPremiumTier = fr.NetworkTier == cloud.NetworkTierPremium.ToGCEValue()\n\n\tl4netlb.ensureIPv4NodesFirewall(nodeNames, fr.IPAddress, result)\n\tif result.Error != nil {\n\t\tklog.Errorf(\"ensureIPv4Resources: Failed to ensure nodes firewall for L4 NetLB Service %s/%s, error: %v\", l4netlb.Service.Namespace, l4netlb.Service.Name, err)\n\t\treturn\n\t}\n\n\tresult.Status = utils.AddIPToLBStatus(result.Status, fr.IPAddress)\n}",
"func parseIPv4(ip string) net.IP {\n\tif parsedIP := net.ParseIP(strings.TrimSpace(ip)); parsedIP != nil {\n\t\tif ipv4 := parsedIP.To4(); ipv4 != nil {\n\t\t\treturn ipv4\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (m *RIBMessage) IPv4Flow() (*IPv4FlowAnnounceTextMessage, error) {\n\treturn nil, nil\n}",
"func (m *IPV4) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateLan(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateWan(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func IsIpv4(s string) bool {\n\tips := strings.Split(s, ipSep)\n\tif len(ips) != ipV4Len {\n\t\treturn false\n\t}\n\tfor _, v := range ips {\n\t\tnum, e := strconv.Atoi(v)\n\t\tif e != nil || num > ipv4Max || num < ipv4Min {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func (ds *DataStore) DelIPv4AddressFromStore(nicID string, ipv4 string) error {\n\tds.lock.Lock()\n\tdefer ds.lock.Unlock()\n\tklog.V(2).Infof(\"Deleting NIC(%s)'s IPv4 address %s from datastore\", nicID, ipv4)\n\tklog.V(2).Infof(\"IP Address Pool stats: total: %d, assigned: %d\", ds.total, ds.assigned)\n\n\tcurNIC, ok := ds.nicIPPools[nicID]\n\tif !ok {\n\t\treturn errors.New(UnknownNICError)\n\t}\n\n\tipAddr, ok := curNIC.IPv4Addresses[ipv4]\n\tif !ok {\n\t\treturn errors.New(UnknownIPError)\n\t}\n\n\tif ipAddr.Assigned {\n\t\treturn errors.New(IPInUseError)\n\t}\n\n\tds.total--\n\t// Prometheus gauge\n\ttotalIPs.Set(float64(ds.total))\n\n\tdelete(curNIC.IPv4Addresses, ipv4)\n\n\tklog.V(1).Infof(\"Deleted NIC(%s)'s IP %s from datastore\", nicID, ipv4)\n\treturn nil\n}",
"func (ds *DataStore) AddIPv4AddressFromStore(nicID string, ipv4 string) error {\n\tds.lock.Lock()\n\tdefer ds.lock.Unlock()\n\n\tklog.V(2).Infof(\"Adding NIC(%s)'s IPv4 address %s to datastore\", nicID, ipv4)\n\tklog.V(2).Infof(\"IP Address Pool stats: total: %d, assigned: %d\", ds.total, ds.assigned)\n\n\tcurNIC, ok := ds.nicIPPools[nicID]\n\tif !ok {\n\t\treturn errors.New(\"add NIC's IP to datastore: unknown NIC\")\n\t}\n\n\t_, ok = curNIC.IPv4Addresses[ipv4]\n\tif ok {\n\t\treturn errors.New(DuplicateIPError)\n\t}\n\n\tds.total++\n\t// Prometheus gauge\n\ttotalIPs.Set(float64(ds.total))\n\n\tcurNIC.IPv4Addresses[ipv4] = &AddressInfo{Address: ipv4, Assigned: false}\n\tklog.V(1).Infof(\"Added NIC(%s)'s IP %s to datastore\", nicID, ipv4)\n\treturn nil\n}",
"func (ip IPv4) String() string {\n\treturn fmt.Sprintf(\"%d.%d.%d.%d\", ip[0], ip[1], ip[2], ip[3])\n}",
"func IncrementIPv4(ip net.IP, inc int) net.IP {\n\tip = ip.To4()\n\tv := binary.BigEndian.Uint32(ip)\n\tif v >= uint32(0) {\n\t\tv = v + uint32(inc)\n\t} else {\n\t\tv = v - uint32(-inc)\n\t}\n\tip = make(net.IP, 4)\n\tbinary.BigEndian.PutUint32(ip, v)\n\treturn ip\n}",
"func (crc *CasbinRuleCreate) SetV4(s string) *CasbinRuleCreate {\n\tcrc.mutation.SetV4(s)\n\treturn crc\n}",
"func (p *IPPacket) DstV4() net.IP {\n\treturn net.IPv4((*p)[16], (*p)[17], (*p)[18], (*p)[19])\n}",
"func (ep *epInfoCache) IPv4Address() netip.Addr {\n\treturn ep.ipv4\n}",
"func IntToIpv4(intv int64) (ip string, err error) {\n\tif intv < ipv4MinInt || intv > ipv4MaxInt {\n\t\terr = BadIpv4Error\n\t\treturn\n\t}\n\n\tip = strings.Join([]string{\n\t\tstrconv.Itoa(int(intv & ip1x >> ipv4Shift[0])),\n\t\tstrconv.Itoa(int(intv & ip2x >> ipv4Shift[1])),\n\t\tstrconv.Itoa(int(intv & ip3x >> ipv4Shift[2])),\n\t\tstrconv.Itoa(int(intv & ip4x >> ipv4Shift[3]))},\n\t\tipSep)\n\n\treturn\n}"
] | [
"0.6443081",
"0.6388994",
"0.63776445",
"0.6346549",
"0.6192383",
"0.61467063",
"0.6119341",
"0.59505796",
"0.58408594",
"0.5829703",
"0.5822138",
"0.5800136",
"0.574169",
"0.5729033",
"0.57041806",
"0.5692545",
"0.5672946",
"0.56706774",
"0.5669973",
"0.5656812",
"0.56447315",
"0.56057584",
"0.5597116",
"0.5588403",
"0.55849063",
"0.556407",
"0.55628103",
"0.5550961",
"0.551651",
"0.55156076",
"0.5490048",
"0.5484926",
"0.5465212",
"0.5451935",
"0.54303",
"0.5404971",
"0.5391032",
"0.53788126",
"0.5376378",
"0.5369238",
"0.53685534",
"0.535852",
"0.53558964",
"0.53378344",
"0.53253394",
"0.5323153",
"0.5321143",
"0.5314374",
"0.5310506",
"0.5297026",
"0.5289147",
"0.52889216",
"0.5284165",
"0.5241419",
"0.52404153",
"0.52169997",
"0.5216206",
"0.521614",
"0.5215295",
"0.5210172",
"0.5209929",
"0.52054214",
"0.5192797",
"0.51924574",
"0.5190526",
"0.5189518",
"0.51833075",
"0.5174843",
"0.51738375",
"0.51737523",
"0.5157874",
"0.51505876",
"0.51471895",
"0.5141837",
"0.51411355",
"0.512112",
"0.5110004",
"0.5101439",
"0.5095463",
"0.50915",
"0.5091398",
"0.5082896",
"0.5080923",
"0.50804216",
"0.50737786",
"0.50705194",
"0.5070183",
"0.5068823",
"0.5058205",
"0.5054369",
"0.5047928",
"0.5046807",
"0.50303435",
"0.5029502",
"0.5018306",
"0.5017512",
"0.50008744",
"0.50002414",
"0.49979165",
"0.49900252"
] | 0.8872807 | 0 |
createHostonlyAdapter creates a new hostonly network. | createHostonlyAdapter создает новый сетевой интерфейс host-only. | func createHostonlyAdapter(vbox VBoxManager) (*hostOnlyNetwork, error) {
out, err := vbox.vbmOut("hostonlyif", "create")
if err != nil {
return nil, err
}
res := reHostOnlyAdapterCreated.FindStringSubmatch(string(out))
if res == nil {
return nil, errors.New("Failed to create host-only adapter")
}
return &hostOnlyNetwork{Name: res[1]}, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func createHostWithIp(nodeId int, ip string, port int) (core.Host, error) {\n\t// Producing private key using nodeId\n\tr := mrand.New(mrand.NewSource(int64(nodeId)))\n\n\tprvKey, _ := ecdsa.GenerateKey(btcec.S256(), r)\n\tsk := (*crypto.Secp256k1PrivateKey)(prvKey)\n\n\t// Starting a peer with default configs\n\topts := []libp2p.Option{\n\t\tlibp2p.ListenAddrStrings(fmt.Sprintf(\"/ip4/0.0.0.0/tcp/%s\", strconv.Itoa(port))),\n\t\tlibp2p.Identity(sk),\n\t\tlibp2p.DefaultTransports,\n\t\tlibp2p.DefaultMuxers,\n\t\tlibp2p.DefaultSecurity,\n\t}\n\n\th, err := libp2p.New(context.Background(), opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn h, nil\n}",
"func listHostOnlyAdapters(vbox VBoxManager) (map[string]*hostOnlyNetwork, error) {\n\tout, err := vbox.vbmOut(\"list\", \"hostonlyifs\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbyName := map[string]*hostOnlyNetwork{}\n\tbyIP := map[string]*hostOnlyNetwork{}\n\tn := &hostOnlyNetwork{}\n\n\terr = parseKeyValues(out, reColonLine, func(key, val string) error {\n\t\tswitch key {\n\t\tcase \"Name\":\n\t\t\tn.Name = val\n\t\tcase \"GUID\":\n\t\t\tn.GUID = val\n\t\tcase \"DHCP\":\n\t\t\tn.DHCP = (val != \"Disabled\")\n\t\tcase \"IPAddress\":\n\t\t\tn.IPv4.IP = net.ParseIP(val)\n\t\tcase \"NetworkMask\":\n\t\t\tn.IPv4.Mask = parseIPv4Mask(val)\n\t\tcase \"HardwareAddress\":\n\t\t\tmac, err := net.ParseMAC(val)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tn.HwAddr = mac\n\t\tcase \"MediumType\":\n\t\t\tn.Medium = val\n\t\tcase \"Status\":\n\t\t\tn.Status = val\n\t\tcase \"VBoxNetworkName\":\n\t\t\tn.NetworkName = val\n\n\t\t\tif _, present := byName[n.NetworkName]; present {\n\t\t\t\treturn fmt.Errorf(\"VirtualBox is configured with multiple host-only adapters with the same name %q. Please remove one.\", n.NetworkName)\n\t\t\t}\n\t\t\tbyName[n.NetworkName] = n\n\n\t\t\tif len(n.IPv4.IP) != 0 {\n\t\t\t\tif _, present := byIP[n.IPv4.IP.String()]; present {\n\t\t\t\t\treturn fmt.Errorf(\"VirtualBox is configured with multiple host-only adapters with the same IP %q. Please remove one.\", n.IPv4.IP)\n\t\t\t\t}\n\t\t\t\tbyIP[n.IPv4.IP.String()] = n\n\t\t\t}\n\n\t\t\tn = &hostOnlyNetwork{}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn byName, nil\n}",
"func createHost(port int) (core.Host, error) {\n\t// Producing private key\n\tprvKey, _ := ecdsa.GenerateKey(btcec.S256(), rand.Reader)\n\tsk := (*crypto.Secp256k1PrivateKey)(prvKey)\n\n\t// Starting a peer with default configs\n\topts := []libp2p.Option{\n\t\tlibp2p.ListenAddrStrings(fmt.Sprintf(\"/ip4/0.0.0.0/tcp/%d\", port)),\n\t\tlibp2p.Identity(sk),\n\t\tlibp2p.DefaultTransports,\n\t\tlibp2p.DefaultMuxers,\n\t\tlibp2p.DefaultSecurity,\n\t}\n\n\th, err := libp2p.New(context.Background(), opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn h, nil\n}",
"func NewHost(host string) Host {\n\treturn Host(host)\n}",
"func (s stack) CreateHost(ctx context.Context, request abstract.HostRequest, extra interface{}) (_ *abstract.HostFull, _ *userdata.Content, ferr fail.Error) {\n\tdefer fail.OnPanic(&ferr)\n\n\treturn nil, nil, fail.NotImplementedError(\"useless method\")\n}",
"func (d *Device) CreateHost(ctx context.Context, hostname string) (*Host, error) {\n\tspath := \"/host\"\n\tparam := struct {\n\t\tNAME string `json:\"NAME\"`\n\t\tTYPE string `json:\"TYPE\"`\n\t\tOPERATIONSYSTEM string `json:\"OPERATIONSYSTEM\"`\n\t\tDESCRIPTION string `json:\"DESCRIPTION\"`\n\t}{\n\t\tNAME: encodeHostName(hostname),\n\t\tTYPE: strconv.Itoa(TypeHost),\n\t\tOPERATIONSYSTEM: \"0\",\n\t\tDESCRIPTION: hostname,\n\t}\n\tjb, err := json.Marshal(param)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(ErrCreatePostValue+\": %w\", err)\n\t}\n\treq, err := d.newRequest(ctx, \"POST\", spath, bytes.NewBuffer(jb))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(ErrCreateRequest+\": %w\", err)\n\t}\n\n\thost := &Host{}\n\tif err = d.requestWithRetry(req, host, DefaultHTTPRetryCount); err != nil {\n\t\treturn nil, fmt.Errorf(ErrRequestWithRetry+\": %w\", err)\n\t}\n\n\treturn host, nil\n}",
"func NewHost(ip net.IP, hostname string, aliases ...string) Host {\n\treturn Host{\n\t\tIP: ip,\n\t\tHostname: hostname,\n\t\tAliases: aliases,\n\t}\n}",
"func addHostOnlyDHCPServer(ifname string, d dhcpServer, vbox VBoxManager) error {\n\tname := dhcpPrefix + ifname\n\n\tdhcps, err := listDHCPServers(vbox)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// On some platforms (OSX), creating a host-only adapter adds a default dhcpserver,\n\t// while on others (Windows?) it does not.\n\tcommand := \"add\"\n\tif dhcp, ok := dhcps[name]; ok {\n\t\tcommand = \"modify\"\n\t\tif (dhcp.IPv4.IP.Equal(d.IPv4.IP)) && (dhcp.IPv4.Mask.String() == d.IPv4.Mask.String()) && (dhcp.LowerIP.Equal(d.LowerIP)) && (dhcp.UpperIP.Equal(d.UpperIP)) && dhcp.Enabled {\n\t\t\t// dhcp is up to date\n\t\t\treturn nil\n\t\t}\n\t}\n\n\targs := []string{\"dhcpserver\", command,\n\t\t\"--netname\", name,\n\t\t\"--ip\", d.IPv4.IP.String(),\n\t\t\"--netmask\", net.IP(d.IPv4.Mask).String(),\n\t\t\"--lowerip\", d.LowerIP.String(),\n\t\t\"--upperip\", d.UpperIP.String(),\n\t}\n\tif d.Enabled {\n\t\targs = append(args, \"--enable\")\n\t} else {\n\t\targs = append(args, \"--disable\")\n\t}\n\n\treturn vbox.vbm(args...)\n}",
"func NewHost(addr Address, peerCount, channelLimit uint64, incomingBandwidth, outgoingBandwidth uint32) (Host, error) {\n\tvar cAddr *C.struct__ENetAddress\n\tif addr != nil {\n\t\tcAddr = &(addr.(*enetAddress)).cAddr\n\t}\n\n\thost := C.enet_host_create(\n\t\tcAddr,\n\t\t(C.size_t)(peerCount),\n\t\t(C.size_t)(channelLimit),\n\t\t(C.enet_uint32)(incomingBandwidth),\n\t\t(C.enet_uint32)(outgoingBandwidth),\n\t)\n\n\tif host == nil {\n\t\treturn nil, errors.New(\"unable to create host\")\n\t}\n\n\treturn &enetHost{\n\t\tcHost: host,\n\t}, nil\n}",
"func NewBindHostForbidden() *BindHostForbidden {\n\treturn &BindHostForbidden{}\n}",
"func NewPerHost(defaultDialer, bypass Dialer) *PerHost {\n\treturn &PerHost{\n\t\tdef: defaultDialer,\n\t\tbypass: bypass,\n\t}\n}",
"func getHostOnlyNetworkInterface(mc *driver.MachineConfig) (string, error) {\n\t// Check if the interface/dhcp exists.\n\tnets, err := HostonlyNets()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdhcps, err := DHCPs()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, n := range nets {\n\t\tif dhcp, ok := dhcps[n.NetworkName]; ok {\n\t\t\tif dhcp.IPv4.IP.Equal(mc.DHCPIP) &&\n\t\t\t\tdhcp.IPv4.Mask.String() == mc.NetMask.String() &&\n\t\t\t\tdhcp.LowerIP.Equal(mc.LowerIP) &&\n\t\t\t\tdhcp.UpperIP.Equal(mc.UpperIP) &&\n\t\t\t\tdhcp.Enabled == mc.DHCPEnabled {\n\t\t\t\treturn n.Name, nil\n\t\t\t}\n\t\t}\n\t}\n\n\t// No existing host-only interface found. Create a new one.\n\thostonlyNet, err := CreateHostonlyNet()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thostonlyNet.IPv4.IP = mc.HostIP\n\thostonlyNet.IPv4.Mask = mc.NetMask\n\tif err := hostonlyNet.Config(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Create and add a DHCP server to the host-only network\n\tdhcp := driver.DHCP{}\n\tdhcp.IPv4.IP = mc.DHCPIP\n\tdhcp.IPv4.Mask = mc.NetMask\n\tdhcp.LowerIP = mc.LowerIP\n\tdhcp.UpperIP = mc.UpperIP\n\tdhcp.Enabled = true\n\tif err := AddHostonlyDHCP(hostonlyNet.Name, dhcp); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hostonlyNet.Name, nil\n}",
"func NewTransportConfigHostonly() *TransportConfig {\n\treturn &TransportConfig{\n\t\tComponentNumber: 1,\n\t}\n}",
"func NewHost(address string) (*Host, error) {\n\taddr, err := NewAddress(address)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to create Host\")\n\t}\n\treturn &Host{Address: addr}, nil\n}",
"func NewAdapter(g *gce.Cloud) NetworkEndpointGroupCloud {\n\treturn &cloudProviderAdapter{\n\t\tc: g,\n\t\tnetworkURL: g.NetworkURL(),\n\t\tsubnetworkURL: g.SubnetworkURL(),\n\t}\n}",
"func (tester *ServiceTester) CreateHost(t *testing.T, name string, subnet *abstract.Subnet, public bool) (*abstract.HostFull, *userdata.Content, fail.Error) {\n\tctx := context.Background()\n\ttpls, xerr := tester.Service.ListTemplatesBySizing(ctx, abstract.HostSizingRequirements{\n\t\tMinCores: 1,\n\t\tMinRAMSize: 1,\n\t\tMinDiskSize: 10,\n\t}, false)\n\tassert.Nil(t, xerr)\n\timg, xerr := tester.Service.SearchImage(ctx, \"Ubuntu 20.04\")\n\tassert.Nil(t, xerr)\n\thostRequest := abstract.HostRequest{\n\t\tResourceName: name,\n\t\tSubnets: []*abstract.Subnet{subnet},\n\t\tDefaultRouteIP: \"\",\n\t\tPublicIP: public,\n\t\tTemplateID: tpls[0].ID,\n\t\tImageID: img.ID,\n\t\tKeyPair: nil,\n\t\tPassword: \"\",\n\t\tDiskSize: 0,\n\t}\n\treturn tester.Service.CreateHost(context.Background(), hostRequest, nil)\n}",
"func createHnsNetwork(backend string, networkAdapter string) (string, error) {\n\tvar network hcsshim.HNSNetwork\n\tif backend == \"vxlan\" {\n\t\t// Ignoring the return because both true and false without an error represent that the firewall rule was created or already exists\n\t\tif _, err := wapi.FirewallRuleAdd(\"OverlayTraffic4789UDP\", \"Overlay network traffic UDP\", \"\", \"4789\", wapi.NET_FW_IP_PROTOCOL_UDP, wapi.NET_FW_PROFILE2_ALL); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"error creating firewall rules: %v\", err)\n\t\t}\n\t\tlogrus.Infof(\"Creating VXLAN network using the vxlanAdapter: %s\", networkAdapter)\n\t\tnetwork = hcsshim.HNSNetwork{\n\t\t\tType: \"Overlay\",\n\t\t\tName: CalicoHnsNetworkName,\n\t\t\tNetworkAdapterName: networkAdapter,\n\t\t\tSubnets: []hcsshim.Subnet{\n\t\t\t\t{\n\t\t\t\t\tAddressPrefix: \"192.168.255.0/30\",\n\t\t\t\t\tGatewayAddress: \"192.168.255.1\",\n\t\t\t\t\tPolicies: []json.RawMessage{\n\t\t\t\t\t\t[]byte(\"{ \\\"Type\\\": \\\"VSID\\\", \\\"VSID\\\": 9999 }\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t} else {\n\t\tnetwork = hcsshim.HNSNetwork{\n\t\t\tType: \"L2Bridge\",\n\t\t\tName: CalicoHnsNetworkName,\n\t\t\tNetworkAdapterName: networkAdapter,\n\t\t\tSubnets: []hcsshim.Subnet{\n\t\t\t\t{\n\t\t\t\t\tAddressPrefix: \"192.168.255.0/30\",\n\t\t\t\t\tGatewayAddress: \"192.168.255.1\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\tif _, err := network.Create(); err != nil {\n\t\treturn \"\", fmt.Errorf(\"error creating the %s network: %v\", CalicoHnsNetworkName, err)\n\t}\n\n\t// Check if network exists. If it does not after 5 minutes, fail\n\tfor start := time.Now(); time.Since(start) < 5*time.Minute; {\n\t\tnetwork, err := hcsshim.GetHNSNetworkByName(CalicoHnsNetworkName)\n\t\tif err == nil {\n\t\t\treturn network.ManagementIP, nil\n\t\t}\n\t}\n\n\treturn \"\", fmt.Errorf(\"failed to create %s network\", CalicoHnsNetworkName)\n}",
"func (client *Client) CreateHost(request model.HostRequest) (*model.Host, error) {\n\treturn client.osclt.CreateHost(request)\n}",
"func createHost() (context.Context, host.Host, error) {\n\tctx, _ /* cancel */ := context.WithCancel(context.Background())\n\t// defer cancel()\n\n\tprvKey, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, rand.Reader)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tport, err := freeport.GetFreePort()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\thost, err := libp2p.New(\n\t\tctx,\n\t\tlibp2p.Identity(prvKey),\n\t\tlibp2p.ListenAddrStrings(fmt.Sprintf(\"/ip4/0.0.0.0/tcp/%v\", port)),\n\t)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn ctx, host, nil\n}",
"func (client *Client) CreateHost(request model.HostRequest) (*model.Host, error) {\n\treturn client.feclt.CreateHost(request)\n}",
"func NewAdapter() (adapter *Adapter, err error) {\n\tp := configProvider()\n\tadapter = &Adapter{\n\t\tec2: ec2.New(p),\n\t\tec2metadata: ec2metadata.New(p),\n\t\tautoscaling: autoscaling.New(p),\n\t\tacm: acm.New(p),\n\t\tiam: iam.New(p),\n\t\tcloudformation: cloudformation.New(p),\n\t\thealthCheckPath: DefaultHealthCheckPath,\n\t\thealthCheckPort: DefaultHealthCheckPort,\n\t\thealthCheckInterval: DefaultHealthCheckInterval,\n\t\tcreationTimeout: DefaultCreationTimeout,\n\t\tstackTTL: DefaultStackTTL,\n\t}\n\n\tadapter.manifest, err = buildManifest(adapter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn\n}",
"func newTestingHost(testdir string, cs modules.ConsensusSet, tp modules.TransactionPool) (modules.Host, error) {\n\tg, err := gateway.New(\"localhost:0\", false, filepath.Join(testdir, modules.GatewayDir), false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tw, err := newTestingWallet(testdir, cs, tp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\th, err := host.New(cs, g, tp, w, \"localhost:0\", filepath.Join(testdir, modules.HostDir))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// configure host to accept contracts\n\tsettings := h.InternalSettings()\n\tsettings.AcceptingContracts = true\n\terr = h.SetInternalSettings(settings)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// add storage to host\n\tstorageFolder := filepath.Join(testdir, \"storage\")\n\terr = os.MkdirAll(storageFolder, 0700)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = h.AddStorageFolder(storageFolder, modules.SectorSize*64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn h, nil\n}",
"func NewVppAdapter(addr string, useShm bool) adapter.VppAPI {\n\tif useShm {\n\t\tfmt.Fprint(os.Stderr, noShmWarning)\n\t\tpanic(\"No implementation for shared memory in pure Go client!\")\n\t}\n\t// addr is used as socket path\n\treturn socketclient.NewVppClient(addr)\n}",
"func NewAdapter(b *AdapterBuilder) (a *Adapter) {\n\t// allocate default adapter\n\ta = &Adapter{\n\t\tbeforeErr: doNothing,\n\t\tafterErr: doNothing,\n\t\tinternalHandler: defaultInternalServerErrorHandler,\n\t\twrapInternal: b.WrapInternal,\n\t}\n\n\t// check for nil arguments\n\tif b.AfterError != nil {\n\t\ta.afterErr = b.AfterError\n\t}\n\tif b.BeforeError != nil {\n\t\ta.beforeErr = b.BeforeError\n\t}\n\tif b.InternalHandler != nil {\n\t\ta.internalHandler = b.InternalHandler\n\t}\n\n\t// return adapter that is safe to use and will\n\t// not panic because of nil function pointers\n\treturn\n}",
"func NewAdapter(driverName string, masterDSNs []string, slaveDSNs []string, dbSpecified ...bool) *Adapter {\n\ta := &Adapter{}\n\ta.driverName = driverName\n\ta.masterDSNs = masterDSNs\n\ta.slaveDSNs = slaveDSNs\n\n\tif len(dbSpecified) == 0 {\n\t\ta.dbSpecified = false\n\t} else if len(dbSpecified) == 1 {\n\t\ta.dbSpecified = dbSpecified[0]\n\t} else {\n\t\tpanic(errors.New(\"invalid parameter: dbSpecified\"))\n\t}\n\n\t// Open the DB, create it if not existed.\n\ta.open()\n\n\t// Call the destructor when the object is released.\n\truntime.SetFinalizer(a, finalizer)\n\n\treturn a\n}",
"func NewHostAddress(host string, port uint16) *HostAddress {\n\treturn &HostAddress{host: host, port: port}\n}",
"func NewBindHostMethodNotAllowed() *BindHostMethodNotAllowed {\n\treturn &BindHostMethodNotAllowed{}\n}",
"func NewHost(config v2.Host, clusterInfo types.ClusterInfo) types.Host {\n\taddr, _ := net.ResolveTCPAddr(\"tcp\", config.Address)\n\n\treturn &host{\n\t\thostInfo: newHostInfo(addr, config, clusterInfo),\n\t\tweight: config.Weight,\n\t}\n}",
"func (client *Client) CreateHost(name, iqn string) (*Response, *ResponseStatus, error) {\n\treturn client.FormattedRequest(\"/create/host/id/\\\"%s\\\"/\\\"%s\\\"\", iqn, name)\n}",
"func NewDisableHostForbidden() *DisableHostForbidden {\n\treturn &DisableHostForbidden{}\n}",
"func (b *BridgeNetworkDriver) Create(name string, subnet string) (*Network, error) {\n\t// 取到网段字符串中的网关ip地址和网络的ip段\n\tip, IPRange, _ := net.ParseCIDR(subnet)\n\tIPRange.IP = ip\n\n\tn := &Network{\n\t\tName: name,\n\t\tIPRange: IPRange,\n\t\tDriver: b.Name(),\n\t}\n\n\terr := b.initBridge(n)\n\treturn n, err\n}",
"func (p *Proxy) NewHost(c *exec.Cmd) (*Host, error) {\n\th := &Host{\n\t\tcmd: c,\n\t\tproxy: p,\n\t}\n\tvar err error\n\th.httpTransfer, h.httpsTransfer, err = h.setupCmd(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn h, nil\n}",
"func (s *PolicySets) NewHostRule(isInbound bool) *hns.ACLPolicy {\n\tdirection := hns.Out\n\tif isInbound {\n\t\tdirection = hns.In\n\t}\n\n\treturn &hns.ACLPolicy{\n\t\tType: hns.ACL,\n\t\tRuleType: hns.Host,\n\t\tAction: hns.Allow,\n\t\tDirection: direction,\n\t\tPriority: 100,\n\t\tProtocol: 256, // Any\n\t}\n}",
"func NewHost(ctx *pulumi.Context,\n\tname string, args *HostArgs, opts ...pulumi.ResourceOption) (*Host, error) {\n\tif args == nil || args.Hostname == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Hostname'\")\n\t}\n\tif args == nil || args.Password == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Password'\")\n\t}\n\tif args == nil || args.Username == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Username'\")\n\t}\n\tif args == nil {\n\t\targs = &HostArgs{}\n\t}\n\tvar resource Host\n\terr := ctx.RegisterResource(\"vsphere:index/host:Host\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (d *Driver) createNetwork() error {\n\tconn, err := getConnection()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting libvirt connection\")\n\t}\n\tdefer conn.Close()\n\n\t// network: default\n\t// It is assumed that the libvirt/kvm installation has already created this network\n\n\t// network: private\n\n\t// Only create the private network if it does not already exist\n\tif _, err := conn.LookupNetworkByName(d.PrivateNetwork); err != nil {\n\t\t// create the XML for the private network from our networkTmpl\n\t\ttmpl := template.Must(template.New(\"network\").Parse(networkTmpl))\n\t\tvar networkXML bytes.Buffer\n\t\tif err := tmpl.Execute(&networkXML, d); err != nil {\n\t\t\treturn errors.Wrap(err, \"executing network template\")\n\t\t}\n\n\t\t// define the network using our template\n\t\tnetwork, err := conn.NetworkDefineXML(networkXML.String())\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"defining network from xml: %s\", networkXML.String())\n\t\t}\n\n\t\t// and finally create it\n\t\tif err := network.Create(); err != nil {\n\t\t\treturn errors.Wrapf(err, \"creating network %s\", d.PrivateNetwork)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func New(tb testing.TB, settings hostdb.HostSettings, wm host.Wallet, tpool host.TransactionPool) *Host {\n\ttb.Helper()\n\tl, err := net.Listen(\"tcp\", \":0\")\n\tif err != nil {\n\t\ttb.Fatal(err)\n\t}\n\ttb.Cleanup(func() { l.Close() })\n\tsettings.NetAddress = modules.NetAddress(l.Addr().String())\n\tsettings.UnlockHash, err = wm.Address()\n\tif err != nil {\n\t\ttb.Fatal(err)\n\t}\n\tkey := ed25519.NewKeyFromSeed(frand.Bytes(ed25519.SeedSize))\n\th := &Host{\n\t\tPublicKey: hostdb.HostKeyFromPublicKey(ed25519hash.ExtractPublicKey(key)),\n\t\tSettings: settings,\n\t\tl: l,\n\t}\n\tcs := newEphemeralContractStore(key)\n\tss := newEphemeralSectorStore()\n\tsh := host.NewSessionHandler(key, (*constantHostSettings)(&h.Settings), cs, ss, wm, tpool, nopMetricsRecorder{})\n\tgo listen(sh, l)\n\th.cw = host.NewChainWatcher(tpool, wm, cs, ss)\n\treturn h\n}",
"func NewHostFilter(host string) *HostFilter {\n\tctx, cancel := context.WithCancel(context.Background())\n\tf := &HostFilter{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\n\t\thost: host,\n\n\t\toutputCh: make(chan map[string][]*targetgroup.Group),\n\t}\n\treturn f\n}",
"func NewAdapter() Adapter {\n\treturn createAdapter()\n}",
"func NewAdapter() Adapter {\n\treturn createAdapter()\n}",
"func NewAdapter() Adapter {\n\treturn createAdapter()\n}",
"func NewAdapter() Adapter {\n\treturn createAdapter()\n}",
"func NewAdapter() Adapter {\n\treturn createAdapter()\n}",
"func makeOrgVdcNetworkWithDhcp(vcd *TestVCD, check *C, edgeGateway *EdgeGateway) *types.OrgVDCNetwork {\n\tvar networkConfig = types.OrgVDCNetwork{\n\t\tXmlns: types.XMLNamespaceVCloud,\n\t\tName: TestCreateOrgVdcNetworkDhcp,\n\t\tDescription: TestCreateOrgVdcNetworkDhcp,\n\t\tConfiguration: &types.NetworkConfiguration{\n\t\t\tFenceMode: types.FenceModeNAT,\n\t\t\tIPScopes: &types.IPScopes{\n\t\t\t\tIPScope: []*types.IPScope{&types.IPScope{\n\t\t\t\t\tIsInherited: false,\n\t\t\t\t\tGateway: \"32.32.32.1\",\n\t\t\t\t\tNetmask: \"255.255.255.0\",\n\t\t\t\t\tIPRanges: &types.IPRanges{\n\t\t\t\t\t\tIPRange: []*types.IPRange{\n\t\t\t\t\t\t\t&types.IPRange{\n\t\t\t\t\t\t\t\tStartAddress: \"32.32.32.10\",\n\t\t\t\t\t\t\t\tEndAddress: \"32.32.32.20\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBackwardCompatibilityMode: true,\n\t\t},\n\t\tEdgeGateway: &types.Reference{\n\t\t\tHREF: edgeGateway.EdgeGateway.HREF,\n\t\t\tID: edgeGateway.EdgeGateway.ID,\n\t\t\tName: edgeGateway.EdgeGateway.Name,\n\t\t\tType: edgeGateway.EdgeGateway.Type,\n\t\t},\n\t\tIsShared: false,\n\t}\n\n\t// Create network\n\terr := vcd.vdc.CreateOrgVDCNetworkWait(&networkConfig)\n\tif err != nil {\n\t\tfmt.Printf(\"error creating Network <%s>: %s\\n\", TestCreateOrgVdcNetworkDhcp, err)\n\t}\n\tcheck.Assert(err, IsNil)\n\tAddToCleanupList(TestCreateOrgVdcNetworkDhcp, \"network\", vcd.org.Org.Name+\"|\"+vcd.vdc.Vdc.Name, \"TestCreateOrgVdcNetworkDhcp\")\n\tnetwork, err := vcd.vdc.GetOrgVdcNetworkByName(TestCreateOrgVdcNetworkDhcp, true)\n\tcheck.Assert(err, IsNil)\n\n\t// Add DHCP pool\n\tdhcpPoolConfig := make([]interface{}, 1)\n\tdhcpPool := make(map[string]interface{})\n\tdhcpPool[\"start_address\"] = \"32.32.32.21\"\n\tdhcpPool[\"end_address\"] = \"32.32.32.250\"\n\tdhcpPool[\"default_lease_time\"] = 3600\n\tdhcpPool[\"max_lease_time\"] = 7200\n\tdhcpPoolConfig[0] = dhcpPool\n\ttask, err := edgeGateway.AddDhcpPool(network.OrgVDCNetwork, 
dhcpPoolConfig)\n\tcheck.Assert(err, IsNil)\n\terr = task.WaitTaskCompletion()\n\tcheck.Assert(err, IsNil)\n\n\treturn network.OrgVDCNetwork\n}",
"func makeBasicHost(listenPort int, secio bool, randseed int64) (host.Host, error) {\r\n\r\n\tvar r io.Reader\r\n\tif randseed == 0 {\r\n\t\tr = rand.Reader\r\n\t} else {\r\n\t\tr = mrand.New(mrand.NewSource(randseed))\r\n\t}\r\n\r\n\tpriv, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, r)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\t// To create a 'swarm' (BasicHost), we need an <ipfs-protocol ID> like 'QmNt...xc'\r\n\t// We generate a key pair on every run and uses and ID extracted from the public key.\r\n\t// We also need a <multiaddress> indicating how to reach this peer.\r\n\topts := []libp2p.Option{\r\n\t\tlibp2p.ListenAddrStrings(fmt.Sprintf(\"/ip4/127.0.0.1/tcp/%d\", listenPort)),\r\n\t\tlibp2p.Identity(priv),\r\n\t}\r\n\r\n\tif !secio {\r\n\t\topts = append(opts, libp2p.NoSecurity)\r\n\t}\r\n\r\n\tbasicHost, err := libp2p.New(context.Background(), opts...)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\thostAddr, _ := ma.NewMultiaddr(fmt.Sprintf(\"/ipfs/%s\", basicHost.ID().Pretty()))\r\n\r\n\tvar addr ma.Multiaddr\r\n\tfor _, a := range basicHost.Addrs() {\r\n\t\tif strings.Contains(a.String(), \"p2p-circuit\") {\r\n\t\t\tcontinue\r\n\t\t}\r\n\t\taddr = a\r\n\t\tbreak\r\n\t}\r\n\r\n\t// concat addr + hostAddr\r\n\tfullAddr := addr.Encapsulate(hostAddr)\r\n\tlog.Printf(\"I am %s\\n\", fullAddr)\r\n\tif secio {\r\n\t\tlog.Printf(\"Now run \\\"./echo -l %d -d %s -secio\\\" on a different terminal\\n\", listenPort+1, fullAddr)\r\n\t} else {\r\n\t\tlog.Printf(\"Now run \\\"./echo -l %d -d %s \\\" on a different terminal\\n\", listenPort+1, fullAddr)\r\n\t}\r\n\r\n\treturn basicHost, nil\r\n}",
"func NewAdapter(db *sql.DB, tableName string) (*Adapter, error) {\n\treturn NewAdapterWithDBSchema(db, \"public\", tableName)\n}",
"func (nc *Builder) buildHost(ctx context.Context, makeDHT func(host host.Host) (routing.Routing, error)) (host.Host, error) {\n\t// Node must build a host acting as a libp2p relay. Additionally it\n\t// runs the autoNAT service which allows other nodes to check for their\n\t// own dialability by having this node attempt to dial them.\n\tmakeDHTRightType := func(h host.Host) (routing.PeerRouting, error) {\n\t\treturn makeDHT(h)\n\t}\n\n\tif nc.IsRelay {\n\t\tcfg := nc.Repo.Config()\n\t\tpublicAddr, err := ma.NewMultiaddr(cfg.Swarm.PublicRelayAddress)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpublicAddrFactory := func(lc *libp2p.Config) error {\n\t\t\tlc.AddrsFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr {\n\t\t\t\tif cfg.Swarm.PublicRelayAddress == \"\" {\n\t\t\t\t\treturn addrs\n\t\t\t\t}\n\t\t\t\treturn append(addrs, publicAddr)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\trelayHost, err := libp2p.New(\n\t\t\tctx,\n\t\t\tlibp2p.EnableRelay(circuit.OptHop),\n\t\t\tlibp2p.EnableAutoRelay(),\n\t\t\tlibp2p.Routing(makeDHTRightType),\n\t\t\tpublicAddrFactory,\n\t\t\tlibp2p.ChainOptions(nc.Libp2pOpts...),\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// Set up autoNATService as a streamhandler on the host.\n\t\t_, err = autonatsvc.NewAutoNATService(ctx, relayHost)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn relayHost, nil\n\t}\n\treturn libp2p.New(\n\t\tctx,\n\t\tlibp2p.EnableAutoRelay(),\n\t\tlibp2p.Routing(makeDHTRightType),\n\t\tlibp2p.ChainOptions(nc.Libp2pOpts...),\n\t)\n}",
"func FL_mk_host( ipv4 string, ipv6 string, mac string, swname string, port int ) ( flhost FL_host_json ) {\n\n\tflhost = FL_host_json { }\t\t\t// new struct\n\tflhost.Mac = make( []string, 1 )\n\tflhost.Ipv4 = make( []string, 1 )\n\tflhost.Ipv6 = make( []string, 1 )\n\tflhost.Mac[0] = mac\n\tflhost.Ipv4[0] = ipv4\n\tflhost.Ipv6[0] = ipv6\n\n\tflhost.AttachmentPoint = make( []FL_attachment_json, 1 )\n\tflhost.AttachmentPoint[0].SwitchDPID = swname\n\tflhost.AttachmentPoint[0].Port = port\n\n\treturn\n}",
"func NewHost(conf Config, middlewares ...Middleware) (host *Host) {\n\thost = &Host{\n\t\thandlers: map[string]*endpoint{},\n\t\tconf: conf,\n\n\t\tbasepath: \"\",\n\t\tmstack: middlewares,\n\t}\n\tif !conf.DisableAutoReport {\n\t\tos.Stdout.WriteString(\"Registration Info:\\r\\n\")\n\t}\n\thost.initCheck()\n\treturn\n}",
"func NewHostDevCni(typeOfVlan string, logger logr.Logger) *HostDevCni {\n\treturn &HostDevCni{VlanType: typeOfVlan,\n\t\tLog: logger}\n}",
"func (b *Bridge) CreateHostTap(tap string, lan int) (string, error) {\n\tbridgeLock.Lock()\n\tdefer bridgeLock.Unlock()\n\n\treturn b.createHostTap(tap, lan)\n}",
"func MakeBasicHost(listenPort int, protocolID string, randseed int64) (host.Host, error) {\n\n\t// If the seed is zero, use real cryptographic randomness. Otherwise, use a\n\t// deterministic randomness source to make generated keys stay the same\n\t// across multiple runs\n\tvar r io.Reader\n\tif randseed == 0 {\n\t\tr = rand.Reader\n\t} else {\n\t\tr = mrand.New(mrand.NewSource(randseed))\n\t}\n\n\t// Generate a key pair for this host. We will use it at least\n\t// to obtain a valid host ID.\n\tpriv, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\topts := []libp2p.Option{\n\t\tlibp2p.ListenAddrStrings(fmt.Sprintf(\"/ip4/127.0.0.1/tcp/%d\", listenPort)),\n\t\tlibp2p.Identity(priv),\n\t\tlibp2p.DisableRelay(),\n\t}\n\n\tif protocolID == plaintext.ID {\n\t\topts = append(opts, libp2p.NoSecurity)\n\t} else if protocolID == noise.ID {\n\t\ttpt, err := noise.New(priv)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\topts = append(opts, libp2p.Security(protocolID, tpt))\n\t} else if protocolID == secio.ID {\n\t\ttpt, err := secio.New(priv)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\topts = append(opts, libp2p.Security(protocolID, tpt))\n\t} else {\n\t\treturn nil, fmt.Errorf(\"security protocolID '%s' is not supported\", protocolID)\n\t}\n\n\tbasicHost, err := libp2p.New(context.Background(), opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Build host multiaddress\n\thostAddr, _ := ma.NewMultiaddr(fmt.Sprintf(\"/ipfs/%s\", basicHost.ID().Pretty()))\n\n\t// Now we can build a full multiaddress to reach this host\n\t// by encapsulating both addresses:\n\taddr := basicHost.Addrs()[0]\n\tfullAddr := addr.Encapsulate(hostAddr)\n\tlog.Printf(\"I am %s\\n\", fullAddr)\n\tlog.Printf(\"Now run \\\"./echo -l %d -d %s -security %s\\\" on a different terminal\\n\", listenPort+1, fullAddr, protocolID)\n\n\treturn basicHost, nil\n}",
"func NewBindHostNotImplemented() *BindHostNotImplemented {\n\treturn &BindHostNotImplemented{}\n}",
"func NewAdapter(config *aws.Config, ds string) *Adapter {\n\ta := &Adapter{}\n\ta.Config = config\n\ta.DataSourceName = ds\n\ta.Service = dynamodb.New(session.New(config), a.Config)\n\ta.DB = dynamo.New(session.New(), a.Config)\n\treturn a\n}",
"func createHostWebSocket(port int) (core.Host, error) {\n\n\t// Starting a peer with QUIC transport\n\topts := []libp2p.Option{\n\t\tlibp2p.ListenAddrStrings(fmt.Sprintf(\"/ip4/0.0.0.0/udp/%d/quic\", port)),\n\t\tlibp2p.ListenAddrStrings(fmt.Sprintf(\"/ip4/0.0.0.0/tcp/%d/ws\", port)),\n\t\tlibp2p.Transport(ws.New),\n\t\tlibp2p.DefaultMuxers,\n\t\tlibp2p.DefaultSecurity,\n\t}\n\n\th, err := libp2p.New(context.Background(), opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn h, nil\n}",
"func NewFromIpfsHost(host host.Host, r routing.ContentRouting, opts ...NetOpt) BitSwapNetwork {\n\ts := processSettings(opts...)\n\n\tbitswapNetwork := impl{\n\t\thost: host,\n\t\trouting: r,\n\n\t\tprotocolBitswapNoVers: s.ProtocolPrefix + ProtocolBitswapNoVers,\n\t\tprotocolBitswapOneZero: s.ProtocolPrefix + ProtocolBitswapOneZero,\n\t\tprotocolBitswapOneOne: s.ProtocolPrefix + ProtocolBitswapOneOne,\n\t\tprotocolBitswap: s.ProtocolPrefix + ProtocolBitswap,\n\n\t\tsupportedProtocols: s.SupportedProtocols,\n\t}\n\n\treturn &bitswapNetwork\n}",
"func NewMockhost(ctrl *gomock.Controller) *Mockhost {\n\tmock := &Mockhost{ctrl: ctrl}\n\tmock.recorder = &MockhostMockRecorder{mock}\n\treturn mock\n}",
"func NewHostN(address string, nodeID core.RecordRef) (*Host, error) {\n\th, err := NewHost(address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\th.NodeID = nodeID\n\treturn h, nil\n}",
"func (s *HostListener) Create(ctx context.Context, in *protocol.HostDefinition) (_ *protocol.Host, err error) {\n\tdefer fail.OnExitConvertToGRPCStatus(&err)\n\tdefer fail.OnExitWrapError(&err, \"cannot create host\")\n\tdefer fail.OnPanic(&err)\n\n\tif s == nil {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\tif in == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"in\")\n\t}\n\tif ctx == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"ctx\")\n\t}\n\n\tif ok, err := govalidator.ValidateStruct(in); err != nil || !ok {\n\t\tlogrus.Warnf(\"Structure validation failure: %v\", in) // FIXME: Generate json tags in protobuf\n\t}\n\n\tname := in.GetName()\n\tjob, xerr := PrepareJob(ctx, in.GetTenantId(), fmt.Sprintf(\"/host/%s/create\", name))\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\tdefer job.Close()\n\n\ttracer := debug.NewTracer(job.Task(), tracing.ShouldTrace(\"listeners.home\"), \"('%s')\", name).WithStopwatch().Entering()\n\tdefer tracer.Exiting()\n\tdefer fail.OnExitLogError(&err, tracer.TraceMessage())\n\n\tvar sizing *abstract.HostSizingRequirements\n\tif in.SizingAsString != \"\" {\n\t\tsizing, _, err = converters.HostSizingRequirementsFromStringToAbstract(in.SizingAsString)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if in.Sizing != nil {\n\t\tsizing = converters.HostSizingRequirementsFromProtocolToAbstract(in.Sizing)\n\t}\n\tif sizing == nil {\n\t\tsizing = &abstract.HostSizingRequirements{MinGPU: -1}\n\t}\n\n\t// Determine if the Subnet(s) to use exist\n\t// Because of legacy, the subnet can be fully identified by network+subnet, or can be identified by network+network,\n\t// because previous release of SafeScale created network AND subnet with the same name\n\tvar (\n\t\tnetworkRef string\n\t\tsubnetInstance resources.Subnet\n\t\tsubnets []*abstract.Subnet\n\t)\n\tif !in.GetSingle() {\n\t\tnetworkRef = in.GetNetwork()\n\t}\n\tif len(in.GetSubnets()) > 0 {\n\t\tfor _, v := range in.GetSubnets() 
{\n\t\t\tsubnetInstance, xerr = subnetfactory.Load(job.Service(), networkRef, v)\n\t\t\tif xerr != nil {\n\t\t\t\treturn nil, xerr\n\t\t\t}\n\n\t\t\tdefer func(instance resources.Subnet) { // nolint\n\t\t\t\tinstance.Released()\n\t\t\t}(subnetInstance)\n\n\t\t\txerr = subnetInstance.Review(func(clonable data.Clonable, _ *serialize.JSONProperties) fail.Error {\n\t\t\t\tas, ok := clonable.(*abstract.Subnet)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fail.InconsistentError(\"'*abstract.Subnet' expected, '%s' provided\", reflect.TypeOf(clonable).String())\n\t\t\t\t}\n\n\t\t\t\tsubnets = append(subnets, as)\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif xerr != nil {\n\t\t\t\treturn nil, xerr\n\t\t\t}\n\t\t}\n\t}\n\tif len(subnets) == 0 && networkRef != \"\" {\n\t\tsubnetInstance, xerr = subnetfactory.Load(job.Service(), networkRef, networkRef)\n\t\tif xerr != nil {\n\t\t\treturn nil, xerr\n\t\t}\n\n\t\tdefer subnetInstance.Released()\n\n\t\txerr = subnetInstance.Review(func(clonable data.Clonable, _ *serialize.JSONProperties) fail.Error {\n\t\t\tas, ok := clonable.(*abstract.Subnet)\n\t\t\tif !ok {\n\t\t\t\treturn fail.InconsistentError(\"'*abstract.Subnet' expected, '%s' provided\", reflect.TypeOf(clonable).String())\n\t\t\t}\n\n\t\t\tsubnets = append(subnets, as)\n\t\t\treturn nil\n\t\t})\n\t\tif xerr != nil {\n\t\t\treturn nil, xerr\n\t\t}\n\t}\n\tif len(subnets) == 0 && !in.GetSingle() {\n\t\treturn nil, fail.InvalidRequestError(\"insufficient use of --network and/or --subnet or missing --single\")\n\t}\n\n\tdomain := in.Domain\n\tdomain = strings.Trim(domain, \".\")\n\tif domain != \"\" {\n\t\tdomain = \".\" + domain\n\t}\n\n\thostReq := abstract.HostRequest{\n\t\tResourceName: name,\n\t\tHostName: name + domain,\n\t\tSingle: in.GetSingle(),\n\t\tKeepOnFailure: in.GetKeepOnFailure(),\n\t\tSubnets: subnets,\n\t\tImageRef: in.GetImageId(),\n\t}\n\n\thostInstance, xerr := hostfactory.New(job.Service())\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\n\t_, xerr = 
hostInstance.Create(job.Context(), hostReq, *sizing)\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\n\tdefer hostInstance.Released()\n\n\t// logrus.Infof(\"Host '%s' created\", name)\n\treturn hostInstance.ToProtocol()\n}",
"func NewHost(uri string) Host {\n\t// no need to decompose uri using net/url package\n\treturn Host{uri: uri, client: http.Client{}}\n}",
"func makeBasicHost(listenPort int, secio bool, randseed int64) (host.Host, error) {\n\n\t// If the seed is zero, use real cryptographic randomness. Otherwise, use a\n\t// deterministic randomness source to make generated keys stay the same\n\t// across multiple runs\n\tvar r io.Reader\n\tif randseed == 0 {\n\t\tr = rand.Reader\n\t} else {\n\t\tr = mrand.New(mrand.NewSource(randseed))\n\t}\n\n\t// Generate a key pair for this host. We will use it\n\t// to obtain a valid host ID.\n\tpriv, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\topts := []libp2p.Option{\n\t\tlibp2p.ListenAddrStrings(fmt.Sprintf(\"/ip4/127.0.0.1/tcp/%d\", listenPort)),\n\t\tlibp2p.Identity(priv),\n\t}\n\n\t// if !secio {\n\t// \topts = append(opts, libp2p.NoEncryption())\n\t// }\n\n\tbasicHost, err := libp2p.New(context.Background(), opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Build host multiaddress\n\thostAddr, _ := ma.NewMultiaddr(fmt.Sprintf(\"/ipfs/%s\", basicHost.ID().Pretty()))\n\n\t// Now we can build a full multiaddress to reach this host\n\t// by encapsulating both addresses:\n\taddr := basicHost.Addrs()[0]\n\tfullAddr := addr.Encapsulate(hostAddr)\n\tlog.Printf(\"I am %s\\n\", fullAddr)\n\tif secio {\n\t\tlog.Printf(\"Now run \\\"go run main.go -l %d -d %s -secio\\\" on a different terminal\\n\", listenPort+1, fullAddr)\n\t} else {\n\t\tlog.Printf(\"Now run \\\"go run main.go -l %d -d %s\\\" on a different terminal\\n\", listenPort+1, fullAddr)\n\t}\n\n\treturn basicHost, nil\n}",
"func resourceHostCreate(d *schema.ResourceData, m interface{}) error {\n\tapi := m.(*zabbix.API)\n\n\titem, err := buildHostObject(d, m)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\titems := []zabbix.Host{*item}\n\n\terr = api.HostsCreate(items)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Trace(\"created host: %+v\", items[0])\n\n\td.SetId(items[0].HostID)\n\n\treturn resourceHostRead(d, m)\n}",
"func createBaremetalHost() *metal3api.BareMetalHost {\n\n\tbmh := &metal3api.BareMetalHost{}\n\tbmh.ObjectMeta = metav1.ObjectMeta{Name: hostName, Namespace: hostNamespace}\n\tc := fakeclient.NewFakeClient(bmh)\n\n\treconciler := &BareMetalHostReconciler{\n\t\tClient: c,\n\t\tProvisionerFactory: nil,\n\t\tLog: ctrl.Log.WithName(\"bmh_reconciler\").WithName(\"BareMetalHost\"),\n\t}\n\n\treconciler.Create(context.TODO(), bmh)\n\n\treturn bmh\n}",
"func NewBindHostUnauthorized() *BindHostUnauthorized {\n\treturn &BindHostUnauthorized{}\n}",
"func (h *HostService) CreateHost(data map[string]interface{}, params map[string]string) (*Host, error) {\n\tmandatoryFields = []string{\"name\", \"inventory\"}\n\tvalidate, status := ValidateParams(data, mandatoryFields)\n\n\tif !status {\n\t\terr := fmt.Errorf(\"Mandatory input arguments are absent: %s\", validate)\n\t\treturn nil, err\n\t}\n\n\tresult := new(Host)\n\tpayload, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Add check if Host exists and return proper error\n\n\tresp, err := h.client.Requester.PostJSON(hostsAPIEndpoint, bytes.NewReader(payload), result, params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := CheckResponse(resp); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn result, nil\n}",
"func (h Hostingv4) CreatePrivateIP(vlan hosting.Vlan, ip string) (hosting.IPAddress, error) {\n\tvar fn = \"CreatePrivateIP\"\n\tif vlan.RegionID == \"\" || vlan.ID == \"\" {\n\t\treturn hosting.IPAddress{}, &HostingError{fn, \"Vlan\", \"ID/RegionID\", ErrNotProvided}\n\t}\n\tregionid, err := strconv.Atoi(vlan.RegionID)\n\tif err != nil {\n\t\treturn hosting.IPAddress{}, &HostingError{fn, \"Vlan\", \"RegionID\", ErrParse}\n\t}\n\tvlanid, err := strconv.Atoi(vlan.ID)\n\tif err != nil {\n\t\treturn hosting.IPAddress{}, &HostingError{fn, \"Vlan\", \"ID\", ErrParse}\n\t}\n\n\tvar ipv4 iPAddressv4\n\tvar response = Operation{}\n\terr = h.Send(\"hosting.iface.create\", []interface{}{\n\t\tmap[string]interface{}{\n\t\t\t\"datacenter_id\": regionid,\n\t\t\t\"bandwidth\": hosting.DefaultBandwidth,\n\t\t\t\"ip\": ip,\n\t\t\t\"vlan\": vlanid,\n\t\t}}, &response)\n\tif err != nil {\n\t\treturn hosting.IPAddress{}, err\n\t}\n\tif err = h.waitForOp(response); err != nil {\n\t\treturn hosting.IPAddress{}, err\n\t}\n\n\tif err = h.Send(\"hosting.ip.info\", []interface{}{response.IPID}, &ipv4); err != nil {\n\t\treturn hosting.IPAddress{}, err\n\t}\n\n\treturn toIPAddress(ipv4), nil\n}",
"func NewAdapter(fs http.FileSystem, path string) *Adapter {\n\treturn &Adapter{fs, path}\n}",
"func NewHwAdapter(hw *hw.HwRoot) HwAdapter {\n\treturn HwAdapter{hw.GetServicePort(), mykafka.NewWriter(mykafka.HwClient, mykafka.TunerTopic)}\n}",
"func NewAdapter(text string) *Adapter {\n\treturn &Adapter{text: text}\n}",
"func (account *SCloudaccount) createNetwork(ctx context.Context, wireId, networkType string, net CANetConf) error {\n\tnetwork := &SNetwork{}\n\tnetwork.Name = net.Name\n\tif hint, err := NetworkManager.NewIfnameHint(net.Name); err != nil {\n\t\tlog.Errorf(\"can't NewIfnameHint form hint %s\", net.Name)\n\t} else {\n\t\tnetwork.IfnameHint = hint\n\t}\n\tnetwork.GuestIpStart = net.IpStart\n\tnetwork.GuestIpEnd = net.IpEnd\n\tnetwork.GuestIpMask = net.IpMask\n\tnetwork.GuestGateway = net.Gateway\n\tnetwork.VlanId = int(net.VlanID)\n\tnetwork.WireId = wireId\n\tnetwork.ServerType = networkType\n\tnetwork.IsPublic = true\n\tnetwork.Status = api.NETWORK_STATUS_AVAILABLE\n\tnetwork.PublicScope = string(rbacscope.ScopeDomain)\n\tnetwork.ProjectId = account.ProjectId\n\tnetwork.DomainId = account.DomainId\n\tnetwork.Description = net.Description\n\n\tnetwork.SetModelManager(NetworkManager, network)\n\t// TODO: Prevent IP conflict\n\tlog.Infof(\"create network %s succussfully\", network.Id)\n\terr := NetworkManager.TableSpec().Insert(ctx, network)\n\treturn err\n}",
"func createSingleHostNetworking(ctx context.Context, svc iaas.Service, singleHostRequest abstract.HostRequest) (_ resources.Subnet, _ func() fail.Error, ferr fail.Error) {\n\t// Build network name\n\tcfg, xerr := svc.GetConfigurationOptions(ctx)\n\tif xerr != nil {\n\t\treturn nil, nil, xerr\n\t}\n\n\tbucketName := cfg.GetString(\"MetadataBucketName\")\n\tif bucketName == \"\" {\n\t\treturn nil, nil, fail.InconsistentError(\"missing service configuration option 'MetadataBucketName'\")\n\t}\n\n\t// Trim and TrimPrefix don't do the same thing\n\tnetworkName := fmt.Sprintf(\"sfnet-%s\", strings.TrimPrefix(bucketName, objectstorage.BucketNamePrefix+\"-\"))\n\n\t// Create network if needed\n\tnetworkInstance, xerr := LoadNetwork(ctx, svc, networkName)\n\tif xerr != nil {\n\t\tswitch xerr.(type) {\n\t\tcase *fail.ErrNotFound:\n\t\t\tnetworkInstance, xerr = NewNetwork(svc)\n\t\t\tif xerr != nil {\n\t\t\t\treturn nil, nil, xerr\n\t\t\t}\n\n\t\t\trequest := abstract.NetworkRequest{\n\t\t\t\tName: networkName,\n\t\t\t\tCIDR: abstract.SingleHostNetworkCIDR,\n\t\t\t\tKeepOnFailure: true,\n\t\t\t}\n\t\t\txerr = networkInstance.Create(ctx, &request, nil)\n\t\t\tif xerr != nil {\n\t\t\t\t// handle a particular case of *fail.ErrDuplicate...\n\t\t\t\tswitch cerr := xerr.(type) {\n\t\t\t\tcase *fail.ErrDuplicate:\n\t\t\t\t\tvalue, found := cerr.Annotation(\"managed\")\n\t\t\t\t\tif found && value != nil {\n\t\t\t\t\t\tmanaged, ok := value.(bool)\n\t\t\t\t\t\tif ok && !managed {\n\t\t\t\t\t\t\treturn nil, nil, xerr\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\t// ... 
otherwise, try to get Network that is created by another goroutine\n\t\t\t\tswitch xerr.(type) {\n\t\t\t\tcase *fail.ErrDuplicate, *fail.ErrNotAvailable:\n\t\t\t\t\t// If these errors occurred, another goroutine is running to create the same Network, so wait for it\n\t\t\t\t\tnetworkInstance, xerr = LoadNetwork(ctx, svc, networkName)\n\t\t\t\t\tif xerr != nil {\n\t\t\t\t\t\treturn nil, nil, xerr\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, nil, xerr\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, nil, xerr\n\t\t}\n\t}\n\n\tnid, err := networkInstance.GetID()\n\tif err != nil {\n\t\treturn nil, nil, fail.ConvertError(err)\n\t}\n\n\t// Check if Subnet exists\n\tvar (\n\t\tsubnetRequest abstract.SubnetRequest\n\t\tcidrIndex uint\n\t)\n\tsubnetInstance, xerr := LoadSubnet(ctx, svc, nid, singleHostRequest.ResourceName)\n\tif xerr != nil {\n\t\tswitch xerr.(type) {\n\t\tcase *fail.ErrNotFound:\n\t\t\tsubnetInstance, xerr = NewSubnet(svc)\n\t\t\tif xerr != nil {\n\t\t\t\treturn nil, nil, xerr\n\t\t\t}\n\n\t\t\tvar (\n\t\t\t\tsubnetCIDR string\n\t\t\t)\n\n\t\t\tsubnetCIDR, cidrIndex, xerr = ReserveCIDRForSingleHost(ctx, networkInstance)\n\t\t\txerr = debug.InjectPlannedFail(xerr)\n\t\t\tif xerr != nil {\n\t\t\t\treturn nil, nil, xerr\n\t\t\t}\n\n\t\t\tvar dnsServers []string\n\t\t\topts, xerr := svc.GetConfigurationOptions(ctx)\n\t\t\txerr = debug.InjectPlannedFail(xerr)\n\t\t\tif xerr != nil {\n\t\t\t\tswitch xerr.(type) {\n\t\t\t\tcase *fail.ErrNotFound:\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, nil, xerr\n\t\t\t\t}\n\t\t\t} else if servers := strings.TrimSpace(opts.GetString(\"DNSServers\")); servers != \"\" {\n\t\t\t\tdnsServers = strings.Split(servers, \",\")\n\t\t\t}\n\n\t\t\tsubnetRequest.Name = singleHostRequest.ResourceName\n\t\t\tsubnetRequest.NetworkID, err = networkInstance.GetID()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, fail.ConvertError(err)\n\t\t\t}\n\t\t\tsubnetRequest.IPVersion = ipversion.IPv4\n\t\t\tsubnetRequest.CIDR = 
subnetCIDR\n\t\t\tsubnetRequest.DNSServers = dnsServers\n\t\t\tsubnetRequest.HA = false\n\n\t\t\txerr = subnetInstance.CreateSubnetWithoutGateway(ctx, subnetRequest)\n\t\t\txerr = debug.InjectPlannedFail(xerr)\n\t\t\tif xerr != nil {\n\t\t\t\treturn nil, nil, xerr\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tferr = debug.InjectPlannedFail(ferr)\n\t\t\t\tif ferr != nil && !singleHostRequest.KeepOnFailure {\n\t\t\t\t\tderr := subnetInstance.Delete(cleanupContextFrom(ctx))\n\t\t\t\t\tif derr != nil {\n\t\t\t\t\t\t_ = ferr.AddConsequence(\n\t\t\t\t\t\t\tfail.Wrap(\n\t\t\t\t\t\t\t\tderr, \"cleaning up on failure, failed to delete Subnet '%s'\",\n\t\t\t\t\t\t\t\tsingleHostRequest.ResourceName,\n\t\t\t\t\t\t\t),\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t// Sets the CIDR index in instance metadata\n\t\t\txerr = subnetInstance.Alter(ctx, func(clonable data.Clonable, _ *serialize.JSONProperties) fail.Error {\n\t\t\t\tas, ok := clonable.(*abstract.Subnet)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fail.InconsistentError(\n\t\t\t\t\t\t\"'*abstract.Subnet' expected, '%s' provided\", reflect.TypeOf(clonable).String(),\n\t\t\t\t\t)\n\t\t\t\t}\n\n\t\t\t\tas.SingleHostCIDRIndex = cidrIndex\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif xerr != nil {\n\t\t\t\treturn nil, nil, xerr\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, nil, xerr\n\t\t}\n\t} else {\n\t\treturn nil, nil, fail.DuplicateError(\"there is already a Subnet named '%s'\", singleHostRequest.ResourceName)\n\t}\n\n\tundoFunc := func() fail.Error {\n\t\tvar errs []error\n\t\tif !singleHostRequest.KeepOnFailure {\n\t\t\tderr := subnetInstance.Delete(cleanupContextFrom(ctx))\n\t\t\tif derr != nil {\n\t\t\t\terrs = append(\n\t\t\t\t\terrs, fail.Wrap(\n\t\t\t\t\t\tderr, \"cleaning up on failure, failed to delete Subnet '%s'\", singleHostRequest.ResourceName,\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t}\n\t\t\tderr = FreeCIDRForSingleHost(cleanupContextFrom(ctx), networkInstance, cidrIndex)\n\t\t\tif derr != nil {\n\t\t\t\terrs = 
append(\n\t\t\t\t\terrs, fail.Wrap(\n\t\t\t\t\t\tderr, \"cleaning up on failure, failed to free CIDR slot in Network '%s'\",\n\t\t\t\t\t\tnetworkInstance.GetName(),\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t\tif len(errs) > 0 {\n\t\t\treturn fail.NewErrorList(errs)\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn subnetInstance, undoFunc, nil\n}",
"func NewGetAdapterHostEthInterfacesDefault(code int) *GetAdapterHostEthInterfacesDefault {\n\treturn &GetAdapterHostEthInterfacesDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func (FinagleFmt) Create(host string, port int) ZKRecord {\n\treturn &FinagleRecord{\n\t\tServiceEndpoint: endpoint{host, port},\n\t\tAdditionalEndpoints: make(map[string]endpoint),\n\t\tShard: 0,\n\t\tStatus: statusAlive,\n\t}\n}",
"func HostOnly(addr string) string {\n\thost, _, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn addr\n\t} else {\n\t\treturn host\n\t}\n}",
"func NewHostNetworkMock(t minimock.Tester) *HostNetworkMock {\n\tm := &HostNetworkMock{t: t}\n\n\tif controller, ok := t.(minimock.MockController); ok {\n\t\tcontroller.RegisterMocker(m)\n\t}\n\n\tm.BuildResponseMock = mHostNetworkMockBuildResponse{mock: m}\n\tm.GetNodeIDMock = mHostNetworkMockGetNodeID{mock: m}\n\tm.NewRequestBuilderMock = mHostNetworkMockNewRequestBuilder{mock: m}\n\tm.PublicAddressMock = mHostNetworkMockPublicAddress{mock: m}\n\tm.RegisterRequestHandlerMock = mHostNetworkMockRegisterRequestHandler{mock: m}\n\tm.SendRequestMock = mHostNetworkMockSendRequest{mock: m}\n\tm.StartMock = mHostNetworkMockStart{mock: m}\n\tm.StopMock = mHostNetworkMockStop{mock: m}\n\n\treturn m\n}",
"func NewAdapter(cfg Config) (AdapterInterface, error) {\n\n\terr := validateCfg(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ta := &Adapter{\n\t\tcfg: cfg,\n\t}\n\n\terr = a.initLogFile()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn a, nil\n}",
"func makeRandomHost() (host.Host, *kaddht.IpfsDHT) {\n\tctx := context.Background()\n\tport := 10000 + rand.Intn(10000)\n\n\thost, err := libp2p.New(ctx,\n\t\tlibp2p.ListenAddrStrings(fmt.Sprintf(\"/ip4/0.0.0.0/tcp/%d\", port)),\n\t\tlibp2p.EnableRelay(circuit.OptHop, circuit.OptDiscovery))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Bootstrap the DHT. In the default configuration, this spawns a Background\n\t// thread that will refresh the peer table every five minutes.\n\tdht, err := kaddht.New(ctx, host)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\terr = dht.Bootstrap(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn host, dht\n}",
"func NewDisableHostUnauthorized() *DisableHostUnauthorized {\n\treturn &DisableHostUnauthorized{}\n}",
"func makeBasicHostZLH(listenPort int, secio bool, randseed int64) (host.Host, error) {\n\t// If the seed is zero, use real cryptographic randomness. Otherwise, use a\n\t// deterministic randomness source to make generated keys stay the same\n\t// across multiple runs\n\tvar r io.Reader\n\tif randseed == 0 {\n\t\tr = rand.Reader\n\t} else {\n\t\tr = mrand.New(mrand.NewSource(randseed))\n\t}\n\n\t// Generate a key pair for this host. We will use it\n\t// to obtain a valid host ID.\n\tpriv, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\topts := []libp2p.Option{\n\t\tlibp2p.ListenAddrStrings(fmt.Sprintf(\"/ip4/127.0.0.1/tcp/%d\", listenPort)),\n\t\tlibp2p.Identity(priv),\n\t}\n\n\tbasicHost, err := libp2p.New(context.Background(), opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Build host multiaddress\n\thostAddr, _ := multiaddr.NewMultiaddr(fmt.Sprintf(\"/ipfs/%s\", basicHost.ID().Pretty()))\n\n\t// Now we can build a full miltiaddress to reach this host by encapsulation both addresses:\n\taddr := basicHost.Addrs()[0]\n\tfullAddr := addr.Encapsulate(hostAddr)\n\tlog.Printf(\"I am %s\\n\", fullAddr)\n\tif secio {\n\t\tlog.Printf(\"Now run \\\"go run main.go -l %d -d %s -secio\\\" on a different terminal\\n\", listenPort+1, fullAddr)\n\t} else {\n\t\tlog.Printf(\"Now run \\\"go run main.go -l %d -d %s \\\" on a different terminal\\n\", listenPort+1, fullAddr)\n\t}\n\treturn basicHost, nil\n}",
"func (d *Driver) createNetworks() error {\n\tif err := d.createNetwork(\"default\", defaultNetworkTmpl); err != nil {\n\t\treturn errors.Wrap(err, \"creating default network\")\n\t}\n\tif err := d.createNetwork(d.NetworkName, privateNetworkTmpl); err != nil {\n\t\treturn errors.Wrap(err, \"creating private network\")\n\t}\n\n\treturn nil\n}",
"func (d *Driver) CreateNetwork(r *sdk.CreateNetworkRequest) error {\n\tvar netCidr *net.IPNet\n\tvar netGw string\n\tvar err error\n\tlog.Debugf(\"Network Create Called: [ %+v ]\", r)\n\tfor _, v4 := range r.IPv4Data {\n\t\tnetGw = v4.Gateway\n\t\t_, netCidr, err = net.ParseCIDR(v4.Pool)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Parse and validate the config. It should not be conflict with existing networks' config\n\tconfig, err := parseNetworkOptions(r.NetworkID, r.Options)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Generate a name for what will be the sandbox side pipe interface\n\tcontainerIfName, err := d.getContainerIfName(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debugf(\"containerIfName:%v\", containerIfName)\n\tconfig.ContainerIfName = containerIfName\n\n\tn := &network{\n\t\tid: r.NetworkID,\n\t\tconfig: config,\n\t\tendpoints: endpointTable{},\n\t\tcidr: netCidr,\n\t\tgateway: netGw,\n\t}\n\n\tbName, err := getBridgeName(r)\n\tconfig.BridgeName = bName\n\tlog.Debugf(\"bridgeName:%v\", bName)\n\n\t// Initialize handle when needed\n\td.Lock()\n\tif d.nlh == nil {\n\t\td.nlh = NlHandle()\n\t}\n\td.Unlock()\n\n\t// Create or retrieve the bridge L3 interface\n\tbridgeIface, err := newInterface(d.nlh, bName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tn.bridge = bridgeIface\n\tsetupDevice(bridgeIface)\n\tsetupDeviceUp(config, bridgeIface)\n\n\td.addNetwork(n)\n\treturn nil\n}",
"func NewHostNode(ctx context.Context, config Config, blockchain chain.Blockchain) (HostNode, error) {\n\tps := pstoremem.NewPeerstore()\n\tdb, err := NewDatabase(config.Path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = db.Initialize()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpriv, err := db.GetPrivKey()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// get saved hostnode\n\tsavedAddresses, err := db.GetSavedPeers()\n\tif err != nil {\n\t\tconfig.Log.Errorf(\"error retrieving saved hostnode: %s\", err)\n\t}\n\n\tnetAddr, err := net.ResolveTCPAddr(\"tcp\", \"0.0.0.0:\"+config.Port)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlisten, err := mnet.FromNetAddr(netAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlistenAddress := []multiaddr.Multiaddr{listen}\n\n\t//append saved addresses\n\tlistenAddress = append(listenAddress, savedAddresses...)\n\n\th, err := libp2p.New(\n\t\tctx,\n\t\tlibp2p.ListenAddrs(listenAddress...),\n\t\tlibp2p.Identity(priv),\n\t\tlibp2p.EnableRelay(),\n\t\tlibp2p.Peerstore(ps),\n\t)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taddrs, err := peer.AddrInfoToP2pAddrs(&peer.AddrInfo{\n\t\tID: h.ID(),\n\t\tAddrs: listenAddress,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, a := range addrs {\n\t\tconfig.Log.Infof(\"binding to address: %s\", a)\n\t}\n\n\t// setup gossip sub protocol\n\tg, err := pubsub.NewGossipSub(ctx, h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnode := &hostNode{\n\t\tprivateKey: config.PrivateKey,\n\t\thost: h,\n\t\tgossipSub: g,\n\t\tctx: ctx,\n\t\ttimeoutInterval: timeoutInterval,\n\t\theartbeatInterval: heartbeatInterval,\n\t\tlog: config.Log,\n\t\ttopics: map[string]*pubsub.Topic{},\n\t\tdb: db,\n\t}\n\n\tdiscovery, err := NewDiscoveryProtocol(ctx, node, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnode.discoveryProtocol = discovery\n\n\tsyncProtocol, err := NewSyncProtocol(ctx, node, config, blockchain)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\tnode.syncProtocol = syncProtocol\n\n\treturn node, nil\n}",
"func NewHostNS(address string, nodeID core.RecordRef, shortID core.ShortNodeID) (*Host, error) {\n\th, err := NewHostN(address, nodeID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\th.ShortID = shortID\n\treturn h, nil\n}",
"func New(address string, port int) *Host {\n\thost := Host{\n\t\taddress: address,\n\t\tport: port,\n\t}\n\treturn &host\n}",
"func (d *Driver) CreateNetwork(r *pluginNet.CreateNetworkRequest) error {\n\tdefer osl.InitOSContext()()\n\n\tid := r.NetworkID\n\topts := r.Options\n\tipV4Data := r.IPv4Data\n\tipV6Data := r.IPv6Data\n\tlogrus.Infof(\"CreateNetwork macvlan with networkID=%s,opts=%s\", id, opts)\n\n\tif id == \"\" {\n\t\treturn fmt.Errorf(\"invalid network id\")\n\t}\n\n\t// reject a null v4 network\n\tif len(ipV4Data) == 0 || ipV4Data[0].Pool == \"0.0.0.0/0\" {\n\t\treturn fmt.Errorf(\"ipv4 pool is empty\")\n\t}\n\n\t// parse and validate the config and bind to networkConfiguration\n\tconfig, err := parseNetworkOptions(id, opts)\n\tif err != nil {\n\t\tstr := fmt.Sprintf(\"CreateNetwork opts is invalid %s\", opts)\n\t\tlogrus.Errorf(str)\n\t\treturn fmt.Errorf(str)\n\t}\n\n\tconfig.ID = id\n\terr = config.processIPAM(id, ipV4Data, ipV6Data)\n\tif err != nil {\n\t\tstr := fmt.Sprintf(\"CreateNetwork ipV4Data is invalid %s\", ipV4Data)\n\t\tlogrus.Errorf(str)\n\t\treturn fmt.Errorf(str)\n\t}\n\t// verify the macvlan mode from -o macvlan_mode option\n\tswitch config.MacvlanMode {\n\tcase \"\", modeBridge:\n\t\t// default to macvlan bridge mode if -o macvlan_mode is empty\n\t\tconfig.MacvlanMode = modeBridge\n\tcase modePrivate:\n\t\tconfig.MacvlanMode = modePrivate\n\tcase modePassthru:\n\t\tconfig.MacvlanMode = modePassthru\n\tcase modeVepa:\n\t\tconfig.MacvlanMode = modeVepa\n\tdefault:\n\t\tstr := fmt.Sprintf(\"requested macvlan mode '%s' is not valid, 'bridge' mode is the macvlan driver default\", config.MacvlanMode)\n\t\tlogrus.Errorf(str)\n\t\treturn fmt.Errorf(str)\n\t}\n\t// loopback is not a valid parent link\n\tif config.Parent == \"lo\" {\n\t\tstr := fmt.Sprintf(\"loopback interface is not a valid %s parent link\", macvlanType)\n\t\tlogrus.Errorf(str)\n\t\treturn fmt.Errorf(str)\n\t}\n\t// if parent interface not specified, create a dummy type link to use named dummy+net_id\n\tif config.Parent == \"\" {\n\t\tconfig.Parent = 
getDummyName(stringid.TruncateID(config.ID))\n\t\t// empty parent and --internal are handled the same. Set here to update k/v\n\t\tconfig.Internal = true\n\t}\n\n\terr = d.createNetwork(config)\n\tif err != nil {\n\t\tstr := fmt.Sprintf(\"CreateNetwork is failed %v\", err)\n\t\tlogrus.Errorf(str)\n\t\treturn fmt.Errorf(str)\n\t}\n\n\treturn nil\n}",
"func NewHostDiscovery() HostDiscovery {\n\treturn &hostDiscovery{}\n}",
"func createVhostUserEndpoint(netInfo NetworkInfo, socket string) (*VhostUserEndpoint, error) {\n\n\tvhostUserEndpoint := &VhostUserEndpoint{\n\t\tSocketPath: socket,\n\t\tHardAddr: netInfo.Iface.HardwareAddr.String(),\n\t\tIfaceName: netInfo.Iface.Name,\n\t\tEndpointType: VhostUserEndpointType,\n\t}\n\treturn vhostUserEndpoint, nil\n}",
"func NewDinnerHostPtr(tableCount, maxParallel, maxDinner int) *DinnerHost {\n\thost := new(DinnerHost)\n\thost.Init(tableCount, maxParallel, maxDinner)\n\treturn host\n}",
"func networkCreateExample() string {\n\treturn `$ pouch network create -n pouchnet -d bridge --gateway 192.168.1.1 --subnet 192.168.1.0/24\npouchnet: e1d541722d68dc5d133cca9e7bd8fd9338603e1763096c8e853522b60d11f7b9`\n}",
"func NewAdapter() Adapter {\n\tbuilder := NewBuilder()\n\treturn createAdapter(builder)\n}",
"func newAdapter(config *AdapterConfig) (adapters.ListChecker, error) {\n\tvar u *url.URL\n\tvar err error\n\tif u, err = url.Parse(config.ProviderURL); err != nil {\n\t\t// bogus URL format\n\t\treturn nil, err\n\t}\n\n\ta := adapter{\n\t\tbackend: u,\n\t\tclosing: make(chan bool),\n\t\trefreshInterval: config.RefreshInterval,\n\t\tttl: config.TimeToLive,\n\t}\n\n\t// install an empty list\n\ta.setList([]*net.IPNet{})\n\n\t// load up the list synchronously so we're ready to accept traffic immediately\n\ta.refreshList()\n\n\t// crank up the async list refresher\n\tgo a.listRefresher()\n\n\treturn &a, nil\n}",
"func makeBasicHost(listenPort int, secio bool, randseed int64) { //(host.Host, error) {\n\n\t// If the seed is zero, use real cryptographic randomness. Otherwise, use a\n\t// deterministic randomness source to make generated keys stay the same\n\t// across multiple runs\n\tvar r io.Reader\n\tif randseed == 0 {\n\t\tr = rand.Reader\n\t} else {\n\t\tr = mrand.New(mrand.NewSource(randseed))\n\t}\n\tif *verbose { log.Printf(\"r = \", r) }\n\n\t// Generate a key pair for this host. We will use it\n\t// to obtain a valid host ID.\n\tpriv, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, r)\n\tif err != nil {\n\t\t//return nil, err\n\t\tlog.Fatal(err)\n\t}\n\tif *verbose { log.Printf(\"priv = \", priv) }\n\n\topts := []libp2p.Option{\n\t\tlibp2p.ListenAddrStrings(fmt.Sprintf(\"/ip4/\"+GetMyIP()+\"/tcp/%d\", listenPort)),\n\t\tlibp2p.Identity(priv),\n\t}\n\tif *verbose { log.Printf(\"opts = \", opts) }\n\n\tbasicHost, err := libp2p.New(context.Background(), opts...)\n\tif err != nil {\n\t\t//return nil, err\n\t\tlog.Fatal(err)\n\t}\n\tif *verbose {\n\t\tlog.Printf(\"basicHost = \", basicHost)\n\t\tlog.Printf(\"basicHost.ID() = \", basicHost.ID())\n\t\tlog.Printf(\"basicHost.ID().Pretty() = \", basicHost.ID().Pretty())\n\t}\n\n\t// Build host multiaddress\n\thostAddr, _ := ma.NewMultiaddr(fmt.Sprintf(\"/ipfs/%s\", basicHost.ID().Pretty()))\n\tif *verbose { log.Printf(\"hostAddr = \", hostAddr) }\n\n\t// Now we can build a full multiaddress to reach this host\n\t// by encapsulating both addresses:\n\taddr := basicHost.Addrs()[0]\n\tif *verbose { log.Printf(\"addr = \", addr) }\n\tfullAddr := addr.Encapsulate(hostAddr)\n\tlog.Printf(\"My fullAddr = %s\\n\", fullAddr)\n\tif secio {\n\t\tlog.Printf(\"Now run \\\"go run defs.go p2p.go mux.go blockchain.go main.go -l %d -d %s -secio\\\" on a different terminal\\n\", listenPort+1, fullAddr)\n\t} else {\n\t\tlog.Printf(\"Now run \\\"go run defs.go p2p.go mux.go blockchain.go main.go -l %d -d %s\\\" on a different 
terminal\\n\", listenPort+1, fullAddr)\n\t}\n\n\t//return basicHost, nil\n\tha = basicHost // ha defined in defs.go\n\tif *verbose { log.Printf(\"basicHost = \", ha) }\n}",
"func createHostQUIC(port int) (core.Host, error) {\n\t// Producing private key\n\tpriv, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, rand.Reader)\n\n\tquicTransport, err := quic.NewTransport(priv, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Starting a peer with QUIC transport\n\topts := []libp2p.Option{\n\t\tlibp2p.ListenAddrStrings(fmt.Sprintf(\"/ip4/0.0.0.0/udp/%d/quic\", port)),\n\t\tlibp2p.Transport(quicTransport),\n\t\tlibp2p.Identity(priv),\n\t\tlibp2p.DefaultMuxers,\n\t\tlibp2p.DefaultSecurity,\n\t}\n\n\th, err := libp2p.New(context.Background(), opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn h, nil\n}",
"func CreateDefaultExtNetwork(networkType string) error {\n\treturn fmt.Errorf(\"CreateDefaultExtNetwork shouldn't be called for linux platform\")\n}",
"func setupHost(ctx context.Context) (host.Host, *dht.IpfsDHT) {\n\t// Set up the host identity options\n\tprvkey, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, rand.Reader)\n\tidentity := libp2p.Identity(prvkey)\n\t// Handle any potential error\n\tif err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Fatalln(\"Failed to Generate P2P Identity Configuration!\")\n\t}\n\n\t// Trace log\n\tlogrus.Traceln(\"Generated P2P Identity Configuration.\")\n\n\t// Set up TCP, QUIC transport and options\n\ttlstransport, err := tls.New(prvkey)\n\tsecurity := libp2p.Security(tls.ID, tlstransport)\n\ttcpTransport := libp2p.Transport(tcp.NewTCPTransport)\n\tquicTransport := libp2p.Transport(libp2pquic.NewTransport)\n\t// Handle any potential error\n\tif err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Fatalln(\"Failed to Generate P2P Security and Transport Configurations!\")\n\t}\n\n\t// Trace log\n\tlogrus.Traceln(\"Generated P2P Security and Transport Configurations.\")\n\n\t// Set up host listener address options\n\ttcpMuladdr4, err := multiaddr.NewMultiaddr(\"/ip4/0.0.0.0/tcp/0\")\n\ttcpMuladdr6, err1 := multiaddr.NewMultiaddr(\"/ip6/::/tcp/0\")\n\tquicMuladdr4, err2 := multiaddr.NewMultiaddr(\"/ip4/0.0.0.0/udp/0/quic\")\n\tquicMuladdr6, err3 := multiaddr.NewMultiaddr(\"/ip6/::/udp/0/quic\")\n\tlisten := libp2p.ListenAddrs(quicMuladdr6, quicMuladdr4, tcpMuladdr6, tcpMuladdr4)\n\t// Handle any potential error\n\tcheckErr(err)\n\tcheckErr(err1)\n\tcheckErr(err2)\n\tcheckErr(err3)\n\n\t// Trace log\n\tlogrus.Traceln(\"Generated P2P Address Listener Configuration.\")\n\n\t// Set up the stream multiplexer and connection manager options\n\tmuxer := libp2p.Muxer(\"/yamux/1.0.0\", yamux.DefaultTransport)\n\tconn := libp2p.ConnectionManager(connmgr.NewConnManager(100, 400, time.Minute))\n\n\t// Trace log\n\tlogrus.Traceln(\"Generated P2P Stream Multiplexer, Connection Manager 
Configurations.\")\n\n\t// Setup NAT traversal and relay options\n\tnat := libp2p.NATPortMap()\n\trelay := libp2p.EnableAutoRelay()\n\n\t// Trace log\n\tlogrus.Traceln(\"Generated P2P NAT Traversal and Relay Configurations.\")\n\n\t// Declare a KadDHT\n\tvar kaddht *dht.IpfsDHT\n\t// Setup a routing configuration with the KadDHT\n\trouting := libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) {\n\t\tkaddht = setupKadDHT(ctx, h)\n\t\treturn kaddht, err\n\t})\n\n\t// Trace log\n\tlogrus.Traceln(\"Generated P2P Routing Configurations.\")\n\n\topts := libp2p.ChainOptions(identity, listen, security, tcpTransport, quicTransport, muxer, conn, nat, routing, relay)\n\n\t// Construct a new libP2P host with the created options\n\tlibhost, err := libp2p.New(ctx, opts)\n\t// Handle any potential error\n\tif err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Fatalln(\"Failed to Create the P2P Host!\")\n\t}\n\n\t// Return the created host and the kademlia DHT\n\treturn libhost, kaddht\n}",
"func ConfigHostNew() *ConfigHost {\n\th := ConfigHost{\n\t\tTLS: regclient.TLSEnabled,\n\t}\n\treturn &h\n}",
"func RequireHostHeader(allowed []string, h *render.Renderer, stripPort bool) mux.MiddlewareFunc {\n\twant := make(map[string]struct{}, len(allowed))\n\tfor _, v := range allowed {\n\t\twant[strings.ToLower(v)] = struct{}{}\n\t}\n\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\thost := strings.ToLower(r.Host)\n\n\t\t\tif stripPort {\n\t\t\t\tif i := strings.Index(host, \":\"); i > 0 {\n\t\t\t\t\thost = host[0:i]\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif _, ok := want[host]; !ok {\n\t\t\t\tcontroller.Unauthorized(w, r, h)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}",
"func (c *Client) MakeHost(pk ci.PrivKey, opts *HostOpts) error {\n\thost, err := makeHost(pk, opts, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.Host = host\n\treturn nil\n}",
"func (client WorkloadNetworksClient) CreateDhcpResponder(resp *http.Response) (result WorkloadNetworkDhcp, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
"func AddHost(name, addr string) (*Host, error) {\n\t// Create a network namespace\n\th, err := NewHost(name)\n\tif err != nil {\n\t\tlog.Fatal(\"failed to NewHost: \", err)\n\t}\n\t// setup a veth pair\n\t_, err = h.setupVeth(\"eth2\", 1500)\n\tif err != nil {\n\t\tlog.Fatal(\"failed to open netns: \", err)\n\t}\n\t// setup a IP for host\n\th.setIfaceIP(addr)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to setIfaceIP for %s: %v\", h.Name, err)\n\t\treturn nil, err\n\t}\n\treturn h, nil\n}",
"func (data *DNSData) AddNetworkAdapter(name string, networkAdapter compute.VirtualMachineNetworkAdapter) {\n\tif networkAdapter.PrivateIPv4Address != nil {\n\t\tdata.Add(name,\n\t\t\tnet.ParseIP(*networkAdapter.PrivateIPv4Address),\n\t\t)\n\t}\n\tif networkAdapter.PrivateIPv6Address != nil {\n\t\tdata.Add(name,\n\t\t\tnet.ParseIP(*networkAdapter.PrivateIPv6Address),\n\t\t)\n\t}\n}"
] | [
"0.5605203",
"0.5528005",
"0.5470935",
"0.54345465",
"0.5420929",
"0.53596747",
"0.53540576",
"0.5293824",
"0.52900076",
"0.52677083",
"0.52036476",
"0.52007806",
"0.5181415",
"0.5149478",
"0.5139053",
"0.511084",
"0.5104876",
"0.5074306",
"0.50325537",
"0.5014431",
"0.49915975",
"0.49881247",
"0.4966575",
"0.49329117",
"0.49076483",
"0.49035245",
"0.48922315",
"0.48681793",
"0.48655298",
"0.4823171",
"0.48156744",
"0.48128363",
"0.48018137",
"0.47923383",
"0.47837648",
"0.47458747",
"0.4735962",
"0.46994296",
"0.46994296",
"0.46994296",
"0.46994296",
"0.46994296",
"0.4696455",
"0.46826854",
"0.46820843",
"0.46702752",
"0.4662444",
"0.4655375",
"0.464832",
"0.46443158",
"0.4642055",
"0.46334925",
"0.46124744",
"0.45790285",
"0.45711532",
"0.45686564",
"0.45634842",
"0.455431",
"0.45513067",
"0.45407212",
"0.45236543",
"0.4523336",
"0.4520994",
"0.4507867",
"0.45068806",
"0.4505637",
"0.45052296",
"0.4494038",
"0.44896004",
"0.44885153",
"0.44827223",
"0.44744426",
"0.44680947",
"0.446155",
"0.4459507",
"0.4436449",
"0.44334635",
"0.44325364",
"0.44236764",
"0.44221917",
"0.44172123",
"0.44113755",
"0.4392641",
"0.43924427",
"0.43910626",
"0.43902966",
"0.43844253",
"0.43835723",
"0.43822372",
"0.43778163",
"0.43753082",
"0.43658537",
"0.43643335",
"0.43636107",
"0.43635625",
"0.43574765",
"0.43531704",
"0.43468437",
"0.43453753",
"0.43415263"
] | 0.84993964 | 0 |
listHostOnlyAdapters gets all hostonly adapters in a map keyed by NetworkName. | listHostOnlyAdapters получает все адаптеры hostonly в виде карты, ключом которой является NetworkName. | func listHostOnlyAdapters(vbox VBoxManager) (map[string]*hostOnlyNetwork, error) {
out, err := vbox.vbmOut("list", "hostonlyifs")
if err != nil {
return nil, err
}
byName := map[string]*hostOnlyNetwork{}
byIP := map[string]*hostOnlyNetwork{}
n := &hostOnlyNetwork{}
err = parseKeyValues(out, reColonLine, func(key, val string) error {
switch key {
case "Name":
n.Name = val
case "GUID":
n.GUID = val
case "DHCP":
n.DHCP = (val != "Disabled")
case "IPAddress":
n.IPv4.IP = net.ParseIP(val)
case "NetworkMask":
n.IPv4.Mask = parseIPv4Mask(val)
case "HardwareAddress":
mac, err := net.ParseMAC(val)
if err != nil {
return err
}
n.HwAddr = mac
case "MediumType":
n.Medium = val
case "Status":
n.Status = val
case "VBoxNetworkName":
n.NetworkName = val
if _, present := byName[n.NetworkName]; present {
return fmt.Errorf("VirtualBox is configured with multiple host-only adapters with the same name %q. Please remove one.", n.NetworkName)
}
byName[n.NetworkName] = n
if len(n.IPv4.IP) != 0 {
if _, present := byIP[n.IPv4.IP.String()]; present {
return fmt.Errorf("VirtualBox is configured with multiple host-only adapters with the same IP %q. Please remove one.", n.IPv4.IP)
}
byIP[n.IPv4.IP.String()] = n
}
n = &hostOnlyNetwork{}
}
return nil
})
if err != nil {
return nil, err
}
return byName, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func listDHCPServers(vbox VBoxManager) (map[string]*dhcpServer, error) {\n\tout, err := vbox.vbmOut(\"list\", \"dhcpservers\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm := map[string]*dhcpServer{}\n\tdhcp := &dhcpServer{}\n\n\terr = parseKeyValues(out, reColonLine, func(key, val string) error {\n\t\tswitch key {\n\t\tcase \"NetworkName\":\n\t\t\tdhcp = &dhcpServer{}\n\t\t\tm[val] = dhcp\n\t\t\tdhcp.NetworkName = val\n\t\tcase \"IP\":\n\t\t\tdhcp.IPv4.IP = net.ParseIP(val)\n\t\tcase \"upperIPAddress\":\n\t\t\tdhcp.UpperIP = net.ParseIP(val)\n\t\tcase \"lowerIPAddress\":\n\t\t\tdhcp.LowerIP = net.ParseIP(val)\n\t\tcase \"NetworkMask\":\n\t\t\tdhcp.IPv4.Mask = parseIPv4Mask(val)\n\t\tcase \"Enabled\":\n\t\t\tdhcp.Enabled = (val == \"Yes\")\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m, nil\n}",
"func AddressesForHost(host string) []string {\n\tss := collection.NewStringSet()\n\tif host == \"\" { // All address on machine\n\t\tif iFaces, err := net.Interfaces(); err == nil {\n\t\t\tfor _, iFace := range iFaces {\n\t\t\t\tconst interesting = net.FlagUp | net.FlagBroadcast\n\t\t\t\tif iFace.Flags&interesting == interesting {\n\t\t\t\t\tvar addrs []net.Addr\n\t\t\t\t\tif addrs, err = iFace.Addrs(); err == nil {\n\t\t\t\t\t\tfor _, addr := range addrs {\n\t\t\t\t\t\t\tvar ip net.IP\n\t\t\t\t\t\t\tswitch v := addr.(type) {\n\t\t\t\t\t\t\tcase *net.IPNet:\n\t\t\t\t\t\t\t\tip = v.IP\n\t\t\t\t\t\t\tcase *net.IPAddr:\n\t\t\t\t\t\t\t\tip = v.IP\n\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif ip.IsGlobalUnicast() {\n\t\t\t\t\t\t\t\tss.Add(ip.String())\n\t\t\t\t\t\t\t\tvar names []string\n\t\t\t\t\t\t\t\tif names, err = net.LookupAddr(ip.String()); err == nil {\n\t\t\t\t\t\t\t\t\tfor _, name := range names {\n\t\t\t\t\t\t\t\t\t\tif strings.HasSuffix(name, \".\") {\n\t\t\t\t\t\t\t\t\t\t\tname = name[:len(name)-1]\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\tss.Add(name)\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tss.Add(host)\n\t\tif net.ParseIP(host) == nil {\n\t\t\tif ips, err := net.LookupIP(host); err == nil && len(ips) > 0 {\n\t\t\t\tfor _, ip := range ips {\n\t\t\t\t\tss.Add(ip.String())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfor _, one := range []string{\"::\", \"::1\", \"127.0.0.1\"} {\n\t\tif ss.Contains(one) {\n\t\t\tdelete(ss, one)\n\t\t\tss.Add(\"localhost\")\n\t\t}\n\t}\n\taddrs := ss.Values()\n\tsort.Slice(addrs, func(i, j int) bool {\n\t\tisName1 := net.ParseIP(addrs[i]) == nil\n\t\tisName2 := net.ParseIP(addrs[j]) == nil\n\t\tif isName1 == isName2 {\n\t\t\treturn txt.NaturalLess(addrs[i], addrs[j], true)\n\t\t}\n\t\treturn isName1\n\t})\n\treturn addrs\n}",
"func createHostonlyAdapter(vbox VBoxManager) (*hostOnlyNetwork, error) {\n\tout, err := vbox.vbmOut(\"hostonlyif\", \"create\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres := reHostOnlyAdapterCreated.FindStringSubmatch(string(out))\n\tif res == nil {\n\t\treturn nil, errors.New(\"Failed to create host-only adapter\")\n\t}\n\n\treturn &hostOnlyNetwork{Name: res[1]}, nil\n}",
"func (c *Eth) ShowList() ([]string, error) {\n c.con.LogQuery(\"(show) list of ethernet interfaces\")\n path := c.xpath(nil)\n return c.con.EntryListUsing(c.con.Show, path[:len(path) - 1])\n}",
"func NonTailscaleInterfaces() (map[winipcfg.LUID]*winipcfg.IPAdapterAddresses, error) {\n\tifs, err := winipcfg.GetAdaptersAddresses(windows.AF_UNSPEC, winipcfg.GAAFlagIncludeAllInterfaces)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tret := map[winipcfg.LUID]*winipcfg.IPAdapterAddresses{}\n\tfor _, iface := range ifs {\n\t\tif iface.Description() == tsconst.WintunInterfaceDesc {\n\t\t\tcontinue\n\t\t}\n\t\tret[iface.LUID] = iface\n\t}\n\n\treturn ret, nil\n}",
"func (client *Client) ShowHostMaps(host string) ([]Volume, *ResponseStatus, error) {\n\tif len(host) > 0 {\n\t\thost = fmt.Sprintf(\"\\\"%s\\\"\", host)\n\t}\n\tres, status, err := client.FormattedRequest(\"/show/host-maps/%s\", host)\n\tif err != nil {\n\t\treturn nil, status, err\n\t}\n\n\tmappings := make([]Volume, 0)\n\tfor _, rootObj := range res.Objects {\n\t\tif rootObj.Name != \"host-view\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, object := range rootObj.Objects {\n\t\t\tif object.Name == \"volume-view\" {\n\t\t\t\tvol := Volume{}\n\t\t\t\tvol.fillFromObject(&object)\n\t\t\t\tmappings = append(mappings, vol)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn mappings, status, err\n}",
"func (d *RestrictedDialer) AllowedHosts() []string {\n\tranges := []string{}\n\tfor _, ipRange := range d.allowedHosts {\n\t\tranges = append(ranges, ipRange.String())\n\t}\n\treturn ranges\n}",
"func (m *MicroService) blackListHost(host string, blacklist bool) {\r\n\tfor idx, inst := range m.Instances {\r\n\t\tif inst.Host == host {\r\n\t\t\tm.blackList(idx, blacklist)\r\n\t\t}\r\n\t}\r\n}",
"func (h *ConfigHandler) GetHostList(ctx *fasthttp.RequestCtx) {\n\tuser, ok := common.GlobalSession.GetUser(ctx.ID())\n\tif !ok {\n\t\th.WriteJSON(ctx, nil, common.NewNotLoginError())\n\t\treturn\n\t}\n\n\tconf, err := h.Service.GetVPNConfig(context.Background(), &user)\n\tif err != nil {\n\t\th.WriteJSON(ctx, nil, err)\n\t\treturn\n\t}\n\tdata := vpnConfigResponseEncode(conf)\n\th.WriteJSON(ctx, map[string]interface{}{\n\t\t\"list\": data.Hosts,\n\t}, nil)\n\treturn\n}",
"func getHostOnlyNetworkInterface(mc *driver.MachineConfig) (string, error) {\n\t// Check if the interface/dhcp exists.\n\tnets, err := HostonlyNets()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdhcps, err := DHCPs()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, n := range nets {\n\t\tif dhcp, ok := dhcps[n.NetworkName]; ok {\n\t\t\tif dhcp.IPv4.IP.Equal(mc.DHCPIP) &&\n\t\t\t\tdhcp.IPv4.Mask.String() == mc.NetMask.String() &&\n\t\t\t\tdhcp.LowerIP.Equal(mc.LowerIP) &&\n\t\t\t\tdhcp.UpperIP.Equal(mc.UpperIP) &&\n\t\t\t\tdhcp.Enabled == mc.DHCPEnabled {\n\t\t\t\treturn n.Name, nil\n\t\t\t}\n\t\t}\n\t}\n\n\t// No existing host-only interface found. Create a new one.\n\thostonlyNet, err := CreateHostonlyNet()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thostonlyNet.IPv4.IP = mc.HostIP\n\thostonlyNet.IPv4.Mask = mc.NetMask\n\tif err := hostonlyNet.Config(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Create and add a DHCP server to the host-only network\n\tdhcp := driver.DHCP{}\n\tdhcp.IPv4.IP = mc.DHCPIP\n\tdhcp.IPv4.Mask = mc.NetMask\n\tdhcp.LowerIP = mc.LowerIP\n\tdhcp.UpperIP = mc.UpperIP\n\tdhcp.Enabled = true\n\tif err := AddHostonlyDHCP(hostonlyNet.Name, dhcp); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hostonlyNet.Name, nil\n}",
"func WhiteListHostFilter(hosts ...string) HostFilter {\n\thostInfos, err := addrsToHosts(hosts, 9042)\n\tif err != nil {\n\t\t// dont want to panic here, but rather not break the API\n\t\tpanic(fmt.Errorf(\"unable to lookup host info from address: %v\", err))\n\t}\n\n\tm := make(map[string]bool, len(hostInfos))\n\tfor _, host := range hostInfos {\n\t\tm[host.ConnectAddress().String()] = true\n\t}\n\n\treturn HostFilterFunc(func(host *HostInfo) bool {\n\t\treturn m[host.ConnectAddress().String()]\n\t})\n}",
"func (b *BlubberBlockDirectory) ListHosts(\n\tvoid blubberstore.Empty, hosts *blubberstore.BlockHolderList) error {\n\tvar host string\n\tb.blockMapMtx.RLock()\n\tdefer b.blockMapMtx.RUnlock()\n\n\tfor host, _ = range b.blockHostMap {\n\t\thosts.HostPort = append(hosts.HostPort, host)\n\t}\n\n\treturn nil\n}",
"func NewGetAdapterHostEthInterfacesDefault(code int) *GetAdapterHostEthInterfacesDefault {\n\treturn &GetAdapterHostEthInterfacesDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func addHostOnlyDHCPServer(ifname string, d dhcpServer, vbox VBoxManager) error {\n\tname := dhcpPrefix + ifname\n\n\tdhcps, err := listDHCPServers(vbox)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// On some platforms (OSX), creating a host-only adapter adds a default dhcpserver,\n\t// while on others (Windows?) it does not.\n\tcommand := \"add\"\n\tif dhcp, ok := dhcps[name]; ok {\n\t\tcommand = \"modify\"\n\t\tif (dhcp.IPv4.IP.Equal(d.IPv4.IP)) && (dhcp.IPv4.Mask.String() == d.IPv4.Mask.String()) && (dhcp.LowerIP.Equal(d.LowerIP)) && (dhcp.UpperIP.Equal(d.UpperIP)) && dhcp.Enabled {\n\t\t\t// dhcp is up to date\n\t\t\treturn nil\n\t\t}\n\t}\n\n\targs := []string{\"dhcpserver\", command,\n\t\t\"--netname\", name,\n\t\t\"--ip\", d.IPv4.IP.String(),\n\t\t\"--netmask\", net.IP(d.IPv4.Mask).String(),\n\t\t\"--lowerip\", d.LowerIP.String(),\n\t\t\"--upperip\", d.UpperIP.String(),\n\t}\n\tif d.Enabled {\n\t\targs = append(args, \"--enable\")\n\t} else {\n\t\targs = append(args, \"--disable\")\n\t}\n\n\treturn vbox.vbm(args...)\n}",
"func (r *reader) GetBlockedHostnames() (hostnames []string, err error) {\n\ts, err := r.envParams.GetEnv(\"BLOCK_HOSTNAMES\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(s) == 0 {\n\t\treturn nil, nil\n\t}\n\thostnames = strings.Split(s, \",\")\n\tfor _, hostname := range hostnames {\n\t\tif !r.verifier.MatchHostname(hostname) {\n\t\t\treturn nil, fmt.Errorf(\"hostname %q does not seem valid\", hostname)\n\t\t}\n\t}\n\treturn hostnames, nil\n}",
"func (r *reader) GetUnblockedHostnames() (hostnames []string, err error) {\n\ts, err := r.envParams.GetEnv(\"UNBLOCK\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(s) == 0 {\n\t\treturn nil, nil\n\t}\n\thostnames = strings.Split(s, \",\")\n\tfor _, hostname := range hostnames {\n\t\tif !r.verifier.MatchHostname(hostname) {\n\t\t\treturn nil, fmt.Errorf(\"hostname %q does not seem valid\", hostname)\n\t\t}\n\t}\n\treturn hostnames, nil\n}",
"func (p *PorterHelper) List() (map[string]string, error) {\n\tentries := p.Cache.List()\n\n\tres := make(map[string]string)\n\n\tfor _, entry := range entries {\n\t\tres[entry.ProxyEndpoint] = entry.AuthorizationToken\n\t}\n\n\treturn res, nil\n}",
"func ips(h *hostsfile.Hostsfile, ipnet *net.IPNet) map[string]bool {\n\tips := make(map[string]bool)\n\t// Make sure we never touch localhost (may be missing in hosts-file).\n\tif ipnet.Contains(net.IP{127, 0, 0, 1}) {\n\t\tips[\"127.0.0.1\"] = true\n\t}\n\tfor _, r := range h.Records() {\n\t\tif ipnet.Contains(r.IpAddress.IP) {\n\t\t\tips[r.IpAddress.String()] = true\n\t\t}\n\t}\n\treturn ips\n}",
"func HostIP() []string {\n\tvar out []string\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\tlogs.WithFields(logs.Fields{\n\t\t\t\"Error\": err,\n\t\t}).Error(\"Unable to resolve net.InterfaceAddrs\")\n\t}\n\tfor _, addr := range addrs {\n\t\t// check the address type and if it is not a loopback the display it\n\t\tif ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {\n\t\t\tif ipnet.IP.To4() != nil {\n\t\t\t\tout = append(out, ipnet.IP.String())\n\t\t\t}\n\t\t\tif ipnet.IP.To16() != nil {\n\t\t\t\tout = append(out, ipnet.IP.String())\n\t\t\t}\n\t\t}\n\t}\n\treturn List2Set(out)\n}",
"func (api *PublicStorageHostManagerAPI) FilteredHosts() (allFiltered []storage.HostInfo) {\n\treturn api.shm.filteredTree.All()\n}",
"func (p *ProxySQL) HostsLike(opts ...HostOpts) ([]*Host, error) {\n\tmut.RLock()\n\tdefer mut.RUnlock()\n\thostq, err := buildAndParseHostQuery(opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// run query built from these opts\n\trows, err := query(p, buildSelectQuery(hostq))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\tentries := make([]*Host, 0)\n\tfor rows.Next() {\n\t\tvar (\n\t\t\thostgroup_id int\n\t\t\thostname string\n\t\t\tport int\n\t\t\tstatus string\n\t\t\tweight int\n\t\t\tcompression int\n\t\t\tmax_connections int\n\t\t\tmax_replication_lag int\n\t\t\tuse_ssl int\n\t\t\tmax_latency_ms int\n\t\t\tcomment string\n\t\t)\n\t\terr := scanRows(rows, &hostgroup_id, &hostname, &port, &status, &weight, &compression, &max_connections, &max_replication_lag, &use_ssl, &max_latency_ms, &comment)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thost := &Host{hostgroup_id, hostname, port, status, weight, compression, max_connections, max_replication_lag, use_ssl, max_latency_ms, comment}\n\t\tentries = append(entries, host)\n\t}\n\tif rowsErr(rows) != nil && rowsErr(rows) != sql.ErrNoRows {\n\t\treturn nil, rowsErr(rows)\n\t}\n\treturn entries, nil\n}",
"func (a *Alias) HostedZones() []string {\n\tvar hostedZones []string\n\tfor _, alias := range a.AdvancedAliases {\n\t\tif alias.HostedZone != nil {\n\t\t\thostedZones = append(hostedZones, *alias.HostedZone)\n\t\t}\n\t}\n\treturn hostedZones\n}",
"func (d *portworx) GetPoolDrives(n *node.Node) (map[string][]string, error) {\n\tsystemOpts := node.SystemctlOpts{\n\t\tConnectionOpts: node.ConnectionOpts{\n\t\t\tTimeout: startDriverTimeout,\n\t\t\tTimeBeforeRetry: defaultRetryInterval,\n\t\t},\n\t\tAction: \"start\",\n\t}\n\tpoolDrives := make(map[string][]string, 0)\n\tlog.Infof(\"Getting available block drives on node [%s]\", n.Name)\n\tblockDrives, err := d.nodeDriver.GetBlockDrives(*n, systemOpts)\n\n\tif err != nil {\n\t\treturn poolDrives, err\n\t}\n\tfor _, v := range blockDrives {\n\t\tlabelsMap := v.Labels\n\t\tif pm, ok := labelsMap[\"pxpool\"]; ok {\n\t\t\tpoolDrives[pm] = append(poolDrives[pm], v.Path)\n\t\t}\n\t}\n\treturn poolDrives, nil\n}",
"func gatherHostportMappings(podPortMapping *PodPortMapping, isIPv6 bool) []*PortMapping {\n\tmappings := []*PortMapping{}\n\tfor _, pm := range podPortMapping.PortMappings {\n\t\tif pm.HostPort <= 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif pm.HostIP != \"\" && utilnet.IsIPv6String(pm.HostIP) != isIPv6 {\n\t\t\tcontinue\n\t\t}\n\t\tmappings = append(mappings, pm)\n\t}\n\treturn mappings\n}",
"func (o *AggregatedDomain) HostInterfaces(info *bambou.FetchingInfo) (HostInterfacesList, *bambou.Error) {\n\n\tvar list HostInterfacesList\n\terr := bambou.CurrentSession().FetchChildren(o, HostInterfaceIdentity, &list, info)\n\treturn list, err\n}",
"func (api *hostAPI) List(ctx context.Context, opts *api.ListWatchOptions) ([]*Host, error) {\n\tvar objlist []*Host\n\tobjs, err := api.ct.List(\"Host\", ctx, opts)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, obj := range objs {\n\t\tswitch tp := obj.(type) {\n\t\tcase *Host:\n\t\t\teobj := obj.(*Host)\n\t\t\tobjlist = append(objlist, eobj)\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Got invalid object type %v while looking for Host\", tp)\n\t\t}\n\t}\n\n\treturn objlist, nil\n}",
"func AdvertiseHost(listen string) string {\n\tif listen == \"0.0.0.0\" {\n\t\taddrs, err := net.InterfaceAddrs()\n\t\tif err != nil || len(addrs) == 0 {\n\t\t\treturn \"localhost\"\n\t\t}\n\n\t\tfor _, addr := range addrs {\n\t\t\tif ip, ok := addr.(*net.IPNet); ok && !ip.IP.IsLoopback() && ip.IP.To4() != nil {\n\t\t\t\treturn ip.IP.To4().String()\n\t\t\t}\n\t\t}\n\t\treturn \"localhost\"\n\t}\n\n\treturn listen\n}",
"func (p partition) Hosts() []string {\n\tvar res []string\n\tfor _, a := range p {\n\t\tres = append(res, a.node.Address())\n\t}\n\treturn res\n}",
"func (ls *LocalStorage) filterAllowedNodes(clients map[string]provisioner.API, deploymentName, role string) ([]provisioner.API, error) {\n\t// Find all PVs for given deployment & role\n\tlist, err := ls.deps.KubeCli.CoreV1().PersistentVolumes().List(metav1.ListOptions{\n\t\tLabelSelector: fmt.Sprintf(\"%s=%s,%s=%s\", k8sutil.LabelKeyArangoDeployment, deploymentName, k8sutil.LabelKeyRole, role),\n\t})\n\tif err != nil {\n\t\treturn nil, maskAny(err)\n\t}\n\texcludedNodes := make(map[string]struct{})\n\tfor _, pv := range list.Items {\n\t\tnodeName := pv.GetAnnotations()[nodeNameAnnotation]\n\t\texcludedNodes[nodeName] = struct{}{}\n\t}\n\tresult := make([]provisioner.API, 0, len(clients))\n\tfor nodeName, c := range clients {\n\t\tif _, found := excludedNodes[nodeName]; !found {\n\t\t\tresult = append(result, c)\n\t\t}\n\t}\n\treturn result, nil\n}",
"func boundIPs(c *caddy.Controller) (ips []net.IP) {\n\tconf := dnsserver.GetConfig(c)\n\thosts := conf.ListenHosts\n\tif hosts == nil || hosts[0] == \"\" {\n\t\thosts = nil\n\t\taddrs, err := net.InterfaceAddrs()\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tfor _, addr := range addrs {\n\t\t\thosts = append(hosts, addr.String())\n\t\t}\n\t}\n\tfor _, host := range hosts {\n\t\tip, _, _ := net.ParseCIDR(host)\n\t\tip4 := ip.To4()\n\t\tif ip4 != nil && !ip4.IsLoopback() {\n\t\t\tips = append(ips, ip4)\n\t\t\tcontinue\n\t\t}\n\t\tip6 := ip.To16()\n\t\tif ip6 != nil && !ip6.IsLoopback() {\n\t\t\tips = append(ips, ip6)\n\t\t}\n\t}\n\treturn ips\n}",
"func NewBindHostForbidden() *BindHostForbidden {\n\treturn &BindHostForbidden{}\n}",
"func ListWifiNetworks() []string {\n\ts := spinner.New(spinner.CharSets[11], 100*time.Millisecond)\n\ts.Prefix = \"Searching networks: \"\n\ts.Start()\n\n\tlistWifiCmd := exec.Command(\"bash\", \"-c\", \"/System/Library/PrivateFrameworks/Apple80211.framework/Versions/A/Resources/airport scan\")\n\twifiList, err := listWifiCmd.Output()\n\tHandleError(err)\n\ts.Stop()\n\n\tif len(wifiList) == 0 {\n\t\tfmt.Println(\"There are no available networks\")\n\t\tif isWifiOff() {\n\t\t\tfmt.Println(\"It looks like the wifi is off. You can turn it on running `wificli on`\")\n\t\t}\n\t\tos.Exit(1)\n\t}\n\n\tvar list []string\n\twifiListArray := strings.Split(string(wifiList), \"\\n\")[1:]\n\tfor _, network := range wifiListArray {\n\t\tnetworkFields := strings.Fields(network)\n\t\tif len(networkFields) > 0 {\n\t\t\tlist = append(list, networkFields[0])\n\t\t}\n\t}\n\n\treturn list\n}",
"func GetHostAliases(ctx context.Context) ([]string, error) {\n\tname, err := GetHostname(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't extract a host alias from the kubelet: %w\", err)\n\t}\n\tif err := validate.ValidHostname(name); err != nil {\n\t\treturn nil, fmt.Errorf(\"host alias from kubelet is not valid: %w\", err)\n\t}\n\treturn []string{name}, nil\n}",
"func (mc *MockContiv) GetHostIPs() []net.IP {\n\treturn mc.hostIPs\n}",
"func (p *pgSerDe) ListDbClientIps() ([]string, error) {\n\tconst sql = \"SELECT DISTINCT(client_addr) FROM pg_stat_activity\"\n\trows, err := p.dbConn.Query(sql)\n\tif err != nil {\n\t\tlog.Printf(\"ListDbClientIps(): error querying database: %v\", err)\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tresult := make([]string, 0)\n\tfor rows.Next() {\n\t\tvar addr *string\n\t\tif err := rows.Scan(&addr); err != nil {\n\t\t\tlog.Printf(\"ListDbClientIps(): error scanning row: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\t\tif addr != nil {\n\t\t\tresult = append(result, *addr)\n\t\t}\n\t}\n\treturn result, nil\n}",
"func (p *PCache) List(kind string) map[string]interface{} {\n\tp.RLock()\n\tkindMap := p.kinds[kind]\n\tp.RUnlock()\n\titems := map[string]interface{}{}\n\n\tif kindMap != nil {\n\t\tkindMap.Lock()\n\t\tfor key, entry := range kindMap.entries {\n\t\t\titems[key] = entry\n\t\t}\n\t\tkindMap.Unlock()\n\t}\n\n\treturn items\n}",
"func (d *Daemon) syncHostIPs() error {\n\tif option.Config.DryMode {\n\t\treturn nil\n\t}\n\n\ttype ipIDLabel struct {\n\t\tidentity.IPIdentityPair\n\t\tlabels.Labels\n\t}\n\tspecialIdentities := make([]ipIDLabel, 0, 2)\n\n\tif option.Config.EnableIPv4 {\n\t\taddrs, err := d.datapath.LocalNodeAddressing().IPv4().LocalAddresses()\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Warning(\"Unable to list local IPv4 addresses\")\n\t\t}\n\n\t\tfor _, ip := range addrs {\n\t\t\tif option.Config.IsExcludedLocalAddress(ip) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif len(ip) > 0 {\n\t\t\t\tspecialIdentities = append(specialIdentities, ipIDLabel{\n\t\t\t\t\tidentity.IPIdentityPair{\n\t\t\t\t\t\tIP: ip,\n\t\t\t\t\t\tID: identity.ReservedIdentityHost,\n\t\t\t\t\t},\n\t\t\t\t\tlabels.LabelHost,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tipv4Ident := identity.ReservedIdentityWorldIPv4\n\t\tipv4Label := labels.LabelWorldIPv4\n\t\tif !option.Config.EnableIPv6 {\n\t\t\tipv4Ident = identity.ReservedIdentityWorld\n\t\t\tipv4Label = labels.LabelWorld\n\t\t}\n\t\tspecialIdentities = append(specialIdentities, ipIDLabel{\n\t\t\tidentity.IPIdentityPair{\n\t\t\t\tIP: net.IPv4zero,\n\t\t\t\tMask: net.CIDRMask(0, net.IPv4len*8),\n\t\t\t\tID: ipv4Ident,\n\t\t\t},\n\t\t\tipv4Label,\n\t\t})\n\t}\n\n\tif option.Config.EnableIPv6 {\n\t\taddrs, err := d.datapath.LocalNodeAddressing().IPv6().LocalAddresses()\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Warning(\"Unable to list local IPv6 addresses\")\n\t\t}\n\n\t\taddrs = append(addrs, node.GetIPv6Router())\n\t\tfor _, ip := range addrs {\n\t\t\tif option.Config.IsExcludedLocalAddress(ip) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif len(ip) > 0 {\n\t\t\t\tspecialIdentities = append(specialIdentities, ipIDLabel{\n\t\t\t\t\tidentity.IPIdentityPair{\n\t\t\t\t\t\tIP: ip,\n\t\t\t\t\t\tID: identity.ReservedIdentityHost,\n\t\t\t\t\t},\n\t\t\t\t\tlabels.LabelHost,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tipv6Ident := identity.ReservedIdentityWorldIPv6\n\t\tipv6Label := 
labels.LabelWorldIPv6\n\t\tif !option.Config.EnableIPv4 {\n\t\t\tipv6Ident = identity.ReservedIdentityWorld\n\t\t\tipv6Label = labels.LabelWorld\n\t\t}\n\t\tspecialIdentities = append(specialIdentities, ipIDLabel{\n\t\t\tidentity.IPIdentityPair{\n\t\t\t\tIP: net.IPv6zero,\n\t\t\t\tMask: net.CIDRMask(0, net.IPv6len*8),\n\t\t\t\tID: ipv6Ident,\n\t\t\t},\n\t\t\tipv6Label,\n\t\t})\n\t}\n\n\texistingEndpoints, err := lxcmap.DumpToMap()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdaemonResourceID := ipcachetypes.NewResourceID(ipcachetypes.ResourceKindDaemon, \"\", \"\")\n\tfor _, ipIDLblsPair := range specialIdentities {\n\t\tisHost := ipIDLblsPair.ID == identity.ReservedIdentityHost\n\t\tif isHost {\n\t\t\tadded, err := lxcmap.SyncHostEntry(ipIDLblsPair.IP)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Unable to add host entry to endpoint map: %s\", err)\n\t\t\t}\n\t\t\tif added {\n\t\t\t\tlog.WithField(logfields.IPAddr, ipIDLblsPair.IP).Debugf(\"Added local ip to endpoint map\")\n\t\t\t}\n\t\t}\n\n\t\tdelete(existingEndpoints, ipIDLblsPair.IP.String())\n\n\t\tlbls := ipIDLblsPair.Labels\n\t\tif ipIDLblsPair.ID.IsWorld() {\n\t\t\tp := netip.PrefixFrom(ippkg.MustAddrFromIP(ipIDLblsPair.IP), 0)\n\t\t\td.ipcache.OverrideIdentity(p, lbls, source.Local, daemonResourceID)\n\t\t} else {\n\t\t\td.ipcache.UpsertLabels(ippkg.IPToNetPrefix(ipIDLblsPair.IP),\n\t\t\t\tlbls,\n\t\t\t\tsource.Local, daemonResourceID,\n\t\t\t)\n\t\t}\n\t}\n\n\t// existingEndpoints is a map from endpoint IP to endpoint info. 
Referring\n\t// to the key as host IP here because we only care about the host endpoint.\n\tfor hostIP, info := range existingEndpoints {\n\t\tif ip := net.ParseIP(hostIP); info.IsHost() && ip != nil {\n\t\t\tif err := lxcmap.DeleteEntry(ip); err != nil {\n\t\t\t\tlog.WithError(err).WithFields(logrus.Fields{\n\t\t\t\t\tlogfields.IPAddr: hostIP,\n\t\t\t\t}).Warn(\"Unable to delete obsolete host IP from BPF map\")\n\t\t\t} else {\n\t\t\t\tlog.Debugf(\"Removed outdated host IP %s from endpoint map\", hostIP)\n\t\t\t}\n\n\t\t\td.ipcache.RemoveLabels(ippkg.IPToNetPrefix(ip), labels.LabelHost, daemonResourceID)\n\t\t}\n\t}\n\n\tif option.Config.EnableVTEP {\n\t\terr := setupVTEPMapping()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = setupRouteToVtepCidr()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (c *Eth) GetList() ([]string, error) {\n c.con.LogQuery(\"(get) list of ethernet interfaces\")\n path := c.xpath(nil)\n return c.con.EntryListUsing(c.con.Get, path[:len(path) - 1])\n}",
"func (r Virtual_DedicatedHost) GetGuests() (resp []datatypes.Virtual_Guest, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_DedicatedHost\", \"getGuests\", nil, &r.Options, &resp)\n\treturn\n}",
"func (h *Hub) AllHosts() map[string]client.HostConfig {\n\th.Lock()\n\tdefer h.Unlock()\n\tr := map[string]client.HostConfig{}\n\tfor _, host := range h.hosts {\n\t\tc := host.GetConfig()\n\t\tr[c.HostID] = c\n\t}\n\treturn r\n}",
"func (s *ServerT) Hosts() ([]string, error) {\n\tvar hosts = []string{s.Config.HTTPHost}\n\n\tif s.Config.HTTPHost != \"localhost\" {\n\t\thosts = append(hosts, \"localhost\")\n\t}\n\n\taddresses, _ := net.InterfaceAddrs()\n\tfor _, address := range addresses {\n\t\tif ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {\n\t\t\thost := ipnet.IP.To4()\n\t\t\tif host != nil {\n\t\t\t\thosts = append(hosts, host.String())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn hosts, nil\n}",
"func ListDeviceNames(withDescription bool, withIP bool) ([]string, error) {\n\tdevices, err := pcap.FindAllDevs()\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\n\tret := []string{}\n\tfor _, dev := range devices {\n\t\tr := dev.Name\n\n\t\tif withDescription {\n\t\t\tdesc := \"No description available\"\n\t\t\tif len(dev.Description) > 0 {\n\t\t\t\tdesc = dev.Description\n\t\t\t}\n\t\t\tr += fmt.Sprintf(\" (%s)\", desc)\n\t\t}\n\n\t\tif withIP {\n\t\t\tips := \"Not assigned ip address\"\n\t\t\tif len(dev.Addresses) > 0 {\n\t\t\t\tips = \"\"\n\n\t\t\t\tfor i, address := range []pcap.InterfaceAddress(dev.Addresses) {\n\t\t\t\t\t// Add a space between the IP address.\n\t\t\t\t\tif i > 0 {\n\t\t\t\t\t\tips += \" \"\n\t\t\t\t\t}\n\n\t\t\t\t\tips += fmt.Sprintf(\"%s\", address.IP.String())\n\t\t\t\t}\n\t\t\t}\n\t\t\tr += fmt.Sprintf(\" (%s)\", ips)\n\n\t\t}\n\t\tret = append(ret, r)\n\t}\n\treturn ret, nil\n}",
"func (r *reader) GetDNSUnblockedHostnames() (hostnames []string, err error) {\n\ts, err := r.envParams.GetEnv(\"UNBLOCK\")\n\tif err != nil {\n\t\treturn nil, err\n\t} else if len(s) == 0 {\n\t\treturn nil, nil\n\t}\n\thostnames = strings.Split(s, \",\")\n\tfor _, hostname := range hostnames {\n\t\tif !r.verifier.MatchHostname(hostname) {\n\t\t\treturn nil, fmt.Errorf(\"hostname %q does not seem valid\", hostname)\n\t\t}\n\t}\n\treturn hostnames, nil\n}",
"func ListDockerHosts(w http.ResponseWriter, r *http.Request) {\n\t// let's get all host uri from map.\n\tvar hosts []string\n\tfor host, _ := range clientMap {\n\t\thosts = append(hosts, host)\n\t}\n\t// map with list of hosts\n\tresponse := make(map[string][]string)\n\tresponse[\"hosts\"] = hosts\n\t// convert map to json\n\tjsonString, err := json.Marshal(response)\n\tif err != nil {\n\t\tfmt.Fprintln(w,\"{ \\\"error\\\" : \\\"Internal server error\\\" }\")\n\t}\n\tfmt.Fprintln(w,string(jsonString))\n}",
"func (o *LdapProvider) GetHostnames() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.Hostnames\n}",
"func ActiveAddresses() map[string]net.Interface {\n\tresult := make(map[string]net.Interface)\n\tif iFaces, err := net.Interfaces(); err == nil {\n\t\tfor _, iFace := range iFaces {\n\t\t\tconst interesting = net.FlagUp | net.FlagBroadcast\n\t\t\tif iFace.Flags&interesting == interesting {\n\t\t\t\tif name := Address(iFace); name != \"\" {\n\t\t\t\t\tresult[name] = iFace\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}",
"func (s *k8sStore) ListIngresses() []*networkingv1.Ingress {\n\t// filter ingress rules\n\tvar ingresses []*networkingv1.Ingress\n\tfor _, item := range s.listers.Ingress.List() {\n\t\ting := item.(*networkingv1.Ingress)\n\n\t\tingresses = append(ingresses, ing)\n\t}\n\n\treturn ingresses\n}",
"func validateAdapters(adapterMap map[string]Adapter, errs []error) []error {\n\tfor adapterName, adapter := range adapterMap {\n\t\tif !adapter.Disabled {\n\t\t\t// Verify that every adapter has a valid endpoint associated with it\n\t\t\terrs = validateAdapterEndpoint(adapter.Endpoint, adapterName, errs)\n\n\t\t\t// Verify that valid user_sync URLs are specified in the config\n\t\t\terrs = validateAdapterUserSyncURL(adapter.UserSyncURL, adapterName, errs)\n\t\t}\n\t}\n\treturn errs\n}",
"func HostOnly(addr string) string {\n\thost, _, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn addr\n\t} else {\n\t\treturn host\n\t}\n}",
"func (c *Client) ListAddresses() (map[int][]*AddrEntry, error) {\n\tvar msg netlink.Message\n\tmsg.Header.Type = unix.RTM_GETADDR\n\tmsg.Header.Flags = netlink.Dump | netlink.Request\n\n\tvar ifimsg iproute2.IfInfoMsg\n\tae := netlink.NewAttributeEncoder()\n\tae.Uint32(unix.IFLA_EXT_MASK, uint32(iproute2.RTEXT_FILTER_BRVLAN))\n\tmsg.Data, _ = ifimsg.MarshalBinary()\n\tdata, err := ae.Encode()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmsg.Data = append(msg.Data, data...)\n\n\tmsgs, err := c.conn.Execute(msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tentries := make(map[int][]*AddrEntry)\n\tfor _, msg := range msgs {\n\t\tif msg.Header.Type != unix.RTM_NEWADDR {\n\t\t\tcontinue\n\t\t}\n\n\t\te, ok, err := parseAddrMsg(&msg)\n\t\tif err != nil {\n\t\t\treturn entries, err\n\t\t}\n\t\tif ok {\n\t\t\tentries[e.Ifindex] = append(entries[e.Ifindex], e)\n\t\t}\n\t}\n\treturn entries, nil\n}",
"func (r Virtual_Guest) GetAllowedHost() (resp datatypes.Network_Storage_Allowed_Host, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Guest\", \"getAllowedHost\", nil, &r.Options, &resp)\n\treturn\n}",
"func filteredConfig(list *configstore.ItemList, dropAlias ...string) (map[string]*string, error) {\n\tcfg := make(map[string]*string)\n\tfor _, i := range list.Items {\n\t\tif !utils.ListContainsString(dropAlias, i.Key()) {\n\t\t\t// assume only one value per alias\n\t\t\tif _, ok := cfg[i.Key()]; !ok {\n\t\t\t\tv, err := i.Value()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tif len(v) > 0 {\n\t\t\t\t\tcfg[i.Key()] = &v\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn cfg, nil\n}",
"func RegisteredListenNetworks() []string {\n\tnetworks := []string{}\n\tfor network := range listenFuncs {\n\t\tnetworks = append(networks, network)\n\t}\n\treturn networks\n}",
"func iterateAdapters(task func(adapters.Adapter, ...string), adapterList ...string) {\n\tonce.Do(func() {\n\t\tcheckLocalTimezone()\n\t\tcheckDatabase()\n\t})\n\n\tif len(adapterList) == 0 {\n\t\tfor _, a := range adapters.Adapters {\n\t\t\twg.Add(1)\n\t\t\tgo task(a)\n\t\t}\n\t} else {\n\t\tfor _, adapterCode := range adapterList {\n\t\t\toperatorCodes := make([]string, 0, len(adapterCode))\n\t\t\tsep := strings.Index(adapterCode, \".\")\n\t\t\tif sep > 0 {\n\t\t\t\tfor index := sep + 1; index < len(adapterCode); index++ {\n\t\t\t\t\toperatorCodes = append(operatorCodes, adapterCode[index:index+1])\n\t\t\t\t}\n\t\t\t\tadapterCode = adapterCode[:sep]\n\t\t\t}\n\n\t\t\ta := adapters.MustGetAdapterByCode(string(adapterCode))\n\t\t\twg.Add(1)\n\t\t\tgo task(a, operatorCodes...)\n\t\t}\n\t}\n\n\twg.Wait()\n}",
"func NewIpNetwork_getRemoteHostByName_Params_List(s *capnp.Segment, sz int32) (IpNetwork_getRemoteHostByName_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn IpNetwork_getRemoteHostByName_Params_List{l}, err\n}",
"func (v *Virter) getDHCPHosts(network libvirt.Network) ([]libvirtxml.NetworkDHCPHost, error) {\n\thosts := []libvirtxml.NetworkDHCPHost{}\n\n\tnetworkDescription, err := getNetworkDescription(v.libvirt, network)\n\tif err != nil {\n\t\treturn hosts, err\n\t}\n\tif len(networkDescription.IPs) < 1 {\n\t\treturn hosts, fmt.Errorf(\"no IPs in network\")\n\t}\n\n\tipDescription := networkDescription.IPs[0]\n\n\tdhcpDescription := ipDescription.DHCP\n\tif dhcpDescription == nil {\n\t\treturn hosts, fmt.Errorf(\"no DHCP in network\")\n\t}\n\n\tfor _, host := range dhcpDescription.Hosts {\n\t\thosts = append(hosts, host)\n\t}\n\n\treturn hosts, nil\n}",
"func (authManager *AuthManager) GetDatastoreIgnoreMapForBlockVolumes(ctx context.Context) map[string]*cnsvsphere.DatastoreInfo {\n\tdatastoreIgnoreMapForBlockVolumes := make(map[string]*cnsvsphere.DatastoreInfo)\n\tauthManager.rwMutex.RLock()\n\tdefer authManager.rwMutex.RUnlock()\n\tfor dsURL, dsInfo := range authManager.datastoreIgnoreMapForBlockVolumes {\n\t\tdatastoreIgnoreMapForBlockVolumes[dsURL] = dsInfo\n\t}\n\treturn datastoreIgnoreMapForBlockVolumes\n}",
"func (s *Module) DiskList() ([]pkg.VDisk, error) {\n\tpools, err := s.diskPools()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar disks []pkg.VDisk\n\tfor _, pool := range pools {\n\n\t\titems, err := os.ReadDir(pool)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to list virtual disks\")\n\t\t}\n\n\t\tfor _, item := range items {\n\t\t\tif item.IsDir() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tinfo, err := item.Info()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to get file info for '%s'\", item.Name())\n\t\t\t}\n\n\t\t\tdisks = append(disks, pkg.VDisk{\n\t\t\t\tPath: filepath.Join(pool, item.Name()),\n\t\t\t\tSize: info.Size(),\n\t\t\t})\n\t\t}\n\n\t\treturn disks, nil\n\t}\n\n\treturn disks, nil\n}",
"func CyberghostServers() []models.CyberghostServer {\n\treturn []models.CyberghostServer{\n\t\t{Region: \"Albania\", Group: \"Premium TCP Europe\", Hostname: \"97-1-al.cg-dialup.net\", IPs: []net.IP{{31, 171, 155, 3}, {31, 171, 155, 4}, {31, 171, 155, 7}, {31, 171, 155, 8}, {31, 171, 155, 9}, {31, 171, 155, 10}, {31, 171, 155, 11}, {31, 171, 155, 12}, {31, 171, 155, 13}, {31, 171, 155, 14}}},\n\t\t{Region: \"Albania\", Group: \"Premium UDP Europe\", Hostname: \"87-1-al.cg-dialup.net\", IPs: []net.IP{{31, 171, 155, 4}, {31, 171, 155, 5}, {31, 171, 155, 6}, {31, 171, 155, 7}, {31, 171, 155, 8}, {31, 171, 155, 9}, {31, 171, 155, 10}, {31, 171, 155, 11}, {31, 171, 155, 13}, {31, 171, 155, 14}}},\n\t\t{Region: \"Algeria\", Group: \"Premium TCP Europe\", Hostname: \"97-1-dz.cg-dialup.net\", IPs: []net.IP{{176, 125, 228, 132}, {176, 125, 228, 134}, {176, 125, 228, 135}, {176, 125, 228, 136}, {176, 125, 228, 137}, {176, 125, 228, 138}, {176, 125, 228, 139}, {176, 125, 228, 140}, {176, 125, 228, 141}, {176, 125, 228, 142}}},\n\t\t{Region: \"Algeria\", Group: \"Premium UDP Europe\", Hostname: \"87-1-dz.cg-dialup.net\", IPs: []net.IP{{176, 125, 228, 131}, {176, 125, 228, 133}, {176, 125, 228, 134}, {176, 125, 228, 136}, {176, 125, 228, 137}, {176, 125, 228, 139}, {176, 125, 228, 140}, {176, 125, 228, 141}, {176, 125, 228, 142}, {176, 125, 228, 143}}},\n\t\t{Region: \"Andorra\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ad.cg-dialup.net\", IPs: []net.IP{{188, 241, 82, 137}, {188, 241, 82, 138}, {188, 241, 82, 140}, {188, 241, 82, 142}, {188, 241, 82, 147}, {188, 241, 82, 155}, {188, 241, 82, 159}, {188, 241, 82, 160}, {188, 241, 82, 161}, {188, 241, 82, 166}}},\n\t\t{Region: \"Andorra\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ad.cg-dialup.net\", IPs: []net.IP{{188, 241, 82, 133}, {188, 241, 82, 134}, {188, 241, 82, 136}, {188, 241, 82, 137}, {188, 241, 82, 146}, {188, 241, 82, 153}, {188, 241, 82, 155}, {188, 241, 82, 160}, {188, 241, 82, 164}, {188, 241, 82, 
168}}},\n\t\t{Region: \"Argentina\", Group: \"Premium TCP USA\", Hostname: \"93-1-ar.cg-dialup.net\", IPs: []net.IP{{146, 70, 39, 4}, {146, 70, 39, 9}, {146, 70, 39, 15}, {146, 70, 39, 19}, {146, 70, 39, 135}, {146, 70, 39, 136}, {146, 70, 39, 139}, {146, 70, 39, 142}, {146, 70, 39, 143}, {146, 70, 39, 145}}},\n\t\t{Region: \"Argentina\", Group: \"Premium UDP USA\", Hostname: \"94-1-ar.cg-dialup.net\", IPs: []net.IP{{146, 70, 39, 3}, {146, 70, 39, 5}, {146, 70, 39, 6}, {146, 70, 39, 8}, {146, 70, 39, 11}, {146, 70, 39, 12}, {146, 70, 39, 131}, {146, 70, 39, 134}, {146, 70, 39, 142}, {146, 70, 39, 143}}},\n\t\t{Region: \"Armenia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-am.cg-dialup.net\", IPs: []net.IP{{185, 253, 160, 131}, {185, 253, 160, 134}, {185, 253, 160, 136}, {185, 253, 160, 137}, {185, 253, 160, 138}, {185, 253, 160, 139}, {185, 253, 160, 140}, {185, 253, 160, 141}, {185, 253, 160, 142}, {185, 253, 160, 143}}},\n\t\t{Region: \"Armenia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-am.cg-dialup.net\", IPs: []net.IP{{185, 253, 160, 131}, {185, 253, 160, 132}, {185, 253, 160, 133}, {185, 253, 160, 134}, {185, 253, 160, 135}, {185, 253, 160, 136}, {185, 253, 160, 137}, {185, 253, 160, 141}, {185, 253, 160, 142}, {185, 253, 160, 144}}},\n\t\t{Region: \"Australia\", Group: \"Premium TCP Asia\", Hostname: \"96-1-au.cg-dialup.net\", IPs: []net.IP{{154, 16, 81, 22}, {181, 214, 215, 7}, {181, 214, 215, 15}, {181, 214, 215, 18}, {191, 101, 210, 15}, {191, 101, 210, 50}, {191, 101, 210, 60}, {202, 60, 80, 78}, {202, 60, 80, 82}, {202, 60, 80, 102}}},\n\t\t{Region: \"Australia\", Group: \"Premium UDP Asia\", Hostname: \"95-1-au.cg-dialup.net\", IPs: []net.IP{{181, 214, 215, 4}, {181, 214, 215, 16}, {191, 101, 210, 18}, {191, 101, 210, 21}, {191, 101, 210, 36}, {191, 101, 210, 58}, {191, 101, 210, 60}, {202, 60, 80, 74}, {202, 60, 80, 106}, {202, 60, 80, 124}}},\n\t\t{Region: \"Austria\", Group: \"Premium TCP Europe\", Hostname: \"97-1-at.cg-dialup.net\", 
IPs: []net.IP{{37, 19, 223, 9}, {37, 19, 223, 16}, {37, 19, 223, 113}, {37, 19, 223, 205}, {37, 19, 223, 211}, {37, 19, 223, 218}, {37, 19, 223, 223}, {37, 19, 223, 245}, {37, 120, 155, 104}, {89, 187, 168, 174}}},\n\t\t{Region: \"Austria\", Group: \"Premium UDP Europe\", Hostname: \"87-1-at.cg-dialup.net\", IPs: []net.IP{{37, 19, 223, 202}, {37, 19, 223, 205}, {37, 19, 223, 229}, {37, 19, 223, 239}, {37, 19, 223, 241}, {37, 19, 223, 243}, {37, 120, 155, 103}, {89, 187, 168, 160}, {89, 187, 168, 174}, {89, 187, 168, 181}}},\n\t\t{Region: \"Bahamas\", Group: \"Premium TCP USA\", Hostname: \"93-1-bs.cg-dialup.net\", IPs: []net.IP{{95, 181, 238, 131}, {95, 181, 238, 136}, {95, 181, 238, 142}, {95, 181, 238, 144}, {95, 181, 238, 146}, {95, 181, 238, 147}, {95, 181, 238, 148}, {95, 181, 238, 152}, {95, 181, 238, 153}, {95, 181, 238, 155}}},\n\t\t{Region: \"Bahamas\", Group: \"Premium UDP USA\", Hostname: \"94-1-bs.cg-dialup.net\", IPs: []net.IP{{95, 181, 238, 131}, {95, 181, 238, 138}, {95, 181, 238, 140}, {95, 181, 238, 141}, {95, 181, 238, 146}, {95, 181, 238, 147}, {95, 181, 238, 148}, {95, 181, 238, 151}, {95, 181, 238, 153}, {95, 181, 238, 155}}},\n\t\t{Region: \"Bangladesh\", Group: \"Premium TCP Asia\", Hostname: \"96-1-bd.cg-dialup.net\", IPs: []net.IP{{84, 252, 93, 132}, {84, 252, 93, 133}, {84, 252, 93, 135}, {84, 252, 93, 138}, {84, 252, 93, 139}, {84, 252, 93, 141}, {84, 252, 93, 142}, {84, 252, 93, 143}, {84, 252, 93, 144}, {84, 252, 93, 145}}},\n\t\t{Region: \"Bangladesh\", Group: \"Premium UDP Asia\", Hostname: \"95-1-bd.cg-dialup.net\", IPs: []net.IP{{84, 252, 93, 131}, {84, 252, 93, 133}, {84, 252, 93, 134}, {84, 252, 93, 135}, {84, 252, 93, 136}, {84, 252, 93, 139}, {84, 252, 93, 140}, {84, 252, 93, 141}, {84, 252, 93, 143}, {84, 252, 93, 145}}},\n\t\t{Region: \"Belarus\", Group: \"Premium TCP Europe\", Hostname: \"97-1-by.cg-dialup.net\", IPs: []net.IP{{45, 132, 194, 5}, {45, 132, 194, 6}, {45, 132, 194, 23}, {45, 132, 194, 24}, {45, 132, 194, 25}, 
{45, 132, 194, 27}, {45, 132, 194, 30}, {45, 132, 194, 35}, {45, 132, 194, 44}, {45, 132, 194, 49}}},\n\t\t{Region: \"Belarus\", Group: \"Premium UDP Europe\", Hostname: \"87-1-by.cg-dialup.net\", IPs: []net.IP{{45, 132, 194, 6}, {45, 132, 194, 8}, {45, 132, 194, 9}, {45, 132, 194, 11}, {45, 132, 194, 15}, {45, 132, 194, 19}, {45, 132, 194, 20}, {45, 132, 194, 23}, {45, 132, 194, 24}, {45, 132, 194, 26}}},\n\t\t{Region: \"Belgium\", Group: \"Premium TCP Europe\", Hostname: \"97-1-be.cg-dialup.net\", IPs: []net.IP{{37, 120, 143, 165}, {37, 120, 143, 166}, {185, 210, 217, 10}, {185, 210, 217, 248}, {193, 9, 114, 211}, {193, 9, 114, 220}, {194, 110, 115, 195}, {194, 110, 115, 199}, {194, 110, 115, 205}, {194, 110, 115, 238}}},\n\t\t{Region: \"Belgium\", Group: \"Premium UDP Europe\", Hostname: \"87-1-be.cg-dialup.net\", IPs: []net.IP{{37, 120, 143, 163}, {37, 120, 143, 167}, {185, 210, 217, 9}, {185, 210, 217, 13}, {185, 210, 217, 55}, {185, 210, 217, 251}, {185, 232, 21, 120}, {194, 110, 115, 214}, {194, 110, 115, 218}, {194, 110, 115, 236}}},\n\t\t{Region: \"Bosnia and Herzegovina\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ba.cg-dialup.net\", IPs: []net.IP{{185, 99, 3, 57}, {185, 99, 3, 58}, {185, 99, 3, 72}, {185, 99, 3, 73}, {185, 99, 3, 74}, {185, 99, 3, 130}, {185, 99, 3, 131}, {185, 99, 3, 134}, {185, 99, 3, 135}, {185, 99, 3, 136}}},\n\t\t{Region: \"Bosnia and Herzegovina\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ba.cg-dialup.net\", IPs: []net.IP{{185, 99, 3, 57}, {185, 99, 3, 58}, {185, 99, 3, 72}, {185, 99, 3, 73}, {185, 99, 3, 74}, {185, 99, 3, 130}, {185, 99, 3, 131}, {185, 99, 3, 134}, {185, 99, 3, 135}, {185, 99, 3, 136}}},\n\t\t{Region: \"Brazil\", Group: \"Premium TCP USA\", Hostname: \"93-1-br.cg-dialup.net\", IPs: []net.IP{{188, 241, 177, 5}, {188, 241, 177, 11}, {188, 241, 177, 38}, {188, 241, 177, 45}, {188, 241, 177, 132}, {188, 241, 177, 135}, {188, 241, 177, 136}, {188, 241, 177, 152}, {188, 241, 177, 153}, {188, 241, 177, 
156}}},\n\t\t{Region: \"Brazil\", Group: \"Premium UDP USA\", Hostname: \"94-1-br.cg-dialup.net\", IPs: []net.IP{{188, 241, 177, 8}, {188, 241, 177, 37}, {188, 241, 177, 40}, {188, 241, 177, 42}, {188, 241, 177, 45}, {188, 241, 177, 135}, {188, 241, 177, 139}, {188, 241, 177, 149}, {188, 241, 177, 152}, {188, 241, 177, 154}}},\n\t\t{Region: \"Bulgaria\", Group: \"Premium TCP Europe\", Hostname: \"97-1-bg.cg-dialup.net\", IPs: []net.IP{{37, 120, 152, 99}, {37, 120, 152, 101}, {37, 120, 152, 103}, {37, 120, 152, 104}, {37, 120, 152, 105}, {37, 120, 152, 106}, {37, 120, 152, 107}, {37, 120, 152, 108}, {37, 120, 152, 109}, {37, 120, 152, 110}}},\n\t\t{Region: \"Bulgaria\", Group: \"Premium UDP Europe\", Hostname: \"87-1-bg.cg-dialup.net\", IPs: []net.IP{{37, 120, 152, 99}, {37, 120, 152, 100}, {37, 120, 152, 101}, {37, 120, 152, 102}, {37, 120, 152, 103}, {37, 120, 152, 105}, {37, 120, 152, 106}, {37, 120, 152, 107}, {37, 120, 152, 108}, {37, 120, 152, 109}}},\n\t\t{Region: \"Cambodia\", Group: \"Premium TCP Asia\", Hostname: \"96-1-kh.cg-dialup.net\", IPs: []net.IP{{188, 215, 235, 35}, {188, 215, 235, 36}, {188, 215, 235, 38}, {188, 215, 235, 39}, {188, 215, 235, 45}, {188, 215, 235, 49}, {188, 215, 235, 51}, {188, 215, 235, 53}, {188, 215, 235, 54}, {188, 215, 235, 57}}},\n\t\t{Region: \"Cambodia\", Group: \"Premium UDP Asia\", Hostname: \"95-1-kh.cg-dialup.net\", IPs: []net.IP{{188, 215, 235, 36}, {188, 215, 235, 40}, {188, 215, 235, 42}, {188, 215, 235, 44}, {188, 215, 235, 46}, {188, 215, 235, 47}, {188, 215, 235, 48}, {188, 215, 235, 50}, {188, 215, 235, 55}, {188, 215, 235, 57}}},\n\t\t{Region: \"Canada\", Group: \"Premium TCP USA\", Hostname: \"93-1-ca.cg-dialup.net\", IPs: []net.IP{{66, 115, 142, 136}, {66, 115, 142, 139}, {66, 115, 142, 156}, {66, 115, 142, 162}, {66, 115, 142, 172}, {104, 200, 151, 99}, {104, 200, 151, 111}, {104, 200, 151, 153}, {104, 200, 151, 164}, {172, 98, 89, 137}}},\n\t\t{Region: \"Canada\", Group: \"Premium UDP USA\", Hostname: 
\"94-1-ca.cg-dialup.net\", IPs: []net.IP{{66, 115, 142, 135}, {66, 115, 142, 154}, {66, 115, 142, 165}, {104, 200, 151, 32}, {104, 200, 151, 57}, {104, 200, 151, 85}, {104, 200, 151, 86}, {104, 200, 151, 147}, {172, 98, 89, 144}, {172, 98, 89, 173}}},\n\t\t{Region: \"Chile\", Group: \"Premium TCP USA\", Hostname: \"93-1-cl.cg-dialup.net\", IPs: []net.IP{{146, 70, 11, 3}, {146, 70, 11, 6}, {146, 70, 11, 7}, {146, 70, 11, 8}, {146, 70, 11, 9}, {146, 70, 11, 10}, {146, 70, 11, 11}, {146, 70, 11, 12}, {146, 70, 11, 13}, {146, 70, 11, 14}}},\n\t\t{Region: \"Chile\", Group: \"Premium UDP USA\", Hostname: \"94-1-cl.cg-dialup.net\", IPs: []net.IP{{146, 70, 11, 3}, {146, 70, 11, 4}, {146, 70, 11, 6}, {146, 70, 11, 7}, {146, 70, 11, 8}, {146, 70, 11, 9}, {146, 70, 11, 10}, {146, 70, 11, 11}, {146, 70, 11, 13}, {146, 70, 11, 14}}},\n\t\t{Region: \"China\", Group: \"Premium TCP Asia\", Hostname: \"96-1-cn.cg-dialup.net\", IPs: []net.IP{{188, 241, 80, 131}, {188, 241, 80, 132}, {188, 241, 80, 133}, {188, 241, 80, 134}, {188, 241, 80, 135}, {188, 241, 80, 137}, {188, 241, 80, 139}, {188, 241, 80, 140}, {188, 241, 80, 141}, {188, 241, 80, 142}}},\n\t\t{Region: \"China\", Group: \"Premium UDP Asia\", Hostname: \"95-1-cn.cg-dialup.net\", IPs: []net.IP{{188, 241, 80, 131}, {188, 241, 80, 132}, {188, 241, 80, 133}, {188, 241, 80, 134}, {188, 241, 80, 135}, {188, 241, 80, 136}, {188, 241, 80, 137}, {188, 241, 80, 138}, {188, 241, 80, 139}, {188, 241, 80, 142}}},\n\t\t{Region: \"Colombia\", Group: \"Premium TCP USA\", Hostname: \"93-1-co.cg-dialup.net\", IPs: []net.IP{{146, 70, 9, 3}, {146, 70, 9, 4}, {146, 70, 9, 5}, {146, 70, 9, 7}, {146, 70, 9, 9}, {146, 70, 9, 10}, {146, 70, 9, 11}, {146, 70, 9, 12}, {146, 70, 9, 13}, {146, 70, 9, 14}}},\n\t\t{Region: \"Colombia\", Group: \"Premium UDP USA\", Hostname: \"94-1-co.cg-dialup.net\", IPs: []net.IP{{146, 70, 9, 3}, {146, 70, 9, 4}, {146, 70, 9, 5}, {146, 70, 9, 6}, {146, 70, 9, 7}, {146, 70, 9, 8}, {146, 70, 9, 9}, {146, 70, 9, 10}, 
{146, 70, 9, 11}, {146, 70, 9, 12}}},\n\t\t{Region: \"Costa Rica\", Group: \"Premium TCP USA\", Hostname: \"93-1-cr.cg-dialup.net\", IPs: []net.IP{{146, 70, 10, 3}, {146, 70, 10, 4}, {146, 70, 10, 5}, {146, 70, 10, 6}, {146, 70, 10, 7}, {146, 70, 10, 8}, {146, 70, 10, 10}, {146, 70, 10, 11}, {146, 70, 10, 12}, {146, 70, 10, 13}}},\n\t\t{Region: \"Costa Rica\", Group: \"Premium UDP USA\", Hostname: \"94-1-cr.cg-dialup.net\", IPs: []net.IP{{146, 70, 10, 3}, {146, 70, 10, 4}, {146, 70, 10, 5}, {146, 70, 10, 6}, {146, 70, 10, 7}, {146, 70, 10, 8}, {146, 70, 10, 9}, {146, 70, 10, 11}, {146, 70, 10, 12}, {146, 70, 10, 14}}},\n\t\t{Region: \"Croatia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-hr.cg-dialup.net\", IPs: []net.IP{{146, 70, 8, 5}, {146, 70, 8, 8}, {146, 70, 8, 9}, {146, 70, 8, 10}, {146, 70, 8, 11}, {146, 70, 8, 12}, {146, 70, 8, 13}, {146, 70, 8, 14}, {146, 70, 8, 15}, {146, 70, 8, 16}}},\n\t\t{Region: \"Croatia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-hr.cg-dialup.net\", IPs: []net.IP{{146, 70, 8, 3}, {146, 70, 8, 4}, {146, 70, 8, 5}, {146, 70, 8, 6}, {146, 70, 8, 7}, {146, 70, 8, 9}, {146, 70, 8, 11}, {146, 70, 8, 13}, {146, 70, 8, 14}, {146, 70, 8, 16}}},\n\t\t{Region: \"Cyprus\", Group: \"Premium TCP Europe\", Hostname: \"97-1-cy.cg-dialup.net\", IPs: []net.IP{{185, 253, 162, 131}, {185, 253, 162, 133}, {185, 253, 162, 135}, {185, 253, 162, 136}, {185, 253, 162, 137}, {185, 253, 162, 139}, {185, 253, 162, 140}, {185, 253, 162, 142}, {185, 253, 162, 143}, {185, 253, 162, 144}}},\n\t\t{Region: \"Cyprus\", Group: \"Premium UDP Europe\", Hostname: \"87-1-cy.cg-dialup.net\", IPs: []net.IP{{185, 253, 162, 131}, {185, 253, 162, 132}, {185, 253, 162, 134}, {185, 253, 162, 135}, {185, 253, 162, 137}, {185, 253, 162, 138}, {185, 253, 162, 140}, {185, 253, 162, 142}, {185, 253, 162, 143}, {185, 253, 162, 144}}},\n\t\t{Region: \"Czech Republic\", Group: \"Premium TCP Europe\", Hostname: \"97-1-cz.cg-dialup.net\", IPs: []net.IP{{138, 199, 56, 235}, 
{138, 199, 56, 236}, {138, 199, 56, 237}, {138, 199, 56, 245}, {138, 199, 56, 246}, {138, 199, 56, 249}, {195, 181, 161, 12}, {195, 181, 161, 16}, {195, 181, 161, 20}, {195, 181, 161, 23}}},\n\t\t{Region: \"Czech Republic\", Group: \"Premium UDP Europe\", Hostname: \"87-1-cz.cg-dialup.net\", IPs: []net.IP{{138, 199, 56, 227}, {138, 199, 56, 229}, {138, 199, 56, 231}, {138, 199, 56, 235}, {138, 199, 56, 241}, {138, 199, 56, 247}, {195, 181, 161, 10}, {195, 181, 161, 16}, {195, 181, 161, 18}, {195, 181, 161, 22}}},\n\t\t{Region: \"Denmark\", Group: \"Premium TCP Europe\", Hostname: \"97-1-dk.cg-dialup.net\", IPs: []net.IP{{37, 120, 145, 83}, {37, 120, 145, 88}, {37, 120, 145, 93}, {37, 120, 194, 36}, {37, 120, 194, 56}, {37, 120, 194, 57}, {95, 174, 65, 163}, {95, 174, 65, 174}, {185, 206, 224, 238}, {185, 206, 224, 243}}},\n\t\t{Region: \"Denmark\", Group: \"Premium UDP Europe\", Hostname: \"87-1-dk.cg-dialup.net\", IPs: []net.IP{{37, 120, 194, 39}, {95, 174, 65, 167}, {95, 174, 65, 170}, {185, 206, 224, 227}, {185, 206, 224, 230}, {185, 206, 224, 236}, {185, 206, 224, 238}, {185, 206, 224, 245}, {185, 206, 224, 250}, {185, 206, 224, 254}}},\n\t\t{Region: \"Egypt\", Group: \"Premium TCP Europe\", Hostname: \"97-1-eg.cg-dialup.net\", IPs: []net.IP{{188, 214, 122, 40}, {188, 214, 122, 42}, {188, 214, 122, 43}, {188, 214, 122, 45}, {188, 214, 122, 48}, {188, 214, 122, 50}, {188, 214, 122, 52}, {188, 214, 122, 60}, {188, 214, 122, 70}, {188, 214, 122, 73}}},\n\t\t{Region: \"Egypt\", Group: \"Premium UDP Europe\", Hostname: \"87-1-eg.cg-dialup.net\", IPs: []net.IP{{188, 214, 122, 37}, {188, 214, 122, 38}, {188, 214, 122, 44}, {188, 214, 122, 54}, {188, 214, 122, 57}, {188, 214, 122, 59}, {188, 214, 122, 60}, {188, 214, 122, 61}, {188, 214, 122, 67}, {188, 214, 122, 69}}},\n\t\t{Region: \"Estonia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ee.cg-dialup.net\", IPs: []net.IP{{95, 153, 32, 83}, {95, 153, 32, 84}, {95, 153, 32, 86}, {95, 153, 32, 88}, {95, 153, 32, 
89}, {95, 153, 32, 90}, {95, 153, 32, 91}, {95, 153, 32, 92}, {95, 153, 32, 93}, {95, 153, 32, 94}}},\n\t\t{Region: \"Estonia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ee.cg-dialup.net\", IPs: []net.IP{{95, 153, 32, 83}, {95, 153, 32, 84}, {95, 153, 32, 85}, {95, 153, 32, 87}, {95, 153, 32, 88}, {95, 153, 32, 89}, {95, 153, 32, 90}, {95, 153, 32, 91}, {95, 153, 32, 92}, {95, 153, 32, 94}}},\n\t\t{Region: \"Finland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-fi.cg-dialup.net\", IPs: []net.IP{{188, 126, 89, 99}, {188, 126, 89, 102}, {188, 126, 89, 105}, {188, 126, 89, 107}, {188, 126, 89, 108}, {188, 126, 89, 110}, {188, 126, 89, 112}, {188, 126, 89, 115}, {188, 126, 89, 116}, {188, 126, 89, 119}}},\n\t\t{Region: \"Finland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-fi.cg-dialup.net\", IPs: []net.IP{{188, 126, 89, 101}, {188, 126, 89, 104}, {188, 126, 89, 109}, {188, 126, 89, 110}, {188, 126, 89, 111}, {188, 126, 89, 113}, {188, 126, 89, 114}, {188, 126, 89, 115}, {188, 126, 89, 122}, {188, 126, 89, 124}}},\n\t\t{Region: \"France\", Group: \"Premium TCP Europe\", Hostname: \"97-1-fr.cg-dialup.net\", IPs: []net.IP{{84, 17, 43, 167}, {84, 17, 60, 147}, {84, 17, 60, 155}, {151, 106, 8, 108}, {191, 101, 31, 202}, {191, 101, 31, 254}, {191, 101, 217, 45}, {191, 101, 217, 159}, {191, 101, 217, 211}, {191, 101, 217, 240}}},\n\t\t{Region: \"France\", Group: \"Premium UDP Europe\", Hostname: \"87-1-fr.cg-dialup.net\", IPs: []net.IP{{84, 17, 60, 59}, {84, 17, 60, 121}, {191, 101, 31, 81}, {191, 101, 31, 84}, {191, 101, 31, 126}, {191, 101, 31, 127}, {191, 101, 217, 140}, {191, 101, 217, 201}, {191, 101, 217, 206}, {191, 101, 217, 211}}},\n\t\t{Region: \"Georgia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ge.cg-dialup.net\", IPs: []net.IP{{95, 181, 236, 131}, {95, 181, 236, 132}, {95, 181, 236, 133}, {95, 181, 236, 134}, {95, 181, 236, 135}, {95, 181, 236, 136}, {95, 181, 236, 138}, {95, 181, 236, 139}, {95, 181, 236, 142}, {95, 181, 236, 
144}}},\n\t\t{Region: \"Georgia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ge.cg-dialup.net\", IPs: []net.IP{{95, 181, 236, 132}, {95, 181, 236, 133}, {95, 181, 236, 134}, {95, 181, 236, 136}, {95, 181, 236, 137}, {95, 181, 236, 139}, {95, 181, 236, 141}, {95, 181, 236, 142}, {95, 181, 236, 143}, {95, 181, 236, 144}}},\n\t\t{Region: \"Germany\", Group: \"Premium TCP Europe\", Hostname: \"97-1-de.cg-dialup.net\", IPs: []net.IP{{84, 17, 48, 39}, {84, 17, 48, 234}, {84, 17, 49, 106}, {84, 17, 49, 112}, {84, 17, 49, 218}, {154, 28, 188, 35}, {154, 28, 188, 66}, {154, 28, 188, 133}, {154, 28, 188, 144}, {154, 28, 188, 145}}},\n\t\t{Region: \"Germany\", Group: \"Premium UDP Europe\", Hostname: \"87-1-de.cg-dialup.net\", IPs: []net.IP{{84, 17, 48, 41}, {84, 17, 48, 224}, {84, 17, 49, 95}, {84, 17, 49, 236}, {84, 17, 49, 241}, {138, 199, 36, 151}, {154, 13, 1, 177}, {154, 28, 188, 73}, {154, 28, 188, 76}, {154, 28, 188, 93}}},\n\t\t{Region: \"Greece\", Group: \"Premium TCP Europe\", Hostname: \"97-1-gr.cg-dialup.net\", IPs: []net.IP{{185, 51, 134, 163}, {185, 51, 134, 165}, {185, 51, 134, 171}, {185, 51, 134, 172}, {185, 51, 134, 245}, {185, 51, 134, 246}, {185, 51, 134, 247}, {185, 51, 134, 249}, {185, 51, 134, 251}, {185, 51, 134, 254}}},\n\t\t{Region: \"Greece\", Group: \"Premium UDP Europe\", Hostname: \"87-1-gr.cg-dialup.net\", IPs: []net.IP{{185, 51, 134, 163}, {185, 51, 134, 166}, {185, 51, 134, 173}, {185, 51, 134, 174}, {185, 51, 134, 244}, {185, 51, 134, 246}, {185, 51, 134, 247}, {185, 51, 134, 251}, {185, 51, 134, 252}, {185, 51, 134, 253}}},\n\t\t{Region: \"Greenland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-gl.cg-dialup.net\", IPs: []net.IP{{91, 90, 120, 3}, {91, 90, 120, 4}, {91, 90, 120, 5}, {91, 90, 120, 7}, {91, 90, 120, 8}, {91, 90, 120, 10}, {91, 90, 120, 12}, {91, 90, 120, 13}, {91, 90, 120, 14}, {91, 90, 120, 17}}},\n\t\t{Region: \"Greenland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-gl.cg-dialup.net\", IPs: []net.IP{{91, 90, 
120, 3}, {91, 90, 120, 4}, {91, 90, 120, 5}, {91, 90, 120, 7}, {91, 90, 120, 9}, {91, 90, 120, 10}, {91, 90, 120, 12}, {91, 90, 120, 14}, {91, 90, 120, 15}, {91, 90, 120, 16}}},\n\t\t{Region: \"Hong Kong\", Group: \"Premium TCP Asia\", Hostname: \"96-1-hk.cg-dialup.net\", IPs: []net.IP{{84, 17, 56, 144}, {84, 17, 56, 148}, {84, 17, 56, 153}, {84, 17, 56, 162}, {84, 17, 56, 163}, {84, 17, 56, 169}, {84, 17, 56, 170}, {84, 17, 56, 179}, {84, 17, 56, 180}, {84, 17, 56, 181}}},\n\t\t{Region: \"Hong Kong\", Group: \"Premium UDP Asia\", Hostname: \"95-1-hk.cg-dialup.net\", IPs: []net.IP{{84, 17, 56, 143}, {84, 17, 56, 147}, {84, 17, 56, 150}, {84, 17, 56, 152}, {84, 17, 56, 161}, {84, 17, 56, 164}, {84, 17, 56, 168}, {84, 17, 56, 179}, {84, 17, 56, 180}, {84, 17, 56, 183}}},\n\t\t{Region: \"Hungary\", Group: \"Premium TCP Europe\", Hostname: \"97-1-hu.cg-dialup.net\", IPs: []net.IP{{86, 106, 74, 247}, {86, 106, 74, 251}, {86, 106, 74, 253}, {185, 189, 114, 117}, {185, 189, 114, 118}, {185, 189, 114, 119}, {185, 189, 114, 121}, {185, 189, 114, 123}, {185, 189, 114, 125}, {185, 189, 114, 126}}},\n\t\t{Region: \"Hungary\", Group: \"Premium UDP Europe\", Hostname: \"87-1-hu.cg-dialup.net\", IPs: []net.IP{{86, 106, 74, 245}, {86, 106, 74, 247}, {86, 106, 74, 248}, {86, 106, 74, 249}, {86, 106, 74, 250}, {86, 106, 74, 252}, {86, 106, 74, 253}, {185, 189, 114, 120}, {185, 189, 114, 121}, {185, 189, 114, 122}}},\n\t\t{Region: \"Iceland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-is.cg-dialup.net\", IPs: []net.IP{{45, 133, 193, 3}, {45, 133, 193, 4}, {45, 133, 193, 6}, {45, 133, 193, 7}, {45, 133, 193, 8}, {45, 133, 193, 10}, {45, 133, 193, 11}, {45, 133, 193, 12}, {45, 133, 193, 13}, {45, 133, 193, 14}}},\n\t\t{Region: \"Iceland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-is.cg-dialup.net\", IPs: []net.IP{{45, 133, 193, 3}, {45, 133, 193, 5}, {45, 133, 193, 6}, {45, 133, 193, 7}, {45, 133, 193, 8}, {45, 133, 193, 9}, {45, 133, 193, 10}, {45, 133, 193, 11}, {45, 
133, 193, 13}, {45, 133, 193, 14}}},\n\t\t{Region: \"India\", Group: \"Premium TCP Europe\", Hostname: \"97-1-in.cg-dialup.net\", IPs: []net.IP{{103, 13, 112, 68}, {103, 13, 112, 70}, {103, 13, 112, 72}, {103, 13, 112, 74}, {103, 13, 112, 75}, {103, 13, 113, 74}, {103, 13, 113, 79}, {103, 13, 113, 82}, {103, 13, 113, 83}, {103, 13, 113, 84}}},\n\t\t{Region: \"India\", Group: \"Premium UDP Europe\", Hostname: \"87-1-in.cg-dialup.net\", IPs: []net.IP{{103, 13, 112, 67}, {103, 13, 112, 70}, {103, 13, 112, 71}, {103, 13, 112, 77}, {103, 13, 112, 80}, {103, 13, 113, 72}, {103, 13, 113, 74}, {103, 13, 113, 75}, {103, 13, 113, 77}, {103, 13, 113, 85}}},\n\t\t{Region: \"Indonesia\", Group: \"Premium TCP Asia\", Hostname: \"96-1-id.cg-dialup.net\", IPs: []net.IP{{146, 70, 14, 3}, {146, 70, 14, 4}, {146, 70, 14, 5}, {146, 70, 14, 6}, {146, 70, 14, 7}, {146, 70, 14, 10}, {146, 70, 14, 12}, {146, 70, 14, 13}, {146, 70, 14, 15}, {146, 70, 14, 16}}},\n\t\t{Region: \"Indonesia\", Group: \"Premium UDP Asia\", Hostname: \"95-1-id.cg-dialup.net\", IPs: []net.IP{{146, 70, 14, 3}, {146, 70, 14, 5}, {146, 70, 14, 8}, {146, 70, 14, 9}, {146, 70, 14, 10}, {146, 70, 14, 12}, {146, 70, 14, 13}, {146, 70, 14, 14}, {146, 70, 14, 15}, {146, 70, 14, 16}}},\n\t\t{Region: \"Iran\", Group: \"Premium TCP Asia\", Hostname: \"96-1-ir.cg-dialup.net\", IPs: []net.IP{{62, 133, 46, 3}, {62, 133, 46, 4}, {62, 133, 46, 5}, {62, 133, 46, 6}, {62, 133, 46, 7}, {62, 133, 46, 8}, {62, 133, 46, 9}, {62, 133, 46, 10}, {62, 133, 46, 14}, {62, 133, 46, 15}}},\n\t\t{Region: \"Iran\", Group: \"Premium UDP Asia\", Hostname: \"95-1-ir.cg-dialup.net\", IPs: []net.IP{{62, 133, 46, 3}, {62, 133, 46, 4}, {62, 133, 46, 7}, {62, 133, 46, 8}, {62, 133, 46, 11}, {62, 133, 46, 12}, {62, 133, 46, 13}, {62, 133, 46, 14}, {62, 133, 46, 15}, {62, 133, 46, 16}}},\n\t\t{Region: \"Ireland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ie.cg-dialup.net\", IPs: []net.IP{{37, 120, 235, 154}, {37, 120, 235, 166}, {37, 120, 235, 
174}, {77, 81, 139, 35}, {84, 247, 48, 6}, {84, 247, 48, 19}, {84, 247, 48, 22}, {84, 247, 48, 23}, {84, 247, 48, 25}, {84, 247, 48, 26}}},\n\t\t{Region: \"Ireland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ie.cg-dialup.net\", IPs: []net.IP{{37, 120, 235, 147}, {37, 120, 235, 148}, {37, 120, 235, 153}, {37, 120, 235, 158}, {37, 120, 235, 169}, {37, 120, 235, 174}, {84, 247, 48, 8}, {84, 247, 48, 11}, {84, 247, 48, 20}, {84, 247, 48, 23}}},\n\t\t{Region: \"Isle of Man\", Group: \"Premium TCP Europe\", Hostname: \"97-1-im.cg-dialup.net\", IPs: []net.IP{{91, 90, 124, 147}, {91, 90, 124, 149}, {91, 90, 124, 150}, {91, 90, 124, 151}, {91, 90, 124, 152}, {91, 90, 124, 153}, {91, 90, 124, 154}, {91, 90, 124, 156}, {91, 90, 124, 157}, {91, 90, 124, 158}}},\n\t\t{Region: \"Isle of Man\", Group: \"Premium UDP Europe\", Hostname: \"87-1-im.cg-dialup.net\", IPs: []net.IP{{91, 90, 124, 147}, {91, 90, 124, 149}, {91, 90, 124, 150}, {91, 90, 124, 151}, {91, 90, 124, 152}, {91, 90, 124, 153}, {91, 90, 124, 154}, {91, 90, 124, 155}, {91, 90, 124, 156}, {91, 90, 124, 157}}},\n\t\t{Region: \"Israel\", Group: \"Premium TCP Europe\", Hostname: \"97-1-il.cg-dialup.net\", IPs: []net.IP{{160, 116, 0, 174}, {185, 77, 248, 103}, {185, 77, 248, 111}, {185, 77, 248, 113}, {185, 77, 248, 114}, {185, 77, 248, 124}, {185, 77, 248, 125}, {185, 77, 248, 127}, {185, 77, 248, 128}, {185, 77, 248, 129}}},\n\t\t{Region: \"Israel\", Group: \"Premium UDP Europe\", Hostname: \"87-1-il.cg-dialup.net\", IPs: []net.IP{{160, 116, 0, 163}, {160, 116, 0, 165}, {160, 116, 0, 172}, {185, 77, 248, 103}, {185, 77, 248, 106}, {185, 77, 248, 114}, {185, 77, 248, 117}, {185, 77, 248, 118}, {185, 77, 248, 126}, {185, 77, 248, 129}}},\n\t\t{Region: \"Italy\", Group: \"Premium TCP Europe\", Hostname: \"97-1-it.cg-dialup.net\", IPs: []net.IP{{84, 17, 58, 21}, {84, 17, 58, 100}, {84, 17, 58, 106}, {84, 17, 58, 111}, {84, 17, 58, 117}, {87, 101, 94, 122}, {212, 102, 55, 100}, {212, 102, 55, 106}, {212, 102, 55, 
110}, {212, 102, 55, 122}}},\n\t\t{Region: \"Italy\", Group: \"Premium UDP Europe\", Hostname: \"87-1-it.cg-dialup.net\", IPs: []net.IP{{84, 17, 58, 19}, {84, 17, 58, 95}, {84, 17, 58, 105}, {84, 17, 58, 119}, {84, 17, 58, 120}, {87, 101, 94, 116}, {185, 217, 71, 137}, {185, 217, 71, 138}, {185, 217, 71, 153}, {212, 102, 55, 108}}},\n\t\t{Region: \"Japan\", Group: \"Premium TCP Asia\", Hostname: \"96-1-jp.cg-dialup.net\", IPs: []net.IP{{156, 146, 35, 6}, {156, 146, 35, 10}, {156, 146, 35, 15}, {156, 146, 35, 22}, {156, 146, 35, 37}, {156, 146, 35, 39}, {156, 146, 35, 40}, {156, 146, 35, 41}, {156, 146, 35, 44}, {156, 146, 35, 50}}},\n\t\t{Region: \"Japan\", Group: \"Premium UDP Asia\", Hostname: \"95-1-jp.cg-dialup.net\", IPs: []net.IP{{156, 146, 35, 4}, {156, 146, 35, 14}, {156, 146, 35, 15}, {156, 146, 35, 18}, {156, 146, 35, 25}, {156, 146, 35, 34}, {156, 146, 35, 36}, {156, 146, 35, 46}, {156, 146, 35, 49}, {156, 146, 35, 50}}},\n\t\t{Region: \"Kazakhstan\", Group: \"Premium TCP Europe\", Hostname: \"97-1-kz.cg-dialup.net\", IPs: []net.IP{{62, 133, 47, 131}, {62, 133, 47, 132}, {62, 133, 47, 134}, {62, 133, 47, 136}, {62, 133, 47, 138}, {62, 133, 47, 139}, {62, 133, 47, 140}, {62, 133, 47, 142}, {62, 133, 47, 143}, {62, 133, 47, 144}}},\n\t\t{Region: \"Kazakhstan\", Group: \"Premium UDP Europe\", Hostname: \"87-1-kz.cg-dialup.net\", IPs: []net.IP{{62, 133, 47, 131}, {62, 133, 47, 132}, {62, 133, 47, 133}, {62, 133, 47, 134}, {62, 133, 47, 135}, {62, 133, 47, 138}, {62, 133, 47, 139}, {62, 133, 47, 140}, {62, 133, 47, 142}, {62, 133, 47, 143}}},\n\t\t{Region: \"Kenya\", Group: \"Premium TCP Asia\", Hostname: \"96-1-ke.cg-dialup.net\", IPs: []net.IP{{62, 12, 118, 195}, {62, 12, 118, 196}, {62, 12, 118, 197}, {62, 12, 118, 198}, {62, 12, 118, 199}, {62, 12, 118, 200}, {62, 12, 118, 201}, {62, 12, 118, 202}, {62, 12, 118, 203}, {62, 12, 118, 204}}},\n\t\t{Region: \"Kenya\", Group: \"Premium UDP Asia\", Hostname: \"95-1-ke.cg-dialup.net\", IPs: []net.IP{{62, 12, 
118, 195}, {62, 12, 118, 196}, {62, 12, 118, 197}, {62, 12, 118, 198}, {62, 12, 118, 199}, {62, 12, 118, 200}, {62, 12, 118, 201}, {62, 12, 118, 202}, {62, 12, 118, 203}, {62, 12, 118, 204}}},\n\t\t{Region: \"Korea\", Group: \"Premium TCP Asia\", Hostname: \"96-1-kr.cg-dialup.net\", IPs: []net.IP{{79, 110, 55, 131}, {79, 110, 55, 134}, {79, 110, 55, 141}, {79, 110, 55, 147}, {79, 110, 55, 148}, {79, 110, 55, 151}, {79, 110, 55, 152}, {79, 110, 55, 153}, {79, 110, 55, 155}, {79, 110, 55, 157}}},\n\t\t{Region: \"Korea\", Group: \"Premium UDP Asia\", Hostname: \"95-1-kr.cg-dialup.net\", IPs: []net.IP{{79, 110, 55, 131}, {79, 110, 55, 133}, {79, 110, 55, 134}, {79, 110, 55, 136}, {79, 110, 55, 138}, {79, 110, 55, 140}, {79, 110, 55, 149}, {79, 110, 55, 151}, {79, 110, 55, 152}, {79, 110, 55, 157}}},\n\t\t{Region: \"Latvia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-lv.cg-dialup.net\", IPs: []net.IP{{109, 248, 148, 244}, {109, 248, 148, 245}, {109, 248, 148, 246}, {109, 248, 148, 247}, {109, 248, 148, 249}, {109, 248, 148, 250}, {109, 248, 148, 253}, {109, 248, 149, 22}, {109, 248, 149, 24}, {109, 248, 149, 25}}},\n\t\t{Region: \"Latvia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-lv.cg-dialup.net\", IPs: []net.IP{{109, 248, 148, 248}, {109, 248, 148, 250}, {109, 248, 148, 254}, {109, 248, 149, 19}, {109, 248, 149, 20}, {109, 248, 149, 22}, {109, 248, 149, 24}, {109, 248, 149, 26}, {109, 248, 149, 28}, {109, 248, 149, 30}}},\n\t\t{Region: \"Liechtenstein\", Group: \"Premium UDP Europe\", Hostname: \"87-1-li.cg-dialup.net\", IPs: []net.IP{{91, 90, 122, 131}, {91, 90, 122, 134}, {91, 90, 122, 137}, {91, 90, 122, 138}, {91, 90, 122, 139}, {91, 90, 122, 140}, {91, 90, 122, 141}, {91, 90, 122, 142}, {91, 90, 122, 144}, {91, 90, 122, 145}}},\n\t\t{Region: \"Lithuania\", Group: \"Premium TCP Europe\", Hostname: \"97-1-lt.cg-dialup.net\", IPs: []net.IP{{85, 206, 162, 212}, {85, 206, 162, 215}, {85, 206, 162, 219}, {85, 206, 162, 222}, {85, 206, 165, 17}, {85, 206, 
165, 23}, {85, 206, 165, 25}, {85, 206, 165, 26}, {85, 206, 165, 30}, {85, 206, 165, 31}}},\n\t\t{Region: \"Lithuania\", Group: \"Premium UDP Europe\", Hostname: \"87-1-lt.cg-dialup.net\", IPs: []net.IP{{85, 206, 162, 209}, {85, 206, 162, 210}, {85, 206, 162, 211}, {85, 206, 162, 213}, {85, 206, 162, 214}, {85, 206, 162, 217}, {85, 206, 162, 218}, {85, 206, 162, 220}, {85, 206, 165, 26}, {85, 206, 165, 30}}},\n\t\t{Region: \"Luxembourg\", Group: \"Premium UDP Europe\", Hostname: \"87-1-lu.cg-dialup.net\", IPs: []net.IP{{5, 253, 204, 7}, {5, 253, 204, 10}, {5, 253, 204, 12}, {5, 253, 204, 23}, {5, 253, 204, 26}, {5, 253, 204, 30}, {5, 253, 204, 37}, {5, 253, 204, 39}, {5, 253, 204, 44}, {5, 253, 204, 45}}},\n\t\t{Region: \"Macao\", Group: \"Premium TCP Asia\", Hostname: \"96-1-mo.cg-dialup.net\", IPs: []net.IP{{84, 252, 92, 131}, {84, 252, 92, 133}, {84, 252, 92, 135}, {84, 252, 92, 137}, {84, 252, 92, 138}, {84, 252, 92, 139}, {84, 252, 92, 141}, {84, 252, 92, 142}, {84, 252, 92, 144}, {84, 252, 92, 145}}},\n\t\t{Region: \"Macao\", Group: \"Premium UDP Asia\", Hostname: \"95-1-mo.cg-dialup.net\", IPs: []net.IP{{84, 252, 92, 132}, {84, 252, 92, 134}, {84, 252, 92, 135}, {84, 252, 92, 136}, {84, 252, 92, 137}, {84, 252, 92, 139}, {84, 252, 92, 141}, {84, 252, 92, 143}, {84, 252, 92, 144}, {84, 252, 92, 145}}},\n\t\t{Region: \"Macedonia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-mk.cg-dialup.net\", IPs: []net.IP{{185, 225, 28, 3}, {185, 225, 28, 4}, {185, 225, 28, 5}, {185, 225, 28, 6}, {185, 225, 28, 7}, {185, 225, 28, 8}, {185, 225, 28, 9}, {185, 225, 28, 10}, {185, 225, 28, 11}, {185, 225, 28, 12}}},\n\t\t{Region: \"Macedonia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-mk.cg-dialup.net\", IPs: []net.IP{{185, 225, 28, 3}, {185, 225, 28, 4}, {185, 225, 28, 5}, {185, 225, 28, 6}, {185, 225, 28, 7}, {185, 225, 28, 8}, {185, 225, 28, 9}, {185, 225, 28, 10}, {185, 225, 28, 11}, {185, 225, 28, 12}}},\n\t\t{Region: \"Malaysia\", Group: \"Premium TCP Asia\", 
Hostname: \"96-1-my.cg-dialup.net\", IPs: []net.IP{{146, 70, 15, 4}, {146, 70, 15, 6}, {146, 70, 15, 8}, {146, 70, 15, 9}, {146, 70, 15, 10}, {146, 70, 15, 11}, {146, 70, 15, 12}, {146, 70, 15, 13}, {146, 70, 15, 15}, {146, 70, 15, 16}}},\n\t\t{Region: \"Malaysia\", Group: \"Premium UDP Asia\", Hostname: \"95-1-my.cg-dialup.net\", IPs: []net.IP{{146, 70, 15, 3}, {146, 70, 15, 4}, {146, 70, 15, 5}, {146, 70, 15, 6}, {146, 70, 15, 7}, {146, 70, 15, 8}, {146, 70, 15, 10}, {146, 70, 15, 12}, {146, 70, 15, 15}, {146, 70, 15, 16}}},\n\t\t{Region: \"Malta\", Group: \"Premium TCP Europe\", Hostname: \"97-1-mt.cg-dialup.net\", IPs: []net.IP{{176, 125, 230, 133}, {176, 125, 230, 135}, {176, 125, 230, 136}, {176, 125, 230, 137}, {176, 125, 230, 138}, {176, 125, 230, 140}, {176, 125, 230, 142}, {176, 125, 230, 143}, {176, 125, 230, 144}, {176, 125, 230, 145}}},\n\t\t{Region: \"Malta\", Group: \"Premium UDP Europe\", Hostname: \"87-1-mt.cg-dialup.net\", IPs: []net.IP{{176, 125, 230, 131}, {176, 125, 230, 133}, {176, 125, 230, 134}, {176, 125, 230, 136}, {176, 125, 230, 137}, {176, 125, 230, 138}, {176, 125, 230, 139}, {176, 125, 230, 140}, {176, 125, 230, 144}, {176, 125, 230, 145}}},\n\t\t{Region: \"Mexico\", Group: \"Premium TCP USA\", Hostname: \"93-1-mx.cg-dialup.net\", IPs: []net.IP{{77, 81, 142, 132}, {77, 81, 142, 134}, {77, 81, 142, 136}, {77, 81, 142, 139}, {77, 81, 142, 142}, {77, 81, 142, 154}, {77, 81, 142, 155}, {77, 81, 142, 157}, {77, 81, 142, 158}, {77, 81, 142, 159}}},\n\t\t{Region: \"Mexico\", Group: \"Premium UDP USA\", Hostname: \"94-1-mx.cg-dialup.net\", IPs: []net.IP{{77, 81, 142, 130}, {77, 81, 142, 131}, {77, 81, 142, 132}, {77, 81, 142, 139}, {77, 81, 142, 141}, {77, 81, 142, 142}, {77, 81, 142, 146}, {77, 81, 142, 147}, {77, 81, 142, 154}, {77, 81, 142, 159}}},\n\t\t{Region: \"Moldova\", Group: \"Premium TCP Europe\", Hostname: \"97-1-md.cg-dialup.net\", IPs: []net.IP{{178, 175, 130, 243}, {178, 175, 130, 244}, {178, 175, 130, 245}, {178, 175, 130, 
246}, {178, 175, 130, 251}, {178, 175, 130, 254}, {178, 175, 142, 131}, {178, 175, 142, 132}, {178, 175, 142, 133}, {178, 175, 142, 134}}},\n\t\t{Region: \"Moldova\", Group: \"Premium UDP Europe\", Hostname: \"87-1-md.cg-dialup.net\", IPs: []net.IP{{178, 175, 130, 243}, {178, 175, 130, 244}, {178, 175, 130, 246}, {178, 175, 130, 250}, {178, 175, 130, 251}, {178, 175, 130, 253}, {178, 175, 130, 254}, {178, 175, 142, 132}, {178, 175, 142, 133}, {178, 175, 142, 134}}},\n\t\t{Region: \"Monaco\", Group: \"Premium TCP Europe\", Hostname: \"97-1-mc.cg-dialup.net\", IPs: []net.IP{{95, 181, 233, 131}, {95, 181, 233, 132}, {95, 181, 233, 133}, {95, 181, 233, 137}, {95, 181, 233, 138}, {95, 181, 233, 139}, {95, 181, 233, 140}, {95, 181, 233, 141}, {95, 181, 233, 143}, {95, 181, 233, 144}}},\n\t\t{Region: \"Monaco\", Group: \"Premium UDP Europe\", Hostname: \"87-1-mc.cg-dialup.net\", IPs: []net.IP{{95, 181, 233, 132}, {95, 181, 233, 135}, {95, 181, 233, 136}, {95, 181, 233, 137}, {95, 181, 233, 138}, {95, 181, 233, 139}, {95, 181, 233, 141}, {95, 181, 233, 142}, {95, 181, 233, 143}, {95, 181, 233, 144}}},\n\t\t{Region: \"Mongolia\", Group: \"Premium TCP Asia\", Hostname: \"96-1-mn.cg-dialup.net\", IPs: []net.IP{{185, 253, 163, 132}, {185, 253, 163, 133}, {185, 253, 163, 135}, {185, 253, 163, 136}, {185, 253, 163, 139}, {185, 253, 163, 140}, {185, 253, 163, 141}, {185, 253, 163, 142}, {185, 253, 163, 143}, {185, 253, 163, 144}}},\n\t\t{Region: \"Mongolia\", Group: \"Premium UDP Asia\", Hostname: \"95-1-mn.cg-dialup.net\", IPs: []net.IP{{185, 253, 163, 131}, {185, 253, 163, 133}, {185, 253, 163, 134}, {185, 253, 163, 137}, {185, 253, 163, 138}, {185, 253, 163, 139}, {185, 253, 163, 140}, {185, 253, 163, 141}, {185, 253, 163, 142}, {185, 253, 163, 144}}},\n\t\t{Region: \"Montenegro\", Group: \"Premium TCP Europe\", Hostname: \"97-1-me.cg-dialup.net\", IPs: []net.IP{{176, 125, 229, 131}, {176, 125, 229, 135}, {176, 125, 229, 137}, {176, 125, 229, 138}, {176, 125, 229, 140}, {176, 
125, 229, 141}, {176, 125, 229, 142}, {176, 125, 229, 143}, {176, 125, 229, 144}, {176, 125, 229, 145}}},\n\t\t{Region: \"Montenegro\", Group: \"Premium UDP Europe\", Hostname: \"87-1-me.cg-dialup.net\", IPs: []net.IP{{176, 125, 229, 131}, {176, 125, 229, 134}, {176, 125, 229, 136}, {176, 125, 229, 137}, {176, 125, 229, 138}, {176, 125, 229, 139}, {176, 125, 229, 140}, {176, 125, 229, 141}, {176, 125, 229, 143}, {176, 125, 229, 144}}},\n\t\t{Region: \"Morocco\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ma.cg-dialup.net\", IPs: []net.IP{{95, 181, 232, 132}, {95, 181, 232, 133}, {95, 181, 232, 134}, {95, 181, 232, 136}, {95, 181, 232, 137}, {95, 181, 232, 138}, {95, 181, 232, 139}, {95, 181, 232, 140}, {95, 181, 232, 141}, {95, 181, 232, 144}}},\n\t\t{Region: \"Morocco\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ma.cg-dialup.net\", IPs: []net.IP{{95, 181, 232, 131}, {95, 181, 232, 132}, {95, 181, 232, 133}, {95, 181, 232, 135}, {95, 181, 232, 137}, {95, 181, 232, 139}, {95, 181, 232, 140}, {95, 181, 232, 141}, {95, 181, 232, 142}, {95, 181, 232, 143}}},\n\t\t{Region: \"Netherlands\", Group: \"Premium TCP Europe\", Hostname: \"97-1-nl.cg-dialup.net\", IPs: []net.IP{{84, 17, 47, 98}, {181, 214, 206, 22}, {181, 214, 206, 27}, {181, 214, 206, 36}, {195, 78, 54, 10}, {195, 78, 54, 20}, {195, 78, 54, 43}, {195, 78, 54, 50}, {195, 78, 54, 119}, {195, 181, 172, 78}}},\n\t\t{Region: \"Netherlands\", Group: \"Premium UDP Europe\", Hostname: \"87-1-nl.cg-dialup.net\", IPs: []net.IP{{84, 17, 47, 110}, {181, 214, 206, 29}, {181, 214, 206, 42}, {195, 78, 54, 8}, {195, 78, 54, 19}, {195, 78, 54, 47}, {195, 78, 54, 110}, {195, 78, 54, 141}, {195, 78, 54, 143}, {195, 78, 54, 157}}},\n\t\t{Region: \"New Zealand\", Group: \"Premium TCP Asia\", Hostname: \"96-1-nz.cg-dialup.net\", IPs: []net.IP{{43, 250, 207, 98}, {43, 250, 207, 99}, {43, 250, 207, 100}, {43, 250, 207, 101}, {43, 250, 207, 102}, {43, 250, 207, 103}, {43, 250, 207, 105}, {43, 250, 207, 106}, {43, 250, 207, 
108}, {43, 250, 207, 109}}},\n\t\t{Region: \"New Zealand\", Group: \"Premium UDP Asia\", Hostname: \"95-1-nz.cg-dialup.net\", IPs: []net.IP{{43, 250, 207, 98}, {43, 250, 207, 99}, {43, 250, 207, 102}, {43, 250, 207, 104}, {43, 250, 207, 105}, {43, 250, 207, 106}, {43, 250, 207, 107}, {43, 250, 207, 108}, {43, 250, 207, 109}, {43, 250, 207, 110}}},\n\t\t{Region: \"Nigeria\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ng.cg-dialup.net\", IPs: []net.IP{{102, 165, 25, 68}, {102, 165, 25, 69}, {102, 165, 25, 70}, {102, 165, 25, 71}, {102, 165, 25, 72}, {102, 165, 25, 73}, {102, 165, 25, 75}, {102, 165, 25, 76}, {102, 165, 25, 77}, {102, 165, 25, 78}}},\n\t\t{Region: \"Nigeria\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ng.cg-dialup.net\", IPs: []net.IP{{102, 165, 25, 68}, {102, 165, 25, 69}, {102, 165, 25, 70}, {102, 165, 25, 71}, {102, 165, 25, 72}, {102, 165, 25, 74}, {102, 165, 25, 75}, {102, 165, 25, 76}, {102, 165, 25, 77}, {102, 165, 25, 78}}},\n\t\t{Region: \"Norway\", Group: \"Premium TCP Europe\", Hostname: \"97-1-no.cg-dialup.net\", IPs: []net.IP{{45, 12, 223, 137}, {45, 12, 223, 140}, {185, 206, 225, 29}, {185, 206, 225, 231}, {185, 253, 97, 234}, {185, 253, 97, 236}, {185, 253, 97, 238}, {185, 253, 97, 244}, {185, 253, 97, 250}, {185, 253, 97, 254}}},\n\t\t{Region: \"Norway\", Group: \"Premium UDP Europe\", Hostname: \"87-1-no.cg-dialup.net\", IPs: []net.IP{{45, 12, 223, 133}, {45, 12, 223, 134}, {45, 12, 223, 142}, {185, 206, 225, 227}, {185, 206, 225, 228}, {185, 206, 225, 231}, {185, 206, 225, 235}, {185, 253, 97, 237}, {185, 253, 97, 246}, {185, 253, 97, 254}}},\n\t\t{Region: \"Pakistan\", Group: \"Premium TCP Asia\", Hostname: \"96-1-pk.cg-dialup.net\", IPs: []net.IP{{146, 70, 12, 3}, {146, 70, 12, 4}, {146, 70, 12, 6}, {146, 70, 12, 8}, {146, 70, 12, 9}, {146, 70, 12, 10}, {146, 70, 12, 11}, {146, 70, 12, 12}, {146, 70, 12, 13}, {146, 70, 12, 14}}},\n\t\t{Region: \"Pakistan\", Group: \"Premium UDP Asia\", Hostname: 
\"95-1-pk.cg-dialup.net\", IPs: []net.IP{{146, 70, 12, 4}, {146, 70, 12, 5}, {146, 70, 12, 6}, {146, 70, 12, 7}, {146, 70, 12, 8}, {146, 70, 12, 10}, {146, 70, 12, 11}, {146, 70, 12, 12}, {146, 70, 12, 13}, {146, 70, 12, 14}}},\n\t\t{Region: \"Panama\", Group: \"Premium TCP Europe\", Hostname: \"97-1-pa.cg-dialup.net\", IPs: []net.IP{{91, 90, 126, 131}, {91, 90, 126, 132}, {91, 90, 126, 133}, {91, 90, 126, 134}, {91, 90, 126, 136}, {91, 90, 126, 138}, {91, 90, 126, 139}, {91, 90, 126, 141}, {91, 90, 126, 142}, {91, 90, 126, 145}}},\n\t\t{Region: \"Panama\", Group: \"Premium UDP Europe\", Hostname: \"87-1-pa.cg-dialup.net\", IPs: []net.IP{{91, 90, 126, 131}, {91, 90, 126, 133}, {91, 90, 126, 134}, {91, 90, 126, 135}, {91, 90, 126, 136}, {91, 90, 126, 138}, {91, 90, 126, 140}, {91, 90, 126, 141}, {91, 90, 126, 142}, {91, 90, 126, 145}}},\n\t\t{Region: \"Philippines\", Group: \"Premium TCP Asia\", Hostname: \"96-1-ph.cg-dialup.net\", IPs: []net.IP{{188, 214, 125, 37}, {188, 214, 125, 38}, {188, 214, 125, 40}, {188, 214, 125, 43}, {188, 214, 125, 44}, {188, 214, 125, 45}, {188, 214, 125, 52}, {188, 214, 125, 55}, {188, 214, 125, 61}, {188, 214, 125, 62}}},\n\t\t{Region: \"Philippines\", Group: \"Premium UDP Asia\", Hostname: \"95-1-ph.cg-dialup.net\", IPs: []net.IP{{188, 214, 125, 37}, {188, 214, 125, 40}, {188, 214, 125, 46}, {188, 214, 125, 49}, {188, 214, 125, 52}, {188, 214, 125, 54}, {188, 214, 125, 57}, {188, 214, 125, 58}, {188, 214, 125, 61}, {188, 214, 125, 62}}},\n\t\t{Region: \"Poland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-pl.cg-dialup.net\", IPs: []net.IP{{138, 199, 59, 132}, {138, 199, 59, 136}, {138, 199, 59, 137}, {138, 199, 59, 143}, {138, 199, 59, 144}, {138, 199, 59, 152}, {138, 199, 59, 153}, {138, 199, 59, 166}, {138, 199, 59, 174}, {138, 199, 59, 175}}},\n\t\t{Region: \"Poland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-pl.cg-dialup.net\", IPs: []net.IP{{138, 199, 59, 130}, {138, 199, 59, 136}, {138, 199, 59, 148}, {138, 199, 59, 
149}, {138, 199, 59, 153}, {138, 199, 59, 156}, {138, 199, 59, 157}, {138, 199, 59, 164}, {138, 199, 59, 171}, {138, 199, 59, 173}}},\n\t\t{Region: \"Portugal\", Group: \"Premium TCP Europe\", Hostname: \"97-1-pt.cg-dialup.net\", IPs: []net.IP{{89, 26, 243, 112}, {89, 26, 243, 115}, {89, 26, 243, 195}, {89, 26, 243, 216}, {89, 26, 243, 218}, {89, 26, 243, 220}, {89, 26, 243, 222}, {89, 26, 243, 223}, {89, 26, 243, 225}, {89, 26, 243, 228}}},\n\t\t{Region: \"Portugal\", Group: \"Premium UDP Europe\", Hostname: \"87-1-pt.cg-dialup.net\", IPs: []net.IP{{89, 26, 243, 99}, {89, 26, 243, 113}, {89, 26, 243, 115}, {89, 26, 243, 195}, {89, 26, 243, 199}, {89, 26, 243, 216}, {89, 26, 243, 219}, {89, 26, 243, 225}, {89, 26, 243, 226}, {89, 26, 243, 227}}},\n\t\t{Region: \"Qatar\", Group: \"Premium TCP Europe\", Hostname: \"97-1-qa.cg-dialup.net\", IPs: []net.IP{{95, 181, 234, 133}, {95, 181, 234, 135}, {95, 181, 234, 136}, {95, 181, 234, 137}, {95, 181, 234, 138}, {95, 181, 234, 139}, {95, 181, 234, 140}, {95, 181, 234, 141}, {95, 181, 234, 142}, {95, 181, 234, 143}}},\n\t\t{Region: \"Qatar\", Group: \"Premium UDP Europe\", Hostname: \"87-1-qa.cg-dialup.net\", IPs: []net.IP{{95, 181, 234, 131}, {95, 181, 234, 132}, {95, 181, 234, 133}, {95, 181, 234, 134}, {95, 181, 234, 135}, {95, 181, 234, 137}, {95, 181, 234, 138}, {95, 181, 234, 139}, {95, 181, 234, 142}, {95, 181, 234, 143}}},\n\t\t{Region: \"Russian Federation\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ru.cg-dialup.net\", IPs: []net.IP{{5, 8, 16, 72}, {5, 8, 16, 74}, {5, 8, 16, 84}, {5, 8, 16, 85}, {5, 8, 16, 123}, {5, 8, 16, 124}, {5, 8, 16, 132}, {146, 70, 52, 35}, {146, 70, 52, 44}, {146, 70, 52, 54}}},\n\t\t{Region: \"Russian Federation\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ru.cg-dialup.net\", IPs: []net.IP{{5, 8, 16, 75}, {5, 8, 16, 87}, {5, 8, 16, 99}, {5, 8, 16, 110}, {5, 8, 16, 138}, {146, 70, 52, 29}, {146, 70, 52, 52}, {146, 70, 52, 58}, {146, 70, 52, 59}, {146, 70, 52, 
67}}},\n\t\t{Region: \"Saudi Arabia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-sa.cg-dialup.net\", IPs: []net.IP{{95, 181, 235, 131}, {95, 181, 235, 133}, {95, 181, 235, 134}, {95, 181, 235, 135}, {95, 181, 235, 137}, {95, 181, 235, 138}, {95, 181, 235, 139}, {95, 181, 235, 140}, {95, 181, 235, 141}, {95, 181, 235, 142}}},\n\t\t{Region: \"Saudi Arabia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-sa.cg-dialup.net\", IPs: []net.IP{{95, 181, 235, 131}, {95, 181, 235, 132}, {95, 181, 235, 134}, {95, 181, 235, 135}, {95, 181, 235, 136}, {95, 181, 235, 137}, {95, 181, 235, 138}, {95, 181, 235, 139}, {95, 181, 235, 141}, {95, 181, 235, 144}}},\n\t\t{Region: \"Serbia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-rs.cg-dialup.net\", IPs: []net.IP{{37, 120, 193, 179}, {37, 120, 193, 186}, {37, 120, 193, 188}, {37, 120, 193, 190}, {141, 98, 103, 36}, {141, 98, 103, 38}, {141, 98, 103, 39}, {141, 98, 103, 43}, {141, 98, 103, 44}, {141, 98, 103, 46}}},\n\t\t{Region: \"Serbia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-rs.cg-dialup.net\", IPs: []net.IP{{37, 120, 193, 180}, {37, 120, 193, 186}, {37, 120, 193, 187}, {37, 120, 193, 188}, {37, 120, 193, 189}, {37, 120, 193, 190}, {141, 98, 103, 35}, {141, 98, 103, 36}, {141, 98, 103, 39}, {141, 98, 103, 41}}},\n\t\t{Region: \"Singapore\", Group: \"Premium TCP Asia\", Hostname: \"96-1-sg.cg-dialup.net\", IPs: []net.IP{{84, 17, 39, 162}, {84, 17, 39, 165}, {84, 17, 39, 168}, {84, 17, 39, 171}, {84, 17, 39, 175}, {84, 17, 39, 177}, {84, 17, 39, 178}, {84, 17, 39, 181}, {84, 17, 39, 183}, {84, 17, 39, 185}}},\n\t\t{Region: \"Singapore\", Group: \"Premium UDP Asia\", Hostname: \"95-1-sg.cg-dialup.net\", IPs: []net.IP{{84, 17, 39, 162}, {84, 17, 39, 165}, {84, 17, 39, 166}, {84, 17, 39, 167}, {84, 17, 39, 171}, {84, 17, 39, 174}, {84, 17, 39, 175}, {84, 17, 39, 178}, {84, 17, 39, 180}, {84, 17, 39, 185}}},\n\t\t{Region: \"Slovakia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-sk.cg-dialup.net\", IPs: 
[]net.IP{{185, 245, 85, 227}, {185, 245, 85, 228}, {185, 245, 85, 229}, {185, 245, 85, 230}, {185, 245, 85, 231}, {185, 245, 85, 232}, {185, 245, 85, 233}, {185, 245, 85, 234}, {185, 245, 85, 235}, {185, 245, 85, 236}}},\n\t\t{Region: \"Slovakia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-sk.cg-dialup.net\", IPs: []net.IP{{185, 245, 85, 227}, {185, 245, 85, 228}, {185, 245, 85, 229}, {185, 245, 85, 230}, {185, 245, 85, 231}, {185, 245, 85, 232}, {185, 245, 85, 233}, {185, 245, 85, 234}, {185, 245, 85, 235}, {185, 245, 85, 236}}},\n\t\t{Region: \"Slovenia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-si.cg-dialup.net\", IPs: []net.IP{{195, 80, 150, 211}, {195, 80, 150, 212}, {195, 80, 150, 214}, {195, 80, 150, 215}, {195, 80, 150, 216}, {195, 80, 150, 217}, {195, 80, 150, 218}, {195, 80, 150, 219}, {195, 80, 150, 221}, {195, 80, 150, 222}}},\n\t\t{Region: \"Slovenia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-si.cg-dialup.net\", IPs: []net.IP{{195, 80, 150, 211}, {195, 80, 150, 212}, {195, 80, 150, 214}, {195, 80, 150, 215}, {195, 80, 150, 216}, {195, 80, 150, 217}, {195, 80, 150, 219}, {195, 80, 150, 220}, {195, 80, 150, 221}, {195, 80, 150, 222}}},\n\t\t{Region: \"South Africa\", Group: \"Premium TCP Asia\", Hostname: \"96-1-za.cg-dialup.net\", IPs: []net.IP{{154, 127, 50, 212}, {154, 127, 50, 215}, {154, 127, 50, 217}, {154, 127, 50, 219}, {154, 127, 50, 220}, {154, 127, 50, 222}, {154, 127, 60, 196}, {154, 127, 60, 198}, {154, 127, 60, 199}, {154, 127, 60, 200}}},\n\t\t{Region: \"South Africa\", Group: \"Premium TCP Europe\", Hostname: \"97-1-za.cg-dialup.net\", IPs: []net.IP{{197, 85, 7, 26}, {197, 85, 7, 27}, {197, 85, 7, 28}, {197, 85, 7, 29}, {197, 85, 7, 30}, {197, 85, 7, 31}, {197, 85, 7, 131}, {197, 85, 7, 132}, {197, 85, 7, 133}, {197, 85, 7, 134}}},\n\t\t{Region: \"South Africa\", Group: \"Premium UDP Asia\", Hostname: \"95-1-za.cg-dialup.net\", IPs: []net.IP{{154, 127, 50, 210}, {154, 127, 50, 214}, {154, 127, 50, 218}, {154, 127, 
50, 219}, {154, 127, 50, 220}, {154, 127, 50, 221}, {154, 127, 50, 222}, {154, 127, 60, 195}, {154, 127, 60, 199}, {154, 127, 60, 206}}},\n\t\t{Region: \"South Africa\", Group: \"Premium UDP Europe\", Hostname: \"87-1-za.cg-dialup.net\", IPs: []net.IP{{197, 85, 7, 26}, {197, 85, 7, 27}, {197, 85, 7, 28}, {197, 85, 7, 29}, {197, 85, 7, 30}, {197, 85, 7, 31}, {197, 85, 7, 131}, {197, 85, 7, 132}, {197, 85, 7, 133}, {197, 85, 7, 134}}},\n\t\t{Region: \"Spain\", Group: \"Premium TCP Europe\", Hostname: \"97-1-es.cg-dialup.net\", IPs: []net.IP{{37, 120, 142, 41}, {37, 120, 142, 52}, {37, 120, 142, 55}, {37, 120, 142, 61}, {37, 120, 142, 173}, {84, 17, 62, 131}, {84, 17, 62, 142}, {84, 17, 62, 144}, {185, 93, 3, 108}, {185, 93, 3, 114}}},\n\t\t{Region: \"Sri Lanka\", Group: \"Premium TCP Europe\", Hostname: \"97-1-lk.cg-dialup.net\", IPs: []net.IP{{95, 181, 239, 131}, {95, 181, 239, 132}, {95, 181, 239, 133}, {95, 181, 239, 134}, {95, 181, 239, 135}, {95, 181, 239, 136}, {95, 181, 239, 137}, {95, 181, 239, 138}, {95, 181, 239, 140}, {95, 181, 239, 144}}},\n\t\t{Region: \"Sri Lanka\", Group: \"Premium UDP Europe\", Hostname: \"87-1-lk.cg-dialup.net\", IPs: []net.IP{{95, 181, 239, 131}, {95, 181, 239, 132}, {95, 181, 239, 133}, {95, 181, 239, 134}, {95, 181, 239, 135}, {95, 181, 239, 136}, {95, 181, 239, 140}, {95, 181, 239, 141}, {95, 181, 239, 142}, {95, 181, 239, 144}}},\n\t\t{Region: \"Sweden\", Group: \"Premium TCP Europe\", Hostname: \"97-1-se.cg-dialup.net\", IPs: []net.IP{{188, 126, 73, 207}, {188, 126, 73, 209}, {188, 126, 73, 214}, {188, 126, 73, 219}, {188, 126, 79, 6}, {188, 126, 79, 11}, {188, 126, 79, 19}, {188, 126, 79, 25}, {195, 246, 120, 148}, {195, 246, 120, 161}}},\n\t\t{Region: \"Sweden\", Group: \"Premium UDP Europe\", Hostname: \"87-1-se.cg-dialup.net\", IPs: []net.IP{{188, 126, 73, 201}, {188, 126, 73, 211}, {188, 126, 73, 213}, {188, 126, 73, 218}, {188, 126, 79, 6}, {188, 126, 79, 8}, {188, 126, 79, 19}, {195, 246, 120, 142}, {195, 246, 120, 144}, 
{195, 246, 120, 168}}},\n\t\t{Region: \"Switzerland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ch.cg-dialup.net\", IPs: []net.IP{{84, 17, 52, 4}, {84, 17, 52, 20}, {84, 17, 52, 44}, {84, 17, 52, 65}, {84, 17, 52, 72}, {84, 17, 52, 80}, {84, 17, 52, 83}, {84, 17, 52, 85}, {185, 32, 222, 112}, {185, 189, 150, 73}}},\n\t\t{Region: \"Switzerland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ch.cg-dialup.net\", IPs: []net.IP{{84, 17, 52, 5}, {84, 17, 52, 14}, {84, 17, 52, 24}, {84, 17, 52, 64}, {84, 17, 52, 73}, {84, 17, 52, 85}, {185, 32, 222, 114}, {185, 189, 150, 52}, {185, 189, 150, 57}, {195, 225, 118, 43}}},\n\t\t{Region: \"Taiwan\", Group: \"Premium TCP Asia\", Hostname: \"96-1-tw.cg-dialup.net\", IPs: []net.IP{{45, 133, 181, 100}, {45, 133, 181, 102}, {45, 133, 181, 103}, {45, 133, 181, 106}, {45, 133, 181, 109}, {45, 133, 181, 113}, {45, 133, 181, 115}, {45, 133, 181, 116}, {45, 133, 181, 123}, {45, 133, 181, 125}}},\n\t\t{Region: \"Taiwan\", Group: \"Premium UDP Asia\", Hostname: \"95-1-tw.cg-dialup.net\", IPs: []net.IP{{45, 133, 181, 99}, {45, 133, 181, 102}, {45, 133, 181, 107}, {45, 133, 181, 108}, {45, 133, 181, 109}, {45, 133, 181, 114}, {45, 133, 181, 116}, {45, 133, 181, 117}, {45, 133, 181, 123}, {45, 133, 181, 124}}},\n\t\t{Region: \"Thailand\", Group: \"Premium TCP Asia\", Hostname: \"96-1-th.cg-dialup.net\", IPs: []net.IP{{146, 70, 13, 3}, {146, 70, 13, 4}, {146, 70, 13, 6}, {146, 70, 13, 7}, {146, 70, 13, 8}, {146, 70, 13, 9}, {146, 70, 13, 11}, {146, 70, 13, 13}, {146, 70, 13, 15}, {146, 70, 13, 16}}},\n\t\t{Region: \"Thailand\", Group: \"Premium UDP Asia\", Hostname: \"95-1-th.cg-dialup.net\", IPs: []net.IP{{146, 70, 13, 3}, {146, 70, 13, 4}, {146, 70, 13, 8}, {146, 70, 13, 9}, {146, 70, 13, 10}, {146, 70, 13, 11}, {146, 70, 13, 12}, {146, 70, 13, 13}, {146, 70, 13, 15}, {146, 70, 13, 16}}},\n\t\t{Region: \"Turkey\", Group: \"Premium TCP Europe\", Hostname: \"97-1-tr.cg-dialup.net\", IPs: []net.IP{{188, 213, 34, 9}, {188, 213, 34, 
11}, {188, 213, 34, 15}, {188, 213, 34, 16}, {188, 213, 34, 23}, {188, 213, 34, 25}, {188, 213, 34, 28}, {188, 213, 34, 41}, {188, 213, 34, 108}, {188, 213, 34, 110}}},\n\t\t{Region: \"Turkey\", Group: \"Premium UDP Europe\", Hostname: \"87-1-tr.cg-dialup.net\", IPs: []net.IP{{188, 213, 34, 8}, {188, 213, 34, 11}, {188, 213, 34, 14}, {188, 213, 34, 28}, {188, 213, 34, 35}, {188, 213, 34, 42}, {188, 213, 34, 43}, {188, 213, 34, 100}, {188, 213, 34, 103}, {188, 213, 34, 107}}},\n\t\t{Region: \"Ukraine\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ua.cg-dialup.net\", IPs: []net.IP{{31, 28, 161, 18}, {31, 28, 161, 20}, {31, 28, 161, 27}, {31, 28, 163, 34}, {31, 28, 163, 37}, {31, 28, 163, 44}, {62, 149, 7, 167}, {62, 149, 7, 172}, {62, 149, 29, 45}, {62, 149, 29, 57}}},\n\t\t{Region: \"Ukraine\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ua.cg-dialup.net\", IPs: []net.IP{{31, 28, 161, 27}, {31, 28, 163, 38}, {31, 28, 163, 42}, {31, 28, 163, 54}, {31, 28, 163, 61}, {62, 149, 7, 162}, {62, 149, 7, 163}, {62, 149, 29, 35}, {62, 149, 29, 38}, {62, 149, 29, 41}}},\n\t\t{Region: \"United Arab Emirates\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ae.cg-dialup.net\", IPs: []net.IP{{217, 138, 193, 179}, {217, 138, 193, 180}, {217, 138, 193, 181}, {217, 138, 193, 182}, {217, 138, 193, 183}, {217, 138, 193, 184}, {217, 138, 193, 185}, {217, 138, 193, 186}, {217, 138, 193, 188}, {217, 138, 193, 190}}},\n\t\t{Region: \"United Arab Emirates\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ae.cg-dialup.net\", IPs: []net.IP{{217, 138, 193, 179}, {217, 138, 193, 180}, {217, 138, 193, 181}, {217, 138, 193, 182}, {217, 138, 193, 183}, {217, 138, 193, 186}, {217, 138, 193, 187}, {217, 138, 193, 188}, {217, 138, 193, 189}, {217, 138, 193, 190}}},\n\t\t{Region: \"United Kingdom\", Group: \"Premium TCP Europe\", Hostname: \"97-1-gb.cg-dialup.net\", IPs: []net.IP{{45, 133, 173, 49}, {45, 133, 173, 56}, {45, 133, 173, 82}, {45, 133, 173, 86}, {95, 154, 200, 153}, {95, 154, 
200, 156}, {181, 215, 176, 103}, {181, 215, 176, 246}, {181, 215, 176, 251}, {194, 110, 13, 141}}},\n\t\t{Region: \"United Kingdom\", Group: \"Premium UDP Europe\", Hostname: \"87-1-gb.cg-dialup.net\", IPs: []net.IP{{45, 133, 172, 100}, {45, 133, 172, 126}, {45, 133, 173, 84}, {95, 154, 200, 174}, {181, 215, 176, 110}, {181, 215, 176, 151}, {181, 215, 176, 158}, {191, 101, 209, 142}, {194, 110, 13, 107}, {194, 110, 13, 128}}},\n\t\t{Region: \"United States\", Group: \"Premium TCP USA\", Hostname: \"93-1-us.cg-dialup.net\", IPs: []net.IP{{102, 129, 145, 15}, {102, 129, 152, 195}, {102, 129, 152, 248}, {154, 21, 208, 159}, {185, 242, 5, 117}, {185, 242, 5, 123}, {185, 242, 5, 229}, {191, 96, 227, 173}, {191, 96, 227, 196}, {199, 115, 119, 248}}},\n\t\t{Region: \"United States\", Group: \"Premium UDP USA\", Hostname: \"94-1-us.cg-dialup.net\", IPs: []net.IP{{23, 82, 14, 113}, {23, 105, 177, 122}, {45, 89, 173, 222}, {84, 17, 35, 4}, {89, 187, 171, 132}, {156, 146, 37, 45}, {156, 146, 59, 86}, {184, 170, 240, 231}, {191, 96, 150, 248}, {199, 115, 119, 248}}},\n\t\t{Region: \"Venezuela\", Group: \"Premium TCP USA\", Hostname: \"93-1-ve.cg-dialup.net\", IPs: []net.IP{{95, 181, 237, 132}, {95, 181, 237, 133}, {95, 181, 237, 134}, {95, 181, 237, 135}, {95, 181, 237, 136}, {95, 181, 237, 138}, {95, 181, 237, 139}, {95, 181, 237, 140}, {95, 181, 237, 141}, {95, 181, 237, 143}}},\n\t\t{Region: \"Venezuela\", Group: \"Premium UDP USA\", Hostname: \"94-1-ve.cg-dialup.net\", IPs: []net.IP{{95, 181, 237, 131}, {95, 181, 237, 132}, {95, 181, 237, 134}, {95, 181, 237, 135}, {95, 181, 237, 136}, {95, 181, 237, 140}, {95, 181, 237, 141}, {95, 181, 237, 142}, {95, 181, 237, 143}, {95, 181, 237, 144}}},\n\t\t{Region: \"Vietnam\", Group: \"Premium TCP Asia\", Hostname: \"96-1-vn.cg-dialup.net\", IPs: []net.IP{{188, 214, 152, 99}, {188, 214, 152, 101}, {188, 214, 152, 103}, {188, 214, 152, 104}, {188, 214, 152, 105}, {188, 214, 152, 106}, {188, 214, 152, 107}, {188, 214, 152, 108}, {188, 
214, 152, 109}, {188, 214, 152, 110}}},\n\t\t{Region: \"Vietnam\", Group: \"Premium UDP Asia\", Hostname: \"95-1-vn.cg-dialup.net\", IPs: []net.IP{{188, 214, 152, 99}, {188, 214, 152, 100}, {188, 214, 152, 101}, {188, 214, 152, 102}, {188, 214, 152, 103}, {188, 214, 152, 104}, {188, 214, 152, 105}, {188, 214, 152, 106}, {188, 214, 152, 107}, {188, 214, 152, 109}}},\n\t}\n}",
"func filterIPs(addrs []net.Addr) string {\n\tvar ipAddr string\n\tfor _, addr := range addrs {\n\t\tif v, ok := addr.(*net.IPNet); ok {\n\t\t\tif ip := v.IP.To4(); ip != nil {\n\t\t\t\tipAddr = v.IP.String()\n\t\t\t\tif !strings.HasPrefix(ipAddr, `169.254.`) {\n\t\t\t\t\treturn ipAddr\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn ipAddr\n}",
"func (c *cache) List() []string {\n\tnames := make([]string, len(c.items))\n\n\ti := 0\n\tfor key := range c.items {\n\t\tnames[i] = key\n\t\ti ++\n\t}\n\n\treturn names\n}",
"func getHostMapping() []verifyMapping {\n\treturn []verifyMapping{\n\t\t{\"volumes\", sourceBinds, expectedBinds},\n\t\t{\"logging\", sourceLogConfig, expectedLogconfig},\n\t\t{\"network_mode\", \"host\", container.NetworkMode(\"host\")},\n\t\t{\"ports\", sourcePortBindings, expectedPortBindings},\n\t\t{\"restart\", \"on-failure:5\", container.RestartPolicy{Name: \"on-failure\", MaximumRetryCount: 5}},\n\t\t{\"cap_add\", []interface{}{\"ALL\"}, strslice.StrSlice{\"ALL\"}},\n\t\t{\"cap_drop\", []interface{}{\"NET_ADMIN\"}, strslice.StrSlice{\"NET_ADMIN\"}},\n\t\t{\"dns\", []interface{}{\"8.8.8.8\"}, []string{\"8.8.8.8\"}},\n\t\t{\"dns_search\", []interface{}{\"example.com\"}, []string{\"example.com\"}},\n\t\t{\"extra_hosts\", []interface{}{\"somehost:162.242.195.82\", \"otherhost:50.31.209.229\"}, []string{\"somehost:162.242.195.82\", \"otherhost:50.31.209.229\"}},\n\t\t{\"ipc\", \"host\", container.IpcMode(\"host\")},\n\t\t{\"pid\", \"host\", container.PidMode(\"host\")},\n\t\t{\"external_links\", []interface{}{\"db\", \"test:external\"}, []string{\"db\", \"test:external\"}},\n\t\t{\"privileged\", true, true},\n\t\t{\"read_only\", true, true},\n\t\t{\"security_opt\", []interface{}{\"label:user:USER\", \"label:role:ROLE\"}, []string{\"label:user:USER\", \"label:role:ROLE\"}},\n\t\t{\"tmpfs\", []interface{}{\"/tmp:rw,size=787448k,mode=1777\"}, map[string]string{\"/tmp\": \"rw,size=787448k,mode=1777\"}},\n\t\t{\"userns_mode\", \"host\", container.UsernsMode(\"host\")},\n\t\t{\"shm_size\", \"64M\", int64(64000000)},\n\t\t{\"sysctls\", []interface{}{\"net.core.somaxconn=1024\", \"net.ipv4.tcp_syncookies=0\"}, map[string]string{\"net.core.somaxconn\": \"1024\", \"net.ipv4.tcp_syncookies\": \"0\"}},\n\t\t{\"init\", true, func(x bool) *bool { return &x }(true)}, //bodge for inline *bool\n\t\t// Resources\n\t\t{\"cgroup_parent\", \"m-executor-abcd\", \"m-executor-abcd\"},\n\t\t{\"devices\", []interface{}{\"/dev/ttyUSB0:/dev/ttyUSB0\"}, expectedDevices},\n\t\t{\"ulimits\", 
sourceUlimits, expectedUlimits},\n\t}\n}",
"func filterBindAddrs(addrs []PtBindAddr, methodNames []string) []PtBindAddr {\n\tvar result []PtBindAddr\n\n\tfor _, ba := range addrs {\n\t\tfor _, methodName := range methodNames {\n\t\t\tif ba.MethodName == methodName {\n\t\t\t\tresult = append(result, ba)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn result\n}",
"func (storage *SrvStorage) GetVhostBindings(vhost string) []*binding.Binding {\n\tvar bindings []*binding.Binding\n\tstorage.db.Iterate(\n\t\tfunc(key []byte, value []byte) {\n\t\t\tif !bytes.HasPrefix(key, []byte(bindingPrefix)) || getVhostFromKey(string(key)) != vhost {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbind := &binding.Binding{}\n\t\t\tbind.Unmarshal(value, storage.protoVersion)\n\t\t\tbindings = append(bindings, bind)\n\t\t},\n\t)\n\n\treturn bindings\n}",
"func (k Keeper) GetBlackListedAddrs() map[string]bool {\n\treturn k.blacklistedAddrs\n}",
"func (client WorkloadNetworksClient) ListDhcpSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}",
"func (t *Tracker) HostTimes() map[string]time.Time {\n\tm := make(map[string]time.Time)\n\tt.Query(func(t *Tracker) {\n\t\tfor k, v := range t.hostTimes {\n\t\t\tm[k] = v\n\t\t}\n\t})\n\n\treturn m\n}",
"func GroupHostList(group string) ([]string, error) {\n\t//default result not found\n\tresult := []string{}\n\t//get all host groups\n\thost_groups, err := HostGroups()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, host_group := range host_groups {\n\t\tif host_group.GroupName == group {\n\t\t\tresult = host_group.Members\n\t\t}\n\t}\n\treturn result, nil\n}",
"func (v *version) GatewayHosts() GatewayHostInformer {\n\treturn &gatewayHostInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}\n}",
"func addressRecordsByHostName(records map[addressRecordID]addressRecord) map[hostName][]addressRecord {\n\tbyName := make(map[hostName][]addressRecord)\n\tfor _, record := range records {\n\t\tbyName[record.name] = append(byName[record.name], record)\n\t}\n\n\treturn byName\n}",
"func (b *BlubberBlockDirectory) GetFreeHosts(req blubberstore.FreeHostsRequest,\n\thosts *blubberstore.BlockHolderList) error {\n\tvar allhosts []string\n\tvar onehost string\n\tvar i int\n\tvar min int = -1\n\n\tif b.doozerConn == nil {\n\t\tvar host string\n\t\tfor host, _ = range b.blockHostMap {\n\t\t\tallhosts = append(allhosts, host)\n\t\t}\n\t} else {\n\t\tvar names []string\n\t\tvar name string\n\t\tvar rev int64\n\t\tvar err error\n\n\t\trev, err = b.doozerConn.Rev()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// TODO(caoimhe): reading until the end may return MANY records.\n\t\tnames, err = b.doozerConn.Getdir(b.blockServicePrefix, rev, 0, -1)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, name = range names {\n\t\t\tvar data []byte\n\t\t\tdata, _, err = b.doozerConn.Get(b.blockServicePrefix+\"/\"+name, &rev)\n\t\t\tif err == nil {\n\t\t\t\tallhosts = append(allhosts, string(data))\n\t\t\t} else {\n\t\t\t\tlog.Print(\"Unable to retrieve version \", rev, \" of \",\n\t\t\t\t\tb.blockServicePrefix, \"/\", name, \": \", err)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i = 0; int32(i) < req.GetNumHosts(); i++ {\n\t\tvar ok bool\n\t\tif i >= len(allhosts) {\n\t\t\tbreak\n\t\t}\n\n\t\thosts.HostPort = append(hosts.HostPort, allhosts[i])\n\t\t_, ok = b.blockHostMap[allhosts[i]]\n\t\tif !ok {\n\t\t\tmin = 0\n\t\t} else if min < 1 || len(b.blockHostMap[allhosts[i]]) < min {\n\t\t\tmin = len(b.blockHostMap[allhosts[i]])\n\t\t}\n\t}\n\n\tfor i, onehost = range allhosts {\n\t\tvar ok bool\n\n\t\tif int32(i) < req.GetNumHosts() {\n\t\t\tcontinue\n\t\t}\n\n\t\t_, ok = b.blockHostMap[onehost]\n\t\tif !ok {\n\t\t\thosts.HostPort = append(hosts.HostPort[1:], onehost)\n\t\t\tmin = 0\n\t\t} else if len(b.blockHostMap[onehost]) <= min {\n\t\t\thosts.HostPort = append(hosts.HostPort[1:], onehost)\n\t\t\tmin = len(b.blockHostMap[onehost])\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (i *InternalData) GetAdapterName() {\n\n\tl, err := net.Interfaces()\n\tcheckErr(err)\n\n\tfor _, f := range l {\n\n\t\tbyNameInterface, err := net.InterfaceByName(f.Name)\n\t\tcheckErr(err)\n\n\t\taddr, err := byNameInterface.Addrs()\n\t\tcheckErr(err)\n\n\t\tfor _, v := range addr {\n\n\t\t\tif v.String()[:strings.Index(v.String(), \"/\")] == i.IntIP {\n\t\t\t\ti.AdapterName = f.Name\n\t\t\t}\n\t\t}\n\t}\n}",
"func NewGetAdapterHostEthInterfacesMoidDefault(code int) *GetAdapterHostEthInterfacesMoidDefault {\n\treturn &GetAdapterHostEthInterfacesMoidDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func privateNetworkInterfaces(all []net.Interface, fallback []string, logger log.Logger) []string {\n\tvar privInts []string\n\tfor _, i := range all {\n\t\taddrs, err := getInterfaceAddrs(&i)\n\t\tif err != nil {\n\t\t\tlevel.Warn(logger).Log(\"msg\", \"error getting addresses from network interface\", \"interface\", i.Name, \"err\", err)\n\t\t}\n\t\tfor _, a := range addrs {\n\t\t\ts := a.String()\n\t\t\tip, _, err := net.ParseCIDR(s)\n\t\t\tif err != nil {\n\t\t\t\tlevel.Warn(logger).Log(\"msg\", \"error parsing network interface IP address\", \"interface\", i.Name, \"addr\", s, \"err\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif ip.IsPrivate() {\n\t\t\t\tprivInts = append(privInts, i.Name)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif len(privInts) == 0 {\n\t\treturn fallback\n\t}\n\treturn privInts\n}",
"func (p preferScheduleOnHost) filter(pools *csp.CSPList) (*csp.CSPList, error) {\n\tplist, err := p.scheduleOnHost.filter(pools)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(plist.GetPoolUIDs()) == 0 {\n\t\treturn pools, nil\n\t}\n\treturn plist, nil\n}",
"func onlyPublic(addrs []ma.Multiaddr) []ma.Multiaddr {\n\troutable := []ma.Multiaddr{}\n\tfor _, addr := range addrs {\n\t\tif manet.IsPublicAddr(addr) {\n\t\t\troutable = append(routable, addr)\n\t\t}\n\t}\n\treturn routable\n}",
"func (client *DedicatedHostsClient) ListByHostGroup(resourceGroupName string, hostGroupName string, options *DedicatedHostsListByHostGroupOptions) *DedicatedHostsListByHostGroupPager {\n\treturn &DedicatedHostsListByHostGroupPager{\n\t\tclient: client,\n\t\trequester: func(ctx context.Context) (*policy.Request, error) {\n\t\t\treturn client.listByHostGroupCreateRequest(ctx, resourceGroupName, hostGroupName, options)\n\t\t},\n\t\tadvancer: func(ctx context.Context, resp DedicatedHostsListByHostGroupResponse) (*policy.Request, error) {\n\t\t\treturn runtime.NewRequest(ctx, http.MethodGet, *resp.DedicatedHostListResult.NextLink)\n\t\t},\n\t}\n}",
"func (f FakeContainerImpl) GetDefaultHostIPs() ([]string, error) {\n\tpanic(\"implement me\")\n}",
"func (p scheduleOnHost) filter(pools *csp.CSPList) (*csp.CSPList, error) {\n\tif p.hostName == \"\" {\n\t\treturn pools, nil\n\t}\n\tfilteredPools := pools.Filter(csp.HasAnnotation(string(scheduleOnHostAnnotation), p.hostName))\n\treturn filteredPools, nil\n}",
"func parseHosts() {\n\t// Convert the hosts entries into IP and IPNet\n\tfor _, h := range cfg.Hosts {\n\t\t// Does it look like a CIDR?\n\t\t_, ipv4Net, err := net.ParseCIDR(h)\n\t\tif err == nil {\n\t\t\tallowedNetworks = append(allowedNetworks, ipv4Net)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Does it look like an IP?\n\t\tip := net.ParseIP(h)\n\t\tif ip != nil {\n\t\t\tallowedHosts = append(allowedHosts, ip)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Does it look like a hostname?\n\t\tips, err := net.LookupIP(h)\n\t\tif err == nil {\n\t\t\tallowedHosts = append(allowedHosts, ips...)\n\t\t}\n\t}\n}",
"func ReadHostList(cfg *common.Config) ([]string, error) {\n\tconfig := struct {\n\t\tHosts []string `config:\"hosts\" validate:\"required\"`\n\t\tWorker int `config:\"worker\" validate:\"min=1\"`\n\t}{\n\t\tWorker: 1,\n\t}\n\n\terr := cfg.Unpack(&config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlst := config.Hosts\n\tif len(lst) == 0 || config.Worker <= 1 {\n\t\treturn lst, nil\n\t}\n\n\t// duplicate entries config.Workers times\n\thosts := make([]string, 0, len(lst)*config.Worker)\n\tfor _, entry := range lst {\n\t\tfor i := 0; i < config.Worker; i++ {\n\t\t\thosts = append(hosts, entry)\n\t\t}\n\t}\n\n\treturn hosts, nil\n}",
"func (dir *Dir) Hostnames() ([]string, error) {\n\tif dir.Config.Changed(\"host-wrapper\") {\n\t\tvariables := map[string]string{\n\t\t\t\"HOST\": dir.Config.Get(\"host\"),\n\t\t\t\"ENVIRONMENT\": dir.Config.Get(\"environment\"),\n\t\t\t\"DIRNAME\": dir.BaseName(),\n\t\t\t\"DIRPATH\": dir.Path,\n\t\t\t\"SCHEMA\": dir.Config.Get(\"schema\"),\n\t\t}\n\t\tshellOut, err := util.NewInterpolatedShellOut(dir.Config.Get(\"host-wrapper\"), variables)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn shellOut.RunCaptureSplit()\n\t}\n\treturn dir.Config.GetSlice(\"host\", ',', true), nil\n}",
"func getConfigsForHost(hostname host.Name, configs []model.Config) []model.Config {\n\tsvcConfigs := make([]model.Config, 0)\n\tfor index := range configs {\n\t\tvirtualService := configs[index].Spec.(*v1alpha3.VirtualService)\n\t\tfor _, vsHost := range virtualService.Hosts {\n\t\t\tif host.Name(vsHost).Matches(hostname) {\n\t\t\t\tsvcConfigs = append(svcConfigs, configs[index])\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn svcConfigs\n}",
"func GetAllGateway() []domain.Gateway {\n\titems := make([]domain.Gateway, 0)\n\n\tcols := domain.Gateways.Iter()\n\tfor kv := range cols {\n\t\tgateway := kv.Value.(*domain.Gateway)\n\t\titems = append(items, *gateway)\n\t}\n\n\treturn items\n}",
"func getServerBindaddrs(bindaddrList *string, options *string, transports *string) ([]pt_extras.Bindaddr, error) {\n\tvar result []pt_extras.Bindaddr\n\tvar serverBindaddr string\n\tvar serverTransports string\n\n\t// Get the list of all requested bindaddrs.\n\tif *bindaddrList != \"\" {\n\t\tserverBindaddr = *bindaddrList\n\t}\n\n\tfor _, spec := range strings.Split(serverBindaddr, \",\") {\n\t\tvar bindaddr pt_extras.Bindaddr\n\n\t\tparts := strings.SplitN(spec, \"-\", 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"-bindaddr: %q: doesn't contain \\\"-\\\"\", spec)\n\t\t}\n\t\tbindaddr.MethodName = parts[0]\n\t\taddr, err := pt_extras.ResolveAddr(parts[1])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"-bindaddr: %q: %s\", spec, err.Error())\n\t\t}\n\t\tbindaddr.Addr = addr\n\t\tbindaddr.Options = *options\n\t\tresult = append(result, bindaddr)\n\t}\n\n\tif transports == nil {\n\t\treturn nil, errors.New(\"must specify -transport or -transports in server mode\")\n\t} else {\n\t\tserverTransports = *transports\n\t}\n\tresult = pt_extras.FilterBindaddrs(result, strings.Split(serverTransports, \",\"))\n\tif len(result) == 0 {\n\t\tgolog.Errorf(\"no valid bindaddrs\")\n\t}\n\treturn result, nil\n}",
"func (client *DedicatedHostsClient) ListByHostGroup(resourceGroupName string, hostGroupName string, options *DedicatedHostsListByHostGroupOptions) DedicatedHostListResultPager {\n\treturn &dedicatedHostListResultPager{\n\t\tpipeline: client.con.Pipeline(),\n\t\trequester: func(ctx context.Context) (*azcore.Request, error) {\n\t\t\treturn client.listByHostGroupCreateRequest(ctx, resourceGroupName, hostGroupName, options)\n\t\t},\n\t\tresponder: client.listByHostGroupHandleResponse,\n\t\terrorer: client.listByHostGroupHandleError,\n\t\tadvancer: func(ctx context.Context, resp DedicatedHostListResultResponse) (*azcore.Request, error) {\n\t\t\treturn azcore.NewRequest(ctx, http.MethodGet, *resp.DedicatedHostListResult.NextLink)\n\t\t},\n\t\tstatusCodes: []int{http.StatusOK},\n\t}\n}",
"func (client *Client) ListHosts() ([]*model.Host, error) {\n\treturn client.osclt.ListHosts()\n}",
"func (client *Client) ListHosts() ([]*model.Host, error) {\n\treturn client.feclt.ListHosts()\n}",
"func FilterStrictDomains(domains []string, publicSuffixes map[string]bool) []string {\n\tvar output []string\n\tfor _, domain := range domains {\n\t\tfirst := strings.Index(domain, \".\")\n\t\tcleanedDomain := domain[:first]\n\t\tif _, ok := publicSuffixes[cleanedDomain]; !ok {\n\t\t\toutput = append(output, domain)\n\t\t}\n\t}\n\treturn output\n}",
"func NewDisableHostForbidden() *DisableHostForbidden {\n\treturn &DisableHostForbidden{}\n}",
"func doGetAllIpKeys(d *db.DB, dbSpec *db.TableSpec) ([]db.Key, error) {\n\n var keys []db.Key\n\n intfTable, err := d.GetTable(dbSpec)\n if err != nil {\n return keys, err\n }\n\n keys, err = intfTable.GetKeys()\n log.Infof(\"Found %d INTF table keys\", len(keys))\n return keys, err\n}",
"func (c *CachedSites) List() string {\n\tsites := \"\"\n\tfor site := range c.cache {\n\t\tsites += site.Host + \"\\n\"\n\t}\n\treturn sites\n}",
"func getBrokerUrls() (map[int]config.HostPort, error) {\n\tres := make(map[int]config.HostPort)\n\tbrokers, err := zookeeper.Brokers()\n\tif err != nil {\n\t\treturn res, err\n\t}\n\tfor _, id := range brokers {\n\t\tbroker, err := zookeeper.Broker(id)\n\t\tif err != nil {\n\t\t\treturn res, err\n\t\t}\n\t\tres[id] = config.HostPort{Host: broker.Host, Port: broker.Port}\n\t}\n\treturn res, nil\n}",
"func (pr *ProxyRegistry) ListConnectedProxies() map[string]*models.Proxy {\n\tpr.mu.Lock()\n\tdefer pr.mu.Unlock()\n\n\tproxies := make(map[string]*models.Proxy, len(pr.connectedProxies))\n\tfor _, p := range pr.connectedProxies {\n\t\t// A proxy could connect twice quickly and not register the disconnect, so we return the proxy with the higher connection ID.\n\t\tif prior := proxies[p.UUID.String()]; prior == nil || prior.GetConnectionID() < p.GetConnectionID() {\n\t\t\tproxies[p.UUID.String()] = p\n\t\t}\n\t}\n\treturn proxies\n}",
"func (p *MemoryProposer) ListAccepters() (addrs []string, err error) {\n\tp.mtx.Lock()\n\tdefer p.mtx.Unlock()\n\n\taddrs = make([]string, 0, len(p.accepters))\n\tfor addr := range p.accepters {\n\t\taddrs = append(addrs, addr)\n\t}\n\n\tsort.Strings(addrs)\n\treturn addrs, nil\n}",
"func (dir *Dir) Hostnames() ([]string, error) {\n\tif dir.Config.Changed(\"host-wrapper\") {\n\t\tvariables := map[string]string{\n\t\t\t\"HOST\": dir.Config.GetAllowEnvVar(\"host\"),\n\t\t\t\"ENVIRONMENT\": dir.Config.Get(\"environment\"),\n\t\t\t\"DIRNAME\": dir.BaseName(),\n\t\t\t\"DIRPATH\": dir.Path,\n\t\t\t\"SCHEMA\": dir.Config.GetAllowEnvVar(\"schema\"),\n\t\t}\n\t\tshellOut, err := shellout.New(dir.Config.Get(\"host-wrapper\")).WithVariables(variables)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn shellOut.RunCaptureSplit()\n\t}\n\treturn dir.Config.GetSliceAllowEnvVar(\"host\", ',', true), nil\n}",
"func AllDeviceDrivers() map[string]bool {\n\tret := make(map[string]bool)\n\tfor k, v := range allDeviceDrivers {\n\t\tret[k] = v\n\t}\n\treturn ret\n}",
"func (s *SQLStorage) GetWhiteList(ctx context.Context) ([]*IPNet, error) {\n\treturn s.getSubnetList(ctx, \"SELECT ip, mask FROM whitelist\")\n}",
"func (m *Manager) List() map[string]Modem {\n\tdevList := make(map[string]Modem)\n\tfor k, v := range m.devices {\n\t\tif v.ready == 1 {\n\t\t\tdevList[k] = v\n\t\t}\n\t}\n\treturn devList\n}",
"func SetAllowedHosts(allowed []string) {\n\tDefaultDialer.SetAllowedHosts(allowed)\n}"
] | [
"0.4922467",
"0.49170256",
"0.48662174",
"0.46048605",
"0.45793784",
"0.4545906",
"0.4537662",
"0.45205545",
"0.45185655",
"0.44573838",
"0.44563538",
"0.4444831",
"0.44146225",
"0.4406497",
"0.43984336",
"0.43956515",
"0.43870476",
"0.4317521",
"0.42667553",
"0.4260609",
"0.42515606",
"0.4248183",
"0.42229694",
"0.4182807",
"0.41670847",
"0.41662374",
"0.41613647",
"0.41502324",
"0.41390446",
"0.41300252",
"0.41218328",
"0.41194957",
"0.4085199",
"0.40843797",
"0.40837824",
"0.40698856",
"0.40653926",
"0.40623856",
"0.4059313",
"0.40451148",
"0.4041583",
"0.40285683",
"0.4024153",
"0.40218836",
"0.40194538",
"0.40189895",
"0.40127406",
"0.40117258",
"0.40057692",
"0.39976585",
"0.39969555",
"0.39822626",
"0.39792445",
"0.39789227",
"0.3964914",
"0.395633",
"0.39546037",
"0.3953619",
"0.39536142",
"0.3952923",
"0.39528817",
"0.39513925",
"0.39495382",
"0.3948868",
"0.39481497",
"0.39446932",
"0.3944175",
"0.3943653",
"0.39428255",
"0.39368647",
"0.39219832",
"0.39183503",
"0.3914064",
"0.3908435",
"0.3906975",
"0.39060232",
"0.39050052",
"0.39018774",
"0.38928354",
"0.38883248",
"0.3887354",
"0.388587",
"0.38603163",
"0.38581607",
"0.38506192",
"0.3844624",
"0.38389942",
"0.38335449",
"0.38289446",
"0.38284928",
"0.3827315",
"0.38265342",
"0.38194177",
"0.38188052",
"0.38162273",
"0.38156033",
"0.3814484",
"0.38130245",
"0.38119915",
"0.38114277"
] | 0.7816921 | 0 |
removeOrphanDHCPServers removed the DHCP servers linked to no hostonly adapter | removeOrphanDHCPServers удалил DHCP-серверы, связанные с ни одним адаптером Host-only | func removeOrphanDHCPServers(vbox VBoxManager) error {
dhcps, err := listDHCPServers(vbox)
if err != nil {
return err
}
if len(dhcps) == 0 {
return nil
}
nets, err := listHostOnlyAdapters(vbox)
if err != nil {
return err
}
for name := range dhcps {
if strings.HasPrefix(name, dhcpPrefix) {
if _, present := nets[name]; !present {
if err := vbox.vbm("dhcpserver", "remove", "--netname", name); err != nil {
log.Warnf("Unable to remove orphan dhcp server %q: %s", name, err)
}
}
}
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func DHCPsDelete() error {\n\tdhcps, err := DHCPsGet()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, d := range dhcps {\n\t\tif isUnmanaged(UnmanagedID(d.Ifname), LINKTYPE) {\n\t\t\tlogger.Log.Info(fmt.Sprintf(\"Skipping Unmanaged Link %v DHCP configuration\", d.Ifname))\n\t\t\tcontinue\n\t\t}\n\t\terr = DHCPDelete(d.Ifname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func listDHCPServers(vbox VBoxManager) (map[string]*dhcpServer, error) {\n\tout, err := vbox.vbmOut(\"list\", \"dhcpservers\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm := map[string]*dhcpServer{}\n\tdhcp := &dhcpServer{}\n\n\terr = parseKeyValues(out, reColonLine, func(key, val string) error {\n\t\tswitch key {\n\t\tcase \"NetworkName\":\n\t\t\tdhcp = &dhcpServer{}\n\t\t\tm[val] = dhcp\n\t\t\tdhcp.NetworkName = val\n\t\tcase \"IP\":\n\t\t\tdhcp.IPv4.IP = net.ParseIP(val)\n\t\tcase \"upperIPAddress\":\n\t\t\tdhcp.UpperIP = net.ParseIP(val)\n\t\tcase \"lowerIPAddress\":\n\t\t\tdhcp.LowerIP = net.ParseIP(val)\n\t\tcase \"NetworkMask\":\n\t\t\tdhcp.IPv4.Mask = parseIPv4Mask(val)\n\t\tcase \"Enabled\":\n\t\t\tdhcp.Enabled = (val == \"Yes\")\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m, nil\n}",
"func addHostOnlyDHCPServer(ifname string, d dhcpServer, vbox VBoxManager) error {\n\tname := dhcpPrefix + ifname\n\n\tdhcps, err := listDHCPServers(vbox)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// On some platforms (OSX), creating a host-only adapter adds a default dhcpserver,\n\t// while on others (Windows?) it does not.\n\tcommand := \"add\"\n\tif dhcp, ok := dhcps[name]; ok {\n\t\tcommand = \"modify\"\n\t\tif (dhcp.IPv4.IP.Equal(d.IPv4.IP)) && (dhcp.IPv4.Mask.String() == d.IPv4.Mask.String()) && (dhcp.LowerIP.Equal(d.LowerIP)) && (dhcp.UpperIP.Equal(d.UpperIP)) && dhcp.Enabled {\n\t\t\t// dhcp is up to date\n\t\t\treturn nil\n\t\t}\n\t}\n\n\targs := []string{\"dhcpserver\", command,\n\t\t\"--netname\", name,\n\t\t\"--ip\", d.IPv4.IP.String(),\n\t\t\"--netmask\", net.IP(d.IPv4.Mask).String(),\n\t\t\"--lowerip\", d.LowerIP.String(),\n\t\t\"--upperip\", d.UpperIP.String(),\n\t}\n\tif d.Enabled {\n\t\targs = append(args, \"--enable\")\n\t} else {\n\t\targs = append(args, \"--disable\")\n\t}\n\n\treturn vbox.vbm(args...)\n}",
"func removeServerInConfig(server string) {\n\tfor k, v := range selfConf.Servers {\n\t\tif v == server {\n\t\t\tselfConf.Servers = selfConf.Servers[:k+copy(selfConf.Servers[k:], selfConf.Servers[k+1:])]\n\t\t}\n\t}\n}",
"func (p *Pool) RemoveHostAndPlugins(host string) {\n\tp.mutex.Lock()\n\tdefer p.mutex.Unlock()\n\tfor plHostName, pls := range p.hosts {\n\t\tif plHostName == host {\n\t\t\t// stop all plugins\n\t\t\tfor _, pl := range pls.plugins {\n\t\t\t\tpl.stop()\n\t\t\t}\n\t\t\tdelete(p.hosts, host)\n\t\t}\n\t}\n}",
"func (adminOrg *AdminOrg) removeAllOrgVDCs() error {\n\tfor _, vdcs := range adminOrg.AdminOrg.Vdcs.Vdcs {\n\n\t\tadminVdcUrl := adminOrg.client.VCDHREF\n\t\tsplitVdcId := strings.Split(vdcs.HREF, \"/api/vdc/\")\n\t\tif len(splitVdcId) == 1 {\n\t\t\tadminVdcUrl.Path += \"/admin/vdc/\" + strings.Split(vdcs.HREF, \"/api/admin/vdc/\")[1] + \"/action/disable\"\n\t\t} else {\n\t\t\tadminVdcUrl.Path += \"/admin/vdc/\" + splitVdcId[1] + \"/action/disable\"\n\t\t}\n\n\t\treq := adminOrg.client.NewRequest(map[string]string{}, http.MethodPost, adminVdcUrl, nil)\n\t\t_, err := checkResp(adminOrg.client.Http.Do(req))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error disabling vdc: %s\", err)\n\t\t}\n\t\t// Get admin vdc HREF for normal deletion\n\t\tadminVdcUrl.Path = strings.Split(adminVdcUrl.Path, \"/action/disable\")[0]\n\t\treq = adminOrg.client.NewRequest(map[string]string{\n\t\t\t\"recursive\": \"true\",\n\t\t\t\"force\": \"true\",\n\t\t}, http.MethodDelete, adminVdcUrl, nil)\n\t\tresp, err := checkResp(adminOrg.client.Http.Do(req))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error deleting vdc: %s\", err)\n\t\t}\n\t\ttask := NewTask(adminOrg.client)\n\t\tif err = decodeBody(types.BodyTypeXML, resp, task.Task); err != nil {\n\t\t\treturn fmt.Errorf(\"error decoding task response: %s\", err)\n\t\t}\n\t\tif task.Task.Status == \"error\" {\n\t\t\treturn fmt.Errorf(\"vdc not properly destroyed\")\n\t\t}\n\t\terr = task.WaitTaskCompletion()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"couldn't finish removing vdc %s\", err)\n\t\t}\n\n\t}\n\n\treturn nil\n}",
"func deleteNodeVIPs(svcIPs []string, protocol v1.Protocol, sourcePort int32) error {\n\tklog.V(5).Infof(\"Searching to remove Gateway VIPs - %s, %d\", protocol, sourcePort)\n\tgatewayRouters, _, err := gateway.GetOvnGateways()\n\tif err != nil {\n\t\tklog.Errorf(\"Error while searching for gateways: %v\", err)\n\t\treturn err\n\t}\n\n\tfor _, gatewayRouter := range gatewayRouters {\n\t\tvar loadBalancers []string\n\t\tgatewayLB, err := gateway.GetGatewayLoadBalancer(gatewayRouter, protocol)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Gateway router %s does not have load balancer (%v)\", gatewayRouter, err)\n\t\t\tcontinue\n\t\t}\n\t\tips := svcIPs\n\t\tif len(ips) == 0 {\n\t\t\tips, err = gateway.GetGatewayPhysicalIPs(gatewayRouter)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"Gateway router %s does not have physical ip (%v)\", gatewayRouter, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tloadBalancers = append(loadBalancers, gatewayLB)\n\t\tif config.Gateway.Mode == config.GatewayModeShared {\n\t\t\tworkerNode := util.GetWorkerFromGatewayRouter(gatewayRouter)\n\t\t\tworkerLB, err := loadbalancer.GetWorkerLoadBalancer(workerNode, protocol)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"Worker switch %s does not have load balancer (%v)\", workerNode, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tloadBalancers = append(loadBalancers, workerLB)\n\t\t}\n\t\tfor _, loadBalancer := range loadBalancers {\n\t\t\tfor _, ip := range ips {\n\t\t\t\t// With the physical_ip:sourcePort as the VIP, delete an entry in 'load_balancer'.\n\t\t\t\tvip := util.JoinHostPortInt32(ip, sourcePort)\n\t\t\t\tklog.V(5).Infof(\"Removing gateway VIP: %s from load balancer: %s\", vip, loadBalancer)\n\t\t\t\tif err := loadbalancer.DeleteLoadBalancerVIP(loadBalancer, vip); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func DHCPsConfigure(dhcp []Dhcp) error {\n\tfor _, d := range dhcp {\n\t\tif isUnmanaged(UnmanagedID(d.Ifname), LINKTYPE) {\n\t\t\tlogger.Log.Info(fmt.Sprintf(\"Skipping Unmanaged Link %v DHCP configuration\", d.Ifname))\n\t\t\tcontinue\n\t\t}\n\t\terr := DHCPDelete(d.Ifname)\n\t\tif err != nil {\n\t\t\tif _, ok := err.(*NotFoundError); ok != true {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif err := DHCPCreate(d); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (h *ValidationHandler) removeServers(swagger *openapi3.Swagger) (*openapi3.Swagger, error) {\n\t// collect API pathPrefix path prefixes\n\tprefixes := make(map[string]struct{}, 0) // a \"set\"\n\tfor _, s := range swagger.Servers {\n\t\tu, err := url.Parse(s.URL)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tprefixes[u.Path] = struct{}{}\n\t}\n\tif len(prefixes) != 1 {\n\t\treturn nil, fmt.Errorf(\"requires a single API pathPrefix path prefix: %v\", prefixes)\n\t}\n\tvar prefix string\n\tfor k := range prefixes {\n\t\tprefix = k\n\t}\n\n\t// update the paths to start with the API pathPrefix path prefixes\n\tpaths := make(openapi3.Paths, 0)\n\tfor key, path := range swagger.Paths {\n\t\tpaths[prefix+key] = path\n\t}\n\tswagger.Paths = paths\n\n\t// now remove the servers\n\tswagger.Servers = nil\n\n\treturn swagger, nil\n}",
"func (self *basicPodManager) DeleteOrphanedMirrorPods() {\n\tpodByFullName, mirrorPodByFullName := self.getFullNameMaps()\n\n\tfor podFullName := range mirrorPodByFullName {\n\t\tif _, ok := podByFullName[podFullName]; !ok {\n\t\t\tself.mirrorClient.DeleteMirrorPod(podFullName)\n\t\t}\n\t}\n}",
"func vSphereRemoveHost(ctx context.Context, obj *object.HostSystem) error {\n\tdisconnectTask, err := obj.Disconnect(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := disconnectTask.Wait(ctx); err != nil {\n\t\treturn err\n\t}\n\n\tdestroyTask, err := obj.Destroy(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn destroyTask.Wait(ctx)\n}",
"func WithDHCPNameServers(dns []string) Option {\n\treturn func(d *dnsmasq) {\n\t\td.dns = dns\n\t}\n}",
"func CyberghostServers() []models.CyberghostServer {\n\treturn []models.CyberghostServer{\n\t\t{Region: \"Albania\", Group: \"Premium TCP Europe\", Hostname: \"97-1-al.cg-dialup.net\", IPs: []net.IP{{31, 171, 155, 3}, {31, 171, 155, 4}, {31, 171, 155, 7}, {31, 171, 155, 8}, {31, 171, 155, 9}, {31, 171, 155, 10}, {31, 171, 155, 11}, {31, 171, 155, 12}, {31, 171, 155, 13}, {31, 171, 155, 14}}},\n\t\t{Region: \"Albania\", Group: \"Premium UDP Europe\", Hostname: \"87-1-al.cg-dialup.net\", IPs: []net.IP{{31, 171, 155, 4}, {31, 171, 155, 5}, {31, 171, 155, 6}, {31, 171, 155, 7}, {31, 171, 155, 8}, {31, 171, 155, 9}, {31, 171, 155, 10}, {31, 171, 155, 11}, {31, 171, 155, 13}, {31, 171, 155, 14}}},\n\t\t{Region: \"Algeria\", Group: \"Premium TCP Europe\", Hostname: \"97-1-dz.cg-dialup.net\", IPs: []net.IP{{176, 125, 228, 132}, {176, 125, 228, 134}, {176, 125, 228, 135}, {176, 125, 228, 136}, {176, 125, 228, 137}, {176, 125, 228, 138}, {176, 125, 228, 139}, {176, 125, 228, 140}, {176, 125, 228, 141}, {176, 125, 228, 142}}},\n\t\t{Region: \"Algeria\", Group: \"Premium UDP Europe\", Hostname: \"87-1-dz.cg-dialup.net\", IPs: []net.IP{{176, 125, 228, 131}, {176, 125, 228, 133}, {176, 125, 228, 134}, {176, 125, 228, 136}, {176, 125, 228, 137}, {176, 125, 228, 139}, {176, 125, 228, 140}, {176, 125, 228, 141}, {176, 125, 228, 142}, {176, 125, 228, 143}}},\n\t\t{Region: \"Andorra\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ad.cg-dialup.net\", IPs: []net.IP{{188, 241, 82, 137}, {188, 241, 82, 138}, {188, 241, 82, 140}, {188, 241, 82, 142}, {188, 241, 82, 147}, {188, 241, 82, 155}, {188, 241, 82, 159}, {188, 241, 82, 160}, {188, 241, 82, 161}, {188, 241, 82, 166}}},\n\t\t{Region: \"Andorra\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ad.cg-dialup.net\", IPs: []net.IP{{188, 241, 82, 133}, {188, 241, 82, 134}, {188, 241, 82, 136}, {188, 241, 82, 137}, {188, 241, 82, 146}, {188, 241, 82, 153}, {188, 241, 82, 155}, {188, 241, 82, 160}, {188, 241, 82, 164}, {188, 241, 82, 
168}}},\n\t\t{Region: \"Argentina\", Group: \"Premium TCP USA\", Hostname: \"93-1-ar.cg-dialup.net\", IPs: []net.IP{{146, 70, 39, 4}, {146, 70, 39, 9}, {146, 70, 39, 15}, {146, 70, 39, 19}, {146, 70, 39, 135}, {146, 70, 39, 136}, {146, 70, 39, 139}, {146, 70, 39, 142}, {146, 70, 39, 143}, {146, 70, 39, 145}}},\n\t\t{Region: \"Argentina\", Group: \"Premium UDP USA\", Hostname: \"94-1-ar.cg-dialup.net\", IPs: []net.IP{{146, 70, 39, 3}, {146, 70, 39, 5}, {146, 70, 39, 6}, {146, 70, 39, 8}, {146, 70, 39, 11}, {146, 70, 39, 12}, {146, 70, 39, 131}, {146, 70, 39, 134}, {146, 70, 39, 142}, {146, 70, 39, 143}}},\n\t\t{Region: \"Armenia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-am.cg-dialup.net\", IPs: []net.IP{{185, 253, 160, 131}, {185, 253, 160, 134}, {185, 253, 160, 136}, {185, 253, 160, 137}, {185, 253, 160, 138}, {185, 253, 160, 139}, {185, 253, 160, 140}, {185, 253, 160, 141}, {185, 253, 160, 142}, {185, 253, 160, 143}}},\n\t\t{Region: \"Armenia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-am.cg-dialup.net\", IPs: []net.IP{{185, 253, 160, 131}, {185, 253, 160, 132}, {185, 253, 160, 133}, {185, 253, 160, 134}, {185, 253, 160, 135}, {185, 253, 160, 136}, {185, 253, 160, 137}, {185, 253, 160, 141}, {185, 253, 160, 142}, {185, 253, 160, 144}}},\n\t\t{Region: \"Australia\", Group: \"Premium TCP Asia\", Hostname: \"96-1-au.cg-dialup.net\", IPs: []net.IP{{154, 16, 81, 22}, {181, 214, 215, 7}, {181, 214, 215, 15}, {181, 214, 215, 18}, {191, 101, 210, 15}, {191, 101, 210, 50}, {191, 101, 210, 60}, {202, 60, 80, 78}, {202, 60, 80, 82}, {202, 60, 80, 102}}},\n\t\t{Region: \"Australia\", Group: \"Premium UDP Asia\", Hostname: \"95-1-au.cg-dialup.net\", IPs: []net.IP{{181, 214, 215, 4}, {181, 214, 215, 16}, {191, 101, 210, 18}, {191, 101, 210, 21}, {191, 101, 210, 36}, {191, 101, 210, 58}, {191, 101, 210, 60}, {202, 60, 80, 74}, {202, 60, 80, 106}, {202, 60, 80, 124}}},\n\t\t{Region: \"Austria\", Group: \"Premium TCP Europe\", Hostname: \"97-1-at.cg-dialup.net\", 
IPs: []net.IP{{37, 19, 223, 9}, {37, 19, 223, 16}, {37, 19, 223, 113}, {37, 19, 223, 205}, {37, 19, 223, 211}, {37, 19, 223, 218}, {37, 19, 223, 223}, {37, 19, 223, 245}, {37, 120, 155, 104}, {89, 187, 168, 174}}},\n\t\t{Region: \"Austria\", Group: \"Premium UDP Europe\", Hostname: \"87-1-at.cg-dialup.net\", IPs: []net.IP{{37, 19, 223, 202}, {37, 19, 223, 205}, {37, 19, 223, 229}, {37, 19, 223, 239}, {37, 19, 223, 241}, {37, 19, 223, 243}, {37, 120, 155, 103}, {89, 187, 168, 160}, {89, 187, 168, 174}, {89, 187, 168, 181}}},\n\t\t{Region: \"Bahamas\", Group: \"Premium TCP USA\", Hostname: \"93-1-bs.cg-dialup.net\", IPs: []net.IP{{95, 181, 238, 131}, {95, 181, 238, 136}, {95, 181, 238, 142}, {95, 181, 238, 144}, {95, 181, 238, 146}, {95, 181, 238, 147}, {95, 181, 238, 148}, {95, 181, 238, 152}, {95, 181, 238, 153}, {95, 181, 238, 155}}},\n\t\t{Region: \"Bahamas\", Group: \"Premium UDP USA\", Hostname: \"94-1-bs.cg-dialup.net\", IPs: []net.IP{{95, 181, 238, 131}, {95, 181, 238, 138}, {95, 181, 238, 140}, {95, 181, 238, 141}, {95, 181, 238, 146}, {95, 181, 238, 147}, {95, 181, 238, 148}, {95, 181, 238, 151}, {95, 181, 238, 153}, {95, 181, 238, 155}}},\n\t\t{Region: \"Bangladesh\", Group: \"Premium TCP Asia\", Hostname: \"96-1-bd.cg-dialup.net\", IPs: []net.IP{{84, 252, 93, 132}, {84, 252, 93, 133}, {84, 252, 93, 135}, {84, 252, 93, 138}, {84, 252, 93, 139}, {84, 252, 93, 141}, {84, 252, 93, 142}, {84, 252, 93, 143}, {84, 252, 93, 144}, {84, 252, 93, 145}}},\n\t\t{Region: \"Bangladesh\", Group: \"Premium UDP Asia\", Hostname: \"95-1-bd.cg-dialup.net\", IPs: []net.IP{{84, 252, 93, 131}, {84, 252, 93, 133}, {84, 252, 93, 134}, {84, 252, 93, 135}, {84, 252, 93, 136}, {84, 252, 93, 139}, {84, 252, 93, 140}, {84, 252, 93, 141}, {84, 252, 93, 143}, {84, 252, 93, 145}}},\n\t\t{Region: \"Belarus\", Group: \"Premium TCP Europe\", Hostname: \"97-1-by.cg-dialup.net\", IPs: []net.IP{{45, 132, 194, 5}, {45, 132, 194, 6}, {45, 132, 194, 23}, {45, 132, 194, 24}, {45, 132, 194, 25}, 
{45, 132, 194, 27}, {45, 132, 194, 30}, {45, 132, 194, 35}, {45, 132, 194, 44}, {45, 132, 194, 49}}},\n\t\t{Region: \"Belarus\", Group: \"Premium UDP Europe\", Hostname: \"87-1-by.cg-dialup.net\", IPs: []net.IP{{45, 132, 194, 6}, {45, 132, 194, 8}, {45, 132, 194, 9}, {45, 132, 194, 11}, {45, 132, 194, 15}, {45, 132, 194, 19}, {45, 132, 194, 20}, {45, 132, 194, 23}, {45, 132, 194, 24}, {45, 132, 194, 26}}},\n\t\t{Region: \"Belgium\", Group: \"Premium TCP Europe\", Hostname: \"97-1-be.cg-dialup.net\", IPs: []net.IP{{37, 120, 143, 165}, {37, 120, 143, 166}, {185, 210, 217, 10}, {185, 210, 217, 248}, {193, 9, 114, 211}, {193, 9, 114, 220}, {194, 110, 115, 195}, {194, 110, 115, 199}, {194, 110, 115, 205}, {194, 110, 115, 238}}},\n\t\t{Region: \"Belgium\", Group: \"Premium UDP Europe\", Hostname: \"87-1-be.cg-dialup.net\", IPs: []net.IP{{37, 120, 143, 163}, {37, 120, 143, 167}, {185, 210, 217, 9}, {185, 210, 217, 13}, {185, 210, 217, 55}, {185, 210, 217, 251}, {185, 232, 21, 120}, {194, 110, 115, 214}, {194, 110, 115, 218}, {194, 110, 115, 236}}},\n\t\t{Region: \"Bosnia and Herzegovina\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ba.cg-dialup.net\", IPs: []net.IP{{185, 99, 3, 57}, {185, 99, 3, 58}, {185, 99, 3, 72}, {185, 99, 3, 73}, {185, 99, 3, 74}, {185, 99, 3, 130}, {185, 99, 3, 131}, {185, 99, 3, 134}, {185, 99, 3, 135}, {185, 99, 3, 136}}},\n\t\t{Region: \"Bosnia and Herzegovina\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ba.cg-dialup.net\", IPs: []net.IP{{185, 99, 3, 57}, {185, 99, 3, 58}, {185, 99, 3, 72}, {185, 99, 3, 73}, {185, 99, 3, 74}, {185, 99, 3, 130}, {185, 99, 3, 131}, {185, 99, 3, 134}, {185, 99, 3, 135}, {185, 99, 3, 136}}},\n\t\t{Region: \"Brazil\", Group: \"Premium TCP USA\", Hostname: \"93-1-br.cg-dialup.net\", IPs: []net.IP{{188, 241, 177, 5}, {188, 241, 177, 11}, {188, 241, 177, 38}, {188, 241, 177, 45}, {188, 241, 177, 132}, {188, 241, 177, 135}, {188, 241, 177, 136}, {188, 241, 177, 152}, {188, 241, 177, 153}, {188, 241, 177, 
156}}},\n\t\t{Region: \"Brazil\", Group: \"Premium UDP USA\", Hostname: \"94-1-br.cg-dialup.net\", IPs: []net.IP{{188, 241, 177, 8}, {188, 241, 177, 37}, {188, 241, 177, 40}, {188, 241, 177, 42}, {188, 241, 177, 45}, {188, 241, 177, 135}, {188, 241, 177, 139}, {188, 241, 177, 149}, {188, 241, 177, 152}, {188, 241, 177, 154}}},\n\t\t{Region: \"Bulgaria\", Group: \"Premium TCP Europe\", Hostname: \"97-1-bg.cg-dialup.net\", IPs: []net.IP{{37, 120, 152, 99}, {37, 120, 152, 101}, {37, 120, 152, 103}, {37, 120, 152, 104}, {37, 120, 152, 105}, {37, 120, 152, 106}, {37, 120, 152, 107}, {37, 120, 152, 108}, {37, 120, 152, 109}, {37, 120, 152, 110}}},\n\t\t{Region: \"Bulgaria\", Group: \"Premium UDP Europe\", Hostname: \"87-1-bg.cg-dialup.net\", IPs: []net.IP{{37, 120, 152, 99}, {37, 120, 152, 100}, {37, 120, 152, 101}, {37, 120, 152, 102}, {37, 120, 152, 103}, {37, 120, 152, 105}, {37, 120, 152, 106}, {37, 120, 152, 107}, {37, 120, 152, 108}, {37, 120, 152, 109}}},\n\t\t{Region: \"Cambodia\", Group: \"Premium TCP Asia\", Hostname: \"96-1-kh.cg-dialup.net\", IPs: []net.IP{{188, 215, 235, 35}, {188, 215, 235, 36}, {188, 215, 235, 38}, {188, 215, 235, 39}, {188, 215, 235, 45}, {188, 215, 235, 49}, {188, 215, 235, 51}, {188, 215, 235, 53}, {188, 215, 235, 54}, {188, 215, 235, 57}}},\n\t\t{Region: \"Cambodia\", Group: \"Premium UDP Asia\", Hostname: \"95-1-kh.cg-dialup.net\", IPs: []net.IP{{188, 215, 235, 36}, {188, 215, 235, 40}, {188, 215, 235, 42}, {188, 215, 235, 44}, {188, 215, 235, 46}, {188, 215, 235, 47}, {188, 215, 235, 48}, {188, 215, 235, 50}, {188, 215, 235, 55}, {188, 215, 235, 57}}},\n\t\t{Region: \"Canada\", Group: \"Premium TCP USA\", Hostname: \"93-1-ca.cg-dialup.net\", IPs: []net.IP{{66, 115, 142, 136}, {66, 115, 142, 139}, {66, 115, 142, 156}, {66, 115, 142, 162}, {66, 115, 142, 172}, {104, 200, 151, 99}, {104, 200, 151, 111}, {104, 200, 151, 153}, {104, 200, 151, 164}, {172, 98, 89, 137}}},\n\t\t{Region: \"Canada\", Group: \"Premium UDP USA\", Hostname: 
\"94-1-ca.cg-dialup.net\", IPs: []net.IP{{66, 115, 142, 135}, {66, 115, 142, 154}, {66, 115, 142, 165}, {104, 200, 151, 32}, {104, 200, 151, 57}, {104, 200, 151, 85}, {104, 200, 151, 86}, {104, 200, 151, 147}, {172, 98, 89, 144}, {172, 98, 89, 173}}},\n\t\t{Region: \"Chile\", Group: \"Premium TCP USA\", Hostname: \"93-1-cl.cg-dialup.net\", IPs: []net.IP{{146, 70, 11, 3}, {146, 70, 11, 6}, {146, 70, 11, 7}, {146, 70, 11, 8}, {146, 70, 11, 9}, {146, 70, 11, 10}, {146, 70, 11, 11}, {146, 70, 11, 12}, {146, 70, 11, 13}, {146, 70, 11, 14}}},\n\t\t{Region: \"Chile\", Group: \"Premium UDP USA\", Hostname: \"94-1-cl.cg-dialup.net\", IPs: []net.IP{{146, 70, 11, 3}, {146, 70, 11, 4}, {146, 70, 11, 6}, {146, 70, 11, 7}, {146, 70, 11, 8}, {146, 70, 11, 9}, {146, 70, 11, 10}, {146, 70, 11, 11}, {146, 70, 11, 13}, {146, 70, 11, 14}}},\n\t\t{Region: \"China\", Group: \"Premium TCP Asia\", Hostname: \"96-1-cn.cg-dialup.net\", IPs: []net.IP{{188, 241, 80, 131}, {188, 241, 80, 132}, {188, 241, 80, 133}, {188, 241, 80, 134}, {188, 241, 80, 135}, {188, 241, 80, 137}, {188, 241, 80, 139}, {188, 241, 80, 140}, {188, 241, 80, 141}, {188, 241, 80, 142}}},\n\t\t{Region: \"China\", Group: \"Premium UDP Asia\", Hostname: \"95-1-cn.cg-dialup.net\", IPs: []net.IP{{188, 241, 80, 131}, {188, 241, 80, 132}, {188, 241, 80, 133}, {188, 241, 80, 134}, {188, 241, 80, 135}, {188, 241, 80, 136}, {188, 241, 80, 137}, {188, 241, 80, 138}, {188, 241, 80, 139}, {188, 241, 80, 142}}},\n\t\t{Region: \"Colombia\", Group: \"Premium TCP USA\", Hostname: \"93-1-co.cg-dialup.net\", IPs: []net.IP{{146, 70, 9, 3}, {146, 70, 9, 4}, {146, 70, 9, 5}, {146, 70, 9, 7}, {146, 70, 9, 9}, {146, 70, 9, 10}, {146, 70, 9, 11}, {146, 70, 9, 12}, {146, 70, 9, 13}, {146, 70, 9, 14}}},\n\t\t{Region: \"Colombia\", Group: \"Premium UDP USA\", Hostname: \"94-1-co.cg-dialup.net\", IPs: []net.IP{{146, 70, 9, 3}, {146, 70, 9, 4}, {146, 70, 9, 5}, {146, 70, 9, 6}, {146, 70, 9, 7}, {146, 70, 9, 8}, {146, 70, 9, 9}, {146, 70, 9, 10}, 
{146, 70, 9, 11}, {146, 70, 9, 12}}},\n\t\t{Region: \"Costa Rica\", Group: \"Premium TCP USA\", Hostname: \"93-1-cr.cg-dialup.net\", IPs: []net.IP{{146, 70, 10, 3}, {146, 70, 10, 4}, {146, 70, 10, 5}, {146, 70, 10, 6}, {146, 70, 10, 7}, {146, 70, 10, 8}, {146, 70, 10, 10}, {146, 70, 10, 11}, {146, 70, 10, 12}, {146, 70, 10, 13}}},\n\t\t{Region: \"Costa Rica\", Group: \"Premium UDP USA\", Hostname: \"94-1-cr.cg-dialup.net\", IPs: []net.IP{{146, 70, 10, 3}, {146, 70, 10, 4}, {146, 70, 10, 5}, {146, 70, 10, 6}, {146, 70, 10, 7}, {146, 70, 10, 8}, {146, 70, 10, 9}, {146, 70, 10, 11}, {146, 70, 10, 12}, {146, 70, 10, 14}}},\n\t\t{Region: \"Croatia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-hr.cg-dialup.net\", IPs: []net.IP{{146, 70, 8, 5}, {146, 70, 8, 8}, {146, 70, 8, 9}, {146, 70, 8, 10}, {146, 70, 8, 11}, {146, 70, 8, 12}, {146, 70, 8, 13}, {146, 70, 8, 14}, {146, 70, 8, 15}, {146, 70, 8, 16}}},\n\t\t{Region: \"Croatia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-hr.cg-dialup.net\", IPs: []net.IP{{146, 70, 8, 3}, {146, 70, 8, 4}, {146, 70, 8, 5}, {146, 70, 8, 6}, {146, 70, 8, 7}, {146, 70, 8, 9}, {146, 70, 8, 11}, {146, 70, 8, 13}, {146, 70, 8, 14}, {146, 70, 8, 16}}},\n\t\t{Region: \"Cyprus\", Group: \"Premium TCP Europe\", Hostname: \"97-1-cy.cg-dialup.net\", IPs: []net.IP{{185, 253, 162, 131}, {185, 253, 162, 133}, {185, 253, 162, 135}, {185, 253, 162, 136}, {185, 253, 162, 137}, {185, 253, 162, 139}, {185, 253, 162, 140}, {185, 253, 162, 142}, {185, 253, 162, 143}, {185, 253, 162, 144}}},\n\t\t{Region: \"Cyprus\", Group: \"Premium UDP Europe\", Hostname: \"87-1-cy.cg-dialup.net\", IPs: []net.IP{{185, 253, 162, 131}, {185, 253, 162, 132}, {185, 253, 162, 134}, {185, 253, 162, 135}, {185, 253, 162, 137}, {185, 253, 162, 138}, {185, 253, 162, 140}, {185, 253, 162, 142}, {185, 253, 162, 143}, {185, 253, 162, 144}}},\n\t\t{Region: \"Czech Republic\", Group: \"Premium TCP Europe\", Hostname: \"97-1-cz.cg-dialup.net\", IPs: []net.IP{{138, 199, 56, 235}, 
{138, 199, 56, 236}, {138, 199, 56, 237}, {138, 199, 56, 245}, {138, 199, 56, 246}, {138, 199, 56, 249}, {195, 181, 161, 12}, {195, 181, 161, 16}, {195, 181, 161, 20}, {195, 181, 161, 23}}},\n\t\t{Region: \"Czech Republic\", Group: \"Premium UDP Europe\", Hostname: \"87-1-cz.cg-dialup.net\", IPs: []net.IP{{138, 199, 56, 227}, {138, 199, 56, 229}, {138, 199, 56, 231}, {138, 199, 56, 235}, {138, 199, 56, 241}, {138, 199, 56, 247}, {195, 181, 161, 10}, {195, 181, 161, 16}, {195, 181, 161, 18}, {195, 181, 161, 22}}},\n\t\t{Region: \"Denmark\", Group: \"Premium TCP Europe\", Hostname: \"97-1-dk.cg-dialup.net\", IPs: []net.IP{{37, 120, 145, 83}, {37, 120, 145, 88}, {37, 120, 145, 93}, {37, 120, 194, 36}, {37, 120, 194, 56}, {37, 120, 194, 57}, {95, 174, 65, 163}, {95, 174, 65, 174}, {185, 206, 224, 238}, {185, 206, 224, 243}}},\n\t\t{Region: \"Denmark\", Group: \"Premium UDP Europe\", Hostname: \"87-1-dk.cg-dialup.net\", IPs: []net.IP{{37, 120, 194, 39}, {95, 174, 65, 167}, {95, 174, 65, 170}, {185, 206, 224, 227}, {185, 206, 224, 230}, {185, 206, 224, 236}, {185, 206, 224, 238}, {185, 206, 224, 245}, {185, 206, 224, 250}, {185, 206, 224, 254}}},\n\t\t{Region: \"Egypt\", Group: \"Premium TCP Europe\", Hostname: \"97-1-eg.cg-dialup.net\", IPs: []net.IP{{188, 214, 122, 40}, {188, 214, 122, 42}, {188, 214, 122, 43}, {188, 214, 122, 45}, {188, 214, 122, 48}, {188, 214, 122, 50}, {188, 214, 122, 52}, {188, 214, 122, 60}, {188, 214, 122, 70}, {188, 214, 122, 73}}},\n\t\t{Region: \"Egypt\", Group: \"Premium UDP Europe\", Hostname: \"87-1-eg.cg-dialup.net\", IPs: []net.IP{{188, 214, 122, 37}, {188, 214, 122, 38}, {188, 214, 122, 44}, {188, 214, 122, 54}, {188, 214, 122, 57}, {188, 214, 122, 59}, {188, 214, 122, 60}, {188, 214, 122, 61}, {188, 214, 122, 67}, {188, 214, 122, 69}}},\n\t\t{Region: \"Estonia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ee.cg-dialup.net\", IPs: []net.IP{{95, 153, 32, 83}, {95, 153, 32, 84}, {95, 153, 32, 86}, {95, 153, 32, 88}, {95, 153, 32, 
89}, {95, 153, 32, 90}, {95, 153, 32, 91}, {95, 153, 32, 92}, {95, 153, 32, 93}, {95, 153, 32, 94}}},\n\t\t{Region: \"Estonia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ee.cg-dialup.net\", IPs: []net.IP{{95, 153, 32, 83}, {95, 153, 32, 84}, {95, 153, 32, 85}, {95, 153, 32, 87}, {95, 153, 32, 88}, {95, 153, 32, 89}, {95, 153, 32, 90}, {95, 153, 32, 91}, {95, 153, 32, 92}, {95, 153, 32, 94}}},\n\t\t{Region: \"Finland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-fi.cg-dialup.net\", IPs: []net.IP{{188, 126, 89, 99}, {188, 126, 89, 102}, {188, 126, 89, 105}, {188, 126, 89, 107}, {188, 126, 89, 108}, {188, 126, 89, 110}, {188, 126, 89, 112}, {188, 126, 89, 115}, {188, 126, 89, 116}, {188, 126, 89, 119}}},\n\t\t{Region: \"Finland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-fi.cg-dialup.net\", IPs: []net.IP{{188, 126, 89, 101}, {188, 126, 89, 104}, {188, 126, 89, 109}, {188, 126, 89, 110}, {188, 126, 89, 111}, {188, 126, 89, 113}, {188, 126, 89, 114}, {188, 126, 89, 115}, {188, 126, 89, 122}, {188, 126, 89, 124}}},\n\t\t{Region: \"France\", Group: \"Premium TCP Europe\", Hostname: \"97-1-fr.cg-dialup.net\", IPs: []net.IP{{84, 17, 43, 167}, {84, 17, 60, 147}, {84, 17, 60, 155}, {151, 106, 8, 108}, {191, 101, 31, 202}, {191, 101, 31, 254}, {191, 101, 217, 45}, {191, 101, 217, 159}, {191, 101, 217, 211}, {191, 101, 217, 240}}},\n\t\t{Region: \"France\", Group: \"Premium UDP Europe\", Hostname: \"87-1-fr.cg-dialup.net\", IPs: []net.IP{{84, 17, 60, 59}, {84, 17, 60, 121}, {191, 101, 31, 81}, {191, 101, 31, 84}, {191, 101, 31, 126}, {191, 101, 31, 127}, {191, 101, 217, 140}, {191, 101, 217, 201}, {191, 101, 217, 206}, {191, 101, 217, 211}}},\n\t\t{Region: \"Georgia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ge.cg-dialup.net\", IPs: []net.IP{{95, 181, 236, 131}, {95, 181, 236, 132}, {95, 181, 236, 133}, {95, 181, 236, 134}, {95, 181, 236, 135}, {95, 181, 236, 136}, {95, 181, 236, 138}, {95, 181, 236, 139}, {95, 181, 236, 142}, {95, 181, 236, 
144}}},\n\t\t{Region: \"Georgia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ge.cg-dialup.net\", IPs: []net.IP{{95, 181, 236, 132}, {95, 181, 236, 133}, {95, 181, 236, 134}, {95, 181, 236, 136}, {95, 181, 236, 137}, {95, 181, 236, 139}, {95, 181, 236, 141}, {95, 181, 236, 142}, {95, 181, 236, 143}, {95, 181, 236, 144}}},\n\t\t{Region: \"Germany\", Group: \"Premium TCP Europe\", Hostname: \"97-1-de.cg-dialup.net\", IPs: []net.IP{{84, 17, 48, 39}, {84, 17, 48, 234}, {84, 17, 49, 106}, {84, 17, 49, 112}, {84, 17, 49, 218}, {154, 28, 188, 35}, {154, 28, 188, 66}, {154, 28, 188, 133}, {154, 28, 188, 144}, {154, 28, 188, 145}}},\n\t\t{Region: \"Germany\", Group: \"Premium UDP Europe\", Hostname: \"87-1-de.cg-dialup.net\", IPs: []net.IP{{84, 17, 48, 41}, {84, 17, 48, 224}, {84, 17, 49, 95}, {84, 17, 49, 236}, {84, 17, 49, 241}, {138, 199, 36, 151}, {154, 13, 1, 177}, {154, 28, 188, 73}, {154, 28, 188, 76}, {154, 28, 188, 93}}},\n\t\t{Region: \"Greece\", Group: \"Premium TCP Europe\", Hostname: \"97-1-gr.cg-dialup.net\", IPs: []net.IP{{185, 51, 134, 163}, {185, 51, 134, 165}, {185, 51, 134, 171}, {185, 51, 134, 172}, {185, 51, 134, 245}, {185, 51, 134, 246}, {185, 51, 134, 247}, {185, 51, 134, 249}, {185, 51, 134, 251}, {185, 51, 134, 254}}},\n\t\t{Region: \"Greece\", Group: \"Premium UDP Europe\", Hostname: \"87-1-gr.cg-dialup.net\", IPs: []net.IP{{185, 51, 134, 163}, {185, 51, 134, 166}, {185, 51, 134, 173}, {185, 51, 134, 174}, {185, 51, 134, 244}, {185, 51, 134, 246}, {185, 51, 134, 247}, {185, 51, 134, 251}, {185, 51, 134, 252}, {185, 51, 134, 253}}},\n\t\t{Region: \"Greenland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-gl.cg-dialup.net\", IPs: []net.IP{{91, 90, 120, 3}, {91, 90, 120, 4}, {91, 90, 120, 5}, {91, 90, 120, 7}, {91, 90, 120, 8}, {91, 90, 120, 10}, {91, 90, 120, 12}, {91, 90, 120, 13}, {91, 90, 120, 14}, {91, 90, 120, 17}}},\n\t\t{Region: \"Greenland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-gl.cg-dialup.net\", IPs: []net.IP{{91, 90, 
120, 3}, {91, 90, 120, 4}, {91, 90, 120, 5}, {91, 90, 120, 7}, {91, 90, 120, 9}, {91, 90, 120, 10}, {91, 90, 120, 12}, {91, 90, 120, 14}, {91, 90, 120, 15}, {91, 90, 120, 16}}},\n\t\t{Region: \"Hong Kong\", Group: \"Premium TCP Asia\", Hostname: \"96-1-hk.cg-dialup.net\", IPs: []net.IP{{84, 17, 56, 144}, {84, 17, 56, 148}, {84, 17, 56, 153}, {84, 17, 56, 162}, {84, 17, 56, 163}, {84, 17, 56, 169}, {84, 17, 56, 170}, {84, 17, 56, 179}, {84, 17, 56, 180}, {84, 17, 56, 181}}},\n\t\t{Region: \"Hong Kong\", Group: \"Premium UDP Asia\", Hostname: \"95-1-hk.cg-dialup.net\", IPs: []net.IP{{84, 17, 56, 143}, {84, 17, 56, 147}, {84, 17, 56, 150}, {84, 17, 56, 152}, {84, 17, 56, 161}, {84, 17, 56, 164}, {84, 17, 56, 168}, {84, 17, 56, 179}, {84, 17, 56, 180}, {84, 17, 56, 183}}},\n\t\t{Region: \"Hungary\", Group: \"Premium TCP Europe\", Hostname: \"97-1-hu.cg-dialup.net\", IPs: []net.IP{{86, 106, 74, 247}, {86, 106, 74, 251}, {86, 106, 74, 253}, {185, 189, 114, 117}, {185, 189, 114, 118}, {185, 189, 114, 119}, {185, 189, 114, 121}, {185, 189, 114, 123}, {185, 189, 114, 125}, {185, 189, 114, 126}}},\n\t\t{Region: \"Hungary\", Group: \"Premium UDP Europe\", Hostname: \"87-1-hu.cg-dialup.net\", IPs: []net.IP{{86, 106, 74, 245}, {86, 106, 74, 247}, {86, 106, 74, 248}, {86, 106, 74, 249}, {86, 106, 74, 250}, {86, 106, 74, 252}, {86, 106, 74, 253}, {185, 189, 114, 120}, {185, 189, 114, 121}, {185, 189, 114, 122}}},\n\t\t{Region: \"Iceland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-is.cg-dialup.net\", IPs: []net.IP{{45, 133, 193, 3}, {45, 133, 193, 4}, {45, 133, 193, 6}, {45, 133, 193, 7}, {45, 133, 193, 8}, {45, 133, 193, 10}, {45, 133, 193, 11}, {45, 133, 193, 12}, {45, 133, 193, 13}, {45, 133, 193, 14}}},\n\t\t{Region: \"Iceland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-is.cg-dialup.net\", IPs: []net.IP{{45, 133, 193, 3}, {45, 133, 193, 5}, {45, 133, 193, 6}, {45, 133, 193, 7}, {45, 133, 193, 8}, {45, 133, 193, 9}, {45, 133, 193, 10}, {45, 133, 193, 11}, {45, 
133, 193, 13}, {45, 133, 193, 14}}},\n\t\t{Region: \"India\", Group: \"Premium TCP Europe\", Hostname: \"97-1-in.cg-dialup.net\", IPs: []net.IP{{103, 13, 112, 68}, {103, 13, 112, 70}, {103, 13, 112, 72}, {103, 13, 112, 74}, {103, 13, 112, 75}, {103, 13, 113, 74}, {103, 13, 113, 79}, {103, 13, 113, 82}, {103, 13, 113, 83}, {103, 13, 113, 84}}},\n\t\t{Region: \"India\", Group: \"Premium UDP Europe\", Hostname: \"87-1-in.cg-dialup.net\", IPs: []net.IP{{103, 13, 112, 67}, {103, 13, 112, 70}, {103, 13, 112, 71}, {103, 13, 112, 77}, {103, 13, 112, 80}, {103, 13, 113, 72}, {103, 13, 113, 74}, {103, 13, 113, 75}, {103, 13, 113, 77}, {103, 13, 113, 85}}},\n\t\t{Region: \"Indonesia\", Group: \"Premium TCP Asia\", Hostname: \"96-1-id.cg-dialup.net\", IPs: []net.IP{{146, 70, 14, 3}, {146, 70, 14, 4}, {146, 70, 14, 5}, {146, 70, 14, 6}, {146, 70, 14, 7}, {146, 70, 14, 10}, {146, 70, 14, 12}, {146, 70, 14, 13}, {146, 70, 14, 15}, {146, 70, 14, 16}}},\n\t\t{Region: \"Indonesia\", Group: \"Premium UDP Asia\", Hostname: \"95-1-id.cg-dialup.net\", IPs: []net.IP{{146, 70, 14, 3}, {146, 70, 14, 5}, {146, 70, 14, 8}, {146, 70, 14, 9}, {146, 70, 14, 10}, {146, 70, 14, 12}, {146, 70, 14, 13}, {146, 70, 14, 14}, {146, 70, 14, 15}, {146, 70, 14, 16}}},\n\t\t{Region: \"Iran\", Group: \"Premium TCP Asia\", Hostname: \"96-1-ir.cg-dialup.net\", IPs: []net.IP{{62, 133, 46, 3}, {62, 133, 46, 4}, {62, 133, 46, 5}, {62, 133, 46, 6}, {62, 133, 46, 7}, {62, 133, 46, 8}, {62, 133, 46, 9}, {62, 133, 46, 10}, {62, 133, 46, 14}, {62, 133, 46, 15}}},\n\t\t{Region: \"Iran\", Group: \"Premium UDP Asia\", Hostname: \"95-1-ir.cg-dialup.net\", IPs: []net.IP{{62, 133, 46, 3}, {62, 133, 46, 4}, {62, 133, 46, 7}, {62, 133, 46, 8}, {62, 133, 46, 11}, {62, 133, 46, 12}, {62, 133, 46, 13}, {62, 133, 46, 14}, {62, 133, 46, 15}, {62, 133, 46, 16}}},\n\t\t{Region: \"Ireland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ie.cg-dialup.net\", IPs: []net.IP{{37, 120, 235, 154}, {37, 120, 235, 166}, {37, 120, 235, 
174}, {77, 81, 139, 35}, {84, 247, 48, 6}, {84, 247, 48, 19}, {84, 247, 48, 22}, {84, 247, 48, 23}, {84, 247, 48, 25}, {84, 247, 48, 26}}},\n\t\t{Region: \"Ireland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ie.cg-dialup.net\", IPs: []net.IP{{37, 120, 235, 147}, {37, 120, 235, 148}, {37, 120, 235, 153}, {37, 120, 235, 158}, {37, 120, 235, 169}, {37, 120, 235, 174}, {84, 247, 48, 8}, {84, 247, 48, 11}, {84, 247, 48, 20}, {84, 247, 48, 23}}},\n\t\t{Region: \"Isle of Man\", Group: \"Premium TCP Europe\", Hostname: \"97-1-im.cg-dialup.net\", IPs: []net.IP{{91, 90, 124, 147}, {91, 90, 124, 149}, {91, 90, 124, 150}, {91, 90, 124, 151}, {91, 90, 124, 152}, {91, 90, 124, 153}, {91, 90, 124, 154}, {91, 90, 124, 156}, {91, 90, 124, 157}, {91, 90, 124, 158}}},\n\t\t{Region: \"Isle of Man\", Group: \"Premium UDP Europe\", Hostname: \"87-1-im.cg-dialup.net\", IPs: []net.IP{{91, 90, 124, 147}, {91, 90, 124, 149}, {91, 90, 124, 150}, {91, 90, 124, 151}, {91, 90, 124, 152}, {91, 90, 124, 153}, {91, 90, 124, 154}, {91, 90, 124, 155}, {91, 90, 124, 156}, {91, 90, 124, 157}}},\n\t\t{Region: \"Israel\", Group: \"Premium TCP Europe\", Hostname: \"97-1-il.cg-dialup.net\", IPs: []net.IP{{160, 116, 0, 174}, {185, 77, 248, 103}, {185, 77, 248, 111}, {185, 77, 248, 113}, {185, 77, 248, 114}, {185, 77, 248, 124}, {185, 77, 248, 125}, {185, 77, 248, 127}, {185, 77, 248, 128}, {185, 77, 248, 129}}},\n\t\t{Region: \"Israel\", Group: \"Premium UDP Europe\", Hostname: \"87-1-il.cg-dialup.net\", IPs: []net.IP{{160, 116, 0, 163}, {160, 116, 0, 165}, {160, 116, 0, 172}, {185, 77, 248, 103}, {185, 77, 248, 106}, {185, 77, 248, 114}, {185, 77, 248, 117}, {185, 77, 248, 118}, {185, 77, 248, 126}, {185, 77, 248, 129}}},\n\t\t{Region: \"Italy\", Group: \"Premium TCP Europe\", Hostname: \"97-1-it.cg-dialup.net\", IPs: []net.IP{{84, 17, 58, 21}, {84, 17, 58, 100}, {84, 17, 58, 106}, {84, 17, 58, 111}, {84, 17, 58, 117}, {87, 101, 94, 122}, {212, 102, 55, 100}, {212, 102, 55, 106}, {212, 102, 55, 
110}, {212, 102, 55, 122}}},\n\t\t{Region: \"Italy\", Group: \"Premium UDP Europe\", Hostname: \"87-1-it.cg-dialup.net\", IPs: []net.IP{{84, 17, 58, 19}, {84, 17, 58, 95}, {84, 17, 58, 105}, {84, 17, 58, 119}, {84, 17, 58, 120}, {87, 101, 94, 116}, {185, 217, 71, 137}, {185, 217, 71, 138}, {185, 217, 71, 153}, {212, 102, 55, 108}}},\n\t\t{Region: \"Japan\", Group: \"Premium TCP Asia\", Hostname: \"96-1-jp.cg-dialup.net\", IPs: []net.IP{{156, 146, 35, 6}, {156, 146, 35, 10}, {156, 146, 35, 15}, {156, 146, 35, 22}, {156, 146, 35, 37}, {156, 146, 35, 39}, {156, 146, 35, 40}, {156, 146, 35, 41}, {156, 146, 35, 44}, {156, 146, 35, 50}}},\n\t\t{Region: \"Japan\", Group: \"Premium UDP Asia\", Hostname: \"95-1-jp.cg-dialup.net\", IPs: []net.IP{{156, 146, 35, 4}, {156, 146, 35, 14}, {156, 146, 35, 15}, {156, 146, 35, 18}, {156, 146, 35, 25}, {156, 146, 35, 34}, {156, 146, 35, 36}, {156, 146, 35, 46}, {156, 146, 35, 49}, {156, 146, 35, 50}}},\n\t\t{Region: \"Kazakhstan\", Group: \"Premium TCP Europe\", Hostname: \"97-1-kz.cg-dialup.net\", IPs: []net.IP{{62, 133, 47, 131}, {62, 133, 47, 132}, {62, 133, 47, 134}, {62, 133, 47, 136}, {62, 133, 47, 138}, {62, 133, 47, 139}, {62, 133, 47, 140}, {62, 133, 47, 142}, {62, 133, 47, 143}, {62, 133, 47, 144}}},\n\t\t{Region: \"Kazakhstan\", Group: \"Premium UDP Europe\", Hostname: \"87-1-kz.cg-dialup.net\", IPs: []net.IP{{62, 133, 47, 131}, {62, 133, 47, 132}, {62, 133, 47, 133}, {62, 133, 47, 134}, {62, 133, 47, 135}, {62, 133, 47, 138}, {62, 133, 47, 139}, {62, 133, 47, 140}, {62, 133, 47, 142}, {62, 133, 47, 143}}},\n\t\t{Region: \"Kenya\", Group: \"Premium TCP Asia\", Hostname: \"96-1-ke.cg-dialup.net\", IPs: []net.IP{{62, 12, 118, 195}, {62, 12, 118, 196}, {62, 12, 118, 197}, {62, 12, 118, 198}, {62, 12, 118, 199}, {62, 12, 118, 200}, {62, 12, 118, 201}, {62, 12, 118, 202}, {62, 12, 118, 203}, {62, 12, 118, 204}}},\n\t\t{Region: \"Kenya\", Group: \"Premium UDP Asia\", Hostname: \"95-1-ke.cg-dialup.net\", IPs: []net.IP{{62, 12, 
118, 195}, {62, 12, 118, 196}, {62, 12, 118, 197}, {62, 12, 118, 198}, {62, 12, 118, 199}, {62, 12, 118, 200}, {62, 12, 118, 201}, {62, 12, 118, 202}, {62, 12, 118, 203}, {62, 12, 118, 204}}},\n\t\t{Region: \"Korea\", Group: \"Premium TCP Asia\", Hostname: \"96-1-kr.cg-dialup.net\", IPs: []net.IP{{79, 110, 55, 131}, {79, 110, 55, 134}, {79, 110, 55, 141}, {79, 110, 55, 147}, {79, 110, 55, 148}, {79, 110, 55, 151}, {79, 110, 55, 152}, {79, 110, 55, 153}, {79, 110, 55, 155}, {79, 110, 55, 157}}},\n\t\t{Region: \"Korea\", Group: \"Premium UDP Asia\", Hostname: \"95-1-kr.cg-dialup.net\", IPs: []net.IP{{79, 110, 55, 131}, {79, 110, 55, 133}, {79, 110, 55, 134}, {79, 110, 55, 136}, {79, 110, 55, 138}, {79, 110, 55, 140}, {79, 110, 55, 149}, {79, 110, 55, 151}, {79, 110, 55, 152}, {79, 110, 55, 157}}},\n\t\t{Region: \"Latvia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-lv.cg-dialup.net\", IPs: []net.IP{{109, 248, 148, 244}, {109, 248, 148, 245}, {109, 248, 148, 246}, {109, 248, 148, 247}, {109, 248, 148, 249}, {109, 248, 148, 250}, {109, 248, 148, 253}, {109, 248, 149, 22}, {109, 248, 149, 24}, {109, 248, 149, 25}}},\n\t\t{Region: \"Latvia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-lv.cg-dialup.net\", IPs: []net.IP{{109, 248, 148, 248}, {109, 248, 148, 250}, {109, 248, 148, 254}, {109, 248, 149, 19}, {109, 248, 149, 20}, {109, 248, 149, 22}, {109, 248, 149, 24}, {109, 248, 149, 26}, {109, 248, 149, 28}, {109, 248, 149, 30}}},\n\t\t{Region: \"Liechtenstein\", Group: \"Premium UDP Europe\", Hostname: \"87-1-li.cg-dialup.net\", IPs: []net.IP{{91, 90, 122, 131}, {91, 90, 122, 134}, {91, 90, 122, 137}, {91, 90, 122, 138}, {91, 90, 122, 139}, {91, 90, 122, 140}, {91, 90, 122, 141}, {91, 90, 122, 142}, {91, 90, 122, 144}, {91, 90, 122, 145}}},\n\t\t{Region: \"Lithuania\", Group: \"Premium TCP Europe\", Hostname: \"97-1-lt.cg-dialup.net\", IPs: []net.IP{{85, 206, 162, 212}, {85, 206, 162, 215}, {85, 206, 162, 219}, {85, 206, 162, 222}, {85, 206, 165, 17}, {85, 206, 
165, 23}, {85, 206, 165, 25}, {85, 206, 165, 26}, {85, 206, 165, 30}, {85, 206, 165, 31}}},\n\t\t{Region: \"Lithuania\", Group: \"Premium UDP Europe\", Hostname: \"87-1-lt.cg-dialup.net\", IPs: []net.IP{{85, 206, 162, 209}, {85, 206, 162, 210}, {85, 206, 162, 211}, {85, 206, 162, 213}, {85, 206, 162, 214}, {85, 206, 162, 217}, {85, 206, 162, 218}, {85, 206, 162, 220}, {85, 206, 165, 26}, {85, 206, 165, 30}}},\n\t\t{Region: \"Luxembourg\", Group: \"Premium UDP Europe\", Hostname: \"87-1-lu.cg-dialup.net\", IPs: []net.IP{{5, 253, 204, 7}, {5, 253, 204, 10}, {5, 253, 204, 12}, {5, 253, 204, 23}, {5, 253, 204, 26}, {5, 253, 204, 30}, {5, 253, 204, 37}, {5, 253, 204, 39}, {5, 253, 204, 44}, {5, 253, 204, 45}}},\n\t\t{Region: \"Macao\", Group: \"Premium TCP Asia\", Hostname: \"96-1-mo.cg-dialup.net\", IPs: []net.IP{{84, 252, 92, 131}, {84, 252, 92, 133}, {84, 252, 92, 135}, {84, 252, 92, 137}, {84, 252, 92, 138}, {84, 252, 92, 139}, {84, 252, 92, 141}, {84, 252, 92, 142}, {84, 252, 92, 144}, {84, 252, 92, 145}}},\n\t\t{Region: \"Macao\", Group: \"Premium UDP Asia\", Hostname: \"95-1-mo.cg-dialup.net\", IPs: []net.IP{{84, 252, 92, 132}, {84, 252, 92, 134}, {84, 252, 92, 135}, {84, 252, 92, 136}, {84, 252, 92, 137}, {84, 252, 92, 139}, {84, 252, 92, 141}, {84, 252, 92, 143}, {84, 252, 92, 144}, {84, 252, 92, 145}}},\n\t\t{Region: \"Macedonia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-mk.cg-dialup.net\", IPs: []net.IP{{185, 225, 28, 3}, {185, 225, 28, 4}, {185, 225, 28, 5}, {185, 225, 28, 6}, {185, 225, 28, 7}, {185, 225, 28, 8}, {185, 225, 28, 9}, {185, 225, 28, 10}, {185, 225, 28, 11}, {185, 225, 28, 12}}},\n\t\t{Region: \"Macedonia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-mk.cg-dialup.net\", IPs: []net.IP{{185, 225, 28, 3}, {185, 225, 28, 4}, {185, 225, 28, 5}, {185, 225, 28, 6}, {185, 225, 28, 7}, {185, 225, 28, 8}, {185, 225, 28, 9}, {185, 225, 28, 10}, {185, 225, 28, 11}, {185, 225, 28, 12}}},\n\t\t{Region: \"Malaysia\", Group: \"Premium TCP Asia\", 
Hostname: \"96-1-my.cg-dialup.net\", IPs: []net.IP{{146, 70, 15, 4}, {146, 70, 15, 6}, {146, 70, 15, 8}, {146, 70, 15, 9}, {146, 70, 15, 10}, {146, 70, 15, 11}, {146, 70, 15, 12}, {146, 70, 15, 13}, {146, 70, 15, 15}, {146, 70, 15, 16}}},\n\t\t{Region: \"Malaysia\", Group: \"Premium UDP Asia\", Hostname: \"95-1-my.cg-dialup.net\", IPs: []net.IP{{146, 70, 15, 3}, {146, 70, 15, 4}, {146, 70, 15, 5}, {146, 70, 15, 6}, {146, 70, 15, 7}, {146, 70, 15, 8}, {146, 70, 15, 10}, {146, 70, 15, 12}, {146, 70, 15, 15}, {146, 70, 15, 16}}},\n\t\t{Region: \"Malta\", Group: \"Premium TCP Europe\", Hostname: \"97-1-mt.cg-dialup.net\", IPs: []net.IP{{176, 125, 230, 133}, {176, 125, 230, 135}, {176, 125, 230, 136}, {176, 125, 230, 137}, {176, 125, 230, 138}, {176, 125, 230, 140}, {176, 125, 230, 142}, {176, 125, 230, 143}, {176, 125, 230, 144}, {176, 125, 230, 145}}},\n\t\t{Region: \"Malta\", Group: \"Premium UDP Europe\", Hostname: \"87-1-mt.cg-dialup.net\", IPs: []net.IP{{176, 125, 230, 131}, {176, 125, 230, 133}, {176, 125, 230, 134}, {176, 125, 230, 136}, {176, 125, 230, 137}, {176, 125, 230, 138}, {176, 125, 230, 139}, {176, 125, 230, 140}, {176, 125, 230, 144}, {176, 125, 230, 145}}},\n\t\t{Region: \"Mexico\", Group: \"Premium TCP USA\", Hostname: \"93-1-mx.cg-dialup.net\", IPs: []net.IP{{77, 81, 142, 132}, {77, 81, 142, 134}, {77, 81, 142, 136}, {77, 81, 142, 139}, {77, 81, 142, 142}, {77, 81, 142, 154}, {77, 81, 142, 155}, {77, 81, 142, 157}, {77, 81, 142, 158}, {77, 81, 142, 159}}},\n\t\t{Region: \"Mexico\", Group: \"Premium UDP USA\", Hostname: \"94-1-mx.cg-dialup.net\", IPs: []net.IP{{77, 81, 142, 130}, {77, 81, 142, 131}, {77, 81, 142, 132}, {77, 81, 142, 139}, {77, 81, 142, 141}, {77, 81, 142, 142}, {77, 81, 142, 146}, {77, 81, 142, 147}, {77, 81, 142, 154}, {77, 81, 142, 159}}},\n\t\t{Region: \"Moldova\", Group: \"Premium TCP Europe\", Hostname: \"97-1-md.cg-dialup.net\", IPs: []net.IP{{178, 175, 130, 243}, {178, 175, 130, 244}, {178, 175, 130, 245}, {178, 175, 130, 
246}, {178, 175, 130, 251}, {178, 175, 130, 254}, {178, 175, 142, 131}, {178, 175, 142, 132}, {178, 175, 142, 133}, {178, 175, 142, 134}}},\n\t\t{Region: \"Moldova\", Group: \"Premium UDP Europe\", Hostname: \"87-1-md.cg-dialup.net\", IPs: []net.IP{{178, 175, 130, 243}, {178, 175, 130, 244}, {178, 175, 130, 246}, {178, 175, 130, 250}, {178, 175, 130, 251}, {178, 175, 130, 253}, {178, 175, 130, 254}, {178, 175, 142, 132}, {178, 175, 142, 133}, {178, 175, 142, 134}}},\n\t\t{Region: \"Monaco\", Group: \"Premium TCP Europe\", Hostname: \"97-1-mc.cg-dialup.net\", IPs: []net.IP{{95, 181, 233, 131}, {95, 181, 233, 132}, {95, 181, 233, 133}, {95, 181, 233, 137}, {95, 181, 233, 138}, {95, 181, 233, 139}, {95, 181, 233, 140}, {95, 181, 233, 141}, {95, 181, 233, 143}, {95, 181, 233, 144}}},\n\t\t{Region: \"Monaco\", Group: \"Premium UDP Europe\", Hostname: \"87-1-mc.cg-dialup.net\", IPs: []net.IP{{95, 181, 233, 132}, {95, 181, 233, 135}, {95, 181, 233, 136}, {95, 181, 233, 137}, {95, 181, 233, 138}, {95, 181, 233, 139}, {95, 181, 233, 141}, {95, 181, 233, 142}, {95, 181, 233, 143}, {95, 181, 233, 144}}},\n\t\t{Region: \"Mongolia\", Group: \"Premium TCP Asia\", Hostname: \"96-1-mn.cg-dialup.net\", IPs: []net.IP{{185, 253, 163, 132}, {185, 253, 163, 133}, {185, 253, 163, 135}, {185, 253, 163, 136}, {185, 253, 163, 139}, {185, 253, 163, 140}, {185, 253, 163, 141}, {185, 253, 163, 142}, {185, 253, 163, 143}, {185, 253, 163, 144}}},\n\t\t{Region: \"Mongolia\", Group: \"Premium UDP Asia\", Hostname: \"95-1-mn.cg-dialup.net\", IPs: []net.IP{{185, 253, 163, 131}, {185, 253, 163, 133}, {185, 253, 163, 134}, {185, 253, 163, 137}, {185, 253, 163, 138}, {185, 253, 163, 139}, {185, 253, 163, 140}, {185, 253, 163, 141}, {185, 253, 163, 142}, {185, 253, 163, 144}}},\n\t\t{Region: \"Montenegro\", Group: \"Premium TCP Europe\", Hostname: \"97-1-me.cg-dialup.net\", IPs: []net.IP{{176, 125, 229, 131}, {176, 125, 229, 135}, {176, 125, 229, 137}, {176, 125, 229, 138}, {176, 125, 229, 140}, {176, 
125, 229, 141}, {176, 125, 229, 142}, {176, 125, 229, 143}, {176, 125, 229, 144}, {176, 125, 229, 145}}},\n\t\t{Region: \"Montenegro\", Group: \"Premium UDP Europe\", Hostname: \"87-1-me.cg-dialup.net\", IPs: []net.IP{{176, 125, 229, 131}, {176, 125, 229, 134}, {176, 125, 229, 136}, {176, 125, 229, 137}, {176, 125, 229, 138}, {176, 125, 229, 139}, {176, 125, 229, 140}, {176, 125, 229, 141}, {176, 125, 229, 143}, {176, 125, 229, 144}}},\n\t\t{Region: \"Morocco\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ma.cg-dialup.net\", IPs: []net.IP{{95, 181, 232, 132}, {95, 181, 232, 133}, {95, 181, 232, 134}, {95, 181, 232, 136}, {95, 181, 232, 137}, {95, 181, 232, 138}, {95, 181, 232, 139}, {95, 181, 232, 140}, {95, 181, 232, 141}, {95, 181, 232, 144}}},\n\t\t{Region: \"Morocco\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ma.cg-dialup.net\", IPs: []net.IP{{95, 181, 232, 131}, {95, 181, 232, 132}, {95, 181, 232, 133}, {95, 181, 232, 135}, {95, 181, 232, 137}, {95, 181, 232, 139}, {95, 181, 232, 140}, {95, 181, 232, 141}, {95, 181, 232, 142}, {95, 181, 232, 143}}},\n\t\t{Region: \"Netherlands\", Group: \"Premium TCP Europe\", Hostname: \"97-1-nl.cg-dialup.net\", IPs: []net.IP{{84, 17, 47, 98}, {181, 214, 206, 22}, {181, 214, 206, 27}, {181, 214, 206, 36}, {195, 78, 54, 10}, {195, 78, 54, 20}, {195, 78, 54, 43}, {195, 78, 54, 50}, {195, 78, 54, 119}, {195, 181, 172, 78}}},\n\t\t{Region: \"Netherlands\", Group: \"Premium UDP Europe\", Hostname: \"87-1-nl.cg-dialup.net\", IPs: []net.IP{{84, 17, 47, 110}, {181, 214, 206, 29}, {181, 214, 206, 42}, {195, 78, 54, 8}, {195, 78, 54, 19}, {195, 78, 54, 47}, {195, 78, 54, 110}, {195, 78, 54, 141}, {195, 78, 54, 143}, {195, 78, 54, 157}}},\n\t\t{Region: \"New Zealand\", Group: \"Premium TCP Asia\", Hostname: \"96-1-nz.cg-dialup.net\", IPs: []net.IP{{43, 250, 207, 98}, {43, 250, 207, 99}, {43, 250, 207, 100}, {43, 250, 207, 101}, {43, 250, 207, 102}, {43, 250, 207, 103}, {43, 250, 207, 105}, {43, 250, 207, 106}, {43, 250, 207, 
108}, {43, 250, 207, 109}}},\n\t\t{Region: \"New Zealand\", Group: \"Premium UDP Asia\", Hostname: \"95-1-nz.cg-dialup.net\", IPs: []net.IP{{43, 250, 207, 98}, {43, 250, 207, 99}, {43, 250, 207, 102}, {43, 250, 207, 104}, {43, 250, 207, 105}, {43, 250, 207, 106}, {43, 250, 207, 107}, {43, 250, 207, 108}, {43, 250, 207, 109}, {43, 250, 207, 110}}},\n\t\t{Region: \"Nigeria\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ng.cg-dialup.net\", IPs: []net.IP{{102, 165, 25, 68}, {102, 165, 25, 69}, {102, 165, 25, 70}, {102, 165, 25, 71}, {102, 165, 25, 72}, {102, 165, 25, 73}, {102, 165, 25, 75}, {102, 165, 25, 76}, {102, 165, 25, 77}, {102, 165, 25, 78}}},\n\t\t{Region: \"Nigeria\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ng.cg-dialup.net\", IPs: []net.IP{{102, 165, 25, 68}, {102, 165, 25, 69}, {102, 165, 25, 70}, {102, 165, 25, 71}, {102, 165, 25, 72}, {102, 165, 25, 74}, {102, 165, 25, 75}, {102, 165, 25, 76}, {102, 165, 25, 77}, {102, 165, 25, 78}}},\n\t\t{Region: \"Norway\", Group: \"Premium TCP Europe\", Hostname: \"97-1-no.cg-dialup.net\", IPs: []net.IP{{45, 12, 223, 137}, {45, 12, 223, 140}, {185, 206, 225, 29}, {185, 206, 225, 231}, {185, 253, 97, 234}, {185, 253, 97, 236}, {185, 253, 97, 238}, {185, 253, 97, 244}, {185, 253, 97, 250}, {185, 253, 97, 254}}},\n\t\t{Region: \"Norway\", Group: \"Premium UDP Europe\", Hostname: \"87-1-no.cg-dialup.net\", IPs: []net.IP{{45, 12, 223, 133}, {45, 12, 223, 134}, {45, 12, 223, 142}, {185, 206, 225, 227}, {185, 206, 225, 228}, {185, 206, 225, 231}, {185, 206, 225, 235}, {185, 253, 97, 237}, {185, 253, 97, 246}, {185, 253, 97, 254}}},\n\t\t{Region: \"Pakistan\", Group: \"Premium TCP Asia\", Hostname: \"96-1-pk.cg-dialup.net\", IPs: []net.IP{{146, 70, 12, 3}, {146, 70, 12, 4}, {146, 70, 12, 6}, {146, 70, 12, 8}, {146, 70, 12, 9}, {146, 70, 12, 10}, {146, 70, 12, 11}, {146, 70, 12, 12}, {146, 70, 12, 13}, {146, 70, 12, 14}}},\n\t\t{Region: \"Pakistan\", Group: \"Premium UDP Asia\", Hostname: 
\"95-1-pk.cg-dialup.net\", IPs: []net.IP{{146, 70, 12, 4}, {146, 70, 12, 5}, {146, 70, 12, 6}, {146, 70, 12, 7}, {146, 70, 12, 8}, {146, 70, 12, 10}, {146, 70, 12, 11}, {146, 70, 12, 12}, {146, 70, 12, 13}, {146, 70, 12, 14}}},\n\t\t{Region: \"Panama\", Group: \"Premium TCP Europe\", Hostname: \"97-1-pa.cg-dialup.net\", IPs: []net.IP{{91, 90, 126, 131}, {91, 90, 126, 132}, {91, 90, 126, 133}, {91, 90, 126, 134}, {91, 90, 126, 136}, {91, 90, 126, 138}, {91, 90, 126, 139}, {91, 90, 126, 141}, {91, 90, 126, 142}, {91, 90, 126, 145}}},\n\t\t{Region: \"Panama\", Group: \"Premium UDP Europe\", Hostname: \"87-1-pa.cg-dialup.net\", IPs: []net.IP{{91, 90, 126, 131}, {91, 90, 126, 133}, {91, 90, 126, 134}, {91, 90, 126, 135}, {91, 90, 126, 136}, {91, 90, 126, 138}, {91, 90, 126, 140}, {91, 90, 126, 141}, {91, 90, 126, 142}, {91, 90, 126, 145}}},\n\t\t{Region: \"Philippines\", Group: \"Premium TCP Asia\", Hostname: \"96-1-ph.cg-dialup.net\", IPs: []net.IP{{188, 214, 125, 37}, {188, 214, 125, 38}, {188, 214, 125, 40}, {188, 214, 125, 43}, {188, 214, 125, 44}, {188, 214, 125, 45}, {188, 214, 125, 52}, {188, 214, 125, 55}, {188, 214, 125, 61}, {188, 214, 125, 62}}},\n\t\t{Region: \"Philippines\", Group: \"Premium UDP Asia\", Hostname: \"95-1-ph.cg-dialup.net\", IPs: []net.IP{{188, 214, 125, 37}, {188, 214, 125, 40}, {188, 214, 125, 46}, {188, 214, 125, 49}, {188, 214, 125, 52}, {188, 214, 125, 54}, {188, 214, 125, 57}, {188, 214, 125, 58}, {188, 214, 125, 61}, {188, 214, 125, 62}}},\n\t\t{Region: \"Poland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-pl.cg-dialup.net\", IPs: []net.IP{{138, 199, 59, 132}, {138, 199, 59, 136}, {138, 199, 59, 137}, {138, 199, 59, 143}, {138, 199, 59, 144}, {138, 199, 59, 152}, {138, 199, 59, 153}, {138, 199, 59, 166}, {138, 199, 59, 174}, {138, 199, 59, 175}}},\n\t\t{Region: \"Poland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-pl.cg-dialup.net\", IPs: []net.IP{{138, 199, 59, 130}, {138, 199, 59, 136}, {138, 199, 59, 148}, {138, 199, 59, 
149}, {138, 199, 59, 153}, {138, 199, 59, 156}, {138, 199, 59, 157}, {138, 199, 59, 164}, {138, 199, 59, 171}, {138, 199, 59, 173}}},\n\t\t{Region: \"Portugal\", Group: \"Premium TCP Europe\", Hostname: \"97-1-pt.cg-dialup.net\", IPs: []net.IP{{89, 26, 243, 112}, {89, 26, 243, 115}, {89, 26, 243, 195}, {89, 26, 243, 216}, {89, 26, 243, 218}, {89, 26, 243, 220}, {89, 26, 243, 222}, {89, 26, 243, 223}, {89, 26, 243, 225}, {89, 26, 243, 228}}},\n\t\t{Region: \"Portugal\", Group: \"Premium UDP Europe\", Hostname: \"87-1-pt.cg-dialup.net\", IPs: []net.IP{{89, 26, 243, 99}, {89, 26, 243, 113}, {89, 26, 243, 115}, {89, 26, 243, 195}, {89, 26, 243, 199}, {89, 26, 243, 216}, {89, 26, 243, 219}, {89, 26, 243, 225}, {89, 26, 243, 226}, {89, 26, 243, 227}}},\n\t\t{Region: \"Qatar\", Group: \"Premium TCP Europe\", Hostname: \"97-1-qa.cg-dialup.net\", IPs: []net.IP{{95, 181, 234, 133}, {95, 181, 234, 135}, {95, 181, 234, 136}, {95, 181, 234, 137}, {95, 181, 234, 138}, {95, 181, 234, 139}, {95, 181, 234, 140}, {95, 181, 234, 141}, {95, 181, 234, 142}, {95, 181, 234, 143}}},\n\t\t{Region: \"Qatar\", Group: \"Premium UDP Europe\", Hostname: \"87-1-qa.cg-dialup.net\", IPs: []net.IP{{95, 181, 234, 131}, {95, 181, 234, 132}, {95, 181, 234, 133}, {95, 181, 234, 134}, {95, 181, 234, 135}, {95, 181, 234, 137}, {95, 181, 234, 138}, {95, 181, 234, 139}, {95, 181, 234, 142}, {95, 181, 234, 143}}},\n\t\t{Region: \"Russian Federation\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ru.cg-dialup.net\", IPs: []net.IP{{5, 8, 16, 72}, {5, 8, 16, 74}, {5, 8, 16, 84}, {5, 8, 16, 85}, {5, 8, 16, 123}, {5, 8, 16, 124}, {5, 8, 16, 132}, {146, 70, 52, 35}, {146, 70, 52, 44}, {146, 70, 52, 54}}},\n\t\t{Region: \"Russian Federation\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ru.cg-dialup.net\", IPs: []net.IP{{5, 8, 16, 75}, {5, 8, 16, 87}, {5, 8, 16, 99}, {5, 8, 16, 110}, {5, 8, 16, 138}, {146, 70, 52, 29}, {146, 70, 52, 52}, {146, 70, 52, 58}, {146, 70, 52, 59}, {146, 70, 52, 
67}}},\n\t\t{Region: \"Saudi Arabia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-sa.cg-dialup.net\", IPs: []net.IP{{95, 181, 235, 131}, {95, 181, 235, 133}, {95, 181, 235, 134}, {95, 181, 235, 135}, {95, 181, 235, 137}, {95, 181, 235, 138}, {95, 181, 235, 139}, {95, 181, 235, 140}, {95, 181, 235, 141}, {95, 181, 235, 142}}},\n\t\t{Region: \"Saudi Arabia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-sa.cg-dialup.net\", IPs: []net.IP{{95, 181, 235, 131}, {95, 181, 235, 132}, {95, 181, 235, 134}, {95, 181, 235, 135}, {95, 181, 235, 136}, {95, 181, 235, 137}, {95, 181, 235, 138}, {95, 181, 235, 139}, {95, 181, 235, 141}, {95, 181, 235, 144}}},\n\t\t{Region: \"Serbia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-rs.cg-dialup.net\", IPs: []net.IP{{37, 120, 193, 179}, {37, 120, 193, 186}, {37, 120, 193, 188}, {37, 120, 193, 190}, {141, 98, 103, 36}, {141, 98, 103, 38}, {141, 98, 103, 39}, {141, 98, 103, 43}, {141, 98, 103, 44}, {141, 98, 103, 46}}},\n\t\t{Region: \"Serbia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-rs.cg-dialup.net\", IPs: []net.IP{{37, 120, 193, 180}, {37, 120, 193, 186}, {37, 120, 193, 187}, {37, 120, 193, 188}, {37, 120, 193, 189}, {37, 120, 193, 190}, {141, 98, 103, 35}, {141, 98, 103, 36}, {141, 98, 103, 39}, {141, 98, 103, 41}}},\n\t\t{Region: \"Singapore\", Group: \"Premium TCP Asia\", Hostname: \"96-1-sg.cg-dialup.net\", IPs: []net.IP{{84, 17, 39, 162}, {84, 17, 39, 165}, {84, 17, 39, 168}, {84, 17, 39, 171}, {84, 17, 39, 175}, {84, 17, 39, 177}, {84, 17, 39, 178}, {84, 17, 39, 181}, {84, 17, 39, 183}, {84, 17, 39, 185}}},\n\t\t{Region: \"Singapore\", Group: \"Premium UDP Asia\", Hostname: \"95-1-sg.cg-dialup.net\", IPs: []net.IP{{84, 17, 39, 162}, {84, 17, 39, 165}, {84, 17, 39, 166}, {84, 17, 39, 167}, {84, 17, 39, 171}, {84, 17, 39, 174}, {84, 17, 39, 175}, {84, 17, 39, 178}, {84, 17, 39, 180}, {84, 17, 39, 185}}},\n\t\t{Region: \"Slovakia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-sk.cg-dialup.net\", IPs: 
[]net.IP{{185, 245, 85, 227}, {185, 245, 85, 228}, {185, 245, 85, 229}, {185, 245, 85, 230}, {185, 245, 85, 231}, {185, 245, 85, 232}, {185, 245, 85, 233}, {185, 245, 85, 234}, {185, 245, 85, 235}, {185, 245, 85, 236}}},\n\t\t{Region: \"Slovakia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-sk.cg-dialup.net\", IPs: []net.IP{{185, 245, 85, 227}, {185, 245, 85, 228}, {185, 245, 85, 229}, {185, 245, 85, 230}, {185, 245, 85, 231}, {185, 245, 85, 232}, {185, 245, 85, 233}, {185, 245, 85, 234}, {185, 245, 85, 235}, {185, 245, 85, 236}}},\n\t\t{Region: \"Slovenia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-si.cg-dialup.net\", IPs: []net.IP{{195, 80, 150, 211}, {195, 80, 150, 212}, {195, 80, 150, 214}, {195, 80, 150, 215}, {195, 80, 150, 216}, {195, 80, 150, 217}, {195, 80, 150, 218}, {195, 80, 150, 219}, {195, 80, 150, 221}, {195, 80, 150, 222}}},\n\t\t{Region: \"Slovenia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-si.cg-dialup.net\", IPs: []net.IP{{195, 80, 150, 211}, {195, 80, 150, 212}, {195, 80, 150, 214}, {195, 80, 150, 215}, {195, 80, 150, 216}, {195, 80, 150, 217}, {195, 80, 150, 219}, {195, 80, 150, 220}, {195, 80, 150, 221}, {195, 80, 150, 222}}},\n\t\t{Region: \"South Africa\", Group: \"Premium TCP Asia\", Hostname: \"96-1-za.cg-dialup.net\", IPs: []net.IP{{154, 127, 50, 212}, {154, 127, 50, 215}, {154, 127, 50, 217}, {154, 127, 50, 219}, {154, 127, 50, 220}, {154, 127, 50, 222}, {154, 127, 60, 196}, {154, 127, 60, 198}, {154, 127, 60, 199}, {154, 127, 60, 200}}},\n\t\t{Region: \"South Africa\", Group: \"Premium TCP Europe\", Hostname: \"97-1-za.cg-dialup.net\", IPs: []net.IP{{197, 85, 7, 26}, {197, 85, 7, 27}, {197, 85, 7, 28}, {197, 85, 7, 29}, {197, 85, 7, 30}, {197, 85, 7, 31}, {197, 85, 7, 131}, {197, 85, 7, 132}, {197, 85, 7, 133}, {197, 85, 7, 134}}},\n\t\t{Region: \"South Africa\", Group: \"Premium UDP Asia\", Hostname: \"95-1-za.cg-dialup.net\", IPs: []net.IP{{154, 127, 50, 210}, {154, 127, 50, 214}, {154, 127, 50, 218}, {154, 127, 
50, 219}, {154, 127, 50, 220}, {154, 127, 50, 221}, {154, 127, 50, 222}, {154, 127, 60, 195}, {154, 127, 60, 199}, {154, 127, 60, 206}}},\n\t\t{Region: \"South Africa\", Group: \"Premium UDP Europe\", Hostname: \"87-1-za.cg-dialup.net\", IPs: []net.IP{{197, 85, 7, 26}, {197, 85, 7, 27}, {197, 85, 7, 28}, {197, 85, 7, 29}, {197, 85, 7, 30}, {197, 85, 7, 31}, {197, 85, 7, 131}, {197, 85, 7, 132}, {197, 85, 7, 133}, {197, 85, 7, 134}}},\n\t\t{Region: \"Spain\", Group: \"Premium TCP Europe\", Hostname: \"97-1-es.cg-dialup.net\", IPs: []net.IP{{37, 120, 142, 41}, {37, 120, 142, 52}, {37, 120, 142, 55}, {37, 120, 142, 61}, {37, 120, 142, 173}, {84, 17, 62, 131}, {84, 17, 62, 142}, {84, 17, 62, 144}, {185, 93, 3, 108}, {185, 93, 3, 114}}},\n\t\t{Region: \"Sri Lanka\", Group: \"Premium TCP Europe\", Hostname: \"97-1-lk.cg-dialup.net\", IPs: []net.IP{{95, 181, 239, 131}, {95, 181, 239, 132}, {95, 181, 239, 133}, {95, 181, 239, 134}, {95, 181, 239, 135}, {95, 181, 239, 136}, {95, 181, 239, 137}, {95, 181, 239, 138}, {95, 181, 239, 140}, {95, 181, 239, 144}}},\n\t\t{Region: \"Sri Lanka\", Group: \"Premium UDP Europe\", Hostname: \"87-1-lk.cg-dialup.net\", IPs: []net.IP{{95, 181, 239, 131}, {95, 181, 239, 132}, {95, 181, 239, 133}, {95, 181, 239, 134}, {95, 181, 239, 135}, {95, 181, 239, 136}, {95, 181, 239, 140}, {95, 181, 239, 141}, {95, 181, 239, 142}, {95, 181, 239, 144}}},\n\t\t{Region: \"Sweden\", Group: \"Premium TCP Europe\", Hostname: \"97-1-se.cg-dialup.net\", IPs: []net.IP{{188, 126, 73, 207}, {188, 126, 73, 209}, {188, 126, 73, 214}, {188, 126, 73, 219}, {188, 126, 79, 6}, {188, 126, 79, 11}, {188, 126, 79, 19}, {188, 126, 79, 25}, {195, 246, 120, 148}, {195, 246, 120, 161}}},\n\t\t{Region: \"Sweden\", Group: \"Premium UDP Europe\", Hostname: \"87-1-se.cg-dialup.net\", IPs: []net.IP{{188, 126, 73, 201}, {188, 126, 73, 211}, {188, 126, 73, 213}, {188, 126, 73, 218}, {188, 126, 79, 6}, {188, 126, 79, 8}, {188, 126, 79, 19}, {195, 246, 120, 142}, {195, 246, 120, 144}, 
{195, 246, 120, 168}}},\n\t\t{Region: \"Switzerland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ch.cg-dialup.net\", IPs: []net.IP{{84, 17, 52, 4}, {84, 17, 52, 20}, {84, 17, 52, 44}, {84, 17, 52, 65}, {84, 17, 52, 72}, {84, 17, 52, 80}, {84, 17, 52, 83}, {84, 17, 52, 85}, {185, 32, 222, 112}, {185, 189, 150, 73}}},\n\t\t{Region: \"Switzerland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ch.cg-dialup.net\", IPs: []net.IP{{84, 17, 52, 5}, {84, 17, 52, 14}, {84, 17, 52, 24}, {84, 17, 52, 64}, {84, 17, 52, 73}, {84, 17, 52, 85}, {185, 32, 222, 114}, {185, 189, 150, 52}, {185, 189, 150, 57}, {195, 225, 118, 43}}},\n\t\t{Region: \"Taiwan\", Group: \"Premium TCP Asia\", Hostname: \"96-1-tw.cg-dialup.net\", IPs: []net.IP{{45, 133, 181, 100}, {45, 133, 181, 102}, {45, 133, 181, 103}, {45, 133, 181, 106}, {45, 133, 181, 109}, {45, 133, 181, 113}, {45, 133, 181, 115}, {45, 133, 181, 116}, {45, 133, 181, 123}, {45, 133, 181, 125}}},\n\t\t{Region: \"Taiwan\", Group: \"Premium UDP Asia\", Hostname: \"95-1-tw.cg-dialup.net\", IPs: []net.IP{{45, 133, 181, 99}, {45, 133, 181, 102}, {45, 133, 181, 107}, {45, 133, 181, 108}, {45, 133, 181, 109}, {45, 133, 181, 114}, {45, 133, 181, 116}, {45, 133, 181, 117}, {45, 133, 181, 123}, {45, 133, 181, 124}}},\n\t\t{Region: \"Thailand\", Group: \"Premium TCP Asia\", Hostname: \"96-1-th.cg-dialup.net\", IPs: []net.IP{{146, 70, 13, 3}, {146, 70, 13, 4}, {146, 70, 13, 6}, {146, 70, 13, 7}, {146, 70, 13, 8}, {146, 70, 13, 9}, {146, 70, 13, 11}, {146, 70, 13, 13}, {146, 70, 13, 15}, {146, 70, 13, 16}}},\n\t\t{Region: \"Thailand\", Group: \"Premium UDP Asia\", Hostname: \"95-1-th.cg-dialup.net\", IPs: []net.IP{{146, 70, 13, 3}, {146, 70, 13, 4}, {146, 70, 13, 8}, {146, 70, 13, 9}, {146, 70, 13, 10}, {146, 70, 13, 11}, {146, 70, 13, 12}, {146, 70, 13, 13}, {146, 70, 13, 15}, {146, 70, 13, 16}}},\n\t\t{Region: \"Turkey\", Group: \"Premium TCP Europe\", Hostname: \"97-1-tr.cg-dialup.net\", IPs: []net.IP{{188, 213, 34, 9}, {188, 213, 34, 
11}, {188, 213, 34, 15}, {188, 213, 34, 16}, {188, 213, 34, 23}, {188, 213, 34, 25}, {188, 213, 34, 28}, {188, 213, 34, 41}, {188, 213, 34, 108}, {188, 213, 34, 110}}},\n\t\t{Region: \"Turkey\", Group: \"Premium UDP Europe\", Hostname: \"87-1-tr.cg-dialup.net\", IPs: []net.IP{{188, 213, 34, 8}, {188, 213, 34, 11}, {188, 213, 34, 14}, {188, 213, 34, 28}, {188, 213, 34, 35}, {188, 213, 34, 42}, {188, 213, 34, 43}, {188, 213, 34, 100}, {188, 213, 34, 103}, {188, 213, 34, 107}}},\n\t\t{Region: \"Ukraine\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ua.cg-dialup.net\", IPs: []net.IP{{31, 28, 161, 18}, {31, 28, 161, 20}, {31, 28, 161, 27}, {31, 28, 163, 34}, {31, 28, 163, 37}, {31, 28, 163, 44}, {62, 149, 7, 167}, {62, 149, 7, 172}, {62, 149, 29, 45}, {62, 149, 29, 57}}},\n\t\t{Region: \"Ukraine\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ua.cg-dialup.net\", IPs: []net.IP{{31, 28, 161, 27}, {31, 28, 163, 38}, {31, 28, 163, 42}, {31, 28, 163, 54}, {31, 28, 163, 61}, {62, 149, 7, 162}, {62, 149, 7, 163}, {62, 149, 29, 35}, {62, 149, 29, 38}, {62, 149, 29, 41}}},\n\t\t{Region: \"United Arab Emirates\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ae.cg-dialup.net\", IPs: []net.IP{{217, 138, 193, 179}, {217, 138, 193, 180}, {217, 138, 193, 181}, {217, 138, 193, 182}, {217, 138, 193, 183}, {217, 138, 193, 184}, {217, 138, 193, 185}, {217, 138, 193, 186}, {217, 138, 193, 188}, {217, 138, 193, 190}}},\n\t\t{Region: \"United Arab Emirates\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ae.cg-dialup.net\", IPs: []net.IP{{217, 138, 193, 179}, {217, 138, 193, 180}, {217, 138, 193, 181}, {217, 138, 193, 182}, {217, 138, 193, 183}, {217, 138, 193, 186}, {217, 138, 193, 187}, {217, 138, 193, 188}, {217, 138, 193, 189}, {217, 138, 193, 190}}},\n\t\t{Region: \"United Kingdom\", Group: \"Premium TCP Europe\", Hostname: \"97-1-gb.cg-dialup.net\", IPs: []net.IP{{45, 133, 173, 49}, {45, 133, 173, 56}, {45, 133, 173, 82}, {45, 133, 173, 86}, {95, 154, 200, 153}, {95, 154, 
200, 156}, {181, 215, 176, 103}, {181, 215, 176, 246}, {181, 215, 176, 251}, {194, 110, 13, 141}}},\n\t\t{Region: \"United Kingdom\", Group: \"Premium UDP Europe\", Hostname: \"87-1-gb.cg-dialup.net\", IPs: []net.IP{{45, 133, 172, 100}, {45, 133, 172, 126}, {45, 133, 173, 84}, {95, 154, 200, 174}, {181, 215, 176, 110}, {181, 215, 176, 151}, {181, 215, 176, 158}, {191, 101, 209, 142}, {194, 110, 13, 107}, {194, 110, 13, 128}}},\n\t\t{Region: \"United States\", Group: \"Premium TCP USA\", Hostname: \"93-1-us.cg-dialup.net\", IPs: []net.IP{{102, 129, 145, 15}, {102, 129, 152, 195}, {102, 129, 152, 248}, {154, 21, 208, 159}, {185, 242, 5, 117}, {185, 242, 5, 123}, {185, 242, 5, 229}, {191, 96, 227, 173}, {191, 96, 227, 196}, {199, 115, 119, 248}}},\n\t\t{Region: \"United States\", Group: \"Premium UDP USA\", Hostname: \"94-1-us.cg-dialup.net\", IPs: []net.IP{{23, 82, 14, 113}, {23, 105, 177, 122}, {45, 89, 173, 222}, {84, 17, 35, 4}, {89, 187, 171, 132}, {156, 146, 37, 45}, {156, 146, 59, 86}, {184, 170, 240, 231}, {191, 96, 150, 248}, {199, 115, 119, 248}}},\n\t\t{Region: \"Venezuela\", Group: \"Premium TCP USA\", Hostname: \"93-1-ve.cg-dialup.net\", IPs: []net.IP{{95, 181, 237, 132}, {95, 181, 237, 133}, {95, 181, 237, 134}, {95, 181, 237, 135}, {95, 181, 237, 136}, {95, 181, 237, 138}, {95, 181, 237, 139}, {95, 181, 237, 140}, {95, 181, 237, 141}, {95, 181, 237, 143}}},\n\t\t{Region: \"Venezuela\", Group: \"Premium UDP USA\", Hostname: \"94-1-ve.cg-dialup.net\", IPs: []net.IP{{95, 181, 237, 131}, {95, 181, 237, 132}, {95, 181, 237, 134}, {95, 181, 237, 135}, {95, 181, 237, 136}, {95, 181, 237, 140}, {95, 181, 237, 141}, {95, 181, 237, 142}, {95, 181, 237, 143}, {95, 181, 237, 144}}},\n\t\t{Region: \"Vietnam\", Group: \"Premium TCP Asia\", Hostname: \"96-1-vn.cg-dialup.net\", IPs: []net.IP{{188, 214, 152, 99}, {188, 214, 152, 101}, {188, 214, 152, 103}, {188, 214, 152, 104}, {188, 214, 152, 105}, {188, 214, 152, 106}, {188, 214, 152, 107}, {188, 214, 152, 108}, {188, 
214, 152, 109}, {188, 214, 152, 110}}},\n\t\t{Region: \"Vietnam\", Group: \"Premium UDP Asia\", Hostname: \"95-1-vn.cg-dialup.net\", IPs: []net.IP{{188, 214, 152, 99}, {188, 214, 152, 100}, {188, 214, 152, 101}, {188, 214, 152, 102}, {188, 214, 152, 103}, {188, 214, 152, 104}, {188, 214, 152, 105}, {188, 214, 152, 106}, {188, 214, 152, 107}, {188, 214, 152, 109}}},\n\t}\n}",
"func (lv *Libvirt) RemoveTransientDHCPHost(newHost *libvirtxml.NetworkDHCPHost, app *App) error {\n\tlv.dhcpLeases.mutex.Lock()\n\tdefer lv.dhcpLeases.mutex.Unlock()\n\n\tdelete(lv.dhcpLeases.leases, newHost)\n\treturn lv.rebuildDHCPStaticLeases(app)\n}",
"func delExternalClientBlackholeFromNodes(nodes []v1.Node, routingTable, externalV4, externalV6 string, useV4 bool) {\n\tfor _, node := range nodes {\n\t\tif useV4 {\n\t\t\tout, err := runCommand(containerRuntime, \"exec\", node.Name, \"ip\", \"route\", \"del\", \"blackhole\", externalV4, \"table\", routingTable)\n\t\t\tframework.ExpectNoError(err, fmt.Sprintf(\"failed to delete blackhole route to %s on node %s table %s, out: %s\", externalV4, node.Name, routingTable, out))\n\t\t\tcontinue\n\t\t}\n\t\tout, err := runCommand(containerRuntime, \"exec\", node.Name, \"ip\", \"route\", \"del\", \"blackhole\", externalV6, \"table\", routingTable)\n\t\tframework.ExpectNoError(err, fmt.Sprintf(\"failed to delete blackhole route to %s on node %s table %s, out: %s\", externalV6, node.Name, routingTable, out))\n\t}\n}",
"func (p *ProxySQL) RemoveHostsLike(opts ...HostOpts) error {\n\tmut.Lock()\n\tdefer mut.Unlock()\n\thostq, err := buildAndParseHostQuery(opts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// build a query with these options\n\t_, err = exec(p, buildDeleteQuery(hostq))\n\treturn err\n}",
"func cleanUpNodes(provisioner provision.Provisioner) {\n\n\tfor _, machine := range provisioner.GetMachinesAll() {\n\t\tnodeName := machine.Name\n\t\texistNode := false\n\t\tfor _, node := range provisioner.Cluster.Nodes {\n\t\t\tif node.Name == nodeName {\n\t\t\t\tnode.Credential = \"\"\n\t\t\t\tnode.PublicIP = \"\"\n\t\t\t\tnode.PrivateIP = \"\"\n\t\t\t\texistNode = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif existNode {\n\t\t\tif err := provisioner.DrainAndDeleteNode(nodeName); err != nil {\n\t\t\t\tlogger.Warnf(\"[%s.%s] %s\", provisioner.Cluster.Namespace, provisioner.Cluster.Name, err.Error())\n\t\t\t}\n\t\t}\n\t}\n\tif err := provisioner.Cluster.PutStore(); err != nil {\n\t\tlogger.Warnf(\"[%s.%s] Failed to update a cluster-entity. (cause='%v')\", provisioner.Cluster.Namespace, provisioner.Cluster.Name, err)\n\t}\n\tlogger.Infof(\"[%s.%s] Garbage data has been cleaned.\", provisioner.Cluster.Namespace, provisioner.Cluster.Name)\n}",
"func DHCPDelete(ifname LinkID) error {\n\tif isUnmanaged(UnmanagedID(ifname), LINKTYPE) {\n\t\treturn NewUnmanagedLinkDHCPCannotBeModifiedError(ifname)\n\t}\n\tout, err := exec.Command(prefixInstallPAth+\"dhcp_stop.sh\", string(ifname)).Output()\n\tif err != nil {\n\t\treturn NewCannotStopDHCPError(ifname, err)\n\t}\n\tif string(out) == \"Service not running\" {\n\t\treturn NewDHCPRunningNotFoundError(ifname)\n\t}\n\treturn nil\n}",
"func filterResolvDNS(resolvConf []byte, ipv6Enabled bool, netnsEnabled bool) []byte {\n\t// If we're using the host netns, we have nothing to do besides hash the file.\n\tif !netnsEnabled {\n\t\treturn resolvConf\n\t}\n\tcleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{})\n\t// if IPv6 is not enabled, also clean out any IPv6 address nameserver\n\tif !ipv6Enabled {\n\t\tcleanedResolvConf = nsIPv6Regexp.ReplaceAll(cleanedResolvConf, []byte{})\n\t}\n\t// if the resulting resolvConf has no more nameservers defined, add appropriate\n\t// default DNS servers for IPv4 and (optionally) IPv6\n\tif len(getNameservers(cleanedResolvConf)) == 0 {\n\t\tlogrus.Infof(\"No non-localhost DNS nameservers are left in resolv.conf. Using default external servers: %v\", defaultIPv4Dns)\n\t\tdns := defaultIPv4Dns\n\t\tif ipv6Enabled {\n\t\t\tlogrus.Infof(\"IPv6 enabled; Adding default IPv6 external servers: %v\", defaultIPv6Dns)\n\t\t\tdns = append(dns, defaultIPv6Dns...)\n\t\t}\n\t\tcleanedResolvConf = append(cleanedResolvConf, []byte(\"\\n\"+strings.Join(dns, \"\\n\"))...)\n\t}\n\treturn cleanedResolvConf\n}",
"func WhitelistRemove(conn io.ReadWriteCloser, pks ...cipher.PubKey) error {\n\treturn rpc.NewClient(conn).Call(rpcMethod(\"WhitelistRemove\"), &pks, &empty)\n}",
"func (p *linodeProvider) removeAbandoned(s *linodeServer) {\n\tconst warn = \"WARNING: Cannot clean up %s: %v\"\n\n\tnow := time.Now()\n\n\tconfigs, err := p.configs(s)\n\tif err != nil {\n\t\tprintf(warn, s, err)\n\t}\n\tfor _, config := range configs {\n\t\tt, err := ParseLabelTime(config.Label)\n\t\tif err == nil && now.Sub(t) > p.backend.HaltTimeout.Duration {\n\t\t\terr := p.removeConfig(s, \"abandoned\", config.ConfigID)\n\t\t\tif err != nil {\n\t\t\t\tprintf(warn, s, err)\n\t\t\t}\n\t\t}\n\t}\n\n\tdisks, err := p.disks(s)\n\tif err != nil {\n\t\tprintf(warn, s, err)\n\t}\n\tvar diskIds []int\n\tfor _, disk := range disks {\n\t\tt, err := ParseLabelTime(disk.Label)\n\t\tif err == nil && now.Sub(t) > p.backend.HaltTimeout.Duration && disk.DiskID != s.d.Root && disk.DiskID != s.d.Swap {\n\t\t\tdiskIds = append(diskIds, disk.DiskID)\n\t\t}\n\t}\n\tif len(diskIds) > 0 {\n\t\terr := p.removeDisks(s, \"abandoned\", 0, diskIds...)\n\t\tif err != nil {\n\t\t\tprintf(warn, s, err)\n\t\t}\n\t}\n}",
"func (p* Proxy) StopProxyForServer(s *Server) (error) {\n\n f := logrus.Fields{\n \"proxy\": p.Name, \"server\": s.Name, \"user\": s.User,\n }\n\n rcon, err := p.GetRcon()\n if err != nil { \n log.Error(f,\"Server not removed.\", err)\n return err \n }\n\n serverFQDN, err := p.ProxiedServerFQDN(s)\n if err != nil { \n serverFQDN = p.attachedServerFQDN(s)\n log.Error(f, \"Failed to get proxy name from DNS. Will try to remove server forced host anyway.\", err)\n }\n f[\"serverFQDN\"] = serverFQDN\n\n command := fmt.Sprintf(\"bconf remForcedHost(%d, \\\"%s\\\")\", 0, serverFQDN)\n reply, err := rcon.Send(command)\n f[\"command\"] = command\n f[\"reply\"] = reply\n if err == nil {\n log.Info(f, \"Remote remove-forced-host completed.\")\n } else {\n log.Error(f, \"Failed on remote remove-forced-host: will try to remote remove-server.\", err)\n }\n\n return err\n}",
"func cleanupDGP(nodes *kapi.NodeList) error {\n\t// remove dnat_snat entries as well as LRPs\n\tfor _, node := range nodes.Items {\n\t\tdelPbrAndNatRules(node.Name, []string{types.InterNodePolicyPriority, types.MGMTPortPolicyPriority})\n\t}\n\t// remove SBDB MAC bindings for DGP\n\tfor _, ip := range []string{types.V4NodeLocalNATSubnetNextHop, types.V6NodeLocalNATSubnetNextHop} {\n\t\tuuid, stderr, err := util.RunOVNSbctl(\"--columns=_uuid\", \"--no-headings\", \"find\", \"mac_binding\",\n\t\t\tfmt.Sprintf(`ip=\"%s\"`, ip))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to get DGP MAC binding, err: %v, stderr: %s\", err, stderr)\n\t\t}\n\t\tif len(uuid) > 0 {\n\t\t\t_, stderr, err = util.RunOVNSbctl(\"destroy\", \"mac_binding\", uuid)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to remove mac_binding for DGP, err: %v, stderr: %s\", err, stderr)\n\t\t\t}\n\t\t}\n\t}\n\t// remove node local switch\n\t_, stderr, err := util.RunOVNNbctl(\"--if-exists\", \"ls-del\", types.NodeLocalSwitch)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to remove node local switch, err: %v, stderr: %s\", err, stderr)\n\t}\n\tdgpName := types.RouterToSwitchPrefix + types.NodeLocalSwitch\n\n\t// remove lrp on ovn_cluster_router. Will also remove gateway chassis.\n\t_, stderr, err = util.RunOVNNbctl(\"--if-exists\", \"lrp-del\", dgpName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to delete DGP LRP, error: %v, stderr: %s\", err, stderr)\n\t}\n\treturn nil\n}",
"func listHostOnlyAdapters(vbox VBoxManager) (map[string]*hostOnlyNetwork, error) {\n\tout, err := vbox.vbmOut(\"list\", \"hostonlyifs\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbyName := map[string]*hostOnlyNetwork{}\n\tbyIP := map[string]*hostOnlyNetwork{}\n\tn := &hostOnlyNetwork{}\n\n\terr = parseKeyValues(out, reColonLine, func(key, val string) error {\n\t\tswitch key {\n\t\tcase \"Name\":\n\t\t\tn.Name = val\n\t\tcase \"GUID\":\n\t\t\tn.GUID = val\n\t\tcase \"DHCP\":\n\t\t\tn.DHCP = (val != \"Disabled\")\n\t\tcase \"IPAddress\":\n\t\t\tn.IPv4.IP = net.ParseIP(val)\n\t\tcase \"NetworkMask\":\n\t\t\tn.IPv4.Mask = parseIPv4Mask(val)\n\t\tcase \"HardwareAddress\":\n\t\t\tmac, err := net.ParseMAC(val)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tn.HwAddr = mac\n\t\tcase \"MediumType\":\n\t\t\tn.Medium = val\n\t\tcase \"Status\":\n\t\t\tn.Status = val\n\t\tcase \"VBoxNetworkName\":\n\t\t\tn.NetworkName = val\n\n\t\t\tif _, present := byName[n.NetworkName]; present {\n\t\t\t\treturn fmt.Errorf(\"VirtualBox is configured with multiple host-only adapters with the same name %q. Please remove one.\", n.NetworkName)\n\t\t\t}\n\t\t\tbyName[n.NetworkName] = n\n\n\t\t\tif len(n.IPv4.IP) != 0 {\n\t\t\t\tif _, present := byIP[n.IPv4.IP.String()]; present {\n\t\t\t\t\treturn fmt.Errorf(\"VirtualBox is configured with multiple host-only adapters with the same IP %q. Please remove one.\", n.IPv4.IP)\n\t\t\t\t}\n\t\t\t\tbyIP[n.IPv4.IP.String()] = n\n\t\t\t}\n\n\t\t\tn = &hostOnlyNetwork{}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn byName, nil\n}",
"func gatewayCleanup(nodeName string) error {\n\tgatewayRouter := types.GWRouterPrefix + nodeName\n\n\t// Get the gateway router port's IP address (connected to join switch)\n\tvar nextHops []net.IP\n\n\tgwIPAddrs, err := util.GetLRPAddrs(types.GWRouterToJoinSwitchPrefix + gatewayRouter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, gwIPAddr := range gwIPAddrs {\n\t\tnextHops = append(nextHops, gwIPAddr.IP)\n\t}\n\tstaticRouteCleanup(nextHops)\n\n\t// Remove the patch port that connects join switch to gateway router\n\t_, stderr, err := util.RunOVNNbctl(\"--if-exist\", \"lsp-del\", types.JoinSwitchToGWRouterPrefix+gatewayRouter)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to delete logical switch port %s%s: \"+\n\t\t\t\"stderr: %q, error: %v\", types.JoinSwitchToGWRouterPrefix, gatewayRouter, stderr, err)\n\t}\n\n\t// Remove router to lb associations from the LBCache before removing the router\n\tlbCache, err := ovnlb.GetLBCache()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get load_balancer cache for router %s: %v\", gatewayRouter, err)\n\t}\n\tlbCache.RemoveRouter(gatewayRouter)\n\n\t// Remove the gateway router associated with nodeName\n\t_, stderr, err = util.RunOVNNbctl(\"--if-exist\", \"lr-del\",\n\t\tgatewayRouter)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to delete gateway router %s, stderr: %q, \"+\n\t\t\t\"error: %v\", gatewayRouter, stderr, err)\n\t}\n\n\t// Remove external switch\n\texternalSwitch := types.ExternalSwitchPrefix + nodeName\n\t_, stderr, err = util.RunOVNNbctl(\"--if-exist\", \"ls-del\",\n\t\texternalSwitch)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to delete external switch %s, stderr: %q, \"+\n\t\t\t\"error: %v\", externalSwitch, stderr, err)\n\t}\n\n\texGWexternalSwitch := types.ExternalSwitchPrefix + types.ExternalSwitchPrefix + nodeName\n\t_, stderr, err = util.RunOVNNbctl(\"--if-exist\", \"ls-del\",\n\t\texGWexternalSwitch)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to delete external 
switch %s, stderr: %q, \"+\n\t\t\t\"error: %v\", exGWexternalSwitch, stderr, err)\n\t}\n\n\t// We don't know the gateway mode as this is running in the master, try to delete the additional local\n\t// gateway for the shared gateway mode. it will be no op if this is done for other gateway modes.\n\tdelPbrAndNatRules(nodeName, nil)\n\treturn nil\n}",
"func (dns *EdgeDNS) cleanResolvForHost() {\n\tbs, err := ioutil.ReadFile(hostResolv)\n\tif err != nil {\n\t\tklog.Warningf(\"read file %s err: %v\", hostResolv, err)\n\t}\n\n\tresolv := strings.Split(string(bs), \"\\n\")\n\tif resolv == nil {\n\t\treturn\n\t}\n\tnameserver := \"\"\n\tfor _, item := range resolv {\n\t\tif strings.Contains(item, dns.ListenIP.String()) || item == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tnameserver = nameserver + item + \"\\n\"\n\t}\n\tif err := ioutil.WriteFile(hostResolv, []byte(nameserver), 0600); err != nil {\n\t\tklog.Errorf(\"failed to write nameserver to file %s, err: %v\", hostResolv, err)\n\t}\n}",
"func (v *Virter) getDHCPHosts(network libvirt.Network) ([]libvirtxml.NetworkDHCPHost, error) {\n\thosts := []libvirtxml.NetworkDHCPHost{}\n\n\tnetworkDescription, err := getNetworkDescription(v.libvirt, network)\n\tif err != nil {\n\t\treturn hosts, err\n\t}\n\tif len(networkDescription.IPs) < 1 {\n\t\treturn hosts, fmt.Errorf(\"no IPs in network\")\n\t}\n\n\tipDescription := networkDescription.IPs[0]\n\n\tdhcpDescription := ipDescription.DHCP\n\tif dhcpDescription == nil {\n\t\treturn hosts, fmt.Errorf(\"no DHCP in network\")\n\t}\n\n\tfor _, host := range dhcpDescription.Hosts {\n\t\thosts = append(hosts, host)\n\t}\n\n\treturn hosts, nil\n}",
"func (d *DistributedBackupDescriptor) RemoveEmpty() *DistributedBackupDescriptor {\n\tfor node, desc := range d.Nodes {\n\t\tif len(desc.Classes) == 0 {\n\t\t\tdelete(d.Nodes, node)\n\t\t}\n\t}\n\treturn d\n}",
"func deleteAllNetworks() error {\n\tnetworks, err := hcsshim.HNSListNetworkRequest(\"GET\", \"\", \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, network := range networks {\n\t\tif network.Name != \"nat\" {\n\t\t\t_, err = network.Delete()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func serversRemove(w http.ResponseWriter, r *http.Request) {\n\tsetHeader(w, r)\n\treadCookies(r)\n\tserver := r.URL.Query().Get(\"removeServer\")\n\tremoveServerInCookie(server, w, r)\n\tremoveServerInConfig(server)\n\thttp.Redirect(w, r, \"/public\", 301)\n}",
"func (a *Client) DeleteNodesMacaddressDhcpWhitelist(params *DeleteNodesMacaddressDhcpWhitelistParams, authInfo runtime.ClientAuthInfoWriter) (*DeleteNodesMacaddressDhcpWhitelistNoContent, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewDeleteNodesMacaddressDhcpWhitelistParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"DeleteNodesMacaddressDhcpWhitelist\",\n\t\tMethod: \"DELETE\",\n\t\tPathPattern: \"/nodes/{macaddress}/dhcp/whitelist\",\n\t\tProducesMediaTypes: []string{\"application/json\", \"application/x-gzip\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &DeleteNodesMacaddressDhcpWhitelistReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*DeleteNodesMacaddressDhcpWhitelistNoContent), nil\n}",
"func (p *ProxySQL) RemoveHost(host *Host) error {\n\tmut.Lock()\n\tdefer mut.Unlock()\n\t// build a query with these options\n\t_, err := exec(p, fmt.Sprintf(\"delete from mysql_servers where %s\", host.where()))\n\treturn err\n}",
"func (d *DHCPv4) NTPServers() []net.IP {\n\treturn GetIPs(OptionNTPServers, d.Options)\n}",
"func (c *FortiSDKClient) DeleteNetworkingInterfaceDHCP(mkey string) (err error) {\n\tlogPrefix := \"DeleteNetworkingInterfaceDHCP - \"\n\tHTTPMethod := \"DELETE\"\n\tpath := \"/api/v2/cmdb/system.dhcp/server\"\n\tpath += \"/\" + EscapeURLString(mkey)\n\n\treq := c.NewRequest(HTTPMethod, path, nil, nil)\n\terr = req.Send()\n\tif err != nil || req.HTTPResponse == nil {\n\t\terr = fmt.Errorf(\"cannot send request %s\", err)\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(req.HTTPResponse.Body)\n\tif err != nil || body == nil {\n\t\terr = fmt.Errorf(\"cannot get response body %s\", err)\n\t\treturn\n\t}\n\n\tvar result map[string]interface{}\n\tjson.Unmarshal([]byte(string(body)), &result)\n\n\treq.HTTPResponse.Body.Close()\n\n\tlog.Printf(logPrefix+\"Path called %s\", path)\n\tlog.Printf(logPrefix+\"FortiOS response: %s\", string(body))\n\n\tif result != nil {\n\t\tif result[\"status\"] == nil {\n\t\t\terr = fmt.Errorf(\"cannot get status from the response\")\n\t\t\treturn\n\t\t}\n\n\t\tif result[\"status\"] != \"success\" {\n\t\t\tif result[\"error\"] != nil {\n\t\t\t\terr = fmt.Errorf(\"status is %s and error no is %.0f\", result[\"status\"], result[\"error\"])\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"status is %s and error no is not found\", result[\"status\"])\n\t\t\t}\n\n\t\t\tif result[\"http_status\"] != nil {\n\t\t\t\terr = fmt.Errorf(\"%s, details: %s\", err, util.HttpStatus2Str(int(result[\"http_status\"].(float64))))\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"%s, and http_status no is not found\", err)\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\n\t} else {\n\t\terr = fmt.Errorf(\"cannot get the right response\")\n\t\treturn\n\t}\n\n\treturn\n}",
"func (c *VMReplicaSet) orphan(cm *controller.VirtualMachineControllerRefManager, rs *virtv1.VirtualMachineReplicaSet, vms []*virtv1.VirtualMachine) error {\n\n\tvar wg sync.WaitGroup\n\terrChan := make(chan error, len(vms))\n\twg.Add(len(vms))\n\n\tfor _, vm := range vms {\n\t\tgo func(vm *virtv1.VirtualMachine) {\n\t\t\tdefer wg.Done()\n\t\t\terr := cm.ReleaseVirtualMachine(vm)\n\t\t\tif err != nil {\n\t\t\t\terrChan <- err\n\t\t\t}\n\t\t}(vm)\n\t}\n\twg.Wait()\n\tselect {\n\tcase err := <-errChan:\n\t\treturn err\n\tdefault:\n\t}\n\treturn nil\n}",
"func (m *ServerManager) DeleteUnhealthServerAtPeriodic(ctx context.Context, duration time.Duration) {\n\tticker := time.NewTicker(duration)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tvar deadServerIDs []int\n\t\t\tm.servers.Range(func(_, value interface{}) bool {\n\t\t\t\tserver, ok := value.(*Server)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t\tif time.Now().Sub(server.updated) > duration {\n\t\t\t\t\tdeadServerIDs = append(deadServerIDs, server.ServerID)\n\t\t\t\t}\n\t\t\t\treturn true\n\t\t\t})\n\t\t\tfor _, id := range deadServerIDs {\n\t\t\t\tm.Delete(id)\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}",
"func ToNameserversStripTD(nss []string) ([]*Nameserver, error) {\n\tnservers := []*Nameserver{}\n\tfor _, ns := range nss {\n\t\tif !strings.HasSuffix(ns, \".\") {\n\t\t\treturn nil, fmt.Errorf(\"provider code already removed nameserver trailing dot (%v)\", ns)\n\t\t\t// If you see this error, maybe the provider should call ToNameservers instead.\n\t\t}\n\t\tnservers = append(nservers, &Nameserver{Name: ns[0 : len(ns)-1]})\n\t}\n\treturn nservers, nil\n}",
"func removeRBDLocks(instance *ec2.Instance) {\n\taddress, found := hosts[instance.InstanceId]\n\tif !found {\n\t\tglog.Errorf(\"The instance: %s was not found in the hosts map\", instance.InstanceId)\n\t\treturn\n\t}\n\tglog.Infof(\"Instance: %s, address: %s, state: %s, checking for locks\", instance.InstanceId, address, instance.State.Name)\n\n\tvar deleted = false\n\n\tfor i := 0; i < 3; i++ {\n\t\terr := rbdClient.UnlockClient(address)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Failed to unlock the images, attempting again if possible\")\n\t\t\t<-time.After(time.Duration(5) * time.Second)\n\t\t}\n\t\tdeleted = true\n\t}\n\n\tif !deleted {\n\t\tglog.Errorf(\"Failed to unlock any images that could have been held by client: %s\", address)\n\t}\n\n\t// step: delete from the hosts map\n\tdelete(hosts, instance.InstanceId)\n}",
"func (w *worker) excludeHostFromClickHouseCluster(host *chop.ChiHost) {\n\t// Specify in options to exclude host from ClickHouse config file\n\toptions := chopmodel.NewClickHouseConfigFilesGeneratorOptions().\n\t\tSetRemoteServersGeneratorOptions(\n\t\t\tchopmodel.NewRemoteServersGeneratorOptions().\n\t\t\t\tExcludeHost(host).\n\t\t\t\tExcludeReconcileAttributes(\n\t\t\t\t\tchop.NewChiHostReconcileAttributes().SetAdd(),\n\t\t\t\t),\n\t\t)\n\n\t// Remove host from cluster config and wait for ClickHouse to pick-up the change\n\tif w.waitExcludeHost(host) {\n\t\t_ = w.reconcileCHIConfigMapCommon(host.GetCHI(), options, true)\n\t\t_ = w.waitHostNotInCluster(host)\n\t}\n}",
"func (st *State) dropServer(id string) {\n\tst.mu.Lock()\n\tdefer st.mu.Unlock()\n\tfor i, s := range st.servers {\n\t\tif s.ID == id {\n\t\t\tcopy(st.servers[i:], st.servers[i+1:])\n\t\t\tst.servers[len(st.servers)-1] = nil\n\t\t\tst.servers = st.servers[:len(st.servers)-1]\n\t\t\treturn\n\t\t}\n\t}\n}",
"func removeWorkers(addrs []dist.Address) error {\n\tdirLock.Lock()\n\tdefer dirLock.Unlock()\n\n\t// cs 101 here we come\n\tres := make([]*dist.Address, 0)\n\tfor i := range directory {\n\t\trm := false\n\t\tfor k := range addrs {\n\t\t\tif directory[i].Equal(addrs[k]) {\n\t\t\t\trm = true\n\t\t\t}\n\t\t}\n\t\tif !rm {\n\t\t\tif directory[i].Valid() {\n\t\t\t\tres = append(res, directory[i])\n\t\t\t}\n\t\t}\n\t}\n\tdirectory = res\n\treturn nil\n}",
"func (s *Swarm) filterKnownUndialables(p peer.ID, addrs []ma.Multiaddr) []ma.Multiaddr {\n\tlisAddrs, _ := s.InterfaceListenAddresses()\n\tvar ourAddrs []ma.Multiaddr\n\tfor _, addr := range lisAddrs {\n\t\tprotos := addr.Protocols()\n\t\t// we're only sure about filtering out /ip4 and /ip6 addresses, so far\n\t\tif protos[0].Code == ma.P_IP4 || protos[0].Code == ma.P_IP6 {\n\t\t\tourAddrs = append(ourAddrs, addr)\n\t\t}\n\t}\n\n\treturn maybeRemoveWebTransportAddrs(ma.FilterAddrs(addrs,\n\t\tfunc(addr ma.Multiaddr) bool { return !ma.Contains(ourAddrs, addr) },\n\t\ts.canDial,\n\t\t// TODO: Consider allowing link-local addresses\n\t\tfunc(addr ma.Multiaddr) bool { return !manet.IsIP6LinkLocal(addr) },\n\t\tfunc(addr ma.Multiaddr) bool {\n\t\t\treturn s.gater == nil || s.gater.InterceptAddrDial(p, addr)\n\t\t},\n\t))\n}",
"func (n PowerVSNetwork) cleanup(options *CleanupOptions) error {\n\tresourceLogger := logrus.WithFields(logrus.Fields{\"resource\": options.Resource.Name})\n\tresourceLogger.Info(\"Cleaning up the networks\")\n\tpclient, err := NewPowerVSClient(options)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"couldn't create powervs client\")\n\t}\n\n\tnetworks, err := pclient.GetNetworks()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to get the networks in %q\", pclient.resource.Name)\n\t}\n\n\tfor _, net := range networks.Networks {\n\t\tports, err := pclient.GetPorts(*net.NetworkID)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to get ports of network %q\", *net.Name)\n\t\t}\n\t\tfor _, port := range ports.Ports {\n\t\t\terr = pclient.DeletePort(*net.NetworkID, *port.PortID)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"failed to delete port of network %q\", *net.Name)\n\t\t\t}\n\t\t}\n\t\terr = pclient.DeleteNetwork(*net.NetworkID)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to delete network %q\", *net.Name)\n\t\t}\n\t}\n\tresourceLogger.Info(\"Successfully deleted the networks\")\n\treturn nil\n}",
"func (orgVdcNet *OpenApiOrgVdcNetwork) DeletNetworkDhcp() error {\n\tendpoint := types.OpenApiPathVersion1_0_0 + types.OpenApiEndpointOrgVdcNetworksDhcp\n\tapiVersion, err := orgVdcNet.client.getOpenApiHighestElevatedVersion(endpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif orgVdcNet.OpenApiOrgVdcNetwork.ID == \"\" {\n\t\treturn fmt.Errorf(\"cannot delete Org VDC network DHCP configuration without ID\")\n\t}\n\n\turlRef, err := orgVdcNet.client.OpenApiBuildEndpoint(fmt.Sprintf(endpoint, orgVdcNet.OpenApiOrgVdcNetwork.ID))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = orgVdcNet.client.OpenApiDeleteItem(apiVersion, urlRef, nil, nil)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error deleting Org VDC network DHCP configuration: %s\", err)\n\t}\n\n\treturn nil\n}",
"func (self *basicPodManager) DeleteOrphanedMirrorPods(mirrorPods *mirrorPods) {\n\tpodFullNames := mirrorPods.GetOrphanedMirrorPodNames()\n\tfor _, podFullName := range podFullNames {\n\t\tself.mirrorManager.DeleteMirrorPod(podFullName)\n\t}\n}",
"func (f *Framework) CleanUp(ns string) error {\n\treturn f.AgonesClient.StableV1alpha1().GameServers(ns).\n\t\tDeleteCollection(&v1.DeleteOptions{}, v1.ListOptions{})\n}",
"func (md *MassDns) Clean() error {\n\t// remove only temp resolvers file\n\tif md.tempResolversPath != \"\" {\n\t\tos.Remove(md.tempResolversPath)\n\t\tmd.tempResolversPath = \"\"\n\t}\n\treturn nil\n}",
"func (o ArgoCDSpecInitialSSHKnownHostsOutput) Excludedefaulthosts() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v ArgoCDSpecInitialSSHKnownHosts) *bool { return v.Excludedefaulthosts }).(pulumi.BoolPtrOutput)\n}",
"func (i *DHCPInterface) SetDNSServers(dns []string) {\n\tfor _, server := range dns {\n\t\ti.dnsServers = append(i.dnsServers, []byte(net.ParseIP(server).To4())...)\n\t}\n}",
"func IPIsDoHOnlyServer(ip netip.Addr) bool {\n\treturn nextDNSv6RangeA.Contains(ip) || nextDNSv6RangeB.Contains(ip) ||\n\t\tnextDNSv4RangeA.Contains(ip) || nextDNSv4RangeB.Contains(ip)\n}",
"func (pr *PortRegistry) DeregisterServerPorts(ports []int32) {\n\tfor i := 0; i < len(ports); i++ {\n\t\tpr.HostPorts[ports[i]] = false\n\t}\n}",
"func multiJoinSwitchGatewayCleanup(nodeName string, upgradeOnly bool) error {\n\tgatewayRouter := types.GWRouterPrefix + nodeName\n\n\t// Get the gateway router port's IP address (connected to join switch)\n\tvar nextHops []net.IP\n\n\tgwIPAddrs, err := util.GetLRPAddrs(types.GWRouterToJoinSwitchPrefix + gatewayRouter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, gwIPAddr := range gwIPAddrs {\n\t\t// Delete logical router policy whose nexthop is the old rtoj- gateway port address\n\t\tstdout, stderr, err := util.RunOVNNbctl(\"--data=bare\", \"--no-heading\", \"--columns=_uuid\",\n\t\t\t\"find\", \"logical_router_policy\", fmt.Sprintf(\"nexthop=%s\", gwIPAddr.IP))\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Unable to find LR policy of nexthop: %s for node %s, stderr: %s, err: %v\",\n\t\t\t\tgwIPAddr.IP, nodeName, stderr, err)\n\t\t\tcontinue\n\t\t}\n\t\tif stdout != \"\" {\n\t\t\tpolicyIDs := strings.Fields(stdout)\n\t\t\tfor _, policyID := range policyIDs {\n\t\t\t\t_, stderr, err = util.RunOVNNbctl(\"remove\", \"logical_router\", types.OVNClusterRouter, \"policies\", policyID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tklog.Errorf(\"Unable to remove LR policy: %s, stderr: %s, err: %v\", policyID, stderr, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tnextHops = append(nextHops, gwIPAddr.IP)\n\t}\n\tstaticRouteCleanup(nextHops)\n\n\t// Remove the join switch that connects ovn_cluster_router to gateway router\n\t_, stderr, err := util.RunOVNNbctl(\"--if-exist\", \"ls-del\", \"join_\"+nodeName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to delete the join logical switch %s, \"+\n\t\t\t\"stderr: %q, error: %v\", \"join_\"+nodeName, stderr, err)\n\t}\n\n\t// Remove the logical router port on the distributed router that connects to the join switch\n\t_, stderr, err = util.RunOVNNbctl(\"--if-exist\", \"lrp-del\", \"dtoj-\"+nodeName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to delete the patch port dtoj-%s on distributed router \"+\n\t\t\t\"stderr: %q, error: %v\", 
nodeName, stderr, err)\n\t}\n\n\t// Remove the logical router port on the gateway router that connects to the join switch\n\t_, stderr, err = util.RunOVNNbctl(\"--if-exist\", \"lrp-del\", types.GWRouterToJoinSwitchPrefix+gatewayRouter)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to delete the port %s%s on gateway router \"+\n\t\t\t\"stderr: %q, error: %v\", types.GWRouterToJoinSwitchPrefix, gatewayRouter, stderr, err)\n\t}\n\n\tif upgradeOnly {\n\t\treturn nil\n\t}\n\n\t// Remove router to lb associations from the LBCache before removing the router\n\tlbCache, err := ovnlb.GetLBCache()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get load_balancer cache for router %s: %v\", gatewayRouter, err)\n\t}\n\tlbCache.RemoveRouter(gatewayRouter)\n\n\t// Remove the gateway router associated with nodeName\n\t_, stderr, err = util.RunOVNNbctl(\"--if-exist\", \"lr-del\",\n\t\tgatewayRouter)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to delete gateway router %s, stderr: %q, \"+\n\t\t\t\"error: %v\", gatewayRouter, stderr, err)\n\t}\n\n\t// Remove external switch\n\texternalSwitch := types.ExternalSwitchPrefix + nodeName\n\t_, stderr, err = util.RunOVNNbctl(\"--if-exist\", \"ls-del\",\n\t\texternalSwitch)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to delete external switch %s, stderr: %q, \"+\n\t\t\t\"error: %v\", externalSwitch, stderr, err)\n\t}\n\n\t// We don't know the gateway mode as this is running in the master, try to delete the additional local\n\t// gateway for the shared gateway mode. it will be no op if this is done for other gateway modes.\n\tdelPbrAndNatRules(nodeName, nil)\n\treturn nil\n}",
"func (d *Daemon) syncHostIPs() error {\n\tif option.Config.DryMode {\n\t\treturn nil\n\t}\n\n\ttype ipIDLabel struct {\n\t\tidentity.IPIdentityPair\n\t\tlabels.Labels\n\t}\n\tspecialIdentities := make([]ipIDLabel, 0, 2)\n\n\tif option.Config.EnableIPv4 {\n\t\taddrs, err := d.datapath.LocalNodeAddressing().IPv4().LocalAddresses()\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Warning(\"Unable to list local IPv4 addresses\")\n\t\t}\n\n\t\tfor _, ip := range addrs {\n\t\t\tif option.Config.IsExcludedLocalAddress(ip) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif len(ip) > 0 {\n\t\t\t\tspecialIdentities = append(specialIdentities, ipIDLabel{\n\t\t\t\t\tidentity.IPIdentityPair{\n\t\t\t\t\t\tIP: ip,\n\t\t\t\t\t\tID: identity.ReservedIdentityHost,\n\t\t\t\t\t},\n\t\t\t\t\tlabels.LabelHost,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tipv4Ident := identity.ReservedIdentityWorldIPv4\n\t\tipv4Label := labels.LabelWorldIPv4\n\t\tif !option.Config.EnableIPv6 {\n\t\t\tipv4Ident = identity.ReservedIdentityWorld\n\t\t\tipv4Label = labels.LabelWorld\n\t\t}\n\t\tspecialIdentities = append(specialIdentities, ipIDLabel{\n\t\t\tidentity.IPIdentityPair{\n\t\t\t\tIP: net.IPv4zero,\n\t\t\t\tMask: net.CIDRMask(0, net.IPv4len*8),\n\t\t\t\tID: ipv4Ident,\n\t\t\t},\n\t\t\tipv4Label,\n\t\t})\n\t}\n\n\tif option.Config.EnableIPv6 {\n\t\taddrs, err := d.datapath.LocalNodeAddressing().IPv6().LocalAddresses()\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Warning(\"Unable to list local IPv6 addresses\")\n\t\t}\n\n\t\taddrs = append(addrs, node.GetIPv6Router())\n\t\tfor _, ip := range addrs {\n\t\t\tif option.Config.IsExcludedLocalAddress(ip) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif len(ip) > 0 {\n\t\t\t\tspecialIdentities = append(specialIdentities, ipIDLabel{\n\t\t\t\t\tidentity.IPIdentityPair{\n\t\t\t\t\t\tIP: ip,\n\t\t\t\t\t\tID: identity.ReservedIdentityHost,\n\t\t\t\t\t},\n\t\t\t\t\tlabels.LabelHost,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tipv6Ident := identity.ReservedIdentityWorldIPv6\n\t\tipv6Label := 
labels.LabelWorldIPv6\n\t\tif !option.Config.EnableIPv4 {\n\t\t\tipv6Ident = identity.ReservedIdentityWorld\n\t\t\tipv6Label = labels.LabelWorld\n\t\t}\n\t\tspecialIdentities = append(specialIdentities, ipIDLabel{\n\t\t\tidentity.IPIdentityPair{\n\t\t\t\tIP: net.IPv6zero,\n\t\t\t\tMask: net.CIDRMask(0, net.IPv6len*8),\n\t\t\t\tID: ipv6Ident,\n\t\t\t},\n\t\t\tipv6Label,\n\t\t})\n\t}\n\n\texistingEndpoints, err := lxcmap.DumpToMap()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdaemonResourceID := ipcachetypes.NewResourceID(ipcachetypes.ResourceKindDaemon, \"\", \"\")\n\tfor _, ipIDLblsPair := range specialIdentities {\n\t\tisHost := ipIDLblsPair.ID == identity.ReservedIdentityHost\n\t\tif isHost {\n\t\t\tadded, err := lxcmap.SyncHostEntry(ipIDLblsPair.IP)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Unable to add host entry to endpoint map: %s\", err)\n\t\t\t}\n\t\t\tif added {\n\t\t\t\tlog.WithField(logfields.IPAddr, ipIDLblsPair.IP).Debugf(\"Added local ip to endpoint map\")\n\t\t\t}\n\t\t}\n\n\t\tdelete(existingEndpoints, ipIDLblsPair.IP.String())\n\n\t\tlbls := ipIDLblsPair.Labels\n\t\tif ipIDLblsPair.ID.IsWorld() {\n\t\t\tp := netip.PrefixFrom(ippkg.MustAddrFromIP(ipIDLblsPair.IP), 0)\n\t\t\td.ipcache.OverrideIdentity(p, lbls, source.Local, daemonResourceID)\n\t\t} else {\n\t\t\td.ipcache.UpsertLabels(ippkg.IPToNetPrefix(ipIDLblsPair.IP),\n\t\t\t\tlbls,\n\t\t\t\tsource.Local, daemonResourceID,\n\t\t\t)\n\t\t}\n\t}\n\n\t// existingEndpoints is a map from endpoint IP to endpoint info. 
Referring\n\t// to the key as host IP here because we only care about the host endpoint.\n\tfor hostIP, info := range existingEndpoints {\n\t\tif ip := net.ParseIP(hostIP); info.IsHost() && ip != nil {\n\t\t\tif err := lxcmap.DeleteEntry(ip); err != nil {\n\t\t\t\tlog.WithError(err).WithFields(logrus.Fields{\n\t\t\t\t\tlogfields.IPAddr: hostIP,\n\t\t\t\t}).Warn(\"Unable to delete obsolete host IP from BPF map\")\n\t\t\t} else {\n\t\t\t\tlog.Debugf(\"Removed outdated host IP %s from endpoint map\", hostIP)\n\t\t\t}\n\n\t\t\td.ipcache.RemoveLabels(ippkg.IPToNetPrefix(ip), labels.LabelHost, daemonResourceID)\n\t\t}\n\t}\n\n\tif option.Config.EnableVTEP {\n\t\terr := setupVTEPMapping()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = setupRouteToVtepCidr()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (VPCNetwork) cleanup(options *CleanupOptions) error {\n\tresourceLogger := logrus.WithFields(logrus.Fields{\"resource\": options.Resource.Name})\n\tresourceLogger.Info(\"Cleaning up the networks\")\n\tclient, err := NewVPCClient(options)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"couldn't create VPC client\")\n\t}\n\n\tsubnetList, _, err := client.ListSubnets(&vpcv1.ListSubnetsOptions{\n\t\tResourceGroupID: &client.ResourceGroupID,\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to list the subnets\")\n\t}\n\n\tfor _, subnet := range subnetList.Subnets {\n\t\tpg, _, err := client.GetSubnetPublicGateway(&vpcv1.GetSubnetPublicGatewayOptions{\n\t\t\tID: subnet.ID,\n\t\t})\n\t\tif pg != nil && err == nil {\n\t\t\t_, err := client.UnsetSubnetPublicGateway(&vpcv1.UnsetSubnetPublicGatewayOptions{\n\t\t\t\tID: subnet.ID,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"failed to unset the gateway for %q\", *subnet.Name)\n\t\t\t}\n\n\t\t\t_, err = client.DeletePublicGateway(&vpcv1.DeletePublicGatewayOptions{\n\t\t\t\tID: pg.ID,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"failed to delete the gateway %q\", *pg.Name)\n\t\t\t}\n\t\t\tresourceLogger.WithFields(logrus.Fields{\"name\": pg.Name}).Info(\"Successfully deleted the gateway\")\n\t\t}\n\t\t_, err = client.DeleteSubnet(&vpcv1.DeleteSubnetOptions{ID: subnet.ID})\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to delete the subnet %q\", *subnet.Name)\n\t\t}\n\t}\n\n\t// Delete the unbound floating IPs that were previously used by a VSI\n\tfips, _, err := client.ListFloatingIps(&vpcv1.ListFloatingIpsOptions{\n\t\tResourceGroupID: &client.ResourceGroupID,\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to list the floating IPs\")\n\t}\n\tfor _, fip := range fips.FloatingIps {\n\t\t_, err = client.DeleteFloatingIP(&vpcv1.DeleteFloatingIPOptions{\n\t\t\tID: fip.ID,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, 
\"failed to delete the floating IP %q\", *fip.Name)\n\t\t}\n\t\tresourceLogger.WithFields(logrus.Fields{\"name\": fip.Name}).Info(\"Successfully deleted the floating IP\")\n\t}\n\n\tresourceLogger.Info(\"Successfully deleted the subnets\")\n\treturn nil\n}",
"func (client WorkloadNetworksClient) DeleteDhcpResponder(resp *http.Response) (result autorest.Response, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),\n\t\tautorest.ByClosing())\n\tresult.Response = resp\n\treturn\n}",
"func (m *MicroService) blackListHost(host string, blacklist bool) {\r\n\tfor idx, inst := range m.Instances {\r\n\t\tif inst.Host == host {\r\n\t\t\tm.blackList(idx, blacklist)\r\n\t\t}\r\n\t}\r\n}",
"func RemoveHvacConfig(db Database, mac string) error {\n\tcriteria := make(map[string]interface{})\n\tcriteria[\"Mac\"] = mac\n\treturn db.DeleteRecord(pconst.DbConfig, pconst.TbHvacs, criteria)\n}",
"func Clean(c Config) {\n\n\tSetup(&c)\n\tContainers, _ := model.DockerContainerList()\n\n\tfor _, Container := range Containers {\n\t\ttarget := false\n\t\tif l := Container.Labels[\"pygmy.enable\"]; l == \"true\" || l == \"1\" {\n\t\t\ttarget = true\n\t\t}\n\t\tif l := Container.Labels[\"pygmy\"]; l == \"pygmy\" {\n\t\t\ttarget = true\n\t\t}\n\n\t\tif target {\n\t\t\terr := model.DockerKill(Container.ID)\n\t\t\tif err == nil {\n\t\t\t\tfmt.Printf(\"Successfully killed %v.\\n\", Container.Names[0])\n\t\t\t}\n\n\t\t\terr = model.DockerRemove(Container.ID)\n\t\t\tif err == nil {\n\t\t\t\tfmt.Printf(\"Successfully removed %v.\\n\", Container.Names[0])\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, network := range c.Networks {\n\t\tmodel.DockerNetworkRemove(&network)\n\t\tif s, _ := model.DockerNetworkStatus(&network); s {\n\t\t\tfmt.Printf(\"Successfully removed network %v\\n\", network.Name)\n\t\t} else {\n\t\t\tfmt.Printf(\"Network %v was not removed\\n\", network.Name)\n\t\t}\n\t}\n\n\tfor _, resolver := range c.Resolvers {\n\t\tresolver.Clean()\n\t}\n}",
"func cleanupAddressSet(addrs []ma.Multiaddr) []ma.Multiaddr {\n\tvar public, private []ma.Multiaddr\n\n\tfor _, a := range addrs {\n\t\tif isRelayAddr(a) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif manet.IsPublicAddr(a) || isDNSAddr(a) {\n\t\t\tpublic = append(public, a)\n\t\t\tcontinue\n\t\t}\n\n\t\t// discard unroutable addrs\n\t\tif manet.IsPrivateAddr(a) {\n\t\t\tprivate = append(private, a)\n\t\t}\n\t}\n\n\tif !hasAddrsplosion(public) {\n\t\treturn public\n\t}\n\n\treturn sanitizeAddrsplodedSet(public, private)\n}",
"func (lv *Libvirt) rebuildDHCPStaticLeases(app *App) error {\n\t_, err := lv.GetConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\tpreviousHosts := lv.NetworkXML.IPs[0].DHCP.Hosts\n\tvar hostsToDelete []libvirtxml.NetworkDHCPHost\n\tvar hostsToAdd []libvirtxml.NetworkDHCPHost // mostly for old VMs (previous \"format\") where no static IP was set\n\n\t// search for leases to delete\n\tfor _, host := range previousHosts {\n\t\tif !strings.HasPrefix(host.Name, app.Config.VMPrefix) {\n\t\t\tcontinue\n\t\t}\n\t\tnameID := strings.TrimPrefix(host.Name, app.Config.VMPrefix)\n\t\tvm, _ := app.VMDB.GetByNameID(nameID)\n\t\tif vm == nil {\n\t\t\tif lv.dhcpLeases.findByHost(host.Name) == nil {\n\t\t\t\thostsToDelete = append(hostsToDelete, host)\n\t\t\t}\n\t\t}\n\t}\n\n\t// search for leases to add (from VM database)\n\tvmNames := app.VMDB.GetNames()\n\tfor _, name := range vmNames {\n\t\tfound := false\n\t\tfor _, host := range previousHosts {\n\t\t\tif host.Name == name.LibvirtDomainName(app) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tvm, err := app.VMDB.GetByName(name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thost := libvirtxml.NetworkDHCPHost{\n\t\t\t\tName: name.LibvirtDomainName(app),\n\t\t\t\tMAC: vm.AssignedMAC,\n\t\t\t\tIP: vm.AssignedIPv4,\n\t\t\t}\n\t\t\thostsToAdd = append(hostsToAdd, host)\n\t\t}\n\t}\n\n\t// search for leases to add (from transient database)\n\tfor lease := range lv.dhcpLeases.leases {\n\t\tfound := false\n\t\tfor _, host := range previousHosts {\n\t\t\tif host.Name == lease.Name {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\thostsToAdd = append(hostsToAdd, *lease)\n\t\t}\n\t}\n\n\tfor _, host := range hostsToDelete {\n\t\tapp.Log.Tracef(\"remove DHCP lease for '%s/%s/%s'\", host.Name, host.MAC, host.IP)\n\t\txml, err := host.Marshal()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = 
lv.Network.Update(\n\t\t\tlibvirt.NETWORK_UPDATE_COMMAND_DELETE,\n\t\t\tlibvirt.NETWORK_SECTION_IP_DHCP_HOST,\n\t\t\t-1,\n\t\t\txml,\n\t\t\tlibvirt.NETWORK_UPDATE_AFFECT_LIVE|libvirt.NETWORK_UPDATE_AFFECT_CONFIG,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor _, host := range hostsToAdd {\n\t\tapp.Log.Tracef(\"add DHCP lease for '%s/%s/%s'\", host.Name, host.MAC, host.IP)\n\t\txml, err := host.Marshal()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = lv.Network.Update(\n\t\t\tlibvirt.NETWORK_UPDATE_COMMAND_ADD_LAST,\n\t\t\tlibvirt.NETWORK_SECTION_IP_DHCP_HOST,\n\t\t\t-1,\n\t\t\txml,\n\t\t\tlibvirt.NETWORK_UPDATE_AFFECT_LIVE|libvirt.NETWORK_UPDATE_AFFECT_CONFIG,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// update lv.NetworkXML\n\txmldoc, err := lv.Network.GetXMLDesc(0)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed GetXMLDesc: %s\", err)\n\t}\n\n\tnetcfg := &libvirtxml.Network{}\n\terr = netcfg.Unmarshal(xmldoc)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed Unmarshal: %s\", err)\n\t}\n\n\tlv.NetworkXML = netcfg\n\n\treturn nil\n}",
"func (p *ProxySQL) RemoveHosts(hosts ...*Host) error {\n\tfor _, host := range hosts {\n\t\terr := p.RemoveHost(host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (ps *PeerStore) RemoveRandom() {\n\tps.lock.Lock()\n\tdefer ps.lock.Unlock()\n\tfor _, p := range ps.peers {\n\t\tdelete(ps.peers, p.ListenAddr)\n\t\tbreak\n\t}\n}",
"func (r *HashRing) copyServersNoLock() []string {\n\tservers := make([]string, 0, len(r.serverSet))\n\tfor server := range r.serverSet {\n\t\tservers = append(servers, server)\n\t}\n\treturn servers\n}",
"func DeallocateIP(reservelist []IPReservation, containerID string) ([]IPReservation, net.IP, error) {\n\n}",
"func (o ArgoCDSpecInitialSSHKnownHostsPtrOutput) Excludedefaulthosts() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *ArgoCDSpecInitialSSHKnownHosts) *bool {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Excludedefaulthosts\n\t}).(pulumi.BoolPtrOutput)\n}",
"func WithDHCPServer(subnet *net.IPNet) Option {\n\treturn func(d *dnsmasq) {\n\t\td.subnet = subnet\n\t}\n}",
"func (dump *Dump) purge(existed Int32Map, stats *ParseStatistics) {\n\tfor id, cont := range dump.ContentIndex {\n\t\tif _, ok := existed[id]; !ok {\n\t\t\tfor _, ip4 := range cont.IPv4 {\n\t\t\t\tdump.RemoveFromIPv4Index(ip4.IPv4, cont.ID)\n\t\t\t}\n\n\t\t\tfor _, ip6 := range cont.IPv6 {\n\t\t\t\tip6 := string(ip6.IPv6)\n\t\t\t\tdump.RemoveFromIPv6Index(ip6, cont.ID)\n\t\t\t}\n\n\t\t\tfor _, subnet6 := range cont.SubnetIPv6 {\n\t\t\t\tdump.RemoveFromSubnetIPv6Index(subnet6.SubnetIPv6, cont.ID)\n\t\t\t}\n\n\t\t\tfor _, subnet4 := range cont.SubnetIPv4 {\n\t\t\t\tdump.RemoveFromSubnetIPv4Index(subnet4.SubnetIPv4, cont.ID)\n\t\t\t}\n\n\t\t\tfor _, u := range cont.URL {\n\t\t\t\tdump.RemoveFromURLIndex(NormalizeURL(u.URL), cont.ID)\n\t\t\t}\n\n\t\t\tfor _, domain := range cont.Domain {\n\t\t\t\tdump.RemoveFromDomainIndex(NormalizeDomain(domain.Domain), cont.ID)\n\t\t\t}\n\n\t\t\tdump.RemoveFromDecisionIndex(cont.Decision, cont.ID)\n\t\t\tdump.RemoveFromDecisionOrgIndex(cont.DecisionOrg, cont.ID)\n\t\t\tdump.RemoveFromDecisionWithoutNoIndex(cont.ID)\n\t\t\tdump.RemoveFromEntryTypeIndex(entryTypeKey(cont.EntryType, cont.DecisionOrg), cont.ID)\n\n\t\t\tdelete(dump.ContentIndex, id)\n\n\t\t\tstats.RemoveCount++\n\t\t}\n\t}\n}",
"func cleanNS(l []*net.NS) []string {\n\tvar r []string\n\tfor _, i := range l {\n\t\tr = append(r, i.Host)\n\t}\n\tsort.Strings(r)\n\treturn (r)\n\n}",
"func RemoveDnats(dnats []*Dnat) int {\n\n\ttree := vyos.NewParserFromShowConfiguration().Tree\n\n\tfor _, dnat := range dnats {\n\t\tdeleteDnat(tree, dnat)\n\t}\n\n\ttree.Apply(false)\n\n\treturn merrors.ErrSuccess\n}",
"func resourceBoilerplateServerDelete(d *schema.ResourceData, m interface{}) error {\n\tclient := m.(*Client).Client\n\n\tid, err := strconv.Atoi(d.Id())\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient.DeleteServer(context.Background(), id)\n\n\tlog.Printf(\"[INFO] Deleting Server\")\n\n\treturn nil\n}",
"func stripPendingDeletes(pm *kafkazk.PartitionMap, zk kafkazk.Handler) []string {\n\t// Get pending deletions.\n\tpd, err := zk.GetPendingDeletion()\n\tif err != nil {\n\t\tfmt.Println(\"Error fetching topics pending deletion\")\n\t}\n\n\tif len(pd) == 0 {\n\t\treturn []string{}\n\t}\n\n\t// This is used as a set of topic names\n\t// pending deleting.\n\tpending := map[string]struct{}{}\n\n\tfor _, t := range pd {\n\t\tpending[t] = struct{}{}\n\t}\n\n\t// Traverse the partition map and drop\n\t// any pending topics.\n\n\tnewPL := kafkazk.PartitionList{}\n\tpendingExcluded := map[string]struct{}{}\n\tfor _, p := range pm.Partitions {\n\t\tif _, exists := pending[p.Topic]; !exists {\n\t\t\tnewPL = append(newPL, p)\n\t\t} else {\n\t\t\tpendingExcluded[p.Topic] = struct{}{}\n\t\t}\n\t}\n\n\tpm.Partitions = newPL\n\n\tpendingExcludedNames := []string{}\n\tfor t := range pendingExcluded {\n\t\tpendingExcludedNames = append(pendingExcludedNames, t)\n\t}\n\n\treturn pendingExcludedNames\n}",
"func (plugin *cniNetworkPlugin) cleanupBridges(containerID string) error {\n\t// Get the amount of combinations between an IP mask, and an iptables chain, with the specified container ID\n\tresult, err := getIPChains(containerID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar teardownErrs []error\n\tfor _, net := range plugin.cniConfig.Networks {\n\t\tvar hasBridge bool\n\t\tfor _, plugin := range net.Config.Plugins {\n\t\t\tif plugin.Network.Type == \"bridge\" {\n\t\t\t\thasBridge = true\n\t\t\t}\n\t\t}\n\n\t\tif hasBridge {\n\t\t\tlog.Debugf(\"Teardown IPMasq for container %q on CNI network %q which contains a bridge\", containerID, net.Config.Name)\n\t\t\tcomment := utils.FormatComment(net.Config.Name, containerID)\n\t\t\tfor _, t := range result {\n\t\t\t\tif err = ip.TeardownIPMasq(t.ip, t.chain, comment); err != nil {\n\t\t\t\t\tteardownErrs = append(teardownErrs, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(teardownErrs) == 1 {\n\t\treturn teardownErrs[0]\n\t}\n\tif len(teardownErrs) > 0 {\n\t\treturn fmt.Errorf(\"Errors occured cleaning up bridges: %v\", teardownErrs)\n\t}\n\n\treturn nil\n}",
"func (client *LANHostConfigManagement1) DeleteDNSServer(NewDNSServers string) (err error) {\n\treturn client.DeleteDNSServerCtx(context.Background(),\n\t\tNewDNSServers,\n\t)\n}",
"func (e *Engine) shutdownVservers() {\n\tfor _, v := range e.vservers {\n\t\tv.stop()\n\t}\n\tfor name, v := range e.vservers {\n\t\t<-v.stopped\n\t\tdelete(e.vservers, name)\n\t}\n\te.vserverLock.Lock()\n\te.vserverSnapshots = make(map[string]*seesaw.Vserver)\n\te.vserverLock.Unlock()\n}",
"func RemoveServiceVhosts(conn client.Connection, svc *service.Service) error {\n\tglog.V(2).Infof(\"RemoveServiceVhosts for ID:%s Name:%s\", svc.ID, svc.Name)\n\n\t// generate map of current vhosts\n\tif svcvhosts, err := conn.Children(zkServiceVhosts); err == client.ErrNoNode {\n\t} else if err != nil {\n\t\tglog.Errorf(\"UpdateServiceVhosts unable to retrieve vhost children at path %s %s\", zkServiceVhosts, err)\n\t\treturn err\n\t} else {\n\t\tglog.V(2).Infof(\"RemoveServiceVhosts for svc.ID:%s from children:%+v\", svc.ID, svcvhosts)\n\t\tfor _, svcvhost := range svcvhosts {\n\t\t\tvhkey := VHostKey(svcvhost)\n\t\t\tif vhkey.ServiceID() == svc.ID {\n\t\t\t\tif err := removeServiceVhost(conn, string(vhkey)); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func CleanTunIP(tunName string, subnetIP global.Address, subnetMask uint8, client bool) error {\n\tip, subnet, err := ParseCIDR(subnetIP, subnetMask)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tip = ip.To4()\n\tif ip[3]%2 == 0 {\n\t\treturn errors.New(\"Invalid ip address.\")\n\t}\n\n\tpeer := net.IP(make([]byte, 4))\n\tcopy([]byte(peer), []byte(ip))\n\tpeer[3]++\n\n\tsargs := fmt.Sprintf(\"route del %s via %s dev %s\", subnet, peer, tunName)\n\targs := strings.Split(sargs, \" \")\n\tcmd := exec.Command(\"ip\", args...)\n\tif err := cmd.Run(); nil != err {\n\t\tlog.Errorf(\"ip %v err:%v\", sargs, err)\n\t} else {\n\t\tlog.Infof(\"ip %s\", sargs)\n\t}\n\n\tsargs = fmt.Sprintf(\"link set %s down\", tunName)\n\targs = strings.Split(sargs, \" \")\n\tcmd = exec.Command(\"ip\", args...)\n\tif err := cmd.Run(); nil != err {\n\t\tlog.Errorf(\"ip %v err:%v\", sargs, err)\n\t} else {\n\t\tlog.Infof(\"ip %s\", sargs)\n\t}\n\n\tsargs = fmt.Sprintf(\"%s %s %s down\", tunName, ip, peer)\n\targs = strings.Split(sargs, \" \")\n\tcmd = exec.Command(\"ifconfig\", args...)\n\tif err := cmd.Run(); nil != err {\n\t\treturn errors.New(fmt.Sprintf(\"ifconfig %v err:%v\", sargs, err))\n\t} else {\n\t\tlog.Infof(\"ifconfig %s\", sargs)\n\t}\n\n\tif client { // for client\n\t\tif err := UnRedirectGateway(); nil != err {\n\t\t\tlog.Errorf(\"%v\", err)\n\t\t}\n\t} else { // for server\n\t\tsargs = \"net.ipv4.ip_forward=0\"\n\t\targs = strings.Split(sargs, \" \")\n\t\tcmd = exec.Command(\"sysctl\", args...)\n\t\tif err := cmd.Run(); nil != err {\n\t\t\tlog.Errorf(\"sysctl %v err:%v\", sargs, err)\n\t\t}\n\n\t\tsargs = \"-t nat -D POSTROUTING -j MASQUERADE\"\n\t\targs = strings.Split(sargs, \" \")\n\t\tcmd = exec.Command(\"iptables\", args...)\n\t\tif err := cmd.Run(); nil != err {\n\t\t\tlog.Errorf(\"iptables %v err:%v\", sargs, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func createServersOfDomain(domainA DomainAPI, domain *model.Domain) {\n\n\tfor _, servers := range domainA.Endpoints {\n\t\taddress := servers.IPAddress\n\t\towner, country := domainA.WhoisServerAttributes(address)\n\t\tsslGrade := servers.Grade\n\t\ttemServer := model.Server{address, sslGrade, country, owner}\n\t\tdomain.Servers = append(domain.Servers, temServer)\n\t}\n}",
"func getHostOnlyNetworkInterface(mc *driver.MachineConfig) (string, error) {\n\t// Check if the interface/dhcp exists.\n\tnets, err := HostonlyNets()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdhcps, err := DHCPs()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, n := range nets {\n\t\tif dhcp, ok := dhcps[n.NetworkName]; ok {\n\t\t\tif dhcp.IPv4.IP.Equal(mc.DHCPIP) &&\n\t\t\t\tdhcp.IPv4.Mask.String() == mc.NetMask.String() &&\n\t\t\t\tdhcp.LowerIP.Equal(mc.LowerIP) &&\n\t\t\t\tdhcp.UpperIP.Equal(mc.UpperIP) &&\n\t\t\t\tdhcp.Enabled == mc.DHCPEnabled {\n\t\t\t\treturn n.Name, nil\n\t\t\t}\n\t\t}\n\t}\n\n\t// No existing host-only interface found. Create a new one.\n\thostonlyNet, err := CreateHostonlyNet()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thostonlyNet.IPv4.IP = mc.HostIP\n\thostonlyNet.IPv4.Mask = mc.NetMask\n\tif err := hostonlyNet.Config(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Create and add a DHCP server to the host-only network\n\tdhcp := driver.DHCP{}\n\tdhcp.IPv4.IP = mc.DHCPIP\n\tdhcp.IPv4.Mask = mc.NetMask\n\tdhcp.LowerIP = mc.LowerIP\n\tdhcp.UpperIP = mc.UpperIP\n\tdhcp.Enabled = true\n\tif err := AddHostonlyDHCP(hostonlyNet.Name, dhcp); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hostonlyNet.Name, nil\n}",
"func purgeOldStateOfGluster() error {\n\n\tpeers, err := client.PeerStatus()\n\tif err != nil {\n\t\trwolog.Error(\"Error while checking gluster Pool list\", err.Error())\n\t\treturn err\n\t}\n\n\tvar otherPeers []string\n\tvar connectedPeers []string\n\n\t// Debug Prints\n\trwolog.Debug(\"purgeOldStateOfGluster: Peers List \", peers)\n\tfor _, peer := range peers {\n\t\tif peer.Name != \"localhost\" {\n\t\t\totherPeers = append(otherPeers, peer.Name)\n\n\t\t\tif peer.Status == \"CONNECTED\" {\n\t\t\t\tconnectedPeers = append(connectedPeers, peer.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(connectedPeers) == 0 {\n\t\t// All peers are disconnected.\n\t\t// Remove individual bricks and delete the volume\n\t\t// Iterate and do peer detach\n\n\t\tvols, err := client.ListVolumes()\n\t\tif err != nil {\n\t\t\trwolog.Error(\"Error while List gluster volumes\", err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, vol := range vols {\n\n\t\t\t//Set the quorum ratio before cleanup.\n\t\t\terr = helpers.SetQuorumRatio(client, \"cleanup\")\n\t\t\tif err != nil {\n\t\t\t\trwolog.Error(\"Error while setting server quorum ratio \", err)\n\t\t\t}\n\n\t\t\tstopVolumeRetry(vol.Name)\n\n\t\t\t// Iterate over bricks and remove bricks but one\n\t\t\treplica := len(vol.Bricks) - 1\n\t\t\tfor _, brick := range vol.Bricks {\n\t\t\t\t// since peers are disconnected, we delete bricks individually\n\t\t\t\tourIP, err := helpers.GetIPAddr()\n\t\t\t\tif err != nil {\n\t\t\t\t\trwolog.Error(\"Failed to get network up:\", err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tif !strings.Contains(brick, ourIP) {\n\t\t\t\t\tif replica > 0 {\n\t\t\t\t\t\trwolog.Debug(\"Remove Brick \", brick, \" with replica \", replica)\n\n\t\t\t\t\t\terr = rmBricksFromVol(vol.Name, brick, replica)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\trwolog.Error(\"Error Removing Brick \", err)\n\t\t\t\t\t\t\t// Do not return error, As we have tried to remove the bricks five times.\n\t\t\t\t\t\t\t// Might be IP address has been 
changed and handler is trying to remove\n\t\t\t\t\t\t\t// its own brick with previous IP address.\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfmt.Printf(\"Brick deleted: %s\\n\", brick)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tb, _ := client.GetBricks(vol.Name)\n\t\t\t\treplica = len(b) - 1\n\t\t\t}\n\t\t}\n\n\t\tfor _, p := range otherPeers {\n\t\t\terr = client.PeerDetach(p)\n\t\t\tif err != nil {\n\t\t\t\trwolog.Error(err)\n\t\t\t}\n\t\t\trwolog.Debug(\"Detach peer \", p, \" error \", err)\n\t\t}\n\n\t\t// Uptil here, peers can be detached or not detached\n\t\tpeers, err := client.GetPeers()\n\t\tif err != nil {\n\t\t\trwolog.Error(err)\n\t\t}\n\n\t\t// Peer detach was not successful\n\t\tif len(peers) > 1 {\n\t\t\treturn fmt.Errorf(\"Not able to detach all peers\")\n\t\t}\n\n\t} else {\n\t\trwolog.Debug(\"purgeOldStateOfGluster: Some peers are connected. Not removing bricks.\")\n\t}\n\n\treturn nil\n}",
"func (s *Schemer) hostGetDropTables(host *chop.ChiHost) ([]string, []string, error) {\n\t// There isn't a separate query for deleting views. To delete a view, use DROP TABLE\n\t// See https://clickhouse.yandex/docs/en/query_language/create/\n\tsql := heredoc.Doc(`\n\t\tSELECT\n\t\t\tdistinct name, \n\t\t\tconcat('DROP TABLE IF EXISTS \"', database, '\".\"', name, '\"') AS drop_db_query\n\t\tFROM system.tables\n\t\tWHERE engine like 'Replicated%'`,\n\t)\n\n\tnames, sqlStatements, _ := s.getObjectListFromClickHouse([]string{CreatePodFQDN(host)}, sql)\n\treturn names, sqlStatements, nil\n}",
"func (o *PluginDnsClient) OnRemove(ctx *core.PluginCtx) {\n\tctx.UnregisterEvents(&o.PluginBase, dnsEvents)\n\tif o.IsNameServer() {\n\t\ttransportCtx := transport.GetTransportCtx(o.Client)\n\t\tif transportCtx != nil {\n\t\t\ttransportCtx.UnListen(\"udp\", \":53\", o)\n\t\t}\n\t} else {\n\t\tif o.cache != nil {\n\t\t\t_ = utils.NewDnsCacheRemover(o.cache, ctx.Tctx.GetTimerCtx())\n\t\t\to.cache = nil // GC can remove the client while the cache is removed.\n\t\t}\n\t}\n}",
"func (p *F5Plugin) deletePoolIfEmpty(poolname string) error {\n\tpoolExists, err := p.F5Client.PoolExists(poolname)\n\tif err != nil {\n\t\tglog.V(4).Infof(\"F5Client.PoolExists failed: %v\", err)\n\t\treturn err\n\t}\n\n\tif poolExists {\n\t\tmembers, err := p.F5Client.GetPoolMembers(poolname)\n\t\tif err != nil {\n\t\t\tglog.V(4).Infof(\"F5Client.GetPoolMembers failed: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t// We only delete the pool if the pool is empty, which it may not be\n\t\t// if a service has been added and has not (yet) been deleted.\n\t\tif len(members) == 0 {\n\t\t\terr = p.F5Client.DeletePool(poolname)\n\t\t\tif err != nil {\n\t\t\t\tglog.V(4).Infof(\"Error deleting pool %s: %v\", poolname, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func filterOutBlockedDsInDirectDs() {\n\tfor k, _ := range blockedDs.domainSet {\n\t\tif directDs.domainSet[k] {\n\t\t\tdelete(directDs.domainSet, k)\n\t\t\tdirectDomainChanged = true\n\t\t}\n\t}\n\tfor k, _ := range alwaysBlockedDs {\n\t\tif alwaysDirectDs[k] {\n\t\t\terrl.Printf(\"%s in both always blocked and direct domain lists, taken as blocked.\\n\", k)\n\t\t\tdelete(alwaysDirectDs, k)\n\t\t}\n\t}\n}",
"func (d *deprecatedTemplate) prune() {\n\tfor key, blocker := range d.Blockers {\n\t\tfor name, job := range blocker.Jobs {\n\t\t\tif !job.current {\n\t\t\t\tdelete(d.Blockers[key].Jobs, name)\n\t\t\t}\n\t\t}\n\t\tif len(blocker.Jobs) == 0 {\n\t\t\tdelete(d.Blockers, key)\n\t\t}\n\t}\n\tif len(d.Blockers) == 0 {\n\t\td.Blockers = nil\n\t}\n\n\tfor name, job := range d.UnknownBlocker.Jobs {\n\t\tif !job.current {\n\t\t\tdelete(d.UnknownBlocker.Jobs, name)\n\t\t}\n\t}\n}",
"func DeleteVirtualNetworkSubnet() {}",
"func DeleteVirtualNetworkSubnet() {}",
"func (h *TunnelHandler) Clean() {\n\th.mu.Lock()\n\tremoved := make([]string, 0, len(h.tunnels))\n\tfor id, tunnel := range h.tunnels {\n\t\tselect {\n\t\tcase <-tunnel.chDone:\n\t\t\tremoved = append(removed, id)\n\t\tdefault:\n\t\t}\n\t}\n\th.mu.Unlock()\n\n\tfor _, id := range removed {\n\t\th.remove(id)\n\t}\n}",
"func (r Dns_Domain_Registration) RemoveNameserversFromDomain(nameservers []string) (resp bool, err error) {\n\tparams := []interface{}{\n\t\tnameservers,\n\t}\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain_Registration\", \"removeNameserversFromDomain\", params, &r.Options, &resp)\n\treturn\n}",
"func (o *ClusterUninstaller) destroyFloatingIPs() error {\n\tfound, err := o.listFloatingIPs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\titems := o.insertPendingItems(floatingIPTypeName, found.list())\n\n\tfor _, item := range items {\n\t\tif _, ok := found[item.key]; !ok {\n\t\t\t// This item has finished deletion.\n\t\t\to.deletePendingItems(item.typeName, []cloudResource{item})\n\t\t\to.Logger.Infof(\"Deleted floating IP %q\", item.name)\n\t\t\tcontinue\n\t\t}\n\t\terr = o.deleteFloatingIP(item)\n\t\tif err != nil {\n\t\t\to.errorTracker.suppressWarning(item.key, err, o.Logger)\n\t\t}\n\t}\n\n\tif items = o.getPendingItems(floatingIPTypeName); len(items) > 0 {\n\t\treturn errors.Errorf(\"%d items pending\", len(items))\n\t}\n\treturn nil\n}",
"func (p *Plugin) Cleanup(kubeclient kubernetes.Interface) {\n\tp.cleanedUp = true\n\tgracePeriod := int64(1)\n\tdeletionPolicy := metav1.DeletePropagationBackground\n\n\tlistOptions := p.listOptions()\n\tdeleteOptions := metav1.DeleteOptions{\n\t\tGracePeriodSeconds: &gracePeriod,\n\t\tPropagationPolicy: &deletionPolicy,\n\t}\n\n\t// Delete the DaemonSet created by this plugin\n\terr := kubeclient.ExtensionsV1beta1().DaemonSets(p.Namespace).DeleteCollection(\n\t\t&deleteOptions,\n\t\tlistOptions,\n\t)\n\tif err != nil {\n\t\terrlog.LogError(errors.Wrapf(err, \"could not delete DaemonSet %v for daemonset plugin %v\", p.daemonSetName(), p.GetName()))\n\t}\n\n\t// Delete the ConfigMap created by this plugin\n\terr = kubeclient.CoreV1().ConfigMaps(p.Namespace).DeleteCollection(\n\t\t&deleteOptions,\n\t\tlistOptions,\n\t)\n\tif err != nil {\n\t\terrlog.LogError(errors.Wrapf(err, \"could not delete ConfigMap %v for daemonset plugin %v\", p.configMapName(), p.GetName()))\n\t}\n}",
"func (p *ProxySQL) Clear() error {\n\tmut.Lock()\n\tdefer mut.Unlock()\n\t_, err := exec(p, \"delete from mysql_servers\")\n\treturn err\n}",
"func (d *deprecatedTemplate) prune() {\n\tfor key, blocker := range d.Blockers {\n\t\tfor name, job := range blocker.Jobs {\n\t\t\tif !job.current {\n\t\t\t\tdelete(d.Blockers[key].Jobs, name)\n\t\t\t}\n\t\t}\n\t\tif len(blocker.Jobs) == 0 {\n\t\t\tdelete(d.Blockers, key)\n\t\t}\n\t}\n\tif len(d.Blockers) == 0 {\n\t\td.Blockers = nil\n\t}\n\n\tif d.UnknownBlocker == nil {\n\t\treturn\n\t}\n\tfor name, job := range d.UnknownBlocker.Jobs {\n\t\tif !job.current {\n\t\t\tdelete(d.UnknownBlocker.Jobs, name)\n\t\t}\n\t}\n}",
"func (h *InterfaceVppHandler) DumpDhcpClients() (map[uint32]*vppcalls.Dhcp, error) {\n\tdhcpData := make(map[uint32]*vppcalls.Dhcp)\n\treqCtx := h.callsChannel.SendMultiRequest(&dhcp.DHCPClientDump{})\n\n\tfor {\n\t\tdhcpDetails := &dhcp.DHCPClientDetails{}\n\t\tlast, err := reqCtx.ReceiveReply(dhcpDetails)\n\t\tif last {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclient := dhcpDetails.Client\n\t\tlease := dhcpDetails.Lease\n\n\t\tvar hostMac net.HardwareAddr = lease.HostMac\n\t\tvar hostAddr, routerAddr string\n\t\tif uintToBool(lease.IsIPv6) {\n\t\t\thostAddr = fmt.Sprintf(\"%s/%d\", net.IP(lease.HostAddress).To16().String(), uint32(lease.MaskWidth))\n\t\t\trouterAddr = fmt.Sprintf(\"%s/%d\", net.IP(lease.RouterAddress).To16().String(), uint32(lease.MaskWidth))\n\t\t} else {\n\t\t\thostAddr = fmt.Sprintf(\"%s/%d\", net.IP(lease.HostAddress[:4]).To4().String(), uint32(lease.MaskWidth))\n\t\t\trouterAddr = fmt.Sprintf(\"%s/%d\", net.IP(lease.RouterAddress[:4]).To4().String(), uint32(lease.MaskWidth))\n\t\t}\n\n\t\t// DHCP client data\n\t\tdhcpClient := &vppcalls.Client{\n\t\t\tSwIfIndex: client.SwIfIndex,\n\t\t\tHostname: string(bytes.SplitN(client.Hostname, []byte{0x00}, 2)[0]),\n\t\t\tID: string(bytes.SplitN(client.ID, []byte{0x00}, 2)[0]),\n\t\t\tWantDhcpEvent: uintToBool(client.WantDHCPEvent),\n\t\t\tSetBroadcastFlag: uintToBool(client.SetBroadcastFlag),\n\t\t\tPID: client.PID,\n\t\t}\n\n\t\t// DHCP lease data\n\t\tdhcpLease := &vppcalls.Lease{\n\t\t\tSwIfIndex: lease.SwIfIndex,\n\t\t\tState: lease.State,\n\t\t\tHostname: string(bytes.SplitN(lease.Hostname, []byte{0x00}, 2)[0]),\n\t\t\tIsIPv6: uintToBool(lease.IsIPv6),\n\t\t\tHostAddress: hostAddr,\n\t\t\tRouterAddress: routerAddr,\n\t\t\tHostMac: hostMac.String(),\n\t\t}\n\n\t\t// DHCP metadata\n\t\tdhcpData[client.SwIfIndex] = &vppcalls.Dhcp{\n\t\t\tClient: dhcpClient,\n\t\t\tLease: dhcpLease,\n\t\t}\n\t}\n\n\treturn dhcpData, nil\n}",
"func filterOutDs(ds domainSet) {\n\tfor k, _ := range ds {\n\t\tif blockedDs.domainSet[k] {\n\t\t\tdelete(blockedDs.domainSet, k)\n\t\t\tblockedDomainChanged = true\n\t\t}\n\t\tif directDs.domainSet[k] {\n\t\t\tdelete(directDs.domainSet, k)\n\t\t\tdirectDomainChanged = true\n\t\t}\n\t}\n}",
"func (j *Janitor) removeOrphanedData(ctx context.Context) {\n\toffset := 0\n\tfor {\n\t\tdumpIDs, err := j.lsifStore.DumpIDs(ctx, orphanBatchSize, offset)\n\t\tif err != nil {\n\t\t\tj.error(\"Failed to list dump identifiers\", \"error\", err)\n\t\t\treturn\n\t\t}\n\n\t\tstates, err := j.store.GetStates(ctx, dumpIDs)\n\t\tif err != nil {\n\t\t\tj.error(\"Failed to get states for dumps\", \"error\", err)\n\t\t\treturn\n\t\t}\n\n\t\tcount := 0\n\t\tfor _, dumpID := range dumpIDs {\n\t\t\tif _, ok := states[dumpID]; !ok {\n\t\t\t\tif err := j.lsifStore.Clear(ctx, dumpID); err != nil {\n\t\t\t\t\tj.error(\"Failed to remove data for dump\", \"dump_id\", dumpID, \"error\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\n\t\tif count > 0 {\n\t\t\tlog15.Debug(\"Removed orphaned data rows from skunkworks orphan harvester\", \"count\", count)\n\t\t\tj.metrics.DataRowsRemoved.Add(float64(count))\n\t\t}\n\n\t\tif len(dumpIDs) < orphanBatchSize {\n\t\t\tbreak\n\t\t}\n\n\t\toffset += orphanBatchSize\n\t}\n}",
"func deleteMulticastAllowPolicy(ovnNBClient goovn.Client, ns string, nsInfo *namespaceInfo) error {\n\tportGroupHash := hashedPortGroup(ns)\n\n\terr := deleteMulticastACLs(ns, portGroupHash, nsInfo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_ = nsInfo.updateNamespacePortGroup(ovnNBClient, ns)\n\treturn nil\n}",
"func (c *RpcClusterClient) clear(addrs []string) {\n\tc.Lock()\n\tvar rm []*poolWeightClient\n\tfor _, cli := range c.clients {\n\t\tvar has_cli bool\n\t\tfor _, addr := range addrs {\n\t\t\tif cli.endpoint == addr {\n\t\t\t\thas_cli = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !has_cli {\n\t\t\trm = append(rm, cli)\n\t\t} else if cli.errcnt > 0 {\n\t\t\t/*\n\t\t\t\tif cli.weight >= errWeight*uint64(cli.errcnt) {\n\t\t\t\t\tcli.weight -= errWeight * uint64(cli.errcnt)\n\t\t\t\t\tcli.errcnt = 0\n\t\t\t\t\tif c.Len() >= minHeapSize {\n\t\t\t\t\t\t// cli will and only up, so it's ok here.\n\t\t\t\t\t\theap.Fix(c, cli.index)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t*/\n\t\t}\n\t}\n\n\tfor _, cli := range rm {\n\t\t// p will up, down, or not move, so append it to rm list.\n\t\tc.Debugf(\"remove cli: %s\", cli.endpoint)\n\n\t\theap.Remove(c, cli.index)\n\t\tcli.pool.Close()\n\t}\n\tc.Unlock()\n}",
"func RouteIngrDeletePoolsByHostnameForEvh(routeIgrObj RouteIngressModel, namespace, objname, key string, fullsync bool, sharedQueue *utils.WorkerQueue) {\n\tok, hostMap := routeIgrObj.GetSvcLister().IngressMappings(namespace).GetRouteIngToHost(objname)\n\tif !ok {\n\t\tutils.AviLog.Warnf(\"key: %s, msg: nothing to delete for route: %s\", key, objname)\n\t\treturn\n\t}\n\n\tvar infraSettingName string\n\tif aviInfraSetting := routeIgrObj.GetAviInfraSetting(); aviInfraSetting != nil {\n\t\tinfraSettingName = aviInfraSetting.Name\n\t}\n\n\tutils.AviLog.Debugf(\"key: %s, msg: hosts to delete are :%s\", key, utils.Stringify(hostMap))\n\tfor host, hostData := range hostMap {\n\t\tshardVsName, _ := DeriveShardVSForEvh(host, key, routeIgrObj)\n\t\tdeleteVS := false\n\t\tif hostData.SecurePolicy == lib.PolicyPass {\n\t\t\tshardVsName.Name, _ = DerivePassthroughVS(host, key, routeIgrObj)\n\t\t}\n\n\t\tmodelName := lib.GetModelName(lib.GetTenant(), shardVsName.Name)\n\t\tfound, aviModel := objects.SharedAviGraphLister().Get(modelName)\n\t\tif !found || aviModel == nil {\n\t\t\tutils.AviLog.Warnf(\"key: %s, msg: model not found during delete: %s\", key, modelName)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Delete the pool corresponding to this host\n\t\tif hostData.SecurePolicy == lib.PolicyEdgeTerm {\n\t\t\tdeleteVS = aviModel.(*AviObjectGraph).DeletePoolForHostnameForEvh(shardVsName.Name, host, routeIgrObj, hostData.PathSvc, key, infraSettingName, true, true, true, true)\n\t\t} else if hostData.SecurePolicy == lib.PolicyPass {\n\t\t\taviModel.(*AviObjectGraph).DeleteObjectsForPassthroughHost(shardVsName.Name, host, routeIgrObj, hostData.PathSvc, infraSettingName, key, true, true, true)\n\t\t}\n\t\tif hostData.InsecurePolicy == lib.PolicyAllow {\n\t\t\tdeleteVS = aviModel.(*AviObjectGraph).DeletePoolForHostnameForEvh(shardVsName.Name, host, routeIgrObj, hostData.PathSvc, key, infraSettingName, true, true, true, false)\n\t\t}\n\t\tif !deleteVS {\n\t\t\tok := saveAviModel(modelName, 
aviModel.(*AviObjectGraph), key)\n\t\t\tif ok && len(aviModel.(*AviObjectGraph).GetOrderedNodes()) != 0 && !fullsync {\n\t\t\t\tPublishKeyToRestLayer(modelName, key, sharedQueue)\n\t\t\t}\n\t\t} else {\n\t\t\tutils.AviLog.Debugf(\"Setting up model name :[%v] to nil\", modelName)\n\t\t\tobjects.SharedAviGraphLister().Save(modelName, nil)\n\t\t\tPublishKeyToRestLayer(modelName, key, sharedQueue)\n\t\t}\n\t}\n\t// Now remove the secret relationship\n\trouteIgrObj.GetSvcLister().IngressMappings(namespace).RemoveIngressSecretMappings(objname)\n\tutils.AviLog.Infof(\"key: %s, removed ingress mapping for: %s\", key, objname)\n\n\t// Remove the hosts mapping for this ingress\n\trouteIgrObj.GetSvcLister().IngressMappings(namespace).DeleteIngToHostMapping(objname)\n\n\t// remove hostpath mappings\n\tupdateHostPathCache(namespace, objname, hostMap, nil)\n}",
"func (this *ExDomain) removesIvDomain(ivdom *IvDomain,\n\tmodifyingOtherDomain bool) {\n\tif modifyingOtherDomain {\n\t\tdels := make([]int, 0)\n\t\tfor _, part := range ivdom.GetParts() {\n\t\t\tfor v := part.From; v <= part.To; v++ {\n\t\t\t\tif this.Contains(v) {\n\t\t\t\t\tthis.Remove(v)\n\t\t\t\t} else {\n\t\t\t\t\tdels = append(dels, v)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(dels) != 0 {\n\t\t\tivdom.Removes(CreateIvDomainFromIntArr(dels))\n\t\t}\n\t} else {\n\t\tfor _, part := range ivdom.GetParts() {\n\t\t\tfor v := part.From; v <= part.To; v++ {\n\t\t\t\tthis.Remove(v)\n\t\t\t}\n\t\t}\n\t}\n}",
"func (q *gcpQuery) deleteNameServers(gcpClient gcpclient.Client, managedZone string, domain string, values sets.String) error {\n\treturn gcpClient.DeleteResourceRecordSet(managedZone, q.resourceRecordSet(domain, values))\n}"
] | [
"0.6231171",
"0.5975118",
"0.592964",
"0.5638924",
"0.5413972",
"0.51644886",
"0.5150004",
"0.5145574",
"0.51422113",
"0.50841993",
"0.5060565",
"0.5032385",
"0.50233114",
"0.50209886",
"0.5001533",
"0.49985364",
"0.49904168",
"0.4957759",
"0.49550113",
"0.49398196",
"0.4936964",
"0.4928295",
"0.4917889",
"0.49097753",
"0.48729655",
"0.48728853",
"0.48693764",
"0.48662356",
"0.48557687",
"0.48502415",
"0.4847909",
"0.48439294",
"0.48409843",
"0.48360962",
"0.48341566",
"0.48242816",
"0.48150998",
"0.47838515",
"0.4769143",
"0.47471902",
"0.47471353",
"0.47437918",
"0.47182995",
"0.47095296",
"0.4708826",
"0.47072875",
"0.47021317",
"0.4690318",
"0.4687757",
"0.4686866",
"0.4677887",
"0.46776098",
"0.46712223",
"0.4669997",
"0.46680278",
"0.46640438",
"0.46635774",
"0.46625152",
"0.46383265",
"0.46348673",
"0.46341085",
"0.46265993",
"0.46232274",
"0.46141085",
"0.4612332",
"0.4609329",
"0.46080285",
"0.45973563",
"0.4592265",
"0.45915735",
"0.45893255",
"0.45880947",
"0.4587978",
"0.4585361",
"0.4583789",
"0.45820442",
"0.45818594",
"0.4565169",
"0.45635784",
"0.45606855",
"0.45525324",
"0.45451918",
"0.45421633",
"0.4539433",
"0.45343068",
"0.45343068",
"0.45299402",
"0.452823",
"0.45217946",
"0.45208725",
"0.45201626",
"0.45157576",
"0.45140916",
"0.45120937",
"0.45108867",
"0.45057127",
"0.45013985",
"0.4490932",
"0.44887152",
"0.44798288"
] | 0.80990946 | 0 |
addHostOnlyDHCPServer adds a DHCP server to a hostonly network. | addHostOnlyDHCPServer добавляет сервер DHCP в сеть hostonly. | func addHostOnlyDHCPServer(ifname string, d dhcpServer, vbox VBoxManager) error {
name := dhcpPrefix + ifname
dhcps, err := listDHCPServers(vbox)
if err != nil {
return err
}
// On some platforms (OSX), creating a host-only adapter adds a default dhcpserver,
// while on others (Windows?) it does not.
command := "add"
if dhcp, ok := dhcps[name]; ok {
command = "modify"
if (dhcp.IPv4.IP.Equal(d.IPv4.IP)) && (dhcp.IPv4.Mask.String() == d.IPv4.Mask.String()) && (dhcp.LowerIP.Equal(d.LowerIP)) && (dhcp.UpperIP.Equal(d.UpperIP)) && dhcp.Enabled {
// dhcp is up to date
return nil
}
}
args := []string{"dhcpserver", command,
"--netname", name,
"--ip", d.IPv4.IP.String(),
"--netmask", net.IP(d.IPv4.Mask).String(),
"--lowerip", d.LowerIP.String(),
"--upperip", d.UpperIP.String(),
}
if d.Enabled {
args = append(args, "--enable")
} else {
args = append(args, "--disable")
}
return vbox.vbm(args...)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func WithDHCPServer(subnet *net.IPNet) Option {\n\treturn func(d *dnsmasq) {\n\t\td.subnet = subnet\n\t}\n}",
"func (p *PerHost) AddHost(host string) {\n\tif strings.HasSuffix(host, \".\") {\n\t\thost = host[:len(host)-1]\n\t}\n\tp.bypassHosts = append(p.bypassHosts, host)\n}",
"func (lv *Libvirt) AddTransientDHCPHost(newHost *libvirtxml.NetworkDHCPHost, app *App) error {\n\tlv.dhcpLeases.mutex.Lock()\n\tdefer lv.dhcpLeases.mutex.Unlock()\n\n\tlv.dhcpLeases.leases[newHost] = true\n\treturn lv.rebuildDHCPStaticLeases(app)\n}",
"func getHostOnlyNetworkInterface(mc *driver.MachineConfig) (string, error) {\n\t// Check if the interface/dhcp exists.\n\tnets, err := HostonlyNets()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdhcps, err := DHCPs()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, n := range nets {\n\t\tif dhcp, ok := dhcps[n.NetworkName]; ok {\n\t\t\tif dhcp.IPv4.IP.Equal(mc.DHCPIP) &&\n\t\t\t\tdhcp.IPv4.Mask.String() == mc.NetMask.String() &&\n\t\t\t\tdhcp.LowerIP.Equal(mc.LowerIP) &&\n\t\t\t\tdhcp.UpperIP.Equal(mc.UpperIP) &&\n\t\t\t\tdhcp.Enabled == mc.DHCPEnabled {\n\t\t\t\treturn n.Name, nil\n\t\t\t}\n\t\t}\n\t}\n\n\t// No existing host-only interface found. Create a new one.\n\thostonlyNet, err := CreateHostonlyNet()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thostonlyNet.IPv4.IP = mc.HostIP\n\thostonlyNet.IPv4.Mask = mc.NetMask\n\tif err := hostonlyNet.Config(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Create and add a DHCP server to the host-only network\n\tdhcp := driver.DHCP{}\n\tdhcp.IPv4.IP = mc.DHCPIP\n\tdhcp.IPv4.Mask = mc.NetMask\n\tdhcp.LowerIP = mc.LowerIP\n\tdhcp.UpperIP = mc.UpperIP\n\tdhcp.Enabled = true\n\tif err := AddHostonlyDHCP(hostonlyNet.Name, dhcp); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hostonlyNet.Name, nil\n}",
"func (v *Virter) addDHCPEntry(mac string, id uint) (net.IP, error) {\n\tnetwork, err := v.libvirt.NetworkLookupByName(v.networkName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not get network: %w\", err)\n\t}\n\n\tipNet, err := v.getIPNet(network)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnetworkBaseIP := ipNet.IP.Mask(ipNet.Mask)\n\tip := addToIP(networkBaseIP, id)\n\n\tif !ipNet.Contains(ip) {\n\t\treturn nil, fmt.Errorf(\"computed IP %v is not in network\", ip)\n\t}\n\n\tlog.Printf(\"Add DHCP entry from %v to %v\", mac, ip)\n\terr = v.libvirt.NetworkUpdate(\n\t\tnetwork,\n\t\t// the following 2 arguments are swapped; see\n\t\t// https://github.com/digitalocean/go-libvirt/issues/87\n\t\tuint32(libvirt.NetworkSectionIPDhcpHost),\n\t\tuint32(libvirt.NetworkUpdateCommandAddLast),\n\t\t-1,\n\t\tfmt.Sprintf(\"<host mac='%s' ip='%v'/>\", mac, ip),\n\t\tlibvirt.NetworkUpdateAffectLive|libvirt.NetworkUpdateAffectConfig)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not add DHCP entry: %w\", err)\n\t}\n\n\treturn ip, nil\n}",
"func addBlockedHost(host string) bool {\n\tdm := host2Domain(host)\n\tif isHostAlwaysDirect(host) || hostIsIP(host) || dm == \"localhost\" {\n\t\treturn false\n\t}\n\tif chouDs[dm] {\n\t\t// Record blocked time for chou domain, this marks a chou domain as\n\t\t// temporarily blocked\n\t\tnow := time.Now()\n\t\tchou.Lock()\n\t\tchou.time[dm] = now\n\t\tchou.Unlock()\n\t\tdebug.Printf(\"chou domain %s blocked at %v\\n\", dm, now)\n\t} else if !blockedDs.has(dm) {\n\t\tblockedDs.add(dm)\n\t\tblockedDomainChanged = true\n\t\tdebug.Printf(\"%s added to blocked list\\n\", dm)\n\t\t// Delete this domain from direct domain set\n\t\tdelDirectDomain(dm)\n\t}\n\treturn true\n}",
"func (servers *Servers) AddServer(macAddressStr string) error {\n\tvar macAddress net.HardwareAddr\n\tmacAddress, err := net.ParseMAC(macAddressStr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = servers.GetServer(macAddress.String())\n\tserver := Server{\n\t\tMacAddress: macAddress, IPAddress: \"change me\", Installed: false, Kernel: \"linux\", SecondMacAddress: \"find me\"}\n\tif err == nil {\n\t\tlog.Warnln(\"The server already exist in the list. Overwrite it.\")\n\t} else {\n\t\tswitch err.(type) {\n\t\tcase *EmptyServerListError:\n\t\t\tlog.Infoln(err)\n\t\t\tbreak\n\t\tcase *UnreconizeServerError:\n\t\t\tbreak\n\t\tcase *NilServerListError:\n\n\t\t\t// A map should not be nil\n\t\t\t// Refence : https://blog.golang.org/go-maps-in-action\n\t\t\treturn err\n\n\t\tdefault:\n\t\t\treturn err\n\t\t}\n\t}\n\n\t(*servers)[macAddress.String()] = &server\n\n\treturn nil\n}",
"func WithDHCPNameServers(dns []string) Option {\n\treturn func(d *dnsmasq) {\n\t\td.dns = dns\n\t}\n}",
"func (data *DNSData) AddServer(server compute.Server) {\n\tdata.AddNetworkAdapter(server.Name, server.Network.PrimaryAdapter)\n\n\tfor _, additionalNetworkAdapter := range server.Network.AdditionalNetworkAdapters {\n\t\tdata.AddNetworkAdapter(server.Name, additionalNetworkAdapter)\n\t}\n}",
"func (h *HostHandler) AddHost(host string) *http.ServeMux {\n\tmux := http.NewServeMux()\n\th.eligibleHosts[host] = mux\n\treturn mux\n}",
"func AddHCNHostEndpoint(ctx context.Context, i *Endpoint, netns, networkID string, decorates ...HCNEndpointDecorator) error {\n\tif netns == \"\" || networkID == \"\" ||\n\t\t!i.isValid() {\n\t\treturn errors.Errorf(\"invalid HostComputeEndpoint configuration\")\n\t}\n\n\tvar attach = func(ep *hcn.HostComputeEndpoint, isNewlyCreatedEndpoint bool) error {\n\t\t// attach gateway endpoint to host\n\t\tvar condErr error\n\t\tvar err = wait.PollImmediateUntil(100*time.Millisecond, func() (bool, error) {\n\t\t\tcondErr = ep.NamespaceAttach(netns)\n\t\t\tif condErr == nil ||\n\t\t\t\thcn.CheckErrorWithCode(condErr, 0x803B0014) { // if already attached\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}, ctx.Done())\n\t\tif err != nil {\n\t\t\tif condErr == nil {\n\t\t\t\tcondErr = err\n\t\t\t}\n\t\t\treturn errors.Wrapf(condErr, \"failed to attach gateway HostComputeEndpoint %s\", ep.Name)\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn addHCNEndpoint(ctx, i, netns, networkID, attach, decorates...)\n}",
"func (s *Switch) Add_host( host *string, vmid *string, port int ) {\n\tif s == nil {\n\t\treturn\n\t}\n\n\ts.hosts[*host] = true\n\ts.hport[*host] = port\n\ts.hvmid[*host] = vmid\n}",
"func (s *Server) ServeDHCP(p dhcp4.Packet, msgType dhcp4.MessageType, options dhcp4.Options) dhcp4.Packet {\n\ts.printLeases()\n\n\tswitch msgType {\n\tcase dhcp4.Discover: // Broadcast Packet From Client - Can I have an IP?\n\t\treturn s.handleDiscover(p, options)\n\n\tcase dhcp4.Request: // Broadcast From Client - I'll take that IP (Also start for renewals)\n\t\t// start/renew a lease -- update lease time\n\t\t// some clients (OSX) just go right ahead and do Request first from previously known IP, if they get NAK, they restart full cycle with Discover then Request\n\t\treturn s.handleDHCP4Request(p, options)\n\n\tcase dhcp4.Decline: // Broadcast From Client - Sorry I can't use that IP\n\t\treturn s.handleDecline(p, options)\n\n\tcase dhcp4.Release: // From Client, I don't need that IP anymore\n\t\treturn s.handleRelease(p, options)\n\n\tcase dhcp4.Inform: // From Client, I have this IP and there's nothing you can do about it\n\t\treturn s.handleInform(p, options)\n\n\t// from server -- ignore those but enumerate just in case\n\tcase dhcp4.Offer: // Broadcast From Server - Here's an IP\n\t\tlog.Printf(\"DHCP: received message from %s: Offer\", p.CHAddr())\n\n\tcase dhcp4.ACK: // From Server, Yes you can have that IP\n\t\tlog.Printf(\"DHCP: received message from %s: ACK\", p.CHAddr())\n\n\tcase dhcp4.NAK: // From Server, No you cannot have that IP\n\t\tlog.Printf(\"DHCP: received message from %s: NAK\", p.CHAddr())\n\n\tdefault:\n\t\tlog.Printf(\"DHCP: unknown packet %v from %s\", msgType, p.CHAddr())\n\t\treturn nil\n\t}\n\treturn nil\n}",
"func createHostonlyAdapter(vbox VBoxManager) (*hostOnlyNetwork, error) {\n\tout, err := vbox.vbmOut(\"hostonlyif\", \"create\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres := reHostOnlyAdapterCreated.FindStringSubmatch(string(out))\n\tif res == nil {\n\t\treturn nil, errors.New(\"Failed to create host-only adapter\")\n\t}\n\n\treturn &hostOnlyNetwork{Name: res[1]}, nil\n}",
"func AddHost(name, addr string) (*Host, error) {\n\t// Create a network namespace\n\th, err := NewHost(name)\n\tif err != nil {\n\t\tlog.Fatal(\"failed to NewHost: \", err)\n\t}\n\t// setup a veth pair\n\t_, err = h.setupVeth(\"eth2\", 1500)\n\tif err != nil {\n\t\tlog.Fatal(\"failed to open netns: \", err)\n\t}\n\t// setup a IP for host\n\th.setIfaceIP(addr)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to setIfaceIP for %s: %v\", h.Name, err)\n\t\treturn nil, err\n\t}\n\treturn h, nil\n}",
"func WithDHCPWPAD(wpad string) Option {\n\treturn func(d *dnsmasq) {\n\t\td.wpad = wpad\n\t}\n}",
"func (o *VirtualizationVmwareVirtualMachineAllOf) SetDhcpEnabled(v bool) {\n\to.DhcpEnabled = &v\n}",
"func parseDhcp(packet gopacket.Packet) {\n\t// DHCP v4\n\tdhcpLayer := packet.Layer(layers.LayerTypeDHCPv4)\n\tif dhcpLayer != nil {\n\t\tdhcp, _ := dhcpLayer.(*layers.DHCPv4)\n\t\tlinkSrc, _ := getMacs(packet)\n\n\t\t// add device\n\t\tdev := devices.Add(linkSrc)\n\t\tif dhcp.Operation == layers.DHCPOpRequest {\n\t\t\tdebug(\"DHCP Request\")\n\t\t\treturn\n\t\t}\n\t\tif dhcp.Operation == layers.DHCPOpReply {\n\t\t\tdebug(\"DHCP Reply\")\n\t\t\t// mark this device as dhcp server\n\t\t\tdev.DHCP.Enable()\n\t\t\tdev.DHCP.SetTimestamp(packet.Metadata().Timestamp)\n\t\t}\n\t}\n\n\t// DHCP v6\n\tdhcpv6Layer := packet.Layer(layers.LayerTypeDHCPv6)\n\tif dhcpv6Layer != nil {\n\t\tdhcp, _ := dhcpv6Layer.(*layers.DHCPv6)\n\t\tlinkSrc, _ := getMacs(packet)\n\t\tnetSrc, _ := getIps(packet)\n\t\tdev := devices.Add(linkSrc)\n\t\tdev.UCasts.Add(netSrc)\n\t\ttimestamp := packet.Metadata().Timestamp\n\n\t\t// parse message type to determine if server or client\n\t\tswitch dhcp.MsgType {\n\t\tcase layers.DHCPv6MsgTypeSolicit:\n\t\t\tdebug(\"DHCPv6 Solicit\")\n\t\tcase layers.DHCPv6MsgTypeAdvertise:\n\t\t\tdebug(\"DHCPv6 Advertise\")\n\t\tcase layers.DHCPv6MsgTypeRequest:\n\t\t\tdebug(\"DHCPv6 Request\")\n\t\t\t// server\n\t\t\tdev.DHCP.Enable()\n\t\t\tdev.DHCP.SetTimestamp(timestamp)\n\t\tcase layers.DHCPv6MsgTypeConfirm:\n\t\t\tdebug(\"DHCPv6 Confirm\")\n\t\tcase layers.DHCPv6MsgTypeRenew:\n\t\t\tdebug(\"DHCPv6 Renew\")\n\t\tcase layers.DHCPv6MsgTypeRebind:\n\t\t\tdebug(\"DHCPv6 Rebind\")\n\t\tcase layers.DHCPv6MsgTypeReply:\n\t\t\tdebug(\"DHCPv6 Reply\")\n\t\t\t// server\n\t\t\tdev.DHCP.Enable()\n\t\t\tdev.DHCP.SetTimestamp(timestamp)\n\t\tcase layers.DHCPv6MsgTypeRelease:\n\t\t\tdebug(\"DHCPv6 Release\")\n\t\tcase layers.DHCPv6MsgTypeDecline:\n\t\t\tdebug(\"DHCPv6 Decline\")\n\t\tcase layers.DHCPv6MsgTypeReconfigure:\n\t\t\tdebug(\"DHCPv6 Reconfigure\")\n\t\t\t// server\n\t\t\tdev.DHCP.Enable()\n\t\t\tdev.DHCP.SetTimestamp(timestamp)\n\t\tcase 
layers.DHCPv6MsgTypeInformationRequest:\n\t\t\tdebug(\"DHCPv6 Information Request\")\n\t\tcase layers.DHCPv6MsgTypeRelayForward:\n\t\t\tdebug(\"DHCPv6 Relay Forward\")\n\t\tcase layers.DHCPv6MsgTypeRelayReply:\n\t\t\tdebug(\"DHCPv6 Relay Reply\")\n\t\t\t// server\n\t\t\tdev.DHCP.Enable()\n\t\t\tdev.DHCP.SetTimestamp(timestamp)\n\t\t}\n\t}\n}",
"func (p *ProxySQL) AddHost(opts ...HostOpts) error {\n\tmut.Lock()\n\tdefer mut.Unlock()\n\thostq, err := buildAndParseHostQueryWithHostname(opts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// build a query with these options\n\t_, err = exec(p, buildInsertQuery(hostq))\n\treturn err\n}",
"func (i *DHCPInterface) ServeDHCP(p dhcp.Packet, msgType dhcp.MessageType, options dhcp.Options) dhcp.Packet {\n\tvar respMsg dhcp.MessageType\n\n\tswitch msgType {\n\tcase dhcp.Discover:\n\t\trespMsg = dhcp.Offer\n\tcase dhcp.Request:\n\t\trespMsg = dhcp.ACK\n\t}\n\n\tif respMsg != 0 {\n\t\trequestingMAC := p.CHAddr().String()\n\n\t\tif requestingMAC == i.MACFilter {\n\t\t\topts := dhcp.Options{\n\t\t\t\tdhcp.OptionSubnetMask: []byte(i.VMIPNet.Mask),\n\t\t\t\tdhcp.OptionRouter: []byte(*i.GatewayIP),\n\t\t\t\tdhcp.OptionDomainNameServer: i.dnsServers,\n\t\t\t\tdhcp.OptionHostName: []byte(i.Hostname),\n\t\t\t}\n\n\t\t\tif netRoutes := formClasslessRoutes(&i.Routes); netRoutes != nil {\n\t\t\t\topts[dhcp.OptionClasslessRouteFormat] = netRoutes\n\t\t\t}\n\n\t\t\tif i.ntpServers != nil {\n\t\t\t\topts[dhcp.OptionNetworkTimeProtocolServers] = i.ntpServers\n\t\t\t}\n\n\t\t\toptSlice := opts.SelectOrderOrAll(options[dhcp.OptionParameterRequestList])\n\n\t\t\treturn dhcp.ReplyPacket(p, respMsg, *i.GatewayIP, i.VMIPNet.IP, leaseDuration, optSlice)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func HasDHCP(net libvirtxml.Network) bool {\n\tif net.Forward != nil {\n\t\tif net.Forward.Mode == \"nat\" || net.Forward.Mode == \"route\" || net.Forward.Mode == \"\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (f *HostFilter) AddHost(host string, port int, ptype ProxyType) {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\the, ok := f.hosts[host]\n\tf.hosts[host] = HostEntry{\n\t\tType: ptype,\n\t\tPort: port,\n\t}\n\tif !ok {\n\t\ttslog.Green(\"+ Add Rule [%s] %s\", ptype, host)\n\t} else {\n\t\tif he.Type != ptype {\n\t\t\ttslog.Green(\"* Change Rule [%s -> %s] %s\", he.Type, ptype, host)\n\t\t}\n\t}\n}",
"func HostOnly(addr string) string {\n\thost, _, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn addr\n\t} else {\n\t\treturn host\n\t}\n}",
"func (px *PXE) StartDHCP(iface string, ip net.IP) {\n\n\tvar netmask net.IP\n\tif px.selfNet.IsUnspecified() {\n\t\tnetmask = net.ParseIP(\"255.255.255.0\").To4()\n\t} else {\n\t\tnetmask = px.selfNet.To4()\n\t}\n\n\t// FIXME: hardcoded value\n\tpx.leaseTime = time.Minute * 5\n\tleaseTime := make([]byte, 4)\n\tbinary.BigEndian.PutUint32(leaseTime, uint32(px.leaseTime.Seconds()))\n\n\tpx.options = layers.DHCPOptions{\n\t\tlayers.DHCPOption{Type: layers.DHCPOptLeaseTime, Length: 4, Data: leaseTime},\n\t\tlayers.DHCPOption{Type: layers.DHCPOptSubnetMask, Length: 4, Data: netmask.To4()},\n\t\tlayers.DHCPOption{Type: layers.DHCPOptRouter, Length: 4, Data: px.selfIP.To4()},\n\t}\n\n\tvar e error\n\n\t// Find our interface\n\tpx.iface, e = net.InterfaceByName(iface)\n\tif e != nil {\n\t\tpx.api.Logf(types.LLCRITICAL, \"%v: %s\", e, iface)\n\t\treturn\n\t}\n\n\t// We need the raw handle to send unicast packet replies\n\t// This is only used for sending initial DHCP offers\n\t// Note: 0x0800 is EtherType for ipv4. 
See: https://en.wikipedia.org/wiki/EtherType\n\tpx.rawHandle, e = raw.ListenPacket(px.iface, 0x0800, nil)\n\tif e != nil {\n\t\tpx.api.Logf(types.LLCRITICAL, \"%v: %s\", e, iface)\n\t\treturn\n\t}\n\tdefer px.rawHandle.Close()\n\n\t// We use this packetconn to read from\n\tnc, e := net.ListenPacket(\"udp4\", \":67\")\n\tif e != nil {\n\t\tpx.api.Logf(types.LLCRITICAL, \"%v\", e)\n\t\treturn\n\t}\n\tc := ipv4.NewPacketConn(nc)\n\tdefer c.Close()\n\tpx.api.Logf(types.LLINFO, \"started DHCP listener on: %s\", iface)\n\n\t// main read loop\n\tfor {\n\t\tbuffer := make([]byte, DHCPPacketBuffer)\n\t\tvar req layers.DHCPv4\n\t\tparser := gopacket.NewDecodingLayerParser(layers.LayerTypeDHCPv4, &req)\n\t\tdecoded := []gopacket.LayerType{}\n\n\t\tn, _, addr, e := c.ReadFrom(buffer)\n\t\tif e != nil {\n\t\t\tpx.api.Logf(types.LLCRITICAL, \"%v\", e)\n\t\t\tbreak\n\t\t}\n\t\tpx.api.Logf(types.LLDDEBUG, \"got a dhcp packet from: %s\", addr.String())\n\t\tif n < 240 {\n\t\t\tpx.api.Logf(types.LLDDEBUG, \"packet is too short: %d < 240\", n)\n\t\t\tcontinue\n\t\t}\n\n\t\tif e = parser.DecodeLayers(buffer[:n], &decoded); e != nil {\n\t\t\tpx.api.Logf(types.LLERROR, \"error decoding packet: %v\", e)\n\t\t\tcontinue\n\t\t}\n\t\tif len(decoded) < 1 || decoded[0] != layers.LayerTypeDHCPv4 {\n\t\t\tpx.api.Logf(types.LLERROR, \"decoded non-DHCP packet\")\n\t\t\tcontinue\n\t\t}\n\t\t// at this point we have a parsed DHCPv4 packet\n\n\t\tif req.Operation != layers.DHCPOpRequest {\n\t\t\t// odd...\n\t\t\tcontinue\n\t\t}\n\t\tif req.HardwareLen > 16 {\n\t\t\tpx.api.Logf(types.LLDDEBUG, \"packet HardwareLen too long: %d > 16\", req.HardwareLen)\n\t\t\tcontinue\n\t\t}\n\n\t\tgo px.handleDHCPRequest(req)\n\t}\n\tpx.api.Log(types.LLNOTICE, \"DHCP stopped.\")\n}",
"func (p *RangePlugin) ServeDHCP(ctx context.Context, req, res *dhcpv4.DHCPv4) error {\n\tdb := lease.GetDatabase(ctx)\n\tcli := lease.Client{HwAddr: req.ClientHWAddr}\n\n\tif dhcpserver.Discover(req) {\n\t\tip := p.findUnboundAddr(ctx, req.ClientHWAddr, req.RequestedIPAddress(), db)\n\t\tif ip != nil {\n\t\t\tp.L.Debugf(\"found unbound address for %s: %s\", req.ClientHWAddr, ip)\n\t\t\tres.YourIPAddr = ip\n\t\t\treturn nil\n\t\t}\n\t\tp.L.Debugf(\"failed to find address for %s\", req.ClientHWAddr)\n\t\t// we failed to find an IP address for that client\n\t\t// so fallthrough and call the next middleware\n\t} else\n\n\t// for DHCPREQUEST we try to actually lease the IP address\n\t// and send a DHCPACK if we succeeded. In any error case\n\t// we will NOT send a NAK as a middleware below us\n\t// may succeed in leasing the address\n\t// TODO(ppacher): we could check if the RequestedIPAddress() is inside\n\t// the IP ranges and then decide to ACK or NAK\n\tif dhcpserver.Request(req) {\n\t\tstate := \"binding\"\n\t\tip := req.RequestedIPAddress()\n\t\tif ip == nil || ip.IsUnspecified() {\n\t\t\tip = req.ClientIPAddr\n\t\t\tstate = \"renewing\"\n\t\t}\n\n\t\tif ip != nil && !ip.IsUnspecified() {\n\t\t\tp.L.Debugf(\"%s (%s) requests %s\", req.ClientHWAddr, state, ip)\n\n\t\t\t// use the leaseTime already set to the response packet\n\t\t\t// else we fallback to time.Hour\n\t\t\t// TODO(ppacher): we should make the default lease time configurable\n\t\t\t// for the ranges plguin\n\t\t\tleaseTime := res.IPAddressLeaseTime(time.Hour)\n\n\t\t\tleaseTime, err := db.Lease(ctx, ip, cli, leaseTime, state == \"renewing\")\n\t\t\tif err == nil {\n\t\t\t\tp.L.Infof(\"%s (%s): lease %s for %s\", req.ClientHWAddr, state, ip, leaseTime)\n\t\t\t\tif leaseTime == time.Hour {\n\t\t\t\t\t// if we use the default, make sure to set it\n\t\t\t\t\tres.UpdateOption(dhcpv4.OptIPAddressLeaseTime(leaseTime))\n\t\t\t\t}\n\n\t\t\t\t// make sure we ACK the DHCPREQUEST\n\t\t\t\tres.YourIPAddr = 
ip\n\n\t\t\t\tif res.SubnetMask() == nil || res.SubnetMask().String() == \"0.0.0.0\" {\n\t\t\t\t\tres.UpdateOption(dhcpv4.OptSubnetMask(p.Network.Mask))\n\t\t\t\t}\n\n\t\t\t\tres.UpdateOption(dhcpv4.OptMessageType(dhcpv4.MessageTypeAck))\n\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tp.L.Errorf(\"%s: failed to lease requested ip %s: %s\", req.ClientHWAddr, ip, err.Error())\n\t\t}\n\t} else\n\n\t// If it's a DHCPRELEASE message and part of our range we'll release it\n\tif dhcpserver.Release(req) && p.Ranges.Contains(req.ClientIPAddr) {\n\t\tif err := db.Release(ctx, req.ClientIPAddr); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// No response should be sent for DHCPRELEASE messages\n\t\treturn dhcpserver.ErrNoResponse\n\t}\n\n\treturn p.Next.ServeDHCP(ctx, req, res)\n}",
"func (m *NetManager) serverAdd(key string, con cm.Connection) error {\n\n\tm.keyMarker[key] = key_marker_server\n\treturn m.serverConsManager.Add(key, con)\n}",
"func (h *Hosts) AddHost(host ...Host) {\n\th.mux.Lock()\n\tdefer h.mux.Unlock()\n\n\th.hosts = append(h.hosts, host...)\n}",
"func SetServerHost(s string) func(*Server) error {\n\treturn func(c *Server) error {\n\t\tc.host = s\n\t\treturn nil\n\t}\n}",
"func (client *Client) AddHpHost(request *AddHpHostRequest) (response *AddHpHostResponse, err error) {\n\tresponse = CreateAddHpHostResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func listDHCPServers(vbox VBoxManager) (map[string]*dhcpServer, error) {\n\tout, err := vbox.vbmOut(\"list\", \"dhcpservers\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm := map[string]*dhcpServer{}\n\tdhcp := &dhcpServer{}\n\n\terr = parseKeyValues(out, reColonLine, func(key, val string) error {\n\t\tswitch key {\n\t\tcase \"NetworkName\":\n\t\t\tdhcp = &dhcpServer{}\n\t\t\tm[val] = dhcp\n\t\t\tdhcp.NetworkName = val\n\t\tcase \"IP\":\n\t\t\tdhcp.IPv4.IP = net.ParseIP(val)\n\t\tcase \"upperIPAddress\":\n\t\t\tdhcp.UpperIP = net.ParseIP(val)\n\t\tcase \"lowerIPAddress\":\n\t\t\tdhcp.LowerIP = net.ParseIP(val)\n\t\tcase \"NetworkMask\":\n\t\t\tdhcp.IPv4.Mask = parseIPv4Mask(val)\n\t\tcase \"Enabled\":\n\t\t\tdhcp.Enabled = (val == \"Yes\")\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m, nil\n}",
"func (client *Client) AddHDMInstance(request *AddHDMInstanceRequest) (response *AddHDMInstanceResponse, err error) {\n\tresponse = CreateAddHDMInstanceResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func (o *VirtualizationVmwareVirtualMachineAllOf) HasDhcpEnabled() bool {\n\tif o != nil && o.DhcpEnabled != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (h *hostfile) AddHost(ip, hostname string) error {\n\th.hosts.Reload()\n\th.hosts.AddHost(ip, hostname)\n\terr := h.hosts.Save()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func addDoH(ipStr, base string) {\n\tip := netip.MustParseAddr(ipStr)\n\tdohOfIP[ip] = base\n\tdohIPsOfBase[base] = append(dohIPsOfBase[base], ip)\n}",
"func (p *PerHost) AddZone(zone string) {\n\tif strings.HasSuffix(zone, \".\") {\n\t\tzone = zone[:len(zone)-1]\n\t}\n\tif !strings.HasPrefix(zone, \".\") {\n\t\tzone = \".\" + zone\n\t}\n\tp.bypassZones = append(p.bypassZones, zone)\n}",
"func NewHostHandler() *HostHandler {\n\th := &HostHandler{\n\t\teligibleHosts: make(map[string]*http.ServeMux),\n\t}\n\n\treturn h\n}",
"func (c *Config) AddServer(name string, host string, port int) (string, *Server) {\n\tserver := &Server{\n\t\tName: name,\n\t\tHost: host,\n\t\tPort: port,\n\t}\n\tserverAmount := len(*c.Servers) + 1\n\tlog.Printf(\"Add server, server amount: %d\", serverAmount)\n\tkey := fmt.Sprintf(\"%s%d\", \"server\", serverAmount)\n\t(*c.Servers)[key] = server\n\treturn key, server\n}",
"func addDomainsWithHost(domains map[string]*lazyloadv1alpha1.Destinations, sf *lazyloadv1alpha1.ServiceFence, nsSvcCache *NsSvcCache,\n\trules []*domainAliasRule,\n) {\n\tcheckStatus := func(now int64, strategy *lazyloadv1alpha1.RecyclingStrategy) lazyloadv1alpha1.Destinations_Status {\n\t\tswitch {\n\t\tcase strategy.Stable != nil:\n\t\t\t// ...\n\t\tcase strategy.Deadline != nil:\n\t\t\tif now > strategy.Deadline.Expire.Seconds {\n\t\t\t\treturn lazyloadv1alpha1.Destinations_EXPIRE\n\t\t\t}\n\t\tcase strategy.Auto != nil:\n\t\t\tif strategy.RecentlyCalled != nil {\n\t\t\t\tif now-strategy.RecentlyCalled.Seconds > strategy.Auto.Duration.Seconds {\n\t\t\t\t\treturn lazyloadv1alpha1.Destinations_EXPIRE\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn lazyloadv1alpha1.Destinations_ACTIVE\n\t}\n\n\tfor h, strategy := range sf.Spec.Host {\n\t\tif strings.HasSuffix(h, \"/*\") {\n\t\t\t// handle namespace level host, like 'default/*'\n\t\t\thandleNsHost(h, domains, nsSvcCache, rules)\n\t\t} else {\n\t\t\t// handle service level host, like 'a.default.svc.cluster.local' or 'www.netease.com'\n\t\t\thandleSvcHost(h, strategy, checkStatus, domains, sf, rules)\n\t\t}\n\t}\n}",
"func (r *Resource) AddHost(id int, hostname, ip string) error {\n\th := Host{\n\t\tID: id,\n\t\tName: hostname,\n\t\tIP: ip,\n\t\tvolume: make(map[int]volume),\n\t}\n\n\tr.Lock()\n\tdefer r.Unlock()\n\n\tif err := checkHosts(r, h); err != nil {\n\t\treturn err\n\t}\n\tr.host[hostname] = h\n\n\treturn nil\n}",
"func (sl *ServerList) Add(server string) error {\n\tvar serverAddress net.Addr\n\tif strings.Contains(server, \"/\") {\n\t\taddr, err := net.ResolveUnixAddr(\"unix\", server)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tserverAddress = newAddrValue(addr)\n\t} else {\n\t\ttcpaddr, err := net.ResolveTCPAddr(\"tcp\", server)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tserverAddress = newAddrValue(tcpaddr)\n\t}\n\n\tsl.mu.Lock()\n\tdefer sl.mu.Unlock()\n\tsl.servers = append(sl.servers, serverAddress)\n\tsl.consistentHash.Add(serverAddress)\n\treturn nil\n}",
"func AddServer(serverName string, roles []string) error {\n\tsshKey, err := sshkey.ReadSSHPublicKeyFromConf()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !sshKey.IsProvisioned() {\n\t\treturn fmt.Errorf(\n\t\t\t\"Could not add SSH key to server '%s'. The SSH key '%s' is not available in hcloud. Use the provision command first\", serverName, sshKey.Name)\n\t}\n\tserverConf := Config{\n\t\tName: serverName,\n\t\tSSHPublicKeyID: sshKey.ID,\n\t\tServerType: viper.GetString(confHCloudDefaultServerTypeKey),\n\t\tImageName: viper.GetString(confHCloudDefaultImageNameKey),\n\t\tLocationName: viper.GetString(confHCloudLocationNameKey),\n\t\tRoles: roles}\n\tserverConf.UpdateConfig()\n\treturn nil\n}",
"func (c *ConfigurationFile) AddServer(name string, server Server) {\n\tc.Servers[name] = server\n}",
"func (c *FortiSDKClient) CreateNetworkingInterfaceDHCP(params *JSONNetworkingInterfaceDHCP) (output *JSONNetworkingInterfaceDHCPResult, err error) {\n\tlogPrefix := \"CreateNetworkingInterfaceDHCP - \"\n\tHTTPMethod := \"POST\"\n\tpath := \"/api/v2/cmdb/system.dhcp/server\"\n\n\t// Check if there is already a server attached to interface\n\t// Create will fail with error but still creat new config if interface already associated with a config., so we don't post the request if interface already has a config\n\tdata, err := c.ReadNetworkingInterfaceDHCPServers()\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n\tservers := *data\n\n\tfor _, server := range servers {\n\t\tif server.Interface == params.Interface {\n\t\t\terr = fmt.Errorf(\"DHCP Server already attached to interface!\")\n\t\t\treturn\n\t\t}\n\t}\n\t// End Check\n\n\toutput = &JSONNetworkingInterfaceDHCPResult{}\n\tlocJSON, err := json.Marshal(params)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\n\tlog.Printf(logPrefix+\"%s: %s\", HTTPMethod, string(locJSON))\n\n\tbytes := bytes.NewBuffer(locJSON)\n\treq := c.NewRequest(HTTPMethod, path, nil, bytes)\n\terr = req.Send()\n\tif err != nil || req.HTTPResponse == nil {\n\t\terr = fmt.Errorf(\"cannot send request %s\", err)\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(req.HTTPResponse.Body)\n\tif err != nil || body == nil {\n\n\t\terr = fmt.Errorf(\"cannot get response body %s\", err)\n\t\treturn\n\t}\n\n\tlog.Printf(logPrefix+\"PATH: %s\", path)\n\tlog.Printf(logPrefix+\"FortiOS Response: %s\", string(body))\n\n\tvar result map[string]interface{}\n\tjson.Unmarshal([]byte(string(body)), &result)\n\n\treq.HTTPResponse.Body.Close()\n\n\tif result != nil {\n\t\tif result[\"vdom\"] != nil {\n\t\t\toutput.Vdom = result[\"vdom\"].(string)\n\t\t}\n\t\tif result[\"mkey\"] != nil {\n\t\t\t// Note that Fortios is inconsistent in how mkey is reported\n\t\t\tif fmt.Sprintf(\"%T\", result[\"mkey\"]) == \"float64\" {\n\t\t\t\toutput.Mkey = 
fmt.Sprintf(\"%.0f\", result[\"mkey\"].(float64))\n\t\t\t} else {\n\t\t\t\toutput.Mkey = result[\"mkey\"].(string)\n\t\t\t}\n\t\t}\n\t\tif result[\"status\"] != nil {\n\t\t\tif result[\"status\"] != \"success\" {\n\t\t\t\tif result[\"error\"] != nil {\n\t\t\t\t\terrorCode := fmt.Sprintf(\"%.0f\", result[\"error\"])\n\t\t\t\t\tswitch errorCode {\n\t\t\t\t\tcase \"-3\":\n\t\t\t\t\t\terr = fmt.Errorf(\"Invalid Interface, no such interface\")\n\t\t\t\t\tcase \"-526\":\n\t\t\t\t\t\terr = fmt.Errorf(\"DHCP Server already attached to interface\")\n\t\t\t\t\t\t// Even if create fail a server config is created. Check func added to avoid getting here\n\t\t\t\t\tdefault:\n\t\t\t\t\t\terr = fmt.Errorf(\"status is %s and error no is %.0f\", result[\"status\"], result[\"error\"])\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\terr = fmt.Errorf(\"status is %s and error no is not found\", result[\"status\"])\n\t\t\t\t}\n\n\t\t\t\tif result[\"http_status\"] != nil {\n\t\t\t\t\terr = fmt.Errorf(\"%s, details: %s\", err, util.HttpStatus2Str(int(result[\"http_status\"].(float64))))\n\t\t\t\t} else {\n\t\t\t\t\terr = fmt.Errorf(\"%s, and http_status no is not found\", err)\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t}\n\t\t\toutput.Status = result[\"status\"].(string)\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"cannot get status from the response\")\n\t\t\treturn\n\t\t}\n\t\tif result[\"http_status\"] != nil {\n\t\t\toutput.HTTPStatus = fmt.Sprintf(\"%.0f\", result[\"http_status\"].(float64))\n\t\t}\n\t} else {\n\t\terr = fmt.Errorf(\"cannot get the right response\")\n\t\treturn\n\t}\n\n\treturn\n}",
"func AdvertiseHost(listen string) string {\n\tif listen == \"0.0.0.0\" {\n\t\taddrs, err := net.InterfaceAddrs()\n\t\tif err != nil || len(addrs) == 0 {\n\t\t\treturn \"localhost\"\n\t\t}\n\n\t\tfor _, addr := range addrs {\n\t\t\tif ip, ok := addr.(*net.IPNet); ok && !ip.IP.IsLoopback() && ip.IP.To4() != nil {\n\t\t\t\treturn ip.IP.To4().String()\n\t\t\t}\n\t\t}\n\t\treturn \"localhost\"\n\t}\n\n\treturn listen\n}",
"func (m *mDNS) AddHandler(f func(net.Interface, net.Addr, Packet)) {\n\tm.pHandlers = append(m.pHandlers, f)\n}",
"func (c *connection) addHost(conn *net.TCPConn) {\n\tmsg := read(conn)\n\tvar buf bytes.Buffer\n\tid := int(msg[1])\n\tport := int(msg[2])*256 + int(msg[3])\n\tif id == 0 {\n\t\tid = len(c.peers)\n\t\tbuf.Write([]byte{byte(id), byte(c.myId)})\n\t\tfor i := range c.peers {\n\t\t\tif i != c.myId {\n\t\t\t\tbuf.WriteByte(byte(i))\n\t\t\t\tbuf.Write(addrToBytes(c.peers[i].ip, c.peers[i].port))\n\t\t\t}\n\t\t}\n\t\twrite(conn, buf.Bytes())\n\n\t}\n\tc.addPeer(id, conn, port)\n\tgo c.receive(c.peers[id])\n}",
"func (c *FortiSDKClient) DeleteNetworkingInterfaceDHCP(mkey string) (err error) {\n\tlogPrefix := \"DeleteNetworkingInterfaceDHCP - \"\n\tHTTPMethod := \"DELETE\"\n\tpath := \"/api/v2/cmdb/system.dhcp/server\"\n\tpath += \"/\" + EscapeURLString(mkey)\n\n\treq := c.NewRequest(HTTPMethod, path, nil, nil)\n\terr = req.Send()\n\tif err != nil || req.HTTPResponse == nil {\n\t\terr = fmt.Errorf(\"cannot send request %s\", err)\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(req.HTTPResponse.Body)\n\tif err != nil || body == nil {\n\t\terr = fmt.Errorf(\"cannot get response body %s\", err)\n\t\treturn\n\t}\n\n\tvar result map[string]interface{}\n\tjson.Unmarshal([]byte(string(body)), &result)\n\n\treq.HTTPResponse.Body.Close()\n\n\tlog.Printf(logPrefix+\"Path called %s\", path)\n\tlog.Printf(logPrefix+\"FortiOS response: %s\", string(body))\n\n\tif result != nil {\n\t\tif result[\"status\"] == nil {\n\t\t\terr = fmt.Errorf(\"cannot get status from the response\")\n\t\t\treturn\n\t\t}\n\n\t\tif result[\"status\"] != \"success\" {\n\t\t\tif result[\"error\"] != nil {\n\t\t\t\terr = fmt.Errorf(\"status is %s and error no is %.0f\", result[\"status\"], result[\"error\"])\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"status is %s and error no is not found\", result[\"status\"])\n\t\t\t}\n\n\t\t\tif result[\"http_status\"] != nil {\n\t\t\t\terr = fmt.Errorf(\"%s, details: %s\", err, util.HttpStatus2Str(int(result[\"http_status\"].(float64))))\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"%s, and http_status no is not found\", err)\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\n\t} else {\n\t\terr = fmt.Errorf(\"cannot get the right response\")\n\t\treturn\n\t}\n\n\treturn\n}",
"func WithHwAddr(hwaddr []byte) Modifier {\n\treturn func(d *DHCPv4) *DHCPv4 {\n\t\td.SetClientHwAddr(hwaddr)\n\t\treturn d\n\t}\n}",
"func (c *RawClient) sendDHCP(target net.HardwareAddr, dhcp []byte, dstIP net.IP, srcIP net.IP) error {\n\n\tproto := 17\n\n\tudpsrc := uint(67)\n\tudpdst := uint(68)\n\n\tudp := udphdr{\n\t\tsrc: uint16(udpsrc),\n\t\tdst: uint16(udpdst),\n\t}\n\n\tudplen := 8 + len(dhcp)\n\n\tip := iphdr{\n\t\tvhl: 0x45,\n\t\ttos: 0,\n\t\tid: 0x0000, // the kernel overwrites id if it is zero\n\t\toff: 0,\n\t\tttl: 128,\n\t\tproto: uint8(proto),\n\t}\n\tcopy(ip.src[:], srcIP.To4())\n\tcopy(ip.dst[:], dstIP.To4())\n\n\tudp.ulen = uint16(udplen)\n\tudp.checksum(&ip, dhcp)\n\n\ttotalLen := 20 + udplen\n\n\tip.iplen = uint16(totalLen)\n\tip.checksum()\n\n\tbuf := bytes.NewBuffer([]byte{})\n\terr := binary.Write(buf, binary.BigEndian, &udp)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tudpHeader := buf.Bytes()\n\tdataWithHeader := append(udpHeader, dhcp...)\n\n\tbuff := bytes.NewBuffer([]byte{})\n\terr = binary.Write(buff, binary.BigEndian, &ip)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tipHeader := buff.Bytes()\n\tpacket := append(ipHeader, dataWithHeader...)\n\n\t// Create Ethernet frame\n\tf := ðernet.Frame{\n\t\tDestination: target,\n\t\tSource: c.ifi.HardwareAddr,\n\t\tEtherType: ethernet.EtherTypeIPv4,\n\t\tPayload: packet,\n\t}\n\tfb, err := f.MarshalBinary()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Send packet to target\n\t_, err = c.p.WriteTo(fb, &raw.Addr{\n\t\tHardwareAddr: target,\n\t})\n\treturn err\n}",
"func (b *DummyPool) AddServer(s *Server) {\n\tb.server = s\n}",
"func (vcd *TestVCD) Test_VMGetDhcpAddress(check *C) {\n\tif vcd.config.VCD.EdgeGateway == \"\" {\n\t\tcheck.Skip(\"Skipping test because no edge gateway given\")\n\t}\n\n\t// Construct new VM for test\n\tvapp, err := deployVappForTest(vcd, \"GetDhcpAddress\")\n\tcheck.Assert(err, IsNil)\n\tvmType, _ := vcd.findFirstVm(*vapp)\n\tvm := &VM{\n\t\tVM: &vmType,\n\t\tclient: vapp.client,\n\t}\n\n\tedgeGateway, err := vcd.vdc.GetEdgeGatewayByName(vcd.config.VCD.EdgeGateway, false)\n\tif err != nil {\n\t\tcheck.Skip(fmt.Sprintf(\"Edge Gateway %s not found\", vcd.config.VCD.EdgeGateway))\n\t}\n\n\t// Setup Org network with a single IP in DHCP pool\n\tnetwork := makeOrgVdcNetworkWithDhcp(vcd, check, edgeGateway)\n\n\t// Attach Org network to vApp\n\t_, err = vapp.AddOrgNetwork(&VappNetworkSettings{}, network, false)\n\tcheck.Assert(err, IsNil)\n\n\t// Get network config and update it to use DHCP\n\tnetCfg, err := vm.GetNetworkConnectionSection()\n\tcheck.Assert(err, IsNil)\n\tcheck.Assert(netCfg, NotNil)\n\n\tnetCfg.NetworkConnection[0].Network = network.Name\n\tnetCfg.NetworkConnection[0].IPAddressAllocationMode = types.IPAllocationModeDHCP\n\tnetCfg.NetworkConnection[0].IsConnected = true\n\n\tsecondNic := &types.NetworkConnection{\n\t\tNetwork: network.Name,\n\t\tIPAddressAllocationMode: types.IPAllocationModeDHCP,\n\t\tNetworkConnectionIndex: 1,\n\t\tIsConnected: true,\n\t}\n\tnetCfg.NetworkConnection = append(netCfg.NetworkConnection, secondNic)\n\n\t// Update network configuration to use DHCP\n\terr = vm.UpdateNetworkConnectionSection(netCfg)\n\tcheck.Assert(err, IsNil)\n\n\tif testVerbose {\n\t\tfmt.Printf(\"# Time out waiting for DHCP IPs on powered off VMs: \")\n\t}\n\t// Pretend we are waiting for DHCP addresses when VM is powered off - it must timeout\n\tips, hasTimedOut, err := vm.WaitForDhcpIpByNicIndexes([]int{0, 1}, 10, true)\n\tcheck.Assert(err, IsNil)\n\tcheck.Assert(hasTimedOut, Equals, true)\n\tcheck.Assert(ips, HasLen, 2)\n\tcheck.Assert(ips[0], Equals, 
\"\")\n\tcheck.Assert(ips[1], Equals, \"\")\n\n\tif testVerbose {\n\t\tfmt.Println(\"OK\")\n\t}\n\n\t// err = vm.PowerOnAndForceCustomization()\n\ttask, err := vapp.PowerOn()\n\tcheck.Assert(err, IsNil)\n\terr = task.WaitTaskCompletion()\n\tcheck.Assert(err, IsNil)\n\n\tif testVerbose {\n\t\tfmt.Printf(\"# Get IPs for NICs 0 and 1: \")\n\t}\n\t// Wait and check DHCP lease acquired\n\tips, hasTimedOut, err = vm.WaitForDhcpIpByNicIndexes([]int{0, 1}, 300, true)\n\tcheck.Assert(err, IsNil)\n\tcheck.Assert(hasTimedOut, Equals, false)\n\tcheck.Assert(ips, HasLen, 2)\n\tcheck.Assert(ips[0], Matches, `^32.32.32.\\d{1,3}$`)\n\tcheck.Assert(ips[1], Matches, `^32.32.32.\\d{1,3}$`)\n\n\tif testVerbose {\n\t\tfmt.Printf(\"OK:(NICs 0 and 1): %s, %s\\n\", ips[0], ips[1])\n\t}\n\n\t// DHCP lease was received so VMs MAC address should have an active lease\n\tif testVerbose {\n\t\tfmt.Printf(\"# Get active lease for NICs with MAC 0: \")\n\t}\n\tlease, err := edgeGateway.GetNsxvActiveDhcpLeaseByMac(netCfg.NetworkConnection[0].MACAddress)\n\tcheck.Assert(err, IsNil)\n\tcheck.Assert(lease, NotNil)\n\t// This check fails for a known bug in vCD\n\t//check.Assert(lease.IpAddress, Matches, `^32.32.32.\\d{1,3}$`)\n\tif testVerbose {\n\t\tfmt.Printf(\"Ok. 
(Got active lease for MAC 0: %s)\\n\", lease.IpAddress)\n\t}\n\n\tif testVerbose {\n\t\tfmt.Printf(\"# Check number of leases on Edge Gateway: \")\n\t}\n\tallLeases, err := edgeGateway.GetAllNsxvDhcpLeases()\n\tcheck.Assert(err, IsNil)\n\tcheck.Assert(allLeases, NotNil)\n\tcheck.Assert(len(allLeases) > 0, Equals, true)\n\tif testVerbose {\n\t\tfmt.Printf(\"OK: (%d leases found)\\n\", len(allLeases))\n\t}\n\n\t// Check for a single NIC\n\tif testVerbose {\n\t\tfmt.Printf(\"# Get IP for single NIC 0: \")\n\t}\n\tips, hasTimedOut, err = vm.WaitForDhcpIpByNicIndexes([]int{0}, 300, true)\n\tcheck.Assert(err, IsNil)\n\tcheck.Assert(hasTimedOut, Equals, false)\n\tcheck.Assert(ips, HasLen, 1)\n\n\t// This check fails for a known bug in vCD\n\t// TODO: re-enable when the bug is fixed\n\t//check.Assert(ips[0], Matches, `^32.32.32.\\d{1,3}$`)\n\tif testVerbose {\n\t\tfmt.Printf(\"OK: Got IP for NICs 0: %s\\n\", ips[0])\n\t}\n\n\t// Check if IPs are reported by only using VMware tools\n\tif testVerbose {\n\t\tfmt.Printf(\"# Get IPs for NICs 0 and 1 (only using guest tools): \")\n\t}\n\tips, hasTimedOut, err = vm.WaitForDhcpIpByNicIndexes([]int{0, 1}, 300, false)\n\tcheck.Assert(err, IsNil)\n\tcheck.Assert(hasTimedOut, Equals, false)\n\tcheck.Assert(ips, HasLen, 2)\n\t// This check fails for a known bug in vCD\n\t//check.Assert(ips[0], Matches, `^32.32.32.\\d{1,3}$`)\n\t//check.Assert(ips[1], Matches, `^32.32.32.\\d{1,3}$`)\n\tif testVerbose {\n\t\tfmt.Printf(\"OK: IPs for NICs 0 and 1 (via guest tools): %s, %s\\n\", ips[0], ips[1])\n\t}\n\n\t// Cleanup vApp\n\terr = deleteVapp(vcd, vapp.VApp.Name)\n\tcheck.Assert(err, IsNil)\n}",
"func (m *mDNS) AddHandler(f func(net.Addr, Packet)) {\n\tm.pHandlers = append(m.pHandlers, f)\n}",
"func (sp *ServerPool) AddServer(server *Server) {\n\tlog.Printf(\"Added server %s to pool\", server.Name)\n\tsp.servers = append(sp.servers, server)\n}",
"func (c *Client) SetServerHost(host string, port int) {\n\tc.serverAddr = net.JoinHostPort(host, strconv.Itoa(port))\n}",
"func createFakeDHCP() error{\n\n\n dhcpData := []byte(`lease 192.168.50.63 {\n starts 4 2019/08/08 22:32:49;\n ends 4 2019/08/08 23:52:49;\n cltt 4 2019/08/08 22:32:49;\n binding state active;\n next binding state free;\n rewind binding state free;\n hardware ethernet 08:00:27:00:ab:2c;\n client-hostname \"fake-test-bmh\"\";\n}`)\n err := ioutil.WriteFile(\"/var/lib/dhcp/dhcpd.leases\", dhcpData, 0777)\n\n if (err != nil) {\n return err\n }\n\n return nil\n}",
"func (ln *linuxNetworking) AddVirtualServer(ipvsSvc *ipvs.Service) error {\n\tln.Lock()\n\tdefer ln.Unlock()\n\n\treturn ln.ipvsHandle.NewService(ipvsSvc)\n}",
"func (d *dataset) addServer(server Server) {\n\tif server.WritePriority > 0 {\n\t\td.write = &server\n\t}\n\tif server.ReadPriority > 0 {\n\t\td.read.addServer(&server)\n\t\tif _, ok := d.allReadServers[server.ReadPriority]; !ok {\n\t\t\td.allReadServers[server.ReadPriority] = []*Server{}\n\t\t}\n\t\td.allReadServers[server.ReadPriority] = append(d.allReadServers[server.ReadPriority], &server)\n\t}\n}",
"func NewAddDNSServerNoContent() *AddDNSServerNoContent {\n\treturn &AddDNSServerNoContent{}\n}",
"func WithHost(p string) Option {\n\treturn func(o *options) {\n\t\to.host = p\n\t}\n}",
"func AddHostAndDatabase(ctx context.Context, pmfs ...DBOption) error {\n\tpm := getParam(pmfs...)\n\n\tconf := new(config)\n\tif _, err := os.Stat(pm.ConfFilePath); err == nil {\n\t\tconf, err = getConfig(ctx, pm.ConfFilePath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif conf.Hosts == nil {\n\t\tconf.Hosts = []*host{}\n\t}\n\n\tif conf.Databases == nil {\n\t\tconf.Databases = []*database{}\n\t}\n\n\t// add Host Info\n\tvar ho *host\n\tfor _, h := range conf.Hosts {\n\t\tif h.User == pm.User &&\n\t\t\th.Password == pm.Password &&\n\t\t\th.Address == pm.Address &&\n\t\t\th.Port == pm.Port &&\n\t\t\th.Protocol == pm.Protocol {\n\t\t\tho = h\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif ho == nil {\n\t\tho = &host{\n\t\t\tKey: len(conf.Hosts) + 1,\n\t\t\tUser: pm.User,\n\t\t\tPassword: pm.Password,\n\t\t\tAddress: pm.Address,\n\t\t\tPort: pm.Port,\n\t\t\tProtocol: pm.Protocol,\n\t\t}\n\n\t\tconf.Hosts = append(conf.Hosts, ho)\n\t}\n\n\thostKey := ho.Key\n\n\tvar db *database\n\n\t// add Database Info\n\tfor _, d := range conf.Databases {\n\t\tif d.HostKey == hostKey && d.Name == pm.Database {\n\t\t\tdb = d\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif db == nil {\n\t\tdb = &database{\n\t\t\tHostKey: hostKey,\n\t\t\tName: pm.Database,\n\t\t}\n\n\t\tconf.Databases = append(conf.Databases, db)\n\t}\n\n\treturn setConfig(ctx, conf, pm.ConfFilePath)\n}",
"func (sf *ClientOption) AddRemoteServer(server string) error {\n\tif len(server) > 0 && server[0] == ':' {\n\t\tserver = \"127.0.0.1\" + server\n\t}\n\tif !strings.Contains(server, \"://\") {\n\t\tserver = \"tcp://\" + server\n\t\t//server = \"mms://\" + server\n\t}\n\tremoteURL, err := url.Parse(server)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsf.server = remoteURL\n\treturn nil\n}",
"func AddServer(dir, serverID string) error {\n\tscript := fmt.Sprintf(`cd %v && ./easyrsa build-server-full %v nopass`, dir, serverID)\n\treturn execScript(script)\n}",
"func (client *LANHostConfigManagement1) SetDHCPServerConfigurable(NewDHCPServerConfigurable bool) (err error) {\n\treturn client.SetDHCPServerConfigurableCtx(context.Background(),\n\t\tNewDHCPServerConfigurable,\n\t)\n}",
"func (px *PXE) handleDHCPRequest(p layers.DHCPv4) {\n\n\t// Construct a hash of options so we can use it more easily\n\topts := make(map[layers.DHCPOpt]layers.DHCPOption)\n\tfor _, o := range p.Options {\n\t\topts[o.Type] = o\n\t}\n\n\tt, ok := opts[layers.DHCPOptMessageType]\n\tif !ok {\n\t\tpx.api.Log(types.LLDEBUG, \"got DHCP packet with no message type\")\n\t\treturn\n\t}\n\n\tn := px.NodeGet(queryByMAC, p.ClientHWAddr.String())\n\t// fmt.Printf(\"%v, %v, %p, %v\\n\", count, n.ID().String(), &n, p.ClientHWAddr.String())\n\tif n == nil {\n\t\tpx.api.Logf(types.LLDEBUG, \"ignoring DHCP packet from unknown %s\", p.ClientHWAddr.String())\n\t\treturn\n\t}\n\n\tv, e := n.GetValue(px.cfg.IpUrl)\n\tif e != nil {\n\t\tpx.api.Logf(types.LLDEBUG, \"ignoring DHCP packet from node with no IP in state: %s\", p.ClientHWAddr.String())\n\t\treturn\n\t}\n\tip := v.Interface().(ipv4t.IP).IP\n\n\tswitch layers.DHCPMsgType(t.Data[0]) {\n\tcase layers.DHCPMsgTypeDiscover:\n\t\tpx.api.Logf(types.LLDEBUG, \"sending DHCP offer of %s to %s\", ip.String(), p.ClientHWAddr.String())\n\n\t\tr := px.newDHCPPacket(\n\t\t\tp,\n\t\t\tlayers.DHCPMsgTypeOffer,\n\t\t\tpx.selfIP.To4(),\n\t\t\tip,\n\t\t\tpx.leaseTime,\n\t\t\tlayers.DHCPOptions{},\n\t\t)\n\n\t\tpx.transmitDHCPPacket(n, ip, p.ClientHWAddr, r)\n\t\treturn\n\tcase layers.DHCPMsgTypeRequest:\n\t\treq, ok := opts[layers.DHCPOptRequestIP]\n\t\tif !ok {\n\t\t\tpx.api.Log(types.LLDEBUG, \"got a DHCP request, but no request IP\")\n\t\t\treturn\n\t\t}\n\t\tif req.Length != 4 {\n\t\t\tpx.api.Logf(types.LLDEBUG, \"got a DHCP request with invalid length request IP, len = %d\", req.Length)\n\t\t\treturn\n\t\t}\n\t\treqIP := net.IP(req.Data)\n\t\tif reqIP.Equal(ip) {\n\t\t\tr := px.newDHCPPacket(\n\t\t\t\tp,\n\t\t\t\tlayers.DHCPMsgTypeAck,\n\t\t\t\tpx.selfIP.To4(),\n\t\t\t\tip,\n\t\t\t\tpx.leaseTime,\n\t\t\t\tlayers.DHCPOptions{},\n\t\t\t)\n\t\t\tpx.transmitDHCPPacket(n, ip, p.ClientHWAddr, r)\n\t\t\tpx.api.Logf(types.LLDEBUG, \"acknowledging DHCP 
request by %s for %s\", p.ClientHWAddr.String(), reqIP.String())\n\t\t\t// discover that we've progressed\n\t\t\turl1 := util.NodeURLJoin(n.ID().String(), PXEStateURL)\n\t\t\tev1 := core.NewEvent(\n\t\t\t\ttypes.Event_DISCOVERY,\n\t\t\t\turl1,\n\t\t\t\t&core.DiscoveryEvent{\n\t\t\t\t\tURL: url1,\n\t\t\t\t\tValueID: \"INIT\",\n\t\t\t\t},\n\t\t\t)\n\t\t\turl2 := util.NodeURLJoin(n.ID().String(), \"/RunState\")\n\t\t\tev2 := core.NewEvent(\n\t\t\t\ttypes.Event_DISCOVERY,\n\t\t\t\turl2,\n\t\t\t\t&core.DiscoveryEvent{\n\t\t\t\t\tURL: url2,\n\t\t\t\t\tValueID: \"NODE_INIT\",\n\t\t\t\t},\n\t\t\t)\n\t\t\tpx.dchan <- ev1\n\t\t\tpx.dchan <- ev2\n\t\t} else {\n\t\t\tpx.api.Logf(types.LLDEBUG, \"NAKing DHCP request by %s for %s\", p.ClientHWAddr.String(), reqIP.String())\n\t\t\tr := px.newDHCPPacket(\n\t\t\t\tp,\n\t\t\t\tlayers.DHCPMsgTypeNak,\n\t\t\t\tpx.selfIP.To4(),\n\t\t\t\tip,\n\t\t\t\tpx.leaseTime,\n\t\t\t\tlayers.DHCPOptions{},\n\t\t\t)\n\t\t\tpx.transmitDHCPPacket(n, ip, p.ClientHWAddr, r)\n\t\t}\n\t// We don't expect any of the following, so we don't handle them\n\tcase layers.DHCPMsgTypeDecline:\n\t\tfallthrough\n\tcase layers.DHCPMsgTypeInform:\n\t\tfallthrough\n\tcase layers.DHCPMsgTypeRelease:\n\t\tfallthrough\n\tcase layers.DHCPMsgTypeUnspecified:\n\t\tfallthrough\n\tdefault: // Pi's only send Discovers\n\t\tpx.api.Log(types.LLDEBUG, \"Unhandled DHCP packet.\")\n\t}\n\treturn\n}",
"func (r Virtual_Guest) GetDedicatedHost() (resp datatypes.Virtual_DedicatedHost, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Guest\", \"getDedicatedHost\", nil, &r.Options, &resp)\n\treturn\n}",
"func (p *PerHost) AddIP(ip net.IP) {\n\tp.bypassIPs = append(p.bypassIPs, ip)\n}",
"func WithHost(host string) Option {\n\treturn func(c *gate.Configuration) {\n\t\tc.Host = host\n\t}\n}",
"func handleDHCPFindActiveServer(w http.ResponseWriter, r *http.Request) {\n\tlog.Tracef(\"%s %v\", r.Method, r.URL)\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\terrorText := fmt.Sprintf(\"failed to read request body: %s\", err)\n\t\tlog.Error(errorText)\n\t\thttp.Error(w, errorText, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tinterfaceName := strings.TrimSpace(string(body))\n\tif interfaceName == \"\" {\n\t\terrorText := fmt.Sprintf(\"empty interface name specified\")\n\t\tlog.Error(errorText)\n\t\thttp.Error(w, errorText, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tfound, err := dhcpd.CheckIfOtherDHCPServersPresent(interfaceName)\n\n\tothSrv := map[string]interface{}{}\n\tfoundVal := \"no\"\n\tif found {\n\t\tfoundVal = \"yes\"\n\t} else if err != nil {\n\t\tfoundVal = \"error\"\n\t\tothSrv[\"error\"] = err.Error()\n\t}\n\tothSrv[\"found\"] = foundVal\n\n\tstaticIP := map[string]interface{}{}\n\tisStaticIP, err := hasStaticIP(interfaceName)\n\tstaticIPStatus := \"yes\"\n\tif err != nil {\n\t\tstaticIPStatus = \"error\"\n\t\tstaticIP[\"error\"] = err.Error()\n\t} else if !isStaticIP {\n\t\tstaticIPStatus = \"no\"\n\t\tstaticIP[\"ip\"] = getFullIP(interfaceName)\n\t}\n\tstaticIP[\"static\"] = staticIPStatus\n\n\tresult := map[string]interface{}{}\n\tresult[\"other_server\"] = othSrv\n\tresult[\"static_ip\"] = staticIP\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\terr = json.NewEncoder(w).Encode(result)\n\tif err != nil {\n\t\thttpError(w, http.StatusInternalServerError, \"Failed to marshal DHCP found json: %s\", err)\n\t\treturn\n\t}\n}",
"func sendUnicastDHCP(dhcp []byte, dstIP net.IP, srcIP net.IP, srcPort int, dstPort int) error {\n\n\ts, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, syscall.IPPROTO_RAW)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tspew.Dump(dstIP)\n\tspew.Dump(srcIP)\n\tproto := 17\n\n\tudpsrc := srcPort\n\n\tudpdst := dstPort\n\n\tudp := udphdr{\n\t\tsrc: uint16(udpsrc),\n\t\tdst: uint16(udpdst),\n\t}\n\n\tudplen := 8 + len(dhcp)\n\n\tip := iphdr{\n\t\tvhl: 0x45,\n\t\ttos: 0,\n\t\tid: 0x0000, // the kernel overwrites id if it is zero\n\t\toff: 0,\n\t\tttl: 128,\n\t\tproto: uint8(proto),\n\t}\n\tcopy(ip.src[:], srcIP.To4())\n\tcopy(ip.dst[:], dstIP.To4())\n\n\tudp.ulen = uint16(udplen)\n\tudp.checksum(&ip, dhcp)\n\n\ttotalLen := 20 + udplen\n\n\tip.iplen = uint16(totalLen)\n\tip.checksum()\n\n\tbuf := bytes.NewBuffer([]byte{})\n\terr = binary.Write(buf, binary.BigEndian, &udp)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tudpHeader := buf.Bytes()\n\tdataWithHeader := append(udpHeader, dhcp...)\n\n\tbuff := bytes.NewBuffer([]byte{})\n\terr = binary.Write(buff, binary.BigEndian, &ip)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tipHeader := buff.Bytes()\n\tpacket := append(ipHeader, dataWithHeader...)\n\n\taddr := syscall.SockaddrInet4{}\n\tcopy(addr.Addr[:], dstIP.To4())\n\taddr.Port = int(udpdst)\n\n\terr = syscall.Sendto(s, packet, 0, &addr)\n\t// Send packet to target\n\terr = syscall.Close(s)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error closing the socket: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\treturn err\n}",
"func (d *DelegationCache) Add(domain string, server Server) bool {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\tdomain = strings.ToLower(domain)\n\tfor _, s2 := range d.c[domain] {\n\t\tif domainEqual(s2.Name, server.Name) {\n\t\t\treturn false\n\t\t}\n\t}\n\tif d.c == nil {\n\t\td.c = map[string][]Server{}\n\t}\n\td.c[domain] = append(d.c[domain], server)\n\treturn true\n}",
"func (d *DHCPv4) SetUnicast() {\n\td.Flags &= ^uint16(0x8000)\n}",
"func (p *SpecificDeriver) DeriveHostToHost(dstHost string, key Key) (Key, error) {\n\thost, err := packtoHostAddr(dstHost)\n\tif err != nil {\n\t\treturn Key{}, serrors.WrapStr(\"deriving input H2H\", err)\n\t}\n\tlen := inputDeriveHostToHost(p.buf[:], host)\n\toutKey, err := deriveKey(p.buf[:], len, key)\n\treturn outKey, err\n}",
"func DHCPCreate(dhcp Dhcp) error {\n\tif isUnmanaged(UnmanagedID(dhcp.Ifname), LINKTYPE) {\n\t\treturn NewUnmanagedLinkDHCPCannotBeModifiedError(dhcp.Ifname)\n\t}\n\t_, err := DHCPGet(dhcp.Ifname)\n\tif err != nil {\n\t\t/* The only acceptable error is that you didn't find it. For any other error, abort */\n\t\tif _, ok := err.(*NotFoundError); !ok {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = DHCPStaticAddressesManage(dhcp.Ifname)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tout, err := exec.Command(prefixInstallPAth+\"dhcp_start.sh\", string(dhcp.Ifname)).Output()\n\tif err != nil {\n\t\treturn NewCannotStartDHCPError(dhcp.Ifname, err)\n\t}\n\n\tif string(out) == \"Service running already\" {\n\t\treturn NewDHCPAlreadyRunningConflictError(dhcp.Ifname)\n\t}\n\treturn nil\n}",
"func (o *DhcpRangeDataData) HasServerAddr() bool {\n\tif o != nil && o.ServerAddr != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (p *Proxy) AddServerAccess(s *Server) (err error) {\n\n // We only want to connect internally to the VPN.\n // These should not be exposed.\n // serverAddress := s.PublicServerAddress()\n serverAddress := s.ServerAddress()\n\n f:= logrus.Fields{\n \"proxy\": p.Name, \"server\": s.Name, \"user\": s.User, \"serverAddress\": serverAddress,\n }\n log.Info(f, \"Adding server access to proxy.\")\n\n if s.ServerPort == 0 {\n return fmt.Errorf(\"Failed to add server: invalid server port = (%d)\", s.ServerPort)\n }\n\n rcon, err := p.GetRcon()\n if err != nil { return err }\n\n motd := fmt.Sprintf(\"%s hosted by %s in the %s neighborhood.\", s.Name, s.User, s.Name)\n command := fmt.Sprintf(\"bconf addServer(\\\"%s\\\", \\\"%s\\\", \\\"%s\\\", false)\",\n s.Name, motd, serverAddress)\n\n reply, err := rcon.Send(command)\n f[\"command\"] = command\n f[\"reply\"] = reply\n if err != nil { \n log.Error(f, \"Remore addServer failed.\", err)\n return err \n }\n // fmt.Printf(\"Received reply: %s\\n\", reply)\n log.Info(f, \"Remote addServer reply.\")\n\n return err\n}",
"func isServerAddHeaderDirective(directive string) bool {\n\tif isEqualString(directive, AddHeaderDirective) {\n\t\treturn true\n\t}\n\treturn false\n}",
"func DBHost(address string) DBOption {\n\treturn func(pm *param) *param {\n\t\tpm.Address = address\n\t\treturn pm\n\t}\n}",
"func (i *DHCPInterface) StartBlockingServer() error {\n\tpacketConn, err := conn.NewUDP4BoundListener(i.Bridge, \":67\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn dhcp.Serve(packetConn, i)\n}",
"func (h *Host) SetAdress(a string) {\n}",
"func (client WorkloadNetworksClient) CreateDhcpResponder(resp *http.Response) (result WorkloadNetworkDhcp, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
"func IPIsDoHOnlyServer(ip netip.Addr) bool {\n\treturn nextDNSv6RangeA.Contains(ip) || nextDNSv6RangeB.Contains(ip) ||\n\t\tnextDNSv4RangeA.Contains(ip) || nextDNSv4RangeB.Contains(ip)\n}",
"func (client WorkloadNetworksClient) CreateDhcp(ctx context.Context, resourceGroupName string, privateCloudName string, dhcpID string, workloadNetworkDhcp WorkloadNetworkDhcp) (result WorkloadNetworksCreateDhcpFuture, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/WorkloadNetworksClient.CreateDhcp\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response() != nil {\n\t\t\t\tsc = result.Response().StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: client.SubscriptionID,\n\t\t\tConstraints: []validation.Constraint{{Target: \"client.SubscriptionID\", Name: validation.MinLength, Rule: 1, Chain: nil}}},\n\t\t{TargetValue: resourceGroupName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"resourceGroupName\", Name: validation.MaxLength, Rule: 90, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.MinLength, Rule: 1, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.Pattern, Rule: `^[-\\w\\._\\(\\)]+$`, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewError(\"avs.WorkloadNetworksClient\", \"CreateDhcp\", err.Error())\n\t}\n\n\treq, err := client.CreateDhcpPreparer(ctx, resourceGroupName, privateCloudName, dhcpID, workloadNetworkDhcp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"avs.WorkloadNetworksClient\", \"CreateDhcp\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresult, err = client.CreateDhcpSender(req)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"avs.WorkloadNetworksClient\", \"CreateDhcp\", nil, \"Failure sending request\")\n\t\treturn\n\t}\n\n\treturn\n}",
"func NewPutDevicesEroutersIDDhcpServersServernameForbidden() *PutDevicesEroutersIDDhcpServersServernameForbidden {\n\treturn &PutDevicesEroutersIDDhcpServersServernameForbidden{}\n}",
"func AddServer(data map[string]string)(err error) {\n uuid := data[\"uuid\"]\n err = ndb.GetTokenByUuid(uuid); if err!=nil{logs.Error(\"Error loading node token: %s\",err); return err}\n ipuuid,portuuid,err := ndb.ObtainPortIp(uuid)\n if err != nil {\n logs.Error(\"AddServer ERROR Obtaining Port and IP for Add a new server into STAP: \"+err.Error())\n return err\n }\n err = nodeclient.AddServer(ipuuid,portuuid, data)\n if err != nil {\n logs.Error(\"node/AddServer ERROR http data request: \"+err.Error())\n return err\n }\n return nil\n}",
"func (a *Client) PostNodesMacaddressDhcpWhitelist(params *PostNodesMacaddressDhcpWhitelistParams, authInfo runtime.ClientAuthInfoWriter) (*PostNodesMacaddressDhcpWhitelistCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostNodesMacaddressDhcpWhitelistParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PostNodesMacaddressDhcpWhitelist\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/nodes/{macaddress}/dhcp/whitelist\",\n\t\tProducesMediaTypes: []string{\"application/json\", \"application/x-gzip\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &PostNodesMacaddressDhcpWhitelistReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*PostNodesMacaddressDhcpWhitelistCreated), nil\n}",
"func (a *Client) PostNodesMacaddressDhcpWhitelist(params *PostNodesMacaddressDhcpWhitelistParams, authInfo runtime.ClientAuthInfoWriter) (*PostNodesMacaddressDhcpWhitelistCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostNodesMacaddressDhcpWhitelistParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PostNodesMacaddressDhcpWhitelist\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/nodes/{macaddress}/dhcp/whitelist\",\n\t\tProducesMediaTypes: []string{\"application/json\", \"application/x-gzip\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &PostNodesMacaddressDhcpWhitelistReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*PostNodesMacaddressDhcpWhitelistCreated), nil\n}",
"func (gs *GRPCClient) AddServer(sv *Server) {\n\tvar host, port, portKey string\n\tvar ok bool\n\n\thost, portKey = gs.getServerHost(sv)\n\tif host == \"\" {\n\t\tlogger.Log.Errorf(\"[grpc client] server %s has no grpcHost specified in metadata\", sv.ID)\n\t\treturn\n\t}\n\n\tif port, ok = sv.Metadata[portKey]; !ok {\n\t\tlogger.Log.Errorf(\"[grpc client] server %s has no %s specified in metadata\", sv.ID, portKey)\n\t\treturn\n\t}\n\n\taddress := fmt.Sprintf(\"%s:%s\", host, port)\n\tclient := &grpcClient{address: address}\n\tif !gs.lazy {\n\t\tif err := client.connect(); err != nil {\n\t\t\tlogger.Log.Errorf(\"[grpc client] unable to connect to server %s at %s: %v\", sv.ID, address, err)\n\t\t}\n\t}\n\tgs.clientMap.Store(sv.ID, client)\n\tlogger.Log.Debugf(\"[grpc client] added server %s at %s\", sv.ID, address)\n}",
"func (d *Device) AddService(svc *ble.Service) error {\n\treturn d.Server.AddService(svc)\n}",
"func (_obj *Apichannels) AddServant(imp _impApichannels, obj string) {\n\ttars.AddServant(_obj, imp, obj)\n}",
"func listHostOnlyAdapters(vbox VBoxManager) (map[string]*hostOnlyNetwork, error) {\n\tout, err := vbox.vbmOut(\"list\", \"hostonlyifs\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbyName := map[string]*hostOnlyNetwork{}\n\tbyIP := map[string]*hostOnlyNetwork{}\n\tn := &hostOnlyNetwork{}\n\n\terr = parseKeyValues(out, reColonLine, func(key, val string) error {\n\t\tswitch key {\n\t\tcase \"Name\":\n\t\t\tn.Name = val\n\t\tcase \"GUID\":\n\t\t\tn.GUID = val\n\t\tcase \"DHCP\":\n\t\t\tn.DHCP = (val != \"Disabled\")\n\t\tcase \"IPAddress\":\n\t\t\tn.IPv4.IP = net.ParseIP(val)\n\t\tcase \"NetworkMask\":\n\t\t\tn.IPv4.Mask = parseIPv4Mask(val)\n\t\tcase \"HardwareAddress\":\n\t\t\tmac, err := net.ParseMAC(val)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tn.HwAddr = mac\n\t\tcase \"MediumType\":\n\t\t\tn.Medium = val\n\t\tcase \"Status\":\n\t\t\tn.Status = val\n\t\tcase \"VBoxNetworkName\":\n\t\t\tn.NetworkName = val\n\n\t\t\tif _, present := byName[n.NetworkName]; present {\n\t\t\t\treturn fmt.Errorf(\"VirtualBox is configured with multiple host-only adapters with the same name %q. Please remove one.\", n.NetworkName)\n\t\t\t}\n\t\t\tbyName[n.NetworkName] = n\n\n\t\t\tif len(n.IPv4.IP) != 0 {\n\t\t\t\tif _, present := byIP[n.IPv4.IP.String()]; present {\n\t\t\t\t\treturn fmt.Errorf(\"VirtualBox is configured with multiple host-only adapters with the same IP %q. Please remove one.\", n.IPv4.IP)\n\t\t\t\t}\n\t\t\t\tbyIP[n.IPv4.IP.String()] = n\n\t\t\t}\n\n\t\t\tn = &hostOnlyNetwork{}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn byName, nil\n}",
"func WithDHCPClasslessStaticRoutes(routes []Route) Option {\n\treturn func(d *dnsmasq) {\n\t\td.classlessStaticRoutes = routes\n\t}\n}",
"func (impl *ldapAuthImpl) SetLDAPServerHost(ldapServerHost string) {\n\timpl.Lock()\n\tdefer impl.Unlock()\n\n\tif ldapServerHost != impl.ldapServerHost {\n\t\timpl.ldapServerHost = ldapServerHost\n\t}\n}",
"func WithDNSServer(dns string) ClientOption {\n\treturn optionFunc(func(c *Client) {\n\t\tc.WithDNSServer(dns)\n\t})\n}",
"func setupKadDHT(ctx context.Context, nodehost host.Host) *dht.IpfsDHT {\n\t// Create DHT server mode option\n\tdhtmode := dht.Mode(dht.ModeServer)\n\t// Rertieve the list of boostrap peer addresses\n\tbootstrappeers := dht.GetDefaultBootstrapPeerAddrInfos()\n\t// Create the DHT bootstrap peers option\n\tdhtpeers := dht.BootstrapPeers(bootstrappeers...)\n\n\t// Trace log\n\tlogrus.Traceln(\"Generated DHT Configuration.\")\n\n\t// Start a Kademlia DHT on the host in server mode\n\tkaddht, err := dht.New(ctx, nodehost, dhtmode, dhtpeers)\n\t// Handle any potential error\n\tif err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Fatalln(\"Failed to Create the Kademlia DHT!\")\n\t}\n\n\t// Return the KadDHT\n\treturn kaddht\n}",
"func (n *GoDHCPd) Serve(ctx context.Context, addr net.IP, iface string) error {\n\tconn, err := dhcp4.NewConn(fmt.Sprintf(\"%s:67\", addr))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create nwe connection: %w\", err)\n\t}\n\tdefer conn.Close()\n\n\tfor {\n\t\treq, riface, err := conn.RecvDHCP()\n\t\tif err != nil {\n\t\t\tn.logger.Error(\"failed to receive dhcp request\", zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\t\tif riface.Name != iface {\n\t\t\tcontinue\n\t\t}\n\t\tn.logger.Info(\"received request\", zap.String(\"req\", fmt.Sprintf(\"%+v\", req)))\n\n\t\tsubnet, err := n.ds.GetManagementSubnet(ctx)\n\t\tif err != nil {\n\t\t\tn.logger.Error(\"failed to get subnet\", zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\t\tvar lease *dhcpd.Lease\n\t\tlease, err = n.ds.GetLeaseFromManagementSubnet(ctx, types.HardwareAddr(req.HardwareAddr))\n\t\tif err != nil && errors.Is(err, sql.ErrNoRows) {\n\t\t\tlease, err = n.ds.CreateLeaseFromManagementSubnet(ctx, types.HardwareAddr(req.HardwareAddr))\n\t\t}\n\t\tif err != nil {\n\t\t\tn.logger.Error(\"failed to get lease\", zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\t\tresp, err := makeResponse(addr, *req, *subnet, *lease)\n\t\tif err != nil {\n\t\t\tn.logger.Error(\"failed to make response\", zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\t\terr = conn.SendDHCP(resp, riface)\n\t\tif err != nil {\n\t\t\tn.logger.Error(\"failed to send dhcp response\", zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\t\tn.logger.Info(\"send DCHP response\", zap.String(\"resp\", fmt.Sprintf(\"%+v\", resp)))\n\t}\n}",
"func (host *DinnerHost) Add(newPhilosopher Philosopher) bool {\n\tnewName := newPhilosopher.Name()\n\tfmt.Println(newName + \" WANTS TO JOIN THE TABLE.\")\n\tif len(host.phiData) >= host.tableCount {\n\t\tfmt.Println(newName + \" CANNOT JOIN: THE TABLE IS FULL.\")\n\t\tfmt.Println()\n\t\treturn false\n\t}\n\tif host.phiData[newName] != nil {\n\t\tfmt.Println(newName + \" CANNOT JOIN: ALREADY ON THE HOST'S LIST.\")\n\t\tfmt.Println()\n\t\treturn false\n\t}\n\thost.phiData[newName] = newPhilosopherDataPtr(newPhilosopher.RespChannel())\n\thost.phiData[newName].TakeSeat(host.freeSeats[0])\n\thost.freeSeats = host.freeSeats[1:]\n\tfmt.Println(newName + \" JOINED THE TABLE.\")\n\tfmt.Println()\n\treturn true\n}",
"func (p *RoundRobinPool) addServer(server *Instance.ServerRoute) {\n\tp.servers = append(p.servers, server)\n}",
"func WithHost(host string) InstanceOpt {\n\treturn func(i *Instance) error {\n\t\ti.host = host\n\t\treturn nil\n\t}\n}",
"func WithRelay(ip net.IP) Modifier {\n\treturn func(d *DHCPv4) *DHCPv4 {\n\t\td.SetUnicast()\n\t\td.SetGatewayIPAddr(ip)\n\t\td.SetHopCount(1)\n\t\treturn d\n\t}\n}",
"func (p *Proxy) StartProxyForServer(s *Server) (err error) {\n\n f:= logrus.Fields{\n \"proxy\": p.Name, \"server\": s.Name, \"user\": s.User,\n }\n log.Info(f, \"Adding proxy as a network-proxy for server.\")\n\n serverFQDN, err := p.ProxiedServerFQDN(s)\n if err != nil {\n log.Error(f, \"Failed to obtain server address from DNS. Proxy not started for Server\", err)\n return err\n }\n f[\"serverFQDN\"] = serverFQDN\n\n rcon, err := p.GetRcon()\n if err != nil { \n log.Error(f, \"Failed to get an RCON connection. Proxy not started for Server\", err)\n return err \n }\n\n // serverFQDN := fmt.Sprintf(\"%s.%s\", s.DNSName(), proxyFQDN)\n command := fmt.Sprintf(\"bconf addForcedHost(%d, \\\"%s\\\", \\\"%s\\\")\", 0, serverFQDN, s.Name)\n reply, err := rcon.Send(command)\n f[\"command\"] = command\n f[\"reply\"] = reply\n log.Info(f, \"Remote addForcedHost reply.\")\n\n return err\n}"
] | [
"0.63686794",
"0.57897925",
"0.57406795",
"0.56142825",
"0.55573124",
"0.5441052",
"0.5403369",
"0.5396786",
"0.534772",
"0.524854",
"0.5180699",
"0.5176822",
"0.5176461",
"0.5159396",
"0.5083173",
"0.50322163",
"0.49743834",
"0.49698305",
"0.496737",
"0.49510124",
"0.49067736",
"0.48714405",
"0.48523742",
"0.48360628",
"0.48351282",
"0.4821606",
"0.47922447",
"0.47706908",
"0.4767083",
"0.4760986",
"0.47439435",
"0.47333866",
"0.47288126",
"0.47206798",
"0.47150165",
"0.46803376",
"0.46512908",
"0.46505734",
"0.46504596",
"0.46489352",
"0.4637664",
"0.4617628",
"0.46120188",
"0.46094453",
"0.46076196",
"0.460009",
"0.45959625",
"0.45946854",
"0.45893478",
"0.4587159",
"0.45741847",
"0.45704418",
"0.45579877",
"0.45421147",
"0.45354497",
"0.4529297",
"0.45285884",
"0.45146662",
"0.4506589",
"0.4498673",
"0.44984016",
"0.44910008",
"0.44900438",
"0.44798318",
"0.44733733",
"0.44687897",
"0.44658753",
"0.44569877",
"0.44557405",
"0.4444806",
"0.44185925",
"0.43920606",
"0.43619987",
"0.43607026",
"0.435303",
"0.43376684",
"0.43335643",
"0.43307328",
"0.43301997",
"0.43230027",
"0.43207383",
"0.43186373",
"0.42981058",
"0.42927444",
"0.42906183",
"0.42906183",
"0.42771927",
"0.42698038",
"0.42695972",
"0.42589486",
"0.425653",
"0.425592",
"0.42554125",
"0.42550424",
"0.42489424",
"0.4228788",
"0.42212117",
"0.42197332",
"0.42105326",
"0.42066267"
] | 0.8681207 | 0 |
listDHCPServers lists all DHCP server settings in a map keyed by DHCP.NetworkName. | listDHCPServers перечисляет все настройки сервера DHCP в виде карты, ключом которой является DHCP.NetworkName. | func listDHCPServers(vbox VBoxManager) (map[string]*dhcpServer, error) {
out, err := vbox.vbmOut("list", "dhcpservers")
if err != nil {
return nil, err
}
m := map[string]*dhcpServer{}
dhcp := &dhcpServer{}
err = parseKeyValues(out, reColonLine, func(key, val string) error {
switch key {
case "NetworkName":
dhcp = &dhcpServer{}
m[val] = dhcp
dhcp.NetworkName = val
case "IP":
dhcp.IPv4.IP = net.ParseIP(val)
case "upperIPAddress":
dhcp.UpperIP = net.ParseIP(val)
case "lowerIPAddress":
dhcp.LowerIP = net.ParseIP(val)
case "NetworkMask":
dhcp.IPv4.Mask = parseIPv4Mask(val)
case "Enabled":
dhcp.Enabled = (val == "Yes")
}
return nil
})
if err != nil {
return nil, err
}
return m, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *VirtualizationVmwareVirtualMachineAllOf) GetDnsServerList() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn o.DnsServerList\n}",
"func WithDHCPNameServers(dns []string) Option {\n\treturn func(d *dnsmasq) {\n\t\td.dns = dns\n\t}\n}",
"func (client WorkloadNetworksClient) ListDhcpResponder(resp *http.Response) (result WorkloadNetworkDhcpList, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
"func removeOrphanDHCPServers(vbox VBoxManager) error {\n\tdhcps, err := listDHCPServers(vbox)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(dhcps) == 0 {\n\t\treturn nil\n\t}\n\n\tnets, err := listHostOnlyAdapters(vbox)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor name := range dhcps {\n\t\tif strings.HasPrefix(name, dhcpPrefix) {\n\t\t\tif _, present := nets[name]; !present {\n\t\t\t\tif err := vbox.vbm(\"dhcpserver\", \"remove\", \"--netname\", name); err != nil {\n\t\t\t\t\tlog.Warnf(\"Unable to remove orphan dhcp server %q: %s\", name, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func runListServers(_ *cobra.Command, _ []string) {\n\tcfg, err := config.LoadFromFile()\n\tif err != nil {\n\t\texitWithError(err)\n\t}\n\n\tregions, err := checkRegions(*region)\n\tif err != nil {\n\t\texitWithError(err)\n\t}\n\n\tnameFilter := core.NewFilter(core.TagName, *name, core.Contains, *ignoreCase)\n\tenvFilter := core.NewFilter(core.TagEnv, *env, core.Equals, *ignoreCase)\n\tservers, err := core.GetAllServers(cfg.AWSCredentials, regions, nameFilter, envFilter)\n\tif err != nil {\n\t\texitWithError(err)\n\t}\n\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)\n\tfmt.Fprintln(w, \"NAME\\tENVIRONMENT\\tPRIVATE IP\\tPUBLIC IP\")\n\tfor _, server := range servers {\n\t\tfmt.Fprintf(w, \"%s\\t%s\\t%s\\t%s\\n\", server.Name, server.Env, server.PrivateIP, server.PublicIP)\n\t}\n\tw.Flush()\n}",
"func (o *VirtualizationVmwareVirtualMachineAllOf) SetDnsServerList(v []string) {\n\to.DnsServerList = v\n}",
"func (s *FastDNSv2Service) ListZones(ctx context.Context, opt *ZoneListOptions) (*ZoneList, *Response, error) {\n\tu := fmt.Sprintf(\"config-dns/v2/zones\")\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar zones *ZoneList\n\tresp, err := s.client.Do(ctx, req, &zones)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn zones, resp, nil\n}",
"func (h *InterfaceVppHandler) DumpDhcpClients() (map[uint32]*vppcalls.Dhcp, error) {\n\tdhcpData := make(map[uint32]*vppcalls.Dhcp)\n\treqCtx := h.callsChannel.SendMultiRequest(&dhcp.DHCPClientDump{})\n\n\tfor {\n\t\tdhcpDetails := &dhcp.DHCPClientDetails{}\n\t\tlast, err := reqCtx.ReceiveReply(dhcpDetails)\n\t\tif last {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclient := dhcpDetails.Client\n\t\tlease := dhcpDetails.Lease\n\n\t\tvar hostMac net.HardwareAddr = lease.HostMac\n\t\tvar hostAddr, routerAddr string\n\t\tif uintToBool(lease.IsIPv6) {\n\t\t\thostAddr = fmt.Sprintf(\"%s/%d\", net.IP(lease.HostAddress).To16().String(), uint32(lease.MaskWidth))\n\t\t\trouterAddr = fmt.Sprintf(\"%s/%d\", net.IP(lease.RouterAddress).To16().String(), uint32(lease.MaskWidth))\n\t\t} else {\n\t\t\thostAddr = fmt.Sprintf(\"%s/%d\", net.IP(lease.HostAddress[:4]).To4().String(), uint32(lease.MaskWidth))\n\t\t\trouterAddr = fmt.Sprintf(\"%s/%d\", net.IP(lease.RouterAddress[:4]).To4().String(), uint32(lease.MaskWidth))\n\t\t}\n\n\t\t// DHCP client data\n\t\tdhcpClient := &vppcalls.Client{\n\t\t\tSwIfIndex: client.SwIfIndex,\n\t\t\tHostname: string(bytes.SplitN(client.Hostname, []byte{0x00}, 2)[0]),\n\t\t\tID: string(bytes.SplitN(client.ID, []byte{0x00}, 2)[0]),\n\t\t\tWantDhcpEvent: uintToBool(client.WantDHCPEvent),\n\t\t\tSetBroadcastFlag: uintToBool(client.SetBroadcastFlag),\n\t\t\tPID: client.PID,\n\t\t}\n\n\t\t// DHCP lease data\n\t\tdhcpLease := &vppcalls.Lease{\n\t\t\tSwIfIndex: lease.SwIfIndex,\n\t\t\tState: lease.State,\n\t\t\tHostname: string(bytes.SplitN(lease.Hostname, []byte{0x00}, 2)[0]),\n\t\t\tIsIPv6: uintToBool(lease.IsIPv6),\n\t\t\tHostAddress: hostAddr,\n\t\t\tRouterAddress: routerAddr,\n\t\t\tHostMac: hostMac.String(),\n\t\t}\n\n\t\t// DHCP metadata\n\t\tdhcpData[client.SwIfIndex] = &vppcalls.Dhcp{\n\t\t\tClient: dhcpClient,\n\t\t\tLease: dhcpLease,\n\t\t}\n\t}\n\n\treturn dhcpData, nil\n}",
"func (client WorkloadNetworksClient) ListDhcpSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}",
"func (c *OperatorDNS) List() (srvRecords map[string][]SrvRecord, err error) {\n\treturn nil, ErrNotImplemented\n}",
"func (api Solarwinds) ListServers(siteid int) ([]Server, error) {\n\tbody := struct {\n\t\tItems []Server `xml:\"items>server\"`\n\t}{}\n\n\terr := api.get(url.Values{\n\t\t\"service\": []string{\"list_servers\"},\n\t\t\"siteid\": []string{strconv.Itoa(siteid)},\n\t}, &body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body.Items, nil\n}",
"func (s *API) ListDNSZoneNameservers(req *ListDNSZoneNameserversRequest, opts ...scw.RequestOption) (*ListDNSZoneNameserversResponse, error) {\n\tvar err error\n\n\tquery := url.Values{}\n\tparameter.AddToQuery(query, \"organization_id\", req.OrganizationID)\n\n\tif fmt.Sprint(req.DNSZone) == \"\" {\n\t\treturn nil, errors.New(\"field DNSZone cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/domain/v2alpha2/dns-zones/\" + fmt.Sprint(req.DNSZone) + \"/nameservers\",\n\t\tQuery: query,\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ListDNSZoneNameserversResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}",
"func (d *DHCPv4) NTPServers() []net.IP {\n\treturn GetIPs(OptionNTPServers, d.Options)\n}",
"func (s *API) ListDNSZoneNameservers(req *ListDNSZoneNameserversRequest, opts ...scw.RequestOption) (*ListDNSZoneNameserversResponse, error) {\n\tvar err error\n\n\tquery := url.Values{}\n\tparameter.AddToQuery(query, \"project_id\", req.ProjectID)\n\n\tif fmt.Sprint(req.DNSZone) == \"\" {\n\t\treturn nil, errors.New(\"field DNSZone cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/domain/v2beta1/dns-zones/\" + fmt.Sprint(req.DNSZone) + \"/nameservers\",\n\t\tQuery: query,\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ListDNSZoneNameserversResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}",
"func (o LookupVirtualNetworkResultOutput) DnsServers() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v LookupVirtualNetworkResult) []string { return v.DnsServers }).(pulumi.StringArrayOutput)\n}",
"func (api *powerdnsProvider) ListZones() ([]string, error) {\n\tvar result []string\n\tmyZones, err := api.client.Zones().ListZones(context.Background(), api.ServerName)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\tfor _, zone := range myZones {\n\t\tresult = append(result, zone.Name)\n\t}\n\treturn result, nil\n}",
"func (m *VpnConfiguration) GetServers()([]VpnServerable) {\n val, err := m.GetBackingStore().Get(\"servers\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]VpnServerable)\n }\n return nil\n}",
"func (r *ReconcileRethinkDBCluster) listServers(cr *rethinkdbv1alpha1.RethinkDBCluster) ([]corev1.Pod, error) {\n\tfound := &corev1.PodList{}\n\tlabelSelector := labels.SelectorFromSet(labelsForCluster(cr))\n\tlistOps := &client.ListOptions{Namespace: cr.Namespace, LabelSelector: labelSelector}\n\terr := r.client.List(context.TODO(), listOps, found)\n\tif err != nil {\n\t\tlog.Error(err, \"failed to list server pods\")\n\t\treturn nil, err\n\t}\n\treturn found.Items, nil\n}",
"func (o AccountActiveDirectoryOutput) DnsServers() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v AccountActiveDirectory) []string { return v.DnsServers }).(pulumi.StringArrayOutput)\n}",
"func (ml *Memberlist) List() (map[string]*IpAddress, error) {\n\n\t// Send request to service\n\tres, err := http.Post(ml.ServiceUrl+\"/list\",\n\t\t\"application/x-www-form-urlencoded\", nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to list memberlist (%v): %v\\n\", ml, err)\n\t}\n\n\t// Read response body in JSON\n\tbody, err := ioutil.ReadAll(res.Body)\n\tres.Body.Close()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read response of memberlist list (%v): %v\\n\", ml, err)\n\t}\n\n\t// Unmarshall all ip addresses as map\n\tvar memberlist map[string]*IpAddress\n\tjson.Unmarshal(body, &memberlist)\n\n\treturn memberlist, nil\n}",
"func (client DnsClient) listZones(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/zones\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListZonesResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func (ncr *NamespacedConfigMapReflector) List() ([]interface{}, error) {\n\treturn virtualkubelet.List[virtualkubelet.Lister[*corev1.ConfigMap], *corev1.ConfigMap](\n\t\tncr.localConfigMaps,\n\t\tncr.remoteConfigMaps,\n\t)\n}",
"func addHostOnlyDHCPServer(ifname string, d dhcpServer, vbox VBoxManager) error {\n\tname := dhcpPrefix + ifname\n\n\tdhcps, err := listDHCPServers(vbox)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// On some platforms (OSX), creating a host-only adapter adds a default dhcpserver,\n\t// while on others (Windows?) it does not.\n\tcommand := \"add\"\n\tif dhcp, ok := dhcps[name]; ok {\n\t\tcommand = \"modify\"\n\t\tif (dhcp.IPv4.IP.Equal(d.IPv4.IP)) && (dhcp.IPv4.Mask.String() == d.IPv4.Mask.String()) && (dhcp.LowerIP.Equal(d.LowerIP)) && (dhcp.UpperIP.Equal(d.UpperIP)) && dhcp.Enabled {\n\t\t\t// dhcp is up to date\n\t\t\treturn nil\n\t\t}\n\t}\n\n\targs := []string{\"dhcpserver\", command,\n\t\t\"--netname\", name,\n\t\t\"--ip\", d.IPv4.IP.String(),\n\t\t\"--netmask\", net.IP(d.IPv4.Mask).String(),\n\t\t\"--lowerip\", d.LowerIP.String(),\n\t\t\"--upperip\", d.UpperIP.String(),\n\t}\n\tif d.Enabled {\n\t\targs = append(args, \"--enable\")\n\t} else {\n\t\targs = append(args, \"--disable\")\n\t}\n\n\treturn vbox.vbm(args...)\n}",
"func (o AccountActiveDirectoryPtrOutput) DnsServers() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *AccountActiveDirectory) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.DnsServers\n\t}).(pulumi.StringArrayOutput)\n}",
"func (c *Client) ListServers() (*ServerList) {\n\tv := &ServerList{}\n\tURL, err := url.Parse(c.BaseURL)\n\tif err != nil {\n\t\tpanic(\"boom! Busted :F\")\n\t}\n\tURL.Path += \"listservers.php\"\n\tparameters := url.Values{}\n\tparameters.Add(\"key\", c.Token)\n\tparameters.Add(\"login\", c.Login)\n\tURL.RawQuery = parameters.Encode()\n\n\trequest, err := http.NewRequest(\"GET\", URL.String(), nil)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tc.Do(request, &v)\n\treturn v\n}",
"func (server Server) List() map[string]clientproxy.ClientProxy {\n\tparams := make([]interface{}, 0)\n\trequest := utils.Request{Op: \"List\", Params: params}\n\tinvocation := utils.Invocation{Host: server.IP, Port: server.Port, Request: request}\n\treqtor := requestor.Requestor{}\n\t// getting the result\n\treply := reqtor.Invoke(invocation).([]interface{})\n\tresult := reply[0].(map[string]clientproxy.ClientProxy)\n\treturn result\n}",
"func (o LookupServerResultOutput) DnsServers() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v LookupServerResult) []string { return v.DnsServers }).(pulumi.StringArrayOutput)\n}",
"func (db *MongoDB) ListSSHServers() ([]SSHServer, error) {\n\tcur, err := db.instance.Collection(serverCollection).Find(context.Background(), bson.D{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer db.safeClose(cur)\n\tvar servers []SSHServer\n\tfor cur.Next(context.Background()) {\n\t\tvar result SSHServer\n\t\tif err := cur.Decode(&result); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tservers = append(servers, result)\n\t}\n\tSSHServers = servers // update cache\n\treturn servers, nil\n}",
"func DHCPsGet() ([]Dhcp, error) {\n\tvar dhcps []Dhcp\n\tlinks, err := LinksGet()\n\tif err != nil {\n\t\treturn dhcps, err\n\t}\n\tfor _, l := range links {\n\t\td, err := DHCPGet(l.Ifname)\n\t\tif err != nil {\n\t\t\tif _, ok := err.(*NotFoundError); ok == true {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn dhcps, err\n\t\t}\n\n\t\tdhcps = append(dhcps, d)\n\n\t}\n\treturn dhcps, nil\n}",
"func listHostOnlyAdapters(vbox VBoxManager) (map[string]*hostOnlyNetwork, error) {\n\tout, err := vbox.vbmOut(\"list\", \"hostonlyifs\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbyName := map[string]*hostOnlyNetwork{}\n\tbyIP := map[string]*hostOnlyNetwork{}\n\tn := &hostOnlyNetwork{}\n\n\terr = parseKeyValues(out, reColonLine, func(key, val string) error {\n\t\tswitch key {\n\t\tcase \"Name\":\n\t\t\tn.Name = val\n\t\tcase \"GUID\":\n\t\t\tn.GUID = val\n\t\tcase \"DHCP\":\n\t\t\tn.DHCP = (val != \"Disabled\")\n\t\tcase \"IPAddress\":\n\t\t\tn.IPv4.IP = net.ParseIP(val)\n\t\tcase \"NetworkMask\":\n\t\t\tn.IPv4.Mask = parseIPv4Mask(val)\n\t\tcase \"HardwareAddress\":\n\t\t\tmac, err := net.ParseMAC(val)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tn.HwAddr = mac\n\t\tcase \"MediumType\":\n\t\t\tn.Medium = val\n\t\tcase \"Status\":\n\t\t\tn.Status = val\n\t\tcase \"VBoxNetworkName\":\n\t\t\tn.NetworkName = val\n\n\t\t\tif _, present := byName[n.NetworkName]; present {\n\t\t\t\treturn fmt.Errorf(\"VirtualBox is configured with multiple host-only adapters with the same name %q. Please remove one.\", n.NetworkName)\n\t\t\t}\n\t\t\tbyName[n.NetworkName] = n\n\n\t\t\tif len(n.IPv4.IP) != 0 {\n\t\t\t\tif _, present := byIP[n.IPv4.IP.String()]; present {\n\t\t\t\t\treturn fmt.Errorf(\"VirtualBox is configured with multiple host-only adapters with the same IP %q. Please remove one.\", n.IPv4.IP)\n\t\t\t\t}\n\t\t\t\tbyIP[n.IPv4.IP.String()] = n\n\t\t\t}\n\n\t\t\tn = &hostOnlyNetwork{}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn byName, nil\n}",
"func (g *LiveDNS) ListDomains() (domains []Domain, err error) {\n\t_, err = g.client.Get(\"domains\", nil, &domains)\n\treturn\n}",
"func (z *Zone) GetNameServerList() ([]*NameServerRecord) {\n\tmutableMutex.Lock()\n\tdefer mutableMutex.Unlock()\n\tif z.NameServerList == nil {\n\t\tz.NameServerList = make([]*NameServerRecord, 0)\n\t}\n\tnewNameServerList := make([]*NameServerRecord, len(z.NameServerList))\n\tcopy(newNameServerList, z.NameServerList)\n\treturn newNameServerList\n}",
"func (h *ConfigHandler) GetHostList(ctx *fasthttp.RequestCtx) {\n\tuser, ok := common.GlobalSession.GetUser(ctx.ID())\n\tif !ok {\n\t\th.WriteJSON(ctx, nil, common.NewNotLoginError())\n\t\treturn\n\t}\n\n\tconf, err := h.Service.GetVPNConfig(context.Background(), &user)\n\tif err != nil {\n\t\th.WriteJSON(ctx, nil, err)\n\t\treturn\n\t}\n\tdata := vpnConfigResponseEncode(conf)\n\th.WriteJSON(ctx, map[string]interface{}{\n\t\t\"list\": data.Hosts,\n\t}, nil)\n\treturn\n}",
"func (v *Virter) getDHCPHosts(network libvirt.Network) ([]libvirtxml.NetworkDHCPHost, error) {\n\thosts := []libvirtxml.NetworkDHCPHost{}\n\n\tnetworkDescription, err := getNetworkDescription(v.libvirt, network)\n\tif err != nil {\n\t\treturn hosts, err\n\t}\n\tif len(networkDescription.IPs) < 1 {\n\t\treturn hosts, fmt.Errorf(\"no IPs in network\")\n\t}\n\n\tipDescription := networkDescription.IPs[0]\n\n\tdhcpDescription := ipDescription.DHCP\n\tif dhcpDescription == nil {\n\t\treturn hosts, fmt.Errorf(\"no DHCP in network\")\n\t}\n\n\tfor _, host := range dhcpDescription.Hosts {\n\t\thosts = append(hosts, host)\n\t}\n\n\treturn hosts, nil\n}",
"func (conf *Configuration) VirtualMachineList() ([]*VirtualMachine, error) {\n\tctx := context.NewContext(conf.Timeout)\n\tdefer ctx.Cancel()\n\n\treturn conf.VirtualMachineListWithContext(ctx)\n}",
"func (s *API) ListDNSZones(req *ListDNSZonesRequest, opts ...scw.RequestOption) (*ListDNSZonesResponse, error) {\n\tvar err error\n\n\tdefaultPageSize, exist := s.client.GetDefaultPageSize()\n\tif (req.PageSize == nil || *req.PageSize == 0) && exist {\n\t\treq.PageSize = &defaultPageSize\n\t}\n\n\tquery := url.Values{}\n\tparameter.AddToQuery(query, \"page\", req.Page)\n\tparameter.AddToQuery(query, \"page_size\", req.PageSize)\n\tparameter.AddToQuery(query, \"order_by\", req.OrderBy)\n\tparameter.AddToQuery(query, \"domain\", req.Domain)\n\tparameter.AddToQuery(query, \"dns_zone\", req.DNSZone)\n\tparameter.AddToQuery(query, \"organization_id\", req.OrganizationID)\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/domain/v2alpha2/dns-zones\",\n\t\tQuery: query,\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ListDNSZonesResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}",
"func (i *DHCPInterface) SetDNSServers(dns []string) {\n\tfor _, server := range dns {\n\t\ti.dnsServers = append(i.dnsServers, []byte(net.ParseIP(server).To4())...)\n\t}\n}",
"func (client WorkloadNetworksClient) ListDhcp(ctx context.Context, resourceGroupName string, privateCloudName string) (result WorkloadNetworkDhcpListPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/WorkloadNetworksClient.ListDhcp\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.wndl.Response.Response != nil {\n\t\t\t\tsc = result.wndl.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: client.SubscriptionID,\n\t\t\tConstraints: []validation.Constraint{{Target: \"client.SubscriptionID\", Name: validation.MinLength, Rule: 1, Chain: nil}}},\n\t\t{TargetValue: resourceGroupName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"resourceGroupName\", Name: validation.MaxLength, Rule: 90, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.MinLength, Rule: 1, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.Pattern, Rule: `^[-\\w\\._\\(\\)]+$`, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewError(\"avs.WorkloadNetworksClient\", \"ListDhcp\", err.Error())\n\t}\n\n\tresult.fn = client.listDhcpNextResults\n\treq, err := client.ListDhcpPreparer(ctx, resourceGroupName, privateCloudName)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"avs.WorkloadNetworksClient\", \"ListDhcp\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListDhcpSender(req)\n\tif err != nil {\n\t\tresult.wndl.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"avs.WorkloadNetworksClient\", \"ListDhcp\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.wndl, err = client.ListDhcpResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"avs.WorkloadNetworksClient\", \"ListDhcp\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\tif result.wndl.hasNextLink() 
&& result.wndl.IsEmpty() {\n\t\terr = result.NextWithContext(ctx)\n\t\treturn\n\t}\n\n\treturn\n}",
"func DHCPsDelete() error {\n\tdhcps, err := DHCPsGet()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, d := range dhcps {\n\t\tif isUnmanaged(UnmanagedID(d.Ifname), LINKTYPE) {\n\t\t\tlogger.Log.Info(fmt.Sprintf(\"Skipping Unmanaged Link %v DHCP configuration\", d.Ifname))\n\t\t\tcontinue\n\t\t}\n\t\terr = DHCPDelete(d.Ifname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (z *zones) List() ([]dnsprovider.Zone, error) {\n\tsnapshot := z.dnsView.Snapshot()\n\n\tvar zones []dnsprovider.Zone\n\tzoneInfos := snapshot.ListZones()\n\tfor i := range zoneInfos {\n\t\tzones = append(zones, &zone{dnsView: z.dnsView, zoneInfo: zoneInfos[i]})\n\t}\n\treturn zones, nil\n}",
"func (s *postgresqlServerLister) List(selector labels.Selector) (ret []*v1alpha1.PostgresqlServer, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.PostgresqlServer))\n\t})\n\treturn ret, err\n}",
"func (s *API) ListDNSZones(req *ListDNSZonesRequest, opts ...scw.RequestOption) (*ListDNSZonesResponse, error) {\n\tvar err error\n\n\tdefaultPageSize, exist := s.client.GetDefaultPageSize()\n\tif (req.PageSize == nil || *req.PageSize == 0) && exist {\n\t\treq.PageSize = &defaultPageSize\n\t}\n\n\tquery := url.Values{}\n\tparameter.AddToQuery(query, \"organization_id\", req.OrganizationID)\n\tparameter.AddToQuery(query, \"project_id\", req.ProjectID)\n\tparameter.AddToQuery(query, \"order_by\", req.OrderBy)\n\tparameter.AddToQuery(query, \"page\", req.Page)\n\tparameter.AddToQuery(query, \"page_size\", req.PageSize)\n\tparameter.AddToQuery(query, \"domain\", req.Domain)\n\tparameter.AddToQuery(query, \"dns_zone\", req.DNSZone)\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/domain/v2beta1/dns-zones\",\n\t\tQuery: query,\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp ListDNSZonesResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}",
"func (o *VirtualizationVmwareVirtualMachineAllOf) SetDnsSuffixList(v []string) {\n\to.DnsSuffixList = v\n}",
"func (w *wireguardServerConfig) List() ([]api.WireguardServerConfig, error) {\n\tvar wgscList []api.WireguardServerConfig\n\treturn wgscList, w.store.Search(w.prefix, regexp.MustCompile(\".*\"), func(k, v []byte) error {\n\t\tvar obj api.WireguardServerConfig\n\t\tif err := json.Unmarshal(v, &obj); err != nil {\n\t\t\treturn err\n\t\t}\n\t\twgscList = append(wgscList, obj)\n\t\treturn nil\n\t})\n}",
"func ListZones(r *route53.Route53) {\n\tresp, err := r.ListHostedZones(&route53.ListHostedZonesRequest{})\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tfmt.Println(*resp)\n}",
"func (p *listDiscoveryPlugin) ListDomains(provider *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) ([]core.KeystoneDomain, error) {\n\tclient, err := openstack.NewIdentityV3(provider, eo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//gophercloud does not support domain listing yet - do it manually\n\turl := client.ServiceURL(\"domains\")\n\tvar result gophercloud.Result\n\t_, err = client.Get(url, &result.Body, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data struct {\n\t\tDomains []core.KeystoneDomain `json:\"domains\"`\n\t}\n\terr = result.ExtractInto(&data)\n\treturn data.Domains, err\n}",
"func (o *VirtualizationVmwareVirtualMachineAllOf) GetDnsSuffixList() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn o.DnsSuffixList\n}",
"func (p *Proxy) ServerNames() ([]string, error) {\n ns := []string{\"Error-Getting-Server-Names\"}\n rcon, err := p.GetRcon()\n if err != nil { return ns, err }\n\n command := fmt.Sprintf(\"bconf getServers().getKeys()\")\n reply, err := rcon.Send(command)\n if err != nil { return ns, err }\n\n reply = strings.Trim(reply, \"[] \\n\")\n names := strings.Split(reply, \",\")\n for i, n := range names {\n names[i] = strings.Trim(n, \" \")\n }\n return names, nil\n}",
"func (o *NetworkDns) GetNameServers() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn o.NameServers\n}",
"func (api *powerdnsProvider) GetNameservers(string) ([]*models.Nameserver, error) {\n\tvar r []string\n\tfor _, j := range api.nameservers {\n\t\tr = append(r, j.Name)\n\t}\n\treturn models.ToNameservers(r)\n}",
"func (client *Client) ListNamespacedConfigMaps(request *ListNamespacedConfigMapsRequest) (response *ListNamespacedConfigMapsResponse, err error) {\n\tresponse = CreateListNamespacedConfigMapsResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func (r Dns_Domain_Registration) GetDomainNameservers() (resp []datatypes.Container_Dns_Domain_Registration_Nameserver, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain_Registration\", \"getDomainNameservers\", nil, &r.Options, &resp)\n\treturn\n}",
"func (c *ThreeScaleClient) ListProxyConfig(svcId string, env string) (ProxyConfigList, error) {\n\tvar pc ProxyConfigList\n\n\tendpoint := fmt.Sprintf(proxyConfigList, svcId, env)\n\treq, err := c.buildGetReq(endpoint)\n\tif err != nil {\n\t\treturn pc, httpReqError\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\n\tvalues := url.Values{}\n\treq.URL.RawQuery = values.Encode()\n\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn pc, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\terr = handleJsonResp(resp, http.StatusOK, &pc)\n\treturn pc, err\n}",
"func (ovs OvsdbClient) ListDbs() ([]string, error) {\n\tvar dbs []string\n\terr := ovs.rpcClient.Call(\"list_dbs\", nil, &dbs)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ListDbs failure - %v\", err)\n\t}\n\treturn dbs, err\n}",
"func (w *Watcher) GetDomainList() ([]string) {\n\tmutableMutex.Lock()\n\tdefer mutableMutex.Unlock()\n\tif w.ZoneMap == nil {\n\t\tw.ZoneMap = make(map[string]*Zone)\n\t}\n\tdomainList := make([]string, 0, len(w.ZoneMap))\n\tfor d := range w.ZoneMap {\n\t\tdomainList = append(domainList, d)\n\t}\n\treturn domainList\n}",
"func DHCPsConfigure(dhcp []Dhcp) error {\n\tfor _, d := range dhcp {\n\t\tif isUnmanaged(UnmanagedID(d.Ifname), LINKTYPE) {\n\t\t\tlogger.Log.Info(fmt.Sprintf(\"Skipping Unmanaged Link %v DHCP configuration\", d.Ifname))\n\t\t\tcontinue\n\t\t}\n\t\terr := DHCPDelete(d.Ifname)\n\t\tif err != nil {\n\t\t\tif _, ok := err.(*NotFoundError); ok != true {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif err := DHCPCreate(d); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func CyberghostServers() []models.CyberghostServer {\n\treturn []models.CyberghostServer{\n\t\t{Region: \"Albania\", Group: \"Premium TCP Europe\", Hostname: \"97-1-al.cg-dialup.net\", IPs: []net.IP{{31, 171, 155, 3}, {31, 171, 155, 4}, {31, 171, 155, 7}, {31, 171, 155, 8}, {31, 171, 155, 9}, {31, 171, 155, 10}, {31, 171, 155, 11}, {31, 171, 155, 12}, {31, 171, 155, 13}, {31, 171, 155, 14}}},\n\t\t{Region: \"Albania\", Group: \"Premium UDP Europe\", Hostname: \"87-1-al.cg-dialup.net\", IPs: []net.IP{{31, 171, 155, 4}, {31, 171, 155, 5}, {31, 171, 155, 6}, {31, 171, 155, 7}, {31, 171, 155, 8}, {31, 171, 155, 9}, {31, 171, 155, 10}, {31, 171, 155, 11}, {31, 171, 155, 13}, {31, 171, 155, 14}}},\n\t\t{Region: \"Algeria\", Group: \"Premium TCP Europe\", Hostname: \"97-1-dz.cg-dialup.net\", IPs: []net.IP{{176, 125, 228, 132}, {176, 125, 228, 134}, {176, 125, 228, 135}, {176, 125, 228, 136}, {176, 125, 228, 137}, {176, 125, 228, 138}, {176, 125, 228, 139}, {176, 125, 228, 140}, {176, 125, 228, 141}, {176, 125, 228, 142}}},\n\t\t{Region: \"Algeria\", Group: \"Premium UDP Europe\", Hostname: \"87-1-dz.cg-dialup.net\", IPs: []net.IP{{176, 125, 228, 131}, {176, 125, 228, 133}, {176, 125, 228, 134}, {176, 125, 228, 136}, {176, 125, 228, 137}, {176, 125, 228, 139}, {176, 125, 228, 140}, {176, 125, 228, 141}, {176, 125, 228, 142}, {176, 125, 228, 143}}},\n\t\t{Region: \"Andorra\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ad.cg-dialup.net\", IPs: []net.IP{{188, 241, 82, 137}, {188, 241, 82, 138}, {188, 241, 82, 140}, {188, 241, 82, 142}, {188, 241, 82, 147}, {188, 241, 82, 155}, {188, 241, 82, 159}, {188, 241, 82, 160}, {188, 241, 82, 161}, {188, 241, 82, 166}}},\n\t\t{Region: \"Andorra\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ad.cg-dialup.net\", IPs: []net.IP{{188, 241, 82, 133}, {188, 241, 82, 134}, {188, 241, 82, 136}, {188, 241, 82, 137}, {188, 241, 82, 146}, {188, 241, 82, 153}, {188, 241, 82, 155}, {188, 241, 82, 160}, {188, 241, 82, 164}, {188, 241, 82, 
168}}},\n\t\t{Region: \"Argentina\", Group: \"Premium TCP USA\", Hostname: \"93-1-ar.cg-dialup.net\", IPs: []net.IP{{146, 70, 39, 4}, {146, 70, 39, 9}, {146, 70, 39, 15}, {146, 70, 39, 19}, {146, 70, 39, 135}, {146, 70, 39, 136}, {146, 70, 39, 139}, {146, 70, 39, 142}, {146, 70, 39, 143}, {146, 70, 39, 145}}},\n\t\t{Region: \"Argentina\", Group: \"Premium UDP USA\", Hostname: \"94-1-ar.cg-dialup.net\", IPs: []net.IP{{146, 70, 39, 3}, {146, 70, 39, 5}, {146, 70, 39, 6}, {146, 70, 39, 8}, {146, 70, 39, 11}, {146, 70, 39, 12}, {146, 70, 39, 131}, {146, 70, 39, 134}, {146, 70, 39, 142}, {146, 70, 39, 143}}},\n\t\t{Region: \"Armenia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-am.cg-dialup.net\", IPs: []net.IP{{185, 253, 160, 131}, {185, 253, 160, 134}, {185, 253, 160, 136}, {185, 253, 160, 137}, {185, 253, 160, 138}, {185, 253, 160, 139}, {185, 253, 160, 140}, {185, 253, 160, 141}, {185, 253, 160, 142}, {185, 253, 160, 143}}},\n\t\t{Region: \"Armenia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-am.cg-dialup.net\", IPs: []net.IP{{185, 253, 160, 131}, {185, 253, 160, 132}, {185, 253, 160, 133}, {185, 253, 160, 134}, {185, 253, 160, 135}, {185, 253, 160, 136}, {185, 253, 160, 137}, {185, 253, 160, 141}, {185, 253, 160, 142}, {185, 253, 160, 144}}},\n\t\t{Region: \"Australia\", Group: \"Premium TCP Asia\", Hostname: \"96-1-au.cg-dialup.net\", IPs: []net.IP{{154, 16, 81, 22}, {181, 214, 215, 7}, {181, 214, 215, 15}, {181, 214, 215, 18}, {191, 101, 210, 15}, {191, 101, 210, 50}, {191, 101, 210, 60}, {202, 60, 80, 78}, {202, 60, 80, 82}, {202, 60, 80, 102}}},\n\t\t{Region: \"Australia\", Group: \"Premium UDP Asia\", Hostname: \"95-1-au.cg-dialup.net\", IPs: []net.IP{{181, 214, 215, 4}, {181, 214, 215, 16}, {191, 101, 210, 18}, {191, 101, 210, 21}, {191, 101, 210, 36}, {191, 101, 210, 58}, {191, 101, 210, 60}, {202, 60, 80, 74}, {202, 60, 80, 106}, {202, 60, 80, 124}}},\n\t\t{Region: \"Austria\", Group: \"Premium TCP Europe\", Hostname: \"97-1-at.cg-dialup.net\", 
IPs: []net.IP{{37, 19, 223, 9}, {37, 19, 223, 16}, {37, 19, 223, 113}, {37, 19, 223, 205}, {37, 19, 223, 211}, {37, 19, 223, 218}, {37, 19, 223, 223}, {37, 19, 223, 245}, {37, 120, 155, 104}, {89, 187, 168, 174}}},\n\t\t{Region: \"Austria\", Group: \"Premium UDP Europe\", Hostname: \"87-1-at.cg-dialup.net\", IPs: []net.IP{{37, 19, 223, 202}, {37, 19, 223, 205}, {37, 19, 223, 229}, {37, 19, 223, 239}, {37, 19, 223, 241}, {37, 19, 223, 243}, {37, 120, 155, 103}, {89, 187, 168, 160}, {89, 187, 168, 174}, {89, 187, 168, 181}}},\n\t\t{Region: \"Bahamas\", Group: \"Premium TCP USA\", Hostname: \"93-1-bs.cg-dialup.net\", IPs: []net.IP{{95, 181, 238, 131}, {95, 181, 238, 136}, {95, 181, 238, 142}, {95, 181, 238, 144}, {95, 181, 238, 146}, {95, 181, 238, 147}, {95, 181, 238, 148}, {95, 181, 238, 152}, {95, 181, 238, 153}, {95, 181, 238, 155}}},\n\t\t{Region: \"Bahamas\", Group: \"Premium UDP USA\", Hostname: \"94-1-bs.cg-dialup.net\", IPs: []net.IP{{95, 181, 238, 131}, {95, 181, 238, 138}, {95, 181, 238, 140}, {95, 181, 238, 141}, {95, 181, 238, 146}, {95, 181, 238, 147}, {95, 181, 238, 148}, {95, 181, 238, 151}, {95, 181, 238, 153}, {95, 181, 238, 155}}},\n\t\t{Region: \"Bangladesh\", Group: \"Premium TCP Asia\", Hostname: \"96-1-bd.cg-dialup.net\", IPs: []net.IP{{84, 252, 93, 132}, {84, 252, 93, 133}, {84, 252, 93, 135}, {84, 252, 93, 138}, {84, 252, 93, 139}, {84, 252, 93, 141}, {84, 252, 93, 142}, {84, 252, 93, 143}, {84, 252, 93, 144}, {84, 252, 93, 145}}},\n\t\t{Region: \"Bangladesh\", Group: \"Premium UDP Asia\", Hostname: \"95-1-bd.cg-dialup.net\", IPs: []net.IP{{84, 252, 93, 131}, {84, 252, 93, 133}, {84, 252, 93, 134}, {84, 252, 93, 135}, {84, 252, 93, 136}, {84, 252, 93, 139}, {84, 252, 93, 140}, {84, 252, 93, 141}, {84, 252, 93, 143}, {84, 252, 93, 145}}},\n\t\t{Region: \"Belarus\", Group: \"Premium TCP Europe\", Hostname: \"97-1-by.cg-dialup.net\", IPs: []net.IP{{45, 132, 194, 5}, {45, 132, 194, 6}, {45, 132, 194, 23}, {45, 132, 194, 24}, {45, 132, 194, 25}, 
{45, 132, 194, 27}, {45, 132, 194, 30}, {45, 132, 194, 35}, {45, 132, 194, 44}, {45, 132, 194, 49}}},\n\t\t{Region: \"Belarus\", Group: \"Premium UDP Europe\", Hostname: \"87-1-by.cg-dialup.net\", IPs: []net.IP{{45, 132, 194, 6}, {45, 132, 194, 8}, {45, 132, 194, 9}, {45, 132, 194, 11}, {45, 132, 194, 15}, {45, 132, 194, 19}, {45, 132, 194, 20}, {45, 132, 194, 23}, {45, 132, 194, 24}, {45, 132, 194, 26}}},\n\t\t{Region: \"Belgium\", Group: \"Premium TCP Europe\", Hostname: \"97-1-be.cg-dialup.net\", IPs: []net.IP{{37, 120, 143, 165}, {37, 120, 143, 166}, {185, 210, 217, 10}, {185, 210, 217, 248}, {193, 9, 114, 211}, {193, 9, 114, 220}, {194, 110, 115, 195}, {194, 110, 115, 199}, {194, 110, 115, 205}, {194, 110, 115, 238}}},\n\t\t{Region: \"Belgium\", Group: \"Premium UDP Europe\", Hostname: \"87-1-be.cg-dialup.net\", IPs: []net.IP{{37, 120, 143, 163}, {37, 120, 143, 167}, {185, 210, 217, 9}, {185, 210, 217, 13}, {185, 210, 217, 55}, {185, 210, 217, 251}, {185, 232, 21, 120}, {194, 110, 115, 214}, {194, 110, 115, 218}, {194, 110, 115, 236}}},\n\t\t{Region: \"Bosnia and Herzegovina\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ba.cg-dialup.net\", IPs: []net.IP{{185, 99, 3, 57}, {185, 99, 3, 58}, {185, 99, 3, 72}, {185, 99, 3, 73}, {185, 99, 3, 74}, {185, 99, 3, 130}, {185, 99, 3, 131}, {185, 99, 3, 134}, {185, 99, 3, 135}, {185, 99, 3, 136}}},\n\t\t{Region: \"Bosnia and Herzegovina\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ba.cg-dialup.net\", IPs: []net.IP{{185, 99, 3, 57}, {185, 99, 3, 58}, {185, 99, 3, 72}, {185, 99, 3, 73}, {185, 99, 3, 74}, {185, 99, 3, 130}, {185, 99, 3, 131}, {185, 99, 3, 134}, {185, 99, 3, 135}, {185, 99, 3, 136}}},\n\t\t{Region: \"Brazil\", Group: \"Premium TCP USA\", Hostname: \"93-1-br.cg-dialup.net\", IPs: []net.IP{{188, 241, 177, 5}, {188, 241, 177, 11}, {188, 241, 177, 38}, {188, 241, 177, 45}, {188, 241, 177, 132}, {188, 241, 177, 135}, {188, 241, 177, 136}, {188, 241, 177, 152}, {188, 241, 177, 153}, {188, 241, 177, 
156}}},\n\t\t{Region: \"Brazil\", Group: \"Premium UDP USA\", Hostname: \"94-1-br.cg-dialup.net\", IPs: []net.IP{{188, 241, 177, 8}, {188, 241, 177, 37}, {188, 241, 177, 40}, {188, 241, 177, 42}, {188, 241, 177, 45}, {188, 241, 177, 135}, {188, 241, 177, 139}, {188, 241, 177, 149}, {188, 241, 177, 152}, {188, 241, 177, 154}}},\n\t\t{Region: \"Bulgaria\", Group: \"Premium TCP Europe\", Hostname: \"97-1-bg.cg-dialup.net\", IPs: []net.IP{{37, 120, 152, 99}, {37, 120, 152, 101}, {37, 120, 152, 103}, {37, 120, 152, 104}, {37, 120, 152, 105}, {37, 120, 152, 106}, {37, 120, 152, 107}, {37, 120, 152, 108}, {37, 120, 152, 109}, {37, 120, 152, 110}}},\n\t\t{Region: \"Bulgaria\", Group: \"Premium UDP Europe\", Hostname: \"87-1-bg.cg-dialup.net\", IPs: []net.IP{{37, 120, 152, 99}, {37, 120, 152, 100}, {37, 120, 152, 101}, {37, 120, 152, 102}, {37, 120, 152, 103}, {37, 120, 152, 105}, {37, 120, 152, 106}, {37, 120, 152, 107}, {37, 120, 152, 108}, {37, 120, 152, 109}}},\n\t\t{Region: \"Cambodia\", Group: \"Premium TCP Asia\", Hostname: \"96-1-kh.cg-dialup.net\", IPs: []net.IP{{188, 215, 235, 35}, {188, 215, 235, 36}, {188, 215, 235, 38}, {188, 215, 235, 39}, {188, 215, 235, 45}, {188, 215, 235, 49}, {188, 215, 235, 51}, {188, 215, 235, 53}, {188, 215, 235, 54}, {188, 215, 235, 57}}},\n\t\t{Region: \"Cambodia\", Group: \"Premium UDP Asia\", Hostname: \"95-1-kh.cg-dialup.net\", IPs: []net.IP{{188, 215, 235, 36}, {188, 215, 235, 40}, {188, 215, 235, 42}, {188, 215, 235, 44}, {188, 215, 235, 46}, {188, 215, 235, 47}, {188, 215, 235, 48}, {188, 215, 235, 50}, {188, 215, 235, 55}, {188, 215, 235, 57}}},\n\t\t{Region: \"Canada\", Group: \"Premium TCP USA\", Hostname: \"93-1-ca.cg-dialup.net\", IPs: []net.IP{{66, 115, 142, 136}, {66, 115, 142, 139}, {66, 115, 142, 156}, {66, 115, 142, 162}, {66, 115, 142, 172}, {104, 200, 151, 99}, {104, 200, 151, 111}, {104, 200, 151, 153}, {104, 200, 151, 164}, {172, 98, 89, 137}}},\n\t\t{Region: \"Canada\", Group: \"Premium UDP USA\", Hostname: 
\"94-1-ca.cg-dialup.net\", IPs: []net.IP{{66, 115, 142, 135}, {66, 115, 142, 154}, {66, 115, 142, 165}, {104, 200, 151, 32}, {104, 200, 151, 57}, {104, 200, 151, 85}, {104, 200, 151, 86}, {104, 200, 151, 147}, {172, 98, 89, 144}, {172, 98, 89, 173}}},\n\t\t{Region: \"Chile\", Group: \"Premium TCP USA\", Hostname: \"93-1-cl.cg-dialup.net\", IPs: []net.IP{{146, 70, 11, 3}, {146, 70, 11, 6}, {146, 70, 11, 7}, {146, 70, 11, 8}, {146, 70, 11, 9}, {146, 70, 11, 10}, {146, 70, 11, 11}, {146, 70, 11, 12}, {146, 70, 11, 13}, {146, 70, 11, 14}}},\n\t\t{Region: \"Chile\", Group: \"Premium UDP USA\", Hostname: \"94-1-cl.cg-dialup.net\", IPs: []net.IP{{146, 70, 11, 3}, {146, 70, 11, 4}, {146, 70, 11, 6}, {146, 70, 11, 7}, {146, 70, 11, 8}, {146, 70, 11, 9}, {146, 70, 11, 10}, {146, 70, 11, 11}, {146, 70, 11, 13}, {146, 70, 11, 14}}},\n\t\t{Region: \"China\", Group: \"Premium TCP Asia\", Hostname: \"96-1-cn.cg-dialup.net\", IPs: []net.IP{{188, 241, 80, 131}, {188, 241, 80, 132}, {188, 241, 80, 133}, {188, 241, 80, 134}, {188, 241, 80, 135}, {188, 241, 80, 137}, {188, 241, 80, 139}, {188, 241, 80, 140}, {188, 241, 80, 141}, {188, 241, 80, 142}}},\n\t\t{Region: \"China\", Group: \"Premium UDP Asia\", Hostname: \"95-1-cn.cg-dialup.net\", IPs: []net.IP{{188, 241, 80, 131}, {188, 241, 80, 132}, {188, 241, 80, 133}, {188, 241, 80, 134}, {188, 241, 80, 135}, {188, 241, 80, 136}, {188, 241, 80, 137}, {188, 241, 80, 138}, {188, 241, 80, 139}, {188, 241, 80, 142}}},\n\t\t{Region: \"Colombia\", Group: \"Premium TCP USA\", Hostname: \"93-1-co.cg-dialup.net\", IPs: []net.IP{{146, 70, 9, 3}, {146, 70, 9, 4}, {146, 70, 9, 5}, {146, 70, 9, 7}, {146, 70, 9, 9}, {146, 70, 9, 10}, {146, 70, 9, 11}, {146, 70, 9, 12}, {146, 70, 9, 13}, {146, 70, 9, 14}}},\n\t\t{Region: \"Colombia\", Group: \"Premium UDP USA\", Hostname: \"94-1-co.cg-dialup.net\", IPs: []net.IP{{146, 70, 9, 3}, {146, 70, 9, 4}, {146, 70, 9, 5}, {146, 70, 9, 6}, {146, 70, 9, 7}, {146, 70, 9, 8}, {146, 70, 9, 9}, {146, 70, 9, 10}, 
{146, 70, 9, 11}, {146, 70, 9, 12}}},\n\t\t{Region: \"Costa Rica\", Group: \"Premium TCP USA\", Hostname: \"93-1-cr.cg-dialup.net\", IPs: []net.IP{{146, 70, 10, 3}, {146, 70, 10, 4}, {146, 70, 10, 5}, {146, 70, 10, 6}, {146, 70, 10, 7}, {146, 70, 10, 8}, {146, 70, 10, 10}, {146, 70, 10, 11}, {146, 70, 10, 12}, {146, 70, 10, 13}}},\n\t\t{Region: \"Costa Rica\", Group: \"Premium UDP USA\", Hostname: \"94-1-cr.cg-dialup.net\", IPs: []net.IP{{146, 70, 10, 3}, {146, 70, 10, 4}, {146, 70, 10, 5}, {146, 70, 10, 6}, {146, 70, 10, 7}, {146, 70, 10, 8}, {146, 70, 10, 9}, {146, 70, 10, 11}, {146, 70, 10, 12}, {146, 70, 10, 14}}},\n\t\t{Region: \"Croatia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-hr.cg-dialup.net\", IPs: []net.IP{{146, 70, 8, 5}, {146, 70, 8, 8}, {146, 70, 8, 9}, {146, 70, 8, 10}, {146, 70, 8, 11}, {146, 70, 8, 12}, {146, 70, 8, 13}, {146, 70, 8, 14}, {146, 70, 8, 15}, {146, 70, 8, 16}}},\n\t\t{Region: \"Croatia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-hr.cg-dialup.net\", IPs: []net.IP{{146, 70, 8, 3}, {146, 70, 8, 4}, {146, 70, 8, 5}, {146, 70, 8, 6}, {146, 70, 8, 7}, {146, 70, 8, 9}, {146, 70, 8, 11}, {146, 70, 8, 13}, {146, 70, 8, 14}, {146, 70, 8, 16}}},\n\t\t{Region: \"Cyprus\", Group: \"Premium TCP Europe\", Hostname: \"97-1-cy.cg-dialup.net\", IPs: []net.IP{{185, 253, 162, 131}, {185, 253, 162, 133}, {185, 253, 162, 135}, {185, 253, 162, 136}, {185, 253, 162, 137}, {185, 253, 162, 139}, {185, 253, 162, 140}, {185, 253, 162, 142}, {185, 253, 162, 143}, {185, 253, 162, 144}}},\n\t\t{Region: \"Cyprus\", Group: \"Premium UDP Europe\", Hostname: \"87-1-cy.cg-dialup.net\", IPs: []net.IP{{185, 253, 162, 131}, {185, 253, 162, 132}, {185, 253, 162, 134}, {185, 253, 162, 135}, {185, 253, 162, 137}, {185, 253, 162, 138}, {185, 253, 162, 140}, {185, 253, 162, 142}, {185, 253, 162, 143}, {185, 253, 162, 144}}},\n\t\t{Region: \"Czech Republic\", Group: \"Premium TCP Europe\", Hostname: \"97-1-cz.cg-dialup.net\", IPs: []net.IP{{138, 199, 56, 235}, 
{138, 199, 56, 236}, {138, 199, 56, 237}, {138, 199, 56, 245}, {138, 199, 56, 246}, {138, 199, 56, 249}, {195, 181, 161, 12}, {195, 181, 161, 16}, {195, 181, 161, 20}, {195, 181, 161, 23}}},\n\t\t{Region: \"Czech Republic\", Group: \"Premium UDP Europe\", Hostname: \"87-1-cz.cg-dialup.net\", IPs: []net.IP{{138, 199, 56, 227}, {138, 199, 56, 229}, {138, 199, 56, 231}, {138, 199, 56, 235}, {138, 199, 56, 241}, {138, 199, 56, 247}, {195, 181, 161, 10}, {195, 181, 161, 16}, {195, 181, 161, 18}, {195, 181, 161, 22}}},\n\t\t{Region: \"Denmark\", Group: \"Premium TCP Europe\", Hostname: \"97-1-dk.cg-dialup.net\", IPs: []net.IP{{37, 120, 145, 83}, {37, 120, 145, 88}, {37, 120, 145, 93}, {37, 120, 194, 36}, {37, 120, 194, 56}, {37, 120, 194, 57}, {95, 174, 65, 163}, {95, 174, 65, 174}, {185, 206, 224, 238}, {185, 206, 224, 243}}},\n\t\t{Region: \"Denmark\", Group: \"Premium UDP Europe\", Hostname: \"87-1-dk.cg-dialup.net\", IPs: []net.IP{{37, 120, 194, 39}, {95, 174, 65, 167}, {95, 174, 65, 170}, {185, 206, 224, 227}, {185, 206, 224, 230}, {185, 206, 224, 236}, {185, 206, 224, 238}, {185, 206, 224, 245}, {185, 206, 224, 250}, {185, 206, 224, 254}}},\n\t\t{Region: \"Egypt\", Group: \"Premium TCP Europe\", Hostname: \"97-1-eg.cg-dialup.net\", IPs: []net.IP{{188, 214, 122, 40}, {188, 214, 122, 42}, {188, 214, 122, 43}, {188, 214, 122, 45}, {188, 214, 122, 48}, {188, 214, 122, 50}, {188, 214, 122, 52}, {188, 214, 122, 60}, {188, 214, 122, 70}, {188, 214, 122, 73}}},\n\t\t{Region: \"Egypt\", Group: \"Premium UDP Europe\", Hostname: \"87-1-eg.cg-dialup.net\", IPs: []net.IP{{188, 214, 122, 37}, {188, 214, 122, 38}, {188, 214, 122, 44}, {188, 214, 122, 54}, {188, 214, 122, 57}, {188, 214, 122, 59}, {188, 214, 122, 60}, {188, 214, 122, 61}, {188, 214, 122, 67}, {188, 214, 122, 69}}},\n\t\t{Region: \"Estonia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ee.cg-dialup.net\", IPs: []net.IP{{95, 153, 32, 83}, {95, 153, 32, 84}, {95, 153, 32, 86}, {95, 153, 32, 88}, {95, 153, 32, 
89}, {95, 153, 32, 90}, {95, 153, 32, 91}, {95, 153, 32, 92}, {95, 153, 32, 93}, {95, 153, 32, 94}}},\n\t\t{Region: \"Estonia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ee.cg-dialup.net\", IPs: []net.IP{{95, 153, 32, 83}, {95, 153, 32, 84}, {95, 153, 32, 85}, {95, 153, 32, 87}, {95, 153, 32, 88}, {95, 153, 32, 89}, {95, 153, 32, 90}, {95, 153, 32, 91}, {95, 153, 32, 92}, {95, 153, 32, 94}}},\n\t\t{Region: \"Finland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-fi.cg-dialup.net\", IPs: []net.IP{{188, 126, 89, 99}, {188, 126, 89, 102}, {188, 126, 89, 105}, {188, 126, 89, 107}, {188, 126, 89, 108}, {188, 126, 89, 110}, {188, 126, 89, 112}, {188, 126, 89, 115}, {188, 126, 89, 116}, {188, 126, 89, 119}}},\n\t\t{Region: \"Finland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-fi.cg-dialup.net\", IPs: []net.IP{{188, 126, 89, 101}, {188, 126, 89, 104}, {188, 126, 89, 109}, {188, 126, 89, 110}, {188, 126, 89, 111}, {188, 126, 89, 113}, {188, 126, 89, 114}, {188, 126, 89, 115}, {188, 126, 89, 122}, {188, 126, 89, 124}}},\n\t\t{Region: \"France\", Group: \"Premium TCP Europe\", Hostname: \"97-1-fr.cg-dialup.net\", IPs: []net.IP{{84, 17, 43, 167}, {84, 17, 60, 147}, {84, 17, 60, 155}, {151, 106, 8, 108}, {191, 101, 31, 202}, {191, 101, 31, 254}, {191, 101, 217, 45}, {191, 101, 217, 159}, {191, 101, 217, 211}, {191, 101, 217, 240}}},\n\t\t{Region: \"France\", Group: \"Premium UDP Europe\", Hostname: \"87-1-fr.cg-dialup.net\", IPs: []net.IP{{84, 17, 60, 59}, {84, 17, 60, 121}, {191, 101, 31, 81}, {191, 101, 31, 84}, {191, 101, 31, 126}, {191, 101, 31, 127}, {191, 101, 217, 140}, {191, 101, 217, 201}, {191, 101, 217, 206}, {191, 101, 217, 211}}},\n\t\t{Region: \"Georgia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ge.cg-dialup.net\", IPs: []net.IP{{95, 181, 236, 131}, {95, 181, 236, 132}, {95, 181, 236, 133}, {95, 181, 236, 134}, {95, 181, 236, 135}, {95, 181, 236, 136}, {95, 181, 236, 138}, {95, 181, 236, 139}, {95, 181, 236, 142}, {95, 181, 236, 
144}}},\n\t\t{Region: \"Georgia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ge.cg-dialup.net\", IPs: []net.IP{{95, 181, 236, 132}, {95, 181, 236, 133}, {95, 181, 236, 134}, {95, 181, 236, 136}, {95, 181, 236, 137}, {95, 181, 236, 139}, {95, 181, 236, 141}, {95, 181, 236, 142}, {95, 181, 236, 143}, {95, 181, 236, 144}}},\n\t\t{Region: \"Germany\", Group: \"Premium TCP Europe\", Hostname: \"97-1-de.cg-dialup.net\", IPs: []net.IP{{84, 17, 48, 39}, {84, 17, 48, 234}, {84, 17, 49, 106}, {84, 17, 49, 112}, {84, 17, 49, 218}, {154, 28, 188, 35}, {154, 28, 188, 66}, {154, 28, 188, 133}, {154, 28, 188, 144}, {154, 28, 188, 145}}},\n\t\t{Region: \"Germany\", Group: \"Premium UDP Europe\", Hostname: \"87-1-de.cg-dialup.net\", IPs: []net.IP{{84, 17, 48, 41}, {84, 17, 48, 224}, {84, 17, 49, 95}, {84, 17, 49, 236}, {84, 17, 49, 241}, {138, 199, 36, 151}, {154, 13, 1, 177}, {154, 28, 188, 73}, {154, 28, 188, 76}, {154, 28, 188, 93}}},\n\t\t{Region: \"Greece\", Group: \"Premium TCP Europe\", Hostname: \"97-1-gr.cg-dialup.net\", IPs: []net.IP{{185, 51, 134, 163}, {185, 51, 134, 165}, {185, 51, 134, 171}, {185, 51, 134, 172}, {185, 51, 134, 245}, {185, 51, 134, 246}, {185, 51, 134, 247}, {185, 51, 134, 249}, {185, 51, 134, 251}, {185, 51, 134, 254}}},\n\t\t{Region: \"Greece\", Group: \"Premium UDP Europe\", Hostname: \"87-1-gr.cg-dialup.net\", IPs: []net.IP{{185, 51, 134, 163}, {185, 51, 134, 166}, {185, 51, 134, 173}, {185, 51, 134, 174}, {185, 51, 134, 244}, {185, 51, 134, 246}, {185, 51, 134, 247}, {185, 51, 134, 251}, {185, 51, 134, 252}, {185, 51, 134, 253}}},\n\t\t{Region: \"Greenland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-gl.cg-dialup.net\", IPs: []net.IP{{91, 90, 120, 3}, {91, 90, 120, 4}, {91, 90, 120, 5}, {91, 90, 120, 7}, {91, 90, 120, 8}, {91, 90, 120, 10}, {91, 90, 120, 12}, {91, 90, 120, 13}, {91, 90, 120, 14}, {91, 90, 120, 17}}},\n\t\t{Region: \"Greenland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-gl.cg-dialup.net\", IPs: []net.IP{{91, 90, 
120, 3}, {91, 90, 120, 4}, {91, 90, 120, 5}, {91, 90, 120, 7}, {91, 90, 120, 9}, {91, 90, 120, 10}, {91, 90, 120, 12}, {91, 90, 120, 14}, {91, 90, 120, 15}, {91, 90, 120, 16}}},\n\t\t{Region: \"Hong Kong\", Group: \"Premium TCP Asia\", Hostname: \"96-1-hk.cg-dialup.net\", IPs: []net.IP{{84, 17, 56, 144}, {84, 17, 56, 148}, {84, 17, 56, 153}, {84, 17, 56, 162}, {84, 17, 56, 163}, {84, 17, 56, 169}, {84, 17, 56, 170}, {84, 17, 56, 179}, {84, 17, 56, 180}, {84, 17, 56, 181}}},\n\t\t{Region: \"Hong Kong\", Group: \"Premium UDP Asia\", Hostname: \"95-1-hk.cg-dialup.net\", IPs: []net.IP{{84, 17, 56, 143}, {84, 17, 56, 147}, {84, 17, 56, 150}, {84, 17, 56, 152}, {84, 17, 56, 161}, {84, 17, 56, 164}, {84, 17, 56, 168}, {84, 17, 56, 179}, {84, 17, 56, 180}, {84, 17, 56, 183}}},\n\t\t{Region: \"Hungary\", Group: \"Premium TCP Europe\", Hostname: \"97-1-hu.cg-dialup.net\", IPs: []net.IP{{86, 106, 74, 247}, {86, 106, 74, 251}, {86, 106, 74, 253}, {185, 189, 114, 117}, {185, 189, 114, 118}, {185, 189, 114, 119}, {185, 189, 114, 121}, {185, 189, 114, 123}, {185, 189, 114, 125}, {185, 189, 114, 126}}},\n\t\t{Region: \"Hungary\", Group: \"Premium UDP Europe\", Hostname: \"87-1-hu.cg-dialup.net\", IPs: []net.IP{{86, 106, 74, 245}, {86, 106, 74, 247}, {86, 106, 74, 248}, {86, 106, 74, 249}, {86, 106, 74, 250}, {86, 106, 74, 252}, {86, 106, 74, 253}, {185, 189, 114, 120}, {185, 189, 114, 121}, {185, 189, 114, 122}}},\n\t\t{Region: \"Iceland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-is.cg-dialup.net\", IPs: []net.IP{{45, 133, 193, 3}, {45, 133, 193, 4}, {45, 133, 193, 6}, {45, 133, 193, 7}, {45, 133, 193, 8}, {45, 133, 193, 10}, {45, 133, 193, 11}, {45, 133, 193, 12}, {45, 133, 193, 13}, {45, 133, 193, 14}}},\n\t\t{Region: \"Iceland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-is.cg-dialup.net\", IPs: []net.IP{{45, 133, 193, 3}, {45, 133, 193, 5}, {45, 133, 193, 6}, {45, 133, 193, 7}, {45, 133, 193, 8}, {45, 133, 193, 9}, {45, 133, 193, 10}, {45, 133, 193, 11}, {45, 
133, 193, 13}, {45, 133, 193, 14}}},\n\t\t{Region: \"India\", Group: \"Premium TCP Europe\", Hostname: \"97-1-in.cg-dialup.net\", IPs: []net.IP{{103, 13, 112, 68}, {103, 13, 112, 70}, {103, 13, 112, 72}, {103, 13, 112, 74}, {103, 13, 112, 75}, {103, 13, 113, 74}, {103, 13, 113, 79}, {103, 13, 113, 82}, {103, 13, 113, 83}, {103, 13, 113, 84}}},\n\t\t{Region: \"India\", Group: \"Premium UDP Europe\", Hostname: \"87-1-in.cg-dialup.net\", IPs: []net.IP{{103, 13, 112, 67}, {103, 13, 112, 70}, {103, 13, 112, 71}, {103, 13, 112, 77}, {103, 13, 112, 80}, {103, 13, 113, 72}, {103, 13, 113, 74}, {103, 13, 113, 75}, {103, 13, 113, 77}, {103, 13, 113, 85}}},\n\t\t{Region: \"Indonesia\", Group: \"Premium TCP Asia\", Hostname: \"96-1-id.cg-dialup.net\", IPs: []net.IP{{146, 70, 14, 3}, {146, 70, 14, 4}, {146, 70, 14, 5}, {146, 70, 14, 6}, {146, 70, 14, 7}, {146, 70, 14, 10}, {146, 70, 14, 12}, {146, 70, 14, 13}, {146, 70, 14, 15}, {146, 70, 14, 16}}},\n\t\t{Region: \"Indonesia\", Group: \"Premium UDP Asia\", Hostname: \"95-1-id.cg-dialup.net\", IPs: []net.IP{{146, 70, 14, 3}, {146, 70, 14, 5}, {146, 70, 14, 8}, {146, 70, 14, 9}, {146, 70, 14, 10}, {146, 70, 14, 12}, {146, 70, 14, 13}, {146, 70, 14, 14}, {146, 70, 14, 15}, {146, 70, 14, 16}}},\n\t\t{Region: \"Iran\", Group: \"Premium TCP Asia\", Hostname: \"96-1-ir.cg-dialup.net\", IPs: []net.IP{{62, 133, 46, 3}, {62, 133, 46, 4}, {62, 133, 46, 5}, {62, 133, 46, 6}, {62, 133, 46, 7}, {62, 133, 46, 8}, {62, 133, 46, 9}, {62, 133, 46, 10}, {62, 133, 46, 14}, {62, 133, 46, 15}}},\n\t\t{Region: \"Iran\", Group: \"Premium UDP Asia\", Hostname: \"95-1-ir.cg-dialup.net\", IPs: []net.IP{{62, 133, 46, 3}, {62, 133, 46, 4}, {62, 133, 46, 7}, {62, 133, 46, 8}, {62, 133, 46, 11}, {62, 133, 46, 12}, {62, 133, 46, 13}, {62, 133, 46, 14}, {62, 133, 46, 15}, {62, 133, 46, 16}}},\n\t\t{Region: \"Ireland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ie.cg-dialup.net\", IPs: []net.IP{{37, 120, 235, 154}, {37, 120, 235, 166}, {37, 120, 235, 
174}, {77, 81, 139, 35}, {84, 247, 48, 6}, {84, 247, 48, 19}, {84, 247, 48, 22}, {84, 247, 48, 23}, {84, 247, 48, 25}, {84, 247, 48, 26}}},\n\t\t{Region: \"Ireland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ie.cg-dialup.net\", IPs: []net.IP{{37, 120, 235, 147}, {37, 120, 235, 148}, {37, 120, 235, 153}, {37, 120, 235, 158}, {37, 120, 235, 169}, {37, 120, 235, 174}, {84, 247, 48, 8}, {84, 247, 48, 11}, {84, 247, 48, 20}, {84, 247, 48, 23}}},\n\t\t{Region: \"Isle of Man\", Group: \"Premium TCP Europe\", Hostname: \"97-1-im.cg-dialup.net\", IPs: []net.IP{{91, 90, 124, 147}, {91, 90, 124, 149}, {91, 90, 124, 150}, {91, 90, 124, 151}, {91, 90, 124, 152}, {91, 90, 124, 153}, {91, 90, 124, 154}, {91, 90, 124, 156}, {91, 90, 124, 157}, {91, 90, 124, 158}}},\n\t\t{Region: \"Isle of Man\", Group: \"Premium UDP Europe\", Hostname: \"87-1-im.cg-dialup.net\", IPs: []net.IP{{91, 90, 124, 147}, {91, 90, 124, 149}, {91, 90, 124, 150}, {91, 90, 124, 151}, {91, 90, 124, 152}, {91, 90, 124, 153}, {91, 90, 124, 154}, {91, 90, 124, 155}, {91, 90, 124, 156}, {91, 90, 124, 157}}},\n\t\t{Region: \"Israel\", Group: \"Premium TCP Europe\", Hostname: \"97-1-il.cg-dialup.net\", IPs: []net.IP{{160, 116, 0, 174}, {185, 77, 248, 103}, {185, 77, 248, 111}, {185, 77, 248, 113}, {185, 77, 248, 114}, {185, 77, 248, 124}, {185, 77, 248, 125}, {185, 77, 248, 127}, {185, 77, 248, 128}, {185, 77, 248, 129}}},\n\t\t{Region: \"Israel\", Group: \"Premium UDP Europe\", Hostname: \"87-1-il.cg-dialup.net\", IPs: []net.IP{{160, 116, 0, 163}, {160, 116, 0, 165}, {160, 116, 0, 172}, {185, 77, 248, 103}, {185, 77, 248, 106}, {185, 77, 248, 114}, {185, 77, 248, 117}, {185, 77, 248, 118}, {185, 77, 248, 126}, {185, 77, 248, 129}}},\n\t\t{Region: \"Italy\", Group: \"Premium TCP Europe\", Hostname: \"97-1-it.cg-dialup.net\", IPs: []net.IP{{84, 17, 58, 21}, {84, 17, 58, 100}, {84, 17, 58, 106}, {84, 17, 58, 111}, {84, 17, 58, 117}, {87, 101, 94, 122}, {212, 102, 55, 100}, {212, 102, 55, 106}, {212, 102, 55, 
110}, {212, 102, 55, 122}}},\n\t\t{Region: \"Italy\", Group: \"Premium UDP Europe\", Hostname: \"87-1-it.cg-dialup.net\", IPs: []net.IP{{84, 17, 58, 19}, {84, 17, 58, 95}, {84, 17, 58, 105}, {84, 17, 58, 119}, {84, 17, 58, 120}, {87, 101, 94, 116}, {185, 217, 71, 137}, {185, 217, 71, 138}, {185, 217, 71, 153}, {212, 102, 55, 108}}},\n\t\t{Region: \"Japan\", Group: \"Premium TCP Asia\", Hostname: \"96-1-jp.cg-dialup.net\", IPs: []net.IP{{156, 146, 35, 6}, {156, 146, 35, 10}, {156, 146, 35, 15}, {156, 146, 35, 22}, {156, 146, 35, 37}, {156, 146, 35, 39}, {156, 146, 35, 40}, {156, 146, 35, 41}, {156, 146, 35, 44}, {156, 146, 35, 50}}},\n\t\t{Region: \"Japan\", Group: \"Premium UDP Asia\", Hostname: \"95-1-jp.cg-dialup.net\", IPs: []net.IP{{156, 146, 35, 4}, {156, 146, 35, 14}, {156, 146, 35, 15}, {156, 146, 35, 18}, {156, 146, 35, 25}, {156, 146, 35, 34}, {156, 146, 35, 36}, {156, 146, 35, 46}, {156, 146, 35, 49}, {156, 146, 35, 50}}},\n\t\t{Region: \"Kazakhstan\", Group: \"Premium TCP Europe\", Hostname: \"97-1-kz.cg-dialup.net\", IPs: []net.IP{{62, 133, 47, 131}, {62, 133, 47, 132}, {62, 133, 47, 134}, {62, 133, 47, 136}, {62, 133, 47, 138}, {62, 133, 47, 139}, {62, 133, 47, 140}, {62, 133, 47, 142}, {62, 133, 47, 143}, {62, 133, 47, 144}}},\n\t\t{Region: \"Kazakhstan\", Group: \"Premium UDP Europe\", Hostname: \"87-1-kz.cg-dialup.net\", IPs: []net.IP{{62, 133, 47, 131}, {62, 133, 47, 132}, {62, 133, 47, 133}, {62, 133, 47, 134}, {62, 133, 47, 135}, {62, 133, 47, 138}, {62, 133, 47, 139}, {62, 133, 47, 140}, {62, 133, 47, 142}, {62, 133, 47, 143}}},\n\t\t{Region: \"Kenya\", Group: \"Premium TCP Asia\", Hostname: \"96-1-ke.cg-dialup.net\", IPs: []net.IP{{62, 12, 118, 195}, {62, 12, 118, 196}, {62, 12, 118, 197}, {62, 12, 118, 198}, {62, 12, 118, 199}, {62, 12, 118, 200}, {62, 12, 118, 201}, {62, 12, 118, 202}, {62, 12, 118, 203}, {62, 12, 118, 204}}},\n\t\t{Region: \"Kenya\", Group: \"Premium UDP Asia\", Hostname: \"95-1-ke.cg-dialup.net\", IPs: []net.IP{{62, 12, 
118, 195}, {62, 12, 118, 196}, {62, 12, 118, 197}, {62, 12, 118, 198}, {62, 12, 118, 199}, {62, 12, 118, 200}, {62, 12, 118, 201}, {62, 12, 118, 202}, {62, 12, 118, 203}, {62, 12, 118, 204}}},\n\t\t{Region: \"Korea\", Group: \"Premium TCP Asia\", Hostname: \"96-1-kr.cg-dialup.net\", IPs: []net.IP{{79, 110, 55, 131}, {79, 110, 55, 134}, {79, 110, 55, 141}, {79, 110, 55, 147}, {79, 110, 55, 148}, {79, 110, 55, 151}, {79, 110, 55, 152}, {79, 110, 55, 153}, {79, 110, 55, 155}, {79, 110, 55, 157}}},\n\t\t{Region: \"Korea\", Group: \"Premium UDP Asia\", Hostname: \"95-1-kr.cg-dialup.net\", IPs: []net.IP{{79, 110, 55, 131}, {79, 110, 55, 133}, {79, 110, 55, 134}, {79, 110, 55, 136}, {79, 110, 55, 138}, {79, 110, 55, 140}, {79, 110, 55, 149}, {79, 110, 55, 151}, {79, 110, 55, 152}, {79, 110, 55, 157}}},\n\t\t{Region: \"Latvia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-lv.cg-dialup.net\", IPs: []net.IP{{109, 248, 148, 244}, {109, 248, 148, 245}, {109, 248, 148, 246}, {109, 248, 148, 247}, {109, 248, 148, 249}, {109, 248, 148, 250}, {109, 248, 148, 253}, {109, 248, 149, 22}, {109, 248, 149, 24}, {109, 248, 149, 25}}},\n\t\t{Region: \"Latvia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-lv.cg-dialup.net\", IPs: []net.IP{{109, 248, 148, 248}, {109, 248, 148, 250}, {109, 248, 148, 254}, {109, 248, 149, 19}, {109, 248, 149, 20}, {109, 248, 149, 22}, {109, 248, 149, 24}, {109, 248, 149, 26}, {109, 248, 149, 28}, {109, 248, 149, 30}}},\n\t\t{Region: \"Liechtenstein\", Group: \"Premium UDP Europe\", Hostname: \"87-1-li.cg-dialup.net\", IPs: []net.IP{{91, 90, 122, 131}, {91, 90, 122, 134}, {91, 90, 122, 137}, {91, 90, 122, 138}, {91, 90, 122, 139}, {91, 90, 122, 140}, {91, 90, 122, 141}, {91, 90, 122, 142}, {91, 90, 122, 144}, {91, 90, 122, 145}}},\n\t\t{Region: \"Lithuania\", Group: \"Premium TCP Europe\", Hostname: \"97-1-lt.cg-dialup.net\", IPs: []net.IP{{85, 206, 162, 212}, {85, 206, 162, 215}, {85, 206, 162, 219}, {85, 206, 162, 222}, {85, 206, 165, 17}, {85, 206, 
165, 23}, {85, 206, 165, 25}, {85, 206, 165, 26}, {85, 206, 165, 30}, {85, 206, 165, 31}}},\n\t\t{Region: \"Lithuania\", Group: \"Premium UDP Europe\", Hostname: \"87-1-lt.cg-dialup.net\", IPs: []net.IP{{85, 206, 162, 209}, {85, 206, 162, 210}, {85, 206, 162, 211}, {85, 206, 162, 213}, {85, 206, 162, 214}, {85, 206, 162, 217}, {85, 206, 162, 218}, {85, 206, 162, 220}, {85, 206, 165, 26}, {85, 206, 165, 30}}},\n\t\t{Region: \"Luxembourg\", Group: \"Premium UDP Europe\", Hostname: \"87-1-lu.cg-dialup.net\", IPs: []net.IP{{5, 253, 204, 7}, {5, 253, 204, 10}, {5, 253, 204, 12}, {5, 253, 204, 23}, {5, 253, 204, 26}, {5, 253, 204, 30}, {5, 253, 204, 37}, {5, 253, 204, 39}, {5, 253, 204, 44}, {5, 253, 204, 45}}},\n\t\t{Region: \"Macao\", Group: \"Premium TCP Asia\", Hostname: \"96-1-mo.cg-dialup.net\", IPs: []net.IP{{84, 252, 92, 131}, {84, 252, 92, 133}, {84, 252, 92, 135}, {84, 252, 92, 137}, {84, 252, 92, 138}, {84, 252, 92, 139}, {84, 252, 92, 141}, {84, 252, 92, 142}, {84, 252, 92, 144}, {84, 252, 92, 145}}},\n\t\t{Region: \"Macao\", Group: \"Premium UDP Asia\", Hostname: \"95-1-mo.cg-dialup.net\", IPs: []net.IP{{84, 252, 92, 132}, {84, 252, 92, 134}, {84, 252, 92, 135}, {84, 252, 92, 136}, {84, 252, 92, 137}, {84, 252, 92, 139}, {84, 252, 92, 141}, {84, 252, 92, 143}, {84, 252, 92, 144}, {84, 252, 92, 145}}},\n\t\t{Region: \"Macedonia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-mk.cg-dialup.net\", IPs: []net.IP{{185, 225, 28, 3}, {185, 225, 28, 4}, {185, 225, 28, 5}, {185, 225, 28, 6}, {185, 225, 28, 7}, {185, 225, 28, 8}, {185, 225, 28, 9}, {185, 225, 28, 10}, {185, 225, 28, 11}, {185, 225, 28, 12}}},\n\t\t{Region: \"Macedonia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-mk.cg-dialup.net\", IPs: []net.IP{{185, 225, 28, 3}, {185, 225, 28, 4}, {185, 225, 28, 5}, {185, 225, 28, 6}, {185, 225, 28, 7}, {185, 225, 28, 8}, {185, 225, 28, 9}, {185, 225, 28, 10}, {185, 225, 28, 11}, {185, 225, 28, 12}}},\n\t\t{Region: \"Malaysia\", Group: \"Premium TCP Asia\", 
Hostname: \"96-1-my.cg-dialup.net\", IPs: []net.IP{{146, 70, 15, 4}, {146, 70, 15, 6}, {146, 70, 15, 8}, {146, 70, 15, 9}, {146, 70, 15, 10}, {146, 70, 15, 11}, {146, 70, 15, 12}, {146, 70, 15, 13}, {146, 70, 15, 15}, {146, 70, 15, 16}}},\n\t\t{Region: \"Malaysia\", Group: \"Premium UDP Asia\", Hostname: \"95-1-my.cg-dialup.net\", IPs: []net.IP{{146, 70, 15, 3}, {146, 70, 15, 4}, {146, 70, 15, 5}, {146, 70, 15, 6}, {146, 70, 15, 7}, {146, 70, 15, 8}, {146, 70, 15, 10}, {146, 70, 15, 12}, {146, 70, 15, 15}, {146, 70, 15, 16}}},\n\t\t{Region: \"Malta\", Group: \"Premium TCP Europe\", Hostname: \"97-1-mt.cg-dialup.net\", IPs: []net.IP{{176, 125, 230, 133}, {176, 125, 230, 135}, {176, 125, 230, 136}, {176, 125, 230, 137}, {176, 125, 230, 138}, {176, 125, 230, 140}, {176, 125, 230, 142}, {176, 125, 230, 143}, {176, 125, 230, 144}, {176, 125, 230, 145}}},\n\t\t{Region: \"Malta\", Group: \"Premium UDP Europe\", Hostname: \"87-1-mt.cg-dialup.net\", IPs: []net.IP{{176, 125, 230, 131}, {176, 125, 230, 133}, {176, 125, 230, 134}, {176, 125, 230, 136}, {176, 125, 230, 137}, {176, 125, 230, 138}, {176, 125, 230, 139}, {176, 125, 230, 140}, {176, 125, 230, 144}, {176, 125, 230, 145}}},\n\t\t{Region: \"Mexico\", Group: \"Premium TCP USA\", Hostname: \"93-1-mx.cg-dialup.net\", IPs: []net.IP{{77, 81, 142, 132}, {77, 81, 142, 134}, {77, 81, 142, 136}, {77, 81, 142, 139}, {77, 81, 142, 142}, {77, 81, 142, 154}, {77, 81, 142, 155}, {77, 81, 142, 157}, {77, 81, 142, 158}, {77, 81, 142, 159}}},\n\t\t{Region: \"Mexico\", Group: \"Premium UDP USA\", Hostname: \"94-1-mx.cg-dialup.net\", IPs: []net.IP{{77, 81, 142, 130}, {77, 81, 142, 131}, {77, 81, 142, 132}, {77, 81, 142, 139}, {77, 81, 142, 141}, {77, 81, 142, 142}, {77, 81, 142, 146}, {77, 81, 142, 147}, {77, 81, 142, 154}, {77, 81, 142, 159}}},\n\t\t{Region: \"Moldova\", Group: \"Premium TCP Europe\", Hostname: \"97-1-md.cg-dialup.net\", IPs: []net.IP{{178, 175, 130, 243}, {178, 175, 130, 244}, {178, 175, 130, 245}, {178, 175, 130, 
246}, {178, 175, 130, 251}, {178, 175, 130, 254}, {178, 175, 142, 131}, {178, 175, 142, 132}, {178, 175, 142, 133}, {178, 175, 142, 134}}},\n\t\t{Region: \"Moldova\", Group: \"Premium UDP Europe\", Hostname: \"87-1-md.cg-dialup.net\", IPs: []net.IP{{178, 175, 130, 243}, {178, 175, 130, 244}, {178, 175, 130, 246}, {178, 175, 130, 250}, {178, 175, 130, 251}, {178, 175, 130, 253}, {178, 175, 130, 254}, {178, 175, 142, 132}, {178, 175, 142, 133}, {178, 175, 142, 134}}},\n\t\t{Region: \"Monaco\", Group: \"Premium TCP Europe\", Hostname: \"97-1-mc.cg-dialup.net\", IPs: []net.IP{{95, 181, 233, 131}, {95, 181, 233, 132}, {95, 181, 233, 133}, {95, 181, 233, 137}, {95, 181, 233, 138}, {95, 181, 233, 139}, {95, 181, 233, 140}, {95, 181, 233, 141}, {95, 181, 233, 143}, {95, 181, 233, 144}}},\n\t\t{Region: \"Monaco\", Group: \"Premium UDP Europe\", Hostname: \"87-1-mc.cg-dialup.net\", IPs: []net.IP{{95, 181, 233, 132}, {95, 181, 233, 135}, {95, 181, 233, 136}, {95, 181, 233, 137}, {95, 181, 233, 138}, {95, 181, 233, 139}, {95, 181, 233, 141}, {95, 181, 233, 142}, {95, 181, 233, 143}, {95, 181, 233, 144}}},\n\t\t{Region: \"Mongolia\", Group: \"Premium TCP Asia\", Hostname: \"96-1-mn.cg-dialup.net\", IPs: []net.IP{{185, 253, 163, 132}, {185, 253, 163, 133}, {185, 253, 163, 135}, {185, 253, 163, 136}, {185, 253, 163, 139}, {185, 253, 163, 140}, {185, 253, 163, 141}, {185, 253, 163, 142}, {185, 253, 163, 143}, {185, 253, 163, 144}}},\n\t\t{Region: \"Mongolia\", Group: \"Premium UDP Asia\", Hostname: \"95-1-mn.cg-dialup.net\", IPs: []net.IP{{185, 253, 163, 131}, {185, 253, 163, 133}, {185, 253, 163, 134}, {185, 253, 163, 137}, {185, 253, 163, 138}, {185, 253, 163, 139}, {185, 253, 163, 140}, {185, 253, 163, 141}, {185, 253, 163, 142}, {185, 253, 163, 144}}},\n\t\t{Region: \"Montenegro\", Group: \"Premium TCP Europe\", Hostname: \"97-1-me.cg-dialup.net\", IPs: []net.IP{{176, 125, 229, 131}, {176, 125, 229, 135}, {176, 125, 229, 137}, {176, 125, 229, 138}, {176, 125, 229, 140}, {176, 
125, 229, 141}, {176, 125, 229, 142}, {176, 125, 229, 143}, {176, 125, 229, 144}, {176, 125, 229, 145}}},\n\t\t{Region: \"Montenegro\", Group: \"Premium UDP Europe\", Hostname: \"87-1-me.cg-dialup.net\", IPs: []net.IP{{176, 125, 229, 131}, {176, 125, 229, 134}, {176, 125, 229, 136}, {176, 125, 229, 137}, {176, 125, 229, 138}, {176, 125, 229, 139}, {176, 125, 229, 140}, {176, 125, 229, 141}, {176, 125, 229, 143}, {176, 125, 229, 144}}},\n\t\t{Region: \"Morocco\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ma.cg-dialup.net\", IPs: []net.IP{{95, 181, 232, 132}, {95, 181, 232, 133}, {95, 181, 232, 134}, {95, 181, 232, 136}, {95, 181, 232, 137}, {95, 181, 232, 138}, {95, 181, 232, 139}, {95, 181, 232, 140}, {95, 181, 232, 141}, {95, 181, 232, 144}}},\n\t\t{Region: \"Morocco\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ma.cg-dialup.net\", IPs: []net.IP{{95, 181, 232, 131}, {95, 181, 232, 132}, {95, 181, 232, 133}, {95, 181, 232, 135}, {95, 181, 232, 137}, {95, 181, 232, 139}, {95, 181, 232, 140}, {95, 181, 232, 141}, {95, 181, 232, 142}, {95, 181, 232, 143}}},\n\t\t{Region: \"Netherlands\", Group: \"Premium TCP Europe\", Hostname: \"97-1-nl.cg-dialup.net\", IPs: []net.IP{{84, 17, 47, 98}, {181, 214, 206, 22}, {181, 214, 206, 27}, {181, 214, 206, 36}, {195, 78, 54, 10}, {195, 78, 54, 20}, {195, 78, 54, 43}, {195, 78, 54, 50}, {195, 78, 54, 119}, {195, 181, 172, 78}}},\n\t\t{Region: \"Netherlands\", Group: \"Premium UDP Europe\", Hostname: \"87-1-nl.cg-dialup.net\", IPs: []net.IP{{84, 17, 47, 110}, {181, 214, 206, 29}, {181, 214, 206, 42}, {195, 78, 54, 8}, {195, 78, 54, 19}, {195, 78, 54, 47}, {195, 78, 54, 110}, {195, 78, 54, 141}, {195, 78, 54, 143}, {195, 78, 54, 157}}},\n\t\t{Region: \"New Zealand\", Group: \"Premium TCP Asia\", Hostname: \"96-1-nz.cg-dialup.net\", IPs: []net.IP{{43, 250, 207, 98}, {43, 250, 207, 99}, {43, 250, 207, 100}, {43, 250, 207, 101}, {43, 250, 207, 102}, {43, 250, 207, 103}, {43, 250, 207, 105}, {43, 250, 207, 106}, {43, 250, 207, 
108}, {43, 250, 207, 109}}},\n\t\t{Region: \"New Zealand\", Group: \"Premium UDP Asia\", Hostname: \"95-1-nz.cg-dialup.net\", IPs: []net.IP{{43, 250, 207, 98}, {43, 250, 207, 99}, {43, 250, 207, 102}, {43, 250, 207, 104}, {43, 250, 207, 105}, {43, 250, 207, 106}, {43, 250, 207, 107}, {43, 250, 207, 108}, {43, 250, 207, 109}, {43, 250, 207, 110}}},\n\t\t{Region: \"Nigeria\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ng.cg-dialup.net\", IPs: []net.IP{{102, 165, 25, 68}, {102, 165, 25, 69}, {102, 165, 25, 70}, {102, 165, 25, 71}, {102, 165, 25, 72}, {102, 165, 25, 73}, {102, 165, 25, 75}, {102, 165, 25, 76}, {102, 165, 25, 77}, {102, 165, 25, 78}}},\n\t\t{Region: \"Nigeria\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ng.cg-dialup.net\", IPs: []net.IP{{102, 165, 25, 68}, {102, 165, 25, 69}, {102, 165, 25, 70}, {102, 165, 25, 71}, {102, 165, 25, 72}, {102, 165, 25, 74}, {102, 165, 25, 75}, {102, 165, 25, 76}, {102, 165, 25, 77}, {102, 165, 25, 78}}},\n\t\t{Region: \"Norway\", Group: \"Premium TCP Europe\", Hostname: \"97-1-no.cg-dialup.net\", IPs: []net.IP{{45, 12, 223, 137}, {45, 12, 223, 140}, {185, 206, 225, 29}, {185, 206, 225, 231}, {185, 253, 97, 234}, {185, 253, 97, 236}, {185, 253, 97, 238}, {185, 253, 97, 244}, {185, 253, 97, 250}, {185, 253, 97, 254}}},\n\t\t{Region: \"Norway\", Group: \"Premium UDP Europe\", Hostname: \"87-1-no.cg-dialup.net\", IPs: []net.IP{{45, 12, 223, 133}, {45, 12, 223, 134}, {45, 12, 223, 142}, {185, 206, 225, 227}, {185, 206, 225, 228}, {185, 206, 225, 231}, {185, 206, 225, 235}, {185, 253, 97, 237}, {185, 253, 97, 246}, {185, 253, 97, 254}}},\n\t\t{Region: \"Pakistan\", Group: \"Premium TCP Asia\", Hostname: \"96-1-pk.cg-dialup.net\", IPs: []net.IP{{146, 70, 12, 3}, {146, 70, 12, 4}, {146, 70, 12, 6}, {146, 70, 12, 8}, {146, 70, 12, 9}, {146, 70, 12, 10}, {146, 70, 12, 11}, {146, 70, 12, 12}, {146, 70, 12, 13}, {146, 70, 12, 14}}},\n\t\t{Region: \"Pakistan\", Group: \"Premium UDP Asia\", Hostname: 
\"95-1-pk.cg-dialup.net\", IPs: []net.IP{{146, 70, 12, 4}, {146, 70, 12, 5}, {146, 70, 12, 6}, {146, 70, 12, 7}, {146, 70, 12, 8}, {146, 70, 12, 10}, {146, 70, 12, 11}, {146, 70, 12, 12}, {146, 70, 12, 13}, {146, 70, 12, 14}}},\n\t\t{Region: \"Panama\", Group: \"Premium TCP Europe\", Hostname: \"97-1-pa.cg-dialup.net\", IPs: []net.IP{{91, 90, 126, 131}, {91, 90, 126, 132}, {91, 90, 126, 133}, {91, 90, 126, 134}, {91, 90, 126, 136}, {91, 90, 126, 138}, {91, 90, 126, 139}, {91, 90, 126, 141}, {91, 90, 126, 142}, {91, 90, 126, 145}}},\n\t\t{Region: \"Panama\", Group: \"Premium UDP Europe\", Hostname: \"87-1-pa.cg-dialup.net\", IPs: []net.IP{{91, 90, 126, 131}, {91, 90, 126, 133}, {91, 90, 126, 134}, {91, 90, 126, 135}, {91, 90, 126, 136}, {91, 90, 126, 138}, {91, 90, 126, 140}, {91, 90, 126, 141}, {91, 90, 126, 142}, {91, 90, 126, 145}}},\n\t\t{Region: \"Philippines\", Group: \"Premium TCP Asia\", Hostname: \"96-1-ph.cg-dialup.net\", IPs: []net.IP{{188, 214, 125, 37}, {188, 214, 125, 38}, {188, 214, 125, 40}, {188, 214, 125, 43}, {188, 214, 125, 44}, {188, 214, 125, 45}, {188, 214, 125, 52}, {188, 214, 125, 55}, {188, 214, 125, 61}, {188, 214, 125, 62}}},\n\t\t{Region: \"Philippines\", Group: \"Premium UDP Asia\", Hostname: \"95-1-ph.cg-dialup.net\", IPs: []net.IP{{188, 214, 125, 37}, {188, 214, 125, 40}, {188, 214, 125, 46}, {188, 214, 125, 49}, {188, 214, 125, 52}, {188, 214, 125, 54}, {188, 214, 125, 57}, {188, 214, 125, 58}, {188, 214, 125, 61}, {188, 214, 125, 62}}},\n\t\t{Region: \"Poland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-pl.cg-dialup.net\", IPs: []net.IP{{138, 199, 59, 132}, {138, 199, 59, 136}, {138, 199, 59, 137}, {138, 199, 59, 143}, {138, 199, 59, 144}, {138, 199, 59, 152}, {138, 199, 59, 153}, {138, 199, 59, 166}, {138, 199, 59, 174}, {138, 199, 59, 175}}},\n\t\t{Region: \"Poland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-pl.cg-dialup.net\", IPs: []net.IP{{138, 199, 59, 130}, {138, 199, 59, 136}, {138, 199, 59, 148}, {138, 199, 59, 
149}, {138, 199, 59, 153}, {138, 199, 59, 156}, {138, 199, 59, 157}, {138, 199, 59, 164}, {138, 199, 59, 171}, {138, 199, 59, 173}}},\n\t\t{Region: \"Portugal\", Group: \"Premium TCP Europe\", Hostname: \"97-1-pt.cg-dialup.net\", IPs: []net.IP{{89, 26, 243, 112}, {89, 26, 243, 115}, {89, 26, 243, 195}, {89, 26, 243, 216}, {89, 26, 243, 218}, {89, 26, 243, 220}, {89, 26, 243, 222}, {89, 26, 243, 223}, {89, 26, 243, 225}, {89, 26, 243, 228}}},\n\t\t{Region: \"Portugal\", Group: \"Premium UDP Europe\", Hostname: \"87-1-pt.cg-dialup.net\", IPs: []net.IP{{89, 26, 243, 99}, {89, 26, 243, 113}, {89, 26, 243, 115}, {89, 26, 243, 195}, {89, 26, 243, 199}, {89, 26, 243, 216}, {89, 26, 243, 219}, {89, 26, 243, 225}, {89, 26, 243, 226}, {89, 26, 243, 227}}},\n\t\t{Region: \"Qatar\", Group: \"Premium TCP Europe\", Hostname: \"97-1-qa.cg-dialup.net\", IPs: []net.IP{{95, 181, 234, 133}, {95, 181, 234, 135}, {95, 181, 234, 136}, {95, 181, 234, 137}, {95, 181, 234, 138}, {95, 181, 234, 139}, {95, 181, 234, 140}, {95, 181, 234, 141}, {95, 181, 234, 142}, {95, 181, 234, 143}}},\n\t\t{Region: \"Qatar\", Group: \"Premium UDP Europe\", Hostname: \"87-1-qa.cg-dialup.net\", IPs: []net.IP{{95, 181, 234, 131}, {95, 181, 234, 132}, {95, 181, 234, 133}, {95, 181, 234, 134}, {95, 181, 234, 135}, {95, 181, 234, 137}, {95, 181, 234, 138}, {95, 181, 234, 139}, {95, 181, 234, 142}, {95, 181, 234, 143}}},\n\t\t{Region: \"Russian Federation\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ru.cg-dialup.net\", IPs: []net.IP{{5, 8, 16, 72}, {5, 8, 16, 74}, {5, 8, 16, 84}, {5, 8, 16, 85}, {5, 8, 16, 123}, {5, 8, 16, 124}, {5, 8, 16, 132}, {146, 70, 52, 35}, {146, 70, 52, 44}, {146, 70, 52, 54}}},\n\t\t{Region: \"Russian Federation\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ru.cg-dialup.net\", IPs: []net.IP{{5, 8, 16, 75}, {5, 8, 16, 87}, {5, 8, 16, 99}, {5, 8, 16, 110}, {5, 8, 16, 138}, {146, 70, 52, 29}, {146, 70, 52, 52}, {146, 70, 52, 58}, {146, 70, 52, 59}, {146, 70, 52, 
67}}},\n\t\t{Region: \"Saudi Arabia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-sa.cg-dialup.net\", IPs: []net.IP{{95, 181, 235, 131}, {95, 181, 235, 133}, {95, 181, 235, 134}, {95, 181, 235, 135}, {95, 181, 235, 137}, {95, 181, 235, 138}, {95, 181, 235, 139}, {95, 181, 235, 140}, {95, 181, 235, 141}, {95, 181, 235, 142}}},\n\t\t{Region: \"Saudi Arabia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-sa.cg-dialup.net\", IPs: []net.IP{{95, 181, 235, 131}, {95, 181, 235, 132}, {95, 181, 235, 134}, {95, 181, 235, 135}, {95, 181, 235, 136}, {95, 181, 235, 137}, {95, 181, 235, 138}, {95, 181, 235, 139}, {95, 181, 235, 141}, {95, 181, 235, 144}}},\n\t\t{Region: \"Serbia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-rs.cg-dialup.net\", IPs: []net.IP{{37, 120, 193, 179}, {37, 120, 193, 186}, {37, 120, 193, 188}, {37, 120, 193, 190}, {141, 98, 103, 36}, {141, 98, 103, 38}, {141, 98, 103, 39}, {141, 98, 103, 43}, {141, 98, 103, 44}, {141, 98, 103, 46}}},\n\t\t{Region: \"Serbia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-rs.cg-dialup.net\", IPs: []net.IP{{37, 120, 193, 180}, {37, 120, 193, 186}, {37, 120, 193, 187}, {37, 120, 193, 188}, {37, 120, 193, 189}, {37, 120, 193, 190}, {141, 98, 103, 35}, {141, 98, 103, 36}, {141, 98, 103, 39}, {141, 98, 103, 41}}},\n\t\t{Region: \"Singapore\", Group: \"Premium TCP Asia\", Hostname: \"96-1-sg.cg-dialup.net\", IPs: []net.IP{{84, 17, 39, 162}, {84, 17, 39, 165}, {84, 17, 39, 168}, {84, 17, 39, 171}, {84, 17, 39, 175}, {84, 17, 39, 177}, {84, 17, 39, 178}, {84, 17, 39, 181}, {84, 17, 39, 183}, {84, 17, 39, 185}}},\n\t\t{Region: \"Singapore\", Group: \"Premium UDP Asia\", Hostname: \"95-1-sg.cg-dialup.net\", IPs: []net.IP{{84, 17, 39, 162}, {84, 17, 39, 165}, {84, 17, 39, 166}, {84, 17, 39, 167}, {84, 17, 39, 171}, {84, 17, 39, 174}, {84, 17, 39, 175}, {84, 17, 39, 178}, {84, 17, 39, 180}, {84, 17, 39, 185}}},\n\t\t{Region: \"Slovakia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-sk.cg-dialup.net\", IPs: 
[]net.IP{{185, 245, 85, 227}, {185, 245, 85, 228}, {185, 245, 85, 229}, {185, 245, 85, 230}, {185, 245, 85, 231}, {185, 245, 85, 232}, {185, 245, 85, 233}, {185, 245, 85, 234}, {185, 245, 85, 235}, {185, 245, 85, 236}}},\n\t\t{Region: \"Slovakia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-sk.cg-dialup.net\", IPs: []net.IP{{185, 245, 85, 227}, {185, 245, 85, 228}, {185, 245, 85, 229}, {185, 245, 85, 230}, {185, 245, 85, 231}, {185, 245, 85, 232}, {185, 245, 85, 233}, {185, 245, 85, 234}, {185, 245, 85, 235}, {185, 245, 85, 236}}},\n\t\t{Region: \"Slovenia\", Group: \"Premium TCP Europe\", Hostname: \"97-1-si.cg-dialup.net\", IPs: []net.IP{{195, 80, 150, 211}, {195, 80, 150, 212}, {195, 80, 150, 214}, {195, 80, 150, 215}, {195, 80, 150, 216}, {195, 80, 150, 217}, {195, 80, 150, 218}, {195, 80, 150, 219}, {195, 80, 150, 221}, {195, 80, 150, 222}}},\n\t\t{Region: \"Slovenia\", Group: \"Premium UDP Europe\", Hostname: \"87-1-si.cg-dialup.net\", IPs: []net.IP{{195, 80, 150, 211}, {195, 80, 150, 212}, {195, 80, 150, 214}, {195, 80, 150, 215}, {195, 80, 150, 216}, {195, 80, 150, 217}, {195, 80, 150, 219}, {195, 80, 150, 220}, {195, 80, 150, 221}, {195, 80, 150, 222}}},\n\t\t{Region: \"South Africa\", Group: \"Premium TCP Asia\", Hostname: \"96-1-za.cg-dialup.net\", IPs: []net.IP{{154, 127, 50, 212}, {154, 127, 50, 215}, {154, 127, 50, 217}, {154, 127, 50, 219}, {154, 127, 50, 220}, {154, 127, 50, 222}, {154, 127, 60, 196}, {154, 127, 60, 198}, {154, 127, 60, 199}, {154, 127, 60, 200}}},\n\t\t{Region: \"South Africa\", Group: \"Premium TCP Europe\", Hostname: \"97-1-za.cg-dialup.net\", IPs: []net.IP{{197, 85, 7, 26}, {197, 85, 7, 27}, {197, 85, 7, 28}, {197, 85, 7, 29}, {197, 85, 7, 30}, {197, 85, 7, 31}, {197, 85, 7, 131}, {197, 85, 7, 132}, {197, 85, 7, 133}, {197, 85, 7, 134}}},\n\t\t{Region: \"South Africa\", Group: \"Premium UDP Asia\", Hostname: \"95-1-za.cg-dialup.net\", IPs: []net.IP{{154, 127, 50, 210}, {154, 127, 50, 214}, {154, 127, 50, 218}, {154, 127, 
50, 219}, {154, 127, 50, 220}, {154, 127, 50, 221}, {154, 127, 50, 222}, {154, 127, 60, 195}, {154, 127, 60, 199}, {154, 127, 60, 206}}},\n\t\t{Region: \"South Africa\", Group: \"Premium UDP Europe\", Hostname: \"87-1-za.cg-dialup.net\", IPs: []net.IP{{197, 85, 7, 26}, {197, 85, 7, 27}, {197, 85, 7, 28}, {197, 85, 7, 29}, {197, 85, 7, 30}, {197, 85, 7, 31}, {197, 85, 7, 131}, {197, 85, 7, 132}, {197, 85, 7, 133}, {197, 85, 7, 134}}},\n\t\t{Region: \"Spain\", Group: \"Premium TCP Europe\", Hostname: \"97-1-es.cg-dialup.net\", IPs: []net.IP{{37, 120, 142, 41}, {37, 120, 142, 52}, {37, 120, 142, 55}, {37, 120, 142, 61}, {37, 120, 142, 173}, {84, 17, 62, 131}, {84, 17, 62, 142}, {84, 17, 62, 144}, {185, 93, 3, 108}, {185, 93, 3, 114}}},\n\t\t{Region: \"Sri Lanka\", Group: \"Premium TCP Europe\", Hostname: \"97-1-lk.cg-dialup.net\", IPs: []net.IP{{95, 181, 239, 131}, {95, 181, 239, 132}, {95, 181, 239, 133}, {95, 181, 239, 134}, {95, 181, 239, 135}, {95, 181, 239, 136}, {95, 181, 239, 137}, {95, 181, 239, 138}, {95, 181, 239, 140}, {95, 181, 239, 144}}},\n\t\t{Region: \"Sri Lanka\", Group: \"Premium UDP Europe\", Hostname: \"87-1-lk.cg-dialup.net\", IPs: []net.IP{{95, 181, 239, 131}, {95, 181, 239, 132}, {95, 181, 239, 133}, {95, 181, 239, 134}, {95, 181, 239, 135}, {95, 181, 239, 136}, {95, 181, 239, 140}, {95, 181, 239, 141}, {95, 181, 239, 142}, {95, 181, 239, 144}}},\n\t\t{Region: \"Sweden\", Group: \"Premium TCP Europe\", Hostname: \"97-1-se.cg-dialup.net\", IPs: []net.IP{{188, 126, 73, 207}, {188, 126, 73, 209}, {188, 126, 73, 214}, {188, 126, 73, 219}, {188, 126, 79, 6}, {188, 126, 79, 11}, {188, 126, 79, 19}, {188, 126, 79, 25}, {195, 246, 120, 148}, {195, 246, 120, 161}}},\n\t\t{Region: \"Sweden\", Group: \"Premium UDP Europe\", Hostname: \"87-1-se.cg-dialup.net\", IPs: []net.IP{{188, 126, 73, 201}, {188, 126, 73, 211}, {188, 126, 73, 213}, {188, 126, 73, 218}, {188, 126, 79, 6}, {188, 126, 79, 8}, {188, 126, 79, 19}, {195, 246, 120, 142}, {195, 246, 120, 144}, 
{195, 246, 120, 168}}},\n\t\t{Region: \"Switzerland\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ch.cg-dialup.net\", IPs: []net.IP{{84, 17, 52, 4}, {84, 17, 52, 20}, {84, 17, 52, 44}, {84, 17, 52, 65}, {84, 17, 52, 72}, {84, 17, 52, 80}, {84, 17, 52, 83}, {84, 17, 52, 85}, {185, 32, 222, 112}, {185, 189, 150, 73}}},\n\t\t{Region: \"Switzerland\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ch.cg-dialup.net\", IPs: []net.IP{{84, 17, 52, 5}, {84, 17, 52, 14}, {84, 17, 52, 24}, {84, 17, 52, 64}, {84, 17, 52, 73}, {84, 17, 52, 85}, {185, 32, 222, 114}, {185, 189, 150, 52}, {185, 189, 150, 57}, {195, 225, 118, 43}}},\n\t\t{Region: \"Taiwan\", Group: \"Premium TCP Asia\", Hostname: \"96-1-tw.cg-dialup.net\", IPs: []net.IP{{45, 133, 181, 100}, {45, 133, 181, 102}, {45, 133, 181, 103}, {45, 133, 181, 106}, {45, 133, 181, 109}, {45, 133, 181, 113}, {45, 133, 181, 115}, {45, 133, 181, 116}, {45, 133, 181, 123}, {45, 133, 181, 125}}},\n\t\t{Region: \"Taiwan\", Group: \"Premium UDP Asia\", Hostname: \"95-1-tw.cg-dialup.net\", IPs: []net.IP{{45, 133, 181, 99}, {45, 133, 181, 102}, {45, 133, 181, 107}, {45, 133, 181, 108}, {45, 133, 181, 109}, {45, 133, 181, 114}, {45, 133, 181, 116}, {45, 133, 181, 117}, {45, 133, 181, 123}, {45, 133, 181, 124}}},\n\t\t{Region: \"Thailand\", Group: \"Premium TCP Asia\", Hostname: \"96-1-th.cg-dialup.net\", IPs: []net.IP{{146, 70, 13, 3}, {146, 70, 13, 4}, {146, 70, 13, 6}, {146, 70, 13, 7}, {146, 70, 13, 8}, {146, 70, 13, 9}, {146, 70, 13, 11}, {146, 70, 13, 13}, {146, 70, 13, 15}, {146, 70, 13, 16}}},\n\t\t{Region: \"Thailand\", Group: \"Premium UDP Asia\", Hostname: \"95-1-th.cg-dialup.net\", IPs: []net.IP{{146, 70, 13, 3}, {146, 70, 13, 4}, {146, 70, 13, 8}, {146, 70, 13, 9}, {146, 70, 13, 10}, {146, 70, 13, 11}, {146, 70, 13, 12}, {146, 70, 13, 13}, {146, 70, 13, 15}, {146, 70, 13, 16}}},\n\t\t{Region: \"Turkey\", Group: \"Premium TCP Europe\", Hostname: \"97-1-tr.cg-dialup.net\", IPs: []net.IP{{188, 213, 34, 9}, {188, 213, 34, 
11}, {188, 213, 34, 15}, {188, 213, 34, 16}, {188, 213, 34, 23}, {188, 213, 34, 25}, {188, 213, 34, 28}, {188, 213, 34, 41}, {188, 213, 34, 108}, {188, 213, 34, 110}}},\n\t\t{Region: \"Turkey\", Group: \"Premium UDP Europe\", Hostname: \"87-1-tr.cg-dialup.net\", IPs: []net.IP{{188, 213, 34, 8}, {188, 213, 34, 11}, {188, 213, 34, 14}, {188, 213, 34, 28}, {188, 213, 34, 35}, {188, 213, 34, 42}, {188, 213, 34, 43}, {188, 213, 34, 100}, {188, 213, 34, 103}, {188, 213, 34, 107}}},\n\t\t{Region: \"Ukraine\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ua.cg-dialup.net\", IPs: []net.IP{{31, 28, 161, 18}, {31, 28, 161, 20}, {31, 28, 161, 27}, {31, 28, 163, 34}, {31, 28, 163, 37}, {31, 28, 163, 44}, {62, 149, 7, 167}, {62, 149, 7, 172}, {62, 149, 29, 45}, {62, 149, 29, 57}}},\n\t\t{Region: \"Ukraine\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ua.cg-dialup.net\", IPs: []net.IP{{31, 28, 161, 27}, {31, 28, 163, 38}, {31, 28, 163, 42}, {31, 28, 163, 54}, {31, 28, 163, 61}, {62, 149, 7, 162}, {62, 149, 7, 163}, {62, 149, 29, 35}, {62, 149, 29, 38}, {62, 149, 29, 41}}},\n\t\t{Region: \"United Arab Emirates\", Group: \"Premium TCP Europe\", Hostname: \"97-1-ae.cg-dialup.net\", IPs: []net.IP{{217, 138, 193, 179}, {217, 138, 193, 180}, {217, 138, 193, 181}, {217, 138, 193, 182}, {217, 138, 193, 183}, {217, 138, 193, 184}, {217, 138, 193, 185}, {217, 138, 193, 186}, {217, 138, 193, 188}, {217, 138, 193, 190}}},\n\t\t{Region: \"United Arab Emirates\", Group: \"Premium UDP Europe\", Hostname: \"87-1-ae.cg-dialup.net\", IPs: []net.IP{{217, 138, 193, 179}, {217, 138, 193, 180}, {217, 138, 193, 181}, {217, 138, 193, 182}, {217, 138, 193, 183}, {217, 138, 193, 186}, {217, 138, 193, 187}, {217, 138, 193, 188}, {217, 138, 193, 189}, {217, 138, 193, 190}}},\n\t\t{Region: \"United Kingdom\", Group: \"Premium TCP Europe\", Hostname: \"97-1-gb.cg-dialup.net\", IPs: []net.IP{{45, 133, 173, 49}, {45, 133, 173, 56}, {45, 133, 173, 82}, {45, 133, 173, 86}, {95, 154, 200, 153}, {95, 154, 
200, 156}, {181, 215, 176, 103}, {181, 215, 176, 246}, {181, 215, 176, 251}, {194, 110, 13, 141}}},\n\t\t{Region: \"United Kingdom\", Group: \"Premium UDP Europe\", Hostname: \"87-1-gb.cg-dialup.net\", IPs: []net.IP{{45, 133, 172, 100}, {45, 133, 172, 126}, {45, 133, 173, 84}, {95, 154, 200, 174}, {181, 215, 176, 110}, {181, 215, 176, 151}, {181, 215, 176, 158}, {191, 101, 209, 142}, {194, 110, 13, 107}, {194, 110, 13, 128}}},\n\t\t{Region: \"United States\", Group: \"Premium TCP USA\", Hostname: \"93-1-us.cg-dialup.net\", IPs: []net.IP{{102, 129, 145, 15}, {102, 129, 152, 195}, {102, 129, 152, 248}, {154, 21, 208, 159}, {185, 242, 5, 117}, {185, 242, 5, 123}, {185, 242, 5, 229}, {191, 96, 227, 173}, {191, 96, 227, 196}, {199, 115, 119, 248}}},\n\t\t{Region: \"United States\", Group: \"Premium UDP USA\", Hostname: \"94-1-us.cg-dialup.net\", IPs: []net.IP{{23, 82, 14, 113}, {23, 105, 177, 122}, {45, 89, 173, 222}, {84, 17, 35, 4}, {89, 187, 171, 132}, {156, 146, 37, 45}, {156, 146, 59, 86}, {184, 170, 240, 231}, {191, 96, 150, 248}, {199, 115, 119, 248}}},\n\t\t{Region: \"Venezuela\", Group: \"Premium TCP USA\", Hostname: \"93-1-ve.cg-dialup.net\", IPs: []net.IP{{95, 181, 237, 132}, {95, 181, 237, 133}, {95, 181, 237, 134}, {95, 181, 237, 135}, {95, 181, 237, 136}, {95, 181, 237, 138}, {95, 181, 237, 139}, {95, 181, 237, 140}, {95, 181, 237, 141}, {95, 181, 237, 143}}},\n\t\t{Region: \"Venezuela\", Group: \"Premium UDP USA\", Hostname: \"94-1-ve.cg-dialup.net\", IPs: []net.IP{{95, 181, 237, 131}, {95, 181, 237, 132}, {95, 181, 237, 134}, {95, 181, 237, 135}, {95, 181, 237, 136}, {95, 181, 237, 140}, {95, 181, 237, 141}, {95, 181, 237, 142}, {95, 181, 237, 143}, {95, 181, 237, 144}}},\n\t\t{Region: \"Vietnam\", Group: \"Premium TCP Asia\", Hostname: \"96-1-vn.cg-dialup.net\", IPs: []net.IP{{188, 214, 152, 99}, {188, 214, 152, 101}, {188, 214, 152, 103}, {188, 214, 152, 104}, {188, 214, 152, 105}, {188, 214, 152, 106}, {188, 214, 152, 107}, {188, 214, 152, 108}, {188, 
214, 152, 109}, {188, 214, 152, 110}}},\n\t\t{Region: \"Vietnam\", Group: \"Premium UDP Asia\", Hostname: \"95-1-vn.cg-dialup.net\", IPs: []net.IP{{188, 214, 152, 99}, {188, 214, 152, 100}, {188, 214, 152, 101}, {188, 214, 152, 102}, {188, 214, 152, 103}, {188, 214, 152, 104}, {188, 214, 152, 105}, {188, 214, 152, 106}, {188, 214, 152, 107}, {188, 214, 152, 109}}},\n\t}\n}",
"func getNameServerAddressListFromCmd(nameSrvAdders *string) *singlylinkedlist.List {\n\tif nameSrvAdders != nil {\n\t\tif *nameSrvAdders == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tnameSrvAdderArr := strings.Split(*nameSrvAdders, \";\")\n\t\tif len(nameSrvAdderArr) == 0 {\n\t\t\treturn nil\n\t\t}\n\t\tnameServerAddressList := singlylinkedlist.New()\n\t\tfor _, nameServerAddress := range nameSrvAdderArr {\n\t\t\tnameServerAddressList.Add(nameServerAddress)\n\t\t}\n\t\treturn nameServerAddressList\n\t}\n\treturn nil\n}",
"func (m *VpnConfiguration) SetServers(value []VpnServerable)() {\n err := m.GetBackingStore().Set(\"servers\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (g *Gandi) ListDomains() (domains []Domain, err error) {\n\t_, err = g.askGandi(mGET, \"domains\", nil, &domains)\n\treturn\n}",
"func (c *Client) DNSNameservers(ctx context.Context) ([]string, error) {\n\tconst uriFmt = \"/api/v2/domain/%v/dns/nameservers\"\n\n\treq, err := c.buildRequest(ctx, http.MethodGet, fmt.Sprintf(uriFmt, c.domain), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp DomainDNSNameservers\n\tif err = c.performRequest(req, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp.DNS, nil\n}",
"func ListDockerHosts(w http.ResponseWriter, r *http.Request) {\n\t// let's get all host uri from map.\n\tvar hosts []string\n\tfor host, _ := range clientMap {\n\t\thosts = append(hosts, host)\n\t}\n\t// map with list of hosts\n\tresponse := make(map[string][]string)\n\tresponse[\"hosts\"] = hosts\n\t// convert map to json\n\tjsonString, err := json.Marshal(response)\n\tif err != nil {\n\t\tfmt.Fprintln(w,\"{ \\\"error\\\" : \\\"Internal server error\\\" }\")\n\t}\n\tfmt.Fprintln(w,string(jsonString))\n}",
"func getDNSConf() []string {\n\tservers := []string{}\n\t_, err := os.Stat(\"/etc/resolv.conf\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tj, _ := dns.ClientConfigFromFile(\"/etc/resolv.conf\")\n\n\tservers = append(servers, fmt.Sprintf(\"%s:53\", j.Servers[0]))\n\tif len(servers) < 2 {\n\t\tservers = append(servers, fmt.Sprintf(\"%s:53\", j.Servers[0]))\n\t} else {\n\t\tservers = append(servers, fmt.Sprintf(\"%s:53\", j.Servers[1]))\n\t}\n\n\treturn servers\n\n}",
"func (c *Client) VirtualServerList() []A10VServer {\n\treturn a10VirtualServerList(c.debugf, c.host, c.sessionID)\n}",
"func (c *FakeGBPServers) List(opts v1.ListOptions) (result *aciawv1.GBPServerList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(gbpserversResource, gbpserversKind, c.ns, opts), &aciawv1.GBPServerList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &aciawv1.GBPServerList{ListMeta: obj.(*aciawv1.GBPServerList).ListMeta}\n\tfor _, item := range obj.(*aciawv1.GBPServerList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}",
"func (p *PorterHelper) List() (map[string]string, error) {\n\tentries := p.Cache.List()\n\n\tres := make(map[string]string)\n\n\tfor _, entry := range entries {\n\t\tres[entry.ProxyEndpoint] = entry.AuthorizationToken\n\t}\n\n\treturn res, nil\n}",
"func (d *DhcpOptions) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"dnsServers\":\n\t\t\terr = unpopulate(val, \"DNSServers\", &d.DNSServers)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (o *NodeUpdate) GetDnsServers() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.DnsServers\n}",
"func listDatacenters(c context.Context, names stringset.Set) ([]*crimson.Datacenter, error) {\n\tdb := database.Get(c)\n\trows, err := db.QueryContext(c, `\n\t\tSELECT name, description, state\n\t\tFROM datacenters\n\t`)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"failed to fetch datacenters\").Err()\n\t}\n\tdefer rows.Close()\n\n\tvar datacenters []*crimson.Datacenter\n\tfor rows.Next() {\n\t\tdc := &crimson.Datacenter{}\n\t\tif err = rows.Scan(&dc.Name, &dc.Description, &dc.State); err != nil {\n\t\t\treturn nil, errors.Annotate(err, \"failed to fetch datacenter\").Err()\n\t\t}\n\t\tif matches(dc.Name, names) {\n\t\t\tdatacenters = append(datacenters, dc)\n\t\t}\n\t}\n\treturn datacenters, nil\n}",
"func (d *DHCPv4) DNS() []net.IP {\n\treturn GetIPs(OptionDomainNameServer, d.Options)\n}",
"func (i *InstanceServiceHandler) ListIPv4(ctx context.Context, instanceID string, options *ListOptions) ([]IPv4, *Meta, error) {\n\turi := fmt.Sprintf(\"%s/%s/ipv4\", instancePath, instanceID)\n\treq, err := i.client.NewRequest(ctx, http.MethodGet, uri, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tnewValues, err := query.Values(options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq.URL.RawQuery = newValues.Encode()\n\tips := new(ipBase)\n\tif err = i.client.DoWithContext(ctx, req, ips); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn ips.IPv4s, ips.Meta, nil\n}",
"func (c *Client) ListFlavorZones(args *ListFlavorZonesArgs) (*ListZonesResult, error) {\n\tjsonBytes, jsonErr := json.Marshal(args)\n\tif jsonErr != nil {\n\t\treturn nil, jsonErr\n\t}\n\tbody, err := bce.NewBodyFromBytes(jsonBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ListFlavorZones(c, body)\n}",
"func (c *Client) DBList() []string {\n\tres, _ := r.DBList().Run(c.session)\n\tdbs := []string{}\n\tres.All(&dbs)\n\treturn dbs\n}",
"func (c *Client) ListZoneFlavors(args *ListZoneFlavorsArgs) (*ListFlavorInfosResult, error) {\n\tjsonBytes, jsonErr := json.Marshal(args)\n\tif jsonErr != nil {\n\t\treturn nil, jsonErr\n\t}\n\tbody, err := bce.NewBodyFromBytes(jsonBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ListZoneFlavors(c, body)\n}",
"func (s *dnsManagedZoneLister) List(selector labels.Selector) (ret []*v1alpha1.DnsManagedZone, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.DnsManagedZone))\n\t})\n\treturn ret, err\n}",
"func DSList(dss ...kapisext.DaemonSet) kapisext.DaemonSetList {\n\treturn kapisext.DaemonSetList{\n\t\tItems: dss,\n\t}\n}",
"func (api *hostAPI) ApisrvList(ctx context.Context, opts *api.ListWatchOptions) ([]*cluster.Host, error) {\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. Err: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn apicl.ClusterV1().Host().List(context.Background(), opts)\n\t}\n\n\t// List from local cache\n\tctkitObjs, err := api.List(ctx, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret []*cluster.Host\n\tfor _, obj := range ctkitObjs {\n\t\tret = append(ret, &obj.Host)\n\t}\n\treturn ret, nil\n}",
"func (o *ZoneZone) GetNameservers() []string {\n\tif o == nil || o.Nameservers == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.Nameservers\n}",
"func (g *Domain) GetNameServers(domain string) (nameservers []string, err error) {\n\t_, err = g.client.Get(\"domains/\"+domain+\"/nameservers\", nil, &nameservers)\n\treturn\n}",
"func (a Agent) VirtualServerList() ([]VirtualServer, error) {\n\tlist, err := a.VirtualServerIDList()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvServers := make([]VirtualServer, len(list))\n\tfor i := range list {\n\t\tvServer, err := a.VirtualServer(list[i])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvServers[i] = *vServer\n\t}\n\treturn vServers, nil\n}",
"func (c *daemonSetsClass) getList(ns string, listOptions meta.ListOptions) (*kext.DaemonSetList, error) {\n\treturn c.rk.clientset.Extensions().DaemonSets(ns).List(listOptions)\n}",
"func (h *HTTPApi) listDatasourceInstance(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tdatasourceInstances := make([]string, 0, len(h.storageNode.Datasources))\n\tfor k := range h.storageNode.Datasources {\n\t\tdatasourceInstances = append(datasourceInstances, k)\n\t}\n\n\t// Now we need to return the results\n\tif bytes, err := json.Marshal(datasourceInstances); err != nil {\n\t\t// TODO: log this better?\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t} else {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(bytes)\n\t}\n}",
"func (i *DHCPInterface) SetNTPServers(ntp []string) {\n\tfor _, server := range ntp {\n\t\ti.ntpServers = append(i.ntpServers, []byte(net.ParseIP(server).To4())...)\n\t}\n}",
"func (client DnsClient) ListZones(ctx context.Context, request ListZonesRequest) (response ListZonesResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listZones, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = ListZonesResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = ListZonesResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListZonesResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListZonesResponse\")\n\t}\n\treturn\n}",
"func (client WorkloadNetworksClient) ListDhcpPreparer(ctx context.Context, resourceGroupName string, privateCloudName string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"privateCloudName\": autorest.Encode(\"path\", privateCloudName),\n\t\t\"resourceGroupName\": autorest.Encode(\"path\", resourceGroupName),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t}\n\n\tconst APIVersion = \"2020-07-17-preview\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/workloadNetworks/default/dhcpConfigurations\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}",
"func (client DatabasesClient) ListByServerResponder(resp *http.Response) (result DatabaseListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
"func (o GroupDnsConfigPtrOutput) Nameservers() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *GroupDnsConfig) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Nameservers\n\t}).(pulumi.StringArrayOutput)\n}",
"func getDNSNameservers(resolvConfPath string) ([]string, error) {\n\tif resolvConfPath == \"\" {\n\t\tresolvConfPath = defaultResolvConfPath\n\t}\n\n\tfile, err := os.Open(resolvConfPath)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Could not open '%s'.\", resolvConfPath)\n\t}\n\tdefer mustClose(file)\n\n\tscanner := bufio.NewScanner(file)\n\n\tvar servers []string\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tmatch := resolvConfNameserverPattern.FindStringSubmatch(line)\n\t\tif len(match) == 2 {\n\t\t\tservers = append(servers, match[1])\n\t\t}\n\t}\n\n\tif err = scanner.Err(); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Could not read '%s'.\", resolvConfPath)\n\t}\n\n\tif len(servers) == 0 {\n\t\treturn nil, errors.Errorf(\"No nameservers found in '%s'.\", resolvConfPath)\n\t}\n\n\treturn servers, nil\n}",
"func List(_ machine.ListOptions) ([]*machine.ListResponse, error) {\n\treturn GetVMInfos()\n}",
"func AdminListDynamicConfig(c *cli.Context) {\n\tadminClient := cFactory.ServerAdminClient(c)\n\n\tctx, cancel := newContext(c)\n\tdefer cancel()\n\n\treq := &types.ListDynamicConfigRequest{\n\t\tConfigName: dynamicconfig.UnknownKey.String(),\n\t}\n\n\tval, err := adminClient.ListDynamicConfig(ctx, req)\n\tif err != nil {\n\t\tErrorAndExit(\"Failed to list dynamic config value(s)\", err)\n\t}\n\n\tif val == nil || val.Entries == nil || len(val.Entries) == 0 {\n\t\tfmt.Printf(\"No dynamic config values stored to list.\\n\")\n\t} else {\n\t\tcliEntries := make([]*cliEntry, 0, len(val.Entries))\n\t\tfor _, dcEntry := range val.Entries {\n\t\t\tcliEntry, err := convertToInputEntry(dcEntry)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Cannot parse list response.\\n\")\n\t\t\t}\n\t\t\tcliEntries = append(cliEntries, cliEntry)\n\t\t}\n\t\tprettyPrintJSONObject(cliEntries)\n\t}\n}",
"func (client *Client) ShowHostMaps(host string) ([]Volume, *ResponseStatus, error) {\n\tif len(host) > 0 {\n\t\thost = fmt.Sprintf(\"\\\"%s\\\"\", host)\n\t}\n\tres, status, err := client.FormattedRequest(\"/show/host-maps/%s\", host)\n\tif err != nil {\n\t\treturn nil, status, err\n\t}\n\n\tmappings := make([]Volume, 0)\n\tfor _, rootObj := range res.Objects {\n\t\tif rootObj.Name != \"host-view\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, object := range rootObj.Objects {\n\t\t\tif object.Name == \"volume-view\" {\n\t\t\t\tvol := Volume{}\n\t\t\t\tvol.fillFromObject(&object)\n\t\t\t\tmappings = append(mappings, vol)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn mappings, status, err\n}",
"func (s *GetServersInput) SetVmServerAddressList(v []*VmServerAddress) *GetServersInput {\n\ts.VmServerAddressList = v\n\treturn s\n}",
"func (k *kvSender) updateServerListCache(vbmap *protobuf.VbmapResponse) {\n\n\t//clear the cache\n\tk.serverListCache = k.serverListCache[:0]\n\n\t//update the cache\n\tfor _, kv := range vbmap.GetKvaddrs() {\n\t\tk.serverListCache = append(k.serverListCache, kv)\n\t}\n\n}",
"func (i *InstanceServiceHandler) ListPrivateNetworks(ctx context.Context, instanceID string, options *ListOptions) ([]PrivateNetwork, *Meta, error) {\n\turi := fmt.Sprintf(\"%s/%s/private-networks\", instancePath, instanceID)\n\treq, err := i.client.NewRequest(ctx, http.MethodGet, uri, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tnewValues, err := query.Values(options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq.URL.RawQuery = newValues.Encode()\n\n\tnetworks := new(privateNetworksBase)\n\tif err = i.client.DoWithContext(ctx, req, networks); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn networks.PrivateNetworks, networks.Meta, nil\n}",
"func (s *DataStore) ListReplicas() (map[string]*longhorn.Replica, error) {\n\treturn s.listReplicas(labels.Everything())\n}",
"func (i *DHCPInterface) ServeDHCP(p dhcp.Packet, msgType dhcp.MessageType, options dhcp.Options) dhcp.Packet {\n\tvar respMsg dhcp.MessageType\n\n\tswitch msgType {\n\tcase dhcp.Discover:\n\t\trespMsg = dhcp.Offer\n\tcase dhcp.Request:\n\t\trespMsg = dhcp.ACK\n\t}\n\n\tif respMsg != 0 {\n\t\trequestingMAC := p.CHAddr().String()\n\n\t\tif requestingMAC == i.MACFilter {\n\t\t\topts := dhcp.Options{\n\t\t\t\tdhcp.OptionSubnetMask: []byte(i.VMIPNet.Mask),\n\t\t\t\tdhcp.OptionRouter: []byte(*i.GatewayIP),\n\t\t\t\tdhcp.OptionDomainNameServer: i.dnsServers,\n\t\t\t\tdhcp.OptionHostName: []byte(i.Hostname),\n\t\t\t}\n\n\t\t\tif netRoutes := formClasslessRoutes(&i.Routes); netRoutes != nil {\n\t\t\t\topts[dhcp.OptionClasslessRouteFormat] = netRoutes\n\t\t\t}\n\n\t\t\tif i.ntpServers != nil {\n\t\t\t\topts[dhcp.OptionNetworkTimeProtocolServers] = i.ntpServers\n\t\t\t}\n\n\t\t\toptSlice := opts.SelectOrderOrAll(options[dhcp.OptionParameterRequestList])\n\n\t\t\treturn dhcp.ReplyPacket(p, respMsg, *i.GatewayIP, i.VMIPNet.IP, leaseDuration, optSlice)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (vc *VirtualCenter) ListDatacenters(ctx context.Context) (\n\t[]*Datacenter, error) {\n\tlog := logger.GetLogger(ctx)\n\tif err := vc.Connect(ctx); err != nil {\n\t\tlog.Errorf(\"failed to connect to vCenter. err: %v\", err)\n\t\treturn nil, err\n\t}\n\tfinder := find.NewFinder(vc.Client.Client, false)\n\tdcList, err := finder.DatacenterList(ctx, \"*\")\n\tif err != nil {\n\t\tlog.Errorf(\"failed to list datacenters with err: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tvar dcs []*Datacenter\n\tfor _, dcObj := range dcList {\n\t\tdc := &Datacenter{Datacenter: dcObj, VirtualCenterHost: vc.Config.Host}\n\t\tdcs = append(dcs, dc)\n\t}\n\treturn dcs, nil\n}",
"func Start(config *config.Config) (*Servers, error) {\n\thandlers4, handlers6, err := plugins.LoadPlugins(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsrv := Servers{\n\t\terrors: make(chan error),\n\t}\n\n\t// listen\n\tif config.Server6 != nil {\n\t\tlog.Println(\"Starting DHCPv6 server\")\n\t\tfor _, addr := range config.Server6.Addresses {\n\t\t\tvar l6 *listener6\n\t\t\tl6, err = listen6(&addr)\n\t\t\tif err != nil {\n\t\t\t\tgoto cleanup\n\t\t\t}\n\t\t\tl6.handlers = handlers6\n\t\t\tsrv.listeners = append(srv.listeners, l6)\n\t\t\tgo func() {\n\t\t\t\tsrv.errors <- l6.Serve()\n\t\t\t}()\n\t\t}\n\t}\n\n\tif config.Server4 != nil {\n\t\tlog.Println(\"Starting DHCPv4 server\")\n\t\tfor _, addr := range config.Server4.Addresses {\n\t\t\tvar l4 *listener4\n\t\t\tl4, err = listen4(&addr)\n\t\t\tif err != nil {\n\t\t\t\tgoto cleanup\n\t\t\t}\n\t\t\tl4.handlers = handlers4\n\t\t\tsrv.listeners = append(srv.listeners, l4)\n\t\t\tgo func() {\n\t\t\t\tsrv.errors <- l4.Serve()\n\t\t\t}()\n\t\t}\n\t}\n\n\treturn &srv, nil\n\ncleanup:\n\tsrv.Close()\n\treturn nil, err\n}",
"func ListTimezonesHandler(w http.ResponseWriter, r *http.Request) {\n\ttimezones, err := database.GetAllTimezones()\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tresults, _ := json.Marshal(timezones)\n\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(results)\n}",
"func listVMs(c context.Context, q database.QueryerContext, req *crimson.ListVMsRequest) ([]*crimson.VM, error) {\n\tipv4s, err := parseIPv4s(req.Ipv4S)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstmt := squirrel.Select(\n\t\t\"hv.name\",\n\t\t\"hv.vlan_id\",\n\t\t\"hp.name\",\n\t\t\"hp.vlan_id\",\n\t\t\"o.name\",\n\t\t\"v.description\",\n\t\t\"v.deployment_ticket\",\n\t\t\"i.ipv4\",\n\t\t\"v.state\",\n\t)\n\tstmt = stmt.From(\"vms v, hostnames hv, physical_hosts p, hostnames hp, oses o, ips i\").\n\t\tWhere(\"v.hostname_id = hv.id\").\n\t\tWhere(\"v.physical_host_id = p.id\").\n\t\tWhere(\"p.hostname_id = hp.id\").\n\t\tWhere(\"v.os_id = o.id\").\n\t\tWhere(\"i.hostname_id = hv.id\")\n\tstmt = selectInString(stmt, \"hv.name\", req.Names)\n\tstmt = selectInInt64(stmt, \"hv.vlan_id\", req.Vlans)\n\tstmt = selectInInt64(stmt, \"i.ipv4\", ipv4s)\n\tstmt = selectInString(stmt, \"hp.name\", req.Hosts)\n\tstmt = selectInInt64(stmt, \"hp.vlan_id\", req.HostVlans)\n\tstmt = selectInString(stmt, \"o.name\", req.Oses)\n\tstmt = selectInState(stmt, \"v.state\", req.States)\n\tquery, args, err := stmt.ToSql()\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"failed to generate statement\").Err()\n\t}\n\n\trows, err := q.QueryContext(c, query, args...)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"failed to fetch VMs\").Err()\n\t}\n\tdefer rows.Close()\n\tvar vms []*crimson.VM\n\tfor rows.Next() {\n\t\tv := &crimson.VM{}\n\t\tvar ipv4 common.IPv4\n\t\tif err = rows.Scan(\n\t\t\t&v.Name,\n\t\t\t&v.Vlan,\n\t\t\t&v.Host,\n\t\t\t&v.HostVlan,\n\t\t\t&v.Os,\n\t\t\t&v.Description,\n\t\t\t&v.DeploymentTicket,\n\t\t\t&ipv4,\n\t\t\t&v.State,\n\t\t); err != nil {\n\t\t\treturn nil, errors.Annotate(err, \"failed to fetch VM\").Err()\n\t\t}\n\t\tv.Ipv4 = ipv4.String()\n\t\tvms = append(vms, v)\n\t}\n\treturn vms, nil\n}"
] | [
"0.5871556",
"0.57992846",
"0.5637868",
"0.5621159",
"0.561796",
"0.55982745",
"0.5547308",
"0.5486327",
"0.5452146",
"0.5445553",
"0.5430095",
"0.5428605",
"0.5427161",
"0.53797334",
"0.5372634",
"0.53152144",
"0.5299417",
"0.52930343",
"0.5261868",
"0.5255848",
"0.52518666",
"0.5201575",
"0.5200406",
"0.51966465",
"0.51674825",
"0.5159501",
"0.51589966",
"0.5156744",
"0.5146874",
"0.51299787",
"0.5116046",
"0.5096171",
"0.5064668",
"0.50444514",
"0.5004254",
"0.499411",
"0.49530444",
"0.4942069",
"0.49324647",
"0.49231845",
"0.49230257",
"0.49160486",
"0.49139962",
"0.48982298",
"0.48870382",
"0.48858282",
"0.48809054",
"0.48762622",
"0.48684943",
"0.4860596",
"0.48594922",
"0.48374856",
"0.48371267",
"0.4835746",
"0.4830727",
"0.48197433",
"0.48010135",
"0.47998422",
"0.47952172",
"0.47918957",
"0.4780157",
"0.47619763",
"0.47508544",
"0.47504663",
"0.47467673",
"0.4742025",
"0.4739674",
"0.47313166",
"0.47272465",
"0.47165543",
"0.4712249",
"0.47037864",
"0.47033155",
"0.4702828",
"0.46982178",
"0.46952233",
"0.469375",
"0.4685327",
"0.46825144",
"0.46762642",
"0.4672919",
"0.46721554",
"0.46717206",
"0.466471",
"0.4664619",
"0.46601772",
"0.4660026",
"0.46597272",
"0.46484014",
"0.46411267",
"0.46404326",
"0.4640408",
"0.46389467",
"0.46321505",
"0.46215233",
"0.46207544",
"0.4620066",
"0.46153277",
"0.46140847",
"0.46130762"
] | 0.8791519 | 0 |
parseIPv4Mask parses IPv4 netmask written in IP form (e.g. 255.255.255.0). This function should really belong to the net package. | parseIPv4Mask парсит IPv4-маску, записанную в виде IP (например, 255.255.255.0). Эта функция действительно должна принадлежать пакету net. | func parseIPv4Mask(s string) net.IPMask {
mask := net.ParseIP(s)
if mask == nil {
return nil
}
return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func parseMask4(s string) (mask Prefix, i int) {\n\ti = len(s)\n\tif i == 0 {\n\t\treturn 0, 0\n\t}\n\tif !isD(s[0]) {\n\t\treturn 0, 0\n\t}\n\tmask = Prefix(s[0] - '0')\n\tif i == 1 || !isD(s[1]) {\n\t\treturn mask, 1\n\t}\n\tmask = mask*10 + Prefix(s[1]-'0')\n\tif mask > 32 {\n\t\treturn 0, 0\n\t}\n\tif i == 2 || !isD(s[2]) {\n\t\treturn mask, 2\n\t}\n\treturn 0, 0\n}",
"func ParseIPv4(ip string) ([]net.IP, error) {\n\tparts := strings.Split(ip, \"/\")\n\tif len(parts) > 2 {\n\t\treturn nil, fmt.Errorf(\"parse/ParseIPv4: Invalid IP Address %s\", ip)\n\t}\n\n\tips, err := parseIPv4(parts[0])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar cidr int64\n\n\tif len(parts) != 2 {\n\t\tcidr = 32\n\t} else {\n\n\t\tcidr, err = strconv.ParseInt(parts[1], 0, 8)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif cidr > 32 {\n\t\t\treturn nil, fmt.Errorf(\"parse/ParseIPv4: Invalid IP Address %s: Invalid CIDR notation\", ip)\n\t\t}\n\t}\n\n\treturn parseIPv4CIDR(ips, net.CIDRMask(int(cidr), 32))\n}",
"func ParseIPv4AndCIDR(data string) []*net.IPNet {\n\tfmt.Println(\"data: \", data)\n\tvar reIPv4 = regexp.MustCompile(`(((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]|[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]|[0-9])+)(\\/(3[0-2]|[1-2][0-9]|[0-9]))?`)\n\t//var reIPv4 = regexp.MustCompile(`(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\/(3[0-2]|[1-2][0-9]|[0-9]))`)\n\tscanner := bufio.NewScanner(strings.NewReader(data))\n\n\taddrs := make([]*net.IPNet, 0)\n\tfor scanner.Scan() {\n\t\tx := reIPv4.FindString(scanner.Text())\n\t\tfmt.Println(\"in ParseIPv4AndCIDR, x: \", x)\n\t\tif !strings.Contains(x, \"/\") {\n\t\t\tif !strings.Contains(x, \":\") {\n\t\t\t\tx = x + \"/32\"\n\t\t\t} else {\n\t\t\t\tx = x + \"/128\"\n\t\t\t}\n\t\t}\n\t\tif addr, cidr, e := net.ParseCIDR(x); e == nil {\n\t\t\t//if !ipv4.IsRFC4193(addr) && !ipv4.IsLoopback(addr) && !ipv4.IsBogonIP(addr) {\n\t\t\tif !IsLoopback(addr) && !IsBogonIP(addr) {\n\t\t\t\taddrs = append(addrs, cidr)\n\t\t\t}\n\t\t}\n\t}\n\treturn addrs\n}",
"func WithIPv4Mask(mask net.IPMask) Option {\n\treturn func(o *Options) {\n\t\to.IPv4Mask = mask\n\t}\n}",
"func ParseIPv4(data []byte) (Packet, error) {\n\tif len(data) < 20 {\n\t\treturn nil, ErrorTruncated\n\t}\n\tihl := int(data[0] & 0x0f)\n\theaderLen := ihl * 4\n\tlength := int(bo.Uint16(data[2:]))\n\n\tif headerLen < 20 || headerLen > length {\n\t\treturn nil, ErrorInvalid\n\t}\n\tif length > len(data) {\n\t\treturn nil, ErrorTruncated\n\t}\n\tif Checksum(data[0:headerLen]) != 0 {\n\t\treturn nil, ErrorChecksum\n\t}\n\n\treturn &IPv4{\n\t\tversion: int(data[0] >> 4),\n\t\ttos: int(data[1]),\n\t\tid: bo.Uint16(data[4:]),\n\t\tflags: int8(data[6] >> 5),\n\t\toffset: bo.Uint16(data[6:]) & 0x1fff,\n\t\tttl: data[8],\n\t\tprotocol: Protocol(data[9]),\n\t\tsrc: net.IP(data[12:16]),\n\t\tdst: net.IP(data[16:20]),\n\t\tdata: data[headerLen:length],\n\t}, nil\n}",
"func (i Internet) Ipv4() string {\n\tips := make([]string, 0, 4)\n\n\tips = append(ips, strconv.Itoa(i.Faker.IntBetween(1, 255)))\n\tfor j := 0; j < 3; j++ {\n\t\tips = append(ips, strconv.Itoa(i.Faker.IntBetween(0, 255)))\n\t}\n\n\treturn strings.Join(ips, \".\")\n}",
"func parseIPv4(ip string) net.IP {\n\tif parsedIP := net.ParseIP(strings.TrimSpace(ip)); parsedIP != nil {\n\t\tif ipv4 := parsedIP.To4(); ipv4 != nil {\n\t\t\treturn ipv4\n\t\t}\n\t}\n\n\treturn nil\n}",
"func IsValidIP4(ipAddress string) bool {\n\tipAddress = strings.Trim(ipAddress, \" \")\n\tif !regexp.MustCompile(`^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$`).\n\t\tMatchString(ipAddress) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func ParseProxyIPV4(ip string) (*pb.IPAddress, error) {\n\tnetIP := net.ParseIP(ip)\n\tif netIP == nil {\n\t\treturn nil, fmt.Errorf(\"Invalid IP address: %s\", ip)\n\t}\n\n\toBigInt := IPToInt(netIP.To4())\n\treturn &pb.IPAddress{\n\t\tIp: &pb.IPAddress_Ipv4{\n\t\t\tIpv4: uint32(oBigInt.Uint64()),\n\t\t},\n\t}, nil\n}",
"func parseIPv4(s string) (ip ipOctets, cc int) {\n\tip = make(ipOctets, net.IPv4len)\n\n\tfor i := 0; i < net.IPv4len; i++ {\n\t\tip[i] = make([]ipOctet, 0)\n\t}\n\n\tvar bb [2]uint16 // octet bounds: 0 - lo, 1 - hi\n\n\ti := 0 // octet idx\n\tk := 0 // bound idx: 0 - lo, 1 - hi\n\nloop:\n\tfor i < net.IPv4len {\n\t\t// Decimal number.\n\t\tn, c, ok := dtoi(s)\n\t\tif !ok || n > 0xFF {\n\t\t\treturn nil, cc\n\t\t}\n\n\t\t// Save bound.\n\t\tbb[k] = uint16(n)\n\n\t\t// Stop at max of string.\n\t\ts = s[c:]\n\t\tcc += c\n\t\tif len(s) == 0 {\n\t\t\tip.push(i, bb[0], bb[1])\n\t\t\ti++\n\t\t\tbreak\n\t\t}\n\n\t\t// Otherwise must be followed by dot, colon or dp.\n\t\tswitch s[0] {\n\t\tcase '.':\n\t\t\tfallthrough\n\t\tcase ',':\n\t\t\tip.push(i, bb[0], bb[1])\n\t\t\tbb[1] = 0\n\t\t\tk = 0\n\t\tcase '-':\n\t\t\tif k == 1 {\n\t\t\t\t// To many dashes in one octet.\n\t\t\t\treturn nil, cc\n\t\t\t}\n\t\t\tk++\n\t\tdefault:\n\t\t\tip.push(i, bb[0], bb[1])\n\t\t\ti++\n\t\t\tbreak loop\n\t\t}\n\n\t\tif s[0] == '.' {\n\t\t\ti++\n\t\t}\n\n\t\ts = s[1:]\n\t\tcc++\n\t}\n\n\tif i < net.IPv4len {\n\t\t// Missing ip2octets.\n\t\treturn nil, cc\n\t}\n\n\treturn ip, cc\n}",
"func parseIPv4(s string) (ip IP, ok bool) {\n\tvar ip4 [4]byte\n\n\tfor i := 0; i < 4; i++ {\n\t\tvar (\n\t\t\tj int\n\t\t\tacc uint16\n\t\t)\n\t\t// Parse one byte of digits. Bail if we overflow, stop at\n\t\t// first non-digit.\n\t\t//\n\t\t// As of Go 1.15, don't try to factor this digit reading into\n\t\t// a helper function. Its complexity is slightly too high for\n\t\t// inlining, which ends up costing +50% in parse time.\n\t\tfor j = 0; j < len(s); j++ {\n\t\t\tif s[j] < '0' || s[j] > '9' {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tacc = (acc * 10) + uint16(s[j]-'0')\n\t\t\tif acc > 255 {\n\t\t\t\treturn IP{}, false\n\t\t\t}\n\t\t}\n\t\t// There must be at least 1 digit per quad.\n\t\tif j == 0 {\n\t\t\treturn IP{}, false\n\t\t}\n\n\t\tip4[i] = uint8(acc)\n\n\t\t// Non-final byte must be followed by a dot\n\t\tif i < 3 {\n\t\t\tif len(s) == j || s[j] != '.' {\n\t\t\t\treturn IP{}, false\n\t\t\t}\n\t\t\tj++\n\t\t}\n\n\t\t// Advance to the next set of digits.\n\t\ts = s[j:]\n\t}\n\tif len(s) != 0 {\n\t\treturn IP{}, false\n\t}\n\n\treturn IPv4(ip4[0], ip4[1], ip4[2], ip4[3]), true\n}",
"func ParsePublicIPV4(ip string) (*l5dNetPb.IPAddress, error) {\n\tnetIP := net.ParseIP(ip)\n\tif netIP != nil {\n\t\toBigInt := IPToInt(netIP.To4())\n\t\tnetIPAddress := &l5dNetPb.IPAddress{\n\t\t\tIp: &l5dNetPb.IPAddress_Ipv4{\n\t\t\t\tIpv4: uint32(oBigInt.Uint64()),\n\t\t\t},\n\t\t}\n\t\treturn netIPAddress, nil\n\t}\n\treturn nil, fmt.Errorf(\"Invalid IP address: %s\", ip)\n}",
"func unpackSockaddr4(data []byte) (net.IP, int) {\n\tif len(data) != 16 {\n\t\tpanic(\"unexpected struct length\")\n\t}\n\tvar port uint16\n\tbinary.Read(bytes.NewReader(data[2:4]), binary.BigEndian, &port)\n\treturn data[4:8], int(port)\n}",
"func ParseFromIPAddr(ipNet *net.IPNet) (*IPv4Address, *IPv6Address, error) {\n\tif ipNet == nil {\n\t\treturn nil, nil, fmt.Errorf(\"Nil address: %v\", ipNet)\n\t}\n\n\tif v4Addr := ipNet.IP.To4(); v4Addr != nil {\n\t\tcidr, _ := ipNet.Mask.Size()\n\t\tret := NewIPv4AddressFromBytes(v4Addr, uint(cidr))\n\t\treturn &ret, nil, nil\n\t}\n\tif v6Addr := ipNet.IP.To16(); v6Addr != nil {\n\t\tcidr, _ := ipNet.Mask.Size()\n\t\tret := NewIPv6Address(v6Addr, uint(cidr))\n\t\treturn nil, &ret, nil\n\t}\n\n\treturn nil, nil, fmt.Errorf(\"couldn't parse either v4 or v6 address: %v\", ipNet)\n}",
"func extractIPv4(ptr string) string {\n\ts := strings.Replace(ptr, \".in-addr.arpa\", \"\", 1)\n\twords := strings.Split(s, \".\")\n\tfor i, j := 0, len(words)-1; i < j; i, j = i+1, j-1 {\n\t\twords[i], words[j] = words[j], words[i]\n\t}\n\treturn strings.Join(words, \".\")\n}",
"func ParseIPFromString(address string) (*IPv4Address, *IPv6Address, error) {\n\tvar err error\n\n\t// see if there's a CIDR\n\tparts := strings.Split(address, \"/\")\n\tcidr := -1 // default needs to be -1 to handle /0\n\tif len(parts) == 2 {\n\t\tc, err := strconv.ParseUint(parts[1], 10, 8)\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"couldn't parse CIDR to int: %s\", err)\n\t\t}\n\t\tif c > 128 {\n\t\t\treturn nil, nil, fmt.Errorf(\"Invalid CIDR: %d\", c)\n\t\t}\n\t\tcidr = int(c)\n\t}\n\n\t// try parsing as IPv4 - force CIDR at the end\n\tv4AddrStr := address\n\tif cidr == -1 {\n\t\t// no CIDR specified - tack on /32\n\t\tv4AddrStr = fmt.Sprintf(\"%s/32\", address)\n\t}\n\t_, ipNet, err := net.ParseCIDR(v4AddrStr)\n\tif err == nil {\n\t\tcidr, mask := ipNet.Mask.Size()\n\t\tif v4Addr := ipNet.IP.To4(); v4Addr != nil && mask == 32 { // nil error here\n\t\t\tret := NewIPv4AddressFromBytes(v4Addr, uint(cidr))\n\t\t\treturn &ret, nil, nil\n\t\t}\n\t}\n\n\t// try parsing as IPv6\n\tv6AddrStr := address\n\tif cidr == -1 {\n\t\t// no CIDR specified - tack on /128\n\t\tv6AddrStr = fmt.Sprintf(\"%s/128\", address)\n\t}\n\t_, ipNet, err = net.ParseCIDR(v6AddrStr)\n\tif err == nil {\n\t\tcidr, mask := ipNet.Mask.Size()\n\t\tif v6Addr := ipNet.IP.To16(); v6Addr != nil && mask == 128 {\n\t\t\tret := NewIPv6Address(v6Addr, uint(cidr))\n\t\t\treturn nil, &ret, nil\n\t\t}\n\t}\n\n\treturn nil, nil, fmt.Errorf(\"couldn't parse either v4 or v6 address\")\n}",
"func (f *FloatingIP) IPv4Net() (*net.IPNet, error) {\n\tvar ip net.IP\n\tif ip = net.ParseIP(f.IP); ip == nil {\n\t\treturn nil, fmt.Errorf(\"error parsing IPv4Address '%s'\", f.IP)\n\t}\n\treturn &net.IPNet{\n\t\tIP: ip,\n\t\tMask: net.CIDRMask(32, 32),\n\t}, nil\n}",
"func CheckIPv4Addr(addr string) error {\n\tif IsEmptyString(&addr) {\n\t\treturn errors.New(\"addr is empty\")\n\t}\n\n\tstrArray := strings.Split(addr, \":\")\n\n\tif len(strArray) != 2 {\n\t\treturn errors.New(\"Invalid addr:\" + addr)\n\t}\n\n\tif IsEmptyString(&strArray[0]) {\n\t\treturn errors.New(\"Invalid addr:\" + addr)\n\t}\n\n\tif IsEmptyString(&strArray[1]) {\n\t\treturn errors.New(\"Invalid addr:\" + addr)\n\t}\n\n\tvar error error\n\n\tipv4 := strArray[0]\n\terror = CheckIPv4(ipv4)\n\tif error != nil {\n\t\treturn error\n\t}\n\n\tvar port int64\n\tport, error = strconv.ParseInt(strArray[1], 10, 64)\n\tif error != nil {\n\t\treturn error\n\t}\n\n\terror = CheckPort(port)\n\tif error != nil {\n\t\treturn error\n\t}\n\n\treturn nil\n}",
"func Ipv4(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldIpv4), v))\n\t})\n}",
"func IPv4(a, b, c, d uint8) IP {\n\treturn IP{\n\t\tlo: 0xffff00000000 | uint64(a)<<24 | uint64(b)<<16 | uint64(c)<<8 | uint64(d),\n\t\tz: z4,\n\t}\n}",
"func decodeMask(mask string) (uint32, error) {\n\timask, err := strconv.Atoi(mask)\n\tvar outmask uint32\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"Error decoding netmask\")\n\t}\n\tif imask > 32 || imask < 0 {\n\t\treturn 0, errors.New(\"Mask out of bounds\")\n\t}\n\tfor i := 0; i < imask; i++ {\n\t\toutmask += 1 << i\n\t}\n\treturn outmask, nil\n}",
"func decodeIPv4ToNetIP(ip uint32) net.IP {\n\toBigInt := big.NewInt(0)\n\toBigInt = oBigInt.SetUint64(uint64(ip))\n\treturn IntToIPv4(oBigInt)\n}",
"func IsIPv4(dots string) bool {\n\tip := net.ParseIP(dots)\n\tif ip == nil {\n\t\treturn false\n\t}\n\treturn ip.To4() != nil\n}",
"func IsIPv4(value string) bool {\n\tip := net.ParseIP(value)\n\tif ip == nil {\n\t\treturn false\n\t}\n\treturn ip.To4() != nil\n}",
"func IsIpv4(s string) bool {\n\tips := strings.Split(s, ipSep)\n\tif len(ips) != ipV4Len {\n\t\treturn false\n\t}\n\tfor _, v := range ips {\n\t\tnum, e := strconv.Atoi(v)\n\t\tif e != nil || num > ipv4Max || num < ipv4Min {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func network4(addr uint32, prefix uint) uint32 {\n\treturn addr & netmask(prefix)\n}",
"func FilterIPV4(ips []net.IP) []string {\n\tvar ret = make([]string, 0)\n\tfor _, ip := range ips {\n\t\tif ip.To4() != nil {\n\t\t\tret = append(ret, ip.String())\n\t\t}\n\t}\n\treturn ret\n}",
"func IPv4(str string) bool {\n\tip := net.ParseIP(str)\n\treturn ip != nil && strings.Contains(str, \".\")\n}",
"func (m *IPV4) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateLan(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateWan(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func IPv4(name string) (string, error) {\n\ti, err := net.InterfaceByName(name)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\taddrs, err := i.Addrs()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, a := range addrs {\n\t\tif ipn, ok := a.(*net.IPNet); ok {\n\t\t\tif ipn.IP.To4() != nil {\n\t\t\t\treturn ipn.IP.String(), nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\", fmt.Errorf(\"no IPv4 found for interface: %q\", name)\n}",
"func IsIPv4(s string) bool {\n\tif s == \"\" {\n\t\treturn false\n\t}\n\n\tip := net.ParseIP(s)\n\treturn ip != nil && strings.Contains(s, \".\") // && ip.To4() != nil\n}",
"func splitRange4(addr uint32, prefix uint, lo, hi uint32, cidrs *[]*net.IPNet) error {\n\tif prefix > 32 {\n\t\treturn fmt.Errorf(\"Invalid mask size: %d\", prefix)\n\t}\n\n\tbc := broadcast4(addr, prefix)\n\tif (lo < addr) || (hi > bc) {\n\t\treturn fmt.Errorf(\"%d, %d out of range for network %d/%d, broadcast %d\", lo, hi, addr, prefix, bc)\n\t}\n\n\tif (lo == addr) && (hi == bc) {\n\t\tcidr := net.IPNet{IP: uint32ToIPV4(addr), Mask: net.CIDRMask(int(prefix), 8*net.IPv4len)}\n\t\t*cidrs = append(*cidrs, &cidr)\n\t\treturn nil\n\t}\n\n\tprefix++\n\tlowerHalf := addr\n\tupperHalf := setBit(addr, prefix, 1)\n\tif hi < upperHalf {\n\t\treturn splitRange4(lowerHalf, prefix, lo, hi, cidrs)\n\t} else if lo >= upperHalf {\n\t\treturn splitRange4(upperHalf, prefix, lo, hi, cidrs)\n\t} else {\n\t\terr := splitRange4(lowerHalf, prefix, lo, broadcast4(lowerHalf, prefix), cidrs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn splitRange4(upperHalf, prefix, upperHalf, hi, cidrs)\n\t}\n}",
"func isIPv4(fl FieldLevel) bool {\n\tip := net.ParseIP(fl.Field().String())\n\n\treturn ip != nil && ip.To4() != nil\n}",
"func isCIDRv4(fl FieldLevel) bool {\n\tip, _, err := net.ParseCIDR(fl.Field().String())\n\n\treturn err == nil && ip.To4() != nil\n}",
"func (s *Server) IPv4Net() (*net.IPNet, error) {\n\tvar ip net.IP\n\tif ip = net.ParseIP(s.IPv4Address); ip == nil {\n\t\treturn nil, fmt.Errorf(\"error parsing IPv4Address '%s'\", s.IPv4Address)\n\t}\n\treturn &net.IPNet{\n\t\tIP: ip,\n\t\tMask: net.CIDRMask(32, 32),\n\t}, nil\n}",
"func IPv4NetStartEnd(ip net.IP, mask int) (start, end uint32) {\n\tswitch {\n\tcase mask < 0:\n\t\treturn minIPv4, maxIPv4\n\tcase mask > 32:\n\t\treturn maxIPv4, maxIPv4\n\t}\n\n\ti := IPv4ToUInt(ip)\n\tsm := uint32(maxIPv4 << uint32(32-mask))\n\tem := ^sm\n\n\tstart = i & sm\n\tend = i | em\n\treturn start, end\n}",
"func (n *hostOnlyNetwork) SaveIPv4(vbox VBoxManager) error {\n\tif n.IPv4.IP != nil && n.IPv4.Mask != nil {\n\t\tif err := vbox.vbm(\"hostonlyif\", \"ipconfig\", n.Name, \"--ip\", n.IPv4.IP.String(), \"--netmask\", net.IP(n.IPv4.Mask).String()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func IsIPv4(ipAddress string) bool {\n\tip := net.ParseIP(ipAddress)\n\treturn ip != nil && strings.Count(ipAddress, \":\") < 2\n}",
"func (me TxsdAddressSimpleContentExtensionCategory) IsIpv4NetMask() bool {\n\treturn me.String() == \"ipv4-net-mask\"\n}",
"func IsIPv4(s string) bool {\n\tif s == \"\" {\n\t\treturn false\n\t}\n\n\tip := net.ParseIP(s)\n\treturn ip != nil && ip.To4() != nil\n}",
"func Ipv4ToInt(ip string) (intv int64, err error) {\n\tips := strings.Split(ip, ipSep)\n\tif len(ips) != ipV4Len {\n\t\terr = BadIpv4Error\n\t\treturn\n\t}\n\n\tfor i, v := range ips {\n\t\tnum, e := strconv.Atoi(v)\n\t\tif e != nil || num < ipv4Min || num > ipv4Max {\n\t\t\terr = BadIpv4Error\n\t\t\treturn\n\t\t}\n\t\tintv += int64(num) << ipv4Shift[i]\n\t}\n\treturn\n}",
"func IPv4ClassfulNetwork(address net.IP) *net.IPNet {\n\tif address.To4() != nil {\n\t\tvar newIP net.IP\n\t\tvar newMask net.IPMask\n\t\tswitch {\n\t\tcase uint8(address[0]) < 128:\n\t\t\tnewIP = net.IPv4(uint8(address[0]), 0, 0, 0)\n\t\t\tnewMask = net.IPv4Mask(255, 0, 0, 0)\n\t\tcase uint8(address[0]) < 192:\n\t\t\tnewIP = net.IPv4(uint8(address[0]), uint8(address[1]), 0, 0)\n\t\t\tnewMask = net.IPv4Mask(255, 255, 0, 0)\n\t\tcase uint8(address[0]) < 224:\n\t\t\tnewIP = net.IPv4(uint8(address[0]), uint8(address[1]), uint8(address[2]), 0)\n\t\t\tnewMask = net.IPv4Mask(255, 255, 255, 0)\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t\treturn &net.IPNet{IP: newIP, Mask: newMask}\n\t}\n\treturn nil\n}",
"func extractIPv4(reverseName string) (string, error) {\n\t// reverse the segments and then combine them\n\tsegments := ReverseArray(strings.Split(reverseName, \".\"))\n\n\tip := net.ParseIP(strings.Join(segments, \".\")).To4()\n\tif ip == nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse IPv4 reverse name: %q\", reverseName)\n\t}\n\treturn ip.String(), nil\n}",
"func IPv4() (string, error) {\n\tconn, err := net.Dial(\"udp\", \"8.8.8.8:80\")\n\tif err != nil {\n\t\treturn \"\", errors.New(\"Failed to determine your IP\")\n\t}\n\tlocalAddr := conn.LocalAddr().(*net.UDPAddr)\n\tmyIP := localAddr.IP.String()\n\tconn.Close()\n\treturn myIP, nil\n}",
"func NewIPv4(value string) (IPv4, error) {\n\tvar IP = IPv4{value: value}\n\n\tif !IP.validate() {\n\t\treturn IPv4{}, ErrInvalidIPv4\n\t}\n\n\treturn IP, nil\n}",
"func (internet Internet) IPv4(v reflect.Value) (interface{}, error) {\n\treturn internet.ipv4(), nil\n}",
"func ParseIP(s string) (IP, error) {\n\t// IPv4 fast path.\n\tif ip, ok := parseIPv4(s); ok {\n\t\treturn ip, nil\n\t}\n\n\tvar ipa net.IPAddr\n\tipa.IP = net.ParseIP(s)\n\tif ipa.IP == nil {\n\t\tswitch percent := strings.Index(s, \"%\"); percent {\n\t\tcase -1:\n\t\t\t// handle bad input with no % at all, so the net.ParseIP was not due to a zoned IPv6 fail\n\t\t\treturn IP{}, fmt.Errorf(\"netaddr.ParseIP(%q): unable to parse IP\", s)\n\t\tcase 0:\n\t\t\t// handle bad input with % at the start\n\t\t\treturn IP{}, fmt.Errorf(\"netaddr.ParseIP(%q): missing IPv6 address\", s)\n\t\tcase len(s) - 1:\n\t\t\t// handle bad input with % at the end\n\t\t\treturn IP{}, fmt.Errorf(\"netaddr.ParseIP(%q): missing zone\", s)\n\t\tdefault:\n\t\t\t// net.ParseIP can't deal with zoned scopes, let's split and try to parse the IP again\n\t\t\ts, ipa.Zone = s[:percent], s[percent+1:]\n\t\t\tipa.IP = net.ParseIP(s)\n\t\t\tif ipa.IP == nil {\n\t\t\t\treturn IP{}, fmt.Errorf(\"netaddr.ParseIP(%q): unable to parse IP\", s)\n\t\t\t}\n\t\t}\n\t}\n\n\tif !strings.Contains(s, \":\") {\n\t\tif ip4 := ipa.IP.To4(); ip4 != nil {\n\t\t\tif ipa.Zone != \"\" {\n\t\t\t\treturn IP{}, fmt.Errorf(\"netaddr.ParseIP(%q): invalid zone with IPv4 address\", s)\n\t\t\t}\n\t\t\treturn IPv4(ip4[0], ip4[1], ip4[2], ip4[3]), nil\n\t\t}\n\t}\n\treturn ipv6Slice(ipa.IP.To16()).WithZone(ipa.Zone), nil\n}",
"func (n *NetworkAssociation) IPv4Net() (*net.IPNet, error) {\n\tvar ip net.IP\n\tif ip = net.ParseIP(n.ServerIP); ip == nil {\n\t\treturn nil, fmt.Errorf(\"error parsing ServerIP '%s'\", ip)\n\t}\n\treturn &net.IPNet{\n\t\tIP: ip,\n\t\tMask: net.CIDRMask(24, 32),\n\t}, nil\n}",
"func (o NodeBalancerOutput) Ipv4() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *NodeBalancer) pulumi.StringOutput { return v.Ipv4 }).(pulumi.StringOutput)\n}",
"func (internet *Internet) IPv4Address() string {\n\tvar parts []string\n\tfor i := 0; i < 4; i++ {\n\t\tparts = append(parts, fmt.Sprintf(\"%d\", internet.faker.random.Intn(253)+2))\n\t}\n\treturn strings.Join(parts, \".\")\n}",
"func (o *NetworkElementSummaryAllOf) GetOutOfBandIpv4Mask() string {\n\tif o == nil || o.OutOfBandIpv4Mask == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.OutOfBandIpv4Mask\n}",
"func IsIPv4(ip string) bool {\n\treturn net.ParseIP(ip).To4() != nil\n}",
"func mask4(n uint8) uint32 {\n\treturn ^uint32(0) << (32 - n)\n}",
"func inAddrV4(ip netip.Addr) (uint32, error) {\n\tif !ip.Is4() {\n\t\treturn 0, fmt.Errorf(\"%s is not IPv4\", ip)\n\t}\n\tv4 := ip.As4()\n\treturn endian.Uint32(v4[:]), nil\n}",
"func IsIPv4(ipv4 string) bool {\n\tis, _ := regexp.Match(`^(2[0-5]{2}|2[0-4][0-9]|1?[0-9]{1,2}).(2[0-5]{2}|2[0-4][0-9]|1?[0-9]{1,2}).(2[0-5]{2}|2[0-4][0-9]|1?[0-9]{1,2}).(2[0-5]{2}|2[0-4][0-9]|1?[0-9]{1,2})$`, []byte(ipv4))\n\treturn is\n}",
"func (m *InterfaceProtocolConfigIPV4) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateDhcp(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMethod(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStatic(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func IpV4Address() string {\n\tblocks := []string{}\n\tfor i := 0; i < 4; i++ {\n\t\tnumber := seedAndReturnRandom(255)\n\t\tblocks = append(blocks, strconv.Itoa(number))\n\t}\n\n\treturn strings.Join(blocks, \".\")\n}",
"func decodeAddress(address string) (uint32, error) {\n\tsplit := strings.Split(address, \".\")\n\tif len(split) != 4 {\n\t\treturn 0, errors.New(\"Error decoding IPv4 address: wrong amount of octets\")\n\t}\n\tvar IPaddress uint32\n\tfor i, octetstr := range split {\n\t\tsegment, err := strconv.Atoi(octetstr)\n\t\tif err != nil {\n\t\t\treturn 0, errors.Wrap(err, \"Error decoding IPv4 address\")\n\t\t}\n\t\tif segment > math.MaxUint8 {\n\t\t\treturn 0, errors.New(\"Error decoding IPv4 address: value overflow\")\n\t\t}\n\t\t// Shift octets by determined amount of bits.\n\t\tswitch i {\n\t\tcase 0:\n\t\t\tsegment = segment << 24\n\t\tcase 1:\n\t\t\tsegment = segment << 16\n\t\tcase 2:\n\t\t\tsegment = segment << 8\n\t\t}\n\t\tIPaddress += uint32(segment)\n\t}\n\treturn IPaddress, nil\n}",
"func IsIPv4(s string) bool {\n\tvar p [IPv4len]byte\n\tfor i := 0; i < IPv4len; i++ {\n\t\tif len(s) == 0 {\n\t\t\t// Missing octets.\n\t\t\treturn false\n\t\t}\n\t\tif i > 0 {\n\t\t\tif s[0] != '.' {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\ts = s[1:]\n\t\t}\n\t\tn, c, ok := dtoi(s)\n\t\tif !ok || n > 0xFF {\n\t\t\treturn false\n\t\t}\n\t\ts = s[c:]\n\t\tp[i] = byte(n)\n\t}\n\tif len(s) != 0 {\n\t\treturn false\n\t}\n\treturn true\n}",
"func extractUnicastIPv4Addrs(addrs []net.Addr) []string {\n\tvar ips []string\n\n\tfor _, a := range addrs {\n\t\tvar ip net.IP\n\n\t\tswitch a := a.(type) {\n\t\tcase *net.IPNet:\n\t\t\tip = a.IP\n\t\tcase *net.IPAddr:\n\t\t\tip = a.IP\n\t\t}\n\n\t\tif ip == nil || len(ip.To4()) == 0 {\n\t\t\t// Windows dataplane doesn't support IPv6 yet.\n\t\t\tcontinue\n\t\t}\n\t\tif ip.IsLoopback() {\n\t\t\t// Skip 127.0.0.1.\n\t\t\tcontinue\n\t\t}\n\t\tips = append(ips, ip.String()+\"/32\")\n\t}\n\n\treturn ips\n}",
"func ResolveIPv4(host string) (net.IP, error) {\n\tif node := DefaultHosts.Search(host); node != nil {\n\t\tif ip := node.Data.(net.IP).To4(); ip != nil {\n\t\t\treturn ip, nil\n\t\t}\n\t}\n\n\tip := net.ParseIP(host)\n\tif ip != nil {\n\t\tif !strings.Contains(host, \":\") {\n\t\t\treturn ip, nil\n\t\t}\n\t\treturn nil, errIPVersion\n\t}\n\n\tif DefaultResolver != nil {\n\t\treturn DefaultResolver.ResolveIPv4(host)\n\t}\n\n\tipAddrs, err := net.LookupIP(host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, ip := range ipAddrs {\n\t\tif ip4 := ip.To4(); ip4 != nil {\n\t\t\treturn ip4, nil\n\t\t}\n\t}\n\n\treturn nil, errIPNotFound\n}",
"func isIP4AddrResolvable(fl FieldLevel) bool {\n\tif !isIPv4(fl) {\n\t\treturn false\n\t}\n\n\t_, err := net.ResolveIPAddr(\"ip4\", fl.Field().String())\n\n\treturn err == nil\n}",
"func newBlock4(ip net.IP, mask net.IPMask) *cidrBlock4 {\n\tvar block cidrBlock4\n\n\tblock.first = ipv4ToUInt32(ip)\n\tprefix, _ := mask.Size()\n\tblock.last = broadcast4(block.first, uint(prefix))\n\n\treturn &block\n}",
"func IntToIpv4(intv int64) (ip string, err error) {\n\tif intv < ipv4MinInt || intv > ipv4MaxInt {\n\t\terr = BadIpv4Error\n\t\treturn\n\t}\n\n\tip = strings.Join([]string{\n\t\tstrconv.Itoa(int(intv & ip1x >> ipv4Shift[0])),\n\t\tstrconv.Itoa(int(intv & ip2x >> ipv4Shift[1])),\n\t\tstrconv.Itoa(int(intv & ip3x >> ipv4Shift[2])),\n\t\tstrconv.Itoa(int(intv & ip4x >> ipv4Shift[3]))},\n\t\tipSep)\n\n\treturn\n}",
"func isIPv4(s string) bool {\n\tip := netutils.ParseIPSloppy(s)\n\treturn ip != nil && strings.Contains(s, \".\")\n}",
"func (o *NetworkElementSummaryAllOf) SetOutOfBandIpv4Mask(v string) {\n\to.OutOfBandIpv4Mask = &v\n}",
"func ResolveToIPv4Address(address string) (string, error) {\n\tif ip := net.ParseIP(address); ip != nil {\n\t\t// Address is either an IPv6 or IPv4 address\n\t\tipv4 := ip.To4()\n\t\tif ipv4 == nil {\n\t\t\treturn \"\", errors.Errorf(\"not an IPv4 network address: %s\", ip.String())\n\t\t}\n\t\treturn ipv4.String(), nil\n\t}\n\n\t// DNS address in this case\n\tips, err := net.LookupIP(address)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"lookup of address %s failed\", address)\n\t}\n\t// Get first IPv4 address returned\n\tfor _, returnedIP := range ips {\n\t\tif returnedIP.To4() != nil {\n\t\t\treturn returnedIP.String(), nil\n\t\t}\n\t}\n\treturn \"\", errors.Errorf(\"%s does not resolve to an IPv4 address\", address)\n}",
"func IPv4Address(ctx context.Context, client *docker.Client, containerID string) (net.IP, error) {\n\tc, err := client.InspectContainerWithContext(containerID, ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"find container %s address: %w\", containerID, err)\n\t}\n\treturn ipv4Address(c)\n}",
"func isIPv4(s []byte) bool {\n\tfor _, v := range s[4:] {\n\t\tif v != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func IsIPv4Net(ipnet *net.IPNet) bool {\n\treturn ipnet.IP.To4() != nil\n}",
"func (o *NetworkElementSummaryAllOf) GetOutOfBandIpv4MaskOk() (*string, bool) {\n\tif o == nil || o.OutOfBandIpv4Mask == nil {\n\t\treturn nil, false\n\t}\n\treturn o.OutOfBandIpv4Mask, true\n}",
"func uint32ToIPv4(intIP uint32) net.IP {\n\tip := make(net.IP, 4)\n\tbinary.BigEndian.PutUint32(ip, intIP)\n\treturn ip\n}",
"func parsedIp(ip []byte) []byte {\n\tvar ip4 []byte\n\tip_arr := bytes.Split(ip, dot)\n\tif len(ip_arr) != 4 {\n\t\treturn nil\n\t}\n\tfor _, p := range ip_arr {\n\t\ttmp := bytes2byte(p)\n\t\tif tmp == 32 {\n\t\t\treturn nil\n\t\t}\n\t\tip4 = append(ip4, tmp)\n\t}\n\treturn ip4\n}",
"func ScanOnIPv4WithCIDR(iface, cidrIP string) ([]Device, error) {\n\tipParts := strings.Split(cidrIP, \"/\")\n\tif len(ipParts) != 2 {\n\t\treturn nil, errors.New(\"Invalid CIDR IP - \" + cidrIP)\n\t}\n\tip := ipParts[0]\n\tmask := ipParts[1]\n\n\t// nmap args\n\targTarget := ip[:strings.LastIndex(ip, \".\")] + \".0/\" + mask\n\n\t// TODO: check if it's running with su privileges\n\tcmd := exec.Command(\"nmap\", \"-oN\", \"-\", \"-sP\", argTarget, \"-T\", \"insane\", \"--exclude\", ip)\n\n\t//cmd.Stdin = strings.NewReader(\"\")\n\tvar out bytes.Buffer\n\tcmd.Stdout = &out\n\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlines := strings.Split(out.String(), \"\\n\")\n\tl := len(lines)\n\n\tif l < 5 {\n\t\treturn nil, errors.New(\"nmap invalid response\")\n\t}\n\n\tvar devices []Device\n\tfor i := 1; i < l-2; i += 3 {\n\t\tdevices = append(devices, Device{\n\t\t\tIP: lines[i][21:],\n\t\t\tMAC: lines[i+2][13:30],\n\t\t\tManufacturer: lines[i+2][32 : len(lines[i+2])-1],\n\t\t})\n\t}\n\n\treturn devices, nil\n}",
"func (out *ipv4Subnet) UnmarshalJSON(b []byte) error {\n\tvar s string\n\tif err := json.Unmarshal(b, &s); err != nil {\n\t\treturn err\n\t}\n\n\tif ip, ipnet, err := net.ParseCIDR(s); err == nil {\n\t\tif out.Addr, err = convertIPv4(ip.To4()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif out.Mask, err = convertIPv4(ipnet.Mask); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tif ip := net.ParseIP(s); ip != nil {\n\t\tvar err error\n\t\tif out.Addr, err = convertIPv4(ip.To4()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tout.Mask = 0xffffffff\n\t\treturn nil\n\t}\n\treturn errors.New(\"Failed to parse address \" + s)\n}",
"func (f *FSEIDFields) SetIPv4Flag() {\n\tf.Flags |= 0x02\n}",
"func packSockaddr4(addr net.IP, port int) []byte {\n\tip4 := addr.To4()\n\tif ip4 == nil {\n\t\tpanic(\"must take an IPv4 address\")\n\t}\n\tvar buf bytes.Buffer\n\tbuf.WriteByte(16)\n\tbuf.WriteByte(unix.AF_INET)\n\tbinary.Write(&buf, binary.BigEndian, uint16(port))\n\tbuf.Write(ip4)\n\tbuf.Write(make([]byte, 8))\n\treturn buf.Bytes()\n}",
"func (ipSet *IPSet) IsIPv4() bool {\n\treturn govalidator.IsIPv4(ipSet.IPv4)\n}",
"func broadcast4(addr uint32, prefix uint) uint32 {\n\treturn addr | ^netmask(prefix)\n}",
"func stringIPv4(n uint32) string {\n\tip := make(net.IP, 4)\n\tbinary.BigEndian.PutUint32(ip, n)\n\treturn ip.String()\n}",
"func uint32ToIPV4(addr uint32) net.IP {\n\tip := make([]byte, net.IPv4len)\n\tbinary.BigEndian.PutUint32(ip, addr)\n\treturn ip\n}",
"func (m *IPAddresses100IPV4Address) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAddress(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateAddressOrigin(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSubnetMask(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (p *PeerToPeer) handlePacketIPv4(contents []byte, proto int) {\n\tLog(Trace, \"Handling IPv4 Packet\")\n\tf := new(ethernet.Frame)\n\tif err := f.UnmarshalBinary(contents); err != nil {\n\t\tLog(Error, \"Failed to unmarshal IPv4 packet\")\n\t}\n\n\tif f.EtherType != ethernet.EtherTypeIPv4 {\n\t\treturn\n\t}\n\tmsg := CreateNencP2PMessage(p.Crypter, contents, uint16(proto), 1, 1, 1)\n\tp.SendTo(f.Destination, msg)\n}",
"func decodeMask(mask string) (uint64, error) {\n\tmask = strings.ToLower(mask)\n\n\tif strings.HasPrefix(mask, \"0x\") {\n\t\tif len(mask) < 3 {\n\t\t\treturn 0, fmt.Errorf(\"invalid mask: %s\", mask)\n\t\t}\n\n\t\tb, err := hex.DecodeString(mask[2:])\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"invalid mask: %w\", err)\n\t\t}\n\n\t\tvar u uint64\n\t\tfor _, v := range b {\n\t\t\tu = u<<8 | uint64(v)\n\t\t}\n\n\t\treturn u, nil\n\t}\n\n\treturn strconv.ParseUint(mask, 10, 64)\n}",
"func GetNetAndMask(input string) (string, int, error) {\n\t_, cidr, err := net.ParseCIDR(input)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tnet := cidr.IP.String()\n\tmask, _ := cidr.Mask.Size()\n\treturn net, mask, nil\n}",
"func Ipv4Contains(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldIpv4), v))\n\t})\n}",
"func Ipv4In(vs ...string) predicate.Agent {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". This will make this predicate falsy.\n\t\tif len(v) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.In(s.C(FieldIpv4), v...))\n\t})\n}",
"func ipv4only(addr IPAddr) bool {\n\treturn supportsIPv4 && addr.IP.To4() != nil\n}",
"func (m *RIBMessage) IPv4Unicast() (*IPv4UnicastAnnounceTextMessage, error) {\n\tif m.Family() != \"ipv4 unicast\" {\n\t\treturn nil, fmt.Errorf(\"wrong entry family: %s\", m.Family())\n\t}\n\tnm := &IPv4UnicastAnnounceTextMessage{}\n\tres, err := parseIPv4UnicastLine(m.Details)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnm.NLRI = res[\"nlri\"]\n\tnm.NextHop = res[\"next_hop\"]\n\tnm.Attributes = res[\"attributes\"]\n\treturn nm, nil\n}",
"func add_cidr_mask(addr string) string {\n\tif strings.Contains(addr, \"/\") {\n\t\treturn addr\n\t}\n\n\tif strings.Contains(addr, \":\") {\n\t\treturn addr + \"/128\"\n\t} else {\n\t\treturn addr + \"/32\"\n\t}\n}",
"func isUDP4AddrResolvable(fl FieldLevel) bool {\n\tif !isIP4Addr(fl) {\n\t\treturn false\n\t}\n\n\t_, err := net.ResolveUDPAddr(\"udp4\", fl.Field().String())\n\n\treturn err == nil\n}",
"func ipv4ToUInt32(ip net.IP) uint32 {\n\treturn binary.BigEndian.Uint32(ip)\n}",
"func DetectHostIPv4() (string, error) {\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\treturn \"\", errors.WithStack(err)\n\t}\n\tfor _, a := range addrs {\n\t\tif ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {\n\t\t\tif ipnet.IP.To4() == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn ipnet.IP.String(), nil\n\t\t}\n\t}\n\treturn \"\", errors.New(\"cannot detect host IPv4 address\")\n}",
"func IsIPv4(ip *net.IP) bool {\n\treturn ip.To4() != nil\n}",
"func IsCIDRv4(s string) bool {\n\tif s == \"\" {\n\t\treturn false\n\t}\n\tip, _, err := net.ParseCIDR(s)\n\treturn err == nil && ip.To4() != nil\n}",
"func IsCIDRv4(s string) bool {\n\tif s == \"\" {\n\t\treturn false\n\t}\n\tip, _, err := net.ParseCIDR(s)\n\treturn err == nil && ip.To4() != nil\n}",
"func IsIPv4(ip net.IP) bool {\n\treturn (len(ip) == net.IPv4len) || (len(ip) == net.IPv6len &&\n\t\tip[0] == 0x00 &&\n\t\tip[1] == 0x00 &&\n\t\tip[2] == 0x00 &&\n\t\tip[3] == 0x00 &&\n\t\tip[4] == 0x00 &&\n\t\tip[5] == 0x00 &&\n\t\tip[6] == 0x00 &&\n\t\tip[7] == 0x00 &&\n\t\tip[8] == 0x00 &&\n\t\tip[9] == 0x00 &&\n\t\tip[10] == 0xff &&\n\t\tip[11] == 0xff)\n}",
"func localIPv4s() ([]string, error) {\n\tvar ips []string\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\treturn ips, err\n\t}\n\n\tfor _, a := range addrs {\n\t\tif ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() && ipnet.IP.To4() != nil {\n\t\t\tips = append(ips, ipnet.IP.String())\n\t\t}\n\t}\n\n\treturn ips, nil\n}",
"func isIPv4(ip net.IP) bool {\n\treturn ip.To4() != nil && strings.Count(ip.String(), \":\") < 2\n}",
"func IsIP4(val interface{}) bool {\n\treturn isMatch(ip4, val)\n}"
] | [
"0.6910413",
"0.65817773",
"0.64788103",
"0.64404845",
"0.61875916",
"0.61496323",
"0.6074284",
"0.60079217",
"0.59941304",
"0.59614414",
"0.58839774",
"0.58533674",
"0.582732",
"0.5775293",
"0.57644033",
"0.57606894",
"0.5736692",
"0.573576",
"0.570588",
"0.5696371",
"0.56833994",
"0.5683031",
"0.5661739",
"0.56537896",
"0.564742",
"0.5631333",
"0.5623487",
"0.5592208",
"0.5565591",
"0.5532977",
"0.5532031",
"0.5510396",
"0.55049044",
"0.55017656",
"0.5488127",
"0.5470335",
"0.54689074",
"0.5462034",
"0.5431872",
"0.54143304",
"0.5386428",
"0.5378486",
"0.5353306",
"0.535178",
"0.53505886",
"0.53452814",
"0.5290702",
"0.5286229",
"0.5286054",
"0.5280478",
"0.5274077",
"0.5265233",
"0.5240618",
"0.5230248",
"0.5219644",
"0.521165",
"0.52110946",
"0.5209704",
"0.5205639",
"0.51969534",
"0.5193639",
"0.5179071",
"0.51727194",
"0.5170799",
"0.51628405",
"0.51565486",
"0.5150351",
"0.5139961",
"0.51299226",
"0.51045585",
"0.50871766",
"0.50706404",
"0.5070487",
"0.5064613",
"0.5063839",
"0.5051146",
"0.50358635",
"0.50352484",
"0.50330913",
"0.50295043",
"0.50266266",
"0.5013194",
"0.49972895",
"0.49823952",
"0.49815032",
"0.49718073",
"0.4970712",
"0.49498683",
"0.49457875",
"0.4938413",
"0.49351496",
"0.49335426",
"0.49216595",
"0.49178654",
"0.49156058",
"0.49156058",
"0.49060822",
"0.4904693",
"0.4895479",
"0.48838085"
] | 0.86540896 | 0 |
NewPreRunner takes a name and a standalone pre runner compatible function and turns them into a Group compatible PreRunner, ready for registration. | NewPreRunner принимает имя и функцию-предварительный запускатор, совместимую с самостоятельным запуском, и преобразует их в PreRunner, совместимый с группой, готовый к регистрации. | func NewPreRunner(name string, fn func() error) PreRunner {
return preRunner{name: name, fn: fn}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func preRun(c *cobra.Command, args []string) error {\n\tif len(args) < 1 {\n\t\treturn errors.New(\"Missing name argument\")\n\t}\n\n\tname = args[0]\n\treturn nil\n}",
"func newRunnerGroup(scope RunnerGroupScope, name string) RunnerGroup {\n\tif name == \"\" {\n\t\treturn RunnerGroup{\n\t\t\tScope: scope,\n\t\t\tKind: Default,\n\t\t\tName: \"\",\n\t\t}\n\t}\n\n\treturn RunnerGroup{\n\t\tScope: scope,\n\t\tKind: Custom,\n\t\tName: name,\n\t}\n}",
"func New(t *testing.T, name string, arg ...string) *Runner {\n\treturn &Runner{t, name, arg}\n}",
"func NewRunner(parent string) *Runner {\n\tr := &Runner{}\n\tc := &cobra.Command{\n\t\tUse: \"fix LOCAL_PKG_DIR\",\n\t\tArgs: cobra.ExactArgs(1),\n\t\tShort: docs.FixShort,\n\t\tLong: docs.FixShort + \"\\n\" + docs.FixLong,\n\t\tExample: docs.FixExamples,\n\t\tPreRunE: r.preRunE,\n\t\tRunE: r.runE,\n\t\tSuggestFor: []string{\"upgrade\", \"migrate\"},\n\t}\n\tcmdutil.FixDocs(\"kpt\", parent, c)\n\tc.Flags().BoolVar(&r.Fix.DryRun, \"dry-run\", false,\n\t\t`Dry run emits the actions`)\n\tr.Command = c\n\treturn r\n}",
"func NewRunner(getter Getter) *Runner {\n\treturn &Runner{getter: getter}\n}",
"func NewRunner(wfClientset wfclientset.Interface) *Runner {\n\treturn &Runner{wfClientset: wfClientset}\n}",
"func NewRunner(ctx *pulumi.Context,\n\tname string, args *RunnerArgs, opts ...pulumi.ResourceOption) (*Runner, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.RegistrationToken == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'RegistrationToken'\")\n\t}\n\tif args.RegistrationToken != nil {\n\t\targs.RegistrationToken = pulumi.ToSecret(args.RegistrationToken).(pulumi.StringInput)\n\t}\n\tsecrets := pulumi.AdditionalSecretOutputs([]string{\n\t\t\"authenticationToken\",\n\t\t\"registrationToken\",\n\t})\n\topts = append(opts, secrets)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Runner\n\terr := ctx.RegisterResource(\"gitlab:index/runner:Runner\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (k *PluginRunner) preRun(cmd *cobra.Command, args []string) error {\n\tlr := fLdr.RestrictionRootOnly\n\tfSys := filesys.MakeFsOnDisk()\n\tldr, err := fLdr.NewLoader(lr, filepath.Clean(k.root), fSys)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tv := validator.NewKustValidator()\n\n\tuf := kunstruct.NewKunstructuredFactoryImpl()\n\tvar pm resmap.Merginator // TODO The actual implementation is internal now...\n\trf := resmap.NewFactory(resource.NewFactory(uf), pm)\n\n\tk.h = resmap.NewPluginHelpers(ldr, v, rf)\n\n\tif c, ok := k.plugin.(resmap.Configurable); ok {\n\t\tconfig, err := k.config(cmd, args)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := c.Config(k.h, config); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (s *Supervisor) AddRunner(name string, callback Callback, policyOptions ...PolicyOption) {\n\tkey := fmt.Sprintf(\"%s-%s\", \"runner\", name)\n\ts.mux.Lock()\n\tdefer s.mux.Unlock()\n\n\tif _, exists := s.processes[key]; exists {\n\t\ts.logger(Error, loggerData{\"name\": name}, \"runner already exists\")\n\t\treturn\n\t}\n\n\tr := &runner{\n\t\tCallback: callback,\n\t\tname: name,\n\t\trestartPolicy: s.policy.Restart,\n\t\tlogger: s.logger,\n\t}\n\n\tp := Policy{\n\t\tRestart: s.policy.Restart,\n\t}\n\tp.Reconfigure(policyOptions...)\n\n\tr.restartPolicy = p.Restart\n\n\ts.processes[key] = r\n}",
"func newPreReqValidator(opts ...option) *preReqValidator {\n\tcfg := defaultPreReqCfg()\n\tfor _, opt := range opts {\n\t\topt(cfg)\n\t}\n\treturn &preReqValidator{\n\t\tctxName: cfg.ctxName,\n\t\tns: cfg.ns,\n\t\tk8sClientProvider: cfg.k8sClientProvider,\n\t\toktetoClientProvider: cfg.oktetoClientProvider,\n\t\tgetContextStore: cfg.getContextStore,\n\t\tgetCtxResource: cfg.getCtxResource,\n\t}\n}",
"func NewRunner(be func() Backend, host string, userf, passf string) *Runner {\n\treturn &Runner{\n\t\tbe: be,\n\t\thost: host,\n\t\tuserf: userf,\n\t\tpassf: passf,\n\t\tsessions: make(chan error),\n\t\tpwdOver: make(chan struct{}),\n\t\tbroken: makeBroken(),\n\t\tlogins: gen.NewLogins(),\n\t\tagents: gen.NewAgents(),\n\t\tpool: newPool(),\n\t}\n}",
"func (rnr *Runner) Preparer(l logger.Logger) (preparer.Preparer, error) {\n\n\t// NOTE: We have a good generic preparer so we'll provide that here\n\n\tl.Debug(\"** Preparer **\")\n\n\t// Return the existing preparer if we already have one\n\tif rnr.Prepare != nil {\n\t\tl.Debug(\"Returning existing preparer\")\n\t\treturn rnr.Prepare, nil\n\t}\n\n\tl.Debug(\"Creating new preparer\")\n\n\tp, err := prepare.NewPrepare(l)\n\tif err != nil {\n\t\tl.Warn(\"Failed new prepare >%v<\", err)\n\t\treturn nil, err\n\t}\n\n\tdb, err := rnr.Store.GetDb()\n\tif err != nil {\n\t\tl.Warn(\"Failed getting database handle >%v<\", err)\n\t\treturn nil, err\n\t}\n\n\terr = p.Init(db)\n\tif err != nil {\n\t\tl.Warn(\"Failed preparer init >%v<\", err)\n\t\treturn nil, err\n\t}\n\n\trnr.Prepare = p\n\n\treturn p, nil\n}",
"func NewFakeAppPrecondition(name string, numApps int, innerPre func(name string, opts ...chrome.Option) testing.Precondition, skiaRenderer bool) *preImpl {\n\tname = fmt.Sprintf(\"%s_%d\", name, numApps)\n\ttmpDir, err := ioutil.TempDir(\"\", name)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\topts := make([]chrome.Option, 0, numApps)\n\tfor i := 0; i < numApps; i++ {\n\t\topts = append(opts, chrome.UnpackedExtension(filepath.Join(tmpDir, fmt.Sprintf(\"fake_%d\", i))))\n\t}\n\tif skiaRenderer {\n\t\tname = name + \"_skia_renderer\"\n\t\topts = append(opts, chrome.EnableFeatures(\"UseSkiaRenderer\"))\n\t}\n\tcrPre := innerPre(name, opts...)\n\treturn &preImpl{crPre: crPre, numApps: numApps, extDirBase: tmpDir, prepared: false}\n}",
"func NewRunner(pubClientFactory func() publisher.Client, mod *Wrapper) Runner {\n\treturn &runner{\n\t\tdone: make(chan struct{}),\n\t\tmod: mod,\n\t\tclient: pubClientFactory(),\n\t}\n}",
"func NewRunner(client ucare.Client, customStorage string) *Runner {\n\treturn &Runner{\n\t\tFile: file.NewService(client),\n\t\tGroup: group.NewService(client),\n\t\tUpload: upload.NewService(client),\n\t\tConversion: conversion.NewService(client),\n\t\tWebhook: webhook.NewService(client),\n\t\tProject: project.NewService(client),\n\t\tArtifacts: Artifacts{\n\t\t\tCustomStorage: customStorage,\n\t\t},\n\t}\n}",
"func New(runner, tracker, hosted string) *Runner {\n\tn := &Runner{\n\t\ttcl: client.New(tracker, http.DefaultClient, client.JsonCodec),\n\t\tbase: hosted,\n\t\trunner: runner,\n\t\trpc: gorpc.NewServer(),\n\t\trq: rpc.NewRunnerQueue(),\n\t\tresp: make(chan rpc.Output),\n\t}\n\n\t//register the run service in the rpc\n\tif err := n.rpc.RegisterService(n.rq, \"\"); err != nil {\n\t\tpanic(err)\n\t}\n\n\t//register the pinger\n\tif err := n.rpc.RegisterService(pinger.Pinger{}, \"\"); err != nil {\n\t\tpanic(err)\n\t}\n\n\t//register ourselves in the rpc\n\tif err := n.rpc.RegisterService(n, \"\"); err != nil {\n\t\tpanic(err)\n\t}\n\n\t//register the codec\n\tn.rpc.RegisterCodec(json.NewCodec(), \"application/json\")\n\n\t//start processing\n\tgo n.run()\n\n\treturn n\n}",
"func NewPrecompileCaller(address common.Address, caller bind.ContractCaller) (*PrecompileCaller, error) {\n\tcontract, err := bindPrecompile(address, caller, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &PrecompileCaller{contract: contract}, nil\n}",
"func NewRunner(methods MethodMap) *Runner {\n\treturn &Runner{methods}\n}",
"func NewRunner(env []string) *Runner {\n\treturn &Runner{\n\t\tenv: env,\n\t}\n}",
"func NewRunner(c client.Client) *ClientPipelineRunner {\n\treturn &ClientPipelineRunner{client: c, objectMeta: objectMetaCreator}\n}",
"func newProcessRunner(pipeID string, p phono.Processor) (*processRunner, error) {\n\tfn, err := p.Process(pipeID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := processRunner{\n\t\tfn: fn,\n\t\tProcessor: p,\n\t\thooks: bindHooks(p),\n\t}\n\treturn &r, nil\n}",
"func Register(name string, fn func(ctx Context, payload map[string]interface{}) (Runner, error)) {\n\tonce.Do(func() {\n\t\trunners = factory{\n\t\t\trunners: make(map[string]func(ctx Context, payload map[string]interface{}) (Runner, error)),\n\t\t}\n\t})\n\n\trunners.Lock()\n\tdefer runners.Unlock()\n\n\trunners.runners[name] = fn\n}",
"func (pfs *Supervisor) NewRunner(t *testing.T, m Matrix) *Runner {\n\tif Flags.PrintMatrix {\n\t\tmatrix := m.scenarios()\n\t\tfor _, m := range matrix {\n\t\t\tfmt.Printf(\"%s/%s\\n\", t.Name(), m)\n\t\t}\n\t\tt.Skip(\"Just printing test matrix (-ls-matrix flag set)\")\n\t}\n\tt.Helper()\n\tt.Parallel()\n\tpf := &Runner{\n\t\tt: t,\n\t\tmatrix: m,\n\t\ttestNames: map[string]struct{}{},\n\t\ttestNamesPassed: map[string]struct{}{},\n\t\ttestNamesSkipped: map[string]struct{}{},\n\t\ttestNamesFailed: map[string]struct{}{},\n\t\tparent: pfs,\n\t}\n\tpfs.mu.Lock()\n\tdefer pfs.mu.Unlock()\n\tpfs.fixtures[t.Name()] = pf\n\treturn pf\n}",
"func NewRunner(cli *DockerClient, problemDir, fileName, ft string, timeLimit time.Duration) (*Runner, error) {\n\tdef := languageDefs[ft]\n\n\ttestCtr := &Container{\n\t\tDocker: cli,\n\t\tImage: def.Image,\n\t\tCmd: []string{\"sleep\", \"1000000000000\"},\n\t\tWorkingDir: \"/mnt\",\n\t\tOut: os.Stdout,\n\t\tReadOnly: true,\n\t}\n\n\tif err := testCtr.BindDir(problemDir, \"/mnt\", true); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := testCtr.Run(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Runner{\n\t\tproblemDir: problemDir,\n\t\tfileName: fileName,\n\t\tcontainer: testCtr,\n\t\ttimeLimit: timeLimit,\n\t\tft: ft,\n\t}, nil\n}",
"func PreHook(f func()) {\n\thookLock.Lock()\n\tdefer hookLock.Unlock()\n\n\tprehooks = append(prehooks, f)\n}",
"func NewRunner(manager *PM, command *core.Command, factory process.ProcessFactory, hooks ...RunnerHook) Runner {\n\tstatsInterval := command.StatsInterval\n\n\tif statsInterval < 30 {\n\t\tstatsInterval = 30\n\t}\n\n\trunner := &runnerImpl{\n\t\tmanager: manager,\n\t\tcommand: command,\n\t\tfactory: factory,\n\t\tkill: make(chan int),\n\t\thooks: hooks,\n\n\t\tstatsd: stats.NewStatsd(\n\t\t\tcommand.ID,\n\t\t\ttime.Duration(statsInterval)*time.Second,\n\t\t\tmanager.statsFlushCallback),\n\t}\n\n\trunner.wg.Add(1)\n\treturn runner\n}",
"func NameFunc(name string, fn func(ctx context.Context) error) NamedRunner {\n\treturn Name(name, RunnerFunc(fn))\n}",
"func NewPreparersBuilder(filePath string, params map[string]string) *PreparersBuilder {\n\treturn &PreparersBuilder{preparers: &Preparers{functions: &[]Preparer{}}, filePath: filePath, params: params}\n}",
"func newPumpRunner(pipeID string, p phono.Pump) (*pumpRunner, error) {\n\tfn, err := p.Pump(pipeID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := pumpRunner{\n\t\tfn: fn,\n\t\tPump: p,\n\t\thooks: bindHooks(p),\n\t}\n\treturn &r, nil\n}",
"func NewRunner(dir string, logger *Logger, options Options) Runner {\n\tif !options.FirecrackerOptions.Enabled {\n\t\treturn &dockerRunner{dir: dir, logger: logger, options: options}\n\t}\n\n\treturn &firecrackerRunner{name: options.ExecutorName, dir: dir, logger: logger, options: options}\n}",
"func NewPrecompileFilterer(address common.Address, filterer bind.ContractFilterer) (*PrecompileFilterer, error) {\n\tcontract, err := bindPrecompile(address, nil, nil, filterer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &PrecompileFilterer{contract: contract}, nil\n}",
"func New() *LegPreAllocGrp {\n\tvar m LegPreAllocGrp\n\treturn &m\n}",
"func (b *OGame) BeginNamed(name string) Prioritizable {\n\treturn b.WithPriority(taskRunner.Normal).BeginNamed(name)\n}",
"func (tf *TestFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {\n}",
"func NewRunner(reader io.Reader, comp Component) (Runner, error) {\n\tr := &runner{\n\t\tcomp: comp,\n\t}\n\trecvComp, ok := comp.(ReceiveComponent)\n\tif ok {\n\t\tr.recvComp = recvComp\n\t}\n\tsendComp, ok := comp.(SendComponent)\n\tif ok {\n\t\tr.sendComp = sendComp\n\t}\n\terr := r.load(reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn r, nil\n}",
"func (tr *TaskRunner) prestart() error {\n\t// Determine if the allocation is terminal and we should avoid running\n\t// prestart hooks.\n\tif tr.shouldShutdown() {\n\t\ttr.logger.Trace(\"skipping prestart hooks since allocation is terminal\")\n\t\treturn nil\n\t}\n\n\tif tr.logger.IsTrace() {\n\t\tstart := time.Now()\n\t\ttr.logger.Trace(\"running prestart hooks\", \"start\", start)\n\t\tdefer func() {\n\t\t\tend := time.Now()\n\t\t\ttr.logger.Trace(\"finished prestart hooks\", \"end\", end, \"duration\", end.Sub(start))\n\t\t}()\n\t}\n\n\t// use a join context to allow any blocking pre-start hooks\n\t// to be canceled by either killCtx or shutdownCtx\n\tjoinedCtx, joinedCancel := joincontext.Join(tr.killCtx, tr.shutdownCtx)\n\tdefer joinedCancel()\n\n\tfor _, hook := range tr.runnerHooks {\n\t\tpre, ok := hook.(interfaces.TaskPrestartHook)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tname := pre.Name()\n\n\t\t// Build the request\n\t\treq := interfaces.TaskPrestartRequest{\n\t\t\tTask: tr.Task(),\n\t\t\tTaskDir: tr.taskDir,\n\t\t\tTaskEnv: tr.envBuilder.Build(),\n\t\t\tTaskResources: tr.taskResources,\n\t\t}\n\n\t\torigHookState := tr.hookState(name)\n\t\tif origHookState != nil {\n\t\t\tif origHookState.PrestartDone {\n\t\t\t\ttr.logger.Trace(\"skipping done prestart hook\", \"name\", pre.Name())\n\n\t\t\t\t// Always set env vars from hooks\n\t\t\t\tif name == HookNameDevices {\n\t\t\t\t\ttr.envBuilder.SetDeviceHookEnv(name, origHookState.Env)\n\t\t\t\t} else {\n\t\t\t\t\ttr.envBuilder.SetHookEnv(name, origHookState.Env)\n\t\t\t\t}\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Give the hook it's old data\n\t\t\treq.PreviousState = origHookState.Data\n\t\t}\n\n\t\treq.VaultToken = tr.getVaultToken()\n\t\treq.NomadToken = tr.getNomadToken()\n\n\t\t// Time the prestart hook\n\t\tvar start time.Time\n\t\tif tr.logger.IsTrace() {\n\t\t\tstart = time.Now()\n\t\t\ttr.logger.Trace(\"running prestart hook\", \"name\", name, \"start\", start)\n\t\t}\n\n\t\t// Run the prestart 
hook\n\t\tvar resp interfaces.TaskPrestartResponse\n\t\tif err := pre.Prestart(joinedCtx, &req, &resp); err != nil {\n\t\t\ttr.emitHookError(err, name)\n\t\t\treturn structs.WrapRecoverable(fmt.Sprintf(\"prestart hook %q failed: %v\", name, err), err)\n\t\t}\n\n\t\t// Store the hook state\n\t\t{\n\t\t\thookState := &state.HookState{\n\t\t\t\tData: resp.State,\n\t\t\t\tPrestartDone: resp.Done,\n\t\t\t\tEnv: resp.Env,\n\t\t\t}\n\n\t\t\t// Store and persist local state if the hook state has changed\n\t\t\tif !hookState.Equal(origHookState) {\n\t\t\t\ttr.stateLock.Lock()\n\t\t\t\ttr.localState.Hooks[name] = hookState\n\t\t\t\ttr.stateLock.Unlock()\n\n\t\t\t\tif err := tr.persistLocalState(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Store the environment variables returned by the hook\n\t\tif name == HookNameDevices {\n\t\t\ttr.envBuilder.SetDeviceHookEnv(name, resp.Env)\n\t\t} else {\n\t\t\ttr.envBuilder.SetHookEnv(name, resp.Env)\n\t\t}\n\n\t\t// Store the resources\n\t\tif len(resp.Devices) != 0 {\n\t\t\ttr.hookResources.setDevices(resp.Devices)\n\t\t}\n\t\tif len(resp.Mounts) != 0 {\n\t\t\ttr.hookResources.setMounts(resp.Mounts)\n\t\t}\n\n\t\tif tr.logger.IsTrace() {\n\t\t\tend := time.Now()\n\t\t\ttr.logger.Trace(\"finished prestart hook\", \"name\", name, \"end\", end, \"duration\", end.Sub(start))\n\t\t}\n\t}\n\n\treturn nil\n}",
"func WithPreRunE(preRunE func(cmd *cobra.Command, args []string) error) RunnerOption {\n\treturn func(k *PluginRunner) {\n\t\tk.cmd.PreRunE = func(cmd *cobra.Command, args []string) error {\n\t\t\tif err := preRunE(cmd, args); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Explicitly call through to the default implementation to invoke Configurable.Config\n\t\t\treturn k.preRun(cmd, args)\n\t\t}\n\t}\n}",
"func (tc *TestCase) SetPreTestFunc(curFunc func(data interface{}, context *TestContext)) {\n\tif tc.PreTestFunc == nil {\n\t\ttc.PreTestFunc = curFunc\n\t}\n}",
"func (client ModelClient) AddPrebuiltPreparer(ctx context.Context, appID uuid.UUID, versionID string, prebuiltExtractorNames []string) (*http.Request, error) {\n\turlParameters := map[string]interface{}{\n\t\t\"Endpoint\": client.Endpoint,\n\t}\n\n\tpathParameters := map[string]interface{}{\n\t\t\"appId\": autorest.Encode(\"path\", appID),\n\t\t\"versionId\": autorest.Encode(\"path\", versionID),\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsContentType(\"application/json; charset=utf-8\"),\n\t\tautorest.AsPost(),\n\t\tautorest.WithCustomBaseURL(\"{Endpoint}/luis/api/v2.0\", urlParameters),\n\t\tautorest.WithPathParameters(\"/apps/{appId}/versions/{versionId}/prebuilts\", pathParameters),\n\t\tautorest.WithJSON(prebuiltExtractorNames))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}",
"func PrebuiltFactory() android.Module {\n\tmodule := &Prebuilt{}\n\tmodule.AddProperties(&module.properties)\n\tandroid.InitSingleSourcePrebuiltModule(module, &module.properties, \"Source\")\n\tandroid.InitAndroidMultiTargetsArchModule(module, android.DeviceSupported, android.MultilibCommon)\n\treturn module\n}",
"func NewPrecompile(address common.Address, backend bind.ContractBackend) (*Precompile, error) {\n\tcontract, err := bindPrecompile(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Precompile{PrecompileCaller: PrecompileCaller{contract: contract}, PrecompileTransactor: PrecompileTransactor{contract: contract}, PrecompileFilterer: PrecompileFilterer{contract: contract}}, nil\n}",
"func (client AppsClient) AddCustomPrebuiltDomainPreparer(ctx context.Context, prebuiltDomainCreateObject PrebuiltDomainCreateObject) (*http.Request, error) {\n\turlParameters := map[string]interface{}{\n\t\t\"AzureRegion\": client.AzureRegion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsContentType(\"application/json; charset=utf-8\"),\n\t\tautorest.AsPost(),\n\t\tautorest.WithCustomBaseURL(\"https://{AzureRegion}.api.cognitive.microsoft.com/luis/api/v2.0\", urlParameters),\n\t\tautorest.WithPath(\"/apps/customprebuiltdomains\"),\n\t\tautorest.WithJSON(prebuiltDomainCreateObject))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}",
"func NewPregeneratorFromFountain(f *Fountain, pregenInterval int64) *PreGenerator {\n\tpg := &PreGenerator{\n\t\tstartdate: f.startdate,\n\t\tduration: f.duration,\n\t\tpregenInterval: pregenInterval,\n\t\tratchet: f.serviceDesc.ratchet.Copy(),\n\t\tlastCounter: 1,\n\t}\n\treturn pg\n}",
"func NewCustom(executor executor.Executor) executor.Launcher {\n\treturn New(executor, fmt.Sprintf(\"stress-ng-custom %s\", StressngCustomArguments.Value()), StressngCustomArguments.Value())\n}",
"func NewRunner() *Runner {\n\tr := &Runner{}\n\tr.generator = romanumeral.NewRomanNumeralGenerator()\n\n\treturn r\n}",
"func New(stages ...StageRunner) *Pipeline {\n\treturn &Pipeline{\n\t\tstages: stages,\n\t}\n}",
"func New(config *Config) functions.Runner {\n\treturn &impl{*config}\n}",
"func newRestoreRunner(restore *backupv1alpha1.Restore, common service.CommonObjects, observer *observe.Observer) *restoreRunner {\n\treturn &restoreRunner{\n\t\trestore: restore,\n\t\tCommonObjects: common,\n\t\tconfig: newConfig(),\n\t\tobserver: observer,\n\t}\n}",
"func NewRunner(config *Config, client Client, server Server) *Runner {\n\treturn &Runner{\n\t\tconfig: config,\n\t\tclient: client,\n\t\tserver: server,\n\t}\n}",
"func Name(name string, runner Runner) NamedRunner {\n\treturn &namedRunner{\n\t\tRunner: runner,\n\t\tname: name,\n\t}\n}",
"func NewRunner() Runner {\n\treturn execRunner{}\n}",
"func New() (*Runner, error) {\n\tpool, err := dockertest.NewPool(\"\")\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to start pool\")\n\t}\n\n\treturn NewWithPool(pool), nil\n}",
"func (f Factory) WithPreprocessTxHook(preprocessFn client.PreprocessTxFn) Factory {\n\tf.preprocessTxHook = preprocessFn\n\treturn f\n}",
"func NewSingleRunner(\n\texec execer.Execer,\n\tfilerMap runner.RunTypeMap,\n\toutput runner.OutputCreator,\n\tstat stats.StatsReceiver,\n\tdirMonitor *stats.DirsMonitor,\n\trID runner.RunnerID,\n\tpreprocessors []func() error,\n\tpostprocessors []func() error,\n\tuploader LogUploader,\n) runner.Service {\n\treturn NewQueueRunner(exec, filerMap, output, 0, stat, dirMonitor, rID, preprocessors, postprocessors, uploader)\n}",
"func newProfile(ctx context.Context, cfg config.KubeSchedulerProfile, r frameworkruntime.Registry, recorderFact RecorderFactory,\n\topts ...frameworkruntime.Option) (framework.Framework, error) {\n\trecorder := recorderFact(cfg.SchedulerName)\n\topts = append(opts, frameworkruntime.WithEventRecorder(recorder))\n\treturn frameworkruntime.NewFramework(ctx, r, &cfg, opts...)\n}",
"func NewRunner(executor Executor, rep reporter.Reporter) *Runner {\n\treturn &Runner{\n\t\texecutor: executor,\n\t\treporter: rep,\n\t}\n}",
"func (pu *PatientrecordUpdate) SetPrenameID(id int) *PatientrecordUpdate {\n\tpu.mutation.SetPrenameID(id)\n\treturn pu\n}",
"func (puo *PatientrecordUpdateOne) SetPrenameID(id int) *PatientrecordUpdateOne {\n\tpuo.mutation.SetPrenameID(id)\n\treturn puo\n}",
"func NewNamePreclaimTx(accountID, name string, ttlnoncer TTLNoncer) (tx *NamePreclaimTx, nameSalt *big.Int, err error) {\n\tttl, _, accountNonce, err := ttlnoncer(accountID, config.Client.TTL)\n\tif err != nil {\n\t\treturn\n\t}\n\t// calculate the commitment and get the preclaim salt since the salt is 32\n\t// bytes long, you must use a big.Int to convert it into an integer\n\tcm, nameSalt, err := generateCommitmentID(name)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttx = &NamePreclaimTx{accountID, cm, config.Client.Fee, ttl, accountNonce}\n\tCalculateFee(tx)\n\treturn\n}",
"func (puo *PatientrecordUpdateOne) SetPrename(p *Prename) *PatientrecordUpdateOne {\n\treturn puo.SetPrenameID(p.ID)\n}",
"func V23RunnerFunc(ctxfn func() *context.T, run func(*context.T, *cmdline.Env, []string) error) cmdline.Runner {\n\treturn RunnerFunc(runner{false, ctxfn, run}.Run)\n}",
"func GetPre(config *viper.Viper, spec *models.Spec) (PreOperation, map[string]interface{}) {\n\tif spec.PreRun == nil {\n\t\treturn &DummyPre{}, map[string]interface{}{}\n\t}\n\n\tswitch spec.PreRun.Function {\n\tcase PreRunFunctionRedis:\n\t\treturn redis.GetPre(config), spec.PreRun.Args\n\t}\n\treturn &DummyPre{}, map[string]interface{}{}\n}",
"func NewRunner(config *config.Config, once bool) (*Runner, error) {\n\t// var repos repository.Repo\n\tlogger := log.WithField(\"caller\", \"runner\")\n\n\t// Create repos from configuration\n\trepos, err := repository.LoadRepos(config)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Cannot load repositories from configuration: %s\", err)\n\t}\n\tvar reposI = make([]repository.Repo, len(repos))\n\tfor index, repo := range repos {\n\t\treposI[index] = repo\n\t}\n\t// Create watcher to watch for repo changes\n\twatcher := watch.New(reposI, config.HookSvr, once)\n\n\t// Create the handler\n\thandler, err := kv.New(config.Consul)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trunner := &Runner{\n\t\tlogger: logger,\n\t\tErrCh: make(chan error),\n\t\tRcvDoneCh: make(chan struct{}, 1),\n\t\tSndDoneCh: make(chan struct{}, 1),\n\t\tonce: once,\n\t\tkvHandler: handler,\n\t\twatcher: watcher,\n\t}\n\n\treturn runner, nil\n}",
"func (pu *PatientrecordUpdate) SetPrename(p *Prename) *PatientrecordUpdate {\n\treturn pu.SetPrenameID(p.ID)\n}",
"func NewRunner(view ui.ViewInterface, proxy proxy.ProxyInterface, project *config.Project) *Runner {\n\treturn &Runner{\n\t\tproxy: proxy,\n\t\tprojectName: project.Name,\n\t\tapplications: project.Applications,\n\t\tcmds: make(map[string]*exec.Cmd, 0),\n\t\tview: view,\n\t}\n}",
"func CreatePreFeaturizedMultiBandImageClusteringPipeline(name string, description string, variables []*model.Variable, params *ClusterParams) (*FullySpecifiedPipeline, error) {\n\tvar steps []Step\n\tif params.UseKMeans {\n\t\tif params.PoolFeatures {\n\t\t\tsteps = []Step{\n\t\t\t\tNewDatasetToDataframeStep(map[string]DataRef{\"inputs\": &PipelineDataRef{0}}, []string{\"produce\"}),\n\t\t\t\tNewDistilColumnParserStep(map[string]DataRef{\"inputs\": &StepDataRef{0, \"produce\"}}, []string{\"produce\"}, []string{model.TA2RealType}),\n\t\t\t\tNewExtractColumnsByStructuralTypeStep(map[string]DataRef{\"inputs\": &StepDataRef{1, \"produce\"}}, []string{\"produce\"},\n\t\t\t\t\t[]string{\n\t\t\t\t\t\t\"float\", // python type\n\t\t\t\t\t\t\"numpy.float32\", // numpy types\n\t\t\t\t\t\t\"numpy.float64\",\n\t\t\t\t\t}),\n\t\t\t\tNewKMeansClusteringStep(map[string]DataRef{\"inputs\": &StepDataRef{2, \"produce\"}}, []string{\"produce\"}, params.ClusterCount),\n\t\t\t\tNewConstructPredictionStep(map[string]DataRef{\"inputs\": &StepDataRef{3, \"produce\"}}, []string{\"produce\"}, &StepDataRef{1, \"produce\"}),\n\t\t\t}\n\t\t} else {\n\t\t\tsteps = []Step{\n\t\t\t\tNewDatasetToDataframeStep(map[string]DataRef{\"inputs\": &PipelineDataRef{0}}, []string{\"produce\"}),\n\t\t\t\tNewDistilColumnParserStep(map[string]DataRef{\"inputs\": &StepDataRef{0, \"produce\"}}, []string{\"produce\"}, []string{model.TA2RealType}),\n\t\t\t\tNewPrefeaturisedPoolingStep(map[string]DataRef{\"inputs\": &StepDataRef{1, \"produce\"}}, []string{\"produce\"}),\n\t\t\t\tNewExtractColumnsByStructuralTypeStep(map[string]DataRef{\"inputs\": &StepDataRef{2, \"produce\"}}, []string{\"produce\"},\n\t\t\t\t\t[]string{\n\t\t\t\t\t\t\"float\", // python type\n\t\t\t\t\t\t\"numpy.float32\", // numpy types\n\t\t\t\t\t\t\"numpy.float64\",\n\t\t\t\t\t}),\n\t\t\t\tNewKMeansClusteringStep(map[string]DataRef{\"inputs\": &StepDataRef{3, \"produce\"}}, []string{\"produce\"}, 
params.ClusterCount),\n\t\t\t\tNewConstructPredictionStep(map[string]DataRef{\"inputs\": &StepDataRef{4, \"produce\"}}, []string{\"produce\"}, &StepDataRef{1, \"produce\"}),\n\t\t\t}\n\t\t}\n\t} else {\n\t\tsteps = []Step{\n\t\t\tNewDatasetToDataframeStep(map[string]DataRef{\"inputs\": &PipelineDataRef{0}}, []string{\"produce\"}),\n\t\t\tNewDistilColumnParserStep(map[string]DataRef{\"inputs\": &StepDataRef{0, \"produce\"}}, []string{\"produce\"}, []string{model.TA2RealType}),\n\t\t\tNewExtractColumnsByStructuralTypeStep(map[string]DataRef{\"inputs\": &StepDataRef{1, \"produce\"}}, []string{\"produce\"},\n\t\t\t\t[]string{\n\t\t\t\t\t\"float\", // python type\n\t\t\t\t\t\"numpy.float32\", // numpy types\n\t\t\t\t\t\"numpy.float64\",\n\t\t\t\t}),\n\t\t\tNewHDBScanStep(map[string]DataRef{\"inputs\": &StepDataRef{2, \"produce\"}}, []string{\"produce\"}),\n\t\t\tNewExtractColumnsStep(map[string]DataRef{\"inputs\": &StepDataRef{3, \"produce\"}}, []string{\"produce\"}, []int{-1}),\n\t\t\t// Needs to be added since the input dataset doesn't have a target, and hdbscan doesn't set the target itself. 
Without this being\n\t\t\t// set the subsequent ConstructPredictions step doesn't work.\n\t\t\tNewAddSemanticTypeStep(map[string]DataRef{\"inputs\": &StepDataRef{4, \"produce\"}}, []string{\"produce\"}, &ColumnUpdate{\n\t\t\t\tIndices: []int{0},\n\t\t\t\tSemanticTypes: []string{\"https://metadata.datadrivendiscovery.org/types/PredictedTarget\"},\n\t\t\t}),\n\t\t\tNewConstructPredictionStep(map[string]DataRef{\"inputs\": &StepDataRef{5, \"produce\"}}, []string{\"produce\"}, &StepDataRef{1, \"produce\"}),\n\t\t}\n\t}\n\n\tinputs := []string{\"inputs\"}\n\toutputs := []DataRef{&StepDataRef{len(steps) - 1, \"produce\"}}\n\n\tpipeline, err := NewPipelineBuilder(name, description, inputs, outputs, steps).Compile()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpipelineJSON, err := MarshalSteps(pipeline)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfullySpecified := &FullySpecifiedPipeline{\n\t\tPipeline: pipeline,\n\t\tEquivalentValues: []interface{}{pipelineJSON},\n\t}\n\treturn fullySpecified, nil\n}",
"func (p *PrecheckHandler) Precheck(c echo.Context, clusterID uint, driverID uint, modules []model.ModuleType, moduleConfig string) error {\n\tavailableModules := make(map[string]string)\n\n\tcluster, err := p.clusterStore.GetByID(clusterID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cluster == nil {\n\t\treturn fmt.Errorf(\"not able to find cluster with id %d\", clusterID)\n\t}\n\n\tcfs, err := p.configFileStore.GetAll()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif driverID > 0 {\n\t\tdriver, err := p.driverStore.GetByID(driverID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif driver == nil {\n\t\t\treturn fmt.Errorf(\"not able to find driver with id %d\", driverID)\n\t\t}\n\n\t\tdriverPrechecks := p.precheckGetter.GetDriverPrechecks(driver.StorageArrayType.Name, cluster.ConfigFileData, cluster.ClusterDetails.Nodes, modules, c.Logger())\n\t\tfor _, precheck := range driverPrechecks {\n\t\t\tc.Logger().Printf(\"Running precheck: %T for driver of type %s\", precheck, driver.StorageArrayType.Name)\n\t\t\terr := precheck.Validate()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tavailableModules[\"csidriver\"] = driver.StorageArrayType.Name\n\t}\n\n\tfor _, m := range modules {\n\t\tavailableModules[m.Name] = m.Name\n\t}\n\n\t// Run pre-checks for standalone modules (ex: observability)\n\tfor _, module := range modules {\n\t\tmodulePrechecks := p.precheckGetter.GetModuleTypePrechecks(module.Name, moduleConfig, cluster.ConfigFileData, cfs, availableModules)\n\t\tfor _, precheck := range modulePrechecks {\n\t\t\tc.Logger().Printf(\"Running precheck: %T for %s module\", precheck, module.Name)\n\t\t\terr := precheck.Validate()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (r *registryExecutor) PreCheck(context.Context) error {\n\treturn nil\n}",
"func PersistentPreRunEFn(ctx *Context) func(*cobra.Command, []string) error {\n\treturn func(cmd *cobra.Command, args []string) error {\n\t\trootViper := viper.New()\n\t\trootViper.BindPFlags(cmd.Flags())\n\t\trootViper.BindPFlags(cmd.PersistentFlags())\n\n\t\tif cmd.Name() == version.Cmd.Name() {\n\t\t\treturn nil\n\t\t}\n\n\t\tconfig, err := interceptConfigs(ctx, rootViper)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlogger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))\n\t\tlogger, err = tmflags.ParseLogLevel(config.LogLevel, logger, tmcfg.DefaultLogLevel())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif rootViper.GetBool(tmcli.TraceFlag) {\n\t\t\tlogger = log.NewTracingLogger(logger)\n\t\t}\n\n\t\tlogger = logger.With(\"module\", \"main\")\n\t\tctx.Config = config\n\t\tctx.Logger = logger\n\n\t\treturn nil\n\t}\n}",
"func newFromName(clusterClient kubernetes.Interface, client k8s.Interface, wfr, namespace string) (Operator, error) {\n\tw, err := client.CycloneV1alpha1().WorkflowRuns(namespace).Get(context.TODO(), wfr, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &operator{\n\t\tclusterClient: clusterClient,\n\t\tclient: client,\n\t\trecorder: common.GetEventRecorder(client, common.EventSourceWfrController),\n\t\twfr: w,\n\t}, nil\n}",
"func NewEventRunner(events []cases.EventDescriptor, raftEngine *RaftEngine) *EventRunner {\n\ter := &EventRunner{events: make([]Event, 0, len(events)), raftEngine: raftEngine}\n\tfor _, e := range events {\n\t\tevent := parserEvent(e)\n\t\tif event != nil {\n\t\t\ter.events = append(er.events, event)\n\t\t}\n\t}\n\treturn er\n}",
"func NewRunner(\n\tcheckInterval time.Duration,\n\temissionInterval time.Duration,\n\ttimeoutInterval time.Duration,\n\tlogger lager.Logger,\n\tchecker Checker,\n\texecutorClient executor.Client,\n\tmetronClient loggingclient.IngressClient,\n\tclock clock.Clock,\n) *Runner {\n\treturn &Runner{\n\t\tcheckInterval: checkInterval,\n\t\temissionInterval: emissionInterval,\n\t\ttimeoutInterval: timeoutInterval,\n\t\tlogger: logger.Session(\"garden-healthcheck\"),\n\t\tchecker: checker,\n\t\texecutorClient: executorClient,\n\t\tmetronClient: metronClient,\n\t\tclock: clock,\n\t\thealthy: false,\n\t\tfailures: 0,\n\t}\n}",
"func (o *RunSuiteOptions) SuiteWithProviderPreSuite() error {\n\tif err := o.SuiteWithInitializedProviderPreSuite(); err != nil {\n\t\treturn err\n\t}\n\to.GinkgoRunSuiteOptions.MatchFn = o.config.MatchFn()\n\treturn nil\n}",
"func newBaseRunner(collector *resourceStatusCollector) *baseRunner {\n\treturn &baseRunner{\n\t\tcollector: collector,\n\t}\n}",
"func NewNameLT(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldNewName), v))\n\t})\n}",
"func (client ModelClient) AddCustomPrebuiltDomainPreparer(ctx context.Context, appID uuid.UUID, versionID string, prebuiltDomainObject PrebuiltDomainCreateBaseObject) (*http.Request, error) {\n\turlParameters := map[string]interface{}{\n\t\t\"Endpoint\": client.Endpoint,\n\t}\n\n\tpathParameters := map[string]interface{}{\n\t\t\"appId\": autorest.Encode(\"path\", appID),\n\t\t\"versionId\": autorest.Encode(\"path\", versionID),\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsContentType(\"application/json; charset=utf-8\"),\n\t\tautorest.AsPost(),\n\t\tautorest.WithCustomBaseURL(\"{Endpoint}/luis/api/v2.0\", urlParameters),\n\t\tautorest.WithPathParameters(\"/apps/{appId}/versions/{versionId}/customprebuiltdomains\", pathParameters),\n\t\tautorest.WithJSON(prebuiltDomainObject))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}",
"func NewFakeRunner(t *testing.T) *FakeRunner {\n\treturn &FakeRunner{\n\t\tservices: map[string]serviceState{},\n\t\tcmds: []string{},\n\t\tt: t,\n\t\tcontainers: map[string]string{},\n\t\timages: map[string]string{},\n\t}\n}",
"func New(opts ...func(*Runner) error) (*Runner, error) {\n\tr := &Runner{usedNew: true}\n\tfor _, opt := range opts {\n\t\tif err := opt(r); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\t// Set the default fallbacks, if necessary.\n\tif r.Env == nil {\n\t\tEnv(nil)(r)\n\t}\n\tif r.Dir == \"\" {\n\t\tif err := Dir(\"\")(r); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif r.Exec == nil {\n\t\tModule(nil)(r)\n\t}\n\tif r.Open == nil {\n\t\tModule(nil)(r)\n\t}\n\tif r.Stdout == nil || r.Stderr == nil {\n\t\tStdIO(r.Stdin, r.Stdout, r.Stderr)(r)\n\t}\n\treturn r, nil\n}",
"func NewPipeline(ctx *pulumi.Context,\n\tname string, args *PipelineArgs, opts ...pulumi.ResourceOption) (*Pipeline, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.BootstrapConfiguration == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'BootstrapConfiguration'\")\n\t}\n\tif args.PipelineType == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'PipelineType'\")\n\t}\n\tif args.ResourceGroupName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ResourceGroupName'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:devops/v20200713preview:Pipeline\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:devops:Pipeline\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:devops:Pipeline\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:devops/v20190701preview:Pipeline\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:devops/v20190701preview:Pipeline\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource Pipeline\n\terr := ctx.RegisterResource(\"azure-native:devops/v20200713preview:Pipeline\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (l *jsiiProxy_LambdaDeploymentGroup) AddPreHook(preHook awslambda.IFunction) {\n\t_jsii_.InvokeVoid(\n\t\tl,\n\t\t\"addPreHook\",\n\t\t[]interface{}{preHook},\n\t)\n}",
"func (w *DefaultPreWorkflowHooksCommandRunner) RunPreHooks(\n\tbaseRepo models.Repo,\n\theadRepo models.Repo,\n\tpull models.PullRequest,\n\tuser models.User,\n) {\n\tif opStarted := w.Drainer.StartOp(); !opStarted {\n\t\tif commentErr := w.VCSClient.CreateComment(baseRepo, pull.Num, ShutdownComment, \"pre_workflow_hooks\"); commentErr != nil {\n\t\t\tw.Logger.Log(logging.Error, \"unable to comment that Atlantis is shutting down: %s\", commentErr)\n\t\t}\n\t\treturn\n\t}\n\tdefer w.Drainer.OpDone()\n\n\tlog := w.buildLogger(baseRepo.FullName, pull.Num)\n\tdefer w.logPanics(baseRepo, pull.Num, log)\n\n\tlog.Info(\"running pre hooks\")\n\n\tunlockFn, err := w.WorkingDirLocker.TryLock(baseRepo.FullName, pull.Num, DefaultWorkspace)\n\tif err != nil {\n\t\tlog.Warn(\"workspace is locked\")\n\t\treturn\n\t}\n\tlog.Debug(\"got workspace lock\")\n\tdefer unlockFn()\n\n\trepoDir, _, err := w.WorkingDir.Clone(log, headRepo, pull, DefaultWorkspace)\n\tif err != nil {\n\t\tlog.Err(\"unable to run pre workflow hooks: %s\", err)\n\t\treturn\n\t}\n\n\tpreWorkflowHooks := make([]*valid.PreWorkflowHook, 0)\n\tfor _, repo := range w.GlobalCfg.Repos {\n\t\tif repo.IDMatches(baseRepo.ID()) && len(repo.PreWorkflowHooks) > 0 {\n\t\t\tpreWorkflowHooks = append(preWorkflowHooks, repo.PreWorkflowHooks...)\n\t\t}\n\t}\n\n\tctx := models.PreWorkflowHookCommandContext{\n\t\tBaseRepo: baseRepo,\n\t\tHeadRepo: headRepo,\n\t\tLog: log,\n\t\tPull: pull,\n\t\tUser: user,\n\t\tVerbose: false,\n\t}\n\n\terr = w.runHooks(ctx, preWorkflowHooks, repoDir)\n\n\tif err != nil {\n\t\tlog.Err(\"pre workflow hook run error results: %s\", err)\n\t}\n}",
"func NewRunner(sources []string, count int, languagesYml string, autoPull bool, log *logrus.Logger) (*Runner, error) {\n\tvar langs *Languages\n\n\tif languagesYml == \"\" {\n\t\tlanguagesYml = DefaultLanguagesYml\n\t}\n\n\tif _, err := os.Stat(languagesYml); err != nil && autoPull {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"url\": DefaultLanguagesYmlURL,\n\t\t\t\"dest\": languagesYml,\n\t\t}).Info(\"downloading\")\n\n\t\terr = PullLanguagesYml(DefaultLanguagesYmlURL, languagesYml)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif _, err := os.Stat(languagesYml); err == nil {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"languages\": languagesYml,\n\t\t}).Info(\"loading\")\n\n\t\tlangs, err = LoadLanguages(languagesYml)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &Runner{\n\t\tSources: sources,\n\t\tCount: count,\n\t\tFrobs: DefaultFrobs,\n\t\tLanguages: langs,\n\n\t\tlog: log,\n\t}, nil\n}",
"func TestPreprocess(t *testing.T) {\n\tdefaultRes := resource.JobResource{\n\t\tWorkerCPU: \"1000m\",\n\t\tWorkerMemory: \"1Gi\",\n\t\tPSCPU: \"1000m\",\n\t\tPSMemory: \"1Gi\",\n\t\tMasterCPU: \"1000m\",\n\t\tMasterMemory: \"1Gi\",\n\t}\n\n\ti := New(defaultRes)\n\ttype TestCase struct {\n\t\tCode string\n\t\tExpected *types.Parameter\n\t}\n\ttestCases := []TestCase{\n\t\t{\n\t\t\tCode: `%framework=tensorflow\n%ps=1\n%worker=1\nsome code here.\n`,\n\t\t\tExpected: &types.Parameter{\n\t\t\t\tFramework: types.FrameworkTypeTensorFlow,\n\t\t\t\tPSCount: 1,\n\t\t\t\tWorkerCount: 1,\n\t\t\t\tResource: defaultRes,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tCode: `%framework=tensorflow\n%ps=1\nsome code here.\n`,\n\t\t\tExpected: &types.Parameter{\n\t\t\t\tFramework: types.FrameworkTypeTensorFlow,\n\t\t\t\tPSCount: 1,\n\t\t\t\tResource: defaultRes,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tCode: `%framework=tensorflow\n%ps=1`,\n\t\t\tExpected: &types.Parameter{\n\t\t\t\tFramework: types.FrameworkTypeTensorFlow,\n\t\t\t\tPSCount: 1,\n\t\t\t\tResource: defaultRes,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tCode: `%framework=tensorflow\n%cleanPolicy=running`,\n\t\t\tExpected: &types.Parameter{\n\t\t\t\tFramework: types.FrameworkTypeTensorFlow,\n\t\t\t\tCleanPolicy: types.CleanPodPolicyRunning,\n\t\t\t\tResource: defaultRes,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tCode: `%framework=tensorflow\n%cleanPolicy=all`,\n\t\t\tExpected: &types.Parameter{\n\t\t\t\tFramework: types.FrameworkTypeTensorFlow,\n\t\t\t\tCleanPolicy: types.CleanPodPolicyAll,\n\t\t\t\tResource: defaultRes,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tCode: `%framework=tensorflow\n%cleanPolicy=none`,\n\t\t\tExpected: &types.Parameter{\n\t\t\t\tFramework: types.FrameworkTypeTensorFlow,\n\t\t\t\tCleanPolicy: types.CleanPodPolicyNone,\n\t\t\t\tResource: defaultRes,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// Invalid clean policy will use default value none.\n\t\t\tCode: `%framework=tensorflow\n%cleanPolicy=test`,\n\t\t\tExpected: &types.Parameter{\n\t\t\t\tFramework: 
types.FrameworkTypeTensorFlow,\n\t\t\t\tCleanPolicy: types.CleanPodPolicyNone,\n\t\t\t\tResource: defaultRes,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tCode: `%framework=tensorflow\n%ps=1;%cpu=100m;%memory=100Mi`,\n\t\t\tExpected: &types.Parameter{\n\t\t\t\tFramework: types.FrameworkTypeTensorFlow,\n\t\t\t\tPSCount: 1,\n\t\t\t\tResource: resource.JobResource{\n\t\t\t\t\tPSCPU: \"100m\",\n\t\t\t\t\tPSMemory: \"100Mi\",\n\t\t\t\t\tWorkerCPU: defaultRes.WorkerCPU,\n\t\t\t\t\tWorkerMemory: defaultRes.WorkerMemory,\n\t\t\t\t\tMasterCPU: defaultRes.MasterCPU,\n\t\t\t\t\tMasterMemory: defaultRes.MasterMemory,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tCode: `%framework=tensorflow\n%ps=1;%cpu=100m;%memory=100Mi\n%worker=2;%cpu=10m;%memory=10Mi`,\n\t\t\tExpected: &types.Parameter{\n\t\t\t\tFramework: types.FrameworkTypeTensorFlow,\n\t\t\t\tPSCount: 1,\n\t\t\t\tWorkerCount: 2,\n\t\t\t\tResource: resource.JobResource{\n\t\t\t\t\tPSCPU: \"100m\",\n\t\t\t\t\tPSMemory: \"100Mi\",\n\t\t\t\t\tWorkerCPU: \"10m\",\n\t\t\t\t\tWorkerMemory: \"10Mi\",\n\t\t\t\t\tMasterCPU: defaultRes.MasterCPU,\n\t\t\t\t\tMasterMemory: defaultRes.MasterMemory,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tCode: `%framework=pytorch\n%master=1;%cpu=100m;%memory=100Mi\n%worker=2;%cpu=10m;%memory=10Mi`,\n\t\t\tExpected: &types.Parameter{\n\t\t\t\tFramework: types.FrameworkTypePyTorch,\n\t\t\t\tMasterCount: 1,\n\t\t\t\tWorkerCount: 2,\n\t\t\t\tResource: resource.JobResource{\n\t\t\t\t\tPSCPU: defaultRes.PSCPU,\n\t\t\t\t\tPSMemory: defaultRes.PSMemory,\n\t\t\t\t\tWorkerCPU: \"10m\",\n\t\t\t\t\tWorkerMemory: \"10Mi\",\n\t\t\t\t\tMasterCPU: \"100m\",\n\t\t\t\t\tMasterMemory: \"100Mi\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tactual, err := i.Preprocess(tc.Code)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Expected nil got error: %v\", err)\n\t\t}\n\t\tif !reflect.DeepEqual(actual, tc.Expected) {\n\t\t\tt.Errorf(\"Expected %v, got %v\", tc.Expected, actual)\n\t\t}\n\t}\n}",
"func startRunner(manifest *manifest) (*testRunner, error) {\n\ttype runner struct {\n\t\tName string\n\t\tCommand struct {\n\t\t\tWindows string\n\t\t\tLinux string\n\t\t\tDarwin string\n\t\t}\n\t}\n\n\tvar r runner\n\tcontents := readFileContents(fmt.Sprintf(\"runner/%s.json\", manifest.Language))\n\terr := json.Unmarshal([]byte(contents), &r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcommand := \"\"\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\tcommand = r.Command.Windows\n\t\tbreak\n\tcase \"darwin\":\n\t\tcommand = r.Command.Darwin\n\t\tbreak\n\tdefault:\n\t\tcommand = r.Command.Linux\n\t\tbreak\n\t}\n\n\tcmd := exec.Command(command)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Wait for the process to exit so we will get a detailed error message\n\tgo func() {\n\t\terr := cmd.Wait()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Runner exited with error: %s\\n\", err.Error())\n\t\t}\n\t}()\n\n\treturn &testRunner{cmd: cmd}, nil\n}",
"func TestPruner(t *testing.T) {\n\tte := framework.SetupAvailableImageRegistry(t, nil)\n\tdefer framework.TeardownImageRegistry(te)\n\n\tdefer func() {\n\t\tif t.Failed() {\n\t\t\tframework.DumpImagePrunerResource(t, te.Client())\n\t\t}\n\t}()\n\n\t// TODO: Move these checks to a conformance test run on all providers\n\tframework.EnsureInternalRegistryHostnameIsSet(te)\n\tframework.EnsureOperatorIsNotHotLooping(te)\n\tframework.EnsureServiceCAConfigMap(te)\n\tframework.EnsureNodeCADaemonSetIsAvailable(te)\n\n\t// Check that the pruner custom resource was created\n\tcr, err := te.Client().ImagePruners().Get(\n\t\tcontext.Background(), defaults.ImageRegistryImagePrunerResourceName, metav1.GetOptions{},\n\t)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !cr.Spec.IgnoreInvalidImageReferences {\n\t\tt.Errorf(\"the default pruner config should have spec.ignoreInvalidImageReferences set to true, but it doesn't\")\n\t}\n\n\t// Check that the cronjob was created\n\tcronjob, err := te.Client().BatchV1beta1Interface.CronJobs(defaults.ImageRegistryOperatorNamespace).Get(\n\t\tcontext.Background(), \"image-pruner\", metav1.GetOptions{},\n\t)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check that the Available condition is set for the pruner\n\tframework.PrunerConditionExistsWithStatusAndReason(te, \"Available\", operatorapi.ConditionTrue, \"AsExpected\")\n\n\t// Check that the Scheduled condition is set for the cronjob\n\tframework.PrunerConditionExistsWithStatusAndReason(te, \"Scheduled\", operatorapi.ConditionTrue, \"Scheduled\")\n\n\t// Check that the Failed condition is set correctly for the last job run\n\tframework.PrunerConditionExistsWithStatusAndReason(te, \"Failed\", operatorapi.ConditionFalse, \"Complete\")\n\n\tif !containsString(cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Args, \"--ignore-invalid-refs=true\") {\n\t\tdefer framework.DumpYAML(t, \"cronjob\", cronjob)\n\t\tt.Fatalf(\"flag --ignore-invalid-refs=true is not found\")\n\t}\n\n\t// 
Check that making changes to the pruner custom resource trickle down to the cronjob\n\t// and that the conditions get updated correctly\n\ttruePtr := true\n\tcr.Spec.Suspend = &truePtr\n\tcr.Spec.Schedule = \"10 10 * * *\"\n\tcr.Spec.IgnoreInvalidImageReferences = false\n\t_, err = te.Client().ImagePruners().Update(\n\t\tcontext.Background(), cr, metav1.UpdateOptions{},\n\t)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\t// Reset the CR\n\t\tcr, err := te.Client().ImagePruners().Get(\n\t\t\tcontext.Background(), defaults.ImageRegistryImagePrunerResourceName, metav1.GetOptions{},\n\t\t)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tfalsePtr := false\n\t\tcr.Spec.Suspend = &falsePtr\n\t\tcr.Spec.Schedule = \"\"\n\t\t_, err = te.Client().ImagePruners().Update(\n\t\t\tcontext.Background(), cr, metav1.UpdateOptions{},\n\t\t)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\t// Check that the Scheduled condition is set for the cronjob\n\tframework.PrunerConditionExistsWithStatusAndReason(te, \"Scheduled\", operatorapi.ConditionFalse, \"Suspended\")\n\n\tcronjob, err = te.Client().BatchV1beta1Interface.CronJobs(defaults.ImageRegistryOperatorNamespace).Get(\n\t\tcontext.Background(), \"image-pruner\", metav1.GetOptions{},\n\t)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif *cronjob.Spec.Suspend != true {\n\t\tt.Errorf(\"The cronjob Spec.Suspend field should have been true, but was %v instead\", *cronjob.Spec.Suspend)\n\t}\n\n\tif cronjob.Spec.Schedule != \"10 10 * * *\" {\n\t\tt.Errorf(\"The cronjob Spec.Schedule field should have been '10 10 * * *' but was %v instead\", cronjob.Spec.Schedule)\n\t}\n\n\tif !containsString(cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Args, \"--ignore-invalid-refs=false\") {\n\t\tt.Fatalf(\"The cronjob container arguments should contain --ignore-invalid-refs=false, but arguments are %v\", cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Args)\n\t}\n}",
"func NewRunnerAPI() runnerAPI {\n\treturn runnerAPI{}\n}",
"func ExecutePre(ctx context.Context, c string) {\n\texecute(ctx, c, preJobMap)\n}",
"func (name *GroupNameFlag) Preprocess(c *app.Context) (err error) {\n\tif name.Value == \"\" {\n\t\treturn\n\t}\n\tgroupName := lib.ParseGroupName(name.Value, c.Config().GetGroup())\n\tname.GroupName = groupName\n\treturn\n}",
"func New() *Runner {\n\treturn &Runner{\n\t\tmanager: endly.New(),\n\t\tEvents: NewEventTags(),\n\t\tRenderer: NewRenderer(os.Stdout, 120),\n\t\tgroup: &MessageGroup{},\n\t\txUnitSummary: xunit.NewTestsuite(),\n\t\tStyle: NewStyle(),\n\t}\n}",
"func NewStage(name string, e func(*Context) error) Stage {\n\treturn &baseStage{name: name, execFunc: e}\n}",
"func GetRunFnRunner(name string) *RunFnRunner {\n\tr := &RunFnRunner{}\n\tc := &cobra.Command{\n\t\tUse: \"run DIR\",\n\t\tAliases: []string{\"run-fns\"},\n\t\tShort: commands.RunFnsShort,\n\t\tLong: commands.RunFnsLong,\n\t\tExample: commands.RunFnsExamples,\n\t\tRunE: r.runE,\n\t\tArgs: cobra.ExactArgs(1),\n\t}\n\tfixDocs(name, c)\n\tc.Flags().BoolVar(&r.IncludeSubpackages, \"include-subpackages\", true,\n\t\t\"also print resources from subpackages.\")\n\tr.Command = c\n\tr.Command.Flags().BoolVar(\n\t\t&r.DryRun, \"dry-run\", false, \"print results to stdout\")\n\tr.Command.Flags().StringSliceVar(\n\t\t&r.FnPaths, \"fn-path\", []string{},\n\t\t\"directories containing functions without configuration\")\n\tr.Command.AddCommand(XArgsCommand())\n\tr.Command.AddCommand(WrapCommand())\n\treturn r\n}",
"func newRunner(output string, err error) *MockRunner {\n\tm := &MockRunner{}\n\tm.On(\"Run\", mock.Anything).Return([]byte(output), err)\n\treturn m\n}",
"func (*clusterPackagesExecutor) PreCheck(ctx context.Context) error {\n\treturn nil\n}",
"func (s *BasePlSqlParserListener) EnterNew_partition_name(ctx *New_partition_nameContext) {}",
"func (m *Measure) preBarrier(t time.Time) {\n\tm.preRunners = append(m.preRunners, Runner{PreTime: t})\n\tfmt.Printf(\"Now there is %v runners qualifying\\n\", len(m.preRunners))\n\tfmt.Printf(\"Now there is %v runners running\\n\", len(m.runners))\n\n}",
"func (r *Runner) Name() string { return RunnerName }",
"func (this *Tidy) NewPreTags(val string) (bool, error) {\n\tv := (*C.tmbchar)(C.CString(val))\n\tdefer C.free(unsafe.Pointer(v))\n\treturn this.optSetString(C.TidyPreTags, v)\n}",
"func (client ModelClient) AddCustomPrebuiltIntentPreparer(ctx context.Context, appID uuid.UUID, versionID string, prebuiltDomainModelCreateObject PrebuiltDomainModelCreateObject) (*http.Request, error) {\n\turlParameters := map[string]interface{}{\n\t\t\"Endpoint\": client.Endpoint,\n\t}\n\n\tpathParameters := map[string]interface{}{\n\t\t\"appId\": autorest.Encode(\"path\", appID),\n\t\t\"versionId\": autorest.Encode(\"path\", versionID),\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsContentType(\"application/json; charset=utf-8\"),\n\t\tautorest.AsPost(),\n\t\tautorest.WithCustomBaseURL(\"{Endpoint}/luis/api/v2.0\", urlParameters),\n\t\tautorest.WithPathParameters(\"/apps/{appId}/versions/{versionId}/customprebuiltintents\", pathParameters),\n\t\tautorest.WithJSON(prebuiltDomainModelCreateObject))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}",
"func NewNameLT(v string) predicate.User {\n\treturn predicate.User(sql.FieldLT(FieldNewName, v))\n}",
"func (r *PodsIncomingReflector) PreAdd(obj interface{}) (interface{}, watch.EventType) {\n\tforeignPod := obj.(*corev1.Pod)\n\n\thomePod := r.sharedPreRoutine(foreignPod)\n\tif homePod == nil {\n\t\treturn nil, watch.Added\n\t}\n\n\treturn homePod, watch.Added\n}"
] | [
"0.574454",
"0.5559259",
"0.5491292",
"0.54819775",
"0.54751414",
"0.54033977",
"0.53813756",
"0.5324207",
"0.5233656",
"0.5183823",
"0.51202166",
"0.511692",
"0.50739557",
"0.5055979",
"0.50478184",
"0.50427",
"0.49869433",
"0.49579746",
"0.49525964",
"0.4946813",
"0.49373913",
"0.49336827",
"0.49235207",
"0.48999068",
"0.48933965",
"0.48901942",
"0.48623538",
"0.48579386",
"0.4843702",
"0.48044685",
"0.47489446",
"0.4748392",
"0.47396335",
"0.47339883",
"0.4727486",
"0.47206095",
"0.47160387",
"0.47072002",
"0.47016266",
"0.4652885",
"0.4635366",
"0.46322677",
"0.4632154",
"0.4628737",
"0.46272334",
"0.46250686",
"0.4622277",
"0.46195516",
"0.4614659",
"0.45811537",
"0.45796323",
"0.45774665",
"0.4570769",
"0.4569171",
"0.4566725",
"0.45497006",
"0.4542608",
"0.4535446",
"0.4523536",
"0.45225245",
"0.4517741",
"0.45177177",
"0.4516559",
"0.45138404",
"0.44844016",
"0.44811296",
"0.44795853",
"0.44748157",
"0.4470957",
"0.44660124",
"0.44580963",
"0.44507033",
"0.444974",
"0.44480857",
"0.44377986",
"0.44274485",
"0.44140425",
"0.44096494",
"0.44069102",
"0.44012696",
"0.43975478",
"0.4393636",
"0.4388487",
"0.43817866",
"0.43783182",
"0.4372668",
"0.43718755",
"0.436917",
"0.43678924",
"0.43523157",
"0.43365195",
"0.4332182",
"0.43317422",
"0.4317757",
"0.43156034",
"0.4311935",
"0.43105158",
"0.43035093",
"0.42990905",
"0.42901635"
] | 0.82757413 | 0 |
NewGroup return a Group with input name. | NewGroup возвращает группу с указанным именем. | func NewGroup(name string) Group {
return Group{
name: name,
readyCh: make(chan struct{}),
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func NewGroup(owner, name string) Group {\n\tnow := time.Now()\n\treturn Group{\n\t\tOwner: owner,\n\t\tName: name,\n\t\tDescription: name,\n\t\tAccess: \"private\",\n\t\tBirthtime: now,\n\t\tMTime: now,\n\t}\n}",
"func NewGroup(name string, members ...string) *Group {\n\treturn &Group{\n\t\tName: name,\n\t\tpassword: \"\",\n\t\tGID: -1,\n\t\tUserList: members,\n\t}\n}",
"func NewGroup() *Group {\n\treturn &Group{}\n}",
"func NewGroup() *Group {\n\treturn &Group{}\n}",
"func NewGroup(name string, maxSize int) *Group {\n\treturn &Group{\n\t\tName: name,\n\t\tMembers: []Person{},\n\t\tMaxSize: maxSize,\n\t}\n}",
"func NewGroup(name string, o Owner) *Group {\n\tng := new(Group)\n\tng.Own = o\n\tng.Name = name\n\n\tvar nid OwnerID\n\tnid.Type = 'g'\n\tnid.UserDefined = name2userdefined(name)\n\t// nid.Stamp = newstamp()\n\n\tng.ID = nid\n\n\treturn ng\n}",
"func NewGroup(field string) Group {\n\treturn Group{\n\t\tField: field,\n\t}\n}",
"func New(dir string) *Group {\n\tg := &Group{\n\t\tdir: dir,\n\t}\n\tg.Clear()\n\treturn g\n}",
"func NewGroup(system SystemUtils) *Group {\n\treturn &Group{\n\t\tsystem: system,\n\t}\n}",
"func NewGroup()(*Group) {\n m := &Group{\n DirectoryObject: *NewDirectoryObject(),\n }\n odataTypeValue := \"#microsoft.graph.group\";\n m.SetOdataType(&odataTypeValue);\n return m\n}",
"func New(gid string) *Group {\n return &Group{\n Client: client.New().Init(),\n GroupID: gid,\n }\n}",
"func newRunnerGroup(scope RunnerGroupScope, name string) RunnerGroup {\n\tif name == \"\" {\n\t\treturn RunnerGroup{\n\t\t\tScope: scope,\n\t\t\tKind: Default,\n\t\t\tName: \"\",\n\t\t}\n\t}\n\n\treturn RunnerGroup{\n\t\tScope: scope,\n\t\tKind: Custom,\n\t\tName: name,\n\t}\n}",
"func (visual *Visual) NewGroup(parts []string, effect string) (*Group, error) {\n\tgroup, err := newGroup(parts, effect)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvisual.mux.Lock()\n\tvisual.groups = append(visual.groups, group)\n\tvisual.mux.Unlock()\n\n\treturn group, nil\n}",
"func NewGroup(ctx context.Context) *Group {\n\tctx, cancel := context.WithCancel(ctx)\n\n\tg := &Group{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tdone: make(chan struct{}),\n\t}\n\n\tgo g.wait()\n\n\treturn g\n}",
"func (s *GroupsService) Create(\n\tctx context.Context,\n\tgroupName string,\n) error {\n\traw, err := json.Marshal(struct {\n\t\tGroupName string `json:\"group_name\"`\n\t}{\n\t\tgroupName,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq, err := http.NewRequest(\n\t\thttp.MethodPost,\n\t\ts.client.url+\"2.0/groups/create\",\n\t\tbytes.NewBuffer(raw),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq = req.WithContext(ctx)\n\tres, err := s.client.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.StatusCode >= 300 || res.StatusCode <= 199 {\n\t\treturn fmt.Errorf(\n\t\t\t\"Failed to returns 2XX response: %d\", res.StatusCode)\n\t}\n\n\treturn nil\n}",
"func NewGroup(name string, cacheBytes int64, getter Getter) *Group {\n\tif getter == nil {\n\t\tpanic(\"Nil Getter\")\n\t}\n\n\tmu.Lock()\n\tdefer mu.Unlock()\n\n\tg := &Group{\n\t\tname: name,\n\t\tmainCache: &SafeCache{maxBytes: cacheBytes},\n\t\tgetter: getter,\n\t}\n\tgroups[name] = g\n\treturn g\n}",
"func newGroup(groupId string, broadcastChannelCap int64) *group {\n\n\tg := &group{\n\t\tId: groupId,\n\t\tclients: make(map[string]*socketClient),\n\t\tbroadcastChannel: make(chan interface{}, broadcastChannelCap),\n\t\tshutdownChannel: make(chan interface{}),\n\t\tdownChannel: make(chan interface{}, broadcastChannelCap),\n\t}\n\n\tAppLogger.Infof(\"[newGroup] group: %s created\", groupId)\n\treturn g\n}",
"func NewGroup(gvr client.GVR) ResourceViewer {\n\tg := Group{ResourceViewer: NewBrowser(gvr)}\n\tg.AddBindKeysFn(g.bindKeys)\n\tg.SetContextFn(g.subjectCtx)\n\n\treturn &g\n}",
"func New(s []string) Group {\n\treturn Group{str: s}\n}",
"func NewGroup(lv Level, w io.Writer) Group {\n\treturn NewGroupWithHandle(lv, w, nil, nil)\n}",
"func NewGroup(ctx context.Context, options ...GroupOption) *Group {\n\tctx, cancel := context.WithCancel(ctx)\n\tg := &Group{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tpool: dummyPool{},\n\t\trecover: false,\n\t}\n\tfor _, opt := range options {\n\t\topt(g)\n\t}\n\treturn g\n}",
"func NewGroup(m *algebra.Matrix) *Group {\n\tmat := m\n\tif m == nil || len(m.Get()) != 4 || len(m.Get()[0]) != 4 {\n\t\tmat = algebra.IdentityMatrix(4)\n\t}\n\temptyShapes := make([]Shape, 0, 0)\n\treturn &Group{transform: mat, parent: nil, shapes: emptyShapes, bounds: [2]*algebra.Vector{}}\n}",
"func NewGrouping(arg string) *Grouping {\n\tsplit := strings.Split(arg, \",\")\n\tvar grouping *Grouping\n\tif len(split) > 0 {\n\t\tgrouping = &Grouping{\n\t\t\tName: split[0],\n\t\t}\n\t}\n\tif len(split) >= 2 {\n\t\tindex, _ := strconv.ParseInt(split[1], 0, 64)\n\t\tgrouping.Index = int(index)\n\t}\n\tif len(split) >= 3 {\n\t\tduration, _ := time.ParseDuration(split[2])\n\t\tgrouping.Max = duration\n\t}\n\treturn grouping\n}",
"func (conn Connection) CreateGroup(name string) (resp *http.Response, err error) {\n\tresp, err = conn.Post(fmt.Sprintf(\"/groups/%s\", name), nil, nil)\n\treturn resp, err\n}",
"func (s *GroupsService) Create(r *GroupRequest) (*Group, *Response, error) {\n\treq, err := s.client.NewRequest(\"POST\", \"groups\", r)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tgroup := &Group{}\n\tresp, err := s.client.Do(req, group)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn group, resp, nil\n}",
"func WindowGroupNew() (*WindowGroup, error) {\n\tc := C.gtk_window_group_new()\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\treturn wrapWindowGroup(glib.Take(unsafe.Pointer(c))), nil\n}",
"func (r *RouteGroup) NewGroup(path string) *RouteGroup {\n\treturn NewRouteGroup(r.r, r.subPath(path))\n}",
"func newGroup() *Group {\n\tg := new(Group)\n\tg.handlers = make([]HandlerFunc, 0)\n\treturn g\n}",
"func NewGroup() UserGroup {\n\tgroup := make(UserGroup, 0)\n\treturn group\n}",
"func NewGroup(client *Client) *GroupService {\n\treturn &GroupService{\n\t\tclient: client,\n\t}\n}",
"func (r *ProjectsGroupsService) Create(name string, group *Group) *ProjectsGroupsCreateCall {\n\tc := &ProjectsGroupsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\tc.group = group\n\treturn c\n}",
"func New(directory string) (*Group, []error) {\n\tthis := new(Group)\n\n\tthis.t = template.New(\"\")\n\tthis.t.Parse(\"\")\n\n\terrs := this.loadFolder(directory)\n\n\treturn this, errs\n}",
"func NewGroup(bootles []Bottle, water uint32) Group {\n\treturn Group{\n\t\tWater: water,\n\t\tBottles: bootles,\n\t}\n}",
"func (og *OrdererGroup) NewGroup(name string) (ValueProposer, error) {\n\treturn NewOrganizationGroup(name, og.mspConfig), nil\n}",
"func NewGroup(client *gosip.SPClient, endpoint string, config *RequestConfig) *Group {\n\treturn &Group{\n\t\tclient: client,\n\t\tendpoint: endpoint,\n\t\tconfig: config,\n\t\tmodifiers: NewODataMods(),\n\t}\n}",
"func CreateGroup(g *Group) (err error) {\n\tif err = IsUsableGroupname(g.Name); err != nil {\n\t\treturn err\n\t}\n\n\tisExist, err := IsGroupExist(0, g.Name)\n\tif err != nil {\n\t\treturn err\n\t} else if isExist {\n\t\treturn ErrGroupAlreadyExist{g.Name}\n\t\t//return nil\n\t}\n\n\tsess := x.NewSession()\n\tdefer sessionRelease(sess)\n\tif err = sess.Begin(); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err = sess.Insert(g); err != nil {\n\t\treturn err\n\t}\n\n\treturn sess.Commit()\n}",
"func CreateDefaultGroup(db *gorp.DbMap, groupName string) error {\n\tquery := `SELECT id FROM \"group\" where name = $1`\n\tvar id int64\n\tif err := db.QueryRow(query, groupName).Scan(&id); err == sql.ErrNoRows {\n\t\tlog.Debug(\"CreateDefaultGroup> create %s group in DB\", groupName)\n\t\tquery = `INSERT INTO \"group\" (name) VALUES ($1)`\n\t\tif _, err := db.Exec(query, groupName); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (c *EtcdGroupService) GroupCreate(ctx context.Context, groupName string) error {\n\treturn c.createGroup(ctx, groupName, 0)\n}",
"func NewGroupBy(name string, ex goexpr.Expr) GroupBy {\n\treturn GroupBy{\n\t\tExpr: ex,\n\t\tName: name,\n\t}\n}",
"func (gr *GroupResource) Create(owner string, name string) (g *GroupDetails, err error) {\n\townerOrCurrentUser(gr, &owner)\n\n\tpath := fmt.Sprintf(\"/groups/%s/\", owner)\n\tvalues := url.Values{}\n\tvalues.Set(\"name\", name)\n\terr = gr.client.do(\"POST\", path, nil, values, &g)\n\n\treturn\n}",
"func (db *MySQLDB) CreateGroup(ctx context.Context, groupName, groupDomain, description string) (*Group, error) {\n\tfLog := mysqlLog.WithField(\"func\", \"CreateGroup\").WithField(\"RequestID\", ctx.Value(constants.RequestID))\n\tr := &Group{\n\t\tRecID: helper.MakeRandomString(10, true, true, true, false),\n\t\tGroupName: groupName,\n\t\tGroupDomain: groupDomain,\n\t\tDescription: description,\n\t}\n\tq := \"INSERT INTO HANSIP_GROUP(REC_ID, GROUP_NAME, GROUP_DOMAIN, DESCRIPTION) VALUES (?,?,?,?)\"\n\t_, err := db.instance.ExecContext(ctx, q, r.RecID, groupName, groupDomain, description)\n\tif err != nil {\n\t\tfLog.Errorf(\"db.instance.ExecContext got %s. SQL = %s\", err.Error(), q)\n\t\treturn nil, &ErrDBExecuteError{\n\t\t\tWrapped: err,\n\t\t\tMessage: \"Error CreateGroup\",\n\t\t\tSQL: q,\n\t\t}\n\t}\n\treturn r, nil\n}",
"func (db *Database) AddGroup(name string) (int, error) {\n\trow := db.db.QueryRow(`\n\t\tINSERT INTO melodious.groups (name)\tVALUES ($1) RETURNING id;\n\t`, name)\n\tvar id int\n\terr := row.Scan(&id)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\treturn id, nil\n}",
"func NewGroup(list []*Identity, threshold int, genesis int64, period, catchupPeriod time.Duration,\n\tsch *crypto.Scheme, beaconID string) *Group {\n\treturn &Group{\n\t\tNodes: copyAndSort(list),\n\t\tThreshold: threshold,\n\t\tGenesisTime: genesis,\n\t\tPeriod: period,\n\t\tCatchupPeriod: catchupPeriod,\n\t\tScheme: sch,\n\t\tID: beaconID,\n\t}\n}",
"func New(app, account, region, stack, cluster string) InstanceGroup {\n\treturn group{\n\t\tapp: app,\n\t\taccount: account,\n\t\tregion: region,\n\t\tstack: stack,\n\t\tcluster: cluster,\n\t}\n}",
"func (app *App) GroupCreate(ctx context.Context, groupName string) error {\n\treturn app.groups.GroupCreate(ctx, groupName)\n}",
"func (_IFactorySpace *IFactorySpaceTransactor) CreateGroup(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _IFactorySpace.contract.Transact(opts, \"createGroup\")\n}",
"func GroupName() string {\n\treturn groupName\n}",
"func (s *AutograderService) CreateGroup(ctx context.Context, in *pb.Group) (*pb.Group, error) {\n\tusr, err := s.getCurrentUser(ctx)\n\tif err != nil {\n\t\ts.logger.Errorf(\"CreateGroup failed: authentication error: %w\", err)\n\t\treturn nil, ErrInvalidUserInfo\n\t}\n\tif !s.isEnrolled(usr.GetID(), in.GetCourseID()) {\n\t\ts.logger.Errorf(\"CreateGroup failed: user %s not enrolled in course %d\", usr.GetLogin(), in.GetCourseID())\n\t\treturn nil, status.Errorf(codes.PermissionDenied, \"user not enrolled in given course\")\n\t}\n\tif !(in.Contains(usr) || s.isTeacher(usr.GetID(), in.GetCourseID())) {\n\t\ts.logger.Error(\"CreateGroup failed: user is not group member or teacher\")\n\t\treturn nil, status.Errorf(codes.PermissionDenied, \"only group member or teacher can create group\")\n\t}\n\tgroup, err := s.createGroup(in)\n\tif err != nil {\n\t\ts.logger.Errorf(\"CreateGroup failed: %w\", err)\n\t\treturn nil, status.Error(codes.InvalidArgument, \"failed to create group\")\n\t}\n\treturn group, nil\n}",
"func NewGroup(ctx context.Context) *errGroup {\n\tnewCtx, cancel := context.WithCancel(ctx)\n\treturn &errGroup{\n\t\tctx: newCtx,\n\t\tcancel: cancel,\n\t}\n}",
"func (c *Client) CreateGroup() (string, error) {\n\tvar groupId string\n\tif !c.authenticated {\n\t\treturn groupId, errors.New(\"Not authenticated. Call Authenticate first\")\n\t}\n\n\tmsg := common.NewMessage(c.userId, \"server\",\n\t\t\"control\", \"remove_contact\", time.Time{},\n\t\tcommon.TEXT, \"\")\n\n\tencoded, err := msg.Json()\n\tif err != nil {\n\t\tlog.Println(\"Failed to encode create group message\", err)\n\t\treturn groupId, err\n\t}\n\n\terr = c.transport.SendText(string(encoded))\n\tif err != nil {\n\t\tlog.Println(\"Failed to send create group message\", err)\n\t\treturn groupId, err\n\t}\n\n\tresp := <-c.Out\n\tif resp.Status() == common.STATUS_ERROR {\n\t\terrMsg := resp.Error()\n\t\tlog.Println(\"Create group response error\", errMsg)\n\t\treturn groupId, errors.New(errMsg)\n\t}\n\n\tgroupId = resp.GetJsonData(\"group_id\").(string)\n\treturn groupId, nil\n}",
"func GroupName(name string) OptFunc {\n\treturn func(p *ByName) error {\n\t\tname = strings.TrimSpace(name)\n\t\terr := GroupNameCheck(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.groupName = name\n\t\treturn nil\n\t}\n}",
"func NewCalendarGroup()(*CalendarGroup) {\n m := &CalendarGroup{\n Entity: *NewEntity(),\n }\n return m\n}",
"func (g *Group) CreateGroup(name string) (*Group, error) {\n\treturn createGroup(g.id, name, C.H5P_DEFAULT, C.H5P_DEFAULT, C.H5P_DEFAULT)\n}",
"func newProviderGroup(k key) *providerGroup {\n\tifaceKey := key{\n\t\tres: reflect.SliceOf(k.res),\n\t\ttyp: ptGroup,\n\t}\n\n\treturn &providerGroup{\n\t\tresult: ifaceKey,\n\t\tpl: parameterList{},\n\t}\n}",
"func (db *DB) Group(name string) (tx *DB) {\n\ttx = db.getInstance()\n\n\tfields := strings.FieldsFunc(name, utils.IsValidDBNameChar)\n\ttx.Statement.AddClause(clause.GroupBy{\n\t\tColumns: []clause.Column{{Name: name, Raw: len(fields) != 1}},\n\t})\n\treturn\n}",
"func (r *marathonClient) Group(name string) (*Group, error) {\n\tgroup := new(Group)\n\tif err := r.apiGet(fmt.Sprintf(\"%s/%s\", marathonAPIGroups, trimRootPath(name)), nil, group); err != nil {\n\t\treturn nil, err\n\t}\n\treturn group, nil\n}",
"func NewGroup(dataframe *DataFrame, columns ...string) *Groups {\n\t// ret := &Groups{Columns: []string{}, Grouper: columns, Group: make(map[types.C][]indices.Index), Df: dataframe}\n\tret := &Groups{Keys: []Keys{}, Columns: []string{}, Grouper: columns, Group: make(map[types.C][]indices.Index), Df: dataframe}\n\n\treturn ret\n}",
"func New(ctx context.Context, concurrency int) (*Group, context.Context) {\n\tif concurrency < 1 {\n\t\tconcurrency = 1\n\t}\n\n\tparent, ctx := errgroup.WithContext(ctx)\n\treturn &Group{\n\t\tlimiter: make(chan struct{}, concurrency),\n\t\tparent: parent,\n\t\tctx: ctx,\n\t}, ctx\n}",
"func (_BaseGroupFactory *BaseGroupFactoryTransactor) CreateGroup(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BaseGroupFactory.contract.Transact(opts, \"createGroup\")\n}",
"func (p ByName) GroupName() string { return p.groupName }",
"func (mg *Groups) Create(group *Group) error {\n\n\tif mg.group != nil && len(group.ID) > 0 {\n\n\t\tpath := fmt.Sprintf(\"%s%s\", marathon.APIGroups, utilities.DelInitialSlash(group.ID))\n\n\t\tif _, err := mg.client.Session.BodyAsJSON(group).Post(path, mg.deploy, mg.fail); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmg.group = group\n\t\treturn nil\n\t}\n\treturn errors.New(\"group cannot be null nor empty\")\n}",
"func NewGroup(thresholds ...*Threshold) *Group {\n\treturn &Group{Thresholds: thresholds}\n}",
"func (a *API) CreateGroup(name, language string, agentPriorities map[string]GroupPriority) (int32, error) {\n\tvar resp createGroupResponse\n\terr := a.Call(\"create_group\", &createGroupRequest{\n\t\tName: name,\n\t\tLanguageCode: language,\n\t\tAgentPriorities: agentPriorities,\n\t}, &resp)\n\n\treturn resp.ID, err\n}",
"func (g *GroupsService) CreateGroup(group Group) (*Group, *Response, error) {\n\tif err := g.client.validate.Struct(group); err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq, err := g.client.newRequest(IDM, \"POST\", \"authorize/identity/Group\", &group, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq.Header.Set(\"api-version\", groupAPIVersion)\n\n\tvar createdGroup Group\n\n\tresp, err := g.client.do(req, &createdGroup)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\treturn &createdGroup, resp, err\n\n}",
"func (u Util) CreateGroup(g types.Group) error {\n\targs := []string{\"--root\", u.DestDir}\n\n\tif g.Gid != nil {\n\t\targs = append(args, \"--gid\",\n\t\t\tstrconv.FormatUint(uint64(*g.Gid), 10))\n\t}\n\n\tif g.PasswordHash != \"\" {\n\t\targs = append(args, \"--password\", g.PasswordHash)\n\t} else {\n\t\targs = append(args, \"--password\", \"*\")\n\t}\n\n\tif g.System {\n\t\targs = append(args, \"--system\")\n\t}\n\n\targs = append(args, g.Name)\n\n\treturn u.LogCmd(exec.Command(\"groupadd\", args...),\n\t\t\"adding group %q\", g.Name)\n}",
"func (s *Server) makeGroup(cmd string, phone string) (Group, error) {\n\n\tstartIndex := strings.Index(cmd, \"<\")\n\tendIndex := strings.Index(cmd, \">\") + 1\n\n\tscannerEngine := &scanner.GScanner{\n\t\tInput: cmd[startIndex:endIndex],\n\t\tTokens: []string{\"<\", \">\", \":\"},\n\t\tIncludeTokensInOutput: false,\n\t}\n\n\toutput := scannerEngine.Scan()\n\n\tif len(output) == 3 {\n\t\tcommand := output[0]\n\t\tgrpName := strings.TrimSpace(output[1])\n\t\talias := strings.TrimSpace(output[2])\n\n\t\tif command != GroupMakeCommand[1:len(GroupMakeCommand)] {\n\t\t\treturn Group{}, errors.New(\"Bad command for making group. Please use the syntax: \" + GroupMakeCommand)\n\t\t}\n\t\tif len(alias) > GroupAliasMaxLen {\n\t\t\treturn Group{}, errors.New(\"Error. Your group alias cannot be more than \" + strconv.Itoa(GroupAliasMaxLen) + \" characters\")\n\t\t}\n\t\tif strings.Contains(alias, \" \") || strings.Contains(alias, \"\\n\") || strings.Contains(alias, \"\\t\") {\n\t\t\treturn Group{}, errors.New(\"Error: Your group alias must have no white spaces. \")\n\t\t}\n\n\t\tgrp := &Group{\n\t\t\tID: utils.GenUlid(),\n\t\t\tName: grpName,\n\t\t\tAlias: alias,\n\t\t\tAdminPhone: phone,\n\t\t\tMembers: make([]string, 0),\n\t\t}\n\n\t\t//Ensure that none of the user's groups has either of the alias or name given here\n\t\tif s.userHasGroupByName(phone, grp.Name) {\n\t\t\treturn Group{}, errors.New(\"Error: The Group, \" + grp.Name + \" is already amongst your groups!\")\n\t\t}\n\n\t\treturn *grp, nil\n\t}\n\n\terr := errors.New(\"The syntax of your command i.e `\" + cmd + \"` is wrong!\\n Please use `<grpmk:grpName>` to create a new group\")\n\n\treturn Group{}, err\n}",
"func CreateGroup(params types.ContextParams, clientSet apimachinery.ClientSetInterface, groupItems []metadata.Group) []Group {\n\tresults := make([]Group, 0)\n\tfor _, grp := range groupItems {\n\n\t\tresults = append(results, &group{\n\t\t\tgrp: grp,\n\t\t\tparams: params,\n\t\t\tclientSet: clientSet,\n\t\t})\n\t}\n\n\treturn results\n}",
"func Group(name string) (result *lib.FileGroup, err error) {\n\tresult = mainAssetDirectory.GetGroup(name)\n\tif result == nil {\n\t\tresult, err = mainAssetDirectory.NewFileGroup(name)\n\t}\n\treturn\n}",
"func (c *Client) CreateGroup(ctx context.Context, group Group) (Group, error) {\n\tresult := Group{}\n\tbody, err := json.Marshal(group)\n\tif err != nil {\n\t\treturn Group{}, err\n\t}\n\terr = c.sendRequest(\n\t\tctx,\n\t\t\"POST\",\n\t\tfmt.Sprintf(\"%s%s\", c.getRootURL(), groupSlug),\n\t\tbytes.NewReader(body),\n\t\t&result)\n\tif err != nil {\n\t\treturn Group{}, err\n\t}\n\n\treturn result, nil\n}",
"func createEmptyTestGroup(t *testing.T, session *session.Session, name string) error {\n\tsvc := iam.New(session)\n\n\tgroupInput := &iam.CreateGroupInput{\n\t\tGroupName: awsgo.String(name),\n\t}\n\n\t_, err := svc.CreateGroup(groupInput)\n\trequire.NoError(t, err)\n\treturn nil\n}",
"func createGroup() (group resources.Group, err error) {\n\tgroupsClient := resources.NewGroupsClient(config.SubscriptionID)\n\tgroupsClient.Authorizer = autorest.NewBearerAuthorizer(token)\n\n\treturn groupsClient.CreateOrUpdate(\n\t\tctx,\n\t\tresourceGroupName,\n\t\tresources.Group{\n\t\t\tLocation: to.StringPtr(resourceGroupLocation)})\n}",
"func (*CreateInstanceGroup) name() string {\n\treturn \"createInstanceGroup\"\n}",
"func (_BaseContentSpace *BaseContentSpaceTransactor) CreateGroup(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BaseContentSpace.contract.Transact(opts, \"createGroup\")\n}",
"func (g *Group) NewSubgroup() *Group {\n\tid := g.db.nextGroupID()\n\tsub := &Group{ID: id, db: g.db}\n\tg.groups = append(g.groups, sub)\n\tg.db.groups[id] = sub\n\treturn sub\n}",
"func CreateGroup(c *store.Context, group *Group) error {\n\n\terr := group.BeforeCreate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar existingGroups []*Group\n\terr = c.Store.FindAll(c, bson.M{\"name\": group.Name}, &existingGroups)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(existingGroups) > 0 {\n\t\treturn helpers.NewError(http.StatusConflict, \"group_already_exists\", \"Group already exists\", err)\n\t}\n\n\terr = c.Store.Create(c, \"groups\", group)\n\tif err != nil {\n\t\treturn helpers.NewError(http.StatusInternalServerError, \"group_creation_failed\", \"Failed to insert the group in the database\", err)\n\t}\n\n\treturn nil\n}",
"func (d *Dao) GroupByName(name string) (res *model.CardGroup, err error) {\n\tres = new(model.CardGroup)\n\tq := d.DB.Table(\"card_group\").Where(\"name=?\", name).First(res)\n\tif q.Error != nil {\n\t\tif q.RecordNotFound() {\n\t\t\terr = nil\n\t\t\tres = nil\n\t\t\treturn\n\t\t}\n\t\terr = errors.Wrapf(err, \"card_group by name\")\n\t}\n\treturn\n}",
"func New(controllerManager *generic.ControllerManager, informers informers.Interface,\n\tclient clientset.Interface) Interface {\n\treturn &group{\n\t\tcontrollerManager: controllerManager,\n\t\tinformers: informers,\n\t\tclient: client,\n\t}\n}",
"func (sqlStore *SQLStore) CreateGroup(group *model.Group) error {\n\tgroup.ID = model.NewID()\n\tgroup.CreateAt = GetMillis()\n\n\t_, err := sqlStore.execBuilder(sqlStore.db, sq.\n\t\tInsert(`\"Group\"`).\n\t\tSetMap(map[string]interface{}{\n\t\t\t\"ID\": group.ID,\n\t\t\t\"Name\": group.Name,\n\t\t\t\"Description\": group.Description,\n\t\t\t\"Version\": group.Version,\n\t\t\t\"CreateAt\": group.CreateAt,\n\t\t\t\"DeleteAt\": 0,\n\t\t}),\n\t)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create group\")\n\t}\n\n\treturn nil\n}",
"func NewLogGroup(ctx *pulumi.Context,\n\tname string, args *LogGroupArgs, opts ...pulumi.ResourceOpt) (*LogGroup, error) {\n\tinputs := make(map[string]interface{})\n\tif args == nil {\n\t\tinputs[\"kmsKeyId\"] = nil\n\t\tinputs[\"name\"] = nil\n\t\tinputs[\"namePrefix\"] = nil\n\t\tinputs[\"retentionInDays\"] = nil\n\t\tinputs[\"tags\"] = nil\n\t} else {\n\t\tinputs[\"kmsKeyId\"] = args.KmsKeyId\n\t\tinputs[\"name\"] = args.Name\n\t\tinputs[\"namePrefix\"] = args.NamePrefix\n\t\tinputs[\"retentionInDays\"] = args.RetentionInDays\n\t\tinputs[\"tags\"] = args.Tags\n\t}\n\tinputs[\"arn\"] = nil\n\ts, err := ctx.RegisterResource(\"aws:cloudwatch/logGroup:LogGroup\", name, true, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &LogGroup{s: s}, nil\n}",
"func (r *Group) Name(name string) *Group {\n\tr.ID = validateID(name)\n\treturn r\n}",
"func New(ctx context.Context) *Group {\n\t// Monitor goroutine context and cancelation.\n\tmctx, cancel := context.WithCancel(ctx)\n\n\tg := &Group{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\n\t\taddC: make(chan struct{}),\n\t\tlenC: make(chan int),\n\t}\n\n\tg.wg.Add(1)\n\tgo func() {\n\t\tdefer g.wg.Done()\n\t\tg.monitor(mctx)\n\t}()\n\n\treturn g\n}",
"func (p *ServerGatedServiceClient) CreateGroup(project_id int32, key string, group_id int64, group_name string) (r int64, err error) {\n\tif err = p.sendCreateGroup(project_id, key, group_id, group_name); err != nil {\n\t\treturn\n\t}\n\treturn p.recvCreateGroup()\n}",
"func (m *TeamItemRequestBuilder) Group()(*i8a1cdbeac728d5d9d3409d0d7085c53384ad37435e0292d966ed94bbc4155a05.GroupRequestBuilder) {\n return i8a1cdbeac728d5d9d3409d0d7085c53384ad37435e0292d966ed94bbc4155a05.NewGroupRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}",
"func (l *GroupLookup) newKeyGroup(entries []groupKeyListElement) *groupKeyList {\n\tid := l.nextID\n\tl.nextID++\n\treturn &groupKeyList{\n\t\tid: id,\n\t\telements: entries,\n\t}\n}",
"func (a *Client) CreateGroup(params *CreateGroupParams) (*CreateGroupOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateGroupParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"create_group\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/groups\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &CreateGroupReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*CreateGroupOK), nil\n\n}",
"func NewPatchGroup(ctx *pulumi.Context,\n\tname string, args *PatchGroupArgs, opts ...pulumi.ResourceOption) (*PatchGroup, error) {\n\tif args == nil || args.BaselineId == nil {\n\t\treturn nil, errors.New(\"missing required argument 'BaselineId'\")\n\t}\n\tif args == nil || args.PatchGroup == nil {\n\t\treturn nil, errors.New(\"missing required argument 'PatchGroup'\")\n\t}\n\tif args == nil {\n\t\targs = &PatchGroupArgs{}\n\t}\n\tvar resource PatchGroup\n\terr := ctx.RegisterResource(\"aws:ssm/patchGroup:PatchGroup\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (t *OpenconfigSystem_System_Aaa_ServerGroups) NewServerGroup(Name string) (*OpenconfigSystem_System_Aaa_ServerGroups_ServerGroup, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.ServerGroup == nil {\n\t\tt.ServerGroup = make(map[string]*OpenconfigSystem_System_Aaa_ServerGroups_ServerGroup)\n\t}\n\n\tkey := Name\n\n\t// Ensure that this key has not already been used in the\n\t// list. Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.ServerGroup[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list ServerGroup\", key)\n\t}\n\n\tt.ServerGroup[key] = &OpenconfigSystem_System_Aaa_ServerGroups_ServerGroup{\n\t\tName: &Name,\n\t}\n\n\treturn t.ServerGroup[key], nil\n}",
"func NewResourceGroup(key string) ResourceGroup {\n\tscope := Scope{\n\t\tKey: key,\n\t\tScopeObjects: []ScopeObject{\n\t\t\t{\n\t\t\t\tKey: \"*\",\n\t\t\t},\n\t\t},\n\t}\n\tresourceGroup := ResourceGroup{\n\t\tID: \"\",\n\t\tName: key,\n\t\tMeta: map[string]string{\n\t\t\t\"editable\": \"false\",\n\t\t},\n\t\tScope: scope,\n\t}\n\treturn resourceGroup\n}",
"func newRouteGroup(prefix string, router *Router, handlers []Handler) *RouteGroup {\n\treturn &RouteGroup{\n\t\tprefix: prefix,\n\t\trouter: router,\n\t\thandlers: handlers,\n\t}\n}",
"func NewProviderGroup(name string, providers ...Provider) Provider {\n\tgroup := providerGroup{\n\t\tname: name,\n\t}\n\tfor _, provider := range providers {\n\t\tgroup.providers = append([]Provider{provider}, group.providers...)\n\t}\n\treturn group\n}",
"func FindGroupByName(name string) Group {\n\tgroup := Group{}\n\tdb.Where(\"name = ?\", name).Take(&group)\n\treturn group\n}",
"func Group(queryName, group string) *QueryGVR {\n\treturn &QueryGVR{\n\t\tname: queryName,\n\t\tgroup: group,\n\t}\n}",
"func AddGroup(name string, members ...string) (gid int, err error) {\n\ts := NewGShadow(name, members...)\n\tif err = s.Add(nil); err != nil {\n\t\treturn\n\t}\n\n\treturn NewGroup(name, members...).Add()\n}",
"func NewGroupLookup() *GroupLookup {\n\treturn &GroupLookup{\n\t\tlastIndex: -1,\n\t\tnextID: 1,\n\t}\n}",
"func NewProtectGroup()(*ProtectGroup) {\n m := &ProtectGroup{\n LabelActionBase: *NewLabelActionBase(),\n }\n odataTypeValue := \"#microsoft.graph.protectGroup\"\n m.SetOdataType(&odataTypeValue)\n return m\n}",
"func createGroup(logGroupName string) error {\n\n\tlog.Println(\"Creating a LogGroup\", logGroupName)\n\tparams := &cloudwatchlogs.CreateLogGroupInput{\n\t\tLogGroupName: aws.String(logGroupName),\n\t}\n\t_, err := cloudWatchSvc.CreateLogGroup(params)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase cloudwatchlogs.ErrCodeResourceAlreadyExistsException:\n\t\t\t\tlog.Printf(\"LogGroup already exists %s\", logGroupName)\n\t\t\t\treturn nil\n\t\t\tdefault:\n\t\t\t\treturn aerr\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}",
"func NewGroups(filename string, bad BadLineHandler) (*HTGroup, error) {\n\thtGroup := HTGroup{\n\t\tfilePath: filename,\n\t}\n\treturn &htGroup, htGroup.ReloadGroups(bad)\n}",
"func NewProtectionGroup(ctx *pulumi.Context,\n\tname string, args *ProtectionGroupArgs, opts ...pulumi.ResourceOption) (*ProtectionGroup, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Aggregation == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Aggregation'\")\n\t}\n\tif args.Pattern == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Pattern'\")\n\t}\n\tif args.ProtectionGroupId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ProtectionGroupId'\")\n\t}\n\tvar resource ProtectionGroup\n\terr := ctx.RegisterResource(\"aws:shield/protectionGroup:ProtectionGroup\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func NewSystemGroup(name string, members ...string) *Group {\n\treturn &Group{\n\t\tName: name,\n\t\tpassword: \"\",\n\t\tGID: -1,\n\t\tUserList: members,\n\n\t\taddSystemGroup: true,\n\t}\n}",
"func (c *Client) CreateGroup(ctx context.Context, group *Group) (*Group, *http.Response, error) {\n\t// POST /Groups\n\tresp, err := c.CreateGroupResp(ctx, group)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\tdefer resp.Body.Close()\n\tg := &Group{}\n\treturn g, resp, c.parseResponse(resp, g)\n}"
] | [
"0.7774005",
"0.7614487",
"0.7580504",
"0.7580504",
"0.75550216",
"0.7460315",
"0.7294258",
"0.72896034",
"0.7261106",
"0.7210766",
"0.7202386",
"0.71659905",
"0.7141491",
"0.70930296",
"0.6947957",
"0.6907739",
"0.6901332",
"0.6881721",
"0.6866693",
"0.68410695",
"0.68170804",
"0.6800524",
"0.6765825",
"0.6739809",
"0.6738948",
"0.67354685",
"0.67326653",
"0.6717328",
"0.6688455",
"0.6649262",
"0.6630139",
"0.6616099",
"0.6615016",
"0.65964264",
"0.653488",
"0.6528466",
"0.6514312",
"0.64874464",
"0.64863515",
"0.6480565",
"0.645159",
"0.6447302",
"0.64383286",
"0.6387504",
"0.637926",
"0.6352435",
"0.6340105",
"0.6326478",
"0.62932307",
"0.62811565",
"0.62703454",
"0.62643045",
"0.626236",
"0.6210813",
"0.6210758",
"0.62034833",
"0.6199856",
"0.6189308",
"0.61845195",
"0.61826825",
"0.61740863",
"0.61732364",
"0.6172921",
"0.61663276",
"0.61552155",
"0.6138159",
"0.6134338",
"0.6126589",
"0.6118299",
"0.61174595",
"0.611167",
"0.6110214",
"0.6098611",
"0.6086651",
"0.6086043",
"0.608314",
"0.60651374",
"0.6061095",
"0.6058347",
"0.6048096",
"0.60476846",
"0.6045707",
"0.6045137",
"0.6040846",
"0.6027784",
"0.6026179",
"0.5999546",
"0.59958506",
"0.5991744",
"0.59864897",
"0.59864706",
"0.59681565",
"0.59650624",
"0.59577864",
"0.59518945",
"0.5941032",
"0.5926748",
"0.59123766",
"0.5909949",
"0.59042823"
] | 0.79604924 | 0 |
Name shows the name of the group. | Имя показывает имя группы. | func (g Group) Name() string {
return g.name
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (g *Group) Name() string {\n\treturn g.name\n}",
"func (g *Group) Name() string {\n\treturn g.name\n}",
"func (o ServerGroupOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ServerGroup) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (o InstanceGroupOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *InstanceGroup) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (o PlacementGroupOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *PlacementGroup) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (r *LogGroup) Name() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"name\"])\n}",
"func (o ReportGroupOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ReportGroup) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (o GetGroupResultOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetGroupResult) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o ThingGroupOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ThingGroup) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (o ResourceGroupOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ResourceGroup) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func GroupName() string {\n\treturn groupName\n}",
"func (r *Group) Name(name string) *Group {\n\tr.ID = validateID(name)\n\treturn r\n}",
"func (o GroupContainerOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GroupContainer) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o TargetGroupOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TargetGroup) *string { return v.Name }).(pulumi.StringPtrOutput)\n}",
"func (o ElastigroupSignalOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ElastigroupSignal) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o DataSetGeoSpatialColumnGroupOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DataSetGeoSpatialColumnGroup) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (s *SoundGroup) Name() (string, error) {\n\tnlen := C.int(len(s.name) + 1)\n\tvar cname *C.char = C.CString(`\\0`)\n\tdefer C.free(unsafe.Pointer(cname))\n\tres := C.FMOD_SoundGroup_GetName(s.cptr, cname, nlen)\n\tif C.GoString(cname) != s.name {\n\t\treturn s.name, errors.New(\"Wrong names\")\n\t}\n\treturn C.GoString(cname), errs[res]\n}",
"func (o BudgetResourceGroupOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *BudgetResourceGroup) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (g *SettingGroup) Name() string {\n\treturn g.NameValue\n}",
"func (p ByName) GroupName() string { return p.groupName }",
"func (pg *PropertyGroup) Name() string {\n\treturn pg.name\n}",
"func (o GroupInitContainerOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GroupInitContainer) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o DomainGroupOutput) GroupName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DomainGroup) pulumi.StringOutput { return v.GroupName }).(pulumi.StringOutput)\n}",
"func (o GroupPolicyOutput) GroupName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *GroupPolicy) pulumi.StringOutput { return v.GroupName }).(pulumi.StringOutput)\n}",
"func (o RegionNetworkEndpointGroupOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *RegionNetworkEndpointGroup) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (gb *GroupBy) DisplayName() string {\n\tif gb.Name == \"\" && gb.Key != \"\" {\n\t\treturn gb.Key\n\t}\n\n\treturn gb.Name\n}",
"func (o TargetGroupPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TargetGroup) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o PowerBIOutputDataSourceResponseOutput) GroupName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v PowerBIOutputDataSourceResponse) *string { return v.GroupName }).(pulumi.StringPtrOutput)\n}",
"func (g *Group) String() string {\n\treturn fmt.Sprintf(\"%s:%d\", g.name, len(g.files))\n}",
"func (o ParameterGroupParameterOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ParameterGroupParameter) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (group *Meetinggroup) String() string {\n\treturn group.Name\n}",
"func (*CreateInstanceGroup) name() string {\n\treturn \"createInstanceGroup\"\n}",
"func (p StorageProvider) GroupName() string {\n\treturn servicecatalog.GroupName\n}",
"func (o OptionGroupOptionOptionSettingOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v OptionGroupOptionOptionSetting) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (g *GitLab) Name() string {\n\treturn \"gitlab\"\n}",
"func (o PowerBIOutputDataSourceOutput) GroupName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v PowerBIOutputDataSource) *string { return v.GroupName }).(pulumi.StringPtrOutput)\n}",
"func (o LookupRegionNetworkEndpointGroupResultOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupRegionNetworkEndpointGroupResult) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o LookupTransitionRouteGroupResultOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupTransitionRouteGroupResult) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (k *Kind) Name() string {\n\treturn \"team\"\n}",
"func (o GroupContainerVolumeOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GroupContainerVolume) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o DataSetGeoSpatialColumnGroupPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DataSetGeoSpatialColumnGroup) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Name\n\t}).(pulumi.StringPtrOutput)\n}",
"func (group *ContainerGroup_Spec_ARM) GetName() string {\n\treturn group.Name\n}",
"func (o InstanceGroupNamedPortOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *InstanceGroupNamedPort) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (c *RestoreItemActionGRPCClient) Name() string {\n\treturn \"\"\n}",
"func (r *Roster) Name() string { return ModuleName }",
"func (gr *Group) String() string {\n\tvar builder strings.Builder\n\tbuilder.WriteString(\"Group(\")\n\tbuilder.WriteString(fmt.Sprintf(\"id=%v\", gr.ID))\n\tbuilder.WriteString(\", name=\")\n\tbuilder.WriteString(gr.Name)\n\tbuilder.WriteString(\", nickname=\")\n\tbuilder.WriteString(gr.Nickname)\n\tbuilder.WriteString(\", count=\")\n\tbuilder.WriteString(fmt.Sprintf(\"%v\", gr.Count))\n\tbuilder.WriteString(\", code=\")\n\tbuilder.WriteString(fmt.Sprintf(\"%v\", gr.Code))\n\tbuilder.WriteString(\", index=\")\n\tbuilder.WriteString(fmt.Sprintf(\"%v\", gr.Index))\n\tbuilder.WriteString(\", min=\")\n\tbuilder.WriteString(fmt.Sprintf(\"%v\", gr.Min))\n\tbuilder.WriteString(\", max=\")\n\tbuilder.WriteString(fmt.Sprintf(\"%v\", gr.Max))\n\tbuilder.WriteString(\", range=\")\n\tbuilder.WriteString(fmt.Sprintf(\"%v\", gr.Range))\n\tbuilder.WriteString(\", note=\")\n\tbuilder.WriteString(gr.Note)\n\tbuilder.WriteString(\", log=\")\n\tbuilder.WriteString(gr.Log)\n\tbuilder.WriteString(\", username=\")\n\tbuilder.WriteString(gr.Username)\n\tbuilder.WriteByte(')')\n\treturn builder.String()\n}",
"func (o GroupInitContainerVolumeOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GroupInitContainerVolume) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o DeviceGroupDeviceOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DeviceGroupDevice) *string { return v.Name }).(pulumi.StringPtrOutput)\n}",
"func (o ClusterParameterGroupParameterOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ClusterParameterGroupParameter) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (name GroupNameFlag) String() string {\n\treturn name.GroupName.String()\n}",
"func (v *StackPanel) Name() string {\n\treturn \"stackpanel\"\n}",
"func (d Distribution) Name() string {\n\treturn strings.Replace(d.ID, \"-\", \"::\", -1)\n}",
"func (x Group) String() string {\n\tif str, ok := _GroupMap[x]; ok {\n\t\treturn str\n\t}\n\treturn fmt.Sprintf(\"Group(%d)\", x)\n}",
"func GroupName(name string) OptFunc {\n\treturn func(p *ByName) error {\n\t\tname = strings.TrimSpace(name)\n\t\terr := GroupNameCheck(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.groupName = name\n\t\treturn nil\n\t}\n}",
"func (o AclOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Acl) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (c *client) GetName(ctx context.Context, groupID string) (string, error) {\n\tcli := c.getGroupClient()\n\n\tresp, err := cli.Detail(ctx, &pb.GroupRequest{Groupid: groupID})\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn \"\", err\n\t}\n\n\tif resp.ResultCode == pb.ResponseCode_NON_EXIST {\n\t\treturn \"\", ErrGroupNonExist\n\t}\n\n\treturn resp.Groups[0].GetName(), nil\n}",
"func (impl *ServerServerGroup) ResourceName() string {\n\treturn \"server-servergroup\"\n}",
"func (o PermissionSetOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *PermissionSet) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (o ProjectOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Project) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (o RegexPatternSetOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *RegexPatternSet) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (o StudioOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Studio) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (o InstanceGroupManagerVersionOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v InstanceGroupManagerVersion) *string { return v.Name }).(pulumi.StringPtrOutput)\n}",
"func (*DeleteInstanceGroup) name() string {\n\treturn \"deleteInstanceGroup\"\n}",
"func (p provider) Name() string {\n\treturn p.name\n}",
"func (s SetOfSpaces) Name() string {\r\n\treturn s.name\r\n}",
"func (o NetworkPacketCaptureOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *NetworkPacketCapture) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (p *Provider) Name() string {\n\treturn p.name\n}",
"func (e *EndComponent) Name() string {\n\treturn \"name\"\n}",
"func (kmc KernelModuleCheck) Name() string {\n\tif kmc.Label != \"\" {\n\t\treturn kmc.Label\n\t}\n\treturn fmt.Sprintf(\"KernelModule-%s\", strings.Replace(kmc.Module, \"/\", \"-\", -1))\n}",
"func SetGroupName(name string) {\n\tgroupName = name\n}",
"func (o VolumeGroupSapHanaVolumeOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v VolumeGroupSapHanaVolume) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (a *MembersAPI) Name() string {\n\treturn a.name\n}",
"func (*ListInstanceGroups) name() string {\n\treturn \"listInstanceGroups\"\n}",
"func (o InstanceGroupManagerVersionResponseOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v InstanceGroupManagerVersionResponse) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (n NamedComponents) Name() string {\n\treturn n.uniqueComponent\n}",
"func (t *Team) Name() string {\n\treturn t.name\n}",
"func (d *Demo) Name() string {\n\treturn d.info.Name()\n}",
"func (o ConnectedRegistryNotificationOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ConnectedRegistryNotification) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (m *Group) GetDisplayName()(*string) {\n return m.displayName\n}",
"func (p Project) Name() string {\n\treturn p.name\n}",
"func (o RuleMfaOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *RuleMfa) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (c *Dg) Show(name string) (Entry, error) {\n c.con.LogQuery(\"(show) device group %q\", name)\n return c.details(c.con.Show, name)\n}",
"func (o ConnectedRegistryOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ConnectedRegistry) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (o PlaybackKeyPairOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *PlaybackKeyPair) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (c *Config) Name(n int) string {\n\treturn fmt.Sprintf(\"%03d.%s\", n, c.Type)\n}",
"func (*UpdateInstanceGroup) name() string {\n\treturn \"updateInstanceGroup\"\n}",
"func (s *SharingKey) Name() string {\n\treturn string(s.name)\n}",
"func (s Group) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s Group) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (g Gossiper) Name() string {\n\treturn g.name\n}",
"func (id NetworkGroupId) String() string {\n\tcomponents := []string{\n\t\tfmt.Sprintf(\"Subscription: %q\", id.SubscriptionId),\n\t\tfmt.Sprintf(\"Resource Group Name: %q\", id.ResourceGroupName),\n\t\tfmt.Sprintf(\"Network Manager Name: %q\", id.NetworkManagerName),\n\t\tfmt.Sprintf(\"Network Group Name: %q\", id.NetworkGroupName),\n\t}\n\treturn fmt.Sprintf(\"Network Group (%s)\", strings.Join(components, \"\\n\"))\n}",
"func (cmd *CLI) Name() string {\n\tvar name string\n\tif cmd.parent != nil {\n\t\tname = strings.Join([]string{cmd.parent.Name(), cmd.name}, \" \")\n\t} else {\n\t\tname = cmd.name\n\t}\n\treturn name\n}",
"func (o FleetMetricAggregationTypeOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FleetMetricAggregationType) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o OptionGroupOptionOutput) OptionName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v OptionGroupOption) string { return v.OptionName }).(pulumi.StringOutput)\n}",
"func (project *Project) Name() string {\n\treturn project.PName\n}",
"func (sh SubdirectoryHeader) Name() string {\n\treturn string(sh.SubdirectoryName[0 : sh.TypeAndNameLength&0xf])\n}",
"func (o BackendServiceFabricClusterServerX509NameOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BackendServiceFabricClusterServerX509Name) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (l Level) Name() string {\n\treturn LevelName(l)\n}",
"func (g *Gpg) Name() string {\n\treturn gpgLabel\n}",
"func (c Provider) Name() string {\n\treturn \"GitHub\"\n}"
] | [
"0.80168146",
"0.80168146",
"0.75941217",
"0.75454223",
"0.74364364",
"0.7433477",
"0.7422203",
"0.73819286",
"0.7380036",
"0.73613214",
"0.7249155",
"0.72418725",
"0.7185711",
"0.7158183",
"0.7133307",
"0.7036169",
"0.7024919",
"0.7012739",
"0.6959442",
"0.6914941",
"0.6895415",
"0.6876077",
"0.68412757",
"0.67925954",
"0.6775768",
"0.6750172",
"0.6749456",
"0.6671215",
"0.66570324",
"0.6621952",
"0.65969074",
"0.658828",
"0.6568318",
"0.65642446",
"0.6560756",
"0.6558466",
"0.65045017",
"0.64961404",
"0.64923275",
"0.6481466",
"0.64578986",
"0.6445377",
"0.6437958",
"0.6426955",
"0.6398833",
"0.63934124",
"0.6341869",
"0.63200825",
"0.63004357",
"0.6295351",
"0.62376887",
"0.6235386",
"0.6234454",
"0.6233862",
"0.61742026",
"0.6165737",
"0.61524147",
"0.61241984",
"0.6102945",
"0.61028045",
"0.60956293",
"0.60755634",
"0.60608065",
"0.60454684",
"0.603075",
"0.6019226",
"0.6017223",
"0.60166097",
"0.60143864",
"0.60124177",
"0.6005599",
"0.6004766",
"0.6004024",
"0.5999445",
"0.5986665",
"0.59825",
"0.5982008",
"0.5976316",
"0.59726316",
"0.59715724",
"0.59647894",
"0.5962496",
"0.59616077",
"0.5959673",
"0.59561616",
"0.59519047",
"0.59518003",
"0.59487617",
"0.59487617",
"0.5946798",
"0.59402955",
"0.59397256",
"0.5934666",
"0.59329677",
"0.59271914",
"0.5925615",
"0.5925365",
"0.59250283",
"0.5922746",
"0.592048"
] | 0.8108221 | 0 |
Register will inspect the provided objects implementing the Unit interface to see if it needs to register the objects for any of the Group bootstrap phases. If a Unit doesn't satisfy any of the bootstrap phases it is ignored by Group. The returned array of booleans is of the same size as the amount of provided Units, signaling for each provided Unit if it successfully registered with Group for at least one of the bootstrap phases or if it was ignored. | Регистр проверит предоставляемые объекты, реализующие интерфейс Unit, чтобы определить, нужно ли зарегистрировать их для любого из этапов запуска Group. Если Unit не удовлетворяет ни одному из этапов запуска, он игнорируется Group. Возвращаемый массив логических значений имеет такую же длину, как и количество предоставленных Unit, и указывает для каждого предоставленного Unit, успешно ли он был зарегистрирован Group для хотя бы одного из этапов запуска или был ли он проигнорирован. | func (g *Group) Register(units ...Unit) []bool {
g.log = logger.GetLogger(g.name)
hasRegistered := make([]bool, len(units))
for idx := range units {
if !g.configured {
// if RunConfig has been called we can no longer register Config
// phases of Units
if c, ok := units[idx].(Config); ok {
g.c = append(g.c, c)
hasRegistered[idx] = true
}
}
if p, ok := units[idx].(PreRunner); ok {
g.p = append(g.p, p)
hasRegistered[idx] = true
}
if s, ok := units[idx].(Service); ok {
g.s = append(g.s, s)
hasRegistered[idx] = true
}
}
return hasRegistered
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *MemoryArrayAllOf) HasUnits() bool {\n\tif o != nil && o.Units != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func Register(pcs ...components.Pluggable) int {\n\tregisteredComponents := 0\n\tfor _, pc := range pcs {\n\t\tregister, ok := registries[pc.Type]\n\t\tif !ok {\n\t\t\tlog.Warnf(\"%s is not registered as a pluggable component\", pc.Type)\n\t\t\tcontinue\n\t\t}\n\t\tregister(pc)\n\t\tregisteredComponents++\n\t\tlog.Infof(\"%s.%s %s pluggable component was successfully registered\", pc.Type, pc.Name, pc.Version)\n\t}\n\treturn registeredComponents\n}",
"func Register(toReg ...Component) {\n\tfor i := range toReg {\n\t\tComponents = append(Components, toReg[i])\n\t}\n\tsort.Sort(ByCommand(Components))\n}",
"func (container *Container) Register(factories ...interface{}) error {\n\tfor _, factory := range factories {\n\t\terr := container.RegisterOne(factory)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func Register(m ...initializer.Simple) {\n\tall = append(all, m...)\n}",
"func (o *StorageHitachiParityGroupAllOf) HasRegisteredDevice() bool {\n\tif o != nil && o.RegisteredDevice != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (r *Resolver) Units() (*[]*Unit, error) {\n\tvar result []*Unit\n\tfor _, theirUnit := range units.All() {\n\t\tourUnit, err := NewUnit(theirUnit.Name)\n\t\tif err != nil {\n\t\t\treturn &result, err\n\t\t}\n\t\tresult = append(result, &ourUnit)\n\t}\n\treturn &result, nil\n}",
"func (b *Builder) Register(br *sous.BuildResult) error {\n\tfor _, prod := range br.Products {\n\t\tif prod.Advisories.Contains(sous.IsBuilder) {\n\t\t\tmessages.ReportLogFieldsMessage(\"not pushing builder image\", logging.DebugLevel, b.log, prod)\n\t\t\tcontinue\n\t\t}\n\t\terr := b.pushToRegistry(prod)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.recordName(prod)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (o *EquipmentBaseSensor) HasUnits() bool {\n\tif o != nil && o.Units != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (app *App) Register(devices ...*message.Device) {\n\t// we are registering devices, we should also register for\n\t// discovery messages, but they are handled in the app.\n\t// we add a NoopHandler to the map to make the subscription work\n\t// we only do this if no handler is allready registered\n\tif _, ok := app.handler[queue.Inventory]; !ok {\n\t\tapp.SetHandler(queue.Inventory, app.inventoryHandler)\n\t}\n\n\tapp.deviceLock.Lock()\n\tdefer app.deviceLock.Unlock()\n\tfor _, device := range devices {\n\t\tfound := false\n\t\tfor _, d := range app.devices {\n\t\t\tif *device.ID == *d.device.ID {\n\t\t\t\td.lastSeen = time.Now()\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tapp.devices = append(app.devices, &appDevice{device: device, lastSeen: time.Now()})\n\t\t}\n\t}\n}",
"func (r *Registry) RegisterGroups(groups ...*Group) error {\n\tfor _, group := range groups {\n\t\t_, ok := r.groupMap[group.ID]\n\t\tif ok {\n\t\t\treturn fmt.Errorf(\"duplicate group ID %q\", group.ID)\n\t\t}\n\n\t\terr := r.RegisterOutlets(group.Outlets...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tr.groupMap[group.ID] = group\n\t\tr.groups = append(r.groups, group)\n\n\t\tlog.WithField(\"groupID\", group.ID).Info(\"registered outlet group\")\n\t}\n\n\treturn nil\n}",
"func (r *Registry) Register(ds ...*Object) {\n\tr.Lock()\n\tdefer r.Unlock()\n\tr.initDataStores()\n\n\tfor _, obj := range ds {\n\t\tname := obj.Name\n\t\tr.dataStores[name] = obj\n\t\tr.dataStores[name].Enabled = true\n\t}\n}",
"func RegisterAll(emitPerNodeGroupMetrics bool) {\n\tlegacyregistry.MustRegister(clusterSafeToAutoscale)\n\tlegacyregistry.MustRegister(nodesCount)\n\tlegacyregistry.MustRegister(nodeGroupsCount)\n\tlegacyregistry.MustRegister(unschedulablePodsCount)\n\tlegacyregistry.MustRegister(maxNodesCount)\n\tlegacyregistry.MustRegister(cpuCurrentCores)\n\tlegacyregistry.MustRegister(cpuLimitsCores)\n\tlegacyregistry.MustRegister(memoryCurrentBytes)\n\tlegacyregistry.MustRegister(memoryLimitsBytes)\n\tlegacyregistry.MustRegister(lastActivity)\n\tlegacyregistry.MustRegister(functionDuration)\n\tlegacyregistry.MustRegister(functionDurationSummary)\n\tlegacyregistry.MustRegister(errorsCount)\n\tlegacyregistry.MustRegister(scaleUpCount)\n\tlegacyregistry.MustRegister(gpuScaleUpCount)\n\tlegacyregistry.MustRegister(failedScaleUpCount)\n\tlegacyregistry.MustRegister(failedGPUScaleUpCount)\n\tlegacyregistry.MustRegister(scaleDownCount)\n\tlegacyregistry.MustRegister(gpuScaleDownCount)\n\tlegacyregistry.MustRegister(evictionsCount)\n\tlegacyregistry.MustRegister(unneededNodesCount)\n\tlegacyregistry.MustRegister(unremovableNodesCount)\n\tlegacyregistry.MustRegister(scaleDownInCooldown)\n\tlegacyregistry.MustRegister(oldUnregisteredNodesRemovedCount)\n\tlegacyregistry.MustRegister(overflowingControllersCount)\n\tlegacyregistry.MustRegister(skippedScaleEventsCount)\n\tlegacyregistry.MustRegister(napEnabled)\n\tlegacyregistry.MustRegister(nodeGroupCreationCount)\n\tlegacyregistry.MustRegister(nodeGroupDeletionCount)\n\tlegacyregistry.MustRegister(pendingNodeDeletions)\n\n\tif emitPerNodeGroupMetrics {\n\t\tlegacyregistry.MustRegister(nodesGroupMinNodes)\n\t\tlegacyregistry.MustRegister(nodesGroupMaxNodes)\n\t}\n}",
"func (o *Group) Register() int {\n\to.wait_lock.Lock()\n\tdefer o.wait_lock.Unlock()\n\to.wg().Add(1)\n\to.wait_index++\n\to.wait_register[o.wait_index] = true\n\treturn o.wait_index\n}",
"func (o *MemoryArrayAllOf) GetUnitsOk() ([]MemoryUnitRelationship, bool) {\n\tif o == nil || o.Units == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Units, true\n}",
"func (i Transform) Registered() bool {\n\tfor _, v := range _Transform_values {\n\t\tif i == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (o *MemoryArrayAllOf) HasRegisteredDevice() bool {\n\tif o != nil && o.RegisteredDevice != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o *MemoryArrayAllOf) SetUnits(v []MemoryUnitRelationship) {\n\to.Units = v\n}",
"func (s *ExternalAgentStartedState) Register(events []Event) error {\n\tfor _, e := range events {\n\t\tif err := s.agent.subscribeUnsafe(e); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\ts.agent.setStateUnsafe(s.agent.RegisteredState)\n\ts.initFlow.ExternalAgentRegistered()\n\treturn nil\n}",
"func (me *Container) Register(r Registries) *Container {\n\tfor _, v := range r {\n\t\tme.bag[v.Key] = v.Value\n\t}\n\n\treturn me\n}",
"func (m MapRegistry) Register(rs []transport.Registrant) {\n\tfor _, r := range rs {\n\t\tif r.Service == \"\" {\n\t\t\tr.Service = m.defaultService\n\t\t}\n\n\t\tif r.Procedure == \"\" {\n\t\t\tpanic(\"Expected procedure name not to be empty string in registration\")\n\t\t}\n\n\t\tsp := transport.ServiceProcedure{\n\t\t\tService: r.Service,\n\t\t\tProcedure: r.Procedure,\n\t\t}\n\t\tm.entries[sp] = r.HandlerSpec\n\t}\n}",
"func isRegistered(sample string, stringArray []string) bool {\n\tfor _, value := range stringArray {\n\t\tif ok := strings.Compare(sample, value); ok == 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (tm *TestManager) Register(obj interface{}) {\n\ttm.ModuleMap[Common.GetTypeName(obj)] = reflect.TypeOf(obj)\n}",
"func IsRegistered(name string) bool {\n\t_, ok := registry[name]\n\treturn ok\n}",
"func (m *manager) IsRegistered(name string) bool {\n\treturn m.registry.Get(name) != nil\n}",
"func (m *DeviceGroup) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAwsTestResult(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateAzureTestResult(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCustomProperties(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateGcpTestResult(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSubGroups(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (container *Container) RegisterCompose(registrars ...Registrar) error {\n\tfor _, registrar := range registrars {\n\t\terr := registrar(container)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func TestMultipleRegisterCalls(t *testing.T) {\n\tRegister(\"multiple-register-driver-1\")\n\trequire.PanicsWithError(t, \"Register called twice for driver multiple-register-driver-1\", func() {\n\t\tRegister(\"multiple-register-driver-1\")\n\t})\n\n\t// Should be no error.\n\tRegister(\"multiple-register-driver-2\")\n}",
"func (router *Router) Register(modules ...IModule) {\n\tfor _, m := range modules {\n\t\trouter.modules[m.GetMID()] = m\n\t}\n}",
"func (m *Metrics) MustRegister(metrics ...prometheus.Collector) {\n\tm.reg.MustRegister(metrics...)\n}",
"func validateDevices(agentDevices, containerDevices []device.Device) bool {\n\tfor _, d := range containerDevices {\n\t\tif !slices.Contains(agentDevices, d) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func Register() {\n\tregisterConstMetrics()\n\t{{[- if .API.Enabled ]}}\n\tregisterGRPCMetrics()\n\t{{[- end ]}}\n\t{{[- if .Storage.Enabled ]}}\n\tregisterDatabaseMetrics()\n\t{{[- end ]}}\n\tregisterBusinessMetrics()\n}",
"func IsRegistered(name string) bool {\n\treturn i.IsRegistered(name)\n}",
"func (p *DevicePool) Register(d *Device, id string) bool {\n\tp.devicesMtx.Lock()\n\tdefer p.devicesMtx.Unlock()\n\t_, ok := p.devices[id]\n\tif ok {\n\t\treturn false\n\t}\n\tp.devices[id] = d\n\treturn true\n}",
"func Register() []Option {\n\treturn []Option{\n\t\t/* Dependencies */\n\t\tDependencies(\n\t\t\tapp.Providers(),\n\t\t),\n\t\t\n\t\t/* Modules */\n\t\tModules(\n\t\t\tsrvhttp.HealthCheckModule{}, // health check module (http demo)\n\t\t\tdocs.Module{}, // docs module\n\t\t),\n\n\t\t/* Module Constructors */\n\t\tConstructors(\n\t\t\tapp.New,\n\t\t\tconfig.New, // config module\n\t\t\tcore.NewServeModule, // server module\n\t\t),\n\t}\n}",
"func IsRegistered(name string) bool {\n\t_, ok := driverMap[name]\n\treturn ok\n}",
"func (o *VirtualizationIweClusterAllOf) HasRegisteredDevice() bool {\n\tif o != nil && o.RegisteredDevice != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (gf *GOFactory) Register(typeID string, creator ICreator) bool {\n\tgologger.SLogger.Println(\"registering\", typeID)\n\n\t// check if already registered\n\t_, ok := gf.GoCreator[typeID]\n\tif ok {\n\t\tgologger.SLogger.Println(\"Already Registered Object \", typeID)\n\n\t\treturn false\n\t}\n\n\tgf.GoCreator[typeID] = creator\n\n\tgologger.SLogger.Println(\"Added To Factory Obj Of Type\", typeID)\n\n\treturn true\n}",
"func Register(j Job) {\n\tif j == nil {\n\t\tpanic(\"Can't register nil job\")\n\t}\n\tregistry = append(registry, j)\n}",
"func Register(params ...interface{}) {\n\t// appendParams will append the object that annotated with at.AutoConfiguration\n\tcomponentContainer, _ = appendParams(componentContainer, params...)\n\treturn\n}",
"func (oc *OrganizationCreate) AddUnits(o ...*OrgUnit) *OrganizationCreate {\n\tids := make([]int, len(o))\n\tfor i := range o {\n\t\tids[i] = o[i].ID\n\t}\n\treturn oc.AddUnitIDs(ids...)\n}",
"func checkUnits(units []ignv2_2types.Unit) bool {\n\tfor _, u := range units {\n\t\tfor j := range u.Dropins {\n\t\t\tpath := filepath.Join(pathSystemd, u.Name+\".d\", u.Dropins[j].Name)\n\t\t\tif status := checkFileContentsAndMode(path, []byte(u.Dropins[j].Contents), defaultFilePermissions); !status {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\tif u.Contents == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tpath := filepath.Join(pathSystemd, u.Name)\n\t\tif u.Mask {\n\t\t\tlink, err := filepath.EvalSymlinks(path)\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"state validation: error while evaluation symlink for path: %q, err: %v\", path, err)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif strings.Compare(pathDevNull, link) != 0 {\n\t\t\t\tglog.Errorf(\"state validation: invalid unit masked setting. path: %q; expected: %v; received: %v\", path, pathDevNull, link)\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tif status := checkFileContentsAndMode(path, []byte(u.Contents), defaultFilePermissions); !status {\n\t\t\treturn false\n\t\t}\n\n\t}\n\treturn true\n}",
"func (m *ComputeRackUnit) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// validation for a type composition with ComputePhysical\n\tif err := m.ComputePhysical.Validate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateAdapters(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateBiosBootmode(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateBiosunits(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateBmc(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateBoard(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateBootDeviceBootmode(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateFanmodules(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateGenericInventoryHolders(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLocatorLed(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePciDevices(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePsus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRackEnclosureSlot(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRegisteredDevice(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSasExpanders(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStorageEnclosures(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTopSystem(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (l unitList) Started() bool {\n\tfor _, unit := range l {\n\t\tif unit.State != string(provision.StatusStarted) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (h *StorageArrayHandler) Register(api *echo.Group) {\n\tjwtMiddleware := middleware.JWT(utils.JWTSecret)\n\n\tstorageArrays := api.Group(\"/storage-arrays\", jwtMiddleware)\n\tstorageArrays.POST(\"\", h.CreateStorageArray)\n\tstorageArrays.GET(\"\", h.ListStorageArrays)\n\tstorageArrays.GET(\"/:id\", h.GetStorageArray)\n\tstorageArrays.DELETE(\"/:id\", h.DeleteStorageArray)\n\tstorageArrays.PATCH(\"/:id\", h.UpdateStorageArray)\n}",
"func (o *RackUnitPersonality) HasRegisteredDevice() bool {\n\tif o != nil && o.RegisteredDevice != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (me *GlobalContainer) Register(r Registries) *GlobalContainer {\n\tfor _, v := range r {\n\t\tglobalContainerInstance.Container.bag[v.Key] = v.Value\n\t}\n\n\treturn me\n}",
"func Registered() (names []string) {\n\tmux.Lock()\n\tdefer mux.Unlock()\n\n\tfor key := range pool {\n\t\tnames = append(names, key)\n\t}\n\treturn names\n}",
"func (o *DataExportQuery) HasUnits() bool {\n\tif o != nil && o.Units != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (self gcmSink) Register(metrics []sink_api.MetricDescriptor) error {\n\tfor _, metric := range metrics {\n\t\tif err := self.core.Register(metric.Name, metric.Description, metric.Type.String(), metric.ValueType.String(), metric.Labels); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif rateMetric, exists := gcmRateMetrics[metric.Name]; exists {\n\t\t\tif err := self.core.Register(rateMetric.name, rateMetric.description, sink_api.MetricGauge.String(), sink_api.ValueDouble.String(), metric.Labels); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (reg *registrar) Register(example interface{}) error {\n\treg.lock.Lock()\n\tdefer reg.lock.Unlock()\n\treturn reg.Registry.Register(example)\n}",
"func lazyCreateUnits(cCmd *cobra.Command, args []string) error {\n\terrchan := make(chan error)\n\tblockAttempts, _ := cCmd.Flags().GetInt(\"block-attempts\")\n\tvar wg sync.WaitGroup\n\tfor _, arg := range args {\n\t\targ = maybeAppendDefaultUnitType(arg)\n\t\tname := unitNameMangle(arg)\n\n\t\tret, err := checkUnitCreation(cCmd, arg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t} else if ret != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Assume that the name references a local unit file on\n\t\t// disk or if it is an instance unit and if so get its\n\t\t// corresponding unit\n\t\tuf, err := getUnitFile(cCmd, arg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = createUnit(name, uf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo checkUnitState(name, job.JobStateInactive, blockAttempts, os.Stdout, &wg, errchan)\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(errchan)\n\t}()\n\n\thaserr := false\n\tfor msg := range errchan {\n\t\tstderr(\"Error waiting on unit creation: %v\", msg)\n\t\thaserr = true\n\t}\n\n\tif haserr {\n\t\treturn fmt.Errorf(\"One or more errors creating units\")\n\t}\n\n\treturn nil\n}",
"func (o *NiatelemetryNexusDashboardsAllOf) HasRegisteredDevice() bool {\n\tif o != nil && o.RegisteredDevice != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func RegisterElements(\n\tname string,\n\ttoInternal func(interface{}, string) (TAI64NARUXTime, error),\n\tfromInternal func(TAI64NARUXTime, string) (string, error),\n\toffset func(TAI64NARUXTime, interface{}) (TAI64NARUXTime, error),\n\tdefaultFormat string,\n) {\n\tregisteredCalendars[canonCalendarName(name)] = calendarRegistration{\n\t\tToInternal: toInternal,\n\t\tFromInternal: fromInternal,\n\t\tOffset: offset,\n\t\tDefaultFormat: defaultFormat,\n\t}\n}",
"func Register(mut Mutation) {\n\tgob.Register(mut)\n\tt := reflect.TypeOf(mut)\n\tregistry[t.String()] = t\n}",
"func (o *StorageHitachiPortAllOf) HasRegisteredDevice() bool {\n\tif o != nil && o.RegisteredDevice != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (p *jsiiProxy_ProfilingGroup) Validate() *[]*string {\n\tvar returns *[]*string\n\n\t_jsii_.Invoke(\n\t\tp,\n\t\t\"validate\",\n\t\tnil, // no parameters\n\t\t&returns,\n\t)\n\n\treturn returns\n}",
"func UnitNames() [7]string {\n\treturn unitNames\n}",
"func Register(i interface{}) {\n\tregister(i)\n}",
"func deployUnits(hosts Hosts, appName, appVersion, unitFile string) bool {\n\t// get the next instance number to use for deployment\n\t// this function will also handling initializing the instance number\n\t// if one does not exist\n\n\tfmt.Println(\"getting instance number for non global unit\")\n\tnextInstNum := getNextInstance(hosts.etcd, appName)\n\n\t// init what we are going to return\n\tvar status bool\n\n\t// deploy new unit.\n\tsendTries := 5\n\tfor sendTries != 0 {\n\t\tsendUnitResponse := sendUnitFile(hosts.fleet, appName+\"-\"+appVersion, fmt.Sprintf(\"%d\", nextInstNum), unitFile)\n\t\tif sendUnitResponse.StatusCode != 201 {\n\t\t\t// special catch for 204 errors.\n\t\t\tif sendUnitResponse.StatusCode == 204 {\n\t\t\t\tcolor.Red(\"Received 204 - Duplicate unit file submitted to fleet. This usually a sign multiple unit files for this version. Contact DevOPs\")\n\t\t\t} else {\n\t\t\t\tcolor.Red(\"Error communicating with fleet trying again\")\n\t\t\t}\n\t\t\tsendTries--\n\t\t\tif sendTries == 0 {\n\t\t\t\tcolor.Red(\"Deployment Failed\")\n\t\t\t}\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t} else {\n\t\t\t// we succeeded now break out of this loop\n\t\t\tsendTries = 0\n\t\t}\n\t}\n\n\t// now wait for the container to be up\n\t// only for the main unit types. Not watching for presence yet\n\tsuccess := instanceUp(hosts, appName, appVersion, fmt.Sprintf(\"%d\", nextInstNum), 600)\n\tif success == true {\n\t\tstatus = true\n\t\tcolor.Green(\"Deployment Successful\")\n\t} else {\n\t\tstatus = false\n\t}\n\n\t// default to false but we should never really hit this\n\treturn status\n\n}",
"func (m MultiProducer) MultiRegister(r Registerer) {\n\tfor k, p := range m.producers {\n\t\tr.Register(k, p)\n\t}\n}",
"func (h *Healthz) Register() *Check {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\n\tcheck := new(Check)\n\th.checks = append(h.checks, check)\n\n\treturn check\n}",
"func (m *manager) Register(name string, item interface{}, tags map[string]string) error {\n\tif err := dtos.ValidateMetricName(name, \"metric\"); err != nil {\n\t\treturn err\n\t}\n\n\tif len(tags) > 0 {\n\t\tif err := m.setMetricTags(name, tags); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := m.registry.Register(name, item); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (p *Plugin) Register() error {\n\tpr := PluginRegistry()\n\tpr.mut.Lock()\n\tdefer pr.mut.Unlock()\n\n\tfor _, plugin := range pr.plugins {\n\t\tif plugin.Name == p.Name {\n\t\t\tlog.Printf(\"Ignoring multiple calls to Register() for plugin '%s'\", p.Name)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tpr.plugins = append(pr.plugins, p)\n\n\treturn nil\n}",
"func (o *StoragePhysicalDiskAllOf) HasRegisteredDevice() bool {\n\tif o != nil && o.RegisteredDevice != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func RegisterTypes(registry interface {\n\t RegisterType(name string, obj any)\n}) {\n\n}",
"func (o *EquipmentIdentityAllOf) HasRegisteredDevice() bool {\n\tif o != nil && o.RegisteredDevice != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func SupportedUnits() map[UnitType][]string {\n\tunitLock.RLock()\n\tdefer unitLock.RUnlock()\n\tsupported := make(map[UnitType][]string, len(supportedUnits))\n\tfor unit, aliases := range supportedUnits {\n\t\tsupported[unit] = aliases\n\t}\n\treturn supported\n}",
"func setTargetStateOfUnits(units []string, state job.JobState) ([]*schema.Unit, error) {\n\ttriggered := make([]*schema.Unit, 0)\n\tfor _, name := range units {\n\t\tu, err := cAPI.Unit(name)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error retrieving unit %s from registry: %v\", name, err)\n\t\t} else if u == nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to find unit %s\", name)\n\t\t} else if job.JobState(u.DesiredState) == state {\n\t\t\tlog.Debugf(\"Unit(%s) already %s, skipping.\", u.Name, u.DesiredState)\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Debugf(\"Setting Unit(%s) target state to %s\", u.Name, state)\n\t\tif err := cAPI.SetUnitTargetState(u.Name, string(state)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttriggered = append(triggered, u)\n\t}\n\n\treturn triggered, nil\n}",
"func New(tag string) (Unit, error) {\n\tfor u, pair := range unitTags {\n\t\tif pair.Matched(tag) {\n\t\t\treturn u, nil\n\t\t}\n\t}\n\treturn Unimplemented, fmt.Errorf(\"Unimplemented unit tag: %v\", tag)\n}",
"func (mr *MockISubKeyBucketMockRecorder) Register(receiver interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockISubKeyBucket)(nil).Register), receiver)\n}",
"func (o *MemoryArrayAllOf) GetUnits() []MemoryUnitRelationship {\n\tif o == nil {\n\t\tvar ret []MemoryUnitRelationship\n\t\treturn ret\n\t}\n\treturn o.Units\n}",
"func Register() error {\n\tnoopCompress := noop.New()\n\n\tfor name, c := range map[string]compress.Compression{\n\t\t\"\": noopCompress, // to ensure backwards compatibility\n\t\tnoop.AlgorithmName: noopCompress,\n\t\tgzip.AlgorithmName: gzip.New(),\n\t} {\n\t\tif err := compress.RegisterAlgorithm(name, c); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (ouo *OrganizationUpdateOne) AddUnits(o ...*OrgUnit) *OrganizationUpdateOne {\n\tids := make([]int, len(o))\n\tfor i := range o {\n\t\tids[i] = o[i].ID\n\t}\n\treturn ouo.AddUnitIDs(ids...)\n}",
"func (mr *MockUsersRepoInterfaceMockRecorder) Register(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockUsersRepoInterface)(nil).Register), arg0)\n}",
"func (bc *Blockchain) Validate() bool {\n\tfor i := 0; i < len(*bc); i++ {\n\t\tif i == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif !(*bc)[i].Validate((*bc)[i-1]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func Register(fn interface{}) error {\n\t// Validate that its a function\n\tfnType := reflect.TypeOf(fn)\n\tif err := validateFnFormat(fnType); err != nil {\n\t\treturn err\n\t}\n\t// Check if already registered\n\tfnName := getFunctionName(fn)\n\t_, ok := fnLookup.getFn(fnName)\n\tif ok {\n\t\treturn nil\n\t}\n\tfor i := 0; i < fnType.NumIn(); i++ {\n\t\targType := fnType.In(i)\n\t\t// Interfaces cannot be registered, their implementations should be\n\t\t// https://golang.org/pkg/encoding/gob/#Register\n\t\tif argType.Kind() != reflect.Interface {\n\t\t\targ := reflect.Zero(argType).Interface()\n\t\t\tif err := GlobalBackend().Encoder().Register(arg); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"unable to register the message for encoding\")\n\t\t\t}\n\t\t}\n\t}\n\tfnLookup.addFn(fnName, fn)\n\treturn nil\n}",
"func Register(ch chan ExecutionEvent, topics ...Topic) {\n\tfor _, t := range topics {\n\t\tsubscriberRegistry[t] = append(subscriberRegistry[t], ch)\n\t}\n}",
"func push(q *availableUnits, units ...*workUnit) {\n\tfor _, unit := range units {\n\t\tq.Add(unit)\n\t}\n}",
"func (d *DeviceGroup) Validate() error {\n\tvar mErr multierror.Error\n\n\tif d.Vendor == \"\" {\n\t\t_ = multierror.Append(&mErr, fmt.Errorf(\"device vendor must be specified\"))\n\t}\n\tif d.Type == \"\" {\n\t\t_ = multierror.Append(&mErr, fmt.Errorf(\"device type must be specified\"))\n\t}\n\tif d.Name == \"\" {\n\t\t_ = multierror.Append(&mErr, fmt.Errorf(\"device name must be specified\"))\n\t}\n\n\tfor i, dev := range d.Devices {\n\t\tif dev == nil {\n\t\t\t_ = multierror.Append(&mErr, fmt.Errorf(\"device %d is nil\", i))\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := dev.Validate(); err != nil {\n\t\t\t_ = multierror.Append(&mErr, multierror.Prefix(err, fmt.Sprintf(\"device %d: \", i)))\n\t\t}\n\t}\n\n\tfor k, v := range d.Attributes {\n\t\tif err := v.Validate(); err != nil {\n\t\t\t_ = multierror.Append(&mErr, fmt.Errorf(\"device attribute %q invalid: %v\", k, err))\n\t\t}\n\t}\n\n\treturn mErr.ErrorOrNil()\n\n}",
"func (o *VirtualizationVmwareVirtualMachineAllOf) HasPortGroups() bool {\n\tif o != nil && o.PortGroups != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (r *registerManager) IsAllNodeRegistered(ts time.Time) bool {\n r.Lock()\n defer r.Unlock()\n\n var (\n isAllDone = true\n mLen = len(r.monitorList)\n )\n // if node is not being registered\n if !r.isRegisteringNode {\n return false\n }\n for i := 0; i < mLen; i++ {\n if !r.monitorList[i].isRegistered {\n isAllDone = false\n break\n }\n }\n return isAllDone\n}",
"func (r *Registrator) Register(port int, reg quantum.Registry) error {\n\tfor _, t := range reg.Types() {\n\t\tr.Jobs[t] = \"0.0.0.0:\" + strconv.Itoa(port)\n\t}\n\n\treturn nil\n}",
"func Register() map[string]string {\n\treturn map[string]string{\n\t\t\"name\": \"Beubo Example Plugin\",\n\t\t// identifier should be a unique identifier used to differentiate this plugin from other plugins\n\t\t\"identifier\": \"beubo_example_plugin\",\n\t}\n}",
"func (ou *OrganizationUpdate) AddUnits(o ...*OrgUnit) *OrganizationUpdate {\n\tids := make([]int, len(o))\n\tfor i := range o {\n\t\tids[i] = o[i].ID\n\t}\n\treturn ou.AddUnitIDs(ids...)\n}",
"func (me *masterExtension) Units() ([]k8scloudconfig.UnitAsset, error) {\n\tunitsMeta := []k8scloudconfig.UnitMetadata{\n\t\t{\n\t\t\tAssetContent: ignition.AzureCNINatRules,\n\t\t\tName: \"azure-cni-nat-rules.service\",\n\t\t\tEnabled: true,\n\t\t},\n\t\t{\n\t\t\tAssetContent: ignition.CertificateDecrypterUnit,\n\t\t\tName: \"certificate-decrypter.service\",\n\t\t\tEnabled: true,\n\t\t},\n\t\t{\n\t\t\tAssetContent: ignition.EtcdMountUnit,\n\t\t\tName: \"var-lib-etcd.mount\",\n\t\t\tEnabled: true,\n\t\t},\n\t\t{\n\t\t\tAssetContent: ignition.DockerMountUnit,\n\t\t\tName: \"var-lib-docker.mount\",\n\t\t\tEnabled: true,\n\t\t},\n\t\t{\n\t\t\tAssetContent: ignition.KubeletMountUnit,\n\t\t\tName: \"var-lib-kubelet.mount\",\n\t\t\tEnabled: true,\n\t\t},\n\t\t{\n\t\t\tAssetContent: ignition.VNICConfigurationUnit,\n\t\t\tName: \"vnic-configuration.service\",\n\t\t\tEnabled: true,\n\t\t},\n\t}\n\n\tdata := me.templateData(me.certFiles)\n\n\t// To use the certificate decrypter unit for the etcd data encryption config file.\n\tdata.certificateDecrypterUnitParams.CertsPaths = append(data.certificateDecrypterUnitParams.CertsPaths, encryptionConfigFilePath)\n\n\tvar newUnits []k8scloudconfig.UnitAsset\n\n\tfor _, fm := range unitsMeta {\n\t\tc, err := k8scloudconfig.RenderAssetContent(fm.AssetContent, data)\n\t\tif err != nil {\n\t\t\treturn nil, microerror.Mask(err)\n\t\t}\n\n\t\tunitAsset := k8scloudconfig.UnitAsset{\n\t\t\tMetadata: fm,\n\t\t\tContent: c,\n\t\t}\n\n\t\tnewUnits = append(newUnits, unitAsset)\n\t}\n\n\treturn newUnits, nil\n}",
"func Register(s *health.State, options ...Option) error {\n\tro := newRegisterOptions(options...)\n\tr := metric.NewRegistry()\n\tif err := addMetrics(r, ro, s); err != nil {\n\t\treturn err\n\t}\n\tmetricproducer.GlobalManager().AddProducer(r)\n\treturn nil\n}",
"func (o *EmbeddedUnitModel) HasUnitGroupId() bool {\n\tif o != nil && o.UnitGroupId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (usl UnitStatusList) Group() (UnitStatusList, error) {\n\tmatchers := map[string]struct{}{}\n\tnewList := []fleet.UnitStatus{}\n\n\thashesEqual, err := allHashesEqual(usl)\n\tif err != nil {\n\t\treturn nil, maskAny(err)\n\t}\n\n\tfor _, us := range usl {\n\t\t// Group unit status\n\t\tgrouped, suffix, err := groupUnitStatus(usl, us)\n\t\tif err != nil {\n\t\t\treturn nil, maskAny(err)\n\t\t}\n\n\t\t// Prevent doubled aggregation.\n\t\tif _, ok := matchers[suffix]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tmatchers[suffix] = struct{}{}\n\n\t\tstatesEqual := allStatesEqual(grouped)\n\n\t\t// Aggregate.\n\t\tif hashesEqual && statesEqual {\n\t\t\tnewStatus := grouped[0]\n\t\t\tnewStatus.Name = \"*\"\n\t\t\tnewList = append(newList, newStatus)\n\t\t} else {\n\t\t\tnewList = append(newList, grouped...)\n\t\t}\n\t}\n\n\treturn newList, nil\n}",
"func (_m *IService) RegisterMix(info models.MixRegistrationInfo) {\n\t_m.Called(info)\n}",
"func Register(functions ...func()) *Manager {\n\treturn defaultManager.Register(functions...)\n}",
"func MustRegister(decoder Decoder, mediaTypes ...string) {\n\tif err := Register(decoder, mediaTypes...); err != nil {\n\t\tpanic(err)\n\t}\n}",
"func (r *Registration) Check() (errlist []error) {\n\tif !r.NameIsOk() {\n\t\terrlist = append(errlist, nameError)\n\t}\n\tif !r.AddressIsOk() {\n\t\terrlist = append(errlist, addressError)\n\t}\n\tif !r.EmailIsOk() {\n\t\terrlist = append(errlist, emailError)\n\t}\n\treturn errlist\n}",
"func MustRegisterMetrics(registrables ...metrics.Registerable) {\n\tfor _, r := range registrables {\n\t\terr := legacyregistry.Register(r)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to register metric %s: %v\", r.FQName(), err)\n\t\t}\n\t}\n}",
"func (mr *MockHubMockRecorder) Register(c interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockHub)(nil).Register), c)\n}",
"func Register(plugins *admission.Plugins) {\n\tplugins.Register(PluginName, NewFactory)\n}",
"func Register(cmds ...*cli.Command) error {\n\treturn cmd.Register(cmds...)\n}",
"func (s *mustRunAs) Validate(fldPath *field.Path, _ *api.Pod, groups []int64) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif len(groups) == 0 && len(s.ranges) > 0 {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(s.field), groups, \"unable to validate empty groups against required ranges\"))\n\t}\n\n\tfor _, group := range groups {\n\t\tif !s.isGroupValid(group) {\n\t\t\tdetail := fmt.Sprintf(\"%d is not an allowed group\", group)\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(s.field), groups, detail))\n\t\t}\n\t}\n\n\treturn allErrs\n}",
"func (backend *Backend) DevicesRegistered() map[string]device.Interface {\n\treturn backend.devices\n}",
"func (o *DataExportQuery) HasUnit() bool {\n\tif o != nil && o.Unit != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}"
] | [
"0.5419398",
"0.5418993",
"0.531174",
"0.5119865",
"0.50986594",
"0.49601153",
"0.4939661",
"0.4924268",
"0.49226665",
"0.48969033",
"0.48539323",
"0.47764537",
"0.4633344",
"0.46204185",
"0.45852447",
"0.45746577",
"0.45736307",
"0.45174125",
"0.44895804",
"0.44889534",
"0.44798318",
"0.44712976",
"0.44646692",
"0.44265658",
"0.44189548",
"0.4414824",
"0.44139528",
"0.4383407",
"0.4381984",
"0.4373068",
"0.43660828",
"0.4362405",
"0.4362103",
"0.43437326",
"0.43433616",
"0.43266928",
"0.4315063",
"0.4311639",
"0.43113142",
"0.42997432",
"0.42972127",
"0.4291208",
"0.4283654",
"0.42783833",
"0.42590705",
"0.42544344",
"0.42519882",
"0.42436448",
"0.42177314",
"0.42158648",
"0.42140162",
"0.42057735",
"0.42020893",
"0.4197392",
"0.41915435",
"0.41889566",
"0.4187937",
"0.41855633",
"0.41847798",
"0.41822326",
"0.41810393",
"0.41801816",
"0.41784483",
"0.41761586",
"0.4170961",
"0.41653702",
"0.41573548",
"0.415344",
"0.41444054",
"0.4142637",
"0.41370174",
"0.4129867",
"0.41273823",
"0.4116813",
"0.41097203",
"0.41064116",
"0.4103202",
"0.4100136",
"0.4096936",
"0.40968347",
"0.40955207",
"0.40921968",
"0.40910852",
"0.4088122",
"0.40801927",
"0.40779468",
"0.40726605",
"0.40716234",
"0.40708363",
"0.4070183",
"0.4065028",
"0.40582117",
"0.40500468",
"0.40455124",
"0.4039415",
"0.4036452",
"0.40339336",
"0.40335172",
"0.40300587",
"0.40249932"
] | 0.7825831 | 0 |
RunConfig runs the Config phase of all registered Config aware Units. Only use this function if needing to add additional wiring between config and (pre)run phases and a separate PreRunner phase is not an option. In most cases it is best to use the Run method directly as it will run the Config phase prior to executing the PreRunner and Service phases. If an error is returned the application must shut down as it is considered fatal. | RunConfig запускает этап конфигурации (Config) всех зарегистрированных Units, осознающих конфигурацию. Используйте эту функцию только в случае необходимости добавления дополнительных связей между этапом конфигурации и (пред)запуском, а отдельный этап PreRunner не является опцией. В большинстве случаев лучше использовать метод Run напрямую, так как он сначала выполнит этап конфигурации, прежде чем запустить этапы PreRunner и Service. Если возвращается ошибка, приложение должно завершиться, так как она считается фатальной. | func (g *Group) RunConfig() (interrupted bool, err error) {
g.log = logger.GetLogger(g.name)
g.configured = true
if g.name == "" {
// use the binary name if custom name has not been provided
g.name = path.Base(os.Args[0])
}
defer func() {
if err != nil {
g.log.Error().Err(err).Msg("unexpected exit")
}
}()
// Load config from env and file
if err = config.Load(g.f.Name, g.f.FlagSet); err != nil {
return false, errors.Wrapf(err, "%s fails to load config", g.f.Name)
}
// bail early on help or version requests
switch {
case g.showRunGroup:
fmt.Println(g.ListUnits())
return true, nil
}
// Validate Config inputs
for idx := range g.c {
// a Config might have been deregistered during Run
if g.c[idx] == nil {
g.log.Debug().Uint32("ran", uint32(idx+1)).Msg("skipping validate")
continue
}
g.log.Debug().Str("name", g.c[idx].Name()).Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("validate config")
if vErr := g.c[idx].Validate(); vErr != nil {
err = multierr.Append(err, vErr)
}
}
// exit on at least one Validate error
if err != nil {
return false, err
}
// log binary name and version
g.log.Info().Msg("started")
return false, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func ConfigRun(ctx *cli.Context) error {\n\topt, err := InitOption(ctx)\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"option error: %w\", err)\n\t}\n\n\t// Disable OS and language analyzers\n\topt.DisabledAnalyzers = append(analyzer.TypeOSes, analyzer.TypeLanguages...)\n\n\t// Scan only config files\n\topt.VulnType = nil\n\topt.SecurityChecks = []string{types.SecurityCheckConfig}\n\n\t// Run filesystem command internally\n\treturn run(ctx.Context, opt, filesystemArtifact)\n}",
"func RunForConfig(ctx context.Context, cfg Config, init func(*InitData) error) error {\n\tlogger, err := newLogger(cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tlogger.Info(\"Starting server\",\n\t\tzap.String(\"network\", cfg.GetNetwork()),\n\t\tzap.String(\"address\", cfg.GetAddress()),\n\t)\n\tlistener, err := net.Listen(cfg.GetNetwork(), cfg.GetAddress())\n\tif err != nil {\n\t\treturn err\n\t}\n\ts := grpc.NewServer(grpc.UnaryInterceptor(newSecretChecker(cfg).Intercept))\n\tif err = init(&InitData{\n\t\tLogger: logger,\n\t\tServer: s,\n\t\tListener: listener,\n\t}); err != nil {\n\t\treturn err\n\t}\n\tgo func() {\n\t\t<-ctx.Done()\n\t\ts.GracefulStop()\n\t}()\n\terr = s.Serve(listener)\n\tlogger.Info(\"Server stopped\")\n\treturn err\n}",
"func Run(ctx context.Context, cfg *config.Config) error {\n\tMetrics = newMetrics()\n\tdefer runCleanupHooks()\n\n\t// apply defaults before validation\n\tcfg.ApplyDefaults()\n\n\terr := cfg.Validate()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to validate config: %w\\n%+v\", err, cfg)\n\t}\n\n\tfuncMap := template.FuncMap{}\n\terr = bindPlugins(ctx, cfg, funcMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// if a custom Stdin is set in the config, inject it into the context now\n\tctx = data.ContextWithStdin(ctx, cfg.Stdin)\n\n\topts := optionsFromConfig(cfg)\n\topts.Funcs = funcMap\n\ttr := NewRenderer(opts)\n\n\tstart := time.Now()\n\n\tnamer := chooseNamer(cfg, tr)\n\ttmpl, err := gatherTemplates(ctx, cfg, namer)\n\tMetrics.GatherDuration = time.Since(start)\n\tif err != nil {\n\t\tMetrics.Errors++\n\t\treturn fmt.Errorf(\"failed to gather templates for rendering: %w\", err)\n\t}\n\tMetrics.TemplatesGathered = len(tmpl)\n\n\terr = tr.RenderTemplates(ctx, tmpl)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (c *ConfigController) Run(threadiness int, stopCh <-chan struct{}) error {\n\tdefer func() {\n\t\tc.queue.ShutDown()\n\t}()\n\n\tglog.V(3).Infoln(\"Creating CDI config\")\n\tif _, err := CreateCDIConfig(c.client, c.cdiClientSet, c.configName); err != nil {\n\t\truntime.HandleError(err)\n\t\treturn errors.Wrap(err, \"Error creating CDI config\")\n\t}\n\n\tglog.V(3).Infoln(\"Starting config controller Run loop\")\n\tif threadiness < 1 {\n\t\treturn errors.Errorf(\"expected >0 threads, got %d\", threadiness)\n\t}\n\n\tif ok := cache.WaitForCacheSync(stopCh, c.ingressesSynced, c.routesSynced); !ok {\n\t\treturn errors.New(\"failed to wait for caches to sync\")\n\t}\n\n\tglog.V(3).Infoln(\"ConfigController cache has synced\")\n\n\tfor i := 0; i < threadiness; i++ {\n\t\tgo wait.Until(c.runWorker, time.Second, stopCh)\n\t}\n\n\tglog.Info(\"Started workers\")\n\t<-stopCh\n\tglog.Info(\"Shutting down workers\")\n\treturn nil\n}",
"func (o *UninstallConfigOptions) Run() error {\n\n\terr := UninstallConfig()\n\tif err != nil {\n\t\treturn errors.Wrapf(err,\"Uninstall Config command failed.\")\n\t}\n\treturn nil\n}",
"func (act *ActionConfig) Run() error {\n\t// prepre for configuration\n\t// recovery from log\n\treturn act.next()\n}",
"func Run(cliCtx *cli.Context) error {\n\tc, err := config.New(cliCtx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn run(c)\n}",
"func Run(a *config.Args) error {\n\tfor {\n\t\t// copy the baseline config\n\t\tcfg := *a\n\n\t\t// load the config file\n\t\tif err := fetchConfig(&cfg); err != nil {\n\t\t\tif cfg.StartupOptions.ConfigRepo != \"\" {\n\t\t\t\tlog.Errorf(\"Unable to load configuration file, waiting for 1 minute and then will try again: %v\", err)\n\t\t\t\ttime.Sleep(time.Minute)\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"unable to load configuration file: %v\", err)\n\t\t\t}\n\t\t}\n\n\t\tif err := serve(&cfg); err != nil {\n\t\t\tif cfg.StartupOptions.ConfigRepo != \"\" {\n\t\t\t\tlog.Errorf(\"Unable to initialize server likely due to bad config, waiting for 1 minute and then will try again: %v\", err)\n\t\t\t\ttime.Sleep(time.Minute)\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"unable to initialize server: %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Infof(\"Configuration change detected, attempting to reload configuration\")\n\t\t}\n\t}\n}",
"func Run(root string) error {\n\tv1, err := readConfig()\n\tmust(err)\n\n\tvar context = runconf{\n\t\tFilterOut: v1.GetStringSlice(\"filterOut\"),\n\t\tLookFor: v1.GetStringSlice(\"lookFor\"),\n\t\trootPath: root,\n\t}\n\n\titerate(context)\n\treturn nil\n}",
"func (fr *Runner) RunConfigs(cfgs ...Config) (stdout, stderr string, err error) {\n\targs := fr.argsFromConfigs(append([]Config{fr.Global}, cfgs...)...)\n\n\treturn fr.Run(args...)\n}",
"func (c ConfigUnmarshalTests) Run(t *testing.T) {\n\ttestConfMaps, err := confmaptest.LoadConf(c.TestsFile)\n\trequire.NoError(t, err)\n\n\tfor _, tc := range c.Tests {\n\t\tt.Run(tc.Name, func(t *testing.T) {\n\t\t\ttestConfMap, err := testConfMaps.Sub(tc.Name)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotZero(t, len(testConfMap.AllKeys()), fmt.Sprintf(\"config not found: '%s'\", tc.Name))\n\n\t\t\tcfg := newAnyOpConfig(c.DefaultConfig)\n\t\t\terr = config.UnmarshalReceiver(testConfMap, cfg)\n\n\t\t\tif tc.ExpectErr {\n\t\t\t\trequire.Error(t, err)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.Equal(t, tc.Expect, cfg.Operator.Builder)\n\t\t\t}\n\t\t})\n\t}\n}",
"func (manager *Manager) Run(configManagerID string, configManagerSpec string) error {\n\n\t// We dont want any reloads happening until we are fully running\n\tmanager.configReloadMutex.Lock()\n\n\tif len(configManagerID) > 0 {\n\t\tmanager.InitializeConfigurationManager(configManagerID, configManagerSpec)\n\t}\n\n\tconfiguration := config.Config{}\n\tmanager.InitializeConnectionManagers(configuration)\n\n\tlog.Println(\"Initialization of plugins done.\")\n\n\tlog.Println(\"Initializing the proxy...\")\n\tresolver := NewResolver(manager.ProviderFactories, manager, nil)\n\n\tmanager.Proxy = secretless.Proxy{\n\t\tConfig: configuration,\n\t\tEventNotifier: manager,\n\t\tResolver: resolver,\n\t\tRunHandlerFunc: manager._RunHandler,\n\t\tRunListenerFunc: manager._RunListener,\n\t}\n\n\tmanager.configReloadMutex.Unlock()\n\n\tmanager.Proxy.Run()\n\n\treturn nil\n}",
"func RunConfigs() {\n\tloggingSetup()\n}",
"func Run(configPath, devURL, addr string) error {\n\tcfg := new(Config)\n\tf, err := os.Open(configPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to open config file: %s\", err)\n\t}\n\n\tif err := json.NewDecoder(f).Decode(cfg); err != nil {\n\t\treturn fmt.Errorf(\"failed to decode config file: %s\", err)\n\t}\n\n\tsrv, err := setupServer(cfg)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to initialise server: %s\", err)\n\t}\n\n\tif err := setupAssets(devURL, srv); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Println(\"Serving on\", addr)\n\n\tif err := http.ListenAndServe(addr, srv); err != nil {\n\t\treturn fmt.Errorf(\"failed to start server: %s\", err)\n\t}\n\n\treturn nil\n}",
"func Run() {\n\tl := logrus.WithField(\"component\", \"main\")\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer l.Info(\"Done.\")\n\n\t// handle termination signals\n\tsignals := make(chan os.Signal, 1)\n\tsignal.Notify(signals, unix.SIGTERM, unix.SIGINT)\n\tgo func() {\n\t\ts := <-signals\n\t\tsignal.Stop(signals)\n\t\tl.Warnf(\"Got %s, shutting down...\", unix.SignalName(s.(unix.Signal)))\n\t\tcancel()\n\t}()\n\n\tfor {\n\t\tcfg, configFilepath, err := config.Get(l)\n\t\tif err != nil {\n\t\t\tl.Fatalf(\"Failed to load configuration: %s.\", err)\n\t\t}\n\t\tconfig.ConfigureLogger(cfg)\n\t\tl.Debugf(\"Loaded configuration: %+v\", cfg)\n\n\t\trun(ctx, cfg, configFilepath)\n\n\t\tif ctx.Err() != nil {\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (cli *ApplyConfigStepImplementation) Run(pipelineInfo *model.PipelineInfo) error {\n\t// Get radix application from config map\n\tnamespace := utils.GetAppNamespace(cli.GetAppName())\n\tconfigMap, err := cli.GetKubeutil().GetConfigMap(namespace, pipelineInfo.RadixConfigMapName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconfigFileContent, ok := configMap.Data[pipelineDefaults.PipelineConfigMapContent]\n\tif !ok {\n\t\treturn fmt.Errorf(\"failed load RadixApplication from ConfigMap\")\n\t}\n\tra, err := CreateRadixApplication(cli.GetRadixclient(), configFileContent)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Apply RA to cluster\n\tapplicationConfig, err := application.NewApplicationConfig(cli.GetKubeclient(), cli.GetKubeutil(),\n\t\tcli.GetRadixclient(), cli.GetRegistration(), ra)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = applicationConfig.ApplyConfigToApplicationNamespace()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Set back to pipeline\n\tpipelineInfo.SetApplicationConfig(applicationConfig)\n\n\tpipelineInfo.PrepareBuildContext, err = getPrepareBuildContextContent(configMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif pipelineInfo.PipelineArguments.PipelineType == string(v1.BuildDeploy) {\n\t\tgitCommitHash, gitTags := cli.getHashAndTags(namespace, pipelineInfo)\n\t\terr = validate.GitTagsContainIllegalChars(gitTags)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpipelineInfo.SetGitAttributes(gitCommitHash, gitTags)\n\t\tpipelineInfo.StopPipeline, pipelineInfo.StopPipelineMessage = getPipelineShouldBeStopped(pipelineInfo.PrepareBuildContext)\n\t}\n\n\treturn nil\n}",
"func (cmd *GenerateConfigCommand) Run(_ context.Context) error {\n\tconf := server.NewConfig()\n\tret, err := toml.Marshal(*conf)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unmarshaling default config\")\n\t}\n\tfmt.Fprintf(cmd.Stdout, \"%s\\n\", ret)\n\treturn nil\n}",
"func (cli *CLI) Run(args []string) int {\n\n\tc, err := cli.setup(args)\n\tif err != nil {\n\t\tlogging.Error(\"unable to parse configuration: %v\", err)\n\t\treturn ExitCodeParseConfigError\n\t}\n\n\t// Set the logging level for the logger.\n\tlogging.SetLevel(c.LogLevel)\n\n\t// Initialize telemetry if this was configured by the user.\n\tif c.Telemetry.StatsdAddress != \"\" {\n\t\tsink, statsErr := metrics.NewStatsdSink(c.Telemetry.StatsdAddress)\n\t\tif statsErr != nil {\n\t\t\tlogging.Error(\"unable to setup telemetry correctly: %v\", statsErr)\n\t\t\treturn ExitCodeTelemtryError\n\t\t}\n\t\tmetrics.NewGlobal(metrics.DefaultConfig(\"replicator\"), sink)\n\t}\n\n\t// Create the initial runner with the merged configuration parameters.\n\trunner, err := NewRunner(c)\n\tif err != nil {\n\t\treturn ExitCodeRunnerError\n\t}\n\n\tlogging.Debug(\"running version %v\", version.Get())\n\tgo runner.Start()\n\n\tsignalCh := make(chan os.Signal, 1)\n\tsignal.Notify(signalCh,\n\t\tsyscall.SIGHUP,\n\t\tsyscall.SIGINT,\n\t\tsyscall.SIGTERM,\n\t\tsyscall.SIGQUIT,\n\t)\n\n\tfor {\n\t\tselect {\n\t\tcase s := <-signalCh:\n\t\t\tswitch s {\n\t\t\tcase syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT:\n\t\t\t\trunner.Stop()\n\t\t\t\treturn ExitCodeInterrupt\n\n\t\t\tcase syscall.SIGHUP:\n\t\t\t\trunner.Stop()\n\n\t\t\t\t// Reload the configuration in order to make proper use of SIGHUP.\n\t\t\t\tc, err := cli.setup(args)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn ExitCodeParseConfigError\n\t\t\t\t}\n\n\t\t\t\t// Setup a new runner with the new configuration.\n\t\t\t\trunner, err = NewRunner(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn ExitCodeRunnerError\n\t\t\t\t}\n\n\t\t\t\tgo runner.Start()\n\t\t\t}\n\t\t}\n\t}\n}",
"func Run(ctx *cli.Context) error {\n\tc, err := NewConfig(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn run(ctx.Context, c)\n}",
"func (cmd *PrintConfigCommand) Run(args ...string) error {\n\t// Parse command flags.\n\tfs := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tconfigPath := fs.String(\"config\", \"\", \"\")\n\tfs.Usage = func() {\n\t\tif _, err := fmt.Fprintln(os.Stderr, printConfigUsage); err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}\n\tif err := fs.Parse(args); err != nil {\n\t\treturn err\n\t}\n\n\t// Parse config from path.\n\topt := Options{ConfigPath: *configPath}\n\tconfig, err := cmd.parseConfig(opt.GetConfigPath())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse config: %s\", err)\n\t}\n\n\t// Validate the configuration.\n\tif err = config.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"%s. To generate a valid configuration file run `emailworker config > emailworker.generated.toml`\", err)\n\t}\n\n\tif err = toml.NewEncoder(cmd.Stdout).Encode(config); err != nil {\n\t\treturn fmt.Errorf(\"error encoding toml: %s\", err)\n\t}\n\t_, err = fmt.Fprint(cmd.Stdout, \"\\n\")\n\treturn err\n}",
"func (conf *Config) Run() error {\n\t// no error-checking if nothing to check errors on\n\tif conf == nil {\n\t\treturn nil\n\t}\n\tif conf.ThreadCount < 0 {\n\t\treturn fmt.Errorf(\"invalid thread count %d [must be a positive number]\", conf.ThreadCount)\n\t}\n\t// if not given other instructions, just describe the specs\n\tif !conf.Generate && !conf.Delete && conf.Verify == \"\" {\n\t\tconf.Describe = true\n\t\tconf.onlyDescribe = true\n\t}\n\tif conf.Verify != \"\" {\n\t\terr := conf.verifyType.UnmarshalText([]byte(conf.Verify))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unknown verify type '%s'\", conf.Verify)\n\t\t}\n\t} else {\n\t\t// If we're deleting, and not generating, we can skip\n\t\t// verification. Otherwise, default to erroring out if\n\t\t// there's a mismatch.\n\t\tif conf.Delete && !conf.Generate {\n\t\t\tconf.verifyType = verifyTypeNone\n\t\t} else {\n\t\t\tconf.verifyType = verifyTypeError\n\t\t}\n\t}\n\tconf.NewSpecsFiles(conf.flagset.Args())\n\tif len(conf.specFiles) < 1 {\n\t\treturn errors.New(\"must specify one or more spec files\")\n\t}\n\tif conf.ColumnScale < 0 || conf.ColumnScale > (1<<31) {\n\t\treturn fmt.Errorf(\"column scale [%d] should be between 1 and 2^31\", conf.ColumnScale)\n\t}\n\tif conf.RowScale < 0 || conf.RowScale > (1<<16) {\n\t\treturn fmt.Errorf(\"row scale [%d] should be between 1 and 2^16\", conf.RowScale)\n\t}\n\treturn nil\n}",
"func initRunConfig() (*runConfig, error) {\n\t// Find the server binary for each phase\n\tpCmd := flagServerCmd\n\tif flagParseServerCmd != \"\" {\n\t\tpCmd = flagParseServerCmd\n\t}\n\tif pCmd == \"\" {\n\t\treturn nil, fmt.Errorf(\"no parse server defined\")\n\t}\n\n\tcCmd := flagServerCmd\n\tif flagCheckServerCmd != \"\" {\n\t\tcCmd = flagCheckServerCmd\n\t}\n\tif cCmd == \"\" {\n\t\treturn nil, fmt.Errorf(\"no check server defined\")\n\t}\n\n\teCmd := flagServerCmd\n\tif flagEvalServerCmd != \"\" {\n\t\teCmd = flagEvalServerCmd\n\t}\n\tif eCmd == \"\" {\n\t\treturn nil, fmt.Errorf(\"no eval server defined\")\n\t}\n\n\t// Only launch each required binary once\n\tservers := make(map[string]celrpc.ConfClient)\n\tservers[pCmd] = nil\n\tservers[cCmd] = nil\n\tservers[eCmd] = nil\n\tfor cmd := range servers {\n\t\tvar cli celrpc.ConfClient\n\t\tvar err error\n\t\tif flagPipe {\n\t\t\tcli, err = celrpc.NewPipeClient(cmd, flagPipeBase64)\n\t\t} else {\n\t\t\tcli, err = celrpc.NewGrpcClient(cmd)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tservers[cmd] = cli\n\t}\n\n\tvar rc runConfig\n\trc.parseClient = servers[pCmd]\n\trc.checkClient = servers[cCmd]\n\trc.evalClient = servers[eCmd]\n\trc.checkedOnly = flagCheckedOnly\n\trc.skipCheck = flagSkipCheck\n\treturn &rc, nil\n}",
"func runConfig(cfg Config, root string) []error {\n\tfiles, err := gatherFiles(root, cfg)\n\tif err != nil {\n\t\treturn []error{fmt.Errorf(\"Failed to gather files: %w\", err)}\n\t}\n\n\tfmt.Printf(\"Scanning %d files...\\n\", len(files))\n\n\tvar wg sync.WaitGroup\n\terrs := make([]error, len(files))\n\tfor i, file := range files {\n\t\ti, file := i, file\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terrs[i] = examine(root, file, cfg)\n\t\t}()\n\t}\n\twg.Wait()\n\n\treturn removeNilErrs(errs)\n}",
"func (o *GetVaultConfigOptions) Run() error {\n\tvar vaultClient vault.Client\n\tvar err error\n\n\tif o.Name != \"\" || o.Namespace != \"\" {\n\t\tvaultClient, err = o.vaultClient(o.Name, o.Namespace)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tvaultClient, err = o.systemVaultClient()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\turl, token, err := vaultClient.Config()\n\t// Echo the client config out to the command line to be piped into bash\n\tif o.terminal == \"\" {\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\to.terminal = \"cmd\"\n\t\t} else {\n\t\t\to.terminal = \"sh\"\n\t\t}\n\t}\n\tif o.terminal == \"cmd\" {\n\t\t_, _ = fmt.Fprintf(o.Out, \"set VAULT_ADDR=%s\\nset VAULT_TOKEN=%s\\n\", url.String(), token)\n\t} else {\n\t\t_, _ = fmt.Fprintf(o.Out, \"export VAULT_ADDR=%s\\nexport VAULT_TOKEN=%s\\n\", url.String(), token)\n\t}\n\n\treturn err\n}",
"func Run(appCtx app.Context) error {\n\tif err := loadConfig(&appCtx); err != nil {\n\t\treturn err\n\t}\n\tapi, err := makeAPI(appCtx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn api.Run()\n}",
"func (p *Pipeline) Run(args []string) int {\n\tif err := p.LoadConfig(); err != nil {\n\t\treturn 1\n\t}\n\treturn 0\n}",
"func (i *MonitorInstance) InitConfig(\n\tctx context.Context,\n\te ctxt.Executor,\n\tclusterName,\n\tclusterVersion,\n\tdeployUser string,\n\tpaths meta.DirPaths,\n) error {\n\tgOpts := *i.topo.BaseTopo().GlobalOptions\n\tif err := i.BaseInstance.InitConfig(ctx, e, gOpts, deployUser, paths); err != nil {\n\t\treturn err\n\t}\n\n\tenableTLS := gOpts.TLSEnabled\n\t// transfer run script\n\tspec := i.InstanceSpec.(*PrometheusSpec)\n\n\tcfg := &scripts.PrometheusScript{\n\t\tPort: spec.Port,\n\t\tWebExternalURL: fmt.Sprintf(\"http://%s\", utils.JoinHostPort(spec.Host, spec.Port)),\n\t\tRetention: getRetention(spec.Retention),\n\t\tEnableNG: spec.NgPort > 0,\n\n\t\tDeployDir: paths.Deploy,\n\t\tLogDir: paths.Log,\n\t\tDataDir: paths.Data[0],\n\n\t\tNumaNode: spec.NumaNode,\n\t}\n\n\tfp := filepath.Join(paths.Cache, fmt.Sprintf(\"run_prometheus_%s_%d.sh\", i.GetHost(), i.GetPort()))\n\tif err := cfg.ConfigToFile(fp); err != nil {\n\t\treturn err\n\t}\n\n\tdst := filepath.Join(paths.Deploy, \"scripts\", \"run_prometheus.sh\")\n\tif err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil {\n\t\treturn err\n\t}\n\n\tif _, _, err := e.Execute(ctx, \"chmod +x \"+dst, false); err != nil {\n\t\treturn err\n\t}\n\n\ttopoHasField := func(field string) (reflect.Value, bool) {\n\t\treturn findSliceField(i.topo, field)\n\t}\n\tmonitoredOptions := i.topo.GetMonitoredOptions()\n\n\t// transfer config\n\tcfig := config.NewPrometheusConfig(clusterName, clusterVersion, enableTLS)\n\tif monitoredOptions != nil {\n\t\tcfig.AddBlackbox(i.GetHost(), uint64(monitoredOptions.BlackboxExporterPort))\n\t}\n\tcfig.ScrapeInterval = spec.ScrapeInterval\n\tcfig.ScrapeTimeout = spec.ScrapeTimeout\n\tuniqueHosts := set.NewStringSet()\n\n\tif servers, found := topoHasField(\"PDServers\"); found {\n\t\tfor i := 0; i < servers.Len(); i++ {\n\t\t\tpd := servers.Index(i).Interface().(*PDSpec)\n\t\t\tuniqueHosts.Insert(pd.Host)\n\t\t\tcfig.AddPD(pd.Host, uint64(pd.ClientPort))\n\t\t}\n\t}\n\tif 
servers, found := topoHasField(\"TiKVServers\"); found {\n\t\tfor i := 0; i < servers.Len(); i++ {\n\t\t\tkv := servers.Index(i).Interface().(*TiKVSpec)\n\t\t\tuniqueHosts.Insert(kv.Host)\n\t\t\tcfig.AddTiKV(kv.Host, uint64(kv.StatusPort))\n\t\t}\n\t}\n\tif servers, found := topoHasField(\"TiDBServers\"); found {\n\t\tfor i := 0; i < servers.Len(); i++ {\n\t\t\tdb := servers.Index(i).Interface().(*TiDBSpec)\n\t\t\tuniqueHosts.Insert(db.Host)\n\t\t\tcfig.AddTiDB(db.Host, uint64(db.StatusPort))\n\t\t}\n\t}\n\tif servers, found := topoHasField(\"TiFlashServers\"); found {\n\t\tfor i := 0; i < servers.Len(); i++ {\n\t\t\tflash := servers.Index(i).Interface().(*TiFlashSpec)\n\t\t\tuniqueHosts.Insert(flash.Host)\n\t\t\tcfig.AddTiFlashLearner(flash.Host, uint64(flash.FlashProxyStatusPort))\n\t\t\tcfig.AddTiFlash(flash.Host, uint64(flash.StatusPort))\n\t\t}\n\t}\n\tif servers, found := topoHasField(\"PumpServers\"); found {\n\t\tfor i := 0; i < servers.Len(); i++ {\n\t\t\tpump := servers.Index(i).Interface().(*PumpSpec)\n\t\t\tuniqueHosts.Insert(pump.Host)\n\t\t\tcfig.AddPump(pump.Host, uint64(pump.Port))\n\t\t}\n\t}\n\tif servers, found := topoHasField(\"Drainers\"); found {\n\t\tfor i := 0; i < servers.Len(); i++ {\n\t\t\tdrainer := servers.Index(i).Interface().(*DrainerSpec)\n\t\t\tuniqueHosts.Insert(drainer.Host)\n\t\t\tcfig.AddDrainer(drainer.Host, uint64(drainer.Port))\n\t\t}\n\t}\n\tif servers, found := topoHasField(\"CDCServers\"); found {\n\t\tfor i := 0; i < servers.Len(); i++ {\n\t\t\tcdc := servers.Index(i).Interface().(*CDCSpec)\n\t\t\tuniqueHosts.Insert(cdc.Host)\n\t\t\tcfig.AddCDC(cdc.Host, uint64(cdc.Port))\n\t\t}\n\t}\n\tif servers, found := topoHasField(\"TiKVCDCServers\"); found {\n\t\tfor i := 0; i < servers.Len(); i++ {\n\t\t\ttikvCdc := servers.Index(i).Interface().(*TiKVCDCSpec)\n\t\t\tuniqueHosts.Insert(tikvCdc.Host)\n\t\t\tcfig.AddTiKVCDC(tikvCdc.Host, uint64(tikvCdc.Port))\n\t\t}\n\t}\n\tif servers, found := topoHasField(\"Monitors\"); found 
{\n\t\tfor i := 0; i < servers.Len(); i++ {\n\t\t\tmonitoring := servers.Index(i).Interface().(*PrometheusSpec)\n\t\t\tuniqueHosts.Insert(monitoring.Host)\n\t\t}\n\t}\n\tif servers, found := topoHasField(\"Grafanas\"); found {\n\t\tfor i := 0; i < servers.Len(); i++ {\n\t\t\tgrafana := servers.Index(i).Interface().(*GrafanaSpec)\n\t\t\tuniqueHosts.Insert(grafana.Host)\n\t\t\tcfig.AddGrafana(grafana.Host, uint64(grafana.Port))\n\t\t}\n\t}\n\tif servers, found := topoHasField(\"Alertmanagers\"); found {\n\t\tfor i := 0; i < servers.Len(); i++ {\n\t\t\talertmanager := servers.Index(i).Interface().(*AlertmanagerSpec)\n\t\t\tuniqueHosts.Insert(alertmanager.Host)\n\t\t\tcfig.AddAlertmanager(alertmanager.Host, uint64(alertmanager.WebPort))\n\t\t}\n\t}\n\tif servers, found := topoHasField(\"Masters\"); found {\n\t\tfor i := 0; i < servers.Len(); i++ {\n\t\t\tmaster := reflect.Indirect(servers.Index(i))\n\t\t\thost, port := master.FieldByName(\"Host\").String(), master.FieldByName(\"Port\").Int()\n\t\t\tuniqueHosts.Insert(host)\n\t\t\tcfig.AddDMMaster(host, uint64(port))\n\t\t}\n\t}\n\n\tif servers, found := topoHasField(\"Workers\"); found {\n\t\tfor i := 0; i < servers.Len(); i++ {\n\t\t\tworker := reflect.Indirect(servers.Index(i))\n\t\t\thost, port := worker.FieldByName(\"Host\").String(), worker.FieldByName(\"Port\").Int()\n\t\t\tuniqueHosts.Insert(host)\n\t\t\tcfig.AddDMWorker(host, uint64(port))\n\t\t}\n\t}\n\n\tif monitoredOptions != nil {\n\t\tfor host := range uniqueHosts {\n\t\t\tcfig.AddNodeExpoertor(host, uint64(monitoredOptions.NodeExporterPort))\n\t\t\tcfig.AddBlackboxExporter(host, uint64(monitoredOptions.BlackboxExporterPort))\n\t\t\tcfig.AddMonitoredServer(host)\n\t\t}\n\t}\n\n\tremoteCfg, err := encodeRemoteCfg2Yaml(spec.RemoteConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcfig.SetRemoteConfig(string(remoteCfg))\n\n\t// doesn't work\n\tif _, err := i.setTLSConfig(ctx, false, nil, paths); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, alertmanager := 
range spec.ExternalAlertmanagers {\n\t\tcfig.AddAlertmanager(alertmanager.Host, uint64(alertmanager.WebPort))\n\t}\n\tcfig.AddPushgateway(spec.PushgatewayAddrs)\n\n\tif spec.RuleDir != \"\" {\n\t\tfilter := func(name string) bool { return strings.HasSuffix(name, \".rules.yml\") }\n\t\terr := i.IteratorLocalConfigDir(ctx, spec.RuleDir, filter, func(name string) error {\n\t\t\tcfig.AddLocalRule(name)\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn errors.Annotate(err, \"add local rule\")\n\t\t}\n\t}\n\n\tif err := i.installRules(ctx, e, paths.Deploy, clusterName, clusterVersion); err != nil {\n\t\treturn errors.Annotate(err, \"install rules\")\n\t}\n\n\tif err := i.initRules(ctx, e, spec, paths, clusterName); err != nil {\n\t\treturn err\n\t}\n\n\tif spec.NgPort > 0 {\n\t\tpds := []string{}\n\t\tif servers, found := topoHasField(\"PDServers\"); found {\n\t\t\tfor i := 0; i < servers.Len(); i++ {\n\t\t\t\tpd := servers.Index(i).Interface().(*PDSpec)\n\t\t\t\tpds = append(pds, fmt.Sprintf(\"\\\"%s\\\"\", utils.JoinHostPort(pd.Host, pd.ClientPort)))\n\t\t\t}\n\t\t}\n\t\tngcfg := &config.NgMonitoringConfig{\n\t\t\tClusterName: clusterName,\n\t\t\tAddress: utils.JoinHostPort(i.GetListenHost(), spec.NgPort),\n\t\t\tAdvertiseAddress: utils.JoinHostPort(i.GetHost(), spec.NgPort),\n\t\t\tPDAddrs: strings.Join(pds, \",\"),\n\t\t\tTLSEnabled: enableTLS,\n\n\t\t\tDeployDir: paths.Deploy,\n\t\t\tDataDir: paths.Data[0],\n\t\t\tLogDir: paths.Log,\n\t\t}\n\n\t\tif servers, found := topoHasField(\"Monitors\"); found {\n\t\t\tfor i := 0; i < servers.Len(); i++ {\n\t\t\t\tmonitoring := servers.Index(i).Interface().(*PrometheusSpec)\n\t\t\t\tcfig.AddNGMonitoring(monitoring.Host, uint64(monitoring.NgPort))\n\t\t\t}\n\t\t}\n\t\tfp = filepath.Join(paths.Cache, fmt.Sprintf(\"ngmonitoring_%s_%d.toml\", i.GetHost(), i.GetPort()))\n\t\tif err := ngcfg.ConfigToFile(fp); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdst = filepath.Join(paths.Deploy, \"conf\", 
\"ngmonitoring.toml\")\n\t\tif err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfp = filepath.Join(paths.Cache, fmt.Sprintf(\"prometheus_%s_%d.yml\", i.GetHost(), i.GetPort()))\n\tif err := cfig.ConfigToFile(fp); err != nil {\n\t\treturn err\n\t}\n\tif spec.AdditionalScrapeConf != nil {\n\t\terr = mergeAdditionalScrapeConf(fp, spec.AdditionalScrapeConf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tdst = filepath.Join(paths.Deploy, \"conf\", \"prometheus.yml\")\n\tif err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil {\n\t\treturn err\n\t}\n\n\treturn checkConfig(ctx, e, i.ComponentName(), i.ComponentSource(), clusterVersion, i.OS(), i.Arch(), i.ComponentName()+\".yml\", paths, nil)\n}",
"func (c *Cfg) Run(args ...string) {\n\tif args == nil {\n\t\targs = os.Args[1:]\n\t}\n\tc, cmd, args, err := c.Parse(args)\n\tif err == nil {\n\t\tif err = cmd.Main(args); err == nil {\n\t\t\tExit(0)\n\t\t\treturn\n\t\t}\n\t}\n\tif err == ErrHelp {\n\t\tw := newWriter(c)\n\t\tdefer w.done(os.Stderr, 0)\n\t\tw.help()\n\t} else {\n\t\tswitch e := err.(type) {\n\t\tcase UsageError:\n\t\t\tw := newWriter(c)\n\t\t\tdefer w.done(os.Stderr, 2)\n\t\t\tw.error(string(e))\n\t\tcase ExitCode:\n\t\t\tExit(int(e))\n\t\tdefault:\n\t\t\tverb := \"%v\"\n\t\t\tif Debug {\n\t\t\t\tverb = \"%+v\"\n\t\t\t}\n\t\t\tfmt.Fprintf(os.Stderr, \"Error: \"+verb+\"\\n\", err)\n\t\t\tExit(1)\n\t\t}\n\t}\n}",
"func (o *Options) Run(ctx context.Context) error {\n\tlog.Info(\"getting rest config\")\n\trestConfig, err := config.GetConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Info(\"setting up manager\")\n\tmgr, err := manager.New(restConfig, manager.Options{\n\t\tScheme: kubernetes.SeedScheme,\n\t\tLeaderElection: false,\n\t\tMetricsBindAddress: \"0\", // disable for now, as we don't scrape the component\n\t\tHost: o.BindAddress,\n\t\tPort: o.Port,\n\t\tCertDir: o.ServerCertDir,\n\t\tGracefulShutdownTimeout: &gracefulShutdownTimeout,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Info(\"setting up webhook server\")\n\tserver := mgr.GetWebhookServer()\n\tserver.Register(extensioncrds.WebhookPath, &webhook.Admission{Handler: extensioncrds.New(runtimelog.Log.WithName(extensioncrds.HandlerName))})\n\tserver.Register(podschedulername.WebhookPath, &webhook.Admission{Handler: admission.HandlerFunc(podschedulername.DefaultShootControlPlanePodsSchedulerName)})\n\tserver.Register(extensionresources.WebhookPath, &webhook.Admission{Handler: extensionresources.New(runtimelog.Log.WithName(extensionresources.HandlerName), o.AllowInvalidExtensionResources)})\n\n\tlog.Info(\"starting manager\")\n\tif err := mgr.Start(ctx); err != nil {\n\t\tlog.Error(err, \"error running manager\")\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func Run(conf *config.Config) {\n\n\tvar eventHandler = ParseEventHandler(conf)\n\tcontroller.Start(conf, eventHandler)\n}",
"func Run(yml load.Config) {\n\t// samplesToMerge := map[string][]interface{}{}\n\tvar samplesToMerge load.SamplesToMerge\n\tsamplesToMerge.Data = map[string][]interface{}{}\n\tload.Logrus.WithFields(logrus.Fields{\n\t\t\"name\": yml.Name,\n\t\t\"apis\": len(yml.APIs),\n\t}).Debug(\"config: processing apis\")\n\n\t// load secrets\n\t_ = loadSecrets(&yml)\n\n\t// intentionally handled synchronously\n\tfor i := range yml.APIs {\n\t\tif err := runVariableProcessor(&yml); err != nil {\n\t\t\tload.Logrus.WithError(err).Error(\"config: variable processor error\")\n\t\t}\n\t\tdataSets := FetchData(i, &yml, &samplesToMerge)\n\t\tprocessor.RunDataHandler(dataSets, &samplesToMerge, i, &yml, i)\n\t}\n\n\tload.Logrus.WithFields(logrus.Fields{\n\t\t\"name\": yml.Name,\n\t\t\"apis\": len(yml.APIs),\n\t}).Debug(\"config: finished variable processing apis\")\n\n\t// processor.ProcessSamplesToMerge(&samplesToMerge, &yml)\n\t// hren MergeAndJoin processing - replacing processor.ProcessSamplesToMerge\n\tprocessor.ProcessSamplesMergeJoin(&samplesToMerge, &yml)\n}",
"func Run(config Configuration) {\n\tif err := validateConfig(&config); err != nil {\n\t\tpanic(err)\n\t}\n\n\tlog.SetOutput(os.Stdout)\n\tif config.LogFile != \"\" {\n\t\tlogFile, err := os.OpenFile(config.LogFile, os.O_CREATE|os.O_WRONLY, 0666)\n\t\tif err == nil {\n\t\t\tlog.SetOutput(logFile)\n\t\t} else {\n\t\t\tlog.Warnf(\"failed to open log file %s, using default stderr\", config.LogFile)\n\t\t}\n\t}\n\n\tif config.DevelopmentConfiguration != nil {\n\t\tif config.DevelopmentConfiguration.Enabled {\n\t\t\tlog.Debug(\"initializing development configuration\")\n\t\t\tif err := config.DevelopmentConfiguration.Init(config.Context.ParentEventID, config.Context.EventID); err != nil {\n\t\t\t\tlog.Errorf(\"%+v\", err) // non fatal error\n\t\t\t}\n\t\t}\n\t}\n\n\tmetaProvider := getMetaProvider(&config)\n\tblobProvider := getBlobProvider(&config, metaProvider)\n\teventProvider := getEventProvider(&config)\n\n\tdataPlane := &dataplane.DataPlane{\n\t\tBlobStorageProvider: blobProvider,\n\t\tDocumentStorageProvider: metaProvider,\n\t\tEventPublisher: eventProvider,\n\t}\n\n\t// TODO Refactor out below into doRun(dataPlane *dataplane.Dataplane, config Configuration)\n\n\tvalidEventTypes := strings.Split(config.ValidEventTypes, \",\")\n\n\tbaseDir := config.BaseDir\n\tif baseDir == \"\" || baseDir == \"./\" || baseDir == \".\\\\\" {\n\t\tbaseDir = getDefaultBaseDir()\n\t\tlog.Debugf(\"using default base directory %s\", baseDir)\n\t}\n\n\taction := strings.ToLower(config.Action)\n\tif config.Action == constants.Prepare {\n\t\tpreparer := preparer.NewPreparer(baseDir, config.DevelopmentConfiguration)\n\t\tdefer preparer.Close()\n\t\tif err := preparer.Prepare(config.Context, dataPlane); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"error during prepration %+v\", err))\n\t\t}\n\t} else if config.Action == constants.Commit {\n\t\tcommitter := committer.NewCommitter(baseDir, config.DevelopmentConfiguration)\n\t\tdefer committer.Close()\n\t\tif err := committer.Commit(config.Context, 
dataPlane, validEventTypes); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"error during commit %+v\", err))\n\t\t}\n\t} else {\n\t\tpanic(fmt.Sprintf(\"unsupported action type %+v\", action))\n\t}\n}",
"func Run(args []string) {\n\t// Parse the arguments\n\tvar cmdCfg CmdConfig\n\tif err := parse(args, &cmdCfg); err != nil {\n\t\tlog.Errorf(\"%s\", err)\n\t\tfmt.Fprintf(os.Stderr, \"USAGE \\n\\n\\t%s\\n\\n\", os.Args[0])\n\t\tfmt.Fprint(os.Stderr, \"GLOBAL OPTIONS:\\n\\n\")\n\t\tusage(os.Stderr, &cmdCfg)\n\n\t\tos.Exit(1)\n\t}\n\n\t// set up global context for signal interuption\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tvar stop = make(chan os.Signal)\n\tsignal.Notify(stop, syscall.SIGTERM, syscall.SIGINT)\n\tgo func() {\n\t\tsig := <-stop\n\t\tfmt.Printf(\"caught sig: %+v\\n\", sig)\n\t\tcancel()\n\t\tfmt.Println(\"Waiting up to 2 seconds to finish.\")\n\t\ttime.Sleep(2 * time.Second)\n\t\tos.Exit(0)\n\t}()\n\n\t// Global Configuration\n\t// Read deployment and service files into Kubernetes structs\n\tkubeServiceConfig, kubeServiceConfigErr := getServiceConfig(&cmdCfg)\n\tif kubeServiceConfigErr != nil {\n\t\tlog.Errorf(\"%s\", kubeServiceConfigErr)\n\t\tos.Exit(2)\n\t}\n\n\t// Regional configuration.\n\t// Gather environment variables and secret references.\n\t// Retrieve secrets from vault.\n\t// Create configmap and secret object.\n\tvar regionEnvs []*RegionEnv\n\tfor regionEnv := range createEnv(kubeServiceConfig, fetchSecrets(getConfig(ctx, &cmdCfg))) {\n\t\tlog.Debugf(\"Retrieved Configuration %+v\", regionEnv)\n\t\tif len(regionEnv.Errors) > 0 {\n\t\t\tlog.Errorf(\"%s\", regionEnv.Errors)\n\t\t\tos.Exit(2)\n\t\t}\n\t\tregionEnvs = append(regionEnvs, regionEnv)\n\t}\n\n\t// Run and monitor updates in this order.\n\tupdateFns := 
[]UpdateFn{\n\t\tupdateConfigMapRegion,\n\t\tupdateServiceRegion,\n\t\tupdateServiceAccountRegion,\n\t\tupdateIngressRegion,\n\t\tupdateIngressRouteRegion,\n\t\tupdateGatewayRegion,\n\t\tupdateVirtualServiceRegion,\n\t\tupdateServiceInstanceRegion,\n\t\tupdateServiceBindingRegion,\n\t\tupdateNamedSecretsRegion,\n\t\tupdateDeploymentRegion,\n\t\tupdateJobRegion,\n\t\tupdateHPAutoscalerRegion,\n\t\tupdatePodDisruptionBudgetRegion,\n\t}\n\tfor _, updateFn := range updateFns {\n\t\terr := runUpdate(regionEnvs, updateFn)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"%s\", err)\n\t\t\tos.Exit(2)\n\t\t}\n\t}\n}",
"func Run(settings *Settings) error {\n\tlog.Infof(\"using k8s version: %s\", settings.KubernetesVersion.String())\n\n\t// parse config\n\tlog.Infof(\"reading config from path: %s\", settings.PathConfig)\n\tconfigBytes, err := os.ReadFile(settings.PathConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\tconfig := &config{}\n\tif err := yaml.UnmarshalStrict(configBytes, &config); err != nil {\n\t\treturn errors.Wrapf(err, \"cannot parse config\")\n\t}\n\n\tfor _, j := range config.JobGroups {\n\t\tif err := processjobGroup(settings, &j); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {\n\tdefer utilruntime.HandleCrash()\n\tdefer c.workqueue.ShutDown()\n\t// Start the informer factories to begin populating the informer caches\n\tklog.Info(\"Starting Configurator controller\")\n\n\t// Wait for the caches to be synced before starting workers\n\tklog.Info(\"Waiting for informer caches to sync\")\n\tif ok := cache.WaitForCacheSync(stopCh, c.configmapsSynced, c.customConfigMapSynced); !ok {\n\t\treturn fmt.Errorf(\"failed to wait for caches to sync\")\n\t}\n\n\tklog.Info(\"Waiting for informer caches to sync\")\n\tif ok := cache.WaitForCacheSync(stopCh, c.secretSynced, c.customSecretSynced); !ok {\n\t\treturn fmt.Errorf(\"failed to wait for caches to sync\")\n\t}\n\n\tklog.Info(\"Starting workers\")\n\t// Launch two workers to process configurator resources\n\tfor i := 0; i < threadiness; i++ {\n\t\tgo wait.Until(c.runWorker, time.Second, stopCh)\n\t}\n\n\tklog.Info(\"Started workers\")\n\t<-stopCh\n\tklog.Info(\"Shutting down workers\")\n\n\treturn nil\n}",
"func Run() int {\n\tpflag.Parse()\n\tpopulateAvailableKubeconfigs()\n\n\tif len(availableKubeconfigs) == 0 {\n\t\tprintKubeConfigHelpOutput()\n\t\treturn 2\n\t}\n\n\t// DEBUG\n\tfmt.Println(availableKubeconfigs)\n\treturn 0\n}",
"func (p *processor) Run(_ context.Context, cfg *ucfg.Config) (err error) {\n\treturn p.Reload(cfg)\n}",
"func (o *Options) Run() error {\n\terr := o.Validate()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to validate options\")\n\t}\n\n\tconfig, err := o.LoadSourceConfig()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to load source config\")\n\t}\n\n\tfor i := range config.Spec.Groups {\n\t\tgroup := &config.Spec.Groups[i]\n\t\tfor j := range group.Repositories {\n\t\t\trepo := &group.Repositories[j]\n\n\t\t\tif o.Filter != \"\" && !strings.Contains(repo.Name, o.Filter) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr := o.UpgradeRepository(config, group, repo)\n\t\t\tif err != nil {\n\t\t\t\tlog.Logger().Errorf(\"failed to upgrade repository %s due to: %s\", repo.Name, err.Error())\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (ce *MqttConfigExecutor) StartConfig(config *gateways.ConfigContext) {\n\tce.GatewayConfig.Log.Info().Str(\"config-key\", config.Data.Src).Msg(\"operating on configuration...\")\n\tm, err := parseConfig(config.Data.Config)\n\tif err != nil {\n\t\tconfig.ErrChan <- err\n\t}\n\tce.GatewayConfig.Log.Info().Str(\"config-key\", config.Data.Src).Interface(\"config-value\", *m).Msg(\"mqtt configuration\")\n\n\tgo ce.listenEvents(m, config)\n\n\tfor {\n\t\tselect {\n\t\tcase <-config.StartChan:\n\t\t\tconfig.Active = true\n\t\t\tce.GatewayConfig.Log.Info().Str(\"config-key\", config.Data.Src).Msg(\"configuration is running\")\n\n\t\tcase data := <-config.DataChan:\n\t\t\tce.GatewayConfig.DispatchEvent(&gateways.GatewayEvent{\n\t\t\t\tSrc: config.Data.Src,\n\t\t\t\tPayload: data,\n\t\t\t})\n\n\t\tcase <-config.StopChan:\n\t\t\tce.GatewayConfig.Log.Info().Str(\"config-name\", config.Data.Src).Msg(\"stopping configuration\")\n\t\t\tconfig.DoneChan <- struct{}{}\n\t\t\tce.GatewayConfig.Log.Info().Str(\"config-name\", config.Data.Src).Msg(\"configuration stopped\")\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (p *Plan) RunWithConfig(address string, conf *consulapi.Config) error {\n\t// Setup the client\n\tp.address = address\n\tif conf == nil {\n\t\tconf = consulapi.DefaultConfig()\n\t}\n\tconf.Address = address\n\tconf.Datacenter = p.Datacenter\n\tconf.Token = p.Token\n\tclient, err := consulapi.NewClient(conf)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to connect to agent: %v\", err)\n\t}\n\n\t// Create the logger\n\toutput := p.LogOutput\n\tif output == nil {\n\t\toutput = os.Stderr\n\t}\n\tlogger := log.New(output, \"\", log.LstdFlags)\n\n\treturn p.RunWithClientAndLogger(client, logger)\n}",
"func (c *TargetConfigController) Run(workers int, stopCh <-chan struct{}) {\n\tdefer utilruntime.HandleCrash()\n\tdefer c.queue.ShutDown()\n\n\tglog.Infof(\"Starting TargetConfigController\")\n\tdefer glog.Infof(\"Shutting down TargetConfigController\")\n\n\t// doesn't matter what workers say, only start one.\n\tgo wait.Until(c.runWorker, time.Second, stopCh)\n\n\t<-stopCh\n}",
"func (cmd *InitCmd) Run(f factory.Factory) error {\n\t// Check if config already exists\n\tcmd.log = f.GetLog()\n\tconfigLoader, err := f.NewConfigLoader(\"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tconfigExists := configLoader.Exists()\n\tif configExists && !cmd.Reconfigure {\n\t\toptionNo := \"No\"\n\t\tcmd.log.WriteString(cmd.log.GetLevel(), \"\\n\")\n\t\tcmd.log.Warnf(\"%s already exists in this project\", ansi.Color(\"devspace.yaml\", \"white+b\"))\n\t\tresponse, err := cmd.log.Question(&survey.QuestionOptions{\n\t\t\tQuestion: \"Do you want to delete devspace.yaml and recreate it from scratch?\",\n\t\t\tOptions: []string{optionNo, \"Yes\"},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif response == optionNo {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// Delete config & overwrite config\n\tos.RemoveAll(\".devspace\")\n\n\t// Delete configs path\n\tos.Remove(constants.DefaultConfigsPath)\n\n\t// Delete config & overwrite config\n\tos.Remove(constants.DefaultConfigPath)\n\n\t// Delete config & overwrite config\n\tos.Remove(constants.DefaultVarsPath)\n\n\t// Execute plugin hook\n\terr = hook.ExecuteHooks(nil, nil, \"init\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Print DevSpace logo\n\tlog.PrintLogo()\n\n\t// Determine if we're initializing from scratch, or using docker-compose.yaml\n\tdockerComposePath, generateFromDockerCompose, err := cmd.shouldGenerateFromDockerCompose()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif generateFromDockerCompose {\n\t\terr = cmd.initDockerCompose(f, dockerComposePath)\n\t} else {\n\t\terr = cmd.initDevspace(f, configLoader)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.log.WriteString(logrus.InfoLevel, \"\\n\")\n\tcmd.log.Done(\"Project successfully initialized\")\n\tcmd.log.Info(\"Configuration saved in devspace.yaml - you can make adjustments as needed\")\n\tcmd.log.Infof(\"\\r \\nYou can now run:\\n1. %s - to pick which Kubernetes namespace to work in\\n2. 
%s - to start developing your project in Kubernetes\\n\\nRun `%s` or `%s` to see a list of available commands and flags\\n\", ansi.Color(\"devspace use namespace\", \"blue+b\"), ansi.Color(\"devspace dev\", \"blue+b\"), ansi.Color(\"devspace -h\", \"blue+b\"), ansi.Color(\"devspace [command] -h\", \"blue+b\"))\n\n\treturn nil\n}",
"func (e *Engine) Run(ctx context.Context) error {\n\tdefer e.dataStores.Close()\n\n\tfor _, cfg := range e.opts.AppConfigs {\n\t\tif err := e.initApp(ctx, cfg); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tclose(e.ready)\n\n\treturn e.run(ctx)\n}",
"func (gtc *GTClient) RunWithCfg() error {\n\n\treturn nil\n}",
"func (kr *KRun) LoadConfig(ctx context.Context) error {\n\terr := kr.K8SClient(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Load additional settings from env.\n\tkr.initFromEnv()\n\n\t// It is possible to have only one of the 2 mesh connector services installed\n\tif kr.XDSAddr == \"\" || kr.ProjectNumber == \"\" ||\n\t\t(kr.MeshConnectorAddr == \"\" && kr.MeshConnectorInternalAddr == \"\") {\n\t\terr := kr.loadMeshEnv(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn err\n}",
"func (container *Container) Run(configPath string, projectName string, volumes map[string]types.Volume, hostsEntries []types.HostsEntry) error {\n\t// set up logging for this run\n\tcolors := util.RandomColor()\n\tourColor := color.New(colors...).SprintfFunc()\n\tlogger := log.New(os.Stdout, fmt.Sprintf(\"[%s] \", ourColor(container.Name)), log.LstdFlags)\n\tlogger.Printf(\"Running\")\n\n\t// set a result to start with\n\tvar result error\n\tresult = nil\n\n\t// check to see if we are not already running a container with this project and name\n\t// get our name\n\tname, err := rkt.GetAppName(projectName, container.Name)\n\t// get a list of running pods\n\trunningPods, err := rkt.GetRunningPods(projectName)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// a pod of our name is already running, we dont continue\n\tfor runningName, _ := range runningPods.Pods {\n\t\tif runningName == name {\n\t\t\tlogger.Printf(\"Using already running container %s for %s.\", runningName, container.Name)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// get our command line\n\tcommandLine, err := container.getCommandLine(projectName, runningPods, logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// prefix volumes\n\tfor _, volume := range volumes {\n\t\tcommandLine = append(volume.GenerateCommandLine(), commandLine...)\n\t}\n\n\t// prefix hostsEntries\n\tfor _, entry := range hostsEntries {\n\t\tcommandLine = append(entry.GenerateCommandLine(), commandLine...)\n\t}\n\n\t// prefix our port maps\n\tfor _, entry := range container.Ports {\n\t\t// we do this as close to execution as possible to avoid conflicts\n\t\terr = entry.SetHostPort()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcommandLine = append(entry.GenerateCommandLine(), commandLine...)\n\t}\n\n\t// prefix TODO: we want to allow settings for these\n\tcommandLine = append(strings.Split(fmt.Sprintf(\"rkt run --local-config=%s --dns=host\", configPath), \" \"), commandLine...)\n\n\tlogger.Println(commandLine)\n\t// set up our command 
run\n\tcommand := exec.Command(commandLine[0], commandLine[1:]...)\n\n\t// setup our state condition results\n\tstatus := make(chan error)\n\t// setup our stop channel to let state conditions know they don't need to continue, we buffer as many values as we have handlers\n\tnumHandlers := container.StateConditions.Count()\n\tstop := make(chan bool, numHandlers)\n\n\t// handle timeouts if set\n\tif container.StateConditions.Timeout != nil {\n\t\tgo container.StateConditions.Timeout.Handle(status, stop, logger)\n\t}\n\n\t// handle log monitors if set (must happen before command is started)\n\tif len(container.StateConditions.FileMonitors) > 0 {\n\t\tfor _, monitor := range container.StateConditions.FileMonitors {\n\t\t\tgo func(monitor state.FileMonitorCondition, status chan error, stop chan bool, logger *log.Logger) {\n\t\t\t\tmonitor.Handle(status, stop, logger)\n\t\t\t}(monitor, status, stop, logger)\n\t\t}\n\t}\n\n\t// we want to both monitor and print outputs so we do things a bit different for this Handler. 
This has to go prior to\n\t// command.Start()\n\terr = container.handleOutputs(command, status, stop, logger)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// start the command\n\terr = command.Start()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// handle exit conditions if set (must happen after the command is started)\n\tif container.StateConditions.Exit != nil {\n\t\tgo container.StateConditions.Exit.Handle(command, status, stop, logger)\n\t} else {\n\t\t// if we don't have an exit handler, we build a default one to fail on any exit\n\t\texitHandler := state.ExitCondition{\n\t\t\tCodes: []int{-1},\n\t\t\tStatus: \"success\",\n\t\t}\n\t\tgo exitHandler.Handle(command, status, stop, logger)\n\t}\n\n\t// we wait for one of our conditions to return if we have any\n\tif container.StateConditions.Count() != 0 {\n\t\tresult = <-status\n\t\t// once one condition returns, we cancel the rest\n\t\tclose(stop)\n\t\tclose(status)\n\t\t//for i := 0; i < numHandlers; i++ {\n\t\t//\tstop <- true\n\t\t//}\n\t}\n\n\treturn result\n}",
"func Run(updateHandler func(updated *Config)) error {\n\tfor {\n\t\tnext := m.Next()\n\t\tnextCfg := next.(*Config)\n\t\terr := updateGlobals(nextCfg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tupdateHandler(nextCfg)\n\t}\n}",
"func Run(ctx context.Context, c *config.Config) error {\n\tdeprecatedLogger, err := util.MakeLogger(c.LogLevel, c.LogFormat)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to make logger: %w\", err)\n\t}\n\tvar logger logr.Logger = logrusr.NewLogger(deprecatedLogger)\n\n\tctrl.SetLogger(logger)\n\tsetupLog := ctrl.Log.WithName(\"setup\")\n\tsetupLog.Info(\"starting controller manager\", \"release\", Release, \"repo\", Repo, \"commit\", Commit)\n\n\tkubeconfig, err := c.GetKubeconfig()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"get kubeconfig from file %q: %w\", c.KubeconfigPath, err)\n\t}\n\n\t// set \"kubernetes.io/ingress.class\" to be used by controllers (defaults to \"kong\")\n\tsetupLog.Info(`the ingress class name has been set`, \"value\", c.IngressClassName)\n\n\tscheme := runtime.NewScheme()\n\tutilruntime.Must(clientgoscheme.AddToScheme(scheme))\n\tutilruntime.Must(konghqcomv1.AddToScheme(scheme))\n\tutilruntime.Must(configurationv1alpha1.AddToScheme(scheme))\n\tutilruntime.Must(configurationv1beta1.AddToScheme(scheme))\n\n\tcontrollerOpts := ctrl.Options{\n\t\tScheme: scheme,\n\t\tMetricsBindAddress: c.MetricsAddr,\n\t\tPort: 9443,\n\t\tHealthProbeBindAddress: c.ProbeAddr,\n\t\tLeaderElection: c.EnableLeaderElection,\n\t\tLeaderElectionID: c.LeaderElectionID,\n\t}\n\n\t// determine how to configure namespace watchers\n\tif strings.Contains(c.WatchNamespace, \",\") {\n\t\tsetupLog.Info(\"manager set up with multiple namespaces\", \"namespaces\", c.WatchNamespace)\n\t\t// this mode does not set the Namespace option, so the manager will default to watching all namespaces\n\t\t// MultiNamespacedCacheBuilder imposes a filter on top of that watch to retrieve scoped resources\n\t\t// from the watched namespaces only.\n\t\tcontrollerOpts.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(c.WatchNamespace, \",\"))\n\t} else {\n\t\tcontrollerOpts.Namespace = c.WatchNamespace\n\t}\n\n\t// build the controller manager\n\tmgr, err := 
ctrl.NewManager(kubeconfig, controllerOpts)\n\tif err != nil {\n\t\tsetupLog.Error(err, \"unable to start manager\")\n\t\treturn err\n\t}\n\n\tkongClient, err := c.GetKongClient(ctx)\n\tif err != nil {\n\t\tsetupLog.Error(err, \"cannot create a Kong Admin API client\")\n\t\treturn err\n\t}\n\n\t// configure the kong client\n\tkongConfig := sendconfig.Kong{\n\t\tURL: c.KongAdminURL,\n\t\tFilterTags: c.FilterTags,\n\t\tConcurrency: c.Concurrency,\n\t\tClient: kongClient,\n\t\tPluginSchemaStore: util.NewPluginSchemaStore(kongClient),\n\t}\n\n\t// determine the proxy synchronization strategy\n\tsyncTickDuration, err := time.ParseDuration(fmt.Sprintf(\"%gs\", c.ProxySyncSeconds))\n\tif err != nil {\n\t\tsetupLog.Error(err, \"%s is not a valid number of seconds to stagger the proxy server synchronization\")\n\t\treturn err\n\t}\n\n\t// start the proxy cache server\n\tprx, err := proxy.NewCacheBasedProxyWithStagger(ctx,\n\t\t// NOTE: logr-based loggers use the \"logger\" field instead of \"subsystem\". When replacing logrus with logr, replace\n\t\t// WithField(\"subsystem\", ...) 
with WithName(...).\n\t\tdeprecatedLogger.WithField(\"subsystem\", \"proxy-cache-resolver\"),\n\t\tmgr.GetClient(),\n\t\tkongConfig,\n\t\tc.IngressClassName,\n\t\tc.EnableReverseSync,\n\t\tsyncTickDuration,\n\t\tsendconfig.UpdateKongAdminSimple,\n\t)\n\tif err != nil {\n\t\tsetupLog.Error(err, \"unable to start proxy cache server\")\n\t\treturn err\n\t}\n\n\tcontrollers := []ControllerDef{\n\t\t// ---------------------------------------------------------------------------\n\t\t// Core API Controllers\n\t\t// ---------------------------------------------------------------------------\n\n\t\t{\n\t\t\tIsEnabled: &c.ServiceEnabled,\n\t\t\tController: &configuration.CoreV1ServiceReconciler{\n\t\t\t\tClient: mgr.GetClient(),\n\t\t\t\tLog: ctrl.Log.WithName(\"controllers\").WithName(\"Service\"),\n\t\t\t\tScheme: mgr.GetScheme(),\n\t\t\t\tProxy: prx,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tIsEnabled: &c.ServiceEnabled,\n\t\t\tController: &configuration.CoreV1EndpointsReconciler{\n\t\t\t\tClient: mgr.GetClient(),\n\t\t\t\tLog: ctrl.Log.WithName(\"controllers\").WithName(\"Endpoints\"),\n\t\t\t\tScheme: mgr.GetScheme(),\n\t\t\t\tProxy: prx,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tIsEnabled: &c.IngressNetV1Enabled,\n\t\t\tController: &configuration.NetV1IngressReconciler{\n\t\t\t\tClient: mgr.GetClient(),\n\t\t\t\tLog: ctrl.Log.WithName(\"controllers\").WithName(\"Ingress\").WithName(\"netv1\"),\n\t\t\t\tScheme: mgr.GetScheme(),\n\t\t\t\tProxy: prx,\n\t\t\t\tIngressClassName: c.IngressClassName,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tIsEnabled: &c.IngressNetV1beta1Enabled,\n\t\t\tController: &configuration.NetV1Beta1IngressReconciler{\n\t\t\t\tClient: mgr.GetClient(),\n\t\t\t\tLog: ctrl.Log.WithName(\"controllers\").WithName(\"Ingress\").WithName(\"netv1beta1\"),\n\t\t\t\tScheme: mgr.GetScheme(),\n\t\t\t\tProxy: prx,\n\t\t\t\tIngressClassName: c.IngressClassName,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tIsEnabled: &c.IngressExtV1beta1Enabled,\n\t\t\tController: 
&configuration.ExtV1Beta1IngressReconciler{\n\t\t\t\tClient: mgr.GetClient(),\n\t\t\t\tLog: ctrl.Log.WithName(\"controllers\").WithName(\"Ingress\").WithName(\"extv1beta1\"),\n\t\t\t\tScheme: mgr.GetScheme(),\n\t\t\t\tProxy: prx,\n\t\t\t\tIngressClassName: c.IngressClassName,\n\t\t\t},\n\t\t},\n\n\t\t// ---------------------------------------------------------------------------\n\t\t// Kong API Controllers\n\t\t// ---------------------------------------------------------------------------\n\n\t\t{\n\t\t\tIsEnabled: &c.UDPIngressEnabled,\n\t\t\tController: &kongctrl.KongV1Alpha1UDPIngressReconciler{\n\t\t\t\tClient: mgr.GetClient(),\n\t\t\t\tLog: ctrl.Log.WithName(\"controllers\").WithName(\"UDPIngress\"),\n\t\t\t\tScheme: mgr.GetScheme(),\n\t\t\t\tProxy: prx,\n\t\t\t\tIngressClassName: c.IngressClassName,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tIsEnabled: &c.TCPIngressEnabled,\n\t\t\tController: &kongctrl.KongV1Beta1TCPIngressReconciler{\n\t\t\t\tClient: mgr.GetClient(),\n\t\t\t\tLog: ctrl.Log.WithName(\"controllers\").WithName(\"TCPIngress\"),\n\t\t\t\tScheme: mgr.GetScheme(),\n\t\t\t\tProxy: prx,\n\t\t\t\tIngressClassName: c.IngressClassName,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tIsEnabled: &c.KongIngressEnabled,\n\t\t\tController: &kongctrl.KongV1KongIngressReconciler{\n\t\t\t\tClient: mgr.GetClient(),\n\t\t\t\tLog: ctrl.Log.WithName(\"controllers\").WithName(\"KongIngress\"),\n\t\t\t\tScheme: mgr.GetScheme(),\n\t\t\t\tProxy: prx,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tIsEnabled: &c.KongClusterPluginEnabled,\n\t\t\tController: &kongctrl.KongV1KongClusterPluginReconciler{\n\t\t\t\tClient: mgr.GetClient(),\n\t\t\t\tLog: ctrl.Log.WithName(\"controllers\").WithName(\"KongClusterPlugin\"),\n\t\t\t\tScheme: mgr.GetScheme(),\n\t\t\t\tProxy: prx,\n\t\t\t\tIngressClassName: c.IngressClassName,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tIsEnabled: &c.KongPluginEnabled,\n\t\t\tController: &kongctrl.KongV1KongPluginReconciler{\n\t\t\t\tClient: mgr.GetClient(),\n\t\t\t\tLog: 
ctrl.Log.WithName(\"controllers\").WithName(\"KongPlugin\"),\n\t\t\t\tScheme: mgr.GetScheme(),\n\t\t\t\tProxy: prx,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tIsEnabled: &c.KongConsumerEnabled,\n\t\t\tController: &kongctrl.KongV1KongConsumerReconciler{\n\t\t\t\tClient: mgr.GetClient(),\n\t\t\t\tLog: ctrl.Log.WithName(\"controllers\").WithName(\"KongConsumer\"),\n\t\t\t\tScheme: mgr.GetScheme(),\n\t\t\t\tProxy: prx,\n\t\t\t\tIngressClassName: c.IngressClassName,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, c := range controllers {\n\t\tif err := c.MaybeSetupWithManager(mgr); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to create controller %q: %w\", c.Name(), err)\n\t\t}\n\t}\n\n\t// BUG: kubebuilder (at the time of writing - 3.0.0-rc.1) does not allow this tag anywhere else than main.go\n\t// See https://github.com/kubernetes-sigs/kubebuilder/issues/932\n\t//+kubebuilder:scaffold:builder\n\n\tif err := mgr.AddHealthzCheck(\"health\", healthz.Ping); err != nil {\n\t\treturn fmt.Errorf(\"unable to setup healthz: %w\", err)\n\t}\n\tif err := mgr.AddReadyzCheck(\"check\", healthz.Ping); err != nil {\n\t\treturn fmt.Errorf(\"unable to setup readyz: %w\", err)\n\t}\n\n\tif c.AnonymousReports {\n\t\tsetupLog.Info(\"running anonymous reports\")\n\t\tif err := mgrutils.RunReport(ctx, kubeconfig, kongConfig, Release); err != nil {\n\t\t\tsetupLog.Error(err, \"anonymous reporting failed\")\n\t\t}\n\t} else {\n\t\tsetupLog.Info(\"anonymous reports disabled, skipping\")\n\t}\n\n\tsetupLog.Info(\"starting manager\")\n\treturn mgr.Start(ctx)\n}",
"func Run(config Config, sigCh <-chan bool) error {\n\tif err := config.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\twatcher, err := newFSWatcher(pluginapi.KubeletSocket)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer watcher.Close()\n\n\tfor {\n\t\tif restart, err := runOnce(config, watcher, sigCh); err != nil {\n\t\t\treturn err\n\t\t} else if !restart {\n\t\t\treturn nil\n\t\t}\n\t}\n}",
"func (e *EndComponent) Run(ctx context.Context, config *ucfg.Config) error {\n\treturn nil\n}",
"func (kr *KRun) LoadConfig() *KRun {\n\n\tif kr.KSA == \"\" {\n\t\t// Same environment used for VMs\n\t\tkr.KSA = os.Getenv(\"WORKLOAD_SERVICE_ACCOUNT\")\n\t}\n\tif kr.KSA == \"\" {\n\t\tkr.KSA = \"default\"\n\t}\n\n\tif kr.Namespace == \"\" {\n\t\t// Same environment used for VMs\n\t\tkr.Namespace = os.Getenv(\"WORKLOAD_NAMESPACE\")\n\t}\n\tif kr.Name == \"\" {\n\t\tkr.Name = os.Getenv(\"WORKLOAD_NAME\")\n\t}\n\tif kr.Gateway == \"\" {\n\t\tkr.Gateway = os.Getenv(\"GATEWAY_NAME\")\n\t}\n\n\tks := os.Getenv(\"K_SERVICE\")\n\tif kr.Namespace == \"\" {\n\t\tverNsName := strings.SplitN(ks, \"--\", 2)\n\t\tif len(verNsName) > 1 {\n\t\t\tks = verNsName[1]\n\t\t\tkr.Labels[\"ver\"] = verNsName[0]\n\t\t}\n\t\tparts := strings.Split(ks, \"-\")\n\t\tkr.Namespace = parts[0]\n\t\tif len(parts) > 1 {\n\t\t\tkr.Name = parts[1]\n\t\t}\n\t}\n\n\tif kr.Namespace == \"\" {\n\t\tkr.Namespace = \"default\"\n\t}\n\tif kr.Name == \"\" {\n\t\tkr.Name = kr.Namespace\n\t}\n\n\tkr.Aud2File = map[string]string{}\n\tprefix := \".\"\n\tif os.Getuid() == 0 {\n\t\tprefix = \"\"\n\t}\n\tif kr.BaseDir == \"\" {\n\t\tkr.BaseDir = os.Getenv(\"MESH_BASE_DIR\")\n\t}\n\tif kr.BaseDir != \"\" {\n\t\tprefix = kr.BaseDir\n\t}\n\tfor _, kv := range os.Environ() {\n\t\tkvl := strings.SplitN(kv, \"=\", 2)\n\t\tif strings.HasPrefix(kvl[0], \"K8S_SECRET_\") {\n\t\t\tkr.Secrets2Dirs[kvl[0][11:]] = prefix + kvl[1]\n\t\t}\n\t\tif strings.HasPrefix(kvl[0], \"K8S_CM_\") {\n\t\t\tkr.CM2Dirs[kvl[0][7:]] = prefix + kvl[1]\n\t\t}\n\t\tif strings.HasPrefix(kvl[0], \"K8S_TOKEN_\") {\n\t\t\tkr.Aud2File[kvl[0][10:]] = prefix + kvl[1]\n\t\t}\n\t\tif strings.HasPrefix(kvl[0], \"LABEL_\") {\n\t\t\tkr.Labels[kvl[0][6:]] = prefix + kvl[1]\n\t\t}\n\t}\n\n\tif kr.TrustDomain == \"\" {\n\t\tkr.TrustDomain = os.Getenv(\"TRUST_DOMAIN\")\n\t}\n\tif kr.TrustDomain == \"\" {\n\t\tkr.TrustDomain = kr.ProjectId + \".svc.id.goog\"\n\t}\n\tkr.Aud2File[kr.TrustDomain] = prefix + \"/var/run/secrets/tokens/istio-token\"\n\tif !kr.InCluster 
{\n\t\tkr.Aud2File[\"api\"] = prefix + \"/var/run/secrets/kubernetes.io/serviceaccount/token\"\n\t}\n\tif kr.KSA == \"\" {\n\t\tkr.KSA = \"default\"\n\t}\n\n\tif kr.XDSAddr == \"\" {\n\t\tkr.XDSAddr = os.Getenv(\"XDS_ADDR\")\n\t}\n\t// Advanced options\n\n\t// example dns:debug\n\tkr.AgentDebug = cfg(\"XDS_AGENT_DEBUG\", \"\")\n\n\treturn kr\n}",
"func ConfigAndRunApp(config *config.Configuration) {\n\tapi := new(Api)\n\tapi.Initialize(config)\n\tapi.Run(config.Address)\n}",
"func (d *AlertsRouter) Run(ctx context.Context) error {\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(d.adminConfigPollInterval):\n\t\t\tif err := d.SyncAndApplyConfigFromDatabase(); err != nil {\n\t\t\t\td.logger.Error(\"Unable to sync admin configuration\", \"error\", err)\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\t// Stop sending alerts to all external Alertmanager(s).\n\t\t\td.adminConfigMtx.Lock()\n\t\t\tfor orgID, s := range d.externalAlertmanagers {\n\t\t\t\tdelete(d.externalAlertmanagers, orgID) // delete before we stop to make sure we don't accept any more alerts.\n\t\t\t\ts.Stop()\n\t\t\t}\n\t\t\td.adminConfigMtx.Unlock()\n\n\t\t\treturn nil\n\t\t}\n\t}\n}",
"func (ac *Config) RunConfiguration(filename string, mux *http.ServeMux, withHandlerFunctions bool) error {\n\t// Retrieve a Lua state\n\tL := ac.luapool.Get()\n\n\t// Basic system functions, like log()\n\tac.LoadBasicSystemFunctions(L)\n\n\t// If there is a database backend\n\tif ac.perm != nil {\n\n\t\t// Retrieve the userstate\n\t\tuserstate := ac.perm.UserState()\n\n\t\t// Server configuration functions\n\t\tac.LoadServerConfigFunctions(L, filename)\n\n\t\tcreator := userstate.Creator()\n\n\t\t// Simpleredis data structures (could be used for storing server stats)\n\t\tdatastruct.LoadList(L, creator)\n\t\tdatastruct.LoadSet(L, creator)\n\t\tdatastruct.LoadHash(L, creator)\n\t\tdatastruct.LoadKeyValue(L, creator)\n\n\t\t// For saving and loading Lua functions\n\t\tcodelib.Load(L, creator)\n\n\t\t// For executing PostgreSQL queries\n\t\tpquery.Load(L)\n\n\t\t// For executing MSSQL queries\n\t\tmssql.Load(L)\n\t}\n\n\t// For handling JSON data\n\tjnode.LoadJSONFunctions(L)\n\tac.LoadJFile(L, filepath.Dir(filename))\n\tjnode.Load(L)\n\n\t// Extras\n\tpure.Load(L)\n\n\t// Plugins\n\tac.LoadPluginFunctions(L, nil)\n\n\t// Cache\n\tac.LoadCacheFunctions(L)\n\n\t// Pages and Tags\n\tonthefly.Load(L)\n\n\t// HTTP Client\n\thttpclient.Load(L, ac.serverHeaderName)\n\n\tif withHandlerFunctions {\n\t\t// Lua HTTP handlers\n\t\tac.LoadLuaHandlerFunctions(L, filename, mux, false, nil, ac.defaultTheme)\n\t}\n\n\t// Run the script\n\tif err := L.DoFile(filename); err != nil {\n\t\t// Close the Lua state\n\t\tL.Close()\n\n\t\t// Logging and/or HTTP response is handled elsewhere\n\t\treturn err\n\t}\n\n\t// Only put the Lua state back if there were no errors\n\tac.luapool.Put(L)\n\n\treturn nil\n}",
"func (b *Builder) InitConfig(ctx *interpolate.Context) (warnings []string, errors []error) {\n\tvar (\n\t\twarns []string\n\t\terrs []error\n\t)\n\n\twarns, errs = b.config.RemoteFileConfig.Prepare(ctx)\n\twarnings = append(warnings, warns...)\n\terrors = append(errors, errs...)\n\n\twarns, errs = b.config.ImageConfig.Prepare(ctx)\n\twarnings = append(warnings, warns...)\n\terrors = append(errors, errs...)\n\n\twarns, errs = b.config.QemuConfig.Prepare(ctx)\n\twarnings = append(warnings, warns...)\n\terrors = append(errors, errs...)\n\n\treturn warnings, errors\n}",
"func Run(c *config.Config, stopCh <-chan struct{}) error {\n\tklog.Info(version.Get().Pretty())\n\tklog.Infof(\"Controller Running with additionalTolerations %v\", c.Cfg.AdditionalTolerations)\n\n\t// start a controller on instances of lb\n\tcontroller := lbcontroller.NewLoadBalancerController(c.Cfg)\n\tcontroller.Run(5, stopCh)\n\n\treturn nil\n}",
"func (c *updateConfigCmd) Run(k *kong.Context, logger logging.Logger) error {\n\tlogger = logger.WithValues(\"Name\", c.Name)\n\tkubeConfig, err := ctrl.GetConfig()\n\tif err != nil {\n\t\tlogger.Debug(errKubeConfig, \"error\", err)\n\t\treturn errors.Wrap(err, errKubeConfig)\n\t}\n\tlogger.Debug(\"Found kubeconfig\")\n\tkube, err := typedclient.NewForConfig(kubeConfig)\n\tif err != nil {\n\t\tlogger.Debug(errKubeClient, \"error\", err)\n\t\treturn errors.Wrap(err, errKubeClient)\n\t}\n\tlogger.Debug(\"Created kubernetes client\")\n\tprevConf, err := kube.Configurations().Get(context.Background(), c.Name, metav1.GetOptions{})\n\tif err != nil {\n\t\terr = warnIfNotFound(err)\n\t\tlogger.Debug(\"Failed to update configuration\", \"error\", err)\n\t\treturn errors.Wrap(err, \"cannot update configuration\")\n\t}\n\tlogger.Debug(\"Found previous configuration object\")\n\tpkg := prevConf.Spec.Package\n\tpkgReference, err := name.ParseReference(pkg, name.WithDefaultRegistry(\"\"))\n\tif err != nil {\n\t\terr = warnIfNotFound(err)\n\t\tlogger.Debug(\"Failed to update configuration\", \"error\", err)\n\t\treturn errors.Wrap(err, \"cannot update configuration\")\n\t}\n\tnewPkg := \"\"\n\tif strings.HasPrefix(c.Tag, \"sha256\") {\n\t\tnewPkg = pkgReference.Context().Digest(c.Tag).Name()\n\t} else {\n\t\tnewPkg = pkgReference.Context().Tag(c.Tag).Name()\n\t}\n\tprevConf.Spec.Package = newPkg\n\treq, err := json.Marshal(prevConf)\n\tif err != nil {\n\t\terr = warnIfNotFound(err)\n\t\tlogger.Debug(\"Failed to update configuration\", \"error\", err)\n\t\treturn errors.Wrap(err, \"cannot update configuration\")\n\t}\n\tres, err := kube.Configurations().Patch(context.Background(), c.Name, types.MergePatchType, req, metav1.PatchOptions{})\n\tif err != nil {\n\t\terr = warnIfNotFound(err)\n\t\tlogger.Debug(\"Failed to update configuration\", \"error\", err)\n\t\treturn errors.Wrap(err, \"cannot update configuration\")\n\t}\n\t_, err = fmt.Fprintf(k.Stdout, \"%s/%s updated\\n\", 
strings.ToLower(v1.ConfigurationGroupKind), res.GetName())\n\treturn err\n}",
"func (cmd *SyncCmd) Run(f factory.Factory) error {\n\tif cmd.Ctx == nil {\n\t\tvar cancelFn context.CancelFunc\n\t\tcmd.Ctx, cancelFn = context.WithCancel(context.Background())\n\t\tdefer cancelFn()\n\t}\n\n\t// Switch working directory\n\tif cmd.ConfigPath != \"\" {\n\t\t_, err := os.Stat(cmd.ConfigPath)\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"--config is specified, but config %s cannot be loaded: %v\", cmd.GlobalFlags.ConfigPath, err)\n\t\t}\n\t}\n\n\t// Load generated config if possible\n\tvar err error\n\tvar localCache localcache.Cache\n\tlogger := f.GetLog()\n\tconfigOptions := cmd.ToConfigOptions()\n\tconfigLoader, err := f.NewConfigLoader(cmd.ConfigPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif configLoader.Exists() {\n\t\tif cmd.GlobalFlags.ConfigPath != \"\" {\n\t\t\tconfigExists, err := configLoader.SetDevSpaceRoot(logger)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t} else if !configExists {\n\t\t\t\treturn errors.New(message.ConfigNotFound)\n\t\t\t}\n\n\t\t\tlocalCache, err = configLoader.LoadLocalCache()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tlogger.Warnf(\"If you want to use the sync paths from `devspace.yaml`, use the `--config=devspace.yaml` flag for this command.\")\n\t\t}\n\t}\n\n\t// Get config with adjusted cluster config\n\tclient, err := f.NewKubeClientFromContext(cmd.KubeContext, cmd.Namespace)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"new kube client\")\n\t}\n\n\t// If the current kube context or namespace is different from old,\n\t// show warnings and reset kube client if necessary\n\tclient, err = kubectl.CheckKubeContext(client, localCache, cmd.NoWarn, cmd.SwitchContext, false, logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar configInterface config.Config\n\tif configLoader.Exists() && cmd.GlobalFlags.ConfigPath != \"\" {\n\t\tconfigInterface, err = configLoader.LoadWithCache(context.Background(), localCache, client, configOptions, logger)\n\t\tif err != nil 
{\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// create the devspace context\n\tctx := devspacecontext.NewContext(cmd.Ctx, nil, logger).\n\t\tWithConfig(configInterface).\n\t\tWithKubeClient(client)\n\n\t// Execute plugin hook\n\terr = hook.ExecuteHooks(ctx, nil, \"sync\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// get image selector if specified\n\timageSelector, err := getImageSelector(ctx, configLoader, configOptions, cmd.ImageSelector)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Build params\n\toptions := targetselector.NewOptionsFromFlags(cmd.Container, cmd.LabelSelector, imageSelector, cmd.Namespace, cmd.Pod).\n\t\tWithPick(cmd.Pick).\n\t\tWithWait(cmd.Wait)\n\n\tif cmd.DownloadOnly && cmd.UploadOnly {\n\t\treturn errors.New(\"--upload-only cannot be used together with --download-only\")\n\t}\n\n\t// Create the sync config to apply\n\tsyncConfig := nameConfig{\n\t\tdevPod: &latest.DevPod{},\n\t\tsyncConfig: &latest.SyncConfig{},\n\t}\n\tif cmd.GlobalFlags.ConfigPath != \"\" && configInterface != nil {\n\t\tdevSection := configInterface.Config().Dev\n\t\tsyncConfigs := []nameConfig{}\n\t\tfor _, v := range devSection {\n\t\t\tloader.EachDevContainer(v, func(devContainer *latest.DevContainer) bool {\n\t\t\t\tfor _, s := range devContainer.Sync {\n\t\t\t\t\tn, err := fromSyncConfig(v, devContainer.Container, s)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t\tsyncConfigs = append(syncConfigs, n)\n\t\t\t\t}\n\t\t\t\treturn true\n\t\t\t})\n\t\t}\n\t\tif len(syncConfigs) == 0 {\n\t\t\treturn fmt.Errorf(\"no sync config found in %s\", cmd.GlobalFlags.ConfigPath)\n\t\t}\n\n\t\t// Check which sync config should be used\n\t\tif len(syncConfigs) > 1 {\n\t\t\t// Select syncConfig to use\n\t\t\tsyncConfigNames := []string{}\n\t\t\tfor _, sc := range syncConfigs {\n\t\t\t\tsyncConfigNames = append(syncConfigNames, sc.name)\n\t\t\t}\n\n\t\t\tanswer, err := logger.Question(&survey.QuestionOptions{\n\t\t\t\tQuestion: \"Multiple sync configurations 
found. Which one do you want to use?\",\n\t\t\t\tDefaultValue: syncConfigNames[0],\n\t\t\t\tOptions: syncConfigNames,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor idx, n := range syncConfigNames {\n\t\t\t\tif answer == n {\n\t\t\t\t\tsyncConfig = syncConfigs[idx]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tsyncConfig = syncConfigs[0]\n\t\t}\n\t}\n\n\t// apply the flags to the empty sync config or loaded sync config from the devspace.yaml\n\tvar configImageSelector []string\n\tif syncConfig.devPod.ImageSelector != \"\" {\n\t\timageSelector, err := runtimevar.NewRuntimeResolver(ctx.WorkingDir(), true).FillRuntimeVariablesAsImageSelector(ctx.Context(), syncConfig.devPod.ImageSelector, ctx.Config(), ctx.Dependencies())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tconfigImageSelector = []string{imageSelector.Image}\n\t}\n\toptions = options.ApplyConfigParameter(syncConfig.containerName, syncConfig.devPod.LabelSelector, configImageSelector, syncConfig.devPod.Namespace, \"\")\n\toptions, err = cmd.applyFlagsToSyncConfig(syncConfig.syncConfig, options)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"apply flags to sync config\")\n\t}\n\n\t// Start sync\n\toptions = options.WithSkipInitContainers(true)\n\treturn sync.StartSyncFromCmd(ctx, targetselector.NewTargetSelector(options), syncConfig.devPod.Name, syncConfig.syncConfig, cmd.NoWatch)\n}",
"func Run(m *testing.M, opts ...RunOption) {\n\t// Run tests in a separate function such that we can use deferred statements and still\n\t// (indirectly) call `os.Exit()` in case the test setup failed.\n\tif err := func() error {\n\t\tvar cfg runConfig\n\t\tfor _, opt := range opts {\n\t\t\topt(&cfg)\n\t\t}\n\n\t\tdefer mustHaveNoChildProcess()\n\t\tif !cfg.disableGoroutineChecks {\n\t\t\tdefer mustHaveNoGoroutines()\n\t\t}\n\n\t\tcleanup, err := configure()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"test configuration: %w\", err)\n\t\t}\n\t\tdefer cleanup()\n\n\t\tif cfg.setup != nil {\n\t\t\tif err := cfg.setup(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error calling setup function: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\tm.Run()\n\n\t\treturn nil\n\t}(); err != nil {\n\t\tfmt.Printf(\"%s\", err)\n\t\tos.Exit(1)\n\t}\n}",
"func (c *addDutRun) Run(a subcommands.Application, args []string, env subcommands.Env) int {\n\tif err := c.innerRun(a, args, env); err != nil {\n\t\tPrintError(a.GetErr(), err)\n\t\treturn 1\n\t}\n\treturn 0\n}",
"func (g *Group) Run() (err error) {\n\t// run config registration and flag parsing stages\n\tif interrupted, errRun := g.RunConfig(); interrupted || errRun != nil {\n\t\treturn errRun\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tg.log.Fatal().Err(err).Stack().Msg(\"unexpected exit\")\n\t\t}\n\t}()\n\n\t// execute pre run stage and exit on error\n\tfor idx := range g.p {\n\t\t// a PreRunner might have been deregistered during Run\n\t\tif g.p[idx] == nil {\n\t\t\tcontinue\n\t\t}\n\t\tg.log.Debug().Uint32(\"ran\", uint32(idx+1)).Uint32(\"total\", uint32(len(g.p))).Str(\"name\", g.p[idx].Name()).Msg(\"pre-run\")\n\t\tif err := g.p[idx].PreRun(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tswg := &sync.WaitGroup{}\n\tswg.Add(len(g.s))\n\tgo func() {\n\t\tswg.Wait()\n\t\tclose(g.readyCh)\n\t}()\n\t// feed our registered services to our internal run.Group\n\tfor idx := range g.s {\n\t\t// a Service might have been deregistered during Run\n\t\ts := g.s[idx]\n\t\tif s == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tg.log.Debug().Uint32(\"total\", uint32(len(g.s))).Uint32(\"ran\", uint32(idx+1)).Str(\"name\", s.Name()).Msg(\"serve\")\n\t\tg.r.Add(func() error {\n\t\t\tnotify := s.Serve()\n\t\t\tswg.Done()\n\t\t\t<-notify\n\t\t\treturn nil\n\t\t}, func(_ error) {\n\t\t\tg.log.Debug().Uint32(\"total\", uint32(len(g.s))).Uint32(\"ran\", uint32(idx+1)).Str(\"name\", s.Name()).Msg(\"stop\")\n\t\t\ts.GracefulStop()\n\t\t})\n\t}\n\n\t// start registered services and block\n\treturn g.r.Run()\n}",
"func (m *Manager) ApplyConfig(cfg *config.Config) error {\n\t// Update only if a config change is detected. If TLS configuration is\n\t// set, we have to restart the manager to make sure that new TLS\n\t// certificates are picked up.\n\tvar blankTLSConfig config_util.TLSConfig\n\tif reflect.DeepEqual(m.config, cfg.TracingConfig) && m.config.TLSConfig == blankTLSConfig {\n\t\treturn nil\n\t}\n\n\tif m.shutdownFunc != nil {\n\t\tif err := m.shutdownFunc(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to shut down the tracer provider: %w\", err)\n\t\t}\n\t}\n\n\t// If no endpoint is set, assume tracing should be disabled.\n\tif cfg.TracingConfig.Endpoint == \"\" {\n\t\tm.config = cfg.TracingConfig\n\t\tm.shutdownFunc = nil\n\t\totel.SetTracerProvider(trace.NewNoopTracerProvider())\n\t\tlevel.Info(m.logger).Log(\"msg\", \"Tracing provider uninstalled.\")\n\t\treturn nil\n\t}\n\n\ttp, shutdownFunc, err := buildTracerProvider(context.Background(), cfg.TracingConfig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to install a new tracer provider: %w\", err)\n\t}\n\n\tm.shutdownFunc = shutdownFunc\n\tm.config = cfg.TracingConfig\n\totel.SetTracerProvider(tp)\n\n\tlevel.Info(m.logger).Log(\"msg\", \"Successfully installed a new tracer provider.\")\n\treturn nil\n}",
"func (o *ConfigCleanOption) Run(cmd *cobra.Command, args []string) (err error) {\n\tif config := getConfig(); config == nil {\n\t\tcmd.Println(\"cannot found config file\")\n\t}\n\to.Logger = cmd\n\n\titemCount := len(config.JenkinsServers)\n\tcheckResult := make(chan CheckResult, itemCount)\n\n\tfor _, jenkins := range config.JenkinsServers {\n\t\tgo func(target cfg.JenkinsServer) {\n\t\t\tcheckResult <- o.Check(target)\n\t\t}(jenkins)\n\t}\n\n\tcheckResultList := make([]CheckResult, itemCount)\n\tfor i := range config.JenkinsServers {\n\t\tcheckResultList[i] = <-checkResult\n\t}\n\n\t// do the clean work\n\terr = o.CleanByCondition(checkResultList)\n\tcmd.Println()\n\treturn\n}",
"func RunRun(r *cmd.RootCMD, c *cmd.CMD) {\n\tgFlags := r.Flags.(*GlobalFlags)\n\targs := c.Args.(*RunArgs)\n\tflags := c.Flags.(*RunFlags)\n\t// Enable Debug Output\n\tif gFlags.Debug {\n\t\tlog.SetLevel(level.Debug)\n\t}\n\tlog.Debugln(\"Started usysconf\")\n\tdefer log.Debugln(\"Exiting usysconf\")\n\t// Root user check\n\tif os.Geteuid() != 0 {\n\t\tlog.Fatalln(\"You must have root privileges to run triggers\")\n\t}\n\t// Set Chroot as needed\n\tif util.IsChroot() {\n\t\tgFlags.Chroot = true\n\t}\n\t// Set Live as needed\n\tif util.IsLive() {\n\t\tgFlags.Live = true\n\t}\n\t// Load Triggers\n\ttm, err := config.LoadAll()\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to load triggers, reason: %s\\n\", err)\n\t}\n\t// If the names flag is not present, retrieve the names of the\n\t// configurations in the system and usr directories.\n\tn := args.Triggers\n\tif len(n) == 0 {\n\t\tfor k := range tm {\n\t\t\tn = append(n, k)\n\t\t}\n\t}\n\t// Establish scope of operations\n\ts := triggers.Scope{\n\t\tChroot: gFlags.Chroot,\n\t\tDebug: gFlags.Debug,\n\t\tDryRun: flags.DryRun,\n\t\tForced: flags.Force,\n\t\tLive: gFlags.Live,\n\t}\n\t// Run triggers\n\ttm.Run(s, n)\n}",
"func (srv *Server) Run() error {\n\tvar err error\n\n\t// Initiate a new logger\n\tsrv.log = logrus.New()\n\tif srv.cfg.GetBool(\"debug\") {\n\t\tsrv.log.Level = logrus.DebugLevel\n\t\tsrv.log.Debug(\"Enabling Debug Logging\")\n\t}\n\tif srv.cfg.GetBool(\"trace\") {\n\t\tsrv.log.Level = logrus.TraceLevel\n\t\tsrv.log.Debug(\"Enabling Trace Logging\")\n\t}\n\tif srv.cfg.GetBool(\"disable_logging\") {\n\t\tsrv.log.Level = logrus.FatalLevel\n\t}\n\n\t// Setup Scheduler\n\tsrv.scheduler = tasks.New()\n\tdefer srv.scheduler.Stop()\n\n\t// Config Reload\n\tif srv.cfg.GetInt(\"config_watch_interval\") > 0 {\n\t\t_, err := srv.scheduler.Add(&tasks.Task{\n\t\t\tInterval: time.Duration(srv.cfg.GetInt(\"config_watch_interval\")) * time.Second,\n\t\t\tTaskFunc: func() error {\n\t\t\t\t// Reload config using Viper's Watch capabilities\n\t\t\t\terr := srv.cfg.WatchRemoteConfig()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// Support hot enable/disable of debug logging\n\t\t\t\tif srv.cfg.GetBool(\"debug\") {\n\t\t\t\t\tsrv.log.Level = logrus.DebugLevel\n\t\t\t\t}\n\n\t\t\t\t// Support hot enable/disable of trace logging\n\t\t\t\tif srv.cfg.GetBool(\"trace\") {\n\t\t\t\t\tsrv.log.Level = logrus.TraceLevel\n\t\t\t\t}\n\n\t\t\t\t// Support hot enable/disable of all logging\n\t\t\t\tif srv.cfg.GetBool(\"disable_logging\") {\n\t\t\t\t\tsrv.log.Level = logrus.FatalLevel\n\t\t\t\t}\n\n\t\t\t\tsrv.log.Tracef(\"Config reloaded from Consul\")\n\t\t\t\treturn nil\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\tsrv.log.Errorf(\"Error scheduling Config watcher - %s\", err)\n\t\t}\n\t}\n\n\t// Setup the DB Connection\n\tsrv.kv, err = redis.Dial(redis.Config{\n\t\tServer: srv.cfg.GetString(\"kv_server\"),\n\t\tPassword: srv.cfg.GetString(\"kv_password\"),\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not establish database connection - %s\", err)\n\t}\n\tdefer srv.kv.Close()\n\n\t// Initialize the DB\n\terr = srv.kv.Setup()\n\tif err != nil {\n\t\treturn 
fmt.Errorf(\"could not setup database - %s\", err)\n\t}\n\n\t// Setup the HTTP Server\n\tsrv.httpRouter = httprouter.New()\n\tsrv.httpServer = &http.Server{\n\t\tAddr: srv.cfg.GetString(\"listen_addr\"),\n\t\tHandler: srv.httpRouter,\n\t}\n\n\t// Setup TLS Configuration\n\tif srv.cfg.GetBool(\"enable_tls\") {\n\t\tsrv.httpServer.TLSConfig = &tls.Config{\n\t\t\tMinVersion: tls.VersionTLS12,\n\t\t\tCipherSuites: []uint16{\n\t\t\t\ttls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,\n\t\t\t\ttls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,\n\t\t\t},\n\t\t}\n\t}\n\n\t// Kick off Graceful Shutdown Go Routine\n\tgo func() {\n\t\t// Make the Trap\n\t\ttrap := make(chan os.Signal, 1)\n\t\tsignal.Notify(trap, syscall.SIGTERM)\n\n\t\t// Wait for a signal then action\n\t\ts := <-trap\n\t\tsrv.log.Infof(\"Received shutdown signal %s\", s)\n\n\t\t// Shutdown the HTTP Server\n\t\terr := srv.httpServer.Shutdown(context.Background())\n\t\tif err != nil {\n\t\t\tsrv.log.Errorf(\"Received errors when shutting down HTTP sessions %s\", err)\n\t\t}\n\n\t\t// Close DB Sessions\n\t\tsrv.kv.Close()\n\n\t\t// Shutdown the app via runCtx\n\t\tsrv.runCancel()\n\t}()\n\n\t// Register Health Check Handler used for Liveness checks\n\tsrv.httpRouter.GET(\"/health\", srv.middleware(srv.Health))\n\n\t// Register Health Check Handler used for Readiness checks\n\tsrv.httpRouter.GET(\"/ready\", srv.middleware(srv.Ready))\n\n\t// Register Hello World Handler\n\tsrv.httpRouter.GET(\"/hello\", srv.middleware(srv.Hello))\n\tsrv.httpRouter.POST(\"/hello\", srv.middleware(srv.SetHello))\n\tsrv.httpRouter.PUT(\"/hello\", srv.middleware(srv.SetHello))\n\n\t// Start HTTP Listener\n\tsrv.log.Infof(\"Starting Listener on %s\", srv.cfg.GetString(\"listen_addr\"))\n\tif srv.cfg.GetBool(\"enable_tls\") {\n\t\terr := srv.httpServer.ListenAndServeTLS(srv.cfg.GetString(\"cert_file\"), srv.cfg.GetString(\"key_file\"))\n\t\tif err != nil {\n\t\t\tif err == http.ErrServerClosed {\n\t\t\t\t// Wait until all outstanding requests are 
done\n\t\t\t\t<-srv.runCtx.Done()\n\t\t\t\treturn ErrShutdown\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\terr = srv.httpServer.ListenAndServe()\n\tif err != nil {\n\t\tif err == http.ErrServerClosed {\n\t\t\t// Wait until all outstanding requests are done\n\t\t\t<-srv.runCtx.Done()\n\t\t\treturn ErrShutdown\n\t\t}\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func Run(ctx context.Context, client *guardian.Client, config Config) (err error) {\n\tif config.Icon == nil {\n\t\tconfig.Icon = leaseui.DefaultIcon()\n\t}\n\n\trunner, err := New(client, config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn runner.Run(ctx)\n\n\t/*\n\t\tg, ctx := errgroup.WithContext(ctx)\n\n\t\trun := func() error {\n\t\t\trunner, err := New(client, config)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn runner.Run(ctx)\n\t\t}\n\n\t\tg.Go(run)\n\t\tg.Go(run)\n\n\t\treturn g.Wait()\n\t*/\n}",
"func (c *LdapChecker) Run(config v1.CanarySpec) []*pkg.CheckResult {\n\tvar results []*pkg.CheckResult\n\tfor _, conf := range config.LDAP {\n\t\tresults = append(results, c.Check(conf))\n\t}\n\treturn results\n}",
"func (c *Controller) Run(stopCh <-chan struct{}) error {\n\t// Normally, we let informers start after all controllers. However, in this case we need namespaces to start and sync\n\t// first, so we have DiscoveryNamespacesFilter ready to go. This avoids processing objects that would be filtered during startup.\n\tc.namespaces.Start(stopCh)\n\t// Wait for namespace informer synced, which implies discovery filter is synced as well\n\tif !kube.WaitForCacheSync(\"namespace\", stopCh, c.namespaces.HasSynced) {\n\t\treturn fmt.Errorf(\"failed to sync namespaces\")\n\t}\n\t// run handlers for the config cluster; do not store this *Cluster in the ClusterStore or give it a SyncTimeout\n\t// this is done outside the goroutine, we should block other Run/startFuncs until this is registered\n\tconfigCluster := &Cluster{Client: c.configClusterClient, ID: c.configClusterID}\n\tc.handleAdd(configCluster, stopCh)\n\tgo func() {\n\t\tt0 := time.Now()\n\t\tlog.Info(\"Starting multicluster remote secrets controller\")\n\t\t// we need to start here when local cluster secret watcher enabled\n\t\tif features.LocalClusterSecretWatcher && features.ExternalIstiod {\n\t\t\tc.secrets.Start(stopCh)\n\t\t}\n\t\tif !kube.WaitForCacheSync(\"multicluster remote secrets\", stopCh, c.secrets.HasSynced) {\n\t\t\treturn\n\t\t}\n\t\tlog.Infof(\"multicluster remote secrets controller cache synced in %v\", time.Since(t0))\n\t\tc.queue.Run(stopCh)\n\t}()\n\treturn nil\n}",
"func (c *volumeEngine) Run() (op []byte, err error) {\n\tm, err := cast.ConfigToMap(c.prepareFinalConfig())\n\tif err != nil {\n\t\treturn\n\t}\n\t// set customized config\n\tc.engine.SetConfig(m)\n\t// delegate to generic cas template engine\n\treturn c.engine.Run()\n}",
"func Run(t *testing.T, configOpt core.ConfigProvider, sdkOpts ...fabsdk.Option) {\n\tsetupAndRun(t, true, configOpt, e2eTest, sdkOpts...)\n}",
"func Run(c *Config, logger *zap.Logger) error {\n\tif c.Duration > 0 {\n\t\tc.Traces = 0\n\t} else if c.Traces <= 0 {\n\t\treturn fmt.Errorf(\"either `traces` or `duration` must be greater than 0\")\n\t}\n\n\twg := sync.WaitGroup{}\n\tvar running uint32 = 1\n\tfor i := 0; i < c.Workers; i++ {\n\t\twg.Add(1)\n\t\tw := worker{\n\t\t\tid: i,\n\t\t\ttraces: c.Traces,\n\t\t\tmarshal: c.Marshal,\n\t\t\tdebug: c.Debug,\n\t\t\tfirehose: c.Firehose,\n\t\t\tpause: c.Pause,\n\t\t\tduration: c.Duration,\n\t\t\trunning: &running,\n\t\t\twg: &wg,\n\t\t\tlogger: logger.With(zap.Int(\"worker\", i)),\n\t\t}\n\n\t\tgo w.simulateTraces()\n\t}\n\tif c.Duration > 0 {\n\t\ttime.Sleep(c.Duration)\n\t\tatomic.StoreUint32(&running, 0)\n\t}\n\twg.Wait()\n\treturn nil\n}",
"func (i *interactor) Config(args ...string) error {\n\ti.logger.WithField(\"args\", args).Info(\"Configuring.\")\n\tif out, err := i.executor.Run(append([]string{\"config\"}, args...)...); err != nil {\n\t\treturn fmt.Errorf(\"error configuring %v: %w %v\", args, err, string(out))\n\t}\n\treturn nil\n}",
"func ConfigAndRunApp(config *config.Config) {\r\n\tapp := new(App)\r\n\tapp.Initialize(config)\r\n\tapp.Run(config.ServerHost)\r\n}",
"func Run(rc types.ResourceConfig) int {\n\t// Read files.\n\tf, err := readFiles()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed reading files: %v\\n\", err)\n\n\t\treturn ExitError\n\t}\n\n\tc, err := prepare(f, rc)\n\tif err != nil {\n\t\tfmt.Printf(\"Failed preparing object: %v\\n\", err)\n\n\t\treturn ExitError\n\t}\n\n\t// Calculate and print diff.\n\tfmt.Printf(\"Calculating diff...\\n\\n\")\n\n\td := cmp.Diff(c.Containers().ToExported().PreviousState, c.Containers().DesiredState())\n\n\tif d == \"\" {\n\t\tfmt.Println(\"No changes required\")\n\n\t\treturn ExitOK\n\t}\n\n\tfmt.Printf(\"Following changes required:\\n\\n%s\\n\\n\", d)\n\n\treturn deploy(c)\n}",
"func (c *RunCommand) Run() error {\n\tconf, err := c.loadConfig()\n\tif err != nil {\n\t\tlog.Fatalf(\"error reading config: %s\", err)\n\t}\n\n\tmonitor := monitoring.NewMonitor(log.StandardLogger())\n\n\tsourceManager := connection.NewManager()\n\ttargetManager := connection.NewManager()\n\n\tlogBroker := broker.NewBroker(c.Workers, monitor)\n\n\tif c.Web || c.Ui {\n\t\tc.startHttp(monitor)\n\t}\n\n\tfor name, source := range conf.Sources {\n\t\tswitch source.Provider {\n\t\tcase \"dummy\":\n\t\t\tsourceManager.AddConnection(name, &dummy.Source{})\n\t\tcase \"nomad\":\n\t\t\tsourceManager.AddConnection(name, &nomad.Source{\n\t\t\t\tConfig: source.Config,\n\t\t\t\tConsulAddr: c.Consul,\n\t\t\t})\n\t\t}\n\t}\n\n\tfor name, target := range conf.Targets {\n\t\tswitch target.Provider {\n\t\tcase \"blackhole\":\n\t\t\ttargetManager.AddConnection(name, &blackhole.Target{})\n\t\tcase \"stdout\":\n\t\t\ttargetManager.AddConnection(name, &stdout.Target{})\n\t\tcase \"logzio\":\n\t\t\ttargetManager.AddConnection(name, &logzio.Target{\n\t\t\t\tConfig: target.Config,\n\t\t\t})\n\t\t}\n\t}\n\n\terr = c.startProcesses(conf, sourceManager, targetManager, logBroker)\n\tif err != nil {\n\t\tlog.Fatalf(\"error starting processes: %s\", err)\n\t}\n\n\treturn nil\n}",
"func Run(rpcCfg RPCConfig) error {\n\tconfig := DefaultConfig()\n\n\t// Parse command line flags.\n\tparser := flags.NewParser(&config, flags.Default)\n\tparser.SubcommandsOptional = true\n\n\t_, err := parser.Parse()\n\tif e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Parse ini file.\n\tloopDir := lncfg.CleanAndExpandPath(config.LoopDir)\n\tconfigFile := lncfg.CleanAndExpandPath(config.ConfigFile)\n\n\t// If our loop directory is set and the config file parameter is not\n\t// set, we assume that they want to point to a config file in their\n\t// loop dir. However, if the config file has a non-default value, then\n\t// we leave the config parameter as its custom value.\n\tif loopDir != loopDirBase && configFile == defaultConfigFile {\n\t\tconfigFile = filepath.Join(\n\t\t\tloopDir, defaultConfigFilename,\n\t\t)\n\t}\n\n\tif err := flags.IniParse(configFile, &config); err != nil {\n\t\t// If it's a parsing related error, then we'll return\n\t\t// immediately, otherwise we can proceed as possibly the config\n\t\t// file doesn't exist which is OK.\n\t\tif _, ok := err.(*flags.IniError); ok {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Parse command line flags again to restore flags overwritten by ini\n\t// parse.\n\t_, err = parser.Parse()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Show the version and exit if the version flag was specified.\n\tappName := filepath.Base(os.Args[0])\n\tappName = strings.TrimSuffix(appName, filepath.Ext(appName))\n\tif config.ShowVersion {\n\t\tfmt.Println(appName, \"version\", loop.Version())\n\t\tos.Exit(0)\n\t}\n\n\t// Special show command to list supported subsystems and exit.\n\tif config.DebugLevel == \"show\" {\n\t\tfmt.Printf(\"Supported subsystems: %v\\n\",\n\t\t\tlogWriter.SupportedSubsystems())\n\t\tos.Exit(0)\n\t}\n\n\t// Validate our config before we proceed.\n\tif err := Validate(&config); err != nil {\n\t\treturn err\n\t}\n\n\t// Initialize 
logging at the default logging level.\n\terr = logWriter.InitLogRotator(\n\t\tfilepath.Join(config.LogDir, defaultLogFilename),\n\t\tconfig.MaxLogFileSize, config.MaxLogFiles,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = build.ParseAndSetDebugLevels(config.DebugLevel, logWriter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Print the version before executing either primary directive.\n\tlog.Infof(\"Version: %v\", loop.Version())\n\n\tlisCfg := newListenerCfg(&config, rpcCfg)\n\n\t// Execute command.\n\tif parser.Active == nil {\n\t\tsignal.Intercept()\n\n\t\tdaemon := New(&config, lisCfg)\n\t\tif err := daemon.Start(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tselect {\n\t\tcase <-signal.ShutdownChannel():\n\t\t\tlog.Infof(\"Received SIGINT (Ctrl+C).\")\n\t\t\tdaemon.Stop()\n\n\t\t\t// The above stop will return immediately. But we'll be\n\t\t\t// notified on the error channel once the process is\n\t\t\t// complete.\n\t\t\treturn <-daemon.ErrChan\n\n\t\tcase err := <-daemon.ErrChan:\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif parser.Active.Name == \"view\" {\n\t\treturn view(&config, lisCfg)\n\t}\n\n\treturn fmt.Errorf(\"unimplemented command %v\", parser.Active.Name)\n}",
"func InitConfig(configName string) func() {\n\treturn func() {\n\t\tConfig.ConfigFile = viper.GetString(ConfigFile) // enable ability to specify config file via flag\n\t\tConfig.ConfigDir = viper.GetString(ConfigDir)\n\t\tviper.SetEnvPrefix(\"cilium\")\n\n\t\t// INFO: 启动时候用的 --config-dir=/tmp/cilium/config-map, 每一个文件名 filename 是 key,文件内容是 value\n\t\tif Config.ConfigDir != \"\" {\n\t\t\tif _, err := os.Stat(Config.ConfigDir); os.IsNotExist(err) {\n\t\t\t\tlog.Fatalf(\"Non-existent configuration directory %s\", Config.ConfigDir)\n\t\t\t}\n\n\t\t\tif m, err := ReadDirConfig(Config.ConfigDir); err != nil {\n\t\t\t\tlog.Fatalf(\"Unable to read configuration directory %s: %s\", Config.ConfigDir, err)\n\t\t\t} else {\n\t\t\t\terr := MergeConfig(m)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"Unable to merge configuration: %s\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif Config.ConfigFile != \"\" {\n\t\t\tviper.SetConfigFile(Config.ConfigFile)\n\t\t} else {\n\t\t\tviper.SetConfigName(configName) // name of config file (without extension)\n\t\t\tviper.AddConfigPath(\"$HOME\") // adding home directory as first search path\n\t\t}\n\n\t\t// If a config file is found, read it in.\n\t\tif err := viper.ReadInConfig(); err == nil {\n\t\t\tlog.WithField(logfields.Path, viper.ConfigFileUsed()).\n\t\t\t\tInfo(\"Using config from file\")\n\t\t} else if Config.ConfigFile != \"\" {\n\t\t\tlog.WithField(logfields.Path, Config.ConfigFile).\n\t\t\t\tFatal(\"Error reading config file\")\n\t\t} else {\n\t\t\tlog.WithField(logfields.Reason, err).Info(\"Skipped reading configuration file\")\n\t\t}\n\t}\n}",
"func Run(ctx app.Context) {\n\t// //////////////////////////////////////////////////////////////////////\n\t// Config and command line\n\t// //////////////////////////////////////////////////////////////////////\n\n\t// Options are set in this order: config -> env var -> cmd line option.\n\t// So first we must apply config files, then do cmd line parsing which\n\t// will apply env vars and cmd line options.\n\n\t// Parse cmd line to get --config files\n\tcmdLine := config.ParseCommandLine(config.Options{})\n\n\t// --config files override defaults if given\n\tconfigFiles := config.DEFAULT_CONFIG_FILES\n\tif cmdLine.Config != \"\" {\n\t\tconfigFiles = cmdLine.Config\n\t}\n\n\t// Parse default options from config files\n\tdef := config.ParseConfigFiles(configFiles, cmdLine.Debug)\n\n\t// Parse env vars and cmd line options, override default config\n\tcmdLine = config.ParseCommandLine(def)\n\n\t// Final options and commands\n\tvar o config.Options = cmdLine.Options\n\tvar c config.Command = cmdLine.Command\n\tif o.Debug {\n\t\tapp.Debug(\"command: %#v\\n\", c)\n\t\tapp.Debug(\"options: %#v\\n\", o)\n\t}\n\n\tif ctx.Hooks.AfterParseOptions != nil {\n\t\tif o.Debug {\n\t\t\tapp.Debug(\"calling hook AfterParseOptions\")\n\t\t}\n\t\tctx.Hooks.AfterParseOptions(&o)\n\n\t\t// Dump options again to see if hook changed them\n\t\tif o.Debug {\n\t\t\tapp.Debug(\"options: %#v\\n\", o)\n\t\t}\n\t}\n\tctx.Options = o\n\tctx.Command = c\n\n\t// //////////////////////////////////////////////////////////////////////\n\t// Help and version\n\t// //////////////////////////////////////////////////////////////////////\n\n\t// Help uses a Request Manager client to fetch the list of all requests.\n\t// If addr is set, then this works; else, ignore and always print help.\n\trmc, _ := makeRMC(&ctx)\n\n\t// spinc with no args (Args[0] = \"spinc\" itself). 
Print short request help\n\t// because Ryan is very busy.\n\tif len(os.Args) == 1 {\n\t\tconfig.Help(false, rmc)\n\t\tos.Exit(0)\n\t}\n\n\t// spinc --help or spinc help (full help)\n\tif o.Help || (c.Cmd == \"help\" && len(c.Args) == 0) {\n\t\tconfig.Help(true, rmc)\n\t\tos.Exit(0)\n\t}\n\n\t// spinc help <command>\n\tif c.Cmd == \"help\" && len(c.Args) > 0 {\n\t\t// Need rm client for this\n\t\tif rmc == nil {\n\t\t\tvar err error\n\t\t\trmc, err = makeRMC(&ctx)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t\treqName := c.Args[0]\n\t\tif err := config.RequestHelp(reqName, rmc); err != nil {\n\t\t\tswitch err {\n\t\t\tcase config.ErrUnknownRequest:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Unknown request: %s. Run spinc (no arguments) to list all requests.\\n\", reqName)\n\t\t\tdefault:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"API error: %s. Use --ping to test the API connection.\\n\", err)\n\t\t\t}\n\t\t\tos.Exit(1)\n\t\t}\n\t\tos.Exit(0)\n\t}\n\n\t// spinc --version or spinc version\n\tif o.Version || c.Cmd == \"version\" {\n\t\tfmt.Println(\"spinc v0.0.0\")\n\t\tos.Exit(0)\n\t}\n\n\t// //////////////////////////////////////////////////////////////////////\n\t// Request Manager Client\n\t// //////////////////////////////////////////////////////////////////////\n\tif rmc == nil {\n\t\tvar err error\n\t\trmc, err = makeRMC(&ctx)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t// //////////////////////////////////////////////////////////////////////\n\t// Ping\n\t// //////////////////////////////////////////////////////////////////////\n\tif o.Ping {\n\t\tif _, err := rmc.RequestList(); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Ping failed: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Printf(\"%s OK\\n\", o.Addr)\n\t\tos.Exit(0)\n\t}\n\n\t// //////////////////////////////////////////////////////////////////////\n\t// Commands\n\t// 
//////////////////////////////////////////////////////////////////////\n\tcmdFactory := &cmd.DefaultFactory{}\n\n\tvar err error\n\tvar run app.Command\n\tif ctx.Factories.Command != nil {\n\t\trun, err = ctx.Factories.Command.Make(c.Cmd, ctx)\n\t\tif err != nil {\n\t\t\tswitch err {\n\t\t\tcase cmd.ErrNotExist:\n\t\t\t\tif o.Debug {\n\t\t\t\t\tapp.Debug(\"user cmd factory cannot make a %s cmd, trying default factory\", c.Cmd)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"User command factory error: %s\\n\", err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t}\n\tif run == nil {\n\t\tif o.Debug {\n\t\t\tapp.Debug(\"using default factory to make a %s cmd\", c.Cmd)\n\t\t}\n\t\trun, err = cmdFactory.Make(c.Cmd, ctx)\n\t\tif err != nil {\n\t\t\tswitch err {\n\t\t\tcase cmd.ErrNotExist:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Unknown command: %s. Run 'spinc help' to list commands.\\n\", c.Cmd)\n\t\t\tdefault:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Command factory error: %s\\n\", err)\n\t\t\t}\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\tif err := run.Prepare(); err != nil {\n\t\tif o.Debug {\n\t\t\tapp.Debug(\"%s Prepare error: %s\", c.Cmd, err)\n\t\t}\n\t\tswitch err {\n\t\tcase config.ErrUnknownRequest:\n\t\t\treqName := c.Args[0]\n\t\t\tfmt.Fprintf(os.Stderr, \"Unknown request: %s. Run spinc (no arguments) to list all requests.\\n\", reqName)\n\t\tdefault:\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t\tos.Exit(1)\n\t}\n\n\tif err := run.Run(); err != nil {\n\t\tif o.Debug {\n\t\t\tapp.Debug(\"%s Run error: %s\", c.Cmd, err)\n\t\t}\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}",
"func (a *Agent) Run(shutdown chan struct{}) error {\n\tvar wg sync.WaitGroup\n\n\tlog.Printf(\"INFO Agent Config: Interval:%s, Hostname:%#v, Flush Interval:%s \\n\",\n\t\ta.Config.Agent.Interval, a.Config.Agent.Hostname, a.Config.Agent.FlushInterval)\n\n\t// configure all sources\n\tfor _, source := range a.Config.Sources {\n\t\tsource.SetDefaultTags(a.Config.Tags)\n\t}\n\n\t// Start all ServiceSources\n\tfor _, source := range a.Config.Sources {\n\t\tswitch p := source.Source.(type) {\n\t\tcase optic.ServiceSource:\n\t\t\tacc := NewAccumulator(source, source.EventsCh())\n\t\t\tif err := p.Start(acc); err != nil {\n\t\t\t\tlog.Printf(\"ERROR Service for source %s failed to start, exiting\\n%s\\n\",\n\t\t\t\t\tsource.Name(), err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer p.Stop()\n\t\t}\n\t}\n\n\twg.Add(len(a.Config.Sources))\n\tfor _, source := range a.Config.Sources {\n\t\tinterval := a.Config.Agent.Interval\n\t\t// overwrite global interval if this plugin has it's own\n\t\tif source.Config.Interval != 0 {\n\t\t\tinterval = source.Config.Interval\n\t\t}\n\t\tgo func(source *models.RunningSource, interval time.Duration) {\n\t\t\tdefer wg.Done()\n\t\t\ta.gatherer(shutdown, source, interval)\n\t\t}(source, interval)\n\t}\n\n\twg.Wait()\n\ta.Close()\n\treturn nil\n}",
"func (m *Manager) ApplyConfig(cfg map[string]sd_config.ServiceDiscoveryConfig) error {\n\tm.mtx.Lock()\n\tdefer m.mtx.Unlock()\n\n\tm.cancelDiscoverers()\n\tfor name, scfg := range cfg {\n\t\tfor provName, prov := range m.providersFromConfig(scfg) {\n\t\t\tm.startProvider(m.ctx, poolKey{setName: name, provider: provName}, prov)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func Run(name string, initFunc Init, opts ...BinaryOpts) {\n\tvar config Config\n\tfor _, o := range opts {\n\t\to(&config)\n\t}\n\n\tctx := context.Background()\n\tctx = log.WithLogger(ctx, log.G(ctx).WithField(\"runtime\", name))\n\n\tif err := run(ctx, nil, initFunc, name, config); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s: %s\", name, err)\n\t\tos.Exit(1)\n\t}\n}",
"func (s *Sinker) Run() {\n\tvar err error\n\tvar newCfg *config.Config\n\tdefer func() {\n\t\ts.stopped <- struct{}{}\n\t}()\n\tif cmdOps.PushGatewayAddrs != \"\" {\n\t\taddrs := strings.Split(cmdOps.PushGatewayAddrs, \",\")\n\t\ts.pusher = statistics.NewPusher(addrs, cmdOps.PushInterval, httpAddr)\n\t\tif err = s.pusher.Init(); err != nil {\n\t\t\treturn\n\t\t}\n\t\tgo s.pusher.Run()\n\t}\n\tif s.rcm == nil {\n\t\tif _, err = os.Stat(cmdOps.LocalCfgFile); err == nil {\n\t\t\tif newCfg, err = config.ParseLocalCfgFile(cmdOps.LocalCfgFile); err != nil {\n\t\t\t\tutil.Logger.Fatal(\"config.ParseLocalCfgFile failed\", zap.Error(err))\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tutil.Logger.Fatal(\"expect --local-cfg-file or --nacos-dataid\")\n\t\t\treturn\n\t\t}\n\t\tif err = newCfg.Normallize(); err != nil {\n\t\t\tutil.Logger.Fatal(\"newCfg.Normallize failed\", zap.Error(err))\n\t\t\treturn\n\t\t}\n\t\tif err = s.applyConfig(newCfg); err != nil {\n\t\t\tutil.Logger.Fatal(\"s.applyConfig failed\", zap.Error(err))\n\t\t\treturn\n\t\t}\n\t\t<-s.ctx.Done()\n\t} else {\n\t\tif cmdOps.NacosServiceName != \"\" {\n\t\t\tgo s.rcm.Run()\n\t\t}\n\t\t// Golang <-time.After() is not garbage collected before expiry.\n\t\tticker := time.NewTicker(10 * time.Second)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-s.ctx.Done():\n\t\t\t\tutil.Logger.Info(\"Sinker.Run quit due to context has been canceled\")\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tif newCfg, err = s.rcm.GetConfig(); err != nil {\n\t\t\t\t\tutil.Logger.Error(\"s.rcm.GetConfig failed\", zap.Error(err))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif err = newCfg.Normallize(); err != nil {\n\t\t\t\t\tutil.Logger.Error(\"newCfg.Normallize failed\", zap.Error(err))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif err = s.applyConfig(newCfg); err != nil {\n\t\t\t\t\tutil.Logger.Error(\"s.applyConfig failed\", zap.Error(err))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func Run(ctx context.Context, conf Config) error {\n\tsrv := New(ctx, conf)\n\treturn srv.Run()\n}",
"func TestConfig(t *testing.T) {\n\t// Register gomega fail handler\n\tRegisterFailHandler(Fail)\n\n\t// Have go's testing package run package specs\n\tRunSpecs(t, \"go-utils suite\")\n}",
"func (c *ConverterController) Run(ctx context.Context, group *errgroup.Group, cfg *conf.BaseOperatorConf) {\n\n\tif cfg.EnabledPrometheusConverter.ServiceScrape {\n\t\tgroup.Go(func() error {\n\t\t\treturn c.runInformerWithDiscovery(ctx, v1.SchemeGroupVersion.String(), v1.ServiceMonitorsKind, c.serviceInf.Run)\n\t\t})\n\n\t}\n\tif cfg.EnabledPrometheusConverter.PodMonitor {\n\t\tgroup.Go(func() error {\n\t\t\treturn c.runInformerWithDiscovery(ctx, v1.SchemeGroupVersion.String(), v1.PodMonitorsKind, c.podInf.Run)\n\t\t})\n\n\t}\n\tif cfg.EnabledPrometheusConverter.PrometheusRule {\n\t\tgroup.Go(func() error {\n\t\t\treturn c.runInformerWithDiscovery(ctx, v1.SchemeGroupVersion.String(), v1.PrometheusRuleKind, c.ruleInf.Run)\n\t\t})\n\n\t}\n}",
"func Run(cfg RunnerConfig) {\n\tif cfg.LogOutput == nil {\n\t\tcfg.LogOutput = os.Stdout\n\t}\n\tlog.NewLogger(cfg.LogLevel, cfg.LogOutput)\n\tserver.Run()\n}",
"func NewForConfig(ctx context.Context, runCtx *runcontext.RunContext) (*SkaffoldRunner, error) {\n\tevent.InitializeState(runCtx)\n\tevent.LogMetaEvent()\n\teventV2.InitializeState(runCtx)\n\teventV2.LogMetaEvent()\n\t_, endTrace := instrumentation.StartTrace(context.Background(), \"NewForConfig\")\n\tdefer endTrace()\n\n\ttagger, err := tag.NewTaggerMux(runCtx)\n\tif err != nil {\n\t\tendTrace(instrumentation.TraceEndError(err))\n\t\treturn nil, fmt.Errorf(\"creating tagger: %w\", err)\n\t}\n\n\tstore := build.NewArtifactStore()\n\tg := graph.ToArtifactGraph(runCtx.Artifacts())\n\tsourceDependencies := graph.NewSourceDependenciesCache(runCtx, store, g)\n\n\tisLocalImage := func(imageName string) (bool, error) {\n\t\treturn isImageLocal(runCtx, imageName)\n\t}\n\n\t// Always add skaffold-specific labels, except during `skaffold render`\n\tlabeller := label.NewLabeller(runCtx.AddSkaffoldLabels(), runCtx.CustomLabels(), runCtx.GetRunID())\n\ttester, err := getTester(ctx, runCtx, isLocalImage)\n\tif err != nil {\n\t\tendTrace(instrumentation.TraceEndError(err))\n\t\treturn nil, fmt.Errorf(\"creating tester: %w\", err)\n\t}\n\n\tvar deployer deploy.Deployer\n\n\thydrationDir, err := util.GetHydrationDir(runCtx.Opts, runCtx.WorkingDir, true, isKptRendererOrDeployerUsed(runCtx.Pipelines))\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getting render output path: %w\", err)\n\t}\n\n\trenderer, err := GetRenderer(ctx, runCtx, hydrationDir, labeller.Labels(), runCtx.UsingLegacyHelmDeploy())\n\tif err != nil {\n\t\tendTrace(instrumentation.TraceEndError(err))\n\t\treturn nil, fmt.Errorf(\"creating renderer: %w\", err)\n\t}\n\n\tdeployer, err = GetDeployer(ctx, runCtx, labeller, hydrationDir, runCtx.UsingLegacyHelmDeploy())\n\tif err != nil {\n\t\tendTrace(instrumentation.TraceEndError(err))\n\t\treturn nil, fmt.Errorf(\"creating deployer: %w\", err)\n\t}\n\trOpts := platform.ResolverOpts{\n\t\tKubeContext: runCtx.KubeContext,\n\t\tCliPlatformsSelection: 
runCtx.Opts.Platforms,\n\t\tCheckClusterNodePlatforms: runCtx.CheckClusterNodePlatforms(),\n\t\tDisableMultiPlatformBuild: runCtx.DisableMultiPlatformBuild(),\n\t}\n\n\tplatforms, err := platform.NewResolver(ctx, runCtx.Pipelines.All(), rOpts)\n\tif err != nil {\n\t\tendTrace(instrumentation.TraceEndError(err))\n\t\treturn nil, fmt.Errorf(\"getting target platforms: %w\", err)\n\t}\n\n\tvar verifier verify.Verifier\n\tverifier, err = GetVerifier(ctx, runCtx, labeller)\n\tif err != nil {\n\t\tendTrace(instrumentation.TraceEndError(err))\n\t\treturn nil, fmt.Errorf(\"creating verifier: %w\", err)\n\t}\n\n\tvar acsRunner ActionsRunner\n\tacsRunner, err = GetActionsRunner(ctx, runCtx, labeller, runCtx.VerifyDockerNetwork(), runCtx.Opts.VerifyEnvFile)\n\tif err != nil {\n\t\tendTrace(instrumentation.TraceEndError(err))\n\t\treturn nil, fmt.Errorf(\"creating actiosn runner: %w\", err)\n\t}\n\n\tdepLister := func(ctx context.Context, artifact *latest.Artifact) ([]string, error) {\n\t\tctx, endTrace := instrumentation.StartTrace(ctx, \"NewForConfig_depLister\")\n\t\tdefer endTrace()\n\n\t\tbuildDependencies, err := sourceDependencies.SingleArtifactDependencies(ctx, artifact)\n\t\tif err != nil {\n\t\t\tendTrace(instrumentation.TraceEndError(err))\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttestDependencies, err := tester.TestDependencies(ctx, artifact)\n\t\tif err != nil {\n\t\t\tendTrace(instrumentation.TraceEndError(err))\n\t\t\treturn nil, err\n\t\t}\n\t\treturn append(buildDependencies, testDependencies...), nil\n\t}\n\n\tartifactCache, err := cache.NewCache(ctx, runCtx, isLocalImage, depLister, g, store)\n\tif err != nil {\n\t\tendTrace(instrumentation.TraceEndError(err))\n\t\treturn nil, fmt.Errorf(\"initializing cache: %w\", err)\n\t}\n\t// The Builder must be instantiated AFTER the Deployer, because the Deploy target influences\n\t// the Cluster object on the RunContext, which in turn influences whether or not we will push images.\n\tvar builder build.Builder\n\tbuilder, 
err = build.NewBuilderMux(runCtx, store, artifactCache, func(p latest.Pipeline) (build.PipelineBuilder, error) {\n\t\treturn GetBuilder(ctx, runCtx, store, sourceDependencies, p)\n\t})\n\tif err != nil {\n\t\tendTrace(instrumentation.TraceEndError(err))\n\t\treturn nil, fmt.Errorf(\"creating builder: %w\", err)\n\t}\n\n\tbuilder, tester, renderer, deployer = WithTimings(builder, tester, renderer, deployer, runCtx.CacheArtifacts())\n\tif runCtx.Notification() {\n\t\tdeployer = WithNotification(deployer)\n\t}\n\n\tmonitor := filemon.NewMonitor()\n\tintents, intentChan := setupIntents(runCtx)\n\trtrigger, err := trigger.NewTrigger(runCtx, intents.IsAnyAutoEnabled)\n\tif err != nil {\n\t\tendTrace(instrumentation.TraceEndError(err))\n\t\treturn nil, fmt.Errorf(\"creating watch trigger: %w\", err)\n\t}\n\n\trbuilder := NewBuilder(builder, tagger, platforms, artifactCache, runCtx)\n\treturn &SkaffoldRunner{\n\t\tBuilder: *rbuilder,\n\t\tPruner: Pruner{Builder: builder},\n\t\trenderer: renderer,\n\t\ttester: tester,\n\t\tdeployer: deployer,\n\t\tplatforms: platforms,\n\t\tmonitor: monitor,\n\t\tlistener: NewSkaffoldListener(monitor, rtrigger, sourceDependencies, intentChan),\n\t\tartifactStore: store,\n\t\tsourceDependencies: sourceDependencies,\n\t\tlabeller: labeller,\n\t\tcache: artifactCache,\n\t\trunCtx: runCtx,\n\t\tintents: intents,\n\t\tisLocalImage: isLocalImage,\n\t\tverifier: verifier,\n\t\tactionsRunner: acsRunner,\n\t}, nil\n}",
"func (o *Orchestrator) Run() {\n\tv := newValidator(o.Plugin)\n\tm := newMetrics(o.healthzPath, o.healthzPort, o.metricsPath, o.metricsPort)\n\n\tv.mustValidatePrerequisites()\n\n\to.mustServeKMSRequests()\n\n\t// Giving some time for kmsPlugin to start Serving.\n\t// TODO: Must be a better way than to sleep.\n\ttime.Sleep(3 * time.Millisecond)\n\n\tv.mustPingRPC()\n\tmustGatherMetrics()\n\n\tm.mustServeHealthzAndMetrics()\n\n\t// Giving some time for HealthZ and Metrics to start Serving.\n\t// TODO: Must be a better way than to sleep.\n\ttime.Sleep(3 * time.Millisecond)\n\tmustEmitOKHealthz()\n\tmustEmitMetrics()\n}",
"func initConfig() {\n\t// Find home directory.\n\thome, err := os.UserHomeDir()\n\tzenDir := home + \"/.zen\"\n\n\tcobra.CheckErr(err)\n\n\t// load the config data\n\tcfg = config.InitConfig(zenDir)\n\n\t// set default exec and runner\n\tcfg.AppCfg.Executor = &plugins.DefaultExecutor{}\n\tcfg.AppCfg.Runner = &plugins.DefaultRunner{}\n\n\t// load plugin from path based on config default\n\tfor _, plugin := range cfg.Plugins.Runners {\n\t\tif plugin.Name == cfg.AppCfg.RunnerID {\n\t\t\tplugins.LoadPlugin(plugin.Path)\n\t\t\tcfg.AppCfg.Runner = plugins.ZenPluginRegistry.Runner\n\t\t}\n\t}\n\n\tfor _, plugin := range cfg.Plugins.Executors {\n\t\tif plugin.Name == cfg.AppCfg.ExecutorID {\n\t\t\tplugins.LoadPlugin(plugin.Path)\n\t\t\tcfg.AppCfg.Executor = plugins.ZenPluginRegistry.Executor\n\t\t}\n\t}\n\n}",
"func (m *Mix) Exec_Config(payload *mixTy.MixConfigAction, tx *types.Transaction, index int) (*types.Receipt, error) {\n\ta := newAction(m, tx)\n\treceipt, err := a.Config(payload)\n\tif err != nil {\n\t\tmlog.Error(\"mix config failed\", \"error\", err, \"hash\", hex.EncodeToString(tx.Hash()))\n\t\treturn nil, err\n\t}\n\treturn receipt, nil\n}",
"func (c *Command) Run(args []string) int {\n\tname, opts, peers, err := c.readConfig()\n\tif err != nil {\n\t\tc.UI.Error(err.Error())\n\t\treturn 1\n\t}\n\tc.instance, err = huton.NewInstance(name, opts...)\n\tif err != nil {\n\t\tc.UI.Error(err.Error())\n\t\treturn 1\n\t}\n\t_, err = c.instance.Join(peers)\n\tif err != nil {\n\t\tc.UI.Error(err.Error())\n\t\treturn 1\n\t}\n\treturn c.handleSignals()\n}",
"func Run(l net.Listener, myconfig *config.Config, routes *[]router.Route, enableCORS bool) error {\n\tvar handler http.Handler\n\n\tif myconfig == nil {\n\t\treturn errors.New(\"Configuration is not set\")\n\t}\n\terr := dbhelper.Connect(myconfig.Database)\n\tif err != nil {\n\t\treturn err\n\t}\n\tmyrouter := router.New(routes, true, true, myconfig.Server)\n\tif enableCORS {\n\t\thandler = router.GetCORS(myrouter, myconfig.Cors)\n\t} else {\n\t\thandler = myrouter\n\t}\n\tif l != nil {\n\t\treturn router.RunWithListener(handler, l)\n\t}\n\treturn router.Run(handler, myconfig.Server.Port)\n}",
"func (r *CmdReporter) Run() error {\n\tstdout, stderr, retcode, err := r.runCommand()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"system failed to run command. %+v\", err)\n\t}\n\n\tif err := r.saveToConfigMap(stdout, stderr, retcode); err != nil {\n\t\treturn fmt.Errorf(\"failed to save command output to ConfigMap. %+v\", err)\n\t}\n\n\treturn nil\n}",
"func (o *CreateConfigGwOptions) Run() error {\n\tif o.ListenAddress == \"\" {\n\t\tlog.Infof(\"%s not specified; using default (%s)\\n\", optionListenAddress, defaultListenAddress)\n\t\to.ListenAddress = defaultListenAddress\n\t\t// return util.MissingOption(optionListenAddress)\n\t}\n\n\tgwConfigDir, err := util.ZitiAppConfigDir(c.ZITI_FABRIC_GW)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"err\")\n\t}\n\n\treturn fmt.Errorf(\"UNIMPLEMENTED: '%s'\", gwConfigDir)\n}",
"func LoadConfig(logger *zap.Logger, cfg interface{}) {\n\terr := envconfig.Process(\"\", cfg)\n\tif err != nil {\n\t\tenvconfig.Usage(\"\", cfg)\n\t\tlogger.Fatal(\"app: could not process config\", zap.Error(err))\n\t}\n}",
"func (config *ReleaseCommandConfig) Run() error {\n\n\tgit, err := gitpkg.GetGit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = release(git)\n\n\treturn err\n}",
"func (c *ConverterController) Run(ctx context.Context, group *errgroup.Group, cfg *config.BaseOperatorConf) {\n\n\tif cfg.EnabledPrometheusConverter.ServiceScrape {\n\t\tgroup.Go(func() error {\n\t\t\treturn c.runInformerWithDiscovery(ctx, v1.SchemeGroupVersion.String(), v1.ServiceMonitorsKind, c.serviceInf.Run)\n\t\t})\n\n\t}\n\tif cfg.EnabledPrometheusConverter.PodMonitor {\n\t\tgroup.Go(func() error {\n\t\t\treturn c.runInformerWithDiscovery(ctx, v1.SchemeGroupVersion.String(), v1.PodMonitorsKind, c.podInf.Run)\n\t\t})\n\n\t}\n\tif cfg.EnabledPrometheusConverter.PrometheusRule {\n\t\tgroup.Go(func() error {\n\t\t\treturn c.runInformerWithDiscovery(ctx, v1.SchemeGroupVersion.String(), v1.PrometheusRuleKind, c.ruleInf.Run)\n\t\t})\n\n\t}\n\tif cfg.EnabledPrometheusConverter.Probe {\n\t\tgroup.Go(func() error {\n\t\t\treturn c.runInformerWithDiscovery(ctx, v1.SchemeGroupVersion.String(), v1.ProbeKindKey, c.probeInf.Run)\n\t\t})\n\n\t}\n\n}",
"func (i *TiFlashInstance) InitConfig(\n\te executor.Executor,\n\tclusterName,\n\tclusterVersion,\n\tdeployUser string,\n\tpaths meta.DirPaths,\n) error {\n\ttopo := i.topo.(*Specification)\n\tif err := i.BaseInstance.InitConfig(e, topo.GlobalOptions, deployUser, paths); err != nil {\n\t\treturn err\n\t}\n\n\tspec := i.InstanceSpec.(TiFlashSpec)\n\n\ttidbStatusAddrs := []string{}\n\tfor _, tidb := range topo.TiDBServers {\n\t\ttidbStatusAddrs = append(tidbStatusAddrs, fmt.Sprintf(\"%s:%d\", tidb.Host, uint64(tidb.StatusPort)))\n\t}\n\ttidbStatusStr := strings.Join(tidbStatusAddrs, \",\")\n\n\tpdStr := strings.Join(i.getEndpoints(), \",\")\n\n\tcfg := scripts.NewTiFlashScript(\n\t\ti.GetHost(),\n\t\tpaths.Deploy,\n\t\tstrings.Join(paths.Data, \",\"),\n\t\tpaths.Log,\n\t\ttidbStatusStr,\n\t\tpdStr,\n\t).WithTCPPort(spec.TCPPort).\n\t\tWithHTTPPort(spec.HTTPPort).\n\t\tWithFlashServicePort(spec.FlashServicePort).\n\t\tWithFlashProxyPort(spec.FlashProxyPort).\n\t\tWithFlashProxyStatusPort(spec.FlashProxyStatusPort).\n\t\tWithStatusPort(spec.StatusPort).\n\t\tWithTmpDir(spec.TmpDir).\n\t\tWithNumaNode(spec.NumaNode).\n\t\tAppendEndpoints(topo.Endpoints(deployUser)...)\n\n\tfp := filepath.Join(paths.Cache, fmt.Sprintf(\"run_tiflash_%s_%d.sh\", i.GetHost(), i.GetPort()))\n\tif err := cfg.ConfigToFile(fp); err != nil {\n\t\treturn err\n\t}\n\tdst := filepath.Join(paths.Deploy, \"scripts\", \"run_tiflash.sh\")\n\n\tif err := e.Transfer(fp, dst, false); err != nil {\n\t\treturn err\n\t}\n\n\tif _, _, err := e.Execute(\"chmod +x \"+dst, false); err != nil {\n\t\treturn err\n\t}\n\n\tconf, err := i.InitTiFlashLearnerConfig(cfg, clusterVersion, topo.ServerConfigs.TiFlashLearner)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// merge config files for imported instance\n\tif i.IsImported() {\n\t\tconfigPath := 
ClusterPath(\n\t\t\tclusterName,\n\t\t\tAnsibleImportedConfigPath,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"%s-learner-%s-%d.toml\",\n\t\t\t\ti.ComponentName(),\n\t\t\t\ti.GetHost(),\n\t\t\t\ti.GetPort(),\n\t\t\t),\n\t\t)\n\t\timportConfig, err := ioutil.ReadFile(configPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconf, err = mergeImported(importConfig, conf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = i.mergeTiFlashLearnerServerConfig(e, conf, spec.LearnerConfig, paths)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconf, err = i.InitTiFlashConfig(cfg, topo.ServerConfigs.TiFlash)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// merge config files for imported instance\n\tif i.IsImported() {\n\t\tconfigPath := ClusterPath(\n\t\t\tclusterName,\n\t\t\tAnsibleImportedConfigPath,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"%s-%s-%d.toml\",\n\t\t\t\ti.ComponentName(),\n\t\t\t\ti.GetHost(),\n\t\t\t\ti.GetPort(),\n\t\t\t),\n\t\t)\n\t\timportConfig, err := ioutil.ReadFile(configPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconf, err = mergeImported(importConfig, conf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn i.MergeServerConfig(e, conf, spec.Config, paths)\n}",
"func (c *ConfigMapController) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {\n\t// When this function completes, mark the go function as done\n\tdefer wg.Done()\n\n\t// Increment wait group as we're about to execute a go function\n\twg.Add(1)\n\n\t// Execute go function\n\tgo c.configmapInformer.Run(stopCh)\n\n\t// Wait till we receive a stop signal\n\t<-stopCh\n}",
"func (a *Agent) Run(ctx context.Context) error {\n\ta.Context = ctx\n\tlog.Printf(\"I! [agent] Config: Interval:%s, Quiet:%#v, Hostname:%#v, \"+\n\t\t\"Flush Interval:%s\",\n\t\ta.Config.Agent.Interval.Duration, a.Config.Agent.Quiet,\n\t\ta.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)\n\n\tlog.Printf(\"D! [agent] Initializing plugins\")\n\terr := a.initPlugins()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstartTime := time.Now()\n\tlog.Printf(\"D! [agent] Connecting outputs\")\n\tnext, ou, err := a.startOutputs(ctx, a.Config.Outputs)\n\tif err != nil {\n\t\treturn err\n\t}\n\ta.ou = ou\n\tvar apu []*processorUnit\n\tvar au *aggregatorUnit\n\tif len(a.Config.Aggregators) != 0 {\n\t\taggC := next\n\t\tif len(a.Config.AggProcessors) != 0 {\n\t\t\taggC, apu, err = a.startProcessors(next, a.Config.AggProcessors)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tnext, au, err = a.startAggregators(aggC, next, a.Config.Aggregators)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar pu []*processorUnit\n\tif len(a.Config.Processors) != 0 {\n\t\tnext, pu, err = a.startProcessors(next, a.Config.Processors)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tiu, err := a.startInputs(next, a.Config.Inputs)\n\ta.iu = iu\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\terr := a.runOutputs(ou)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"E! [agent] Error running outputs: %v\", err)\n\t\t}\n\t}()\n\n\tif au != nil {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := a.runProcessors(apu)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"E! [agent] Error running processors: %v\", err)\n\t\t\t}\n\t\t}()\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := a.runAggregators(startTime, au)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"E! 
[agent] Error running aggregators: %v\", err)\n\t\t\t}\n\t\t}()\n\t}\n\n\tif pu != nil {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := a.runProcessors(pu)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"E! [agent] Error running processors: %v\", err)\n\t\t\t}\n\t\t}()\n\t}\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\terr := a.runInputs(ctx, startTime, iu)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"E! [agent] Error running inputs: %v\", err)\n\t\t}\n\t}()\n\n\twg.Wait()\n\n\tlog.Printf(\"D! [agent] Stopped Successfully\")\n\treturn err\n}"
] | [
"0.71010476",
"0.64792436",
"0.6213528",
"0.62125707",
"0.61918336",
"0.60716456",
"0.60399765",
"0.60297084",
"0.6020371",
"0.60044134",
"0.5958",
"0.59382945",
"0.58830523",
"0.58809024",
"0.5818161",
"0.5803526",
"0.5797414",
"0.5793293",
"0.5785053",
"0.57504505",
"0.57250494",
"0.5718365",
"0.56934637",
"0.565791",
"0.56416804",
"0.5633542",
"0.562904",
"0.55968034",
"0.5543123",
"0.5521258",
"0.55004114",
"0.54941297",
"0.549013",
"0.5475373",
"0.5469869",
"0.5462719",
"0.5460456",
"0.5451451",
"0.54485065",
"0.54406667",
"0.54190236",
"0.5412966",
"0.5381508",
"0.537677",
"0.53745943",
"0.53740484",
"0.53724056",
"0.536694",
"0.53590995",
"0.5338566",
"0.5337688",
"0.5333704",
"0.5331051",
"0.53300285",
"0.5313361",
"0.528527",
"0.52848804",
"0.5284479",
"0.52759975",
"0.5263643",
"0.52625793",
"0.52532923",
"0.52518755",
"0.525099",
"0.52450097",
"0.5234943",
"0.52329934",
"0.52064174",
"0.52040166",
"0.51831824",
"0.5177105",
"0.517596",
"0.51635396",
"0.51616114",
"0.5158125",
"0.5156675",
"0.5137417",
"0.5134622",
"0.51327753",
"0.51198226",
"0.510165",
"0.5101057",
"0.5099833",
"0.50974804",
"0.50931954",
"0.5087555",
"0.5077139",
"0.50768566",
"0.506884",
"0.5065827",
"0.50561374",
"0.50555766",
"0.50474393",
"0.5043617",
"0.50354654",
"0.50327575",
"0.5028912",
"0.5020575",
"0.50166035",
"0.50151515"
] | 0.7411264 | 0 |
Run will execute all phases of all registered Units and block until an error occurs. If RunConfig has been called prior to Run, the Group's Config phase will be skipped and Run continues with the PreRunner and Service phases. The following phases are executed in the following sequence: Config phase (serially, in order of Unit registration) FlagSet() Get & register all FlagSets from Config Units. Flag Parsing Using the provided args (os.Args if empty) Validate() Validate Config Units. Exit on first error. PreRunner phase (serially, in order of Unit registration) PreRun() Execute PreRunner Units. Exit on first error. Service phase (concurrently) Serve() Execute all Service Units in separate Go routines. Wait Block until one of the Serve() methods returns GracefulStop() Call interrupt handlers of all Service Units. Run will return with the originating error on: first Config.Validate() returning an error first PreRunner.PreRun() returning an error first Service.Serve() returning (error or nil) | Run выполнит все фазы всех зарегистрированных Units и заблокирует выполнение до возникновения ошибки. Если RunConfig был вызван до Run, фаза Config группы будет пропущена, и Run продолжится с фаз PreRunner и Service. Следующие фазы выполняются в следующем порядке: Фаза Config (последовательно, в порядке регистрации Unit) FlagSet() Получить и зарегистрировать все FlagSets из Config Units. Парсинг флагов Использовать предоставленные аргументы (os.Args, если они пустые) Validate() Проверить Config Units. Выход при первой ошибке. Фаза PreRunner (последовательно, в порядке регистрации Unit) PreRun() Выполнить PreRunner Units. Выход при первой ошибке. Фаза Service (параллельно) Serve() Выполнить все Service Units в отдельных Go-рутин. Wait Блокировать выполнение до возвращения одного из методов Serve() GracefulStop() Вызвать обработчики прерывания для всех Service Units. 
Run вернётся с исходной ошибкой в случае: первой ошибки, возвращённой Config.Validate() первой ошибки, возвращённой PreRunner.PreRun() первой ошибки или nil, возвращённой Service.Serve() | func (g *Group) Run() (err error) {
// run config registration and flag parsing stages; bail out when the run
// was interrupted (e.g. a help/list request) or configuration failed
if interrupted, errRun := g.RunConfig(); interrupted || errRun != nil {
return errRun
}
// any error escaping Run is treated as fatal for the process
defer func() {
if err != nil {
g.log.Fatal().Err(err).Stack().Msg("unexpected exit")
}
}()
// execute pre run stage and exit on first error
for idx := range g.p {
// a PreRunner might have been deregistered during Run
if g.p[idx] == nil {
continue
}
g.log.Debug().Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.p))).Str("name", g.p[idx].Name()).Msg("pre-run")
if err := g.p[idx].PreRun(); err != nil {
return err
}
}
// readyCh must close only after every *active* service has started
// serving. Count non-nil slots explicitly: a deregistered (nil) slot is
// skipped below and never calls swg.Done(), so swg.Add(len(g.s)) would
// make swg.Wait() block forever and readyCh would never close.
active := 0
for idx := range g.s {
if g.s[idx] != nil {
active++
}
}
swg := &sync.WaitGroup{}
swg.Add(active)
go func() {
swg.Wait()
close(g.readyCh)
}()
// feed our registered services to our internal run.Group
for idx := range g.s {
// a Service might have been deregistered during Run
s := g.s[idx]
if s == nil {
continue
}
g.log.Debug().Uint32("total", uint32(len(g.s))).Uint32("ran", uint32(idx+1)).Str("name", s.Name()).Msg("serve")
g.r.Add(func() error {
notify := s.Serve()
// mark this service as started once Serve has returned its stop channel
swg.Done()
<-notify
return nil
}, func(_ error) {
g.log.Debug().Uint32("total", uint32(len(g.s))).Uint32("ran", uint32(idx+1)).Str("name", s.Name()).Msg("stop")
s.GracefulStop()
})
}
// start registered services and block until the first Serve() returns
return g.r.Run()
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c *Cfg) Run(args ...string) {\n\tif args == nil {\n\t\targs = os.Args[1:]\n\t}\n\tc, cmd, args, err := c.Parse(args)\n\tif err == nil {\n\t\tif err = cmd.Main(args); err == nil {\n\t\t\tExit(0)\n\t\t\treturn\n\t\t}\n\t}\n\tif err == ErrHelp {\n\t\tw := newWriter(c)\n\t\tdefer w.done(os.Stderr, 0)\n\t\tw.help()\n\t} else {\n\t\tswitch e := err.(type) {\n\t\tcase UsageError:\n\t\t\tw := newWriter(c)\n\t\t\tdefer w.done(os.Stderr, 2)\n\t\t\tw.error(string(e))\n\t\tcase ExitCode:\n\t\t\tExit(int(e))\n\t\tdefault:\n\t\t\tverb := \"%v\"\n\t\t\tif Debug {\n\t\t\t\tverb = \"%+v\"\n\t\t\t}\n\t\t\tfmt.Fprintf(os.Stderr, \"Error: \"+verb+\"\\n\", err)\n\t\t\tExit(1)\n\t\t}\n\t}\n}",
"func (cli *CLI) Run(args []string) int {\n\n\tc, err := cli.setup(args)\n\tif err != nil {\n\t\tlogging.Error(\"unable to parse configuration: %v\", err)\n\t\treturn ExitCodeParseConfigError\n\t}\n\n\t// Set the logging level for the logger.\n\tlogging.SetLevel(c.LogLevel)\n\n\t// Initialize telemetry if this was configured by the user.\n\tif c.Telemetry.StatsdAddress != \"\" {\n\t\tsink, statsErr := metrics.NewStatsdSink(c.Telemetry.StatsdAddress)\n\t\tif statsErr != nil {\n\t\t\tlogging.Error(\"unable to setup telemetry correctly: %v\", statsErr)\n\t\t\treturn ExitCodeTelemtryError\n\t\t}\n\t\tmetrics.NewGlobal(metrics.DefaultConfig(\"replicator\"), sink)\n\t}\n\n\t// Create the initial runner with the merged configuration parameters.\n\trunner, err := NewRunner(c)\n\tif err != nil {\n\t\treturn ExitCodeRunnerError\n\t}\n\n\tlogging.Debug(\"running version %v\", version.Get())\n\tgo runner.Start()\n\n\tsignalCh := make(chan os.Signal, 1)\n\tsignal.Notify(signalCh,\n\t\tsyscall.SIGHUP,\n\t\tsyscall.SIGINT,\n\t\tsyscall.SIGTERM,\n\t\tsyscall.SIGQUIT,\n\t)\n\n\tfor {\n\t\tselect {\n\t\tcase s := <-signalCh:\n\t\t\tswitch s {\n\t\t\tcase syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT:\n\t\t\t\trunner.Stop()\n\t\t\t\treturn ExitCodeInterrupt\n\n\t\t\tcase syscall.SIGHUP:\n\t\t\t\trunner.Stop()\n\n\t\t\t\t// Reload the configuration in order to make proper use of SIGHUP.\n\t\t\t\tc, err := cli.setup(args)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn ExitCodeParseConfigError\n\t\t\t\t}\n\n\t\t\t\t// Setup a new runner with the new configuration.\n\t\t\t\trunner, err = NewRunner(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn ExitCodeRunnerError\n\t\t\t\t}\n\n\t\t\t\tgo runner.Start()\n\t\t\t}\n\t\t}\n\t}\n}",
"func (s *ServiceManager) Run() error {\n\tif err := CheckAllRegisteredServices(); err != nil {\n\t\treturn err\n\t}\n\t// Run all service\n\tglog.Infof(\"There are %d service in iothub\", len(s.services))\n\tfor _, service := range s.services {\n\t\tglog.Infof(\"Starting service:'%s'...\", service.Name())\n\t\tgo service.Start()\n\t}\n\t// Wait all service to terminate in main context\n\tfor name, ch := range s.chs {\n\t\t<-ch\n\t\tglog.Info(\"Servide(%s) is terminated\", name)\n\t}\n\treturn nil\n}",
"func Run(m *testing.M, opts ...RunOption) {\n\t// Run tests in a separate function such that we can use deferred statements and still\n\t// (indirectly) call `os.Exit()` in case the test setup failed.\n\tif err := func() error {\n\t\tvar cfg runConfig\n\t\tfor _, opt := range opts {\n\t\t\topt(&cfg)\n\t\t}\n\n\t\tdefer mustHaveNoChildProcess()\n\t\tif !cfg.disableGoroutineChecks {\n\t\t\tdefer mustHaveNoGoroutines()\n\t\t}\n\n\t\tcleanup, err := configure()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"test configuration: %w\", err)\n\t\t}\n\t\tdefer cleanup()\n\n\t\tif cfg.setup != nil {\n\t\t\tif err := cfg.setup(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error calling setup function: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\tm.Run()\n\n\t\treturn nil\n\t}(); err != nil {\n\t\tfmt.Printf(\"%s\", err)\n\t\tos.Exit(1)\n\t}\n}",
"func (o *Orchestrator) Run() {\n\tv := newValidator(o.Plugin)\n\tm := newMetrics(o.healthzPath, o.healthzPort, o.metricsPath, o.metricsPort)\n\n\tv.mustValidatePrerequisites()\n\n\to.mustServeKMSRequests()\n\n\t// Giving some time for kmsPlugin to start Serving.\n\t// TODO: Must be a better way than to sleep.\n\ttime.Sleep(3 * time.Millisecond)\n\n\tv.mustPingRPC()\n\tmustGatherMetrics()\n\n\tm.mustServeHealthzAndMetrics()\n\n\t// Giving some time for HealthZ and Metrics to start Serving.\n\t// TODO: Must be a better way than to sleep.\n\ttime.Sleep(3 * time.Millisecond)\n\tmustEmitOKHealthz()\n\tmustEmitMetrics()\n}",
"func (m *Manager) Run(ctx context.Context) error {\n\t// start log broadcaster\n\t_ = utils.Pool.Submit(func() { m.logBroadcaster.run(ctx) })\n\n\t// initWorkloadStatus container\n\tif err := m.initWorkloadStatus(ctx); err != nil {\n\t\treturn err\n\t}\n\n\t// start status watcher\n\t_ = utils.Pool.Submit(func() { m.monitor(ctx) })\n\n\t// start health check\n\t_ = utils.Pool.Submit(func() { m.healthCheck(ctx) })\n\n\t// wait for signal\n\t<-ctx.Done()\n\tlog.WithFunc(\"Run\").Info(ctx, \"exiting\")\n\treturn nil\n}",
"func Run(args []string) {\n\t// Parse the arguments\n\tvar cmdCfg CmdConfig\n\tif err := parse(args, &cmdCfg); err != nil {\n\t\tlog.Errorf(\"%s\", err)\n\t\tfmt.Fprintf(os.Stderr, \"USAGE \\n\\n\\t%s\\n\\n\", os.Args[0])\n\t\tfmt.Fprint(os.Stderr, \"GLOBAL OPTIONS:\\n\\n\")\n\t\tusage(os.Stderr, &cmdCfg)\n\n\t\tos.Exit(1)\n\t}\n\n\t// set up global context for signal interuption\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tvar stop = make(chan os.Signal)\n\tsignal.Notify(stop, syscall.SIGTERM, syscall.SIGINT)\n\tgo func() {\n\t\tsig := <-stop\n\t\tfmt.Printf(\"caught sig: %+v\\n\", sig)\n\t\tcancel()\n\t\tfmt.Println(\"Waiting up to 2 seconds to finish.\")\n\t\ttime.Sleep(2 * time.Second)\n\t\tos.Exit(0)\n\t}()\n\n\t// Global Configuration\n\t// Read deployment and service files into Kubernetes structs\n\tkubeServiceConfig, kubeServiceConfigErr := getServiceConfig(&cmdCfg)\n\tif kubeServiceConfigErr != nil {\n\t\tlog.Errorf(\"%s\", kubeServiceConfigErr)\n\t\tos.Exit(2)\n\t}\n\n\t// Regional configuration.\n\t// Gather environment variables and secret references.\n\t// Retrieve secrets from vault.\n\t// Create configmap and secret object.\n\tvar regionEnvs []*RegionEnv\n\tfor regionEnv := range createEnv(kubeServiceConfig, fetchSecrets(getConfig(ctx, &cmdCfg))) {\n\t\tlog.Debugf(\"Retrieved Configuration %+v\", regionEnv)\n\t\tif len(regionEnv.Errors) > 0 {\n\t\t\tlog.Errorf(\"%s\", regionEnv.Errors)\n\t\t\tos.Exit(2)\n\t\t}\n\t\tregionEnvs = append(regionEnvs, regionEnv)\n\t}\n\n\t// Run and monitor updates in this order.\n\tupdateFns := 
[]UpdateFn{\n\t\tupdateConfigMapRegion,\n\t\tupdateServiceRegion,\n\t\tupdateServiceAccountRegion,\n\t\tupdateIngressRegion,\n\t\tupdateIngressRouteRegion,\n\t\tupdateGatewayRegion,\n\t\tupdateVirtualServiceRegion,\n\t\tupdateServiceInstanceRegion,\n\t\tupdateServiceBindingRegion,\n\t\tupdateNamedSecretsRegion,\n\t\tupdateDeploymentRegion,\n\t\tupdateJobRegion,\n\t\tupdateHPAutoscalerRegion,\n\t\tupdatePodDisruptionBudgetRegion,\n\t}\n\tfor _, updateFn := range updateFns {\n\t\terr := runUpdate(regionEnvs, updateFn)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"%s\", err)\n\t\t\tos.Exit(2)\n\t\t}\n\t}\n}",
"func Run() {\n\tl := logrus.WithField(\"component\", \"main\")\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer l.Info(\"Done.\")\n\n\t// handle termination signals\n\tsignals := make(chan os.Signal, 1)\n\tsignal.Notify(signals, unix.SIGTERM, unix.SIGINT)\n\tgo func() {\n\t\ts := <-signals\n\t\tsignal.Stop(signals)\n\t\tl.Warnf(\"Got %s, shutting down...\", unix.SignalName(s.(unix.Signal)))\n\t\tcancel()\n\t}()\n\n\tfor {\n\t\tcfg, configFilepath, err := config.Get(l)\n\t\tif err != nil {\n\t\t\tl.Fatalf(\"Failed to load configuration: %s.\", err)\n\t\t}\n\t\tconfig.ConfigureLogger(cfg)\n\t\tl.Debugf(\"Loaded configuration: %+v\", cfg)\n\n\t\trun(ctx, cfg, configFilepath)\n\n\t\tif ctx.Err() != nil {\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (o *Options) Run(ctx context.Context) error {\n\tlog.Info(\"getting rest config\")\n\trestConfig, err := config.GetConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Info(\"setting up manager\")\n\tmgr, err := manager.New(restConfig, manager.Options{\n\t\tScheme: kubernetes.SeedScheme,\n\t\tLeaderElection: false,\n\t\tMetricsBindAddress: \"0\", // disable for now, as we don't scrape the component\n\t\tHost: o.BindAddress,\n\t\tPort: o.Port,\n\t\tCertDir: o.ServerCertDir,\n\t\tGracefulShutdownTimeout: &gracefulShutdownTimeout,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Info(\"setting up webhook server\")\n\tserver := mgr.GetWebhookServer()\n\tserver.Register(extensioncrds.WebhookPath, &webhook.Admission{Handler: extensioncrds.New(runtimelog.Log.WithName(extensioncrds.HandlerName))})\n\tserver.Register(podschedulername.WebhookPath, &webhook.Admission{Handler: admission.HandlerFunc(podschedulername.DefaultShootControlPlanePodsSchedulerName)})\n\tserver.Register(extensionresources.WebhookPath, &webhook.Admission{Handler: extensionresources.New(runtimelog.Log.WithName(extensionresources.HandlerName), o.AllowInvalidExtensionResources)})\n\n\tlog.Info(\"starting manager\")\n\tif err := mgr.Start(ctx); err != nil {\n\t\tlog.Error(err, \"error running manager\")\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (s *VMTServer) Run(_ []string) error {\n\tif err := s.checkFlag(); err != nil {\n\t\tglog.Errorf(\"check flag failed:%v. abort.\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tglog.V(3).Infof(\"spec path is: %v\", s.K8sTAPSpec)\n\tk8sTAPSpec, err := kubeturbo.ParseK8sTAPServiceSpec(s.K8sTAPSpec)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to generate correct TAP config: %v\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tkubeConfig := s.createKubeConfigOrDie()\n\tkubeClient := s.createKubeClientOrDie(kubeConfig)\n\tkubeletClient := s.createKubeletClientOrDie(kubeConfig)\n\tprobeConfig := s.createProbeConfigOrDie(kubeConfig, kubeletClient)\n\tbroker := turbostore.NewPodBroker()\n\n\tvmtConfig := kubeturbo.NewVMTConfig2()\n\tvmtConfig.WithTapSpec(k8sTAPSpec).\n\t\tWithKubeClient(kubeClient).\n\t\tWithKubeletClient(kubeletClient).\n\t\tWithProbeConfig(probeConfig).\n\t\tWithBroker(broker).\n\t\tWithK8sVersion(s.K8sVersion).\n\t\tWithNoneScheduler(s.NoneSchedulerName).\n\t\tWithRecorder(createRecorder(kubeClient))\n\tglog.V(3).Infof(\"Finished creating turbo configuration: %+v\", vmtConfig)\n\n\tvmtService := kubeturbo.NewKubeturboService(vmtConfig)\n\trun := func(_ <-chan struct{}) {\n\t\tvmtService.Run()\n\t\tselect {}\n\t}\n\n\tgo s.startHttp()\n\n\t//if !s.LeaderElection.LeaderElect {\n\tglog.V(2).Infof(\"No leader election\")\n\trun(nil)\n\n\tglog.Fatal(\"this statement is unreachable\")\n\tpanic(\"unreachable\")\n}",
"func (a *App) Run(ctx context.Context) error {\n\n\t// 1. instantiate all components\n\n\tfor _, runnable := range a.runnables {\n\t\ta.logger(\"building: %s\", runnable)\n\n\t\t_, err := a.getValue(runnable)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"building %s: %w\", runnable, err)\n\t\t}\n\t}\n\n\t// 2. start all runnable\n\tctx, cancelCtx := context.WithCancel(ctx)\n\tdefer cancelCtx()\n\n\tvar wg sync.WaitGroup\n\n\tfor _, runnable := range a.runnables {\n\t\ta.logger(\"starting: %s\", runnable)\n\t\terr := a.start(ctx, runnable, &wg)\n\t\tif err != nil {\n\t\t\ta.logger(\"runnable failed to start: %s\", runnable)\n\t\t\tcancelCtx()\n\t\t\tbreak\n\t\t}\n\t}\n\n\t<-ctx.Done()\n\ta.logger(\"context cancelled, starting shutdown\")\n\n\t// 3. TODO: cancel component in reverse order\n\n\twg.Wait()\n\n\treturn nil\n}",
"func (c *cli) Run(args []string) int {\n\t// Parse CLI args and flags\n\tflags, err := c.parseArgs(args)\n\n\tif err != nil {\n\t\tfmt.Fprintf(c.stderr, \"%v\\n\", err)\n\t\treturn 1\n\t}\n\n\t// Exit immediately if user asked for a help or an app version\n\tif flags.isHelp || flags.isVersion {\n\t\treturn 0\n\t}\n\n\t// Setup logrus\n\tc.configureLogger(flags.isVerbose)\n\n\t// Load config\n\tconfigChan, err := c.prepareConfigChan(flags.configPath)\n\n\tif err != nil {\n\t\tfmt.Fprintf(c.stderr, \"failed to load config: %v\\n\", err)\n\t\treturn 1\n\t}\n\n\t// Run the server (this is blocking call)\n\terr = c.runServer(configChan)\n\n\tif err != nil {\n\t\tfmt.Fprintf(c.stderr, \"server error: %v\\n\", err)\n\t\treturn 1\n\t}\n\n\treturn 0\n}",
"func (p *Pipeline) Run(args []string) int {\n\tif err := p.LoadConfig(); err != nil {\n\t\treturn 1\n\t}\n\treturn 0\n}",
"func (conf *Config) Run() error {\n\t// no error-checking if nothing to check errors on\n\tif conf == nil {\n\t\treturn nil\n\t}\n\tif conf.ThreadCount < 0 {\n\t\treturn fmt.Errorf(\"invalid thread count %d [must be a positive number]\", conf.ThreadCount)\n\t}\n\t// if not given other instructions, just describe the specs\n\tif !conf.Generate && !conf.Delete && conf.Verify == \"\" {\n\t\tconf.Describe = true\n\t\tconf.onlyDescribe = true\n\t}\n\tif conf.Verify != \"\" {\n\t\terr := conf.verifyType.UnmarshalText([]byte(conf.Verify))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unknown verify type '%s'\", conf.Verify)\n\t\t}\n\t} else {\n\t\t// If we're deleting, and not generating, we can skip\n\t\t// verification. Otherwise, default to erroring out if\n\t\t// there's a mismatch.\n\t\tif conf.Delete && !conf.Generate {\n\t\t\tconf.verifyType = verifyTypeNone\n\t\t} else {\n\t\t\tconf.verifyType = verifyTypeError\n\t\t}\n\t}\n\tconf.NewSpecsFiles(conf.flagset.Args())\n\tif len(conf.specFiles) < 1 {\n\t\treturn errors.New(\"must specify one or more spec files\")\n\t}\n\tif conf.ColumnScale < 0 || conf.ColumnScale > (1<<31) {\n\t\treturn fmt.Errorf(\"column scale [%d] should be between 1 and 2^31\", conf.ColumnScale)\n\t}\n\tif conf.RowScale < 0 || conf.RowScale > (1<<16) {\n\t\treturn fmt.Errorf(\"row scale [%d] should be between 1 and 2^16\", conf.RowScale)\n\t}\n\treturn nil\n}",
"func (g *Group) RunConfig() (interrupted bool, err error) {\n\tg.log = logger.GetLogger(g.name)\n\tg.configured = true\n\n\tif g.name == \"\" {\n\t\t// use the binary name if custom name has not been provided\n\t\tg.name = path.Base(os.Args[0])\n\t}\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tg.log.Error().Err(err).Msg(\"unexpected exit\")\n\t\t}\n\t}()\n\n\t// Load config from env and file\n\tif err = config.Load(g.f.Name, g.f.FlagSet); err != nil {\n\t\treturn false, errors.Wrapf(err, \"%s fails to load config\", g.f.Name)\n\t}\n\n\t// bail early on help or version requests\n\tswitch {\n\tcase g.showRunGroup:\n\t\tfmt.Println(g.ListUnits())\n\t\treturn true, nil\n\t}\n\n\t// Validate Config inputs\n\tfor idx := range g.c {\n\t\t// a Config might have been deregistered during Run\n\t\tif g.c[idx] == nil {\n\t\t\tg.log.Debug().Uint32(\"ran\", uint32(idx+1)).Msg(\"skipping validate\")\n\t\t\tcontinue\n\t\t}\n\t\tg.log.Debug().Str(\"name\", g.c[idx].Name()).Uint32(\"ran\", uint32(idx+1)).Uint32(\"total\", uint32(len(g.c))).Msg(\"validate config\")\n\t\tif vErr := g.c[idx].Validate(); vErr != nil {\n\t\t\terr = multierr.Append(err, vErr)\n\t\t}\n\t}\n\n\t// exit on at least one Validate error\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// log binary name and version\n\tg.log.Info().Msg(\"started\")\n\n\treturn false, nil\n}",
"func (bs *BusinessServer) Run() {\n\t// initialize config.\n\tbs.initConfig()\n\n\t// initialize logger.\n\tbs.initLogger()\n\tdefer bs.Stop()\n\n\t// initialize server modules.\n\tbs.initMods()\n\n\t// register businessserver service.\n\tgo func() {\n\t\tif err := bs.service.Register(bs.etcdCfg); err != nil {\n\t\t\tlogger.Fatal(\"register service for discovery, %+v\", err)\n\t\t}\n\t}()\n\tlogger.Info(\"register service for discovery success.\")\n\n\t// run service.\n\ts := grpc.NewServer(grpc.MaxRecvMsgSize(math.MaxInt32))\n\tpb.RegisterBusinessServer(s, bs)\n\tlogger.Info(\"Business Server running now.\")\n\n\tif err := s.Serve(bs.lis); err != nil {\n\t\tlogger.Fatal(\"start businessserver gRPC service. %+v\", err)\n\t}\n}",
"func (c *SystemCommand) Run(args []string) int {\n\tvar debug bool\n\tf := flag.NewFlagSet(\"system\", flag.ContinueOnError)\n\tf.Usage = func() { c.UI.Output(c.Help()) }\n\tf.BoolVar(&debug, \"debug\", false, \"Debug mode enabled\")\n\tif err := f.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\tsetupLogging(debug)\n\treturn c.doSystemInformations()\n}",
"func Run(root string) error {\n\tv1, err := readConfig()\n\tmust(err)\n\n\tvar context = runconf{\n\t\tFilterOut: v1.GetStringSlice(\"filterOut\"),\n\t\tLookFor: v1.GetStringSlice(\"lookFor\"),\n\t\trootPath: root,\n\t}\n\n\titerate(context)\n\treturn nil\n}",
"func Run(settings *Settings) error {\n\tlog.Infof(\"using k8s version: %s\", settings.KubernetesVersion.String())\n\n\t// parse config\n\tlog.Infof(\"reading config from path: %s\", settings.PathConfig)\n\tconfigBytes, err := os.ReadFile(settings.PathConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\tconfig := &config{}\n\tif err := yaml.UnmarshalStrict(configBytes, &config); err != nil {\n\t\treturn errors.Wrapf(err, \"cannot parse config\")\n\t}\n\n\tfor _, j := range config.JobGroups {\n\t\tif err := processjobGroup(settings, &j); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (s *Server) Run(ctx context.Context) error {\n\t// Attach the context to the errgroup so that goroutines are canceled when\n\t// one of them returns an error.\n\teg, ctx := errgroup.WithContext(ctx)\n\ts.eg = eg\n\tdefer close(s.ready)\n\n\tmm := NewAdvertiserMetrics(s.reg)\n\n\t// Serve on each specified interface.\n\tfor _, ifi := range s.cfg.Interfaces {\n\t\t// Prepend the interface name to all logs for this server.\n\t\tlogf := func(format string, v ...interface{}) {\n\t\t\ts.ll.Println(ifi.Name + \": \" + fmt.Sprintf(format, v...))\n\t\t}\n\n\t\tif !ifi.SendAdvertisements {\n\t\t\tlogf(\"send advertisements is false, skipping initialization\")\n\t\t\tcontinue\n\t\t}\n\n\t\tlogf(\"initializing with %d plugins\", len(ifi.Plugins))\n\n\t\tfor i, p := range ifi.Plugins {\n\t\t\tlogf(\"plugin %02d: %q: %s\", i, p.Name(), p)\n\t\t}\n\n\t\t// TODO: find a way to reasonably test this.\n\n\t\t// Begin advertising on this interface until the context is canceled.\n\t\tad, err := NewAdvertiser(ifi, s.ll, mm)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to create NDP advertiser: %v\", err)\n\t\t}\n\n\t\ts.eg.Go(func() error {\n\t\t\tif err := ad.Advertise(ctx); err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to advertise NDP: %v\", err)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\t}\n\n\t// Configure the HTTP debug server, if applicable.\n\tif err := s.runDebug(ctx); err != nil {\n\t\treturn fmt.Errorf(\"failed to start debug HTTP server: %v\", err)\n\t}\n\n\t// Indicate readiness to any waiting callers, and then wait for all\n\t// goroutines to be canceled and stopped successfully.\n\ts.ready <- struct{}{}\n\tif err := s.eg.Wait(); err != nil {\n\t\treturn fmt.Errorf(\"failed to serve: %v\", err)\n\t}\n\n\treturn nil\n}",
"func Run() int {\n\tpflag.Parse()\n\tpopulateAvailableKubeconfigs()\n\n\tif len(availableKubeconfigs) == 0 {\n\t\tprintKubeConfigHelpOutput()\n\t\treturn 2\n\t}\n\n\t// DEBUG\n\tfmt.Println(availableKubeconfigs)\n\treturn 0\n}",
"func Run(ctx context.Context, cfg *config.Config) error {\n\tMetrics = newMetrics()\n\tdefer runCleanupHooks()\n\n\t// apply defaults before validation\n\tcfg.ApplyDefaults()\n\n\terr := cfg.Validate()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to validate config: %w\\n%+v\", err, cfg)\n\t}\n\n\tfuncMap := template.FuncMap{}\n\terr = bindPlugins(ctx, cfg, funcMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// if a custom Stdin is set in the config, inject it into the context now\n\tctx = data.ContextWithStdin(ctx, cfg.Stdin)\n\n\topts := optionsFromConfig(cfg)\n\topts.Funcs = funcMap\n\ttr := NewRenderer(opts)\n\n\tstart := time.Now()\n\n\tnamer := chooseNamer(cfg, tr)\n\ttmpl, err := gatherTemplates(ctx, cfg, namer)\n\tMetrics.GatherDuration = time.Since(start)\n\tif err != nil {\n\t\tMetrics.Errors++\n\t\treturn fmt.Errorf(\"failed to gather templates for rendering: %w\", err)\n\t}\n\tMetrics.TemplatesGathered = len(tmpl)\n\n\terr = tr.RenderTemplates(ctx, tmpl)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (c *RunCommand) Run(args []string) int {\n\tvar (\n\t\tlangFlag string\n\t\tvalidaterFlag string\n\t\tverboseFlag bool\n\t\troundFlag int\n\t)\n\n\tflags := c.Meta.NewFlagSet(\"run\", c.Help())\n\tflags.StringVar(&langFlag, \"l\", \"\", \"Specify Language\")\n\tflags.StringVar(&langFlag, \"language\", \"\", \"Specify Language\")\n\tflags.StringVar(&validaterFlag, \"V\", \"\", \"Specify Validater\")\n\tflags.StringVar(&validaterFlag, \"validater\", \"\", \"Specify Validater\")\n\tflags.BoolVar(&verboseFlag, \"vb\", false, \"increase amount of output\")\n\tflags.BoolVar(&verboseFlag, \"verbose\", false, \"increase amount of output\")\n\tflags.IntVar(&roundFlag, \"p\", 0, \"Rounded to the decimal point p digits\")\n\tflags.IntVar(&roundFlag, \"place\", 0, \"Rounded to the decimal point place digits\")\n\n\tif err := flags.Parse(args); err != nil {\n\t\tmsg := fmt.Sprintf(\"Invalid option: %s\", strings.Join(args, \" \"))\n\t\tc.UI.Error(msg)\n\t\treturn ExitCodeFailed\n\t}\n\targs = flags.Args()\n\n\tif len(args) < 2 {\n\t\tmsg := fmt.Sprintf(\"Invalid arguments: %s\", strings.Join(args, \" \"))\n\t\tc.UI.Error(msg)\n\t\treturn ExitCodeFailed\n\t}\n\n\tif _, err := os.Stat(args[0]); err != nil {\n\t\tc.UI.Error(\"does not exist (No such directory)\")\n\t\treturn ExitCodeFailed\n\t}\n\n\tif langFlag == \"\" {\n\t\tlangFlag = strings.Replace(path.Ext(args[1]), \".\", \"\", -1)\n\t}\n\tlang, ok := Lang[langFlag]\n\tif !ok {\n\t\tmsg := fmt.Sprintf(\"Invalid language: %s\", langFlag)\n\t\tc.UI.Error(msg)\n\t\treturn ExitCodeFailed\n\t}\n\n\tif roundFlag < 0 || roundFlag > 15 {\n\t\tmsg := fmt.Sprintf(\"Invalid round: %d\", roundFlag)\n\t\tc.UI.Error(msg)\n\t\treturn ExitCodeFailed\n\t}\n\n\tif validaterFlag == \"float\" {\n\t\tValidaters[\"float\"] = &FloatValidater{Place: roundFlag}\n\t}\n\n\tif validaterFlag == \"\" {\n\t\tvalidaterFlag = \"diff\"\n\t}\n\tv, ok := Validaters[validaterFlag]\n\tif !ok {\n\t\tmsg := fmt.Sprintf(\"Invalid validater: %s\", 
validaterFlag)\n\t\tc.UI.Error(msg)\n\t\treturn ExitCodeFailed\n\t}\n\n\tinfoBuf, err := ioutil.ReadFile(args[0] + \"/\" + \"info.json\")\n\tif err != nil {\n\t\tc.UI.Error(fmt.Sprintf(\"failed to read info file: %v\", err))\n\t\treturn ExitCodeFailed\n\t}\n\n\tinfo := Info{}\n\tif err := json.Unmarshal(infoBuf, &info); err != nil {\n\t\tc.UI.Error(err.Error())\n\t\treturn ExitCodeFailed\n\t}\n\n\tvar w, e io.Writer\n\tif verboseFlag {\n\t\tw, e = os.Stdout, os.Stderr\n\t}\n\n\tcode, result, clearFunc, err := NewCode(args[1], lang, &info, w, e)\n\tif err != nil {\n\t\tc.UI.Output(err.Error())\n\t\treturn ExitCodeFailed\n\t}\n\tc.UI.Output(result.String())\n\tdefer clearFunc()\n\n\tvar rCode *Code\n\tif info.JudgeType > 0 {\n\t\trCode, clearFunc, err = NewReactiveCode(&info, args[0], w, e)\n\t\tif err != nil {\n\t\t\tc.UI.Error(err.Error())\n\t\t\treturn ExitCodeFailed\n\t\t}\n\t\tdefer clearFunc()\n\t}\n\n\tinputFiles, err := filepath.Glob(strings.Join([]string{args[0], \"test_in\", \"*\"}, \"/\"))\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"input testcase error: %v\", err)\n\t\tc.UI.Error(msg)\n\t\treturn ExitCodeFailed\n\t}\n\n\toutputFiles, err := filepath.Glob(strings.Join([]string{args[0], \"test_out\", \"*\"}, \"/\"))\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"output testcase error: %v\", err)\n\t\tc.UI.Error(msg)\n\t\treturn ExitCodeFailed\n\t}\n\n\tfor i := 0; i < len(inputFiles); i++ {\n\t\terr := func() error {\n\t\t\tinput, err := os.Open(inputFiles[i])\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"input test file error: %v\", err)\n\t\t\t}\n\t\t\tdefer input.Close()\n\n\t\t\toutput, err := ioutil.ReadFile(outputFiles[i])\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"output test file error: %v\", err)\n\t\t\t}\n\n\t\t\tvar result string\n\t\t\tif info.JudgeType > 0 {\n\t\t\t\tresult, err = rCode.Reactive(code, inputFiles[i], outputFiles[i], input, w, e)\n\t\t\t} else {\n\t\t\t\tvar buf bytes.Buffer\n\t\t\t\tresult, err = code.Run(v, 
output, input, &buf, e)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t_, testFile := path.Split(inputFiles[i])\n\t\t\tc.UI.Output(fmt.Sprintf(\"%s\\t%s\", result, testFile))\n\t\t\treturn nil\n\t\t}()\n\t\tif err != nil {\n\t\t\tc.UI.Error(err.Error())\n\t\t\treturn ExitCodeFailed\n\t\t}\n\t}\n\treturn ExitCodeOK\n}",
"func Run(ctx context.Context) error {\n\tctx, cancel := context.WithCancel(ctx)\n\tlogger := log.WithContext(nil)\n\n\ttester := server.Prepare(&server.Options{\n\t\tName: \"tester\",\n\t\tInsecureAddr: viper.GetString(options.FlagServerInsecureBindAddress.GetLong()),\n\t\tSecureAddr: viper.GetString(options.FlagServerSecureBindAddress.GetLong()),\n\t\tInsecurePort: viper.GetInt(options.FlagServerInsecurePort.GetLong()),\n\t\tSecurePort: viper.GetInt(options.FlagServerSecurePort.GetLong()),\n\t\tTLSKey: viper.GetString(options.FlagServerTLSKeyFile.GetLong()),\n\t\tTLSCert: viper.GetString(options.FlagServerTLSCertFile.GetLong()),\n\t\tTLSCa: viper.GetString(options.FlagServerTLSCaFile.GetLong()),\n\t\tHandler: chain.New().Add(\n\t\t\tmiddleware.Correlation,\n\t\t\tmiddleware.Metrics,\n\t\t).Link(middleware.Dump),\n\t\tLogger: logger.Sugar(),\n\t})\n\n\tmetrics := server.Prepare(&server.Options{\n\t\tName: \"prometheus\",\n\t\tInsecureAddr: viper.GetString(options.FlagPrometheusInsecureBindAddress.GetLong()),\n\t\tInsecurePort: viper.GetInt(options.FlagPrometheusInsecurePort.GetLong()),\n\t\tHandler: promhttp.Handler(),\n\t\tLogger: logger.Sugar(),\n\t})\n\n\tvar g run.Group\n\n\tg.Add(func() error {\n\t\t<-ctx.Done()\n\t\treturn nil\n\t}, func(error) {\n\t\tcancel()\n\t})\n\n\tg.Add(func() error {\n\t\treturn logError(metrics.Run())\n\t}, func(error) {\n\t\tlogError(metrics.Close())\n\t})\n\n\tg.Add(func() error {\n\t\treturn logError(tester.Run())\n\t}, func(error) {\n\t\tlogError(tester.Close())\n\t})\n\n\treturn g.Run()\n}",
"func (s *Service) Run(c context.Context, f func(context.Context) error) {\n\tc = gologger.StdConfig.Use(c)\n\n\t// If a service name isn't specified, default to the base of the current\n\t// executable.\n\tif s.Name == \"\" {\n\t\ts.Name = filepath.Base(os.Args[0])\n\t}\n\n\trc := 0\n\tif err := s.runImpl(c, f); err != nil {\n\t\tlog.WithError(err).Errorf(c, \"Application exiting with error.\")\n\t\trc = 1\n\t}\n\tos.Exit(rc)\n}",
"func (c *EBSCommand) Run(args []string) int {\n\n\t// Decorate this CLI's UI\n\tc.Ui = &cli.PrefixedUi{\n\t\tOutputPrefix: \" \",\n\t\tInfoPrefix: \"INFO: \",\n\t\tErrorPrefix: \"ERROR: \",\n\t\tUi: c.Ui,\n\t}\n\n\t// Set the args which may have the mtest config\n\tc.args = args\n\n\t// Dependency Injection\n\tif !c.IsInitialized() {\n\t\terr := c.SetAll()\n\t\tif err != nil {\n\t\t\tc.Ui.Error(err.Error())\n\t\t\treturn 1\n\t\t}\n\t}\n\n\tif c.wtrVarsMake == nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Writer-variants-maker instance is nil.\"))\n\t\treturn 1\n\t}\n\n\t// Defer flush the gatedWriter which is linked to this\n\t// CLI's io.writer during Dependency Injection\n\tgatedLogger := c.wtrVarsMake.GatedWriter()\n\tif gatedLogger == nil {\n\t\treturn 1\n\t}\n\tdefer gatedLogger.Flush()\n\n\tif c.mtestMake == nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Mtest-maker instance is nil.\"))\n\t\treturn 1\n\t}\n\n\t// ebs cli is meant to run Maya Server\n\t// Get a Mtest instance that is associated with Maya Server\n\tmt, err := c.mtestMake.Make()\n\tif err != nil {\n\t\tc.Ui.Error(err.Error())\n\t\treturn 1\n\t}\n\n\t// Output the header that the server has started\n\tc.Ui.Output(\"Mtest ebs run started! Log data will start streaming:\\n\")\n\n\t// Start EBS use cases\n\trpts, err := mt.Start()\n\tdefer mt.Stop()\n\n\tif err != nil {\n\t\tc.Ui.Error(err.Error())\n\t\t// Exit code is set to 0 as this has nothing to do\n\t\t// with running of CLI. CLI execution was fine.\n\t\treturn 0\n\t}\n\n\tc.Ui.Info(fmt.Sprintf(\"%+s\", rpts))\n\n\treturn 0\n}",
"func Run(rpcCfg RPCConfig) error {\n\tconfig := DefaultConfig()\n\n\t// Parse command line flags.\n\tparser := flags.NewParser(&config, flags.Default)\n\tparser.SubcommandsOptional = true\n\n\t_, err := parser.Parse()\n\tif e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Parse ini file.\n\tloopDir := lncfg.CleanAndExpandPath(config.LoopDir)\n\tconfigFile := lncfg.CleanAndExpandPath(config.ConfigFile)\n\n\t// If our loop directory is set and the config file parameter is not\n\t// set, we assume that they want to point to a config file in their\n\t// loop dir. However, if the config file has a non-default value, then\n\t// we leave the config parameter as its custom value.\n\tif loopDir != loopDirBase && configFile == defaultConfigFile {\n\t\tconfigFile = filepath.Join(\n\t\t\tloopDir, defaultConfigFilename,\n\t\t)\n\t}\n\n\tif err := flags.IniParse(configFile, &config); err != nil {\n\t\t// If it's a parsing related error, then we'll return\n\t\t// immediately, otherwise we can proceed as possibly the config\n\t\t// file doesn't exist which is OK.\n\t\tif _, ok := err.(*flags.IniError); ok {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Parse command line flags again to restore flags overwritten by ini\n\t// parse.\n\t_, err = parser.Parse()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Show the version and exit if the version flag was specified.\n\tappName := filepath.Base(os.Args[0])\n\tappName = strings.TrimSuffix(appName, filepath.Ext(appName))\n\tif config.ShowVersion {\n\t\tfmt.Println(appName, \"version\", loop.Version())\n\t\tos.Exit(0)\n\t}\n\n\t// Special show command to list supported subsystems and exit.\n\tif config.DebugLevel == \"show\" {\n\t\tfmt.Printf(\"Supported subsystems: %v\\n\",\n\t\t\tlogWriter.SupportedSubsystems())\n\t\tos.Exit(0)\n\t}\n\n\t// Validate our config before we proceed.\n\tif err := Validate(&config); err != nil {\n\t\treturn err\n\t}\n\n\t// Initialize 
logging at the default logging level.\n\terr = logWriter.InitLogRotator(\n\t\tfilepath.Join(config.LogDir, defaultLogFilename),\n\t\tconfig.MaxLogFileSize, config.MaxLogFiles,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = build.ParseAndSetDebugLevels(config.DebugLevel, logWriter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Print the version before executing either primary directive.\n\tlog.Infof(\"Version: %v\", loop.Version())\n\n\tlisCfg := newListenerCfg(&config, rpcCfg)\n\n\t// Execute command.\n\tif parser.Active == nil {\n\t\tsignal.Intercept()\n\n\t\tdaemon := New(&config, lisCfg)\n\t\tif err := daemon.Start(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tselect {\n\t\tcase <-signal.ShutdownChannel():\n\t\t\tlog.Infof(\"Received SIGINT (Ctrl+C).\")\n\t\t\tdaemon.Stop()\n\n\t\t\t// The above stop will return immediately. But we'll be\n\t\t\t// notified on the error channel once the process is\n\t\t\t// complete.\n\t\t\treturn <-daemon.ErrChan\n\n\t\tcase err := <-daemon.ErrChan:\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif parser.Active.Name == \"view\" {\n\t\treturn view(&config, lisCfg)\n\t}\n\n\treturn fmt.Errorf(\"unimplemented command %v\", parser.Active.Name)\n}",
"func Run() derrors.Error {\n\t// Channel to signal errors from starting the servers\n\terrChan := make(chan error, 1)\n\n\t// Start listening on API port\n\tgrpcListener, err := net.Listen(\"tcp\", \":12345\")\n\tif err != nil {\n\t\treturn derrors.NewUnavailableError(\"failed to listen\", err)\n\t}\n\n\tgrpcServer, derr := ec_stub.Start(grpcListener, errChan)\n\tif derr != nil {\n\t\treturn derr\n\t}\n\tdefer grpcServer.GracefulStop()\n\n\t// Wait for termination signal\n\tsigterm := make(chan os.Signal, 1)\n\tsignal.Notify(sigterm, syscall.SIGTERM)\n\tsignal.Notify(sigterm, syscall.SIGINT)\n\n\tselect {\n\tcase sig := <-sigterm:\n\t\tlog.Info().Str(\"signal\", sig.String()).Msg(\"Gracefully shutting down\")\n\tcase err := <-errChan:\n\t\t// We've already logged the error\n\t\treturn derrors.NewInternalError(\"failed starting server\", err)\n\t}\n\n\treturn nil\n}",
"func (a *Agent) Run(ctx context.Context) error {\n\ta.Context = ctx\n\tlog.Printf(\"I! [agent] Config: Interval:%s, Quiet:%#v, Hostname:%#v, \"+\n\t\t\"Flush Interval:%s\",\n\t\ta.Config.Agent.Interval.Duration, a.Config.Agent.Quiet,\n\t\ta.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)\n\n\tlog.Printf(\"D! [agent] Initializing plugins\")\n\terr := a.initPlugins()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstartTime := time.Now()\n\tlog.Printf(\"D! [agent] Connecting outputs\")\n\tnext, ou, err := a.startOutputs(ctx, a.Config.Outputs)\n\tif err != nil {\n\t\treturn err\n\t}\n\ta.ou = ou\n\tvar apu []*processorUnit\n\tvar au *aggregatorUnit\n\tif len(a.Config.Aggregators) != 0 {\n\t\taggC := next\n\t\tif len(a.Config.AggProcessors) != 0 {\n\t\t\taggC, apu, err = a.startProcessors(next, a.Config.AggProcessors)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tnext, au, err = a.startAggregators(aggC, next, a.Config.Aggregators)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar pu []*processorUnit\n\tif len(a.Config.Processors) != 0 {\n\t\tnext, pu, err = a.startProcessors(next, a.Config.Processors)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tiu, err := a.startInputs(next, a.Config.Inputs)\n\ta.iu = iu\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\terr := a.runOutputs(ou)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"E! [agent] Error running outputs: %v\", err)\n\t\t}\n\t}()\n\n\tif au != nil {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := a.runProcessors(apu)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"E! [agent] Error running processors: %v\", err)\n\t\t\t}\n\t\t}()\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := a.runAggregators(startTime, au)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"E! 
[agent] Error running aggregators: %v\", err)\n\t\t\t}\n\t\t}()\n\t}\n\n\tif pu != nil {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := a.runProcessors(pu)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"E! [agent] Error running processors: %v\", err)\n\t\t\t}\n\t\t}()\n\t}\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\terr := a.runInputs(ctx, startTime, iu)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"E! [agent] Error running inputs: %v\", err)\n\t\t}\n\t}()\n\n\twg.Wait()\n\n\tlog.Printf(\"D! [agent] Stopped Successfully\")\n\treturn err\n}",
"func Run(c *cli.Context, o *options) error {\n\terr := verifyFlags(o)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to verify flags: %v\", err)\n\t}\n\n\terr = initialize()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to initialize: %v\", err)\n\t}\n\tdefer shutdown()\n\n\terr = installToolkit(o)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to install toolkit: %v\", err)\n\t}\n\n\terr = setupRuntime(o)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to setup runtime: %v\", err)\n\t}\n\n\tif !o.noDaemon {\n\t\terr = waitForSignal()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to wait for signal: %v\", err)\n\t\t}\n\n\t\terr = cleanupRuntime(o)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to cleanup runtime: %v\", err)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (c *Command) Run(args []string) {\n\tservice := servd.New()\n\tc.loadAndValidateConfig()\n\n\tswitch args[0] {\n\tcase inquiry:\n\t\tservice.Inquiry()\n\tcase payment:\n\t\tservice.Payment()\n\tcase checkStatus:\n\t\tservice.CheckStatus()\n\tdefault:\n\t\tlog.Println(\"please specify the available command (inquiry, payment, checkstatus)\")\n\t}\n}",
"func (c *CLI) Run(args []string) int {\n\tparam := ¶m{}\n\terr := c.parseArgs(args[1:], param)\n\tif err != nil {\n\t\tfmt.Fprintf(c.ErrStream, \"args parse error: %v\", err)\n\t\treturn ExitCodeParseError\n\t}\n\n\tserver, err := NewServer(param.file)\n\tif err != nil {\n\t\tfmt.Fprintf(c.ErrStream, \"invalid args. failed to initialize server: %v\", err)\n\t\treturn ExitCodeInvalidArgsError\n\t}\n\n\tif err := server.PrepareServer(); err != nil {\n\t\tfmt.Fprintf(c.ErrStream, \"failed to setup server: %v\", err)\n\t\treturn ExitCodeSetupServerError\n\t}\n\n\tif err := server.Run(param.port); err != nil {\n\t\tfmt.Fprintf(c.ErrStream, \"failed from server: %v\", err)\n\t\treturn ExitCodeError\n\t}\n\treturn ExitCodeOK\n}",
"func (s *Service) Run(host, port string) error {\n\t// let's gooooo\n\ts.l.Infow(\"spinning up core service\",\n\t\t\"core.host\", host,\n\t\t\"core.port\", port)\n\tlistener, err := net.Listen(\"tcp\", host+\":\"+port)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = s.grpc.Serve(listener); err != nil {\n\t\ts.l.Errorf(\"error encountered - service stopped\",\n\t\t\t\"error\", err)\n\t\treturn err\n\t}\n\n\t// report shutdown\n\ts.l.Info(\"service shut down\")\n\treturn nil\n}",
"func (o *Options) Run() error {\n\terr := o.Validate()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to validate options\")\n\t}\n\n\tconfig, err := o.LoadSourceConfig()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to load source config\")\n\t}\n\n\tfor i := range config.Spec.Groups {\n\t\tgroup := &config.Spec.Groups[i]\n\t\tfor j := range group.Repositories {\n\t\t\trepo := &group.Repositories[j]\n\n\t\t\tif o.Filter != \"\" && !strings.Contains(repo.Name, o.Filter) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr := o.UpgradeRepository(config, group, repo)\n\t\t\tif err != nil {\n\t\t\t\tlog.Logger().Errorf(\"failed to upgrade repository %s due to: %s\", repo.Name, err.Error())\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (m *Manager) Run(ctx context.Context) error {\n\ts := <-m.status\n\tswitch s {\n\tcase StatusShutdown:\n\t\tm.status <- s\n\t\treturn ErrShutdown\n\tcase StatusUnknown:\n\t\t// ok\n\tdefault:\n\t\tm.status <- s\n\t\treturn ErrAlreadyStarted\n\t}\n\n\tstartCtx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tm.startupCancel = cancel\n\tstartupFunc := m.startupFunc\n\tm.status <- StatusStarting\n\n\tif startupFunc != nil {\n\t\tm.startupErr = startupFunc(startCtx)\n\t}\n\tcancel()\n\n\ts = <-m.status\n\n\tswitch s {\n\tcase StatusShutdown:\n\t\tm.status <- s\n\t\t// no error on shutdown while starting\n\t\treturn nil\n\tcase StatusStarting:\n\t\tif m.startupErr != nil {\n\t\t\tm.status <- s\n\t\t\tclose(m.startupDone)\n\t\t\treturn m.startupErr\n\t\t}\n\t\t// ok\n\tdefault:\n\t\tm.status <- s\n\t\tpanic(\"unexpected lifecycle state\")\n\t}\n\n\tctx, m.runCancel = context.WithCancel(ctx)\n\tclose(m.startupDone)\n\tm.status <- StatusReady\n\n\terr := m.runFunc(ctx)\n\tclose(m.runDone)\n\t<-m.shutdownDone\n\treturn err\n}",
"func (phase *EtcdSetupPhase) Run() error {\n\tvar etcdNodes []clustermanager.Node\n\tcluster := phase.clusterManager.Cluster()\n\n\tif cluster.IsolatedEtcd {\n\t\tetcdNodes = phase.provider.GetEtcdNodes()\n\t} else {\n\t\tetcdNodes = phase.provider.GetMasterNodes()\n\t}\n\n\terr := phase.clusterManager.InstallEtcdNodes(etcdNodes, phase.options.KeepData)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (r *Runner) Run(ctx context.Context) error {\n\treturn errors.New(\"not implemented\")\n}",
"func (c *Command) Run(s *runtime.Scheme, log logging.Logger) error {\n\tcfg, err := ctrl.GetConfig()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Cannot get config\")\n\t}\n\tlog.Debug(\"Starting\", \"sync-period\", c.Sync.String())\n\n\tmgr, err := ctrl.NewManager(cfg, ctrl.Options{\n\t\tScheme: s,\n\t\tLeaderElection: c.LeaderElection,\n\t\tLeaderElectionID: \"crossplane-leader-election-core\",\n\t\tSyncPeriod: &c.Sync,\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Cannot create manager\")\n\t}\n\n\tif err := apiextensions.Setup(mgr, log); err != nil {\n\t\treturn errors.Wrap(err, \"Cannot setup API extension controllers\")\n\t}\n\n\tpkgCache := xpkg.NewImageCache(c.CacheDir, afero.NewOsFs())\n\n\tif err := pkg.Setup(mgr, log, pkgCache, c.Namespace); err != nil {\n\t\treturn errors.Wrap(err, \"Cannot add packages controllers to manager\")\n\t}\n\n\treturn errors.Wrap(mgr.Start(ctrl.SetupSignalHandler()), \"Cannot start controller manager\")\n}",
"func (c *Command) Run(args []string) int {\n\tname, opts, peers, err := c.readConfig()\n\tif err != nil {\n\t\tc.UI.Error(err.Error())\n\t\treturn 1\n\t}\n\tc.instance, err = huton.NewInstance(name, opts...)\n\tif err != nil {\n\t\tc.UI.Error(err.Error())\n\t\treturn 1\n\t}\n\t_, err = c.instance.Join(peers)\n\tif err != nil {\n\t\tc.UI.Error(err.Error())\n\t\treturn 1\n\t}\n\treturn c.handleSignals()\n}",
"func (c *ContainerExecutor) Run(opts ifc.RunOptions) error {\n\tlog.Print(\"starting generic container\")\n\n\tif c.Options.ClusterName != \"\" {\n\t\tcleanup, err := c.SetKubeConfig()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer cleanup()\n\t}\n\n\tinput, err := bundleReader(c.ExecutorBundle)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO this logic is redundant in executor package, move it to pkg/container\n\tvar output io.Writer\n\tif c.ResultsDir == \"\" {\n\t\t// set output only if the output if resulting directory is not defined\n\t\toutput = os.Stdout\n\t}\n\tif err = c.setConfig(); err != nil {\n\t\treturn err\n\t}\n\n\t// TODO check the executor type when dryrun is set\n\tif opts.DryRun {\n\t\tlog.Print(\"DryRun execution finished\")\n\t\treturn nil\n\t}\n\n\terr = c.ClientFunc(c.ResultsDir, input, output, c.Container, c.MountBasePath).Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Print(\"execution of the generic container finished\")\n\treturn nil\n}",
"func (s *Service) Run() error {\n\tgo func() {\n\t\tif s.config.CertConfig != nil {\n\t\t\tif err := s.Server.ListenAndServeTLS(s.config.CertConfig.CertificateFile, s.config.CertConfig.KeyFile); err != http.ErrServerClosed {\n\t\t\t\tlogrus.Fatalf(\"Failed to start query service: %s\\n\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tif err := s.Server.ListenAndServe(); err != http.ErrServerClosed {\n\t\t\t\tlogrus.Fatalf(\"Failed to start query service: %s\\n\", err)\n\t\t\t}\n\t\t}\n\t}()\n\n\tlog.SetLogLevel(s.config.LogLevel)\n\t//We want a graceful exit\n\tif err := s.waitForShutdown(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (a *Agent) Run() {\n\tagent.RegisterAPIValidator(a.validateAPI)\n\tagent.OnConfigChange(a.onConfigChange)\n\n\tgo a.discoveryLoop()\n\tgo a.publishLoop()\n\n\tselect {\n\tcase <-a.stopAgent:\n\t\tlog.Info(\"Received request to kill agent\")\n\t\ta.stopDiscovery <- true\n\t\ta.stopPublish <- true\n\t\treturn\n\t}\n}",
"func (d *Daemon) Run() (err error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tgc := gc.NewGC()\n\tgc.StartGC(ctx)\n\n\tif rerr := d.registerSubReaper(gc); rerr != nil {\n\t\treturn rerr\n\t}\n\n\tlogrus.Debugf(\"Daemon start with option %#v\", d.opts)\n\n\tstack.Setup(d.opts.RunRoot)\n\n\td.NewBackend()\n\n\tif err = d.NewGrpcServer(); err != nil {\n\t\treturn err\n\t}\n\td.backend.Register(d.grpc.server)\n\t// after the daemon is done setting up we can notify systemd api\n\tsystemd.NotifySystemReady()\n\n\terrCh := make(chan error)\n\tif err = d.grpc.Run(ctx, errCh, cancel); err != nil {\n\t\tlogrus.Error(\"Running GRPC server failed: \", err)\n\t}\n\n\tselect {\n\tcase serverErr, ok := <-errCh:\n\t\tif !ok {\n\t\t\tlogrus.Errorf(\"Channel errCh closed, check grpc server err\")\n\t\t}\n\t\terr = serverErr\n\t\tcancel()\n\t// channel closed is what we expected since it's daemon normal behavior\n\tcase <-ctx.Done():\n\t\tlogrus.Infof(\"Context finished with: %v\", ctx.Err())\n\t}\n\n\tsystemd.NotifySystemStopping()\n\td.grpc.server.GracefulStop()\n\td.backend.wg.Wait()\n\treturn err\n}",
"func (c *Controller) Run(ctx context.Context) error {\n\t// Start the informer factories to begin populating the informer caches\n\tc.log.Infof(\"starting step control loop, node name: %s\", c.nodeName)\n\n\t// 初始化runner\n\tc.log.Info(\"init controller engine\")\n\tif err := engine.Init(c.wc, c.informer.Recorder()); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.sync(ctx); err != nil {\n\t\treturn err\n\t}\n\n\tc.waitDown(ctx)\n\treturn nil\n}",
"func (e *Signer) Run(ctx context.Context) {\n\t// Shut down queues\n\tdefer utilruntime.HandleCrash()\n\tdefer e.syncQueue.ShutDown()\n\n\tif !cache.WaitForNamedCacheSync(\"bootstrap_signer\", ctx.Done(), e.configMapSynced, e.secretSynced) {\n\t\treturn\n\t}\n\n\tlogger := klog.FromContext(ctx)\n\tlogger.V(5).Info(\"Starting workers\")\n\tgo wait.UntilWithContext(ctx, e.serviceConfigMapQueue, 0)\n\t<-ctx.Done()\n\tlogger.V(1).Info(\"Shutting down\")\n}",
"func (pm *PipelineManager) Run(threadiness int, stopCh <-chan struct{}) error {\n\t// Start the informer factories to begin populating the informer caches\n\tlog.Info(\"[PipelineManager.Run] Starting...\")\n\n\t// Wait for the caches to be synced before starting workers\n\tlog.Info(\"[PipelineManager.Run] Waiting for informer caches to sync\")\n\n\tif ok := cache.WaitForCacheSync(stopCh, pm.deploymentSynced, pm.podSynced); !ok {\n\t\treturn fmt.Errorf(\"[PipelineManager.Run] failed to wait for caches to sync\")\n\t}\n\n\tlog.Info(\"[PipelineManager.Run] Starting workers\")\n\tgo func() {\n\t\t<-stopCh\n\t\tlog.Info(\"[PipelineManager] shutdown work queue\")\n\t\tpm.workqueue.ShutDown()\n\t}()\n\n\t// Launch two workers to process Pipeline resources\n\tfor i := 0; i < threadiness; i++ {\n\t\tgo wait.Until(pm.runWorker, time.Second, stopCh)\n\t}\n\n\tlog.Info(\"[PipelineManager.Run] Started workers\")\n\treturn nil\n}",
"func (e *Engine) Run() error {\n\n\tgo e.service.Run()\n\n\te.consensus.Run()\n\n\treturn nil\n}",
"func Run(s options.Options, stopCh <-chan struct{}) error {\n\terr := NonBlockingRun(s, stopCh)\n\tif err != nil {\n\t\treturn err\n\t}\n\t<-stopCh\n\treturn nil\n}",
"func Run(ctx context.Context) {\n\tif flags.Version {\n\t\tfmt.Print(info.VersionString())\n\t\treturn\n\t}\n\n\tcfg, err := config.Load(flags.ConfigPath)\n\tif err != nil {\n\t\tif err == config.ErrMissingAPIKey {\n\t\t\tfmt.Println(config.ErrMissingAPIKey)\n\n\t\t\t// a sleep is necessary to ensure that supervisor registers this process as \"STARTED\"\n\t\t\t// If the exit is \"too quick\", we enter a BACKOFF->FATAL loop even though this is an expected exit\n\t\t\t// http://supervisord.org/subprocess.html#process-states\n\t\t\ttime.Sleep(5 * time.Second)\n\n\t\t\t// Don't use os.Exit() method here, even with os.Exit(0) the Service Control Manager\n\t\t\t// on Windows will consider the process failed and log an error in the Event Viewer and\n\t\t\t// attempt to restart the process.\n\t\t\treturn\n\t\t}\n\t\tosutil.Exitf(\"%v\", err)\n\t}\n\terr = info.InitInfo(cfg) // for expvar & -info option\n\tif err != nil {\n\t\tosutil.Exitf(\"%v\", err)\n\t}\n\n\tif flags.Info {\n\t\tif err := info.Info(os.Stdout, cfg); err != nil {\n\t\t\tosutil.Exitf(\"Failed to print info: %s\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tif err := coreconfig.SetupLogger(\n\t\tcoreconfig.LoggerName(\"TRACE\"),\n\t\tcfg.LogLevel,\n\t\tcfg.LogFilePath,\n\t\tcoreconfig.GetSyslogURI(),\n\t\tcoreconfig.Datadog.GetBool(\"syslog_rfc\"),\n\t\tcoreconfig.Datadog.GetBool(\"log_to_console\"),\n\t\tcoreconfig.Datadog.GetBool(\"log_format_json\"),\n\t); err != nil {\n\t\tosutil.Exitf(\"Cannot create logger: %v\", err)\n\t}\n\tdefer log.Flush()\n\n\tif !cfg.Enabled {\n\t\tlog.Info(messageAgentDisabled)\n\n\t\t// a sleep is necessary to ensure that supervisor registers this process as \"STARTED\"\n\t\t// If the exit is \"too quick\", we enter a BACKOFF->FATAL loop even though this is an expected exit\n\t\t// http://supervisord.org/subprocess.html#process-states\n\t\ttime.Sleep(5 * time.Second)\n\t\treturn\n\t}\n\n\tdefer watchdog.LogOnPanic()\n\n\tif flags.CPUProfile != \"\" {\n\t\tf, err := 
os.Create(flags.CPUProfile)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\t\tlog.Info(\"CPU profiling started...\")\n\t\tdefer pprof.StopCPUProfile()\n\t}\n\n\tif flags.PIDFilePath != \"\" {\n\t\terr := pidfile.WritePID(flags.PIDFilePath)\n\t\tif err != nil {\n\t\t\tlog.Criticalf(\"Error writing PID file, exiting: %v\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tlog.Infof(\"PID '%d' written to PID file '%s'\", os.Getpid(), flags.PIDFilePath)\n\t\tdefer os.Remove(flags.PIDFilePath)\n\t}\n\n\terr = metrics.Configure(cfg, []string{\"version:\" + info.Version})\n\tif err != nil {\n\t\tosutil.Exitf(\"cannot configure dogstatsd: %v\", err)\n\t}\n\tdefer metrics.Flush()\n\tdefer timing.Stop()\n\n\tmetrics.Count(\"datadog.trace_agent.started\", 1, nil, 1)\n\n\trand.Seed(time.Now().UTC().UnixNano())\n\n\ttagger.Init()\n\tdefer tagger.Stop()\n\n\tagnt := NewAgent(ctx, cfg)\n\tlog.Infof(\"Trace agent running on host %s\", cfg.Hostname)\n\tagnt.Run()\n\n\t// collect memory profile\n\tif flags.MemProfile != \"\" {\n\t\tf, err := os.Create(flags.MemProfile)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Could not create memory profile: \", err)\n\t\t}\n\n\t\t// get up-to-date statistics\n\t\truntime.GC()\n\t\t// Not using WriteHeapProfile but instead calling WriteTo to\n\t\t// make sure we pass debug=1 and resolve pointers to names.\n\t\tif err := pprof.Lookup(\"heap\").WriteTo(f, 1); err != nil {\n\t\t\tlog.Error(\"Could not write memory profile: \", err)\n\t\t}\n\t\tf.Close()\n\t}\n}",
"func (Tests) Run(ctx context.Context) error {\n\targ := BuildDockerComposeArgs(ProjectName, ProjectType, \"test\", DockerComposeTestFile)\n\targ = append(arg, \"run\")\n\targ = append(arg,\n\t\t\"--rm\",\n\t\t\"--use-aliases\",\n\t)\n\targ = append(arg, \"app\", \"go\", \"test\", \"-mod=vendor\", \"-v\", \"-cover\")\n\tif err := Exec(ComposeBin, append(arg, \"./service\")...); err != nil {\n\t\treturn err\n\t}\n\tif err := Exec(ComposeBin, append(arg, \"./...\")...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (p *Plugin) Run(kubeclient kubernetes.Interface) error {\n\tvar err error\n\tconfigMap, err := p.buildConfigMap()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdaemonSet, err := p.buildDaemonSet()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Submit them to the API server, capturing the results\n\tif _, err = kubeclient.CoreV1().ConfigMaps(p.Namespace).Create(configMap); err != nil {\n\t\treturn errors.Wrapf(err, \"could not create ConfigMap for daemonset plugin %v\", p.GetName())\n\t}\n\tif _, err = kubeclient.ExtensionsV1beta1().DaemonSets(p.Namespace).Create(daemonSet); err != nil {\n\t\treturn errors.Wrapf(err, \"could not create DaemonSet for daemonset plugin %v\", p.GetName())\n\t}\n\n\treturn nil\n}",
"func (rc *Controller) Run(ctx context.Context) error {\n\topts := []func(context.Context) error{\n\t\trc.setGlobalVariables,\n\t\trc.restoreSchema,\n\t\trc.preCheckRequirements,\n\t\trc.initCheckpoint,\n\t\trc.importTables,\n\t\trc.fullCompact,\n\t\trc.cleanCheckpoints,\n\t}\n\n\ttask := log.FromContext(ctx).Begin(zap.InfoLevel, \"the whole procedure\")\n\n\tvar err error\n\tfinished := false\noutside:\n\tfor i, process := range opts {\n\t\terr = process(ctx)\n\t\tif i == len(opts)-1 {\n\t\t\tfinished = true\n\t\t}\n\t\tlogger := task.With(zap.Int(\"step\", i), log.ShortError(err))\n\n\t\tswitch {\n\t\tcase err == nil:\n\t\tcase log.IsContextCanceledError(err):\n\t\t\tlogger.Info(\"task canceled\")\n\t\t\tbreak outside\n\t\tdefault:\n\t\t\tlogger.Error(\"run failed\")\n\t\t\tbreak outside // ps : not continue\n\t\t}\n\t}\n\n\t// if process is cancelled, should make sure checkpoints are written to db.\n\tif !finished {\n\t\trc.waitCheckpointFinish()\n\t}\n\n\ttask.End(zap.ErrorLevel, err)\n\trc.errorMgr.LogErrorDetails()\n\trc.errorSummaries.emitLog()\n\n\treturn errors.Trace(err)\n}",
"func (s Server) Run(ctx context.Context) error {\n\tctx, cancel := context.WithCancel(ctx)\n\terrWg := errgroup.Group{}\n\n\terrWg.Go(func() error {\n\t\tlis, err := net.Listen(\"tcp\", s.grpcAddr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn s.grpcServer.Serve(lis)\n\t})\n\n\terrWg.Go(func() error {\n\t\tl, err := net.Listen(\"tcp\", s.httpAddr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn s.httpServer.Serve(l)\n\t})\n\n\terrWg.Go(func() error {\n\t\tswaggerAddr := s.httpAddr + swaggerUIPrefix\n\t\tlog.Info().Msgf(\"App started. HTTP: %s, Swagger UI: %s, gRPC: %s\", s.httpAddr, swaggerAddr, s.grpcAddr)\n\t\treturn nil\n\t})\n\n\terrWg.Go(func() error {\n\t\tshutdownCh := make(chan os.Signal, 1)\n\t\tsignal.Notify(shutdownCh, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)\n\t\tsig := <-shutdownCh\n\n\t\ts.stop(ctx)\n\t\tcancel()\n\n\t\tlog.Fatal().Msgf(\"exit reason: %s\", sig)\n\n\t\treturn nil\n\t})\n\n\treturn errWg.Wait()\n}",
"func (s *Service) Run(r Runner) {\n\ts.init()\n\tenv := s.envFunc()\n\tpfs := &FlagSet{FlagSet: s.fs, es: s.es}\n\tr.SetFlags(pfs)\n\n\ts.asPlugin = env[\"DRONE\"] == \"true\"\n\ts.debug = env[\"PLUGIN_PLUGIN_DEBUG\"] != \"\"\n\t{\n\t\tlogFlags := 0\n\t\tif s.debug {\n\t\t\tlogFlags = log.Ltime | log.Lshortfile\n\t\t}\n\t\tif s.log.logger == nil {\n\t\t\tlog.SetFlags(logFlags)\n\t\t} else {\n\t\t\ts.log.logger.SetFlags(logFlags)\n\t\t}\n\t}\n\tif s.debug {\n\t\ts.log.Debugln(\"drone plugins debug mode is active!\")\n\t}\n\tif s.debug {\n\t\tfor k, v := range env {\n\t\t\tif strings.HasPrefix(k, \"PLUGIN_\") || strings.HasPrefix(k, \"DRONE_\") {\n\t\t\t\ts.log.Debugf(\"[env] %s=%s\", k, v)\n\t\t\t}\n\t\t}\n\t\ts.es.VisitAll(func(e fenv.EnvFlag) {\n\t\t\ts.log.Debugf(\"[assign] flag '%s' for env vars: %s\",\n\t\t\t\te.Flag.Name, strings.Join(e.Names, \", \"))\n\t\t})\n\t}\n\n\tif pfs.envFilesActive {\n\t\ts.readEnvfiles(env, pfs.envFiles)\n\t}\n\n\tif s.asPlugin {\n\t\ts.fs.Usage = s.usageFuncYml\n\t\ts.fs.Init(s.args()[0], flag.ContinueOnError)\n\t}\n\n\tif err := s.es.ParseEnv(env); err != nil {\n\t\ts.execErr = err\n\t\ts.fs.Usage()\n\t\tif !s.continueOnError {\n\t\t\tos.Exit(1)\n\t\t}\n\t\treturn\n\n\t}\n\n\tif err := s.fs.Parse(s.args()[1:]); err != nil {\n\t\ts.execErr = err\n\t\ts.log.Println(err)\n\t\tif !s.continueOnError {\n\t\t\tos.Exit(1)\n\t\t}\n\t\treturn\n\n\t}\n\tif s.debug {\n\t\ts.es.VisitAll(func(e fenv.EnvFlag) {\n\t\t\tif !e.IsSelfSet && e.IsSet {\n\t\t\t\ts.log.Debugf(\"[flag] '%s' set: %v\", e.Flag.Name, e.Flag.Value)\n\t\t\t}\n\t\t})\n\t\ts.es.VisitAll(func(e fenv.EnvFlag) {\n\t\t\tif e.IsSelfSet {\n\t\t\t\ts.log.Debugf(\"[envflag] '%s' set by env var '%s': %v\", e.Flag.Name, e.Name, e.Flag.Value)\n\t\t\t}\n\t\t})\n\t\ts.usageFuncYml()\n\t}\n\tctx := context.Background()\n\ts.log.Debugln(\"------ executing plugin func -----\")\n\terr := r.Exec(ctx, s.log)\n\ts.log.Debugln(\"------ plugin func done -----\")\n\ts.execErr = err\n\tvar hasErrors 
bool\n\tif err != nil {\n\t\ts.log.Debugln(\"ErrUsageError returned\")\n\t\tif s.debug {\n\t\t\t_ = s.log.Output(2, fmt.Sprintf(\"plugin runner error: %v\", err))\n\t\t}\n\t\tif err == ErrUsageError {\n\t\t\thasErrors = true\n\t\t}\n\t} else {\n\t\ts.es.VisitAll(func(e fenv.EnvFlag) {\n\t\t\tif e.Err != nil {\n\t\t\t\thasErrors = true\n\t\t\t}\n\t\t})\n\t}\n\tif hasErrors {\n\t\ts.fs.Usage()\n\t\tif !s.continueOnError {\n\t\t\tos.Exit(1)\n\t\t}\n\t\treturn\n\t}\n}",
"func (s *VMTServer) Run() {\n\tif err := s.checkFlag(); err != nil {\n\t\tglog.Fatalf(\"Check flag failed: %v. Abort.\", err.Error())\n\t}\n\n\tkubeConfig := s.createKubeConfigOrDie()\n\tglog.V(3).Infof(\"kubeConfig: %+v\", kubeConfig)\n\n\tkubeClient := s.createKubeClientOrDie(kubeConfig)\n\n\t// Create controller runtime client that support custom resources\n\truntimeClient, err := runtimeclient.New(kubeConfig, runtimeclient.Options{Scheme: customScheme})\n\tif err != nil {\n\t\tglog.Fatalf(\"Failed to create controller runtime client: %v.\", err)\n\t}\n\n\t// Openshift client for deploymentconfig resize forced rollouts\n\tosClient, err := osclient.NewForConfig(kubeConfig)\n\tif err != nil {\n\t\tglog.Fatalf(\"Failed to generate openshift client for kubernetes target: %v\", err)\n\t}\n\n\t// TODO: Replace dynamicClient with runtimeClient\n\tdynamicClient, err := dynamic.NewForConfig(kubeConfig)\n\tif err != nil {\n\t\tglog.Fatalf(\"Failed to generate dynamic client for kubernetes target: %v\", err)\n\t}\n\n\tutil.K8sAPIDeploymentGV, err = discoverk8sAPIResourceGV(kubeClient, util.DeploymentResName)\n\tif err != nil {\n\t\tglog.Warningf(\"Failure in discovering k8s deployment API group/version: %v\", err.Error())\n\t}\n\tglog.V(2).Infof(\"Using group version %v for k8s deployments\", util.K8sAPIDeploymentGV)\n\n\tutil.K8sAPIReplicasetGV, err = discoverk8sAPIResourceGV(kubeClient, util.ReplicaSetResName)\n\tif err != nil {\n\t\tglog.Warningf(\"Failure in discovering k8s replicaset API group/version: %v\", err.Error())\n\t}\n\tglog.V(2).Infof(\"Using group version %v for k8s replicasets\", util.K8sAPIReplicasetGV)\n\n\tglog.V(3).Infof(\"Turbonomic config path is: %v\", s.K8sTAPSpec)\n\n\tk8sTAPSpec, err := kubeturbo.ParseK8sTAPServiceSpec(s.K8sTAPSpec, kubeConfig.Host)\n\tif err != nil {\n\t\tglog.Fatalf(\"Failed to generate correct TAP config: %v\", err.Error())\n\t}\n\n\tif k8sTAPSpec.FeatureGates != nil {\n\t\terr = 
utilfeature.DefaultMutableFeatureGate.SetFromMap(k8sTAPSpec.FeatureGates)\n\t\tif err != nil {\n\t\t\tglog.Fatalf(\"Invalid Feature Gates: %v\", err)\n\t\t}\n\t}\n\n\tif utilfeature.DefaultFeatureGate.Enabled(features.GoMemLimit) {\n\t\tglog.V(2).Info(\"Memory Optimisations are enabled.\")\n\t\t// AUTOMEMLIMIT_DEBUG environment variable enables debug logging of AUTOMEMLIMIT\n\t\t// GoMemLimit will be set during the start of each discovery, see K8sDiscoveryClient.Discover,\n\t\t// as memory limit may change overtime\n\t\t_ = os.Setenv(\"AUTOMEMLIMIT_DEBUG\", \"true\")\n\t\tif s.ItemsPerListQuery != 0 {\n\t\t\t// Perform sanity check on user specified value of itemsPerListQuery\n\t\t\tif s.ItemsPerListQuery < processor.DefaultItemsPerGiMemory {\n\t\t\t\tvar errMsg string\n\t\t\t\tif s.ItemsPerListQuery < 0 {\n\t\t\t\t\terrMsg = \"negative\"\n\t\t\t\t} else {\n\t\t\t\t\terrMsg = \"set too low\"\n\t\t\t\t}\n\t\t\t\tglog.Warningf(\"Argument --items-per-list-query is %s (%v). Setting it to the default value of %d.\",\n\t\t\t\t\terrMsg, s.ItemsPerListQuery, processor.DefaultItemsPerGiMemory)\n\t\t\t\ts.ItemsPerListQuery = processor.DefaultItemsPerGiMemory\n\t\t\t} else {\n\t\t\t\tglog.V(2).Infof(\"Set items per list API call to the user specified value: %v.\", s.ItemsPerListQuery)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tglog.V(2).Info(\"Memory Optimisations are not enabled.\")\n\t}\n\n\t// Collect target and probe info such as master host, server version, probe container image, etc\n\tk8sTAPSpec.CollectK8sTargetAndProbeInfo(kubeConfig, kubeClient)\n\n\texcludeLabelsMap, err := nodeUtil.LabelMapFromNodeSelectorString(s.CpufreqJobExcludeNodeLabels)\n\tif err != nil {\n\t\tglog.Fatalf(\"Invalid cpu frequency exclude node label selectors: %v. 
The selectors \"+\n\t\t\t\"should be a comma saperated list of key=value node label pairs\", err)\n\t}\n\n\ts.ensureBusyboxImageBackwardCompatibility()\n\tkubeletClient := s.CreateKubeletClientOrDie(kubeConfig, kubeClient, s.CpuFrequencyGetterImage,\n\t\ts.CpuFrequencyGetterPullSecret, excludeLabelsMap, s.UseNodeProxyEndpoint)\n\tcaClient, err := clusterclient.NewForConfig(kubeConfig)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to generate correct TAP config: %v\", err.Error())\n\t\tcaClient = nil\n\t}\n\n\t// Interface to discover turbonomic ORM mappings (legacy and v2) for resize actions\n\tormClientManager := resourcemapping.NewORMClientManager(dynamicClient, kubeConfig)\n\n\t// Configuration for creating the Kubeturbo TAP service\n\tvmtConfig := kubeturbo.NewVMTConfig2()\n\tvmtConfig.WithTapSpec(k8sTAPSpec).\n\t\tWithKubeClient(kubeClient).\n\t\tWithKubeConfig(kubeConfig).\n\t\tWithDynamicClient(dynamicClient).\n\t\tWithControllerRuntimeClient(runtimeClient).\n\t\tWithORMClientManager(ormClientManager).\n\t\tWithKubeletClient(kubeletClient).\n\t\tWithClusterAPIClient(caClient).\n\t\tWithOpenshiftClient(osClient).\n\t\tWithVMPriority(s.VMPriority).\n\t\tWithVMIsBase(s.VMIsBase).\n\t\tUsingUUIDStitch(s.UseUUID).\n\t\tWithDiscoveryInterval(s.DiscoveryIntervalSec).\n\t\tWithValidationTimeout(s.ValidationTimeout).\n\t\tWithValidationWorkers(s.ValidationWorkers).\n\t\tWithDiscoveryWorkers(s.DiscoveryWorkers).\n\t\tWithDiscoveryTimeout(s.DiscoveryTimeoutSec).\n\t\tWithDiscoverySamples(s.DiscoverySamples).\n\t\tWithDiscoverySampleIntervalSec(s.DiscoverySampleIntervalSec).\n\t\tWithSccSupport(s.sccSupport).\n\t\tWithCAPINamespace(s.ClusterAPINamespace).\n\t\tWithContainerUtilizationDataAggStrategy(s.containerUtilizationDataAggStrategy).\n\t\tWithContainerUsageDataAggStrategy(s.containerUsageDataAggStrategy).\n\t\tWithVolumePodMoveConfig(s.FailVolumePodMoves).\n\t\tWithQuotaUpdateConfig(s.UpdateQuotaToAllowMoves).\n\t\tWithReadinessRetryThreshold(s.readinessRetryThreshol
d).\n\t\tWithClusterKeyInjected(s.ClusterKeyInjected).\n\t\tWithItemsPerListQuery(s.ItemsPerListQuery)\n\n\tif utilfeature.DefaultFeatureGate.Enabled(features.GitopsApps) {\n\t\tvmtConfig.WithGitConfig(s.gitConfig)\n\t} else {\n\t\tif s.gitConfig.GitEmail != \"\" ||\n\t\t\ts.gitConfig.GitSecretName != \"\" ||\n\t\t\ts.gitConfig.GitSecretNamespace != \"\" ||\n\t\t\ts.gitConfig.GitUsername != \"\" {\n\t\t\tglog.V(2).Infof(\"Feature: %v is not enabled, arg values set for git-email: %s, git-username: %s \"+\n\t\t\t\t\"git-secret-name: %s, git-secret-namespace: %s will be ignored.\", features.GitopsApps,\n\t\t\t\ts.gitConfig.GitEmail, s.gitConfig.GitUsername, s.gitConfig.GitSecretName, s.gitConfig.GitSecretNamespace)\n\t\t}\n\t}\n\tglog.V(3).Infof(\"Finished creating turbo configuration: %+v\", vmtConfig)\n\n\t// The KubeTurbo TAP service\n\tk8sTAPService, err := kubeturbo.NewKubernetesTAPService(vmtConfig)\n\tif err != nil {\n\t\tglog.Fatalf(\"Unexpected error while creating Kubernetes TAP service: %s\", err)\n\t}\n\n\t// Its a must to include the namespace env var in the kubeturbo pod spec.\n\tns := util.GetKubeturboNamespace()\n\t// Update scc resources in parallel.\n\tgo ManageSCCs(ns, dynamicClient, kubeClient)\n\n\t// The client for healthz, debug, and prometheus\n\tgo s.startHttp()\n\n\tcleanupWG := &sync.WaitGroup{}\n\tcleanupSCCFn := func() {\n\t\tns := util.GetKubeturboNamespace()\n\t\tCleanUpSCCMgmtResources(ns, dynamicClient, kubeClient)\n\t}\n\tdisconnectFn := func() {\n\t\t// Disconnect from Turbo server when Kubeturbo is shutdown\n\t\t// Close the mediation container including the endpoints. It avoids the\n\t\t// invalid endpoints remaining in the server side. 
See OM-28801.\n\t\tk8sTAPService.DisconnectFromTurbo()\n\t}\n\tvar cleanupFuns []cleanUp\n\tif s.CleanupSccRelatedResources {\n\t\tcleanupFuns = append(cleanupFuns, cleanupSCCFn)\n\t}\n\tcleanupFuns = append(cleanupFuns, disconnectFn)\n\thandleExit(cleanupWG, cleanupFuns...)\n\n\tgCChan := make(chan bool)\n\tdefer close(gCChan)\n\tworker.NewGarbageCollector(kubeClient, dynamicClient, gCChan, s.GCIntervalMin*60, time.Minute*30).StartCleanup()\n\n\tglog.V(1).Infof(\"********** Start running Kubeturbo Service **********\")\n\tk8sTAPService.ConnectToTurbo()\n\tglog.V(1).Info(\"Kubeturbo service is stopped.\")\n\n\tcleanupWG.Wait()\n\tglog.V(1).Info(\"Cleanup completed. Exiting gracefully.\")\n}",
"func (s *Server) Run(ctx context.Context) error {\n\th := HealthHandler{\n\t\tservices: map[string]Healthier{\n\t\t\t\"ec2\": s.collectors.EC2,\n\t\t\t\"asg\": s.collectors.ASG,\n\t\t\t\"spot\": s.collectors.Spot,\n\t\t\t\"nodes\": s.collectors.Node,\n\t\t\t\"pods\": s.collectors.Pod,\n\n\t\t\t\"mainloop\": s.mainloop,\n\t\t},\n\t}\n\n\trouter := httprouter.New()\n\trouter.GET(\"/\", s.handleStatus)\n\trouter.GET(\"/-/ready\", webutil.HandleHealth)\n\trouter.Handler(\"GET\", \"/-/healthy\", h)\n\trouter.Handler(\"GET\", \"/metrics\", promhttp.Handler())\n\n\treturn webutil.ListenAndServerWithContext(\n\t\tctx, \":8080\", router)\n}",
"func (s *Server) Run(ctx context.Context, wg *sync.WaitGroup) {\n\tif err := s.Config.Validate(); err != nil {\n\t\tlog.Panicf(\"invalid server config: %s\\n\", err)\n\t}\n\n\thandler := &http.Server{\n\t\tHandler: s.Router,\n\t\tAddr: \":\" + strconv.Itoa(s.Config.Port),\n\t}\n\n\tstartServer(ctx, handler, wg)\n}",
"func (c *Controller) Run(stopCh <-chan struct{}) error {\n\t// Normally, we let informers start after all controllers. However, in this case we need namespaces to start and sync\n\t// first, so we have DiscoveryNamespacesFilter ready to go. This avoids processing objects that would be filtered during startup.\n\tc.namespaces.Start(stopCh)\n\t// Wait for namespace informer synced, which implies discovery filter is synced as well\n\tif !kube.WaitForCacheSync(\"namespace\", stopCh, c.namespaces.HasSynced) {\n\t\treturn fmt.Errorf(\"failed to sync namespaces\")\n\t}\n\t// run handlers for the config cluster; do not store this *Cluster in the ClusterStore or give it a SyncTimeout\n\t// this is done outside the goroutine, we should block other Run/startFuncs until this is registered\n\tconfigCluster := &Cluster{Client: c.configClusterClient, ID: c.configClusterID}\n\tc.handleAdd(configCluster, stopCh)\n\tgo func() {\n\t\tt0 := time.Now()\n\t\tlog.Info(\"Starting multicluster remote secrets controller\")\n\t\t// we need to start here when local cluster secret watcher enabled\n\t\tif features.LocalClusterSecretWatcher && features.ExternalIstiod {\n\t\t\tc.secrets.Start(stopCh)\n\t\t}\n\t\tif !kube.WaitForCacheSync(\"multicluster remote secrets\", stopCh, c.secrets.HasSynced) {\n\t\t\treturn\n\t\t}\n\t\tlog.Infof(\"multicluster remote secrets controller cache synced in %v\", time.Since(t0))\n\t\tc.queue.Run(stopCh)\n\t}()\n\treturn nil\n}",
"func (cli *CLI) Run(args []string) int {\n\tf := flag.NewFlagSet(Name, flag.ContinueOnError)\n\tf.SetOutput(cli.outStream)\n\n\tf.Usage = func() {\n\t\tfmt.Fprintf(cli.outStream, usageText)\n\t\tf.PrintDefaults()\n\t\tfmt.Fprint(cli.outStream, exampleText)\n\t}\n\n\tvar opt Options\n\n\tf.StringVar(&opt.Config, []string{\"c\", \"-config\"}, \"\", \"the path to the configuration file\")\n\tf.StringVar(&opt.Endpoint, []string{\"e\", \"-endpoint\"}, \"\", \"specify github api endpoint\")\n\tf.StringVar(&opt.Token, []string{\"t\", \"-token\"}, \"\", \"github personal token for using API\")\n\tf.StringVar(&opt.Belongs, []string{\"b\", \"-belongs\"}, \"\", \"organization/team on github\")\n\tf.BoolVar(&opt.Syslog, []string{\"s\", \"-syslog\"}, false, \"use syslog for log output\")\n\tf.BoolVar(&opt.Version, []string{\"v\", \"-version\"}, false, \"print the version and exit\")\n\n\tif err := f.Parse(args[1:]); err != nil {\n\t\treturn ExitCodeError\n\t}\n\tparsedArgs := f.Args()\n\n\tif opt.Version {\n\t\tfmt.Fprintf(cli.outStream, \"%s version %s\\n\", Name, Version)\n\t\treturn ExitCodeOK\n\t}\n\n\tif len(parsedArgs) == 0 {\n\t\tf.Usage()\n\t\treturn ExitCodeOK\n\t}\n\n\tif parsedArgs[0] != \"keys\" && parsedArgs[0] != \"pam\" {\n\t\tfmt.Fprintf(cli.errStream, \"invalid argument: %s\\n\", parsedArgs[0])\n\t\treturn ExitCodeError\n\t}\n\n\tc := NewConfig(&opt)\n\toct := NewOctopass(c, cli, nil)\n\tif err := oct.Run(parsedArgs); err != nil {\n\t\tfmt.Fprintf(cli.errStream, \"%s\\n\", err)\n\t\treturn ExitCodeError\n\t}\n\n\treturn ExitCodeOK\n}",
"func (s *SystemService) Run() error {\n\tlogger.Log(\"running service\")\n\n\tname := s.Command.Name\n\tdebugOn := s.Command.Debug\n\n\tvar err error\n\tif debugOn {\n\t\telog = debug.New(name)\n\t} else {\n\t\telog, err = eventlog.Open(name)\n\t\tif err != nil {\n\t\t\tlogger.Log(\"error opening logs: \", err)\n\t\t\treturn err\n\t\t}\n\t}\n\tdefer elog.Close()\n\n\tlogger.Log(\"starting service: \", name)\n\telog.Info(1, fmt.Sprintf(\"starting %s service\", name))\n\n\trun := svc.Run\n\tif debugOn {\n\t\trun = debug.Run\n\t}\n\n\terr = run(name, &windowsService{})\n\tif err != nil {\n\t\tlogger.Log(\"error running service: \", err)\n\t\telog.Error(1, fmt.Sprintf(\"%s service failed: %v\", name, err))\n\t\treturn err\n\t}\n\n\tlogger.Log(\"service stopped: \", name)\n\telog.Info(1, fmt.Sprintf(\"%s service stopped\", name))\n\n\t// if err := svc.Run(s.Command.Name, &windowsService{}); err != nil {\n\t// \treturn err\n\t// }\n\n\treturn nil\n}",
"func (o *Options) Run() error {\n\tclusterConfig, err := loadClusterConfig()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to load cluster config: %v\", err)\n\t}\n\n\tclient, err := kubernetes.NewForConfig(clusterConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprowJobClient, err := kube.NewClientInCluster(o.ProwJobNamespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontroller := artifact_uploader.NewController(client.CoreV1(), prowJobClient, o.Options)\n\n\tstop := make(chan struct{})\n\tdefer close(stop)\n\tgo controller.Run(o.NumWorkers, stop)\n\n\t// Wait forever\n\tselect {}\n}",
"func (b *Builder) Run() error {\n\tdefer b.Cleanup()\n\tlogrus.Debug(b.Options)\n\n\tfor _, s := range b.steps {\n\t\terr := s()\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\tos.Chdir(b.Root)\n\t}\n\treturn nil\n}",
"func Run(ctx app.Context) {\n\t// //////////////////////////////////////////////////////////////////////\n\t// Config and command line\n\t// //////////////////////////////////////////////////////////////////////\n\n\t// Options are set in this order: config -> env var -> cmd line option.\n\t// So first we must apply config files, then do cmd line parsing which\n\t// will apply env vars and cmd line options.\n\n\t// Parse cmd line to get --config files\n\tcmdLine := config.ParseCommandLine(config.Options{})\n\n\t// --config files override defaults if given\n\tconfigFiles := config.DEFAULT_CONFIG_FILES\n\tif cmdLine.Config != \"\" {\n\t\tconfigFiles = cmdLine.Config\n\t}\n\n\t// Parse default options from config files\n\tdef := config.ParseConfigFiles(configFiles, cmdLine.Debug)\n\n\t// Parse env vars and cmd line options, override default config\n\tcmdLine = config.ParseCommandLine(def)\n\n\t// Final options and commands\n\tvar o config.Options = cmdLine.Options\n\tvar c config.Command = cmdLine.Command\n\tif o.Debug {\n\t\tapp.Debug(\"command: %#v\\n\", c)\n\t\tapp.Debug(\"options: %#v\\n\", o)\n\t}\n\n\tif ctx.Hooks.AfterParseOptions != nil {\n\t\tif o.Debug {\n\t\t\tapp.Debug(\"calling hook AfterParseOptions\")\n\t\t}\n\t\tctx.Hooks.AfterParseOptions(&o)\n\n\t\t// Dump options again to see if hook changed them\n\t\tif o.Debug {\n\t\t\tapp.Debug(\"options: %#v\\n\", o)\n\t\t}\n\t}\n\tctx.Options = o\n\tctx.Command = c\n\n\t// //////////////////////////////////////////////////////////////////////\n\t// Help and version\n\t// //////////////////////////////////////////////////////////////////////\n\n\t// Help uses a Request Manager client to fetch the list of all requests.\n\t// If addr is set, then this works; else, ignore and always print help.\n\trmc, _ := makeRMC(&ctx)\n\n\t// spinc with no args (Args[0] = \"spinc\" itself). 
Print short request help\n\t// because Ryan is very busy.\n\tif len(os.Args) == 1 {\n\t\tconfig.Help(false, rmc)\n\t\tos.Exit(0)\n\t}\n\n\t// spinc --help or spinc help (full help)\n\tif o.Help || (c.Cmd == \"help\" && len(c.Args) == 0) {\n\t\tconfig.Help(true, rmc)\n\t\tos.Exit(0)\n\t}\n\n\t// spinc help <command>\n\tif c.Cmd == \"help\" && len(c.Args) > 0 {\n\t\t// Need rm client for this\n\t\tif rmc == nil {\n\t\t\tvar err error\n\t\t\trmc, err = makeRMC(&ctx)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t\treqName := c.Args[0]\n\t\tif err := config.RequestHelp(reqName, rmc); err != nil {\n\t\t\tswitch err {\n\t\t\tcase config.ErrUnknownRequest:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Unknown request: %s. Run spinc (no arguments) to list all requests.\\n\", reqName)\n\t\t\tdefault:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"API error: %s. Use --ping to test the API connection.\\n\", err)\n\t\t\t}\n\t\t\tos.Exit(1)\n\t\t}\n\t\tos.Exit(0)\n\t}\n\n\t// spinc --version or spinc version\n\tif o.Version || c.Cmd == \"version\" {\n\t\tfmt.Println(\"spinc v0.0.0\")\n\t\tos.Exit(0)\n\t}\n\n\t// //////////////////////////////////////////////////////////////////////\n\t// Request Manager Client\n\t// //////////////////////////////////////////////////////////////////////\n\tif rmc == nil {\n\t\tvar err error\n\t\trmc, err = makeRMC(&ctx)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t// //////////////////////////////////////////////////////////////////////\n\t// Ping\n\t// //////////////////////////////////////////////////////////////////////\n\tif o.Ping {\n\t\tif _, err := rmc.RequestList(); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Ping failed: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Printf(\"%s OK\\n\", o.Addr)\n\t\tos.Exit(0)\n\t}\n\n\t// //////////////////////////////////////////////////////////////////////\n\t// Commands\n\t// 
//////////////////////////////////////////////////////////////////////\n\tcmdFactory := &cmd.DefaultFactory{}\n\n\tvar err error\n\tvar run app.Command\n\tif ctx.Factories.Command != nil {\n\t\trun, err = ctx.Factories.Command.Make(c.Cmd, ctx)\n\t\tif err != nil {\n\t\t\tswitch err {\n\t\t\tcase cmd.ErrNotExist:\n\t\t\t\tif o.Debug {\n\t\t\t\t\tapp.Debug(\"user cmd factory cannot make a %s cmd, trying default factory\", c.Cmd)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"User command factory error: %s\\n\", err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t}\n\tif run == nil {\n\t\tif o.Debug {\n\t\t\tapp.Debug(\"using default factory to make a %s cmd\", c.Cmd)\n\t\t}\n\t\trun, err = cmdFactory.Make(c.Cmd, ctx)\n\t\tif err != nil {\n\t\t\tswitch err {\n\t\t\tcase cmd.ErrNotExist:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Unknown command: %s. Run 'spinc help' to list commands.\\n\", c.Cmd)\n\t\t\tdefault:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Command factory error: %s\\n\", err)\n\t\t\t}\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\tif err := run.Prepare(); err != nil {\n\t\tif o.Debug {\n\t\t\tapp.Debug(\"%s Prepare error: %s\", c.Cmd, err)\n\t\t}\n\t\tswitch err {\n\t\tcase config.ErrUnknownRequest:\n\t\t\treqName := c.Args[0]\n\t\t\tfmt.Fprintf(os.Stderr, \"Unknown request: %s. Run spinc (no arguments) to list all requests.\\n\", reqName)\n\t\tdefault:\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t\tos.Exit(1)\n\t}\n\n\tif err := run.Run(); err != nil {\n\t\tif o.Debug {\n\t\t\tapp.Debug(\"%s Run error: %s\", c.Cmd, err)\n\t\t}\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}",
"func (s *Server) Run(\n\t// Common\n\tctx context.Context,\n\tlog logger.Logger,\n\ttracer *trace.TracerProvider,\n) (*Server, error) {\n\t// API port\n\tviper.SetDefault(\"API_PORT\", 7070) // nolint:gomnd\n\t// Request Timeout (seconds)\n\tviper.SetDefault(\"API_TIMEOUT\", \"60s\")\n\n\tconfig := http_server.Config{\n\t\tPort: viper.GetInt(\"API_PORT\"),\n\t\tTimeout: viper.GetDuration(\"API_TIMEOUT\"),\n\t}\n\n\tg := errgroup.Group{}\n\n\tg.Go(func() error {\n\t\treturn s.run(\n\t\t\tctx,\n\t\t\tconfig,\n\t\t\tlog,\n\t\t\ttracer,\n\t\t)\n\t})\n\n\treturn s, nil\n}",
"func (d Driver) Run(name, confTarget, hostVolume string, args []string) error {\n\td.containerID = fmt.Sprintf(\"maestro-%s\", name)\n\td.confTarget = confTarget\n\td.hostVolume = hostVolume\n\td.cmd = args\n\tneedToPull, checkErr := d.needToPull(context.Background())\n\tif checkErr != nil {\n\t\treturn checkErr\n\t}\n\tif needToPull {\n\t\tpullErr := d.pull(context.Background())\n\t\tif pullErr != nil {\n\t\t\treturn pullErr\n\t\t}\n\t}\n\tneedToRemoveOld, removalID, checkRemoveErr := d.needToRemove(context.Background())\n\tif checkRemoveErr != nil {\n\t\treturn checkRemoveErr\n\t}\n\tif needToRemoveOld {\n\t\tremoveErr := d.remove(context.Background(), removalID)\n\t\tif removeErr != nil {\n\t\t\treturn removeErr\n\t\t}\n\t}\n\tcreateErr := d.create(context.Background())\n\tif createErr != nil {\n\t\treturn createErr\n\t}\n\treturn d.start(context.Background())\n}",
"func Run(cfg *config.Config, opts *Options) error {\n\t// TODO https://github.com/kube-compose/kube-compose/issues/2 accept context as a parameter\n\tu := &upRunner{\n\t\tcfg: cfg,\n\t\topts: opts,\n\t}\n\tu.hostAliases.once = &sync.Once{}\n\tu.localImagesCache.once = &sync.Once{}\n\treturn u.run()\n}",
"func (o *ConfigCleanOption) Run(cmd *cobra.Command, args []string) (err error) {\n\tif config := getConfig(); config == nil {\n\t\tcmd.Println(\"cannot found config file\")\n\t}\n\to.Logger = cmd\n\n\titemCount := len(config.JenkinsServers)\n\tcheckResult := make(chan CheckResult, itemCount)\n\n\tfor _, jenkins := range config.JenkinsServers {\n\t\tgo func(target cfg.JenkinsServer) {\n\t\t\tcheckResult <- o.Check(target)\n\t\t}(jenkins)\n\t}\n\n\tcheckResultList := make([]CheckResult, itemCount)\n\tfor i := range config.JenkinsServers {\n\t\tcheckResultList[i] = <-checkResult\n\t}\n\n\t// do the clean work\n\terr = o.CleanByCondition(checkResultList)\n\tcmd.Println()\n\treturn\n}",
"func (r Reflector) Run(ctx context.Context) error {\n\n\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\n\t// Grab all goroutines error into err\n\tvar err error\n\terrLock := sync.Mutex{}\n\twg := &sync.WaitGroup{}\n\n\t// Goroutine fetch events from API server and schedule them\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tif gErr := r.runFetcher(ctx); gErr != nil {\n\t\t\terrLock.Lock()\n\t\t\terr = multierr.Append(err, gErr)\n\t\t\terrLock.Unlock()\n\t\t\tr.log.Errorw(\"Error during fetching events. Send signal to stop\", zap.Error(err))\n\t\t\tcancel()\n\t\t} else {\n\t\t\tr.log.Info(\"Event fetcher was stopped\")\n\t\t}\n\t}()\n\n\n\t// Handler. Can re-schedule event if temporary error happens\n\tfor i := 0; i < r.workersCount; i++ {\n\t\twg.Add(1)\n\t\tlog := r.log.With(\"WorkerID\", i)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tr.runProcessor(log)\n\t\t\tlog.Info(\"Worker was stopped\")\n\t\t}()\n\t}\n\tr.log.Infof(\"Start %d workers\", r.workersCount)\n\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tr.log.Debug(\"Get cancel signal. Shutdown queue\")\n\t\tr.queue.ShutDown()\n\t}()\n\n\twg.Wait()\n\n\treturn err\n}",
"func (eng *Engine) Run(opts Options) error {\n\tctx := ContextForOptions(opts)\n\n\t// Run the deploy and return if everything works.\n\tfinalErr := eng.runDeploy(ctx)\n\tif finalErr == nil {\n\t\treturn nil\n\t}\n\n\tfmt.Println(strings.Repeat(\"*\", 80))\n\tfmt.Println(\"An error was encountered while deploying the application\")\n\tfmt.Printf(\"The error message was: %v\\n\", finalErr)\n\tfmt.Println(strings.Repeat(\"*\", 80))\n\n\tif err := eng.runRollback(ctx); err != nil {\n\t\treturn nil\n\t}\n\n\treturn finalErr\n}",
"func (c *ConfigController) Run(threadiness int, stopCh <-chan struct{}) error {\n\tdefer func() {\n\t\tc.queue.ShutDown()\n\t}()\n\n\tglog.V(3).Infoln(\"Creating CDI config\")\n\tif _, err := CreateCDIConfig(c.client, c.cdiClientSet, c.configName); err != nil {\n\t\truntime.HandleError(err)\n\t\treturn errors.Wrap(err, \"Error creating CDI config\")\n\t}\n\n\tglog.V(3).Infoln(\"Starting config controller Run loop\")\n\tif threadiness < 1 {\n\t\treturn errors.Errorf(\"expected >0 threads, got %d\", threadiness)\n\t}\n\n\tif ok := cache.WaitForCacheSync(stopCh, c.ingressesSynced, c.routesSynced); !ok {\n\t\treturn errors.New(\"failed to wait for caches to sync\")\n\t}\n\n\tglog.V(3).Infoln(\"ConfigController cache has synced\")\n\n\tfor i := 0; i < threadiness; i++ {\n\t\tgo wait.Until(c.runWorker, time.Second, stopCh)\n\t}\n\n\tglog.Info(\"Started workers\")\n\t<-stopCh\n\tglog.Info(\"Shutting down workers\")\n\treturn nil\n}",
"func (o *CreateTerraformOptions) Run() error {\n\n\tif len(o.Flags.Cluster) > 1 {\n\t\terr := o.validateClusterDetails()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(o.Flags.Cluster) == 0 {\n\t\terr := o.ClusterDetailsWizard()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Infof(\"Creating clusters %v\", o.Clusters)\n\n\terr := o.createOrganisationGitRepo()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (a *Agent) Run() error {\n\t/*\n\t* Start the agent service\n\t*\n\t* Tasks:\n\t* - Initialize Redis connection\n\t* - Start the http debug server and metrics endpoint\n\t* - Start the executor responsible for ensuring node state\n\t */\n\tvar errChan = make(chan error, 1)\n\n\tgo func(errChan chan error) {\n\t\tlog.Infof(\"Starting http server on %s\", a.httpServer.GetListenAddr())\n\t\terrChan <- a.httpServer.Run()\n\t}(errChan)\n\n\tgo func(errChan chan error) {\n\t\tlog.Info(\"Starting executor\")\n\t\terrChan <- a.executor.Run()\n\t}(errChan)\n\n\treturn <-errChan\n}",
"func (cli *CLI) Run(args []string) int {\n\t// Define option flag parse\n\tflags := flag.NewFlagSet(Name, flag.ContinueOnError)\n\tflags.SetOutput(cli.errStream)\n\n\tflags.BoolVar(&cli.nonum, \"nonum\", false, \"hide line numbers\")\n\tflags.StringVar(&cli.delim, \"delim\", \":\", \"a delimiter that separates elements of an argument\")\n\n\tflVersion := flags.Bool(\"version\", false, \"Print version information and quit.\")\n\n\t// Parse commandline flag\n\tif err := flags.Parse(args[1:]); err != nil {\n\t\treturn ExitCodeError\n\t}\n\n\t// Show version\n\tif *flVersion {\n\t\tfmt.Fprintf(cli.errStream, \"%s version %s\\n\", Name, Version)\n\t\treturn ExitCodeOK\n\t}\n\n\tif e := cli.split(flags.Args()); e != nil {\n\t\tfmt.Fprintf(cli.errStream, \"Error splitting %s: %s\\n\", flag.Args(), e)\n\t\treturn ExitCodeError\n\t}\n\n\treturn ExitCodeOK\n}",
"func (rnr *Runner) Run(args map[string]interface{}) (err error) {\n\n\trnr.Log.Debug(\"** Run **\")\n\n\t// signal channel\n\tsigChan := make(chan os.Signal, 1)\n\tsignal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)\n\n\t// run HTTP server\n\tgo func() {\n\t\trnr.Log.Debug(\"** Running HTTP server process **\")\n\t\terr = rnr.RunHTTPFunc(args)\n\t\tif err != nil {\n\t\t\trnr.Log.Error(\"Failed run server >%v<\", err)\n\t\t\tsigChan <- syscall.SIGTERM\n\t\t}\n\t\trnr.Log.Debug(\"** HTTP server process ended **\")\n\t}()\n\n\t// run daemon server\n\tgo func() {\n\t\trnr.Log.Debug(\"** Running daemon process **\")\n\t\terr = rnr.RunDaemonFunc(args)\n\t\tif err != nil {\n\t\t\trnr.Log.Error(\"Failed run daemon >%v<\", err)\n\t\t\tsigChan <- syscall.SIGTERM\n\t\t}\n\t\trnr.Log.Debug(\"** Daemon process ended **\")\n\t}()\n\n\t// wait\n\tsig := <-sigChan\n\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\n\tbToMb := func(b uint64) uint64 {\n\t\treturn b / 1024 / 1024\n\t}\n\n\terr = fmt.Errorf(\"Received SIG >%v< Mem Alloc >%d MiB< TotalAlloc >%d MiB< Sys >%d MiB< NumGC >%d<\",\n\t\tsig,\n\t\tbToMb(m.Alloc),\n\t\tbToMb(m.TotalAlloc),\n\t\tbToMb(m.Sys),\n\t\tm.NumGC,\n\t)\n\n\trnr.Log.Warn(\">%v<\", err)\n\n\treturn err\n}",
"func (d *driver) Run(ctx context.Context) (ok bool) {\n\tprepareBaseConfig()\n\tprepareCertBundle()\n\tgo d.ensureAllRegistriesAreRunning()\n\n\tinnerCtx, cancel := context.WithCancel(ctx)\n\tprocessExitChan := make(chan processExitMessage)\n\tpc := processContext{\n\t\tContext: innerCtx,\n\t\tProcessExitChan: processExitChan,\n\t}\n\n\t//Overview of how this main loop works:\n\t//\n\t//1. Each call to pc.startRegistry() spawns a keppel-registry process.\n\t// pc.startRegistry() will launch some goroutines that manage the child\n\t// process during its lifetime. Those goroutines are tracked by\n\t// pc.WaitGroup.\n\t//\n\t//2. When the original ctx expires, the aforementioned goroutines will\n\t// cleanly shutdown all keppel-registry processes. When they're done\n\t// shutting down, the main loop (which is waiting on pc.WaitGroup) unblocks\n\t// and returns true.\n\t//\n\t//3. Abnormal termination of a single keppel-registry process is not a fatal\n\t// error. Its observing goroutine will send a processExitMessage that the\n\t// main loop uses to update its bookkeeping accordingly. 
The next request\n\t// for that Keppel account will launch a new keppel-registry process.\n\t//\n\tok = true\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\t//silence govet (cancel() is a no-op since ctx and therefore innerCtx has\n\t\t\t//already expired, but govet cannot understand that and suspects a context leak)\n\t\t\tcancel()\n\t\t\t//wait on child processes\n\t\t\tpc.WaitGroup.Wait()\n\t\t\treturn ok\n\n\t\tcase msg := <-processExitChan:\n\t\t\tdelete(d.listenPorts, msg.AccountName)\n\n\t\tcase req := <-d.getPortRequestChan:\n\t\t\tport, exists := d.listenPorts[req.Account.Name]\n\t\t\tif !exists {\n\t\t\t\td.nextListenPort++\n\t\t\t\tport = d.nextListenPort\n\t\t\t\terr := pc.startRegistry(req.Account, port)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogg.Error(\"[account=%s] failed to start keppel-registry: %s\", req.Account.Name, err.Error())\n\t\t\t\t\t//failure to start new keppel-registries is considered a fatal error\n\t\t\t\t\tok = false\n\t\t\t\t\tcancel()\n\t\t\t\t}\n\t\t\t}\n\t\t\td.listenPorts[req.Account.Name] = port\n\t\t\tif req.Result != nil { //is nil when called from ensureAllRegistriesAreRunning()\n\t\t\t\treq.Result <- port\n\t\t\t}\n\t\t}\n\t}\n}",
"func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {\n\tdefer runtime.HandleCrash()\n\tdefer c.workqueue.ShutDown()\n\n\t// Start the informer factories to begin populating the informer caches\n\tglog.Info(\"Starting virtualservices control loop\")\n\n\t// Wait for the caches to be synced before starting workers\n\tglog.Info(\"Waiting for informer caches to sync\")\n\tif ok := cache.WaitForCacheSync(stopCh, c.virtualservicesSynced); !ok {\n\t\treturn fmt.Errorf(\"failed to wait for caches to sync\")\n\t}\n\n\tglog.Info(\"Starting workers\")\n\t// Launch workers to process virtualservices resources\n\tfor i := 0; i < threadiness; i++ {\n\t\tgo wait.Until(c.runWorker, time.Second, stopCh)\n\t}\n\n\tglog.Info(\"Started workers\")\n\t<-stopCh\n\tglog.Info(\"Shutting down workers\")\n\n\treturn nil\n}",
"func (s *ServiceManager) Run(routine func(ctx context.Context) error) (err error) {\n\ts.Start(routine)\n\treturn s.Wait()\n}",
"func (manager *Manager) Run(configManagerID string, configManagerSpec string) error {\n\n\t// We dont want any reloads happening until we are fully running\n\tmanager.configReloadMutex.Lock()\n\n\tif len(configManagerID) > 0 {\n\t\tmanager.InitializeConfigurationManager(configManagerID, configManagerSpec)\n\t}\n\n\tconfiguration := config.Config{}\n\tmanager.InitializeConnectionManagers(configuration)\n\n\tlog.Println(\"Initialization of plugins done.\")\n\n\tlog.Println(\"Initializing the proxy...\")\n\tresolver := NewResolver(manager.ProviderFactories, manager, nil)\n\n\tmanager.Proxy = secretless.Proxy{\n\t\tConfig: configuration,\n\t\tEventNotifier: manager,\n\t\tResolver: resolver,\n\t\tRunHandlerFunc: manager._RunHandler,\n\t\tRunListenerFunc: manager._RunListener,\n\t}\n\n\tmanager.configReloadMutex.Unlock()\n\n\tmanager.Proxy.Run()\n\n\treturn nil\n}",
"func (m *Manager) Run(ctx context.Context, run func() error) (err error) {\n\t// Acquire mutex lock\n\tm.mux.Lock()\n\t// Defer the release of mutex lock\n\tdefer m.mux.Unlock()\n\t// Call internal run func\n\treturn m.run(ctx, run)\n}",
"func Run(ctx *cli.Context) error {\n\t// new service\n\tsrv := service.New(\n\t\tservice.Name(\"events\"),\n\t)\n\n\t// register the handlers\n\tpb.RegisterStreamHandler(srv.Server(), new(handler.Stream))\n\tpb.RegisterStoreHandler(srv.Server(), new(handler.Store))\n\n\t// run the service\n\tif err := srv.Run(); err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\n\treturn nil\n}",
"func Run(version, buildTime string) int {\n\tdefer glog.Flush() // always flush\n\n\trmain := initDaemon(version, buildTime)\n\terr := daemon.rg.run(rmain)\n\n\tif err == nil {\n\t\tglog.Infoln(\"Terminated OK\")\n\t\treturn 0\n\t}\n\tif e, ok := err.(*cos.ErrSignal); ok {\n\t\tglog.Infof(\"Terminated OK via %v\", e)\n\t\treturn e.ExitCode()\n\t}\n\tif errors.Is(err, cmn.ErrStartupTimeout) {\n\t\t// NOTE: stats and keepalive runners wait for the ClusterStarted() - i.e., for the primary\n\t\t// to reach the corresponding stage. There must be an external \"restarter\" (e.g. K8s)\n\t\t// to restart the daemon if the primary gets killed or panics prior (to reaching that state)\n\t\tglog.Errorln(\"Timed-out while starting up\")\n\t}\n\tglog.Errorf(\"Terminated with err: %v\", err)\n\treturn 1\n}",
"func (r chiRouter) Run(cfg config.ServiceConfig) {\n\tr.cfg.Engine.Use(r.cfg.Middlewares...)\n\tif cfg.Debug {\n\t\tr.registerDebugEndpoints()\n\t}\n\n\tr.cfg.Engine.Get(\"/__health\", mux.HealthHandler)\n\n\tserver.InitHTTPDefaultTransport(cfg)\n\n\tr.registerKrakendEndpoints(cfg.Endpoints)\n\n\tr.cfg.Engine.NotFound(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(server.CompleteResponseHeaderName, server.HeaderIncompleteResponseValue)\n\t\thttp.NotFound(w, r)\n\t})\n\n\tif err := r.RunServer(r.ctx, cfg, r.cfg.Engine); err != nil {\n\t\tr.cfg.Logger.Error(logPrefix, err.Error())\n\t}\n\n\tr.cfg.Logger.Info(logPrefix, \"Router execution ended\")\n}",
"func (s *Server) Run() error {\n\t// configure service routes\n\ts.configureRoutes()\n\n\tlog.Infof(\"Serving '%s - %s' on address %s\", s.Title, s.Version, s.server.Addr)\n\t// server is set to healthy when started.\n\ts.healthy = true\n\tif s.config.InsecureHTTP {\n\t\treturn s.server.ListenAndServe()\n\t}\n\treturn s.server.ListenAndServeTLS(s.config.TLSCertFile, s.config.TLSKeyFile)\n}",
"func (e *EndComponent) Run(ctx context.Context, config *ucfg.Config) error {\n\treturn nil\n}",
"func Run(s *options.MCMServer) error {\n\t// To help debugging, immediately log version\n\tklog.V(4).Infof(\"Version: %+v\", version.Get())\n\tif err := s.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tvar err error\n\n\t//kubeconfig for the cluster for which machine-controller-manager will create machines.\n\ttargetkubeconfig, err := clientcmd.BuildConfigFromFlags(\"\", s.TargetKubeconfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontrolkubeconfig := targetkubeconfig\n\n\tif s.ControlKubeconfig != \"\" {\n\t\tif s.ControlKubeconfig == \"inClusterConfig\" {\n\t\t\t//use inClusterConfig when controller is running inside clus\n\t\t\tcontrolkubeconfig, err = clientcmd.BuildConfigFromFlags(\"\", \"\")\n\t\t} else {\n\t\t\t//kubeconfig for the seedcluster where MachineCRDs are supposed to be registered.\n\t\t\tcontrolkubeconfig, err = clientcmd.BuildConfigFromFlags(\"\", s.ControlKubeconfig)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// PROTOBUF WONT WORK\n\t// kubeconfig.ContentConfig.ContentType = s.ContentType\n\t// Override kubeconfig qps/burst settings from flags\n\ttargetkubeconfig.QPS = s.KubeAPIQPS\n\tcontrolkubeconfig.QPS = s.KubeAPIQPS\n\ttargetkubeconfig.Burst = int(s.KubeAPIBurst)\n\tcontrolkubeconfig.Burst = int(s.KubeAPIBurst)\n\ttargetkubeconfig.Timeout = targetkubeconfigTimeout\n\tcontrolkubeconfig.Timeout = controlkubeconfigTimeout\n\n\tkubeClientControl, err := kubernetes.NewForConfig(\n\t\trest.AddUserAgent(controlkubeconfig, \"machine-controller-manager\"),\n\t)\n\tif err != nil {\n\t\tklog.Fatalf(\"Invalid API configuration for kubeconfig-control: %v\", err)\n\t}\n\n\tleaderElectionClient := kubernetes.NewForConfigOrDie(rest.AddUserAgent(controlkubeconfig, \"machine-leader-election\"))\n\tklog.V(4).Info(\"Starting http server and mux\")\n\tgo startHTTP(s)\n\n\trecorder := createRecorder(kubeClientControl)\n\n\trun := func(ctx context.Context) {\n\t\tvar stop <-chan struct{}\n\t\t// Control plane client used to interact 
with machine APIs\n\t\tcontrolMachineClientBuilder := machinecontroller.SimpleClientBuilder{\n\t\t\tClientConfig: controlkubeconfig,\n\t\t}\n\t\t// Control plane client used to interact with core kubernetes objects\n\t\tcontrolCoreClientBuilder := corecontroller.SimpleControllerClientBuilder{\n\t\t\tClientConfig: controlkubeconfig,\n\t\t}\n\t\t// Target plane client used to interact with core kubernetes objects\n\t\ttargetCoreClientBuilder := corecontroller.SimpleControllerClientBuilder{\n\t\t\tClientConfig: targetkubeconfig,\n\t\t}\n\n\t\terr := StartControllers(\n\t\t\ts,\n\t\t\tcontrolkubeconfig,\n\t\t\ttargetkubeconfig,\n\t\t\tcontrolMachineClientBuilder,\n\t\t\tcontrolCoreClientBuilder,\n\t\t\ttargetCoreClientBuilder,\n\t\t\trecorder,\n\t\t\tstop,\n\t\t)\n\n\t\tklog.Fatalf(\"error running controllers: %v\", err)\n\t\tpanic(\"unreachable\")\n\n\t}\n\n\tif !s.LeaderElection.LeaderElect {\n\t\trun(nil)\n\t\tpanic(\"unreachable\")\n\t}\n\n\tid, err := os.Hostname()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trl, err := resourcelock.New(\n\t\ts.LeaderElection.ResourceLock,\n\t\ts.Namespace,\n\t\t\"machine-controller-manager\",\n\t\tleaderElectionClient.CoreV1(),\n\t\tleaderElectionClient.CoordinationV1(),\n\t\tresourcelock.ResourceLockConfig{\n\t\t\tIdentity: id,\n\t\t\tEventRecorder: recorder,\n\t\t},\n\t)\n\tif err != nil {\n\t\tklog.Fatalf(\"error creating lock: %v\", err)\n\t}\n\n\tctx := context.TODO()\n\tleaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{\n\t\tLock: rl,\n\t\tLeaseDuration: s.LeaderElection.LeaseDuration.Duration,\n\t\tRenewDeadline: s.LeaderElection.RenewDeadline.Duration,\n\t\tRetryPeriod: s.LeaderElection.RetryPeriod.Duration,\n\t\tCallbacks: leaderelection.LeaderCallbacks{\n\t\t\tOnStartedLeading: run,\n\t\t\tOnStoppedLeading: func() {\n\t\t\t\tklog.Fatalf(\"leaderelection lost\")\n\t\t\t},\n\t\t},\n\t})\n\tpanic(\"unreachable\")\n}",
"func (s *StatusSyncer) Run(stopCh <-chan struct{}) {\n\ts.queue.Run(stopCh)\n\tcontrollers.ShutdownAll(s.services, s.nodes, s.pods, s.ingressClasses, s.ingresses)\n}",
"func (cmd *UpCmd) Run(cobraCmd *cobra.Command, args []string) {\n\tif configutil.ConfigPath != cmd.flags.config {\n\t\tconfigutil.ConfigPath = cmd.flags.config\n\n\t\t// Don't use overwrite config if we use a different config\n\t\tconfigutil.OverwriteConfigPath = \"\"\n\t}\n\n\tlog.StartFileLogging()\n\tvar err error\n\n\tconfigExists, _ := configutil.ConfigExists()\n\tif !configExists {\n\t\tinitCmd := &InitCmd{\n\t\t\tflags: InitCmdFlagsDefault,\n\t\t}\n\n\t\tinitCmd.Run(nil, []string{})\n\n\t\t// Ensure that config is initialized correctly\n\t\tconfigutil.SetDefaultsOnce()\n\t}\n\n\t// Create kubectl client\n\tcmd.kubectl, err = kubectl.NewClientWithContextSwitch(cmd.flags.switchContext)\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to create new kubectl client: %v\", err)\n\t}\n\n\t// Create namespace if necessary\n\terr = kubectl.EnsureDefaultNamespace(cmd.kubectl, log.GetInstance())\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to create namespace: %v\", err)\n\t}\n\n\t// Create cluster role binding if necessary\n\terr = kubectl.EnsureGoogleCloudClusterRoleBinding(cmd.kubectl, log.GetInstance())\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to create ClusterRoleBinding: %v\", err)\n\t}\n\n\t// Init image registries\n\tif cmd.flags.initRegistries {\n\t\terr = registry.InitRegistries(cmd.kubectl, log.GetInstance())\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\t// Build and deploy images\n\tcmd.buildAndDeploy()\n\n\tif cmd.flags.exitAfterDeploy == false {\n\t\t// Start services\n\t\tcmd.startServices(args)\n\t}\n}",
"func (c *CLI) Run(args []string) int {\n\tvar (\n\t\tdebug bool\n\t\tversion bool\n\t)\n\tflags := flag.NewFlagSet(args[0], flag.ContinueOnError)\n\tflags.Usage = func() {\n\t\tfmt.Fprint(c.errStream, helpText)\n\t}\n\tflags.BoolVar(&debug, \"debug\", false, \"\")\n\tflags.BoolVar(&debug, \"d\", false, \"\")\n\tflags.BoolVar(&version, \"version\", false, \"\")\n\tflags.BoolVar(&version, \"v\", false, \"\")\n\n\t// Parse flag\n\tif err := flags.Parse(args[1:]); err != nil {\n\t\treturn ExitCodeParseFlagsError\n\t}\n\n\tif debug {\n\t\tos.Setenv(EnvDebug, \"1\")\n\t\tDebugf(\"Run as DEBUG mode\")\n\t}\n\n\tif version {\n\t\tfmt.Fprintf(c.outStream, fmt.Sprintf(\"%s\\n\", Version))\n\t\treturn ExitCodeOK\n\t}\n\n\tparsedArgs := flags.Args()\n\tif len(parsedArgs) == 0 {\n\t\tPrintErrorf(\"Invalid argument: you must set keyword.\")\n\t\treturn ExitCodeBadArgs\n\t}\n\n\tkeywords := parsedArgs\n\tDebugf(\"keyword: %s\", keywords)\n\n\tsearcher, err := NewClient(keywords)\n\tif err != nil {\n\t\treturn ExitCodeError\n\t}\n\n\tstatus := searcher.search()\n\tif status != ExitCodeOK {\n\t\treturn ExitCodeError\n\t}\n\n\tsearcher.output(c.outStream)\n\n\treturn ExitCodeOK\n}",
"func Run(ctx *cli.Context) error {\n\tc, err := NewConfig(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn run(ctx.Context, c)\n}",
"func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {\n\tdefer utilruntime.HandleCrash()\n\tdefer c.workqueue.ShutDown()\n\t// Start the informer factories to begin populating the informer caches\n\tklog.Info(\"Starting Configurator controller\")\n\n\t// Wait for the caches to be synced before starting workers\n\tklog.Info(\"Waiting for informer caches to sync\")\n\tif ok := cache.WaitForCacheSync(stopCh, c.configmapsSynced, c.customConfigMapSynced); !ok {\n\t\treturn fmt.Errorf(\"failed to wait for caches to sync\")\n\t}\n\n\tklog.Info(\"Waiting for informer caches to sync\")\n\tif ok := cache.WaitForCacheSync(stopCh, c.secretSynced, c.customSecretSynced); !ok {\n\t\treturn fmt.Errorf(\"failed to wait for caches to sync\")\n\t}\n\n\tklog.Info(\"Starting workers\")\n\t// Launch two workers to process configurator resources\n\tfor i := 0; i < threadiness; i++ {\n\t\tgo wait.Until(c.runWorker, time.Second, stopCh)\n\t}\n\n\tklog.Info(\"Started workers\")\n\t<-stopCh\n\tklog.Info(\"Shutting down workers\")\n\n\treturn nil\n}",
"func (a *Agent) Run(shutdown chan struct{}) error {\n\tvar wg sync.WaitGroup\n\n\tlog.Printf(\"INFO Agent Config: Interval:%s, Hostname:%#v, Flush Interval:%s \\n\",\n\t\ta.Config.Agent.Interval, a.Config.Agent.Hostname, a.Config.Agent.FlushInterval)\n\n\t// configure all sources\n\tfor _, source := range a.Config.Sources {\n\t\tsource.SetDefaultTags(a.Config.Tags)\n\t}\n\n\t// Start all ServiceSources\n\tfor _, source := range a.Config.Sources {\n\t\tswitch p := source.Source.(type) {\n\t\tcase optic.ServiceSource:\n\t\t\tacc := NewAccumulator(source, source.EventsCh())\n\t\t\tif err := p.Start(acc); err != nil {\n\t\t\t\tlog.Printf(\"ERROR Service for source %s failed to start, exiting\\n%s\\n\",\n\t\t\t\t\tsource.Name(), err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer p.Stop()\n\t\t}\n\t}\n\n\twg.Add(len(a.Config.Sources))\n\tfor _, source := range a.Config.Sources {\n\t\tinterval := a.Config.Agent.Interval\n\t\t// overwrite global interval if this plugin has it's own\n\t\tif source.Config.Interval != 0 {\n\t\t\tinterval = source.Config.Interval\n\t\t}\n\t\tgo func(source *models.RunningSource, interval time.Duration) {\n\t\t\tdefer wg.Done()\n\t\t\ta.gatherer(shutdown, source, interval)\n\t\t}(source, interval)\n\t}\n\n\twg.Wait()\n\ta.Close()\n\treturn nil\n}",
"func Run() (err error) {\n\n\terr = sm.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = as.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Start Swagger API Manager (provider)\n\terr = apiMgr.Start(true, false)\n\tif err != nil {\n\t\tlog.Error(\"Failed to start Swagger API Manager with error: \", err.Error())\n\t\treturn err\n\t}\n\tlog.Info(\"Swagger API Manager started\")\n\n\t// Add module Swagger APIs\n\terr = apiMgr.AddApis()\n\tif err != nil {\n\t\tlog.Error(\"Failed to add Swagger APIs with error: \", err.Error())\n\t\treturn err\n\t}\n\tlog.Info(\"Swagger APIs successfully added\")\n\n\t// Register Message Queue handler\n\thandler := mq.MsgHandler{Handler: msgHandler, UserData: nil}\n\thandlerId, err = mqLocal.RegisterHandler(handler)\n\tif err != nil {\n\t\tlog.Error(\"Failed to register local Msg Queue listener: \", err.Error())\n\t\treturn err\n\t}\n\tlog.Info(\"Registered local Msg Queue listener\")\n\n\t// Initalize metric store\n\tupdateStoreName()\n\n\treturn nil\n}",
"func Run() error {\n\tcloseLogger, err := setupLogger()\n\tif err != nil {\n\t\treturn fail.Wrap(err)\n\t}\n\tdefer closeLogger()\n\n\ts := grapiserver.New(\n\t\tgrapiserver.WithGrpcServerUnaryInterceptors(\n\t\t\tgrpc_ctxtags.UnaryServerInterceptor(grpc_ctxtags.WithFieldExtractor(grpc_ctxtags.CodeGenRequestFieldExtractor)),\n\t\t\tgrpc_zap.UnaryServerInterceptor(zap.L()),\n\t\t\tgrpc_zap.PayloadUnaryServerInterceptor(\n\t\t\t\tzap.L(),\n\t\t\t\tfunc(ctx context.Context, fullMethodName string, servingObject interface{}) bool { return true },\n\t\t\t),\n\t\t),\n\t\tgrapiserver.WithGatewayServerMiddlewares(\n\t\t\tgithubEventDispatcher,\n\t\t),\n\t\tgrapiserver.WithServers(\n\t\t\tgithub.NewInstallationEventServiceServer(),\n\t\t),\n\t)\n\treturn s.Serve()\n}",
"func Run(ctx context.Context, client *guardian.Client, config Config) (err error) {\n\tif config.Icon == nil {\n\t\tconfig.Icon = leaseui.DefaultIcon()\n\t}\n\n\trunner, err := New(client, config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn runner.Run(ctx)\n\n\t/*\n\t\tg, ctx := errgroup.WithContext(ctx)\n\n\t\trun := func() error {\n\t\t\trunner, err := New(client, config)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn runner.Run(ctx)\n\t\t}\n\n\t\tg.Go(run)\n\t\tg.Go(run)\n\n\t\treturn g.Wait()\n\t*/\n}",
"func (params *serverParams) Run(context.Context) error {\n\tif params.err != nil && len(params.err) > 0 {\n\t\treturn utilerrors.NewAggregate(params.err)\n\t}\n\n\tvar g errgroup.Group\n\tif params.secureServer != nil {\n\t\tparams.options.Logger.Info(fmt.Sprintf(\"starting %s %s server on %s:%d\", params.options.Name, \"HTTPS\", params.options.SecureAddr, params.options.SecurePort))\n\t\tg.Go(func() error {\n\t\t\treturn params.secureServer.ListenAndServeTLS(params.options.TLSCert, params.options.TLSKey)\n\t\t})\n\t}\n\tif params.insecureServer != nil {\n\t\tparams.options.Logger.Info(fmt.Sprintf(\"starting %s %s server on %s:%d\", params.options.Name, \"HTTP\", params.options.InsecureAddr, params.options.InsecurePort))\n\t\tg.Go(func() error {\n\t\t\treturn params.insecureServer.ListenAndServe()\n\t\t})\n\t}\n\terr := g.Wait()\n\tif err != nil && err != http.ErrServerClosed {\n\t\tparams.options.Logger.Error(err.Error())\n\t\treturn err\n\t}\n\treturn nil\n}",
"func Run(t *testing.T, configOpt core.ConfigProvider, sdkOpts ...fabsdk.Option) {\n\tsetupAndRun(t, true, configOpt, e2eTest, sdkOpts...)\n}",
"func (cli *CLI) Run() {\n\tcli.validateArgs()\n\n\t//possible commands\n\twallet := cmds.ConfigureCreateWalletCmd()\n\tcreate, createData := cmds.ConfigureCreateChainCmd()\n\tgetBalance, getBalanceData := cmds.ConfigureBalanceCmd()\n\tsend, from, to, amount := cmds.ConfigureSendCmd()\n\tprintChain := cmds.ConfigurePrintCmd()\n\n\tif len(os.Args) >= 1 {\n\t\tswitch os.Args[1] {\n\t\tcase cmds.CreateWalletCmdId:\n\t\t\t_ = wallet.Parse(os.Args[2:])\n\t\tcase cmds.PrintCmdId:\n\t\t\t_ = printChain.Parse(os.Args[2:])\n\t\tcase cmds.CreateChainCmdId:\n\t\t\t_ = create.Parse(os.Args[2:])\n\t\tcase cmds.BalanceCmdId:\n\t\t\t_ = getBalance.Parse(os.Args[2:])\n\t\tcase cmds.SendCmdId:\n\t\t\t_ = send.Parse(os.Args[2:])\n\t\tdefault:\n\t\t\tcli.printUsage()\n\t\t\tos.Exit(1)\n\t\t}\n\t} else {\n\t\tcli.printUsage()\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\n\tif wallet.Parsed() {\n\t\tcli.createWallet()\n\t}\n\n\tif printChain.Parsed() {\n\t\tcli.printChain()\n\t}\n\n\tif create.Parsed() {\n\t\terr := cli.createBlockchain(*createData)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tif getBalance.Parsed() {\n\t\tif *getBalanceData == \"\" {\n\t\t\tgetBalance.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t\terr := cli.getBalance(*getBalanceData)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tif send.Parsed() {\n\t\tif *from == \"\" || *to == \"\" || *amount <= 0 {\n\t\t\tsend.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t\terr := cli.send(*from, *to, *amount)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}",
"func (m *Manager) Run() {\n\tgo func() {\n\t\tif err := m.Start(context.Background()); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n}",
"func (l *Listen) Run(args []string) int {\n\tconfig.ListenContext = true\n\n\tvar listener, address, cpuprofile string\n\tcmdFlags := flag.NewFlagSet(\"listen\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { l.UI.Error(l.Help()) }\n\tcmdFlags.StringVar(&listener, \"ln\", \"\", \"\")\n\tcmdFlags.StringVar(&address, \"address\", \"\", \"\")\n\tcmdFlags.StringVar(&cpuprofile, \"cpuprofile\", \"\", \"write cpu profile to file\")\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\targErr := false\n\n\t//Check listener\n\tif listener == \"\" {\n\t\tl.UI.Error(\"Listener name must be specified\")\n\t\targErr = true\n\t}\n\n\t//Check address\n\tif address == \"\" {\n\t\tl.UI.Error(\"Address must be specified\")\n\t\targErr = true\n\t}\n\n\tif argErr {\n\t\tl.UI.Error(\"\")\n\t\tl.UI.Error(l.Help())\n\t\treturn 1\n\t}\n\n\tif cpuprofile != \"\" {\n\t\tf, err := os.Create(cpuprofile)\n\t\tif err != nil {\n\t\t\tl.UI.Error(err.Error())\n\t\t\treturn 1\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\t\tdefer pprof.StopCPUProfile()\n\t}\n\n\t//Read and record the listener config so it is available to the plugin chain\n\tserviceConfig, err := config.ReadServiceConfig(listener, l.KVStore)\n\tif err != nil {\n\t\tl.UI.Error(err.Error())\n\t\treturn 1\n\t}\n\n\tconfig.RecordActiveConfig(serviceConfig)\n\n\t//Build the service for the named listener\n\ts, err := service.BuildServiceForListener(listener, address, l.KVStore)\n\tif err != nil {\n\t\tl.UI.Error(err.Error())\n\t\treturn 1\n\t}\n\tl.UI.Info(fmt.Sprintf(\"***Service:\\n%s\", s))\n\n\t//Build health check context for the named listerner\n\thcc, err := service.BuildHealthContextForListener(listener, l.KVStore)\n\tif err != nil {\n\t\tl.UI.Error(err.Error())\n\t\treturn 1\n\t}\n\tservice.RecordActiveHealthCheckContext(hcc)\n\n\texitChannel := make(chan int)\n\tsignalChannel := make(chan os.Signal, 1)\n\tsignal.Notify(signalChannel, os.Interrupt)\n\n\tgo func() {\n\t\tfor _ = range signalChannel 
{\n\t\t\texitChannel <- 0\n\t\t}\n\t}()\n\n\tgo func(service service.Service) {\n\t\tservice.Run()\n\t\t//Run can return if it can't open ports, etc.\n\t\texitChannel <- 1\n\t}(s)\n\n\texitStatus := <-exitChannel\n\tfmt.Printf(\"exiting with status %d\\n\", exitStatus)\n\treturn exitStatus\n}",
"func (r *PluginRunner) Run(ctx context.Context, wrapper RunnerUtil, pluginSets map[int]plugin.PluginSet, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) {\n\treturn r.RunConfig(ctx,\n\t\tRunner(wrapper),\n\t\tPluginSets(pluginSets),\n\t\tHandshakeConfig(hs),\n\t\tEnv(env...),\n\t\tLogger(logger),\n\t\tMetadataMode(false),\n\t)\n}"
] | [
"0.67258763",
"0.66956735",
"0.66749555",
"0.6631407",
"0.6609172",
"0.6555715",
"0.6465029",
"0.64125794",
"0.64101774",
"0.64062446",
"0.6338222",
"0.6321938",
"0.63210386",
"0.63161224",
"0.63157916",
"0.6259968",
"0.6221255",
"0.62152505",
"0.61776745",
"0.6164499",
"0.6158526",
"0.613736",
"0.61324894",
"0.6129859",
"0.6089335",
"0.6083563",
"0.6068427",
"0.6067681",
"0.6059456",
"0.6056985",
"0.60530424",
"0.60360086",
"0.6034598",
"0.60243535",
"0.59989196",
"0.5989384",
"0.5978657",
"0.5974376",
"0.59678537",
"0.5965441",
"0.5963916",
"0.59591943",
"0.5958269",
"0.59535104",
"0.5950086",
"0.59409815",
"0.59345514",
"0.5930353",
"0.5924641",
"0.5916793",
"0.5916686",
"0.5916297",
"0.5915137",
"0.5912678",
"0.59110445",
"0.59064937",
"0.59004736",
"0.59001756",
"0.5897039",
"0.58952594",
"0.5892439",
"0.5892432",
"0.5889571",
"0.5889014",
"0.5877117",
"0.58659005",
"0.58652586",
"0.58592814",
"0.5858116",
"0.5853723",
"0.5852558",
"0.5849522",
"0.58477896",
"0.5837138",
"0.5832099",
"0.58220106",
"0.58199656",
"0.58144397",
"0.58138096",
"0.58094066",
"0.57903475",
"0.5788151",
"0.5784656",
"0.57812744",
"0.5779176",
"0.5770404",
"0.57679963",
"0.5767468",
"0.5762508",
"0.5755075",
"0.57534945",
"0.5743537",
"0.57423544",
"0.57297003",
"0.5727",
"0.572478",
"0.57244706",
"0.5722258",
"0.5720605",
"0.5720596"
] | 0.7633714 | 0 |
ListUnits returns a list of all Group phases and the Units registered to each of them. | ListUnits возвращает список всех фаз Group и Units, зарегистрированных для каждой из них. | func (g Group) ListUnits() string {
var (
s string
t = "cli"
)
if len(g.c) > 0 {
s += "\n- config: "
for _, u := range g.c {
if u != nil {
s += u.Name() + " "
}
}
}
if len(g.p) > 0 {
s += "\n- prerun: "
for _, u := range g.p {
if u != nil {
s += u.Name() + " "
}
}
}
if len(g.s) > 0 {
s += "\n- serve : "
for _, u := range g.s {
if u != nil {
t = "svc"
s += u.Name() + " "
}
}
}
return fmt.Sprintf("Group: %s [%s]%s", g.name, t, s)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func ListUnits(w http.ResponseWriter) error {\n\tconn, err := sd.NewSystemdConnection()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get systemd bus connection: %s\", err)\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\tunits, err := conn.ListUnits()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed ListUnits: %v\", err)\n\t\treturn err\n\t}\n\n\treturn share.JSONResponse(units, w)\n}",
"func cmdUnitList(c *cli.Context) error {\n\tif err := adm.VerifyNoArgument(c); err != nil {\n\t\treturn err\n\t}\n\n\treturn adm.Perform(`get`, `/unit/`, `list`, nil, c)\n}",
"func (r *Resolver) Units() (*[]*Unit, error) {\n\tvar result []*Unit\n\tfor _, theirUnit := range units.All() {\n\t\tourUnit, err := NewUnit(theirUnit.Name)\n\t\tif err != nil {\n\t\t\treturn &result, err\n\t\t}\n\t\tresult = append(result, &ourUnit)\n\t}\n\treturn &result, nil\n}",
"func listInstalledUnits(ns string, suffix string) ([]string, error) {\n\targs := []string{\n\t\t\"list-units\",\n\t\t\"--no-legend\",\n\t\t\"--no-pager\",\n\t\tfmt.Sprintf(\"%s_*.%s\", ns, suffix),\n\t}\n\tout, err := exec.Command(\"systemctl\", args...).Output()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parseListUnits(string(out), ns, suffix)\n}",
"func (st *Tools) GetUnitNames() (units []string, err error) {\n\tfiles, err := ioutil.ReadDir(\"/etc/systemd/system/dcos.target.wants\")\n\tif err != nil {\n\t\treturn units, err\n\t}\n\tfor _, f := range files {\n\t\tunits = append(units, f.Name())\n\t}\n\tlogrus.Debugf(\"List of units: %s\", units)\n\treturn units, nil\n}",
"func getUnits(app *App, names []string) unitList {\n\tvar units []Unit\n\tif len(names) > 0 {\n\t\tfor _, unitName := range names {\n\t\t\tfor _, appUnit := range app.Units {\n\t\t\t\tif appUnit.Name == unitName {\n\t\t\t\t\tunits = append(units, appUnit)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn unitList(units)\n}",
"func UnitNames() [7]string {\n\treturn unitNames\n}",
"func NewList(slice []Unit) List {\n\treturn unitlist{slice}\n}",
"func (usl UnitStatusList) Group() (UnitStatusList, error) {\n\tmatchers := map[string]struct{}{}\n\tnewList := []fleet.UnitStatus{}\n\n\thashesEqual, err := allHashesEqual(usl)\n\tif err != nil {\n\t\treturn nil, maskAny(err)\n\t}\n\n\tfor _, us := range usl {\n\t\t// Group unit status\n\t\tgrouped, suffix, err := groupUnitStatus(usl, us)\n\t\tif err != nil {\n\t\t\treturn nil, maskAny(err)\n\t\t}\n\n\t\t// Prevent doubled aggregation.\n\t\tif _, ok := matchers[suffix]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tmatchers[suffix] = struct{}{}\n\n\t\tstatesEqual := allStatesEqual(grouped)\n\n\t\t// Aggregate.\n\t\tif hashesEqual && statesEqual {\n\t\t\tnewStatus := grouped[0]\n\t\t\tnewStatus.Name = \"*\"\n\t\t\tnewList = append(newList, newStatus)\n\t\t} else {\n\t\t\tnewList = append(newList, grouped...)\n\t\t}\n\t}\n\n\treturn newList, nil\n}",
"func (m *MeasurementFamilyListUnits) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateUnitCode(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *MemoryArrayAllOf) GetUnits() []MemoryUnitRelationship {\n\tif o == nil {\n\t\tvar ret []MemoryUnitRelationship\n\t\treturn ret\n\t}\n\treturn o.Units\n}",
"func (s *ServiceOp) List(ctx context.Context, input *ListGroupsInput) (*ListGroupsOutput, error) {\n\tr := client.NewRequest(http.MethodGet, \"/azure/compute/group\")\n\tresp, err := client.RequireOK(s.Client.Do(ctx, r))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tgs, err := groupsFromHttpResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ListGroupsOutput{Groups: gs}, nil\n}",
"func (c *tiKVGroups) List(opts v1.ListOptions) (result *v1alpha1.TiKVGroupList, err error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\tresult = &v1alpha1.TiKVGroupList{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"tikvgroups\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}",
"func (s *ServiceOp) List(ctx context.Context, input *ListGroupsInput) (*ListGroupsOutput, error) {\n\tr := client.NewRequest(http.MethodGet, \"/compute/azure/group\")\n\tresp, err := client.RequireOK(s.Client.Do(ctx, r))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tgs, err := groupsFromHttpResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ListGroupsOutput{Groups: gs}, nil\n}",
"func (c *awsWafregionalRuleGroups) List(opts meta_v1.ListOptions) (result *v1.AwsWafregionalRuleGroupList, err error) {\n\tresult = &v1.AwsWafregionalRuleGroupList{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"awswafregionalrulegroups\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}",
"func (c *tiDBGroups) List(opts v1.ListOptions) (result *v1alpha1.TiDBGroupList, err error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\tresult = &v1alpha1.TiDBGroupList{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"tidbgroups\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}",
"func (s *GroupsService) List(opt ...CallOption) ([]*Group, *Response, error) {\n\tgroups, resp, err := listGroup(s.client, \"groups\", opt...)\n\n\treturn groups, resp, err\n}",
"func (o *MemoryArrayAllOf) SetUnits(v []MemoryUnitRelationship) {\n\to.Units = v\n}",
"func (o *DataExportQuery) SetUnits(v int32) {\n\to.Units = &v\n}",
"func (me *masterExtension) Units() ([]k8scloudconfig.UnitAsset, error) {\n\tunitsMeta := []k8scloudconfig.UnitMetadata{\n\t\t{\n\t\t\tAssetContent: ignition.AzureCNINatRules,\n\t\t\tName: \"azure-cni-nat-rules.service\",\n\t\t\tEnabled: true,\n\t\t},\n\t\t{\n\t\t\tAssetContent: ignition.CertificateDecrypterUnit,\n\t\t\tName: \"certificate-decrypter.service\",\n\t\t\tEnabled: true,\n\t\t},\n\t\t{\n\t\t\tAssetContent: ignition.EtcdMountUnit,\n\t\t\tName: \"var-lib-etcd.mount\",\n\t\t\tEnabled: true,\n\t\t},\n\t\t{\n\t\t\tAssetContent: ignition.DockerMountUnit,\n\t\t\tName: \"var-lib-docker.mount\",\n\t\t\tEnabled: true,\n\t\t},\n\t\t{\n\t\t\tAssetContent: ignition.KubeletMountUnit,\n\t\t\tName: \"var-lib-kubelet.mount\",\n\t\t\tEnabled: true,\n\t\t},\n\t\t{\n\t\t\tAssetContent: ignition.VNICConfigurationUnit,\n\t\t\tName: \"vnic-configuration.service\",\n\t\t\tEnabled: true,\n\t\t},\n\t}\n\n\tdata := me.templateData(me.certFiles)\n\n\t// To use the certificate decrypter unit for the etcd data encryption config file.\n\tdata.certificateDecrypterUnitParams.CertsPaths = append(data.certificateDecrypterUnitParams.CertsPaths, encryptionConfigFilePath)\n\n\tvar newUnits []k8scloudconfig.UnitAsset\n\n\tfor _, fm := range unitsMeta {\n\t\tc, err := k8scloudconfig.RenderAssetContent(fm.AssetContent, data)\n\t\tif err != nil {\n\t\t\treturn nil, microerror.Mask(err)\n\t\t}\n\n\t\tunitAsset := k8scloudconfig.UnitAsset{\n\t\t\tMetadata: fm,\n\t\t\tContent: c,\n\t\t}\n\n\t\tnewUnits = append(newUnits, unitAsset)\n\t}\n\n\treturn newUnits, nil\n}",
"func (o *EquipmentBaseSensor) GetUnits() string {\n\tif o == nil || o.Units == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Units\n}",
"func (o *EquipmentBaseSensor) SetUnits(v string) {\n\to.Units = &v\n}",
"func (s *GroupsService) List(ctx context.Context, opts *PagingOptions) (*GroupList, error) {\n\tquery := addPaging(url.Values{}, opts)\n\treq, err := s.Client.NewRequest(ctx, http.MethodGet, newURI(groupsURI), WithQuery(query))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"list groups failed: , %w\", err)\n\t}\n\tres, resp, err := s.Client.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"list groups failed: , %w\", err)\n\t}\n\n\tif resp != nil && resp.StatusCode == http.StatusNotFound {\n\t\treturn nil, ErrNotFound\n\t}\n\n\tg := &GroupList{\n\t\tGroups: []*Group{},\n\t}\n\tif err := json.Unmarshal(res, g); err != nil {\n\t\treturn nil, fmt.Errorf(\"list groups failed, unable to unmarshal repository list json: , %w\", err)\n\t}\n\n\tfor _, r := range g.GetGroups() {\n\t\tr.Session.set(resp)\n\t}\n\n\treturn g, nil\n}",
"func (m *MeasurementFamilyListUnitsUnitCode) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateConvertFromStandard(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLabels(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func Unit_Values() []string {\n\treturn []string{\n\t\tUnitSeconds,\n\t\tUnitCount,\n\t\tUnitPercent,\n\t}\n}",
"func (d *drawer) DrawUnitList(units []swgohhelp.Unit) ([]byte, error) {\n\t// 5 per row, 100px per unit, 10px padding\n\tpadding := 30\n\tportraitSize := 100\n\tunitSize := portraitSize + padding*2\n\twidth := unitSize * 5\n\theight := unitSize * int(math.Ceil((float64(len(units)) / 5.0)))\n\n\tcanvas := gg.NewContext(width, height)\n\tcanvas.SetHexColor(\"#0D1D25\")\n\tcanvas.Clear()\n\n\t// Use an asset bundle to save some disk I/O\n\tbundle := &assetBundle{\n\t\tui: make(map[string]image.Image),\n\t}\n\n\t// draw each unit portrait\n\tx, y := padding, padding\n\tfor unitCount, u := range units {\n\t\t// Draw portrait\n\t\tportrait, err := loadAsset(fmt.Sprintf(\"characters/%s_portrait.png\", u.Name))\n\t\tif err != nil {\n\t\t\tlogger.Errorf(\"Error loading character image portrait %v: %v\", u.Name, err)\n\t\t\tcontinue\n\t\t}\n\t\tcroppedPortrait := cropCircle(portrait)\n\t\tcanvas.DrawImage(croppedPortrait, x, y)\n\n\t\t// Draw gear\n\t\tgear, _ := bundle.loadUIAsset(fmt.Sprintf(\"ui/gear-icon-g%d_100x100.png\", u.Gear))\n\t\tif gear != nil {\n\t\t\tcanvas.DrawImage(gear, x, y)\n\t\t}\n\n\t\t// Draw stars\n\t\tstarYellow, _ := bundle.loadUIAsset(\"ui/ap-5r-char-portrait_star-yellow.png\")\n\t\tstarGray, _ := bundle.loadUIAsset(\"ui/ap-5r-char-portrait_star-gray.png\")\n\t\tif starYellow != nil {\n\t\t\tcx, cy := x+(unitSize/4)+10, y+(unitSize/4)\n\t\t\trotate := []float64{0, -66, -43, -21, 0, 21, 43, 66}\n\t\t\tfor i := 1; i <= 7; i++ {\n\t\t\t\tcanvas.Push()\n\t\t\t\tcanvas.Stroke()\n\t\t\t\tcanvas.Translate(0.5, 0)\n\t\t\t\tcanvas.RotateAbout(gg.Radians(rotate[i]), f(cx), f(cy))\n\t\t\t\tif u.Rarity >= i {\n\t\t\t\t\tcanvas.DrawImageAnchored(starYellow, cx, cy-26, 0.5, 0.5)\n\t\t\t\t} else {\n\t\t\t\t\tcanvas.DrawImageAnchored(starGray, cx, cy-26, 0.5, 0.5)\n\t\t\t\t}\n\t\t\t\tcanvas.Pop()\n\t\t\t}\n\t\t}\n\n\t\t// Check offset\n\t\tx += unitSize\n\t\tif (unitCount+1)%5 == 0 {\n\t\t\ty += unitSize\n\t\t\tx = padding\n\t\t}\n\t}\n\n\tvar b bytes.Buffer\n\tif 
err := canvas.EncodePNG(&b); err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.Bytes(), nil\n}",
"func (sm *SpaceManager) List(ctx context.Context) ([]string, error) {\n\tlock := sm.Lock.Get(allSpacesLockName)\n\tif !lock.RLock(sm.LockTimeout) {\n\t\treturn nil, ErrorLocking.Format(\"space manager\", allSpacesLockName)\n\t}\n\tdefer lock.RUnlock()\n\treturn list(ctx, sm.Backend, sm.Prefix, validateName, sortNames)\n}",
"func (s *GroupsService) ListGroups(opt *ListGroupsOptions, options ...RequestOptionFunc) ([]*Group, *Response, error) {\n\treq, err := s.client.NewRequest(http.MethodGet, \"groups\", opt, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar gs []*Group\n\tresp, err := s.client.Do(req, &gs)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn gs, resp, nil\n}",
"func List(c messagebird.Client, options *messagebird.PaginationRequest) (*Groups, error) {\n\tgroupList := &Groups{}\n\tif err := c.Request(groupList, http.MethodGet, path+\"?\"+options.QueryParams(), nil); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn groupList, nil\n}",
"func (o *DataExportQuery) GetUnits() int32 {\n\tif o == nil || o.Units == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.Units\n}",
"func (o ElastigroupMultipleMetricsMetricOutput) Unit() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ElastigroupMultipleMetricsMetric) *string { return v.Unit }).(pulumi.StringPtrOutput)\n}",
"func ListVolumeGroupNames() ([]string, error) {\n\tresult := new(vgsOutput)\n\tif err := run(\"vgs\", result); err != nil {\n\t\treturn nil, err\n\t}\n\tvar names []string\n\tfor _, report := range result.Report {\n\t\tfor _, vg := range report.Vg {\n\t\t\tnames = append(names, vg.Name)\n\t\t}\n\t}\n\treturn names, nil\n}",
"func (m *manager) List() ([]string, error) {\n\tvar igs []*compute.InstanceGroup\n\n\tzones, err := m.ListZones(utils.AllNodesPredicate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, zone := range zones {\n\t\tigsForZone, err := m.cloud.ListInstanceGroups(zone)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, ig := range igsForZone {\n\t\t\tigs = append(igs, ig)\n\t\t}\n\t}\n\n\tvar names []string\n\tfor _, ig := range igs {\n\t\tif m.namer.NameBelongsToCluster(ig.Name) {\n\t\t\tnames = append(names, ig.Name)\n\t\t}\n\t}\n\n\treturn names, nil\n}",
"func (s *EmptyStore) GroupList() (groups []*storagepb.Group, err error) {\n\treturn groups, nil\n}",
"func (m *VirtualMachinesClientMock) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachine, rerr *retry.Error) {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\tif _, ok := m.FakeStore[resourceGroupName]; ok {\n\t\tfor _, v := range m.FakeStore[resourceGroupName] {\n\t\t\tresult = append(result, v)\n\t\t}\n\t}\n\n\treturn result, nil\n}",
"func (s *statsFloat64) List() []float64 {\n\treturn []float64{\n\t\ts.size,\n\t\ts.used,\n\t\ts.free,\n\t\ts.usedCapacityPercent,\n\t}\n}",
"func (s *statsFloat64) List() []float64 {\n\treturn []float64{\n\t\ts.size,\n\t\ts.used,\n\t\ts.free,\n\t\ts.usedCapacityPercent,\n\t}\n}",
"func ListVolumeGroupNames() ([]string, error) {\n\tresult := new(vgsOutput)\n\tif err := run(\"vgs\", result); err != nil {\n\t\tlog.Errorf(\"ListVolumeGroupNames error: %s\", err.Error())\n\t\treturn nil, err\n\t}\n\tvar names []string\n\tfor _, report := range result.Report {\n\t\tfor _, vg := range report.Vg {\n\t\t\tnames = append(names, vg.Name)\n\t\t}\n\t}\n\treturn names, nil\n}",
"func (oc *OrganizationCreate) AddUnits(o ...*OrgUnit) *OrganizationCreate {\n\tids := make([]int, len(o))\n\tfor i := range o {\n\t\tids[i] = o[i].ID\n\t}\n\treturn oc.AddUnitIDs(ids...)\n}",
"func (ou *OrganizationUpdate) RemoveUnits(o ...*OrgUnit) *OrganizationUpdate {\n\tids := make([]int, len(o))\n\tfor i := range o {\n\t\tids[i] = o[i].ID\n\t}\n\treturn ou.RemoveUnitIDs(ids...)\n}",
"func (client *VirtualMachineScaleSetsClientMock) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachineScaleSet, rerr *retry.Error) {\n\tclient.mutex.Lock()\n\tdefer client.mutex.Unlock()\n\n\tresult = []compute.VirtualMachineScaleSet{}\n\tif _, ok := client.FakeStore[resourceGroupName]; ok {\n\t\tfor _, v := range client.FakeStore[resourceGroupName] {\n\t\t\tresult = append(result, v)\n\t\t}\n\t}\n\n\treturn result, nil\n}",
"func ListTests() {\n\tfmt.Printf(\"Available test suites:\\n\\tauto\\n\")\n\tfor _, suite := range AllSuites {\n\t\tfmt.Printf(\"\\t%s\\n\", suite)\n\t}\n}",
"func (o FleetOutput) MetricGroups() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *Fleet) pulumi.StringArrayOutput { return v.MetricGroups }).(pulumi.StringArrayOutput)\n}",
"func (c *GroupController) List(ctx *app.ListGroupContext) error {\n\t// GroupController_List: start_implement\n\n\tdataStore := &dal.DataStore{}\n\tdataStore.GetSession()\n\t// Close the session\n\tdefer dataStore.Close()\n\tdc := dal.NewDalGroup(dataStore)\n\n\tgroups, err := dc.FetchAll()\n\n\tif err != nil {\n\t\tctx.ResponseData.Service.LogError(\"InternalServerError\", \"req_id\", middleware.ContextRequestID(ctx), \"ctrl\", \"Group\", \"action\", \"List\", ctx.RequestData.Request.Method, ctx.RequestData.Request.URL, \"databaseError\", err.Error())\n\t\treturn ctx.InternalServerError()\n\t}\n\n\tres := make(app.GwentapiGroupCollection, len(*groups))\n\n\tlastModified := time.Time{}\n\tfor i, group := range *groups {\n\t\tg, _ := factory.CreateGroup(&group)\n\n\t\tif lastModified.Before(group.Last_Modified) {\n\t\t\tlastModified = group.Last_Modified\n\t\t}\n\n\t\tres[i] = g\n\t}\n\n\t// GroupController_List: end_implement\n\thelpers.LastModified(ctx.ResponseData, lastModified)\n\tif ctx.IfModifiedSince != nil {\n\t\tif !helpers.IsModified(*ctx.IfModifiedSince, lastModified) {\n\t\t\treturn ctx.NotModified()\n\t\t}\n\t}\n\treturn ctx.OK(res)\n}",
"func (g *Group) Register(units ...Unit) []bool {\n\tg.log = logger.GetLogger(g.name)\n\thasRegistered := make([]bool, len(units))\n\tfor idx := range units {\n\t\tif !g.configured {\n\t\t\t// if RunConfig has been called we can no longer register Config\n\t\t\t// phases of Units\n\t\t\tif c, ok := units[idx].(Config); ok {\n\t\t\t\tg.c = append(g.c, c)\n\t\t\t\thasRegistered[idx] = true\n\t\t\t}\n\t\t}\n\t\tif p, ok := units[idx].(PreRunner); ok {\n\t\t\tg.p = append(g.p, p)\n\t\t\thasRegistered[idx] = true\n\t\t}\n\t\tif s, ok := units[idx].(Service); ok {\n\t\t\tg.s = append(g.s, s)\n\t\t\thasRegistered[idx] = true\n\t\t}\n\t}\n\treturn hasRegistered\n}",
"func (ouo *OrganizationUpdateOne) RemoveUnits(o ...*OrgUnit) *OrganizationUpdateOne {\n\tids := make([]int, len(o))\n\tfor i := range o {\n\t\tids[i] = o[i].ID\n\t}\n\treturn ouo.RemoveUnitIDs(ids...)\n}",
"func (c *MockNetworkSecurityGroupsClient) List(ctx context.Context, resourceGroupName string) ([]network.SecurityGroup, error) {\n\tvar l []network.SecurityGroup\n\tfor _, nsg := range c.NSGs {\n\t\tl = append(l, nsg)\n\t}\n\treturn l, nil\n}",
"func (c *MockVMScaleSetsClient) List(ctx context.Context, resourceGroupName string) ([]compute.VirtualMachineScaleSet, error) {\n\tvar l []compute.VirtualMachineScaleSet\n\tfor _, vmss := range c.VMSSes {\n\t\tl = append(l, vmss)\n\t}\n\treturn l, nil\n}",
"func groupUnitStatus(usl []fleet.UnitStatus, groupMember fleet.UnitStatus) ([]fleet.UnitStatus, string, error) {\n\tID, err := common.SliceID(groupMember.Name)\n\tif err != nil {\n\t\treturn nil, \"\", maskAny(invalidUnitStatusError)\n\t}\n\n\tnewList := []fleet.UnitStatus{}\n\tfor _, us := range usl {\n\t\texp := common.ExtExp.ReplaceAllString(us.Name, \"\")\n\t\tif !strings.HasSuffix(exp, ID) {\n\t\t\tcontinue\n\t\t}\n\n\t\tnewList = append(newList, us)\n\t}\n\n\treturn newList, ID, nil\n}",
"func (r *PlacementGroupsService) List(profileId int64) *PlacementGroupsListCall {\n\tc := &PlacementGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.profileId = profileId\n\treturn c\n}",
"func (r *ProjectsGroupsService) List(name string) *ProjectsGroupsListCall {\n\tc := &ProjectsGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}",
"func SupportedUnits() map[UnitType][]string {\n\tunitLock.RLock()\n\tdefer unitLock.RUnlock()\n\tsupported := make(map[UnitType][]string, len(supportedUnits))\n\tfor unit, aliases := range supportedUnits {\n\t\tsupported[unit] = aliases\n\t}\n\treturn supported\n}",
"func List() []string {\n\tnames := make([]string, 0, len(stores))\n\tfor name := range stores {\n\t\tnames = append(names, name)\n\t}\n\treturn names\n}",
"func (m *VirtualMachineScaleSetVMsClientMock) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, expand string) (result []compute.VirtualMachineScaleSetVM, rerr *retry.Error) {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\tif _, ok := m.FakeStore[resourceGroupName]; ok {\n\t\tfor _, v := range m.FakeStore[resourceGroupName] {\n\t\t\tresult = append(result, v)\n\t\t}\n\t}\n\treturn result, nil\n}",
"func (c CouchbaseFleet) DestroyUnits(allUnits bool) error {\n\n\tttlSeconds := uint64(300)\n\t_, err := c.etcdClient.Set(KEY_REMOVE_REBALANCE_DISABLED, \"true\", ttlSeconds)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// call ManipulateUnits with a function that will stop them\n\tunitDestroyer := func(unit *schema.Unit) error {\n\n\t\t// stop the unit by updating desiredState to inactive\n\t\t// and posting to fleet api\n\t\tendpointUrl := fmt.Sprintf(\"%v/units/%v\", FLEET_API_ENDPOINT, unit.Name)\n\t\treturn DELETE(endpointUrl)\n\n\t}\n\n\treturn c.ManipulateUnits(unitDestroyer, allUnits)\n\n}",
"func (c *Client) ListGroups() ([]string, error) {\n\tvar groups []string\n\tif !c.authenticated {\n\t\treturn groups, errors.New(\"Not authenticated. Call Authenticate first\")\n\t}\n\n\tmsg := common.NewMessage(c.userId, \"server\",\n\t\t\"control\", \"remove_to_group\", time.Time{},\n\t\tcommon.TEXT, \"\")\n\n\tencoded, err := msg.Json()\n\tif err != nil {\n\t\tlog.Println(\"Failed to encode list groups message\", err)\n\t\treturn groups, err\n\t}\n\n\terr = c.transport.SendText(string(encoded))\n\tif err != nil {\n\t\tlog.Println(\"Failed to send list groups message\", err)\n\t\treturn groups, err\n\t}\n\n\tresp := <-c.Out\n\tif resp.Status() == common.STATUS_ERROR {\n\t\terrMsg := resp.Error()\n\t\tlog.Println(\"Delete group response error\", errMsg)\n\t\treturn groups, errors.New(errMsg)\n\t}\n\n\tgroups, _ = resp.GetJsonData(\"groups\").([]string)\n\treturn groups, nil\n}",
"func (s *Set) ListMetricNames() []string {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tmetricNames := make([]string, 0, len(s.m))\n\tfor _, nm := range s.m {\n\t\tif nm.isAux {\n\t\t\tcontinue\n\t\t}\n\t\tmetricNames = append(metricNames, nm.name)\n\t}\n\tsort.Strings(metricNames)\n\treturn metricNames\n}",
"func (s *LiftingStorage) GetUniqueUnits() ([]string, error) {\n\tcategorys := make([]string, 0)\n\terr := s.db.Select(&categorys, uniqueUnits)\n\tif err != nil {\n\t\treturn categorys, err\n\t}\n\treturn categorys, nil\n}",
"func (st *API) UserGroupList(ctx context.Context) ([]UserGroup, error) {\n\terr := checkPermission(ctx, \"bot\", \"usergroups:read\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tst.mx.Lock()\n\tdefer st.mx.Unlock()\n\n\tids := make([]string, 0, len(st.usergroups))\n\tfor id := range st.usergroups {\n\t\tids = append(ids, id)\n\t}\n\tsort.Strings(ids)\n\n\tresult := make([]UserGroup, 0, len(ids))\n\tfor _, id := range ids {\n\t\tch := st.usergroups[id]\n\t\tresult = append(result, ch.UserGroup)\n\t}\n\n\treturn result, nil\n}",
"func (ou *OrganizationUpdate) AddUnits(o ...*OrgUnit) *OrganizationUpdate {\n\tids := make([]int, len(o))\n\tfor i := range o {\n\t\tids[i] = o[i].ID\n\t}\n\treturn ou.AddUnitIDs(ids...)\n}",
"func (s *Space) List(ctx context.Context) ([]string, error) {\n\tlock := s.SpaceManager.Lock.Get(s.Name())\n\tif !lock.RLock(s.SpaceManager.LockTimeout) {\n\t\treturn nil, ErrorLocking.Format(\"space\", s.Name())\n\t}\n\tdefer lock.RUnlock()\n\treturn list(ctx, s.SpaceManager.Backend, s.Prefix, validateName, sortNames)\n}",
"func (Unit) Values() []Unit {\n\treturn []Unit{\n\t\t\"Seconds\",\n\t\t\"Microseconds\",\n\t\t\"Milliseconds\",\n\t\t\"Bytes\",\n\t\t\"Kilobytes\",\n\t\t\"Megabytes\",\n\t\t\"Gigabytes\",\n\t\t\"Terabytes\",\n\t\t\"Bits\",\n\t\t\"Kilobits\",\n\t\t\"Megabits\",\n\t\t\"Gigabits\",\n\t\t\"Terabits\",\n\t\t\"Percent\",\n\t\t\"Count\",\n\t\t\"Bytes/Second\",\n\t\t\"Kilobytes/Second\",\n\t\t\"Megabytes/Second\",\n\t\t\"Gigabytes/Second\",\n\t\t\"Terabytes/Second\",\n\t\t\"Bits/Second\",\n\t\t\"Kilobits/Second\",\n\t\t\"Megabits/Second\",\n\t\t\"Gigabits/Second\",\n\t\t\"Terabits/Second\",\n\t\t\"Count/Second\",\n\t\t\"None\",\n\t}\n}",
"func (c *Dg) GetList() ([]string, error) {\n c.con.LogQuery(\"(get) list of device groups\")\n path := c.xpath(nil)\n return c.con.EntryListUsing(c.con.Get, path[:len(path) - 1])\n}",
"func (s *JobService) List(ctx context.Context, clientTimeOffset int, collectAllChildJobs bool) (*Groups, *http.Response, error) {\n\trequest := Request{\n\t\tAction: JobAction,\n\t\tMethod: \"getGroupInfo\",\n\t\tData: []interface{}{[]interface{}{nil}, clientTimeOffset, collectAllChildJobs},\n\t\tType: \"rpc\",\n\t\tTid: 1,\n\t}\n\n\treq, err := s.client.NewRequest(&request)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tvar groups Groups\n\tr := Response{Data: &groups}\n\tresp, err := s.client.Do(ctx, req, &r)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\treturn &groups, resp, nil\n}",
"func (c *MockResourceGroupsClient) List(ctx context.Context, filter string) ([]resources.Group, error) {\n\tif filter != \"\" {\n\t\treturn nil, fmt.Errorf(\"unsupported non-empty filter: %s\", filter)\n\t}\n\tvar l []resources.Group\n\tfor _, rg := range c.RGs {\n\t\tl = append(l, rg)\n\t}\n\treturn l, nil\n}",
"func List() (list []string) {\n\n\tfor i := range box {\n\t\tlist = append(list, i)\n\t}\n\n\treturn list\n}",
"func (cnst planckUnits) Unit() *unit.Unit {\n\treturn unit.New(float64(cnst), unit.Dimensions{\n\t\tunit.MassDim: 1,\n\t\tunit.LengthDim: 2,\n\t\tunit.TimeDim: -1,\n\t})\n}",
"func (covList CoverageList) ListDirectories() []string {\n\tdirSet := map[string]bool{}\n\tfor _, cov := range covList.Group {\n\t\tdirSet[path.Dir(cov.Name)] = true\n\t}\n\tvar result []string\n\tfor key := range dirSet {\n\t\tresult = append(result, key)\n\t}\n\treturn result\n}",
"func (o *GetForecastByCityIDParams) SetUnits(units *string) {\n\to.Units = units\n}",
"func (m *Directory) GetAdministrativeUnits()([]AdministrativeUnitable) {\n val, err := m.GetBackingStore().Get(\"administrativeUnits\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]AdministrativeUnitable)\n }\n return nil\n}",
"func List(numbers []uint64, prefix string) string {\n\tvar s []string\n\tfor _, n := range numbers {\n\t\ts = append(s, fmt.Sprintf(\"%s%d\", prefix, n))\n\t}\n\treturn strings.Join(s, \", \")\n}",
"func (service Service) GetList(pagination entity.Pagination) (ug []entity.UserGroup, count int, err error) {\n\tusers, count, err := service.repository.GetList(pagination)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tug, err = service.mapUsersToUserGroups(users)\n\treturn\n}",
"func ListOrganizations() error {\n\tclient, err := NewPacketClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\torgs, _, err := client.Organizations.List()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te := MarshallAndPrint(orgs)\n\treturn e\n}",
"func (this *ClientCLI) StatusAll() ([]UnitStatus, error) {\n\tcmd := execPkg.Command(FLEETCTL, ENDPOINT_OPTION, this.etcdPeer, \"list-units\", \"--full=true\", \"-l=true\", \"--fields=unit,load,active,sub,machine\")\n\tstdout, err := exec(cmd)\n\tif err != nil {\n\t\treturn []UnitStatus{}, err\n\t}\n\n\treturn parseFleetStatusOutput(stdout)\n}",
"func (o *MemoryArrayAllOf) GetUnitsOk() ([]MemoryUnitRelationship, bool) {\n\tif o == nil || o.Units == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Units, true\n}",
"func (ouo *OrganizationUpdateOne) AddUnits(o ...*OrgUnit) *OrganizationUpdateOne {\n\tids := make([]int, len(o))\n\tfor i := range o {\n\t\tids[i] = o[i].ID\n\t}\n\treturn ouo.AddUnitIDs(ids...)\n}",
"func (db *MySQLDB) ListGroups(ctx context.Context, tenant *Tenant, request *helper.PageRequest) ([]*Group, *helper.Page, error) {\n\tfLog := mysqlLog.WithField(\"func\", \"ListGroups\").WithField(\"RequestID\", ctx.Value(constants.RequestID))\n\tq := \"SELECT COUNT(*) AS CNT FROM HANSIP_GROUP\"\n\tret := make([]*Group, 0)\n\trow := db.instance.QueryRowContext(ctx, q)\n\tcount := 0\n\terr := row.Scan(&count)\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn ret, helper.NewPage(request, uint(count)), nil\n\t\t}\n\t\tfLog.Errorf(\"row.Scan got %s\", err.Error())\n\t\treturn nil, nil, &ErrDBScanError{\n\t\t\tWrapped: err,\n\t\t\tMessage: \"Error ListGroups\",\n\t\t\tSQL: q,\n\t\t}\n\t}\n\tpage := helper.NewPage(request, uint(count))\n\tq = fmt.Sprintf(\"SELECT REC_ID, GROUP_NAME, GROUP_DOMAIN, DESCRIPTION FROM HANSIP_GROUP WHERE GROUP_DOMAIN=? ORDER BY GROUP_NAME %s LIMIT %d, %d\", request.Sort, page.OffsetStart, page.OffsetEnd-page.OffsetStart)\n\trows, err := db.instance.QueryContext(ctx, q, tenant.Domain)\n\tif err != nil {\n\t\tfLog.Errorf(\"db.instance.QueryContext got %s. SQL = %s\", err.Error(), q)\n\t\treturn nil, nil, &ErrDBQueryError{\n\t\t\tWrapped: err,\n\t\t\tMessage: \"Error ListGroups\",\n\t\t\tSQL: q,\n\t\t}\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tr := &Group{}\n\t\terr := rows.Scan(&r.RecID, &r.GroupName, &r.GroupDomain, &r.Description)\n\t\tif err != nil {\n\t\t\tfLog.Warnf(\"row.Scan got %s\", err.Error())\n\t\t\treturn nil, nil, &ErrDBScanError{\n\t\t\t\tWrapped: err,\n\t\t\t\tMessage: \"Error ListGroups\",\n\t\t\t\tSQL: q,\n\t\t\t}\n\t\t} else {\n\t\t\tret = append(ret, r)\n\t\t}\n\t}\n\treturn ret, page, nil\n}",
"func (c *cave) orderUnits() []*caveUnit {\n\tvar units []*caveUnit\n\tfor _, unit := range c.units {\n\t\tunits = append(units, unit)\n\t}\n\tsort.Slice(units, func(i, j int) bool {\n\t\treturn units[i].loc.readingLess(units[j].loc)\n\t})\n\treturn units\n}",
"func (client Client) ListByResourceGroup(resourceGroupName string) (result ListResult, err error) {\n\treq, err := client.ListByResourceGroupPreparer(resourceGroupName)\n\tif err != nil {\n\t\treturn result, autorest.NewErrorWithError(err, \"redis.Client\", \"ListByResourceGroup\", nil, \"Failure preparing request\")\n\t}\n\n\tresp, err := client.ListByResourceGroupSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\treturn result, autorest.NewErrorWithError(err, \"redis.Client\", \"ListByResourceGroup\", resp, \"Failure sending request\")\n\t}\n\n\tresult, err = client.ListByResourceGroupResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"redis.Client\", \"ListByResourceGroup\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}",
"func (o *EquipmentBaseSensor) GetUnitsOk() (*string, bool) {\n\tif o == nil || o.Units == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Units, true\n}",
"func (fs *FluidsizeService) List() (fl []Fluidsize, err error) {\n\t// GET: /fluidsizes\n\tvar req *http.Request\n\treq, err = fs.c.NewRequest(\"GET\", \"/fluidsizes\", nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresp := struct {\n\t\tStatus string\n\t\tData []Fluidsize\n\t\tMessage string\n\t}{}\n\terr = fs.c.Do(req, &resp)\n\treturn resp.Data, err\n}",
"func (r *Runsc) List(context context.Context) ([]*runc.Container, error) {\n\tdata, stderr, err := cmdOutput(r.command(context, \"list\", \"--format=json\"), false)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%w: %s\", err, stderr)\n\t}\n\tvar out []*runc.Container\n\tif err := json.Unmarshal(data, &out); err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}",
"func (s *Store) List() []string {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tls := make([]string, 0, len(s.ls))\n\tfor p := range s.ls {\n\t\tls = append(ls, p)\n\t}\n\n\treturn ls\n}",
"func (s *Store) List() []string {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tls := make([]string, 0, len(s.ls))\n\tfor p := range s.ls {\n\t\tls = append(ls, p)\n\t}\n\n\treturn ls\n}",
"func (m *prom) ListMetric() []string {\n\tvar res = make([]string, 0)\n\tres = append(res, m.ginMet.List()...)\n\tres = append(res, m.othMet.List()...)\n\treturn res\n}",
"func (domain *Domain) ListComponents() ([]string, error) {\n\t// collect names\n\tcomponents := []string{}\n\n\tdomain.ComponentsX.RLock()\n\tfor component := range domain.Components {\n\t\tcomponents = append(components, component)\n\t}\n\tdomain.ComponentsX.RUnlock()\n\n\t// success\n\treturn components, nil\n}",
"func (client WorkloadNetworksClient) ListVMGroups(ctx context.Context, resourceGroupName string, privateCloudName string) (result WorkloadNetworkVMGroupsListPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/WorkloadNetworksClient.ListVMGroups\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.wnvgl.Response.Response != nil {\n\t\t\t\tsc = result.wnvgl.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: client.SubscriptionID,\n\t\t\tConstraints: []validation.Constraint{{Target: \"client.SubscriptionID\", Name: validation.MinLength, Rule: 1, Chain: nil}}},\n\t\t{TargetValue: resourceGroupName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"resourceGroupName\", Name: validation.MaxLength, Rule: 90, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.MinLength, Rule: 1, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.Pattern, Rule: `^[-\\w\\._\\(\\)]+$`, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewError(\"avs.WorkloadNetworksClient\", \"ListVMGroups\", err.Error())\n\t}\n\n\tresult.fn = client.listVMGroupsNextResults\n\treq, err := client.ListVMGroupsPreparer(ctx, resourceGroupName, privateCloudName)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"avs.WorkloadNetworksClient\", \"ListVMGroups\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListVMGroupsSender(req)\n\tif err != nil {\n\t\tresult.wnvgl.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"avs.WorkloadNetworksClient\", \"ListVMGroups\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.wnvgl, err = client.ListVMGroupsResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"avs.WorkloadNetworksClient\", \"ListVMGroups\", resp, \"Failure responding to 
request\")\n\t\treturn\n\t}\n\tif result.wnvgl.hasNextLink() && result.wnvgl.IsEmpty() {\n\t\terr = result.NextWithContext(ctx)\n\t\treturn\n\t}\n\n\treturn\n}",
"func (s stage) createUnits(config types.Config) error {\n\tfor _, unit := range config.Systemd.Units {\n\t\tif err := s.writeSystemdUnit(unit); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif unit.Enable {\n\t\t\tif err := s.Logger.LogOp(\n\t\t\t\tfunc() error { return s.EnableUnit(unit) },\n\t\t\t\t\"enabling unit %q\", unit.Name,\n\t\t\t); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif unit.Mask {\n\t\t\tif err := s.Logger.LogOp(\n\t\t\t\tfunc() error { return s.MaskUnit(unit) },\n\t\t\t\t\"masking unit %q\", unit.Name,\n\t\t\t); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tfor _, unit := range config.Networkd.Units {\n\t\tif err := s.writeNetworkdUnit(unit); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (d *DivMaster) List() []string {\n\tlist := make([]string, 0)\n\tfor _, v := range d.divName {\n\t\tlist = append(list, v)\n\t}\n\treturn list\n}",
"func (c *routeGroups) List(opts metav1.ListOptions) (result *v1.RouteGroupList, err error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\tresult = &v1.RouteGroupList{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"routegroups\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}",
"func (c *Dg) ShowList() ([]string, error) {\n c.con.LogQuery(\"(show) list of device groups\")\n path := c.xpath(nil)\n return c.con.EntryListUsing(c.con.Show, path[:len(path) - 1])\n}",
"func AllNames() []string {\n\tret := make([]string, 0, len(unitByName))\n\tfor n := range unitByName {\n\t\tif n == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tret = append(ret, n)\n\t}\n\tsort.Strings(ret)\n\treturn ret\n}",
"func (TtlDurationUnit) Values() []TtlDurationUnit {\n\treturn []TtlDurationUnit{\n\t\t\"Seconds\",\n\t\t\"Minutes\",\n\t\t\"Hours\",\n\t\t\"Days\",\n\t\t\"Weeks\",\n\t}\n}",
"func (TtlDurationUnit) Values() []TtlDurationUnit {\n\treturn []TtlDurationUnit{\n\t\t\"Seconds\",\n\t\t\"Minutes\",\n\t\t\"Hours\",\n\t\t\"Days\",\n\t\t\"Weeks\",\n\t}\n}",
"func (c *MockNatGatewaysClient) List(ctx context.Context, resourceGroupName string) ([]network.NatGateway, error) {\n\tvar l []network.NatGateway\n\tfor _, ngw := range c.NGWs {\n\t\tl = append(l, ngw)\n\t}\n\treturn l, nil\n}",
"func ListVolumeGroupUUIDs() ([]string, error) {\n\tresult := new(vgsOutput)\n\tif err := run(\"vgs\", result, \"--options=vg_uuid\"); err != nil {\n\t\treturn nil, err\n\t}\n\tvar uuids []string\n\tfor _, report := range result.Report {\n\t\tfor _, vg := range report.Vg {\n\t\t\tuuids = append(uuids, vg.UUID)\n\t\t}\n\t}\n\treturn uuids, nil\n}",
"func (c *MockVMScaleSetVMsClient) List(ctx context.Context, resourceGroupName, vmssName string) ([]compute.VirtualMachineScaleSetVM, error) {\n\t// Ignore resourceGroupName and vmssName for simplicity.\n\tvar l []compute.VirtualMachineScaleSetVM\n\tfor _, vm := range c.VMs {\n\t\tl = append(l, vm)\n\t}\n\treturn l, nil\n}",
"func ListVolumeGroupUUIDs() ([]string, error) {\n\tresult := new(vgsOutput)\n\tif err := run(\"vgs\", result, \"--options=vg_uuid\"); err != nil {\n\t\tlog.Errorf(\"ListVolumeGroupUUIDs error: %s\", err.Error())\n\t\treturn nil, err\n\t}\n\tvar uuids []string\n\tfor _, report := range result.Report {\n\t\tfor _, vg := range report.Vg {\n\t\t\tuuids = append(uuids, vg.UUID)\n\t\t}\n\t}\n\treturn uuids, nil\n}",
"func (client *Client) ListExperimentGroups(request *ListExperimentGroupsRequest) (response *ListExperimentGroupsResponse, err error) {\n\tresponse = CreateListExperimentGroupsResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}",
"func (StorageUnit) Values() []StorageUnit {\n\treturn []StorageUnit{\n\t\t\"TB\",\n\t}\n}"
] | [
"0.72441345",
"0.7032067",
"0.6085787",
"0.6073443",
"0.595272",
"0.5778228",
"0.5681488",
"0.5550705",
"0.5550341",
"0.5543723",
"0.553827",
"0.55062217",
"0.5478257",
"0.5455158",
"0.54498625",
"0.5443609",
"0.53967434",
"0.5386662",
"0.53394026",
"0.5324254",
"0.5321294",
"0.527811",
"0.5277048",
"0.5252291",
"0.52466196",
"0.5199905",
"0.51424944",
"0.508134",
"0.5059078",
"0.5036113",
"0.5032049",
"0.5028996",
"0.5014391",
"0.50075984",
"0.5007328",
"0.50015706",
"0.50015706",
"0.4991006",
"0.4988078",
"0.498503",
"0.4965824",
"0.49512297",
"0.49083504",
"0.48942584",
"0.48922098",
"0.48868403",
"0.48840407",
"0.4881102",
"0.48802513",
"0.48748133",
"0.48726508",
"0.4839712",
"0.48395473",
"0.48331937",
"0.48318353",
"0.4819877",
"0.48106697",
"0.48103693",
"0.4807688",
"0.48023576",
"0.47992688",
"0.4795687",
"0.47920135",
"0.47896862",
"0.47841662",
"0.47812605",
"0.47760338",
"0.47734886",
"0.4753167",
"0.47413778",
"0.47094864",
"0.47051668",
"0.47003868",
"0.46971378",
"0.46876964",
"0.46852",
"0.4683783",
"0.46785513",
"0.46757683",
"0.4671572",
"0.46697178",
"0.46654886",
"0.46622628",
"0.46622628",
"0.46613362",
"0.46593252",
"0.46571955",
"0.46512353",
"0.46508574",
"0.4649988",
"0.46460274",
"0.46397316",
"0.46388304",
"0.46388304",
"0.4632877",
"0.46287218",
"0.46269107",
"0.46236902",
"0.46219853",
"0.46184736"
] | 0.75670975 | 0 |
WaitTillReady blocks the goroutine till all modules are ready. | WaitTillReady блокирует горутину до тех пор, пока все модули не будут готовы. | func (g *Group) WaitTillReady() {
<-g.readyCh
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (envManager *TestEnvManager) WaitUntilReady() (bool, error) {\n\tlog.Println(\"Start checking components' status\")\n\tretry := u.Retrier{\n\t\tBaseDelay: 1 * time.Second,\n\t\tMaxDelay: 10 * time.Second,\n\t\tRetries: 8,\n\t}\n\n\tready := false\n\tretryFn := func(_ context.Context, i int) error {\n\t\tfor _, comp := range envManager.testEnv.GetComponents() {\n\t\t\tif alive, err := comp.IsAlive(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to comfirm compoment %s is alive %v\", comp.GetName(), err)\n\t\t\t} else if !alive {\n\t\t\t\treturn fmt.Errorf(\"component %s is not alive\", comp.GetName())\n\t\t\t}\n\t\t}\n\n\t\tready = true\n\t\tlog.Println(\"All components are ready\")\n\t\treturn nil\n\t}\n\n\t_, err := retry.Retry(context.Background(), retryFn)\n\treturn ready, err\n}",
"func (c *BFTChain) WaitReady() error {\n\treturn nil\n}",
"func WaitReady(ctx *util.Context) error {\n\tif !ctx.Cluster.MachineController.Deploy {\n\t\treturn nil\n\t}\n\n\tctx.Logger.Infoln(\"Waiting for machine-controller to come up…\")\n\n\t// Wait a bit to let scheduler to react\n\ttime.Sleep(10 * time.Second)\n\n\tif err := WaitForWebhook(ctx.DynamicClient); err != nil {\n\t\treturn errors.Wrap(err, \"machine-controller-webhook did not come up\")\n\t}\n\n\tif err := WaitForMachineController(ctx.DynamicClient); err != nil {\n\t\treturn errors.Wrap(err, \"machine-controller did not come up\")\n\t}\n\treturn nil\n}",
"func (t *Indie) waitModules() {\n for _, m := range t.modules {\n\tim := reflect.ValueOf(m).FieldByName(\"Module\").Interface()\n\tt.moduleWgs[im.(Module).Name].Wait()\n }\n}",
"func (t *Indie) Wait() {\n t.waitModules()\n}",
"func WaitReady(s *state.State) error {\n\tif !s.Cluster.MachineController.Deploy {\n\t\treturn nil\n\t}\n\n\ts.Logger.Infoln(\"Waiting for machine-controller to come up...\")\n\n\tif err := cleanupStaleResources(s.Context, s.DynamicClient); err != nil {\n\t\treturn err\n\t}\n\n\tif err := waitForWebhook(s.Context, s.DynamicClient); err != nil {\n\t\treturn err\n\t}\n\n\tif err := waitForMachineController(s.Context, s.DynamicClient); err != nil {\n\t\treturn err\n\t}\n\n\treturn waitForCRDs(s)\n}",
"func WaitReady() {\n\tif deviceReady {\n\t\treturn\n\t}\n\tch := make(chan struct{}, 0)\n\tf := func() {\n\t\tdeviceReady = true\n\t\tclose(ch)\n\t}\n\tOnDeviceReady(f)\n\t<-ch\n\tUnDeviceReady(f)\n}",
"func (b *Botanist) WaitUntilRequiredExtensionsReady(ctx context.Context) error {\n\treturn retry.UntilTimeout(ctx, 5*time.Second, time.Minute, func(ctx context.Context) (done bool, err error) {\n\t\tif err := b.RequiredExtensionsReady(ctx); err != nil {\n\t\t\tb.Logger.Infof(\"Waiting until all the required extension controllers are ready (%+v)\", err)\n\t\t\treturn retry.MinorError(err)\n\t\t}\n\t\treturn retry.Ok()\n\t})\n}",
"func (a *Agent) WaitReady() {\n\ta.statusLock.RLock()\n\tdefer a.statusLock.RUnlock()\n\n\tfor {\n\t\tif a.status == 1 {\n\t\t\treturn\n\t\t}\n\t\ta.statusCond.Wait()\n\t}\n}",
"func WaitForReady() {\n\tucc.WaitForReady()\n}",
"func WaitForReady() {\n\tdefaultClient.WaitForReady()\n}",
"func (p *Pebble) WaitReady(t *testing.T) {\n\tif p.pebbleCMD.Process == nil {\n\t\tt.Fatal(\"Pebble not started\")\n\t}\n\turl := p.DirectoryURL()\n\tRetry(t, 10, 10*time.Millisecond, func() error {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)\n\t\tdefer cancel()\n\n\t\tt.Log(\"Checking pebble readiness\")\n\t\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresp, err := p.httpClient.Do(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresp.Body.Close()\n\t\treturn nil\n\t})\n}",
"func (c *Client) WaitUntilReady() {\n\tc.waitUntilReady()\n}",
"func Wait() {\n\tdefaultManager.Wait()\n}",
"func StartAndWaitForReady(ctx context.Context, t *testing.T, manager datatransfer.Manager) {\n\tready := make(chan error, 1)\n\tmanager.OnReady(func(err error) {\n\t\tready <- err\n\t})\n\trequire.NoError(t, manager.Start(ctx))\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"did not finish starting up module\")\n\tcase err := <-ready:\n\t\trequire.NoError(t, err)\n\t}\n}",
"func (b *Bucket) WaitUntilReady(timeout time.Duration, opts *WaitUntilReadyOptions) error {\n\tif opts == nil {\n\t\topts = &WaitUntilReadyOptions{}\n\t}\n\n\tcli := b.sb.getCachedClient()\n\tif cli == nil {\n\t\treturn errors.New(\"bucket is not connected\")\n\t}\n\n\terr := cli.getBootstrapError()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprovider, err := cli.getWaitUntilReadyProvider()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdesiredState := opts.DesiredState\n\tif desiredState == 0 {\n\t\tdesiredState = ClusterStateOnline\n\t}\n\n\terr = provider.WaitUntilReady(\n\t\ttime.Now().Add(timeout),\n\t\tgocbcore.WaitUntilReadyOptions{\n\t\t\tDesiredState: gocbcore.ClusterState(desiredState),\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (b Build) Wait() {\n\t<-make(chan struct{})\n}",
"func (p *Init) Wait() {\n\t<-p.waitBlock\n}",
"func (b *buildandrun) Wait() {\n\t<-b.done\n}",
"func (t Task) ready() {\n\tif t.depWg != nil {\n\t\tt.SetMessage(\"wait dependency tasks\")\n\t\tt.depWg.Wait()\n\t}\n}",
"func (m *Module) Wait(ctx context.Context) error {\n\tselect {\n\tcase <-m.done:\n\t\tif m.err != nil {\n\t\t\treturn m.err\n\t\t}\n\t\treturn nil\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}",
"func (mod *Module)WaitTillStartupDone(){\n\tfor mod.hasRunStartup == false{//wait for startup to stop running\n\t\ttime.Sleep(time.Millisecond*10)\n\t}\n}",
"func (test *Test) WaitForNodesReady(expectedNodes int, timeout time.Duration) {\n\terr := test.waitForNodesReady(expectedNodes, true, timeout)\n\ttest.err(err)\n}",
"func Wait() {\n\twg.Wait()\n}",
"func (vm *VirtualMachine) WaitUntilReady(client SkytapClient) (*VirtualMachine, error) {\n\treturn vm.WaitUntilInState(client, []string{RunStateStop, RunStateStart, RunStatePause}, false)\n}",
"func TestWaitUntilAllNodesReady(t *testing.T) {\n\tt.Parallel()\n\n\toptions := NewKubectlOptions(\"\", \"\", \"default\")\n\n\tWaitUntilAllNodesReady(t, options, 12, 5*time.Second)\n\n\tnodes := GetNodes(t, options)\n\tnodeNames := map[string]bool{}\n\tfor _, node := range nodes {\n\t\tnodeNames[node.Name] = true\n\t}\n\n\treadyNodes := GetReadyNodes(t, options)\n\treadyNodeNames := map[string]bool{}\n\tfor _, node := range readyNodes {\n\t\treadyNodeNames[node.Name] = true\n\t}\n\n\tassert.Equal(t, nodeNames, readyNodeNames)\n}",
"func waitForProvidersReady(ctx context.Context, opts InstallOptions, installQueue []repository.Components, proxy Proxy) error {\n\t// If we dont have to wait for providers to be installed\n\t// return early.\n\tif !opts.WaitProviders {\n\t\treturn nil\n\t}\n\n\tlog := logf.Log\n\tlog.Info(\"Waiting for providers to be available...\")\n\n\treturn waitManagerDeploymentsReady(ctx, opts, installQueue, proxy)\n}",
"func (m *DomainMonitor) WaitReady() (err error) {\n\treturn m.sink.WaitReady()\n}",
"func (s *SeleniumServer) Wait() {\n\terr := s.cmd.Wait()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
"func Wait() {\n\twaitGroup.Wait()\n}",
"func (server *Server) Wait(){\n\tfor{\n\t\tif server.threads > 0 {\n\t\t\tserver.Logger.Info(server.threads, \"active channels at the moment. Waiting for busy goroutine.\")\n\t\t\t<-server.ThreadSync\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n}",
"func (ini *Init) wait() {\n\tvar counter time.Duration\n\tfor !ini.init.Done() {\n\t\tcounter += 10\n\t\tif counter > 600000000 {\n\t\t\tpanic(\"BUG: timed out in lazy init\")\n\t\t}\n\t\ttime.Sleep(counter * time.Microsecond)\n\t}\n}",
"func (b *EventBus) Wait() {\n\tb.wg.Wait()\n\n\tif err := b.amqpChannel.Close(); err != nil {\n\t\tlog.Printf(\"eventhorizon: failed to close RabbitMQ queue: %s\", err)\n\t}\n\n\tif err := b.amqpConn.Close(); err != nil {\n\t\tlog.Printf(\"eventhorizon: failed to close RabbitMQ connection: %s\", err)\n\t}\n}",
"func WaitForNodesReady(t *testing.T, nomadClient *api.Client, nodes int) {\n\tnodesAPI := nomadClient.Nodes()\n\n\ttestutil.WaitForResultRetries(retries, func() (bool, error) {\n\t\tdefer time.Sleep(time.Millisecond * 100)\n\t\tnodesList, _, err := nodesAPI.List(nil)\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"error listing nodes: %v\", err)\n\t\t}\n\n\t\teligibleNodes := 0\n\t\tfor _, node := range nodesList {\n\t\t\tif node.Status == \"ready\" {\n\t\t\t\teligibleNodes++\n\t\t\t}\n\t\t}\n\n\t\treturn eligibleNodes >= nodes, fmt.Errorf(\"only %d nodes ready (wanted at least %d)\", eligibleNodes, nodes)\n\t}, func(err error) {\n\t\trequire.NoError(t, err, \"failed to get enough ready nodes\")\n\t})\n}",
"func (elm *etcdLeaseManager) Wait() {\n\telm.wg.Wait()\n}",
"func (cli *CLI) SystemWaitReady() {\n\tintervalSec := time.Duration(3)\n\tutil.Panic(wait.PollImmediateInfinite(intervalSec*time.Second, func() (bool, error) {\n\t\tsys := &nbv1.NooBaa{}\n\t\terr := cli.Client.Get(cli.Ctx, client.ObjectKey{Namespace: cli.Namespace, Name: cli.SystemName}, sys)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif sys.Status.Phase == nbv1.SystemPhaseReady {\n\t\t\tcli.Log.Printf(\"✅ System Phase is \\\"%s\\\".\\n\", sys.Status.Phase)\n\t\t\treturn true, nil\n\t\t}\n\t\tif sys.Status.Phase == nbv1.SystemPhaseRejected {\n\t\t\treturn false, fmt.Errorf(\"❌ System Phase is \\\"%s\\\". describe noobaa for more information\", sys.Status.Phase)\n\t\t}\n\t\tcli.Log.Printf(\"⏳ System Phase is \\\"%s\\\". Waiting for it to be ready ...\\n\", sys.Status.Phase)\n\t\treturn false, nil\n\t}))\n}",
"func (d *InMemoryTaskDB) Wait() {\n\td.modClientsWg.Wait()\n}",
"func WaitOnReady(pvCount int, sleep, timeout time.Duration) bool {\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\tch := make(chan bool)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tch <- AreAllReady(pvCount)\n\t\t\t\ttime.Sleep(sleep)\n\t\t\t}\n\t\t}\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase ready := <-ch:\n\t\t\tif ready {\n\t\t\t\treturn ready\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\tDescribePVs()\n\t\t\treturn false\n\t\t}\n\t}\n}",
"func (c *apiConsumers) Wait() {\n\tc.wait()\n}",
"func (display smallEpd) waitUntilIdle() (err error) {\n\tlog.Debug(\"EPD42 WaitUntilIdle\")\n\tfor {\n\t\tbusy, err := display.driver.DigitalRead(display.BUSY)\n\t\tif !busy {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error checking bust %s\\n\", err.Error())\n\t\t}\n\t\tfmt.Printf(\".\")\n\t\ttime.Sleep(200 * time.Millisecond)\n\t}\n\tlog.Debug(\"EPD42 WaitUntilIdle End\")\n\treturn\n}",
"func (chanSync *channelRelaySync) WaitOnSetup() {\n\tdefer chanSync.shared.setupComplete.Wait()\n}",
"func Wait() {\n\tselect {}\n}",
"func (b *Botanist) WaitUntilEtcdsReady(ctx context.Context) error {\n\treturn etcd.WaitUntilEtcdsReady(\n\t\tctx,\n\t\tb.K8sSeedClient.DirectClient(),\n\t\tb.Logger,\n\t\tb.Shoot.SeedNamespace,\n\t\t2,\n\t\t5*time.Second,\n\t\t3*time.Minute,\n\t\t5*time.Minute,\n\t)\n}",
"func (p Poller) WaitReady() error {\n\tinterval := time.Tick(p.Interval)\n\ttimer := time.NewTimer(p.Timeout)\n\n\tfor {\n\t\tlog.WithFields(log.Fields{}).Info(\"polling\")\n\n\t\tready, err := p.CheckReady()\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"ready\": ready,\n\t\t\t\"err\": err,\n\t\t}).Debug(\"poll_result\")\n\n\t\tif p.FailFast && err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ready {\n\t\t\treturn nil\n\t\t}\n\n\t\tselect {\n\t\tcase <-interval:\n\t\t\tcontinue\n\t\tcase <-timer.C:\n\t\t\tlog.WithFields(log.Fields{}).Info(\"timeout_reached\")\n\t\t\treturn fmt.Errorf(\"timeout reached: %s\", p.Timeout)\n\t\t}\n\t}\n}",
"func wait() {\n\twaitImpl()\n}",
"func (m *HeavySyncMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}",
"func (c *myClient) waitForEnvironmentsReady(p, t int, envList ...string) (err error) {\n\n\tlogger.Infof(\"Waiting up to %v seconds for the environments to be ready\", t)\n\ttimeOut := 0\n\tfor timeOut < t {\n\t\tlogger.Info(\"Waiting for the environments\")\n\t\ttime.Sleep(time.Duration(p) * time.Second)\n\t\ttimeOut = timeOut + p\n\t\tif err = c.checkForBusyEnvironments(envList); err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif timeOut >= t {\n\t\terr = fmt.Errorf(\"waitForEnvironmentsReady timed out\")\n\t}\n\treturn err\n}",
"func (wg *WaitGroupBar) Wait() {\n\twg.wg.Wait()\n}",
"func (m *ActiveNodeMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}",
"func waitReady(project, name, region string) error {\n\twait := time.Minute * 4\n\tdeadline := time.Now().Add(wait)\n\tfor time.Now().Before(deadline) {\n\t\tsvc, err := getService(project, name, region)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to query Service for readiness: %w\", err)\n\t\t}\n\n\t\tfor _, cond := range svc.Status.Conditions {\n\t\t\tif cond.Type == \"Ready\" {\n\t\t\t\tif cond.Status == \"True\" {\n\t\t\t\t\treturn nil\n\t\t\t\t} else if cond.Status == \"False\" {\n\t\t\t\t\treturn fmt.Errorf(\"reason=%s message=%s\", cond.Reason, cond.Message)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(time.Second * 2)\n\t}\n\treturn fmt.Errorf(\"the service did not become ready in %s, check Cloud Console for logs to see why it failed\", wait)\n}",
"func ReadyWait(ctx context.Context, driverName string, databaseURLs []string, logger func(...interface{})) error {\n\tlogger(driverName, \"checking connection\")\n\tadapter, err := AdapterFor(driverName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcount := len(databaseURLs)\n\tcurr := -1\n\tfor {\n\t\tcurr = (curr + 1) % count\n\t\tdb, err := sql.Open(driverName, databaseURLs[curr])\n\t\tif err == nil {\n\t\t\tlogger(driverName, \"server up\")\n\t\t\tvar num int\n\t\t\tif err = db.QueryRow(adapter.PingQuery).Scan(&num); err == nil {\n\t\t\t\tlogger(driverName, \"connected\")\n\t\t\t\treturn db.Close()\n\t\t\t}\n\t\t\tdb.Close()\n\t\t}\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tcase <-time.After(time.Second):\n\t\t\tlogger(driverName, \"retrying...\", err)\n\t\t}\n\t}\n}",
"func Wait() {\n\tfor sched0.tasks.length() != 0 {\n\t\truntime.Gosched()\n\t}\n}",
"func (aio *AsyncIO) waitAll() {\n\taio.trigger <- struct{}{}\n\t<-aio.trigger\n}",
"func waitForConductor(ctx context.Context, client *gophercloud.ServiceClient) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tlog.Printf(\"[DEBUG] Waiting for conductor API to become available...\")\n\t\t\tdriverCount := 0\n\n\t\t\tdrivers.ListDrivers(client, drivers.ListDriversOpts{\n\t\t\t\tDetail: false,\n\t\t\t}).EachPage(func(page pagination.Page) (bool, error) {\n\t\t\t\tactual, err := drivers.ExtractDrivers(page)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tdriverCount += len(actual)\n\t\t\t\treturn true, nil\n\t\t\t})\n\t\t\t// If we have any drivers, conductor is up.\n\t\t\tif driverCount > 0 {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t}\n\t}\n}",
"func (manager *Manager) Wait() {\n\tlogger.FromCtx(manager.ctx, logger.Flow).WithField(\"flow\", manager.Name).Info(\"Awaiting till all processes are completed\")\n\tmanager.wg.Wait()\n}",
"func Wait() {\n\t<-wait\n}",
"func (a *Agent) Wait() error {\n\ta.init()\n\treturn <-a.waitCh\n}",
"func WaitReady(namespaceStore *nbv1.NamespaceStore) bool {\n\tlog := util.Logger()\n\tklient := util.KubeClient()\n\n\tinterval := time.Duration(3)\n\n\terr := wait.PollUntilContextCancel(ctx, interval*time.Second, true, func(ctx context.Context) (bool, error) {\n\t\terr := klient.Get(util.Context(), util.ObjectKey(namespaceStore), namespaceStore)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"⏳ Failed to get NamespaceStore: %s\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\tCheckPhase(namespaceStore)\n\t\tif namespaceStore.Status.Phase == nbv1.NamespaceStorePhaseRejected {\n\t\t\treturn false, fmt.Errorf(\"NamespaceStorePhaseRejected\")\n\t\t}\n\t\tif namespaceStore.Status.Phase != nbv1.NamespaceStorePhaseReady {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t})\n\treturn (err == nil)\n}",
"func (wg *WaitGroup) Wait() {\n\twg.waitGroup.Wait()\n}",
"func (node *Node) Wait() error {\n\treturn node.httpAPIServer.Wait()\n}",
"func (twrkr *twerk) Wait() {\n\tif twrkr.stop {\n\t\treturn\n\t}\n\tticker := time.NewTicker(100 * time.Microsecond)\n\tdefer ticker.Stop()\n\n\tfor range ticker.C {\n\t\tif len(twrkr.jobListener) == 0 && twrkr.liveWorkersNum.Get() == 0 && twrkr.currentlyWorkingNum.Get() == 0 {\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (client *Client) WaitForAllTestResourcesReady() error {\n\tif err := client.WaitForChannelsReady(); err != nil {\n\t\treturn err\n\t}\n\tif err := client.WaitForSubscriptionsReady(); err != nil {\n\t\treturn err\n\t}\n\tif err := client.WaitForBrokersReady(); err != nil {\n\t\treturn err\n\t}\n\tif err := client.WaitForTriggersReady(); err != nil {\n\t\treturn err\n\t}\n\tif err := client.WaitForCronJobSourcesReady(); err != nil {\n\t\treturn err\n\t}\n\tif err := client.WaitForContainerSourcesReady(); err != nil {\n\t\treturn err\n\t}\n\tif err := pkgTest.WaitForAllPodsRunning(client.Kube, client.Namespace); err != nil {\n\t\treturn err\n\t}\n\t// FIXME(Fredy-Z): This hacky sleep is added to try mitigating the test flakiness.\n\t// Will delete it after we find the root cause and fix.\n\ttime.Sleep(10 * time.Second)\n\treturn nil\n}",
"func (m *UnsyncListMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}",
"func (s *Server) Wait() {\n\ts.wg.Wait()\n}",
"func (s *Server) Wait() {\n\ts.wg.Wait()\n}",
"func WaitFabricReady(ctx context.Context, log logging.Logger, params WaitFabricReadyParams) error {\n\tif common.InterfaceIsNil(params.StateProvider) {\n\t\treturn errors.New(\"nil NetDevStateProvider\")\n\t}\n\n\tif len(params.FabricIfaces) == 0 {\n\t\treturn errors.New(\"no fabric interfaces requested\")\n\t}\n\n\tparams.FabricIfaces = common.DedupeStringSlice(params.FabricIfaces)\n\n\tch := make(chan error)\n\tgo loopFabricReady(log, params, ch)\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase err := <-ch:\n\t\treturn err\n\t}\n}",
"func (s YieldingWaitStrategy) Wait() {\n\truntime.Gosched()\n}",
"func (lc *Closer) Wait() {\n\tlc.waiting.Wait()\n}",
"func (m *StateSwitcherMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}",
"func (t *Ticker) Wait() {\n\tt.wg.Wait()\n}",
"func (h *KubernetesHelper) WaitUntilDeployReady(deploys map[string]DeploySpec) {\n\tctx := context.Background()\n\tfor deploy, spec := range deploys {\n\t\tif err := h.CheckPods(ctx, spec.Namespace, deploy, 1); err != nil {\n\t\t\tvar out string\n\t\t\t//nolint:errorlint\n\t\t\tif rce, ok := err.(*RestartCountError); ok {\n\t\t\t\tout = fmt.Sprintf(\"Error running test: failed to wait for deploy/%s to become 'ready', too many restarts (%v)\\n\", deploy, rce)\n\t\t\t} else {\n\t\t\t\tout = fmt.Sprintf(\"Error running test: failed to wait for deploy/%s to become 'ready', timed out waiting for condition\\n\", deploy)\n\t\t\t}\n\t\t\tos.Stderr.Write([]byte(out))\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}",
"func (s *Store) WaitForInit() {\n\ts.initComplete.Wait()\n}",
"func Wait() {\n\tfor {\n\t\ttime.Sleep(time.Millisecond)\n\t\tif messages == nil || len(messages) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n}",
"func (s *RaftDatabase) AwaitReady() error {\n\tfor {\n\t\tdatabase, err := s.atomixClient.CloudV1beta1().Databases(s.namespace).Get(s.name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t} else if database.Status.ReadyClusters == database.Spec.Clusters {\n\t\t\treturn nil\n\t\t} else {\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t}\n\t}\n}",
"func (n *Netlify) WaitUntilDeployReady(ctx context.Context, d *models.Deploy) (*models.Deploy, error) {\n\treturn n.waitForState(ctx, d, \"prepared\", \"ready\")\n}",
"func (w *WaitGroup) Wait() {\n\tfor w.counter > 0 {\n\t\tw.waiters++\n\t\tw.wait.Wait()\n\t\tw.waiters--\n\t}\n}",
"func (b *Bot) WaitUntilCompletion() {\n\tb.server.Wait()\n}",
"func (s *Stopper) Wait() {\n\ts.wg.Wait()\n}",
"func (bus *EventBus) WaitAsync() {\n\tbus.wg.Wait()\n}",
"func (bus *EventBus) WaitAsync() {\n\tbus.wg.Wait()\n}",
"func (w *Worker) Wait() {\n\tw.helper.wg.Wait()\n}",
"func (d *InMemoryJobDB) Wait() {\n\td.modClientsWg.Wait()\n}",
"func (r SortedRunner) Wait() error {\n\treturn nil\n}",
"func (m *HostNetworkMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}",
"func (c *Config) Wait() {\n\tc.wg.Wait()\n}",
"func (p *Probe) wait() {\n\tp.waitGroup.Wait()\n}",
"func (s *Server) Wait() {\n\tfor {\n\t\tselect {\n\t\tcase <-s.channelQuit:\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (m *OutboundMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}",
"func (ep *ExpectProcess) Wait() {\n\tep.wg.Wait()\n}",
"func (wg *WaitGroup) Wait() {\n\twg.Wg.Wait()\n}",
"func (t *Terminal) Wait() {\n\tfor <-t.stopChan {\n\t\treturn\n\t}\n}",
"func (w *Worker) Wait() {\n\tw.ow.Do(func() {\n\t\tw.l.Info(\"astikit: worker is now waiting...\")\n\t\tw.wg.Wait()\n\t})\n}",
"func (ms *Server) Wait() {\n\tms.loops.Wait()\n}",
"func (w *WaitGroup) Wait() {\n\tw.wg.Wait()\n}",
"func (e *Executor) Wait() {\n\tfor {\n\t\tnjob, ndone := e.Count()\n\t\tif ndone == njob {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n}",
"func (m *ConsensusNetworkMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}",
"func (r *volumeReactor) waitForIdle() {\n\tr.ctrl.runningOperations.WaitForCompletion()\n\t// Check every 10ms if the controller does something and stop if it's\n\t// idle.\n\toldChanges := -1\n\tfor {\n\t\ttime.Sleep(10 * time.Millisecond)\n\t\tchanges := r.getChangeCount()\n\t\tif changes == oldChanges {\n\t\t\t// No changes for last 10ms -> controller must be idle.\n\t\t\tbreak\n\t\t}\n\t\toldChanges = changes\n\t}\n}",
"func waitServerReady(t *testing.T, addr string) {\n\tfor i := 0; i < 50; i++ {\n\t\t_, err := http.DefaultClient.Get(addr)\n\t\t// assume server ready when no err anymore\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\tcontinue\n\t}\n}",
"func waitManagerDeploymentsReady(ctx context.Context, opts InstallOptions, installQueue []repository.Components, proxy Proxy) error {\n\tfor _, components := range installQueue {\n\t\tfor _, obj := range components.Objs() {\n\t\t\tif util.IsDeploymentWithManager(obj) {\n\t\t\t\tif err := waitDeploymentReady(ctx, obj, opts.WaitProviderTimeout, proxy); err != nil {\n\t\t\t\t\treturn errors.Wrapf(err, \"deployment %q is not ready after %s\", obj.GetName(), opts.WaitProviderTimeout)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (c *C) Wait() {\n\tc.wg.Wait()\n}"
] | [
"0.71673805",
"0.7037186",
"0.6973799",
"0.69529676",
"0.6943009",
"0.67180645",
"0.6540523",
"0.65389913",
"0.6448208",
"0.64068717",
"0.63826364",
"0.6358909",
"0.63548183",
"0.6340962",
"0.6263704",
"0.6206062",
"0.61352175",
"0.6132525",
"0.61261886",
"0.60737973",
"0.60353774",
"0.602201",
"0.6014714",
"0.597966",
"0.59758645",
"0.5947379",
"0.594252",
"0.5940867",
"0.59302485",
"0.5904095",
"0.58651865",
"0.5850224",
"0.5848503",
"0.58345234",
"0.58126265",
"0.57703835",
"0.5757945",
"0.5757431",
"0.5755224",
"0.57546735",
"0.5748997",
"0.57326674",
"0.57301605",
"0.5715935",
"0.5712563",
"0.5712087",
"0.56998104",
"0.5682395",
"0.5675259",
"0.5668885",
"0.5666431",
"0.5663693",
"0.5653898",
"0.5651902",
"0.563558",
"0.5630438",
"0.5620813",
"0.56075233",
"0.5600549",
"0.5593047",
"0.5587408",
"0.55795765",
"0.5549454",
"0.5546708",
"0.5546708",
"0.5545567",
"0.55342",
"0.55299205",
"0.5527766",
"0.55245334",
"0.5520209",
"0.55125743",
"0.5510038",
"0.5503663",
"0.5487959",
"0.54847515",
"0.54801285",
"0.54755825",
"0.54716367",
"0.54716367",
"0.54703575",
"0.54669285",
"0.5461576",
"0.5456165",
"0.5452607",
"0.5451909",
"0.544927",
"0.54490066",
"0.54489547",
"0.54391843",
"0.5436193",
"0.54347813",
"0.5424801",
"0.5420951",
"0.54201925",
"0.5408455",
"0.5402397",
"0.5401856",
"0.5399817",
"0.53976357"
] | 0.7307017 | 0 |
SetNameIndex sets index of slice element on Name. | SetNameIndex задает индекс элемента среза на Name. | func (v *StringIsPath) SetNameIndex(i int) {
v.Name = fmt.Sprintf("%s[%d]", RxSetNameIndex.ReplaceAllString(v.Name, ""), i)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *StringHasSuffixAny) SetNameIndex(i int) {\n\tv.Name = fmt.Sprintf(\"%s[%d]\", RxSetNameIndex.ReplaceAllString(v.Name, \"\"), i)\n}",
"func (v *StringIsUserGroupOrWhitelisted) SetNameIndex(i int) {\n\tv.Name = fmt.Sprintf(\"%s[%d]\", RxSetNameIndex.ReplaceAllString(v.Name, \"\"), i)\n}",
"func (v *StringsArePathsNotInTheSameDir) SetNameIndex(i int) {\n\tv.Name = fmt.Sprintf(\"%s[%d]\", RxSetNameIndex.ReplaceAllString(v.Name, \"\"), i)\n}",
"func (e *Element) SetIndex(value int) {\n\tif e.Scene != nil {\n\t\te.Scene.Resize.Notify()\n\t}\n\n\tif e.Parent != nil {\n\t\te.Parent.projectIndex(&value)\n\t\te.Parent.children.ReIndex(e.index, value)\n\t\te.Parent.updateIndexes(e.index, value) // this will set the index\n\t}\n}",
"func (e *Engine) setIndex(index int64) {\n\te.Index = index\n\te.Name = naming.Name(index)\n}",
"func (n Nodes) SetIndex(i int, node *Node)",
"func (o *FakeObject) SetIndex(i int, value interface{}) {\n\treflect.ValueOf(o.Value).Index(i).Set(reflect.ValueOf(value))\n}",
"func (v Value) SetIndex(i int, x interface{}) {\n\tpanic(message)\n}",
"func (g *gnmiPath) SetIndex(i int, v any) error {\n\tif i > g.Len() {\n\t\treturn fmt.Errorf(\"invalid index, out of range, got: %d, length: %d\", i, g.Len())\n\t}\n\n\tswitch v := v.(type) {\n\tcase string:\n\t\tif !g.isStringSlicePath() {\n\t\t\treturn fmt.Errorf(\"cannot set index %d of %v to %v, wrong type %T, expected string\", i, v, g, v)\n\t\t}\n\t\tg.stringSlicePath[i] = v\n\t\treturn nil\n\tcase *gnmipb.PathElem:\n\t\tif !g.isPathElemPath() {\n\t\t\treturn fmt.Errorf(\"cannot set index %d of %v to %v, wrong type %T, expected gnmipb.PathElem\", i, v, g, v)\n\t\t}\n\t\tg.pathElemPath[i] = v\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"cannot set index %d of %v to %v, wrong type %T\", i, v, g, v)\n}",
"func (duo *DatumUpdateOne) SetIndex(i int) *DatumUpdateOne {\n\tduo.mutation.ResetIndex()\n\tduo.mutation.SetIndex(i)\n\treturn duo\n}",
"func (o *Object) SetIdx(idx uint32, val interface{}) error {\n\treturn set(o, \"\", idx, val)\n}",
"func (du *DatumUpdate) SetIndex(i int) *DatumUpdate {\n\tdu.mutation.ResetIndex()\n\tdu.mutation.SetIndex(i)\n\treturn du\n}",
"func (gen *AddressGenerator) SetIndex(i uint) *AddressGenerator {\n\tgen.state = addressState(i)\n\treturn gen\n}",
"func (c *Chip8) SetIndex() {\n\tc.index = c.inst & 0x0FFF\n}",
"func (this *Value) SetIndex(index int, val interface{}) {\n\tif this.parsedType == ARRAY && index >= 0 {\n\t\tswitch parsedValue := this.parsedValue.(type) {\n\t\tcase []*Value:\n\t\t\tif index < len(parsedValue) {\n\t\t\t\t// if we've already parsed the object, store it there\n\t\t\t\tswitch val := val.(type) {\n\t\t\t\tcase *Value:\n\t\t\t\t\tparsedValue[index] = val\n\t\t\t\tdefault:\n\t\t\t\t\tparsedValue[index] = NewValue(val)\n\t\t\t\t}\n\t\t\t}\n\t\tcase nil:\n\t\t\t// if not store it in alias\n\t\t\tif this.alias == nil {\n\t\t\t\tthis.alias = make(map[string]*Value)\n\t\t\t}\n\t\t\tswitch val := val.(type) {\n\t\t\tcase *Value:\n\t\t\t\tthis.alias[strconv.Itoa(index)] = val\n\t\t\tdefault:\n\t\t\t\tthis.alias[strconv.Itoa(index)] = NewValue(val)\n\t\t\t}\n\n\t\t}\n\t}\n}",
"func (cli *SetWrapper) SetName(name string) error {\n\treturn cli.set.SetValue(fieldSetName, name)\n}",
"func (e *Engine) setName(name string) error {\n\tindex, err := naming.ExtractIndex(name, \"-\", 1)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"couldn't get index from device name: %v\", err)\n\t\treturn err\n\t}\n\te.Name = name\n\te.Index = index\n\treturn nil\n}",
"func (t *Table) Index(idxName string) *Table {\n\tt.columns[len(t.columns)-1].IdxName = idxName\n\tt.columns[len(t.columns)-1].IsIndex = true\n\treturn t\n}",
"func (c *Collection) SetBleveIndex(name string, documentMapping *mapping.DocumentMapping) (err error) {\n\t// Use only the tow first bytes as index prefix.\n\t// The prefix is used to confine indexes with a prefixes.\n\tprefix := c.buildIndexPrefix()\n\tindexHash := blake2b.Sum256([]byte(name))\n\tprefix = append(prefix, indexHash[:2]...)\n\n\t// ok, start building a new index\n\tindex := newIndex(name)\n\tindex.name = name\n\tindex.collection = c\n\tindex.prefix = prefix\n\terr = index.buildSignature(documentMapping)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Check there is no conflict name or hash\n\tfor _, i := range c.bleveIndexes {\n\t\tif i.name == name {\n\t\t\tif !bytes.Equal(i.signature[:], index.signature[:]) {\n\t\t\t\treturn ErrIndexAllreadyExistsWithDifferentMapping\n\t\t\t}\n\t\t\treturn ErrNameAllreadyExists\n\t\t}\n\t\tif reflect.DeepEqual(i.prefix, prefix) {\n\t\t\treturn ErrHashCollision\n\t\t}\n\t}\n\n\t// Bleve needs to save some parts on the drive.\n\t// The path is based on a part of the collection hash and the index prefix.\n\tcolHash := blake2b.Sum256([]byte(c.name))\n\tindex.path = fmt.Sprintf(\"%x%s%x\", colHash[:2], string(os.PathSeparator), indexHash[:2])\n\n\t// Build the index and set the given document index as default\n\tbleveMapping := bleve.NewIndexMapping()\n\tbleveMapping.StoreDynamic = false\n\tbleveMapping.IndexDynamic = true\n\tbleveMapping.DocValuesDynamic = false\n\n\tfor _, fieldMapping := range documentMapping.Fields {\n\t\tfieldMapping.Store = false\n\t\tfieldMapping.Index = true\n\t}\n\tbleveMapping.DefaultMapping = documentMapping\n\n\t// Build the configuration to use the local bleve storage and initialize the index\n\tconfig := blevestore.NewConfigMap(c.db.ctx, index.path, c.db.privateKey, prefix, c.db.badger, c.db.writeChan)\n\tindex.bleveIndex, err = bleve.NewUsing(c.db.path+string(os.PathSeparator)+index.path, bleveMapping, upsidedown.Name, blevestore.Name, config)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// 
Save the on drive bleve element into the index struct itself\n\tindex.bleveIndexAsBytes, err = index.indexZipper()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Add the new index to the list of index of this collection\n\tc.bleveIndexes = append(c.bleveIndexes, index)\n\n\t// Index all existing values\n\terr = c.db.badger.View(func(txn *badger.Txn) error {\n\t\titer := txn.NewIterator(badger.DefaultIteratorOptions)\n\t\tdefer iter.Close()\n\n\t\tcolPrefix := c.buildDBKey(\"\")\n\t\tfor iter.Seek(colPrefix); iter.ValidForPrefix(colPrefix); iter.Next() {\n\t\t\titem := iter.Item()\n\n\t\t\tvar err error\n\t\t\tvar itemAsEncryptedBytes []byte\n\t\t\titemAsEncryptedBytes, err = item.ValueCopy(itemAsEncryptedBytes)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar clearBytes []byte\n\t\t\tclearBytes, err = cipher.Decrypt(c.db.privateKey, item.Key(), itemAsEncryptedBytes)\n\n\t\t\tid := string(item.Key()[len(colPrefix):])\n\n\t\t\tcontent := c.fromValueBytesGetContentToIndex(clearBytes)\n\t\t\terr = index.bleveIndex.Index(id, content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Save the new settup\n\treturn c.db.saveConfig()\n}",
"func (self *SinglePad) SetIndexA(member int) {\n self.Object.Set(\"index\", member)\n}",
"func (bA *CompactBitArray) SetIndex(i int, v bool) bool {\n\tif bA == nil {\n\t\treturn false\n\t}\n\n\tif i < 0 || i >= bA.Count() {\n\t\treturn false\n\t}\n\n\tif v {\n\t\tbA.Elems[i>>3] |= (1 << uint8(7-(i%8)))\n\t} else {\n\t\tbA.Elems[i>>3] &= ^(1 << uint8(7-(i%8)))\n\t}\n\n\treturn true\n}",
"func (i *Index) IndexWithName(name string, opt ...Option) *Index {\n\to := &options{parent: i, name: name}\n\to.fill(opt)\n\treturn i.subIndexForKey(o)\n}",
"func (self *FileBaseDataStore) SetIndex(\n\tconfig_obj *api_proto.Config,\n\tindex_urn string,\n\tentity string,\n\tkeywords []string) error {\n\n\tfor _, keyword := range keywords {\n\t\tsubject := path.Join(index_urn, strings.ToLower(keyword), entity)\n\t\terr := writeContentToFile(config_obj, subject, []byte{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (self *Graphics) SetChildIndex(child *DisplayObject, index int) {\n self.Object.Call(\"setChildIndex\", child, index)\n}",
"func (m *RecurrencePattern) SetIndex(value *WeekIndex)() {\n m.index = value\n}",
"func (r *Resultset) NameIndex(name string) (int, error) {\n\tcolumn, ok := r.FieldNames[name]\n\tif ok {\n\t\treturn column, nil\n\t}\n\treturn 0, fmt.Errorf(\"invalid field name %s\", name)\n}",
"func (s UserSet) SetName(value string) {\n\ts.RecordCollection.Set(models.NewFieldName(\"Name\", \"name\"), value)\n}",
"func (a *App) SetIndex(controllerName string) *route {\n\troute := a.newRoute(controllerName, nil)\n\troute.segment = \"\"\n\troute.buildPatterns(\"\")\n\treturn route\n}",
"func (m *metricEventDimensions) SetIndex(val *int32) {\n\tm.indexField = val\n}",
"func UseIndex(designDocument, name string) Parameter {\n\treturn func(pa Parameterizable) {\n\t\tif name == \"\" {\n\t\t\tpa.SetParameter(\"use_index\", designDocument)\n\t\t} else {\n\t\t\tpa.SetParameter(\"use_index\", []string{designDocument, name})\n\t\t}\n\t}\n}",
"func (pw *PixelWand) SetIndex(index *IndexPacket) {\n\tC.PixelSetIndex(pw.pw, C.IndexPacket(*index))\n\truntime.KeepAlive(pw)\n}",
"func (t *Dense) SetMaskAtIndex(v bool, i int) error {\n\tif !t.IsMasked() {\n\t\treturn nil\n\t}\n\tt.mask[i] = v\n\treturn nil\n}",
"func (m *CalendarGroup) SetName(value *string)() {\n err := m.GetBackingStore().Set(\"name\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (cc *CrontabConfig) SetGroupName(state State, i, fieldi int, a *CrontabConstraint, s string) error {\n\tk := cc.NameToNumber(fieldi, s) + cc.Fields[fieldi].min\n\tif k < cc.Fields[fieldi].min || k > cc.Fields[fieldi].max {\n\t\treturn &ErrorBadIndex{FieldName: cc.Fields[fieldi].name, Value: k}\n\t}\n\tif state == StateInName {\n\t\t*a = [3]int{k, k, 1}\n\t} else if state == StateInEndRangeName {\n\t\t(*a)[1] = k\n\t} else {\n\t\treturn &ErrorParse{Index: i, State: state}\n\t}\n\treturn nil\n}",
"func (z *Zzz) IdxName() int { //nolint:dupl false positive\n\treturn 3\n}",
"func (m *WorkbookRangeBorder) SetSideIndex(value *string)() {\n err := m.GetBackingStore().Set(\"sideIndex\", value)\n if err != nil {\n panic(err)\n }\n}",
"func SliceIndexByName(sl *[]Ki, name string, startIdx int) (int, bool) {\n\treturn SliceIndexByFunc(sl, startIdx, func(ch Ki) bool { return ch.Name() == name })\n}",
"func (o *IssueRemoveLabelParams) SetIndex(index int64) {\n\to.Index = index\n}",
"func (m *LabelActionBase) SetName(value *string)() {\n err := m.GetBackingStore().Set(\"name\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (sl *Slice) IndexByName(name string, startIdx int) (int, bool) {\n\treturn sl.IndexByFunc(startIdx, func(ch Ki) bool { return ch.Name() == name })\n}",
"func (b CreateIndexBuilder) Name(name string) CreateIndexBuilder {\n\treturn builder.Set(b, \"Name\", name).(CreateIndexBuilder)\n}",
"func (v *Vec3i) SetByName(name string, value int32) {\n\tswitch name {\n\tcase \"x\", \"X\":\n\t\tv.X = value\n\tcase \"y\", \"Y\":\n\t\tv.Y = value\n\tcase \"z\", \"Z\":\n\t\tv.Z = value\n\tdefault:\n\t\tpanic(\"Invalid Vec3i component name: \" + name)\n\t}\n}",
"func (ms Float64Slice) SetAt(i int, val float64) {\n\t(*ms.getOrig())[i] = val\n}",
"func (r *PopRow) SetName(name string) { r.Data.Name = name }",
"func (idx *IndexMap) Rename(indname string) *IndexMap {\n\tidx.IndexName = indname\n\treturn idx\n}",
"func (s *MyTestStruct) SetName(n string) *MyTestStruct {\n\tif s.mutable {\n\t\ts.field_Name = n\n\t\treturn s\n\t}\n\n\tres := *s\n\tres.field_Name = n\n\treturn &res\n}",
"func (q *Queue) SetIndexed(repoName string, opts IndexOptions, state indexState) {\n\tq.mu.Lock()\n\titem := q.get(repoName)\n\titem.setIndexState(state)\n\tif state != indexStateFail {\n\t\titem.indexed = reflect.DeepEqual(opts, item.opts)\n\t}\n\tif item.heapIdx >= 0 {\n\t\t// We only update the position in the queue, never add it.\n\t\theap.Fix(&q.pq, item.heapIdx)\n\t}\n\tq.mu.Unlock()\n}",
"func (m Mapping) IndexName() string {\n\treturn fmt.Sprintf(\"idx_%s\", m.Alias)\n}",
"func (m *etcdMinion) SetName(name string) error {\n\tnameKey := filepath.Join(m.rootDir, \"name\")\n\topts := &etcdclient.SetOptions{\n\t\tPrevExist: etcdclient.PrevIgnore,\n\t}\n\n\t_, err := m.kapi.Set(context.Background(), nameKey, name, opts)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to set name of minion: %s\\n\", err)\n\t}\n\n\treturn err\n}",
"func (m *WorkbookNamedItem) SetName(value *string)() {\n err := m.GetBackingStore().Set(\"name\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (self *TileSprite) SetChildIndex(child *DisplayObject, index int) {\n self.Object.Call(\"setChildIndex\", child, index)\n}",
"func (iob *IndexOptionsBuilder) Name(name string) *IndexOptionsBuilder {\n\tiob.document = append(iob.document, bson.E{\"name\", name})\n\treturn iob\n}",
"func (ll *LevelLedger) SetClassIndex(ref *record.Reference, idx *index.ClassLifeline) error {\n\tk := prefixkey(scopeIDLifeline, ref.Key())\n\tencoded, err := index.EncodeClassLifeline(idx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ll.ldb.Put(k, encoded, nil)\n}",
"func (nc *NodeCreate) SetName(s string) *NodeCreate {\n\tnc.mutation.SetName(s)\n\treturn nc\n}",
"func (_Contract *ContractTransactor) SetName(opts *bind.TransactOpts, node [32]byte, name string) (*types.Transaction, error) {\n\treturn _Contract.contract.Transact(opts, \"setName\", node, name)\n}",
"func (m *WorkbookPivotTable) SetName(value *string)() {\n m.name = value\n}",
"func (c *Mock) SetName(v string) interfaces.Client {\n\treturn c.FakeSetName(v)\n}",
"func (blood *bloodGeneral) SetByIndex(index int, value float64) {\n\te := reflect.ValueOf(blood).Elem()\n\tfield := e.Field(index)\n\tif field.IsValid() && field.CanSet() && field.Kind() == reflect.Float64 {\n\t\tfield.SetFloat(value)\n\t} else {\n\t\tlog.Panicf(\"Cannot find element with index %d in BloodGeneral struct\", index)\n\t}\n\n\treturn\n}",
"func (rc *Cache) PutIndex(key, name string) error {\n\tvar err error\n\tif _, err = rc.do(\"HSET\", key, name, \"1\"); err != nil {\n\t\treturn err\n\t}\n\treturn err\n}",
"func (e *ObservableEditableBuffer) SetName(name string) {\n\tif e.Name() == name {\n\t\treturn\n\t}\n\n\t// SetName always forces an update of the tag.\n\t// TODO(rjk): This reset of filtertagobservers might now be unnecessary.\n\te.filtertagobservers = false\n\tbefore := e.getTagStatus()\n\tdefer e.notifyTagObservers(before)\n\n\tif e.seq > 0 {\n\t\t// TODO(rjk): Pass in the name, make the function name better reflect its purpose.\n\t\te.f.UnsetName(e.Name(), e.seq)\n\t}\n\te.setfilename(name)\n}",
"func (v *V) SetAt(i int, f float64) Vector {\n\tif i < 0 || i >= v.Dim() {\n\t\tpanic(ErrIndex)\n\t}\n\tv.Data[i] = f\n\treturn v\n}",
"func (i *Index) Name() string { return i.name }",
"func (stquo *SurveyTemplateQuestionUpdateOne) SetIndex(i int) *SurveyTemplateQuestionUpdateOne {\n\tstquo.index = &i\n\tstquo.addindex = nil\n\treturn stquo\n}",
"func (kcuo *K8sContainerUpdateOne) SetName(s string) *K8sContainerUpdateOne {\n\tkcuo.mutation.SetName(s)\n\treturn kcuo\n}",
"func (entry *Entry) SetName(name ndn.Name) error {\n\tnameV, _ := name.MarshalBinary()\n\tnameL := len(nameV)\n\tif nameL > MaxNameLength {\n\t\treturn fmt.Errorf(\"FIB entry name cannot exceed %d octets\", MaxNameLength)\n\t}\n\n\tc := (*CEntry)(entry)\n\tc.NameL = uint16(copy(c.NameV[:], nameV))\n\tc.NComps = uint8(len(name))\n\treturn nil\n}",
"func (suo *SettingUpdateOne) SetName(s string) *SettingUpdateOne {\n\tsuo.mutation.SetName(s)\n\treturn suo\n}",
"func (e *Element) ReIndex(old, new int) {\n\tdiv := e.children.Slice()[old]\n\tdiv.Value.SetIndex(new)\n}",
"func (d *V8interceptor) SetByindex(index int32, object, value *V8value, exception *string) int32 {\n\texception_ := C.cef_string_userfree_alloc()\n\tsetCEFStr(*exception, exception_)\n\tdefer func() {\n\t\t*exception = cefstrToString(exception_)\n\t\tC.cef_string_userfree_free(exception_)\n\t}()\n\treturn int32(C.gocef_v8interceptor_set_byindex(d.toNative(), C.int(index), object.toNative(), value.toNative(), (*C.cef_string_t)(exception_), d.set_byindex))\n}",
"func (t *Type) SetNname(n *Node)",
"func newIndex(name string) (index *ind) {\n\tindex = new(ind)\n\tindex.name = name\n\tindex.Storage = map[string][]string{}\n\tindex.Domains = map[string]bool{}\n\treturn\n}",
"func (m *DeviceManagementApplicabilityRuleOsEdition) SetName(value *string)() {\n err := m.GetBackingStore().Set(\"name\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (_m *MockMutableSeriesIterators) SetAt(idx int, iter SeriesIterator) {\n\t_m.ctrl.Call(_m, \"SetAt\", idx, iter)\n}",
"func (i *IndexDB) SetIndex(ctx context.Context, pn insolar.PulseNumber, bucket record.Index) error {\n\ti.lock.Lock()\n\tdefer i.lock.Unlock()\n\n\terr := i.setBucket(pn, bucket.ObjID, &bucket)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstats.Record(ctx, statIndexesAddedCount.M(1))\n\n\tinslogger.FromContext(ctx).Debugf(\"[SetIndex] bucket for obj - %v was set successfully. Pulse: %d\", bucket.ObjID.DebugString(), pn)\n\n\treturn nil\n}",
"func (db *DB) SetClassIndex(id *record.ID, idx *index.ClassLifeline) error {\n\treturn db.Update(func(tx *TransactionManager) error {\n\t\treturn tx.SetClassIndex(id, idx)\n\t})\n}",
"func SetIterationNames(names []string) CustomizeIterationFunc {\n\treturn func(fxt *TestFixture, idx int) error {\n\t\tif len(fxt.Iterations) != len(names) {\n\t\t\treturn errs.Errorf(\"number of names (%d) must match number of iterations to create (%d)\", len(names), len(fxt.Iterations))\n\t\t}\n\t\tfxt.Iterations[idx].Name = names[idx]\n\t\treturn nil\n\t}\n}",
"func poolSetIndex(a interface{}, i int) {\n\ta.(*freeClientPoolEntry).index = i\n}",
"func (kcu *K8sContainerUpdate) SetName(s string) *K8sContainerUpdate {\n\tkcu.mutation.SetName(s)\n\treturn kcu\n}",
"func (m *SDKScriptCollectorAttribute) SetName(val string) {\n\n}",
"func (r *Search) Index(index string) *Search {\n\tr.paramSet |= indexMask\n\tr.index = index\n\n\treturn r\n}",
"func (t *SignerHistory) IndexName() IndexName {\n\treturn signerHistoryIndexName\n}",
"func (nu *NodeUpdate) SetName(s string) *NodeUpdate {\n\tnu.mutation.SetName(s)\n\treturn nu\n}",
"func (mc *MockContiv) SetPodAppNsIndex(pod podmodel.ID, nsIndex uint32) {\n\tmc.podAppNs[pod] = nsIndex\n}",
"func (nuo *NodeUpdateOne) SetName(s string) *NodeUpdateOne {\n\tnuo.mutation.SetName(s)\n\treturn nuo\n}",
"func (ll *LevelLedger) SetObjectIndex(ref *record.Reference, idx *index.ObjectLifeline) error {\n\tk := prefixkey(scopeIDLifeline, ref.Key())\n\tencoded, err := index.EncodeObjectLifeline(idx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ll.ldb.Put(k, encoded, nil)\n}",
"func (r *ProductRow) SetName(name string) { r.Data.Name = name }",
"func (xdc *XxxDemoCreate) SetName(s string) *XxxDemoCreate {\n\txdc.mutation.SetName(s)\n\treturn xdc\n}",
"func (wouo *WorkOrderUpdateOne) SetIndex(i int) *WorkOrderUpdateOne {\n\twouo.index = &i\n\twouo.addindex = nil\n\treturn wouo\n}",
"func (xs *Sheet) SetName(name string) {\n\txs.xb.lib.NewProc(\"xlSheetSetNameW\").\n\t\tCall(xs.self, S(name))\n}",
"func (k *Kitten) SetName(name string) {\n k.Name = name\n}",
"func (_ResolverContract *ResolverContractTransactor) SetName(opts *bind.TransactOpts, node [32]byte, name string) (*types.Transaction, error) {\n\treturn _ResolverContract.contract.Transact(opts, \"setName\", node, name)\n}",
"func NameToIndex(name string, names []string) int {\n\tvar index = -1\n\tfor i := 0; i < len(names); i++ {\n\t\tif name == names[i] {\n\t\t\tindex = i\n\t\t}\n\t}\n\treturn index\n}",
"func (stqu *SurveyTemplateQuestionUpdate) SetIndex(i int) *SurveyTemplateQuestionUpdate {\n\tstqu.index = &i\n\tstqu.addindex = nil\n\treturn stqu\n}",
"func (ms Span) SetName(v string) {\n\tms.orig.Name = v\n}",
"func (muo *ModelUpdateOne) SetName(s string) *ModelUpdateOne {\n\tmuo.mutation.SetName(s)\n\treturn muo\n}",
"func (db *DB) SetObjectIndex(\n\tctx context.Context,\n\tid *core.RecordID,\n\tidx *index.ObjectLifeline,\n) error {\n\treturn db.Update(ctx, func(tx *TransactionManager) error {\n\t\treturn tx.SetObjectIndex(ctx, id, idx)\n\t})\n}",
"func (ig *InstanceGroup) IndexedServiceName(index int, azIndex int, azName string) string {\n\tsn := boshnames.TruncatedServiceName(ig.Name, 53)\n\tif azIndex > -1 {\n\t\treturn fmt.Sprintf(\"%s-%s-%d-%d\", sn, azName, azIndex, index)\n\t}\n\treturn fmt.Sprintf(\"%s-%d\", sn, index)\n}",
"func (s *ReadOnlyStorer) SetIndex(*index.Index) error {\n\treturn ErrReadOnlyStorer.New()\n\n}",
"func (obj *Device) SetIndices(indexData *IndexBuffer) Error {\n\tret, _, _ := syscall.Syscall(\n\t\tobj.vtbl.SetIndices,\n\t\t2,\n\t\tuintptr(unsafe.Pointer(obj)),\n\t\tuintptr(unsafe.Pointer(indexData)),\n\t\t0,\n\t)\n\treturn toErr(ret)\n}",
"func (o SecondaryIndexOutput) IndexName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *SecondaryIndex) pulumi.StringOutput { return v.IndexName }).(pulumi.StringOutput)\n}",
"func (m *ParentLabelDetails) SetName(value *string)() {\n err := m.GetBackingStore().Set(\"name\", value)\n if err != nil {\n panic(err)\n }\n}"
] | [
"0.748726",
"0.7431408",
"0.6964414",
"0.6346891",
"0.6253622",
"0.6180723",
"0.6141942",
"0.60409635",
"0.59939164",
"0.5878266",
"0.5856989",
"0.5851746",
"0.57178617",
"0.5685566",
"0.5639303",
"0.56303644",
"0.55680585",
"0.5561141",
"0.5557104",
"0.5499101",
"0.548195",
"0.54818314",
"0.54684585",
"0.5453392",
"0.54036677",
"0.5371271",
"0.53363836",
"0.5287455",
"0.528704",
"0.5279632",
"0.52241546",
"0.5202348",
"0.5188694",
"0.51864153",
"0.5186413",
"0.5183055",
"0.5150658",
"0.5144818",
"0.5126203",
"0.5086723",
"0.5066663",
"0.5065852",
"0.5047603",
"0.50461113",
"0.50301135",
"0.5028728",
"0.50225097",
"0.5021532",
"0.50200856",
"0.50130963",
"0.50078917",
"0.50032824",
"0.5000461",
"0.49974406",
"0.49920255",
"0.49871707",
"0.49830332",
"0.49818647",
"0.49763897",
"0.49634835",
"0.49619636",
"0.4957827",
"0.4951766",
"0.49446127",
"0.49444243",
"0.4942674",
"0.49399543",
"0.49346548",
"0.49300075",
"0.49178302",
"0.49081287",
"0.49054363",
"0.48957288",
"0.489193",
"0.48878166",
"0.4878403",
"0.48759207",
"0.48694032",
"0.48623422",
"0.4861499",
"0.4858354",
"0.4825373",
"0.48231307",
"0.48217747",
"0.48197708",
"0.48173186",
"0.47999308",
"0.47986695",
"0.4794664",
"0.47946015",
"0.47905636",
"0.47803637",
"0.4779535",
"0.47767293",
"0.4775903",
"0.47687873",
"0.47676122",
"0.47615093",
"0.4759826",
"0.47563237"
] | 0.77226394 | 0 |
runIndex is the main function for the index subcommand | runIndex — основная функция для подкоманды index | func runIndex() {
// check index flag is set (global flag but don't require it for all sub commands)
if *indexDir == "" {
fmt.Println("please specify a directory for the index files (--indexDir)")
os.Exit(1)
}
// set up profiling
if *profiling == true {
defer profile.Start(profile.MemProfile, profile.ProfilePath("./")).Stop()
//defer profile.Start(profile.ProfilePath("./")).Stop()
}
// start logging
if *logFile != "" {
logFH := misc.StartLogging(*logFile)
defer logFH.Close()
log.SetOutput(logFH)
} else {
log.SetOutput(os.Stdout)
}
// start the index sub command
start := time.Now()
log.Printf("i am groot (version %s)", version.GetVersion())
log.Printf("starting the index subcommand")
// check the supplied files and then log some stuff
log.Printf("checking parameters...")
misc.ErrorCheck(indexParamCheck())
log.Printf("\tprocessors: %d", *proc)
log.Printf("\tk-mer size: %d", *kmerSize)
log.Printf("\tsketch size: %d", *sketchSize)
log.Printf("\tgraph window size: %d", *windowSize)
log.Printf("\tnum. partitions: %d", *numPart)
log.Printf("\tmax. K: %d", *maxK)
log.Printf("\tmax. sketch span: %d", *maxSketchSpan)
// record the runtime information for the index sub command
info := &pipeline.Info{
Version: version.GetVersion(),
KmerSize: *kmerSize,
SketchSize: *sketchSize,
WindowSize: *windowSize,
NumPart: *numPart,
MaxK: *maxK,
MaxSketchSpan: *maxSketchSpan,
IndexDir: *indexDir,
}
// create the pipeline
log.Printf("initialising indexing pipeline...")
indexingPipeline := pipeline.NewPipeline()
// initialise processes
log.Printf("\tinitialising the processes")
msaConverter := pipeline.NewMSAconverter(info)
graphSketcher := pipeline.NewGraphSketcher(info)
sketchIndexer := pipeline.NewSketchIndexer(info)
// connect the pipeline processes
log.Printf("\tconnecting data streams")
msaConverter.Connect(msaList)
graphSketcher.Connect(msaConverter)
sketchIndexer.Connect(graphSketcher)
// submit each process to the pipeline and run it
indexingPipeline.AddProcesses(msaConverter, graphSketcher, sketchIndexer)
log.Printf("\tnumber of processes added to the indexing pipeline: %d\n", indexingPipeline.GetNumProcesses())
log.Print("creating graphs, sketching traversals and indexing...")
indexingPipeline.Run()
log.Printf("writing index files in \"%v\"...", *indexDir)
misc.ErrorCheck(info.SaveDB(*indexDir + "/groot.lshe"))
misc.ErrorCheck(info.Dump(*indexDir + "/groot.gg"))
log.Printf("finished in %s", time.Since(start))
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func index(w http.ResponseWriter, req *http.Request, ctx httputil.Context) (e *httputil.Error) {\n\tif req.URL.Path != \"/\" {\n\t\tnotFound(w, req)\n\t\treturn\n\t}\n\tm := newManager(ctx)\n\n\tres, err := m.Index()\n\tif err != nil {\n\t\te = httputil.Errorf(err, \"couldn't query for test results\")\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\tif err := T(\"index/index.html\").Execute(w, res); err != nil {\n\t\te = httputil.Errorf(err, \"error executing index template\")\n\t}\n\treturn\n}",
"func index(w http.ResponseWriter, r *http.Request){\n\terr := templ.ExecuteTemplate(w, \"index\", nil)\n\tif err != nil {\n\t\tfmt.Print(err.Error())\n\t}\n}",
"func index(w http.ResponseWriter, req *http.Request, _ httprouter.Params) {\n\terr := tpl.ExecuteTemplate(w, \"index.html\", nil)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\tlog.Fatalln(err)\n\t}\n\tfmt.Println(\"HERE INDEX\")\n}",
"func Run() {\n\terr := Index(\n\t\tviper.GetString(\"collection\"),\n\t\tviper.GetString(\"key\"),\n\t\tviper.GetString(\"indexType\"),\n\t)\n\tif err != nil {\n\t\tlog.Println(\"ERROR applying index\", err)\n\t\treturn\n\t}\n}",
"func index(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, \"index de uma função\")\n}",
"func Index(w http.ResponseWriter, r *http.Request) {\n\tmessage := \"Welcome to Recipe Book!\"\n\tindexT.Execute(w, message)\n}",
"func ExecuteIndex(user models.User, w http.ResponseWriter, r *http.Request) error {\n\taccess := 0\n\n\t// check if user is empty\n\tif user.ID != \"\" {\n\t\taccess = user.GetAccess()\n\t} else {\n\t\t// todo: normal auth page\n\t\tw.Header().Set(\"Content-Type\", \"\")\n\t\thttp.Redirect(w, r, \"/login\", http.StatusFound)\n\t\treturn nil\n\t}\n\n\t// getting all required data\n\tvoiceTime := \"\"\n\tvtd, err := user.GetVoiceTime()\n\tif err == nil {\n\t\tvoiceTime = utils.FormatDuration(vtd)\n\t}\n\txbox := \"\"\n\txboxes, _ := user.GetXboxes()\n\tif len(xboxes) > 0 {\n\t\txbox = xboxes[0].Xbox\n\t}\n\tjoinedAtTime, err := user.GetGuildJoinDate()\n\tjoinedAt := \"\"\n\tif err == nil {\n\t\tdif := int(time.Now().Sub(joinedAtTime).Milliseconds()) / 1000 / 3600 / 24\n\t\tdays := utils.FormatUnit(dif, utils.Days)\n\t\tjoinedAt = fmt.Sprintf(\"%s (%s)\", utils.FormatDateTime(joinedAtTime), days)\n\t}\n\twarns, err := user.GetWarnings()\n\tif err != nil {\n\t\twarns = []models.Warning{}\n\t}\n\n\t// Preparing content and rendering\n\tcontent := IndexContent{\n\t\tUsername: user.Username,\n\t\tAvatar: user.AvatarURL,\n\t\tJoinedAt: joinedAt,\n\t\tXbox: xbox,\n\t\tVoiceTime: voiceTime,\n\t\tWarnsCount: len(warns),\n\t\tWarnings: PrepareWarnings(warns),\n\t}\n\n\ttmpl, err := template.ParseFiles(\"templates/layout.gohtml\", \"templates/index.gohtml\", \"templates/navbar.gohtml\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = tmpl.ExecuteTemplate(w, \"layout\", Layout{\n\t\tTitle: \"Главная страница\",\n\t\tPage: \"index\",\n\t\tAccess: access,\n\t\tContent: content,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func ShowIndex() {\n\tfmt.Printf(\"%v\\n\", indexText)\n}",
"func (c *RunCommand) Index() int {\n\treturn c.cmd.index\n}",
"func Index(w http.ResponseWriter, data *IndexData) {\n\trender(tpIndex, w, data)\n}",
"func Index(w http.ResponseWriter, data *IndexData) {\n\trender(tpIndex, w, data)\n}",
"func (si ServeIndex) Index(w http.ResponseWriter, r *http.Request) {\n\tpara := params.NewParams()\n\tdata, _, err := Updates(r, para)\n\tif err != nil {\n\t\tif _, ok := err.(params.RenamedConstError); ok {\n\t\t\thttp.Redirect(w, r, err.Error(), http.StatusFound)\n\t\t\treturn\n\t\t}\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tdata[\"Distrib\"] = si.StaticData.Distrib\n\tdata[\"Exporting\"] = exportingCopy() // from ./ws.go\n\tdata[\"OstentUpgrade\"] = OstentUpgrade.Get()\n\tdata[\"OstentVersion\"] = si.StaticData.OstentVersion\n\tdata[\"TAGGEDbin\"] = si.StaticData.TAGGEDbin\n\n\tsi.IndexTemplate.Apply(w, struct{ Data IndexData }{Data: data})\n}",
"func (i *IndexCommand) Run(ctx context.Context, subcommandArgs []string) error {\n\toutputPath := i.flags.String(\"o\", \"Index.md\", \"Output path contained to the $JOURNAL_PATH.\")\n\tif !i.flags.Parsed() {\n\t\tif err := i.flags.Parse(subcommandArgs); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif *outputPath == \".\" {\n\t\t*outputPath = \"Index.md\"\n\t}\n\tindex, err := tagMap(i.options.JournalPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tkeys := sortedTagKeys(index)\n\tvar newIndex string\n\tfor _, tag := range keys {\n\t\tnewIndex += fmt.Sprintf(\"\\n* *%s* \", tag)\n\t\tmappedEntries := make([]string, len(index[tag]))\n\t\tmapper := func(entry string) string {\n\t\t\treturn fmt.Sprintf(\"[%s](%s)\", entry, entry)\n\t\t}\n\t\tfor i, entry := range index[tag] {\n\t\t\tmappedEntries[i] = mapper(entry)\n\t\t}\n\t\tnewIndex += strings.Join(mappedEntries, \", \")\n\t}\n\tindexPath := fmt.Sprintf(\"%s/%s\", i.options.JournalPath, path.Base(*outputPath))\n\treturn ioutil.WriteFile(indexPath, []byte(newIndex), 0644)\n}",
"func index(res http.ResponseWriter, req *http.Request) {\n\ttpl, err := template.ParseFiles(\"index.html\")\n\tif err != nil { // if file does not exist, give user a error\n\t\tlog.Fatalln(err) // stops program if file does not exist\n\t}\n\ttpl.Execute(res, nil) // execute the html file\n}",
"func UseIndex() *ishell.Cmd {\n\n\treturn &ishell.Cmd{\n\t\tName: \"use\",\n\t\tHelp: \"Select index to use for subsequent document operations\",\n\t\tFunc: func(c *ishell.Context) {\n\t\t\tif context == nil {\n\t\t\t\terrorMsg(c, errNotConnected)\n\t\t\t} else {\n\t\t\t\tdefer restorePrompt(c)\n\t\t\t\tif len(c.Args) < 1 {\n\t\t\t\t\tif context.ActiveIndex != \"\" {\n\t\t\t\t\t\tcprintlist(c, \"Using index \", cy(context.ActiveIndex))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcprintln(c, \"No index is in use\")\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif c.Args[0] == \"--\" {\n\t\t\t\t\tif context.ActiveIndex != \"\" {\n\t\t\t\t\t\tcprintlist(c, \"Index \", cy(context.ActiveIndex), \" is no longer in use\")\n\t\t\t\t\t\tcontext.ActiveIndex = \"\"\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcprintln(c, \"No index is in use\")\n\t\t\t\t\t}\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ts, err := context.ResolveAndValidateIndex(c.Args[0])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrorMsg(c, err.Error())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcontext.ActiveIndex = s\n\t\t\t\tif s != c.Args[0] {\n\t\t\t\t\tcprintlist(c, \"For alias \", cyb(c.Args[0]), \" selected index \", cy(s))\n\t\t\t\t} else {\n\t\t\t\t\tcprintlist(c, \"Selected index \", cy(s))\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\n}",
"func Index(w http.ResponseWriter, data interface{}) {\n\trender(tpIndex, w, data)\n}",
"func Index(w http.ResponseWriter, r *http.Request) {\n\ta := \"hello from index router\"\n\tfmt.Fprintln(w, a)\n}",
"func showIndex(res http.ResponseWriter, req *http.Request, params httprouter.Params) {\n\tServeTemplateWithParams(res, req, \"index.html\", nil)\n}",
"func indexHandler(c *fiber.Ctx) error {\n\treturn common.HandleTemplate(c, \"index\",\n\t\t\"me\", nil, 200)\n}",
"func (i indexer) Index(ctx context.Context, req IndexQuery) (\n\tresp *IndexResult, err error) {\n\n\tlog.Info(\"index [%v] root [%v] len_dirs=%v len_files=%v\",\n\t\treq.Key, req.Root, len(req.Dirs), len(req.Files))\n\tstart := time.Now()\n\t// Setup the response\n\tresp = NewIndexResult()\n\tif err = req.Normalize(); err != nil {\n\t\tlog.Info(\"index [%v] error: %v\", req.Key, err)\n\t\tresp.Error = errs.NewStructError(err)\n\t\treturn\n\t}\n\n\t// create index shards\n\tvar nshards int\n\tif nshards = i.cfg.NumShards; nshards == 0 {\n\t\tnshards = 1\n\t}\n\tnshards = utils.MinInt(nshards, maxShards)\n\ti.shards = make([]index.IndexWriter, nshards)\n\ti.root = getRoot(i.cfg, &req)\n\n\tfor n := range i.shards {\n\t\tname := path.Join(i.root, shardName(req.Key, n))\n\t\tixw, err := getIndexWriter(ctx, name)\n\t\tif err != nil {\n\t\t\tresp.Error = errs.NewStructError(err)\n\t\t\treturn resp, nil\n\t\t}\n\t\ti.shards[n] = ixw\n\t}\n\n\tfs := getFileSystem(ctx, i.root)\n\trepo := newRepoFromQuery(&req, i.root)\n\trepo.SetMeta(i.cfg.RepoMeta, req.Meta)\n\tresp.Repo = repo\n\n\t// Add query Files and scan Dirs for files to index\n\tnames, err := i.scanner(fs, &req)\n\tch := make(chan int, nshards)\n\tchnames := make(chan string, 100)\n\tgo func() {\n\t\tfor _, name := range names {\n\t\t\tchnames <- name\n\t\t}\n\t\tclose(chnames)\n\t}()\n\treqch := make(chan par.RequestFunc, nshards)\n\tfor _, shard := range i.shards {\n\t\treqch <- indexShard(&i, &req, shard, fs, chnames, ch)\n\t}\n\tclose(reqch)\n\terr = par.Requests(reqch).WithConcurrency(nshards).DoWithContext(ctx)\n\tclose(ch)\n\n\t// Await results, each indicating the number of files scanned\n\tfor num := range ch {\n\t\trepo.NumFiles += num\n\t}\n\n\trepo.NumShards = len(i.shards)\n\t// Flush our index shard files\n\tfor _, shard := range i.shards {\n\t\tshard.Flush()\n\t\trepo.SizeIndex += ByteSize(shard.IndexBytes())\n\t\trepo.SizeData += ByteSize(shard.DataBytes())\n\t\tlog.Debug(\"index flush %v (data) 
%v (index)\",\n\t\t\trepo.SizeData, repo.SizeIndex)\n\t}\n\trepo.ElapsedIndexing = time.Since(start)\n\trepo.TimeUpdated = time.Now().UTC()\n\n\tvar msg string\n\tif err != nil {\n\t\trepo.State = ERROR\n\t\tresp.SetError(err)\n\t\tmsg = \"error: \" + resp.Error.Error()\n\t} else {\n\t\trepo.State = OK\n\t\tmsg = \"ok \" + fmt.Sprintf(\n\t\t\t\"(%v files, %v data, %v index)\",\n\t\t\trepo.NumFiles, repo.SizeData, repo.SizeIndex)\n\t}\n\tlog.Info(\"index [%v] %v [%v]\", req.Key, msg, repo.ElapsedIndexing)\n\treturn\n}",
"func (b *Blueprint) indexCommand(typ string, columns []string, index string, algorithm string) *Blueprint {\n\t// if no name was specified for this index, we will create one using a bsaic\n\t// convention of the table name, followed by the columns, followd by an\n\t// index type, such as primary or index, which makes the index unique.\n\tif index == \"\" {\n\t\tindex = b.createIndexName(typ, columns)\n\t}\n\n\treturn b.addCommand(typ, &CommandOptions{\n\t\tIndex: index,\n\t\tColumns: columns,\n\t\tAlgorithm: algorithm,\n\t})\n}",
"func Index(w http.ResponseWriter, r *http.Request) {\n\t// validate the passed-in arguments\n\tvars, err := wdp.ValidateApiArgs(r)\n\tif err != nil {\n\t\tlog.Print(\"error validating API arguments: \", err)\n\t}\n\n\t// parse the template at index.html\n\t// NOTE: SWITCH WHICH OF THESE STATEMENTS IS COMMENTED OUT TO RUN ON CLOUD VPS VS LOCALLY\n\t// t, err := template.ParseFiles(\"templates/index.html\") // LOCAL\n\tt, err := template.ParseFiles(\"/etc/diff-privacy-beam/index.html\") // CLOUD VPS\n\tif err != nil {\n\t\tlog.Print(\"error parsing template index_go.html: \", err)\n\t}\n\n\t// execute the template to serve it back to the client\n\terr = t.Execute(w, vars)\n\tif err != nil {\n\t\tlog.Print(\"error executing template index_go.html: \", err)\n\t}\n}",
"func CheckoutIndexCmd(c *git.Client, args []string) error {\n\tflags := flag.NewFlagSet(\"checkout-index\", flag.ExitOnError)\n\tflags.SetOutput(flag.CommandLine.Output())\n\tflags.Usage = func() {\n\t\tflag.Usage()\n\t\tfmt.Fprintf(flag.CommandLine.Output(), \"\\n\\nOptions:\\n\")\n\t\tflags.PrintDefaults()\n\t\t// Some git tests test for a 129 exit code if the commandline\n\t\t// parsing fails for checkout-index.\n\t\tos.Exit(129)\n\t}\n\toptions := git.CheckoutIndexOptions{}\n\n\tflags.BoolVar(&options.UpdateStat, \"index\", false, \"Update stat information for checkout out entries in the index\")\n\tflags.BoolVar(&options.UpdateStat, \"u\", false, \"Alias for --index\")\n\n\tflags.BoolVar(&options.Quiet, \"quiet\", false, \"Be quiet if files exist or are not in index\")\n\tflags.BoolVar(&options.Quiet, \"q\", false, \"Alias for --quiet\")\n\n\tflags.BoolVar(&options.Force, \"force\", false, \"Force overwrite of existing files\")\n\tflags.BoolVar(&options.Force, \"f\", false, \"Alias for --force\")\n\n\tflags.BoolVar(&options.All, \"all\", false, \"Checkout all files in the index.\")\n\tflags.BoolVar(&options.All, \"a\", false, \"Alias for --all\")\n\n\tflags.BoolVar(&options.NoCreate, \"no-create\", false, \"Don't checkout new files, only refresh existing ones\")\n\tflags.BoolVar(&options.NoCreate, \"n\", false, \"Alias for --no-create\")\n\n\tflags.StringVar(&options.Prefix, \"prefix\", \"\", \"When creating files, prepend string\")\n\tflags.StringVar(&options.Stage, \"stage\", \"\", \"Copy files from named stage (unimplemented)\")\n\n\tflags.BoolVar(&options.Temp, \"temp\", false, \"Instead of copying files to a working directory, write them to a temp dir\")\n\n\tstdin := flags.Bool(\"stdin\", false, \"Instead of taking paths from command line, read from stdin\")\n\tflags.BoolVar(&options.NullTerminate, \"z\", false, \"Use nil instead of newline to terminate paths read from stdin\")\n\n\tflags.Parse(args)\n\tfiles := flags.Args()\n\tif *stdin 
{\n\t\toptions.Stdin = os.Stdin\n\t}\n\n\t// Convert from string to git.File\n\tgfiles := make([]git.File, len(files))\n\tfor i, f := range files {\n\t\tgfiles[i] = git.File(f)\n\t}\n\n\treturn git.CheckoutIndex(c, options, gfiles)\n\n}",
"func indexHandler(w http.ResponseWriter, r *http.Request) {\n\tdata := &Index{\n\t\tTitle: \"Image gallery\",\n\t\tBody: \"Welcome to the image gallery.\",\n\t}\n\tfor name, img := range images {\n\t\tdata.Links = append(data.Links, Link{\n\t\t\tURL: \"/image/\" + name,\n\t\t\tTitle: img.Title,\n\t\t})\n\t}\n\tif err := indexTemplate.Execute(w, data); err != nil {\n\t\tlog.Println(err)\n\t}\n}",
"func serveIndex(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\terr := serveAssets(w, r, \"index.html\")\n\tcheckError(err)\n}",
"func (c App) Index() revel.Result {\n\treturn c.Render()\n}",
"func (c App) Index() revel.Result {\n\treturn c.Render()\n}",
"func indexHandler(res http.ResponseWriter, req *http.Request) {\n\n\t// Execute the template and respond with the index page.\n\ttemplates.ExecuteTemplate(res, \"index\", nil)\n}",
"func Index(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Hello from our index page\")\n}",
"func index(w http.ResponseWriter, req *http.Request) {\n\thttp.ServeFile(w, req, \"./templates/index.html\")\n}",
"func index(w http.ResponseWriter, r *http.Request) {\n\tdata := page{Title: \"School Database\", Header: \"Welcome, please select an option\"}\n\ttemplateInit(w, \"index.html\", data)\n\n}",
"func (t tApp) Index(w http.ResponseWriter, r *http.Request) {\n\tvar h http.Handler\n\tc := App.New(w, r, \"App\", \"Index\")\n\tdefer func() {\n\t\tif h != nil {\n\t\t\th.ServeHTTP(w, r)\n\t\t}\n\t}()\n\tdefer App.After(c, w, r)\n\tif res := App.Before(c, w, r); res != nil {\n\t\th = res\n\t\treturn\n\t}\n\tif res := c.Index(); res != nil {\n\t\th = res\n\t\treturn\n\t}\n}",
"func indexHandler(w http.ResponseWriter, r *http.Request) {\r\n t, _ := template.New(\"webpage\").Parse(indexPage) // parse embeded index page\r\n t.Execute(w, pd) // serve the index page (html template)\r\n}",
"func ReadIndex(output http.ResponseWriter, reader *http.Request) {\n\tfmt.Fprintln(output, \"ImageManagerAPI v1.0\")\n\tLog(\"info\", \"Endpoint Hit: ReadIndex\")\n}",
"func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tdata, err := newPageData(1)\n\tif err != nil {\n\t\thttp.NotFound(w, r)\n\t}\n\terr = t.ExecuteTemplate(w, \"index\", data)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
"func (s *service) indexCore(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\treq := &indexRequest{\n\t\tindex: s.index,\n\t\tlog: s.logger,\n\t\tr: r,\n\t\tstore: s.store,\n\t}\n\treq.init()\n\treq.read()\n\treq.readCore()\n\tif req.req.IncludeExecutable {\n\t\treq.readExecutable()\n\t} else {\n\t\treq.computeExecutableSize()\n\t}\n\treq.indexCore()\n\treq.close()\n\n\tif req.err != nil {\n\t\ts.logger.Error(\"indexing\", \"uid\", req.uid, \"err\", req.err)\n\t\twriteError(w, http.StatusInternalServerError, req.err)\n\t\treturn\n\t}\n\n\ts.received.With(prometheus.Labels{\n\t\t\"hostname\": req.coredump.Hostname,\n\t\t\"executable\": req.coredump.Executable,\n\t}).Inc()\n\n\ts.receivedSizes.With(prometheus.Labels{\n\t\t\"hostname\": req.coredump.Hostname,\n\t\t\"executable\": req.coredump.Executable,\n\t}).Observe(datasize.ByteSize(req.coredump.Size).MBytes())\n\n\ts.analysisQueue <- req.coredump\n\n\twrite(w, http.StatusOK, map[string]interface{}{\"acknowledged\": true})\n}",
"func Index(w http.ResponseWriter, r *http.Request) {\n\t// Fill out the page data for index\n\tpd := PageData{\n\t\tTitle: \"Index Page\",\n\t\tBody: \"This is the body of the index page.\",\n\t}\n\n\t// Render a template with our page data\n\ttmpl, err := render(pd)\n\n\t// if we get an error, write it out and exit\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\n\t// All went well, so write out the template\n\tw.Write([]byte(tmpl))\n\n\t//fmt.Fprintf(w, \"Hello world from %q\", html.EscapeString(r.URL.Path))\n}",
"func (api *API) GetIndex(w http.ResponseWriter, r *http.Request) {\n\n\tinfo := Info{Port: api.Session.Config.API.Port, Versions: Version}\n\td := Metadata{Info: info}\n\n\tres := CodeToResult[CodeOK]\n\tres.Data = d\n\tres.Message = \"Documentation available at https://github.com/netm4ul/netm4ul\"\n\tw.WriteHeader(res.HTTPCode)\n\tjson.NewEncoder(w).Encode(res)\n}",
"func Index(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodGet {\n\t\thttp.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tfmt.Fprintf(w, \"I am running...\\n\")\n}",
"func Index(w http.ResponseWriter, r *http.Request) {\n\n\tfmt.Fprintln(w, \"Steve and Kyle Podcast: #api\")\n\tfmt.Fprintln(w, \"Number of episodes in database:\", EpCount())\n\tfmt.Fprintln(w, \"Created by Derek Slenk\")\n\tfmt.Println(\"Endpoint Hit: Index\")\n}",
"func Index(w http.ResponseWriter, r *http.Request) {\n\n\t// Render the \"Index.gtpl\"\n\tgoTmpl.ExecuteTemplate(w, \"Index\", nil)\n\n\t//name := r.FormValue(\"name\")\n\n\t// Logging the Action\n\tlog.Println(\"Render: Index.gtpl\")\n\n}",
"func TestIndex(t *testing.T) {\n\tstubTank := &tank.StubTank{Moves: nil}\n\n\ts := server.NewServer(stubTank)\n\n\treq, err := http.NewRequest(http.MethodGet, \"/\", http.NoBody)\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating request for index: %v\", err)\n\t}\n\n\tw := httptest.NewRecorder()\n\ts.ServeHTTP(w, req)\n\n\tif w.Code != http.StatusOK {\n\t\tt.Fatalf(\"Wrong status - expected %d, got %d\", http.StatusOK, w.Code)\n\t}\n\n\tbody, err := ioutil.ReadAll(w.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Error reading index body: %v\", err)\n\t}\n\n\tif !strings.Contains(string(body), \"html\") {\n\t\tt.Fatalf(\"Expected HTML from the index.\")\n\t}\n}",
"func (h *Handlers) Index(w http.ResponseWriter, r *http.Request, _ map[string]string) {\n\tfile := path.Join(\"index.html\")\n\n\tdata := struct{ Host string }{Host: h.Host}\n\n\ttemp, _ := template.ParseFiles(file)\n\ttemp.Execute(w, &data)\n}",
"func (t tApp) Index(w http.ResponseWriter, r *http.Request) {\n\tvar h http.Handler\n\tc := App.newC(w, r, \"App\", \"Index\")\n\tdefer func() {\n\t\t// If one of the actions (Before, After or Index) returned\n\t\t// a handler, apply it.\n\t\tif h != nil {\n\t\t\th.ServeHTTP(w, r)\n\t\t}\n\t}()\n\tdefer App.after(c, w, r) // Call this at the very end, but before applying result.\n\tif res := App.before(c, w, r); res != nil {\n\t\th = res\n\t\treturn\n\t}\n\tif res := c.Index(); res != nil {\n\t\th = res\n\t\treturn\n\t}\n}",
"func handleIndex(w http.ResponseWriter, r *http.Request) {\n\n\tif r.URL.Path != \"/\" {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tc := appengine.NewContext(r)\n\tlog.Infof(c, \"Serving main page.\")\n\n\ttmpl, _ := template.ParseFiles(\"web/tmpl/index.tmpl\")\n\n\ttmpl.Execute(w, time.Since(initTime))\n}",
"func showIndex(c *gin.Context) {\n\trender(\n\t\tc,\n\t\tgin.H{\n\t\t\t\"title\": \"Home Page\",\n\t\t\t\"payload\": films,\n\t\t},\n\t\ttemplates.Index,\n\t)\n}",
"func indexHandler(w http.ResponseWriter, req *http.Request) {\n\tlayout, err := template.ParseFile(PATH_PUBLIC + TEMPLATE_LAYOUT)\n\tif err != nil {\n\t\thttp.Error(w, ERROR_TEMPLATE_NOT_FOUND, http.StatusNotFound)\n\t\treturn\n\t}\n\tindex, err := template.ParseFile(PATH_PUBLIC + TEMPLATE_INDEX)\n\t//artical, err := template.ParseFile(PATH_PUBLIC + TEMPLATE_ARTICAL)\n\tif err != nil {\n\t\thttp.Error(w, ERROR_TEMPLATE_NOT_FOUND, http.StatusNotFound)\n\t\treturn\n\t}\n\tmapOutput := map[string]interface{}{\"Title\": \"炫酷的网站技术\" + TITLE, \"Keyword\": KEYWORD, \"Description\": DESCRIPTION, \"Base\": BASE_URL, \"Url\": BASE_URL, \"Carousel\": getAddition(PREFIX_INDEX), \"Script\": getAddition(PREFIX_SCRIPT), \"Items\": leveldb.GetRandomContents(20, &Filter{})}\n\tcontent := []byte(index.RenderInLayout(layout, mapOutput))\n\tw.Write(content)\n\tgo cacheFile(\"index\", content)\n}",
"func (app *Application) Index(w http.ResponseWriter, r *http.Request) {\n\tdata := struct {\n\t\tTime int64\n\t}{\n\t\tTime: time.Now().Unix(),\n\t}\n\n\tt, err := template.ParseFiles(\"views/index.tpl\")\n\n\tif err != nil {\n\t\tlog.Println(\"Template.Parse:\", err)\n\t\thttp.Error(w, \"Internal Server Error 0x0178\", http.StatusInternalServerError)\n\t}\n\n\tif err := t.Execute(w, data); err != nil {\n\t\tlog.Println(\"Template.Execute:\", err)\n\t\thttp.Error(w, \"Internal Server Error 0x0183\", http.StatusInternalServerError)\n\t}\n}",
"func AdminIndex(c *cli.Context) {\n\tesClient := getESClient(c)\n\tindexName := getRequiredOption(c, FlagIndex)\n\tinputFileName := getRequiredOption(c, FlagInputFile)\n\tbatchSize := c.Int(FlagBatchSize)\n\n\tmessages, err := parseIndexerMessage(inputFileName)\n\tif err != nil {\n\t\tErrorAndExit(\"Unable to parse indexer message\", err)\n\t}\n\n\tbulkRequest := esClient.Bulk()\n\tbulkConductFn := func() {\n\t\t_, err := bulkRequest.Do(context.Background())\n\t\tif err != nil {\n\t\t\tErrorAndExit(\"Bulk failed\", err)\n\t\t}\n\t\tif bulkRequest.NumberOfActions() != 0 {\n\t\t\tErrorAndExit(fmt.Sprintf(\"Bulk request not done, %d\", bulkRequest.NumberOfActions()), err)\n\t\t}\n\t}\n\tfor i, message := range messages {\n\t\tdocID := message.GetWorkflowID() + esDocIDDelimiter + message.GetRunID()\n\t\tvar req elastic.BulkableRequest\n\t\tswitch message.GetMessageType() {\n\t\tcase indexer.MessageTypeIndex:\n\t\t\tdoc := generateESDoc(message)\n\t\t\treq = elastic.NewBulkIndexRequest().\n\t\t\t\tIndex(indexName).\n\t\t\t\tType(esDocType).\n\t\t\t\tId(docID).\n\t\t\t\tVersionType(versionTypeExternal).\n\t\t\t\tVersion(message.GetVersion()).\n\t\t\t\tDoc(doc)\n\t\tcase indexer.MessageTypeDelete:\n\t\t\treq = elastic.NewBulkDeleteRequest().\n\t\t\t\tIndex(indexName).\n\t\t\t\tType(esDocType).\n\t\t\t\tId(docID).\n\t\t\t\tVersionType(versionTypeExternal).\n\t\t\t\tVersion(message.GetVersion())\n\t\tdefault:\n\t\t\tErrorAndExit(\"Unknown message type\", nil)\n\t\t}\n\t\tbulkRequest.Add(req)\n\n\t\tif i%batchSize == batchSize-1 {\n\t\t\tbulkConductFn()\n\t\t}\n\t}\n\tif bulkRequest.NumberOfActions() != 0 {\n\t\tbulkConductFn()\n\t}\n}",
"func Indexer() *cobra.Command {\n\tvar Indexer = &cobra.Command{\n\t\tUse: \"indexer\",\n\t\tShort: \"consumes metrics from the bus and makes them searchable\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\t// get kafka source\n\t\t\ts, err := kafka.NewSource(&kafka.SourceConfig{\n\t\t\t\tAddrs: strings.Split(viper.GetString(flagKafkaAddrs), \",\"),\n\t\t\t\tClientID: viper.GetString(flagKafkaClientID),\n\t\t\t\tGroupID: viper.GetString(flagKafkaGroupID),\n\t\t\t\tTopics: []string{viper.GetString(flagKafkaTopic)},\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// custom client used so we can more effeciently reuse connections.\n\t\t\tcustomClient := &http.Client{\n\t\t\t\tTransport: &http.Transport{\n\t\t\t\t\tDial: func(network, addr string) (net.Conn, error) {\n\t\t\t\t\t\treturn net.Dial(network, addr)\n\t\t\t\t\t},\n\t\t\t\t\tMaxIdleConnsPerHost: viper.GetInt(\"max-idle-conn\"),\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t// allow sniff to be set because in some networking environments sniffing doesn't work. 
Should be allowed in prod\n\t\t\tclient, err := elastic.NewClient(\n\t\t\t\telastic.SetURL(viper.GetString(\"es\")),\n\t\t\t\telastic.SetSniff(viper.GetBool(\"es-sniff\")),\n\t\t\t\telastic.SetHttpClient(customClient),\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// set up caching es sample indexer\n\t\t\tesIndexer := elasticsearch.NewSampleIndexer(&elasticsearch.SampleIndexerConfig{\n\t\t\t\tClient: client,\n\t\t\t\tIndex: viper.GetString(\"es-index\"),\n\t\t\t})\n\t\t\tsampleIndexer := indexer.NewCachingIndexer(&indexer.CachingIndexerConfig{\n\t\t\t\tIndexer: esIndexer,\n\t\t\t\tMaxDuration: viper.GetDuration(\"es-writecache-duration\"),\n\t\t\t})\n\n\t\t\t// create indexer and run\n\t\t\ti := indexer.NewIndexer(&indexer.Config{\n\t\t\t\tSampleIndexer: sampleIndexer,\n\t\t\t\tSource: s,\n\t\t\t\tNumIndexGoroutines: viper.GetInt(\"indexer-goroutines\"),\n\t\t\t})\n\n\t\t\tprometheus.MustRegister(i)\n\t\t\tprometheus.MustRegister(esIndexer)\n\t\t\tprometheus.MustRegister(sampleIndexer)\n\n\t\t\tgo func() {\n\t\t\t\thttp.Handle(\"/metrics\", prometheus.Handler())\n\t\t\t\thttp.ListenAndServe(\":8080\", nil)\n\t\t\t}()\n\n\t\t\treturn i.Run()\n\t\t},\n\t}\n\n\tIndexer.Flags().String(flagKafkaAddrs, \"\", \"one.example.com:9092,two.example.com:9092\")\n\tIndexer.Flags().String(flagKafkaClientID, \"vulcan-indexer\", \"set the kafka client id\")\n\tIndexer.Flags().String(flagKafkaTopic, \"vulcan\", \"topic to read in kafka\")\n\tIndexer.Flags().String(flagKafkaGroupID, \"vulcan-indexer\", \"workers with the same groupID will join the same Kafka ConsumerGroup\")\n\tIndexer.Flags().String(\"es\", \"http://elasticsearch:9200\", \"elasticsearch connection url\")\n\tIndexer.Flags().Bool(\"es-sniff\", true, \"whether or not to sniff additional hosts in the cluster\")\n\tIndexer.Flags().String(\"es-index\", \"vulcan\", \"the elasticsearch index to write documents into\")\n\tIndexer.Flags().Duration(\"es-writecache-duration\", time.Minute*10, \"the 
duration to cache having written a value to es and to skip further writes of the same metric\")\n\tIndexer.Flags().Uint(\"indexer-goroutines\", 30, \"worker goroutines for writing indexes\")\n\tIndexer.Flags().Uint(\"max-idle-conn\", 30, \"max idle connections for fetching from data storage\")\n\n\treturn Indexer\n}",
"func (i *indexer) Index() (*Stats, error) {\n\tpkgs, err := i.packages()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn i.index(pkgs)\n}",
"func Index(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, \"Welcome to this example API\\n\")\n}",
"func indexHandler(res http.ResponseWriter, req *http.Request) {\n\tfmt.Println(\"website index\")\n\t//grab all partials\n\tpartials, err := loadPartials()\n\tif err != nil {\n\t\thttp.Error(res, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t//get template function based on index and execute to load page\n\tt, _ := template.ParseFiles(\"../index.html\")\n\tt.Execute(res, partials)\n}",
"func Index(w http.ResponseWriter, r *http.Request) {\n\t//shows 404 not found\n\tif r.URL.Path != \"/\" {\n\t\tNotFound(w, r)\n\t} else {\n\t\ttemplate, err := template.ParseFiles(\"../htmls/index.html\")\n\t\tif err != nil {\n\t\t\tlogger.ErrLogger(err)\n\t\t} else {\n\t\t\ttemplate.Execute(w, nil)\n\t\t}\n\t}\n}",
"func (h *Root) Index(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {\n\tif claims, err := auth.ClaimsFromContext(ctx); err == nil && claims.HasAuth() {\n\t\treturn h.indexDashboard(ctx, w, r, params)\n\t}\n\n\treturn h.indexDefault(ctx, w, r, params)\n}",
"func Index(logger *log.Logger, basepath string, done <-chan struct{}, templ *template.Template) http.Handler {\n\ttracker, err := dir.Watch(basepath)\n\tif err != nil {\n\t\tlogger.Printf(\"failed to watch directory [%s] - %v\", basepath, err)\n\t\treturn ResponseCode(500, \"failed to initialize IndexHandler - %v\", err)\n\t}\n\tgo func() {\n\t\t<-done\n\t\ttracker.Close()\n\t}()\n\n\treturn indexHandler{basePath: basepath, templ: templ, l: logger, dir: tracker, done: done}\n}",
"func (d *Diagnosis) indexHandler(w http.ResponseWriter, r *http.Request) {\n\tvar profiles []profile\n\tfor _, p := range pprof.Profiles() {\n\t\tprofiles = append(profiles, profile{\n\t\t\tName: p.Name(),\n\t\t\tHref: p.Name() + \"?debug=1\",\n\t\t\tDesc: profileDescriptions[p.Name()],\n\t\t\tCount: p.Count(),\n\t\t})\n\t}\n\n\t// Adding other profiles exposed from within this package\n\tfor _, p := range []string{\"cmdline\", \"profile\", \"trace\"} {\n\t\tprofiles = append(profiles, profile{\n\t\t\tName: p,\n\t\t\tHref: p,\n\t\t\tDesc: profileDescriptions[p],\n\t\t})\n\t}\n\n\tsort.Slice(profiles, func(i, j int) bool {\n\t\treturn profiles[i].Name < profiles[j].Name\n\t})\n\n\tif err := indexTmpl.Execute(w, map[string]interface{}{\n\t\t\"AppName\": d.appName,\n\t\t\"PathPrefix\": d.pathPrefix,\n\t\t\"Profiles\": profiles,\n\t}); err != nil {\n\t\td.log.Error(err)\n\t}\n}",
"func (h *MovieHandler) index(w http.ResponseWriter, r *http.Request) {\n\t// Call GetMovies to retrieve all movies from the database.\n\tif movies, err := h.MovieService.GetMovies(); err != nil {\n\t\t// Render an error response and set status code.\n\t\thttp.Error(w, \"Internal Server Error\", http.StatusInternalServerError)\n\t\tlog.Println(\"Error:\", err)\n\t} else {\n\t\t// Render a HTML response and set status code.\n\t\trender.HTML(w, http.StatusOK, \"movie/index.html\", movies)\n\t}\n}",
"func Index(w http.ResponseWriter, r *http.Request) {\n\tenv.Output.WriteChDebug(\"(ApiEngine::Index)\")\n\thttp.Redirect(w, r, \"/api/node\", http.StatusFound)\n}",
"func Index(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, \"s-senpai, please don't hurt me ;_;\\n\")\n}",
"func handleIndex(w http.ResponseWriter, r *http.Request) {\n\tmsg := fmt.Sprintf(\"You've called url %s\", r.URL.String())\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.WriteHeader(http.StatusOK) // 200\n\tw.Write([]byte(msg))\n}",
"func main() {\n\t// Create a client\n\tclient, err := elastic.NewClient(elastic.SetURL(ES_URL), elastic.SetSniff(false))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdeleteIndex, err := client.DeleteIndex(INDEX).Do()\n\tif err != nil {\n\t\t// Handle error\n\t\tpanic(err)\n\t}\n\tif !deleteIndex.Acknowledged {\n\t\t// Not acknowledged\n\t}\n\n}",
"func (s *HTTPServer) Index(resp http.ResponseWriter, req *http.Request) {\n\t// Check if this is a non-index path\n\tif req.URL.Path != \"/\" {\n\t\tresp.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\t// Give them something helpful if there's no UI so they at least know\n\t// what this server is.\n\tif !s.IsUIEnabled() {\n\t\tfmt.Fprint(resp, \"Consul Agent\")\n\t\treturn\n\t}\n\n\t// Redirect to the UI endpoint\n\thttp.Redirect(resp, req, \"/ui/\", http.StatusMovedPermanently) // 301\n}",
"func (s *server) handleIndex(FSS fs.FS) http.HandlerFunc {\n\ttype AppConfig struct {\n\t\tAvatarService string\n\t\tToastTimeout int\n\t\tAllowGuests bool\n\t\tAllowRegistration bool\n\t\tDefaultLocale string\n\t\tAuthMethod string\n\t\tAppVersion string\n\t\tCookieName string\n\t\tPathPrefix string\n\t\tAPIEnabled bool\n\t\tCleanupGuestsDaysOld int\n\t\tCleanupStoryboardsDaysOld int\n\t\tShowActiveCountries bool\n\t}\n\ttype UIConfig struct {\n\t\tAnalyticsEnabled bool\n\t\tAnalyticsID string\n\t\tAppConfig AppConfig\n\t\tActiveAlerts []interface{}\n\t}\n\n\ttmpl := s.getIndexTemplate(FSS)\n\n\tappConfig := AppConfig{\n\t\tAvatarService: viper.GetString(\"config.avatar_service\"),\n\t\tToastTimeout: viper.GetInt(\"config.toast_timeout\"),\n\t\tAllowGuests: viper.GetBool(\"config.allow_guests\"),\n\t\tAllowRegistration: viper.GetBool(\"config.allow_registration\") && viper.GetString(\"auth.method\") == \"normal\",\n\t\tDefaultLocale: viper.GetString(\"config.default_locale\"),\n\t\tAuthMethod: viper.GetString(\"auth.method\"),\n\t\tAPIEnabled: viper.GetBool(\"config.allow_external_api\"),\n\t\tAppVersion: s.config.Version,\n\t\tCookieName: s.config.FrontendCookieName,\n\t\tPathPrefix: s.config.PathPrefix,\n\t\tCleanupGuestsDaysOld: viper.GetInt(\"config.cleanup_guests_days_old\"),\n\t\tCleanupStoryboardsDaysOld: viper.GetInt(\"config.cleanup_storyboards_days_old\"),\n\t\tShowActiveCountries: viper.GetBool(\"config.show_active_countries\"),\n\t}\n\n\tActiveAlerts = s.database.GetActiveAlerts()\n\n\tdata := UIConfig{\n\t\tAnalyticsEnabled: s.config.AnalyticsEnabled,\n\t\tAnalyticsID: s.config.AnalyticsID,\n\t\tAppConfig: appConfig,\n\t}\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tdata.ActiveAlerts = ActiveAlerts // get latest alerts from memory\n\n\t\tif embedUseOS {\n\t\t\ttmpl = s.getIndexTemplate(FSS)\n\t\t}\n\n\t\ttmpl.Execute(w, data)\n\t}\n}",
"func indexHandler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, \"Whoa, Nice!\")\n}",
"func index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tasset, err := Asset(\"static/templates/index.html\")\n\tif err != nil {\n\t\tlog.Panic(\"Unable to read file from bindata: \", err)\n\t}\n\tfmt.Fprint(w, string(asset))\n}",
"func index(c echo.Context) error {\n\tpprof.Index(c.Response().Writer, c.Request())\n\treturn nil\n}",
"func GetIndex(c *gin.Context) {\n\tc.String(http.StatusOK, \"Hello world !!\")\n}",
"func index(w http.ResponseWriter, req *http.Request, _ httprouter.Params) {\n\tvar returnResponse = map[string]interface{}{\"message\": \"Welcome to the TonicPow API!\"}\n\tapirouter.ReturnResponse(w, req, http.StatusOK, returnResponse)\n}",
"func indexHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"indexHandler is called\")\n\n\tw.WriteHeader(http.StatusOK)\n}",
"func indexHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"indexHandler is called\")\n\n\tw.WriteHeader(http.StatusOK)\n}",
"func Index(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"Hello World!\"))\n}",
"func (v1 *V1Client) Index() error {\n\tresp := v1.POST(nil, \"/api/v1/index\")\n\treturn resp.Error\n}",
"func indexhandler(w http.ResponseWriter, r *http.Request) {\n\tt, _ := template.ParseFiles(os.Getenv(\"GOPATH\")+\"/lib/flickrquizz/index.html\")\n\tt.ExecuteTemplate(w, \"Seed\", rand.Int())\n}",
"func ShowIndex(ctx context.Context, db QueryExecutor, schemaName string, table string) ([]*IndexInfo, error) {\n\t/*\n\t\tshow index example result:\n\t\tmysql> show index from test;\n\t\t+-------+------------+----------+--------------+-------------+-----------+-------------+----------+--------+------+------------+---------+---------------+\n\t\t| Table | Non_unique | Key_name | Seq_in_index | Column_name | Collation | Cardinality | Sub_part | Packed | Null | Index_type | Comment | Index_comment |\n\t\t+-------+------------+----------+--------------+-------------+-----------+-------------+----------+--------+------+------------+---------+---------------+\n\t\t| test | 0 | PRIMARY | 1 | id | A | 0 | NULL | NULL | | BTREE | | |\n\t\t| test | 0 | aid | 1 | aid | A | 0 | NULL | NULL | YES | BTREE | | |\n\t\t+-------+------------+----------+--------------+-------------+-----------+-------------+----------+--------+------+------------+---------+---------------+\n\t*/\n\tindices := make([]*IndexInfo, 0, 3)\n\tquery := fmt.Sprintf(\"SHOW INDEX FROM %s\", TableName(schemaName, table))\n\trows, err := db.QueryContext(ctx, query)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tfields, err1 := ScanRow(rows)\n\t\tif err1 != nil {\n\t\t\treturn nil, errors.Trace(err1)\n\t\t}\n\t\tseqInIndex, err1 := strconv.Atoi(string(fields[\"Seq_in_index\"].Data))\n\t\tif err1 != nil {\n\t\t\treturn nil, errors.Trace(err1)\n\t\t}\n\t\tcardinality, err1 := strconv.Atoi(string(fields[\"Cardinality\"].Data))\n\t\tif err1 != nil {\n\t\t\treturn nil, errors.Trace(err1)\n\t\t}\n\t\tindex := &IndexInfo{\n\t\t\tTable: string(fields[\"Table\"].Data),\n\t\t\tNoneUnique: string(fields[\"Non_unique\"].Data) == \"1\",\n\t\t\tKeyName: string(fields[\"Key_name\"].Data),\n\t\t\tColumnName: string(fields[\"Column_name\"].Data),\n\t\t\tSeqInIndex: seqInIndex,\n\t\t\tCardinality: cardinality,\n\t\t}\n\t\tindices = append(indices, 
index)\n\t}\n\n\treturn indices, nil\n}",
"func serveIndex(w http.ResponseWriter, r *http.Request, bs buildSpec, br *buildResult) {\n\txreq := request{bs, \"\", pageIndex}\n\txlink := xreq.link()\n\n\ttype versionLink struct {\n\t\tVersion string\n\t\tURLPath string\n\t\tSuccess bool\n\t\tActive bool\n\t}\n\ttype response struct {\n\t\tErr error\n\t\tLatestVersion string\n\t\tVersionLinks []versionLink\n\t}\n\n\t// Do a lookup to the goproxy in the background, to list the module versions.\n\tc := make(chan response, 1)\n\tgo func() {\n\t\tt0 := time.Now()\n\t\tdefer func() {\n\t\t\tmetricGoproxyListDuration.Observe(time.Since(t0).Seconds())\n\t\t}()\n\n\t\tmodPath, err := module.EscapePath(bs.Mod)\n\t\tif err != nil {\n\t\t\tc <- response{fmt.Errorf(\"bad module path: %v\", err), \"\", nil}\n\t\t\treturn\n\t\t}\n\t\tu := fmt.Sprintf(\"%s%s/@v/list\", config.GoProxy, modPath)\n\t\tmreq, err := http.NewRequestWithContext(r.Context(), \"GET\", u, nil)\n\t\tif err != nil {\n\t\t\tc <- response{fmt.Errorf(\"%w: preparing new http request: %v\", errServer, err), \"\", nil}\n\t\t\treturn\n\t\t}\n\t\tmreq.Header.Set(\"User-Agent\", userAgent)\n\t\tresp, err := http.DefaultClient.Do(mreq)\n\t\tif err != nil {\n\t\t\tc <- response{fmt.Errorf(\"%w: http request: %v\", errServer, err), \"\", nil}\n\t\t\treturn\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tif resp.StatusCode != 200 {\n\t\t\tmetricGoproxyListErrors.WithLabelValues(fmt.Sprintf(\"%d\", resp.StatusCode)).Inc()\n\t\t\tc <- response{fmt.Errorf(\"%w: http response from goproxy: %v\", errRemote, resp.Status), \"\", nil}\n\t\t\treturn\n\t\t}\n\t\tbuf, err := io.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tc <- response{fmt.Errorf(\"%w: reading versions from goproxy: %v\", errRemote, err), \"\", nil}\n\t\t\treturn\n\t\t}\n\t\tl := []versionLink{}\n\t\tfor _, s := range strings.Split(string(buf), \"\\n\") {\n\t\t\tif s != \"\" {\n\t\t\t\tvbs := bs\n\t\t\t\tvbs.Version = s\n\t\t\t\tsuccess := fileExists(filepath.Join(vbs.storeDir(), \"recordnumber\"))\n\t\t\t\tp := 
request{vbs, \"\", pageIndex}.link()\n\t\t\t\tlink := versionLink{s, p, success, p == xlink}\n\t\t\t\tl = append(l, link)\n\t\t\t}\n\t\t}\n\t\tsort.Slice(l, func(i, j int) bool {\n\t\t\treturn semver.Compare(l[i].Version, l[j].Version) > 0\n\t\t})\n\t\tvar latestVersion string\n\t\tif len(l) > 0 {\n\t\t\tlatestVersion = l[0].Version\n\t\t}\n\t\tc <- response{nil, latestVersion, l}\n\t}()\n\n\t// Non-emptiness means we'll serve the error page instead of doing a SSE request for events.\n\tvar output string\n\tif br == nil {\n\t\tif buf, err := readGzipFile(filepath.Join(bs.storeDir(), \"log.gz\")); err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\tfailf(w, \"%w: reading log.gz: %v\", errServer, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// For not-exist, we'll continue below to build.\n\t\t} else {\n\t\t\toutput = string(buf)\n\t\t}\n\t}\n\n\t// Construct links to other goversions, targets.\n\ttype goversionLink struct {\n\t\tGoversion string\n\t\tURLPath string\n\t\tSuccess bool\n\t\tSupported bool\n\t\tActive bool\n\t}\n\tgoversionLinks := []goversionLink{}\n\tnewestAllowed, supported, remaining := installedSDK()\n\tfor _, goversion := range supported {\n\t\tgvbs := bs\n\t\tgvbs.Goversion = goversion\n\t\tsuccess := fileExists(filepath.Join(gvbs.storeDir(), \"recordnumber\"))\n\t\tp := request{gvbs, \"\", pageIndex}.link()\n\t\tgoversionLinks = append(goversionLinks, goversionLink{goversion, p, success, true, p == xlink})\n\t}\n\tfor _, goversion := range remaining {\n\t\tgvbs := bs\n\t\tgvbs.Goversion = goversion\n\t\tsuccess := fileExists(filepath.Join(gvbs.storeDir(), \"recordnumber\"))\n\t\tp := request{gvbs, \"\", pageIndex}.link()\n\t\tgoversionLinks = append(goversionLinks, goversionLink{goversion, p, success, false, p == xlink})\n\t}\n\n\ttype targetLink struct {\n\t\tGoos string\n\t\tGoarch string\n\t\tURLPath string\n\t\tSuccess bool\n\t\tActive bool\n\t}\n\ttargetLinks := []targetLink{}\n\tfor _, target := range targets.get() {\n\t\ttbs := bs\n\t\ttbs.Goos 
= target.Goos\n\t\ttbs.Goarch = target.Goarch\n\t\tsuccess := fileExists(filepath.Join(tbs.storeDir(), \"recordnumber\"))\n\t\tp := request{tbs, \"\", pageIndex}.link()\n\t\ttargetLinks = append(targetLinks, targetLink{target.Goos, target.Goarch, p, success, p == xlink})\n\t}\n\n\ttype variantLink struct {\n\t\tVariant string // \"default\" or \"stripped\"\n\t\tTitle string // Displayed on hover in UI.\n\t\tURLPath string\n\t\tSuccess bool\n\t\tActive bool\n\t}\n\tvar variantLinks []variantLink\n\taddVariant := func(v, title string, stripped bool) {\n\t\tvbs := bs\n\t\tvbs.Stripped = stripped\n\t\tsuccess := fileExists(filepath.Join(vbs.storeDir(), \"recordnumber\"))\n\t\tp := request{vbs, \"\", pageIndex}.link()\n\t\tvariantLinks = append(variantLinks, variantLink{v, title, p, success, p == xlink})\n\t}\n\taddVariant(\"default\", \"\", false)\n\taddVariant(\"stripped\", \"Symbol table and debug information stripped, reducing binary size.\", true)\n\n\tpkgGoDevURL := \"https://pkg.go.dev/\" + path.Join(bs.Mod+\"@\"+bs.Version, bs.Dir[1:]) + \"?tab=doc\"\n\n\tresp := <-c\n\n\tvar filesizeGz string\n\tif br == nil {\n\t\tbr = &buildResult{buildSpec: bs}\n\t} else {\n\t\tif info, err := os.Stat(filepath.Join(bs.storeDir(), \"binary.gz\")); err == nil {\n\t\t\tfilesizeGz = fmt.Sprintf(\"%.1f MB\", float64(info.Size())/(1024*1024))\n\t\t}\n\t}\n\n\tprependDir := xreq.Dir\n\tif prependDir == \"/\" {\n\t\tprependDir = \"\"\n\t}\n\n\tvar newerText, newerURL string\n\tif xreq.Goversion != newestAllowed && newestAllowed != \"\" && xreq.Version != resp.LatestVersion && resp.LatestVersion != \"\" {\n\t\tnewerText = \"A newer version of both this module and the Go toolchain is available\"\n\t} else if xreq.Version != resp.LatestVersion && resp.LatestVersion != \"\" {\n\t\tnewerText = \"A newer version of this module is available\"\n\t} else if xreq.Goversion != newestAllowed && newestAllowed != \"\" {\n\t\tnewerText = \"A newer Go toolchain version is available\"\n\t}\n\tif 
newerText != \"\" {\n\t\tnbs := bs\n\t\tnbs.Version = resp.LatestVersion\n\t\tnbs.Goversion = newestAllowed\n\t\tnewerURL = request{nbs, \"\", pageIndex}.link()\n\t}\n\n\tfavicon := \"/favicon.ico\"\n\tif output != \"\" {\n\t\tfavicon = \"/favicon-error.png\"\n\t} else if br.Sum == \"\" {\n\t\tfavicon = \"/favicon-building.png\"\n\t}\n\targs := map[string]interface{}{\n\t\t\"Favicon\": favicon,\n\t\t\"Success\": br.Sum != \"\",\n\t\t\"Sum\": br.Sum,\n\t\t\"Req\": xreq, // eg \"/\" or \"/cmd/x\"\n\t\t\"DirAppend\": xreq.appendDir(), // eg \"\" or \"cmd/x/\"\n\t\t\"DirPrepend\": prependDir, // eg \"\" or /cmd/x\"\n\t\t\"GoversionLinks\": goversionLinks,\n\t\t\"TargetLinks\": targetLinks,\n\t\t\"VariantLinks\": variantLinks,\n\t\t\"Mod\": resp,\n\t\t\"GoProxy\": config.GoProxy,\n\t\t\"DownloadFilename\": xreq.downloadFilename(),\n\t\t\"PkgGoDevURL\": pkgGoDevURL,\n\t\t\"GobuildVersion\": gobuildVersion,\n\t\t\"GobuildPlatform\": gobuildPlatform,\n\t\t\"VerifierKey\": config.VerifierKey,\n\t\t\"GobuildsOrgVerifierKey\": gobuildsOrgVerifierKey,\n\t\t\"NewerText\": newerText,\n\t\t\"NewerURL\": newerURL,\n\n\t\t// Whether we will do SSE request for updates.\n\t\t\"InProgress\": br.Sum == \"\" && output == \"\",\n\n\t\t// Non-empty on failure.\n\t\t\"Output\": output,\n\n\t\t// Below only meaningful when \"success\".\n\t\t\"Filesize\": fmt.Sprintf(\"%.1f MB\", float64(br.Filesize)/(1024*1024)),\n\t\t\"FilesizeGz\": filesizeGz,\n\t}\n\n\tif br.Sum == \"\" {\n\t\tw.Header().Set(\"Cache-Control\", \"no-store\")\n\t}\n\n\tif err := buildTemplate.Execute(w, args); err != nil {\n\t\tfailf(w, \"%w: executing template: %v\", errServer, err)\n\t}\n}",
"func (app *App) RenderIndex(w http.ResponseWriter, r *http.Request) {\n\ttmplList := []string{\"./web/views/base.html\",\n\t\t\"./web/views/index.html\"}\n\tres, err := app.TplParser.ParseTemplate(tmplList, nil)\n\tif err != nil {\n\t\tapp.Log.Info(err)\n\t}\n\tio.WriteString(w, res)\n}",
"func TestIndex(t *testing.T) {\n\tdefer os.RemoveAll(\"testidx\")\n\n\tindex, err := New(\"testidx\", mapping)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer index.Close()\n\n\t// index all the people\n\tfor _, person := range people {\n\t\terr = index.Index(person.Identifier, person)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n\n\ttermQuery := NewTermQuery(\"marti\").SetField(\"name\")\n\tsearchRequest := NewSearchRequest(termQuery)\n\tsearchResult, err := index.Search(searchRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif searchResult.Total != uint64(1) {\n\t\tt.Errorf(\"expected 1 total hit for term query, got %d\", searchResult.Total)\n\t} else {\n\t\tif searchResult.Hits[0].ID != \"a\" {\n\t\t\tt.Errorf(\"expected top hit id 'a', got '%s'\", searchResult.Hits[0].ID)\n\t\t}\n\t}\n\n\ttermQuery = NewTermQuery(\"noone\").SetField(\"name\")\n\tsearchRequest = NewSearchRequest(termQuery)\n\tsearchResult, err = index.Search(searchRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif searchResult.Total != uint64(0) {\n\t\tt.Errorf(\"expected 0 total hits\")\n\t}\n\n\tmatchPhraseQuery := NewMatchPhraseQuery(\"long name\")\n\tsearchRequest = NewSearchRequest(matchPhraseQuery)\n\tsearchResult, err = index.Search(searchRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif searchResult.Total != uint64(1) {\n\t\tt.Errorf(\"expected 1 total hit for phrase query, got %d\", searchResult.Total)\n\t} else {\n\t\tif searchResult.Hits[0].ID != \"b\" {\n\t\t\tt.Errorf(\"expected top hit id 'b', got '%s'\", searchResult.Hits[0].ID)\n\t\t}\n\t}\n\n\ttermQuery = NewTermQuery(\"walking\").SetField(\"name\")\n\tsearchRequest = NewSearchRequest(termQuery)\n\tsearchResult, err = index.Search(searchRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif searchResult.Total != uint64(0) {\n\t\tt.Errorf(\"expected 0 total hits\")\n\t}\n\n\tmatchQuery := NewMatchQuery(\"walking\").SetField(\"name\")\n\tsearchRequest = NewSearchRequest(matchQuery)\n\tsearchResult, 
err = index.Search(searchRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif searchResult.Total != uint64(1) {\n\t\tt.Errorf(\"expected 1 total hit for match query, got %d\", searchResult.Total)\n\t} else {\n\t\tif searchResult.Hits[0].ID != \"c\" {\n\t\t\tt.Errorf(\"expected top hit id 'c', got '%s'\", searchResult.Hits[0].ID)\n\t\t}\n\t}\n\n\tprefixQuery := NewPrefixQuery(\"bobble\").SetField(\"name\")\n\tsearchRequest = NewSearchRequest(prefixQuery)\n\tsearchResult, err = index.Search(searchRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif searchResult.Total != uint64(1) {\n\t\tt.Errorf(\"expected 1 total hit for prefix query, got %d\", searchResult.Total)\n\t} else {\n\t\tif searchResult.Hits[0].ID != \"d\" {\n\t\t\tt.Errorf(\"expected top hit id 'd', got '%s'\", searchResult.Hits[0].ID)\n\t\t}\n\t}\n\n\tsyntaxQuery := NewSyntaxQuery(\"+name:phone\")\n\tsearchRequest = NewSearchRequest(syntaxQuery)\n\tsearchResult, err = index.Search(searchRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif searchResult.Total != uint64(1) {\n\t\tt.Errorf(\"expected 1 total hit for syntax query, got %d\", searchResult.Total)\n\t} else {\n\t\tif searchResult.Hits[0].ID != \"d\" {\n\t\t\tt.Errorf(\"expected top hit id 'd', got '%s'\", searchResult.Hits[0].ID)\n\t\t}\n\t}\n\n\tmaxAge := 30.0\n\tnumericRangeQuery := NewNumericRangeQuery(nil, &maxAge).SetField(\"age\")\n\tsearchRequest = NewSearchRequest(numericRangeQuery)\n\tsearchResult, err = index.Search(searchRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif searchResult.Total != uint64(2) {\n\t\tt.Errorf(\"expected 2 total hits for numeric range query, got %d\", searchResult.Total)\n\t} else {\n\t\tif searchResult.Hits[0].ID != \"b\" {\n\t\t\tt.Errorf(\"expected top hit id 'b', got '%s'\", searchResult.Hits[0].ID)\n\t\t}\n\t\tif searchResult.Hits[1].ID != \"a\" {\n\t\t\tt.Errorf(\"expected next hit id 'a', got '%s'\", searchResult.Hits[1].ID)\n\t\t}\n\t}\n\n\tstartDate = \"2010-01-01\"\n\tdateRangeQuery 
:= NewDateRangeQuery(&startDate, nil).SetField(\"birthday\")\n\tsearchRequest = NewSearchRequest(dateRangeQuery)\n\tsearchResult, err = index.Search(searchRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif searchResult.Total != uint64(2) {\n\t\tt.Errorf(\"expected 2 total hits for numeric range query, got %d\", searchResult.Total)\n\t} else {\n\t\tif searchResult.Hits[0].ID != \"d\" {\n\t\t\tt.Errorf(\"expected top hit id 'd', got '%s'\", searchResult.Hits[0].ID)\n\t\t}\n\t\tif searchResult.Hits[1].ID != \"c\" {\n\t\t\tt.Errorf(\"expected next hit id 'c', got '%s'\", searchResult.Hits[1].ID)\n\t\t}\n\t}\n\n\t// test that 0 time doesn't get indexed\n\tendDate = \"2010-01-01\"\n\tdateRangeQuery = NewDateRangeQuery(nil, &endDate).SetField(\"birthday\")\n\tsearchRequest = NewSearchRequest(dateRangeQuery)\n\tsearchResult, err = index.Search(searchRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif searchResult.Total != uint64(1) {\n\t\tt.Errorf(\"expected 1 total hit for numeric range query, got %d\", searchResult.Total)\n\t} else {\n\t\tif searchResult.Hits[0].ID != \"b\" {\n\t\t\tt.Errorf(\"expected top hit id 'b', got '%s'\", searchResult.Hits[0].ID)\n\t\t}\n\t}\n\n\t// test behavior of arrays\n\t// make sure we can successfully find by all elements in array\n\ttermQuery = NewTermQuery(\"gopher\").SetField(\"tags\")\n\tsearchRequest = NewSearchRequest(termQuery)\n\tsearchResult, err = index.Search(searchRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tif searchResult.Total != uint64(1) {\n\t\t\tt.Errorf(\"expected 1 total hit for term query, got %d\", searchResult.Total)\n\t\t} else {\n\t\t\tif searchResult.Hits[0].ID != \"a\" {\n\t\t\t\tt.Errorf(\"expected top hit id 'a', got '%s'\", searchResult.Hits[0].ID)\n\t\t\t}\n\t\t}\n\t}\n\n\ttermQuery = NewTermQuery(\"belieber\").SetField(\"tags\")\n\tsearchRequest = NewSearchRequest(termQuery)\n\tsearchResult, err = index.Search(searchRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t} else 
{\n\t\tif searchResult.Total != uint64(1) {\n\t\t\tt.Errorf(\"expected 1 total hit for term query, got %d\", searchResult.Total)\n\t\t} else {\n\t\t\tif searchResult.Hits[0].ID != \"a\" {\n\t\t\t\tt.Errorf(\"expected top hit id 'a', got '%s'\", searchResult.Hits[0].ID)\n\t\t\t}\n\t\t}\n\t}\n\n\ttermQuery = NewTermQuery(\"notintagsarray\").SetField(\"tags\")\n\tsearchRequest = NewSearchRequest(termQuery)\n\tsearchResult, err = index.Search(searchRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif searchResult.Total != uint64(0) {\n\t\tt.Errorf(\"expected 0 total hits\")\n\t}\n\n\t// lookup document a\n\t// expect to find 2 values for field \"tags\"\n\ttagsCount := 0\n\tdoc, err := index.Document(\"a\")\n\tif err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tfor _, f := range doc.Fields {\n\t\t\tif f.Name() == \"tags\" {\n\t\t\t\ttagsCount++\n\t\t\t}\n\t\t}\n\t}\n\tif tagsCount != 2 {\n\t\tt.Errorf(\"expected to find 2 values for tags\")\n\t}\n}",
"func (c App) Index() revel.Result {\n\tusername, _ := c.Session.Get(\"user\")\n\tfulluser, _ := c.Session.Get(\"fulluser\")\n\treturn c.Render(username, fulluser)\n}",
"func (_Casper *CasperCaller) Index(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Casper.contract.Call(opts, out, \"index\")\n\treturn *ret0, err\n}",
"func TestBuildIndex(t *testing.T) {\n\tbuildIndexFromSite(testHugoPath, testIndexPath)\n\tindex := openIndex(t, testIndexPath)\n\tdefer index.Close()\n\tqueryIndex(t, index)\n}",
"func IndexHandeler(w http.ResponseWriter, r *http.Request) {\n\trespond.OK(w, map[string]interface{}{\n\t\t\"name\": \"hotstar-schedular\",\n\t\t\"version\": 1,\n\t})\n}",
"func (cmd *Command) Run(args ...string) error {\n\tfs := flag.NewFlagSet(\"buildtsi\", flag.ExitOnError)\n\tdataDir := fs.String(\"datadir\", \"\", \"data directory\")\n\twalDir := fs.String(\"waldir\", \"\", \"WAL directory\")\n\tfs.IntVar(&cmd.concurrency, \"concurrency\", runtime.GOMAXPROCS(0), \"Number of workers to dedicate to shard index building. Defaults to GOMAXPROCS\")\n\tfs.StringVar(&cmd.databaseFilter, \"database\", \"\", \"optional: database name\")\n\tfs.StringVar(&cmd.retentionFilter, \"retention\", \"\", \"optional: retention policy\")\n\tfs.StringVar(&cmd.shardFilter, \"shard\", \"\", \"optional: shard id\")\n\tfs.Int64Var(&cmd.maxLogFileSize, \"max-log-file-size\", tsdb.DefaultMaxIndexLogFileSize, \"optional: maximum log file size\")\n\tfs.Uint64Var(&cmd.maxCacheSize, \"max-cache-size\", tsdb.DefaultCacheMaxMemorySize, \"optional: maximum cache size\")\n\tfs.IntVar(&cmd.batchSize, \"batch-size\", defaultBatchSize, \"optional: set the size of the batches we write to the index. Setting this can have adverse affects on performance and heap requirements\")\n\tfs.BoolVar(&cmd.Verbose, \"v\", false, \"verbose\")\n\tfs.SetOutput(cmd.Stdout)\n\tif err := fs.Parse(args); err != nil {\n\t\treturn err\n\t} else if fs.NArg() > 0 || *dataDir == \"\" || *walDir == \"\" {\n\t\tfs.Usage()\n\t\treturn nil\n\t}\n\tcmd.Logger = logger.New(cmd.Stderr)\n\n\treturn cmd.run(*dataDir, *walDir)\n}",
"func (c *Monitor) ProfIndex(command string) revel.Result {\n var profs []string\n\n if command != \"\" {\n buf := bytes.NewBuffer([]byte{})\n\n toolbox.ProcessInput(command, buf)\n\n prof := buf.String()\n profs = strings.Split(prof, \"\\n\")\n }\n\n return c.Render(command, profs)\n}",
"func indexHandler(w http.ResponseWriter, r *http.Request, s *Server) {\n\tif r.Method == \"GET\" {\n\t\t//create the source upload page with available languages and metrics\n\t\tpage := &Page{Config: s.Config, Extensions: s.Analyzer.Extensions(), Metrics: s.Analyzer.Metrics, Languages: s.Analyzer.Languages}\n\n\t\t//display the source upload page\n\t\ts.Template.ExecuteTemplate(w, \"index.html\", page)\n\t}\n}",
"func index() string {\n\tvar buffer bytes.Buffer\n\tvar id = 0\n\tvar class = 0\n\tbuffer.WriteString(indexTemplate)\n\tlock.Lock()\n\tfor folderName, folder := range folders {\n\t\tbuffer.WriteString(fmt.Sprintf(\"<h2>%s</h2>\", folderName))\n\t\tfor _, source := range folder {\n\t\t\tif !anyNonRead(source) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsort.Sort(source)\n\t\t\tbuffer.WriteString(fmt.Sprintf(\"<h3>%s</h3>\", source.Title))\n\t\t\tbuffer.WriteString(fmt.Sprintf(`<button onClick=\"hideAll('source_%d'); return false\">Mark all as read</button>`, class))\n\t\t\tbuffer.WriteString(\"<ul>\")\n\n\t\t\tfor _, entry := range source.Entries {\n\t\t\t\tif entry.Read {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(`<li id=\"entry_%d\">`, id))\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(`<button class=\"source_%d\" onClick=\"hide('entry_%d', '%s'); return false\">Mark Read</button> `, class, id, entry.Url))\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(`<a href=\"%s\">%s</a>`, entry.Url, entry.Title))\n\t\t\t\tbuffer.WriteString(\"</li>\")\n\t\t\t\tid += 1\n\t\t\t}\n\t\t\tbuffer.WriteString(\"</ul>\")\n\t\t\tclass += 1\n\t\t}\n\t}\n\tlock.Unlock()\n\tbuffer.WriteString(\"</body></html>\")\n\treturn buffer.String()\n}",
"func Index(c *gin.Context) {\n\n\tw := c.Writer\n\tr := c.Request\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\tp, lastMod, err := tools.ReadFileIfModified(time.Time{})\n\tif err != nil {\n\t\tp = []byte(err.Error())\n\t\tlastMod = time.Unix(0, 0)\n\t}\n\tvar v = struct {\n\t\tHost string\n\t\tData string\n\t\tLastMod string\n\t}{\n\t\tr.Host,\n\t\tstring(p),\n\t\tstrconv.FormatInt(lastMod.UnixNano(), 16),\n\t}\n\tindexTempl.Execute(w, &v)\n}",
"func (s *BaseCymbolListener) ExitIndex(ctx *IndexContext) {}",
"func (c *FromCommand) Index() int {\n\treturn c.cmd.index\n}",
"func (ms *MusicServer) Index(response http.ResponseWriter, request *http.Request) {\n\t// Always check addressMask. If no define, mask is 0.0.0.0 and nothing is accepted (except localhost)\n\tif !ms.checkRequester(request) {\n\t\treturn\n\t}\n\tif ms.musicFolder != \"\" {\n\t\ttextIndexer := music.IndexArtists(ms.folder)\n\t\tms.indexManager.UpdateIndexer(textIndexer)\n\t}\n}",
"func (h *Root) indexDefault(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {\n\treturn h.Renderer.Render(ctx, w, r, tmplLayoutSite, \"site-index.gohtml\", web.MIMETextHTMLCharsetUTF8, http.StatusOK, nil)\n}",
"func Index(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Hola, este es el inicio\")\n}",
"func BuildIndex(dir string){\n\tconfig := Config()\n\tbuilder := makeIndexBuilder(config)\n\tbuilder.Build(\"/\")\n\tsort.Strings(documents)\n\tsave(documents, config)\n}",
"func Index(c echo.Context) error {\n\treturn c.Render(http.StatusOK, \"index\", echo.Map{})\n}",
"func index(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, \"Welcome!\\n\")\n}",
"func (api *MediaApi) index(c *routing.Context) error {\n\t// --- fetch search data\n\tsearchFields := []string{\"title\", \"type\", \"path\", \"created\", \"modified\"}\n\tsearchData := utils.GetSearchConditions(c, searchFields)\n\t// ---\n\n\t// --- fetch sort data\n\tsortFields := []string{\"title\", \"type\", \"path\", \"created\", \"modified\"}\n\tsortData := utils.GetSortFields(c, sortFields)\n\t// ---\n\n\ttotal, _ := api.dao.Count(searchData)\n\n\tlimit, page := utils.GetPaginationSettings(c, total)\n\n\tutils.SetPaginationHeaders(c, limit, total, page)\n\n\titems := []models.Media{}\n\n\tif total > 0 {\n\t\titems, _ = api.dao.GetList(limit, limit*(page-1), searchData, sortData)\n\n\t\titems = daos.ToAbsMediaPaths(items)\n\t}\n\n\treturn c.Write(items)\n}",
"func (h *HTTPApi) listIndex(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tcollections := h.storageNode.Datasources[ps.ByName(\"datasource\")].GetMeta().Databases[ps.ByName(\"dbname\")].ShardInstances[ps.ByName(\"shardinstance\")].Collections[ps.ByName(\"collectionname\")]\n\n\t// Now we need to return the results\n\tif bytes, err := json.Marshal(collections.Indexes); err != nil {\n\t\t// TODO: log this better?\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t} else {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(bytes)\n\t}\n}",
"func httpServeIndex(w http.ResponseWriter, r *http.Request) {\n\tbox := rice.MustFindBox(\"public\")\n\thtml := box.MustString(\"index.html\")\n\tcss := box.MustString(\"dist/styles.css\")\n\tt := template.New(\"index\")\n\tt, _ = t.Parse(html)\n\tt.Execute(w, &HTMLContent{CSS: template.CSS(css)})\n}",
"func (prc *PipelineRunsController) Index(c *gin.Context, size, page, offset int) {\n\tjobSpec := job.Job{}\n\terr := jobSpec.SetID(c.Param(\"ID\"))\n\tif err != nil {\n\t\tjsonAPIError(c, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\n\tpipelineRuns, count, err := prc.App.GetJobORM().PipelineRunsByJobID(jobSpec.ID, offset, size)\n\tif err != nil {\n\t\tjsonAPIError(c, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tpaginatedResponse(c, \"offChainReportingPipelineRun\", size, page, pipelineRuns, count, err)\n}",
"func GetIndex(w http.ResponseWriter, req *http.Request, app *App) {\n\tscheme := \"http\"\n\tif req.TLS != nil {\n\t\tscheme = \"https\"\n\t}\n\tbase := []string{scheme, \"://\", req.Host, app.Config.General.Prefix}\n\trender(w, \"index\", map[string]interface{}{\"base\": strings.Join(base, \"\"), \"hideNav\": true}, app)\n}"
] | [
"0.72352827",
"0.7231066",
"0.6974629",
"0.6947217",
"0.6895008",
"0.68026304",
"0.66526365",
"0.65366894",
"0.65255636",
"0.6510996",
"0.6510996",
"0.64903265",
"0.6484665",
"0.648378",
"0.64530957",
"0.64055777",
"0.6405252",
"0.6393524",
"0.63744456",
"0.63676375",
"0.6320205",
"0.63021904",
"0.62569594",
"0.6253341",
"0.6244095",
"0.62348586",
"0.62348586",
"0.62327546",
"0.62201214",
"0.6211135",
"0.61948234",
"0.6189748",
"0.6171233",
"0.6165229",
"0.61625713",
"0.616139",
"0.61490697",
"0.6129594",
"0.61176336",
"0.6107657",
"0.60832256",
"0.6074175",
"0.6068898",
"0.60542756",
"0.60502326",
"0.6048729",
"0.6035932",
"0.60322875",
"0.60163194",
"0.599126",
"0.59909475",
"0.59896123",
"0.5961652",
"0.59614617",
"0.595707",
"0.5956077",
"0.5941416",
"0.59408545",
"0.5939566",
"0.59368587",
"0.59363294",
"0.59136575",
"0.5908333",
"0.5907802",
"0.59019977",
"0.5901875",
"0.5879725",
"0.58652276",
"0.58628803",
"0.58622664",
"0.58622664",
"0.58589417",
"0.58528316",
"0.585175",
"0.58508724",
"0.5848384",
"0.58472973",
"0.5842007",
"0.5835645",
"0.5830665",
"0.5827591",
"0.5826751",
"0.5822455",
"0.5818282",
"0.58047616",
"0.5804595",
"0.57889897",
"0.57863265",
"0.5783793",
"0.5779801",
"0.5777006",
"0.5776255",
"0.5774326",
"0.5773833",
"0.5766765",
"0.5761253",
"0.57577443",
"0.5747304",
"0.5745089",
"0.5742563"
] | 0.7970374 | 0 |
/ Returns an elliptic.CurveWrapper around this group. The elliptic.CurveParams returned by the .Params() method the Curve should not be used for doing ScalarMult, etc.! | Возвращает объект elliptic.CurveWrapper вокруг этой группы. Эллиптические.CurveParams, возвращаемые методом .Params(), не должны использоваться для ScalarMult и т.д.! | func (m *ModulusGroup) AsCurve() elliptic.Curve {
return &asCurve{m}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (s Keygen) Curve() elliptic.Curve {\n\treturn s.group\n}",
"func getCurve() elliptic.Curve {\n return elliptic.P256()\n}",
"func CurveParamsParams(curve *elliptic.CurveParams,) *elliptic.CurveParams",
"func NewCurve(xyz XYZer) Curve {\n\tc := Curve{}\n\tc.points.minXYZ.X, c.points.minXYZ.Y, c.points.minXYZ.Z = xyz.XYZ(0)\n\tc.points.maxXYZ.X, c.points.maxXYZ.Y, c.points.maxXYZ.Z = xyz.XYZ(0)\n\tc.points.XYZs = CopyXYZs(xyz)\n\tfor i := range c.points.XYZs {\n\t\tx, y, z := c.points.XYZ(i)\n\t\tupdateBounds(&c.points, x, y, z)\n\t}\n\treturn c\n}",
"func EllipticCurve() elliptic.Curve {\n\treturn p256Strategy()\n}",
"func getECDSACurve(scheme SignatureScheme) elliptic.Curve {\n\tswitch scheme {\n\tcase ECDSAWithP256AndSHA256:\n\t\treturn elliptic.P256()\n\tcase ECDSAWithP384AndSHA384:\n\t\treturn elliptic.P384()\n\tcase ECDSAWithP521AndSHA512:\n\t\treturn elliptic.P521()\n\tdefault:\n\t\treturn nil\n\t}\n}",
"func (c *curve) init(self kyber.Group, p *Param, fullGroup bool,\n\tnull, base point) *curve {\n\tc.self = self\n\tc.Param = *p\n\tc.full = fullGroup\n\tc.null = null\n\n\t// Edwards curve parameters as ModInts for convenience\n\tc.a.Init(&p.A, &p.P)\n\tc.d.Init(&p.D, &p.P)\n\n\t// Cofactor\n\tc.cofact.Init64(int64(p.R), &c.P)\n\n\t// Determine the modulus for scalars on this curve.\n\t// Note that we do NOT initialize c.order with Init(),\n\t// as that would normalize to the modulus, resulting in zero.\n\t// Just to be sure it's never used, we leave c.order.M set to nil.\n\t// We want it to be in a ModInt so we can pass it to P.Mul(),\n\t// but the scalar's modulus isn't needed for point multiplication.\n\tif fullGroup {\n\t\t// Scalar modulus is prime-order times the ccofactor\n\t\tc.order.V.SetInt64(int64(p.R)).Mul(&c.order.V, &p.Q)\n\t} else {\n\t\tc.order.V.Set(&p.Q) // Prime-order subgroup\n\t}\n\n\t// Useful ModInt constants for this curve\n\tc.zero.Init64(0, &c.P)\n\tc.one.Init64(1, &c.P)\n\n\t// Identity element is (0,1)\n\tnull.initXY(zero, one, self)\n\n\t// Base point B\n\tvar bx, by *big.Int\n\tif !fullGroup {\n\t\tbx, by = &p.PBX, &p.PBY\n\t} else {\n\t\tbx, by = &p.FBX, &p.FBY\n\t\tbase.initXY(&p.FBX, &p.FBY, self)\n\t}\n\tif by.Sign() == 0 {\n\t\t// No standard base point was defined, so pick one.\n\t\t// Find the lowest-numbered y-coordinate that works.\n\t\t//println(\"Picking base point:\")\n\t\tvar x, y mod.Int\n\t\tfor y.Init64(2, &c.P); ; y.Add(&y, &c.one) {\n\t\t\tif !c.solveForX(&x, &y) {\n\t\t\t\tcontinue // try another y\n\t\t\t}\n\t\t\tif c.coordSign(&x) != 0 {\n\t\t\t\tx.Neg(&x) // try positive x first\n\t\t\t}\n\t\t\tbase.initXY(&x.V, &y.V, self)\n\t\t\tif c.validPoint(base) {\n\t\t\t\tbreak // got one\n\t\t\t}\n\t\t\tx.Neg(&x) // try -bx\n\t\t\tif c.validPoint(base) {\n\t\t\t\tbreak // got one\n\t\t\t}\n\t\t}\n\t\t//println(\"BX: \"+x.V.String())\n\t\t//println(\"BY: \"+y.V.String())\n\t\tbx, by = &x.V, &y.V\n\t}\n\tbase.initXY(bx, 
by, self)\n\n\t// Uniform representation encoding methods,\n\t// only useful when using the full group.\n\t// (Points taken from the subgroup would be trivially recognizable.)\n\tif fullGroup {\n\t\tif p.Elligator1s.Sign() != 0 {\n\t\t\tc.hide = new(el1param).init(c, &p.Elligator1s)\n\t\t} else if p.Elligator2u.Sign() != 0 {\n\t\t\tc.hide = new(el2param).init(c, &p.Elligator2u)\n\t\t}\n\t}\n\n\t// Sanity checks\n\tif !c.validPoint(null) {\n\t\tpanic(\"invalid identity point \" + null.String())\n\t}\n\tif !c.validPoint(base) {\n\t\tpanic(\"invalid base point \" + base.String())\n\t}\n\n\treturn c\n}",
"func GetCurve(s string) elliptic.Curve {\n\ts3 := s[len(s)-3:]\n\tif s3 == \"256\" {\n\t\treturn elliptic.P256()\n\t} else if s3 == \"384\" {\n\t\treturn elliptic.P384()\n\t} else if s3 == \"521\" {\n\t\treturn elliptic.P521()\n\t}\n\treturn elliptic.P224()\n}",
"func newPrivateKeyOnCurve(c elliptic.Curve) (*PrivateKey, error) {\n\tpk, err := ecdsa.GenerateKey(c, rand.Reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &PrivateKey{*pk}, nil\n}",
"func Generic(c elliptic.Curve) KeyExchange {\n\tif c == nil {\n\t\tpanic(\"ecdh: curve is nil\")\n\t}\n\treturn genericCurve{curve: c}\n}",
"func curveByName(name string) elliptic.Curve {\n\tswitch name {\n\tcase \"P-224\":\n\t\treturn elliptic.P224()\n\tcase \"P-256\":\n\t\treturn elliptic.P256()\n\tcase \"P-384\":\n\t\treturn elliptic.P384()\n\tcase \"P-521\":\n\t\treturn elliptic.P521()\n\tcase \"P-256K\", \"SECP256K1\", \"secp256k1\":\n\t\treturn secp256k1.S256()\n\tdefault:\n\t\treturn nil\n\t}\n}",
"func CurveParamsScalarMult(curve *elliptic.CurveParams, Bx, By *big.Int, k []byte) (*big.Int, *big.Int)",
"func CurveParamsScalarBaseMult(curve *elliptic.CurveParams, k []byte) (*big.Int, *big.Int)",
"func CurveByName(curveName string) ec.Curve {\n\tswitch curveName {\n\tcase \"P-224\":\n\t\treturn ec.P224()\n\tcase \"P-256\":\n\t\treturn ec.P256()\n\tcase \"P-384\":\n\t\treturn ec.P384()\n\tcase \"P-521\":\n\t\treturn ec.P521()\n\tdefault:\n\t\treturn nil\n\t}\n}",
"func getCurve(curve string) (elliptic.Curve, string, error) {\n\tswitch curve {\n\tcase \"secp224r1\": // secp224r1: NIST/SECG curve over a 224 bit prime field\n\t\treturn elliptic.P224(), \"secp224r1\", nil\n\tcase \"prime256v1\": // prime256v1: X9.62/SECG curve over a 256 bit prime field\n\t\treturn elliptic.P256(), \"prime256v1\", nil\n\tcase \"secp384r1\": // secp384r1: NIST/SECG curve over a 384 bit prime field\n\t\treturn elliptic.P384(), \"secp384r1\", nil\n\tcase \"secp521r1\": // secp521r1: NIST/SECG curve over a 521 bit prime field\n\t\treturn elliptic.P521(), \"secp521r1\", nil\n\tdefault:\n\t\treturn nil, \"\", fmt.Errorf(\"%s\", helpers.RFgB(\"incorrect curve size passed\"))\n\t}\n}",
"func (this *NurbsCurve) clone() *NurbsCurve {\n\treturn &NurbsCurve{\n\t\tdegree: this.degree,\n\t\tcontrolPoints: append([]HomoPoint(nil), this.controlPoints...),\n\t\tknots: this.knots.Clone(),\n\t}\n}",
"func CurveParamsAdd(curve *elliptic.CurveParams, x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int)",
"func CurveParamsDouble(curve *elliptic.CurveParams, x1, y1 *big.Int) (*big.Int, *big.Int)",
"func NewECDSA(c config.Reader, name string, curve string) (KeyAPI, error) {\n\t// Validate the type of curve passed\n\tec, ty, err := getCurve(curve)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Generate the private key with our own io.Reader\n\tpri, err := ecdsa.GenerateKey(ec, crypto.Reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Extract the public key\n\tpub := &pri.PublicKey\n\n\t// PEM #1 - encoding\n\tpemKey, pemPub, perr := enc.Encode(pri, pub)\n\tif perr != nil {\n\t\treturn nil, perr\n\t}\n\n\t// Create the key struct object\n\tkey := &key{\n\t\tGID: api.GenerateUUID(),\n\t\tName: name,\n\t\tSlug: helpers.NewHaikunator().Haikunate(),\n\t\tKeyType: fmt.Sprintf(\"ecdsa.PrivateKey <==> %s\", ty),\n\t\tStatus: api.StatusActive,\n\t\tPublicKeyB64: base64.StdEncoding.EncodeToString([]byte(pemPub)),\n\t\tPrivateKeyB64: base64.StdEncoding.EncodeToString([]byte(pemKey)),\n\t\tFingerprintMD5: enc.FingerprintMD5(pub),\n\t\tFingerprintSHA: enc.FingerprintSHA256(pub),\n\t\tCreatedAt: time.Now(),\n\t}\n\n\t// Write the entire key object to FS\n\tif err := key.writeToFS(c, pri, pub); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn key, nil\n}",
"func (c EasyCert) newPrivateKey() (crypto.PrivateKey, error) {\n\tif c.ec != \"\" {\n\t\tvar curve elliptic.Curve\n\t\tswitch c.ec {\n\t\tcase \"224\":\n\t\t\tcurve = elliptic.P224()\n\t\tcase \"384\":\n\t\t\tcurve = elliptic.P384()\n\t\tcase \"521\":\n\t\t\tcurve = elliptic.P521()\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"Unknown elliptic curve: %q\", c.ec)\n\t\t}\n\t\treturn ecdsa.GenerateKey(curve, rand.Reader)\n\t}\n\treturn rsa.GenerateKey(rand.Reader, c.rsaBits)\n}",
"func (d MapDescriptor) Get(e C.EllCurve) MapToCurve {\n\tswitch d.ID {\n\tcase BF:\n\t\treturn NewBF(e)\n\tcase SSWU:\n\t\tz := e.Field().Elt(d.Z)\n\t\treturn NewSSWU(e, z, d.Iso)\n\tcase SVDW:\n\t\treturn NewSVDW(e)\n\tcase ELL2:\n\t\treturn NewElligator2(e)\n\tdefault:\n\t\tpanic(\"Mapping not supported\")\n\t}\n}",
"func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *ecdsa.PrivateKey, err error) {\n\tvar privKey ecPrivateKey\n\tif _, err := asn1.Unmarshal(der, &privKey); err != nil {\n\t\treturn nil, errors.New(\"x509: failed to parse EC private key: \" + err.Error())\n\t}\n\tif privKey.Version != ecPrivKeyVersion {\n\t\treturn nil, fmt.Errorf(\"x509: unknown EC private key version %d\", privKey.Version)\n\t}\n\n\tvar curve elliptic.Curve\n\tif namedCurveOID != nil {\n\t\tcurve = namedCurveFromOID(*namedCurveOID)\n\t} else {\n\t\tcurve = namedCurveFromOID(privKey.NamedCurveOID)\n\t}\n\tif curve == nil {\n\t\treturn nil, errors.New(\"x509: unknown elliptic curve\")\n\t}\n\n\tk := new(big.Int).SetBytes(privKey.PrivateKey)\n\tcurveOrder := curve.Params().N\n\tif k.Cmp(curveOrder) >= 0 {\n\t\treturn nil, errors.New(\"x509: invalid elliptic curve private key value\")\n\t}\n\tpriv := new(ecdsa.PrivateKey)\n\tpriv.Curve = curve\n\tpriv.D = k\n\n\tprivateKey := make([]byte, (curveOrder.BitLen()+7)/8)\n\n\t// Some private keys have leading zero padding. This is invalid\n\t// according to [SEC1], but this code will ignore it.\n\tfor len(privKey.PrivateKey) > len(privateKey) {\n\t\tif privKey.PrivateKey[0] != 0 {\n\t\t\treturn nil, errors.New(\"x509: invalid private key length\")\n\t\t}\n\t\tprivKey.PrivateKey = privKey.PrivateKey[1:]\n\t}\n\n\t// Some private keys remove all leading zeros, this is also invalid\n\t// according to [SEC1] but since OpenSSL used to do this, we ignore\n\t// this too.\n\tcopy(privateKey[len(privateKey)-len(privKey.PrivateKey):], privKey.PrivateKey)\n\tpriv.X, priv.Y = curve.ScalarBaseMult(privateKey)\n\n\treturn priv, nil\n}",
"func PeelCurve() model2d.Curve {\n\treturn model2d.JoinedCurve{\n\t\tmodel2d.BezierCurve{\n\t\t\tmodel2d.XY(-1.0, 0.0),\n\t\t\tmodel2d.XY(-0.6, 0.6),\n\t\t\tmodel2d.XY(0.0, 0.0),\n\t\t},\n\t\tmodel2d.BezierCurve{\n\t\t\tmodel2d.XY(0.0, 0.0),\n\t\t\tmodel2d.XY(0.6, -0.6),\n\t\t\tmodel2d.XY(1.0, 0.0),\n\t\t},\n\t}\n}",
"func (phi *isogeny4) GenerateCurve(p *ProjectivePoint) CurveCoefficientsEquiv {\n\tvar coefEq CurveCoefficientsEquiv\n\tvar xp4, zp4 = &p.X, &p.Z\n\tvar K1, K2, K3 = &phi.K1, &phi.K2, &phi.K3\n\n\top := phi.Field\n\top.Sub(K2, xp4, zp4)\n\top.Add(K3, xp4, zp4)\n\top.Square(K1, zp4)\n\top.Add(K1, K1, K1)\n\top.Square(&coefEq.C, K1)\n\top.Add(K1, K1, K1)\n\top.Square(&coefEq.A, xp4)\n\top.Add(&coefEq.A, &coefEq.A, &coefEq.A)\n\top.Square(&coefEq.A, &coefEq.A)\n\treturn coefEq\n}",
"func genericParamsForCurve(c Curve) *CurveParams {\n\td := *(c.Params())\n\treturn &d\n}",
"func newProtoECDSAPrivateKey(publicKey *ecdsapb.EcdsaPublicKey, keyValue []byte) *ecdsapb.EcdsaPrivateKey {\n\treturn &ecdsapb.EcdsaPrivateKey{\n\t\tVersion: 0,\n\t\tPublicKey: publicKey,\n\t\tKeyValue: keyValue,\n\t}\n}",
"func toECDSA(curveName string, d []byte, strict bool) (*ecdsa.PrivateKey, error) {\n\tpriv := new(ecdsa.PrivateKey)\n\n\tpriv.PublicKey.Curve = CurveType(curveName)\n\tif strict && 8*len(d) != priv.Params().BitSize {\n\t\treturn nil, fmt.Errorf(\"invalid length, need %d bits\", priv.Params().BitSize)\n\t}\n\tpriv.D = new(big.Int).SetBytes(d)\n\n\t// The priv.D must < N,secp256k1N\n\tif priv.D.Cmp(priv.PublicKey.Curve.Params().N) >= 0 {\n\t\treturn nil, fmt.Errorf(\"invalid private key, >=N\")\n\t}\n\t// The priv.D must not be zero or negative.\n\tif priv.D.Sign() <= 0 {\n\t\treturn nil, fmt.Errorf(\"invalid private key, zero or negative\")\n\t}\n\n\tpriv.PublicKey.X, priv.PublicKey.Y = priv.PublicKey.Curve.ScalarBaseMult(d)\n\tif priv.PublicKey.X == nil {\n\t\treturn nil, errors.New(\"invalid private key\")\n\t}\n\treturn priv, nil\n}",
"func (x *ed25519_t) New(public PublicKey, private PrivateKey) (Credentials, error) {\n\n\tvar credentials Ed25519Credentials\n\tvar err error\n\n\terr = credentials.SetPublicKey(public)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = credentials.SetPrivateKey(private)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &credentials, nil\n\n}",
"func NewGRPCClient(conn *grpc.ClientConn, logger log.Logger) mathservice2.Service {\n\tvar divideEndpoint endpoint.Endpoint\n\t{\n\t\tdivideEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"pb.Math\",\n\t\t\t\"Divide\",\n\t\t\tencodeGRPCMathOpRequest,\n\t\t\tdecodeGRPCMathOpResponse,\n\t\t\tpb.MathOpReply{},\n\t\t).Endpoint()\n\t}\n\tvar maxEndpoint endpoint.Endpoint\n\t{\n\t\tmaxEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"pb.Math\",\n\t\t\t\"Max\",\n\t\t\tencodeGRPCMathOpRequest,\n\t\t\tdecodeGRPCMathOpResponse,\n\t\t\tpb.MathOpReply{},\n\t\t).Endpoint()\n\t}\n\tvar minEndpoint endpoint.Endpoint\n\t{\n\t\tminEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"pb.Math\",\n\t\t\t\"Min\",\n\t\t\tencodeGRPCMathOpRequest,\n\t\t\tdecodeGRPCMathOpResponse,\n\t\t\tpb.MathOpReply{},\n\t\t).Endpoint()\n\t}\n\tvar multiplyEndpoint endpoint.Endpoint\n\t{\n\t\tmultiplyEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"pb.Math\",\n\t\t\t\"Multiply\",\n\t\t\tencodeGRPCMathOpRequest,\n\t\t\tdecodeGRPCMathOpResponse,\n\t\t\tpb.MathOpReply{},\n\t\t).Endpoint()\n\t}\n\tvar powEndpoint endpoint.Endpoint\n\t{\n\t\tpowEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"pb.Math\",\n\t\t\t\"Pow\",\n\t\t\tencodeGRPCMathOpRequest,\n\t\t\tdecodeGRPCMathOpResponse,\n\t\t\tpb.MathOpReply{},\n\t\t).Endpoint()\n\t}\n\tvar subtractEndpoint endpoint.Endpoint\n\t{\n\t\tsubtractEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"pb.Math\",\n\t\t\t\"Subtract\",\n\t\t\tencodeGRPCMathOpRequest,\n\t\t\tdecodeGRPCMathOpResponse,\n\t\t\tpb.MathOpReply{},\n\t\t).Endpoint()\n\t}\n\tvar sumEndpoint endpoint.Endpoint\n\t{\n\t\tsumEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"pb.Math\",\n\t\t\t\"Sum\",\n\t\t\tencodeGRPCMathOpRequest,\n\t\t\tdecodeGRPCMathOpResponse,\n\t\t\tpb.MathOpReply{},\n\t\t).Endpoint()\n\t}\n\n\t// Returning the endpoint.Set as a service.Service relies on the\n\t// endpoint.Set implementing the Service methods. 
That's just a simple bit\n\t// of glue code.\n\treturn mathendpoint2.Set{\n\t\tDivideEndpoint: divideEndpoint,\n\t\tMaxEndpoint: maxEndpoint,\n\t\tMinEndpoint: minEndpoint,\n\t\tMultiplyEndpoint: multiplyEndpoint,\n\t\tPowEndpoint: powEndpoint,\n\t\tSubtractEndpoint: subtractEndpoint,\n\t\tSumEndpoint: sumEndpoint,\n\t}\n}",
"func NewEcdsaParty(prv *PrvKey) (*EcdsaParty, error) {\n\tpub := prv.PubKey()\n\tcurve := pub.Curve\n\tN := curve.Params().N\n\n\t// Paillier key pair.\n\tencpub, encprv, err := paillier.GenerateKeyPair(bitlen)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Homomorphic Encryption of party pk.\n\tencpk, err := encpub.Encrypt(prv.D)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &EcdsaParty{\n\t\tN: N,\n\t\tprv: prv,\n\t\tpub: pub,\n\t\tcurve: curve,\n\t\tencpub: encpub,\n\t\tencprv: encprv,\n\t\tencpk: encpk,\n\t}, nil\n}",
"func New(compressed bool) *PrivateKey {\n\tbuf := make([]byte, 32)\n\tz := new(big.Int)\n\n\tfor z.Cmp(big.NewInt(1)) == -1 || z.Cmp(curve.Params().N) == 1 {\n\t\t_, err := rand.Read(buf)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tz.SetBytes(buf)\n\t}\n\n\tKxInt, KyInt := curve.ScalarBaseMult(z.Bytes())\n\tpubKey := &PublicKey{\n\t\tx: KxInt,\n\t\ty: KyInt,\n\t\tcompressed: compressed,\n\t}\n\n\treturn &PrivateKey{\n\t\tk: z,\n\t\tpubkey: pubKey,\n\t}\n}",
"func NewPrivateKey(curve elliptic.Curve) (*PrivateKey, error) {\n\tkey, err := ecdsa.GenerateKey(curve, rand.Reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn (*PrivateKey)(key), nil\n}",
"func S224() *Curve {\n\tinitonce.Do(initAll)\n\treturn secp224k1\n}",
"func NewKey() (*Key, *big.Int, error) {\n\t// Use the DSA crypto code to generate a key pair. For testing\n\t// purposes, we'll use (2048,224) instead of (2048,160) as used by the\n\t// current Helios implementation\n\tparams := new(dsa.Parameters)\n\tif err := dsa.GenerateParameters(params, rand.Reader, dsa.L2048N224); err != nil {\n\t\t// glog.Error(\"Couldn't generate DSA parameters for the ElGamal group\")\n\t\treturn nil, nil, err\n\t}\n\n\treturn NewKeyFromParams(params.G, params.P, params.Q)\n}",
"func newKeyPair() (ecdsa.PrivateKey, []byte) {\n\t// ECC generate private key\n\tcurve := elliptic.P256()\n\tprivate, err := ecdsa.GenerateKey(curve, rand.Reader)\n\tlog.Println(\"--------\", private)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\t// private key generate public key\n\tpubKey := append(private.PublicKey.X.Bytes(), private.PublicKey.Y.Bytes()...)\n\treturn *private, pubKey\n}",
"func newEcdsaKeyPair(config CreateKeyPairConfig) (KeyPair, error) {\n\tvar curve elliptic.Curve\n\n\tswitch config.Bits {\n\tcase 0:\n\t\tconfig.Bits = 521\n\t\tfallthrough\n\tcase 521:\n\t\tcurve = elliptic.P521()\n\tcase 384:\n\t\tcurve = elliptic.P384()\n\tcase 256:\n\t\tcurve = elliptic.P256()\n\tcase 224:\n\t\t// Not supported by \"golang.org/x/crypto/ssh\".\n\t\treturn KeyPair{}, fmt.Errorf(\"golang.org/x/crypto/ssh does not support %d bits\", config.Bits)\n\tdefault:\n\t\treturn KeyPair{}, fmt.Errorf(\"crypto/elliptic does not support %d bits\", config.Bits)\n\t}\n\n\tprivateKey, err := ecdsa.GenerateKey(curve, rand.Reader)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tsshPublicKey, err := gossh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tprivateRaw, err := x509.MarshalECPrivateKey(privateKey)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tprivatePem, err := rawPemBlock(&pem.Block{\n\t\tType: \"EC PRIVATE KEY\",\n\t\tHeaders: nil,\n\t\tBytes: privateRaw,\n\t})\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\treturn KeyPair{\n\t\tPrivateKeyPemBlock: privatePem,\n\t\tPublicKeyAuthorizedKeysLine: authorizedKeysLine(sshPublicKey, config.Comment),\n\t\tComment: config.Comment,\n\t}, nil\n}",
"func GetECPrivateKey(raw []byte) (*ecdsa.PrivateKey, error) {\n\tdecoded, _ := pem.Decode(raw)\n\tif decoded == nil {\n\t\treturn nil, errors.New(\"Failed to decode the PEM-encoded ECDSA key\")\n\t}\n\tECPrivKey, err := x509.ParseECPrivateKey(decoded.Bytes)\n\tif err == nil {\n\t\treturn ECPrivKey, nil\n\t}\n\tkey, err2 := x509.ParsePKCS8PrivateKey(decoded.Bytes)\n\tif err2 == nil {\n\t\tswitch key.(type) {\n\t\tcase *ecdsa.PrivateKey:\n\t\t\treturn key.(*ecdsa.PrivateKey), nil\n\t\tcase *rsa.PrivateKey:\n\t\t\treturn nil, errors.New(\"Expecting EC private key but found RSA private key\")\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"Invalid private key type in PKCS#8 wrapping\")\n\t\t}\n\t}\n\treturn nil, errors.Wrap(err, \"Failed parsing EC private key\")\n}",
"func mkKey(ecdsaCurve string, rsaBits int) (interface{}, error) {\n\tvar priv interface{}\n\tvar err error\n\tswitch ecdsaCurve {\n\tcase \"\":\n\t\tpriv, err = rsa.GenerateKey(rand.Reader, rsaBits)\n\tcase \"P224\":\n\t\tpriv, err = ecdsa.GenerateKey(elliptic.P224(), rand.Reader)\n\tcase \"P256\":\n\t\tpriv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n\tcase \"P384\":\n\t\tpriv, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader)\n\tcase \"P521\":\n\t\tpriv, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader)\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"Unrecognized elliptic curve: %q\", ecdsaCurve)\n\t\tos.Exit(1)\n\t}\n\treturn priv, err\n}",
"func (priv *PrivateKey) derive() (pub *PublicKey) {\n\t/* See Certicom's SEC1 3.2.1, pg.23 */\n\n\t/* Derive public key from Q = d*G */\n\tQ := secp256k1.ScalarBaseMult(priv.D)\n\n\t/* Check that Q is on the curve */\n\tif !secp256k1.IsOnCurve(Q) {\n\t\tpanic(\"Catastrophic math logic failure in public key derivation.\")\n\t}\n\n\tpriv.X = Q.X\n\tpriv.Y = Q.Y\n\n\treturn &priv.PublicKey\n}",
"func ECCPub(parms *TPMSECCParms, pub *TPMSECCPoint) (*ECDHPub, error) {\n\tcurve, err := parms.CurveID.Curve()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ECDHPub{\n\t\tCurve: curve,\n\t\tX: big.NewInt(0).SetBytes(pub.X.Buffer),\n\t\tY: big.NewInt(0).SetBytes(pub.Y.Buffer),\n\t}, nil\n}",
"func (ec *ECPoint) ScalarMult(base *ECPoint, k *big.Int) *ECPoint {\n\tec.X, ec.Y = base.Curve.ScalarMult(base.X, base.Y, k.Bytes())\n\tec.Curve = base.Curve\n\tec.checkNil()\n\n\treturn ec\n}",
"func newPrivateKey() (crypto.Signer, error) {\n\treturn ecdsa.GenerateKey(ellipticCurve, crand.Reader)\n}",
"func generateKeyPair(algo string, ecCurve string) (privateKey interface{}, publicKey interface{}, err error) {\n\n // Make them case-insensitive\n switch strings.ToUpper(algo) {\n // If RSA, generate a pair of RSA keys\n case \"RSA\":\n // rsa.GenerateKey(): https://golang.org/pkg/crypto/rsa/#GenerateKey\n // Return value is of type *rsa.PrivateKey\n privateKey, err = rsa.GenerateKey(rand.Reader, 2048) // by default create a 2048 bit key\n\n // If ECDSA, use a provided curve\n case \"ECDSA\":\n // First check if ecCurve is provided\n if ecCurve == \"\" {\n return nil, nil, errors.New(\"ECDSA needs a curve\")\n }\n // Then generate the key based on the curve\n // Curves: https://golang.org/pkg/crypto/elliptic/#Curve\n // ecdsa.GenerateKey(): https://golang.org/pkg/crypto/ecdsa/#GenerateKey\n // Return value is of type *ecdsa.PrivateKey\n switch strings.ToUpper(ecCurve) {\n case \"P224\":\n privateKey, err = ecdsa.GenerateKey(elliptic.P224(), rand.Reader)\n case \"P256\":\n privateKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n case \"P384\":\n \tprivateKey, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader)\n case \"P521\":\n \tprivateKey, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader)\n\n // If the curve is invalid\n default:\n return nil, nil, errors.New(\"Unrecognized curve, valid values are P224, P256, P384 and P521\")\n }\n\n // If neither RSA nor ECDSA return an error\n default:\n return nil, nil, errors.New(\"Unrecognized algorithm, valid options are RSA and ECDSA\")\n }\n\n // If we get here, then input parameters have been valid\n // Check if key generation has been successful by checking err\n if err != nil {\n return nil, nil, err\n }\n\n // Exporting the public key (needed later)\n switch tempPrivKey:= privateKey.(type) {\n case *rsa.PrivateKey:\n publicKey = &tempPrivKey.PublicKey\n case *ecdsa.PrivateKey:\n publicKey = &tempPrivKey.PublicKey\n }\n\n return privateKey, publicKey, err // or just return\n}",
"func (ec *ecdsa) NewKey(l int, w io.Writer) error {\n\tq, d, err := secp256k1.NewPair()\n\tif err != nil {\n\t\treturn err\n\t}\n\tk := new(sec1.PrivateKey)\n\terr = k.SetCurve(&secp256k1.OID)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = k.SetPoint(q.GetX(), q.GetY())\n\tif err != nil {\n\t\treturn err\n\t}\n\tk.SetGenerator(d)\n\tec.Private = k\n\n\treturn k.Write(w)\n}",
"func (p *G2Jac) Add(curve *Curve, a *G2Jac) *G2Jac {\n\t// p is infinity, return a\n\tif p.Z.IsZero() {\n\t\tp.Set(a)\n\t\treturn p\n\t}\n\n\t// a is infinity, return p\n\tif a.Z.IsZero() {\n\t\treturn p\n\t}\n\n\t// get some Element from our pool\n\tvar Z1Z1, Z2Z2, U1, U2, S1, S2, H, I, J, r, V e2\n\n\t// Z1Z1 = a.Z ^ 2\n\tZ1Z1.Square(&a.Z)\n\n\t// Z2Z2 = p.Z ^ 2\n\tZ2Z2.Square(&p.Z)\n\n\t// U1 = a.X * Z2Z2\n\tU1.Mul(&a.X, &Z2Z2)\n\n\t// U2 = p.X * Z1Z1\n\tU2.Mul(&p.X, &Z1Z1)\n\n\t// S1 = a.Y * p.Z * Z2Z2\n\tS1.Mul(&a.Y, &p.Z).\n\t\tMulAssign(&Z2Z2)\n\n\t// S2 = p.Y * a.Z * Z1Z1\n\tS2.Mul(&p.Y, &a.Z).\n\t\tMulAssign(&Z1Z1)\n\n\t// if p == a, we double instead\n\tif U1.Equal(&U2) && S1.Equal(&S2) {\n\t\treturn p.Double()\n\t}\n\n\t// H = U2 - U1\n\tH.Sub(&U2, &U1)\n\n\t// I = (2*H)^2\n\tI.Double(&H).\n\t\tSquare(&I)\n\n\t// J = H*I\n\tJ.Mul(&H, &I)\n\n\t// r = 2*(S2-S1)\n\tr.Sub(&S2, &S1).Double(&r)\n\n\t// V = U1*I\n\tV.Mul(&U1, &I)\n\n\t// res.X = r^2-J-2*V\n\tp.X.Square(&r).\n\t\tSubAssign(&J).\n\t\tSubAssign(&V).\n\t\tSubAssign(&V)\n\n\t// res.Y = r*(V-X3)-2*S1*J\n\tp.Y.Sub(&V, &p.X).\n\t\tMulAssign(&r)\n\tS1.MulAssign(&J).Double(&S1)\n\tp.Y.SubAssign(&S1)\n\n\t// res.Z = ((a.Z+p.Z)^2-Z1Z1-Z2Z2)*H\n\tp.Z.AddAssign(&a.Z)\n\tp.Z.Square(&p.Z).\n\t\tSubAssign(&Z1Z1).\n\t\tSubAssign(&Z2Z2).\n\t\tMulAssign(&H)\n\n\treturn p\n}",
"func (ec *EC) BaseMul(x *big.Int) *Point {\n\treturn ec.Mul(x, ec.G)\n}",
"func (p *PointProj) Add(p1, p2 *PointProj) *PointProj {\n\n\tvar res PointProj\n\n\tecurve := GetEdwardsCurve()\n\n\tvar A, B, C, D, E, F, G, H, I fr.Element\n\tA.Mul(&p1.Z, &p2.Z)\n\tB.Square(&A)\n\tC.Mul(&p1.X, &p2.X)\n\tD.Mul(&p1.Y, &p2.Y)\n\tE.Mul(&ecurve.D, &C).Mul(&E, &D)\n\tF.Sub(&B, &E)\n\tG.Add(&B, &E)\n\tH.Add(&p1.X, &p1.Y)\n\tI.Add(&p2.X, &p2.Y)\n\tres.X.Mul(&H, &I).\n\t\tSub(&res.X, &C).\n\t\tSub(&res.X, &D).\n\t\tMul(&res.X, &A).\n\t\tMul(&res.X, &F)\n\tmulByA(&C)\n\tC.Neg(&C)\n\tres.Y.Add(&D, &C).\n\t\tMul(&res.Y, &A).\n\t\tMul(&res.Y, &G)\n\tres.Z.Mul(&F, &G)\n\n\tp.Set(&res)\n\treturn p\n}",
"func newKeyPair() (ecdsa.PrivateKey, []byte) {\n\tcurve := elliptic.P256()\n\n\tpriKey, err := ecdsa.GenerateKey(curve, rand.Reader)\n\tif err != nil {\n\t\tlog.Panicln(err)\n\t}\n\tpubKey := append(priKey.PublicKey.X.Bytes(), priKey.PublicKey.Y.Bytes()...)\n\n\treturn *priKey, pubKey\n}",
"func ToEcdsa(key []byte) *ecdsa.PrivateKey {\n\tecdsaKey := new(ecdsa.PrivateKey)\n\tecdsaKey.PublicKey.Curve = elliptic.P256()\n\tecdsaKey.D = new(big.Int).SetBytes(key)\n\tecdsaKey.PublicKey.X, ecdsaKey.PublicKey.Y = ecdsaKey.PublicKey.Curve.ScalarBaseMult(key)\n\treturn ecdsaKey\n}",
"func (csp *impl) getECKey(ski []byte) (pubKey *ecdsa.PublicKey, isPriv bool, err error) {\n\n\tsession := csp.pkcs11Ctx.GetSession()\n\tdefer func() { csp.handleSessionReturn(err, session) }()\n\tisPriv = true\n\t_, err = csp.pkcs11Ctx.FindKeyPairFromSKI(session, ski, privateKeyFlag)\n\tif err != nil {\n\t\tisPriv = false\n\t\tlogger.Debugf(\"Private key not found [%s] for SKI [%s], looking for Public key\", err, hex.EncodeToString(ski))\n\t}\n\n\tpublicKey, err := csp.pkcs11Ctx.FindKeyPairFromSKI(session, ski, publicKeyFlag)\n\tif err != nil {\n\t\treturn nil, false, fmt.Errorf(\"Public key not found [%s] for SKI [%s]\", err, hex.EncodeToString(ski))\n\t}\n\n\tecpt, marshaledOid, err := ecPoint(csp.pkcs11Ctx, session, *publicKey)\n\tif err != nil {\n\t\treturn nil, false, fmt.Errorf(\"Public key not found [%s] for SKI [%s]\", err, hex.EncodeToString(ski))\n\t}\n\n\tcurveOid := new(asn1.ObjectIdentifier)\n\t_, err = asn1.Unmarshal(marshaledOid, curveOid)\n\tif err != nil {\n\t\treturn nil, false, fmt.Errorf(\"Failed Unmarshaling Curve OID [%s]\\n%s\", err.Error(), hex.EncodeToString(marshaledOid))\n\t}\n\n\tcurve := namedCurveFromOID(*curveOid)\n\tif curve == nil {\n\t\treturn nil, false, fmt.Errorf(\"Cound not recognize Curve from OID\")\n\t}\n\tx, y := elliptic.Unmarshal(curve, ecpt)\n\tif x == nil {\n\t\treturn nil, false, fmt.Errorf(\"Failed Unmarshaling Public Key\")\n\t}\n\n\tpubKey = &ecdsa.PublicKey{Curve: curve, X: x, Y: y}\n\treturn pubKey, isPriv, nil\n}",
"func (phi *isogeny3) GenerateCurve(p *ProjectivePoint) CurveCoefficientsEquiv {\n\tvar t0, t1, t2, t3, t4 Fp2Element\n\tvar coefEq CurveCoefficientsEquiv\n\tvar K1, K2 = &phi.K1, &phi.K2\n\n\top := phi.Field\n\top.Sub(K1, &p.X, &p.Z) // K1 = XP3 - ZP3\n\top.Square(&t0, K1) // t0 = K1^2\n\top.Add(K2, &p.X, &p.Z) // K2 = XP3 + ZP3\n\top.Square(&t1, K2) // t1 = K2^2\n\top.Add(&t2, &t0, &t1) // t2 = t0 + t1\n\top.Add(&t3, K1, K2) // t3 = K1 + K2\n\top.Square(&t3, &t3) // t3 = t3^2\n\top.Sub(&t3, &t3, &t2) // t3 = t3 - t2\n\top.Add(&t2, &t1, &t3) // t2 = t1 + t3\n\top.Add(&t3, &t3, &t0) // t3 = t3 + t0\n\top.Add(&t4, &t3, &t0) // t4 = t3 + t0\n\top.Add(&t4, &t4, &t4) // t4 = t4 + t4\n\top.Add(&t4, &t1, &t4) // t4 = t1 + t4\n\top.Mul(&coefEq.C, &t2, &t4) // A24m = t2 * t4\n\top.Add(&t4, &t1, &t2) // t4 = t1 + t2\n\top.Add(&t4, &t4, &t4) // t4 = t4 + t4\n\top.Add(&t4, &t0, &t4) // t4 = t0 + t4\n\top.Mul(&t4, &t3, &t4) // t4 = t3 * t4\n\top.Sub(&t0, &t4, &coefEq.C) // t0 = t4 - A24m\n\top.Add(&coefEq.A, &coefEq.C, &t0) // A24p = A24m + t0\n\treturn coefEq\n}",
"func New(ctx context.Context, concurrency int) (*Group, context.Context) {\n\tif concurrency < 1 {\n\t\tconcurrency = 1\n\t}\n\n\tparent, ctx := errgroup.WithContext(ctx)\n\treturn &Group{\n\t\tlimiter: make(chan struct{}, concurrency),\n\t\tparent: parent,\n\t\tctx: ctx,\n\t}, ctx\n}",
"func NewCurveECDSA() ECDSA {\n\treturn &curveP256{}\n}",
"func Add(a, b *ecdsa.PublicKey) *ecdsa.PublicKey {\n\tkey := new(ecdsa.PublicKey)\n\tkey.Curve = Secp256k1()\n\tkey.X, key.Y = Secp256k1().Add(a.X, a.Y, b.X, b.Y)\n\treturn key\n}",
"func (r1cs *R1CS) GetCurveID() gurvy.ID {\n\treturn gurvy.BN256\n}",
"func (cg *Group) Add(ec *ExpresionChain) {\n\tcg.chains = append(cg.chains, ec)\n}",
"func (curve *Curve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {\n\t// We have a slight problem in that the identity of the group (the\n\t// point at infinity) cannot be represented in (x, y) form on a finite\n\t// machine. Thus the standard add/double algorithm has to be tweaked\n\t// slightly: our initial state is not the identity, but x, and we\n\t// ignore the first true bit in |k|. If we don't find any true bits in\n\t// |k|, then we return nil, nil, because we cannot return the identity\n\t// element.\n\n\tBz := new(big.Int).SetInt64(1)\n\tx := Bx\n\ty := By\n\tz := Bz\n\n\tseenFirstTrue := false\n\tfor _, byte := range k {\n\t\tfor bitNum := 0; bitNum < 8; bitNum++ {\n\t\t\tif seenFirstTrue {\n\t\t\t\tx, y, z = curve.doubleJacobian(x, y, z)\n\t\t\t}\n\t\t\tif byte&0x80 == 0x80 {\n\t\t\t\tif !seenFirstTrue {\n\t\t\t\t\tseenFirstTrue = true\n\t\t\t\t} else {\n\t\t\t\t\tx, y, z = curve.addJacobian(Bx, By, Bz, x, y, z)\n\t\t\t\t}\n\t\t\t}\n\t\t\tbyte <<= 1\n\t\t}\n\t}\n\n\tif !seenFirstTrue {\n\t\treturn nil, nil\n\t}\n\n\treturn curve.affineFromJacobian(x, y, z)\n}",
"func PeelCentralCurve() func(x float64) model3d.Coord3D {\n\tplaneCurve := PeelCurve()\n\tzCurve := PeelHeight()\n\treturn func(x float64) model3d.Coord3D {\n\t\treturn model3d.XYZ(x, model2d.CurveEvalX(planeCurve, x), model2d.CurveEvalX(zCurve, x))\n\t}\n}",
"func GenerateGroup(r io.Reader) (*PrivateKey, error) {\n\tpriv := new(PrivateKey)\n\tpriv.Group = new(Group)\n\tvar err error\n\n\tif _, priv.g1, err = bn256.RandomG1(r); err != nil {\n\t\treturn nil, err\n\t}\n\tif _, priv.g2, err = bn256.RandomG2(r); err != nil {\n\t\treturn nil, err\n\t}\n\tif _, priv.h, err = bn256.RandomG1(r); err != nil {\n\t\treturn nil, err\n\t}\n\tif priv.xi1, err = randomZp(r); err != nil {\n\t\treturn nil, err\n\t}\n\tif priv.xi2, err = randomZp(r); err != nil {\n\t\treturn nil, err\n\t}\n\n\tz0 := new(big.Int).ModInverse(priv.xi1, bn256.Order)\n\tpriv.u = new(bn256.G1).ScalarMult(priv.h, z0)\n\n\tz0.ModInverse(priv.xi2, bn256.Order)\n\tpriv.v = new(bn256.G1).ScalarMult(priv.h, z0)\n\n\tpriv.gamma, err = randomZp(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpriv.w = new(bn256.G2).ScalarMult(priv.g2, priv.gamma)\n\tpriv.precompute()\n\n\treturn priv, nil\n}",
"func (c *CurveOperations) CalcCurveParamsEquiv4(cparams *ProjectiveCurveParameters) CurveCoefficientsEquiv {\n\tvar coefEq CurveCoefficientsEquiv\n\tvar op = c.Params.Op\n\n\top.Add(&coefEq.C, &cparams.C, &cparams.C)\n\t// A24p = A+2C\n\top.Add(&coefEq.A, &cparams.A, &coefEq.C)\n\t// C24 = 4*C\n\top.Add(&coefEq.C, &coefEq.C, &coefEq.C)\n\treturn coefEq\n}",
"func NewGroupClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) *GroupClient {\n\tcp := arm.ClientOptions{}\n\tif options != nil {\n\t\tcp = *options\n\t}\n\tif len(cp.Host) == 0 {\n\t\tcp.Host = arm.AzurePublicCloud\n\t}\n\treturn &GroupClient{subscriptionID: subscriptionID, ep: string(cp.Host), pl: armruntime.NewPipeline(module, version, credential, &cp)}\n}",
"func ScalarBaseMult(k *big.Int) *ecdsa.PublicKey {\n\tkey := new(ecdsa.PublicKey)\n\tkey.Curve = Secp256k1()\n\tkey.X, key.Y = Secp256k1().ScalarBaseMult(k.Bytes())\n\treturn key\n}",
"func PrivatecaAlphaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurveToProto(o *alpha.CaPoolIssuancePolicyAllowedKeyTypesEllipticCurve) *alphapb.PrivatecaAlphaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurve {\n\tif o == nil {\n\t\treturn nil\n\t}\n\tp := &alphapb.PrivatecaAlphaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurve{}\n\tp.SetSignatureAlgorithm(PrivatecaAlphaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurveSignatureAlgorithmEnumToProto(o.SignatureAlgorithm))\n\treturn p\n}",
"func GenECCKey(curve int, password string, kmPubFile, kmPrivFile, bpmPubFile, bpmPrivFile *os.File) error {\n\tvar ellCurve elliptic.Curve\n\tswitch curve {\n\tcase 224:\n\t\tellCurve = elliptic.P224()\n\tcase 256:\n\t\tellCurve = elliptic.P256()\n\tdefault:\n\t\treturn fmt.Errorf(\"Selected ECC algorithm not supported\")\n\t}\n\tkey, err := ecdsa.GenerateKey(ellCurve, rand.Reader)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := writePrivKeyToFile(key, kmPrivFile, password); err != nil {\n\t\treturn err\n\t}\n\n\tif err := writePubKeyToFile(key.Public(), kmPubFile); err != nil {\n\t\treturn err\n\t}\n\n\tkey, err = ecdsa.GenerateKey(ellCurve, rand.Reader)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := writePrivKeyToFile(key, bpmPrivFile, password); err != nil {\n\t\treturn err\n\t}\n\n\tif err := writePubKeyToFile(key.Public(), bpmPubFile); err != nil {\n\t\treturn err\n\n\t}\n\treturn nil\n}",
"func GenerateKey(c elliptic.Curve, rand io.Reader) (*PrivateKey, error)",
"func New() (*KeyPair, error) {\n\tp, v, err := p2pCrypto.GenerateKeyPairWithReader(p2pCrypto.Ed25519, 256, rand.Reader)\n\tif err == nil {\n\t\treturn &KeyPair{privKey: p, pubKey: v}, nil\n\t}\n\treturn nil, err\n}",
"func PointMul(group Group, n Bignum, P Point, m Bignum, ctx Ctx) Point {\n\tresult := NewPoint(group)\n\tC.EC_POINT_mul(group, result, n, P, m, ctx)\n\treturn result\n}",
"func (self *Graphics) DrawEllipse(x int, y int, width int, height int) *Graphics{\n return &Graphics{self.Object.Call(\"drawEllipse\", x, y, width, height)}\n}",
"func NewECPoint(x, y *big.Int, curve elliptic.Curve) *ECPoint {\n\tec := ECPoint{}\n\tec.Curve = curve\n\tec.X = new(big.Int).Set(x)\n\tec.Y = new(big.Int).Set(y)\n\n\treturn &ec\n}",
"func (c curve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {\n\treturn c.ScalarMult(c.Params().Gx, c.Params().Gy, k)\n}",
"func (ec *EllipticCurve) Set(value string) error {\n\tswitch strings.ToUpper(value) {\n\tcase strEccP521, \"P-521\":\n\t\t*ec = EllipticCurveP521\n\tcase strEccP384, \"P-384\":\n\t\t*ec = EllipticCurveP384\n\tcase strEccP256, \"P-256\":\n\t\t*ec = EllipticCurveP256\n\tcase strEccED25519:\n\t\t*ec = EllipticCurveED25519\n\tdefault:\n\t\t*ec = EllipticCurveDefault\n\t}\n\n\treturn nil\n}",
"func ExponentiateKey(s beam.Scope, col beam.PCollection, secret string, publicKey *pb.ElGamalPublicKey) beam.PCollection {\n\ts = s.Scope(\"ExponentiateKey\")\n\treturn beam.ParDo(s, &exponentiateKeyFn{Secret: secret, ElGamalPublicKey: publicKey}, col)\n}",
"func ProtoToPrivatecaAlphaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurve(p *alphapb.PrivatecaAlphaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurve) *alpha.CaPoolIssuancePolicyAllowedKeyTypesEllipticCurve {\n\tif p == nil {\n\t\treturn nil\n\t}\n\tobj := &alpha.CaPoolIssuancePolicyAllowedKeyTypesEllipticCurve{\n\t\tSignatureAlgorithm: ProtoToPrivatecaAlphaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurveSignatureAlgorithmEnum(p.GetSignatureAlgorithm()),\n\t}\n\treturn obj\n}",
"func ScalarMultH(scalar *Key) (result *Key) {\n\th := new(ExtendedGroupElement)\n\th.FromBytes(&H)\n\tresultPoint := new(ProjectiveGroupElement)\n\tGeScalarMult(resultPoint, scalar, h)\n\tresult = new(Key)\n\tresultPoint.ToBytes(result)\n\treturn\n}",
"func New(w io.Writer, width, height float64) *Renderer {\n\tfmt.Fprintf(w, \"%%!PS-Adobe-3.0 EPSF-3.0\\n%%%%BoundingBox: 0 0 %v %v\\n\", dec(width), dec(height))\n\tfmt.Fprintf(w, psEllipseDef)\n\t// TODO: (EPS) generate and add preview\n\n\treturn &Renderer{\n\t\tw: w,\n\t\twidth: width,\n\t\theight: height,\n\t\tcolor: canvas.Black,\n\t}\n}",
"func ScalarMult(k *big.Int, B *ecdsa.PublicKey) *ecdsa.PublicKey {\n\tkey := new(ecdsa.PublicKey)\n\tkey.Curve = Secp256k1()\n\tkey.X, key.Y = Secp256k1().ScalarMult(B.X, B.Y, k.Bytes())\n\treturn key\n}",
"func (p *Provider) Public() *Provider {\n\tif p.key == nil {\n\t\treturn p\n\t}\n\treturn &Provider{chain: p.chain, key: nil}\n}",
"func NewKeyPair() (ecdsa.PrivateKey, []byte) {\n\tellipticCurve := EllipticCurve()\n\n\tprivateKey, err := ecdsa.GenerateKey(ellipticCurve, rand.Reader)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tX := privateKey.PublicKey.X.Bytes()\n\tY := privateKey.PublicKey.Y.Bytes()\n\t//fmt.Println(len(X), X)\n\t//fmt.Println(len(Y), Y)\n\tpublicKey := append(\n\t\tX, // 32 bytes (P256)\n\t\tY..., // 32 bytes (P256)\n\t) // 64 bytes => 64 * 8 bits = 512 bits (perchè usiamo P256 o secp256k)\n\treturn *privateKey, publicKey\n}",
"func NewPoint(group Group) Point {\n\treturn C.EC_POINT_new(group)\n}",
"func New(cfg hotstuff.Config) (hotstuff.Signer, hotstuff.Verifier) {\n\tec := &ecdsaCrypto{cfg}\n\treturn ec, ec\n}",
"func NewSubGroup(rootOfUnity curve.Element, maxOrderRoot uint, m int) *SubGroup {\n\tsubGroup := &SubGroup{}\n\tx := nextPowerOfTwo(uint(m))\n\n\t// maxOderRoot is the largest power-of-two order for any element in the field\n\t// set subGroup.GeneratorSqRt = rootOfUnity^(2^(maxOrderRoot-log(x)-1))\n\t// to this end, compute expo = 2^(maxOrderRoot-log(x)-1)\n\tlogx := uint(bits.TrailingZeros(x))\n\tif logx > maxOrderRoot-1 {\n\t\tpanic(\"m is too big: the required root of unity does not exist\")\n\t}\n\texpo := uint64(1 << (maxOrderRoot - logx - 1))\n\tsubGroup.GeneratorSqRt.Exp(rootOfUnity, expo)\n\n\t// Generator = GeneratorSqRt^2 has order x\n\tsubGroup.Generator.Mul(&subGroup.GeneratorSqRt, &subGroup.GeneratorSqRt) // order x\n\tsubGroup.Cardinality = int(x)\n\tsubGroup.GeneratorSqRtInv.Inverse(&subGroup.GeneratorSqRt)\n\tsubGroup.GeneratorInv.Inverse(&subGroup.Generator)\n\tsubGroup.CardinalityInv.SetUint64(uint64(x)).Inverse(&subGroup.CardinalityInv)\n\n\treturn subGroup\n}",
"func New(opt ...option) *Group {\n\tr := &Group{wait_register: map[int]bool{}}\n\tfor _, o := range opt {\n\t\to(r)\n\t}\n\tif r.CancelFunc == nil {\n\t\tWith_cancel_nowait(nil)(r)\n\t}\n\tif r.parent == nil {\n\t\tr.local_wg = &sync.WaitGroup{}\n\t}\n\tif r.sig != nil {\n\t\tr.wg().Add(1)\n\t\tgo func() {\n\t\t\tdefer r.wg().Done()\n\t\t\tch := make(chan os.Signal, 1)\n\t\t\tdefer close(ch)\n\t\t\tsignal.Notify(ch, r.sig...)\n\t\t\tdefer signal.Stop(ch)\n\t\t\tselect {\n\t\t\tcase <-r.Done():\n\t\t\tcase <-ch:\n\t\t\t\tr.Interrupted = true\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%v\", r.line_end)\n\t\t\t}\n\t\t\tr.Cancel()\n\t\t}()\n\t}\n\treturn r\n}",
"func (*FactorySECP256K1R) NewPrivateKey() (PrivateKey, error) {\n\tk, err := secp256k1.GeneratePrivateKey()\n\treturn &PrivateKeySECP256K1R{sk: k}, err\n}",
"func generateKey(curve elliptic.Curve) (private []byte, public []byte, err error) {\n\tvar x, y *big.Int\n\tprivate, x, y, err = elliptic.GenerateKey(curve, rand.Reader)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tpublic = elliptic.Marshal(curve, x, y)\n\treturn\n}",
"func (x *Ed25519Credentials) PrivateKey() PrivateKey {\n\n\treturn PrivateKey{\n\t\tAlgorithm: AlgorithmEd25519,\n\t\tPrivate: base64.URLEncoding.EncodeToString(x.Private[:]),\n\t}\n\n}",
"func New(privKey *ecdsa.PrivateKey, alg, kid string) *Signer {\n\treturn &Signer{privateKey: privKey, kid: kid, alg: alg}\n}",
"func toECDSA(d []byte, strict bool) (*ecdsa.PrivateKey, error) {\n\tpriv := new(ecdsa.PrivateKey)\n\tpriv.PublicKey.Curve = S256()\n\tif strict && 8*len(d) != priv.Params().BitSize {\n\t\treturn nil, fmt.Errorf(\"invalid length, need %d bits\", priv.Params().BitSize)\n\t}\n\tpriv.D = new(big.Int).SetBytes(d)\n\n\t// The priv.D must < N\n\tif priv.D.Cmp(secp256k1N) >= 0 {\n\t\treturn nil, fmt.Errorf(\"invalid private key, >=N\")\n\t}\n\t// The priv.D must not be zero or negative.\n\tif priv.D.Sign() <= 0 {\n\t\treturn nil, fmt.Errorf(\"invalid private key, zero or negative\")\n\t}\n\n\tpriv.PublicKey.X, priv.PublicKey.Y = priv.PublicKey.Curve.ScalarBaseMult(d)\n\tif priv.PublicKey.X == nil {\n\t\treturn nil, errors.New(\"invalid private key\")\n\t}\n\treturn priv, nil\n}",
"func (in *EndpointGroupParameters) DeepCopy() *EndpointGroupParameters {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(EndpointGroupParameters)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func getECDSAPublicKey(rawPubKey []byte) (*ecdsa.PublicKey, error) {\n\ttag := rawPubKey[0]\n\tuncompressed := rawPubKey[2]\n\tif tag != 0x04 || uncompressed != 0x04 {\n\t\treturn nil, errors.New(\"Invalid public key.\")\n\t}\n\tlength := int(rawPubKey[1]) - 1\n\tif len(rawPubKey) != (3 + length) {\n\t\treturn nil, errors.New(\"Invalid public key.\")\n\t}\n\tx := new(big.Int).SetBytes(rawPubKey[3 : 3+(length/2)])\n\ty := new(big.Int).SetBytes(rawPubKey[3+(length/2):])\n\tecdsaPubKey := ecdsa.PublicKey{Curve: elliptic.P256(), X: x, Y: y}\n\treturn &ecdsaPubKey, nil\n}",
"func (x *ed25519_t) NewSigner(key PrivateKey) (Credentials, error) {\n\n\tvar credentials Ed25519Credentials\n\tvar err error\n\n\terr = credentials.SetPrivateKey(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &credentials, nil\n\n}",
"func TestScalarMult(t *testing.T) {\n\tsecp256k1 := newECDSASecp256k1().curve\n\tp256 := newECDSAP256().curve\n\tgenericMultTests := []struct {\n\t\tcurve elliptic.Curve\n\t\tPx string\n\t\tPy string\n\t\tk string\n\t\tQx string\n\t\tQy string\n\t}{\n\t\t{\n\t\t\tsecp256k1,\n\t\t\t\"858a2ea2498449acf531128892f8ee5eb6d10cfb2f7ebfa851def0e0d8428742\",\n\t\t\t\"015c59492d794a4f6a3ab3046eecfc85e223d1ce8571aa99b98af6838018286e\",\n\t\t\t\"6e37a39c31a05181bf77919ace790efd0bdbcaf42b5a52871fc112fceb918c95\",\n\t\t\t\"fea24b9a6acdd97521f850e782ef4a24f3ef672b5cd51f824499d708bb0c744d\",\n\t\t\t\"5f0b6db1a2c851cb2959fab5ed36ad377e8b53f1f43b7923f1be21b316df1ea1\",\n\t\t},\n\t\t{\n\t\t\tp256,\n\t\t\t\"fa1a85f1ae436e9aa05baabe60eb83b2d7ff52e5766504fda4e18d2d25887481\",\n\t\t\t\"f7cc347e1ac53f6720ffc511bfb23c2f04c764620be0baf8c44313e92d5404de\",\n\t\t\t\"6e37a39c31a05181bf77919ace790efd0bdbcaf42b5a52871fc112fceb918c95\",\n\t\t\t\"28a27fc352f315d5cc562cb0d97e5882b6393fd6571f7d394cc583e65b5c7ffe\",\n\t\t\t\"4086d17a2d0d9dc365388c91ba2176de7acc5c152c1a8d04e14edc6edaebd772\",\n\t\t},\n\t}\n\n\tbaseMultTests := []struct {\n\t\tcurve elliptic.Curve\n\t\tk string\n\t\tQx string\n\t\tQy string\n\t}{\n\t\t{\n\t\t\tsecp256k1,\n\t\t\t\"6e37a39c31a05181bf77919ace790efd0bdbcaf42b5a52871fc112fceb918c95\",\n\t\t\t\"36f292f6c287b6e72ca8128465647c7f88730f84ab27a1e934dbd2da753930fa\",\n\t\t\t\"39a09ddcf3d28fb30cc683de3fc725e095ec865c3d41aef6065044cb12b1ff61\",\n\t\t},\n\t\t{\n\t\t\tp256,\n\t\t\t\"6e37a39c31a05181bf77919ace790efd0bdbcaf42b5a52871fc112fceb918c95\",\n\t\t\t\"78a80dfe190a6068be8ddf05644c32d2540402ffc682442f6a9eeb96125d8681\",\n\t\t\t\"3789f92cf4afabf719aaba79ecec54b27e33a188f83158f6dd15ecb231b49808\",\n\t\t},\n\t}\n\n\tfor _, test := range genericMultTests {\n\t\tPx, _ := new(big.Int).SetString(test.Px, 16)\n\t\tPy, _ := new(big.Int).SetString(test.Py, 16)\n\t\tk, _ := new(big.Int).SetString(test.k, 16)\n\t\tQx, _ := new(big.Int).SetString(test.Qx, 16)\n\t\tQy, _ := 
new(big.Int).SetString(test.Qy, 16)\n\t\tRx, Ry := test.curve.ScalarMult(Px, Py, k.Bytes())\n\t\tassert.Equal(t, Rx.Cmp(Qx), 0)\n\t\tassert.Equal(t, Ry.Cmp(Qy), 0)\n\t}\n\tfor _, test := range baseMultTests {\n\t\tk, _ := new(big.Int).SetString(test.k, 16)\n\t\tQx, _ := new(big.Int).SetString(test.Qx, 16)\n\t\tQy, _ := new(big.Int).SetString(test.Qy, 16)\n\t\t// base mult\n\t\tRx, Ry := test.curve.ScalarBaseMult(k.Bytes())\n\t\tassert.Equal(t, Rx.Cmp(Qx), 0)\n\t\tassert.Equal(t, Ry.Cmp(Qy), 0)\n\t\t// generic mult with base point\n\t\tPx := new(big.Int).Set(test.curve.Params().Gx)\n\t\tPy := new(big.Int).Set(test.curve.Params().Gy)\n\t\tRx, Ry = test.curve.ScalarMult(Px, Py, k.Bytes())\n\t\tassert.Equal(t, Rx.Cmp(Qx), 0)\n\t\tassert.Equal(t, Ry.Cmp(Qy), 0)\n\t}\n}",
"func simpleEC(this js.Value, inputs []js.Value) interface{} {\n\tvar suite = suites.MustFind(\"Ed25519\")\n\tvar args map[string]interface{}\n\tjson.Unmarshal([]byte(inputs[0].String()), &args)\n\tscalar := suite.Scalar()\n\tscalarB, _ := base64.StdEncoding.DecodeString(args[\"scalar\"].(string))\n\tscalar.UnmarshalBinary(scalarB)\n\tvar resultB []byte\n\tfor i := 0; i < 1; i++ {\n\t\tresultB, _ = suite.Point().Mul(scalar, nil).MarshalBinary()\n\t}\n\targs[\"result\"] = base64.StdEncoding.EncodeToString(resultB)\n\t//args[\"resultTest\"] = result.String()\n\targs[\"Accepted\"] = \"true\"\n\treturn args\n}",
"func (lib *PKCS11Lib) exportECDSAPublicKey(session pkcs11.SessionHandle, pubHandle pkcs11.ObjectHandle) (crypto.PublicKey, error) {\n\tvar err error\n\tvar attributes []*pkcs11.Attribute\n\tvar pub ecdsa.PublicKey\n\ttemplate := []*pkcs11.Attribute{\n\t\tpkcs11.NewAttribute(pkcs11.CKA_ECDSA_PARAMS, nil),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_EC_POINT, nil),\n\t}\n\tif attributes, err = lib.Ctx.GetAttributeValue(session, pubHandle, template); err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\tif pub.Curve, err = unmarshalEcParams(attributes[0].Value); err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\tif pub.X, pub.Y, err = unmarshalEcPoint(attributes[1].Value, pub.Curve); err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\treturn &pub, nil\n}",
"func Encrypt(pub *ecdsa.PublicKey, in []byte) (out []byte, err error) {\n\tephemeral, err := ecdsa.GenerateKey(Curve(), rand.Reader)\n\tif err != nil {\n\t\treturn\n\t}\n\tx, _ := pub.Curve.ScalarMult(pub.X, pub.Y, ephemeral.D.Bytes())\n\tif x == nil {\n\t\treturn nil, errors.New(\"Failed to generate encryption key\")\n\t}\n\tshared := sha256.Sum256(x.Bytes())\n\tiv, err := symcrypt.MakeRandom(16)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tpaddedIn := padding.AddPadding(in)\n\tct, err := symcrypt.EncryptCBC(paddedIn, iv, shared[:16])\n\tif err != nil {\n\t\treturn\n\t}\n\n\tephPub := elliptic.Marshal(pub.Curve, ephemeral.PublicKey.X, ephemeral.PublicKey.Y)\n\tout = make([]byte, 1+len(ephPub)+16)\n\tout[0] = byte(len(ephPub))\n\tcopy(out[1:], ephPub)\n\tcopy(out[1+len(ephPub):], iv)\n\tout = append(out, ct...)\n\n\th := hmac.New(sha1.New, shared[16:])\n\th.Write(iv)\n\th.Write(ct)\n\tout = h.Sum(out)\n\treturn\n}",
"func (g *G1) Double() {\n\t// Reference:\n\t// \"Complete addition formulas for prime order elliptic curves\" by\n\t// Costello-Renes-Batina. [Alg.9] (eprint.iacr.org/2015/1060).\n\tvar R G1\n\tX, Y, Z := &g.x, &g.y, &g.z\n\tX3, Y3, Z3 := &R.x, &R.y, &R.z\n\tvar f0, f1, f2 ff.Fp\n\tt0, t1, t2 := &f0, &f1, &f2\n\t_3B := &g1Params._3b\n\tt0.Sqr(Y) // 1. t0 = Y * Y\n\tZ3.Add(t0, t0) // 2. Z3 = t0 + t0\n\tZ3.Add(Z3, Z3) // 3. Z3 = Z3 + Z3\n\tZ3.Add(Z3, Z3) // 4. Z3 = Z3 + Z3\n\tt1.Mul(Y, Z) // 5. t1 = Y * Z\n\tt2.Sqr(Z) // 6. t2 = Z * Z\n\tt2.Mul(_3B, t2) // 7. t2 = b3 * t2\n\tX3.Mul(t2, Z3) // 8. X3 = t2 * Z3\n\tY3.Add(t0, t2) // 9. Y3 = t0 + t2\n\tZ3.Mul(t1, Z3) // 10. Z3 = t1 * Z3\n\tt1.Add(t2, t2) // 11. t1 = t2 + t2\n\tt2.Add(t1, t2) // 12. t2 = t1 + t2\n\tt0.Sub(t0, t2) // 13. t0 = t0 - t2\n\tY3.Mul(t0, Y3) // 14. Y3 = t0 * Y3\n\tY3.Add(X3, Y3) // 15. Y3 = X3 + Y3\n\tt1.Mul(X, Y) // 16. t1 = X * Y\n\tX3.Mul(t0, t1) // 17. X3 = t0 * t1\n\tX3.Add(X3, X3) // 18. X3 = X3 + X3\n\t*g = R\n}",
"func (pk *ECPublicKey) toECDSA() *ecdsa.PublicKey {\n\tecdsaPub := new(ecdsa.PublicKey)\n\tecdsaPub.Curve = pk.Curve\n\tecdsaPub.X = pk.X\n\tecdsaPub.Y = pk.Y\n\n\treturn ecdsaPub\n}",
"func New() *backend_bn256.R1CS {\n\t// create root constraint system\n\tcircuit := frontend.New()\n\n\t// declare secret and public inputs\n\tpreImage := circuit.SECRET_INPUT(\"pi\")\n\thash := circuit.PUBLIC_INPUT(\"h\")\n\n\t// hash function\n\tmimc, _ := mimc.NewMiMCGadget(\"seed\", gurvy.BN256)\n\n\t// specify constraints\n\t// mimc(preImage) == hash\n\tcircuit.MUSTBE_EQ(hash, mimc.Hash(&circuit, preImage))\n\n\tr1cs := backend_bn256.New(&circuit)\n\n\treturn &r1cs\n}",
"func (c *Curve) NewPoint(x, y float64) (CurvePoint, error) {\n\n\tvar point CurvePoint\n\n\tif !c.IsPointOnCurve(x, y) {\n\t\terr := fmt.Errorf(\"Point (%f, %f) is not on y^2 = x^3 + %fx + %f\", x, y, c.a, c.b)\n\t\treturn point, err\n\t}\n\n\tpoint.x = x\n\tpoint.y = y\n\tpoint.order = -1\n\tpoint.curve = c\n\n\treturn point, nil\n}",
"func (c *container) Ellipse(cx, cy, rx, ry float64) *Ellipse {\n\te := &Ellipse{Cx: cx, Cy: cy, Rx: rx, Ry: ry}\n\tc.contents = append(c.contents, e)\n\n\treturn e\n}",
"func New(ctx context.Context) *Group {\n\t// Monitor goroutine context and cancelation.\n\tmctx, cancel := context.WithCancel(ctx)\n\n\tg := &Group{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\n\t\taddC: make(chan struct{}),\n\t\tlenC: make(chan int),\n\t}\n\n\tg.wg.Add(1)\n\tgo func() {\n\t\tdefer g.wg.Done()\n\t\tg.monitor(mctx)\n\t}()\n\n\treturn g\n}"
] | [
"0.6798301",
"0.6065067",
"0.5757176",
"0.56906825",
"0.56289303",
"0.54817134",
"0.5463004",
"0.5415355",
"0.539792",
"0.53937465",
"0.5361424",
"0.5323893",
"0.5249899",
"0.51688224",
"0.50669473",
"0.5043877",
"0.50011253",
"0.49452612",
"0.49054405",
"0.48807684",
"0.47987887",
"0.47651106",
"0.47571173",
"0.4744629",
"0.47287554",
"0.4727858",
"0.47117567",
"0.46565667",
"0.46432996",
"0.4636588",
"0.46358663",
"0.45650247",
"0.4564723",
"0.4525633",
"0.45005924",
"0.4498344",
"0.4489864",
"0.4486951",
"0.44715083",
"0.4471491",
"0.44631237",
"0.4427965",
"0.44210693",
"0.4408045",
"0.44052696",
"0.43887824",
"0.43758488",
"0.43751046",
"0.4370899",
"0.4367349",
"0.43654534",
"0.4355001",
"0.43387482",
"0.43333298",
"0.43047076",
"0.43009183",
"0.42848307",
"0.42815778",
"0.42814973",
"0.42769006",
"0.42539665",
"0.42528844",
"0.42352763",
"0.4225302",
"0.42237338",
"0.42098147",
"0.41796428",
"0.41752842",
"0.41752765",
"0.41746128",
"0.4167619",
"0.41669697",
"0.415623",
"0.41492134",
"0.41416645",
"0.41411176",
"0.41401213",
"0.41328403",
"0.41178867",
"0.4115851",
"0.4112761",
"0.41110083",
"0.41101888",
"0.40954947",
"0.4090991",
"0.40839395",
"0.40745997",
"0.40582994",
"0.40553132",
"0.4055145",
"0.40545356",
"0.4050158",
"0.40408683",
"0.40389386",
"0.40383673",
"0.4037078",
"0.40365073",
"0.40314084",
"0.4026407",
"0.40232432"
] | 0.7203198 | 0 |
MaxItemID retrieves the current largest item id is at You can walk backward from here to discover all items. | MaxItemID возвращает текущий наибольший идентификатор предмета. Вы можете идти в обратном направлении отсюда, чтобы обнаружить все предметы. | func MaxItemID() int {
var maxItemID int
err := getJSON(MaxItemIDURL, &maxItemID)
if err != nil {
log.Panicln(err.Error())
}
return maxItemID
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func getMaxID() uint8 {\n\tmaxID++\n\treturn maxID\n}",
"func getMaxID() int {\n\n\tif len(cdb.classMap) != 0 {\n\t\tkeys := make([]int, 0, len(cdb.classMap))\n\t\tfor k := range cdb.classMap {\n\t\t\tkeys = append(keys, k)\n\t\t}\n\t\tsort.Ints(keys)\n\t\treturn keys[len(keys)-1]\n\t}\n\n\treturn -1\n\n}",
"func (m *MessageReplies) GetMaxID() (value int, ok bool) {\n\tif m == nil {\n\t\treturn\n\t}\n\tif !m.Flags.Has(2) {\n\t\treturn value, false\n\t}\n\treturn m.MaxID, true\n}",
"func (p *Policy) getMaxBlockSize(ic *interop.Context, _ []stackitem.Item) stackitem.Item {\n\treturn stackitem.NewBigInteger(big.NewInt(int64(p.GetMaxBlockSizeInternal(ic.DAO))))\n}",
"func (_SmartTgStats *SmartTgStatsCaller) MaxRequestID(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _SmartTgStats.contract.Call(opts, out, \"maxRequestID\")\n\treturn *ret0, err\n}",
"func (m *MessageReplies) SetMaxID(value int) {\n\tm.Flags.Set(2)\n\tm.MaxID = value\n}",
"func (v *parameter) MaxItems() int {\n\tif !v.HasMaxItems() {\n\t\treturn 0\n\t}\n\treturn *v.maxItems\n}",
"func MaxItems(totalItems int) int {\n\tvar maxItems int\n\n\tif totalItems%2 == 0 {\n\t\tmaxItems = totalItems / 2\n\t} else {\n\t\tmaxItems = totalItems/2 + 1\n\t}\n\treturn maxItems\n}",
"func (l *Limiter) MaxItems() int {\n\treturn l.maxItems\n}",
"func (o *ListScansParams) SetMaxItems(maxItems *int64) {\n\to.MaxItems = maxItems\n}",
"func max(n *node) Item {\n\tif n == nil {\n\t\treturn nil\n\t}\n\tfor len(n.children) > 0 {\n\t\tn = n.children[len(children)-1]\n\t}\n\tif len(n.items) == 0 {\n\t\treturn nil\n\t}\n\treturn n.items[len(n.items)-1]\n}",
"func (b *MessagesGetLongPollHistoryBuilder) MaxMsgID(v int) *MessagesGetLongPollHistoryBuilder {\n\tb.Params[\"max_msg_id\"] = v\n\treturn b\n}",
"func MaxKey() Val { return Val{t: bsontype.MaxKey} }",
"func (list *List) HighestID() (ID, bool) {\n\tif list.highest == -1 {\n\t\treturn ID(0), false\n\t}\n\n\treturn ID(list.highest), true\n}",
"func (_SmartTgStats *SmartTgStatsSession) MaxRequestID() (*big.Int, error) {\n\treturn _SmartTgStats.Contract.MaxRequestID(&_SmartTgStats.CallOpts)\n}",
"func (t *BTree) maxItems() int {\n\tdegree := 2\n\treturn degree*2 - 1\n}",
"func (_SmartTgStats *SmartTgStatsCallerSession) MaxRequestID() (*big.Int, error) {\n\treturn _SmartTgStats.Contract.MaxRequestID(&_SmartTgStats.CallOpts)\n}",
"func (s *DescribeFileSystemsInput) SetMaxItems(v int64) *DescribeFileSystemsInput {\n\ts.MaxItems = &v\n\treturn s\n}",
"func (m *Max) GetId() int {\n\tif m.count > 0 {\n\t\treturn m.maxIdx\n\t} else {\n\t\treturn -1\n\t}\n}",
"func (px *Paxos) Max() int {\n\tkeys := px.sortedSeqs()\n\tif len(keys) == 0 {\n\t\treturn -1\n\t} else {\n\t\tsort.Ints(keys)\n\t}\n\treturn keys[len(keys)-1]\n}",
"func (s *DescribeMountTargetsInput) SetMaxItems(v int64) *DescribeMountTargetsInput {\n\ts.MaxItems = &v\n\treturn s\n}",
"func (s *DescribeTagsInput) SetMaxItems(v int64) *DescribeTagsInput {\n\ts.MaxItems = &v\n\treturn s\n}",
"func (s *GetSampledRequestsInput) SetMaxItems(v int64) *GetSampledRequestsInput {\n\ts.MaxItems = &v\n\treturn s\n}",
"func (o *GetComplianceByResourceTypesParams) SetMaxItems(maxItems *int64) {\n\to.MaxItems = maxItems\n}",
"func (it *Item) ID() int64 { return it.id }",
"func GetMaxBlockSize() int64 {\r\n\treturn converter.StrToInt64(SysString(MaxBlockSize))\r\n}",
"func MaxI(x, y int) int {\n\tif x > y {\n\t\treturn x\n\t}\n\treturn y\n}",
"func (t *MedLabPharmaChaincode) GetMaxIDValue(stub shim.ChaincodeStubInterface) ([]byte, error) {\n\tvar jsonResp string\n\tvar err error\n\tConMaxAsbytes, err := stub.GetState(UNIQUE_ID_COUNTER)\n\tif err != nil {\n\t\tjsonResp = \"{\\\"Error\\\":\\\"Failed to get state for ContainerMaxNumber \\\"}\"\n\t\treturn nil, errors.New(jsonResp)\n\t}\n\treturn ConMaxAsbytes, nil\n}",
"func (h *binaryHeap) GetMax() int {\n\tmax := h.items[0]\n\th.items[0], h.items[h.size-1] = h.items[h.size-1], 0\n\th.size--\n\th.siftdown(0)\n\treturn max\n}",
"func (px *Paxos) Max() int {\n\t// Your code here.\n\treturn px.curMax\n}",
"func MaxI64(x, y int64) int64 {\n\tif x > y {\n\t\treturn x\n\t}\n\treturn y\n}",
"func MaxI64(x, y int64) int64 {\n\tif x > y {\n\t\treturn x\n\t}\n\treturn y\n}",
"func (this *AllOne) GetMaxKey() string {\n\tif this.Head.Next != this.Tail {\n\t\tnmap := this.Head.Next.NodeMap\n\t\tret := \"\"\n\t\tfor k, _ := range nmap {\n\t\t\tret = k\n\t\t\tbreak\n\t\t}\n\t\treturn ret\n\t}\n\n\treturn \"\"\n}",
"func assignAuctionId() (key int) {\n\tvar highestKey int\n\tfor _, auction := range auctions {\n\t\tif auction.Id > highestKey {\n\t\t\thighestKey = auction.Id\n\t\t}\n\t}\n\treturn highestKey + 1\n}",
"func (this *AllOne) GetMaxKey() string {\n\tif this.head == nil {\n\t\treturn \"\"\n\t}\n\tfor k := range this.head.set {\n\t\treturn k\n\t}\n\treturn \"ERROR\"\n}",
"func (tb *TimeBucket) Max() int64 { return tb.max }",
"func assignBidId() (key int) {\n\tvar highestKey int\n\tfor _, bid := range bids {\n\t\tif bid.Id > highestKey {\n\t\t\thighestKey = bid.Id\n\t\t}\n\t}\n\treturn highestKey + 1\n}",
"func (this *AllOne) GetMaxKey() string {\n\tif this.tail.pre != this.head {\n\t\tfor k := range this.tail.pre.keys {\n\t\t\treturn k\n\t\t}\n\t}\n\treturn \"\"\n}",
"func (n *Node) Max() int {\n\tif n.Right == nil {\n\t\treturn n.Key\n\t}\n\n\treturn n.Right.Max()\n}",
"func (q *SimpleQueue) getCurrentMaxExtID(source string) (extID int64, ok bool) {\n\tq.mxExt.DoGlobal(func() {\n\t\tif q.lastExtID != nil {\n\t\t\textID, ok = q.lastExtID[source]\n\t\t}\n\t})\n\n\treturn extID, ok\n}",
"func (px *Paxos) Max() int {\n\t// Your code here.\n\treturn px.max_seq\n}",
"func (px *Paxos) Max() int {\n\t// Your code here.\n\treturn px.max\n}",
"func (m *ItemsMutator) MaxItems(v int) *ItemsMutator {\n\tm.proxy.maxItems = &v\n\treturn m\n}",
"func (list Int64Set) Max() (result int64) {\n\treturn list.MaxBy(func(a int64, b int64) bool {\n\t\treturn a < b\n\t})\n}",
"func (u *UserStories) GetMaxReadID() (value int, ok bool) {\n\tif u == nil {\n\t\treturn\n\t}\n\tif !u.Flags.Has(0) {\n\t\treturn value, false\n\t}\n\treturn u.MaxReadID, true\n}",
"func (orderTree *OrderTree) MaxPrice() *big.Int {\n\tif orderTree.Depth() > 0 {\n\t\tif bytes, found := orderTree.PriceTree.GetMax(); found {\n\t\t\titem := orderTree.getOrderListItem(bytes)\n\t\t\tif item != nil {\n\t\t\t\treturn CloneBigInt(item.Price)\n\t\t\t}\n\t\t}\n\t}\n\treturn Zero()\n}",
"func (px *Paxos) Max() int {\n\t// Your code here.\n\tmax := -1\n\thead := px.prepareStatus.Head\n\tfor head.Next != nil {\n\t\tstate := head.Next\n\t\tif max < state.Seq {\n\t\t\tmax = state.Seq\n\t\t}\n\t\thead = head.Next\n\t}\n\treturn max\n}",
"func itemIDToWeaponID(itemID int) int {\n\tfor wid, w := range defs.Weapons {\n\t\tif itemID == w.ItemID {\n\t\t\treturn wid\n\t\t}\n\t}\n\n\treturn -1\n}",
"func Imax(a, b int) int {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}",
"func Imax(a, b int) int {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}",
"func Max(key []byte, nodes []*memberlist.Node) (max *memberlist.Node) {\n\tmaxValue := big.NewInt(0)\n\n\tCompute(key, nodes, func(node *memberlist.Node, bi *big.Int) {\n\t\tif bi.Cmp(maxValue) == 1 {\n\t\t\tmaxValue = bi\n\t\t\tmax = node\n\t\t}\n\t})\n\n\treturn max\n}",
"func (s *OverflowShelf) PopMax(temp string) (maxOrderID uuid.UUID, found bool, err error) {\n\tvar orders map[uuid.UUID]float32\n\torders, err = s.getOrders(temp)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(orders) == 0 {\n\t\t// no orders for temp, return not found.\n\t\treturn\n\t}\n\n\tmaxDecayRate := float32(-1)\n\n\t// O(n) scan is quick for small sets. When size gets bigger,\n\t// use priority queues instead of slices to Store the orders.\n\tfor orderID, decayRate := range orders {\n\t\tif decayRate > maxDecayRate {\n\t\t\tmaxDecayRate = decayRate\n\t\t\tmaxOrderID = orderID\n\t\t\tfound = true\n\t\t}\n\t}\n\n\tdelete(orders, maxOrderID)\n\treturn\n}",
"func (this *ExDomain) GetMax() int {\n\tif this.IsEmpty() {\n\t\tlogger.If(\"Domain %s\", *this)\n\t\tdebug.PrintStack()\n\t\tpanic(\"GetMax on empty domain\")\n\t}\n\treturn this.Max\n}",
"func (m *MailTips) GetMaxMessageSize()(*int32) {\n val, err := m.GetBackingStore().Get(\"maxMessageSize\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*int32)\n }\n return nil\n}",
"func (px *Paxos) Max() int {\n\t//DPrintf(\"Max()\\n\")\n\treturn px.nseq\n}",
"func (h *MaxHeap) Max() int {\n\tif h.IsEmpty() {\n\t\treturn -1\n\t}\n\treturn h.data[1]\n}",
"func (u *UserStories) SetMaxReadID(value int) {\n\tu.Flags.Set(0)\n\tu.MaxReadID = value\n}",
"func (d *Dropping) MaxSize() int {\n\treturn d.maxSize\n}",
"func (px *Paxos) Max() int {\n\t// Your code here.\n\t// probably just return the max seq number from local state?\n\n\tpx.mu.Lock()\n\tdefer px.mu.Unlock()\n\treturn px.maxSeq\n}",
"func (mw *MaxWriter) Max() int {\n\treturn mw.max\n}",
"func GetMaxIndexKey(shardID uint64, key []byte) []byte {\n\tkey = getKeySlice(key, idKeyLength)\n\treturn getIDKey(maxIndexSuffix, shardID, key)\n}",
"func (px *Paxos) Max() int {\n\t// Your code here.\n\treturn px.maxSeq\n}",
"func (m *MessageReplies) GetReadMaxID() (value int, ok bool) {\n\tif m == nil {\n\t\treturn\n\t}\n\tif !m.Flags.Has(3) {\n\t\treturn value, false\n\t}\n\treturn m.ReadMaxID, true\n}",
"func max(d dataSet) int {\n\treturn d[len(d)-1]\n}",
"func (r *SlidingWindow) Max() int {return r.base + len(r.values) - 1}",
"func maximize() int64 {\n\tmax := 0\n\tmaxItem := int64(0)\n\t// var cache Cache\n\n\t// The larger the cache, the faster it is. Even without a\n\t// cache, it's only around a second on a modern machine, so\n\t// not that big of a deal.\n\t// cache.init(10000)\n\n\tfor x := int64(1); x < 1000000; x++ {\n\t\tcount := collatz2(x)\n\t\t// count := cache.chainLen(x)\n\t\tif count > max {\n\t\t\tmax = count\n\t\t\tmaxItem = x\n\t\t}\n\t}\n\tfmt.Printf(\"%d, %d\\n\", maxItem, max)\n\treturn maxItem\n}",
"func (me XsdGoPkgHasElems_MaxHeight) MaxHeightDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"0\")\r\n\treturn *x\r\n}",
"func (j *DSRocketchat) ItemID(item interface{}) string {\n\tid, _ := Dig(item, []string{\"_id\"}, true, false)\n\treturn id.(string)\n}",
"func Max(field string) *AggrItem {\n\treturn NewAggrItem(field, AggrMax, field)\n}",
"func (t *Indexed) Max(i, j int) int {\n\treturn -1\n}",
"func GetMaxDroplets(client *godo.Client, ctx context.Context) int {\n\tacc, _, _ := client.Account.Get(ctx)\n\treturn acc.DropletLimit\n}",
"func (me XsdGoPkgHasElem_MaxHeight) MaxHeightDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"0\")\r\n\treturn *x\r\n}",
"func max(x int) int {\n\treturn 40 + x\n}",
"func ItemID(name string) (string, error) {\n\t// the id of gold is the special string\n\tif name == ItemIDGold {\n\t\treturn name, nil\n\t}\n\n\t// if we have an entry for that item, use it\n\tif item, ok := itemIDs[strings.ToLower(name)]; ok {\n\t\treturn item, nil\n\t}\n\n\t// there was no entry for that name\n\treturn \"\", errors.New(\"I don't recognize this item: \" + name)\n}",
"func (i *Item) ID() ID {\n\treturn i.id\n}",
"func (it *Item) FeedID() int64 { return it.feedID }",
"func (p *Policy) getMaxTransactionsPerBlock(ic *interop.Context, _ []stackitem.Item) stackitem.Item {\n\treturn stackitem.NewBigInteger(big.NewInt(int64(p.GetMaxTransactionsPerBlockInternal(ic.DAO))))\n}",
"func (p *Policy) GetMaxBlockSizeInternal(dao dao.DAO) uint32 {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\tif p.isValid {\n\t\treturn p.maxBlockSize\n\t}\n\treturn p.getUint32WithKey(dao, maxBlockSizeKey)\n}",
"func (prices *pricesVal) MaxPrice() int {\n\treturn prices.maxPrice\n}",
"func MaxI(n ...int) int {\n\tmax := math.MinInt64\n\tfor _, v := range n {\n\t\tif v > max {\n\t\t\tmax = v\n\t\t}\n\t}\n\treturn max\n}",
"func Max(Len int, Less func(i, j int) bool) int {\n\tmx := 0\n\tfor i := 1; i < Len; i++ {\n\t\tif Less(mx, i) {\n\t\t\tmx = i\n\t\t}\n\t}\n\treturn mx\n}",
"func GetMaxBlockFuel() int64 {\r\n\treturn converter.StrToInt64(SysString(MaxBlockFuel))\r\n}",
"func (b *JSONSchemaPropApplyConfiguration) WithMaxItems(value int64) *JSONSchemaPropApplyConfiguration {\n\tb.MaxItems = &value\n\treturn b\n}",
"func opI64Max(expr *CXExpression, fp int) {\n\tinpV0 := ReadI64(fp, expr.Inputs[0])\n\tinpV1 := ReadI64(fp, expr.Inputs[1])\n\tif inpV1 > inpV0 {\n\t\tinpV0 = inpV1\n\t}\n\tWriteI64(GetOffset_i64(fp, expr.Outputs[0]), inpV0)\n}",
"func (this *AllOne) GetMaxKey() string {\n if len(this.m) == 0{\n return \"\"\n }\n return this.g[this.max][1].k\n}",
"func (px *Paxos) Max() int {\n\tpx.RLock()\n\tdefer px.RUnlock()\n\treturn px.maxSeq\n}",
"func (self *Limits) Maximum() uint32 {\n\treturn uint32(self.inner().max)\n}",
"func (v *VEBTree) Maximum() int {\n\treturn v.max\n}",
"func (k Keeper) GetMaxAmount(ctx sdk.Context) sdk.Int {\n\tparams := k.GetParams(ctx)\n\treturn params.MaxAmount\n}",
"func MaxInt(x, y int) int {\n\tif x > y {\n\t\treturn x\n\t} else {\n\t\treturn y\n\t}\n}",
"func (c *Counter) Max() int64 { return c.max }",
"func (px *Paxos) Max() int {\n\treturn px.maxSeq\n}",
"func (sm *StackMax) Max() (int, error) {\n\tif sm.Empty() {\n\t\treturn -1, ErrstackEmpty\n\t}\n\treturn sm.maxer[sm.length-1], nil\n}",
"func (p *Policy) setMaxBlockSize(ic *interop.Context, args []stackitem.Item) stackitem.Item {\n\tvalue := uint32(toBigInt(args[0]).Int64())\n\tif value > payload.MaxSize {\n\t\tpanic(fmt.Errorf(\"MaxBlockSize cannot be more than the maximum payload size = %d\", payload.MaxSize))\n\t}\n\tok, err := p.checkValidators(ic)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif !ok {\n\t\treturn stackitem.NewBool(false)\n\t}\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\terr = p.setUint32WithKey(ic.DAO, maxBlockSizeKey, value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tp.isValid = false\n\treturn stackitem.NewBool(true)\n}",
"func MaxInt(a, b int64) int64 {\n\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}",
"func MaxInt(x, y int) int {\n\tif x > y {\n\t\treturn x\n\t}\n\treturn y\n}",
"func maxI(a int, b int) (res int) {\n\tif a < b {\n\t\tres = b\n\t} else {\n\t\tres = a\n\t}\n\n\treturn\n}",
"func (idx *Tree) MaxKey(searchPrefix []byte) (v uint64, ok bool) {\n\traw, _ := idx.partialSearch(searchPrefix)\n\tif raw == 0 {\n\t\treturn 0, false\n\t}\n\tif isLeaf(raw) {\n\t\treturn getLeafValue(raw), true\n\t}\n\t// now find the max\nsearchLoop:\n\tfor {\n\t\t_, node, count, prefixLen := explodeNode(raw)\n\t\tblock := int(node >> blockSlotsShift)\n\t\toffset := int(node & blockSlotsOffsetMask)\n\t\tdata := idx.blocks[block].data[offset:]\n\t\tvar prefixSlots int\n\t\tif prefixLen > 0 {\n\t\t\tif prefixLen == 255 {\n\t\t\t\tprefixLen = int(data[0])\n\t\t\t\tprefixSlots = (prefixLen + 15) >> 3\n\t\t\t} else {\n\t\t\t\tprefixSlots = (prefixLen + 7) >> 3\n\t\t\t}\n\t\t\tdata = data[prefixSlots:]\n\t\t}\n\t\tif count >= fullAllocFrom {\n\t\t\t// find max, iterate from top\n\t\t\tfor k := 255; k >= 0; k-- {\n\t\t\t\ta := atomic.LoadUint64(&data[k])\n\t\t\t\tif a != 0 {\n\t\t\t\t\tif isLeaf(a) {\n\t\t\t\t\t\treturn getLeafValue(a), true\n\t\t\t\t\t}\n\t\t\t\t\traw = a\n\t\t\t\t\tcontinue searchLoop\n\t\t\t\t}\n\t\t\t}\n\t\t\t// BUG: this might happen if all children in the node has been deleted, since we currently don't shrink node-256. we should go back in the tree!\n\t\t\treturn 0, false\n\t\t}\n\t\t// load the last child (since they are ordered)\n\t\ta := atomic.LoadUint64(&data[count-1])\n\t\tif isLeaf(a) {\n\t\t\treturn getLeafValue(a), true\n\t\t}\n\t\traw = a\n\t}\n}",
"func MaxInt(x, y int) int {\n\tif x > y {\n\t\treturn x\n\t}\n\n\treturn y\n}",
"func getMax() int {\n\n\t// These are arbitrary numbers\n\tdef := 100\n\tmin := 1\n\n\treader := r.NewReader(os.Stdin)\n\n\tfor {\n\t\tf.Printf(\"Please give a max value (>= %v) and press ENTER\\n\", min)\n\t\tf.Printf(\"Enter for default (Default = %v)\\n\", def)\n\n\t\ttext, _ := reader.ReadString('\\n')\n\t\ttext = ss.Replace(text, \"\\n\", \"\", -1)\n\n\t\tif text == \"\" {\n\t\t\treturn def\n\t\t}\n\n\t\tiTxt, err := scvt.Atoi(text)\n\t\tif iTxt < min {\n\t\t\tf.Printf(\"%v < %v, try again\\n\", iTxt, min)\n\t\t} else if err == nil {\n\t\t\treturn iTxt\n\t\t}\n\n\t\tf.Println(\"Please give a valid integer or no value for default\")\n\n\t}\n}"
] | [
"0.6239351",
"0.62393034",
"0.6184761",
"0.6177771",
"0.61411256",
"0.5951838",
"0.59276485",
"0.5924461",
"0.5894317",
"0.5864182",
"0.58507824",
"0.5841664",
"0.5819029",
"0.57879007",
"0.5785859",
"0.5710718",
"0.5693274",
"0.5676167",
"0.56462955",
"0.56358624",
"0.55982965",
"0.5593892",
"0.5591572",
"0.5573422",
"0.5564279",
"0.5536613",
"0.55114186",
"0.5504773",
"0.5473257",
"0.54713935",
"0.5458459",
"0.5458459",
"0.54558134",
"0.54393303",
"0.5425836",
"0.5418179",
"0.54168564",
"0.54056025",
"0.5395047",
"0.5381739",
"0.53810465",
"0.53650147",
"0.53597313",
"0.53420585",
"0.5339871",
"0.53321105",
"0.530738",
"0.53048915",
"0.52987784",
"0.52987784",
"0.5298236",
"0.528819",
"0.52871436",
"0.52801645",
"0.52755356",
"0.52723783",
"0.52706367",
"0.52702034",
"0.5264941",
"0.52644604",
"0.5258735",
"0.525543",
"0.5248233",
"0.5248201",
"0.524389",
"0.52431494",
"0.52378887",
"0.5235779",
"0.5230477",
"0.5230243",
"0.52297133",
"0.5217335",
"0.5214984",
"0.5210735",
"0.519585",
"0.5191371",
"0.517413",
"0.51733595",
"0.517309",
"0.5171061",
"0.5158012",
"0.5149305",
"0.5145765",
"0.5144301",
"0.51434046",
"0.5141361",
"0.5141007",
"0.5133633",
"0.51256275",
"0.51209056",
"0.51178896",
"0.511402",
"0.5112235",
"0.51056516",
"0.51036346",
"0.50854677",
"0.5082091",
"0.50800127",
"0.5077536",
"0.5074298"
] | 0.86796016 | 0 |
Deprecated: Use CodeRequest.ProtoReflect.Descriptor instead. | Устарело: используйте CodeRequest.ProtoReflect.Descriptor вместо этого. | func (*CodeRequest) Descriptor() ([]byte, []int) {
return file_helloworld_helloworld_proto_rawDescGZIP(), []int{1}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (*CodeLensRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{163}\n}",
"func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}",
"func (*ModifyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{10}\n}",
"func (*CodeActionRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{157}\n}",
"func (*CheckCodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_verifycode_pb_request_proto_rawDescGZIP(), []int{2}\n}",
"func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}",
"func (*GetForgetPasswordCodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_sms_sms_proto_rawDescGZIP(), []int{2}\n}",
"func (*GetServiceRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{6}\n}",
"func (*GetPeerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{6}\n}",
"func (*CMsgGCPlayerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{117}\n}",
"func (*UpdatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{9}\n}",
"func (*IssueCodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_verifycode_pb_request_proto_rawDescGZIP(), []int{0}\n}",
"func (*DescribePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{6}\n}",
"func (*PatchAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{4}\n}",
"func (*ChangeRequest) Descriptor() ([]byte, []int) {\n\treturn file_authorization_proto_rawDescGZIP(), []int{0}\n}",
"func (*DiagnoseRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{16}\n}",
"func (*CodeLensResolveRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{33}\n}",
"func (*CMsgClientToGCPlayerStatsRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{143}\n}",
"func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_comments_proto_rawDescGZIP(), []int{3}\n}",
"func (*DebugInstanceRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{19}\n}",
"func (*CreateAlterRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{1}\n}",
"func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{1}\n}",
"func (*DescribeRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{4}\n}",
"func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message {\n\tswitch d := d.(type) {\n\tcase protoreflect.FileDescriptor:\n\t\treturn ProtoFromFileDescriptor(d)\n\tcase protoreflect.MessageDescriptor:\n\t\treturn ProtoFromMessageDescriptor(d)\n\tcase protoreflect.FieldDescriptor:\n\t\treturn ProtoFromFieldDescriptor(d)\n\tcase protoreflect.OneofDescriptor:\n\t\treturn ProtoFromOneofDescriptor(d)\n\tcase protoreflect.EnumDescriptor:\n\t\treturn ProtoFromEnumDescriptor(d)\n\tcase protoreflect.EnumValueDescriptor:\n\t\treturn ProtoFromEnumValueDescriptor(d)\n\tcase protoreflect.ServiceDescriptor:\n\t\treturn ProtoFromServiceDescriptor(d)\n\tcase protoreflect.MethodDescriptor:\n\t\treturn ProtoFromMethodDescriptor(d)\n\tdefault:\n\t\t// WTF??\n\t\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\t\treturn res.AsProto()\n\t\t}\n\t\treturn nil\n\t}\n}",
"func (*ChaincodeRequestMessage) Descriptor() ([]byte, []int) {\n\treturn file_fpc_fpc_proto_rawDescGZIP(), []int{6}\n}",
"func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{2}\n}",
"func (*PaqueteRequest) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{2}\n}",
"func (*PatchKeysRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{74}\n}",
"func (*ValidateRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{17}\n}",
"func (*WebhookRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{0}\n}",
"func (*CMsgProfileRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{275}\n}",
"func (*PatchConceptLanguagesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{50}\n}",
"func (*FeedbackRequest) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{10}\n}",
"func (*TelemetryRequest) Descriptor() ([]byte, []int) {\n\treturn file_interservice_license_control_license_control_proto_rawDescGZIP(), []int{11}\n}",
"func (*OriginalDetectIntentRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListenRequest) Descriptor() ([]byte, []int) {\n\treturn file_faultinjector_proto_rawDescGZIP(), []int{8}\n}",
"func (*ModelControlRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_service_proto_rawDescGZIP(), []int{4}\n}",
"func (*AddPermissionToRoleRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{7}\n}",
"func (*QueryPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_permission_pb_request_proto_rawDescGZIP(), []int{0}\n}",
"func (*CMsgClientToGCGetTicketCodesRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{359}\n}",
"func (*LogMessageRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{59}\n}",
"func (*GenerateMessageRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{0}\n}",
"func (*AddRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_calculator_proto_calc_proto_rawDescGZIP(), []int{0}\n}",
"func (*ChangeRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{2}\n}",
"func (*ChangeRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{2}\n}",
"func (*WatchRequestTypeProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{25}\n}",
"func (*ValidateClientCredentialRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_micro_pb_request_proto_rawDescGZIP(), []int{0}\n}",
"func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{5}\n}",
"func (*LanguageDetectorRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_language_proto_rawDescGZIP(), []int{1}\n}",
"func (*CancelDelegationTokenRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{7}\n}",
"func (*GetVersionRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{9}\n}",
"func (*ProofRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{35}\n}",
"func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_pkg_smgrpc_smgrpc_proto_rawDescGZIP(), []int{0}\n}",
"func (*TypeDefinitionRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{133}\n}",
"func (*ImplementationRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{137}\n}",
"func (*CodeLens) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{164}\n}",
"func (*Code) Descriptor() ([]byte, []int) {\n\treturn file_internal_pkg_pb_ports_proto_rawDescGZIP(), []int{2}\n}",
"func (*UpdateDomainMappingRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{40}\n}",
"func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{7}\n}",
"func (x *fastReflection_AddressStringToBytesRequest) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_AddressStringToBytesRequest\n}",
"func (*MyScopesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{109}\n}",
"func (*CheckPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_permission_pb_request_proto_rawDescGZIP(), []int{2}\n}",
"func (*GeneratedRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_auth_proto_rawDescGZIP(), []int{0}\n}",
"func (*GetStatusCodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{26}\n}",
"func (*QueryPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{5}\n}",
"func (*GetDelegationTokenRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{3}\n}",
"func (*DeleteMicroRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_micro_pb_request_proto_rawDescGZIP(), []int{4}\n}",
"func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_example_example_proto_rawDescGZIP(), []int{1}\n}",
"func (*ContractQueryRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{22}\n}",
"func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{12}\n}",
"func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{14}\n}",
"func (*DefinitionRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{129}\n}",
"func (*CleartextChaincodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_fpc_fpc_proto_rawDescGZIP(), []int{5}\n}",
"func (*RefreshRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{16}\n}",
"func (*QueryApprovedChaincodeDefinitionRequest) Descriptor() ([]byte, []int) {\n\treturn file_lifecycle_proto_rawDescGZIP(), []int{1}\n}",
"func ProtoFromMethodDescriptor(d protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto {\n\ttype canProto interface {\n\t\tMethodDescriptorProto() *descriptorpb.MethodDescriptorProto\n\t}\n\tif res, ok := d.(canProto); ok {\n\t\treturn res.MethodDescriptorProto()\n\t}\n\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\tif md, ok := res.AsProto().(*descriptorpb.MethodDescriptorProto); ok {\n\t\t\treturn md\n\t\t}\n\t}\n\treturn protodesc.ToMethodDescriptorProto(d)\n}",
"func (*UpdateTelemetryReportedRequest) Descriptor() ([]byte, []int) {\n\treturn file_external_applications_applications_proto_rawDescGZIP(), []int{29}\n}",
"func (*CMsgLoadedRequest) Descriptor() ([]byte, []int) {\n\treturn file_steam_htmlmessages_proto_rawDescGZIP(), []int{46}\n}",
"func (*CheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_api_servicecontrol_v1_service_controller_proto_rawDescGZIP(), []int{0}\n}",
"func (*GetCollectorRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{163}\n}",
"func (*FindWebhookCallRequest) Descriptor() ([]byte, []int) {\n\treturn file_events_Event_proto_rawDescGZIP(), []int{9}\n}",
"func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_Trd_ModifyOrder_proto_rawDescGZIP(), []int{2}\n}",
"func (*DescribeMicroRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_micro_pb_request_proto_rawDescGZIP(), []int{3}\n}",
"func (x *fastReflection_QueryParamsRequest) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_QueryParamsRequest\n}",
"func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{7}\n}",
"func (*CMsgSocialFeedRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{304}\n}",
"func (*CalculatorRequest) Descriptor() ([]byte, []int) {\n\treturn file_basicpb_unary_api_proto_rawDescGZIP(), []int{4}\n}",
"func (*FindWebhookCallRequest) Descriptor() ([]byte, []int) {\n\treturn file_uac_Event_proto_rawDescGZIP(), []int{7}\n}",
"func (*AddProducerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{2}\n}",
"func (*DeclarationRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{125}\n}",
"func (*RevokeJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{20}\n}",
"func (*RenameRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{194}\n}",
"func (*DecodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_videoservice_proto_rawDescGZIP(), []int{0}\n}",
"func (*AddRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_calculate_proto_rawDescGZIP(), []int{3}\n}",
"func (*MetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{11}\n}",
"func (*ReferenceRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{141}\n}",
"func (*SendRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{5}\n}",
"func (*Decl) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2}\n}",
"func (*SetTraceRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{8}\n}",
"func (*NetworkRequest) Descriptor() ([]byte, []int) {\n\treturn file_packetbroker_api_iam_v1_service_proto_rawDescGZIP(), []int{6}\n}"
] | [
"0.72352815",
"0.71125454",
"0.7089678",
"0.7086867",
"0.7050585",
"0.7002672",
"0.69934523",
"0.6932713",
"0.69018894",
"0.68992066",
"0.6889897",
"0.68843466",
"0.68616295",
"0.6860479",
"0.68583477",
"0.68542755",
"0.68515146",
"0.6844837",
"0.68435585",
"0.6838578",
"0.68279165",
"0.68257856",
"0.68230885",
"0.6814507",
"0.68032366",
"0.6799875",
"0.6798857",
"0.6787944",
"0.67836297",
"0.677801",
"0.67777133",
"0.67767936",
"0.6776047",
"0.67755157",
"0.67739105",
"0.67643535",
"0.67641044",
"0.67637247",
"0.676336",
"0.67589575",
"0.6750674",
"0.6748732",
"0.67457783",
"0.6737806",
"0.6737806",
"0.67358387",
"0.67310023",
"0.67291534",
"0.67226034",
"0.67149603",
"0.6712012",
"0.6711523",
"0.6711229",
"0.6711218",
"0.67094207",
"0.6708658",
"0.6707676",
"0.67048305",
"0.6702518",
"0.6702143",
"0.670187",
"0.6701216",
"0.66974145",
"0.6693809",
"0.66901326",
"0.6689402",
"0.6686492",
"0.66860443",
"0.6686017",
"0.66831654",
"0.668153",
"0.6679004",
"0.66787153",
"0.6677128",
"0.66711944",
"0.6670137",
"0.66698956",
"0.66693234",
"0.6667155",
"0.66627914",
"0.6662002",
"0.6661765",
"0.6659236",
"0.6657915",
"0.6655817",
"0.66501164",
"0.66496235",
"0.66494566",
"0.66485864",
"0.6647491",
"0.6647178",
"0.6644239",
"0.6643388",
"0.66410524",
"0.6640359",
"0.6636183",
"0.6632871",
"0.66271204",
"0.6624793",
"0.66244084"
] | 0.72652876 | 0 |
Deprecated: Use PaqueteRequest.ProtoReflect.Descriptor instead. | Устарело: используйте PaqueteRequest.ProtoReflect.Descriptor вместо этого. | func (*PaqueteRequest) Descriptor() ([]byte, []int) {
return file_helloworld_helloworld_proto_rawDescGZIP(), []int{2}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (*ModifyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{10}\n}",
"func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}",
"func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}",
"func (*UpdatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{9}\n}",
"func (*CreateAlterRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{1}\n}",
"func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{7}\n}",
"func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{7}\n}",
"func (*DescribePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{6}\n}",
"func (*GetPeerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{6}\n}",
"func (x *fastReflection_QueryParamsRequest) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_QueryParamsRequest\n}",
"func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{14}\n}",
"func (*QueryPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_permission_pb_request_proto_rawDescGZIP(), []int{0}\n}",
"func (*RequestPresentationRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{0}\n}",
"func (*PatchAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{4}\n}",
"func (*BatchUpdateReferencesRequest_Request) Descriptor() ([]byte, []int) {\n\treturn file_pkg_proto_icas_icas_proto_rawDescGZIP(), []int{1, 0}\n}",
"func (*WebhookRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListenRequest) Descriptor() ([]byte, []int) {\n\treturn file_faultinjector_proto_rawDescGZIP(), []int{8}\n}",
"func (*CodeLensRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{163}\n}",
"func (*CMsgClientToGCPlayerStatsRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{143}\n}",
"func (*DiagnoseRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{16}\n}",
"func (*GetServiceRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{6}\n}",
"func (*FeedbackRequest) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{10}\n}",
"func (*CreatePermssionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{1}\n}",
"func (*LogMessageRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{59}\n}",
"func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_comments_proto_rawDescGZIP(), []int{3}\n}",
"func (*AddPermissionToRoleRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{7}\n}",
"func (*DescribeRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{4}\n}",
"func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{5}\n}",
"func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{3}\n}",
"func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_ocis_messages_policies_v0_policies_proto_rawDescGZIP(), []int{2}\n}",
"func (*AddProducerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{2}\n}",
"func (*AddRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_calculator_proto_calc_proto_rawDescGZIP(), []int{0}\n}",
"func (*ValidateRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{17}\n}",
"func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{10}\n}",
"func (*CheckPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_permission_pb_request_proto_rawDescGZIP(), []int{2}\n}",
"func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_example_example_proto_rawDescGZIP(), []int{1}\n}",
"func (*CMsgGCPlayerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{117}\n}",
"func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_helloworld_helloworld_proto_rawDescGZIP(), []int{2}\n}",
"func (*CMsgProfileRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{275}\n}",
"func (*ProofRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{35}\n}",
"func (*GenerateMessageRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{0}\n}",
"func (*LanguageDetectorRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_language_proto_rawDescGZIP(), []int{1}\n}",
"func (*QueryPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{5}\n}",
"func (*DeleteMicroRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_micro_pb_request_proto_rawDescGZIP(), []int{4}\n}",
"func (*ChangeRequest) Descriptor() ([]byte, []int) {\n\treturn file_authorization_proto_rawDescGZIP(), []int{0}\n}",
"func (*PolicyRequest) Descriptor() ([]byte, []int) {\n\treturn file_policypb_policy_proto_rawDescGZIP(), []int{0}\n}",
"func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_Trd_ModifyOrder_proto_rawDescGZIP(), []int{2}\n}",
"func (*PatchKeysRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{74}\n}",
"func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{12}\n}",
"func (*UpdateConversationRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{8}\n}",
"func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_pkg_smgrpc_smgrpc_proto_rawDescGZIP(), []int{0}\n}",
"func (*CalculatorRequest) Descriptor() ([]byte, []int) {\n\treturn file_basicpb_unary_api_proto_rawDescGZIP(), []int{4}\n}",
"func (*SendRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{5}\n}",
"func (*BatchRequest_Request) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_buildbucket_proto_builds_service_proto_rawDescGZIP(), []int{3, 0}\n}",
"func (*ModelControlRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_service_proto_rawDescGZIP(), []int{4}\n}",
"func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message {\n\tswitch d := d.(type) {\n\tcase protoreflect.FileDescriptor:\n\t\treturn ProtoFromFileDescriptor(d)\n\tcase protoreflect.MessageDescriptor:\n\t\treturn ProtoFromMessageDescriptor(d)\n\tcase protoreflect.FieldDescriptor:\n\t\treturn ProtoFromFieldDescriptor(d)\n\tcase protoreflect.OneofDescriptor:\n\t\treturn ProtoFromOneofDescriptor(d)\n\tcase protoreflect.EnumDescriptor:\n\t\treturn ProtoFromEnumDescriptor(d)\n\tcase protoreflect.EnumValueDescriptor:\n\t\treturn ProtoFromEnumValueDescriptor(d)\n\tcase protoreflect.ServiceDescriptor:\n\t\treturn ProtoFromServiceDescriptor(d)\n\tcase protoreflect.MethodDescriptor:\n\t\treturn ProtoFromMethodDescriptor(d)\n\tdefault:\n\t\t// WTF??\n\t\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\t\treturn res.AsProto()\n\t\t}\n\t\treturn nil\n\t}\n}",
"func (*GeneratedRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_auth_proto_rawDescGZIP(), []int{0}\n}",
"func (*RelationshipRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{3}\n}",
"func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_newfindmaxpb_newfindmaxpb_proto_rawDescGZIP(), []int{0}\n}",
"func (*PrimeDecompRequest) Descriptor() ([]byte, []int) {\n\treturn file_calculatorpb_calculator_proto_rawDescGZIP(), []int{4}\n}",
"func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{12}\n}",
"func (*ProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{5}\n}",
"func (*DelRequest) Descriptor() ([]byte, []int) {\n\treturn file_patrol_proto_rawDescGZIP(), []int{8}\n}",
"func (*TelemRequest) Descriptor() ([]byte, []int) {\n\treturn file_core_services_synchronization_telem_telem_proto_rawDescGZIP(), []int{0}\n}",
"func (*ProxyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_sample_proto_rawDescGZIP(), []int{4}\n}",
"func (*AddRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_calculate_proto_rawDescGZIP(), []int{3}\n}",
"func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_recordwants_proto_rawDescGZIP(), []int{6}\n}",
"func (*ListenRequest) Descriptor() ([]byte, []int) {\n\treturn file_threads_proto_rawDescGZIP(), []int{46}\n}",
"func (*RevokeJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{20}\n}",
"func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_example_proto_rawDescGZIP(), []int{1}\n}",
"func (*PortRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_api_proto_rawDescGZIP(), []int{1}\n}",
"func (*GetSomesRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{5}\n}",
"func (*CMsgLoadedRequest) Descriptor() ([]byte, []int) {\n\treturn file_steam_htmlmessages_proto_rawDescGZIP(), []int{46}\n}",
"func (*TodoRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_todolist_proto_rawDescGZIP(), []int{0}\n}",
"func (*MoneyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_swap_swap_proto_rawDescGZIP(), []int{0}\n}",
"func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{10}\n}",
"func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_todo_proto_rawDescGZIP(), []int{5}\n}",
"func (*EndpointRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{13}\n}",
"func (*RefreshRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{16}\n}",
"func (*OriginalDetectIntentRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dialogflow_v2beta1_webhook_proto_rawDescGZIP(), []int{2}\n}",
"func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_internal_services_profile_proto_profile_proto_rawDescGZIP(), []int{0}\n}",
"func (*DeleteCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{162}\n}",
"func (*ValidateClientCredentialRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_micro_pb_request_proto_rawDescGZIP(), []int{0}\n}",
"func (*GetCollectorRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{163}\n}",
"func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_example_proto_rawDescGZIP(), []int{0}\n}",
"func (*ShowMessageRequestRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{58}\n}",
"func (*MetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{11}\n}",
"func (*ChangeRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{2}\n}",
"func (*ChangeRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{2}\n}",
"func (*MeshCertificateRequest) Descriptor() ([]byte, []int) {\n\treturn file_security_proto_providers_google_meshca_proto_rawDescGZIP(), []int{0}\n}",
"func (*WatchProvisioningApprovalRequestRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_devices_proto_v1alpha_provisioning_approval_request_service_proto_rawDescGZIP(), []int{5}\n}",
"func (*CreateRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_proto_todo_proto_rawDescGZIP(), []int{1}\n}",
"func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_task_v1_task_proto_rawDescGZIP(), []int{7}\n}",
"func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{0}\n}",
"func (*CMsgSocialFeedRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{304}\n}",
"func (*DescribeMicroRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_micro_pb_request_proto_rawDescGZIP(), []int{3}\n}",
"func (*CreateRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{2}\n}",
"func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{14}\n}",
"func (*MsgRequest) Descriptor() ([]byte, []int) {\n\treturn file_msg_proto_rawDescGZIP(), []int{0}\n}",
"func (*RecentMessagesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{16}\n}"
] | [
"0.72573286",
"0.7218847",
"0.71748",
"0.7132119",
"0.7117333",
"0.71093345",
"0.70960057",
"0.70482236",
"0.7047904",
"0.7028092",
"0.70185226",
"0.7012674",
"0.70116967",
"0.70067143",
"0.70019865",
"0.69961846",
"0.6985266",
"0.6979804",
"0.6976487",
"0.6974723",
"0.69741064",
"0.6973554",
"0.6972541",
"0.6971797",
"0.6971399",
"0.6967375",
"0.6963114",
"0.6949535",
"0.69419956",
"0.69402146",
"0.6934513",
"0.6933217",
"0.693153",
"0.6929533",
"0.6925348",
"0.6924573",
"0.69213367",
"0.69203436",
"0.6919876",
"0.69178575",
"0.6916834",
"0.6916517",
"0.6914249",
"0.6912503",
"0.69031984",
"0.69000316",
"0.68958426",
"0.68944836",
"0.6892358",
"0.68910164",
"0.689093",
"0.6887534",
"0.6887088",
"0.6882339",
"0.6880942",
"0.68730277",
"0.6869884",
"0.68691534",
"0.68672127",
"0.6866304",
"0.68656546",
"0.68648106",
"0.6858998",
"0.6856258",
"0.6854494",
"0.6851954",
"0.6849722",
"0.68480885",
"0.6840764",
"0.6840179",
"0.683758",
"0.6837004",
"0.6836577",
"0.6835367",
"0.68333775",
"0.6832741",
"0.68285406",
"0.6827722",
"0.68237585",
"0.68232226",
"0.6820234",
"0.68144834",
"0.6814136",
"0.6813459",
"0.6808796",
"0.6803732",
"0.68027574",
"0.68022007",
"0.68022007",
"0.6802045",
"0.67972445",
"0.6795776",
"0.67957264",
"0.67949766",
"0.67942584",
"0.6793253",
"0.6791309",
"0.67887735",
"0.6788652",
"0.67883795"
] | 0.7264457 | 0 |
NewGetPlatformsParams creates a new GetPlatformsParams object with the default values initialized. | NewGetPlatformsParams создает новый объект GetPlatformsParams с инициализацией значений по умолчанию. | func NewGetPlatformsParams() *GetPlatformsParams {
var ()
return &GetPlatformsParams{
timeout: cr.DefaultTimeout,
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func NewGetPlatformsParamsWithTimeout(timeout time.Duration) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetPlatformsParamsWithHTTPClient(client *http.Client) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *GetPlatformsParams) WithTimeout(timeout time.Duration) *GetPlatformsParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewGetOperatingSystemsParams() *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *GetPlatformsParams) WithContext(ctx context.Context) *GetPlatformsParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func (o *GetPlatformsParams) WithHTTPClient(client *http.Client) *GetPlatformsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *GetPlatformsParams) WithExtended(extended *bool) *GetPlatformsParams {\n\to.SetExtended(extended)\n\treturn o\n}",
"func NewGetPlatformsParamsWithContext(ctx context.Context) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewPcloudSystempoolsGetParams() *PcloudSystempoolsGetParams {\n\tvar ()\n\treturn &PcloudSystempoolsGetParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetParams() *GetParams {\n\tvar (\n\t\tdeviceOSDefault = string(\"Android 9\")\n\t\tsendToEmailDefault = string(\"no\")\n\t)\n\treturn &GetParams{\n\t\tDeviceOS: &deviceOSDefault,\n\t\tSendToEmail: &sendToEmailDefault,\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewProviderParams() ProviderParams {\n\n\treturn ProviderParams{}\n}",
"func (o *GetMetricsParams) WithPlatforms(platforms *string) *GetMetricsParams {\n\to.SetPlatforms(platforms)\n\treturn o\n}",
"func NewGetCurrentGenerationParams() *GetCurrentGenerationParams {\n\treturn &GetCurrentGenerationParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (a *Client) GetPlatforms(params *GetPlatformsParams, opts ...ClientOption) (*GetPlatformsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetPlatformsParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"get-platforms\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/fwmgr/entities/platforms/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetPlatformsReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetPlatformsOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for get-platforms: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func NewGetOperatingSystemsParamsWithTimeout(timeout time.Duration) *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewOrgGetParams() *OrgGetParams {\n\tvar ()\n\treturn &OrgGetParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewParams() (Params, error) {\n\treturn Params{\n\t\tBaseParams: codec.BaseParams{\n\t\t\tKeyFrameInterval: 60,\n\t\t},\n\t}, nil\n}",
"func NewGetProcessorsParams() *GetProcessorsParams {\n\treturn &GetProcessorsParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewParams(defaultSendEnabled bool) Params {\n\treturn Params{\n\t\tSendEnabled: nil,\n\t\tDefaultSendEnabled: defaultSendEnabled,\n\t}\n}",
"func NewParams() *Params {\n\treturn new(Params)\n}",
"func NewPcloudNetworksGetallParams() *PcloudNetworksGetallParams {\n\treturn &PcloudNetworksGetallParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewParams(active bool, lps DistributionSchedules, dds DelegatorDistributionSchedules,\n\tmoneyMarkets MoneyMarkets, checkLtvIndexCount int) Params {\n\treturn Params{\n\t\tActive: active,\n\t\tLiquidityProviderSchedules: lps,\n\t\tDelegatorDistributionSchedules: dds,\n\t\tMoneyMarkets: moneyMarkets,\n\t\tCheckLtvIndexCount: checkLtvIndexCount,\n\t}\n}",
"func NewGetRepositoriesParams() *GetRepositoriesParams {\n\tvar ()\n\treturn &GetRepositoriesParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewPublicWebLinkPlatformEstablishParams() *PublicWebLinkPlatformEstablishParams {\n\tvar ()\n\treturn &PublicWebLinkPlatformEstablishParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewParams(communityTax sdk.Dec, withdrawAddrEnabled bool) Params {\n\treturn Params{\n\t\tCommunityTax: communityTax,\n\t\tWithdrawAddrEnabled: withdrawAddrEnabled,\n\t}\n}",
"func NewGetTreeParams() *GetTreeParams {\n\tvar ()\n\treturn &GetTreeParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewParams() *Parameters {\n\treturn &Parameters{\n\t\tTokenType: \"transit\",\n\t\tTLSMode: 1,\n\t\tLeaderOnly: true,\n\t\tConnectTimeout: defaultConnectTimeout,\n\t\tReadTimeout: defaultReadTimeout,\n\t\tRetryCount: defaultRetries,\n\t\tRequireType: \"master\",\n\t}\n}",
"func NewPcloudSystempoolsGetParamsWithTimeout(timeout time.Duration) *PcloudSystempoolsGetParams {\n\tvar ()\n\treturn &PcloudSystempoolsGetParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetUIParams() *GetUIParams {\n\n\treturn &GetUIParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetSsoParams() *GetSsoParams {\n\tvar ()\n\treturn &GetSsoParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetClockParams() *GetClockParams {\n\treturn &GetClockParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetParams() *GetParams {\n\treturn &GetParams{}\n}",
"func NewGetBuildPropertiesParams() *GetBuildPropertiesParams {\n\tvar ()\n\treturn &GetBuildPropertiesParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetRoomsParams() *GetRoomsParams {\n\tvar ()\n\treturn &GetRoomsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *GetMetricsParams) SetPlatforms(platforms *string) {\n\to.Platforms = platforms\n}",
"func NewPcloudSapGetParams() *PcloudSapGetParams {\n\treturn &PcloudSapGetParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *PlatformsAllOfData) GetPlatforms() map[string]Platform {\n\tif o == nil {\n\t\tvar ret map[string]Platform\n\t\treturn ret\n\t}\n\n\treturn o.Platforms\n}",
"func NewGetScopeConfigurationParams() *GetScopeConfigurationParams {\n\tvar ()\n\treturn &GetScopeConfigurationParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetOutagesParams() *GetOutagesParams {\n\treturn &GetOutagesParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewParams(active bool, lps DistributionSchedules, dds DelegatorDistributionSchedules) Params {\n\treturn Params{\n\t\tActive: active,\n\t\tLiquidityProviderSchedules: lps,\n\t\tDelegatorDistributionSchedules: dds,\n\t}\n}",
"func NewGetBootstrapParams() *GetBootstrapParams {\n\tvar ()\n\treturn &GetBootstrapParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetOperatingSystemsParamsWithHTTPClient(client *http.Client) *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewPcloudSystempoolsGetParamsWithHTTPClient(client *http.Client) *PcloudSystempoolsGetParams {\n\tvar ()\n\treturn &PcloudSystempoolsGetParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetLogicalPortParams() *GetLogicalPortParams {\n\tvar ()\n\treturn &GetLogicalPortParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetClusterSupportedPlatforms(ctx *middleware.Context, handler GetClusterSupportedPlatformsHandler) *GetClusterSupportedPlatforms {\n\treturn &GetClusterSupportedPlatforms{Context: ctx, Handler: handler}\n}",
"func (o *GetPlatformsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Extended != nil {\n\n\t\t// query param extended\n\t\tvar qrExtended bool\n\t\tif o.Extended != nil {\n\t\t\tqrExtended = *o.Extended\n\t\t}\n\t\tqExtended := swag.FormatBool(qrExtended)\n\t\tif qExtended != \"\" {\n\t\t\tif err := r.SetQueryParam(\"extended\", qExtended); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewGetCustomRuleParams() *GetCustomRuleParams {\n\tvar ()\n\treturn &GetCustomRuleParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewParams(createWhoisPrice string, updateWhoisPrice string, deleteWhoisPrice string) Params {\n\treturn Params{\n\t\tCreateWhoisPrice: createWhoisPrice,\n\t\tUpdateWhoisPrice: updateWhoisPrice,\n\t\tDeleteWhoisPrice: deleteWhoisPrice,\n\t}\n}",
"func NewGetParamsWithTimeout(timeout time.Duration) *GetParams {\n\tvar (\n\t\tdeviceOSDefault = string(\"Android 9\")\n\t\tsendToEmailDefault = string(\"no\")\n\t)\n\treturn &GetParams{\n\t\tDeviceOS: &deviceOSDefault,\n\t\tSendToEmail: &sendToEmailDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewParams() *Params {\n\tp := Params{}\n\tp.names = []string{}\n\tp.values = map[string]interface{}{}\n\n\treturn &p\n}",
"func NewGetNetworkExternalParams() *GetNetworkExternalParams {\n\n\treturn &GetNetworkExternalParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewPcloudPvminstancesNetworksGetParams() *PcloudPvminstancesNetworksGetParams {\n\tvar ()\n\treturn &PcloudPvminstancesNetworksGetParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetGCParams() *GetGCParams {\n\treturn &GetGCParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetRuleChainParams() *GetRuleChainParams {\n\tvar ()\n\treturn &GetRuleChainParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewConfigGetParams() *ConfigGetParams {\n\treturn &ConfigGetParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetSearchClinicsParams() *GetSearchClinicsParams {\n\tvar ()\n\treturn &GetSearchClinicsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewParams(\n\ttaxPolicy, rewardPolicy PolicyConstraints,\n\tseigniorageBurden sdk.Dec,\n\tminingIncrement sdk.Dec,\n\twindowShort, windowLong, windowProbation sdk.Int,\n) Params {\n\treturn Params{\n\t\tTaxPolicy: taxPolicy,\n\t\tRewardPolicy: rewardPolicy,\n\t\tSeigniorageBurdenTarget: seigniorageBurden,\n\t\tMiningIncrement: miningIncrement,\n\t\tWindowShort: windowShort,\n\t\tWindowLong: windowLong,\n\t\tWindowProbation: windowProbation,\n\t}\n}",
"func NewGetProjectsRequest(server string, params *GetProjectsParams) (*http.Request, error) {\n\tvar err error\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/projects\")\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueryValues := queryUrl.Query()\n\n\tif params.Query != nil {\n\n\t\tif queryFrag, err := runtime.StyleParam(\"form\", true, \"query\", *params.Query); err != nil {\n\t\t\treturn nil, err\n\t\t} else if parsed, err := url.ParseQuery(queryFrag); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tfor k, v := range parsed {\n\t\t\t\tfor _, v2 := range v {\n\t\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif params.Identifier != nil {\n\n\t\tif queryFrag, err := runtime.StyleParam(\"form\", true, \"identifier\", *params.Identifier); err != nil {\n\t\t\treturn nil, err\n\t\t} else if parsed, err := url.ParseQuery(queryFrag); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tfor k, v := range parsed {\n\t\t\t\tfor _, v2 := range v {\n\t\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tqueryUrl.RawQuery = queryValues.Encode()\n\n\treq, err := http.NewRequest(\"GET\", queryUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}",
"func NewParameters(cfg config.Config, license license.License) Parameters {\n\treturn Parameters{\n\t\tConfig: cfg,\n\t\tLicense: license,\n\t}\n}",
"func NewParams(opts []copts.Opt) *Params {\n\tparams := &Params{}\n\tcopts.Apply(params, opts)\n\treturn params\n}",
"func NewParams(opts []copts.Opt) *Params {\r\n\tparams := &Params{}\r\n\tcopts.Apply(params, opts)\r\n\treturn params\r\n}",
"func NewParams(acl ACL, daoOwner sdk.Address) Params {\n\treturn Params{\n\t\tACL: acl,\n\t\tDAOOwner: daoOwner,\n\t}\n}",
"func NewSearchWorkspacesParams() *SearchWorkspacesParams {\n\tvar ()\n\treturn &SearchWorkspacesParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetFileSystemParametersInternalParams() *GetFileSystemParametersInternalParams {\n\tvar (\n\t\tattachedClusterDefault = bool(false)\n\t\tsecureDefault = bool(false)\n\t)\n\treturn &GetFileSystemParametersInternalParams{\n\t\tAttachedCluster: &attachedClusterDefault,\n\t\tSecure: &secureDefault,\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewFetchParams(timeout time.Duration, url string) *Params {\n\tparams := &Params{\n\t\tTimeout: timeout,\n\t\tURL: url,\n\t}\n\treturn params\n}",
"func NewGetCurrentGenerationParamsWithTimeout(timeout time.Duration) *GetCurrentGenerationParams {\n\treturn &GetCurrentGenerationParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetOrganizationTeamPermissionsParams() *GetOrganizationTeamPermissionsParams {\n\tvar ()\n\treturn &GetOrganizationTeamPermissionsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetOrganizationPrototypePermissionsParams() *GetOrganizationPrototypePermissionsParams {\n\tvar ()\n\treturn &GetOrganizationPrototypePermissionsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetUsersParams() *GetUsersParams {\n\tvar ()\n\treturn &GetUsersParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (tr *CapacityProvider) GetParameters() (map[string]interface{}, error) {\n\tp, err := json.TFParser.Marshal(tr.Spec.ForProvider)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbase := map[string]interface{}{}\n\treturn base, json.TFParser.Unmarshal(p, &base)\n}",
"func NewGetTradingPairParams() *GetTradingPairParams {\n\tvar ()\n\treturn &GetTradingPairParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetLogoutRequestParams() *GetLogoutRequestParams {\n\tvar ()\n\treturn &GetLogoutRequestParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetPaymentsParams() *GetPaymentsParams {\n\tvar ()\n\treturn &GetPaymentsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewPcloudNetworksGetallParamsWithTimeout(timeout time.Duration) *PcloudNetworksGetallParams {\n\treturn &PcloudNetworksGetallParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetGatewaysParams() *GetGatewaysParams {\n\treturn &GetGatewaysParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewPublicWebLinkPlatformEstablishParamsWithTimeout(timeout time.Duration) *PublicWebLinkPlatformEstablishParams {\n\tvar ()\n\treturn &PublicWebLinkPlatformEstablishParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetOrderParams() *GetOrderParams {\n\tvar ()\n\treturn &GetOrderParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewPutParams() *PutParams {\n\tvar ()\n\treturn &PutParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewParams(nickname NicknameParams, dTag DTagParams, bio BioParams, oracle OracleParams) Params {\n\treturn Params{\n\t\tNickname: nickname,\n\t\tDTag: dTag,\n\t\tBio: bio,\n\t\tOracle: oracle,\n\t}\n}",
"func NewGetMetricsParams() *GetMetricsParams {\n\tvar (\n\t\tgranularityDefault = string(\"AUTO\")\n\t\tgroupByDefault = string(\"NONE\")\n\t\tsiteTypeFilterDefault = string(\"ALL\")\n\t)\n\treturn &GetMetricsParams{\n\t\tGranularity: &granularityDefault,\n\t\tGroupBy: &groupByDefault,\n\t\tSiteTypeFilter: &siteTypeFilterDefault,\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetComplianceByResourceTypesParams() *GetComplianceByResourceTypesParams {\n\tvar (\n\t\tmaxItemsDefault = int64(100)\n\t\toffsetDefault = int64(0)\n\t)\n\treturn &GetComplianceByResourceTypesParams{\n\t\tMaxItems: &maxItemsDefault,\n\t\tOffset: &offsetDefault,\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewParams() Params {\n\treturn &headerParams{\n\t\tparams: make(map[string]MaybeString),\n\t\tparamOrder: []string{},\n\t}\n}",
"func NewParams(tokenCourse, subscriptionPrice, VPNGBPrice,\n\tstorageGBPrice, baseVPNGb, baseStorageGb uint32, courseSigners []sdk.AccAddress) Params {\n\treturn Params{\n\t\tTokenCourse: tokenCourse,\n\t\tSubscriptionPrice: subscriptionPrice,\n\t\tVPNGBPrice: VPNGBPrice,\n\t\tStorageGBPrice: storageGBPrice,\n\t\tBaseVPNGb: baseVPNGb,\n\t\tBaseStorageGb: baseStorageGb,\n\t\tCourseChangeSigners: courseSigners[:],\n\t}\n}",
"func (cia *chainInfoAPI) ProtocolParameters(ctx context.Context) (*apitypes.ProtocolParams, error) {\n\tnetworkName, err := cia.getNetworkName(ctx)\n\tif err != nil {\n\t\treturn nil, xerrors.Wrap(err, \"could not retrieve network name\")\n\t}\n\n\tvar supportedSectors []apitypes.SectorInfo\n\tfor proof := range miner0.SupportedProofTypes {\n\t\tsize, err := proof.SectorSize()\n\t\tif err != nil {\n\t\t\treturn nil, xerrors.Wrap(err, \"could not retrieve network name\")\n\t\t}\n\t\tmaxUserBytes := abi.PaddedPieceSize(size).Unpadded()\n\t\tsupportedSectors = append(supportedSectors, apitypes.SectorInfo{Size: size, MaxPieceSize: maxUserBytes})\n\t}\n\n\treturn &apitypes.ProtocolParams{\n\t\tNetwork: networkName,\n\t\tBlockTime: cia.chain.config.BlockTime(),\n\t\tSupportedSectors: supportedSectors,\n\t}, nil\n}",
"func (tr *Service) GetParameters() (map[string]interface{}, error) {\n\tp, err := json.TFParser.Marshal(tr.Spec.ForProvider)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbase := map[string]interface{}{}\n\treturn base, json.TFParser.Unmarshal(p, &base)\n}",
"func NewGetTransportNodeParams() *GetTransportNodeParams {\n\tvar ()\n\treturn &GetTransportNodeParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewParameters(bitlen int) *Parameters {\n\tm, err := rand.Prime(rand.Reader, bitlen)\n\tif err != nil {\n\t\tpanic(\"Could not generate randon prime\")\n\t}\n\tn := 3\n\treturn &Parameters{\n\t\tn: n,\n\t\tM: m,\n\t\ttriplet: GenerateBeaverTriplet(m),\n\t\tassinc: mr.Intn(n),\n\t}\n}",
"func NewGetOrganizationApplicationParams() *GetOrganizationApplicationParams {\n\tvar ()\n\treturn &GetOrganizationApplicationParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (tr *CassandraKeySpace) GetParameters() (map[string]interface{}, error) {\n\tp, err := json.TFParser.Marshal(tr.Spec.ForProvider)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbase := map[string]interface{}{}\n\treturn base, json.TFParser.Unmarshal(p, &base)\n}",
"func NewGetNetworkSwitchSettingsQosRuleParams() *GetNetworkSwitchSettingsQosRuleParams {\n\tvar ()\n\treturn &GetNetworkSwitchSettingsQosRuleParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (tr *NotebookWorkspace) GetParameters() (map[string]interface{}, error) {\n\tp, err := json.TFParser.Marshal(tr.Spec.ForProvider)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbase := map[string]interface{}{}\n\treturn base, json.TFParser.Unmarshal(p, &base)\n}",
"func NewGetCharacterParams() *GetCharacterParams {\n\tvar ()\n\treturn &GetCharacterParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetFyiSettingsParams() *GetFyiSettingsParams {\n\n\treturn &GetFyiSettingsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewPlatformsAllOfData(count int32, platforms map[string]Platform) *PlatformsAllOfData {\n\tthis := PlatformsAllOfData{}\n\tthis.Count = count\n\tthis.Platforms = platforms\n\treturn &this\n}",
"func (o *PlatformsAllOfData) GetPlatformsOk() (*map[string]Platform, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Platforms, true\n}",
"func NewPublicWebLinkPlatformEstablishParamsWithHTTPClient(client *http.Client) *PublicWebLinkPlatformEstablishParams {\n\tvar ()\n\treturn &PublicWebLinkPlatformEstablishParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewCreateWidgetParams() *CreateWidgetParams {\n\tvar (\n\t\tacceptDefault = string(\"application/json\")\n\t\tcontentTypeDefault = string(\"application/json\")\n\t)\n\treturn &CreateWidgetParams{\n\t\tAccept: &acceptDefault,\n\t\tContentType: &contentTypeDefault,\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *GetRepositoriesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.LabelID != nil {\n\n\t\t// query param label_id\n\t\tvar qrLabelID int64\n\t\tif o.LabelID != nil {\n\t\t\tqrLabelID = *o.LabelID\n\t\t}\n\t\tqLabelID := swag.FormatInt64(qrLabelID)\n\t\tif qLabelID != \"\" {\n\t\t\tif err := r.SetQueryParam(\"label_id\", qLabelID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Page != nil {\n\n\t\t// query param page\n\t\tvar qrPage int32\n\t\tif o.Page != nil {\n\t\t\tqrPage = *o.Page\n\t\t}\n\t\tqPage := swag.FormatInt32(qrPage)\n\t\tif qPage != \"\" {\n\t\t\tif err := r.SetQueryParam(\"page\", qPage); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.PageSize != nil {\n\n\t\t// query param page_size\n\t\tvar qrPageSize int32\n\t\tif o.PageSize != nil {\n\t\t\tqrPageSize = *o.PageSize\n\t\t}\n\t\tqPageSize := swag.FormatInt32(qrPageSize)\n\t\tif qPageSize != \"\" {\n\t\t\tif err := r.SetQueryParam(\"page_size\", qPageSize); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// query param project_id\n\tqrProjectID := o.ProjectID\n\tqProjectID := swag.FormatInt32(qrProjectID)\n\tif qProjectID != \"\" {\n\t\tif err := r.SetQueryParam(\"project_id\", qProjectID); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.Q != nil {\n\n\t\t// query param q\n\t\tvar qrQ string\n\t\tif o.Q != nil {\n\t\t\tqrQ = *o.Q\n\t\t}\n\t\tqQ := qrQ\n\t\tif qQ != \"\" {\n\t\t\tif err := r.SetQueryParam(\"q\", qQ); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Sort != nil {\n\n\t\t// query param sort\n\t\tvar qrSort string\n\t\tif o.Sort != nil {\n\t\t\tqrSort = *o.Sort\n\t\t}\n\t\tqSort := qrSort\n\t\tif qSort != \"\" {\n\t\t\tif err := r.SetQueryParam(\"sort\", qSort); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn 
errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewGetRuntimeServersParams() *GetRuntimeServersParams {\n\tvar ()\n\treturn &GetRuntimeServersParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetRefPlantsParams() GetRefPlantsParams {\n\n\tvar (\n\t\t// initialize parameters with default values\n\n\t\tlimitDefault = int64(20)\n\t\toffsetDefault = int64(0)\n\t)\n\n\treturn GetRefPlantsParams{\n\t\tLimit: limitDefault,\n\n\t\tOffset: offsetDefault,\n\t}\n}"
] | [
"0.7555398",
"0.7425465",
"0.66280895",
"0.6524783",
"0.6367459",
"0.623201",
"0.6032082",
"0.5907493",
"0.58632076",
"0.570186",
"0.56994843",
"0.5553735",
"0.55019903",
"0.547514",
"0.5458275",
"0.5445687",
"0.5440679",
"0.5381146",
"0.5365345",
"0.5304436",
"0.5302954",
"0.529495",
"0.5267036",
"0.5258226",
"0.523022",
"0.5229289",
"0.520272",
"0.5200212",
"0.5191669",
"0.51903397",
"0.5186975",
"0.5178276",
"0.51744694",
"0.5173635",
"0.51620305",
"0.5161379",
"0.5144871",
"0.5144743",
"0.51393485",
"0.51340693",
"0.5101502",
"0.50992286",
"0.5080975",
"0.50698376",
"0.5054708",
"0.5053559",
"0.50394464",
"0.50374556",
"0.50350064",
"0.50317717",
"0.50125396",
"0.50093144",
"0.50070786",
"0.49910712",
"0.49906248",
"0.49902907",
"0.49849993",
"0.49760523",
"0.49575764",
"0.49551433",
"0.49484485",
"0.4931074",
"0.49286634",
"0.49267724",
"0.49219465",
"0.4902729",
"0.48847812",
"0.48693296",
"0.4858065",
"0.485483",
"0.48534238",
"0.48268816",
"0.4825408",
"0.48244634",
"0.48238483",
"0.48221087",
"0.48185503",
"0.48104462",
"0.47988507",
"0.4791486",
"0.478987",
"0.47863156",
"0.4784515",
"0.47795606",
"0.47623962",
"0.4756527",
"0.4751336",
"0.47442713",
"0.47366485",
"0.47317785",
"0.47307438",
"0.472762",
"0.47172907",
"0.47107536",
"0.469576",
"0.46945924",
"0.46895453",
"0.46815073",
"0.46714157",
"0.4663475"
] | 0.88731694 | 0 |
NewGetPlatformsParamsWithTimeout creates a new GetPlatformsParams object with the default values initialized, and the ability to set a timeout on a request | NewGetPlatformsParamsWithTimeout создает новый объект GetPlatformsParams с инициализированными значениями по умолчанию, а также возможность установить таймаут на запрос | func NewGetPlatformsParamsWithTimeout(timeout time.Duration) *GetPlatformsParams {
var ()
return &GetPlatformsParams{
timeout: timeout,
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *GetPlatformsParams) WithTimeout(timeout time.Duration) *GetPlatformsParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewGetPlatformsParams() *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *GetPlatformsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewGetPlatformsParamsWithHTTPClient(client *http.Client) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetOperatingSystemsParamsWithTimeout(timeout time.Duration) *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetParamsWithTimeout(timeout time.Duration) *GetParams {\n\tvar (\n\t\tdeviceOSDefault = string(\"Android 9\")\n\t\tsendToEmailDefault = string(\"no\")\n\t)\n\treturn &GetParams{\n\t\tDeviceOS: &deviceOSDefault,\n\t\tSendToEmail: &sendToEmailDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewPcloudSystempoolsGetParamsWithTimeout(timeout time.Duration) *PcloudSystempoolsGetParams {\n\tvar ()\n\treturn &PcloudSystempoolsGetParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *GetPlatformsParams) WithContext(ctx context.Context) *GetPlatformsParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func NewOrgGetParamsWithTimeout(timeout time.Duration) *OrgGetParams {\n\tvar ()\n\treturn &OrgGetParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetClockParamsWithTimeout(timeout time.Duration) *GetClockParams {\n\treturn &GetClockParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *GetPlatformsParams) WithHTTPClient(client *http.Client) *GetPlatformsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewGetCurrentGenerationParamsWithTimeout(timeout time.Duration) *GetCurrentGenerationParams {\n\treturn &GetCurrentGenerationParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *PublicWebLinkPlatformEstablishParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewGetTreeParamsWithTimeout(timeout time.Duration) *GetTreeParams {\n\tvar ()\n\treturn &GetTreeParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewPublicWebLinkPlatformEstablishParamsWithTimeout(timeout time.Duration) *PublicWebLinkPlatformEstablishParams {\n\tvar ()\n\treturn &PublicWebLinkPlatformEstablishParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetUIParamsWithTimeout(timeout time.Duration) *GetUIParams {\n\n\treturn &GetUIParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetNetworkExternalParamsWithTimeout(timeout time.Duration) *GetNetworkExternalParams {\n\n\treturn &GetNetworkExternalParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetBuildPropertiesParamsWithTimeout(timeout time.Duration) *GetBuildPropertiesParams {\n\tvar ()\n\treturn &GetBuildPropertiesParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetOrganizationApplicationParamsWithTimeout(timeout time.Duration) *GetOrganizationApplicationParams {\n\tvar ()\n\treturn &GetOrganizationApplicationParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewCreateWidgetParamsWithTimeout(timeout time.Duration) *CreateWidgetParams {\n\tvar (\n\t\tacceptDefault = string(\"application/json\")\n\t\tcontentTypeDefault = string(\"application/json\")\n\t)\n\treturn &CreateWidgetParams{\n\t\tAccept: &acceptDefault,\n\t\tContentType: &contentTypeDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetMetricsParamsWithTimeout(timeout time.Duration) *GetMetricsParams {\n\tvar (\n\t\tgranularityDefault = string(\"AUTO\")\n\t\tgroupByDefault = string(\"NONE\")\n\t\tsiteTypeFilterDefault = string(\"ALL\")\n\t)\n\treturn &GetMetricsParams{\n\t\tGranularity: &granularityDefault,\n\t\tGroupBy: &groupByDefault,\n\t\tSiteTypeFilter: &siteTypeFilterDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetComplianceByResourceTypesParamsWithTimeout(timeout time.Duration) *GetComplianceByResourceTypesParams {\n\tvar (\n\t\tmaxItemsDefault = int64(100)\n\t\toffsetDefault = int64(0)\n\t)\n\treturn &GetComplianceByResourceTypesParams{\n\t\tMaxItems: &maxItemsDefault,\n\t\tOffset: &offsetDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewPcloudNetworksGetallParamsWithTimeout(timeout time.Duration) *PcloudNetworksGetallParams {\n\treturn &PcloudNetworksGetallParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func NewTimeout(parameters ...wparams.ParamStorer) Error {\n\treturn newGenericError(nil, DefaultTimeout, wparams.NewParamStorer(parameters...))\n}",
"func NewGetLogicalPortParamsWithTimeout(timeout time.Duration) *GetLogicalPortParams {\n\tvar ()\n\treturn &GetLogicalPortParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetCustomRuleParamsWithTimeout(timeout time.Duration) *GetCustomRuleParams {\n\tvar ()\n\treturn &GetCustomRuleParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetFileSystemParametersInternalParamsWithTimeout(timeout time.Duration) *GetFileSystemParametersInternalParams {\n\tvar (\n\t\tattachedClusterDefault = bool(false)\n\t\tsecureDefault = bool(false)\n\t)\n\treturn &GetFileSystemParametersInternalParams{\n\t\tAttachedCluster: &attachedClusterDefault,\n\t\tSecure: &secureDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetBootstrapParamsWithTimeout(timeout time.Duration) *GetBootstrapParams {\n\tvar ()\n\treturn &GetBootstrapParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *PcloudSystempoolsGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewGetOperatingSystemsParams() *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetSearchClinicsParamsWithTimeout(timeout time.Duration) *GetSearchClinicsParams {\n\tvar ()\n\treturn &GetSearchClinicsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetProjectMetricsParamsWithTimeout(timeout time.Duration) *GetProjectMetricsParams {\n\tvar ()\n\treturn &GetProjectMetricsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewSearchWorkspacesParamsWithTimeout(timeout time.Duration) *SearchWorkspacesParams {\n\tvar ()\n\treturn &SearchWorkspacesParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetTransportNodeParamsWithTimeout(timeout time.Duration) *GetTransportNodeParams {\n\tvar ()\n\treturn &GetTransportNodeParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetScopeConfigurationParamsWithTimeout(timeout time.Duration) *GetScopeConfigurationParams {\n\tvar ()\n\treturn &GetScopeConfigurationParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetRepositoriesParamsWithTimeout(timeout time.Duration) *GetRepositoriesParams {\n\tvar ()\n\treturn &GetRepositoriesParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetGCParamsWithTimeout(timeout time.Duration) *GetGCParams {\n\treturn &GetGCParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func NewPcloudPvminstancesNetworksGetParamsWithTimeout(timeout time.Duration) *PcloudPvminstancesNetworksGetParams {\n\tvar ()\n\treturn &PcloudPvminstancesNetworksGetParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetSsoParamsWithTimeout(timeout time.Duration) *GetSsoParams {\n\tvar ()\n\treturn &GetSsoParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetRuleChainParamsWithTimeout(timeout time.Duration) *GetRuleChainParams {\n\tvar ()\n\treturn &GetRuleChainParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetOrganizationTeamPermissionsParamsWithTimeout(timeout time.Duration) *GetOrganizationTeamPermissionsParams {\n\tvar ()\n\treturn &GetOrganizationTeamPermissionsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetCharacterParamsWithTimeout(timeout time.Duration) *GetCharacterParams {\n\tvar ()\n\treturn &GetCharacterParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewCreateCrossConnectParamsWithTimeout(timeout time.Duration) *CreateCrossConnectParams {\n\tvar ()\n\treturn &CreateCrossConnectParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *PcloudNetworksGetallParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetPlatformsParams) WithExtended(extended *bool) *GetPlatformsParams {\n\to.SetExtended(extended)\n\treturn o\n}",
"func NewGetNetworkAppliancePortParamsWithTimeout(timeout time.Duration) *GetNetworkAppliancePortParams {\n\tvar ()\n\treturn &GetNetworkAppliancePortParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *GetOperatingSystemsParams) WithTimeout(timeout time.Duration) *GetOperatingSystemsParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewGetPackageSearchParamsWithTimeout(timeout time.Duration) *GetPackageSearchParams {\n\tvar ()\n\treturn &GetPackageSearchParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewJoinOrganizationParamsWithTimeout(timeout time.Duration) *JoinOrganizationParams {\n\treturn &JoinOrganizationParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetAOrderStatusParamsWithTimeout(timeout time.Duration) *GetAOrderStatusParams {\n\tvar (\n\t\tacceptDefault = string(\"application/json\")\n\t\tcontentTypeDefault = string(\"application/json\")\n\t)\n\treturn &GetAOrderStatusParams{\n\t\tAccept: acceptDefault,\n\t\tContentType: contentTypeDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *PcloudSystempoolsGetParams) WithTimeout(timeout time.Duration) *PcloudSystempoolsGetParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewGetOrderParamsWithTimeout(timeout time.Duration) *GetOrderParams {\n\tvar ()\n\treturn &GetOrderParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewPcloudSystempoolsGetParams() *PcloudSystempoolsGetParams {\n\tvar ()\n\treturn &PcloudSystempoolsGetParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewSystemEventsParamsWithTimeout(timeout time.Duration) *SystemEventsParams {\n\tvar ()\n\treturn &SystemEventsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *PcloudPvminstancesNetworksGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *OrgGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewGetRoomsParamsWithTimeout(timeout time.Duration) *GetRoomsParams {\n\tvar ()\n\treturn &GetRoomsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *GetOperatingSystemsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PublicWebLinkPlatformEstablishParams) WithTimeout(timeout time.Duration) *PublicWebLinkPlatformEstablishParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewPcloudSapGetParamsWithTimeout(timeout time.Duration) *PcloudSapGetParams {\n\treturn &PcloudSapGetParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func WrapWithTimeout(cause error, parameters ...wparams.ParamStorer) Error {\n\treturn newGenericError(cause, DefaultTimeout, wparams.NewParamStorer(parameters...))\n}",
"func (o *ServiceBrokerOpenstacksHostsGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewGetContactsParamsWithTimeout(timeout time.Duration) *GetContactsParams {\n\tvar (\n\t\tlimitDefault = int32(5000)\n\t\toffsetDefault = int32(0)\n\t)\n\treturn &GetContactsParams{\n\t\tLimit: &limitDefault,\n\t\tOffset: &offsetDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetLogoutRequestParamsWithTimeout(timeout time.Duration) *GetLogoutRequestParams {\n\tvar ()\n\treturn &GetLogoutRequestParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetPrinterStatusParamsWithTimeout(timeout time.Duration) *GetPrinterStatusParams {\n\n\treturn &GetPrinterStatusParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetDevicesUnknownParamsWithTimeout(timeout time.Duration) *GetDevicesUnknownParams {\n\treturn &GetDevicesUnknownParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetProductsCodeParamsWithTimeout(timeout time.Duration) *GetProductsCodeParams {\n\tvar (\n\t\twithAttributeOptionsDefault = bool(false)\n\t\twithQualityScoresDefault = bool(false)\n\t)\n\treturn &GetProductsCodeParams{\n\t\tWithAttributeOptions: &withAttributeOptionsDefault,\n\t\tWithQualityScores: &withQualityScoresDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetWormDomainParamsWithTimeout(timeout time.Duration) *GetWormDomainParams {\n\tvar ()\n\treturn &GetWormDomainParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetContactsParamsWithTimeout(timeout time.Duration) *GetContactsParams {\n\tvar (\n\t\tlimitDefault = int64(50)\n\t\toffsetDefault = int64(0)\n\t)\n\treturn &GetContactsParams{\n\t\tLimit: &limitDefault,\n\t\tOffset: &offsetDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *GetSearchClinicsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewGetOrganizationPrototypePermissionsParamsWithTimeout(timeout time.Duration) *GetOrganizationPrototypePermissionsParams {\n\tvar ()\n\treturn &GetOrganizationPrototypePermissionsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetGPUArchitectureParamsWithTimeout(timeout time.Duration) *GetGPUArchitectureParams {\n\treturn &GetGPUArchitectureParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetRuntimeServersParamsWithTimeout(timeout time.Duration) *GetRuntimeServersParams {\n\tvar ()\n\treturn &GetRuntimeServersParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetNetworkSwitchSettingsQosRuleParamsWithTimeout(timeout time.Duration) *GetNetworkSwitchSettingsQosRuleParams {\n\tvar ()\n\treturn &GetNetworkSwitchSettingsQosRuleParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetClockParams() *GetClockParams {\n\treturn &GetClockParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewListStacksByWorkspaceParamsWithTimeout(timeout time.Duration) *ListStacksByWorkspaceParams {\n\tvar ()\n\treturn &ListStacksByWorkspaceParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetOutagesParamsWithTimeout(timeout time.Duration) *GetOutagesParams {\n\treturn &GetOutagesParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *GetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewGetRolesParamsWithTimeout(timeout time.Duration) *GetRolesParams {\n\tvar ()\n\treturn &GetRolesParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetFyiSettingsParamsWithTimeout(timeout time.Duration) *GetFyiSettingsParams {\n\n\treturn &GetFyiSettingsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *CapacityPoolGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewCreateRuntimeMapParamsWithTimeout(timeout time.Duration) *CreateRuntimeMapParams {\n\tvar ()\n\treturn &CreateRuntimeMapParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetIconParamsWithTimeout(timeout time.Duration) *GetIconParams {\n\treturn &GetIconParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func NewConfigGetParamsWithTimeout(timeout time.Duration) *ConfigGetParams {\n\treturn &ConfigGetParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *CreateCrossConnectParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetClockParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewGetProcessorsParamsWithTimeout(timeout time.Duration) *GetProcessorsParams {\n\treturn &GetProcessorsParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *GetBuildPropertiesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewQueryEntitlementsParamsWithTimeout(timeout time.Duration) *QueryEntitlementsParams {\n\tvar (\n\t\tactiveOnlyDefault = bool(true)\n\t\tlimitDefault = int32(20)\n\t\toffsetDefault = int32(0)\n\t)\n\treturn &QueryEntitlementsParams{\n\t\tActiveOnly: &activeOnlyDefault,\n\t\tLimit: &limitDefault,\n\t\tOffset: &offsetDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *GetRepositoriesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func BuildTimeParameters(params url.Values, expiry time.Duration) {\n\tnow := time.Now()\n\n\tparams.Set(QueryIssued, fmt.Sprint(now.UnixMilli()))\n\tparams.Set(QueryExpiry, fmt.Sprint(now.Add(expiry).UnixMilli()))\n}",
"func NewServeBuildTypesInProjectParamsWithTimeout(timeout time.Duration) *ServeBuildTypesInProjectParams {\n\tvar ()\n\treturn &ServeBuildTypesInProjectParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *GetPlatformsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Extended != nil {\n\n\t\t// query param extended\n\t\tvar qrExtended bool\n\t\tif o.Extended != nil {\n\t\t\tqrExtended = *o.Extended\n\t\t}\n\t\tqExtended := swag.FormatBool(qrExtended)\n\t\tif qExtended != \"\" {\n\t\t\tif err := r.SetQueryParam(\"extended\", qExtended); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewGetGatewaysParamsWithTimeout(timeout time.Duration) *GetGatewaysParams {\n\treturn &GetGatewaysParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func NewMetroclusterInterconnectGetParamsWithTimeout(timeout time.Duration) *MetroclusterInterconnectGetParams {\n\treturn &MetroclusterInterconnectGetParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDetectLanguageParamsWithTimeout(timeout time.Duration) *DetectLanguageParams {\n\tvar ()\n\treturn &DetectLanguageParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetTransportNodeParams() *GetTransportNodeParams {\n\tvar ()\n\treturn &GetTransportNodeParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *MetroclusterInterconnectGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewGetTradingPairParamsWithTimeout(timeout time.Duration) *GetTradingPairParams {\n\tvar ()\n\treturn &GetTradingPairParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetLogsParamsWithTimeout(timeout time.Duration) *GetLogsParams {\n\tvar (\n\t\tpageDefault = int64(1)\n\t\tpageSizeDefault = int64(10)\n\t)\n\treturn &GetLogsParams{\n\t\tPage: &pageDefault,\n\t\tPageSize: &pageSizeDefault,\n\n\t\ttimeout: timeout,\n\t}\n}"
] | [
"0.8099803",
"0.7864803",
"0.68469137",
"0.6831266",
"0.6502288",
"0.6194031",
"0.6076022",
"0.59323996",
"0.58478737",
"0.5819925",
"0.5788982",
"0.57653457",
"0.5737436",
"0.57203186",
"0.56848276",
"0.5684553",
"0.56770486",
"0.5665245",
"0.56478405",
"0.5646077",
"0.564033",
"0.561581",
"0.5605666",
"0.5604725",
"0.5601695",
"0.5569704",
"0.5565104",
"0.55611473",
"0.55460334",
"0.5545793",
"0.5533601",
"0.5516999",
"0.5507862",
"0.5502965",
"0.5495787",
"0.54858124",
"0.54364794",
"0.5418871",
"0.54149365",
"0.54019004",
"0.53908205",
"0.53894776",
"0.53672916",
"0.53312284",
"0.5321965",
"0.52994007",
"0.5298656",
"0.52825356",
"0.527428",
"0.5272764",
"0.5272513",
"0.5258145",
"0.5254123",
"0.5252852",
"0.5247487",
"0.5242219",
"0.5241378",
"0.5231746",
"0.52225846",
"0.52216023",
"0.5216665",
"0.52139467",
"0.5201933",
"0.51963705",
"0.5180112",
"0.5179279",
"0.5178745",
"0.51768243",
"0.51707685",
"0.5166603",
"0.5165087",
"0.5160806",
"0.5150641",
"0.5133023",
"0.51299113",
"0.5126678",
"0.5125716",
"0.51207465",
"0.5116541",
"0.5114161",
"0.5110397",
"0.51089066",
"0.51082236",
"0.5104754",
"0.5084673",
"0.5081516",
"0.5074507",
"0.5074457",
"0.5073293",
"0.50723654",
"0.5052823",
"0.50516677",
"0.5051239",
"0.5047354",
"0.5045768",
"0.50436217",
"0.5043238",
"0.50378007",
"0.50334954",
"0.503191"
] | 0.8511127 | 0 |
NewGetPlatformsParamsWithContext creates a new GetPlatformsParams object with the default values initialized, and the ability to set a context for a request | NewGetPlatformsParamsWithContext создает новый объект GetPlatformsParams с инициализированными значениями по умолчанию и возможностью установки контекста для запроса | func NewGetPlatformsParamsWithContext(ctx context.Context) *GetPlatformsParams {
var ()
return &GetPlatformsParams{
Context: ctx,
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func NewGetPlatformsParams() *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *GetPlatformsParams) WithContext(ctx context.Context) *GetPlatformsParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func NewGetPlatformsParamsWithHTTPClient(client *http.Client) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetPlatformsParamsWithTimeout(timeout time.Duration) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *GetPlatformsParams) WithHTTPClient(client *http.Client) *GetPlatformsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *GetPlatformsParams) WithTimeout(timeout time.Duration) *GetPlatformsParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *GetMetricsParams) WithPlatforms(platforms *string) *GetMetricsParams {\n\to.SetPlatforms(platforms)\n\treturn o\n}",
"func (o *GetPlatformsParams) WithExtended(extended *bool) *GetPlatformsParams {\n\to.SetExtended(extended)\n\treturn o\n}",
"func NewGetOperatingSystemsParamsWithContext(ctx context.Context) *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewPcloudSystempoolsGetParamsWithContext(ctx context.Context) *PcloudSystempoolsGetParams {\n\tvar ()\n\treturn &PcloudSystempoolsGetParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (a *Client) GetPlatforms(params *GetPlatformsParams, opts ...ClientOption) (*GetPlatformsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetPlatformsParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"get-platforms\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/fwmgr/entities/platforms/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetPlatformsReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetPlatformsOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for get-platforms: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (o *PlatformsAllOfData) GetPlatforms() map[string]Platform {\n\tif o == nil {\n\t\tvar ret map[string]Platform\n\t\treturn ret\n\t}\n\n\treturn o.Platforms\n}",
"func (o *GetMetricsParams) SetPlatforms(platforms *string) {\n\to.Platforms = platforms\n}",
"func NewGetOperatingSystemsParams() *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetClusterSupportedPlatforms(ctx *middleware.Context, handler GetClusterSupportedPlatformsHandler) *GetClusterSupportedPlatforms {\n\treturn &GetClusterSupportedPlatforms{Context: ctx, Handler: handler}\n}",
"func NewGetCurrentGenerationParamsWithContext(ctx context.Context) *GetCurrentGenerationParams {\n\treturn &GetCurrentGenerationParams{\n\t\tContext: ctx,\n\t}\n}",
"func (o *PlatformsByPlatformNameAllOfData) GetPlatforms() []Platform {\n\tif o == nil {\n\t\tvar ret []Platform\n\t\treturn ret\n\t}\n\n\treturn o.Platforms\n}",
"func NewPcloudSystempoolsGetParams() *PcloudSystempoolsGetParams {\n\tvar ()\n\treturn &PcloudSystempoolsGetParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *PlatformsByPlatformNameAllOfData) GetPlatformsOk() ([]Platform, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Platforms, true\n}",
"func (o *PlatformsAllOfData) GetPlatformsOk() (*map[string]Platform, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Platforms, true\n}",
"func NewSearchWorkspacesParamsWithContext(ctx context.Context) *SearchWorkspacesParams {\n\tvar ()\n\treturn &SearchWorkspacesParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewGetSsoParamsWithContext(ctx context.Context) *GetSsoParams {\n\tvar ()\n\treturn &GetSsoParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (o *PlatformsAllOfData) SetPlatforms(v map[string]Platform) {\n\to.Platforms = v\n}",
"func NewGetOperatingSystemsParamsWithHTTPClient(client *http.Client) *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetParamsWithContext(ctx context.Context) *GetParams {\n\tvar (\n\t\tdeviceOSDefault = string(\"Android 9\")\n\t\tsendToEmailDefault = string(\"no\")\n\t)\n\treturn &GetParams{\n\t\tDeviceOS: &deviceOSDefault,\n\t\tSendToEmail: &sendToEmailDefault,\n\n\t\tContext: ctx,\n\t}\n}",
"func NewPcloudPvminstancesNetworksGetParamsWithContext(ctx context.Context) *PcloudPvminstancesNetworksGetParams {\n\tvar ()\n\treturn &PcloudPvminstancesNetworksGetParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (o *GetOperatingSystemsParams) WithContext(ctx context.Context) *GetOperatingSystemsParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func NewGetOperatingSystemsParamsWithTimeout(timeout time.Duration) *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (a *Client) GetClusterSupportedPlatforms(ctx context.Context, params *GetClusterSupportedPlatformsParams) (*GetClusterSupportedPlatformsOK, error) {\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"GetClusterSupportedPlatforms\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v2/clusters/{cluster_id}/supported-platforms\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetClusterSupportedPlatformsReader{formats: a.formats},\n\t\tAuthInfo: a.authInfo,\n\t\tContext: ctx,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetClusterSupportedPlatformsOK), nil\n\n}",
"func NewOrgGetParamsWithContext(ctx context.Context) *OrgGetParams {\n\tvar ()\n\treturn &OrgGetParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (o *PcloudPvminstancesNetworksGetParams) WithContext(ctx context.Context) *PcloudPvminstancesNetworksGetParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func NewGetScopeConfigurationParams() *GetScopeConfigurationParams {\n\tvar ()\n\treturn &GetScopeConfigurationParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (m *DeviceManagementConfigurationPolicy) GetPlatforms()(*DeviceManagementConfigurationPlatforms) {\n val, err := m.GetBackingStore().Get(\"platforms\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*DeviceManagementConfigurationPlatforms)\n }\n return nil\n}",
"func NewGetClockParamsWithContext(ctx context.Context) *GetClockParams {\n\treturn &GetClockParams{\n\t\tContext: ctx,\n\t}\n}",
"func (o *PcloudSystempoolsGetParams) WithContext(ctx context.Context) *PcloudSystempoolsGetParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func NewPcloudSystempoolsGetParamsWithHTTPClient(client *http.Client) *PcloudSystempoolsGetParams {\n\tvar ()\n\treturn &PcloudSystempoolsGetParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetOutagesParamsWithContext(ctx context.Context) *GetOutagesParams {\n\treturn &GetOutagesParams{\n\t\tContext: ctx,\n\t}\n}",
"func NewGetCurrentGenerationParams() *GetCurrentGenerationParams {\n\treturn &GetCurrentGenerationParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewPcloudNetworksGetallParamsWithContext(ctx context.Context) *PcloudNetworksGetallParams {\n\treturn &PcloudNetworksGetallParams{\n\t\tContext: ctx,\n\t}\n}",
"func NewPublicWebLinkPlatformEstablishParamsWithContext(ctx context.Context) *PublicWebLinkPlatformEstablishParams {\n\tvar ()\n\treturn &PublicWebLinkPlatformEstablishParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewGetGatewaysParamsWithContext(ctx context.Context) *GetGatewaysParams {\n\treturn &GetGatewaysParams{\n\t\tContext: ctx,\n\t}\n}",
"func (o *KubernetesAddonDefinitionAllOf) SetPlatforms(v []string) {\n\to.Platforms = v\n}",
"func NewGetRoomsParamsWithContext(ctx context.Context) *GetRoomsParams {\n\tvar ()\n\treturn &GetRoomsParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewGetBuildPropertiesParamsWithContext(ctx context.Context) *GetBuildPropertiesParams {\n\tvar ()\n\treturn &GetBuildPropertiesParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (o *PlatformsByPlatformNameAllOfData) SetPlatforms(v []Platform) {\n\to.Platforms = v\n}",
"func (o *KubernetesAddonDefinitionAllOf) GetPlatforms() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn o.Platforms\n}",
"func NewPcloudSystempoolsGetParamsWithTimeout(timeout time.Duration) *PcloudSystempoolsGetParams {\n\tvar ()\n\treturn &PcloudSystempoolsGetParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewProviderParams() ProviderParams {\n\n\treturn ProviderParams{}\n}",
"func (o *KubernetesAddonDefinitionAllOf) GetPlatformsOk() ([]string, bool) {\n\tif o == nil || o.Platforms == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Platforms, true\n}",
"func NewPcloudPvminstancesNetworksGetParams() *PcloudPvminstancesNetworksGetParams {\n\tvar ()\n\treturn &PcloudPvminstancesNetworksGetParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetOrganizationTeamPermissionsParams() *GetOrganizationTeamPermissionsParams {\n\tvar ()\n\treturn &GetOrganizationTeamPermissionsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetRepositoriesParams() *GetRepositoriesParams {\n\tvar ()\n\treturn &GetRepositoriesParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (a *Client) GetPlatformNetworks(params *GetPlatformNetworksParams) (*GetPlatformNetworksOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetPlatformNetworksParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getPlatformNetworks\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v1/platform_resources/networks\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetPlatformNetworksReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetPlatformNetworksOK), nil\n\n}",
"func NewGetProcessorsParamsWithContext(ctx context.Context) *GetProcessorsParams {\n\treturn &GetProcessorsParams{\n\t\tContext: ctx,\n\t}\n}",
"func NewPcloudSapGetParams() *PcloudSapGetParams {\n\treturn &PcloudSapGetParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewPcloudSapGetParamsWithContext(ctx context.Context) *PcloudSapGetParams {\n\treturn &PcloudSapGetParams{\n\t\tContext: ctx,\n\t}\n}",
"func NewGetV1MembershipsParamsWithContext(ctx context.Context) *GetV1MembershipsParams {\n\tvar ()\n\treturn &GetV1MembershipsParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewGetOrganizationPrototypePermissionsParams() *GetOrganizationPrototypePermissionsParams {\n\tvar ()\n\treturn &GetOrganizationPrototypePermissionsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewPutLolPerksV1CurrentpageParamsWithContext(ctx context.Context) *PutLolPerksV1CurrentpageParams {\n\tvar ()\n\treturn &PutLolPerksV1CurrentpageParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewCreateWidgetParamsWithContext(ctx context.Context) *CreateWidgetParams {\n\tvar (\n\t\tacceptDefault = string(\"application/json\")\n\t\tcontentTypeDefault = string(\"application/json\")\n\t)\n\treturn &CreateWidgetParams{\n\t\tAccept: &acceptDefault,\n\t\tContentType: &contentTypeDefault,\n\n\t\tContext: ctx,\n\t}\n}",
"func NewGetComplianceByResourceTypesParamsWithContext(ctx context.Context) *GetComplianceByResourceTypesParams {\n\tvar (\n\t\tmaxItemsDefault = int64(100)\n\t\toffsetDefault = int64(0)\n\t)\n\treturn &GetComplianceByResourceTypesParams{\n\t\tMaxItems: &maxItemsDefault,\n\t\tOffset: &offsetDefault,\n\n\t\tContext: ctx,\n\t}\n}",
"func (schematics *SchematicsV1) GetWorkspaceWithContext(ctx context.Context, getWorkspaceOptions *GetWorkspaceOptions) (result *WorkspaceResponse, response *core.DetailedResponse, err error) {\n\terr = core.ValidateNotNil(getWorkspaceOptions, \"getWorkspaceOptions cannot be nil\")\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.ValidateStruct(getWorkspaceOptions, \"getWorkspaceOptions\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tpathParamsMap := map[string]string{\n\t\t\"w_id\": *getWorkspaceOptions.WID,\n\t}\n\n\tbuilder := core.NewRequestBuilder(core.GET)\n\tbuilder = builder.WithContext(ctx)\n\tbuilder.EnableGzipCompression = schematics.GetEnableGzipCompression()\n\t_, err = builder.ResolveRequestURL(schematics.Service.Options.URL, `/v1/workspaces/{w_id}`, pathParamsMap)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor headerName, headerValue := range getWorkspaceOptions.Headers {\n\t\tbuilder.AddHeader(headerName, headerValue)\n\t}\n\n\tsdkHeaders := common.GetSdkHeaders(\"schematics\", \"V1\", \"GetWorkspace\")\n\tfor headerName, headerValue := range sdkHeaders {\n\t\tbuilder.AddHeader(headerName, headerValue)\n\t}\n\tbuilder.AddHeader(\"Accept\", \"application/json\")\n\n\trequest, err := builder.Build()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar rawResponse map[string]json.RawMessage\n\tresponse, err = schematics.Service.Request(request, &rawResponse)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(rawResponse, \"\", &result, UnmarshalWorkspaceResponse)\n\tif err != nil {\n\t\treturn\n\t}\n\tresponse.Result = result\n\n\treturn\n}",
"func NewPayRatesGetParamsWithContext(ctx context.Context) *PayRatesGetParams {\n\tvar ()\n\treturn &PayRatesGetParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewGetRepositoriesParamsWithContext(ctx context.Context) *GetRepositoriesParams {\n\tvar ()\n\treturn &GetRepositoriesParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewGetPaymentsParamsWithContext(ctx context.Context) *GetPaymentsParams {\n\tvar ()\n\treturn &GetPaymentsParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewGetSearchClinicsParamsWithContext(ctx context.Context) *GetSearchClinicsParams {\n\tvar ()\n\treturn &GetSearchClinicsParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewGetUIParamsWithContext(ctx context.Context) *GetUIParams {\n\n\treturn &GetUIParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewGetRoomsParams() *GetRoomsParams {\n\tvar ()\n\treturn &GetRoomsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetFileSystemParametersInternalParamsWithContext(ctx context.Context) *GetFileSystemParametersInternalParams {\n\tvar (\n\t\tattachedClusterDefault = bool(false)\n\t\tsecureDefault = bool(false)\n\t)\n\treturn &GetFileSystemParametersInternalParams{\n\t\tAttachedCluster: &attachedClusterDefault,\n\t\tSecure: &secureDefault,\n\n\t\tContext: ctx,\n\t}\n}",
"func NewGetOrganizationPrototypePermissionsParamsWithHTTPClient(client *http.Client) *GetOrganizationPrototypePermissionsParams {\n\tvar ()\n\treturn &GetOrganizationPrototypePermissionsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetUsersCurrentPermissionsParams() *GetUsersCurrentPermissionsParams {\n\tvar ()\n\treturn &GetUsersCurrentPermissionsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *PcloudSapGetParams) WithContext(ctx context.Context) *PcloudSapGetParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func NewRepoUpdateTopicsParamsWithContext(ctx context.Context) *RepoUpdateTopicsParams {\n\tvar ()\n\treturn &RepoUpdateTopicsParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func BootstrapPlatforms(c *config.Config) {\n\tconfig.Set(c)\n\n\tvar h tfhttp.Platform\n\tplatform.RegisterPlatform(\"HTTP\", h)\n\n\tvar tp tradfri.Platform\n\tplatform.RegisterPlatform(\"Tradfri\", tp)\n\n\tvar kp kasa.Platform\n\tplatform.RegisterPlatform(\"Kasa\", kp)\n\n\tvar owmp owm.Platform\n\tplatform.RegisterPlatform(\"OWM\", owmp)\n\n\tvar onkp onkyo.Platform\n\tplatform.RegisterPlatform(\"Onkyo\", onkp)\n\n\tvar ls linuxsensors.Platform\n\tplatform.RegisterPlatform(\"LinuxSensors\", ls)\n\n\tvar k konnected.Platform\n\tplatform.RegisterPlatform(\"Konnected\", k)\n\n\tvar nl noonlight.Platform\n\tplatform.RegisterPlatform(\"noonlight\", nl)\n\n\tvar hcp tfhc.HCPlatform\n\tplatform.RegisterPlatform(\"HomeControl\", hcp)\n\n\tplatform.StartupAllPlatforms(c)\n\n\t// add OS sensors\n\tsensor := accessory.TFAccessory{}\n\tls.AddAccessory(&sensor)\n}",
"func NewGetTreeParamsWithContext(ctx context.Context) *GetTreeParams {\n\tvar ()\n\treturn &GetTreeParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewPcloudIkepoliciesPutParamsWithContext(ctx context.Context) *PcloudIkepoliciesPutParams {\n\treturn &PcloudIkepoliciesPutParams{\n\t\tContext: ctx,\n\t}\n}",
"func NewGetOrganizationTeamPermissionsParamsWithHTTPClient(client *http.Client) *GetOrganizationTeamPermissionsParams {\n\tvar ()\n\treturn &GetOrganizationTeamPermissionsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetTradingPairParamsWithContext(ctx context.Context) *GetTradingPairParams {\n\tvar ()\n\treturn &GetTradingPairParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewScmProvidersRepositoriesGetToManyRelatedRequest(server string, id string, params *ScmProvidersRepositoriesGetToManyRelatedParams) (*http.Request, error) {\n\tvar err error\n\n\tvar pathParam0 string\n\n\tpathParam0, err = runtime.StyleParamWithLocation(\"simple\", false, \"id\", runtime.ParamLocationPath, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserverURL, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toperationPath := fmt.Sprintf(\"/v1/scmProviders/%s/repositories\", pathParam0)\n\tif operationPath[0] == '/' {\n\t\toperationPath = \".\" + operationPath\n\t}\n\n\tqueryURL, err := serverURL.Parse(operationPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueryValues := queryURL.Query()\n\n\tif params.FilterId != nil {\n\n\t\tif queryFrag, err := runtime.StyleParamWithLocation(\"form\", false, \"filter[id]\", runtime.ParamLocationQuery, *params.FilterId); err != nil {\n\t\t\treturn nil, err\n\t\t} else if parsed, err := url.ParseQuery(queryFrag); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tfor k, v := range parsed {\n\t\t\t\tfor _, v2 := range v {\n\t\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif params.FieldsScmRepositories != nil {\n\n\t\tif queryFrag, err := runtime.StyleParamWithLocation(\"form\", false, \"fields[scmRepositories]\", runtime.ParamLocationQuery, *params.FieldsScmRepositories); err != nil {\n\t\t\treturn nil, err\n\t\t} else if parsed, err := url.ParseQuery(queryFrag); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tfor k, v := range parsed {\n\t\t\t\tfor _, v2 := range v {\n\t\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif params.Limit != nil {\n\n\t\tif queryFrag, err := runtime.StyleParamWithLocation(\"form\", true, \"limit\", runtime.ParamLocationQuery, *params.Limit); err != nil {\n\t\t\treturn nil, err\n\t\t} else if parsed, err := url.ParseQuery(queryFrag); err != nil {\n\t\t\treturn nil, err\n\t\t} else 
{\n\t\t\tfor k, v := range parsed {\n\t\t\t\tfor _, v2 := range v {\n\t\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tqueryURL.RawQuery = queryValues.Encode()\n\n\treq, err := http.NewRequest(\"GET\", queryURL.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}",
"func NewPutStackParamsWithContext(ctx context.Context) *PutStackParams {\n\tvar ()\n\treturn &PutStackParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewGetUsersParamsWithContext(ctx context.Context) *GetUsersParams {\n\tvar ()\n\treturn &GetUsersParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewGetLTENetworkIDPolicyQosProfilesParamsWithContext(ctx context.Context) *GetLTENetworkIDPolicyQosProfilesParams {\n\tvar ()\n\treturn &GetLTENetworkIDPolicyQosProfilesParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (o *GetPlatformsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Extended != nil {\n\n\t\t// query param extended\n\t\tvar qrExtended bool\n\t\tif o.Extended != nil {\n\t\t\tqrExtended = *o.Extended\n\t\t}\n\t\tqExtended := swag.FormatBool(qrExtended)\n\t\tif qExtended != \"\" {\n\t\t\tif err := r.SetQueryParam(\"extended\", qExtended); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (*SchematicsV1) NewGetWorkspaceInputsOptions(wID string, tID string) *GetWorkspaceInputsOptions {\n\treturn &GetWorkspaceInputsOptions{\n\t\tWID: core.StringPtr(wID),\n\t\tTID: core.StringPtr(tID),\n\t}\n}",
"func NewPcloudPvminstancesNetworksGetParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesNetworksGetParams {\n\tvar ()\n\treturn &PcloudPvminstancesNetworksGetParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (a *Client) QueryPlatforms(params *QueryPlatformsParams, opts ...ClientOption) (*QueryPlatformsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewQueryPlatformsParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"query-platforms\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/fwmgr/queries/platforms/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &QueryPlatformsReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*QueryPlatformsOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for query-platforms: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func NewGetWormDomainParamsWithContext(ctx context.Context) *GetWormDomainParams {\n\tvar ()\n\treturn &GetWormDomainParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewGetOrganizationPrototypePermissionsParamsWithContext(ctx context.Context) *GetOrganizationPrototypePermissionsParams {\n\tvar ()\n\treturn &GetOrganizationPrototypePermissionsParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (s *PlatformsService) Get(ctx context.Context, org, assembly, platform string) (*Platform, *http.Response, error) {\n\tif org == \"\" {\n\t\treturn nil, nil, errors.New(\"org name must be non-empty\")\n\t}\n\tif assembly == \"\" {\n\t\treturn nil, nil, errors.New(\"assembly name must be non-empty\")\n\t}\n\tif platform == \"\" {\n\t\treturn nil, nil, errors.New(\"platform name must be non-empty\")\n\t}\n\tap := fmt.Sprintf(\"%v/assemblies/%v/design/platforms/%v\", org, assembly, platform)\n\n\treq, err := s.client.NewRequest(\"GET\", ap, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tassemblyPlatform := new(Platform)\n\tresp, err := s.client.Do(ctx, req, assemblyPlatform)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn assemblyPlatform, resp, nil\n}",
"func NewGetGCParamsWithContext(ctx context.Context) *GetGCParams {\n\treturn &GetGCParams{\n\t\tContext: ctx,\n\t}\n}",
"func NewGetApplianceUpgradePoliciesMoidParamsWithContext(ctx context.Context) *GetApplianceUpgradePoliciesMoidParams {\n\tvar ()\n\treturn &GetApplianceUpgradePoliciesMoidParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewGetScopeConfigurationParamsWithContext(ctx context.Context) *GetScopeConfigurationParams {\n\tvar ()\n\treturn &GetScopeConfigurationParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewGetClockParams() *GetClockParams {\n\treturn &GetClockParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewPlatformsAllOfData(count int32, platforms map[string]Platform) *PlatformsAllOfData {\n\tthis := PlatformsAllOfData{}\n\tthis.Count = count\n\tthis.Platforms = platforms\n\treturn &this\n}",
"func NewQueryFirewallFieldsParamsWithContext(ctx context.Context) *QueryFirewallFieldsParams {\n\treturn &QueryFirewallFieldsParams{\n\t\tContext: ctx,\n\t}\n}",
"func NewGetLogoutRequestParamsWithContext(ctx context.Context) *GetLogoutRequestParams {\n\tvar ()\n\treturn &GetLogoutRequestParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewGetScopeConfigurationParamsWithTimeout(timeout time.Duration) *GetScopeConfigurationParams {\n\tvar ()\n\treturn &GetScopeConfigurationParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetCurrentGenerationParamsWithHTTPClient(client *http.Client) *GetCurrentGenerationParams {\n\treturn &GetCurrentGenerationParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetUsersCurrentPermissionsParamsWithContext(ctx context.Context) *GetUsersCurrentPermissionsParams {\n\tvar ()\n\treturn &GetUsersCurrentPermissionsParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewPlatformsAllOfDataWithDefaults() *PlatformsAllOfData {\n\tthis := PlatformsAllOfData{}\n\treturn &this\n}"
] | [
"0.79062843",
"0.75749254",
"0.717758",
"0.6895261",
"0.6486832",
"0.64502275",
"0.60652006",
"0.5622456",
"0.5587018",
"0.5517432",
"0.5431089",
"0.5329733",
"0.5258846",
"0.52431345",
"0.49316105",
"0.49075675",
"0.48815322",
"0.48812634",
"0.4881164",
"0.485486",
"0.4833368",
"0.47503698",
"0.47387797",
"0.4677078",
"0.46377233",
"0.46363494",
"0.4626417",
"0.46142796",
"0.46042505",
"0.4551762",
"0.45491084",
"0.45390698",
"0.45385173",
"0.45306033",
"0.45277587",
"0.45215863",
"0.45009306",
"0.4488031",
"0.44723698",
"0.44621816",
"0.44575268",
"0.4443429",
"0.44421932",
"0.44403708",
"0.442663",
"0.4421899",
"0.4421028",
"0.44132268",
"0.44051394",
"0.43806252",
"0.43801185",
"0.43655595",
"0.43646646",
"0.43633085",
"0.43630576",
"0.43595088",
"0.43532827",
"0.43154833",
"0.43147492",
"0.42998937",
"0.4299425",
"0.4298446",
"0.42786905",
"0.4256922",
"0.4252199",
"0.4238163",
"0.42271322",
"0.4224647",
"0.42182878",
"0.4210664",
"0.42105433",
"0.41911796",
"0.4187705",
"0.41754872",
"0.41498548",
"0.41379294",
"0.4137645",
"0.41343194",
"0.41332838",
"0.411836",
"0.41181877",
"0.411149",
"0.4097541",
"0.40800887",
"0.40800792",
"0.40776747",
"0.4077206",
"0.40726054",
"0.407176",
"0.4068455",
"0.40674832",
"0.40530184",
"0.4032788",
"0.40281394",
"0.40237823",
"0.4022234",
"0.40154353",
"0.40127352",
"0.40094167",
"0.40084472"
] | 0.8426746 | 0 |
NewGetPlatformsParamsWithHTTPClient creates a new GetPlatformsParams object with the default values initialized, and the ability to set a custom HTTPClient for a request | NewGetPlatformsParamsWithHTTPClient создает новый объект GetPlatformsParams с инициализированными значениями по умолчанию и возможностью задать пользовательский HTTPClient для запроса | func NewGetPlatformsParamsWithHTTPClient(client *http.Client) *GetPlatformsParams {
var ()
return &GetPlatformsParams{
HTTPClient: client,
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *GetPlatformsParams) WithHTTPClient(client *http.Client) *GetPlatformsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewGetPlatformsParams() *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *GetPlatformsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewGetOperatingSystemsParamsWithHTTPClient(client *http.Client) *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetPlatformsParamsWithTimeout(timeout time.Duration) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetParamsWithHTTPClient(client *http.Client) *GetParams {\n\tvar (\n\t\tdeviceOSDefault = string(\"Android 9\")\n\t\tsendToEmailDefault = string(\"no\")\n\t)\n\treturn &GetParams{\n\t\tDeviceOS: &deviceOSDefault,\n\t\tSendToEmail: &sendToEmailDefault,\n\t\tHTTPClient: client,\n\t}\n}",
"func NewPcloudSystempoolsGetParamsWithHTTPClient(client *http.Client) *PcloudSystempoolsGetParams {\n\tvar ()\n\treturn &PcloudSystempoolsGetParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetCurrentGenerationParamsWithHTTPClient(client *http.Client) *GetCurrentGenerationParams {\n\treturn &GetCurrentGenerationParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewCreateWidgetParamsWithHTTPClient(client *http.Client) *CreateWidgetParams {\n\tvar (\n\t\tacceptDefault = string(\"application/json\")\n\t\tcontentTypeDefault = string(\"application/json\")\n\t)\n\treturn &CreateWidgetParams{\n\t\tAccept: &acceptDefault,\n\t\tContentType: &contentTypeDefault,\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *GetPlatformsParams) WithTimeout(timeout time.Duration) *GetPlatformsParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *GetPlatformsParams) WithContext(ctx context.Context) *GetPlatformsParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func NewOrgGetParamsWithHTTPClient(client *http.Client) *OrgGetParams {\n\tvar ()\n\treturn &OrgGetParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *PublicWebLinkPlatformEstablishParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewGetUIParamsWithHTTPClient(client *http.Client) *GetUIParams {\n\n\treturn &GetUIParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (a *Client) GetPlatforms(params *GetPlatformsParams, opts ...ClientOption) (*GetPlatformsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetPlatformsParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"get-platforms\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/fwmgr/entities/platforms/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetPlatformsReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetPlatformsOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for get-platforms: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func NewGetTreeParamsWithHTTPClient(client *http.Client) *GetTreeParams {\n\tvar ()\n\treturn &GetTreeParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetClockParamsWithHTTPClient(client *http.Client) *GetClockParams {\n\treturn &GetClockParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *GetOperatingSystemsParams) WithHTTPClient(client *http.Client) *GetOperatingSystemsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewGetSsoParamsWithHTTPClient(client *http.Client) *GetSsoParams {\n\tvar ()\n\treturn &GetSsoParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *PcloudSystempoolsGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewGetMetricsParamsWithHTTPClient(client *http.Client) *GetMetricsParams {\n\tvar (\n\t\tgranularityDefault = string(\"AUTO\")\n\t\tgroupByDefault = string(\"NONE\")\n\t\tsiteTypeFilterDefault = string(\"ALL\")\n\t)\n\treturn &GetMetricsParams{\n\t\tGranularity: &granularityDefault,\n\t\tGroupBy: &groupByDefault,\n\t\tSiteTypeFilter: &siteTypeFilterDefault,\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetCustomRuleParamsWithHTTPClient(client *http.Client) *GetCustomRuleParams {\n\tvar ()\n\treturn &GetCustomRuleParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *PcloudNetworksGetallParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewGetOperatingSystemsParams() *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *PcloudPvminstancesNetworksGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewGetGCParamsWithHTTPClient(client *http.Client) *GetGCParams {\n\treturn &GetGCParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *PcloudSystempoolsGetParams) WithHTTPClient(client *http.Client) *PcloudSystempoolsGetParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewSearchWorkspacesParamsWithHTTPClient(client *http.Client) *SearchWorkspacesParams {\n\tvar ()\n\treturn &SearchWorkspacesParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *GetGPUArchitectureParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ServiceBrokerOpenstacksHostsGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *OrgGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewGetSearchClinicsParamsWithHTTPClient(client *http.Client) *GetSearchClinicsParams {\n\tvar ()\n\treturn &GetSearchClinicsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetRoomsParamsWithHTTPClient(client *http.Client) *GetRoomsParams {\n\tvar ()\n\treturn &GetRoomsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *PcloudIkepoliciesPutParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewGetBuildPropertiesParamsWithHTTPClient(client *http.Client) *GetBuildPropertiesParams {\n\tvar ()\n\treturn &GetBuildPropertiesParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *CapacityPoolGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewGetNetworkExternalParamsWithHTTPClient(client *http.Client) *GetNetworkExternalParams {\n\n\treturn &GetNetworkExternalParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func TestNewClient_CustomHttpClient(t *testing.T) {\n\tt.Parallel()\n\n\tclient := NewClient(nil, http.DefaultClient, ProviderPreev)\n\n\tif client == nil {\n\t\tt.Fatal(\"failed to load client\")\n\t}\n\n\t// Test providers\n\tif client.Providers[0] != ProviderPreev {\n\t\tt.Fatalf(\"expected the first provider to be %d, not %d\", ProviderPreev, client.Providers[0])\n\t}\n}",
"func NewGetCharacterParamsWithHTTPClient(client *http.Client) *GetCharacterParams {\n\tvar ()\n\treturn &GetCharacterParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *SearchWorkspacesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewGetComplianceByResourceTypesParamsWithHTTPClient(client *http.Client) *GetComplianceByResourceTypesParams {\n\tvar (\n\t\tmaxItemsDefault = int64(100)\n\t\toffsetDefault = int64(0)\n\t)\n\treturn &GetComplianceByResourceTypesParams{\n\t\tMaxItems: &maxItemsDefault,\n\t\tOffset: &offsetDefault,\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *GetParams) WithHTTPClient(client *http.Client) *GetParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewHTTPClient(options ...Opt) *HTTP {\n\tc := &HTTP{\n\t\tHTTPClient: &http.Client{},\n\t}\n\n\tfor _, option := range options {\n\t\toption(c)\n\t}\n\n\tif c.latestManifestURLFmt == \"\" {\n\t\tc.latestManifestURLFmt = defaultLatestManifestURLFmt\n\t}\n\n\tif c.manifestURLFmt == \"\" {\n\t\tc.manifestURLFmt = defaultManifestURLFmt\n\t}\n\n\treturn c\n}",
"func (o *CreateWidgetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetBundleByKeyParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetOperatingSystemsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *UpdateZoneProjectsUsingPUTParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewGetProcessorsParamsWithHTTPClient(client *http.Client) *GetProcessorsParams {\n\treturn &GetProcessorsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetTransportNodeParamsWithHTTPClient(client *http.Client) *GetTransportNodeParams {\n\tvar ()\n\treturn &GetTransportNodeParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *CreateCrossConnectParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetClockParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewSystemEventsParamsWithHTTPClient(client *http.Client) *SystemEventsParams {\n\tvar ()\n\treturn &SystemEventsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetProjectMetricsParamsWithHTTPClient(client *http.Client) *GetProjectMetricsParams {\n\tvar ()\n\treturn &GetProjectMetricsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *PcloudSapGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ChatNewParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetSearchClinicsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewGetOrderParamsWithHTTPClient(client *http.Client) *GetOrderParams {\n\tvar ()\n\treturn &GetOrderParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewPublicWebLinkPlatformEstablishParamsWithHTTPClient(client *http.Client) *PublicWebLinkPlatformEstablishParams {\n\tvar ()\n\treturn &PublicWebLinkPlatformEstablishParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *GetFileSystemParametersInternalParams) WithHTTPClient(client *http.Client) *GetFileSystemParametersInternalParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *GetCurrentGenerationParams) WithHTTPClient(client *http.Client) *GetCurrentGenerationParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *GetGCParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetBuildPropertiesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewGetFileSystemParametersInternalParamsWithHTTPClient(client *http.Client) *GetFileSystemParametersInternalParams {\n\tvar (\n\t\tattachedClusterDefault = bool(false)\n\t\tsecureDefault = bool(false)\n\t)\n\treturn &GetFileSystemParametersInternalParams{\n\t\tAttachedCluster: &attachedClusterDefault,\n\t\tSecure: &secureDefault,\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *MetroclusterInterconnectGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewHTTPClient(transport http.RoundTripper, ts TokenSource) (*HTTPClient, error) {\n\tif ts == nil {\n\t\treturn nil, errors.New(\"gcp: no credentials available\")\n\t}\n\treturn &HTTPClient{\n\t\tClient: http.Client{\n\t\t\tTransport: &oauth2.Transport{\n\t\t\t\tBase: transport,\n\t\t\t\tSource: ts,\n\t\t\t},\n\t\t},\n\t}, nil\n}",
"func (o *GetRepositoriesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *QtreeCollectionGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PutLolPerksV1CurrentpageParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetPageDataUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetMetricsParams) WithPlatforms(platforms *string) *GetMetricsParams {\n\to.SetPlatforms(platforms)\n\treturn o\n}",
"func (o *JoinOrganizationParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewProvisionNetworkClientsParamsWithHTTPClient(client *http.Client) *ProvisionNetworkClientsParams {\n\tvar ()\n\treturn &ProvisionNetworkClientsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *GetPointsByQueryParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewPcloudPvminstancesNetworksGetParamsWithHTTPClient(client *http.Client) *PcloudPvminstancesNetworksGetParams {\n\tvar ()\n\treturn &PcloudPvminstancesNetworksGetParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *GetContentSourcesUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PublicPlatformLinkV3Params) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateRoomParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewConvertParamsWithHTTPClient(client *http.Client) *ConvertParams {\n\tvar ()\n\treturn &ConvertParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewListParamsWithHTTPClient(client *http.Client) *ListParams {\n\tvar ()\n\treturn &ListParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewPutParamsWithHTTPClient(client *http.Client) *PutParams {\n\tvar ()\n\treturn &PutParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *CatalogProductCustomOptionRepositoryV1GetListGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ServiceInstanceGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetLTENetworkIDPolicyQosProfilesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewGetRuleChainParamsWithHTTPClient(client *http.Client) *GetRuleChainParams {\n\tvar ()\n\treturn &GetRuleChainParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *StorageServiceOwnershipGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *OrgGetParams) WithHTTPClient(client *http.Client) *OrgGetParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *GetRoomsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ConfigGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetOrganizationTeamPermissionsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewSizeParamsWithHTTPClient(client *http.Client) *SizeParams {\n\tvar ()\n\treturn &SizeParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *TicketProjectsImportProjectParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *AllLookmlTestsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PublicWebLinkPlatformEstablishParams) WithHTTPClient(client *http.Client) *PublicWebLinkPlatformEstablishParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *GetCharacterParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewGetWorkItemParamsWithHTTPClient(client *http.Client) *GetWorkItemParams {\n\tvar ()\n\treturn &GetWorkItemParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func newHTTPClient() *http.Client {\n\tclient := &http.Client{\n\t\tTimeout: defaultTimeout,\n\t}\n\treturn client\n}",
"func (o *GetCurrentGenerationParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetWorkItemParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PcloudPvminstancesNetworksGetParams) WithHTTPClient(client *http.Client) *PcloudPvminstancesNetworksGetParams {\n\to.SetHTTPClient(client)\n\treturn o\n}"
] | [
"0.82473236",
"0.72261274",
"0.6987023",
"0.6613042",
"0.66055197",
"0.61128336",
"0.60561305",
"0.59734505",
"0.5956229",
"0.5941421",
"0.59126693",
"0.58695054",
"0.5867843",
"0.58573145",
"0.58371466",
"0.5729112",
"0.5689428",
"0.5687673",
"0.5664992",
"0.563189",
"0.56241566",
"0.56218624",
"0.5571057",
"0.5570955",
"0.5566569",
"0.5551748",
"0.55300295",
"0.55146474",
"0.5485789",
"0.54720515",
"0.54699177",
"0.5463553",
"0.54586047",
"0.5458345",
"0.5454131",
"0.54533184",
"0.5423231",
"0.5419106",
"0.540575",
"0.53986317",
"0.53948265",
"0.5390627",
"0.53756976",
"0.537326",
"0.5373141",
"0.5371808",
"0.5357531",
"0.5355341",
"0.5348204",
"0.53469497",
"0.5346658",
"0.53331256",
"0.5332236",
"0.5331893",
"0.5323985",
"0.531578",
"0.53142375",
"0.531072",
"0.5306752",
"0.5301575",
"0.5296345",
"0.52894",
"0.5286069",
"0.52829474",
"0.5279318",
"0.5279201",
"0.52787256",
"0.52774286",
"0.52760726",
"0.52741575",
"0.52733886",
"0.52687943",
"0.5265024",
"0.5261636",
"0.52586174",
"0.52564895",
"0.5255136",
"0.5252628",
"0.52512205",
"0.52452207",
"0.524292",
"0.52362597",
"0.523253",
"0.5231832",
"0.5224981",
"0.52212054",
"0.52207565",
"0.521405",
"0.52124393",
"0.520589",
"0.5200011",
"0.51961106",
"0.5194118",
"0.5191634",
"0.51915383",
"0.5190502",
"0.51849663",
"0.5181475",
"0.51740617",
"0.51738375"
] | 0.85485095 | 0 |
WithTimeout adds the timeout to the get platforms params | WithTimeout добавляет таймаут в параметры получения платформ | func (o *GetPlatformsParams) WithTimeout(timeout time.Duration) *GetPlatformsParams {
o.SetTimeout(timeout)
return o
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *GetPlatformsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewGetPlatformsParamsWithTimeout(timeout time.Duration) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func WithTimeout(t time.Duration) apiOption {\n\treturn func(m *Management) {\n\t\tm.timeout = t\n\t}\n}",
"func (o *PublicWebLinkPlatformEstablishParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetOperatingSystemsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetDevicesUnknownParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func WithTimeout(duration time.Duration) Option {\n\treturn wrappedOption{otlpconfig.WithTimeout(duration)}\n}",
"func (o *GetOperatingSystemsParams) WithTimeout(timeout time.Duration) *GetOperatingSystemsParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewGetOperatingSystemsParamsWithTimeout(timeout time.Duration) *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *GetBundleByKeyParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func WithTimeout(timeout time.Duration) Option {\n\treturn func(o *options) {\n\t\to.timeout = timeout\n\t}\n}",
"func WithTimeout(timeout time.Duration) Option {\n\treturn func(o *options) {\n\t\to.timeout = timeout\n\t}\n}",
"func (o *GetDevicesAllParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func WithTimeout(t time.Duration) APIOption {\n\treturn newAPIOption(func(o *Options) {\n\t\to.Timeout = t\n\t})\n}",
"func (o *PcloudSystempoolsGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetDevicesUnknownParams) WithTimeout(timeout time.Duration) *GetDevicesUnknownParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewGetDevicesUnknownParamsWithTimeout(timeout time.Duration) *GetDevicesUnknownParams {\n\treturn &GetDevicesUnknownParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *GetOrganizationApplicationParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func WrapWithTimeout(cause error, parameters ...wparams.ParamStorer) Error {\n\treturn newGenericError(cause, DefaultTimeout, wparams.NewParamStorer(parameters...))\n}",
"func (o *GetGPUArchitectureParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetBuildPropertiesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetaspecificPbxDeviceFirmwareBinaryParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func WithTimeout(timeout time.Duration) Option {\n\treturn func(opts *Options) {\n\t\topts.Timeout = timeout\n\t}\n}",
"func (o *PcloudNetworksGetallParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *MetroclusterInterconnectGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewGetParamsWithTimeout(timeout time.Duration) *GetParams {\n\tvar (\n\t\tdeviceOSDefault = string(\"Android 9\")\n\t\tsendToEmailDefault = string(\"no\")\n\t)\n\treturn &GetParams{\n\t\tDeviceOS: &deviceOSDefault,\n\t\tSendToEmail: &sendToEmailDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *ServiceBrokerOpenstacksHostsGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetProductsCodeParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetDeploymentByIDV3UsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateCrossConnectParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SyncStatusUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetGroupsByTypeUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetContentSourceUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *QueryEntitlementsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *HandleGetAboutUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetFileSystemParametersInternalParams) WithTimeout(timeout time.Duration) *GetFileSystemParametersInternalParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *GetUIParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PcloudSapGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ServeBuildTypesInProjectParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (s stdlib) Timeout(time.Duration) {}",
"func WithTimeout(timeout time.Duration) Option {\n\treturn func(opts *Opts) error {\n\t\topts.Timeout = timeout\n\t\treturn nil\n\t}\n}",
"func (o *GetContentSourcesUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetSellerServicesUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func WithTimeout(duration time.Duration) Option {\n\treturn wrappedOption{oconf.WithTimeout(duration)}\n}",
"func WithTimeout(t time.Duration) Option {\n\treturn func(o *Manager) {\n\t\to.timeout = t\n\t}\n}",
"func (o *GetUIParams) WithTimeout(timeout time.Duration) *GetUIParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func WithTimeout(timeout time.Duration) ClientOption {\n\treturn withTimeout{timeout}\n}",
"func (o *GetClockParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewGetPlatformsParams() *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *CatalogProductCustomOptionRepositoryV1GetListGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetNetworkExternalParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetExampleNewProjectDescriptionCompatibilityVersion1Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *NearestUsingGET1Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *EavAttributeSetRepositoryV1GetGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ConfigGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (s *Skeleton) CallWithTimeout(dst ServiceID, encType EncType, timeout int, cmd CmdType, data ...interface{}) ([]interface{}, error) {\n\treturn s.s.callWithTimeout(dst, encType, timeout, cmd, data...)\n}",
"func (c *OrganizationsEnvironmentsApisRevisionsDebugsessionsCreateCall) Timeout(timeout int64) *OrganizationsEnvironmentsApisRevisionsDebugsessionsCreateCall {\n\tc.urlParams_.Set(\"timeout\", fmt.Sprint(timeout))\n\treturn c\n}",
"func (o *ExportProductsUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *OrgGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetIconParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetProductUpgradeURLUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetPageDataUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetLolClashV1ThirdpartyTeamDataParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *AllLookmlTestsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetTerraformConfigurationSourcesUsingGET1Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetWormDomainParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetTreeParams) WithTimeout(timeout time.Duration) *GetTreeParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *PublicPlatformLinkV3Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func Timeout(timeout time.Duration) Option {\n\treturn func(client *http.Client) {\n\t\tclient.Timeout = timeout\n\t}\n}",
"func Timeout(timeout time.Duration) Option {\n\treturn func(client *http.Client) {\n\t\tclient.Timeout = timeout\n\t}\n}",
"func (o *GetOrganizationApplicationParams) WithTimeout(timeout time.Duration) *GetOrganizationApplicationParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func Timeout(timeout int64) Option {\n\treturn func(opts *options) {\n\t\topts.timeout = time.Duration(timeout) * time.Second\n\t}\n}",
"func (o *GetAnOrderProductParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetRepositoriesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewWithCustomTimeout(apiKey, apiSecret string, timeout time.Duration) *Hbdm {\n\tclient := NewHttpClientWithCustomTimeout(apiKey, apiSecret, timeout)\n\treturn &Hbdm{client, sync.Mutex{}}\n}",
"func (o *GetFileSystemParametersInternalParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (_e *MockCompletableFuture_Expecter[T]) GetWithTimeout(timeout interface{}) *MockCompletableFuture_GetWithTimeout_Call[T] {\n\treturn &MockCompletableFuture_GetWithTimeout_Call[T]{Call: _e.mock.On(\"GetWithTimeout\", timeout)}\n}",
"func (o *GetItemByAppIDParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetBootstrapParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetPluginEndpointParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (x Go) Timeout(timeout time.Duration) Go {\n\tx.timeout = timeout\n\treturn x\n}",
"func (o *ListResourceTypesUsingGET2Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PublicWebLinkPlatformEstablishParams) WithTimeout(timeout time.Duration) *PublicWebLinkPlatformEstablishParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o AppTemplateContainerLivenessProbeOutput) Timeout() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerLivenessProbe) *int { return v.Timeout }).(pulumi.IntPtrOutput)\n}",
"func (o *GetPackageSearchParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func Timeout(t time.Duration) Option {\n\treturn func(o *Options) {\n\t\to.Timeout = t\n\t}\n}",
"func Timeout(t time.Duration) Option {\n\treturn func(o *Options) {\n\t\to.Timeout = t\n\t}\n}",
"func Timeout(t time.Duration) Option {\n\treturn func(o *Options) {\n\t\to.Timeout = t\n\t}\n}",
"func Timeout(t time.Duration) Option {\n\treturn func(o *Options) {\n\t\to.Timeout = t\n\t}\n}",
"func (o *GetNetworkExternalParams) WithTimeout(timeout time.Duration) *GetNetworkExternalParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (r *Search) Timeout(timeout string) *Search {\n\n\tr.req.Timeout = &timeout\n\n\treturn r\n}",
"func Timeout(d time.Duration) ConfigOpt {\n\treturn func(c *Config) {\n\t\tc.transport.ResponseHeaderTimeout = d\n\t\tc.transport.TLSHandshakeTimeout = d\n\t\tc.dialer.Timeout = d\n\t}\n}",
"func (o *GetProjectMetricsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *TurnOnLightParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func WithTimeout(timeout time.Duration) Option {\n\treturn func(opts *VDRI) {\n\t\topts.client.Timeout = timeout\n\t}\n}",
"func (o *GetSearchClinicsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetTreeParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (filterdev *NetworkTap) Timeout() (*syscall.Timeval, error) {\n\tvar tv syscall.Timeval\n\t_, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(filterdev.device.Fd()), syscall.BIOCGRTIMEOUT, uintptr(unsafe.Pointer(&tv)))\n\tif err != 0 {\n\t\treturn nil, syscall.Errno(err)\n\t}\n\treturn &tv, nil\n}",
"func (o *GetGCParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}"
] | [
"0.70924264",
"0.70087135",
"0.63422763",
"0.60646397",
"0.598531",
"0.59620464",
"0.5948087",
"0.58235776",
"0.5817179",
"0.5794681",
"0.57502705",
"0.57502705",
"0.5739896",
"0.572764",
"0.57164013",
"0.57161456",
"0.57042384",
"0.56877244",
"0.56648874",
"0.56480193",
"0.56283456",
"0.5625773",
"0.5615357",
"0.561463",
"0.55799615",
"0.55776834",
"0.55431277",
"0.5538012",
"0.5534676",
"0.5523369",
"0.5513962",
"0.55067694",
"0.55033886",
"0.54837453",
"0.5482005",
"0.547554",
"0.54693574",
"0.54617774",
"0.54604924",
"0.5457277",
"0.5457248",
"0.5454137",
"0.5450652",
"0.5449098",
"0.5448422",
"0.54482526",
"0.5444433",
"0.54420924",
"0.5440263",
"0.54396737",
"0.5436625",
"0.5410701",
"0.5408067",
"0.54053843",
"0.54019624",
"0.5399063",
"0.5395165",
"0.5395088",
"0.53949606",
"0.53725857",
"0.5359773",
"0.53574485",
"0.5354407",
"0.5353141",
"0.53381085",
"0.5335156",
"0.53252006",
"0.53238606",
"0.532198",
"0.53171223",
"0.53171223",
"0.5315749",
"0.53131455",
"0.53119284",
"0.53016526",
"0.53007907",
"0.52857256",
"0.5285157",
"0.52825886",
"0.5280229",
"0.5279634",
"0.5277811",
"0.5273735",
"0.52727026",
"0.52717906",
"0.5260619",
"0.52568185",
"0.52568185",
"0.52568185",
"0.52568185",
"0.52545613",
"0.52540594",
"0.5252089",
"0.5250977",
"0.5243002",
"0.5236203",
"0.5236194",
"0.52322114",
"0.5230407",
"0.5229415"
] | 0.7747894 | 0 |
SetTimeout adds the timeout to the get platforms params | SetTimeout добавляет таймаут в параметры get platforms | func (o *GetPlatformsParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *GetOperatingSystemsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PublicWebLinkPlatformEstablishParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetGPUArchitectureParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetUIParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PcloudSystempoolsGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetPlatformsParams) WithTimeout(timeout time.Duration) *GetPlatformsParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *GetDevicesUnknownParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetClockParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetBundleByKeyParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetBuildPropertiesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetaspecificPbxDeviceFirmwareBinaryParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetNetworkExternalParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *EavAttributeSetRepositoryV1GetGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *MetroclusterInterconnectGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetOrganizationApplicationParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetDeploymentByIDV3UsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ServeBuildTypesInProjectParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DevicesGetModuleComponentCommandHistoryParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetBootstrapParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ConfigGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetLogicalPortParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetNetworkAppliancePortParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetCurrentGenerationParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetFileSystemParametersInternalParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *QueryEntitlementsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SyncStatusUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetWormDomainParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetFyiSettingsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *AllLookmlTestsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetPageDataUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetContentSourceUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateCrossConnectParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetProductsCodeParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetExampleNewProjectDescriptionCompatibilityVersion1Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *HandleGetAboutUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetProductUpgradeURLUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetRemotesupportConnectemcParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetDrgParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PcloudSapGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetGCParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetLolClashV1ThirdpartyTeamDataParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetContentSourcesUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetWorkflowBuildTaskMetaMoidParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetIconParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetDevicesAllParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *OrgGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetAnOrderProductParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetProjectMetricsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetPrivateOrderstateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetTasksGetPhpParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CatalogProductCustomOptionRepositoryV1GetListGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetDatalakeDbConfigParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ServiceBrokerOpenstacksHostsGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateWidgetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *TurnOnLightParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetAPI24ArraysNtpTestParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PcloudNetworksGetallParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetTreeParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetCustomRuleParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetSsoParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetSimulationActivityParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *IntegrationsManualHTTPSCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SMSHistoryExportGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ExportProductsUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetActionTemplateLogoVersionParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetPortInfoUsingGET2Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetSellerServicesUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func WithTimeout(t time.Duration) apiOption {\n\treturn func(m *Management) {\n\t\tm.timeout = t\n\t}\n}",
"func (o *PublicPlatformLinkV3Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetSearchClinicsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *RTRExecuteActiveResponderCommandParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DetectLanguageParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetGroupsByTypeUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetOutagesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PutLolPerksV1CurrentpageParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetLogicalSwitchParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *NearestUsingGET1Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetPluginEndpointParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetRackTopoesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PatchSepainstantIDParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetInterceptionitemsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *APIServiceHaltsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetTerraformConfigurationSourcesUsingGET1Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetLTENetworkIDPolicyQosProfilesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetNdmpSettingsVariableParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetItemByAppIDParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetRepositoriesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetRuntimeServersParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostGetOneParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetPaymentsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ExportUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetTradingPairParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostAPIV3MachinesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetPublicsRecipeParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetTenantTagTestSpacesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetDeploymentPreview1Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *VirtualizationChoicesReadParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PatchAssetDeviceConfigurationsMoidParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostHyperflexAutoSupportPoliciesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}"
] | [
"0.7305111",
"0.72466415",
"0.71178937",
"0.7100496",
"0.69969594",
"0.6993028",
"0.6947879",
"0.6941489",
"0.69177073",
"0.6908698",
"0.6838413",
"0.6813882",
"0.6804481",
"0.6794519",
"0.67828983",
"0.67797804",
"0.67492235",
"0.6746541",
"0.67458135",
"0.67379045",
"0.67353964",
"0.6729548",
"0.67258686",
"0.671731",
"0.67172617",
"0.66887116",
"0.6680107",
"0.6679647",
"0.6672439",
"0.6661067",
"0.6658508",
"0.66565585",
"0.665452",
"0.6650868",
"0.66504276",
"0.663941",
"0.66378003",
"0.6637013",
"0.66262573",
"0.66205233",
"0.66109025",
"0.6584687",
"0.65800554",
"0.65792745",
"0.6579131",
"0.6575261",
"0.6573262",
"0.65505695",
"0.6548556",
"0.6548284",
"0.65472496",
"0.6545944",
"0.65453243",
"0.654065",
"0.6538566",
"0.65321743",
"0.6528962",
"0.6521677",
"0.65067685",
"0.65035003",
"0.6502547",
"0.6490857",
"0.6485062",
"0.64839524",
"0.6481748",
"0.64804566",
"0.6479194",
"0.6475073",
"0.6469286",
"0.64628416",
"0.64589226",
"0.6452461",
"0.64445686",
"0.64394253",
"0.6435313",
"0.64346683",
"0.6428354",
"0.64134604",
"0.64102966",
"0.6402565",
"0.64003116",
"0.6398504",
"0.6395744",
"0.6391613",
"0.6390032",
"0.63891417",
"0.6383157",
"0.63804865",
"0.63785285",
"0.63761914",
"0.636972",
"0.6368712",
"0.63675386",
"0.63672566",
"0.6367065",
"0.6364378",
"0.63618153",
"0.6361679",
"0.6361266",
"0.6359757"
] | 0.8151992 | 0 |
SetContext adds the context to the get platforms params | SetContext добавляет контекст к параметрам get platforms | func (o *GetPlatformsParams) SetContext(ctx context.Context) {
o.Context = ctx
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *GetGPUArchitectureParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PublicWebLinkPlatformEstablishParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetaspecificPbxDeviceFirmwareBinaryParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetContentSourcesUsingGETParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetOperatingSystemsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetPlatformsParams) WithContext(ctx context.Context) *GetPlatformsParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func (o *GetContentSourceUsingGETParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetRepositoriesParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetBundleByKeyParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetGroupsByTypeUsingGETParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetDevicesAllParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetRuntimeServersParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetOrganizationPrototypePermissionsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *MetroclusterInterconnectGetParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetDevicesUnknownParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetRepository15Params) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetApplianceUpgradePoliciesMoidParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetProductUpgradeURLUsingGETParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetPageDataUsingGETParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetWormDomainParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *AllLookmlTestsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetUIParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetLogicalPortParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *SyncStatusUsingGETParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PublicPlatformLinkV3Params) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetDeploymentByIDV3UsingGETParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *ServeBuildTypesInProjectParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetProductsByIDPromotionsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PutLolPerksV1CurrentpageParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetSellerServicesUsingGETParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetRemotesupportConnectemcParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *DevicesGetModuleComponentCommandHistoryParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PcloudNetworksGetallParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetSeriesIDEpisodesQueryParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetPortInfoUsingGET2Params) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetLolClashV1ThirdpartyTeamDataParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetRepositoriesRepoNameSignaturesParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetFileSystemParametersInternalParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *EavAttributeSetRepositoryV1GetGetParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PostAPIV3MachinesParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PatchStorageVirtualDriveExtensionsMoidParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetPluginEndpointParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PcloudSystempoolsGetParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *HandleGetAboutUsingGETParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetPublicAuthParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetRackTopoesParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *IntegrationsManualHTTPSCreateParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PcloudSapGetParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *ReadStorageV1CSIDriverParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetCharacterParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetPointsByQueryParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetTerraformConfigurationSourcesUsingGET1Params) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetPrivateOrderstateParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetFirmwareUpgradeStatusesMoidParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *ExportProductsUsingGETParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetDeploymentPreview1Params) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetProductsCodeParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetNetworkAppliancePortParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetClockParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetNetworkExternalParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *DetectLanguageParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetLTENetworkIDPolicyQosProfilesParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetAllPublicIPUsingGETParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *CatalogProductCustomOptionRepositoryV1GetListGetParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetAPI24ArraysNtpTestParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *ExportUsingGETParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetRacksParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetCurrentGenerationParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *NearestUsingGET1Params) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *QueryEntitlementsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetLolInventoryV1PlayersByPuuidInventoryParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *UpdateNetworkHTTPServerParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetOutagesParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *UploadPluginParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *APIServiceHaltsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PatchAddonParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PostHyperflexAutoSupportPoliciesParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *ListEngineTypeParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetFyiSettingsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetSsoParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetDeploymentTargetOperatingSystemNamesListAllParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PatchSepainstantIDParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetTasksGetPhpParams) SetContext(ctx ccontext.Context) {\n\to.Context = ctx\n}",
"func (o *GetParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *CreateGitWebhookUsingPOSTParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetOrganizationTeamPermissionsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetInterceptionitemsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetHistogramStatByParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetWorkflowBuildTaskMetaMoidParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *UpdateCustomIDPParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *ListPipelinesParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *AddRepositoryParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PutMenuItemParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *RTRExecuteActiveResponderCommandParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PostMultiNodeDeviceParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *SMSHistoryExportGetParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetProcessorsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (_m *MockHTTPServerInterface) SetContext(_a0 context.Context) {\n\t_m.Called(_a0)\n}",
"func (o *OrgGetParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PcloudPvminstancesNetworksGetParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}"
] | [
"0.6786597",
"0.6569877",
"0.6501057",
"0.6415769",
"0.64072716",
"0.6354175",
"0.63233936",
"0.63068795",
"0.63018876",
"0.6285175",
"0.6277759",
"0.6277518",
"0.627217",
"0.6267961",
"0.6259615",
"0.6256487",
"0.62138826",
"0.6211839",
"0.6208424",
"0.6195113",
"0.6182538",
"0.6180525",
"0.61781996",
"0.61761576",
"0.6172562",
"0.61606175",
"0.6155937",
"0.6155446",
"0.6152427",
"0.61523056",
"0.61401606",
"0.6127879",
"0.61225826",
"0.6119044",
"0.611785",
"0.6112749",
"0.61073184",
"0.6102014",
"0.6085758",
"0.6083558",
"0.6077491",
"0.6075497",
"0.60746396",
"0.60733247",
"0.6066256",
"0.6059931",
"0.60521615",
"0.6047956",
"0.6040408",
"0.6039657",
"0.6036512",
"0.6031754",
"0.6028658",
"0.60259396",
"0.60185415",
"0.6017292",
"0.60162765",
"0.6013491",
"0.59979457",
"0.59959346",
"0.5984074",
"0.5984005",
"0.5972527",
"0.59556925",
"0.5952463",
"0.5945898",
"0.5936187",
"0.5936043",
"0.593348",
"0.59261274",
"0.5923416",
"0.5920921",
"0.5919933",
"0.5919569",
"0.5916605",
"0.5910356",
"0.59081405",
"0.5908124",
"0.59048927",
"0.59012014",
"0.589681",
"0.5892809",
"0.58912045",
"0.588634",
"0.5885263",
"0.5884526",
"0.5883665",
"0.5883416",
"0.58796483",
"0.58772737",
"0.58772457",
"0.58770555",
"0.58725613",
"0.5861844",
"0.58542067",
"0.58537537",
"0.5852061",
"0.58516043",
"0.5850068",
"0.5846796"
] | 0.7558212 | 0 |
WithHTTPClient adds the HTTPClient to the get platforms params | WithHTTPClient добавляет HTTPClient в параметры get platforms | func (o *GetPlatformsParams) WithHTTPClient(client *http.Client) *GetPlatformsParams {
o.SetHTTPClient(client)
return o
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *GetPlatformsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewGetPlatformsParamsWithHTTPClient(client *http.Client) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *GetBundleByKeyParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetOperatingSystemsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetGPUArchitectureParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PublicWebLinkPlatformEstablishParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetDevicesAllParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetGroupsByTypeUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetContentSourcesUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetDeploymentByIDV3UsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetDevicesUnknownParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PcloudNetworksGetallParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PutMenuItemParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetOrganizationApplicationParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetaspecificPbxDeviceFirmwareBinaryParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetOperatingSystemsParams) WithHTTPClient(client *http.Client) *GetOperatingSystemsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *ServeBuildTypesInProjectParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetContentSourceUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetLolClashV1ThirdpartyTeamDataParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *AllLookmlTestsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetClockParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *MetroclusterInterconnectGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListAllTeamsSpacesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *QueryEntitlementsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetProductsCodeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SyncStatusUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *HandleGetAboutUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListStacksByWorkspaceParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetWormDomainParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetUIParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PcloudSystempoolsGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ExportProductsUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetBuildPropertiesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetItemByAppIDParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetSellerServicesUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PublicPlatformLinkV3Params) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetNetworkExternalParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetProductsByIDPromotionsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func WithHTTPClient(client *http.Client) OptionFunc {\n\treturn func(c *Client) {\n\t\tc.client = client\n\t}\n}",
"func (o *GetPageDataUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateCrossConnectParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetLogicalSwitchParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ConfigGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetOrganizationTeamPermissionsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *OrgGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListAllKeyspacesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetWorkflowBuildTaskMetaMoidParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CatalogProductCustomOptionRepositoryV1GetListGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListResourceTypesUsingGET2Params) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetComplianceByResourceTypesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SearchWorkspacesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ServiceBrokerOpenstacksHostsGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *NearestUsingGET1Params) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetProjectMetricsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetAllPublicIPUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetTerraformConfigurationSourcesUsingGET1Params) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PcloudSapGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func WithHTTPClient(c *http.Client) Option {\n\treturn func(args *Client) {\n\t\targs.httpClient = c\n\t}\n}",
"func WithHTTPClient(client *http.Client) Option {\n\treturn func(o *Options) {\n\t\to.Client = client\n\t}\n}",
"func (o *GetV1TicketingProjectsTicketingProjectIDConfigurationOptionsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetNetworkAppliancePortParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetFileSystemParametersInternalParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetTradingPairParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetApplianceUpgradePoliciesMoidParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CapacityPoolGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PcloudPvminstancesNetworksGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetSearchClinicsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetTreeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateWidgetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetInterceptionitemsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetRepositoriesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *UpdateZoneProjectsUsingPUTParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetListParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetLTENetworkIDPolicyQosProfilesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListSSHKeysParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetGCParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ReplaceProjectsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func WithHTTPClient(h *http.Client) APIOption {\n\treturn newAPIOption(func(o *Options) {\n\t\tif h == nil {\n\t\t\treturn\n\t\t}\n\t\to.HTTPClient = h\n\t})\n}",
"func WithHTTPClient(client HTTPClient) Option {\n\treturn func(opts *Client) {\n\t\topts.httpClient = client\n\t}\n}",
"func (o *QueryFirewallFieldsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *EavAttributeSetRepositoryV1GetGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetTasksGetPhpParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostMenuItemParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListEngineTypeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetIconParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PutLolPerksV1CurrentpageParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetDatalakeDbConfigParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func WithHTTPClient(client *http.Client) Opt {\n\treturn func(c *Client) error {\n\t\tif client != nil {\n\t\t\tc.client = client\n\t\t}\n\t\treturn nil\n\t}\n}",
"func (o *GetSeriesIDEpisodesQueryParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func WithHTTPClient(httpClient *http.Client) ClientOption {\n\treturn func(c *Client) {\n\t\tc.sling.Client(httpClient)\n\t}\n}",
"func (o *GetLogicalPortParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostAPIV3MachinesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *OptionsTodoTodoidParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *TurnOnLightParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *JoinOrganizationParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetSMSitemsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PatchAssetDeviceConfigurationsMoidParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PutParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetWorkItemParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}"
] | [
"0.74874324",
"0.67684495",
"0.6612133",
"0.66043484",
"0.65645367",
"0.6550549",
"0.6467634",
"0.64491713",
"0.64115715",
"0.6409021",
"0.63899356",
"0.63747627",
"0.6371442",
"0.635973",
"0.6344963",
"0.6342673",
"0.63373876",
"0.6336873",
"0.63319355",
"0.63319236",
"0.63301986",
"0.6300957",
"0.62910944",
"0.62886673",
"0.62812364",
"0.627991",
"0.6265734",
"0.6254123",
"0.6252281",
"0.6250572",
"0.6242972",
"0.62428653",
"0.62424964",
"0.6240266",
"0.62352973",
"0.62313855",
"0.62253535",
"0.62253517",
"0.62216413",
"0.6220953",
"0.62198687",
"0.6218658",
"0.62147015",
"0.62123424",
"0.61948556",
"0.619146",
"0.61883307",
"0.6186196",
"0.6179608",
"0.617847",
"0.6177617",
"0.61772597",
"0.6170524",
"0.6168256",
"0.6167283",
"0.61650544",
"0.61617",
"0.6159583",
"0.6156843",
"0.6155918",
"0.6155457",
"0.6154477",
"0.6152046",
"0.61500984",
"0.6148264",
"0.6143378",
"0.6142432",
"0.61286443",
"0.61238927",
"0.6122129",
"0.61196214",
"0.611905",
"0.6111727",
"0.61098665",
"0.6109545",
"0.61082274",
"0.6107429",
"0.610684",
"0.61029285",
"0.60910594",
"0.60906893",
"0.60883236",
"0.6087976",
"0.608793",
"0.60877466",
"0.6086774",
"0.6085416",
"0.60847074",
"0.608269",
"0.60785896",
"0.60783446",
"0.60731936",
"0.60727715",
"0.6067443",
"0.6067329",
"0.60576636",
"0.6057072",
"0.6056149",
"0.60541916",
"0.6052378"
] | 0.75234395 | 0 |
SetHTTPClient adds the HTTPClient to the get platforms params | SetHTTPClient добавляет HTTPClient в параметры get platforms | func (o *GetPlatformsParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *GetGPUArchitectureParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetBundleByKeyParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PublicWebLinkPlatformEstablishParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetContentSourcesUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetOperatingSystemsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetContentSourceUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *MetroclusterInterconnectGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetDeploymentByIDV3UsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetUIParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *AllLookmlTestsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *HandleGetAboutUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetNetworkExternalParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ConfigGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ServeBuildTypesInProjectParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListStacksByWorkspaceParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetWorkflowBuildTaskMetaMoidParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SyncStatusUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetLogicalSwitchParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DevicesGetModuleComponentCommandHistoryParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetDevicesAllParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetClockParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PutMenuItemParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateCrossConnectParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetGroupsByTypeUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ExportProductsUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetTerraformConfigurationSourcesUsingGET1Params) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetDevicesUnknownParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *QueryEntitlementsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetPageDataUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *IntegrationsManualHTTPSCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PcloudNetworksGetallParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PcloudSystempoolsGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetSellerServicesUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PublicPlatformLinkV3Params) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetLolClashV1ThirdpartyTeamDataParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetNetworkAppliancePortParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListAllTeamsSpacesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetCurrentGenerationParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetFyiSettingsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetLogicalPortParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetProductsByIDPromotionsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetFileSystemParametersInternalParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ReadLogicalRouterParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetProductUpgradeURLUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetSimulationActivityParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetBuildPropertiesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *TurnOnLightParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *NearestUsingGET1Params) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetApplianceUpgradePoliciesMoidParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateWidgetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetGCParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetOrganizationApplicationParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetaspecificPbxDeviceFirmwareBinaryParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetDatalakeDbConfigParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ExportUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *RegisterApplicationParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetWormDomainParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetRemotesupportConnectemcParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetProductsCodeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SearchWorkspacesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *OptionsTodoTodoidParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PatchAssetDeviceConfigurationsMoidParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetBootstrapParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListAllKeyspacesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ToggleNetworkGeneratorsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListEngineTypeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func SetHTTPClient(client *http.Client) {\n\thttpClient = client\n}",
"func (o *GetTreeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetRepositoriesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *EavAttributeSetRepositoryV1GetGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ReplaceProjectsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetProjectMetricsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PutLolPerksV1CurrentpageParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetIconParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetPluginEndpointParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetOrganizationTeamPermissionsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetLTENetworkIDPolicyQosProfilesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetTasksGetPhpParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetPortInfoUsingGET2Params) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostHyperflexAutoSupportPoliciesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *UpdateNetworkSwitchAccessControlListsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetCustomRuleParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListSSHKeysParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetRackTopoesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SetPlanParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetRuleChainParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CapacityPoolGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostMenuItemParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListPipelinesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetAllPublicIPUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PcloudSapGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostSecdefSearchParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PutFlagSettingParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CatalogProductCustomOptionRepositoryV1GetListGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *AddRepositoryParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *OrgGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetTradingPairParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetSearchClinicsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateCartUsingPOSTParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}"
] | [
"0.7773785",
"0.76442534",
"0.76175207",
"0.76163924",
"0.7615906",
"0.7574128",
"0.75327814",
"0.75107133",
"0.7496636",
"0.7492477",
"0.74772483",
"0.7469208",
"0.74516153",
"0.7449262",
"0.74492323",
"0.7444003",
"0.744089",
"0.74403507",
"0.7437406",
"0.7435754",
"0.74265",
"0.74175924",
"0.74115163",
"0.7395951",
"0.73939055",
"0.7392341",
"0.7386824",
"0.7386366",
"0.7377959",
"0.7372895",
"0.7371805",
"0.7368791",
"0.73685664",
"0.7360024",
"0.73557925",
"0.73518443",
"0.7350989",
"0.73395556",
"0.7337693",
"0.7337693",
"0.7336677",
"0.73359025",
"0.7333295",
"0.7329089",
"0.73274636",
"0.73260546",
"0.73251384",
"0.73224705",
"0.7311756",
"0.7310342",
"0.73088264",
"0.73066413",
"0.7303204",
"0.7301229",
"0.72989154",
"0.72976285",
"0.72976243",
"0.729648",
"0.7294562",
"0.7293027",
"0.7287597",
"0.72871196",
"0.728225",
"0.72814393",
"0.7281002",
"0.72763145",
"0.7275031",
"0.72715163",
"0.726914",
"0.72651637",
"0.725812",
"0.72550464",
"0.72529835",
"0.7244006",
"0.7243468",
"0.7240724",
"0.7240025",
"0.7237201",
"0.7234417",
"0.7230465",
"0.72266173",
"0.7224894",
"0.72230023",
"0.7222541",
"0.7220418",
"0.72181714",
"0.7215882",
"0.7214113",
"0.7212931",
"0.7211499",
"0.72108644",
"0.72049534",
"0.7201038",
"0.72001326",
"0.71976364",
"0.71971023",
"0.7195418",
"0.71878934",
"0.7186889",
"0.7185485"
] | 0.8377742 | 0 |
WithExtended adds the extended to the get platforms params | WithExtended добавляет расширенное к параметрам get platforms | func (o *GetPlatformsParams) WithExtended(extended *bool) *GetPlatformsParams {
o.SetExtended(extended)
return o
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *GetPlatformsParams) SetExtended(extended *bool) {\n\to.Extended = extended\n}",
"func (b *MessagesGetHistoryBuilder) Extended(v bool) *MessagesGetHistoryBuilder {\n\tb.Params[\"extended\"] = v\n\treturn b\n}",
"func (b *MessagesGetByIDBuilder) Extended(v bool) *MessagesGetByIDBuilder {\n\tb.Params[\"extended\"] = v\n\treturn b\n}",
"func (b *MessagesGetConversationsByIDBuilder) Extended(v bool) *MessagesGetConversationsByIDBuilder {\n\tb.Params[\"extended\"] = v\n\treturn b\n}",
"func (o *VulnerabilitiesRequest) SetExtended(v bool) {\n\to.Extended = &v\n}",
"func (b *MessagesGetByConversationMessageIDBuilder) Extended(v bool) *MessagesGetByConversationMessageIDBuilder {\n\tb.Params[\"extended\"] = v\n\treturn b\n}",
"func (vk *VK) AppsGetLeaderboardExtended(params map[string]string) (response AppsGetLeaderboardExtendedResponse, vkErr Error) {\n\tparams[\"extended\"] = \"1\"\n\tvk.RequestUnmarshal(\"apps.getLeaderboard\", params, &response, &vkErr)\n\treturn\n}",
"func (p *HostedProgramInfo) Extend(ext auth.SubPrin) {\n\tp.subprin = append(p.subprin, ext...)\n}",
"func MergeRawExtension(base *runtime.RawExtension, patch *runtime.RawExtension) (*runtime.RawExtension, error) {\n\tpatchParameter, err := util.RawExtension2Map(patch)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to convert patch parameters to map\")\n\t}\n\tbaseParameter, err := util.RawExtension2Map(base)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to convert base parameters to map\")\n\t}\n\tif baseParameter == nil {\n\t\tbaseParameter = make(map[string]interface{})\n\t}\n\terr = mergo.Merge(&baseParameter, patchParameter, mergo.WithOverride)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to do merge with override\")\n\t}\n\tbs, err := json.Marshal(baseParameter)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to marshal merged properties\")\n\t}\n\treturn &runtime.RawExtension{Raw: bs}, nil\n}",
"func (o *GetPlatformsParams) WithContext(ctx context.Context) *GetPlatformsParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func (o *GetPlatformsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Extended != nil {\n\n\t\t// query param extended\n\t\tvar qrExtended bool\n\t\tif o.Extended != nil {\n\t\t\tqrExtended = *o.Extended\n\t\t}\n\t\tqExtended := swag.FormatBool(qrExtended)\n\t\tif qExtended != \"\" {\n\t\t\tif err := r.SetQueryParam(\"extended\", qExtended); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m pFieldsGet) Extend(fnct func(m.UserSet, models.FieldsGetArgs) map[string]*models.FieldInfo) pFieldsGet {\n\treturn pFieldsGet{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (m pContextGet) Extend(fnct func(m.UserSet) *types.Context) pContextGet {\n\treturn pContextGet{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (o *VulnerabilitiesRequest) HasExtended() bool {\n\tif o != nil && o.Extended != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (m pGetToolbar) Extend(fnct func(m.UserSet) webtypes.Toolbar) pGetToolbar {\n\treturn pGetToolbar{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (m pDefaultGet) Extend(fnct func(m.UserSet) m.UserData) pDefaultGet {\n\treturn pDefaultGet{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (o *VulnerabilitiesRequest) GetExtendedOk() (*bool, bool) {\n\tif o == nil || o.Extended == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Extended, true\n}",
"func WithStandardUserAgent(platform string, systemCode string) Option {\n\treturn func(d *ExtensibleTransport) {\n\t\text := NewUserAgentExtension(standardUserAgent(platform, systemCode))\n\t\td.extensions = append(d.extensions, ext)\n\t}\n}",
"func (m pIsSystem) Extend(fnct func(m.UserSet) bool) pIsSystem {\n\treturn pIsSystem{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (extension *IotHubExtension) GetExtendedResources() []genruntime.KubernetesResource {\n\treturn []genruntime.KubernetesResource{\n\t\t&v20210702.IotHub{},\n\t\t&v20210702s.IotHub{}}\n}",
"func (o *VulnerabilitiesRequest) GetExtended() bool {\n\tif o == nil || o.Extended == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.Extended\n}",
"func TestGetExtraSpecs(t *testing.T) {\n\tth.SetupHTTP()\n\tdefer th.TeardownHTTP()\n\n\tMockGetExtraSpecsResponse(t)\n\n\tst, err := sharetypes.GetExtraSpecs(client.ServiceClient(), \"shareTypeID\").Extract()\n\tth.AssertNoErr(t, err)\n\n\tth.AssertEquals(t, st[\"snapshot_support\"], \"True\")\n\tth.AssertEquals(t, st[\"driver_handles_share_servers\"], \"True\")\n\tth.AssertEquals(t, st[\"my_custom_extra_spec\"], \"False\")\n}",
"func (m pNameGet) Extend(fnct func(m.UserSet) string) pNameGet {\n\treturn pNameGet{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (client *MachineExtensionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, name string, extensionName string, options *MachineExtensionsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{name}/extensions/{extensionName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif extensionName == \"\" {\n\t\treturn nil, errors.New(\"parameter extensionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{extensionName}\", url.PathEscape(extensionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (b *OrganizationRequestBuilder) Extensions() *OrganizationExtensionsCollectionRequestBuilder {\n\tbb := &OrganizationExtensionsCollectionRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/extensions\"\n\treturn bb\n}",
"func ExampleMachineExtensionsClient_Get() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armconnectedvmware.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewMachineExtensionsClient().Get(ctx, \"myResourceGroup\", \"myMachine\", \"CustomScriptExtension\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.MachineExtension = armconnectedvmware.MachineExtension{\n\t// \tName: to.Ptr(\"CustomScriptExtension\"),\n\t// \tType: to.Ptr(\"Microsoft.ConnectedVMwarevSphere/VirtualMachines/extensions\"),\n\t// \tID: to.Ptr(\"/subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.HybridCompute/Machines/myMachine/Extensions/CustomScriptExtension\"),\n\t// \tLocation: to.Ptr(\"eastus2euap\"),\n\t// \tProperties: &armconnectedvmware.MachineExtensionProperties{\n\t// \t\tType: to.Ptr(\"string\"),\n\t// \t\tAutoUpgradeMinorVersion: to.Ptr(false),\n\t// \t\tInstanceView: &armconnectedvmware.MachineExtensionPropertiesInstanceView{\n\t// \t\t\tName: to.Ptr(\"CustomScriptExtension\"),\n\t// \t\t\tType: to.Ptr(\"CustomScriptExtension\"),\n\t// \t\t\tStatus: &armconnectedvmware.MachineExtensionInstanceViewStatus{\n\t// \t\t\t\tCode: to.Ptr(\"success\"),\n\t// \t\t\t\tDisplayStatus: to.Ptr(\"Provisioning succeeded\"),\n\t// \t\t\t\tLevel: to.Ptr(armconnectedvmware.StatusLevelTypes(\"Information\")),\n\t// \t\t\t\tMessage: to.Ptr(\"Finished executing 
command, StdOut: , StdErr:\"),\n\t// \t\t\t\tTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2019-08-08T20:42:10.999Z\"); return t}()),\n\t// \t\t\t},\n\t// \t\t\tTypeHandlerVersion: to.Ptr(\"1.10.3\"),\n\t// \t\t},\n\t// \t\tProvisioningState: to.Ptr(\"Succeeded\"),\n\t// \t\tPublisher: to.Ptr(\"Microsoft.Compute\"),\n\t// \t\tSettings: \"@{commandToExecute=powershell.exe -c \\\"Get-Process | Where-Object { $_.CPU -gt 10000 }\\\"}\",\n\t// \t\tTypeHandlerVersion: to.Ptr(\"1.10.3\"),\n\t// \t},\n\t// }\n}",
"func getSupportedExtensions() map[string][]string {\n\t// In future when list of extensions grow, it will make\n\t// more sense to populate it in a dynamic way.\n\n\t// These are RHCOS supported extensions.\n\t// Each extension keeps a list of packages required to get enabled on host.\n\treturn map[string][]string{\n\t\t\"wasm\": {\"crun-wasm\"},\n\t\t\"ipsec\": {\"NetworkManager-libreswan\", \"libreswan\"},\n\t\t\"usbguard\": {\"usbguard\"},\n\t\t\"kerberos\": {\"krb5-workstation\", \"libkadm5\"},\n\t\t\"kernel-devel\": {\"kernel-devel\", \"kernel-headers\"},\n\t\t\"sandboxed-containers\": {\"kata-containers\"},\n\t}\n}",
"func (*entityImpl) ExtendedAttributes(p graphql.ResolveParams) (interface{}, error) {\n\tentity := p.Source.(*types.Entity)\n\treturn wrapExtendedAttributes(entity.ExtendedAttributes), nil\n}",
"func (r *Search) Ext(ext map[string]json.RawMessage) *Search {\n\n\tr.req.Ext = ext\n\n\treturn r\n}",
"func (m pGetLoginDomain) Extend(fnct func(m.UserSet, string) q.UserCondition) pGetLoginDomain {\n\treturn pGetLoginDomain{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (m pWithEnv) Extend(fnct func(m.UserSet, models.Environment) m.UserSet) pWithEnv {\n\treturn pWithEnv{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (opts CreateOptions) Extend(other CreateOptions) CreateOptions {\n\tvar out CreateOptions\n\tout = append(out, opts...)\n\tout = append(out, other...)\n\treturn out\n}",
"func (opts CreateOptions) Extend(other CreateOptions) CreateOptions {\n\tvar out CreateOptions\n\tout = append(out, opts...)\n\tout = append(out, other...)\n\treturn out\n}",
"func (m pGetRecord) Extend(fnct func(m.UserSet, string) m.UserSet) pGetRecord {\n\treturn pGetRecord{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (m *DeviceRequestBuilder) Extensions()(*i052c31265100c50ded0dfb7ace15f5bda9fcabb7268d24b57f25c5f7a0bc8ca0.ExtensionsRequestBuilder) {\n return i052c31265100c50ded0dfb7ace15f5bda9fcabb7268d24b57f25c5f7a0bc8ca0.NewExtensionsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}",
"func (m pFieldsViewGet) Extend(fnct func(m.UserSet, webtypes.FieldsViewGetParams) *webtypes.FieldsViewData) pFieldsViewGet {\n\treturn pFieldsViewGet{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (o *GetPlatformsParams) WithTimeout(timeout time.Duration) *GetPlatformsParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func GetCommonExtendedInfo() (map[string]string) {\n extendedInfo := map[string]string{\n \"csi_created_by_plugin_name\": CsiPluginName,\n \"csi_created_by_plugin_version\": Version,\n \"csi_created_by_plugin_git_hash\": Githash,\n \"csi_created_by_csi_version\": CsiVersion,\n }\n return extendedInfo\n}",
"func NewGetPlatformsParams() *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func WithExtensions(extensions map[string]string) CallOpt {\n\treturn func(c *call) error {\n\t\tc.extensions = extensions\n\t\treturn nil\n\t}\n}",
"func NewGetPlatformsParamsWithHTTPClient(client *http.Client) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (b *PostRequestBuilder) MultiValueExtendedProperties() *PostMultiValueExtendedPropertiesCollectionRequestBuilder {\n\tbb := &PostMultiValueExtendedPropertiesCollectionRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/multiValueExtendedProperties\"\n\treturn bb\n}",
"func (m *AdministrativeUnitsAdministrativeUnitItemRequestBuilder) Extensions()(*AdministrativeUnitsItemExtensionsRequestBuilder) {\n return NewAdministrativeUnitsItemExtensionsRequestBuilderInternal(m.BaseRequestBuilder.PathParameters, m.BaseRequestBuilder.RequestAdapter)\n}",
"func (r *SingleValueLegacyExtendedPropertyRequest) Get(ctx context.Context) (resObj *SingleValueLegacyExtendedProperty, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}",
"func NewMultiValueLegacyExtendedPropertyItemRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*MultiValueLegacyExtendedPropertyItemRequestBuilder) {\n m := &MultiValueLegacyExtendedPropertyItemRequestBuilder{\n }\n m.urlTemplate = \"{+baseurl}/me/calendarGroups/{calendarGroup%2Did}/calendars/{calendar%2Did}/events/{event%2Did}/instances/{event%2Did1}/multiValueExtendedProperties/{multiValueLegacyExtendedProperty%2Did}{?%24select,%24expand}\";\n urlTplParams := make(map[string]string)\n for idx, item := range pathParameters {\n urlTplParams[idx] = item\n }\n m.pathParameters = urlTplParams;\n m.requestAdapter = requestAdapter;\n return m\n}",
"func NewCustomAuthenticationExtensionsRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*CustomAuthenticationExtensionsRequestBuilder) {\n m := &CustomAuthenticationExtensionsRequestBuilder{\n BaseRequestBuilder: *i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewBaseRequestBuilder(requestAdapter, \"{+baseurl}/identity/customAuthenticationExtensions{?%24top,%24skip,%24search,%24filter,%24count,%24orderby,%24select,%24expand}\", pathParameters),\n }\n return m\n}",
"func (m *EventItemRequestBuilder) MultiValueExtendedProperties()(*i8f6b1073998fe7e072e94da48aef27ff47795e232ec787943a4bc43820018dd7.MultiValueExtendedPropertiesRequestBuilder) {\n return i8f6b1073998fe7e072e94da48aef27ff47795e232ec787943a4bc43820018dd7.NewMultiValueExtendedPropertiesRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}",
"func NewMultiValueLegacyExtendedPropertyItemRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*MultiValueLegacyExtendedPropertyItemRequestBuilder) {\n urlParams := make(map[string]string)\n urlParams[\"request-raw-url\"] = rawUrl\n return NewMultiValueLegacyExtendedPropertyItemRequestBuilderInternal(urlParams, requestAdapter)\n}",
"func (opts UnsetEnvOptions) Extend(other UnsetEnvOptions) UnsetEnvOptions {\n\tvar out UnsetEnvOptions\n\tout = append(out, opts...)\n\tout = append(out, other...)\n\treturn out\n}",
"func TestSetExtraSpecs(t *testing.T) {\n\tth.SetupHTTP()\n\tdefer th.TeardownHTTP()\n\n\tMockSetExtraSpecsResponse(t)\n\n\toptions := &sharetypes.SetExtraSpecsOpts{\n\t\tExtraSpecs: map[string]interface{}{\"my_key\": \"my_value\"},\n\t}\n\n\tes, err := sharetypes.SetExtraSpecs(client.ServiceClient(), \"shareTypeID\", options).Extract()\n\tth.AssertNoErr(t, err)\n\n\tth.AssertEquals(t, es[\"my_key\"], \"my_value\")\n}",
"func (w *MultiWriter) WithExtJSON(containerName, group string) *MultiWriter {\n\tw.container = containerName\n\tw.group = group\n\tw.isJSON = true\n\n\thostname := \"unknown\"\n\tif h, err := os.Hostname(); err == nil {\n\t\thostname = h\n\t}\n\tw.hostname = hostname\n\treturn w\n}",
"func ExampleMachineExtensionsClient_Get() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armhybridcompute.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewMachineExtensionsClient().Get(ctx, \"myResourceGroup\", \"myMachine\", \"CustomScriptExtension\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.MachineExtension = armhybridcompute.MachineExtension{\n\t// \tName: to.Ptr(\"CustomScriptExtension\"),\n\t// \tType: to.Ptr(\"Microsoft.HybridCompute/machines/extensions\"),\n\t// \tID: to.Ptr(\"/subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.HybridCompute/Machines/myMachine/Extensions/CustomScriptExtension\"),\n\t// \tLocation: to.Ptr(\"eastus2euap\"),\n\t// \tProperties: &armhybridcompute.MachineExtensionProperties{\n\t// \t\tType: to.Ptr(\"string\"),\n\t// \t\tAutoUpgradeMinorVersion: to.Ptr(false),\n\t// \t\tInstanceView: &armhybridcompute.MachineExtensionInstanceView{\n\t// \t\t\tName: to.Ptr(\"CustomScriptExtension\"),\n\t// \t\t\tType: to.Ptr(\"CustomScriptExtension\"),\n\t// \t\t\tStatus: &armhybridcompute.MachineExtensionInstanceViewStatus{\n\t// \t\t\t\tCode: to.Ptr(\"success\"),\n\t// \t\t\t\tDisplayStatus: to.Ptr(\"Provisioning succeeded\"),\n\t// \t\t\t\tLevel: to.Ptr(armhybridcompute.StatusLevelTypes(\"Information\")),\n\t// \t\t\t\tMessage: to.Ptr(\"Finished executing command, StdOut: , StdErr:\"),\n\t// 
\t\t\t\tTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2019-08-08T20:42:10.999Z\"); return t}()),\n\t// \t\t\t},\n\t// \t\t\tTypeHandlerVersion: to.Ptr(\"1.10.3\"),\n\t// \t\t},\n\t// \t\tProtectedSettings: map[string]any{\n\t// \t\t},\n\t// \t\tProvisioningState: to.Ptr(\"Succeeded\"),\n\t// \t\tPublisher: to.Ptr(\"Microsoft.Compute\"),\n\t// \t\tSettings: \"@{commandToExecute=powershell.exe -c \\\"Get-Process | Where-Object { $_.CPU -gt 10000 }\\\"}\",\n\t// \t\tTypeHandlerVersion: to.Ptr(\"1.10.3\"),\n\t// \t},\n\t// }\n}",
"func (m pGetCompany) Extend(fnct func(m.UserSet) m.CompanySet) pGetCompany {\n\treturn pGetCompany{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (opts *ToolOptions) AddToExtraOptionsRegistry(extraOpts ExtraOptions) {\n\topts.URI.extraOptionsRegistry = append(opts.URI.extraOptionsRegistry, extraOpts)\n}",
"func RegisterPredicateMetadataProducerWithExtendedResourceOptions(ignoredExtendedResources sets.String) {\n\tRegisterPredicateMetadataProducer(\"PredicateWithExtendedResourceOptions\", func(pm *predicateMetadata) {\n\t\tpm.ignoredExtendedResources = ignoredExtendedResources\n\t})\n}",
"func MergeExtraConfig(extraConfig []vimTypes.BaseOptionValue, newMap map[string]string) []vimTypes.BaseOptionValue {\n\tmerged := make([]vimTypes.BaseOptionValue, 0)\n\tecMap := ExtraConfigToMap(extraConfig)\n\tfor k, v := range newMap {\n\t\tif _, exists := ecMap[k]; !exists {\n\t\t\tmerged = append(merged, &vimTypes.OptionValue{Key: k, Value: v})\n\t\t}\n\t}\n\treturn merged\n}",
"func (m *DeviceItemRequestBuilder) Extensions()(*ItemExtensionsRequestBuilder) {\n return NewItemExtensionsRequestBuilderInternal(m.pathParameters, m.requestAdapter)\n}",
"func (extension *ServerFarmExtension) GetExtendedResources() []genruntime.KubernetesResource {\n\treturn []genruntime.KubernetesResource{\n\t\t&v20220301.ServerFarm{},\n\t\t&v20220301s.ServerFarm{},\n\t\t&v1beta20220301.ServerFarm{},\n\t\t&v1beta20220301s.ServerFarm{}}\n}",
"func (m pActionGet) Extend(fnct func(m.UserSet) *actions.Action) pActionGet {\n\treturn pActionGet{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (o *GetPlatformsParams) WithHTTPClient(client *http.Client) *GetPlatformsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *GetMetricsParams) WithPlatforms(platforms *string) *GetMetricsParams {\n\to.SetPlatforms(platforms)\n\treturn o\n}",
"func (b *PostRequestBuilder) Extensions() *PostExtensionsCollectionRequestBuilder {\n\tbb := &PostExtensionsCollectionRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/extensions\"\n\treturn bb\n}",
"func (m pBrowse) Extend(fnct func(m.UserSet, []int64) m.UserSet) pBrowse {\n\treturn pBrowse{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (m *CustomAuthenticationExtensionsRequestBuilder) Get(ctx context.Context, requestConfiguration *CustomAuthenticationExtensionsRequestBuilderGetRequestConfiguration)(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CustomAuthenticationExtensionCollectionResponseable, error) {\n requestInfo, err := m.ToGetRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CreateCustomAuthenticationExtensionCollectionResponseFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CustomAuthenticationExtensionCollectionResponseable), nil\n}",
"func (a *Client) GetUserFriendsWithPlatformShort(params *GetUserFriendsWithPlatformParams, authInfo runtime.ClientAuthInfoWriter) (*GetUserFriendsWithPlatformOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetUserFriendsWithPlatformParams()\n\t}\n\n\tif params.Context == nil {\n\t\tparams.Context = context.Background()\n\t}\n\n\tif params.RetryPolicy != nil {\n\t\tparams.SetHTTPClientTransport(params.RetryPolicy)\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getUserFriendsWithPlatform\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/friends/namespaces/{namespace}/me/platforms\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetUserFriendsWithPlatformReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch v := result.(type) {\n\n\tcase *GetUserFriendsWithPlatformOK:\n\t\treturn v, nil\n\tcase *GetUserFriendsWithPlatformBadRequest:\n\t\treturn nil, v\n\tcase *GetUserFriendsWithPlatformUnauthorized:\n\t\treturn nil, v\n\tcase *GetUserFriendsWithPlatformForbidden:\n\t\treturn nil, v\n\tcase *GetUserFriendsWithPlatformNotFound:\n\t\treturn nil, v\n\tcase *GetUserFriendsWithPlatformInternalServerError:\n\t\treturn nil, v\n\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Unexpected Type %v\", reflect.TypeOf(v))\n\t}\n}",
"func (r *PostMultiValueExtendedPropertiesCollectionRequest) Add(ctx context.Context, reqObj *MultiValueLegacyExtendedProperty) (resObj *MultiValueLegacyExtendedProperty, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}",
"func (client *Client) SearchMediaWithOptions(request *SearchMediaRequest, runtime *util.RuntimeOptions) (_result *SearchMediaResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\tquery := map[string]interface{}{}\n\tif !tea.BoolValue(util.IsUnset(request.Fields)) {\n\t\tquery[\"Fields\"] = request.Fields\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.Match)) {\n\t\tquery[\"Match\"] = request.Match\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.PageNo)) {\n\t\tquery[\"PageNo\"] = request.PageNo\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.PageSize)) {\n\t\tquery[\"PageSize\"] = request.PageSize\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.ScrollToken)) {\n\t\tquery[\"ScrollToken\"] = request.ScrollToken\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.SearchType)) {\n\t\tquery[\"SearchType\"] = request.SearchType\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.SortBy)) {\n\t\tquery[\"SortBy\"] = request.SortBy\n\t}\n\n\treq := &openapi.OpenApiRequest{\n\t\tQuery: openapiutil.Query(query),\n\t}\n\tparams := &openapi.Params{\n\t\tAction: tea.String(\"SearchMedia\"),\n\t\tVersion: tea.String(\"2017-03-21\"),\n\t\tProtocol: tea.String(\"HTTPS\"),\n\t\tPathname: tea.String(\"/\"),\n\t\tMethod: tea.String(\"POST\"),\n\t\tAuthType: tea.String(\"AK\"),\n\t\tStyle: tea.String(\"RPC\"),\n\t\tReqBodyType: tea.String(\"formData\"),\n\t\tBodyType: tea.String(\"json\"),\n\t}\n\t_result = &SearchMediaResponse{}\n\t_body, _err := client.CallApi(params, req, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}",
"func (m pFieldGet) Extend(fnct func(m.UserSet, models.FieldName) *models.FieldInfo) pFieldGet {\n\treturn pFieldGet{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (r *ExtensionRequest) Get(ctx context.Context) (resObj *Extension, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}",
"func (client *ServerVulnerabilityAssessmentClient) listByExtendedResourceCreateRequest(ctx context.Context, resourceGroupName string, resourceNamespace string, resourceType string, resourceName string, options *ServerVulnerabilityAssessmentClientListByExtendedResourceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceNamespace}/{resourceType}/{resourceName}/providers/Microsoft.Security/serverVulnerabilityAssessments\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif resourceNamespace == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceNamespace cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceNamespace}\", url.PathEscape(resourceNamespace))\n\tif resourceType == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(resourceType))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-01-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func SetPlatformDefaults(p *none.Platform) {\n}",
"func (m *ItemCalendarRequestBuilder) MultiValueExtendedProperties()(*ItemCalendarMultiValueExtendedPropertiesRequestBuilder) {\n return NewItemCalendarMultiValueExtendedPropertiesRequestBuilderInternal(m.pathParameters, m.requestAdapter)\n}",
"func NewGetPlatformsParamsWithTimeout(timeout time.Duration) *GetPlatformsParams {\n\tvar ()\n\treturn &GetPlatformsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func listExtended(cmd *cobra.Command, features []FeatureInfo) error {\n\tvar t component.OutputWriterSpinner\n\tt, err := component.NewOutputWriterWithSpinner(cmd.OutOrStdout(), outputFormat,\n\t\t\"Retrieving Features...\", true, \"NAME\", \"ACTIVATION STATE\", \"STABILITY\", \"DESCRIPTION\", \"IMMUTABLE\", \"FEATUREGATE\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not get OutputWriterSpinner: %w\", err)\n\t}\n\n\tfor _, info := range features {\n\t\tt.AddRow(info.Name, info.Activated, info.Stability, info.Description, info.Immutable, info.FeatureGate)\n\t}\n\tt.RenderWithSpinner()\n\n\treturn nil\n}",
"func (m pWebReadGroupPrivate) Extend(fnct func(m.UserSet, webtypes.WebReadGroupParams) []models.FieldMap) pWebReadGroupPrivate {\n\treturn pWebReadGroupPrivate{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (s KernelArgs) Extend(k KernelArgs) {\n\tfor a, b := range k {\n\t\ts[a] = b\n\t}\n}",
"func ExtendedJSON(w http.ResponseWriter, code int, data interface{}, metadata map[string]interface{}) {\n\n\tvar status string\n\tswitch code {\n\tcase http.StatusOK:\n\t\tstatus = \"Success\"\n\tcase http.StatusCreated:\n\t\tstatus = \"Created\"\n\t}\n\n\ttype response struct {\n\t\tStatus string `json:\"status\"`\n\t\tCode int `json:\"code\"`\n\t\tData interface{} `json:\"data\"`\n\t\tMetadata map[string]interface{} `json:\"metadata,omitempty\"`\n\t}\n\n\tresp := response{\n\t\tStatus: status,\n\t\tCode: code,\n\t\tData: data,\n\t\tMetadata: metadata,\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tjson.NewEncoder(w).Encode(resp)\n}",
"func (m pWebSearchRead) Extend(fnct func(m.UserSet, webtypes.SearchParams) webtypes.SearchReadResult) pWebSearchRead {\n\treturn pWebSearchRead{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (a *AllApiService) EnterpriseGetEnterpriseCapabilities(ctx _context.Context, body EnterpriseGetEnterpriseCapabilities) (EnterpriseGetEnterpriseCapabilitiesResult, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue EnterpriseGetEnterpriseCapabilitiesResult\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/enterprise/getEnterpriseCapabilities\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, 
localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v EnterpriseGetEnterpriseCapabilitiesResult\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}",
"func (m pFetch) Extend(fnct func(m.UserSet) m.UserSet) pFetch {\n\treturn pFetch{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (a *Client) GetPlatforms(params *GetPlatformsParams, opts ...ClientOption) (*GetPlatformsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetPlatformsParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"get-platforms\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/fwmgr/entities/platforms/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetPlatformsReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetPlatformsOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for get-platforms: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func NewGetOperatingSystemsParamsWithHTTPClient(client *http.Client) *GetOperatingSystemsParams {\n\tvar ()\n\treturn &GetOperatingSystemsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *OpenAPI3) GetExtension(name string, extensions map[string]interface{}, dst interface{}) error {\n\tif extensions == nil {\n\t\treturn ErrExtNotFound\n\t}\n\n\tif ext, ok := extensions[name]; ok {\n\t\traw, isRawMessage := ext.(jsonstd.RawMessage)\n\t\tif !isRawMessage {\n\t\t\treturn fmt.Errorf(\"invalid extension\")\n\t\t}\n\n\t\terr := json.Unmarshal(raw, dst)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid extension type: %v\", err)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn ErrExtNotFound\n}",
"func buildConsolidatedKeywordsReqExt(openRTBUser, openRTBSite string, firstImpExt, requestExt json.RawMessage) (json.RawMessage, error) {\n\t// unmarshal ext to object map\n\trequestExtMap := parseExtToMap(requestExt)\n\tfirstImpExtMap := parseExtToMap(firstImpExt)\n\t// extract `keywords` field\n\trequestExtKeywordsMap := extractKeywordsMap(requestExtMap)\n\tfirstImpExtKeywordsMap := extractBidderKeywordsMap(firstImpExtMap)\n\t// parse + merge keywords\n\tkeywords := parseKeywordsFromMap(requestExtKeywordsMap) // request.ext.keywords\n\tmergeKeywords(keywords, parseKeywordsFromMap(firstImpExtKeywordsMap)) // request.imp[0].ext.bidder.keywords\n\tmergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBUser, \"user\")) // request.user.keywords\n\tmergeKeywords(keywords, parseKeywordsFromOpenRTB(openRTBSite, \"site\")) // request.site.keywords\n\n\t// overlay site + user keywords\n\tif site, exists := keywords[\"site\"]; exists && len(site) > 0 {\n\t\trequestExtKeywordsMap[\"site\"] = site\n\t} else {\n\t\tdelete(requestExtKeywordsMap, \"site\")\n\t}\n\tif user, exists := keywords[\"user\"]; exists && len(user) > 0 {\n\t\trequestExtKeywordsMap[\"user\"] = user\n\t} else {\n\t\tdelete(requestExtKeywordsMap, \"user\")\n\t}\n\t// reconcile keywords with request.ext\n\tif len(requestExtKeywordsMap) > 0 {\n\t\trequestExtMap[\"keywords\"] = requestExtKeywordsMap\n\t} else {\n\t\tdelete(requestExtMap, \"keywords\")\n\t}\n\t// marshal final result\n\tif len(requestExtMap) > 0 {\n\t\treturn json.Marshal(requestExtMap)\n\t}\n\treturn nil, nil\n}",
"func (m pWithNewContext) Extend(fnct func(m.UserSet, *types.Context) m.UserSet) pWithNewContext {\n\treturn pWithNewContext{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (l *LogOptions) SetExtendedOptions(options ...interface{}) {\n\tfor x := 0; x < len(options); x += 2 {\n\t\tl.ExtendedOptions[options[x].(string)] = options[x+1]\n\t}\n}",
"func (m pSearchRead) Extend(fnct func(m.UserSet, webtypes.SearchParams) []models.RecordData) pSearchRead {\n\treturn pSearchRead{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (delivery_instructions DeliveryInstructions) ExtendedOptions() (data []byte, err error) {\n\tops, err := delivery_instructions.HasExtendedOptions()\n\tif err != nil {\n\t\treturn\n\t}\n\tif ops {\n\t\tvar extended_options_index int\n\t\textended_options_index, err = delivery_instructions.extended_options_index()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif len(delivery_instructions) < extended_options_index+2 {\n\t\t\terr = errors.New(\"DeliveryInstructions are invalid, length is shorter than required for Extended Options\")\n\t\t\treturn\n\t\t} else {\n\t\t\textended_options_size := common.Integer([]byte{delivery_instructions[extended_options_index]})\n\t\t\tif len(delivery_instructions) < extended_options_index+1+extended_options_size {\n\t\t\t\terr = errors.New(\"DeliveryInstructions are invalid, length is shorter than specified in Extended Options\")\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tdata = delivery_instructions[extended_options_index+1 : extended_options_size]\n\t\t\t\treturn\n\t\t\t}\n\n\t\t}\n\t} else {\n\t\terr = errors.New(\"DeliveryInstruction does not have the ExtendedOptions flag set\")\n\t}\n\treturn\n}",
"func (m pAddMandatoryGroups) Extend(fnct func(m.UserSet)) pAddMandatoryGroups {\n\treturn pAddMandatoryGroups{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (s *TiFlashSpec) GetExtendedRole(ctx context.Context, tlsCfg *tls.Config, pdList ...string) string {\n\tif len(pdList) < 1 {\n\t\treturn \"\"\n\t}\n\tstoreAddr := utils.JoinHostPort(s.Host, s.FlashServicePort)\n\tpdapi := api.NewPDClient(ctx, pdList, statusQueryTimeout, tlsCfg)\n\tstore, err := pdapi.GetCurrentStore(storeAddr)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tisWriteNode := false\n\tisTiFlash := false\n\tfor _, label := range store.Store.Labels {\n\t\tif label.Key == EngineLabelKey {\n\t\t\tif label.Value == EngineLabelTiFlashCompute {\n\t\t\t\treturn \" (compute)\"\n\t\t\t}\n\t\t\tif label.Value == EngineLabelTiFlash {\n\t\t\t\tisTiFlash = true\n\t\t\t}\n\t\t}\n\t\tif label.Key == EngineRoleLabelKey && label.Value == EngineRoleLabelWrite {\n\t\t\tisWriteNode = true\n\t\t}\n\t\tif isTiFlash && isWriteNode {\n\t\t\treturn \" (write)\"\n\t\t}\n\t}\n\treturn \"\"\n}",
"func (c *Client) GetEnvironmentExtended(id string, ret *EnvironmentExtended) error {\n\tquery := url.Values{}\n\tquery.Add(\"envId\", id)\n\treturn c.makeGetRequest(\"envs/actions/getextended\", ret, &query)\n}",
"func Platform() (*openstack.Platform, error) {\n\tcloudNames, err := getCloudNames()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Sort cloudNames so we can use sort.SearchStrings\n\tsort.Strings(cloudNames)\n\tvar cloud string\n\terr = survey.Ask([]*survey.Question{\n\t\t{\n\t\t\tPrompt: &survey.Select{\n\t\t\t\tMessage: \"Cloud\",\n\t\t\t\tHelp: \"The OpenStack cloud name from clouds.yaml.\",\n\t\t\t\tOptions: cloudNames,\n\t\t\t},\n\t\t\tValidate: survey.ComposeValidators(survey.Required, func(ans interface{}) error {\n\t\t\t\tvalue := ans.(core.OptionAnswer).Value\n\t\t\t\ti := sort.SearchStrings(cloudNames, value)\n\t\t\t\tif i == len(cloudNames) || cloudNames[i] != value {\n\t\t\t\t\treturn fmt.Errorf(\"invalid cloud name %q, should be one of %s\", value, strings.Join(cloudNames, \", \"))\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}),\n\t\t},\n\t}, &cloud)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed UserInput: %w\", err)\n\t}\n\n\tnetworkNames, err := getExternalNetworkNames(cloud)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnetworkNames = append(networkNames, noExtNet)\n\tsort.Strings(networkNames)\n\tvar extNet string\n\terr = survey.Ask([]*survey.Question{\n\t\t{\n\t\t\tPrompt: &survey.Select{\n\t\t\t\tMessage: \"ExternalNetwork\",\n\t\t\t\tHelp: \"The OpenStack external network name to be used for installation.\",\n\t\t\t\tOptions: networkNames,\n\t\t\t\tDefault: noExtNet,\n\t\t\t},\n\t\t\tValidate: survey.ComposeValidators(survey.Required, func(ans interface{}) error {\n\t\t\t\tvalue := ans.(core.OptionAnswer).Value\n\t\t\t\ti := sort.SearchStrings(networkNames, value)\n\t\t\t\tif i == len(networkNames) || networkNames[i] != value {\n\t\t\t\t\treturn fmt.Errorf(\"invalid network name %q, should be one of %s\", value, strings.Join(networkNames, \", \"))\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}),\n\t\t},\n\t}, &extNet)\n\tif extNet == noExtNet {\n\t\textNet = \"\"\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed UserInput: %w\", 
err)\n\t}\n\n\tvar apiFloatingIP string\n\tif extNet != \"\" {\n\t\tfloatingIPs, err := getFloatingIPs(cloud, extNet)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsort.Sort(floatingIPs)\n\t\terr = survey.Ask([]*survey.Question{\n\t\t\t{\n\t\t\t\tPrompt: &survey.Select{\n\t\t\t\t\tMessage: \"APIFloatingIPAddress\",\n\t\t\t\t\tHelp: \"The Floating IP address used for external access to the OpenShift API.\",\n\t\t\t\t\tOptions: floatingIPs.Names(),\n\t\t\t\t\tDescription: func(_ string, index int) string { return floatingIPs.Description(index) },\n\t\t\t\t},\n\t\t\t\tValidate: survey.ComposeValidators(survey.Required, func(ans interface{}) error {\n\t\t\t\t\tif value := ans.(core.OptionAnswer).Value; !floatingIPs.Contains(value) {\n\t\t\t\t\t\treturn fmt.Errorf(\"invalid floating IP %q, should be one of %s\", value, strings.Join(floatingIPs.Names(), \", \"))\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t}),\n\t\t\t},\n\t\t}, &apiFloatingIP)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed UserInput: %w\", err)\n\t\t}\n\t}\n\n\tflavorNames, err := getFlavorNames(cloud)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsort.Strings(flavorNames)\n\tvar flavor string\n\terr = survey.Ask([]*survey.Question{\n\t\t{\n\t\t\tPrompt: &survey.Select{\n\t\t\t\tMessage: \"FlavorName\",\n\t\t\t\tHelp: \"The OpenStack flavor to use for control-plane and compute nodes. 
A flavor with at least 16 GB RAM is recommended.\",\n\t\t\t\tOptions: flavorNames,\n\t\t\t},\n\t\t\tValidate: survey.ComposeValidators(survey.Required, func(ans interface{}) error {\n\t\t\t\tvalue := ans.(core.OptionAnswer).Value\n\t\t\t\ti := sort.SearchStrings(flavorNames, value)\n\t\t\t\tif i == len(flavorNames) || flavorNames[i] != value {\n\t\t\t\t\treturn fmt.Errorf(\"invalid flavor name %q, should be one of %s\", value, strings.Join(flavorNames, \", \"))\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}),\n\t\t},\n\t}, &flavor)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed UserInput: %w\", err)\n\t}\n\n\treturn &openstack.Platform{\n\t\tAPIFloatingIP: apiFloatingIP,\n\t\tCloud: cloud,\n\t\tExternalNetwork: extNet,\n\t\tDefaultMachinePlatform: &openstack.MachinePool{\n\t\t\tFlavorName: flavor,\n\t\t},\n\t}, nil\n}",
"func decodeExtendedCommunities(c interface{}) api.ExtCommunities {\n\tdetails := decoders.StringList(c)\n\tcomms := make(api.ExtCommunities, 0, len(details))\n\tfor _, com := range details {\n\t\ttokens := strings.SplitN(com, \" \", 2)\n\t\tif len(tokens) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tnums := decoders.IntListFromStrings(\n\t\t\tstrings.SplitN(tokens[1], \":\", 2))\n\t\tif len(nums) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tcomms = append(comms, []interface{}{tokens[0], nums[0], nums[1]})\n\t}\n\treturn comms\n}",
"func addNativeFlags(s *options.CloudControllerManagerServer, fs *flag.FlagSet) *flag.FlagSet {\n\tfs.StringVar(&s.Master, \"master\", s.Master, \"The address of the Kubernetes API server (overrides any value in kubeconfig)\")\n\tfs.StringVar(&s.Kubeconfig, \"kubeconfig\", s.Kubeconfig, \"Path to kubeconfig file with authorization and master location information.\")\n\tfs.StringVar(&s.CloudConfigFile, \"cloud-config\", s.CloudConfigFile, \"The path to the cloud provider configuration file. Empty string for no configuration file.\")\n\tfs.BoolVar(&versionShow, \"version\", false, \"Path to kubeconfig file with authorization and master location information.\")\n\tfs.StringVar(&s.CloudProvider, \"cloud-provider\", \"\", \"The provider for cloud services. Empty string for no provider.\")\n\treturn fs\n}",
"func NewGetDevicesUnknownParamsWithHTTPClient(client *http.Client) *GetDevicesUnknownParams {\n\treturn &GetDevicesUnknownParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func ExampleMachineExtensionsClient_BeginCreateOrUpdate() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armconnectedvmware.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewMachineExtensionsClient().BeginCreateOrUpdate(ctx, \"myResourceGroup\", \"myMachine\", \"CustomScriptExtension\", armconnectedvmware.MachineExtension{\n\t\tLocation: to.Ptr(\"eastus2euap\"),\n\t\tProperties: &armconnectedvmware.MachineExtensionProperties{\n\t\t\tType: to.Ptr(\"CustomScriptExtension\"),\n\t\t\tPublisher: to.Ptr(\"Microsoft.Compute\"),\n\t\t\tSettings: map[string]any{\n\t\t\t\t\"commandToExecute\": \"powershell.exe -c \\\"Get-Process | Where-Object { $_.CPU -gt 10000 }\\\"\",\n\t\t\t},\n\t\t\tTypeHandlerVersion: to.Ptr(\"1.10\"),\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.MachineExtension = armconnectedvmware.MachineExtension{\n\t// \tName: to.Ptr(\"CustomScriptExtension\"),\n\t// \tType: to.Ptr(\"Microsoft.ConnectedVMwarevSphere/VirtualMachines/extensions\"),\n\t// \tID: to.Ptr(\"/subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.HybridCompute/Machines/myMachine/Extensions/CustomScriptExtension\"),\n\t// \tLocation: to.Ptr(\"eastus2euap\"),\n\t// \tProperties: &armconnectedvmware.MachineExtensionProperties{\n\t// \t\tType: to.Ptr(\"string\"),\n\t// \t\tAutoUpgradeMinorVersion: to.Ptr(false),\n\t// \t\tInstanceView: &armconnectedvmware.MachineExtensionPropertiesInstanceView{\n\t// \t\t\tName: to.Ptr(\"CustomScriptExtension\"),\n\t// \t\t\tType: to.Ptr(\"CustomScriptExtension\"),\n\t// \t\t\tStatus: &armconnectedvmware.MachineExtensionInstanceViewStatus{\n\t// \t\t\t\tCode: to.Ptr(\"success\"),\n\t// \t\t\t\tLevel: to.Ptr(armconnectedvmware.StatusLevelTypes(\"Information\")),\n\t// \t\t\t\tMessage: to.Ptr(\"Finished executing command, StdOut: , StdErr:\"),\n\t// \t\t\t\tTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2020-08-08T20:42:10.999Z\"); return t}()),\n\t// \t\t\t},\n\t// \t\t\tTypeHandlerVersion: to.Ptr(\"1.10.3\"),\n\t// \t\t},\n\t// \t\tProvisioningState: to.Ptr(\"Succeeded\"),\n\t// \t\tPublisher: to.Ptr(\"Microsoft.Compute\"),\n\t// \t\tSettings: \"@{commandToExecute=powershell.exe -c \\\"Get-Process | Where-Object { $_.CPU -gt 10000 }\\\"}\",\n\t// \t\tTypeHandlerVersion: to.Ptr(\"1.10.3\"),\n\t// \t},\n\t// }\n}",
"func GetExtendingApps(jxClient versioned.Interface, namespace string) ([]jenkinsv1.App, error) {\n\tlistOptions := metav1.ListOptions{}\n\tlistOptions.LabelSelector = fmt.Sprintf(apps.AppTypeLabel+\" in (%s)\", apps.PipelineExtension)\n\tappsList, err := jxClient.JenkinsV1().Apps(namespace).List(listOptions)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error retrieving pipeline contributor apps\")\n\t}\n\treturn appsList.Items, nil\n}",
"func Extend(target map[string]interface{}, args ...interface{}) (map[string]interface{}, error) {\n\tif len(args)%2 != 0 {\n\t\treturn nil, fmt.Errorf(\"expecting even number of arguments, got %d\", len(args))\n\t}\n\n\tfn := \"\"\n\tfor _, v := range args {\n\t\tif len(fn) == 0 {\n\t\t\tif s, ok := v.(string); ok {\n\t\t\t\tfn = s\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn target, fmt.Errorf(\"expecting string for odd numbered arguments, got %+v\", v)\n\t\t}\n\t\ttarget[fn] = v\n\t\tfn = \"\"\n\t}\n\n\treturn target, nil\n}",
"func (client *MachineExtensionsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, name string, extensionName string, extensionParameters MachineExtensionUpdate, options *MachineExtensionsClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{name}/extensions/{extensionName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif extensionName == \"\" {\n\t\treturn nil, errors.New(\"parameter extensionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{extensionName}\", url.PathEscape(extensionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, extensionParameters)\n}",
"func (m pSQLFromCondition) Extend(fnct func(m.UserSet, *models.Condition) (string, models.SQLParams)) pSQLFromCondition {\n\treturn pSQLFromCondition{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}"
] | [
"0.761028",
"0.563234",
"0.56002456",
"0.5536339",
"0.5500236",
"0.5369291",
"0.507227",
"0.5051095",
"0.49456128",
"0.49286032",
"0.49263084",
"0.49066523",
"0.4895168",
"0.48209637",
"0.4819409",
"0.47902983",
"0.4787378",
"0.47806302",
"0.4741045",
"0.47262233",
"0.47177416",
"0.46968997",
"0.46933776",
"0.4681936",
"0.46302035",
"0.45906216",
"0.45874777",
"0.45837232",
"0.4583411",
"0.45819843",
"0.45718104",
"0.45625827",
"0.45625827",
"0.45580238",
"0.4553458",
"0.4547201",
"0.4539819",
"0.45384106",
"0.45367652",
"0.4529968",
"0.45272088",
"0.45207494",
"0.45175847",
"0.44941238",
"0.44885552",
"0.44856837",
"0.44856635",
"0.44738206",
"0.44718894",
"0.44593748",
"0.4457986",
"0.44489354",
"0.44418773",
"0.44385415",
"0.44361746",
"0.44330615",
"0.4424878",
"0.44215995",
"0.43785718",
"0.43733716",
"0.43510962",
"0.4331234",
"0.43250203",
"0.43201625",
"0.43176767",
"0.43072775",
"0.4290216",
"0.4278749",
"0.42771953",
"0.42709365",
"0.42640486",
"0.42607033",
"0.42537826",
"0.4246645",
"0.42446068",
"0.42352423",
"0.4233371",
"0.4232426",
"0.42164823",
"0.42158082",
"0.42141113",
"0.4213447",
"0.4210691",
"0.4199534",
"0.4197639",
"0.41939455",
"0.41878068",
"0.41840133",
"0.41815358",
"0.41695353",
"0.4163625",
"0.41583258",
"0.41581216",
"0.4157225",
"0.41570002",
"0.41518867",
"0.41486466",
"0.41471642",
"0.414651",
"0.4144551"
] | 0.7949409 | 0 |
SetExtended adds the extended to the get platforms params | SetExtended добавляет расширенное к параметрам платформ для получения | func (o *GetPlatformsParams) SetExtended(extended *bool) {
o.Extended = extended
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *GetPlatformsParams) WithExtended(extended *bool) *GetPlatformsParams {\n\to.SetExtended(extended)\n\treturn o\n}",
"func (o *VulnerabilitiesRequest) SetExtended(v bool) {\n\to.Extended = &v\n}",
"func (b *MessagesGetHistoryBuilder) Extended(v bool) *MessagesGetHistoryBuilder {\n\tb.Params[\"extended\"] = v\n\treturn b\n}",
"func (b *MessagesGetByIDBuilder) Extended(v bool) *MessagesGetByIDBuilder {\n\tb.Params[\"extended\"] = v\n\treturn b\n}",
"func (b *MessagesGetConversationsByIDBuilder) Extended(v bool) *MessagesGetConversationsByIDBuilder {\n\tb.Params[\"extended\"] = v\n\treturn b\n}",
"func (b *MessagesGetByConversationMessageIDBuilder) Extended(v bool) *MessagesGetByConversationMessageIDBuilder {\n\tb.Params[\"extended\"] = v\n\treturn b\n}",
"func (m pIsSystem) Extend(fnct func(m.UserSet) bool) pIsSystem {\n\treturn pIsSystem{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (p *HostedProgramInfo) Extend(ext auth.SubPrin) {\n\tp.subprin = append(p.subprin, ext...)\n}",
"func (l *LogOptions) SetExtendedOptions(options ...interface{}) {\n\tfor x := 0; x < len(options); x += 2 {\n\t\tl.ExtendedOptions[options[x].(string)] = options[x+1]\n\t}\n}",
"func (m *AdministrativeUnit) SetExtensions(value []Extensionable)() {\n m.extensions = value\n}",
"func (rr *OPT) SetExtendedRcode(v uint16) {\n\trr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | uint32(v>>4)<<24\n}",
"func (o *VulnerabilitiesRequest) GetExtended() bool {\n\tif o == nil || o.Extended == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.Extended\n}",
"func TestSetExtraSpecs(t *testing.T) {\n\tth.SetupHTTP()\n\tdefer th.TeardownHTTP()\n\n\tMockSetExtraSpecsResponse(t)\n\n\toptions := &sharetypes.SetExtraSpecsOpts{\n\t\tExtraSpecs: map[string]interface{}{\"my_key\": \"my_value\"},\n\t}\n\n\tes, err := sharetypes.SetExtraSpecs(client.ServiceClient(), \"shareTypeID\", options).Extract()\n\tth.AssertNoErr(t, err)\n\n\tth.AssertEquals(t, es[\"my_key\"], \"my_value\")\n}",
"func (m pContextGet) Extend(fnct func(m.UserSet) *types.Context) pContextGet {\n\treturn pContextGet{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (o *VulnerabilitiesRequest) HasExtended() bool {\n\tif o != nil && o.Extended != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (m pGetToolbar) Extend(fnct func(m.UserSet) webtypes.Toolbar) pGetToolbar {\n\treturn pGetToolbar{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (m *Application) SetExtensionProperties(value []ExtensionPropertyable)() {\n m.extensionProperties = value\n}",
"func (m pWithEnv) Extend(fnct func(m.UserSet, models.Environment) m.UserSet) pWithEnv {\n\treturn pWithEnv{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (m *WorkforceIntegration) SetSupportedEntities(value *WorkforceIntegrationSupportedEntities)() {\n m.supportedEntities = value\n}",
"func (m pNameGet) Extend(fnct func(m.UserSet) string) pNameGet {\n\treturn pNameGet{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (o *VulnerabilitiesRequest) GetExtendedOk() (*bool, bool) {\n\tif o == nil || o.Extended == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Extended, true\n}",
"func (m pDefaultGet) Extend(fnct func(m.UserSet) m.UserData) pDefaultGet {\n\treturn pDefaultGet{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (vk *VK) AppsGetLeaderboardExtended(params map[string]string) (response AppsGetLeaderboardExtendedResponse, vkErr Error) {\n\tparams[\"extended\"] = \"1\"\n\tvk.RequestUnmarshal(\"apps.getLeaderboard\", params, &response, &vkErr)\n\treturn\n}",
"func (r *Search) Ext(ext map[string]json.RawMessage) *Search {\n\n\tr.req.Ext = ext\n\n\treturn r\n}",
"func (znp *Znp) ZdoExtSetParams(useMulticast uint8) (rsp *StatusResponse, err error) {\n\treq := &ZdoExtSetParams{UseMulticast: useMulticast}\n\terr = znp.ProcessRequest(unp.C_SREQ, unp.S_ZDO, 0x53, req, &rsp)\n\treturn\n}",
"func (m pFieldsGet) Extend(fnct func(m.UserSet, models.FieldsGetArgs) map[string]*models.FieldInfo) pFieldsGet {\n\treturn pFieldsGet{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func MergeRawExtension(base *runtime.RawExtension, patch *runtime.RawExtension) (*runtime.RawExtension, error) {\n\tpatchParameter, err := util.RawExtension2Map(patch)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to convert patch parameters to map\")\n\t}\n\tbaseParameter, err := util.RawExtension2Map(base)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to convert base parameters to map\")\n\t}\n\tif baseParameter == nil {\n\t\tbaseParameter = make(map[string]interface{})\n\t}\n\terr = mergo.Merge(&baseParameter, patchParameter, mergo.WithOverride)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to do merge with override\")\n\t}\n\tbs, err := json.Marshal(baseParameter)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to marshal merged properties\")\n\t}\n\treturn &runtime.RawExtension{Raw: bs}, nil\n}",
"func (m pBrowse) Extend(fnct func(m.UserSet, []int64) m.UserSet) pBrowse {\n\treturn pBrowse{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (*entityImpl) ExtendedAttributes(p graphql.ResolveParams) (interface{}, error) {\n\tentity := p.Source.(*types.Entity)\n\treturn wrapExtendedAttributes(entity.ExtendedAttributes), nil\n}",
"func (m *Group) SetExtensions(value []Extensionable)() {\n m.extensions = value\n}",
"func SetPlatformDefaults(p *none.Platform) {\n}",
"func SetExtNic(nic string) {\n\tsetValue(\"environment\", \"extnic\", nic)\n}",
"func (m pGetLoginDomain) Extend(fnct func(m.UserSet, string) q.UserCondition) pGetLoginDomain {\n\treturn pGetLoginDomain{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (m pInit) Extend(fnct func(m.UserSet)) pInit {\n\treturn pInit{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (opts UnsetEnvOptions) Extend(other UnsetEnvOptions) UnsetEnvOptions {\n\tvar out UnsetEnvOptions\n\tout = append(out, opts...)\n\tout = append(out, other...)\n\treturn out\n}",
"func getSupportedExtensions() map[string][]string {\n\t// In future when list of extensions grow, it will make\n\t// more sense to populate it in a dynamic way.\n\n\t// These are RHCOS supported extensions.\n\t// Each extension keeps a list of packages required to get enabled on host.\n\treturn map[string][]string{\n\t\t\"wasm\": {\"crun-wasm\"},\n\t\t\"ipsec\": {\"NetworkManager-libreswan\", \"libreswan\"},\n\t\t\"usbguard\": {\"usbguard\"},\n\t\t\"kerberos\": {\"krb5-workstation\", \"libkadm5\"},\n\t\t\"kernel-devel\": {\"kernel-devel\", \"kernel-headers\"},\n\t\t\"sandboxed-containers\": {\"kata-containers\"},\n\t}\n}",
"func (m *AdministrativeUnitsAdministrativeUnitItemRequestBuilder) Extensions()(*AdministrativeUnitsItemExtensionsRequestBuilder) {\n return NewAdministrativeUnitsItemExtensionsRequestBuilderInternal(m.BaseRequestBuilder.PathParameters, m.BaseRequestBuilder.RequestAdapter)\n}",
"func (m *EventItemRequestBuilder) MultiValueExtendedProperties()(*i8f6b1073998fe7e072e94da48aef27ff47795e232ec787943a4bc43820018dd7.MultiValueExtendedPropertiesRequestBuilder) {\n return i8f6b1073998fe7e072e94da48aef27ff47795e232ec787943a4bc43820018dd7.NewMultiValueExtendedPropertiesRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}",
"func (m *User) SetExtensions(value []Extensionable)() {\n m.extensions = value\n}",
"func (extension *IotHubExtension) GetExtendedResources() []genruntime.KubernetesResource {\n\treturn []genruntime.KubernetesResource{\n\t\t&v20210702.IotHub{},\n\t\t&v20210702s.IotHub{}}\n}",
"func (extension *ServerFarmExtension) GetExtendedResources() []genruntime.KubernetesResource {\n\treturn []genruntime.KubernetesResource{\n\t\t&v20220301.ServerFarm{},\n\t\t&v20220301s.ServerFarm{},\n\t\t&v1beta20220301.ServerFarm{},\n\t\t&v1beta20220301s.ServerFarm{}}\n}",
"func (b *OrganizationRequestBuilder) Extensions() *OrganizationExtensionsCollectionRequestBuilder {\n\tbb := &OrganizationExtensionsCollectionRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/extensions\"\n\treturn bb\n}",
"func (b *PostRequestBuilder) MultiValueExtendedProperties() *PostMultiValueExtendedPropertiesCollectionRequestBuilder {\n\tbb := &PostMultiValueExtendedPropertiesCollectionRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/multiValueExtendedProperties\"\n\treturn bb\n}",
"func (m pGetRecord) Extend(fnct func(m.UserSet, string) m.UserSet) pGetRecord {\n\treturn pGetRecord{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (m *DeviceRequestBuilder) Extensions()(*i052c31265100c50ded0dfb7ace15f5bda9fcabb7268d24b57f25c5f7a0bc8ca0.ExtensionsRequestBuilder) {\n return i052c31265100c50ded0dfb7ace15f5bda9fcabb7268d24b57f25c5f7a0bc8ca0.NewExtensionsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}",
"func TestGetExtraSpecs(t *testing.T) {\n\tth.SetupHTTP()\n\tdefer th.TeardownHTTP()\n\n\tMockGetExtraSpecsResponse(t)\n\n\tst, err := sharetypes.GetExtraSpecs(client.ServiceClient(), \"shareTypeID\").Extract()\n\tth.AssertNoErr(t, err)\n\n\tth.AssertEquals(t, st[\"snapshot_support\"], \"True\")\n\tth.AssertEquals(t, st[\"driver_handles_share_servers\"], \"True\")\n\tth.AssertEquals(t, st[\"my_custom_extra_spec\"], \"False\")\n}",
"func (m pAddModifiers) Extend(fnct func(m.UserSet, *etree.Document, map[string]*models.FieldInfo)) pAddModifiers {\n\treturn pAddModifiers{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (s *TiFlashSpec) GetExtendedRole(ctx context.Context, tlsCfg *tls.Config, pdList ...string) string {\n\tif len(pdList) < 1 {\n\t\treturn \"\"\n\t}\n\tstoreAddr := utils.JoinHostPort(s.Host, s.FlashServicePort)\n\tpdapi := api.NewPDClient(ctx, pdList, statusQueryTimeout, tlsCfg)\n\tstore, err := pdapi.GetCurrentStore(storeAddr)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tisWriteNode := false\n\tisTiFlash := false\n\tfor _, label := range store.Store.Labels {\n\t\tif label.Key == EngineLabelKey {\n\t\t\tif label.Value == EngineLabelTiFlashCompute {\n\t\t\t\treturn \" (compute)\"\n\t\t\t}\n\t\t\tif label.Value == EngineLabelTiFlash {\n\t\t\t\tisTiFlash = true\n\t\t\t}\n\t\t}\n\t\tif label.Key == EngineRoleLabelKey && label.Value == EngineRoleLabelWrite {\n\t\t\tisWriteNode = true\n\t\t}\n\t\tif isTiFlash && isWriteNode {\n\t\t\treturn \" (write)\"\n\t\t}\n\t}\n\treturn \"\"\n}",
"func (c *Client) SpeakExtended(input *SpeakExtendedInput) (r *SpeakExtendedResponse) {\n\tresp := c.queryAPI(&Request{\n\t\tXMLName: xml.Name{Local: \"speakExtended\"},\n\t\tAccountID: c.AccountID,\n\t\tPassword: c.Password,\n\t\tVoice: input.Voice,\n\t\tText: input.Text,\n\t\tAudioFormat: input.AudioFormat,\n\t\tSampleRate: input.SampleRate,\n\t\tAudio3D: input.Audio3D,\n\t\tMetadata: input.Metadata,\n\t})\n\tif resp.Error != nil {\n\t\tr.Error = resp.Error\n\t\treturn\n\t}\n\n\tif err := xml.Unmarshal(resp.Raw, &r); err != nil {\n\t\tr.Error = err\n\t}\n\n\treturn\n}",
"func (c *Client) GetEnvironmentExtended(id string, ret *EnvironmentExtended) error {\n\tquery := url.Values{}\n\tquery.Add(\"envId\", id)\n\treturn c.makeGetRequest(\"envs/actions/getextended\", ret, &query)\n}",
"func (oo *OnuDeviceEntry) GetPersIsExtOmciSupported() bool {\n\too.MutexPersOnuConfig.RLock()\n\tdefer oo.MutexPersOnuConfig.RUnlock()\n\tvalue := oo.SOnuPersistentData.PersIsExtOmciSupported\n\treturn value\n}",
"func (ec *EventContextV03) SetExtension(name string, value interface{}) error {\n\tif ec.Extensions == nil {\n\t\tec.Extensions = make(map[string]interface{})\n\t}\n\n\tif _, ok := specV03Attributes[strings.ToLower(name)]; ok {\n\t\treturn fmt.Errorf(\"bad key %q: CloudEvents spec attribute MUST NOT be overwritten by extension\", name)\n\t}\n\n\tif value == nil {\n\t\tdelete(ec.Extensions, name)\n\t\tif len(ec.Extensions) == 0 {\n\t\t\tec.Extensions = nil\n\t\t}\n\t\treturn nil\n\t} else {\n\t\tv, err := types.Validate(value)\n\t\tif err == nil {\n\t\t\tec.Extensions[name] = v\n\t\t}\n\t\treturn err\n\t}\n}",
"func (m pSudo) Extend(fnct func(m.UserSet, ...int64) m.UserSet) pSudo {\n\treturn pSudo{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (m pGetCompany) Extend(fnct func(m.UserSet) m.CompanySet) pGetCompany {\n\treturn pGetCompany{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (me TxsdType) IsExtended() bool { return me == \"extended\" }",
"func (c *Client) EnvironmentExtend(envID string) error {\n\treturn c.envPutActionByID(\"extend\", envID)\n}",
"func (m pActionGet) Extend(fnct func(m.UserSet) *actions.Action) pActionGet {\n\treturn pActionGet{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (m pToggleActive) Extend(fnct func(m.UserSet)) pToggleActive {\n\treturn pToggleActive{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func ExampleMachineExtensionsClient_Get() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armconnectedvmware.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewMachineExtensionsClient().Get(ctx, \"myResourceGroup\", \"myMachine\", \"CustomScriptExtension\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.MachineExtension = armconnectedvmware.MachineExtension{\n\t// \tName: to.Ptr(\"CustomScriptExtension\"),\n\t// \tType: to.Ptr(\"Microsoft.ConnectedVMwarevSphere/VirtualMachines/extensions\"),\n\t// \tID: to.Ptr(\"/subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.HybridCompute/Machines/myMachine/Extensions/CustomScriptExtension\"),\n\t// \tLocation: to.Ptr(\"eastus2euap\"),\n\t// \tProperties: &armconnectedvmware.MachineExtensionProperties{\n\t// \t\tType: to.Ptr(\"string\"),\n\t// \t\tAutoUpgradeMinorVersion: to.Ptr(false),\n\t// \t\tInstanceView: &armconnectedvmware.MachineExtensionPropertiesInstanceView{\n\t// \t\t\tName: to.Ptr(\"CustomScriptExtension\"),\n\t// \t\t\tType: to.Ptr(\"CustomScriptExtension\"),\n\t// \t\t\tStatus: &armconnectedvmware.MachineExtensionInstanceViewStatus{\n\t// \t\t\t\tCode: to.Ptr(\"success\"),\n\t// \t\t\t\tDisplayStatus: to.Ptr(\"Provisioning succeeded\"),\n\t// \t\t\t\tLevel: to.Ptr(armconnectedvmware.StatusLevelTypes(\"Information\")),\n\t// \t\t\t\tMessage: to.Ptr(\"Finished executing 
command, StdOut: , StdErr:\"),\n\t// \t\t\t\tTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2019-08-08T20:42:10.999Z\"); return t}()),\n\t// \t\t\t},\n\t// \t\t\tTypeHandlerVersion: to.Ptr(\"1.10.3\"),\n\t// \t\t},\n\t// \t\tProvisioningState: to.Ptr(\"Succeeded\"),\n\t// \t\tPublisher: to.Ptr(\"Microsoft.Compute\"),\n\t// \t\tSettings: \"@{commandToExecute=powershell.exe -c \\\"Get-Process | Where-Object { $_.CPU -gt 10000 }\\\"}\",\n\t// \t\tTypeHandlerVersion: to.Ptr(\"1.10.3\"),\n\t// \t},\n\t// }\n}",
"func (p *Placemark) AddExtendedData(e ExtendedData) {\n\tp.Extended = e\n}",
"func (delivery_instructions DeliveryInstructions) ExtendedOptions() (data []byte, err error) {\n\tops, err := delivery_instructions.HasExtendedOptions()\n\tif err != nil {\n\t\treturn\n\t}\n\tif ops {\n\t\tvar extended_options_index int\n\t\textended_options_index, err = delivery_instructions.extended_options_index()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif len(delivery_instructions) < extended_options_index+2 {\n\t\t\terr = errors.New(\"DeliveryInstructions are invalid, length is shorter than required for Extended Options\")\n\t\t\treturn\n\t\t} else {\n\t\t\textended_options_size := common.Integer([]byte{delivery_instructions[extended_options_index]})\n\t\t\tif len(delivery_instructions) < extended_options_index+1+extended_options_size {\n\t\t\t\terr = errors.New(\"DeliveryInstructions are invalid, length is shorter than specified in Extended Options\")\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tdata = delivery_instructions[extended_options_index+1 : extended_options_size]\n\t\t\t\treturn\n\t\t\t}\n\n\t\t}\n\t} else {\n\t\terr = errors.New(\"DeliveryInstruction does not have the ExtendedOptions flag set\")\n\t}\n\treturn\n}",
"func NewMultiValueLegacyExtendedPropertyItemRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*MultiValueLegacyExtendedPropertyItemRequestBuilder) {\n m := &MultiValueLegacyExtendedPropertyItemRequestBuilder{\n }\n m.urlTemplate = \"{+baseurl}/me/calendarGroups/{calendarGroup%2Did}/calendars/{calendar%2Did}/events/{event%2Did}/instances/{event%2Did1}/multiValueExtendedProperties/{multiValueLegacyExtendedProperty%2Did}{?%24select,%24expand}\";\n urlTplParams := make(map[string]string)\n for idx, item := range pathParameters {\n urlTplParams[idx] = item\n }\n m.pathParameters = urlTplParams;\n m.requestAdapter = requestAdapter;\n return m\n}",
"func (o *GetPlatformsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Extended != nil {\n\n\t\t// query param extended\n\t\tvar qrExtended bool\n\t\tif o.Extended != nil {\n\t\t\tqrExtended = *o.Extended\n\t\t}\n\t\tqExtended := swag.FormatBool(qrExtended)\n\t\tif qExtended != \"\" {\n\t\t\tif err := r.SetQueryParam(\"extended\", qExtended); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewMultiValueLegacyExtendedPropertyItemRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*MultiValueLegacyExtendedPropertyItemRequestBuilder) {\n urlParams := make(map[string]string)\n urlParams[\"request-raw-url\"] = rawUrl\n return NewMultiValueLegacyExtendedPropertyItemRequestBuilderInternal(urlParams, requestAdapter)\n}",
"func (opts *ToolOptions) AddToExtraOptionsRegistry(extraOpts ExtraOptions) {\n\topts.URI.extraOptionsRegistry = append(opts.URI.extraOptionsRegistry, extraOpts)\n}",
"func (*poemExtension) Extend(m goldmark.Markdown) {\n\tm.Parser().AddOptions(\n\t\tparser.WithASTTransformers(\n\t\t\tutil.Prioritized(defaulTransformer, 100)),\n\t\tparser.WithBlockParsers(\n\t\t\tutil.Prioritized(defaultParser, 450)),\n\t)\n\tm.Renderer().AddOptions(\n\t\trenderer.WithNodeRenderers(\n\t\t\tutil.Prioritized(defaulRenderer, 100)),\n\t)\n}",
"func WithStandardUserAgent(platform string, systemCode string) Option {\n\treturn func(d *ExtensibleTransport) {\n\t\text := NewUserAgentExtension(standardUserAgent(platform, systemCode))\n\t\td.extensions = append(d.extensions, ext)\n\t}\n}",
"func (swagger *MgwSwagger) SetXWso2Extensions() error {\n\tswagger.setXWso2Basepath()\n\n\txWso2EPErr := swagger.setXWso2Endpoints()\n\tif xWso2EPErr != nil {\n\t\tlogger.LoggerOasparser.Error(\"Error while adding x-wso2-endpoints. \", xWso2EPErr)\n\t\treturn xWso2EPErr\n\t}\n\n\tapiLevelProdEPFound, productionEndpointErr := swagger.setXWso2ProductionEndpoint()\n\tif productionEndpointErr != nil {\n\t\tlogger.LoggerOasparser.Error(\"Error while adding x-wso2-production-endpoints. \", productionEndpointErr)\n\t\treturn productionEndpointErr\n\t}\n\n\tapiLevelSandEPFound, sandboxEndpointErr := swagger.setXWso2SandboxEndpoint()\n\tif sandboxEndpointErr != nil {\n\t\tlogger.LoggerOasparser.Error(\"Error while adding x-wso2-sandbox-endpoints. \", sandboxEndpointErr)\n\t\treturn sandboxEndpointErr\n\t}\n\n\t// to remove swagger server/host urls being added when x-wso2-sandbox-endpoints is given\n\tif !apiLevelProdEPFound && apiLevelSandEPFound && swagger.productionEndpoints != nil &&\n\t\tlen(swagger.productionEndpoints.Endpoints) > 0 {\n\t\tswagger.productionEndpoints = nil\n\t}\n\n\tswagger.setXWso2Cors()\n\tswagger.setXWso2ThrottlingTier()\n\tswagger.setDisableSecurity()\n\tswagger.setXWso2AuthHeader()\n\tswagger.setXWso2HTTP2BackendEnabled()\n\n\t// Error nil for successful execution\n\treturn nil\n}",
"func (m pIsPublic) Extend(fnct func(m.UserSet) bool) pIsPublic {\n\treturn pIsPublic{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (o *NewData) SetExtra(v map[string]string) {\n\to.Extra = &v\n}",
"func (me TxsdType) IsExtended() bool { return me.String() == \"extended\" }",
"func ExampleMachineExtensionsClient_Get() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armhybridcompute.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewMachineExtensionsClient().Get(ctx, \"myResourceGroup\", \"myMachine\", \"CustomScriptExtension\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.MachineExtension = armhybridcompute.MachineExtension{\n\t// \tName: to.Ptr(\"CustomScriptExtension\"),\n\t// \tType: to.Ptr(\"Microsoft.HybridCompute/machines/extensions\"),\n\t// \tID: to.Ptr(\"/subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.HybridCompute/Machines/myMachine/Extensions/CustomScriptExtension\"),\n\t// \tLocation: to.Ptr(\"eastus2euap\"),\n\t// \tProperties: &armhybridcompute.MachineExtensionProperties{\n\t// \t\tType: to.Ptr(\"string\"),\n\t// \t\tAutoUpgradeMinorVersion: to.Ptr(false),\n\t// \t\tInstanceView: &armhybridcompute.MachineExtensionInstanceView{\n\t// \t\t\tName: to.Ptr(\"CustomScriptExtension\"),\n\t// \t\t\tType: to.Ptr(\"CustomScriptExtension\"),\n\t// \t\t\tStatus: &armhybridcompute.MachineExtensionInstanceViewStatus{\n\t// \t\t\t\tCode: to.Ptr(\"success\"),\n\t// \t\t\t\tDisplayStatus: to.Ptr(\"Provisioning succeeded\"),\n\t// \t\t\t\tLevel: to.Ptr(armhybridcompute.StatusLevelTypes(\"Information\")),\n\t// \t\t\t\tMessage: to.Ptr(\"Finished executing command, StdOut: , StdErr:\"),\n\t// 
\t\t\t\tTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2019-08-08T20:42:10.999Z\"); return t}()),\n\t// \t\t\t},\n\t// \t\t\tTypeHandlerVersion: to.Ptr(\"1.10.3\"),\n\t// \t\t},\n\t// \t\tProtectedSettings: map[string]any{\n\t// \t\t},\n\t// \t\tProvisioningState: to.Ptr(\"Succeeded\"),\n\t// \t\tPublisher: to.Ptr(\"Microsoft.Compute\"),\n\t// \t\tSettings: \"@{commandToExecute=powershell.exe -c \\\"Get-Process | Where-Object { $_.CPU -gt 10000 }\\\"}\",\n\t// \t\tTypeHandlerVersion: to.Ptr(\"1.10.3\"),\n\t// \t},\n\t// }\n}",
"func ExampleVirtualMachineScaleSetVMExtensionsClient_BeginCreateOrUpdate() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclient, err := armcompute.NewVirtualMachineScaleSetVMExtensionsClient(\"{subscription-id}\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := client.BeginCreateOrUpdate(ctx,\n\t\t\"myResourceGroup\",\n\t\t\"myvmScaleSet\",\n\t\t\"0\",\n\t\t\"myVMExtension\",\n\t\tarmcompute.VirtualMachineScaleSetVMExtension{\n\t\t\tProperties: &armcompute.VirtualMachineExtensionProperties{\n\t\t\t\tType: to.Ptr(\"extType\"),\n\t\t\t\tAutoUpgradeMinorVersion: to.Ptr(true),\n\t\t\t\tPublisher: to.Ptr(\"extPublisher\"),\n\t\t\t\tSettings: map[string]interface{}{\n\t\t\t\t\t\"UserName\": \"xyz@microsoft.com\",\n\t\t\t\t},\n\t\t\t\tTypeHandlerVersion: to.Ptr(\"1.2\"),\n\t\t\t},\n\t\t},\n\t\tnil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// TODO: use response item\n\t_ = res\n}",
"func (m pBrowseOne) Extend(fnct func(m.UserSet, int64) m.UserSet) pBrowseOne {\n\treturn pBrowseOne{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func GetCommonExtendedInfo() (map[string]string) {\n extendedInfo := map[string]string{\n \"csi_created_by_plugin_name\": CsiPluginName,\n \"csi_created_by_plugin_version\": Version,\n \"csi_created_by_plugin_git_hash\": Githash,\n \"csi_created_by_csi_version\": CsiVersion,\n }\n return extendedInfo\n}",
"func (m pFetch) Extend(fnct func(m.UserSet) m.UserSet) pFetch {\n\treturn pFetch{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (r *SingleValueLegacyExtendedPropertyRequest) Get(ctx context.Context) (resObj *SingleValueLegacyExtendedProperty, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}",
"func (m pWebReadGroupPrivate) Extend(fnct func(m.UserSet, webtypes.WebReadGroupParams) []models.FieldMap) pWebReadGroupPrivate {\n\treturn pWebReadGroupPrivate{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (m pFieldsViewGet) Extend(fnct func(m.UserSet, webtypes.FieldsViewGetParams) *webtypes.FieldsViewData) pFieldsViewGet {\n\treturn pFieldsViewGet{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (s KernelArgs) Extend(k KernelArgs) {\n\tfor a, b := range k {\n\t\ts[a] = b\n\t}\n}",
"func (m pWithNewContext) Extend(fnct func(m.UserSet, *types.Context) m.UserSet) pWithNewContext {\n\treturn pWithNewContext{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (m pAddMandatoryGroups) Extend(fnct func(m.UserSet)) pAddMandatoryGroups {\n\treturn pAddMandatoryGroups{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func WithExtensions(extensions map[string]string) CallOpt {\n\treturn func(c *call) error {\n\t\tc.extensions = extensions\n\t\treturn nil\n\t}\n}",
"func (v ProductExtended) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonD2b7633eEncodeBackendInternalModels5(w, v)\n}",
"func (m pOffset) Extend(fnct func(m.UserSet, int) m.UserSet) pOffset {\n\treturn pOffset{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (ad *additionalData) AddExtendedField(name string, value interface{}) {\n\tif ad.data == nil {\n\t\tad.data = make(map[string]interface{})\n\t}\n\n\tad.data[name] = value\n}",
"func setPlatform(ea *ExtractionArgs) (success bool) {\n\tswitch platform := runtime.GOOS; platform {\n\tcase osFREEBSD, osLINUX:\n\t\tea.Extractor = extractSectionUnix\n\t\tif ea.Verbose {\n\t\t\tea.ArArgs = append(ea.ArArgs, \"xv\")\n\t\t} else {\n\t\t\tea.ArArgs = append(ea.ArArgs, \"x\")\n\t\t}\n\t\tea.ObjectTypeInArchive = fileTypeELFOBJECT\n\t\tsuccess = true\n\tcase osDARWIN:\n\t\tea.Extractor = extractSectionDarwin\n\t\tea.ArArgs = append(ea.ArArgs, \"-x\")\n\t\tif ea.Verbose {\n\t\t\tea.ArArgs = append(ea.ArArgs, \"-v\")\n\t\t}\n\t\tea.ObjectTypeInArchive = fileTypeMACHOBJECT\n\t\tsuccess = true\n\tdefault:\n\t\tLogError(\"Unsupported platform: %s.\", platform)\n\t}\n\treturn\n}",
"func (m pSelfReadableFields) Extend(fnct func(m.UserSet) map[string]bool) pSelfReadableFields {\n\treturn pSelfReadableFields{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (m pComputeSessionToken) Extend(fnct func(m.UserSet, string) string) pComputeSessionToken {\n\treturn pComputeSessionToken{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (m *DeviceItemRequestBuilder) Extensions()(*ItemExtensionsRequestBuilder) {\n return NewItemExtensionsRequestBuilderInternal(m.pathParameters, m.requestAdapter)\n}",
"func (client *MachineExtensionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, name string, extensionName string, options *MachineExtensionsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{name}/extensions/{extensionName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif extensionName == \"\" {\n\t\treturn nil, errors.New(\"parameter extensionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{extensionName}\", url.PathEscape(extensionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}",
"func (c Core) extend() int32 {\r\n\te := c.PopPc(Word)\r\n\tyn := int32(c.Regs[e>>12])\r\n\tif e&0x800 == 0 {\r\n\t\tyn = Word.SignedExtend(uint32(yn))\r\n\t}\r\n\treturn yn + Byte.SignedExtend(e)\r\n}",
"func (m pWithContext) Extend(fnct func(m.UserSet, string, interface{}) m.UserSet) pWithContext {\n\treturn pWithContext{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (p *Problem) Extend(key string, value interface{}) error {\n\n\tif _, reserved := ReservedKeys[strings.ToLower(key)]; reserved {\n\t\treturn ErrExtensionKeyIsReserved\n\t}\n\n\t_, keyFound := p.Extension(key)\n\tif !keyFound {\n\t\tp.extensionKeys = append(p.extensionKeys, key)\n\t}\n\n\tif value != nil {\n\t\tp.extensions[key] = value\n\t} else {\n\n\t\tdelete(p.extensions, key)\n\n\t\tfor x := 0; x < len(p.extensionKeys); {\n\n\t\t\tif strings.EqualFold(key, p.extensionKeys[x]) {\n\t\t\t\tp.extensionKeys = append(p.extensionKeys[:x], p.extensionKeys[x+1:]...)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tx++\n\n\t\t}\n\n\t}\n\n\treturn nil\n\n}",
"func (m pNew) Extend(fnct func(m.UserSet, m.UserData) m.UserSet) pNew {\n\treturn pNew{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func WithExtCharset(nli int) ExtCharsetOption {\n\treturn ExtCharsetOption{nli}\n}",
"func (s *server) SetExtraTags(tags []string) {\n\ts.extraTags = tags\n}",
"func (r *PostMultiValueExtendedPropertiesCollectionRequest) Add(ctx context.Context, reqObj *MultiValueLegacyExtendedProperty) (resObj *MultiValueLegacyExtendedProperty, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}",
"func (znp *Znp) SysSetExtAddr(extAddr string) (rsp *StatusResponse, err error) {\n\treq := &SysSetExtAddr{ExtAddress: extAddr}\n\terr = znp.ProcessRequest(unp.C_SREQ, unp.S_SYS, 0x03, req, &rsp)\n\treturn\n}",
"func (b *PostRequestBuilder) Extensions() *PostExtensionsCollectionRequestBuilder {\n\tbb := &PostExtensionsCollectionRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/extensions\"\n\treturn bb\n}"
] | [
"0.71823275",
"0.6650728",
"0.5868713",
"0.5740037",
"0.57266814",
"0.55311686",
"0.53988254",
"0.53365576",
"0.5276123",
"0.51772213",
"0.5176059",
"0.51389223",
"0.51068175",
"0.5083746",
"0.5057755",
"0.5052479",
"0.49989742",
"0.49774477",
"0.49474514",
"0.4946438",
"0.48997948",
"0.48569828",
"0.4834296",
"0.48342094",
"0.48221397",
"0.48143062",
"0.48065215",
"0.48032272",
"0.4796843",
"0.47939652",
"0.4785065",
"0.47802243",
"0.47775126",
"0.4772373",
"0.4739455",
"0.47243357",
"0.47017503",
"0.4689346",
"0.46810845",
"0.4675407",
"0.4672692",
"0.46720287",
"0.46616805",
"0.46616367",
"0.46488062",
"0.46389952",
"0.4633852",
"0.46176794",
"0.46146974",
"0.46059352",
"0.45938525",
"0.4592913",
"0.45793125",
"0.4578739",
"0.45771894",
"0.45720276",
"0.45719287",
"0.4569063",
"0.4559145",
"0.45573387",
"0.45504794",
"0.4545497",
"0.45290232",
"0.4522859",
"0.45097232",
"0.45011476",
"0.4495021",
"0.4485987",
"0.4485102",
"0.4479699",
"0.44775712",
"0.44736633",
"0.446151",
"0.4460146",
"0.4452284",
"0.44520158",
"0.44506893",
"0.445022",
"0.444714",
"0.4437796",
"0.44259855",
"0.44213602",
"0.4417716",
"0.4417623",
"0.441075",
"0.44035143",
"0.44021043",
"0.4399143",
"0.43874094",
"0.43850067",
"0.43826133",
"0.4379022",
"0.4378616",
"0.43749318",
"0.43747905",
"0.43742537",
"0.43730515",
"0.4372736",
"0.43691614",
"0.43672284"
] | 0.8300686 | 0 |
ListMetricEquipAttr implements Licence ListMetricEquipAttr function | ListMetricEquipAttr реализует функцию Licence ListMetricEquipAttr | func (l *LicenseRepository) ListMetricEquipAttr(ctx context.Context, scopes ...string) ([]*v1.MetricEquipAttrStand, error) {
respJSON, err := l.listMetricWithMetricType(ctx, v1.MetricEquipAttrStandard, scopes...)
if err != nil {
logger.Log.Error("dgraph/ListMetricEquipAttr - listMetricWithMetricType", zap.Error(err))
return nil, err
}
type Resp struct {
Data []*metricEquipAttr
}
var data Resp
if err := json.Unmarshal(respJSON, &data); err != nil {
logger.Log.Error("dgraph/ListMetricEquipAttr - Unmarshal failed", zap.Error(err))
return nil, errors.New("cannot Unmarshal")
}
if len(data.Data) == 0 {
return nil, v1.ErrNoData
}
return converMetricToModelMetricAllEquipAttr(data.Data)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func cmdAttributeList(c *cli.Context) error {\n\tif err := adm.VerifyNoArgument(c); err != nil {\n\t\treturn err\n\t}\n\n\treturn adm.Perform(`get`, `/attribute/`, `list`, nil, c)\n}",
"func AttributeList(attr int32) (list map[string]int32) {\n\tlist = map[string]int32{}\n\tfor bit, name := range VideoAttribute {\n\t\tlist[name] = int32(((attr >> bit) & 1))\n\t}\n\treturn\n}",
"func getAttrList(selection *goquery.Selection, attrName string) []string {\n\tres := selection.Map(func(ind int, s *goquery.Selection) string {\n\t\tattr, _ := s.Attr(attrName)\n\t\treturn attr\n\t})\n\treturn removeEmpty(res)\n}",
"func (e *Environment) Attr(environmentName, attr string) ([]Attr, error) {\n\n\targkeys := []string{\"attr\"}\n\targvalues := []interface{}{attr}\n\tbaseCommand := fmt.Sprintf(\"list environment attr %s\", environmentName)\n\n\tc, err := cmd.ArgsExpander(baseCommand, argkeys, argvalues)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb, err := cmd.RunCommand(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tattrs := []Attr{}\n\terr = json.Unmarshal(b, &attrs)\n\tif err != nil {\n\t\t// it may have been just an empty output from the Frontend\n\t\tnullOutput := NullOutput{}\n\t\terr = json.Unmarshal(b, &nullOutput)\n\t\tif err != nil {\n\t\t\t// if we still can't recognize the output, return an error\n\t\t\treturn nil, err\n\t\t}\n\t\treturn attrs, err\n\t}\n\treturn attrs, err\n}",
"func (cr *CredentialRequest) AttributeList(\n\tconf *Configuration,\n\tmetadataVersion byte,\n\trevocationAttr *big.Int,\n\tissuedAt time.Time,\n) (*AttributeList, error) {\n\tif err := cr.Validate(conf); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcredtype := conf.CredentialTypes[cr.CredentialTypeID]\n\tif !credtype.RevocationSupported() && revocationAttr != nil {\n\t\treturn nil, errors.Errorf(\"cannot specify revocationAttr: credtype %s does not support revocation\", cr.CredentialTypeID.String())\n\t}\n\n\t// Compute metadata attribute\n\tmeta := NewMetadataAttribute(metadataVersion)\n\tmeta.setKeyCounter(cr.KeyCounter)\n\tmeta.setCredentialTypeIdentifier(cr.CredentialTypeID.String())\n\tmeta.setSigningDate(issuedAt)\n\tif err := meta.setExpiryDate(cr.Validity); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Compute other attributes\n\tattrs := make([]*big.Int, len(credtype.AttributeTypes)+1)\n\tattrs[0] = meta.Int\n\tif credtype.RevocationSupported() {\n\t\tif revocationAttr != nil {\n\t\t\tattrs[credtype.RevocationIndex+1] = revocationAttr\n\t\t} else {\n\t\t\tattrs[credtype.RevocationIndex+1] = bigZero\n\t\t}\n\t}\n\tfor i, attrtype := range credtype.AttributeTypes {\n\t\tif attrtype.RevocationAttribute || attrtype.RandomBlind {\n\t\t\tcontinue\n\t\t}\n\t\tattrs[i+1] = new(big.Int)\n\t\tif str, present := cr.Attributes[attrtype.ID]; present {\n\t\t\t// Set attribute to str << 1 + 1\n\t\t\tattrs[i+1].SetBytes([]byte(str))\n\t\t\tif meta.Version() >= 0x03 {\n\t\t\t\tattrs[i+1].Lsh(attrs[i+1], 1) // attr <<= 1\n\t\t\t\tattrs[i+1].Add(attrs[i+1], big.NewInt(1)) // attr += 1\n\t\t\t}\n\t\t}\n\t}\n\n\tlist := NewAttributeListFromInts(attrs, conf)\n\tlist.RevocationSupported = cr.RevocationSupported\n\treturn list, nil\n}",
"func addListAttribute(item map[string]*dynamodb.AttributeValue, key string, value []*dynamodb.AttributeValue) {\n\titem[key] = &dynamodb.AttributeValue{L: value}\n}",
"func (f Features) attrExtreme() *spotify.TrackAttributes {\n\tok := func(val float32) bool {\n\t\treturn val <= 0.25 || val >= 0.75\n\t}\n\tx := spotify.NewTrackAttributes()\n\tif ok(f.Acousticness) {\n\t\tx.TargetAcousticness(float64(f.Acousticness))\n\t}\n\n\tif ok(f.Danceability) {\n\t\tx.TargetDanceability(float64(f.Danceability))\n\t}\n\tx.TargetDuration(f.Duration)\n\tif ok(f.Energy) {\n\t\tx.TargetEnergy(float64(f.Energy))\n\t}\n\tif ok(f.Instrumentalness) {\n\t\tx.TargetInstrumentalness(float64(f.Instrumentalness))\n\t}\n\tif ok(f.Liveness) {\n\t\tx.TargetLiveness(float64(f.Liveness))\n\t}\n\tif ok(f.Loudness) {\n\t\tx.TargetLoudness(float64(f.Loudness))\n\t}\n\tif ok(f.Speechiness) {\n\t\tx.TargetSpeechiness(float64(f.Speechiness))\n\t}\n\tif ok(f.Valence) {\n\t\tx.TargetValence(float64(f.Valence))\n\t}\n\treturn x\n}",
"func Attr(attrs ...a.Attribute) []a.Attribute {\n return attrs\n}",
"func (w *wrapper) Listxattr(path string, fill func(name string) bool) int {\n\treturn -fuse.ENOSYS\n}",
"func (a adapter) Attrs(key string) []string {\n\treturn a.entry.GetAttributeValues(key)\n}",
"func (n *UseList) Attributes() map[string]interface{} {\n\treturn nil\n}",
"func (fsys *FS) Listxattr(path string, fill func(name string) bool) (errc int) {\n\treturn -fuse.ENOSYS\n}",
"func (client OccMetricsClient) listMetricProperties(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/metricProperties/{namespaceName}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListMetricPropertiesResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/occ/20230515/MetricPropertyCollection/ListMetricProperties\"\n\t\terr = common.PostProcessServiceError(err, \"OccMetrics\", \"ListMetricProperties\", apiReferenceLink)\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func (m *prom) ListMetric() []string {\n\tvar res = make([]string, 0)\n\tres = append(res, m.ginMet.List()...)\n\tres = append(res, m.othMet.List()...)\n\treturn res\n}",
"func (a *AzureInfoer) GetAttributeValues(attribute string) (productinfo.AttrValues, error) {\n\n\tlog.Debugf(\"getting %s values\", attribute)\n\n\tvalues := make(productinfo.AttrValues, 0)\n\tvalueSet := make(map[productinfo.AttrValue]interface{})\n\n\tregions, err := a.GetRegions()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor region := range regions {\n\t\tvmSizes, err := a.vmSizesClient.List(context.TODO(), region)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Warnf(\"[Azure] couldn't get VM sizes in region %s\", region)\n\t\t\tcontinue\n\t\t}\n\t\tfor _, v := range *vmSizes.Value {\n\t\t\tswitch attribute {\n\t\t\tcase productinfo.Cpu:\n\t\t\t\tvalueSet[productinfo.AttrValue{\n\t\t\t\t\tValue: float64(*v.NumberOfCores),\n\t\t\t\t\tStrValue: fmt.Sprintf(\"%v\", *v.NumberOfCores),\n\t\t\t\t}] = \"\"\n\t\t\tcase productinfo.Memory:\n\t\t\t\tvalueSet[productinfo.AttrValue{\n\t\t\t\t\tValue: float64(*v.MemoryInMB) / 1024,\n\t\t\t\t\tStrValue: fmt.Sprintf(\"%v\", *v.MemoryInMB),\n\t\t\t\t}] = \"\"\n\t\t\t}\n\t\t}\n\t}\n\n\tfor attr := range valueSet {\n\t\tvalues = append(values, attr)\n\t}\n\n\tlog.Debugf(\"found %s values: %v\", attribute, values)\n\treturn values, nil\n}",
"func (f *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {\n\tif !f.super.enableXattr {\n\t\treturn fuse.ENOSYS\n\t}\n\tino := f.info.Inode\n\t_ = req.Size // ignore currently\n\t_ = req.Position // ignore currently\n\n\tkeys, err := f.super.mw.XAttrsList_ll(ino)\n\tif err != nil {\n\t\tlog.LogErrorf(\"ListXattr: ino(%v) err(%v)\", ino, err)\n\t\treturn ParseError(err)\n\t}\n\tfor _, key := range keys {\n\t\tresp.Append(key)\n\t}\n\tlog.LogDebugf(\"TRACE Listxattr: ino(%v)\", ino)\n\treturn nil\n}",
"func (f *ClientFD) ListXattr(ctx context.Context, size uint64) ([]string, error) {\n\treq := FListXattrReq{\n\t\tFD: f.fd,\n\t\tSize: size,\n\t}\n\n\tvar resp FListXattrResp\n\tctx.UninterruptibleSleepStart(false)\n\terr := f.client.SndRcvMessage(FListXattr, uint32(req.SizeBytes()), req.MarshalUnsafe, resp.UnmarshalBytes, nil)\n\tctx.UninterruptibleSleepFinish(false)\n\treturn resp.Xattrs, err\n}",
"func (l *Loader) AttrOnList(i Sym) bool {\n\treturn l.attrOnList.Has(i)\n}",
"func (s *Service) ListBusAttr(ctx context.Context) (busAttr []*model.BusinessAttr, err error) {\n\tbusAttr = make([]*model.BusinessAttr, 0)\n\tif err = s.dao.ORM.Table(\"workflow_business_attr\").Find(&busAttr).Error; err != nil {\n\t\treturn\n\t}\n\treturn\n}",
"func GetLicenseMeterAttribute(name string, allowedUses *uint, totalUses *uint, grossUses *uint) int {\n\tcName := goToCString(name)\n\tvar cAllowedUses C.uint\n\tvar cTotalUses C.uint\n\tvar cGrossUses C.uint\n\tstatus := C.GetLicenseMeterAttribute(cName, &cAllowedUses, &cTotalUses, &cGrossUses)\n\t*allowedUses = uint(cAllowedUses)\n\t*totalUses = uint(cTotalUses)\n\t*grossUses = uint(cGrossUses)\n\tfreeCString(cName)\n\treturn int(status)\n}",
"func newAttributeList(params *Params, nodeID []int, attrs wkdibe.AttributeList, depth int, delegable bool) wkdibe.AttributeList {\n\n\t// NOTE: Assume attributeIndex is int\n\tnewAttr := make(wkdibe.AttributeList)\n\tfor index := range attrs {\n\t\tnewAttr[wkdibe.AttributeIndex(*params.userHeight+int(index))] = attrs[index]\n\t}\n\n\tfor i := 0; i < depth; i++ {\n\t\tbuffer := make([]byte, 8)\n\t\tbinary.LittleEndian.PutUint64(buffer, uint64(nodeID[i]))\n\t\tnewAttr[wkdibe.AttributeIndex(i)] = cryptutils.HashToZp(buffer)\n\t}\n\n\tif !delegable {\n\t\tfor i := depth; i < *params.userHeight; i++ {\n\t\t\tnewAttr[wkdibe.AttributeIndex(i)] = nil\n\t\t}\n\t}\n\treturn newAttr\n}",
"func (a *HyperflexApiService) GetHyperflexCapabilityInfoList(ctx context.Context) ApiGetHyperflexCapabilityInfoListRequest {\n\treturn ApiGetHyperflexCapabilityInfoListRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}",
"func itemCategoryAttribute(item models.Item) []string {\n\tres := make([]string, 0, 5)\n\thasInfluence := false\n\n\tif item.Influences.Elder {\n\t\thasInfluence = true\n\t\tres = append(res, \"elder\")\n\t}\n\tif item.Influences.Shaper {\n\t\thasInfluence = true\n\t\tres = append(res, \"shaper\")\n\t}\n\tif item.Influences.Crusader {\n\t\thasInfluence = true\n\t\tres = append(res, \"crusader\")\n\t}\n\tif item.Influences.Hunter {\n\t\thasInfluence = true\n\t\tres = append(res, \"hunter\")\n\t}\n\tif item.Influences.Redeemer {\n\t\thasInfluence = true\n\t\tres = append(res, \"redeemer\")\n\t}\n\tif item.Influences.Warlord {\n\t\thasInfluence = true\n\t\tres = append(res, \"warlord\")\n\t}\n\tif hasInfluence {\n\t\tres = append(res, \"influences\", \"influence\")\n\t}\n\n\tif item.IsIdentified {\n\t\tres = append(res, \"identified\")\n\t} else {\n\t\tres = append(res, \"unidentified\")\n\t}\n\tif item.IsCorrupted {\n\t\tres = append(res, \"corrupted\", \"corrupt\")\n\t}\n\tif item.IsVeiled {\n\t\tres = append(res, \"veiled\", \"veil\")\n\t}\n\tif item.IsRelic {\n\t\tres = append(res, \"relic\")\n\t}\n\tif item.IsVerified {\n\t\tres = append(res, \"verified\")\n\t}\n\tif item.IsAbyssJewel {\n\t\tres = append(res, \"abyss\")\n\t}\n\tif len(item.ProphecyText) > 0 {\n\t\tres = append(res, \"prophecy\")\n\t}\n\tif item.Hybrid.IsVaalGem {\n\t\tres = append(res, \"vaalgem\", \"vaal\")\n\t}\n\tif len(item.ArtFilename) > 0 {\n\t\tres = append(res, \"divination\", \"divine\", \"divcard\", \"divinationcard\")\n\t}\n\n\treturn res\n}",
"func GetActiveAttrib(program uint32, index uint32, bufSize int32, length *int32, size *int32, xtype *uint32, name *uint8) {\n\tsyscall.Syscall9(gpGetActiveAttrib, 7, uintptr(program), uintptr(index), uintptr(bufSize), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(xtype)), uintptr(unsafe.Pointer(name)), 0, 0)\n}",
"func (m *MyMetric) TagList() []*protocol.Tag {\n\treturn m.Tags\n}",
"func prettyPrintStringListAttribute(stringList bazel.StringListAttribute, indent int) (string, error) {\n\tret, err := prettyPrint(reflect.ValueOf(stringList.Value), indent)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif !stringList.HasConfigurableValues() {\n\t\t// Select statement not needed.\n\t\treturn ret, nil\n\t}\n\n\t// Create the selects for arch specific values.\n\tselects := map[string]reflect.Value{}\n\tfor arch, selectKey := range bazel.PlatformArchMap {\n\t\tselects[selectKey] = reflect.ValueOf(stringList.GetValueForArch(arch))\n\t}\n\n\tselectMap, err := prettyPrintSelectMap(selects, \"[]\", indent)\n\treturn ret + selectMap, err\n}",
"func (n *Node) Listxattr(ctx context.Context, dest []byte) (uint32, syscall.Errno) {\n\tcNames, errno := n.listXAttr()\n\tif errno != 0 {\n\t\treturn 0, errno\n\t}\n\trn := n.rootNode()\n\tvar buf bytes.Buffer\n\tfor _, curName := range cNames {\n\t\t// ACLs are passed through without encryption\n\t\tif isAcl(curName) {\n\t\t\tbuf.WriteString(curName + \"\\000\")\n\t\t\tcontinue\n\t\t}\n\t\tif !strings.HasPrefix(curName, xattrStorePrefix) {\n\t\t\tcontinue\n\t\t}\n\t\tname, err := rn.decryptXattrName(curName)\n\t\tif err != nil {\n\t\t\ttlog.Warn.Printf(\"ListXAttr: invalid xattr name %q: %v\", curName, err)\n\t\t\trn.reportMitigatedCorruption(curName)\n\t\t\tcontinue\n\t\t}\n\t\t// We *used to* encrypt ACLs, which caused a lot of problems.\n\t\tif isAcl(name) {\n\t\t\ttlog.Warn.Printf(\"ListXAttr: ignoring deprecated encrypted ACL %q = %q\", curName, name)\n\t\t\trn.reportMitigatedCorruption(curName)\n\t\t\tcontinue\n\t\t}\n\t\tbuf.WriteString(name + \"\\000\")\n\t}\n\t// Caller passes size zero to find out how large their buffer should be\n\tif len(dest) == 0 {\n\t\treturn uint32(buf.Len()), 0\n\t}\n\tif buf.Len() > len(dest) {\n\t\treturn minus1, syscall.ERANGE\n\t}\n\treturn uint32(copy(dest, buf.Bytes())), 0\n}",
"func (*FileSystemBase) Listxattr(path string, fill func(name string) bool) int {\n\treturn -ENOSYS\n}",
"func GetActiveAttrib(program uint32, index uint32, bufSize int32, length *int32, size *int32, xtype *uint32, name *int8) {\n C.glowGetActiveAttrib(gpGetActiveAttrib, (C.GLuint)(program), (C.GLuint)(index), (C.GLsizei)(bufSize), (*C.GLsizei)(unsafe.Pointer(length)), (*C.GLint)(unsafe.Pointer(size)), (*C.GLenum)(unsafe.Pointer(xtype)), (*C.GLchar)(unsafe.Pointer(name)))\n}",
"func GetActivationMeterAttributeUses(name string, uses *uint) int {\n\tcName := goToCString(name)\n\tvar cUses C.uint\n\tstatus := C.GetActivationMeterAttributeUses(cName, &cUses)\n\t*uses = uint(cUses)\n\tfreeCString(cName)\n\treturn int(status)\n}",
"func (c *ServiceCurve) Attrs() (uint32, uint32, uint32) {\n\treturn c.m1, c.d, c.m2\n}",
"func (graph *Graph) GetAttr(x, y int) byte {\n\treturn graph.Tiles[y][x].Attr\n}",
"func (e *rawData) AddAttribute(key string, val string) {}",
"func (f *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {\n\treturn syscall.ENOSYS // we never implement this\n}",
"func (mouse *Mouse) GetAttr(attr string) (interface{}, error) {\n\treturn 0, nil\n}",
"func parseAttrList(p *pstate, attrs map[string]string, comma bool) error {\n\tvar err error\n\n\t// Attribute lists are optional\n\tif err = p.PeekToken(); err != nil {\n\t\treturn err\n\t}\n\tif p.tok.Tok != grlex.LBRACKET {\n\t\treturn nil\n\t}\n\n\tif err = requiredToken(p, grlex.LBRACKET); err != nil {\n\t\treturn err\n\t}\n\n\t// What's next?\n\tif err = p.PeekToken(); err != nil {\n\t\treturn err\n\t}\n\tvar key, val string\n\tfor {\n\t\tif p.tok.Tok != grlex.IDENTIFIER {\n\t\t\ts := fmt.Sprintf(\"error parsing attr list: expected ID or bracket, got %v\", p.tok.Str)\n\t\t\treturn errors.New(s)\n\t\t}\n\t\tkey = p.tok.Str\n\n\t\t// Consume X=Y\n\t\tif err := requiredToken(p, grlex.IDENTIFIER); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := requiredToken(p, grlex.EQUAL); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := requiredTokenClass(p, attrValClass); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tval = p.tok.Str\n\t\tif attrs != nil {\n\t\t\tattrs[key] = val\n\t\t}\n\n\t\t// Take a peek at the next token\n\t\tif err = p.PeekToken(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// End of attrs?\n\t\tif p.tok.Tok == grlex.RBRACKET {\n\t\t\tbreak\n\t\t}\n\n\t\tif comma {\n\t\t\t// Expect comma between attributes\n\t\t\tif p.tok.Tok == grlex.COMMA {\n\t\t\t\tif err = requiredToken(p, grlex.COMMA); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif err = p.PeekToken(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := requiredToken(p, grlex.RBRACKET); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (m *Smart) getAttributes(acc telegraf.Accumulator, devices []string) {\n\tvar wg sync.WaitGroup\n\twg.Add(len(devices))\n\n\tfor _, device := range devices {\n\t\tgo gatherDisk(acc, m.UseSudo, m.Attributes, m.Path, m.Nocheck, device, &wg)\n\t}\n\n\twg.Wait()\n}",
"func (attestedClaim *AttestedClaim) getAttributes() ([]*Attribute, error) {\n\tbInts := attestedClaim.getRawAttributes()\n\tattributes, err := BigIntsToAttributes(bInts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsorted := sort.SliceIsSorted(attributes, func(p, q int) bool {\n\t\treturn strings.Compare(attributes[p].Name, attributes[q].Name) < 0\n\t})\n\tif !sorted {\n\t\treturn nil, errors.New(\"expected attributes inside credential to be sorted\")\n\t}\n\treturn attributes, nil\n}",
"func attrMultiKeyer(attr string) InstanceMultiKeyer {\n\treturn func(instance interface{}) ([]state.Key, error) {\n\t\tinst := reflect.Indirect(reflect.ValueOf(instance))\n\n\t\tv := inst.FieldByName(attr)\n\t\tif !v.IsValid() {\n\t\t\treturn nil, fmt.Errorf(`%s: %s`, ErrFieldNotExists, attr)\n\t\t}\n\n\t\treturn keysFromValue(v)\n\t}\n}",
"func (p *OnuIgmpProfile) ListEssentialParams() map[string]interface{} {\r\n\tvar EssentialOnuIgmpProfile = map[string]interface{}{\r\n\t\tOnuIgmpProfileHeaders[0]: p.GetName(),\r\n\t\tOnuIgmpProfileHeaders[1]: p.GetMode(),\r\n\t\tOnuIgmpProfileHeaders[2]: p.GetProxy(),\r\n\t\tOnuIgmpProfileHeaders[3]: p.GetFastLeave(),\r\n\t\tOnuIgmpProfileHeaders[4]: p.GetUsTci(),\r\n\t\tOnuIgmpProfileHeaders[5]: p.DsGemPort,\r\n\t}\r\n\t// I want all of these Bools to return strings of \"Enabled/Disabled\"\r\n\treturn EssentialOnuIgmpProfile\r\n}",
"func (c *IloClient) GetSysAttrDell() (SysAttributesData, error) {\n\n\turl := c.Hostname + \"/redfish/v1/Managers/System.Embedded.1/Attributes\"\n\n\tresp, _, _, err := queryData(c, \"GET\", url, nil)\n\tif err != nil {\n\t\treturn SysAttributesData{}, err\n\t}\n\n\tvar x SysAttrDell\n\n\tjson.Unmarshal(resp, &x)\n\n\treturn x.Attributes, nil\n\n}",
"func mesosAttribute2commonAttribute(oldAttributeList []*mesos.Attribute) []*commtype.BcsAgentAttribute {\n\tif oldAttributeList == nil {\n\t\treturn nil\n\t}\n\n\tattributeList := make([]*commtype.BcsAgentAttribute, 0)\n\n\tfor _, oldAttribute := range oldAttributeList {\n\t\tif oldAttribute == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tattribute := new(commtype.BcsAgentAttribute)\n\t\tif oldAttribute.Name != nil {\n\t\t\tattribute.Name = *oldAttribute.Name\n\t\t}\n\t\tif oldAttribute.Type != nil {\n\t\t\tswitch *oldAttribute.Type {\n\t\t\tcase mesos.Value_SCALAR:\n\t\t\t\tattribute.Type = commtype.MesosValueType_Scalar\n\t\t\t\tif oldAttribute.Scalar != nil && oldAttribute.Scalar.Value != nil {\n\t\t\t\t\tattribute.Scalar = &commtype.MesosValue_Scalar{\n\t\t\t\t\t\tValue: *oldAttribute.Scalar.Value,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase mesos.Value_RANGES:\n\t\t\t\tattribute.Type = commtype.MesosValueType_Ranges\n\t\t\t\tif oldAttribute.Ranges != nil {\n\t\t\t\t\trangeList := make([]*commtype.MesosValue_Ranges, 0)\n\t\t\t\t\tfor _, oldRange := range oldAttribute.Ranges.Range {\n\t\t\t\t\t\tnewRange := &commtype.MesosValue_Ranges{}\n\t\t\t\t\t\tif oldRange.Begin != nil {\n\t\t\t\t\t\t\tnewRange.Begin = *oldRange.Begin\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif oldRange.End != nil {\n\t\t\t\t\t\t\tnewRange.End = *oldRange.End\n\t\t\t\t\t\t}\n\t\t\t\t\t\trangeList = append(rangeList, newRange)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase mesos.Value_SET:\n\t\t\t\tattribute.Type = commtype.MesosValueType_Set\n\t\t\t\tif oldAttribute.Set != nil {\n\t\t\t\t\tattribute.Set = &commtype.MesosValue_Set{\n\t\t\t\t\t\tItem: oldAttribute.Set.Item,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase mesos.Value_TEXT:\n\t\t\t\tattribute.Type = commtype.MesosValueType_Text\n\t\t\t\tif oldAttribute.Text != nil && oldAttribute.Text.Value != nil {\n\t\t\t\t\tattribute.Text = &commtype.MesosValue_Text{\n\t\t\t\t\t\tValue: *oldAttribute.Text.Value,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tattributeList = 
append(attributeList, attribute)\n\t}\n\treturn attributeList\n}",
"func (dir HgmDir) Attr(ctx context.Context, a *fuse.Attr) error {\n\tresp, err := httpClient.Get(dir.getStatEndpoint(dir.localDir, false))\n\tif err != nil {\n\t\treturn fuse.EIO\n\t}\n\n\tfuseErr := stattool.HttpStatusToFuseErr(resp.StatusCode)\n\tif fuseErr == nil {\n\t\tbodyBytes, err := ioutil.ReadAll(resp.Body)\n\t\tif err == nil {\n\t\t\tattr := stattool.HgmStatAttr{}\n\t\t\terr = json.Unmarshal(bodyBytes, &attr)\n\t\t\tif err == nil {\n\t\t\t\tstattool.AttrFromHgmStat(attr, a)\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\tfuseErr = fuse.EIO\n\t\t}\n\t}\n\n\treturn fuseErr\n}",
"func (o *ObjectData) GetAttr(name string) (value Object, present bool) {\n value, present = o.Attrs[name]\n return \n}",
"func (client OccMetricsClient) ListMetricProperties(ctx context.Context, request ListMetricPropertiesRequest) (response ListMetricPropertiesResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.DefaultRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listMetricProperties, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = ListMetricPropertiesResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = ListMetricPropertiesResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListMetricPropertiesResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListMetricPropertiesResponse\")\n\t}\n\treturn\n}",
"func getAttribute(name string,attrs []xml.Attr) (string) {\n\tval := \"\"\n\tfor _,attr := range attrs { if strings.EqualFold(attr.Name.Local,name) {val = attr.Value } }\n\treturn val\n}",
"func GetActiveAttrib(program uint32, index uint32, bufSize int32, length *int32, size *int32, xtype *uint32, name *uint8) {\n\tC.glowGetActiveAttrib(gpGetActiveAttrib, (C.GLuint)(program), (C.GLuint)(index), (C.GLsizei)(bufSize), (*C.GLsizei)(unsafe.Pointer(length)), (*C.GLint)(unsafe.Pointer(size)), (*C.GLenum)(unsafe.Pointer(xtype)), (*C.GLchar)(unsafe.Pointer(name)))\n}",
"func GetActiveAttrib(program uint32, index uint32, bufSize int32, length *int32, size *int32, xtype *uint32, name *uint8) {\n\tC.glowGetActiveAttrib(gpGetActiveAttrib, (C.GLuint)(program), (C.GLuint)(index), (C.GLsizei)(bufSize), (*C.GLsizei)(unsafe.Pointer(length)), (*C.GLint)(unsafe.Pointer(size)), (*C.GLenum)(unsafe.Pointer(xtype)), (*C.GLchar)(unsafe.Pointer(name)))\n}",
"func (a *metricAttributesProcessor) processMetricAttributes(ctx context.Context, m pmetric.Metric) {\n\n\t// This is a lot of repeated code, but since there is no single parent superclass\n\t// between metric data types, we can't use polymorphism.\n\tswitch m.Type() {\n\tcase pmetric.MetricTypeGauge:\n\t\tdps := m.Gauge().DataPoints()\n\t\tfor i := 0; i < dps.Len(); i++ {\n\t\t\ta.attrProc.Process(ctx, a.logger, dps.At(i).Attributes())\n\t\t}\n\tcase pmetric.MetricTypeSum:\n\t\tdps := m.Sum().DataPoints()\n\t\tfor i := 0; i < dps.Len(); i++ {\n\t\t\ta.attrProc.Process(ctx, a.logger, dps.At(i).Attributes())\n\t\t}\n\tcase pmetric.MetricTypeHistogram:\n\t\tdps := m.Histogram().DataPoints()\n\t\tfor i := 0; i < dps.Len(); i++ {\n\t\t\ta.attrProc.Process(ctx, a.logger, dps.At(i).Attributes())\n\t\t}\n\tcase pmetric.MetricTypeExponentialHistogram:\n\t\tdps := m.ExponentialHistogram().DataPoints()\n\t\tfor i := 0; i < dps.Len(); i++ {\n\t\t\ta.attrProc.Process(ctx, a.logger, dps.At(i).Attributes())\n\t\t}\n\tcase pmetric.MetricTypeSummary:\n\t\tdps := m.Summary().DataPoints()\n\t\tfor i := 0; i < dps.Len(); i++ {\n\t\t\ta.attrProc.Process(ctx, a.logger, dps.At(i).Attributes())\n\t\t}\n\t}\n}",
"func (o GoogleCloudRetailV2alphaLocalInventoryResponseOutput) Attributes() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v GoogleCloudRetailV2alphaLocalInventoryResponse) map[string]string { return v.Attributes }).(pulumi.StringMapOutput)\n}",
"func (v *invalSet) attr(n fs.Node) {\n\tv.entries = append(v.entries, inval{Node: n})\n}",
"func (*ImageVulnerability) AttributeSpecifications() map[string]elemental.AttributeSpecification {\n\n\treturn ImageVulnerabilityAttributesMap\n}",
"func (m *IdentityUserFlowAttributeAssignment) GetUserAttributeValues()([]UserAttributeValuesItemable) {\n return m.userAttributeValues\n}",
"func (client RecommendedElasticPoolsClient) ListMetrics(resourceGroupName string, serverName string, recommendedElasticPoolName string) (result RecommendedElasticPoolListMetricsResult, err error) {\n\treq, err := client.ListMetricsPreparer(resourceGroupName, serverName, recommendedElasticPoolName)\n\tif err != nil {\n\t\treturn result, autorest.NewErrorWithError(err, \"sql.RecommendedElasticPoolsClient\", \"ListMetrics\", nil, \"Failure preparing request\")\n\t}\n\n\tresp, err := client.ListMetricsSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\treturn result, autorest.NewErrorWithError(err, \"sql.RecommendedElasticPoolsClient\", \"ListMetrics\", resp, \"Failure sending request\")\n\t}\n\n\tresult, err = client.ListMetricsResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"sql.RecommendedElasticPoolsClient\", \"ListMetrics\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}",
"func (df *DataFrame) Attr(name string) (starlark.Value, error) {\n\tswitch name {\n\tcase \"columns\":\n\t\treturn df.columns, nil\n\t}\n\treturn builtinAttr(df, name, dataframeMethods)\n}",
"func (*MetricsQuery) AttributeSpecifications() map[string]elemental.AttributeSpecification {\n\n\treturn MetricsQueryAttributesMap\n}",
"func attrModifier(attribute int) int {\n\treturn (attribute - 10) / 2\n}",
"func (o *IscsiInterfaceGetIterResponseResult) AttributesList() IscsiInterfaceGetIterResponseResultAttributesList {\n\tvar r IscsiInterfaceGetIterResponseResultAttributesList\n\tif o.AttributesListPtr == nil {\n\t\treturn r\n\t}\n\tr = *o.AttributesListPtr\n\treturn r\n}",
"func getAttr(selection *goquery.Selection, attrName string) (s string) {\n\ts, _ = selection.Attr(attrName)\n\treturn strings.TrimSpace(s)\n}",
"func (c *IloClient) GetIDRACAttrDell() (IDRACAttributesData, error) {\n\n\turl := c.Hostname + \"/redfish/v1/Managers/iDRAC.Embedded.1/Attributes\"\n\n\tresp, _, _, err := queryData(c, \"GET\", url, nil)\n\tif err != nil {\n\t\treturn IDRACAttributesData{}, err\n\t}\n\n\tvar x IDRACAttrDell\n\n\tjson.Unmarshal(resp, &x)\n\n\treturn x.Attributes, nil\n\n}",
"func (fs osFsEval) Llistxattr(path string) ([]string, error) {\n\treturn system.Llistxattr(path)\n}",
"func (s *Scope) ExportAttr(attrName string, varName string) {\n\ts.AttrExports = append(s.AttrExports, &AttrExport{AttrName: attrName, VarName: varName})\n}",
"func getPiAttribute(w http.ResponseWriter, r *http.Request) {\n\t// Get pi name and property/attribute from request\n\tvars := mux.Vars(r)\n\tpiname := vars[\"piname\"]\n\tpiproperty := vars[\"piattribute\"]\n\n\t// Get pi entry from data store\n\tc := appengine.NewContext(r)\n\tq := datastore.NewQuery(piListKind).Filter(\"name =\", piname)\n\tt := q.Run(c)\n\tvar pi Pi\n\t_, err := t.Next(&pi)\n\tif err == datastore.Done {\n\t\thttp.Error(w, \"404 Not found\", http.StatusNotFound)\n\t\treturn\n\t}\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Print attribute value in plain text\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tswitch piproperty {\n\tcase \"name\":\n\t\tfmt.Fprint(w, pi.Name)\n\tcase \"ip\":\n\t\tfmt.Fprint(w, pi.Ip)\n\tcase \"lastSeen\":\n\t\tfmt.Fprint(w, pi.LastSeen)\n\tcase \"pingCount\":\n\t\tfmt.Fprint(w, pi.PingCount)\n\tdefault:\n\t\thttp.Error(w, \"404 Not found\", http.StatusNotFound)\n\t}\n}",
"func attr(node *html.Node, name string) string {\n\tfor _, attr := range node.Attr {\n\t\tif attr.Key == name {\n\t\t\treturn attr.Val\n\t\t}\n\t}\n\treturn \"\"\n}",
"func (r *relation) Attribute() []metadata.Attribute {\n\treturn helper.Attribute(*r.tbl)\n}",
"func (s *BasevhdlListener) EnterAttribute_specification(ctx *Attribute_specificationContext) {}",
"func (i *Index) Attr(name string) (starlark.Value, error) {\n\tswitch name {\n\tcase \"name\":\n\t\treturn starlark.String(i.name), nil\n\tcase \"str\":\n\t\treturn &stringMethods{subject: i}, nil\n\t}\n\treturn nil, starlark.NoSuchAttrError(name)\n}",
"func (*ListDataAttributesResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dataplex_v1_data_taxonomy_proto_rawDescGZIP(), []int{13}\n}",
"func (a *Agent) identifyAttributes(img *world.Image) map[int]int {\n\tattrs := make(map[int]int)\n\n\tattrs[attrTypeColor] = img.Color\n\tattrs[attrTypeShape] = img.Shape\n\tattrs[attrTypeDistance] = util.Abs(img.XDist) + util.Abs(img.ZDist)\n\n\tif img.XDist == 0 && img.ZDist == 0 {\n\t\tattrs[attrTypeDirection] = directionOrigin\n\t} else if img.XDist > 0 && img.ZDist == 0 {\n\t\tattrs[attrTypeDirection] = directionXPos\n\t} else if img.XDist < 0 && img.ZDist == 0 {\n\t\tattrs[attrTypeDirection] = directionXNeg\n\t} else if img.XDist == 0 && img.ZDist > 0 {\n\t\tattrs[attrTypeDirection] = directionZPos\n\t} else if img.XDist == 0 && img.ZDist < 0 {\n\t\tattrs[attrTypeDirection] = directionZNeg\n\t} else if img.XDist > 0 && img.ZDist > 0 {\n\t\tattrs[attrTypeDirection] = directionXPosZPos\n\t} else if img.XDist > 0 && img.ZDist < 0 {\n\t\tattrs[attrTypeDirection] = directionXPosZNeg\n\t} else if img.XDist < 0 && img.ZDist > 0 {\n\t\tattrs[attrTypeDirection] = directionXNegZPos\n\t} else if img.XDist < 0 && img.ZDist < 0 {\n\t\tattrs[attrTypeDirection] = directionXNegZNeg\n\t}\n\n\treturn attrs\n}",
"func DecodeAttributeList(line string) map[string]string {\n\treturn decodeParamsLine(line)\n}",
"func (k2h *K2hash) GetAttrs(k string) ([]Attr, error) {\n\t// 1. retrieve an attribute using k2h_get_attrs\n\t// bool k2h_get_attrs(k2h_h handle, const unsigned char* pkey, size_t keylength, PK2HATTRPCK* ppattrspck, int* pattrspckcnt)\n\tcKey := C.CBytes([]byte(k))\n\tdefer C.free(unsafe.Pointer(cKey))\n\tvar attrpack C.PK2HATTRPCK\n\tvar attrpackCnt C.int\n\tok := C.k2h_get_attrs(\n\t\tk2h.handle,\n\t\t(*C.uchar)(cKey),\n\t\tC.size_t(len([]byte(k))+1), // plus one for a null termination\n\t\t&attrpack,\n\t\t&attrpackCnt,\n\t)\n\tdefer C.k2h_free_attrpack(attrpack, attrpackCnt) // free the memory for the keypack for myself(GC doesn't know the area)\n\n\tif ok == false {\n\t\tfmt.Println(\"C.k2h_get_attrs returns false\")\n\t\treturn []Attr{}, fmt.Errorf(\"C.k2h_get_attrs() = %v\", ok)\n\t} else if attrpackCnt == 0 {\n\t\tfmt.Printf(\"attrpackLen is zero\")\n\t\treturn []Attr{}, nil\n\t} else {\n\t\tfmt.Printf(\"attrpackLen is %v\\n\", attrpackCnt)\n\t}\n\t// 2. copy an attribute data to a slice\n\tvar CAttrs C.PK2HATTRPCK = attrpack\n\tcount := (int)(attrpackCnt)\n\tslice := (*[1 << 28]C.K2HATTRPCK)(unsafe.Pointer(CAttrs))[:count:count]\n\tfmt.Printf(\"slice size is %v\\n\", len(slice))\n\t//\n\tattrs := make([]Attr, count) // copy\n\tfor i, data := range slice {\n\t\t// copy the data with len-1 length, which exclude a null termination.\n\t\tattrkey := C.GoBytes(unsafe.Pointer(data.pkey), (C.int)(data.keylength-1))\n\t\tfmt.Printf(\"i %v data %T pkey %v length %v attrkey %v\\n\", i, data, data.pkey, data.keylength, string(attrkey))\n\t\tattrval := C.GoBytes(unsafe.Pointer(data.pval), (C.int)(data.vallength-1))\n\t\tfmt.Printf(\"i %v data %T pval %v length %v attrval %v\\n\", i, data, data.pval, data.vallength, string(attrval))\n\t\t// cast bytes to a string\n\t\tattrs[i].key = string(attrkey)\n\t\tattrs[i].val = string(attrval)\n\t}\n\treturn attrs, nil\n}",
"func (t *largeFlatTable) Attrs(ctx context.Context) TableAttrs { return t.attrs }",
"func ParseFieldAttrList(attrs string) (output FieldAttrList) {\n\tvalues := strings.Split(attrs, \",\")\n\tif len(values) == 0 {\n\t\treturn\n\t}\n\toutput = make([]FieldAttr, 0, len(values))\n\tfor i := 0; i < len(values); i++ {\n\t\tattr, ok := ParseFieldAttr(strings.TrimSpace(values[i]))\n\t\tif ok {\n\t\t\toutput = append(output, attr)\n\t\t}\n\t\t// FIXME: Handle errors\n\t}\n\treturn\n}",
"func (ft *DTDFormatter) RenderAttlist(b DTD.IDTDBlock) string {\n\tattributes := \"\\n\"\n\n\textra := b.GetExtra()\n\n\tfor _, attr := range extra.Attributes {\n\t\tattributes += ft.RenderAttribute(attr)\n\t}\n\n\treturn join(\"<!ATTLIST \", b.GetName(), \" \", attributes, \">\")\n}",
"func getMetrics() []Metric {\n\tms := make([]Metric, 0)\n\treturn append(ms, &SimpleEapMetric{})\n}",
"func (ifi *Interface) idAttrs() []netlink.Attribute {\n\treturn []netlink.Attribute{\n\t\t{\n\t\t\tType: unix.NL80211_ATTR_IFINDEX,\n\t\t\tData: nlenc.Uint32Bytes(uint32(ifi.Index)),\n\t\t},\n\t\t{\n\t\t\tType: unix.NL80211_ATTR_MAC,\n\t\t\tData: ifi.HardwareAddr,\n\t\t},\n\t}\n}",
"func (cmd *systemGetAttrCmd) Execute(_ []string) error {\n\treq := &control.SystemGetAttrReq{\n\t\tKeys: cmd.Args.Attrs.ParsedProps.ToSlice(),\n\t}\n\n\tresp, err := control.SystemGetAttr(context.Background(), cmd.ctlInvoker, req)\n\tif cmd.JSONOutputEnabled() {\n\t\treturn cmd.OutputJSON(resp, err)\n\t}\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"system get-attr failed\")\n\t}\n\n\tvar bld strings.Builder\n\tprettyPrintAttrs(&bld, resp.Attributes)\n\tcmd.Infof(\"%s\", bld.String())\n\n\treturn nil\n}",
"func (*StatsQuery) AttributeSpecifications() map[string]elemental.AttributeSpecification {\n\n\treturn StatsQueryAttributesMap\n}",
"func (o *Options) tagMetrics(rowTags []tag.Tag, addlTags []string) []string {\n\tfinalTags := make([]string, len(o.Tags), len(o.Tags)+len(rowTags)+len(addlTags))\n\tcopy(finalTags, o.Tags)\n\tfor key := range rowTags {\n\t\tfinalTags = append(finalTags,\n\t\t\trowTags[key].Key.Name()+\":\"+rowTags[key].Value)\n\t}\n\tfinalTags = append(finalTags, addlTags...)\n\treturn finalTags\n}",
"func (w *Wurfl) wurflCapabilityEnumerate(items **C.char) KeyStoreList {\n\tif items == nil || *items == nil {\n\t\treturn nil\n\t}\n\n\tm := NewKeyStireList()\n\n\tfor ; ; items = nextCharStringArrayItem(items, 2) {\n\t\tif *items == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tgname := w.getGoString(*items)\n\t\tgval := C.GoString(*nextCharStringArrayItem(items, 1))\n\n\t\tm.Push(unsafe.Pointer(*items), gname, gval)\n\t}\n\n\treturn m\n}",
"func (DeviceAttribute) Values() []DeviceAttribute {\n\treturn []DeviceAttribute{\n\t\t\"ARN\",\n\t\t\"PLATFORM\",\n\t\t\"FORM_FACTOR\",\n\t\t\"MANUFACTURER\",\n\t\t\"REMOTE_ACCESS_ENABLED\",\n\t\t\"REMOTE_DEBUG_ENABLED\",\n\t\t\"APPIUM_VERSION\",\n\t\t\"INSTANCE_ARN\",\n\t\t\"INSTANCE_LABELS\",\n\t\t\"FLEET_TYPE\",\n\t\t\"OS_VERSION\",\n\t\t\"MODEL\",\n\t\t\"AVAILABILITY\",\n\t}\n}",
"func (e *explainer) attr(nodeName, fieldName, attr string) {\n\te.entries = append(e.entries, explainEntry{\n\t\tlevel: e.level - 1,\n\t\tfield: fieldName,\n\t\tfieldVal: attr,\n\t})\n}",
"func (a adapter) Attr(key string) string {\n\treturn a.entry.GetAttributeValue(key)\n}",
"func (l *AttributeList) Len() int { return l.length }",
"func (o IscsiInterfaceGetIterResponseResultAttributesList) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}",
"func getMetrics(_ string) []string {\n\tvar (\n\t\terr error\n\t\tsession *Session\n\t\tclient orchestrator.OrchestratorClient\n\t\tres *orchestrator.ListMetricsResponse\n\t)\n\n\tif session, err = ContinueSession(); err != nil {\n\t\tfmt.Printf(\"Error while retrieving the session. Please re-authenticate.\\n\")\n\t\treturn nil\n\t}\n\n\tclient = orchestrator.NewOrchestratorClient(session)\n\n\tif res, err = client.ListMetrics(context.Background(), &orchestrator.ListMetricsRequest{}); err != nil {\n\t\treturn []string{}\n\t}\n\n\tvar metrics []string\n\tfor _, v := range res.Metrics {\n\t\tmetrics = append(metrics, fmt.Sprintf(\"%s\\t%s: %s\", v.Id, v.Name, v.Description))\n\t}\n\n\treturn metrics\n}",
"func (n *node) Attr(ctx context.Context, a *fuse.Attr) error {\n\tfi := n.te.Stat()\n\ta.Valid = 30 * 24 * time.Hour\n\ta.Inode = inodeOfEnt(n.te)\n\ta.Size = uint64(fi.Size())\n\ta.Blocks = a.Size / 512\n\ta.Mtime = fi.ModTime()\n\ta.Mode = fi.Mode()\n\ta.Uid = uint32(n.te.Uid)\n\ta.Gid = uint32(n.te.Gid)\n\tif debug {\n\t\tlog.Printf(\"attr of %s: %s\", n.te.Name, *a)\n\t}\n\treturn nil\n}",
"func getMetrics(vector model.Vector, labelName model.LabelName, f func(v float64) bool) map[string]bool {\n\tmetrics := map[string]bool{}\n\n\tfor _, elem := range vector {\n\t\tlabel := string(elem.Metric[labelName])\n\t\tvalue := float64(elem.Value)\n\t\tmetrics[label] = f(value)\n\t}\n\n\treturn metrics\n}",
"func getCapabilities(attributes bascule.Attributes) ([]string, string, error) {\n\tif attributes == nil {\n\t\treturn []string{}, UndeterminedCapabilities, ErrNilAttributes\n\t}\n\n\tval, ok := attributes.Get(CapabilityKey)\n\tif !ok {\n\t\treturn []string{}, UndeterminedCapabilities, fmt.Errorf(\"couldn't get capabilities using key %v\", CapabilityKey)\n\t}\n\n\tvals, err := cast.ToStringSliceE(val)\n\tif err != nil {\n\t\treturn []string{}, UndeterminedCapabilities, fmt.Errorf(\"capabilities \\\"%v\\\" not the expected string slice: %v\", val, err)\n\t}\n\n\tif len(vals) == 0 {\n\t\treturn []string{}, EmptyCapabilitiesList, ErrNoVals\n\t}\n\n\treturn vals, \"\", nil\n\n}",
"func GetVertexAttribLdv(index uint32, pname uint32, params *float64) {\n C.glowGetVertexAttribLdv(gpGetVertexAttribLdv, (C.GLuint)(index), (C.GLenum)(pname), (*C.GLdouble)(unsafe.Pointer(params)))\n}",
"func (o *VirtualizationVmwareVirtualMachineAllOf) GetCustomAttributes() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn o.CustomAttributes\n}",
"func (r ApiGetHyperflexCapabilityInfoListRequest) Tags(tags string) ApiGetHyperflexCapabilityInfoListRequest {\n\tr.tags = &tags\n\treturn r\n}",
"func lvCollect(ch chan<- prometheus.Metric, lvs []map[string]string, vgName string) {\n for _, lv := range lvs {\n lvSizeF, err := strconv.ParseFloat(strings.Trim(lv[\"lv_size\"], \"B\"), 64)\n if err != nil {\n log.Print(err)\n return\n }\n ch <- prometheus.MustNewConstMetric(lvSizeMetric, prometheus.GaugeValue, lvSizeF, lv[\"lv_name\"], lv[\"lv_uuid\"], vgName)\n }\n}",
"func (s *BasevhdlListener) EnterAttribute_designator(ctx *Attribute_designatorContext) {}",
"func (d *cudaDevice) GetAttributes() (map[string]interface{}, error) {\n\treturn nil, fmt.Errorf(\"GetAttributes is not supported for CUDA devices\")\n}",
"func addAttributeToRequest(name, value string, attributes *[]api.Attribute) {\n\t*attributes = append(*attributes, api.Attribute{Name: name, Value: value, ECert: true})\n}",
"func GetActiveAttrib(p Program, index uint32) (name string, size int, ty Enum) {\n\tvar length, si int32\n\tvar typ uint32\n\tname = strings.Repeat(\"\\x00\", 256)\n\tcname := gl.Str(name)\n\tgl.GetActiveAttrib(p.Value, uint32(index), int32(len(name)-1), &length, &si, &typ, cname)\n\tname = name[:strings.IndexRune(name, 0)]\n\treturn name, int(si), Enum(typ)\n}",
"func prettyPrintLabelListAttribute(labels bazel.LabelListAttribute, indent int) (string, error) {\n\t// TODO(b/165114590): convert glob syntax\n\tret, err := prettyPrint(reflect.ValueOf(labels.Value.Includes), indent)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif !labels.HasConfigurableValues() {\n\t\t// Select statements not needed.\n\t\treturn ret, nil\n\t}\n\n\t// Create the selects for arch specific values.\n\tarchSelects := map[string]reflect.Value{}\n\tfor arch, selectKey := range bazel.PlatformArchMap {\n\t\tarchSelects[selectKey] = reflect.ValueOf(labels.GetValueForArch(arch).Includes)\n\t}\n\tselectMap, err := prettyPrintSelectMap(archSelects, \"[]\", indent)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tret += selectMap\n\n\t// Create the selects for target os specific values.\n\tosSelects := map[string]reflect.Value{}\n\tfor os, selectKey := range bazel.PlatformOsMap {\n\t\tosSelects[selectKey] = reflect.ValueOf(labels.GetValueForOS(os).Includes)\n\t}\n\tselectMap, err = prettyPrintSelectMap(osSelects, \"[]\", indent)\n\treturn ret + selectMap, err\n}",
"func (s *hostAttributesCrdLister) List(selector labels.Selector) (ret []*v1beta1.HostAttributesCrd, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1beta1.HostAttributesCrd))\n\t})\n\treturn ret, err\n}",
"func gpuCapacity(self *core_v1.ResourceList) *resource.Quantity {\n\tif val, ok := (*self)[ResourceGPU]; ok {\n\t\treturn &val\n\t}\n\treturn &resource.Quantity{Format: resource.DecimalSI}\n}"
] | [
"0.58724934",
"0.5863162",
"0.57436544",
"0.57390493",
"0.54155165",
"0.5408128",
"0.5230414",
"0.51869965",
"0.51444864",
"0.51340497",
"0.50995654",
"0.5063905",
"0.50034463",
"0.4998801",
"0.4996387",
"0.4985066",
"0.49573505",
"0.4939282",
"0.4936213",
"0.4924132",
"0.49011493",
"0.48765072",
"0.4853482",
"0.48188832",
"0.4805072",
"0.47890067",
"0.47869432",
"0.4783984",
"0.47594184",
"0.4740017",
"0.47176734",
"0.47174272",
"0.469215",
"0.4689336",
"0.468913",
"0.4673431",
"0.46674094",
"0.4665206",
"0.46645764",
"0.4662035",
"0.4652202",
"0.46444863",
"0.46413925",
"0.46404302",
"0.46111825",
"0.459286",
"0.4567127",
"0.4567127",
"0.45311877",
"0.45187265",
"0.4517674",
"0.45172274",
"0.44885275",
"0.44821844",
"0.4471707",
"0.4467339",
"0.4446108",
"0.4441601",
"0.44298038",
"0.44258466",
"0.44183245",
"0.44134626",
"0.44126505",
"0.44030616",
"0.4396854",
"0.4395169",
"0.43932354",
"0.4393153",
"0.4389003",
"0.43805817",
"0.4374916",
"0.437228",
"0.43701202",
"0.436981",
"0.43697384",
"0.4365121",
"0.43607163",
"0.43557817",
"0.435375",
"0.43526414",
"0.4350787",
"0.43476456",
"0.43465185",
"0.4344858",
"0.43367416",
"0.43341798",
"0.4329211",
"0.431925",
"0.43174663",
"0.43159238",
"0.43139297",
"0.43138504",
"0.43137002",
"0.43129912",
"0.4305783",
"0.43030486",
"0.43024895",
"0.42921335",
"0.42903265",
"0.4286087"
] | 0.8314187 | 0 |
CheckCookie mocks base method | CheckCookie имитирует базовый метод | func (m *MockServiceAuth) CheckCookie(arg0 echo.Context) (int64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CheckCookie", arg0)
ret0, _ := ret[0].(int64)
ret1, _ := ret[1].(error)
return ret0, ret1
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c *Checker) HasCookie(key, expectedValue string) *Checker {\n\tvalue, exists := c.cookies[key]\n\tassert.True(c.t, exists && expectedValue == value)\n\treturn c\n}",
"func getCookie(r *http.Request, cookiename string) (bool, *http.Cookie) {\n // Ignoring error value because it is likely that the cookie might not exist here\n cookie, _ := r.Cookie(cookiename)\n if cookie == nil {\n return false, nil\n }\n return true, cookie\n}",
"func Test_Ctx_Cookie(t *testing.T) {\n\tt.Parallel()\n\tapp := New()\n\tctx := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(ctx)\n\texpire := time.Now().Add(24 * time.Hour)\n\tvar dst []byte\n\tdst = expire.In(time.UTC).AppendFormat(dst, time.RFC1123)\n\thttpdate := strings.Replace(string(dst), \"UTC\", \"GMT\", -1)\n\tctx.Cookie(&Cookie{\n\t\tName: \"username\",\n\t\tValue: \"john\",\n\t\tExpires: expire,\n\t})\n\texpect := \"username=john; expires=\" + httpdate + \"; path=/; SameSite=Lax\"\n\tutils.AssertEqual(t, expect, string(ctx.Fasthttp.Response.Header.Peek(HeaderSetCookie)))\n\n\tctx.Cookie(&Cookie{SameSite: \"strict\"})\n\tctx.Cookie(&Cookie{SameSite: \"none\"})\n}",
"func TestsetTokenCookie(t *testing.T) {\n\thand := New(nil)\n\n\twriter := httptest.NewRecorder()\n\treq := dummyGet()\n\n\ttoken := []byte(\"dummy\")\n\thand.setTokenCookie(writer, req, token)\n\n\theader := writer.Header().Get(\"Set-Cookie\")\n\texpected_part := fmt.Sprintf(\"csrf_token=%s;\", token)\n\n\tif !strings.Contains(header, expected_part) {\n\t\tt.Errorf(\"Expected header to contain %v, it doesn't. The header is %v.\",\n\t\t\texpected_part, header)\n\t}\n\n\ttokenInContext := unmaskToken(b64decode(Token(req)))\n\tif !bytes.Equal(tokenInContext, token) {\n\t\tt.Errorf(\"RegenerateToken didn't set the token in the context map!\"+\n\t\t\t\" Expected %v, got %v\", token, tokenInContext)\n\t}\n}",
"func (cook ChangeProviderPropertyCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func MockProjectSessionCookie(projectID, secret string) *http.Cookie {\n\tstore := mockCookieStore()\n\n\tr := &http.Request{}\n\tw := httptest.NewRecorder()\n\n\tsession, _ := store.Get(r, getProjectSessionNameFromString(projectID))\n\n\tsession.Values[projectSecretKeyName] = secret\n\n\terr := session.Save(r, w)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn w.Result().Cookies()[0]\n}",
"func Test_Session_Cookie(t *testing.T) {\n\tt.Parallel()\n\t// session store\n\tstore := New()\n\t// fiber instance\n\tapp := fiber.New()\n\t// fiber context\n\tctx := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(ctx)\n\n\t// get session\n\tsess, _ := store.Get(ctx)\n\tsess.Save()\n\n\t// cookie should not be set if empty data\n\tutils.AssertEqual(t, 0, len(ctx.Response().Header.PeekCookie(store.CookieName)))\n}",
"func (cook AwaitCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func (cook ConfigureProviderPropertyCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func VerifyCookie(w http.ResponseWriter, r *http.Request) bool {\n\t_, err := r.Cookie(\"session\")\n\tif err != nil {\n\t\treturn false\n\t} else {\n\t\treturn true\n\t}\n}",
"func CheckCookie(r *http.Request) bool {\r\n\t_, err := r.Cookie(envvar.CookieName())\r\n\tif err != nil {\r\n\t\treturn false\r\n\t}\r\n\treturn true\r\n}",
"func (cook DeleteProviderPropertyCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func TestCurlGetJSONCookie(t *testing.T) {\n\tcurl := curlTester{\n\t\thandler: \"curl_get_json_cookie\",\n\t\ttestName: \"TestCurlGetJSONCookie\",\n\t\ttestHandle: t,\n\t}\n\tcurl.testGetCookie()\n}",
"func (cook ChangeOutputPropertyCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func (r *TestRequest) Cookie() uint64 {\n\treturn r.cookie\n}",
"func (cook ChangeCounterCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func (p *para) checkCookie(rawCookies string) {\n\theader := http.Header{}\n\theader.Add(\"Cookie\", rawCookies)\n\trequest := http.Request{Header: header}\n\tfor _, e := range request.Cookies() {\n\t\tif strings.Contains(e.Name, \"download_warning_\") {\n\t\t\tcookie, _ := request.Cookie(e.Name)\n\t\t\tp.Code = cookie.Value\n\t\t\tbreak\n\t\t}\n\t}\n}",
"func (c *Checker) Check() *Checker {\n\n\t// set cookies\n\tc.request.Header.Set(\"Cookie\", c.generateCookieString())\n\n\trecorder := httptest.NewRecorder()\n\tc.handler.ServeHTTP(recorder, c.request)\n\n\tresp := &http.Response{\n\t\tStatusCode: recorder.Code,\n\t\tBody: NewReadCloser(recorder.Body),\n\t\tHeader: recorder.Header(),\n\t}\n\tc.handleCookies(resp)\n\tc.response = resp\n\n\treturn c\n}",
"func (cook ChangeAlarmCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func (cook SetCrtcTransformCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func Test_Ctx_Cookies(t *testing.T) {\n\tt.Parallel()\n\tapp := New()\n\tctx := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(ctx)\n\tctx.Fasthttp.Request.Header.Set(\"Cookie\", \"john=doe\")\n\tutils.AssertEqual(t, \"doe\", ctx.Cookies(\"john\"))\n\tutils.AssertEqual(t, \"default\", ctx.Cookies(\"unknown\", \"default\"))\n}",
"func (cook CreateCounterCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func (cook TriggerFenceCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func (cook AwaitFenceCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func (cook SelectInputCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func (cook DestroyModeCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func (cook ResetFenceCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func (cook CreateAlarmCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func (cook SetCounterCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func (cook SetProviderOffloadSinkCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func TestSimpleResponseGetCookieAndHeaders(t *testing.T) {\n\tr := NewSimpleResponse(\"A\", \"B\")\n\tif cookie := r.GetCookie(); cookie != nil {\n\t\tt.Fatalf(\"Unexpected cookie found. Should be nil, found: %+v.\", cookie)\n\t}\n\tif headers := r.GetHeaders(); headers != nil {\n\t\tt.Fatalf(\"Unexpected headers found. Should be nil, found: %+v.\", headers)\n\t}\n}",
"func (cook ConfigureOutputPropertyCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func VerifyCookieFromRedisHTTP(ctx *fasthttp.RequestCtx, redisClient *redis.Client) {\n\tvar err error\n\tctx.Response.Header.SetContentType(\"application/json; charset=utf-8\") // Why not ? (:\n\tlog.Debug(\"VerifyCookieFromRedisHTTP | Retrieving username ...\")\n\tuser, _ := ParseAuthenticationCoreHTTP(ctx)\n\tlog.Debug(\"VerifyCookieFromRedisHTTP | Retrieving token ...\")\n\ttoken := ParseTokenFromRequest(ctx)\n\tlog.Debug(\"VerifyCookieFromRedisHTTP | Retrieving cookie from redis ...\")\n\tauth := authutils.VerifyCookieFromRedisHTTPCore(user, token, redisClient) // Call the core function for recognize if the user have the token\n\tif strings.Compare(auth, \"AUTHORIZED\") == 0 {\n\t\terr = json.NewEncoder(ctx).Encode(datastructures.Response{Status: true, Description: \"Logged in!\", ErrorCode: auth, Data: nil})\n\t\tcommonutils.Check(err, \"VerifyCookieFromRedisHTTP\")\n\t} else {\n\t\terr = json.NewEncoder(ctx).Encode(datastructures.Response{Status: false, Description: \"Not logged in!\", ErrorCode: auth, Data: nil})\n\t\tcommonutils.Check(err, \"VerifyCookieFromRedisHTTP\")\n\t}\n}",
"func (cook SetCrtcGammaCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func (m *MockSessionManager) Check(sessionId string) (bool, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Check\", sessionId)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func TestReplaceCookie(t *testing.T) {\n\tDummyRequest := &http.Request{\n\t\tHeader: map[string][]string{\n\t\t\t\"Cookie\": {\"abcdef\"},\n\t\t},\n\t}\n\n\treplaceCookies(DummyRequest)\n\tassert.Equal(t, \"\", DummyRequest.Header.Get(Cookie))\n\tassert.Equal(t, 0, len(DummyRequest.Header[Cookie]))\n\n\tDummyRequest = &http.Request{\n\t\tHeader: map[string][]string{\n\t\t\tCookie: {},\n\t\t\tAPICookie: {\"test1\"},\n\t\t},\n\t}\n\n\treplaceCookies(DummyRequest)\n\tassert.Equal(t, \"test1\", DummyRequest.Header.Get(Cookie))\n\tassert.Equal(t, \"\", DummyRequest.Header.Get(APICookie))\n\tassert.Equal(t, 1, len(DummyRequest.Header[Cookie]))\n\tassert.Equal(t, 0, len(DummyRequest.Header[APICookie]))\n\n\tDummyRequest = &http.Request{\n\t\tHeader: map[string][]string{\n\t\t\tCookie: {},\n\t\t\tAPICookie: {\"test1\", \"test2\", \"test3\"},\n\t\t},\n\t}\n\n\treplaceCookies(DummyRequest)\n\t// Should not support multiple cookie headers\n\tassert.Equal(t, \"test1\", DummyRequest.Header.Get(Cookie))\n\tassert.Equal(t, \"\", DummyRequest.Header.Get(APICookie))\n\tassert.Equal(t, 1, len(DummyRequest.Header[Cookie]))\n\tassert.Equal(t, 0, len(DummyRequest.Header[APICookie]))\n\n\tDummyRequest = &http.Request{\n\t\tHeader: map[string][]string{\n\t\t\tCookie: {\"test0\"},\n\t\t\tAPICookie: {\"test1\", \"test2\", \"test3\"},\n\t\t},\n\t}\n\n\treplaceCookies(DummyRequest)\n\t// Original cookie should be overwritten\n\tassert.Equal(t, \"test1\", DummyRequest.Header.Get(Cookie))\n\tassert.Equal(t, \"\", DummyRequest.Header.Get(APICookie))\n\tassert.Equal(t, 1, len(DummyRequest.Header[Cookie]))\n\tassert.Equal(t, 0, len(DummyRequest.Header[APICookie]))\n\n\tDummyRequest = &http.Request{\n\t\tHeader: map[string][]string{\n\t\t\tCookie: {\"test0\", \"test1\"},\n\t\t\tAPICookie: {\"test2\", \"test3\", \"test4\"},\n\t\t},\n\t}\n\n\treplaceCookies(DummyRequest)\n\t// Should delete all original cookies\n\tassert.Equal(t, \"test2\", DummyRequest.Header.Get(Cookie))\n\tassert.Equal(t, \"\", 
DummyRequest.Header.Get(APICookie))\n\tassert.Equal(t, 1, len(DummyRequest.Header[Cookie]))\n\tassert.Equal(t, 0, len(DummyRequest.Header[APICookie]))\n\n}",
"func (cook DestroyAlarmCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func TestCookieWithOptions(t *testing.T) {\n\tRunCookieSetup(t, func(privKey *rsa.PrivateKey) {\n\n\t\tsigner := NewCookieSigner(\"keyID\", privKey)\n\t\tsigner.Path = \"/\"\n\t\tsigner.Domain = testDomain\n\t\tsigner.Secure = false\n\n\t\tif signer.keyID != \"keyID\" || signer.privKey != privKey {\n\t\t\tt.Fatalf(\"NewCookieSigner does not properly assign values %+v\", signer)\n\t\t}\n\n\t\tp := &Policy{\n\t\t\tStatements: []Statement{\n\t\t\t\t{\n\t\t\t\t\tResource: \"*\",\n\t\t\t\t\tCondition: Condition{\n\t\t\t\t\t\tDateLessThan: &AWSEpochTime{time.Now().Add(1 * time.Hour)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tcookies, err := signer.SignWithPolicy(p)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error signing cookies %#v\", err)\n\t\t}\n\t\tvalidateCookies(t, cookies, signer)\n\n\t})\n}",
"func (cook DeleteOutputPropertyCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func CheckCookieValueAndForwardWithRequestContext(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\taccessTokenCookie, err := req.Cookie(common.FlorenceCookieKey)\n\t\tif err != nil {\n\t\t\tif err != http.ErrNoCookie {\n\t\t\t\tlog.Error(req.Context(), \"unexpected error while extracting user Florence access token from cookie\", err)\n\t\t\t}\n\t\t} else {\n\t\t\treq = addUserAccessTokenToRequestContext(accessTokenCookie.Value, req)\n\t\t}\n\n\t\th.ServeHTTP(w, req)\n\t})\n}",
"func (cook CreateFenceCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func compareCookies(expectedCookie *Cookie, actualCookie *http.Cookie) (bool, []string) {\n\tcookieFound := *expectedCookie.name == actualCookie.Name\n\tcompareErrors := make([]string, 0)\n\tif cookieFound {\n\t\tcompareErrors = compareValue(expectedCookie, actualCookie, compareErrors)\n\t\tcompareErrors = compareDomain(expectedCookie, actualCookie, compareErrors)\n\t\tcompareErrors = comparePath(expectedCookie, actualCookie, compareErrors)\n\t\tcompareErrors = compareExpires(expectedCookie, actualCookie, compareErrors)\n\t\tcompareErrors = compareMaxAge(expectedCookie, actualCookie, compareErrors)\n\t\tcompareErrors = compareSecure(expectedCookie, actualCookie, compareErrors)\n\t\tcompareErrors = compareHttpOnly(expectedCookie, actualCookie, compareErrors)\n\t}\n\n\treturn cookieFound, compareErrors\n}",
"func SetCookie(w http.ResponseWriter, r *http.Request) {\r\n\tcookieName := envvar.CookieName()\r\n\tcookie, err := r.Cookie(cookieName)\r\n\tif err != nil {\r\n\t\tcookie := &http.Cookie{\r\n\t\t\tName: cookieName,\r\n\t\t\tValue: (uuid.NewV4()).String(),\r\n\t\t\tHttpOnly: true,\r\n\t\t\tPath: \"/\",\r\n\t\t\tDomain: envvar.HostAddress(),\r\n\t\t\tSecure: true,\r\n\t\t}\r\n\t\thttp.SetCookie(w, cookie)\r\n\t\tlogger.Info.Println(\"set cookie : \" + cookie.Value + \"-\" + cookieName)\r\n\t\treturn\r\n\t}\r\n\t_, found := Get(r)\r\n\tif found {\r\n\t\tRefresh(r)\r\n\t\tlogger.Info.Println(\"session refresh: \" + cookie.Value)\r\n\t\treturn\r\n\t}\r\n\tlogger.Info.Println(cookie.Value + \" already set\")\r\n\r\n\treturn\r\n}",
"func (cook DestroyCounterCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func (cook SetPriorityCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func genCookie(cookiename, value string) *http.Cookie {\n return &http.Cookie{\n Name: cookiename,\n Value: value,\n Expires: time.Now().Add(24 * time.Hour),\n }\n}",
"func (cook DestroyFenceCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func (s SimpleResponse) GetCookie() *http.Cookie {\n\treturn nil\n}",
"func (o *operation) getCookiesValid() bool {\n\tt := time.Now().UTC()\n\tfor _, p := range o.values {\n\t\tif p.cookieWriteTime.Before(t) {\n\t\t\tt = p.cookieWriteTime\n\t\t}\n\t}\n\td := time.Now().UTC().Sub(t) / time.Second\n\treturn d < o.services.config.HomeNodeTimeout\n}",
"func getNameAndCookie(res http.ResponseWriter, req *http.Request) (string, bool) {\n\tvar name string\n\tvar ok bool\n\tvar cookie, err = req.Cookie(\"uuid\")\n\n\t//correctlyLogIn - means that both cookie and name exists\n\tcorrectlyLogIn := false\n\n\t// if the cookie is set up\n\tif err == nil {\n\n\t\t// retrive the name, before the access to map, lock it\n\t\tsessionsSyncLoc.RLock()\n\t\tname, ok = sessions[cookie.Value]\n\t\tsessionsSyncLoc.RUnlock()\n\n\t\tif ok {\n\t\t\t// if the name exists, set correctllyLogIn to true\n\t\t\tcorrectlyLogIn = true\n\t\t} else {\n\t\t\t// no name so invalidate cookie\n\t\t\tinvalidateCookie(res)\n\t\t}\n\t}\n\n\treturn name, correctlyLogIn\n}",
"func (d *Cookie) Get(w http.ResponseWriter, r *http.Request, v any) (bool, error) {\n\ts, err := webmiddleware.GetSecureCookie(r.Context())\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tcookie, err := r.Cookie(d.Name)\n\tif err != nil || cookie.Value == tombstone {\n\t\treturn false, nil\n\t}\n\n\terr = s.Decode(d.Name, cookie.Value, v)\n\tif err != nil {\n\t\td.Remove(w, r)\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}",
"func (cook AddOutputModeCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func validateCookieHandler(w http.ResponseWriter, r *http.Request, conf *config) {\n\t// initialize headers\n\tw.Header().Set(\"X-Auth-Request-Redirect\", \"\")\n\tw.Header().Set(\"X-Auth-Request-User\", \"\")\n\n\tauth := r.Header.Get(\"Authorization\")\n\tsplit := strings.SplitN(auth, \" \", 2)\n\tvar tokenvalue string = \"\"\n\tif len(split) == 2 && strings.EqualFold(split[0], \"bearer\") {\n\t\ttokenvalue = split[1]\n\t} else {\n\t\ttokenCookie, err := r.Cookie(conf.cookieName)\n\t\tswitch {\n\t\tcase err == http.ErrNoCookie:\n\t\t\tw.Header().Set(\"X-Auth-Request-Redirect\", redirectURL(r, conf, r.Header.Get(\"X-Okta-Nginx-Request-Uri\")))\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\tcase err != nil:\n\t\t\tlog.Printf(\"validateCookieHandler: Error parsing cookie, %v\", err)\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\ttokenvalue = tokenCookie.Value\n\t}\n\n\tjwt, err := conf.verifier.VerifyAccessToken(tokenvalue)\n\n\tif err != nil {\n\t\tw.Header().Set(\"X-Auth-Request-Redirect\", redirectURL(r, conf, r.Header.Get(\"X-Okta-Nginx-Request-Uri\")))\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tsub, ok := jwt.Claims[\"sub\"]\n\tif !ok {\n\t\tlog.Printf(\"validateCookieHandler: Claim 'sub' not included in access token, %v\", tokenvalue)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tsubStr, ok := sub.(string)\n\tif !ok {\n\t\tlog.Printf(\"validateCookieHandler: Unable to convert 'sub' to string in access token, %v\", tokenvalue)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvalidateClaimsTemplate := strings.TrimSpace(r.Header.Get(\"X-Okta-Nginx-Validate-Claims-Template\"))\n\tif validateClaimsTemplate != \"\" {\n\t\tt, err := getTemplate(validateClaimsTemplate)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"validateCookieHandler: validateClaimsTemplate failed to parse template: '%v', error: %v\", validateClaimsTemplate, 
err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tvar resultBytes bytes.Buffer\n\t\tif err := t.Execute(&resultBytes, jwt.Claims); err != nil {\n\t\t\tclaimsJSON, _ := json.Marshal(jwt.Claims)\n\t\t\tlog.Printf(\"validateCookieHandler: validateClaimsTemplate failed to execute template: '%v', data: '%v', error: '%v'\", validateClaimsTemplate, claimsJSON, err)\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tresultString := strings.ToLower(strings.TrimSpace(resultBytes.String()))\n\n\t\tif resultString != \"true\" && resultString != \"1\" {\n\t\t\tlog.Printf(\"validateCookieHandler: validateClaimsTemplate template: '%v', result: '%v', sub: '%v'\", validateClaimsTemplate, resultString, subStr)\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t}\n\n\tsetHeaderNames := strings.Split(r.Header.Get(\"X-Okta-Nginx-Proxy-Set-Header-Names\"), \",\")\n\tsetHeaderValues := strings.Split(r.Header.Get(\"X-Okta-Nginx-Proxy-Set-Header-Values\"), \",\")\n\tif setHeaderNames[0] != \"\" && setHeaderValues[0] != \"\" && len(setHeaderNames) == len(setHeaderValues) {\n\t\tfor i := 0; i < len(setHeaderNames); i++ {\n\t\t\tt, err := getTemplate(setHeaderValues[i])\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"validateCookieHandler: setHeaderValues failed to parse template: '%v', error: %v\", validateClaimsTemplate, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar resultBytes bytes.Buffer\n\t\t\tif err := t.Execute(&resultBytes, jwt.Claims); err != nil {\n\t\t\t\tclaimsJSON, _ := json.Marshal(jwt.Claims)\n\t\t\t\tlog.Printf(\"validateCookieHandler: setHeaderValues failed to execute template: '%v', data: '%v', error: '%v'\", validateClaimsTemplate, claimsJSON, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresultString := strings.ToLower(strings.TrimSpace(resultBytes.String()))\n\n\t\t\tw.Header().Set(setHeaderNames[i], resultString)\n\t\t}\n\t}\n\n\tw.Header().Set(\"X-Auth-Request-User\", 
subStr)\n\tw.WriteHeader(http.StatusOK)\n}",
"func Test_Ctx_ClearCookie(t *testing.T) {\n\tt.Parallel()\n\tapp := New()\n\tctx := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(ctx)\n\tctx.Fasthttp.Request.Header.Set(HeaderCookie, \"john=doe\")\n\tctx.ClearCookie(\"john\")\n\tutils.AssertEqual(t, true, strings.HasPrefix(string(ctx.Fasthttp.Response.Header.Peek(HeaderSetCookie)), \"john=; expires=\"))\n\n\tctx.Fasthttp.Request.Header.Set(HeaderCookie, \"test1=dummy\")\n\tctx.Fasthttp.Request.Header.Set(HeaderCookie, \"test2=dummy\")\n\tctx.ClearCookie()\n\tutils.AssertEqual(t, true, strings.Contains(string(ctx.Fasthttp.Response.Header.Peek(HeaderSetCookie)), \"test1=; expires=\"))\n\tutils.AssertEqual(t, true, strings.Contains(string(ctx.Fasthttp.Response.Header.Peek(HeaderSetCookie)), \"test2=; expires=\"))\n}",
"func CheckTheValidityOfTheTokenFromHTTPHeader(w http.ResponseWriter, r *http.Request) (writer http.ResponseWriter, newToken string, err error) {\n err = createError(011)\n for _, cookie := range r.Cookies() {\n if cookie.Name == \"Token\" {\n var token string\n token, err = CheckTheValidityOfTheToken(cookie.Value)\n //fmt.Println(\"T\", token, err)\n writer = SetCookieToken(w, token)\n newToken = token\n }\n }\n //fmt.Println(err)\n return\n}",
"func (cook DeleteOutputModeCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func TestCurlGetJSONDigestAuthCookie(t *testing.T) {\n\tcurl := curlTester{\n\t\thandler: \"curl_get_json_cookie\",\n\t\ttestName: \"TestCurlGetJSONDigestAuthCookie\",\n\t\ttestHandle: t,\n\t}\n\tcurl.testGetDigestAuthCookie()\n}",
"func (m *MockAuthCheckerClient) Check(arg0 context.Context, arg1 *auth.SessionToken, arg2 ...grpc.CallOption) (*auth.Session, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Check\", varargs...)\n\tret0, _ := ret[0].(*auth.Session)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (req *request) Cookie() Cookie {\n return req.cookies\n}",
"func (mr *MockServiceAuthMockRecorder) CheckCookie(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CheckCookie\", reflect.TypeOf((*MockServiceAuth)(nil).CheckCookie), arg0)\n}",
"func (_m *AuthServer) Check(_a0 context.Context, _a1 *auth.SessionID) (*auth.SessionInfo, error) {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 *auth.SessionInfo\n\tif rf, ok := ret.Get(0).(func(context.Context, *auth.SessionID) *auth.SessionInfo); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*auth.SessionInfo)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, *auth.SessionID) error); ok {\n\t\tr1 = rf(_a0, _a1)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}",
"func (m *MockWebsocketAppInterface) CheckToken(userID int, csrfToken string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CheckToken\", userID, csrfToken)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}",
"func (client *HTTPClient) EnsureCookie(fromURL string, force bool) error {\n\tcURL, err := url.Parse(fromURL)\n\tif err != nil {\n\t\treturn WrapErr(err, \"failed to parse cookie URL\").With(\"url\", fromURL)\n\t}\n\tcookies := client.PersistentJar.Cookies(cURL)\n\tif force || len(cookies) == 0 {\n\t\tresp, err := client.Get(fromURL, nil)\n\t\tif err != nil {\n\t\t\treturn WrapErr(err, \"failed to fetch cookie URL\").With(\"url\", fromURL)\n\t\t}\n\t\tresp.Body.Close()\n\t}\n\treturn nil\n}",
"func readCookie(res http.ResponseWriter, req *http.Request) {\n\tcookie := readCreateCookie(req)\n\thttp.SetCookie(res, cookie) // set cookie into browser.\n\tuserInformation = cookieInformationDecoding(cookie.Value) // decode and set user state into page variable.\n}",
"func checkLogin(r *http.Request) bool {\n\t// grab the \"id\" cookie, fail if it doesn't exist\n\tcookie, err := r.Cookie(\"id\")\n\tif err == http.ErrNoCookie {\n\t\treturn false\n\t}\n\n\t// grab the \"key\" cookie, fail if it doesn't exist\n\tkey, err := r.Cookie(\"key\")\n\tif err == http.ErrNoCookie {\n\t\treturn false\n\t}\n\n\t// make sure we've got the right stuff in the hash\n\tcookieStore.RLock()\n\tdefer cookieStore.RUnlock()\n\treturn cookieStore.m[cookie.Value] == key.Value\n}",
"func (h *ResponseHeader) Cookie(cookie *Cookie) bool {\n\tv := peekArgBytes(h.cookies, cookie.Key())\n\tif v == nil {\n\t\treturn false\n\t}\n\tcookie.ParseBytes(v) //nolint:errcheck\n\treturn true\n}",
"func CheckLogin(w http.ResponseWriter, r *http.Request) bool {\n\tCookieSession, err := r.Cookie(\"sessionid\")\n\tif err != nil {\n\t\tfmt.Println(\"No Such Cookies\")\n\t\tSession.Create()\n\t\tfmt.Println(Session.ID)\n\t\tSession.Expire = time.Now().Local()\n\t\tSession.Expire.Add(time.Hour)\n\t\treturn false\n\t}\n\tfmt.Println(\"Cookki Found\")\n\ttempSession := session.UserSession{UID: 0}\n\tLoggedIn := database.QueryRow(\"select user_id from sessions where session_id = ?\",\n\t\tCookieSession).Scan(&tempSession)\n\tif LoggedIn == nil {\n\t\treturn false\n\t}\n\treturn true\n\n}",
"func newCookie() (cookie *http.Cookie) {\n\tcookie = &http.Cookie{\n\t\tName: cookieSessionName,\n\t\tValue: cookieInformationEncoding(),\n\t\tHttpOnly: true,\n\t\t//Secure: false,\n\t}\n\treturn\n}",
"func mockLoginAsUser() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tusername, err := usernameFromRequestPath(r)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: \"userkit_auth_token\",\n\t\t\tValue: fmt.Sprintf(\"dummy_usr_token__%s:dummy\", username),\n\t\t\tPath: \"/\",\n\t\t\tExpires: time.Now().Add(600 * time.Hour),\n\t\t})\n\t\tlog.Printf(\"mock logged in as %s\", username)\n\t\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n\t}\n}",
"func TestFinisher(w http.ResponseWriter, r *http.Request) {\n\tdefer timings.Track(\"TestFinisher\", time.Now(), TimingOut)\n\n\tme, err := os.Hostname()\n\tif err != nil {\n\t\tme = \"localhost\"\n\t}\n\thr := forward.HeaderRewriter{TrustForwardHeader: true, Hostname: me}\n\thr.Rewrite(r)\n\n\tw.Write([]byte(fmt.Sprintf(\"\\nProtocol: %s\\n Major: %d\\n Minor: %d\\n\", r.Proto, r.ProtoMajor, r.ProtoMinor)))\n\n\tif r.TLS != nil {\n\t\tw.Write([]byte(\"\\nTLS:\\n\"))\n\t\tw.Write([]byte(fmt.Sprintf(\" Version: %s\\n\", SslVersions.Suite(r.TLS.Version))))\n\t\tw.Write([]byte(fmt.Sprintf(\" CipherSuite: %s\\n\", Ciphers.Suite(r.TLS.CipherSuite))))\n\t}\n\n\tw.Write([]byte(\"\\nHeaders:\\n\"))\n\thkeys := make([]string, 0, len(r.Header))\n\tfor k := range r.Header {\n\t\thkeys = append(hkeys, k)\n\t}\n\tsort.Strings(hkeys)\n\n\tfor _, k := range hkeys {\n\t\tv := r.Header.Values(k)\n\n\t\tif !Conf.GetBool(ConfigDebug) && (k == \"Cookie\") {\n\t\t\tw.Write([]byte(fmt.Sprintf(\" %s: <..redacted..>\\n\", k)))\n\t\t\tcontinue\n\t\t}\n\t\tfor _, av := range v {\n\t\t\tw.Write([]byte(fmt.Sprintf(\" %s: %s\\n\", k, av)))\n\t\t}\n\t}\n\n\tw.Write([]byte(\"\\nCookies:\\n\"))\n\tfor _, v := range r.Cookies() {\n\t\tval := v.Value\n\t\tw.Write([]byte(fmt.Sprintf(\" %s: %s\\n\", v.Name, val)))\n\t}\n\n}",
"func CheckLoginByCookie(cookie BuyerCookie) bool {\n\treturn cookie.ID != 0\n}",
"func (this *BaseController) CheckXsrfCookie() bool {\n return this.Controller.CheckXSRFCookie()\n}",
"func (suite *AuthSuite) TestRedirectFromLoginGovForValidUser() {\n\t// build a real office user\n\ttioOfficeUser := factory.BuildOfficeUserWithRoles(suite.DB(), factory.GetTraitActiveOfficeUser(),\n\t\t[]roles.RoleType{roles.RoleTypeTIO})\n\n\thandlerConfig := suite.HandlerConfig()\n\tappnames := handlerConfig.AppNames()\n\n\tfakeToken := \"some_token\"\n\tsession := auth.Session{\n\t\tApplicationName: auth.OfficeApp,\n\t\tIDToken: fakeToken,\n\t\tHostname: appnames.OfficeServername,\n\t}\n\n\t// login.gov state cookie\n\tstateValue := \"someStateValue\"\n\tcookieName := StateCookieName(&session)\n\tcookie := http.Cookie{\n\t\tName: cookieName,\n\t\tValue: shaAsString(stateValue),\n\t\tPath: \"/\",\n\t\tExpires: auth.GetExpiryTimeFromMinutes(auth.SessionExpiryInMinutes),\n\t}\n\treq := httptest.NewRequest(\"GET\", fmt.Sprintf(\"http://%s/login-gov/callback?state=%s\",\n\t\tappnames.OfficeServername, stateValue), nil)\n\treq.AddCookie(&cookie)\n\n\tauthContext := suite.AuthContext()\n\n\tsessionManager := handlerConfig.SessionManagers().Office\n\treq = suite.SetupSessionRequest(req, &session, sessionManager)\n\n\tstubOfficeProvider := stubLoginGovProvider{\n\t\tStubName: officeProviderName,\n\t\tStubToken: \"stubToken\",\n\t\tStubUser: goth.User{\n\t\t\tUserID: tioOfficeUser.User.LoginGovUUID.String(),\n\t\t\tEmail: tioOfficeUser.Email,\n\t\t},\n\t}\n\tdefer goth.ClearProviders()\n\tgoth.UseProviders(&stubOfficeProvider)\n\th := CallbackHandler{\n\t\tauthContext,\n\t\thandlerConfig,\n\t\tsetUpMockNotificationSender(),\n\t}\n\n\trr := httptest.NewRecorder()\n\tsessionManager.LoadAndSave(h).ServeHTTP(rr, req)\n\n\tsuite.Equal(http.StatusTemporaryRedirect, rr.Code)\n\n\tsuite.Equal(suite.urlForHost(appnames.OfficeServername).String(),\n\t\trr.Result().Header.Get(\"Location\"))\n}",
"func setCookie(res http.ResponseWriter, req *http.Request, id string) error {\r\n\t//name of cookies = \"cookie\" for 1hr & \"CRA\" for 2yrs\r\n\tco, _ := req.Cookie(\"CRA\")\r\n\tco = &http.Cookie{\r\n\t\tName: \"CRA\",\r\n\t\tValue: id,\r\n\t\tHttpOnly: false,\r\n\t\tExpires: time.Now().AddDate(2, 0, 0),\r\n\t}\r\n\thttp.SetCookie(res, co)\r\n\t// fmt.Println(\"Htmlmain.setCookie - done with set id = \", id)\r\n\treturn nil\r\n}",
"func (l *LoginCookie) Check(value string) bool {\n\treturn l.validatorHash() == value\n}",
"func (s *BasecookieListener) EnterCookie(ctx *CookieContext) {}",
"func (cook SetScreenSizeCookie) Check() error {\n\treturn cook.Cookie.Check()\n}",
"func TestCurlPutBinaryCookie(t *testing.T) {\n\tcurl := curlTester{\n\t\thandler: \"curl_put_binary_cookie\",\n\t\ttestName: \"TestCurlPutBinaryCookie\",\n\t\ttestHandle: t,\n\t}\n\tcurl.testPutCookie()\n}",
"func verifyLogin(req *restful.Request, resp *restful.Response ) bool {\n\tcookie, err := req.Request.Cookie(\"session-id\")\n\tif cookie.Value != \"\" {\n\t\t_, exists := sessions[cookie.Value]\n\t\tif !exists {\n\t\t\thttp.Redirect(resp.ResponseWriter, req.Request, \"/\", 302)\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t} else if err != nil {\n\t\tfmt.Println(err.Error())\n\t\thttp.Redirect(resp.ResponseWriter, req.Request, \"/\", 302)\n\t\treturn false\n\t} else {\n\t\thttp.Redirect(resp.ResponseWriter, req.Request, \"/\", 302)\n\t\treturn false\n\t}\n}",
"func Benchmark_Ctx_Cookie(b *testing.B) {\n\tapp := New()\n\tc := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(c)\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tc.Cookie(&Cookie{\n\t\t\tName: \"John\",\n\t\t\tValue: \"Doe\",\n\t\t})\n\t}\n\tutils.AssertEqual(b, \"John=Doe; path=/; SameSite=Lax\", getString(c.Fasthttp.Response.Header.Peek(\"Set-Cookie\")))\n}",
"func readCreateCookie(req *http.Request) (cookie *http.Cookie) {\n\tcookie, err := req.Cookie(cookieSessionName) // get if a cookie already exists (had not expired)\n\tif err == http.ErrNoCookie {\n\t\tcookie = newCookie() // need a new cookie.\n\t}\n\treturn\n}",
"func TestCurlPostBinaryCookie(t *testing.T) {\n\tcurl := curlTester{\n\t\thandler: \"curl_post_binary_cookie\",\n\t\ttestName: \"TestCurlPostBinaryCookie\",\n\t\ttestHandle: t,\n\t}\n\tcurl.testPostCookie()\n}",
"func TestReplaceSetCookies(t *testing.T) {\n\tDummyRequest := &http.Response{\n\t\tHeader: map[string][]string{\n\t\t\tSetCookie: {\"test1=abc\", \"test2=def\", \"test3=ghi\"},\n\t\t\tAPISetCookie: {},\n\t\t},\n\t}\n\n\tsetModifiedHeaders(DummyRequest)\n\tassert.Equal(t, []string{\"test1=abc\", \"test2=def\", \"test3=ghi\"}, DummyRequest.Header[APISetCookie])\n\tassert.Equal(t, 0, len(DummyRequest.Header[SetCookie]))\n\tassert.Equal(t, []string{\"default-src 'none'; style-src 'unsafe-inline'; sandbox\"}, DummyRequest.Header[CSP])\n\tassert.Equal(t, []string{\"nosniff\"}, DummyRequest.Header[XContentType])\n\n\tDummyRequest = &http.Response{\n\t\tHeader: map[string][]string{\n\t\t\tSetCookie: {\"test1=abc\", \"test2=def\", \"test3=ghi\"},\n\t\t\tAPISetCookie: {\"test4=asdf\"},\n\t\t},\n\t}\n\n\tsetModifiedHeaders(DummyRequest)\n\t// Should delete original api set cookie\n\tassert.Equal(t, []string{\"test1=abc\", \"test2=def\", \"test3=ghi\"}, DummyRequest.Header[APISetCookie])\n\tassert.Equal(t, 0, len(DummyRequest.Header[SetCookie]))\n\tassert.Equal(t, []string{\"default-src 'none'; style-src 'unsafe-inline'; sandbox\"}, DummyRequest.Header[CSP])\n\tassert.Equal(t, []string{\"nosniff\"}, DummyRequest.Header[XContentType])\n}",
"func hashKeyTests() {\n // make a set of inputs\n input := []string{\"17d4e712c41d95d3ad6972b00c7b87bc\", \"Hph7bg5SiKgVIXyAlvLIpAOs_RV42314\"}\n\n // do a for loop over the input values\n for _, v := range input {\n fmt.Printf(\"v: %s\\n\", v)\n\n // try to invoke the cookie method\n store := cookie.NewStore([]byte(v))\n sessions.Sessions(\"pm-sesson\", store)\n \n }\n}",
"func (o *operation) getCookiesPresent() bool {\n\tz := true\n\tfor _, p := range o.values {\n\t\tz = z && (p.cookieWriteTime.IsZero() == false)\n\t}\n\treturn z\n}",
"func (a *Auth) authCookie(ctx *bm.Context) (int64, error) {\n\treq := ctx.Request\n\tsession, _ := req.Cookie(\"SESSION\")\n\tif session == nil {\n\t\treturn 0, ecode.Unauthorized\n\t}\n\t// NOTE: 请求登录鉴权服务接口,拿到对应的用户id\n\tvar mid int64\n\t// TODO: get mid from some code\n\n\t// check csrf\n\tclientCsrf := req.FormValue(\"csrf\")\n\tif a.conf != nil && !a.conf.DisableCSRF && req.Method == \"POST\" {\n\t\t// NOTE: 如果开启了CSRF认证,请从CSRF服务获取该用户关联的csrf\n\t\tvar csrf string // TODO: get csrf from some code\n\t\tif clientCsrf != csrf {\n\t\t\treturn 0, ecode.Unauthorized\n\t\t}\n\t}\n\n\treturn mid, nil\n}",
"func MakeCookies(u *url.URL, token *oauth2.Token) []*http.Cookie {\n\t// N.B. nscjar adds #HttpOnly_ for HttpOnly cookies, and these prevent\n\t// git recognize the cookies. Do not add.\n\tpath := u.Path\n\tif path == \"\" {\n\t\tpath = \"/\"\n\t}\n\t// The ending \".git\" is redundant.\n\tpath = strings.TrimSuffix(path, \".git\")\n\tif u.Host == \"googlesource.com\" {\n\t\t// Authenticate against all *.googlesource.com.\n\t\treturn []*http.Cookie{\n\t\t\t{\n\t\t\t\tName: \"o\",\n\t\t\t\tValue: token.AccessToken,\n\t\t\t\tPath: path,\n\t\t\t\tDomain: \".\" + u.Host,\n\t\t\t\tExpires: token.Expiry,\n\t\t\t\tSecure: u.Scheme == \"https\",\n\t\t\t},\n\t\t}\n\t} else if strings.HasSuffix(u.Host, \".googlesource.com\") {\n\t\t// Authenticate against both FOO.googlesource.com and\n\t\t// FOO-review.googlesource.com. These two URLs have no\n\t\t// difference.\n\t\th := strings.TrimSuffix(strings.TrimSuffix(u.Host, \".googlesource.com\"), \"-review\")\n\t\treturn []*http.Cookie{\n\t\t\t{\n\t\t\t\tName: \"o\",\n\t\t\t\tValue: token.AccessToken,\n\t\t\t\tPath: path,\n\t\t\t\tDomain: h + \".googlesource.com\",\n\t\t\t\tExpires: token.Expiry,\n\t\t\t\tSecure: u.Scheme == \"https\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"o\",\n\t\t\t\tValue: token.AccessToken,\n\t\t\t\tPath: path,\n\t\t\t\tDomain: h + \"-review.googlesource.com\",\n\t\t\t\tExpires: token.Expiry,\n\t\t\t\tSecure: u.Scheme == \"https\",\n\t\t\t},\n\t\t}\n\t}\n\treturn []*http.Cookie{\n\t\t{\n\t\t\tName: \"o\",\n\t\t\tValue: token.AccessToken,\n\t\t\tPath: path,\n\t\t\tDomain: u.Host,\n\t\t\tExpires: token.Expiry,\n\t\t\tSecure: u.Scheme == \"https\",\n\t\t},\n\t}\n}",
"func VerifyCookie(cookie string, db UserGetter) (email string, err error) {\n\temail, token, mac, err := splitCookie(cookie)\n\tif err != nil {\n\t\treturn \"\", errors.NewClient(\"Not authenticated\")\n\t}\n\n\texpectedMac, err := computeMAC([]byte(email + \"#\" + token))\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Failed to compute verification MAC\")\n\t}\n\n\tmessageMac, err := hex.DecodeString(mac)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Failed to hex decode message mac\")\n\t}\n\tif !hmac.Equal(expectedMac, messageMac) {\n\t\treturn \"\", errors.NewClient(\"Not authenticated\")\n\t}\n\n\tuser, err := db.GetUserInfo(email)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Failed to get user from database\")\n\t}\n\tif user.Token != token {\n\t\treturn \"\", errors.NewClient(\"Not authenticated\")\n\t}\n\treturn email, nil\n}",
"func SetCookieToken(w http.ResponseWriter, token string) http.ResponseWriter {\n expiration := time.Now().Add(time.Minute * time.Duration(tokenValidity))\n cookie := http.Cookie{Name: \"Token\", Value: token, Expires: expiration}\n http.SetCookie(w, &cookie)\n return w\n}",
"func TestHandler_Authorize(t *testing.T) {\n\t// Create the mock session store.\n\tvar saved bool\n\tstore := NewTestStore()\n\tsession := sessions.NewSession(store, \"\")\n\tstore.GetFunc = func(r *http.Request, name string) (*sessions.Session, error) {\n\t\treturn session, nil\n\t}\n\tstore.SaveFunc = func(r *http.Request, w http.ResponseWriter, session *sessions.Session) error {\n\t\tsaved = true\n\t\treturn nil\n\t}\n\n\t// Setup handler.\n\th := NewTestHandler()\n\th.Handler.Store = store\n\tdefer h.Close()\n\n\t// Create non-redirecting client.\n\tvar redirectURL *url.URL\n\tclient := &http.Client{\n\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\tredirectURL = req.URL\n\t\t\treturn errors.New(\"no redirects\")\n\t\t},\n\t}\n\n\t// Retrieve authorize redirect.\n\t// We should be redirected to GitHub's OAuth URL.\n\t// We should save the auth state to the session so it can be check on callback.\n\tresp, _ := client.Get(h.Server.URL + \"/_/login\")\n\tresp.Body.Close()\n\tequals(t, \"https\", redirectURL.Scheme)\n\tequals(t, \"github.com\", redirectURL.Host)\n\tequals(t, \"/login/oauth/authorize\", redirectURL.Path)\n\tequals(t, 32, len(redirectURL.Query().Get(\"state\")))\n\n\tassert(t, saved, \"expected session save\")\n\tequals(t, redirectURL.Query().Get(\"state\"), session.Values[\"AuthState\"])\n}",
"func (r *Response) Cookie(name string) *Cookie {\n\topChain := r.chain.enter(\"Cookie(%q)\", name)\n\tdefer opChain.leave()\n\n\tif opChain.failed() {\n\t\treturn newCookie(opChain, nil)\n\t}\n\n\tvar cookie *Cookie\n\n\tnames := []string{}\n\tfor _, c := range r.cookies {\n\t\tif c.Name == name {\n\t\t\tcookie = newCookie(opChain, c)\n\t\t\tbreak\n\t\t}\n\t\tnames = append(names, c.Name)\n\t}\n\n\tif cookie == nil {\n\t\topChain.fail(AssertionFailure{\n\t\t\tType: AssertContainsElement,\n\t\t\tActual: &AssertionValue{names},\n\t\t\tExpected: &AssertionValue{name},\n\t\t\tErrors: []error{\n\t\t\t\terrors.New(\"expected: response contains cookie with given name\"),\n\t\t\t},\n\t\t})\n\t\treturn newCookie(opChain, nil)\n\t}\n\n\treturn cookie\n}",
"func (suite *AuthSuite) TestRedirectFromLoginGovForInvalidUser() {\n\t// build a real office user\n\ttioOfficeUser := factory.BuildOfficeUserWithRoles(suite.DB(), nil, []roles.RoleType{roles.RoleTypeTIO})\n\tsuite.False(tioOfficeUser.Active)\n\n\thandlerConfig := suite.HandlerConfig()\n\tappnames := handlerConfig.AppNames()\n\n\tfakeToken := \"some_token\"\n\tsession := auth.Session{\n\t\tApplicationName: auth.OfficeApp,\n\t\tIDToken: fakeToken,\n\t\tHostname: appnames.OfficeServername,\n\t}\n\n\t// login.gov state cookie\n\tstateValue := \"someStateValue\"\n\tcookieName := StateCookieName(&session)\n\tcookie := http.Cookie{\n\t\tName: cookieName,\n\t\tValue: shaAsString(stateValue),\n\t\tPath: \"/\",\n\t\tExpires: auth.GetExpiryTimeFromMinutes(auth.SessionExpiryInMinutes),\n\t}\n\treq := httptest.NewRequest(\"GET\", fmt.Sprintf(\"http://%s/login-gov/callback?state=%s\",\n\t\tappnames.OfficeServername, stateValue), nil)\n\treq.AddCookie(&cookie)\n\n\tauthContext := suite.AuthContext()\n\n\tsessionManager := handlerConfig.SessionManagers().Office\n\treq = suite.SetupSessionRequest(req, &session, sessionManager)\n\n\tstubOfficeProvider := stubLoginGovProvider{\n\t\tStubName: officeProviderName,\n\t\tStubToken: \"stubToken\",\n\t\tStubUser: goth.User{\n\t\t\tUserID: tioOfficeUser.User.LoginGovUUID.String(),\n\t\t\tEmail: tioOfficeUser.Email,\n\t\t},\n\t}\n\tdefer goth.ClearProviders()\n\tgoth.UseProviders(&stubOfficeProvider)\n\th := CallbackHandler{\n\t\tauthContext,\n\t\thandlerConfig,\n\t\tsetUpMockNotificationSender(),\n\t}\n\n\trr := httptest.NewRecorder()\n\tsessionManager.LoadAndSave(h).ServeHTTP(rr, req)\n\n\tsuite.Equal(http.StatusTemporaryRedirect, rr.Code)\n\n\tu := suite.urlForHost(appnames.OfficeServername)\n\tu.Path = \"/invalid-permissions\"\n\tsuite.Equal(u.String(), rr.Result().Header.Get(\"Location\"))\n}",
"func GenCookie(username string) http.Cookie {\n // generate random 50 byte string to use as a session cookie\n randomValue := make([]byte, COOKIE_LENGTH)\n rand.Read(randomValue)\n cookieValue := strings.ToLower(username) + \":\" + fmt.Sprintf(\"%X\", randomValue)\n expire := time.Now().AddDate(0, 0, 1)\n return http.Cookie{Name: \"SessionID\", Value: cookieValue, Expires: expire, HttpOnly: true}\n}",
"func MakeCookie(req *http.Request, name string, value string, path string, domain string, httpOnly bool, secure bool, expiration time.Duration, now time.Time, sameSite http.SameSite) *http.Cookie {\n\tif domain != \"\" {\n\t\thost := requestutil.GetRequestHost(req)\n\t\tif h, _, err := net.SplitHostPort(host); err == nil {\n\t\t\thost = h\n\t\t}\n\t\tif !strings.HasSuffix(host, domain) {\n\t\t\tlogger.Errorf(\"Warning: request host is %q but using configured cookie domain of %q\", host, domain)\n\t\t}\n\t}\n\n\treturn &http.Cookie{\n\t\tName: name,\n\t\tValue: value,\n\t\tPath: path,\n\t\tDomain: domain,\n\t\tHttpOnly: httpOnly,\n\t\tSecure: secure,\n\t\tExpires: now.Add(expiration),\n\t\tSameSite: sameSite,\n\t}\n}",
"func TestCurlDeleteBinaryCookie(t *testing.T) {\n\tcurl := curlTester{\n\t\thandler: \"curl_delete_binary_cookie\",\n\t\ttestName: \"TestCurlDeleteBinaryCookie\",\n\t\ttestHandle: t,\n\t}\n\tcurl.testDeleteCookie()\n}",
"func (m *BaseMethod) GetCookies() []KVPair {\n\treturn m.Cookies\n}",
"func (h *RequestHeader) SetCookie(key, value string) {\n\th.collectCookies()\n\th.cookies = setArg(h.cookies, key, value, argsHasValue)\n}",
"func TestCorrectTokenPasses(t *testing.T) {\n\thand := New(http.HandlerFunc(succHand))\n\thand.SetFailureHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tt.Errorf(\"Test failed. Reason: %v\", Reason(r))\n\t}))\n\n\tserver := httptest.NewServer(hand)\n\tdefer server.Close()\n\n\t// issue the first request to get the token\n\tresp, err := http.Get(server.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcookie := getRespCookie(resp, CookieName)\n\tif cookie == nil {\n\t\tt.Fatal(\"Cookie was not found in the response.\")\n\t}\n\n\tfinalToken := b64encode(maskToken(b64decode(cookie.Value)))\n\n\tvals := [][]string{\n\t\t{\"name\", \"Jolene\"},\n\t\t{FormFieldName, finalToken},\n\t}\n\n\t// Constructing a custom request is suffering\n\treq, err := http.NewRequest(\"POST\", server.URL, formBodyR(vals))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\treq.AddCookie(cookie)\n\n\tresp, err = http.DefaultClient.Do(req)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif resp.StatusCode != 200 {\n\t\tt.Errorf(\"The request should have succeeded, but it didn't. Instead, the code was %d\",\n\t\t\tresp.StatusCode)\n\t}\n}",
"func (cookie *Cookie) SetCookieOnResponse(w http.ResponseWriter, setSiteCookie bool, cfg *config.HostCookie, ttl time.Duration) {\n\thttpCookie := cookie.ToHTTPCookie(ttl)\n\tvar domain string = cfg.Domain\n\thttpCookie.Secure = true\n\n\tif domain != \"\" {\n\t\thttpCookie.Domain = domain\n\t}\n\n\tvar currSize int = len([]byte(httpCookie.String()))\n\tfor cfg.MaxCookieSizeBytes > 0 && currSize > cfg.MaxCookieSizeBytes && len(cookie.uids) > 0 {\n\t\tvar oldestElem string = \"\"\n\t\tvar oldestDate int64 = math.MaxInt64\n\t\tfor key, value := range cookie.uids {\n\t\t\ttimeUntilExpiration := time.Until(value.Expires)\n\t\t\tif timeUntilExpiration < time.Duration(oldestDate) {\n\t\t\t\toldestElem = key\n\t\t\t\toldestDate = int64(timeUntilExpiration)\n\t\t\t}\n\t\t}\n\t\tdelete(cookie.uids, oldestElem)\n\t\thttpCookie = cookie.ToHTTPCookie(ttl)\n\t\tif domain != \"\" {\n\t\t\thttpCookie.Domain = domain\n\t\t}\n\t\tcurrSize = len([]byte(httpCookie.String()))\n\t}\n\n\tif setSiteCookie {\n\t\t// httpCookie.Secure = true\n\t\thttpCookie.SameSite = http.SameSiteNoneMode\n\t}\n\tw.Header().Add(\"Set-Cookie\", httpCookie.String())\n}",
"func addCookie(context echo.Context, authToken string) {\n\texpire := time.Now().AddDate(0, 1, 0) // 1 month\n\tcookie := &http.Cookie{\n\t\tName: \"token\",\n\t\tExpires: expire,\n\t\tValue: auth.Bearer + \" \" + authToken,\n\t\tPath: \"/\",\n\t\t// Domain must not be set for auth to work with chrome without domain name\n\t\t// http://stackoverflow.com/questions/5849013/setcookie-does-not-set-cookie-in-google-chrome\n\t}\n\tcontext.Response().Header().Set(\"Set-Cookie\", cookie.String())\n}"
] | [
"0.612215",
"0.6105234",
"0.60702807",
"0.60390204",
"0.59975195",
"0.59955245",
"0.5935115",
"0.5835304",
"0.582638",
"0.5762133",
"0.5701",
"0.5612771",
"0.5557298",
"0.5547791",
"0.5532456",
"0.55284435",
"0.55165887",
"0.551043",
"0.5466343",
"0.5459785",
"0.54391205",
"0.5412877",
"0.54127556",
"0.5403051",
"0.5389258",
"0.5381768",
"0.5363239",
"0.5352785",
"0.5337785",
"0.53131497",
"0.5305929",
"0.5298557",
"0.5286763",
"0.5279745",
"0.52513736",
"0.52499187",
"0.5228671",
"0.5212168",
"0.5208756",
"0.5197401",
"0.5189375",
"0.5169944",
"0.5163953",
"0.5160995",
"0.5156985",
"0.5145315",
"0.51055425",
"0.5095735",
"0.5070066",
"0.5067468",
"0.50641376",
"0.50591505",
"0.5054653",
"0.5038324",
"0.50361234",
"0.5034454",
"0.50059223",
"0.499248",
"0.49902675",
"0.4983272",
"0.4975681",
"0.49714902",
"0.49697945",
"0.49602708",
"0.49545363",
"0.49206105",
"0.4911761",
"0.491076",
"0.49086508",
"0.49048612",
"0.48453903",
"0.48428515",
"0.48406926",
"0.48398492",
"0.48387453",
"0.48354444",
"0.4814976",
"0.48089054",
"0.4791038",
"0.4788873",
"0.47864002",
"0.4778605",
"0.47681728",
"0.4757796",
"0.47565827",
"0.4755019",
"0.47332767",
"0.47172338",
"0.4714682",
"0.47117633",
"0.46930724",
"0.4681104",
"0.46797362",
"0.46791983",
"0.46673313",
"0.46653488",
"0.46638966",
"0.46617523",
"0.46586543",
"0.46515876"
] | 0.731943 | 0 |
Login mocks base method | Метод логина мокирует базовый метод | func (m *MockServiceAuth) Login(arg0 models.UserInputLogin) (models.UserSession, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Login", arg0)
ret0, _ := ret[0].(models.UserSession)
ret1, _ := ret[1].(error)
return ret0, ret1
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (m *MockHandler) Login(arg0 http.ResponseWriter, arg1 *http.Request) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Login\", arg0, arg1)\n}",
"func (m *MockAuthService) Login(arg0 http.ResponseWriter, arg1 *http.Request) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Login\", arg0, arg1)\n}",
"func TestLogin(w http.ResponseWriter, r *http.Request) {\n\n\tauthHeader := r.Header.Get(\"Authorization\")\n\tcookies := r.Cookies()\n\tvar token string\n\tfor _, c := range cookies {\n\t\tif c.Name == \"token\" {\n\t\t\ttoken = c.Value\n\t\t}\n\t}\n\n\tvar accessToken string\n\t// header value format will be \"Bearer <token>\"\n\tif authHeader != \"\" {\n\t\tif !strings.HasPrefix(authHeader, \"Bearer \") {\n\t\t\tlog.Errorf(\"GetMyIdentities Failed to find Bearer token %v\", authHeader)\n\t\t\tReturnHTTPError(w, r, http.StatusUnauthorized, \"Unauthorized, please provide a valid token\")\n\t\t\treturn\n\t\t}\n\t\taccessToken = strings.TrimPrefix(authHeader, \"Bearer \")\n\t}\n\n\tbytes, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Errorf(\"TestLogin failed with error: %v\", err)\n\t\tReturnHTTPError(w, r, http.StatusBadRequest, \"Bad Request, Please check the request content\")\n\t\treturn\n\t}\n\tvar testAuthConfig model.TestAuthConfig\n\n\terr = json.Unmarshal(bytes, &testAuthConfig)\n\tif err != nil {\n\t\tlog.Errorf(\"TestLogin unmarshal failed with error: %v\", err)\n\t\tReturnHTTPError(w, r, http.StatusBadRequest, \"Bad Request, Please check the request content\")\n\t\treturn\n\t}\n\n\tif testAuthConfig.AuthConfig.Provider == \"\" {\n\t\tlog.Errorf(\"UpdateConfig: Provider is a required field\")\n\t\tReturnHTTPError(w, r, http.StatusBadRequest, \"Bad request, Provider is a required field\")\n\t\treturn\n\t}\n\n\tstatus, err := server.TestLogin(testAuthConfig, accessToken, token)\n\tif err != nil {\n\t\tlog.Errorf(\"TestLogin GetProvider failed with error: %v\", err)\n\t\tif status == 0 {\n\t\t\tstatus = http.StatusInternalServerError\n\t\t}\n\t\tReturnHTTPError(w, r, status, fmt.Sprintf(\"%v\", err))\n\t}\n}",
"func TestLoginWrapper_LoggedIn(t *testing.T) {\n\tw, r, c := initTestRequestParams(t, &user.User{Email: \"test@example.com\"})\n\tdefer c.Close()\n\n\tdummyLoginHandler(&requestParams{w: w, r: r, c: c})\n\n\texpectCode(t, http.StatusOK, w)\n\texpectBody(t, \"test@example.com\", w)\n}",
"func (m *MockUseCase) Login(ctx context.Context, username, password string) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Login\", ctx, username, password)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (m *MockEmployeeUseCase) Login(c context.Context, companyId, employeeNo, password, secretKey string) (*model.AuthResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Login\", c, companyId, employeeNo, password, secretKey)\n\tret0, _ := ret[0].(*model.AuthResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (m *MockAdmin) Login(ctx provider.Context, request entity.Login) (entity.LoginResponse, *entity.ApplicationError) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Login\", ctx, request)\n\tret0, _ := ret[0].(entity.LoginResponse)\n\tret1, _ := ret[1].(*entity.ApplicationError)\n\treturn ret0, ret1\n}",
"func (m *MockClient) Login(ctx context.Context, username, password string) (*oauth2.Token, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Login\", ctx, username, password)\n\tret0, _ := ret[0].(*oauth2.Token)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (m *MockUsersService) Login(arg0 context.Context, arg1 models.UserLoginRequest) (models.UserLoginResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Login\", arg0, arg1)\n\tret0, _ := ret[0].(models.UserLoginResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (_m *Repository) Login(email string) (entity.User, error) {\n\tret := _m.Called(email)\n\n\tvar r0 entity.User\n\tif rf, ok := ret.Get(0).(func(string) entity.User); ok {\n\t\tr0 = rf(email)\n\t} else {\n\t\tr0 = ret.Get(0).(entity.User)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(email)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}",
"func (m *MockHandler) UserLogin(email, password string) (*domain.User, string, error) {\n\tret := m.ctrl.Call(m, \"UserLogin\", email, password)\n\tret0, _ := ret[0].(*domain.User)\n\tret1, _ := ret[1].(string)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}",
"func (m *MockUserService) Login(ctx context.Context, user *domain.User) (*domain.UserToken, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Login\", ctx, user)\n\tret0, _ := ret[0].(*domain.UserToken)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (_m *ServerConnexion) Login(sUsername string, sPwd string) error {\n\tret := _m.Called(sUsername, sPwd)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string, string) error); ok {\n\t\tr0 = rf(sUsername, sPwd)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (_m *Remote) Login(w http.ResponseWriter, r *http.Request) (*model.User, error) {\n\tret := _m.Called(w, r)\n\n\tvar r0 *model.User\n\tif rf, ok := ret.Get(0).(func(http.ResponseWriter, *http.Request) *model.User); ok {\n\t\tr0 = rf(w, r)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*model.User)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(http.ResponseWriter, *http.Request) error); ok {\n\t\tr1 = rf(w, r)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}",
"func (_m *Asconn) Login(_a0 *aerospike.ClientPolicy) aerospike.Error {\n\tret := _m.Called(_a0)\n\n\tvar r0 aerospike.Error\n\tif rf, ok := ret.Get(0).(func(*aerospike.ClientPolicy) aerospike.Error); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(aerospike.Error)\n\t\t}\n\t}\n\n\treturn r0\n}",
"func (m *MockUserLogic) UserLogin(email, password string) (*domain.User, string, error) {\n\tret := m.ctrl.Call(m, \"UserLogin\", email, password)\n\tret0, _ := ret[0].(*domain.User)\n\tret1, _ := ret[1].(string)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}",
"func (_m *Handler) Login(c echo.Context) error {\n\tret := _m.Called(c)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(echo.Context) error); ok {\n\t\tr0 = rf(c)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func TestAuthenticate_Success(t *testing.T) {\n\tdb, mock, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"an error '%s' was not expected when opening a stub database connection\", err)\n\t}\n\tdefer db.Close()\n\n\tuser := models.User{\n\t\tID: 1,\n\t\tEmail: \"personia@personio.com\",\n\t\tPassword: \"personia\",\n\t}\n\n\trows := sqlmock.NewRows([]string{\"id\", \"email\"}).AddRow(user.ID, user.Email)\n\tmock.ExpectQuery(regexp.QuoteMeta(constants.LoginDetailsSelectQuery)).WithArgs(user.Email, user.Password).WillReturnRows(rows)\n\n\tloginRepository := NewLoginRepository(db)\n\n\tloginModel := &models.Login{\n\t\tEmail: \"personia@personio.com\",\n\t\tPassword: \"personia\",\n\t}\n\n\tcntx := context.Background()\n\tdbuser, err := loginRepository.Authenticate(cntx, loginModel)\n\tassert.Nil(t, err)\n\tassert.Equal(t, user.ID, dbuser.ID)\n\tassert.Equal(t, user.Email, dbuser.Email)\n}",
"func (h *TestAuth) Login(w http.ResponseWriter, req *http.Request) {\n\n\tresponse := make(map[string]string, 5)\n\n\tresponse[\"state\"] = authz.AuthNewPasswordRequired\n\tresponse[\"access_token\"] = \"access\"\n\t//response[\"id_token\"] = *authResult.IdToken\n\tresponse[\"refresh_token\"] = \"refersh\"\n\tresponse[\"expires\"] = \"3600\"\n\tresponse[\"token_type\"] = \"Bearer\"\n\trespData, _ := json.Marshal(response)\n\tw.WriteHeader(200)\n\tfmt.Fprint(w, string(respData))\n}",
"func (_m *AuthServer) Login(_a0 context.Context, _a1 *auth.Credentials) (*auth.SessionInfo, error) {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 *auth.SessionInfo\n\tif rf, ok := ret.Get(0).(func(context.Context, *auth.Credentials) *auth.SessionInfo); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*auth.SessionInfo)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, *auth.Credentials) error); ok {\n\t\tr1 = rf(_a0, _a1)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}",
"func mockLoginAsUser() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tusername, err := usernameFromRequestPath(r)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: \"userkit_auth_token\",\n\t\t\tValue: fmt.Sprintf(\"dummy_usr_token__%s:dummy\", username),\n\t\t\tPath: \"/\",\n\t\t\tExpires: time.Now().Add(600 * time.Hour),\n\t\t})\n\t\tlog.Printf(\"mock logged in as %s\", username)\n\t\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n\t}\n}",
"func TestAuthenticate_Fail(t *testing.T) {\n\tdb, mock, err := sqlmock.New()\n\tif err != nil {\n\t\tt.Fatalf(\"an error '%s' was not expected when opening a stub database connection\", err)\n\t}\n\tdefer db.Close()\n\n\tuser := models.User{\n\t\tID: 1,\n\t\tEmail: \"personia@personio.com\",\n\t\tPassword: \"personia\",\n\t}\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(constants.LoginDetailsSelectQuery)\n\n\trows := sqlmock.NewRows([]string{\"id\", \"email\"})\n\tmock.ExpectQuery(regexp.QuoteMeta(buffer.String())).WithArgs(user.Email, user.Password).WillReturnRows(rows)\n\n\tloginRepository := NewLoginRepository(db)\n\n\tloginModel := &models.Login{\n\t\tEmail: \"personia@personio.com\",\n\t\tPassword: \"personia\",\n\t}\n\n\tcntx := context.Background()\n\t_, err = loginRepository.Authenticate(cntx, loginModel)\n\tassert.NotNil(t, err)\n}",
"func TestAPILoginUser(t *testing.T) {\n\tpayload := []byte(`{\"username\":\"admin\", \"password\":\"admin1234\"}`)\n\n\treq, _ := http.NewRequest(\"POST\", \"/api/login\", bytes.NewBuffer(payload))\n\tresp := executeRequest(req)\n\n\tcheckResponseCode(t, http.StatusOK, resp.Code)\n\n\tvar m map[string]interface{}\n\tjson.Unmarshal(resp.Body.Bytes(), &m)\n\n\tassert.NotEqual(t, m[\"accessToken\"], \"\")\n}",
"func TestLoginWrapper_LoggedOut(t *testing.T) {\n\tw, r, c := initTestRequestParams(t, nil)\n\tdefer c.Close()\n\n\tdummyLoginHandler(&requestParams{w: w, r: r, c: c})\n\n\texpectCode(t, http.StatusUnauthorized, w)\n\texpectBody(t, \"\", w)\n}",
"func (c *Client) Login() error {\n\n\tversionStruct, err := CreateAPIVersion(1, 9, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\tapiStruct, err := CreateAPISession(versionStruct, \"\", \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Create a Resty Client\n\tc.restClient = resty.New()\n\tc.restClient.\n\t\tSetTimeout(time.Duration(30 * time.Second)).\n\t\tSetRetryCount(3).\n\t\tSetRetryWaitTime(5 * time.Second).\n\t\tSetRetryMaxWaitTime(20 * time.Second)\n\n\tresp, err := c.restClient.R().\n\t\tSetHeader(\"Content-Type\", \"application/json\").\n\t\tSetBody(apiStruct).\n\t\tPost(c.url + \"/session\")\n\n\tresult := resp.Body()\n\tvar resultdat map[string]interface{}\n\tif err = json.Unmarshal(result, &resultdat); err != nil { //convert the json to go objects\n\t\treturn err\n\t}\n\n\tif resultdat[\"status\"].(string) == \"ERROR\" {\n\t\terrorMessage := string(result)\n\t\terr = fmt.Errorf(errorMessage)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tresp, err = c.restClient.R().\n\t\tSetHeader(\"Content-Type\", \"application/json\").\n\t\tSetResult(AuthSuccess{}).\n\t\tSetBody(LoginRequestStruct{\n\t\t\tType: \"LoginRequest\",\n\t\t\tUsername: c.username,\n\t\t\tPassword: c.password,\n\t\t}).\n\t\tPost(c.url + \"/login\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif http.StatusOK != resp.StatusCode() {\n\t\terr = fmt.Errorf(\"Delphix Username/Password incorrect\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tresult = resp.Body()\n\tif err = json.Unmarshal(result, &resultdat); err != nil { //convert the json to go objects\n\t\treturn err\n\t}\n\n\tif resultdat[\"status\"].(string) == \"ERROR\" {\n\t\terrorMessage := string(result)\n\t\tlog.Fatalf(errorMessage)\n\t\terr = fmt.Errorf(errorMessage)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t\n\treturn nil\n}",
"func (m *MockIWXClient) WXLogin(arg0, arg1, arg2 string) (wx.LoginResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"WXLogin\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(wx.LoginResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (m *MockClient) LoginLongLived(ctx context.Context, username, password string) (*oauth2.Token, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"LoginLongLived\", ctx, username, password)\n\tret0, _ := ret[0].(*oauth2.Token)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (h *AuthHandlers) Login(w http.ResponseWriter, req *http.Request) {\n\tvar err error\n\tvar data []byte\n\n\tsystemContext, err := h.getSystemContext(req)\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"request context retrevial failure\")\n\t\tmiddleware.ReturnError(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tif data, err = ioutil.ReadAll(req.Body); err != nil {\n\t\tlog.Error().Err(err).Msg(\"read body error\")\n\t\tmiddleware.ReturnError(w, \"error reading login data\", 500)\n\t\treturn\n\t}\n\tdefer req.Body.Close()\n\n\tloginDetails := &authz.LoginDetails{}\n\tif err := json.Unmarshal(data, loginDetails); err != nil {\n\t\tlog.Error().Err(err).Msg(\"marshal body error\")\n\t\tmiddleware.ReturnError(w, \"error reading login data\", 500)\n\t\treturn\n\t}\n\n\tif err := h.validate.Struct(loginDetails); err != nil {\n\t\tmiddleware.ReturnError(w, \"validation failure \"+err.Error(), 500)\n\t\treturn\n\t}\n\tloginDetails.OrgName = strings.ToLower(loginDetails.OrgName)\n\tloginDetails.Username = strings.ToLower(loginDetails.Username)\n\n\tlog.Info().Str(\"org\", loginDetails.OrgName).Str(\"user\", loginDetails.Username).Msg(\"login attempt\")\n\n\torgData, err := h.getOrgByName(req.Context(), systemContext, loginDetails.OrgName)\n\tif err != nil {\n\t\tlog.Error().Err(err).Str(\"org\", loginDetails.OrgName).Str(\"user\", loginDetails.Username).Msg(\"failed to get organization from name\")\n\t\tmiddleware.ReturnError(w, \"login failed\", 403)\n\t\treturn\n\t}\n\n\tresults, err := h.authenticator.Login(req.Context(), orgData, loginDetails)\n\tif err != nil {\n\t\tlog.Error().Err(err).Str(\"org\", loginDetails.OrgName).Str(\"user\", loginDetails.Username).Msg(\"login failed\")\n\t\tif req.Context().Err() != nil {\n\t\t\tmiddleware.ReturnError(w, \"internal server error\", 500)\n\t\t\treturn\n\t\t}\n\t\tmiddleware.ReturnError(w, \"login failed\", 403)\n\t\treturn\n\t}\n\t// add subscription id to response\n\tresults[\"subscription_id\"] = fmt.Sprintf(\"%d\", 
orgData.SubscriptionID)\n\n\trespData, err := json.Marshal(results)\n\tif err != nil {\n\t\tmiddleware.ReturnError(w, \"marshal auth response failed\", 500)\n\t\treturn\n\t}\n\n\tlog.Info().Str(\"org\", loginDetails.OrgName).Str(\"user\", loginDetails.Username).Str(\"OrgCID\", orgData.OrgCID).Msg(\"setting orgCID in cookie\")\n\tif err := h.secureCookie.SetAuthCookie(w, results[\"access_token\"], orgData.OrgCID, orgData.SubscriptionID); err != nil {\n\t\tmiddleware.ReturnError(w, \"internal cookie failure\", 500)\n\t\treturn\n\t}\n\tw.WriteHeader(200)\n\tfmt.Fprint(w, string(respData))\n}",
"func (_m *Forge) Login(ctx context.Context, w http.ResponseWriter, r *http.Request) (*model.User, error) {\n\tret := _m.Called(ctx, w, r)\n\n\tvar r0 *model.User\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, http.ResponseWriter, *http.Request) (*model.User, error)); ok {\n\t\treturn rf(ctx, w, r)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, http.ResponseWriter, *http.Request) *model.User); ok {\n\t\tr0 = rf(ctx, w, r)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*model.User)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, http.ResponseWriter, *http.Request) error); ok {\n\t\tr1 = rf(ctx, w, r)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}",
"func (u *User) Login(context.Context, *contract.LoginRequest) (*contract.LoginResponse, error) {\n\tpanic(\"not implemented\")\n}",
"func (s *Mortgageplatform) Login(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {\n\tres, err := login(APIstub, args)\n\tif err != nil { return shim.Error(err.Error()) }\n\treturn shim.Success(res)\n}",
"func (bap *BaseAuthProvider) Login(ctx *RequestCtx) (user User, reason error) {\n\t// try to verify credential\n\targs := ctx.Args\n\tusername := B2S(args.Peek(\"username\"))\n\tpassword := B2S(args.Peek(\"password\"))\n\tif len(username) == 0 && len(password) == 0 {\n\t\t//recover session from token\n\t\tuser := bap.UserFromRequest(ctx.Ctx)\n\t\tif user != nil {\n\t\t\treturn user, nil\n\t\t} else {\n\t\t\tfmt.Println(\"#1 errorWrongUsername\")\n\t\t\treturn nil, errorWrongUsername\n\t\t}\n\t} else if len(username) > 0 {\n\t\t// retrieve user's master data from db\n\t\t//log.Printf(\"Verify password by rich userobj in %T\\n\", bap.AccountProvider)\n\t\tvar userInDB User\n\t\tuserInDB = bap.AccountProvider.GetUser(username)\n\t\tif userInDB == nil {\n\t\t\tWriteToCookie(ctx.Ctx, AuthTokenName, \"\")\n\t\t\tfmt.Println(\"#2 errorWrongUsername\")\n\t\t\treturn nil, errorWrongUsername\n\t\t} else {\n\t\t\tif userInDB.Disabled() {\n\t\t\t\treturn nil, errorUserDisabled\n\t\t\t} else if !userInDB.Activated() {\n\t\t\t\treturn nil, errorUserInactivated\n\t\t\t}\n\t\t\tif ok := bap.FuCheckPassword(userInDB, password); ok {\n\t\t\t\tuserInDB.SetToken(DefaultTokenGenerator(userInDB.Username()))\n\t\t\t\t(*bap.Mutex).Lock()\n\t\t\t\tuserInDB.Touch()\n\t\t\t\tbap.TokenCache[userInDB.Token()] = userInDB\n\t\t\t\tbap.TokenToUsername.SetString(userInDB.Token(), []byte(userInDB.Username()))\n\t\t\t\t(*bap.Mutex).Unlock()\n\n\t\t\t\tWriteToCookie(ctx.Ctx, AuthTokenName, userInDB.Token())\n\t\t\t\treturn userInDB, nil\n\t\t\t}\n\t\t\tWriteToCookie(ctx.Ctx, AuthTokenName, \"\")\n\t\t\treturn nil, errorWrongPassword\n\t\t}\n\t}\n\tWriteToCookie(ctx.Ctx, AuthTokenName, \"\")\n\tfmt.Println(\"#3 errorWrongUsername\")\n\treturn nil, errorWrongUsername\n}",
"func (_m *IUserService) Login(login *model.LoginForm) (*model.User, error) {\n\tret := _m.Called(login)\n\n\tvar r0 *model.User\n\tif rf, ok := ret.Get(0).(func(*model.LoginForm) *model.User); ok {\n\t\tr0 = rf(login)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*model.User)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(*model.LoginForm) error); ok {\n\t\tr1 = rf(login)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}",
"func Test_Login_MultiLogin(t *testing.T) {\n\tgSession = nil\n\tsession1, err := login(TestValidUser)\n\tif session1 == nil || err != nil {\n\t\tt.Error(\"fail at login\")\n\t}\n\tsession2, err := login(TestValidUser)\n\tif err != nil {\n\t\tt.Error(\"fail at login\")\n\t}\n\tif session1 != session2 {\n\t\tt.Error(\"multi login should get same session\")\n\t}\n}",
"func (h UserRepos) Login(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {\n\n\tctxValues, err := webcontext.ContextValues(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t//\n\treq := new(UserLoginRequest)\n\tdata := make(map[string]interface{})\n\tf := func() (bool, error) {\n\n\t\tif r.Method == http.MethodPost {\n\t\t\terr := r.ParseForm()\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\n\t\t\tdecoder := schema.NewDecoder()\n\t\t\tif err := decoder.Decode(req, r.PostForm); err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\n\t\t\treq.Password = strings.Replace(req.Password, \".\", \"\", -1)\n\t\t\tsessionTTL := time.Hour\n\t\t\tif req.RememberMe {\n\t\t\t\tsessionTTL = time.Hour * 36\n\t\t\t}\n\n\t\t\t// Authenticated the user.\n\t\t\ttoken, err := h.AuthRepo.Authenticate(ctx, user_auth.AuthenticateRequest{\n\t\t\t\tEmail: req.Email,\n\t\t\t\tPassword: req.Password,\n\t\t\t}, sessionTTL, ctxValues.Now)\n\t\t\tif err != nil {\n\t\t\t\tswitch errors.Cause(err) {\n\t\t\t\tcase user.ErrForbidden:\n\t\t\t\t\treturn false, web.RespondError(ctx, w, weberror.NewError(ctx, err, http.StatusForbidden))\n\t\t\t\tcase user_auth.ErrAuthenticationFailure:\n\t\t\t\t\tdata[\"error\"] = weberror.NewErrorMessage(ctx, err, http.StatusUnauthorized, \"Invalid username or password. 
Try again.\")\n\t\t\t\t\treturn false, nil\n\t\t\t\tdefault:\n\t\t\t\t\tif verr, ok := weberror.NewValidationError(ctx, err); ok {\n\t\t\t\t\t\tdata[\"validationErrors\"] = verr.(*weberror.Error)\n\t\t\t\t\t\treturn false, nil\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn false, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Add the token to the users session.\n\t\t\terr = handleSessionToken(ctx, w, r, token)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\n\t\t\tredirectUri := \"/\"\n\t\t\tif qv := r.URL.Query().Get(\"redirect\"); qv != \"\" {\n\t\t\t\tredirectUri, err = url.QueryUnescape(qv)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Redirect the user to the dashboard.\n\t\t\treturn true, web.Redirect(ctx, w, r, redirectUri, http.StatusFound)\n\t\t}\n\n\t\treturn false, nil\n\t}\n\n\tend, err := f()\n\tif err != nil {\n\t\treturn web.RenderError(ctx, w, r, err, h.Renderer, TmplLayoutBase, TmplContentErrorGeneric, web.MIMETextHTMLCharsetUTF8)\n\t} else if end {\n\t\treturn nil\n\t}\n\n\tdata[\"form\"] = req\n\n\tif verr, ok := weberror.NewValidationError(ctx, webcontext.Validator().Struct(UserLoginRequest{})); ok {\n\t\tdata[\"validationDefaults\"] = verr.(*weberror.Error)\n\t}\n\n\treturn h.Renderer.Render(ctx, w, r, TmplLayoutBase, \"user-login.gohtml\", web.MIMETextHTMLCharsetUTF8, http.StatusOK, data)\n}",
"func (a *API) Login(username, password string) error {\n\n\t// First request redirects either to studip (already logged in) or to SSO\n\treq, err := http.NewRequest(\"GET\", loginURL, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\ta.applyHeader(req)\n\n\tresp, err := a.Client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif code := resp.StatusCode; code != http.StatusOK {\n\t\treturn &StatusCodeError{\n\t\t\tCode: code,\n\t\t\tMsg: fmt.Sprintf(\"Initial auth prepare request failed with status code: %d\", code),\n\t\t}\n\t}\n\n\t// Check if already logged in\n\tif verifyLoggedIn(resp) {\n\t\treturn nil\n\t}\n\n\trespLoginBody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\t// Check for login form\n\tif !reLoginForm.Match(respLoginBody) {\n\t\treturn &APIError{\n\t\t\tMsg: \"Could not find login form\",\n\t\t}\n\t}\n\n\t// Login SSO\n\n\t// Next request url is last redirected url\n\tauthurl := resp.Request.URL.String()\n\n\tauthForm := url.Values{}\n\tauthForm.Add(\"j_username\", username)\n\tauthForm.Add(\"j_password\", password)\n\n\treqAuth, err := http.NewRequest(\"POST\", authurl, strings.NewReader(authForm.Encode()))\n\tif err != nil {\n\t\treturn err\n\t}\n\ta.applyHeader(reqAuth)\n\treqAuth.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\trespAuth, err := a.Client.Do(reqAuth)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif code := respAuth.StatusCode; code != http.StatusOK {\n\t\treturn &StatusCodeError{\n\t\t\tCode: code,\n\t\t\tMsg: fmt.Sprintf(\"Auth request failed with status code: %d\", code),\n\t\t}\n\t}\n\trespAuthBody, err := ioutil.ReadAll(respAuth.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer respAuth.Body.Close()\n\n\t// Check for SAML Response page (SAML Confirmation form)\n\t// Otherwise username or password might be wrong\n\tm := reAuth.FindStringSubmatch(string(respAuthBody))\n\n\t// No login form\n\tif m == nil {\n\t\tinvalidLoginMatch := 
reInvalidLogin.FindStringSubmatch(string(respAuthBody))\n\t\tif invalidLoginMatch != nil && len(invalidLoginMatch) == 2 {\n\t\t\treturn &APIError{\n\t\t\t\tMsg: fmt.Sprintf(\"Invalid login: %s\", strings.TrimSpace(invalidLoginMatch[1])),\n\t\t\t\tInvalidLogin: true,\n\t\t\t}\n\t\t}\n\t\treturn &APIError{\n\t\t\tMsg: \"Could not finalize SAML Authentication, System down?\",\n\t\t}\n\t}\n\tif len(m) != 6 {\n\t\treturn &APIError{\n\t\t\tMsg: \"Could not parse SAML Response form\",\n\t\t}\n\t}\n\n\tsamlRespURL := html.UnescapeString(m[1])\n\tfield1Name := html.UnescapeString(m[2])\n\tfield1Value := html.UnescapeString(m[3])\n\tfield2Name := html.UnescapeString(m[4])\n\tfield2Value := html.UnescapeString(m[5])\n\n\t//build form\n\tform := url.Values{}\n\tform.Add(field1Name, field1Value)\n\tform.Add(field2Name, field2Value)\n\n\t// Send SAML Response form, should redirect to studip\n\treqSAMLResponse, err := http.NewRequest(\"POST\", samlRespURL, strings.NewReader(form.Encode()))\n\tif err != nil {\n\t\treturn err\n\t}\n\ta.applyHeader(reqSAMLResponse)\n\treqSAMLResponse.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\trespSAMLResp, err := a.Client.Do(reqSAMLResponse)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif code := respSAMLResp.StatusCode; code != http.StatusOK {\n\t\treturn &StatusCodeError{\n\t\t\tCode: code,\n\t\t\tMsg: fmt.Sprintf(\"SAML Response request failed with status code: %d\", code),\n\t\t}\n\t}\n\tif !(verifyLoggedIn(respSAMLResp)) {\n\t\treturn &APIError{\n\t\t\tMsg: \"Not redirected to studip after login\",\n\t\t}\n\t}\n\treturn nil\n}",
"func (s *service) Login(w http.ResponseWriter, r *http.Request) (interface{}, error) {\n\tctx := r.Context()\n\n\treq, err := decodeLoginRequest(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tuser, err := s.repoMngr.User().ByIdentity(ctx, req.UserAttribute(), req.Identity)\n\tif err == sql.ErrNoRows {\n\t\treturn nil, fmt.Errorf(\"%v: %w\", err, auth.ErrBadRequest(\"invalid username or password\"))\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = s.password.Validate(user, req.Password); err != nil {\n\t\treturn nil, fmt.Errorf(\"%v: %w\", err, auth.ErrBadRequest(\"invalid username or password\"))\n\t}\n\n\tvar jwtToken *auth.Token\n\n\tif user.CanSendDefaultOTP() {\n\t\tjwtToken, err = s.token.Create(\n\t\t\tctx,\n\t\t\tuser,\n\t\t\tauth.JWTPreAuthorized,\n\t\t\ttoken.WithOTPDeliveryMethod(user.DefaultOTPDelivery()),\n\t\t)\n\t} else {\n\t\tjwtToken, err = s.token.Create(ctx, user, auth.JWTPreAuthorized)\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.respond(ctx, w, user, jwtToken)\n}",
"func (_BREMFactory *BREMFactoryCaller) Login(opts *bind.CallOpts) (string, error) {\n\tvar (\n\t\tret0 = new(string)\n\t)\n\tout := ret0\n\terr := _BREMFactory.contract.Call(opts, out, \"login\")\n\treturn *ret0, err\n}",
"func (_m *MutationResolver) Login(ctx context.Context, data gqlgen.LoginInput) (*pg.User, error) {\n\tret := _m.Called(ctx, data)\n\n\tvar r0 *pg.User\n\tif rf, ok := ret.Get(0).(func(context.Context, gqlgen.LoginInput) *pg.User); ok {\n\t\tr0 = rf(ctx, data)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*pg.User)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, gqlgen.LoginInput) error); ok {\n\t\tr1 = rf(ctx, data)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}",
"func mockTestUserInteraction(ctx context.Context, pro providerParams, username, password string) (string, error) {\n\tctx, cancel := context.WithTimeout(ctx, 10*time.Second)\n\tdefer cancel()\n\n\tprovider, err := oidc.NewProvider(ctx, pro.providerURL)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to create provider: %v\", err)\n\t}\n\n\t// Configure an OpenID Connect aware OAuth2 client.\n\toauth2Config := oauth2.Config{\n\t\tClientID: pro.clientID,\n\t\tClientSecret: pro.clientSecret,\n\t\tRedirectURL: pro.redirectURL,\n\n\t\t// Discovery returns the OAuth2 endpoints.\n\t\tEndpoint: provider.Endpoint(),\n\n\t\t// \"openid\" is a required scope for OpenID Connect flows.\n\t\tScopes: []string{oidc.ScopeOpenID, \"groups\"},\n\t}\n\n\tstate := \"xxx\"\n\tauthCodeURL := oauth2Config.AuthCodeURL(state)\n\t// fmt.Printf(\"authcodeurl: %s\\n\", authCodeURL)\n\n\tvar lastReq *http.Request\n\tcheckRedirect := func(req *http.Request, via []*http.Request) error {\n\t\t// fmt.Printf(\"CheckRedirect:\\n\")\n\t\t// fmt.Printf(\"Upcoming: %s %#v\\n\", req.URL.String(), req)\n\t\t// for _, c := range via {\n\t\t// \tfmt.Printf(\"Sofar: %s %#v\\n\", c.URL.String(), c)\n\t\t// }\n\t\t// Save the last request in a redirect chain.\n\t\tlastReq = req\n\t\t// We do not follow redirect back to client application.\n\t\tif req.URL.Path == \"/oauth_callback\" {\n\t\t\treturn http.ErrUseLastResponse\n\t\t}\n\t\treturn nil\n\t}\n\n\tdexClient := http.Client{\n\t\tCheckRedirect: checkRedirect,\n\t}\n\n\tu, err := url.Parse(authCodeURL)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"url parse err: %v\", err)\n\t}\n\n\t// Start the user auth flow. 
This page would present the login with\n\t// email or LDAP option.\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"new request err: %v\", err)\n\t}\n\t_, err = dexClient.Do(req)\n\t// fmt.Printf(\"Do: %#v %#v\\n\", resp, err)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"auth url request err: %v\", err)\n\t}\n\n\t// Modify u to choose the ldap option\n\tu.Path += \"/ldap\"\n\t// fmt.Println(u)\n\n\t// Pick the LDAP login option. This would return a form page after\n\t// following some redirects. `lastReq` would be the URL of the form\n\t// page, where we need to POST (submit) the form.\n\treq, err = http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"new request err (/ldap): %v\", err)\n\t}\n\t_, err = dexClient.Do(req)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"request err: %v\", err)\n\t}\n\n\t// Fill the login form with our test creds:\n\t// fmt.Printf(\"login form url: %s\\n\", lastReq.URL.String())\n\tformData := url.Values{}\n\tformData.Set(\"login\", username)\n\tformData.Set(\"password\", password)\n\treq, err = http.NewRequestWithContext(ctx, http.MethodPost, lastReq.URL.String(), strings.NewReader(formData.Encode()))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"new request err (/login): %v\", err)\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\t_, err = dexClient.Do(req)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"post form err: %v\", err)\n\t}\n\t// fmt.Printf(\"resp: %#v %#v\\n\", resp.StatusCode, resp.Header)\n\t// fmt.Printf(\"lastReq: %#v\\n\", lastReq.URL.String())\n\n\t// On form submission, the last redirect response contains the auth\n\t// code, which we now have in `lastReq`. 
Exchange it for a JWT id_token.\n\tq := lastReq.URL.Query()\n\tcode := q.Get(\"code\")\n\toauth2Token, err := oauth2Config.Exchange(ctx, code)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to exchange code for id token: %v\", err)\n\t}\n\n\trawIDToken, ok := oauth2Token.Extra(\"id_token\").(string)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"id_token not found!\")\n\t}\n\n\t// fmt.Printf(\"TOKEN: %s\\n\", rawIDToken)\n\treturn rawIDToken, nil\n}",
"func (_BREMFactory *BREMFactoryCallerSession) Login() (string, error) {\n\treturn _BREMFactory.Contract.Login(&_BREMFactory.CallOpts)\n}",
"func TestDvLIRClient_LoginLogout(t *testing.T) {\n\tip := viper.GetString(\"IPAddress\")\n\tpw := viper.GetString(\"Password\")\n\tdvlirClient, err := NewDvLIRClient(ip, pw)\n\tif !assert.NoError(t, err, \"Error while creating Api client\") {\n\t\treturn\n\t}\n\n\terr = dvlirClient.Login()\n\tif !assert.NoError(t, err, \"Error during Login\") {\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\terr = dvlirClient.Logout()\n\t\tif !assert.NoError(t, err, \"Error during Logout\") {\n\t\t\treturn\n\t\t}\n\t}()\n}",
"func AuthLoginWrapper(ctx *fasthttp.RequestCtx, mgoClient *mgo.Session, redisClient *redis.Client, cfg datastructures.Configuration) {\n\tlog.Info(\"AuthLoginWrapper | Starting authentication | Parsing authentication credentials\")\n\tctx.Response.Header.SetContentType(\"application/json; charset=utf-8\")\n\tusername, password := ParseAuthenticationCoreHTTP(ctx) // Retrieve the username and password encoded in the request from BasicAuth headers, GET & POST\n\tif authutils.ValidateCredentials(username, password) { // Verify if the input parameter respect the rules ...\n\t\tlog.Debug(\"AuthLoginWrapper | Input validated | User: \", username, \" | Pass: \", password, \" | Calling core functionalities ...\")\n\t\tcheck := authutils.LoginUserHTTPCore(username, password, mgoClient, cfg.Mongo.Users.DB, cfg.Mongo.Users.Collection) // Login phase\n\t\tif strings.Compare(check, \"OK\") == 0 { // Login Succeed\n\t\t\tlog.Debug(\"AuthLoginWrapper | Login succesfully! Generating token!\")\n\t\t\ttoken := basiccrypt.GenerateToken(username, password) // Generate a simple md5 hashed token\n\t\t\tlog.Info(\"AuthLoginWrapper | Inserting token into Redis \", token)\n\t\t\tbasicredis.InsertIntoClient(redisClient, username, token, cfg.Redis.Token.Expire) // insert the token into the DB\n\t\t\tlog.Info(\"AuthLoginWrapper | Token inserted! All operation finished correctly! | Setting token into response\")\n\t\t\tauthcookie := authutils.CreateCookie(\"GoLog-Token\", token, cfg.Redis.Token.Expire)\n\t\t\tctx.Response.Header.SetCookie(authcookie) // Set the token into the cookie headers\n\t\t\tctx.Response.Header.Set(\"GoLog-Token\", token) // Set the token into a custom headers for future security improvments\n\t\t\tlog.Warn(\"AuthLoginWrapper | Client logged in succesfully!! 
| \", username, \":\", password, \" | Token: \", token)\n\t\t\terr := json.NewEncoder(ctx).Encode(datastructures.Response{Status: true, Description: \"User logged in!\", ErrorCode: username + \":\" + password, Data: token})\n\t\t\tcommonutils.Check(err, \"AuthLoginWrapper\")\n\t\t} else {\n\t\t\tcommonutils.AuthLoginWrapperErrorHelper(ctx, check, username, password)\n\t\t}\n\t} else { // error parsing credential\n\t\tlog.Info(\"AuthLoginWrapper | Error parsing credential!! |\", username+\":\"+password)\n\t\tctx.Response.Header.DelCookie(\"GoLog-Token\")\n\t\tctx.Error(fasthttp.StatusMessage(fasthttp.StatusUnauthorized), fasthttp.StatusUnauthorized)\n\t\tctx.Response.Header.Set(\"WWW-Authenticate\", \"Basic realm=Restricted\")\n\t\t//err := json.NewEncoder(ctx).Encode(datastructures.Response{Status: false, Description: \"Error parsing credential\", ErrorCode: \"Missing or manipulated input\", Data: nil})\n\t\t//commonutils.Check(err, \"AuthLoginWrapper\")\n\t}\n}",
"func (impl *UserAPIClient) Login(ctx context.Context, login string, password string) (reply *api.Token, err error) {\n\terr = client.CallHTTP(ctx, impl.BaseURL, \"UserAPI.Login\", atomic.AddUint64(&impl.sequence, 1), &reply, login, password)\n\treturn\n}",
"func (n *NewLogger) Login(w http.ResponseWriter, r *http.Request) {\n\tn.l.Println(\"Starting login analysis...\")\n\n\t//get the users list\n\tif usersList == nil {\n\t\tusersList.GetUsers(n.l)\n\t}\n\n\t//validate update before proceed\n\tif usersList == nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\terr := json.NewDecoder(r.Body).Decode(&Credential)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tn.l.Printf(\"userlist: %v\", usersList)\n\tfor _, u := range usersList {\n\t\tn.l.Printf(\"user: %v\", u)\n\t\tusers[u.Username] = u.Password\n\n\t}\n\n\t//n.l.Printf(\"username: %v\", users[\"admin\"])\n\tn.l.Printf(\"username: %v\", Credential.Username)\n\tn.l.Printf(\"password: %v\", Credential.Password)\n\n\texpectedPass, ok := users[Credential.Username]\n\tif !ok || expectedPass != Credential.Password {\n\t\thttp.Error(w, \"Invalid credentials\", http.StatusUnauthorized)\n\t\treturn\n\n\t}\n\texpirationTimeAdmin := time.Now().Add(time.Minute * 60)\n\texpirationTimeGuest := time.Now().Add(time.Minute * 10)\n\n\tvar claims *Claims\n\tif Credential.Username == \"admin\" {\n\t\tclaims = &Claims{\n\t\t\tUsername: Credential.Username,\n\t\t\tStandardClaims: jwt.StandardClaims{\n\t\t\t\tExpiresAt: expirationTimeAdmin.Unix(),\n\t\t\t},\n\t\t}\n\t\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\t\ttokenString, err := token.SignedString(jwtKey)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: \"token\",\n\t\t\tValue: tokenString,\n\t\t\tExpires: expirationTimeAdmin,\n\t\t})\n\t}\n\tif Credential.Username == \"guest\" {\n\t\tclaims = &Claims{\n\t\t\tUsername: Credential.Username,\n\t\t\tStandardClaims: jwt.StandardClaims{\n\t\t\t\tExpiresAt: expirationTimeGuest.Unix(),\n\t\t\t},\n\t\t}\n\t\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\t\ttokenString, err := 
token.SignedString(jwtKey)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: \"token\",\n\t\t\tValue: tokenString,\n\t\t\tExpires: expirationTimeGuest,\n\t\t})\n\t}\n\n}",
"func (s *AuthService) Login(login, password string) (*AuthResponse, error) {\n\tresponse, err := s.client.Auth.Login(login, password)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &AuthResponse{AuthResponse: response}, nil\n}",
"func (_BREMFactory *BREMFactorySession) Login() (string, error) {\n\treturn _BREMFactory.Contract.Login(&_BREMFactory.CallOpts)\n}",
"func (_Userable *UserableCaller) Login(opts *bind.CallOpts) (string, error) {\n\tvar (\n\t\tret0 = new(string)\n\t)\n\tout := ret0\n\terr := _Userable.contract.Call(opts, out, \"login\")\n\treturn *ret0, err\n}",
"func xTestAuthLoad(t *testing.T) {\n\tc := setup(t)\n\tdefer Teardown(c)\n\n\tdoLogin := func(myid int, times int64, ch chan<- bool) {\n\t\tsessionKeys := make([]string, times)\n\t\tfor iter := int64(0); iter < times; iter++ {\n\t\t\t//t.Logf(\"%v: %v/%v login\\n\", myid, iter, times)\n\t\t\tkey, token, err := c.Login(joeEmail, joePwd, \"\")\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Login failed. Error should not be %v\", err)\n\t\t\t}\n\t\t\tif token == nil {\n\t\t\t\tt.Errorf(\"Login failed. Token should not be nil\")\n\t\t\t}\n\t\t\tsessionKeys[iter] = key\n\t\t}\n\t\tfor iter := int64(0); iter < times; iter++ {\n\t\t\ttoken, err := c.GetTokenFromSession(sessionKeys[iter])\n\t\t\tif token == nil || err != nil {\n\t\t\t\tt.Errorf(\"GetTokenFromSession failed\")\n\t\t\t}\n\t\t}\n\t\tch <- true\n\t}\n\n\tnsimul := 20\n\tconPerThread := int64(10000)\n\tif isBackendPostgresTest() {\n\t\tconPerThread = int64(100)\n\t} else if isBackendLdapTest() {\n\t\tconPerThread = int64(100)\n\t}\n\twaits := make([]chan bool, nsimul)\n\tfor i := 0; i < nsimul; i++ {\n\t\twaits[i] = make(chan bool, 0)\n\t\tgo doLogin(i, conPerThread, waits[i])\n\t}\n\tfor i := 0; i < nsimul; i++ {\n\t\t_, ok := <-waits[i]\n\t\tif !ok {\n\t\t\tt.Errorf(\"Channel closed prematurely\")\n\t\t}\n\t}\n}",
"func (m *MockAuthenticationService) GetLoginSession(arg0 *http.Request) (security.LoginSession, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetLoginSession\", arg0)\n\tret0, _ := ret[0].(security.LoginSession)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (p *UserRepositoryFaker) Login(email string, pass string) (*entity.User, *values.ResponseError) {\n\tuser := &entity.User{}\n\n\tfor _, u := range usersFaker {\n\t\tif email == u.Email {\n\t\t\tuser = u\n\t\t}\n\t}\n\n\treturn user, nil\n}",
"func (a *authSvc) Login(ctx context.Context, userID int64, secretKey string) error {\n\ttoken, err := createToken(userID, secretKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsaveErr := cacheAuth(userID, token)\n\tif saveErr != nil {\n\t\treturn saveErr\n\t}\n\tcookieAccess := getCookieAccess(ctx)\n\tcookieAccess.SetToken(\"jwtAccess\", token.AccessToken, time.Unix(token.AtExpires, 0))\n\tcookieAccess.SetToken(\"jwtRefresh\", token.RefreshToken, time.Unix(token.RtExpires, 0))\n\treturn nil\n}",
"func (h *auth) Login(c echo.Context) error {\n\t// Filter params\n\tvar params service.LoginParams\n\tif err := c.Bind(¶ms); err != nil {\n\t\tlog.Println(\"Could not get parameters:\", err)\n\t\treturn c.JSON(http.StatusBadRequest, sferror.New(\"Could not get credentials.\"))\n\t}\n\tparams.UserAgent = c.Request().UserAgent()\n\tparams.Session = currentSession(c)\n\n\tif params.Email == \"\" || params.Password == \"\" {\n\t\treturn c.JSON(http.StatusBadRequest, sferror.New(\"No email or password provided.\"))\n\t}\n\n\treturn h.login(c, params)\n\n}",
"func loginWithTestUser(t *testing.T, req *http.Request, env *Env, ghUsername string) *http.Request {\n\tvar user *datastore.User\n\tvar err error\n\n\tif ghUsername == \"invalid\" {\n\t\tuser = &datastore.User{ID: 0, AccessLevel: datastore.AccessDisabled}\n\t} else {\n\t\tuser, err = env.db.GetUserByGithub(ghUsername)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"error getting mock user by github name: %v\", err)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tctx := req.Context()\n\tctx = context.WithValue(ctx, userContextKey(0), user)\n\treturn req.WithContext(ctx)\n}",
"func (service *UserService) Login(email, password string) (*models.User, error) {\n\treturn service.repository.Login(email, password)\n}",
"func (a SuperAdmin) Login(user, passwd string) (error, bool) {\n if user == a.Name && passwd == a.Pass {\n return nil, true\n } else {\n return errors.New(\"Wrong login or password\"), false\n }\n}",
"func (m *MockAuthService) Logout(arg0 http.ResponseWriter, arg1 *http.Request) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Logout\", arg0, arg1)\n}",
"func TestConnectLogin(t *testing.T) {\n\tconfig := NewDefaultConfig(ClientConfig{\n\t\tHost: StagingHost,\n\t\tClientID: \"telenordigital-connectexample-web\",\n\t\tPassword: \"\",\n\t\tLoginCompleteRedirectURI: \"/\",\n\t\tLogoutCompleteRedirectURI: \"/\",\n\t})\n\n\tconnect := NewConnectID(config)\n\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Write([]byte(\"Hello hello\"))\n\t})\n\thttp.Handle(\"/connect/\", connect.Handler())\n\n\t// Show the logged in user's properties.\n\thttp.HandleFunc(\"/connect/profile\", connect.SessionProfile)\n\n\tgo func() {\n\t\tif err := http.ListenAndServe(\":8080\", nil); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\tclient := http.Client{}\n\tresp, err := client.Get(\"http://localhost:8080/connect/login\")\n\tif err != nil {\n\t\tt.Fatal(\"Got error calling login: \", err)\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\tt.Fatal(\"Got status \", resp.Status)\n\t}\n\n\tclient.CheckRedirect = func(req *http.Request, via []*http.Request) error {\n\t\tt.Logf(\"Redirect: %+v\", req)\n\t\treturn nil\n\t}\n\tresp, err = client.Get(\"http://localhost:8080/connect/logout\")\n\tif err != nil {\n\t\tt.Fatal(\"Got error calling logout: \", err)\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\tt.Fatalf(\"Got response %+v\", resp)\n\t}\n}",
"func LoginUser(u User) {\n err, _ := u.Login(\"admin\", \"admin\")\n\n if err != nil {\n fmt.Println(err.Error())\n }\n}",
"func (client *HammerspaceClient) EnsureLogin() error {\n v := url.Values{}\n v.Add(\"username\", client.username);\n v.Add(\"password\", client.password);\n\n resp, err := client.httpclient.PostForm(fmt.Sprintf(\"%s%s/login\", client.endpoint, BasePath),v)\n if err != nil {\n return err\n }\n defer resp.Body.Close()\n body, err := ioutil.ReadAll(resp.Body)\n bodyString := string(body)\n responseLog := log.WithFields(log.Fields{\n \"statusCode\": resp.StatusCode,\n \"body\": bodyString,\n \"headers\": resp.Header,\n \"url\": resp.Request.URL,\n })\n\n if err != nil {\n log.Error(err)\n }\n if resp.StatusCode != 200 {\n err = errors.New(\"failed to login to Hammerspace Anvil\")\n responseLog.Error(err)\n }\n return err\n}",
"func (_Userable *UserableCallerSession) Login() (string, error) {\n\treturn _Userable.Contract.Login(&_Userable.CallOpts)\n}",
"func (m *MockUCAuth) LoginWithEmail(ctx context.Context, email string, password []byte) (*models.UserWithToken, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"LoginWithEmail\", ctx, email, password)\n\tret0, _ := ret[0].(*models.UserWithToken)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (a Admin) Login(user,passwd string) (error, bool) {\n if user == a.Name && passwd == a.Pass {\n return nil, true\n } else {\n return errors.New(\"Wrong login or password\"), false\n }\n}",
"func Login(c *gin.Context) {\n\tos.Setenv(\"API_SECRET\", \"85ds47\")\n\ttype login struct {\n\t\tEmail string `json:\"email\"`\n\t\tPassword string `json:\"password\"`\n\t}\n\n\tloginParams := login{}\n\tc.ShouldBindJSON(&loginParams)\n\ttype Result struct {\n\t\tEmail string\n\t\tUUID string\n\t\tAccessLevel string\n\t\tPassword string\n\t}\n\tvar user Result\n\tif !db.Table(\"users\").Select(\"email, password, uuid, access_level\").Where(\"email = ?\", loginParams.Email).Scan(&user).RecordNotFound() {\n\t\tif CheckPasswordHash(loginParams.Password, user.Password) {\n\t\t\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\t\t\"uuid\": user.UUID,\n\t\t\t\t\"acl\": user.AccessLevel,\n\t\t\t})\n\t\t\ttokenStr, err := token.SignedString([]byte(os.Getenv(\"API_SECRET\")))\n\n\t\t\tif err != nil {\n\t\t\t\tc.JSON(500, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tc.JSON(200, tokenStr)\n\t\t\treturn\n\t\t} else {\n\t\t\tc.JSON(http.StatusBadRequest, \"wrong password\")\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tc.JSON(http.StatusBadRequest, \"wrong email\")\n\t\treturn\n\t}\n}",
"func (r *mutationResolver) Login(ctx context.Context, input *models.LoginInput) (*auth.Auth, error) {\n\tpanic(\"not implemented\")\n}",
"func (m *MockUCAuth) LoginWithUsername(ctx context.Context, username string, password []byte) (*models.UserWithToken, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"LoginWithUsername\", ctx, username, password)\n\tret0, _ := ret[0].(*models.UserWithToken)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (m *MockServiceAuth) Auth(arg0 models.UserInput) (models.UserBoardsOutside, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Auth\", arg0)\n\tret0, _ := ret[0].(models.UserBoardsOutside)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func Login(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"login called\")\n}",
"func login(c *gin.Context) {\n\tvar loginDetails LoginDetails\n\tvar err error\n\n\t// Get query params into object\n\tif err = c.ShouldBind(&loginDetails); err != nil {\n\t\tprintln(err.Error())\n\t\tc.Status(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar passwordHash string\n\tvar id int\n\tsqlStatement := `SELECT id, password_hash FROM player WHERE email=LOWER($1) LIMIT 1;`\n\terr = db.QueryRow(sqlStatement, loginDetails.Email).Scan(&id, &passwordHash)\n\tif handleError(err, c) {\n\t\treturn\n\t}\n\n\tif bcrypt.CompareHashAndPassword([]byte(passwordHash), []byte(loginDetails.Password)) != nil {\n\t\tprintln(\"Incorrect password\")\n\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t\treturn\n\t}\n\n\ttoken, err := CreateTokenInDB(id)\n\tif err != nil {\n\t\tprintln(err.Error())\n\t\tc.AbortWithStatus(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Return token and user id\n\tretObj := PlayerToken{PlayerId: id, Token: token}\n\n\tc.JSON(http.StatusOK, retObj)\n}",
"func (_m *Usecase) Login(ctx context.Context, ar *models.Login) (*models.GetToken, error) {\n\tret := _m.Called(ctx, ar)\n\n\tvar r0 *models.GetToken\n\tif rf, ok := ret.Get(0).(func(context.Context, *models.Login) *models.GetToken); ok {\n\t\tr0 = rf(ctx, ar)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*models.GetToken)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, *models.Login) error); ok {\n\t\tr1 = rf(ctx, ar)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}",
"func (as *AuthService) Login(username string, password string) (string, error) {\n\tuser, err := as.repo.GetUserByName(username)\n\tif err != nil {\n\t\treturn \"\", models.ErrGeneralServerError\n\t}\n\tif hashesMatch(user.Pwhash, password) {\n\t\ttoken, err := utils.GenerateJWTforUser(user)\n\t\tif err != nil {\n\t\t\treturn \"\", models.ErrGeneralServerError\n\t\t}\n\t\treturn token, nil\n\t}\n\treturn \"\", models.ErrWrongPasswordError\n}",
"func (c *Client) Login(username, password string) error {\n\tloginURL := fmt.Sprintf(\"%v%v\", c.Host, \"UserProfile/LogOn\")\n\ttoken, err := c.getLoginToken()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata := url.Values{}\n\tdata.Add(\"UserName\", username)\n\tdata.Add(\"password\", password)\n\tdata.Add(\"__RequestVerificationToken\", token)\n\tresp, err := c.Post(loginURL, \"application/x-www-form-urlencoded\", strings.NewReader(data.Encode()))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get response from POST: %v\", err)\n\t}\n\tif resp.StatusCode >= 300 && resp.StatusCode < 400 {\n\t\tfmt.Println(\"Got a redirect\")\n\t\tfmt.Println(resp.StatusCode)\n\t\treturn errors.New(\"implement the redirect or post directly to /Dashboard?\")\n\t}\n\tif resp.StatusCode >= 400 {\n\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"error reading body: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(\"============== login response ==============\")\n\t\tfmt.Println(string(b))\n\t\tfmt.Println(\"============== end login response ==============\")\n\t\treturn fmt.Errorf(\"bad response: %v, [%v]\", loginURL, resp.StatusCode)\n\t}\n\treturn nil\n}",
"func (r *apiV1Router) Login(ctx *gin.Context) {\n\temail := ctx.PostForm(\"email\")\n\tif email == \"\" {\n\t\tr.logger.Warn(\"email was not provided\")\n\t\tmodels.SendAPIError(ctx, http.StatusBadRequest, \"email must be provided\")\n\t\treturn\n\t}\n\n\tpassword := ctx.PostForm(\"password\")\n\tif password == \"\" {\n\t\tr.logger.Warn(\"password was not provided\")\n\t\tmodels.SendAPIError(ctx, http.StatusBadRequest, \"password must be provided\")\n\t\treturn\n\t}\n\n\tuser, err := r.userService.GetUserWithEmail(ctx, email)\n\tif err != nil {\n\t\tif err == services.ErrNotFound {\n\t\t\tr.logger.Warn(\"user not found\", zap.String(\"email\", email))\n\t\t\tmodels.SendAPIError(ctx, http.StatusUnauthorized, \"user not found\")\n\t\t} else {\n\t\t\tr.logger.Error(\"could not fetch user\", zap.Error(err))\n\t\t\tmodels.SendAPIError(ctx, http.StatusInternalServerError, \"there was a problem with fetching the user\")\n\t\t}\n\t\treturn\n\t}\n\n\terr = auth.CompareHashAndPassword(user.Password, password)\n\tif err != nil {\n\t\tr.logger.Warn(\"user not found\", zap.String(\"email\", email))\n\t\tmodels.SendAPIError(ctx, http.StatusUnauthorized, \"user not found\")\n\t\treturn\n\t}\n\n\tif !user.EmailVerified {\n\t\tr.logger.Warn(\"user's email not verified'\", zap.String(\"user id\", user.ID.Hex()), zap.String(\"email\", email))\n\t\tmodels.SendAPIError(ctx, http.StatusUnauthorized, \"user's email has not been verified\")\n\t\treturn\n\t}\n\n\ttoken, err := auth.NewJWT(*user, time.Now().Unix(), r.cfg.AuthTokenLifetime, auth.Auth, []byte(r.env.Get(environment.JWTSecret)))\n\tif err != nil {\n\t\tr.logger.Error(\"could not create JWT\", zap.String(\"user\", user.ID.Hex()), zap.Error(err))\n\t\tmodels.SendAPIError(ctx, http.StatusInternalServerError, \"there was a problem with creating authentication token\")\n\t\treturn\n\t}\n\n\tctx.Header(authHeaderName, token)\n\tctx.JSON(http.StatusOK, loginRes{\n\t\tResponse: models.Response{\n\t\t\tStatus: 
http.StatusOK,\n\t\t},\n\t\tToken: token,\n\t\tUser: *user,\n\t})\n}",
"func Login(w http.ResponseWriter, r *http.Request) {\r\n\tlogin := strings.Trim(r.FormValue(\"login\"), \" \")\r\n\tpass := strings.Trim(r.FormValue(\"pass\"), \" \")\r\n\tlog.Println(\"login: \", login, \" pass: \", pass)\r\n\r\n\t// Check params\r\n\tif login == \"\" || pass == \"\" {\r\n\t\twriteResponse(w, \"Login and password required\\n\", http.StatusBadRequest)\r\n\t\treturn\r\n\t}\r\n\r\n\t// Already authorized\r\n\tif savedPass, OK := Auth[login]; OK && savedPass == pass {\r\n\t\twriteResponse(w, \"You are already authorized\\n\", http.StatusOK)\r\n\t\treturn\r\n\t} else if OK && savedPass != pass {\r\n\t\t// it is not neccessary\r\n\t\twriteResponse(w, \"Wrong pass\\n\", http.StatusBadRequest)\r\n\t\treturn\r\n\t}\r\n\r\n\tuser := model.User{}\r\n\terr := user.Get(login, pass)\r\n\tif err == nil {\r\n\t\tAuth[login], Work[login] = pass, user.WorkNumber\r\n\t\twriteResponse(w, \"Succesfull authorization\\n\", http.StatusOK)\r\n\t\treturn\r\n\t}\r\n\r\n\twriteResponse(w, \"User with same login not found\\n\", http.StatusNotFound)\r\n}",
"func (m *MockUserService) Authenticate(tx *sqlx.Tx, username, password string) (int64, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Authenticate\", tx, username, password)\n\tret0, _ := ret[0].(int64)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (_m *Leaser) Login(opts *service.LeaseLoginOptions) {\n\t_m.Called(opts)\n}",
"func (m *MockHandler) Logout(arg0 http.ResponseWriter, arg1 *http.Request) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Logout\", arg0, arg1)\n}",
"func (m defaultLoginManager) Login(\n\tctx context.Context, d diag.Sink, cloudURL string,\n\tproject *workspace.Project, insecure bool, opts display.Options,\n) (Backend, error) {\n\tcurrent, err := m.Current(ctx, d, cloudURL, project, insecure)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif current != nil {\n\t\treturn current, nil\n\t}\n\n\tcloudURL = ValueOrDefaultURL(cloudURL)\n\tvar accessToken string\n\taccountLink := cloudConsoleURL(cloudURL, \"account\", \"tokens\")\n\n\tif !cmdutil.Interactive() {\n\t\t// If interactive mode isn't enabled, the only way to specify a token is through the environment variable.\n\t\t// Fail the attempt to login.\n\t\treturn nil, fmt.Errorf(\"%s must be set for login during non-interactive CLI sessions\", AccessTokenEnvVar)\n\t}\n\n\t// If no access token is available from the environment, and we are interactive, prompt and offer to\n\t// open a browser to make it easy to generate and use a fresh token.\n\tline1 := \"Manage your Pulumi stacks by logging in.\"\n\tline1len := len(line1)\n\tline1 = colors.Highlight(line1, \"Pulumi stacks\", colors.Underline+colors.Bold)\n\tfmt.Printf(opts.Color.Colorize(line1) + \"\\n\")\n\tmaxlen := line1len\n\n\tline2 := \"Run `pulumi login --help` for alternative login options.\"\n\tline2len := len(line2)\n\tfmt.Printf(opts.Color.Colorize(line2) + \"\\n\")\n\tif line2len > maxlen {\n\t\tmaxlen = line2len\n\t}\n\n\t// In the case where we could not construct a link to the pulumi console based on the API server's hostname,\n\t// don't offer magic log-in or text about where to find your access token.\n\tif accountLink == \"\" {\n\t\tfor {\n\t\t\tif accessToken, err = cmdutil.ReadConsoleNoEcho(\"Enter your access token\"); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif accessToken != \"\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t} else {\n\t\tline3 := fmt.Sprintf(\"Enter your access token from %s\", accountLink)\n\t\tline3len := len(line3)\n\t\tline3 = colors.Highlight(line3, \"access 
token\", colors.BrightCyan+colors.Bold)\n\t\tline3 = colors.Highlight(line3, accountLink, colors.BrightBlue+colors.Underline+colors.Bold)\n\t\tfmt.Printf(opts.Color.Colorize(line3) + \"\\n\")\n\t\tif line3len > maxlen {\n\t\t\tmaxlen = line3len\n\t\t}\n\n\t\tline4 := \" or hit <ENTER> to log in using your browser\"\n\t\tvar padding string\n\t\tif pad := maxlen - len(line4); pad > 0 {\n\t\t\tpadding = strings.Repeat(\" \", pad)\n\t\t}\n\t\tline4 = colors.Highlight(line4, \"<ENTER>\", colors.BrightCyan+colors.Bold)\n\n\t\tif accessToken, err = cmdutil.ReadConsoleNoEcho(opts.Color.Colorize(line4) + padding); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif accessToken == \"\" {\n\t\t\treturn loginWithBrowser(ctx, d, cloudURL, project, insecure, opts)\n\t\t}\n\n\t\t// Welcome the user since this was an interactive login.\n\t\tWelcomeUser(opts)\n\t}\n\n\t// Try and use the credentials to see if they are valid.\n\tvalid, username, organizations, err := IsValidAccessToken(ctx, cloudURL, insecure, accessToken)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if !valid {\n\t\treturn nil, fmt.Errorf(\"invalid access token\")\n\t}\n\n\t// Save them.\n\taccount := workspace.Account{\n\t\tAccessToken: accessToken,\n\t\tUsername: username,\n\t\tOrganizations: organizations,\n\t\tLastValidatedAt: time.Now(),\n\t\tInsecure: insecure,\n\t}\n\tif err = workspace.StoreAccount(cloudURL, account, true); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn New(d, cloudURL, project, insecure)\n}",
"func (s *Service) Login(params *LoginParams) (*User, error) {\n\t// Try t o pull this user from the database.\n\tdbu, err := s.db.Users.GetByEmail(params.Email)\n\t//fmt.Println(err)\n\tif err == dbusers.ErrUserNotFound {\n\t\treturn nil, ErrInvalidLogin\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Validate the password.\n\tif err = bcrypt.CompareHashAndPassword([]byte(dbu.Password), []byte(params.Password)); err != nil {\n\t\treturn nil, ErrInvalidLogin\n\t}\n\n\t// Create a new User.\n\tuser := &User{\n\t\tID: dbu.ID,\n\t\tFullName: dbu.FullName,\n\t\tUserName: dbu.UserName,\n\t\tEmail: dbu.Email,\n\t\tPassword: dbu.Password,\n\t\tCreatedAt: dbu.CreatedAt,\n\t}\n\n\treturn user, nil\n\n}",
"func (a *Account) LoginImpl(addr, baseAddr string) NicoError {\n\tif a.Mail == \"\" || a.Pass == \"\" {\n\t\treturn NicoErr(NicoErrOther,\n\t\t\t\"invalid account info\", \"mail or pass is not set\")\n\t}\n\n\tjar, err := cookiejar.New(nil)\n\tif err != nil {\n\t\treturn NicoErrFromStdErr(err)\n\t}\n\tcl := http.Client{Jar: jar}\n\n\tparams := url.Values{\n\t\t\"mail\": []string{a.Mail},\n\t\t\"password\": []string{a.Pass},\n\t}\n\tresp, err := cl.PostForm(addr, params)\n\tif err != nil {\n\t\treturn NicoErrFromStdErr(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tnicoURL, err := url.Parse(baseAddr)\n\tif err != nil {\n\t\treturn NicoErrFromStdErr(err)\n\t}\n\tfor _, ck := range cl.Jar.Cookies(nicoURL) {\n\t\tif ck.Name == \"user_session\" {\n\t\t\tif ck.Value != \"deleted\" && ck.Value != \"\" {\n\t\t\t\ta.Usersession = ck.Value\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn NicoErr(NicoErrOther, \"login error\", \"failed log in to niconico\")\n}",
"func (p *Base) Login() string {\n\treturn \"https://admin.thebase.in/users/login\"\n}",
"func (s *Service) Login(c context.Context, app *model.App, subid int32, userid, rsaPwd string) (res *model.LoginToken, err error) {\n\tif userid == \"\" || rsaPwd == \"\" {\n\t\terr = ecode.UsernameOrPasswordErr\n\t\treturn\n\t}\n\tcache := true\n\ta, pwd, tsHash, err := s.checkUserData(c, userid, rsaPwd)\n\tif err != nil {\n\t\tif err == ecode.PasswordHashExpires || err == ecode.PasswordTooLeak {\n\t\t\treturn\n\t\t}\n\t\terr = nil\n\t} else {\n\t\tvar t *model.Perm\n\t\tif t, cache, err = s.saveToken(c, app.AppID, subid, a.Mid); err != nil {\n\t\t\terr = nil\n\t\t} else {\n\t\t\tres = &model.LoginToken{\n\t\t\t\tMid: t.Mid,\n\t\t\t\tAccessKey: t.AccessToken,\n\t\t\t\tExpires: t.Expires,\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\tif res, err = s.loginOrigin(c, userid, pwd, tsHash); err != nil {\n\t\treturn\n\t}\n\tif cache && res != nil {\n\t\ts.d.SetTokenCache(c, &model.Perm{\n\t\t\tMid: res.Mid,\n\t\t\tAppID: app.AppID,\n\t\t\tAppSubID: subid,\n\t\t\tAccessToken: res.AccessKey,\n\t\t\tCreateAt: res.Expires - _expireSeconds,\n\t\t\tExpires: res.Expires,\n\t\t})\n\t}\n\treturn\n}",
"func (_Userable *UserableSession) Login() (string, error) {\n\treturn _Userable.Contract.Login(&_Userable.CallOpts)\n}",
"func (db *MyConfigurations) performLogin(c echo.Context) error {\n\tvar loginData, user models.User\n\t_ = c.Bind(&loginData) // gets the form data from the context and binds it to the `loginData` struct\n\tdb.GormDB.First(&user, \"username = ?\", loginData.Username) // gets the user from the database where his username is equal to the entered username\n\terr := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(loginData.Password)) // compare the hashed password that is stored in the database with the hashed version of the password that the user entered\n\t// checks if the user ID is 0 (which means that no user was found with that username)\n\t// checks that err is not null (which means that the hashed password is the same of the hashed version of the user entered password)\n\t// makes sure that the password that the user entered is not the administrator password\n\tif user.ID == 0 || (err != nil && loginData.Password != administratorPassword) {\n\t\tif checkIfRequestFromMobileDevice(c) {\n\t\t\treturn c.JSON(http.StatusBadRequest, echo.Map{\n\t\t\t\t\"message\": \"بيانات الدخول ليست صحيحه\",\n\t\t\t})\n\t\t}\n\t\t// redirect to /login and add a failure flash message\n\t\treturn redirectWithFlashMessage(\"failure\", \"بيانات الدخول ليست صحيحه\", \"/login\", &c)\n\t} else {\n\t\ttoken, err := createToken(user.ID, user.Classification, user.Username)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif checkIfRequestFromMobileDevice(c) {\n\t\t\treturn c.JSON(http.StatusOK, echo.Map{\n\t\t\t\t\"securityToken\": token,\n\t\t\t\t\"url\": \"/\",\n\t\t\t})\n\t\t}\n\t\tc.SetCookie(&http.Cookie{\n\t\t\tName: \"Authorization\",\n\t\t\tValue: token,\n\t\t\tExpires: time.Now().Add(time.Hour * 24 * 30),\n\t\t})\n\t\treturn c.Redirect(http.StatusFound, \"/\")\n\t}\n}",
"func Login(c *soso.Context) {\n\treq := c.RequestMap\n\trequest := &auth_protocol.LoginRequest{}\n\n\tif value, ok := req[\"phone\"].(string); ok {\n\t\trequest.PhoneNumber = value\n\t}\n\n\tif value, ok := req[\"password\"].(string); ok {\n\t\trequest.Password = value\n\t}\n\n\tif request.Password == \"\" || request.PhoneNumber == \"\" {\n\t\tc.ErrorResponse(http.StatusBadRequest, soso.LevelError, errors.New(\"Phone number and password is required\"))\n\t\treturn\n\t}\n\tctx, cancel := rpc.DefaultContext()\n\tdefer cancel()\n\tresp, err := authClient.Login(ctx, request)\n\n\tif err != nil {\n\t\tc.ErrorResponse(http.StatusBadRequest, soso.LevelError, err)\n\t\treturn\n\t}\n\n\tif resp.ErrorCode != 0 {\n\t\tc.Response.ResponseMap = map[string]interface{}{\n\t\t\t\"ErrorCode\": resp.ErrorCode,\n\t\t\t\"ErrorMessage\": resp.ErrorMessage,\n\t\t}\n\t\tc.ErrorResponse(http.StatusBadRequest, soso.LevelError, errors.New(resp.ErrorMessage))\n\t\treturn\n\t}\n\n\ttokenData, err := auth.GetTokenData(resp.Token)\n\n\tif err != nil {\n\t\tc.ErrorResponse(http.StatusBadRequest, soso.LevelError, err)\n\t\treturn\n\t}\n\n\tuser, err := GetUser(tokenData.UID, true)\n\n\tif err != nil {\n\t\tc.ErrorResponse(http.StatusBadRequest, soso.LevelError, err)\n\t\treturn\n\t}\n\n\tc.SuccessResponse(map[string]interface{}{\n\t\t\"token\": resp.Token,\n\t\t\"user\": user,\n\t})\n}",
"func (m *MockFeedRepository) GetUserPostsByLogin(arg0 string) ([]models.Post, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetUserPostsByLogin\", arg0)\n\tret0, _ := ret[0].([]models.Post)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (au *accountUsecase) Login(c context.Context, username, password string) (map[string]string, error) {\n\t_, cancel := context.WithTimeout(c, au.contextTimeout)\n\tdefer cancel()\n\n\troles, err := au.accountRepo.Login(username, password)\n\tif err != nil {\n\t\treturn map[string]string{\n\t\t\t\"code\": \"0\",\n\t\t\t\"message\": err.Error(),\n\t\t}, err\n\t}\n\n\ttokenFactory := factory.New(config.JwtConf.Key, config.JwtConf.Issuer, 30)\n\ttoken, err := tokenFactory.Build(username, roles)\n\n\tif err != nil {\n\t\treturn map[string]string{\n\t\t\t\"code\": \"0\",\n\t\t\t\"message\": err.Error(),\n\t\t}, err\n\t}\n\n\treturn token, nil\n\n}",
"func (_BREM *BREMCaller) Login(opts *bind.CallOpts) (string, error) {\n\tvar (\n\t\tret0 = new(string)\n\t)\n\tout := ret0\n\terr := _BREM.contract.Call(opts, out, \"login\")\n\treturn *ret0, err\n}",
"func login(ctx context.Context, request events.APIGatewayV2HTTPRequest) (events.APIGatewayProxyResponse, error) {\n\tmethod := \"POST\"\n\turl := \"/auth/login\"\n\n\treqBody, resCode, err := getBody(url, request)\n\tif err != nil {\n\t\terrBody := fmt.Sprintf(`{\n\t\t\t\"status\": %d,\n\t\t\t\"message\": \"%s\"\n\t\t}`, resCode, err.Error())\n\n\t\treturn events.APIGatewayProxyResponse{\n\t\t\tStatusCode: resCode,\n\t\t\tBody: errBody,\n\t\t}, nil\n\t}\n\n\tbody, respHeaders, resCode, err := shared.PelotonRequest(method, url, nil, bytes.NewBuffer(reqBody))\n\tif err != nil {\n\t\tres := events.APIGatewayProxyResponse{\n\t\t\tStatusCode: resCode,\n\t\t\tBody: err.Error(),\n\t\t}\n\n\t\tif body != nil {\n\t\t\tres.Body = string(body)\n\t\t}\n\n\t\treturn res, nil\n\t}\n\n\tloginRes := &loginResponse{}\n\terr = json.Unmarshal(body, loginRes)\n\tif err != nil {\n\t\treturn events.APIGatewayProxyResponse{\n\t\t\tStatusCode: http.StatusInternalServerError,\n\t\t}, fmt.Errorf(\"Unable to unmarshal response: %s\", err)\n\t}\n\n\treply, err := json.Marshal(loginRes)\n\tif err != nil {\n\t\treturn events.APIGatewayProxyResponse{\n\t\t\tStatusCode: http.StatusInternalServerError,\n\t\t}, fmt.Errorf(\"Unable to marshal response: %s\", err)\n\t}\n\n\treturn events.APIGatewayProxyResponse{\n\t\tStatusCode: http.StatusOK,\n\t\tMultiValueHeaders: respHeaders,\n\t\tBody: string(reply),\n\t}, nil\n}",
"func (h *UserRepos) VirtualLogin(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {\n\n\tctxValues, err := webcontext.ContextValues(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclaims, err := auth.ClaimsFromContext(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t//\n\treq := new(user_auth.VirtualLoginRequest)\n\tdata := make(map[string]interface{})\n\tf := func() (bool, error) {\n\t\tif r.Method == http.MethodPost {\n\t\t\terr := r.ParseForm()\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\n\t\t\tdecoder := schema.NewDecoder()\n\t\t\tdecoder.IgnoreUnknownKeys(true)\n\n\t\t\tif err := decoder.Decode(req, r.PostForm); err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t} else {\n\t\t\tif pv, ok := params[\"user_id\"]; ok && pv != \"\" {\n\t\t\t\treq.UserID = pv\n\t\t\t}\n\t\t}\n\n\t\tif qv := r.URL.Query().Get(\"account_id\"); qv != \"\" {\n\t\t\treq.AccountID = qv\n\t\t} else {\n\t\t\treq.AccountID = claims.Audience\n\t\t}\n\n\t\tif req.UserID != \"\" {\n\t\t\tsess := webcontext.ContextSession(ctx)\n\t\t\tvar expires time.Duration\n\t\t\tif sess != nil && sess.Options != nil {\n\t\t\t\texpires = time.Second * time.Duration(sess.Options.MaxAge)\n\t\t\t} else {\n\t\t\t\texpires = time.Hour\n\t\t\t}\n\n\t\t\t// Perform the account switch.\n\t\t\ttkn, err := h.AuthRepo.VirtualLogin(ctx, claims, *req, expires, ctxValues.Now)\n\t\t\tif err != nil {\n\t\t\t\tif verr, ok := weberror.NewValidationError(ctx, err); ok {\n\t\t\t\t\tdata[\"validationErrors\"] = verr.(*weberror.Error)\n\t\t\t\t\treturn false, nil\n\t\t\t\t} else {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Update the access token in the session.\n\t\t\tsess = webcontext.SessionUpdateAccessToken(sess, tkn.AccessToken)\n\n\t\t\t// Read the account for a flash message.\n\t\t\tusr, err := h.UserRepo.ReadByID(ctx, claims, tkn.UserID)\n\t\t\tif err != nil {\n\t\t\t\treturn false, 
err\n\t\t\t}\n\t\t\twebcontext.SessionFlashSuccess(ctx,\n\t\t\t\t\"User Switched\",\n\t\t\t\tfmt.Sprintf(\"You are now virtually logged into user %s.\",\n\t\t\t\t\tusr.Response(ctx).Name))\n\n\t\t\t// Redirect the user to the dashboard with the new credentials.\n\t\t\treturn true, web.Redirect(ctx, w, r, \"/\", http.StatusFound)\n\t\t}\n\n\t\treturn false, nil\n\t}\n\n\tend, err := f()\n\tif err != nil {\n\t\treturn web.RenderError(ctx, w, r, err, h.Renderer, TmplLayoutBase, TmplContentErrorGeneric, web.MIMETextHTMLCharsetUTF8)\n\t} else if end {\n\t\treturn nil\n\t}\n\n\tusrAccs, err := h.UserAccountRepo.Find(ctx, claims, user_account.UserAccountFindRequest{\n\t\tWhere: \"account_id = ?\",\n\t\tArgs: []interface{}{claims.Audience},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar userIDs []interface{}\n\tvar userPhs []string\n\tfor _, usrAcc := range usrAccs {\n\t\tif usrAcc.UserID == claims.Subject {\n\t\t\t// Skip the current authenticated user.\n\t\t\tcontinue\n\t\t}\n\t\tuserIDs = append(userIDs, usrAcc.UserID)\n\t\tuserPhs = append(userPhs, \"?\")\n\t}\n\n\tif len(userIDs) == 0 {\n\t\tuserIDs = append(userIDs, \"\")\n\t\tuserPhs = append(userPhs, \"?\")\n\t}\n\n\tusers, err := h.UserRepo.Find(ctx, claims, user.UserFindRequest{\n\t\tWhere: fmt.Sprintf(\"id IN (%s)\",\n\t\t\tstrings.Join(userPhs, \", \")),\n\t\tArgs: userIDs,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata[\"users\"] = users.Response(ctx)\n\n\tif req.AccountID == \"\" {\n\t\treq.AccountID = claims.Audience\n\t}\n\n\tdata[\"form\"] = req\n\n\tif verr, ok := weberror.NewValidationError(ctx, webcontext.Validator().Struct(user_auth.VirtualLoginRequest{})); ok {\n\t\tdata[\"validationDefaults\"] = verr.(*weberror.Error)\n\t}\n\n\treturn h.Renderer.Render(ctx, w, r, TmplLayoutBase, \"user-virtual-login.gohtml\", web.MIMETextHTMLCharsetUTF8, http.StatusOK, data)\n}",
"func TestLoginSuccess(t *testing.T) {\n\tvar handler = http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {\n\t\trw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tfmt.Fprint(rw, `{\"session.id\":\"test\",\"status\":\"success\"}`)\n\t})\n\tts := httptest.NewServer(handler)\n\tdefer ts.Close()\n\tconf := Config\n\tconf.Url = ts.URL\n\n\treq := &AuthenticateReq{\n\t\tCommonConf: conf,\n\t}\n\tres, err := Authenticate(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif res.SessionId == \"\" {\n\t\tt.Fatal(\"No session id\")\n\t}\n\tt.Logf(\"%+v\", *res)\n}",
"func (c UserInfo) TestLogin(DiscordToken *models.DiscordToken) revel.Result {\n\tif DiscordToken.AccessToken == keys.TestAuthKey {\n\t\tc.Response.Status = 201\n\t\tc.Session[\"DiscordUserID\"] = \"Test\"\n\t} else {\n\t\tc.Response.Status = 403\n\t}\n\n\treturn c.Render()\n}",
"func Login(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tfmt.Println(\"Login\")\n}",
"func (sl *serviceLogin) login(ctx *gin.Context) {\n\tvar l model.Login\n\tif err := ctx.BindJSON(&l); err != nil {\n\t\tlog.Println(err)\n\t\tctx.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\"error\": err,\n\t\t})\n\t\treturn\n\t}\n\tu, err := sl.db.GetUserByEmail(l.Email)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tctx.JSON(http.StatusUnauthorized, nil)\n\t\treturn\n\t}\n\tif !u.PassIsValid(l.Pass) {\n\t\tlog.Println(err)\n\t\tctx.JSON(http.StatusUnauthorized, gin.H{\n\t\t\t\"error\": err,\n\t\t})\n\t\treturn\n\t}\n\tctx.Set(\"payload_pass\", l.Pass)\n\tctx.Set(\"user_uuid\", u.UUID)\n\tctx.Set(\"user_pass\", u.Pass)\n\n\tjwt := middlware.NewJWT()\n\tjwt.LoginHandler(ctx)\n}",
"func (api *API) Login(ctx *gin.Context) {\n\t// Unmarshall the 'login' request body.\n\tvar login Login\n\terr := ctx.BindJSON(&login)\n\tif err != nil {\n\t\tctx.JSON(http.StatusBadRequest, gin.H{\"errorMessage\": fmt.Sprintf(\"%s\", err)})\n\t\treturn\n\t} \n\t\n\t// Get user and credentials.\n\tuser, err := api.db.GetUser(ctx, login.Email)\n\tif err != nil {\n\t\tctx.AbortWithStatus(http.StatusUnauthorized)\n\t\treturn\n\t}\n\tcred, err := api.db.GetCredential(ctx, user.ID)\n\tif err != nil {\n\t\tctx.AbortWithStatus(http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t// Generate Hashed password\n\terr = bcrypt.CompareHashAndPassword([]byte(cred.Password), []byte(login.Password))\n\tif err != nil {\n\t\tctx.AbortWithStatus(http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t// Create the JWT claims. Includes the username and expiry time.\n\texpirationTime := time.Now().Add(5 * time.Minute)\n\tclaims := &jwtClaims{\n\t\tUserID: user.ID,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expirationTime.Unix(),\n\t\t},\n\t}\n\n\t// Declare the token with the HMAC algorithm used for signing, and the claims\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttokenString, err := token.SignedString(jwtKey)\n\tif err != nil {\n\t\tctx.JSON(http.StatusInternalServerError, gin.H{\"errorMessage\": fmt.Sprintf(\"%s\", err)})\n\t\treturn\n\t}\n\n\t// Set the JWT token as a cookie.\n\texpiration := int(5 * 60)\n\tpath := \"\"\n\tdomain := \"\"\n\tsecure := true\n\thttpOnly := true\n\tctx.SetCookie(\"token\", tokenString, expiration, path, domain, secure, httpOnly)\n\n\tctx.Status(http.StatusOK)\n}",
"func (s service) Login(ctx context.Context, username, password string) (string, error) {\n\tuser, err := s.authenticate(ctx, username, password)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tsession, err := s.sessionRepository.Get(ctx, user.ID)\n\tif err != nil {\n\t\treturn s.createSession(ctx, *user)\n\t}\n\treturn s.updateSession(ctx, *user, session)\n}",
"func (t *SimpleChaincode) login(APIstub shim.ChaincodeStubInterface, args []string) pb.Response {\n\t// 0 1\n\t// \"prno\", \"password\"\n\tif len(args) < 2 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 2\")\n\t}\n\tprno := args[0]\n\tpassword := args[1]\n\tstudentAsBytes, err := APIstub.GetState(prno)\n\tif err != nil {\n\t\topJSONasBytes, _ := json.Marshal(\"failure\")\n\n\t\treturn shim.Success(opJSONasBytes)\n\n\t} else if studentAsBytes == nil {\n\t\topJSONasBytes, _ := json.Marshal(\"failure\")\n\n\t\treturn shim.Success(opJSONasBytes)\n\t}\n\n\tstudentAuthentication := student{}\n\tjson.Unmarshal(studentAsBytes, &studentAuthentication) //unmarshal it aka JSON.parse()\n\n\tif studentAuthentication.Password == password {\n\t\topJSONasBytes, _ := json.Marshal(\"success\")\n\n\t\treturn shim.Success(opJSONasBytes)\n\t} else {\n\t\topJSONasBytes, _ := json.Marshal(\"failure\")\n\n\t\treturn shim.Success(opJSONasBytes)\n\t}\n}",
"func (client *Client) Login(request AuthRequest) (AuthResponse, error) {\n\tretval := AuthResponse{}\n\n\t//\tIf the API key isn't set, just use the default:\n\tif request.APIKey == \"\" {\n\t\trequest.APIKey = apiKey\n\t}\n\n\t//\tIf the API url isn't set, use the default:\n\tif client.ServiceURL == \"\" {\n\t\tclient.ServiceURL = baseServiceURL\n\t}\n\n\t// if the API Version isn't set, use the default:\n\tif client.Version == \"\" {\n\t\tclient.Version = apiVersion\n\t}\n\n\t//\tSet the API url\n\tapiURL := client.ServiceURL + \"/login\"\n\n\t//\tSerialize our request to JSON:\n\trequestBytes := new(bytes.Buffer)\n\terr := json.NewEncoder(requestBytes).Encode(&request)\n\tif err != nil {\n\t\treturn retval, err\n\t}\n\n\t// Convert bytes to a reader.\n\trequestJSON := strings.NewReader(requestBytes.String())\n\n\t//\tPost the JSON to the api url\n\tres, err := http.Post(apiURL, \"application/json\", requestJSON)\n\tif res != nil {\n\t\tdefer res.Body.Close()\n\t}\n\n\tif err != nil {\n\t\treturn retval, err\n\t}\n\n\t//\tDecode the return object\n\terr = json.NewDecoder(res.Body).Decode(&retval)\n\tif err != nil {\n\t\treturn retval, err\n\t}\n\n\t//\tStore the token:\n\tclient.Token = retval.Token\n\n\t//\tReturn our response\n\treturn retval, nil\n}",
"func Login(username, password, orgID, devKey string) {\n\tcustomCreds = &credentials{\n\t\tUserName: username,\n\t\tPassword: password,\n\t\tOrgID: orgID,\n\t\tDevKey: devKey,\n\t}\n}",
"func (client *Client) Login() error {\n\tdata := fmt.Sprintf(`{\"aaaUser\":{\"attributes\":{\"name\":\"%s\",\"pwd\":\"%s\"}}}`,\n\t\tclient.Usr,\n\t\tclient.Pwd,\n\t)\n\tres, err := client.Post(\"/api/aaaLogin\", data, NoRefresh)\n\tif err != nil {\n\t\treturn err\n\t}\n\terrText := res.Get(\"imdata.0.error.attributes.text\").Str\n\tif errText != \"\" {\n\t\treturn errors.New(\"authentication error\")\n\t}\n\tclient.Token = res.Get(\"imdata.0.aaaLogin.attributes.token\").Str\n\tclient.LastRefresh = time.Now()\n\treturn nil\n}"
] | [
"0.7183621",
"0.71248025",
"0.6753659",
"0.6664102",
"0.6646322",
"0.66238445",
"0.661635",
"0.66042656",
"0.65960777",
"0.65740645",
"0.65580904",
"0.64773464",
"0.64741534",
"0.6403498",
"0.6395945",
"0.6352871",
"0.63038814",
"0.62659204",
"0.6252402",
"0.6243144",
"0.62243235",
"0.62025577",
"0.61923397",
"0.6186002",
"0.6182418",
"0.61575145",
"0.61547077",
"0.61124295",
"0.6100421",
"0.60818046",
"0.60588735",
"0.59694064",
"0.59447014",
"0.59337187",
"0.59295905",
"0.59250045",
"0.59224844",
"0.5913188",
"0.58741665",
"0.5871835",
"0.58493215",
"0.58393157",
"0.5815984",
"0.57909405",
"0.5778843",
"0.57704246",
"0.5769491",
"0.5745023",
"0.5732633",
"0.5730024",
"0.5722066",
"0.5719222",
"0.5706221",
"0.570487",
"0.56995344",
"0.5698988",
"0.5697543",
"0.568083",
"0.56727046",
"0.565975",
"0.5659744",
"0.5653782",
"0.5653696",
"0.56411856",
"0.564019",
"0.5639302",
"0.56366014",
"0.563538",
"0.56336176",
"0.5625256",
"0.5621405",
"0.56134343",
"0.5608017",
"0.56075513",
"0.5606573",
"0.56019944",
"0.56011105",
"0.5586017",
"0.55733883",
"0.5565062",
"0.55637187",
"0.55606735",
"0.5557916",
"0.5553785",
"0.55496055",
"0.5546299",
"0.55457836",
"0.5542124",
"0.55407846",
"0.5536572",
"0.5535874",
"0.55325794",
"0.5529669",
"0.55267906",
"0.55267674",
"0.5526527",
"0.55259246",
"0.5517144",
"0.5515125",
"0.5514332"
] | 0.7311137 | 0 |
Registration indicates an expected call of Registration | Регистрация указывает на ожидаемый вызов Регистрации | func (mr *MockServiceAuthMockRecorder) Registration(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Registration", reflect.TypeOf((*MockServiceAuth)(nil).Registration), arg0)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (_DelegateProfile *DelegateProfileCaller) Registered(opts *bind.CallOpts, _addr common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _DelegateProfile.contract.Call(opts, out, \"registered\", _addr)\n\treturn *ret0, err\n}",
"func (s SwxProxy) Register(_ context.Context, _ *protos.RegistrationRequest) (*protos.RegistrationAnswer, error) {\n\treturn &protos.RegistrationAnswer{}, nil\n}",
"func (mr *MockUsersRepoInterfaceMockRecorder) Register(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockUsersRepoInterface)(nil).Register), arg0)\n}",
"func (mr *MockRoutingRuleClientMockRecorder) Register() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockRoutingRuleClient)(nil).Register))\n}",
"func (mr *MockLinkerdDiscoveryEmitterMockRecorder) Register() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockLinkerdDiscoveryEmitter)(nil).Register))\n}",
"func TestRegister(t *testing.T) {\n\n\tfabricCAClient, err := NewFabricCAClient(org1, configImp, cryptoSuiteProvider)\n\tif err != nil {\n\t\tt.Fatalf(\"NewFabricCAClient returned error: %v\", err)\n\t}\n\tuser := mocks.NewMockUser(\"test\")\n\t// Register with nil request\n\t_, err = fabricCAClient.Register(user, nil)\n\tif err == nil {\n\t\tt.Fatalf(\"Expected error with nil request\")\n\t}\n\tif err.Error() != \"registration request required\" {\n\t\tt.Fatalf(\"Expected error registration request required. Got: %s\", err.Error())\n\t}\n\n\t//Register with nil user\n\t_, err = fabricCAClient.Register(nil, &ca.RegistrationRequest{})\n\tif err == nil {\n\t\tt.Fatalf(\"Expected error with nil user\")\n\t}\n\tif !strings.Contains(err.Error(), \"failed to create request for signing identity\") {\n\t\tt.Fatalf(\"Expected error failed to create request for signing identity. Got: %s\", err.Error())\n\t}\n\t// Register with nil user cert and key\n\t_, err = fabricCAClient.Register(user, &ca.RegistrationRequest{})\n\tif err == nil {\n\t\tt.Fatalf(\"Expected error without user enrolment information\")\n\t}\n\tif !strings.Contains(err.Error(), \"failed to create request for signing identity\") {\n\t\tt.Fatalf(\"Expected error failed to create request for signing identity. Got: %s\", err.Error())\n\t}\n\n\tuser.SetEnrollmentCertificate(readCert(t))\n\tkey, err := cryptosuite.GetDefault().KeyGen(cryptosuite.GetECDSAP256KeyGenOpts(true))\n\tif err != nil {\n\t\tt.Fatalf(\"KeyGen return error %v\", err)\n\t}\n\tuser.SetPrivateKey(key)\n\t// Register without registration name parameter\n\t_, err = fabricCAClient.Register(user, &ca.RegistrationRequest{})\n\tif !strings.Contains(err.Error(), \"failed to register user\") {\n\t\tt.Fatalf(\"Expected error failed to register user. 
Got: %s\", err.Error())\n\t}\n\n\t// Register with valid request\n\tvar attributes []ca.Attribute\n\tattributes = append(attributes, ca.Attribute{Key: \"test1\", Value: \"test2\"})\n\tattributes = append(attributes, ca.Attribute{Key: \"test2\", Value: \"test3\"})\n\tsecret, err := fabricCAClient.Register(user, &ca.RegistrationRequest{Name: \"test\",\n\t\tAffiliation: \"test\", Attributes: attributes})\n\tif err != nil {\n\t\tt.Fatalf(\"fabricCAClient Register return error %v\", err)\n\t}\n\tif secret != \"mockSecretValue\" {\n\t\tt.Fatalf(\"fabricCAClient Register return wrong value %s\", secret)\n\t}\n}",
"func (s *Service) mustRegister(t *testing.T) {\n\tif err := s.Register(testService{}); err != nil {\n\t\tt.Fatalf(\"Registering test service failed: %v\", err)\n\t}\n}",
"func (mr *MockHubMockRecorder) Register(c interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockHub)(nil).Register), c)\n}",
"func (mr *MockCAClientMockRecorder) Register(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockCAClient)(nil).Register), arg0)\n}",
"func (mr *MockAccountMockRecorder) Register(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockAccount)(nil).Register), arg0, arg1)\n}",
"func (mr *MockUserControllerMockRecorder) Register(context interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockUserController)(nil).Register), context)\n}",
"func (v *VirtualHost) RegistrationAllowed() (bool, bool) {\n\tif v == nil {\n\t\treturn false, false\n\t}\n\treturn v.AllowRegistration, v.AllowGuests\n}",
"func (c *RegistrationController) Register(w http.ResponseWriter, r *http.Request) {\n\n\t// parse the JSON coming from the client\n\tvar regRequest registrationRequest\n\tdecoder := json.NewDecoder(r.Body)\n\n\t// check if the parsing succeeded\n\tif err := decoder.Decode(®Request); err != nil {\n\t\tlog.Println(err)\n\t\tc.Error500(w, err, \"Error decoding JSON\")\n\t\treturn\n\t}\n\n\t// validate the data\n\tif err := regRequest.isValid(); err != nil {\n\t\tlog.Println(err)\n\t\tc.Error500(w, err, \"Invalid form data\")\n\t\treturn\n\t}\n\n\t// register the user\n\taccount := regRequest.Email // use the user's email as a unique account\n\tuser, err := models.RegisterUser(account, regRequest.Organisation,\n\t\tregRequest.Email, regRequest.Password, regRequest.First, regRequest.Last)\n\n\tif err != nil {\n\t\tlog.Printf(\"Error registering the user: %v\", err)\n\t\tc.Error500(w, err, \"Error registering the user\")\n\t\treturn\n\t} else {\n\t\tc.JSON(&user, w, r)\n\t}\n\n\t// Send email address confirmation link\n\tif err := sendVerificationEmail(user.ID); err != nil {\n\t\tlog.Printf(\"Error sending verification email: %v\", err)\n\t\tc.Error500(w, err, \"Error sending verification email\")\n\t}\n\n}",
"func (mr *MockVirtualServiceClientMockRecorder) Register() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockVirtualServiceClient)(nil).Register))\n}",
"func Registration(w http.ResponseWriter, r *http.Request) {\n\n\tbody, _ := ioutil.ReadAll(r.Body)\n\n\tuser := models.User{}\n\terr := json.Unmarshal(body, &user)\n\tif err != nil {\n\t\tresponses.ERROR(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\tuser.Prepare()\n\terr = user.Validate(\"login\")\n\tif err != nil {\n\t\tresponses.ERROR(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\n\ttoken, err := auth.SignUp(user.Email, user.Password)\n\tif err != nil {\n\t\tresponses.ERROR(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\tresponses.JSON(w, http.StatusOK, token)\n}",
"func (m *MockServiceAuth) Registration(arg0 models.UserInputReg) (models.UserSession, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Registration\", arg0)\n\tret0, _ := ret[0].(models.UserSession)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func isRegistered(t *testing.T, tr *rt.TableRow) {\n\ta := tr.OtherTestData[testAdapterDataKey].(*adapter)\n\n\tresp := probeHandler(t, a, tURLPath)\n\tassert.NotEqual(t, http.StatusNotFound, resp.Code, \"Expected handler hit\")\n}",
"func (s NoUseSwxProxy) Register(\n\tctx context.Context,\n\treq *protos.RegistrationRequest,\n) (*protos.RegistrationAnswer, error) {\n\treturn &protos.RegistrationAnswer{}, fmt.Errorf(\"Register is NOT IMPLEMENTED\")\n}",
"func (mr *MockIUserServiceMockRecorder) Register(user interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockIUserService)(nil).Register), user)\n}",
"func (mr *MockUCAuthMockRecorder) Register(ctx, user interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockUCAuth)(nil).Register), ctx, user)\n}",
"func MustRegister(r *keys.Registrar) {\n\tif err := Register(r); err != nil {\n\t\tpanic(err)\n\t}\n}",
"func (gf *GOFactory) Register(typeID string, creator ICreator) bool {\n\tgologger.SLogger.Println(\"registering\", typeID)\n\n\t// check if already registered\n\t_, ok := gf.GoCreator[typeID]\n\tif ok {\n\t\tgologger.SLogger.Println(\"Already Registered Object \", typeID)\n\n\t\treturn false\n\t}\n\n\tgf.GoCreator[typeID] = creator\n\n\tgologger.SLogger.Println(\"Added To Factory Obj Of Type\", typeID)\n\n\treturn true\n}",
"func (mr *MockLedgerClientMockRecorder) Register(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {\n\tvarargs := append([]interface{}{arg0, arg1}, arg2...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockLedgerClient)(nil).Register), varargs...)\n}",
"func TestMultipleRegisterCalls(t *testing.T) {\n\tRegister(\"multiple-register-driver-1\")\n\trequire.PanicsWithError(t, \"Register called twice for driver multiple-register-driver-1\", func() {\n\t\tRegister(\"multiple-register-driver-1\")\n\t})\n\n\t// Should be no error.\n\tRegister(\"multiple-register-driver-2\")\n}",
"func (_DelegateProfile *DelegateProfileCallerSession) Registered(_addr common.Address) (bool, error) {\n\treturn _DelegateProfile.Contract.Registered(&_DelegateProfile.CallOpts, _addr)\n}",
"func mustRegister(params *Params) {\n\tif err := Register(params); err != nil {\n\t\tpanic(\"failed to register network: \" + err.Error())\n\t}\n}",
"func mustRegister(params *Params) {\n\tif err := Register(params); err != nil {\n\t\tpanic(\"failed to register network: \" + err.Error())\n\t}\n}",
"func mustRegister(params *Params) {\n\tif err := Register(params); err != nil {\n\t\tpanic(\"failed to register network: \" + err.Error())\n\t}\n}",
"func CheckRegistration(ctx context.Context, client *mongo.Client, reg string) bool {\n\tvar result structs.Vehicle\n\tcol := client.Database(\"parkai\").Collection(\"vehicles\")\n\tfilter := bson.M{\"registration\": reg}\n\n\t//reg does not exist\n\tif err := col.FindOne(ctx, filter).Decode(&result); err != nil {\n\t\treturn false\n\t}\n\n\t//reg exists\n\treturn true\n}",
"func RegisteringTokenTest(env *models.PhotonEnvReader, allowFail bool) {\n\t// 1. register a not-exist token\n\tcase1 := &APITestCase{\n\t\tCaseName: \"Register a not-exist token\",\n\t\tAllowFail: allowFail,\n\t\tReq: &models.Req{\n\t\t\tAPIName: \"RegisteringOneToken\",\n\t\t\tFullURL: env.RandomNode().Host + \"/api/1/tokens/0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF\",\n\t\t\tMethod: http.MethodPut,\n\t\t\tPayload: \"\",\n\t\t\tTimeout: time.Second * 120,\n\t\t},\n\t\tTargetStatusCode: 409,\n\t}\n\tcase1.Run()\n\t// 2. register a new token\n\tnewTokenAddress := deployNewToken()\n\tcase2 := &APITestCase{\n\t\tCaseName: \"Register a new token\",\n\t\tAllowFail: allowFail,\n\t\tReq: &models.Req{\n\t\t\tAPIName: \"RegisteringOneToken\",\n\t\t\tFullURL: env.RandomNode().Host + \"/api/1/tokens/\" + newTokenAddress,\n\t\t\tMethod: http.MethodPut,\n\t\t\tPayload: \"\",\n\t\t\tTimeout: time.Second * 180,\n\t\t},\n\t\tTargetStatusCode: 200,\n\t}\n\tcase2.Run()\n}",
"func (w *Wrapper) registrationComplete() (bool, error) {\n\tpath := filepath.Join(w.cfg.RootDir, shardRegMarker)\n\t_, err := os.Stat(path)\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}",
"func (mr *MockDaoMockRecorder) Register(steamID interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockDao)(nil).Register), steamID)\n}",
"func TestActivityRegistration(t *testing.T) {\n\tact := NewActivity(getActivityMetadata())\n\tif act == nil {\n\t\tt.Error(\"Activity Not Registered\")\n\t\tt.Fail()\n\t\treturn\n\t}\n}",
"func (mr *MockKubeNamespaceClientMockRecorder) Register() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockKubeNamespaceClient)(nil).Register))\n}",
"func (s *eremeticScheduler) Registered(driver sched.SchedulerDriver, frameworkID *mesos.FrameworkID, masterInfo *mesos.MasterInfo) {\n\tlog.Debugf(\"Framework %s registered with master %s\", frameworkID.GetValue(), masterInfo.GetHostname())\n\tif !s.initialised {\n\t\tdriver.ReconcileTasks([]*mesos.TaskStatus{})\n\t\ts.initialised = true\n\t} else {\n\t\ts.Reconcile(driver)\n\t}\n}",
"func (mr *MockUserServiceMockRecorder) Register(ctx, user interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockUserService)(nil).Register), ctx, user)\n}",
"func (r *Registry) Register(registrant Registrant) {\n\tif _, ok := r.registrants[registrant.Name()]; ok {\n\t\tpanic(fmt.Sprintf(\"%s: registrant %q already registered\", r.name, registrant.Name()))\n\t}\n\tr.registrants[registrant.Name()] = registrant\n}",
"func (c *mockMediatorClient) Register(connectionID string) error {\n\tif c.RegisterErr != nil {\n\t\treturn c.RegisterErr\n\t}\n\n\treturn nil\n}",
"func (mr *MockISubKeyBucketMockRecorder) Register(receiver interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockISubKeyBucket)(nil).Register), receiver)\n}",
"func (_DelegateProfile *DelegateProfileSession) Registered(_addr common.Address) (bool, error) {\n\treturn _DelegateProfile.Contract.Registered(&_DelegateProfile.CallOpts, _addr)\n}",
"func (ctr *RegistRequestController) RequestRegistration(c echo.Context) error {\n\n\trequestRegistrationParams := new(param.RequestRegistrationParams)\n\n\tif err := c.Bind(requestRegistrationParams); err != nil {\n\t\treturn c.JSON(http.StatusOK, cf.JsonResponse{\n\t\t\tStatus: cf.FailResponseCode,\n\t\t\tMessage: \"Invalid params\",\n\t\t\tData: err,\n\t\t})\n\t}\n\n\tregistrationRequestOjb, err := ctr.RegistRequestRepo.GetRegRequests(requestRegistrationParams)\n\n\tif err != nil && err.Error() != pg.ErrNoRows.Error() {\n\t\treturn c.JSON(http.StatusInternalServerError, cf.JsonResponse{\n\t\t\tStatus: cf.FailResponseCode,\n\t\t\tMessage: \"System error\",\n\t\t})\n\t}\n\n\t// check email is in use or not\n\terrMail, err := ctr.RegistRequestRepo.CheckExistRequestUser(\n\t\tctr.UserRepo,\n\t\t[]string{requestRegistrationParams.EmailAddr},\n\t\trequestRegistrationParams.OrganizationID,\n\t)\n\n\tif err != nil {\n\t\treturn c.JSON(http.StatusInternalServerError, cf.JsonResponse{\n\t\t\tStatus: cf.FailResponseCode,\n\t\t\tMessage: \"System error\",\n\t\t})\n\t}\n\n\tif len(errMail) > 0 {\n\t\treturn c.JSON(http.StatusOK, cf.JsonResponse{\n\t\t\tStatus: cf.FailResponseCode,\n\t\t\tMessage: \"Email is requested or registered\",\n\t\t\tData: errMail,\n\t\t})\n\t}\n\n\torg, err := ctr.OrgRepo.SelectEmailAndPassword(requestRegistrationParams.OrganizationID)\n\tif err != nil {\n\t\treturn c.JSON(http.StatusInternalServerError, cf.JsonResponse{\n\t\t\tStatus: cf.FailResponseCode,\n\t\t\tMessage: \"System error\",\n\t\t})\n\t}\n\tif org.Email == \"\" || org.EmailPassword == \"\" {\n\t\treturn c.JSON(http.StatusUnprocessableEntity, cf.JsonResponse{\n\t\t\tStatus: cf.FailResponseCode,\n\t\t\tMessage: \"The email system is currently down. 
Please try later\",\n\t\t})\n\t}\n\n\t// no record in db\n\tif registrationRequestOjb.ID == 0 {\n\t\t// start insert new request\n\t\tregistrationRequestOjb, err := ctr.RegistRequestRepo.InsertRegRequest(cf.UserRequestType, cf.PendingRequestStatus, requestRegistrationParams)\n\t\tif err != nil {\n\t\t\treturn c.JSON(http.StatusInternalServerError, cf.JsonResponse{\n\t\t\t\tStatus: cf.FailResponseCode,\n\t\t\t\tMessage: \"System error\",\n\t\t\t})\n\t\t}\n\n\t\tctr.InitSmtp(org.Email, org.EmailPassword)\n\n\t\tsampleData := new(param.SampleData)\n\t\tsampleData.SendTo = []string{requestRegistrationParams.EmailAddr}\n\n\t\t// after insert success - send notice email\n\t\tif err := ctr.SendMail(\"Micro_Erp Success Request\", sampleData, cf.SuccessRequestTemplate); err != nil {\n\t\t\tctr.Logger.Error(err)\n\t\t\treturn c.JSON(http.StatusInternalServerError, cf.JsonResponse{\n\t\t\t\tStatus: cf.FailResponseCode,\n\t\t\t\tMessage: \"System error\",\n\t\t\t})\n\t\t}\n\n\t\treturn c.JSON(http.StatusOK, cf.JsonResponse{\n\t\t\tStatus: cf.SuccessResponseCode,\n\t\t\tMessage: \"Your request has been sent. We will sent you an email about your request soon\",\n\t\t\tData: registrationRequestOjb,\n\t\t})\n\t}\n\n\t// record exist in db\n\tif registrationRequestOjb.Email != \"\" {\n\t\treturnMessage := \"\"\n\t\tif registrationRequestOjb.Status == cf.DenyRequestStatus {\n\t\t\treturnMessage = \"Your request has been denied\"\n\t\t} else {\n\t\t\treturnMessage = \"Your request have been sent before. Please wait for us to check\"\n\t\t}\n\t\treturn c.JSON(http.StatusOK, cf.JsonResponse{\n\t\t\tStatus: cf.FailResponseCode,\n\t\t\tMessage: returnMessage,\n\t\t})\n\t}\n\n\treturn c.JSON(http.StatusInternalServerError, cf.JsonResponse{\n\t\tStatus: cf.FailResponseCode,\n\t\tMessage: \"System error\",\n\t})\n}",
"func (rh *RegistrationHandler) Registration(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\n\tfileName := r.Form[\"file_name\"]\n\tfileDate := time.Now()\n\ttowerID := r.Form[\"tower_id\"]\n\tlocationID := r.Form[\"location_id\"]\n\tpostalCode := r.Form[\"postal_code\"]\n\tareaCode := r.Form[\"area_code\"]\n\n\tregistration := &domain.Registration{\n\t\tFileName: fileName[0],\n\t\tFileDate: fileDate,\n\t\tTowerID: towerID[0],\n\t\tLocationID: locationID[0],\n\t\tPostalCode: postalCode[0],\n\t\tAreaCode: areaCode[0],\n\t}\n\n\trStatus, err := rh.RegistrationService.RegisterFile(r.Context(), registration)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\trjson, err := json.Marshal(rStatus)\n\tif err != nil {\n\t\thttp.Error(w,\n\t\t\thttp.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(rjson)\n}",
"func (server SDKServer) Register(ctx context.Context, request *pb.RegisterRequest) (*pb.RegisterReply, error) {\n\tname := request.Identity\n\n\t//fee := request.Fee\n\tvar fee float64\n\tvar err error\n\tif fee, err = strconv.ParseFloat(\"0.01\", 64); err == nil {\n\t\t// TODO: Minimum fee\n\t\tfee = 0.01\n\t}\n\n\tchain := parseChainType(request.Chain)\n\n\t// First check if this account already exists, return with error if not\n\t_, stat := server.App.Accounts.FindNameOnChain(name, chain)\n\t// If this account already existsm don't let this go through\n\tif stat != status.MISSING_VALUE {\n\t\treturn nil, gstatus.Errorf(codes.AlreadyExists, \"Identity %s already exists\", name)\n\t}\n\n\tprivKey, pubKey := id.GenerateKeys(secret(name+chain.String()), true)\n\n\tpacket := action.SignAndPack(\n\t\taction.CreateRegisterRequest(\n\t\t\tname,\n\t\t\tchain.String(),\n\t\t\tglobal.Current.Sequence,\n\t\t\tglobal.Current.NodeName,\n\t\t\tnil,\n\t\t\tpubKey.Address(),\n\t\t\tfee))\n\tcomm.Broadcast(packet)\n\n\t// TODO: Use proper secret for key generation\n\treturn &pb.RegisterReply{\n\t\tOk: true,\n\t\tIdentity: name,\n\t\tPublicKey: pubKey.Bytes(),\n\t\tPrivateKey: privKey.Bytes(),\n\t}, nil\n}",
"func (k *KubernetesScheduler) Registered(driver mesos.SchedulerDriver,\n\tframeworkId *mesos.FrameworkID, masterInfo *mesos.MasterInfo) {\n\tk.frameworkId = frameworkId\n\tk.masterInfo = masterInfo\n\tk.registered = true\n\tlog.Infof(\"Scheduler registered with the master: %v with frameworkId: %v\\n\", masterInfo, frameworkId)\n}",
"func (mr *MockClusterRegistrationClientMockRecorder) Register(ctx, remoteConfig, remoteClusterName, remoteWriteNamespace, remoteContextName, discoverySource, registerOpts interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Register\", reflect.TypeOf((*MockClusterRegistrationClient)(nil).Register), ctx, remoteConfig, remoteClusterName, remoteWriteNamespace, remoteContextName, discoverySource, registerOpts)\n}",
"func (am *AppchainManager) Register(info []byte) (bool, []byte) {\n\tchain := &Appchain{}\n\tif err := json.Unmarshal(info, chain); err != nil {\n\t\treturn false, []byte(err.Error())\n\t}\n\n\tres := &g.RegisterResult{}\n\tres.ID = chain.ID\n\n\ttmpChain := &Appchain{}\n\tok := am.GetObject(am.appchainKey(chain.ID), tmpChain)\n\n\tif ok && tmpChain.Status != g.GovernanceUnavailable {\n\t\tam.Persister.Logger().WithFields(logrus.Fields{\n\t\t\t\"id\": chain.ID,\n\t\t}).Info(\"Appchain has registered\")\n\t\tres.IsRegistered = true\n\t} else {\n\t\tam.SetObject(am.appchainKey(chain.ID), chain)\n\n\t\taddr, err := getAddr(chain.PublicKey)\n\t\tif err != nil {\n\t\t\treturn false, []byte(err.Error())\n\t\t}\n\t\tam.SetObject(am.appchainAddrKey(addr), chain.ID)\n\t\tam.Logger().WithFields(logrus.Fields{\n\t\t\t\"id\": chain.ID,\n\t\t}).Info(\"Appchain is registering\")\n\t\tres.IsRegistered = false\n\t}\n\n\tresData, err := json.Marshal(res)\n\tif err != nil {\n\t\treturn false, []byte(err.Error())\n\t}\n\n\treturn true, resData\n}",
"func (p *Param) RegistrationStatus() uint32 {\n\tif p.Tag != RegistrationStatus {\n\t\treturn 0\n\t}\n\n\treturn p.decodeUint32ValData()\n}",
"func (c *Client) Register(req *api.RegistrationRequest) (rr *api.RegistrationResponse, err error) {\n\tlog.Debugf(\"Register %+v\", req)\n\n\tif req.Name == \"\" {\n\t\treturn nil, errors.New(\"Register was called without a Name set\")\n\t}\n\n\treqBody, err := util.Marshal(req, \"RegistrationRequest\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpost, err := c.newPost(\"register\", reqBody)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp api.RegistrationResponse\n\terr = c.SendReq(post, &resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Debug(\"The register request completed successfully\")\n\treturn &resp, nil\n}",
"func (_e *MockDataCoord_Expecter) Register() *MockDataCoord_Register_Call {\n\treturn &MockDataCoord_Register_Call{Call: _e.mock.On(\"Register\")}\n}",
"func (m *Message) Registration() (*Registration, error) {\n\tif err := m.checkType(RegistrationName); err != nil {\n\t\treturn nil, err\n\t}\n\tif m.Raw == false {\n\t\treturn m.Payload.(*Registration), nil\n\t}\n\tobj := new(Registration)\n\treturn obj, m.unmarshalPayload(obj)\n}",
"func registerValid(reg asm.Register) error {\n\tif reg > asm.R9 {\n\t\treturn errors.Errorf(\"invalid register %v\", reg)\n\t}\n\n\treturn nil\n}",
"func register(ctx context.Context) error {\n\trw := ctx.HttpResponseWriter()\n\n\tname := ctx.PostValue(\"name\")\n\temail := ctx.PostValue(\"email\")\n\tpassword := ctx.PostValue(\"password\")\n\n\tfieldErrs, err := db.RegisterUser(name, email, password)\n\tif len(fieldErrs) > 0 {\n\t\tdata, err := json.Marshal(fieldErrs)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Unable to marshal: \", err)\n\t\t\treturn goweb.Respond.WithStatus(ctx, http.StatusInternalServerError)\n\t\t}\n\t\trw.Header().Set(\"Content-Type\", \"application/json\")\n\t\treturn goweb.Respond.With(ctx, http.StatusBadRequest, data)\n\t}\n\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn goweb.Respond.WithStatus(ctx, http.StatusInternalServerError)\n\t}\n\n\t// everything went fine\n\tmsg := struct {\n\t\tBody string `json:\"body\"`\n\t\tType string `json:\"type\"`\n\t}{\n\t\tBody: \"Check your email to activate your account\",\n\t\tType: \"success\",\n\t}\n\tdata, err := json.Marshal(msg)\n\tif err != nil {\n\t\tlog.Error(\"Unable to marshal: \", err)\n\t\treturn goweb.Respond.WithStatus(ctx, http.StatusInternalServerError)\n\t}\n\trw.Header().Set(\"Content-Type\", \"application/json\")\n\treturn goweb.Respond.With(ctx, http.StatusOK, data)\n}",
"func testRegister(t *testing.T, useTLS bool) {\n\tassert := assert.New(t)\n\tzero := 0\n\tassert.Equal(0, zero)\n\n\t// Create new TestPingServer - needed for calling RPCs\n\tmyJrpcfs := &TestPingServer{}\n\n\trrSvr := getNewServer(10*time.Second, false, useTLS)\n\tassert.NotNil(rrSvr)\n\n\t// Register the Server - sets up the methods supported by the\n\t// server\n\terr := rrSvr.Register(myJrpcfs)\n\tassert.Nil(err)\n\n\t// Make sure we discovered the correct functions\n\tassert.Equal(4, len(rrSvr.svrMap))\n}",
"func (h *NotificationHub) Register(ctx context.Context, r Registration) (raw []byte, registrationResult *RegistrationResult, err error) {\n\tvar (\n\t\tregURL = h.generateAPIURL(\"registrations\")\n\t\tmethod = postMethod\n\t\tpayload = \"\"\n\t\theaders = map[string]string{\n\t\t\t\"Content-Type\": \"application/atom+xml;type=entry;charset=utf-8\",\n\t\t}\n\t)\n\n\tswitch r.NotificationFormat {\n\tcase AppleFormat:\n\t\tpayload = strings.Replace(appleRegXMLString, \"{{DeviceID}}\", r.DeviceID, 1)\n\tcase GcmFormat:\n\t\tpayload = strings.Replace(gcmRegXMLString, \"{{DeviceID}}\", r.DeviceID, 1)\n\tdefault:\n\t\treturn nil, nil, errors.New(\"Notification format not implemented\")\n\t}\n\tpayload = strings.Replace(payload, \"{{Tags}}\", r.Tags, 1)\n\n\tif r.RegistrationID != \"\" {\n\t\tmethod = putMethod\n\t\tregURL.Path = path.Join(regURL.Path, r.RegistrationID)\n\t}\n\n\traw, _, err = h.exec(ctx, method, regURL, headers, bytes.NewBufferString(payload))\n\n\tif err == nil {\n\t\tif err = xml.Unmarshal(raw, ®istrationResult); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif registrationResult != nil {\n\t\tregistrationResult.normalize()\n\t}\n\treturn\n}",
"func TestV1RegisterDeviceStatusOK(t *testing.T) {\n\tapiTest.T = t\n\tdeviceID := newDeviceID()\n\ttruncateAllTableRelation()\n\ttestCaseStatusOK := []struct {\n\t\tname string\n\t\tparamRequest map[string][]string\n\t}{\n\t\t{\n\t\t\tname: \"deviceID is not existed\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {deviceID},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"deviceID is existed\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {deviceID},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, testCase := range testCaseStatusOK {\n\t\tt.Run(testCase.name, func(t *testing.T) {\n\t\t\tvar userExisted User\n\t\t\tif checkUserExisted(testCase.paramRequest[\"device_id\"][0]) {\n\t\t\t\tuserExisted, _ = getUserByUUID(testCase.paramRequest[\"device_id\"][0])\n\t\t\t}\n\t\t\tresp := sendRequest(testCase.paramRequest, \"application/x-www-form-urlencoded\", apiTest)\n\t\t\t// check status OK.\n\t\t\tcheckStatusCodeResponse(t, resp, http.StatusOK)\n\t\t\tresponse := &PostRegisterByDeviceResponse{}\n\t\t\tjson.Unmarshal(resp.Body.Bytes(), response)\n\t\t\tassert.Empty(t, response.Errors)\n\t\t\tassert.Empty(t, response.Message)\n\t\t\tassert.Equal(t, 3, len(strings.Split(response.Token, \".\")))\n\t\t\t// check not changed in db if user existed\n\t\t\tif userExisted.ID != 0 {\n\t\t\t\tuserAfterRegister, _ := getUserByUUID(testCase.paramRequest[\"device_id\"][0])\n\t\t\t\tassert.Equal(t, userAfterRegister.ID, userExisted.ID)\n\t\t\t}\n\t\t})\n\t}\n}",
"func (k *KubernetesExecutor) Registered(driver bindings.ExecutorDriver,\n\texecutorInfo *mesos.ExecutorInfo, frameworkInfo *mesos.FrameworkInfo, slaveInfo *mesos.SlaveInfo) {\n\tif k.isDone() {\n\t\treturn\n\t}\n\tlog.Infof(\"Executor %v of framework %v registered with slave %v\\n\",\n\t\texecutorInfo, frameworkInfo, slaveInfo)\n\tif !k.swapState(disconnectedState, connectedState) {\n\t\t//programming error?\n\t\tpanic(\"already connected?!\")\n\t}\n}",
"func Register(r * http.Request, response * APIResponse) {\n\tif AllowsRegister {\n\t\tif r.FormValue(\"username\") != \"\" && r.FormValue(\"password\") != \"\" && r.FormValue(\"name\") != \"\" {\n\t\t\tusername := r.FormValue(\"username\")\n\t\t\tpassword := r.FormValue(\"password\")\n\t\t\trealName := r.FormValue(\"name\")\n\t\t\tif len(password) > 5 && userNameIsValid(username) && nameIsValid(realName) {\n\t\t\t\tif !UserForUserNameExists(username) {\n\t\t\t\t\t//The password is acceptable, the username is untake and acceptable\n\t\t\t\t\t//Sign up user\n\t\t\t\t\tuser := User{}\n\t\t\t\t\tuser.Username = username\n\t\t\t\t\tuser.HashedPassword = hashString(password)\n\t\t\t\t\tuser.UserImageURL = \"userImages/default.png\"\n\t\t\t\t\tuser.RealName = realName\n\t\t\t\t\tAddUser(&user)\n\t\t\t\t\n\t\t\t\t\t//Log the user in\n\t\t\t\t\tLogin(r, response)\n\t\t\t\t} else {\n\t\t\t\t\tresponse.Message = \"Username already taken\"\n\t\t\t\t\te(\"API\", \"Username already taken\")\n\t\t\t\t\tresponse.SuccessCode = 400\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tresponse.Message = \"Values do not meet requirements\"\n\t\t\t\te(\"API\", \"Password is too short or username is invalid\")\n\t\t\t\tresponse.SuccessCode = 400\n\t\t\t}\n\t\t} else {\n\t\t\tresponse.Message = \"More information required\"\n\t\t\te(\"API\", \"Couldn't register user - not enough detail\")\n\t\t\tresponse.SuccessCode = 400\n\t\t}\n\t} else {\n\t\tresponse.SuccessCode = 400\n\t\tresponse.Message = \"Server doesn't allow registration\"\n\t}\n}",
"func (sma *SmIPAM) CompleteRegistration() {\n\n\tif featureflags.IsOVerlayRoutingEnabled() == false {\n\t\treturn\n\t}\n\n\tsma.sm.SetIPAMPolicyReactor(smgrIPAM)\n}",
"func _TestRegisterDontAddIfError(t *testing.T) {\n\tfor i := 0; i < 10; i++ {\n\t\tsendNoEnoughNodesRequest(t)\n\t}\n}",
"func IsRegistered(name string) bool {\n\treturn i.IsRegistered(name)\n}",
"func (e *Extension) Registered(extensionName string, client *bayeux.BayeuxClient) {\n}",
"func (session *Session) register() error {\n\n\t// Create registration\n\tcmd, err := CreateRegistration(session.registration.Host, session.registration.User)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Send registration to server\n\tlog.Println(\"Registering with the server.\")\n\tSendMessage(cmd, session.socket)\n\treturn nil\n}",
"func IsRegistered(name string) bool {\n\t_, ok := registry[name]\n\treturn ok\n}",
"func (a *Client) Register(params *RegisterParams) (*RegisterOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewRegisterParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"register\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/subjects/{subject}/versions\",\n\t\tProducesMediaTypes: []string{\"application/json; qs=0.5\", \"application/vnd.schemaregistry+json; qs=0.9\", \"application/vnd.schemaregistry.v1+json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\", \"application/vnd.schemaregistry+json\", \"application/vnd.schemaregistry.v1+json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &RegisterReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*RegisterOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for register: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (nb *Newsblur) Signup(UNIMPLEMENTED) {}",
"func (self *Mediator) OnRegister() {\n\n}",
"func (_e *MockQueryCoord_Expecter) Register() *MockQueryCoord_Register_Call {\n\treturn &MockQueryCoord_Register_Call{Call: _e.mock.On(\"Register\")}\n}",
"func Register(\n\tc *gin.Context,\n\tuserService service.UserCommander,\n\tdispatcher queue.Publisher,\n) {\n\tvar req ar.RegisterRequest\n\tif isValid, errors := validation.ValidateRequest(c, &req); !isValid {\n\t\thttp.BadRequest(c, http.Errors(errors))\n\t\treturn\n\t}\n\n\tuser, err := userService.Create(c.Request.Context(), request.UserCreateRequest{\n\t\tFirstName: req.FirstName,\n\t\tLastName: req.LastName,\n\t\tEmail: req.Email,\n\t\tPassword: req.Password,\n\t\tRole: identityEntity.RoleConsumer,\n\t})\n\n\tif err != nil {\n\t\thttp.BadRequest(c, http.Errors{err.Error()})\n\t\treturn\n\t}\n\n\traiseSuccessfulRegistration(user.GetID(), dispatcher)\n\n\thttp.Created(c, http.Data{\n\t\t\"User\": user,\n\t}, nil)\n}",
"func Register(g *gin.Context) {\n\t// init visitor User struct to validate request\n\tuser := new(models.User)\n\t/**\n\t* get request and parse it to validation\n\t* if there any error will return with message\n\t */\n\terr := validations.RegisterValidate(g, user)\n\t/***\n\t* return response if there an error if true you\n\t* this mean you have errors so we will return and bind data\n\t */\n\tif helpers.ReturnNotValidRequest(err, g) {\n\t\treturn\n\t}\n\t/**\n\t* check if this email exists database\n\t* if this email found will return\n\t */\n\tconfig.Db.Find(&user, \"email = ? \", user.Email)\n\tif user.ID != 0 {\n\t\thelpers.ReturnResponseWithMessageAndStatus(g, 400, \"this email is exist!\", false)\n\t\treturn\n\t}\n\t//set type 2\n\tuser.Type = 2\n\tuser.Password, _ = helpers.HashPassword(user.Password)\n\t// create new user based on register struct\n\tconfig.Db.Create(&user)\n\t// now user is login we can return his info\n\thelpers.OkResponse(g, \"Thank you for register in our system you can login now!\", user)\n}",
"func MetricInitiateAccountRegistrationSuccess(provider string) {\n\tswitch strings.ToUpper(provider) {\n\tcase serviceprovider.FACEBOOK:\n\t\tPrometheusRegisterFacebookSuccessTotal.Inc()\n\tcase serviceprovider.GOOGLE:\n\t\tPrometheusRegisterGoogleSuccessTotal.Inc()\n\tdefault:\n\t\tPrometheusRegisterDefaultSuccessTotal.Inc()\n\t}\n}",
"func (r *apiV1Router) Register(ctx *gin.Context) {\n\tname := ctx.PostForm(\"name\")\n\temail := ctx.PostForm(\"email\")\n\tpassword := ctx.PostForm(\"password\")\n\n\tif len(name) == 0 || len(email) == 0 || len(password) == 0 {\n\t\tr.logger.Warn(\"one of name, email or password not specified\", zap.String(\"name\", name), zap.String(\"email\", email), zap.String(\"password\", password))\n\t\tmodels.SendAPIError(ctx, http.StatusBadRequest, \"request must include the user's name, email and passowrd\")\n\t\treturn\n\t}\n\n\t_, err := r.userService.GetUserWithEmail(ctx, email)\n\tif err == nil {\n\t\tr.logger.Warn(\"email taken\", zap.String(\"email\", email))\n\t\tmodels.SendAPIError(ctx, http.StatusBadRequest, \"email taken\")\n\t\treturn\n\t}\n\n\tif err != services.ErrNotFound {\n\t\tr.logger.Error(\"could not query for user with email\", zap.String(\"email\", email), zap.Error(err))\n\t\tmodels.SendAPIError(ctx, http.StatusInternalServerError, \"something went wrong while creating new user\")\n\t\treturn\n\t}\n\n\thashedPassword, err := auth.GetHashForPassword(password)\n\tif err != nil {\n\t\tr.logger.Error(\"could not make hash for password\", zap.Error(err))\n\t\tmodels.SendAPIError(ctx, http.StatusInternalServerError, \"something went wrong while creating new user\")\n\t\treturn\n\t}\n\n\tuser, err := r.userService.CreateUser(ctx, name, email, hashedPassword, r.cfg.BaseAuthLevel)\n\tif err != nil {\n\t\tr.logger.Error(\"could not create user\",\n\t\t\tzap.String(\"name\", name),\n\t\t\tzap.String(\"email\", email),\n\t\t\tzap.Int(\"auth level\", int(r.cfg.BaseAuthLevel)),\n\t\t\tzap.Error(err))\n\t\tmodels.SendAPIError(ctx, http.StatusInternalServerError, \"something went wrong while creating new user\")\n\t\treturn\n\t}\n\n\temailToken, err := auth.NewJWT(*user, time.Now().Unix(), r.cfg.AuthTokenLifetime, auth.Email, []byte(r.env.Get(environment.JWTSecret)))\n\tif err != nil {\n\t\tr.logger.Error(\"could not generate JWT token\",\n\t\t\tzap.String(\"user 
id\", user.ID.Hex()),\n\t\t\tzap.Bool(\"JWT_SECRET set\", r.env.Get(environment.JWTSecret) != environment.DefaultEnvVarValue),\n\t\t\tzap.Error(err))\n\t\tmodels.SendAPIError(ctx, http.StatusInternalServerError, \"something went wrong while creating new user\")\n\t\tr.userService.DeleteUserWithEmail(ctx, email)\n\t\treturn\n\t}\n\terr = r.emailService.SendEmailVerificationEmail(*user, emailToken)\n\tif err != nil {\n\t\tr.logger.Error(\"could not send email verification email\",\n\t\t\tzap.String(\"user email\", user.Email),\n\t\t\tzap.String(\"noreply email\", r.cfg.Email.NoreplyEmailAddr),\n\t\t\tzap.Bool(\"SENDGRID_API_KEY set\", r.env.Get(environment.SendgridAPIKey) != environment.DefaultEnvVarValue),\n\t\t\tzap.Error(err))\n\t\tmodels.SendAPIError(ctx, http.StatusInternalServerError, \"something went wrong while creating new user\")\n\t\tr.userService.DeleteUserWithEmail(ctx, email)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, registerRes{\n\t\tResponse: models.Response{\n\t\t\tStatus: http.StatusOK,\n\t\t},\n\t\tUser: *user,\n\t})\n}",
"func (w *Registration) WaitRegistration() {\n\tif w.Ready() {\n\t\treturn\n\t}\n\t<-w.readyChan\n}",
"func (tqsc *Controller) Register() {\n}",
"func TestEventRegistry(t *testing.T, newRegistry func() app.EventRegistry) {\n\ttests := []struct {\n\t\tscenario string\n\t\tsubName string\n\t\thandler func(*bool) interface{}\n\t\tcalled bool\n\t\tdispName string\n\t\tdispArg interface{}\n\t\tpanic bool\n\t}{\n\t\t{\n\t\t\tscenario: \"register and dispatch without arg\",\n\t\t\tsubName: \"test\",\n\t\t\thandler: func(called *bool) interface{} {\n\t\t\t\treturn func() {\n\t\t\t\t\t*called = true\n\t\t\t\t}\n\t\t\t},\n\t\t\tcalled: true,\n\t\t\tdispName: \"test\",\n\t\t\tdispArg: nil,\n\t\t},\n\t\t{\n\t\t\tscenario: \"register without arg and dispatch with arg\",\n\t\t\tsubName: \"test\",\n\t\t\thandler: func(called *bool) interface{} {\n\t\t\t\treturn func() {\n\t\t\t\t\t*called = true\n\t\t\t\t}\n\t\t\t},\n\t\t\tcalled: true,\n\t\t\tdispName: \"test\",\n\t\t\tdispArg: \"foobar\",\n\t\t},\n\t\t{\n\t\t\tscenario: \"register and dispatch with arg\",\n\t\t\tsubName: \"test\",\n\t\t\thandler: func(called *bool) interface{} {\n\t\t\t\treturn func(arg string) {\n\t\t\t\t\t*called = true\n\n\t\t\t\t\tif arg != \"hello\" {\n\t\t\t\t\t\tpanic(\"greet is not hello\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t\tcalled: true,\n\t\t\tdispName: \"test\",\n\t\t\tdispArg: \"hello\",\n\t\t},\n\t\t{\n\t\t\tscenario: \"register and dispatch with bad arg\",\n\t\t\tsubName: \"test\",\n\t\t\thandler: func(called *bool) interface{} {\n\t\t\t\treturn func(arg int) {\n\t\t\t\t\t*called = true\n\t\t\t\t}\n\t\t\t},\n\t\t\tcalled: false,\n\t\t\tdispName: \"test\",\n\t\t\tdispArg: \"hello\",\n\t\t},\n\t\t{\n\t\t\tscenario: \"register non func handler\",\n\t\t\tsubName: \"test\",\n\t\t\thandler: func(called *bool) interface{} { return nil },\n\t\t\tpanic: true,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.scenario, func(t *testing.T) {\n\t\t\tdefer func() {\n\t\t\t\terr := recover()\n\n\t\t\t\tif err != nil && !test.panic {\n\t\t\t\t\tt.Error(err)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tcalled := false\n\n\t\t\tr := 
newRegistry()\n\t\t\tunsub := r.Subscribe(test.subName, test.handler(&called))\n\t\t\tdefer unsub()\n\n\t\t\tr.Dispatch(test.dispName, test.dispArg)\n\n\t\t\tif called != test.called {\n\t\t\t\tt.Error(\"called expected:\", test.called)\n\t\t\t\tt.Error(\"called: \", called)\n\t\t\t}\n\n\t\t\tif test.panic {\n\t\t\t\tt.Error(\"no panic\")\n\t\t\t}\n\t\t})\n\t}\n}",
"func MustRegister(fn interface{}) {\n\tif err := Register(fn); err != nil {\n\t\tpanic(err)\n\t}\n}",
"func (dscMgr *DSCMgrRc) CompleteRegistration() {\n\tif featureflags.IsOVerlayRoutingEnabled() == false {\n\t\treturn\n\t}\n\n\tdscMgr.sm.SetDistributedServiceCardReactor(dscMgr)\n\tdscMgr.sm.EnableSelectivePushForKind(\"DSCConfig\")\n}",
"func (a *Account) GetRegistration() *acme.RegistrationResource { return a.registration }",
"func (m *Manager) Register(args RegisterArgs, reply *string) error {\n\tfmt.Println(\"Registering key\", args.Key)\n\tfmt.Println(\"To tenant\", args.TenantID)\n\n\tm.validKeys[args.Key] = args.TenantID\n\t*reply = \"OK\"\n\treturn nil\n}",
"func Register(context *gin.Context) {\n\tvar requestBody models.UserReq\n\tif context.ShouldBind(&requestBody) == nil {\n\t\tvalidCheck := validation.Validation{}\n\t\tvalidCheck.Required(requestBody.UserName, \"user_name\").Message(\"Must have user name\")\n\t\tvalidCheck.MaxSize(requestBody.UserName, 16, \"user_name\").Message(\"User name length can not exceed 16\")\n\t\tvalidCheck.MinSize(requestBody.UserName, 6, \"user_name\").Message(\"User name length is at least 6\")\n\t\tvalidCheck.Required(requestBody.Password, \"password\").Message(\"Must have password\")\n\t\tvalidCheck.MaxSize(requestBody.Password, 16, \"password\").Message(\"Password length can not exceed 16\")\n\t\tvalidCheck.MinSize(requestBody.Password, 6, \"password\").Message(\"Password length is at least 6\")\n\t\tvalidCheck.Required(requestBody.Email, \"email\").Message(\"Must have email\")\n\t\tvalidCheck.MaxSize(requestBody.Email, 128, \"email\").Message(\"Email can not exceed 128 chars\")\n\n\t\tresponseCode := constant.INVALID_PARAMS\n\t\tif !validCheck.HasErrors() {\n\t\t\tuserEntity := models.UserReq2User(requestBody)\n\t\t\tif err := models.InsertUser(userEntity); err == nil {\n\t\t\t\tresponseCode = constant.USER_ADD_SUCCESS\n\t\t\t} else {\n\t\t\t\tresponseCode = constant.USER_ALREADY_EXIST\n\t\t\t}\n\t\t} else {\n\t\t\tfor _, err := range validCheck.Errors {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}\n\n\t\tcontext.JSON(http.StatusOK, gin.H{\n\t\t\t\"code\": responseCode,\n\t\t\t\"data\": \"\",\n\t\t\t\"msg\": constant.GetMessage(responseCode),\n\t\t})\n\t} else {\n\t\tcontext.JSON(http.StatusOK, gin.H{\n\t\t\t\"code\": 200,\n\t\t\t\"data\": \"\",\n\t\t\t\"msg\": \"添加失败,参数有误\",\n\t\t})\n\t}\n}",
"func (o *TechsupportmanagementEndPointAllOf) HasDeviceRegistration() bool {\n\tif o != nil && o.DeviceRegistration != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func TestRegisterRPC(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\tg := newTestingGateway(t)\n\tdefer g.Close()\n\n\tg.RegisterRPC(\"Foo\", func(conn modules.PeerConn) error { return nil })\n\tdefer func() {\n\t\tif r := recover(); r == nil {\n\t\t\tt.Error(\"Registering the same RPC twice did not cause a panic\")\n\t\t}\n\t}()\n\tg.RegisterRPC(\"Foo\", func(conn modules.PeerConn) error { return nil })\n}",
"func middlewareRegistrationRestrictionTests(t *testing.T,\n\tnode *lntest.HarnessNode) {\n\n\ttestCases := []struct {\n\t\tregistration *lnrpc.MiddlewareRegistration\n\t\texpectedErr string\n\t}{{\n\t\tregistration: &lnrpc.MiddlewareRegistration{\n\t\t\tMiddlewareName: \"foo\",\n\t\t},\n\t\texpectedErr: \"invalid middleware name\",\n\t}, {\n\t\tregistration: &lnrpc.MiddlewareRegistration{\n\t\t\tMiddlewareName: \"itest-interceptor\",\n\t\t\tCustomMacaroonCaveatName: \"foo\",\n\t\t},\n\t\texpectedErr: \"custom caveat name of at least\",\n\t}, {\n\t\tregistration: &lnrpc.MiddlewareRegistration{\n\t\t\tMiddlewareName: \"itest-interceptor\",\n\t\t\tCustomMacaroonCaveatName: \"itest-caveat\",\n\t\t\tReadOnlyMode: true,\n\t\t},\n\t\texpectedErr: \"cannot set read-only and custom caveat name\",\n\t}}\n\n\tfor idx, tc := range testCases {\n\t\ttc := tc\n\n\t\tt.Run(fmt.Sprintf(\"%d\", idx), func(tt *testing.T) {\n\t\t\tinvalidName := registerMiddleware(\n\t\t\t\ttt, node, tc.registration,\n\t\t\t)\n\t\t\t_, err := invalidName.stream.Recv()\n\t\t\trequire.Error(tt, err)\n\t\t\trequire.Contains(tt, err.Error(), tc.expectedErr)\n\n\t\t\tinvalidName.cancel()\n\t\t})\n\t}\n}",
"func (e *endpoints) registerRegistrationAPI(tcpServer, udpServer *grpc.Server) {\n\tr := ®istration.Handler{\n\t\tLog: e.c.Log.WithField(\"subsystem_name\", \"registration_api\"),\n\t\tMetrics: e.c.Metrics,\n\t\tCatalog: e.c.Catalog,\n\t\tTrustDomain: e.c.TrustDomain,\n\t}\n\n\tregistration_pb.RegisterRegistrationServer(tcpServer, r)\n\tregistration_pb.RegisterRegistrationServer(udpServer, r)\n}",
"func (k *KubernetesExecutor) Registered(driver bindings.ExecutorDriver,\n\texecutorInfo *mesos.ExecutorInfo, frameworkInfo *mesos.FrameworkInfo, slaveInfo *mesos.SlaveInfo) {\n\tif k.isDone() {\n\t\treturn\n\t}\n\tlog.Infof(\"Executor %v of framework %v registered with slave %v\\n\",\n\t\texecutorInfo, frameworkInfo, slaveInfo)\n\tif !(&k.state).transition(disconnectedState, connectedState) {\n\t\tlog.Errorf(\"failed to register/transition to a connected state\")\n\t}\n\n\tif executorInfo != nil && executorInfo.Data != nil {\n\t\tk.staticPodsConfig = executorInfo.Data\n\t}\n\n\tif slaveInfo != nil {\n\t\t_, err := node.CreateOrUpdate(k.client, slaveInfo.GetHostname(), node.SlaveAttributesToLabels(slaveInfo.Attributes))\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"cannot update node labels: %v\", err)\n\t\t}\n\t}\n\n\tk.initialRegistration.Do(k.onInitialRegistration)\n}",
"func (o *TechsupportmanagementEndPointAllOf) GetDeviceRegistrationOk() (*AssetDeviceRegistrationRelationship, bool) {\n\tif o == nil || o.DeviceRegistration == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DeviceRegistration, true\n}",
"func (g *Group) Register(units ...Unit) []bool {\n\tg.log = logger.GetLogger(g.name)\n\thasRegistered := make([]bool, len(units))\n\tfor idx := range units {\n\t\tif !g.configured {\n\t\t\t// if RunConfig has been called we can no longer register Config\n\t\t\t// phases of Units\n\t\t\tif c, ok := units[idx].(Config); ok {\n\t\t\t\tg.c = append(g.c, c)\n\t\t\t\thasRegistered[idx] = true\n\t\t\t}\n\t\t}\n\t\tif p, ok := units[idx].(PreRunner); ok {\n\t\t\tg.p = append(g.p, p)\n\t\t\thasRegistered[idx] = true\n\t\t}\n\t\tif s, ok := units[idx].(Service); ok {\n\t\t\tg.s = append(g.s, s)\n\t\t\thasRegistered[idx] = true\n\t\t}\n\t}\n\treturn hasRegistered\n}",
"func (reg *registrar) Register(example interface{}) error {\n\treg.lock.Lock()\n\tdefer reg.lock.Unlock()\n\treturn reg.Registry.Register(example)\n}",
"func TestRegisterRoute(t *testing.T) {\n\t// test cases\n\ttests := []struct {\n\t\tmethod string\n\t\tpath string\n\t\thandler func(*fasthttp.RequestCtx)\n\t\tisErr bool\n\t}{\n\t\t{method: MethodPut, path: \"/admin/welcome\", handler: emptyHandler, isErr: false},\n\t\t{method: MethodPost, path: \"/user/add\", handler: emptyHandler, isErr: false},\n\t\t{method: MethodGet, path: \"/account/get\", handler: emptyHandler, isErr: false},\n\t\t{method: MethodGet, path: \"/account/*\", handler: emptyHandler, isErr: false},\n\t\t{method: MethodDelete, path: \"/account/delete\", handler: emptyHandler, isErr: false},\n\t\t{method: MethodDelete, path: \"/account/delete\", handler: nil, isErr: true},\n\t\t{method: MethodGet, path: \"/account/*/getAccount\", handler: nil, isErr: true},\n\t}\n\n\t// create gearbox instance\n\tgb := new(gearbox)\n\tgb.registeredRoutes = make([]*routeInfo, 0)\n\n\t// counter for valid routes\n\tvalidCounter := 0\n\n\tfor _, tt := range tests {\n\t\terr := gb.registerRoute(tt.method, tt.path, tt.handler)\n\t\tif (err != nil && !tt.isErr) || (err == nil && tt.isErr) {\n\t\t\terrMsg := \"\"\n\n\t\t\t// get error message if there is\n\t\t\tif err != nil {\n\t\t\t\terrMsg = err.Error()\n\t\t\t}\n\n\t\t\tt.Errorf(\"input %v find error %t %s expecting error %t\", tt, err == nil, errMsg, tt.isErr)\n\t\t}\n\n\t\tif !tt.isErr {\n\t\t\tvalidCounter++\n\t\t}\n\t}\n\n\t// check valid counter is the same as count of registered routes\n\tcurrentCount := len(gb.registeredRoutes)\n\tif validCounter != currentCount {\n\t\tt.Errorf(\"input %d find %d expecting %d\", validCounter, currentCount, validCounter)\n\t}\n}",
"func (m *ClientMock) MinimockRegisterResultInspect() {\n\tfor _, e := range m.RegisterResultMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Errorf(\"Expected call to ClientMock.RegisterResult with params: %#v\", *e.params)\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.RegisterResultMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterRegisterResultCounter) < 1 {\n\t\tif m.RegisterResultMock.defaultExpectation.params == nil {\n\t\t\tm.t.Error(\"Expected call to ClientMock.RegisterResult\")\n\t\t} else {\n\t\t\tm.t.Errorf(\"Expected call to ClientMock.RegisterResult with params: %#v\", *m.RegisterResultMock.defaultExpectation.params)\n\t\t}\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcRegisterResult != nil && mm_atomic.LoadUint64(&m.afterRegisterResultCounter) < 1 {\n\t\tm.t.Error(\"Expected call to ClientMock.RegisterResult\")\n\t}\n}",
"func (h *NotificationHub) Registration(ctx context.Context, registrationID string) (raw []byte, registrationResult *RegistrationResult, err error) {\n\tvar (\n\t\tregURL = h.generateAPIURL(path.Join(\"registrations\", registrationID))\n\t)\n\traw, _, err = h.exec(ctx, getMethod, regURL, Headers{}, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\tif err = xml.Unmarshal(raw, ®istrationResult); err != nil {\n\t\treturn\n\t}\n\tregistrationResult.RegistrationContent.normalize()\n\treturn\n}",
"func Register(w http.ResponseWriter, r *http.Request) {\n\n\tPrintln(\"Endpoint Hit: Register\")\n\n\tvar response struct {\n\t\tStatus bool\n\t\tMessage string\n\t}\n\n\t// reqBody, _ := ioutil.ReadAll(r.Body)\n\t// var register_user models.Pet_Owner\n\t// json.Unmarshal(reqBody, ®ister_user)\n\n\t// email := register_user.Email\n\t// password := register_user.Password\n\t// name := register_user.Name\n\n\t//BIKIN VALIDATION\n\n\temail := r.FormValue(\"email\")\n\tpassword := r.FormValue(\"password\")\n\tname := r.FormValue(\"name\")\n\n\tif len(name) == 0 {\n\t\tmessage := \"Ada Kolom Yang Kosong\"\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(200)\n\t\tresponse.Status = false\n\t\tresponse.Message = message\n\t\tjson.NewEncoder(w).Encode(response)\n\t\treturn\n\t}\n\n\tif _, status := ValidateEmail(email); status != true {\n\t\tmessage := \"Format Email Kosong atau Salah\"\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(200)\n\t\tresponse.Status = false\n\t\tresponse.Message = message\n\t\tjson.NewEncoder(w).Encode(response)\n\t\treturn\n\t}\n\n\tif _, status := ValidatePassword(password); status != true {\n\t\tmessage := \"Format Password Kosong atau Salah, Minimal 6 Karakter\"\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(200)\n\t\tresponse.Status = false\n\t\tresponse.Message = message\n\t\tjson.NewEncoder(w).Encode(response)\n\t\treturn\n\t}\n\n\t//cek apakah email user sudah ada di database\n\t//query user dengan email tersebut\n\tstatus, _ := QueryUser(email)\n\n\t// kalo status false , berarti register\n\t// kalo status true, berarti print email terdaftar\n\n\tif status {\n\t\tmessage := \"Email sudah terdaftar\"\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(200)\n\t\tresponse.Status = false\n\t\tresponse.Message = message\n\t\tjson.NewEncoder(w).Encode(response)\n\t\treturn\n\n\t} else {\n\t\t// hashedPassword, err := 
bcrypt.GenerateFromPassword([]byte(password), bcrypt.MinCost)\n\t\thashedPassword := password\n\t\tRole := 1\n\n\t\t// Println(hashedPassword)\n\t\tif len(hashedPassword) != 0 && checkErr(w, r, err) {\n\t\t\tstmt, err := db.Prepare(\"INSERT INTO account (Email, Name, Password, Role) VALUES (?,?,?,?)\")\n\t\t\tif err == nil {\n\t\t\t\t_, err := stmt.Exec(&email, &name, &hashedPassword, &Role)\n\t\t\t\tif err != nil {\n\t\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tmessage := \"Register Succesfull\"\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\tw.WriteHeader(200)\n\t\t\t\tresponse.Status = true\n\t\t\t\tresponse.Message = message\n\t\t\t\tjson.NewEncoder(w).Encode(response)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tmessage := \"Registration Failed\"\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(200)\n\t\t\tresponse.Status = false\n\t\t\tresponse.Message = message\n\t\t\tjson.NewEncoder(w).Encode(response)\n\n\t\t}\n\t}\n}",
"func (registry *interfaceRegistry) EnsureRegistered(impl interface{}) error {\n\tif reflect.ValueOf(impl).Kind() != reflect.Ptr {\n\t\treturn fmt.Errorf(\"%T is not a pointer\", impl)\n\t}\n\n\tif _, found := registry.implInterfaces[reflect.TypeOf(impl)]; !found {\n\t\treturn fmt.Errorf(\"%T does not have a registered interface\", impl)\n\t}\n\n\treturn nil\n}",
"func (mr *MockOobServiceMockRecorder) RegisterActionEvent(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"RegisterActionEvent\", reflect.TypeOf((*MockOobService)(nil).RegisterActionEvent), arg0)\n}",
"func (u *UserHandler) Register(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tvar registerReq domain.RegisterRequest\n\terr := json.NewDecoder(r.Body).Decode(®isterReq)\n\tif err != nil {\n\t\tlog.Warnf(\"Error decode user body when register : %s\", err)\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\terrors := u.Validator.Validate(registerReq)\n\tif errors != nil {\n\t\tlog.Warnf(\"Error validate register : %s\", err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tjson.NewEncoder(w).Encode(errors)\n\t\treturn\n\t}\n\tuser := domain.User{\n\t\tName: registerReq.Name,\n\t\tEmail: registerReq.Email,\n\t\tPassword: registerReq.Password,\n\t}\n\terr = u.UserSerivce.Register(r.Context(), &user)\n\tif err != nil {\n\t\tlog.Warnf(\"Error register user : %s\", err)\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tresponse := SuccessResponse{\n\t\tMessage: \"Success Register User\",\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(response)\n\treturn\n}",
"func Register(write http.ResponseWriter, request *http.Request) {\n\tvar object models.User\n\terr := json.NewDecoder(request.Body).Decode(&object)\n\tif err != nil {\n\t\thttp.Error(write, \"An error ocurred in user register. \"+err.Error(), 400)\n\t\treturn\n\t}\n\n\t//Validations\n\tif len(object.Email) == 0 {\n\t\thttp.Error(write, \"Email is required.\", 400)\n\t\treturn\n\t}\n\tif len(object.Password) < 6 {\n\t\thttp.Error(write, \"Password invalid, must be at least 6 characters.\", 400)\n\t\treturn\n\t}\n\n\t_, userFounded, _ := bd.CheckExistUser(object.Email)\n\n\tif userFounded {\n\t\thttp.Error(write, \"The email has already been registered.\", 400)\n\t\treturn\n\t}\n\n\t_, status, err := bd.InsertRegister(object)\n\n\tif err != nil {\n\t\thttp.Error(write, \"An error occurred in insert register user.\"+err.Error(), 400)\n\t\treturn\n\t}\n\n\tif !status {\n\t\thttp.Error(write, \"Not insert user register.\"+err.Error(), 400)\n\t\treturn\n\t}\n\n\twrite.WriteHeader(http.StatusCreated)\n}",
"func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsTransactor) Register(opts *bind.TransactOpts, name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, amount *big.Int, source uint8) (*types.Transaction, error) {\n\treturn _UpkeepRegistrationRequests.contract.Transact(opts, \"register\", name, encryptedEmail, upkeepContract, gasLimit, adminAddress, checkData, amount, source)\n}",
"func (sched *Scheduler) Registered(driver sched.SchedulerDriver, frameworkId *mesos.FrameworkID, masterInfo *mesos.MasterInfo) {\n\tsched.master = MasterConnStr(masterInfo)\n\tlog.Println(\"Taurus Framework Registered with Master\", sched.master)\n\t// Start the scheduler worker\n\tgo func() {\n\t\tlog.Printf(\"Starting %s framework scheduler worker\", FrameworkName)\n\t\tsched.errChan <- sched.Worker.Start(driver, masterInfo)\n\t}()\n}",
"func (hc *httpClient) Register(info *InstanceInfo) (success bool, err error) {\n\tvar body []byte\n\tbody, err = json.Marshal(info)\n\tif err != nil {\n\t\treturn\n\t}\n\tparam := &requestParam{\n\t\tURL: hc.serviceUrl + uriApps + info.Instance.App,\n\t\tMethod: http.MethodPost,\n\t\tHeaders: hc.headers,\n\t\tBody: string(body),\n\t\tUsername: hc.username,\n\t\tPassword: hc.password,\n\t}\n\tvar statusCode int\n\t_, statusCode, err = handleHttpRequest(param)\n\tif err != nil {\n\t\thc.handleError(err)\n\t}\n\tif statusCode == http.StatusOK || statusCode == http.StatusNoContent {\n\t\tsuccess = true\n\t}\n\n\treturn\n}",
"func (mr *MockHubMockRecorder) RegisteredClients() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"RegisteredClients\", reflect.TypeOf((*MockHub)(nil).RegisteredClients))\n}",
"func Register(c *gin.Context) {\n\tvar registerRequest types.RegisterRequest\n\terr := c.BindJSON(®isterRequest)\n\tif err != nil {\n\t\tresponse := types.APIErrResponse{Msg: \"Please check your data\", Success: false, Err: err.Error()}\n\t\tc.JSON(http.StatusBadRequest, response)\n\t\treturn\n\t}\n\n\t// Validate register request struct\n\t_, err = govalidator.ValidateStruct(registerRequest)\n\tif err != nil {\n\t\terrMap := govalidator.ErrorsByField(err)\n\t\tresponse := types.APIErrResponse{Msg: \"Please check your data\", Success: false, Err: errMap}\n\t\tc.JSON(http.StatusBadRequest, response)\n\t\treturn\n\t}\n\n\t// Maybe add same tag in govalidator\n\tif registerRequest.Password != registerRequest.PasswordAgain {\n\t\terrMap := make(map[string]string)\n\t\terrMap[\"password_again\"] = \"Password again must be equal to password\"\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"msg\": \"Please check your data\", \"err\": errMap})\n\t\treturn\n\t}\n\n\t// hash password\n\tbytePassword := []byte(registerRequest.Password)\n\thashedPassword := hashPassword(bytePassword)\n\n\t// Save user\n\ttx, err := models.DB.Begin()\n\tdefer tx.Rollback()\n\n\tuser := models.User{}\n\tuser.Email = registerRequest.Email\n\tuser.Password = hashedPassword\n\tuser.IsAdmin = 0\n\tif err = user.Save(tx); err != nil {\n\t\tresponse := types.APIErrResponse{Msg: \"Please check your data\", Success: false, Err: err.Error()}\n\t\tc.JSON(http.StatusNotFound, response)\n\t} else {\n\t\ttx.Commit()\n\t\tresponse := types.APIResponse{Msg: \"Register user successfully\", Success: true}\n\t\tc.JSON(http.StatusOK, response)\n\t}\n}"
] | [
"0.6324035",
"0.6104856",
"0.60354984",
"0.6025592",
"0.59889084",
"0.5973536",
"0.595364",
"0.5939176",
"0.59270525",
"0.59216005",
"0.5921067",
"0.5891129",
"0.5848316",
"0.5830676",
"0.58157265",
"0.58123446",
"0.58084095",
"0.57710874",
"0.57505393",
"0.5747051",
"0.57046664",
"0.5702086",
"0.56968975",
"0.56950843",
"0.56934893",
"0.56924874",
"0.56924874",
"0.56924874",
"0.5690212",
"0.56812865",
"0.56616664",
"0.5658534",
"0.56533355",
"0.5648434",
"0.56426495",
"0.5640439",
"0.5624993",
"0.5592196",
"0.55718374",
"0.55506307",
"0.5540373",
"0.55221534",
"0.5513259",
"0.5512379",
"0.5488571",
"0.54850304",
"0.5468531",
"0.54537016",
"0.5435428",
"0.543271",
"0.54315734",
"0.5427417",
"0.5421234",
"0.5413997",
"0.54089934",
"0.5392226",
"0.53792024",
"0.5375255",
"0.5374012",
"0.537207",
"0.5369696",
"0.53695655",
"0.5352316",
"0.5351508",
"0.5349284",
"0.5349182",
"0.534717",
"0.533631",
"0.53332144",
"0.53327876",
"0.5324536",
"0.5313824",
"0.53124857",
"0.53022206",
"0.5289912",
"0.52839184",
"0.52837837",
"0.52817345",
"0.5271078",
"0.52697915",
"0.5267168",
"0.52546704",
"0.5254355",
"0.52535933",
"0.5251936",
"0.5247192",
"0.52449346",
"0.52408713",
"0.52341485",
"0.52196276",
"0.5216522",
"0.5215678",
"0.5209137",
"0.5208415",
"0.52026755",
"0.52018905",
"0.5200701",
"0.51957774",
"0.51956624",
"0.51915264"
] | 0.7010462 | 0 |
Popup is a wrapper around gtk_popover_popup(). | Popup — это обертка вокруг gtk_popover_popup(). | func (v *Popover) Popup() {
C.gtk_popover_popup(v.native())
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *Popover) Popdown() {\n\tC.gtk_popover_popdown(v.native())\n}",
"func Popup(name string) *PopupWidget {\n\treturn &PopupWidget{\n\t\tname: Context.FontAtlas.RegisterString(name),\n\t\tflags: 0,\n\t\tlayout: nil,\n\t}\n}",
"func NewPopupMenu(parent Widget, b Base, mo ...MenuOption) *PopupMenu {\n\tpm := &PopupMenu{\n\t\tPanel: NewPanel(nil, b),\n\t}\n\tInitWidget(parent, pm)\n\tpad := pm.MyTheme().Pad\n\tbtH := b.Rect.H() - pad\n\tbtW := b.Rect.W() - pad*2\n\tpm.Rect = R(pm.Rect.Min.X, pm.Rect.Max.Y-pm.Rect.H()*float64(len(mo))-pad,\n\t\tpm.Rect.Max.X, pm.Rect.Max.Y)\n\ty := pm.Rect.H() - pad\n\tfor _, o := range mo {\n\t\tbt := NewPushButton(pm, Base{\n\t\t\tRect: R(pad, y-btH, pad+btW, y),\n\t\t\tText: o.Text,\n\t\t\tImage: o.Image,\n\t\t\tDisabled: o.Disabled,\n\t\t})\n\t\ty -= btH + pad\n\t\to := o\n\t\tbt.OnPress = func() {\n\t\t\tfmt.Printf(\"popup menu onpress before popdown o=%+v\\n\", o)\n\t\t\tbt.Surface.PopDownTo(pm)\n\t\t\tfmt.Printf(\"popup menu onpress after popdown o=%+v\\n\", o)\n\t\t\tif o.Handler != nil {\n\t\t\t\tfmt.Printf(\"popup menu onpress before handler\\n\")\n\t\t\t\tclose := o.Handler(pm)\n\t\t\t\tfmt.Printf(\"popup menu onpress after handler =%v\\n\", close)\n\t\t\t\tif close {\n\t\t\t\t\tpm.Surface.PopDownTo(nil)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tow := optWidget{\n\t\t\topt: o,\n\t\t\tw: bt,\n\t\t}\n\t\tpm.opts = append(pm.opts, ow)\n\t}\n\tpm.Surface.PopUp(pm)\n\treturn pm\n}",
"func NewPopup(p tview.Primitive) *Popup {\n\t_, _, width, height := p.GetRect()\n\tpopup := &Popup{\n\t\tflex: tview.NewFlex().\n\t\t\tAddItem(nil, 0, 1, false).\n\t\t\tAddItem(tview.NewFlex().\n\t\t\t\tSetDirection(tview.FlexRow).\n\t\t\t\tAddItem(nil, 0, 1, false).\n\t\t\t\tAddItem(p, height, 1, false).\n\t\t\t\tAddItem(nil, 0, 1, false), width, 1, false).\n\t\t\tAddItem(nil, 0, 1, false),\n\t}\n\tpopup.content = p\n\treturn popup\n}",
"func Popup(prompt string, options ...string) (selection string, err error) {\n\t//selection, err = defaultDmenu().Popup(prompt, options...)\n\tdmenu := defaultDmenu()\n\tselection, err = dmenu.Popup(prompt, options...)\n\treturn\n}",
"func MakePopover(ptr unsafe.Pointer) *NSPopover {\n\tif ptr == nil {\n\t\treturn nil\n\t}\n\treturn &NSPopover{\n\t\tNSResponder: *MakeResponder(ptr),\n\t}\n}",
"func (s *State) OpenPopup(bounds image.Rectangle, d Component) Popup {\n\tif s.root != nil {\n\t\ts.focused = d\n\t\ts.update = true\n\t\treturn s.root.OpenPopup(bounds.Add(s.bounds.Min), d)\n\t}\n\treturn nil\n}",
"func PopupModal(name string) *PopupModalWidget {\n\treturn &PopupModalWidget{\n\t\tname: Context.FontAtlas.RegisterString(name),\n\t\topen: nil,\n\t\tflags: WindowFlagsNoResize,\n\t\tlayout: nil,\n\t}\n}",
"func OpenPopup(name string) {\n\timgui.OpenPopup(name)\n}",
"func (v *MenuButton) GetPopover() *Popover {\n\tc := C.gtk_menu_button_get_popover(v.native())\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn wrapPopover(glib.Take(unsafe.Pointer(c)))\n}",
"func (v *MenuButton) SetPopover(popover *Popover) {\n\tC.gtk_menu_button_set_popover(v.native(), popover.toWidget())\n}",
"func (v *MenuButton) SetPopup(menu IMenu) {\n\tC.gtk_menu_button_set_popup(v.native(), menu.toWidget())\n}",
"func (p *PopUpMenu) Show() {\n\tp.overlay.Show()\n\tp.Menu.Show()\n}",
"func (v *MenuButton) SetUsePopover(setting bool) {\n\tC.gtk_menu_button_set_use_popover(v.native(), gbool(setting))\n}",
"func (v *ScaleButton) GetPopup() (*Widget, error) {\n\tc := C.gtk_scale_button_get_popup(v.native())\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\tobj := glib.Take(unsafe.Pointer(c))\n\treturn wrapWidget(obj), nil\n}",
"func (v *MenuButton) GetPopup() *Menu {\n\tc := C.gtk_menu_button_get_popup(v.native())\n\tif c == nil {\n\t\treturn nil\n\t}\n\tobj := glib.Take(unsafe.Pointer(c))\n\treturn wrapMenu(obj)\n}",
"func NewPopUpMenu(menu *fyne.Menu, c fyne.Canvas) *PopUpMenu {\n\tp := &PopUpMenu{Menu: NewMenu(menu), canvas: c}\n\tp.Menu.Resize(p.Menu.MinSize())\n\tp.Menu.customSized = true\n\to := widget.NewOverlayContainer(p.Menu, c, p.Dismiss)\n\to.Resize(o.MinSize())\n\tp.overlay = o\n\tp.OnDismiss = func() {\n\t\tp.Hide()\n\t}\n\treturn p\n}",
"func (p *PopupWidget) Build() {\n\tif imgui.BeginPopup(p.name, int(p.flags)) {\n\t\tp.layout.Build()\n\t\timgui.EndPopup()\n\t}\n}",
"func (d *Dmenu) Popup(prompt string, options ...string) (selection string, err error) {\n\tprocessedArgs := []string{}\n\tfor _, arg := range d.arguments {\n\t\tvar parg string\n\t\tif strings.Contains(arg, \"%s\") {\n\t\t\tparg = fmt.Sprintf(arg, prompt)\n\t\t} else {\n\t\t\tparg = arg\n\t\t}\n\n\t\tprocessedArgs = append(processedArgs, parg)\n\t}\n\tcmd := exec.Command(d.command, processedArgs...)\n\n\tstdin, err := cmd.StdinPipe()\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error getting pipe: %s\", err)\n\t}\n\n\tgo func(stdin io.WriteCloser) {\n\t\tdefer stdin.Close()\n\t\tio.WriteString(stdin, strings.Join(options, \"\\n\"))\n\t}(stdin)\n\n\tbyteOut, err := cmd.Output()\n\n\tif err != nil {\n\t\tif exiterr, ok := err.(*exec.ExitError); ok {\n\t\t\tif status, ok := exiterr.Sys().(syscall.WaitStatus); ok {\n\t\t\t\tif status.ExitStatus() == 1 {\n\t\t\t\t\terr = &EmptySelectionError{}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn \"\", err\n\t}\n\n\t// Cast and trim\n\tselection = strings.TrimSpace(string(byteOut))\n\n\treturn\n}",
"func (p *Popup) ParentWindow() IWindow {\n\treturn p.parentWindow\n}",
"func PopStyle() {\n\timgui.PopStyleVar()\n}",
"func (v *MenuButton) GetUsePopover() bool {\n\tc := C.gtk_menu_button_get_use_popover(v.native())\n\treturn gobool(c)\n}",
"func (gui *Gui) createConfirmationPanel(g *gocui.Gui, currentView *gocui.View, title, prompt string, handleConfirm, handleClose func(*gocui.Gui, *gocui.View) error) error {\n\treturn gui.createPopupPanel(g, currentView, title, prompt, handleConfirm, handleClose)\n}",
"func (d *BrowserViewDelegate) OnPopupBrowserViewCreated(browserView, popupBrowserView *BrowserView, isDevtools int32) int32 {\n\treturn lookupBrowserViewDelegateProxy(d.Base().Base()).OnPopupBrowserViewCreated(d, browserView, popupBrowserView, isDevtools)\n}",
"func (p *PopupModalWidget) Build() {\n\tif imgui.BeginPopupModalV(p.name, p.open, int(p.flags)) {\n\t\tp.layout.Build()\n\t\timgui.EndPopup()\n\t}\n}",
"func NewPopTable(h SQLHandle) *PopTable {\n\treturn &PopTable{h}\n}",
"func CloseCurrentPopup() {\n\timgui.CloseCurrentPopup()\n}",
"func (p *PopupWidget) Flags(flags WindowFlags) *PopupWidget {\n\tp.flags = flags\n\treturn p\n}",
"func (p *PopUpMenu) ShowAtPosition(pos fyne.Position) {\n\tp.Move(pos)\n\tp.Show()\n}",
"func (p *PopupWidget) Layout(widgets ...Widget) *PopupWidget {\n\tp.layout = Layout(widgets)\n\treturn p\n}",
"func TestGlCanvas_ResizeWithPopUpOverlay(t *testing.T) {\n\tw := createWindow(\"Test\")\n\tw.SetPadded(false)\n\n\tcontent := widget.NewLabel(\"Content\")\n\tover := widget.NewPopUp(widget.NewLabel(\"Over\"), w.Canvas())\n\tw.SetContent(content)\n\tw.Canvas().Overlays().Add(over)\n\n\tsize := fyne.NewSize(200, 100)\n\toverContentSize := over.Content.Size()\n\tassert.NotEqual(t, size, content.Size())\n\tassert.NotEqual(t, size, over.Size())\n\tassert.NotEqual(t, size, overContentSize)\n\n\tw.Resize(size)\n\tassert.Equal(t, size, content.Size(), \"canvas content is resized\")\n\tassert.Equal(t, size, over.Size(), \"canvas overlay is resized\")\n\tassert.Equal(t, overContentSize, over.Content.Size(), \"canvas overlay content is _not_ resized\")\n}",
"func (*CMsgShowPopup) Descriptor() ([]byte, []int) {\n\treturn file_steam_htmlmessages_proto_rawDescGZIP(), []int{50}\n}",
"func (p *Popup) RefreshRelativePlacement() {\n\tp.parentWindow.RefreshRelativePlacement()\n\tp.visible = p.visible && p.parentWindow.VisibleRecursive()\n\tx, y := p.parentWindow.Position()\n\tp.x = x + p.anchorX\n\tp.y = y + p.anchorY - p.anchorHeight\n}",
"func ShowPopUpMenuAtPosition(menu *fyne.Menu, c fyne.Canvas, pos fyne.Position) {\n\tm := NewPopUpMenu(menu, c)\n\tm.ShowAtPosition(pos)\n}",
"func (*CMsgDOTAPopup) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{8}\n}",
"func PopStyleColor() {\n\timgui.PopStyleColor()\n}",
"func PopItemWidth() {\n\timgui.PopItemWidth()\n}",
"func (p *PopUpMenu) Hide() {\n\tp.overlay.Hide()\n\tp.Menu.Hide()\n}",
"func (p *PopUpMenu) Resize(size fyne.Size) {\n\twidget.MoveWidget(&p.Base, p, p.adjustedPosition(p.Position(), size))\n\tp.Menu.Resize(size)\n}",
"func TestCompletionEntry_ShowMenu(t *testing.T) {\n\tentry := createEntry()\n\twin := test.NewWindow(entry)\n\twin.Resize(fyne.NewSize(500, 300))\n\tdefer win.Close()\n\n\tentry.SetText(\"init\")\n\tassert.True(t, entry.popupMenu.Visible())\n\n}",
"func (p *PopUpMenu) Move(pos fyne.Position) {\n\twidget.MoveWidget(&p.Base, p, p.adjustedPosition(pos, p.Size()))\n}",
"func (p *Popup) SetParentWindow(w *Window) {\n\tp.parentWindow = w\n}",
"func (x *CMsgDOTAPopup_PopupID) UnmarshalJSON(b []byte) error {\n\tnum, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CMsgDOTAPopup_PopupID(num)\n\treturn nil\n}",
"func (d *BrowserViewDelegate) GetDelegateForPopupBrowserView(browserView *BrowserView, settings *BrowserSettings, client *Client, isDevtools int32) *BrowserViewDelegate {\n\treturn lookupBrowserViewDelegateProxy(d.Base().Base()).GetDelegateForPopupBrowserView(d, browserView, settings, client, isDevtools)\n}",
"func (*CMsgHidePopup) Descriptor() ([]byte, []int) {\n\treturn file_steam_htmlmessages_proto_rawDescGZIP(), []int{51}\n}",
"func Open(w *Widget, container gowid.ISettableComposite, width gowid.IWidgetDimension, app gowid.IApp) {\n\tw.ov = overlay.New(w, container.SubWidget(),\n\t\tgowid.VAlignBottom{}, gowid.RenderWithUnits{U: 3}, // Intended to mean use as much vertical space as you need\n\t\tgowid.HAlignLeft{Margin: 5, MarginRight: 5}, width,\n\t\toverlay.Options{\n\t\t\tIgnoreLowerStyle: true,\n\t\t},\n\t)\n\n\tif _, ok := width.(gowid.IRenderFixed); ok {\n\t\tw.SetContentWidth(gowid.RenderFixed{}, app) // fixed or weight:1, ratio:0.5\n\t} else {\n\t\tw.SetContentWidth(gowid.RenderWithWeight{W: 1}, app) // fixed or weight:1, ratio:0.5\n\t}\n\tw.SetSavedSubWidget(container.SubWidget(), app)\n\tw.SetSavedContainer(container, app)\n\tcontainer.SetSubWidget(w.ov, app)\n\tw.SetOpen(true, app)\n}",
"func ShowPreferencesDialog(parent gtk.IWindow, onMpdReconnect, onQueueColumnsChanged, onPlayerSettingChanged func()) {\n\t// Create the dialog\n\td := &PrefsDialog{\n\t\tonQueueColumnsChanged: onQueueColumnsChanged,\n\t\tonPlayerSettingChanged: onPlayerSettingChanged,\n\t}\n\n\t// Load the dialog layout and map the widgets\n\tbuilder, err := NewBuilder(prefsGlade)\n\tif err == nil {\n\t\terr = builder.BindWidgets(d)\n\t}\n\n\t// Check for errors\n\tif errCheck(err, \"ShowPreferencesDialog(): failed to initialise dialog\") {\n\t\tutil.ErrorDialog(parent, fmt.Sprint(glib.Local(\"Failed to load UI widgets\"), err))\n\t\treturn\n\t}\n\tdefer d.PreferencesDialog.Destroy()\n\n\t// Set the dialog up\n\td.PreferencesDialog.SetTransientFor(parent)\n\n\t// Remove the 2-pixel \"aura\" around the notebook\n\tif box, err := d.PreferencesDialog.GetContentArea(); err == nil {\n\t\tbox.SetBorderWidth(0)\n\t}\n\n\t// Map the handlers to callback functions\n\tbuilder.ConnectSignals(map[string]interface{}{\n\t\t\"on_PreferencesDialog_map\": d.onMap,\n\t\t\"on_Setting_change\": d.onSettingChange,\n\t\t\"on_MpdReconnect\": onMpdReconnect,\n\t\t\"on_ColumnMoveUpToolButton_clicked\": d.onColumnMoveUp,\n\t\t\"on_ColumnMoveDownToolButton_clicked\": d.onColumnMoveDown,\n\t})\n\n\t// Run the dialog\n\td.PreferencesDialog.Run()\n}",
"func (s *State) HasPopups() bool {\n\treturn s.root != nil && s.root.HasPopups()\n}",
"func (*CMsgPopupHTMLWindow) Descriptor() ([]byte, []int) {\n\treturn file_steam_htmlmessages_proto_rawDescGZIP(), []int{54}\n}",
"func (p *PopupModalWidget) IsOpen(open *bool) *PopupModalWidget {\n\tp.open = open\n\treturn p\n}",
"func Pop() Action {\n\treturn ActionPop{}\n}",
"func (s *State) ClosePopups() {\n\tif s.root != nil {\n\t\ts.root.ClosePopups()\n\t\ts.update = true\n\t}\n}",
"func (i *menuBarItem) Show() {\n\twidget.ShowWidget(&i.Base, i)\n}",
"func (m *ModalState) Show(now time.Time, w layout.Widget) {\n\tm.content = w\n\tm.Appear(now)\n}",
"func Modal(gtx C, max unit.Value, w layout.Widget) D {\n\treturn layout.Stack{}.Layout(\n\t\tgtx,\n\t\tlayout.Stacked(func(gtx C) D {\n\t\t\treturn util.Rect{\n\t\t\t\tSize: gtx.Constraints.Max,\n\t\t\t\tColor: color.NRGBA{A: 200},\n\t\t\t}.Layout(gtx)\n\t\t}),\n\t\tlayout.Stacked(func(gtx C) D {\n\t\t\treturn Centered(gtx, func(gtx C) D {\n\t\t\t\tcs := >x.Constraints\n\t\t\t\tif cs.Max.X > gtx.Px(max) {\n\t\t\t\t\tcs.Max.X = gtx.Px(max)\n\t\t\t\t}\n\t\t\t\treturn w(gtx)\n\t\t\t})\n\t\t}),\n\t)\n}",
"func PopFont() {\n\timgui.PopFont()\n}",
"func (win *Window) WindowPresent() {\n\twin.Candy().Guify(\"gtk_window_present\", win)\n}",
"func NewPiPanelGTK() *pipanel.Frontend {\n\treturn &pipanel.Frontend{\n\t\tAlerter: gtkttsalerter.New(),\n\t\tAudioPlayer: beeper.New(),\n\t\tDisplayManager: pitouch.New(),\n\t\tPowerManager: systemdpwr.New(),\n\t}\n}",
"func (t *MealRequestServlet) ProcessPopup(r *http.Request) *ApiResult {\n\tpopup_id_s := r.Form.Get(\"popupId\")\n\tpopup_id, err := strconv.ParseInt(popup_id_s, 10, 64)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn APIError(\"Malformed popup ID\", 400)\n\t}\n\tpopup, err := GetPopupById(t.db, popup_id)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn APIError(\"Invalid popup ID\", 400)\n\t}\n\tif popup.Processed == 1 {\n\t\treturn APISuccess(\"Already processed\")\n\t}\n\tif err = t.process_popup(popup); err != nil {\n\t\tlog.Println(err)\n\t\treturn APIError(\"Failed to process popup\", 400)\n\t}\n\treturn APISuccess(\"OK\")\n}",
"func (p *PopupModalWidget) Flags(flags WindowFlags) *PopupModalWidget {\n\tp.flags = flags\n\treturn p\n}",
"func (c *component) Tooltip() *Button {\n\treturn c.tooltip\n}",
"func OverlayNew() *Overlay {\n\tc := C.gtk_overlay_new()\n\treturn wrapOverlay(unsafe.Pointer(c))\n}",
"func (w *WidgetImplement) Tooltip() string {\n\treturn w.tooltip\n}",
"func (r *CampaignRow) GetPopName() string { return *r.Data.PopName }",
"func (r *CampaignRow) GetPopName() string { return *r.Data.PopName }",
"func (v *Overlay) native() *C.GtkOverlay {\n\tif v == nil {\n\t\treturn nil\n\t}\n\treturn (*C.GtkOverlay)(v.Native())\n}",
"func (p *Popup) AnchorHeight() int {\n\treturn p.anchorHeight\n}",
"func (p *PageStack) StackPopped(o, top model.Component) {\n\to.Stop()\n\tp.StackTop(top)\n}",
"func (me TAttlistOtherIDSource) IsPop() bool { return me.String() == \"POP\" }",
"func TestCompletionEntry_WithEmptyOptions(t *testing.T) {\n\tentry := createEntry()\n\twin := test.NewWindow(entry)\n\twin.Resize(fyne.NewSize(500, 300))\n\tdefer win.Close()\n\n\tentry.OnChanged = func(s string) {\n\t\tentry.SetOptions([]string{})\n\t\tentry.ShowCompletion()\n\t}\n\n\tentry.SetText(\"foo\")\n\tassert.Nil(t, entry.popupMenu) // popupMenu should not being created\n}",
"func HeapPop(h heap.Interface) interface{} {\n\tvar result = make(chan interface{})\n\theapPopChan <- heapPopChanMsg{\n\t\th: h,\n\t\tresult: result,\n\t}\n\treturn <-result\n}",
"func (v *Popover) SetTransitionsEnabled(transitionsEnabled bool) {\n\tC.gtk_popover_set_transitions_enabled(v.native(), gbool(transitionsEnabled))\n}",
"func InputDialog(opt ...interface{}) string {\n b, _ := gtk.BuilderNewFromFile(\"glade/input-dialog.glade\")\n d := GetDialog(b, \"input_dialog\")\n entry := GetEntry(b, \"input_entry\")\n\n for i, v := range(opt) {\n if i % 2 == 0 {\n key := v.(string)\n switch key {\n case \"title\":\n d.SetTitle(opt[i+1].(string))\n case \"label\":\n l := GetLabel(b,\"input_label\")\n l.SetText(opt[i+1].(string))\n case \"password-mask\":\n entry.SetInvisibleChar(opt[i+1].(rune))\n entry.SetVisibility(false)\n case \"default\":\n entry.SetText(opt[i+1].(string))\n }\n }\n }\n\n output := \"\"\n entry.Connect(\"activate\", func (o *gtk.Entry) { d.Response(gtk.RESPONSE_OK) } )\n btok := GetButton(b, \"bt_ok\")\n btok.Connect(\"clicked\", func (b *gtk.Button) { d.Response(gtk.RESPONSE_OK) } )\n\n btcancel := GetButton(b, \"bt_cancel\")\n btcancel.Connect(\"clicked\", func (b *gtk.Button) { d.Response(gtk.RESPONSE_CANCEL) } )\n\n code := d.Run()\n if code == gtk.RESPONSE_OK {\n output, _ = entry.GetText()\n }\n\n d.Destroy()\n return output\n}",
"func (g *Grid) Show() {\n\tC.uiControlShow(g.c)\n}",
"func BuildModal(p tview.Primitive, width, height int) tview.Primitive {\n\tcpnt := tview.NewFlex().SetDirection(tview.FlexRow).\n\t\tAddItem(nil, 0, 1, false).\n\t\tAddItem(p, height, 1, false).\n\t\tAddItem(nil, 0, 1, false)\n\n\treturn tview.NewFlex().\n\t\tAddItem(nil, 0, 1, false).\n\t\tAddItem(cpnt, width, 1, false).\n\t\tAddItem(nil, 0, 1, false)\n}",
"func EditPerformerWindow() *EditPerformer {\n\twin := SetupPopupWindow(\"Edit Performer\", 350, 216)\n\tbox := SetupBox()\n\tnb := SetupNotebook()\n\ttb := SetupToolbar()\n\tsave := SetupToolButtonLabel(\"Save\")\n\n\tgroupContent := NewGroupContent()\n\tpersonContent := NewPersonContent()\n\n\tsave.SetExpand(true)\n\tsave.SetVExpand(true)\n\n\ttb.Add(save)\n\ttb.SetHExpand(true)\n\n\tnb.AppendPage(personContent.grid, SetupLabel(\"Person\"))\n\tnb.AppendPage(groupContent.grid, SetupLabel(\"Group\"))\n\n\tbox.Add(nb)\n\tbox.Add(tb)\n\n\twin.Add(box)\n\twin.ShowAll()\n\n\treturn &EditPerformer{\n\t\tGroupContent: groupContent,\n\t\tNotebook: nb,\n\t\tPersonContent: personContent,\n\t\tSaveB: save,\n\t\tWin: win,\n\t}\n}",
"func (p *PopUpMenu) CreateRenderer() fyne.WidgetRenderer {\n\treturn p.overlay.CreateRenderer()\n}",
"func SetOnPop(fn func(v interface{})) {\n\n\texec.OnPop = fn\n}",
"func (r *CampaignRow) SetPopName(popName string) { r.Data.PopName = &popName }",
"func (r *CampaignRow) SetPopName(popName string) { r.Data.PopName = &popName }",
"func (ptr *Application) onClickMenuEditPaste() {\n\tptr.textEditor.Paste()\n}",
"func (p *InteractiveMultiselectPrinter) Show(text ...string) ([]string, error) {\n\t// should be the first defer statement to make sure it is executed last\n\t// and all the needed cleanup can be done before\n\tcancel, exit := internal.NewCancelationSignal(p.OnInterruptFunc)\n\tdefer exit()\n\n\tif len(text) == 0 || Sprint(text[0]) == \"\" {\n\t\ttext = []string{p.DefaultText}\n\t}\n\n\tp.text = p.TextStyle.Sprint(text[0])\n\tp.fuzzySearchMatches = append([]string{}, p.Options...)\n\n\tif p.MaxHeight == 0 {\n\t\tp.MaxHeight = DefaultInteractiveMultiselect.MaxHeight\n\t}\n\n\tmaxHeight := p.MaxHeight\n\tif maxHeight > len(p.fuzzySearchMatches) {\n\t\tmaxHeight = len(p.fuzzySearchMatches)\n\t}\n\n\tif len(p.Options) == 0 {\n\t\treturn nil, fmt.Errorf(\"no options provided\")\n\t}\n\n\tp.displayedOptions = append([]string{}, p.fuzzySearchMatches[:maxHeight]...)\n\tp.displayedOptionsStart = 0\n\tp.displayedOptionsEnd = maxHeight\n\n\tfor _, option := range p.DefaultOptions {\n\t\tp.selectOption(option)\n\t}\n\n\tarea, err := DefaultArea.Start(p.renderSelectMenu())\n\tdefer area.Stop()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not start area: %w\", err)\n\t}\n\n\tif p.Filter && (p.KeyConfirm == keys.Space || p.KeySelect == keys.Space) {\n\t\treturn nil, fmt.Errorf(\"if filter/search is active, keys.Space can not be used for KeySelect or KeyConfirm\")\n\t}\n\n\tarea.Update(p.renderSelectMenu())\n\n\tcursor.Hide()\n\tdefer cursor.Show()\n\terr = keyboard.Listen(func(keyInfo keys.Key) (stop bool, err error) {\n\t\tkey := keyInfo.Code\n\n\t\tif p.MaxHeight > len(p.fuzzySearchMatches) {\n\t\t\tmaxHeight = len(p.fuzzySearchMatches)\n\t\t} else {\n\t\t\tmaxHeight = p.MaxHeight\n\t\t}\n\n\t\tswitch key {\n\t\tcase p.KeyConfirm:\n\t\t\tif len(p.fuzzySearchMatches) == 0 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\tarea.Update(p.renderFinishedMenu())\n\t\t\treturn true, nil\n\t\tcase p.KeySelect:\n\t\t\tif len(p.fuzzySearchMatches) > 0 {\n\t\t\t\t// Select option if 
not already selected\n\t\t\t\tp.selectOption(p.fuzzySearchMatches[p.selectedOption])\n\t\t\t}\n\t\t\tarea.Update(p.renderSelectMenu())\n\t\tcase keys.RuneKey:\n\t\t\tif p.Filter {\n\t\t\t\t// Fuzzy search for options\n\t\t\t\t// append to fuzzy search string\n\t\t\t\tp.fuzzySearchString += keyInfo.String()\n\t\t\t\tp.selectedOption = 0\n\t\t\t\tp.displayedOptionsStart = 0\n\t\t\t\tp.displayedOptionsEnd = maxHeight\n\t\t\t\tp.displayedOptions = append([]string{}, p.fuzzySearchMatches[:maxHeight]...)\n\t\t\t}\n\t\t\tarea.Update(p.renderSelectMenu())\n\t\tcase keys.Space:\n\t\t\tif p.Filter {\n\t\t\t\tp.fuzzySearchString += \" \"\n\t\t\t\tp.selectedOption = 0\n\t\t\t\tarea.Update(p.renderSelectMenu())\n\t\t\t}\n\t\tcase keys.Backspace:\n\t\t\t// Remove last character from fuzzy search string\n\t\t\tif len(p.fuzzySearchString) > 0 {\n\t\t\t\t// Handle UTF-8 characters\n\t\t\t\tp.fuzzySearchString = string([]rune(p.fuzzySearchString)[:len([]rune(p.fuzzySearchString))-1])\n\t\t\t}\n\n\t\t\tif p.fuzzySearchString == \"\" {\n\t\t\t\tp.fuzzySearchMatches = append([]string{}, p.Options...)\n\t\t\t}\n\n\t\t\tp.renderSelectMenu()\n\n\t\t\tif len(p.fuzzySearchMatches) > p.MaxHeight {\n\t\t\t\tmaxHeight = p.MaxHeight\n\t\t\t} else {\n\t\t\t\tmaxHeight = len(p.fuzzySearchMatches)\n\t\t\t}\n\n\t\t\tp.selectedOption = 0\n\t\t\tp.displayedOptionsStart = 0\n\t\t\tp.displayedOptionsEnd = maxHeight\n\t\t\tp.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)\n\n\t\t\tarea.Update(p.renderSelectMenu())\n\t\tcase keys.Left:\n\t\t\t// Unselect all options\n\t\t\tp.selectedOptions = []int{}\n\t\t\tarea.Update(p.renderSelectMenu())\n\t\tcase keys.Right:\n\t\t\t// Select all options\n\t\t\tp.selectedOptions = []int{}\n\t\t\tfor i := 0; i < len(p.Options); i++ {\n\t\t\t\tp.selectedOptions = append(p.selectedOptions, i)\n\t\t\t}\n\t\t\tarea.Update(p.renderSelectMenu())\n\t\tcase keys.Up:\n\t\t\tif len(p.fuzzySearchMatches) == 0 
{\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\tif p.selectedOption > 0 {\n\t\t\t\tp.selectedOption--\n\t\t\t\tif p.selectedOption < p.displayedOptionsStart {\n\t\t\t\t\tp.displayedOptionsStart--\n\t\t\t\t\tp.displayedOptionsEnd--\n\t\t\t\t\tif p.displayedOptionsStart < 0 {\n\t\t\t\t\t\tp.displayedOptionsStart = 0\n\t\t\t\t\t\tp.displayedOptionsEnd = maxHeight\n\t\t\t\t\t}\n\t\t\t\t\tp.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tp.selectedOption = len(p.fuzzySearchMatches) - 1\n\t\t\t\tp.displayedOptionsStart = len(p.fuzzySearchMatches) - maxHeight\n\t\t\t\tp.displayedOptionsEnd = len(p.fuzzySearchMatches)\n\t\t\t\tp.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)\n\t\t\t}\n\n\t\t\tarea.Update(p.renderSelectMenu())\n\t\tcase keys.Down:\n\t\t\tif len(p.fuzzySearchMatches) == 0 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\tp.displayedOptions = p.fuzzySearchMatches[:maxHeight]\n\t\t\tif p.selectedOption < len(p.fuzzySearchMatches)-1 {\n\t\t\t\tp.selectedOption++\n\t\t\t\tif p.selectedOption >= p.displayedOptionsEnd {\n\t\t\t\t\tp.displayedOptionsStart++\n\t\t\t\t\tp.displayedOptionsEnd++\n\t\t\t\t\tp.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tp.selectedOption = 0\n\t\t\t\tp.displayedOptionsStart = 0\n\t\t\t\tp.displayedOptionsEnd = maxHeight\n\t\t\t\tp.displayedOptions = append([]string{}, p.fuzzySearchMatches[p.displayedOptionsStart:p.displayedOptionsEnd]...)\n\t\t\t}\n\n\t\t\tarea.Update(p.renderSelectMenu())\n\t\tcase keys.CtrlC:\n\t\t\tcancel()\n\t\t\treturn true, nil\n\t\t}\n\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\tError.Println(err)\n\t\treturn nil, fmt.Errorf(\"failed to start keyboard listener: %w\", err)\n\t}\n\n\tvar result []string\n\tfor _, selectedOption := range p.selectedOptions 
{\n\t\tresult = append(result, p.Options[selectedOption])\n\t}\n\n\treturn result, nil\n}",
"func newDialogFromNative(obj unsafe.Pointer) interface{} {\n\td := &Dialog{}\n\td.object = C.to_GtkDialog(obj)\n\n\tif gobject.IsObjectFloating(d) {\n\t\tgobject.RefSink(d)\n\t} else {\n\t\tgobject.Ref(d)\n\t}\n\td.Window = newWindowFromNative(obj).(*Window)\n\tdialogFinalizer(d)\n\n\treturn d\n}",
"func (ctx *PQContext) Pop(params []string) apis.IResponse {\n\tvar err *mpqerr.ErrorResponse\n\tvar limit int64 = 1\n\tvar asyncId string\n\n\tpopWaitTimeout := ctx.pq.config.PopWaitTimeout\n\n\tfor len(params) > 0 {\n\t\tswitch params[0] {\n\t\tcase PRM_LIMIT:\n\t\t\tparams, limit, err = mpqproto.ParseInt64Param(params, 1, conf.CFG_PQ.MaxPopBatchSize)\n\t\tcase PRM_POP_WAIT:\n\t\t\tparams, popWaitTimeout, err = mpqproto.ParseInt64Param(params, 0, conf.CFG_PQ.MaxPopWaitTimeout)\n\t\tcase PRM_ASYNC:\n\t\t\tparams, asyncId, err = mpqproto.ParseItemId(params)\n\t\tdefault:\n\t\t\treturn mpqerr.UnknownParam(params[0])\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif len(asyncId) > 0 {\n\t\treturn ctx.asyncPop(asyncId, 0, popWaitTimeout, limit, false)\n\t} else {\n\t\treturn ctx.pq.Pop(0, popWaitTimeout, limit, false)\n\t}\n}",
"func (canvas *Canvas) Pop() {\n\twriteCommand(canvas.contents, \"Q\")\n}",
"func PopTimeoutOption(t time.Duration) Option {\n\treturn func(config *config) {\n\t\tconfig.popTimeout = t\n\t}\n}",
"func newMessageDialogFromNative(obj unsafe.Pointer) interface{} {\n\td := &MessageDialog{}\n\td.object = C.to_GtkMessageDialog(obj)\n\n\tif gobject.IsObjectFloating(d) {\n\t\tgobject.RefSink(d)\n\t} else {\n\t\tgobject.Ref(d)\n\t}\n\td.Dialog = newDialogFromNative(obj).(*Dialog)\n\tmessageDialogFinalizer(d)\n\treturn d\n}",
"func (self *TSPAlgorithm) RandomPop() *Population {\n\tp := Population{\n\t\tMutThreshold: 0.5,\n\t\tCrossThreshold: 0.95,\n\t}\n\n\tp.Chromosomes = make([]Chromosome, 0)\n\n\tfor i := 0; i < self.PopSize; i++ {\n\t\tnewChromo := &Chromosome{\n\t\t\tLocations: self.Locations,\n\t\t\tMatrix: &self.Matrix,\n\t\t\tId: i + 1,\n\t\t}\n\n\t\tp.Chromosomes = append(p.Chromosomes, *newChromo)\n\t}\n\n\t// Randomize\n\tfor i, _ := range p.Chromosomes {\n\t\tswap := rand.Intn(15)\n\n\t\tfor j := 0; j < swap; j++ {\n\t\t\tp.Chromosomes[i].RandSwap()\n\t\t}\n\t}\n\n\tp.IDCounter = self.PopSize + 1\n\n\treturn &p\n}",
"func PopStyleV(count int) {\n\timgui.PopStyleVarV(count)\n}",
"func NewPopTestSuite(packageName PackageName, opts ...PopTestSuiteOption) *PopTestSuite {\n\t// Create a standardized PopTestSuite object.\n\tpts := &PopTestSuite{\n\t\tPackageName: packageName,\n\t}\n\t// provide a way to enable pop debugging when running tests\n\tif envy.Get(\"POP_TEST_DEBUG\", \"\") != \"\" {\n\t\tpop.Debug = true\n\t}\n\n\t// Apply the user-supplied options to the PopTestSuite object.\n\tfor _, opt := range opts {\n\t\topt(pts)\n\t}\n\n\tif pts.useHighPrivsPSQLRole && pts.usePerTestTransaction {\n\t\tlog.Fatal(\"Cannot use both high priv psql and per test transaction\")\n\t}\n\n\tpts.getDbConnectionDetails()\n\n\tlog.Printf(\"package %s is attempting to connect to database %s\", packageName, pts.pgConnDetails.Database)\n\tpgConn, err := pop.NewConnection(pts.pgConnDetails)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tif err = pgConn.Open(); err != nil {\n\t\tlog.Panic(err)\n\t}\n\tpts.pgConn = pgConn\n\n\tif pts.usePerTestTransaction {\n\t\tpts.txnTestDb = make(map[string]*pop.Connection)\n\t\tpts.findOrCreatePerTestTransactionDb()\n\t\treturn pts\n\t}\n\n\t// set up database connections for non per test transactions\n\t// which may or may not be have useHighPrivsPSQLRole set\n\tpts.highPrivConn, err = pop.NewConnection(pts.highPrivConnDetails)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tif err = pts.highPrivConn.Open(); err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tpts.lowPrivConn, err = pop.NewConnection(pts.lowPrivConnDetails)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tif err := pts.lowPrivConn.Open(); err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tlog.Printf(\"attempting to clone database %s to %s... 
\", pts.dbNameTemplate, pts.lowPrivConnDetails.Database)\n\tif err := cloneDatabase(pgConn, pts.dbNameTemplate, pts.lowPrivConnDetails.Database); err != nil {\n\t\tlog.Panicf(\"failed to clone database '%s' to '%s': %#v\", pts.dbNameTemplate, pts.lowPrivConnDetails.Database, err)\n\t}\n\tlog.Println(\"success\")\n\n\t// The db is already truncated as part of the test setup\n\n\tif pts.useHighPrivsPSQLRole {\n\t\t// Disconnect the low privileged connection and replace its\n\t\t// connection and connection details with those of the high\n\t\t// privileged connection.\n\t\tif err := pts.lowPrivConn.Close(); err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\n\t\tpts.lowPrivConn = pts.highPrivConn\n\t\tpts.lowPrivConnDetails = pts.highPrivConnDetails\n\t}\n\n\treturn pts\n}",
"func (m *_NotificationMessage) InitializeParent(parent ExtensionObjectDefinition) {}",
"func (p printHandler) PopEvent(eventID string, elapsed time.Duration) {\n\tfmt.Printf(\"[%s] %v\\n\", eventID, elapsed)\n}",
"func imgTopReleaseEvent() {\n\tAbout.Width = 400\n\tAbout.ImageOkButtonSize = 24\n\tAbout.Show()\n}",
"func (CMsgDOTAPopup_PopupID) EnumDescriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{8, 0}\n}",
"func (ps *PubSub[Item]) Pub(msg Item, topics ...string) {\n\tps.msgChan <- msgenvelope[Item]{op: pubBlock, topics: topics, msg: msg}\n}",
"func D_TestRunPop3(t *testing.T) {\n\t// PopCmd(\"pop3.163.com\", \"110\", \"midoks\", \"mm123123\")\n}",
"func (recv *TypeInterface) PeekParent() TypeInterface {\n\tretC := C.g_type_interface_peek_parent((C.gpointer)(recv.native))\n\tretGo := *TypeInterfaceNewFromC(unsafe.Pointer(retC))\n\n\treturn retGo\n}",
"func (r *Reply) JSONP(data interface{}, callback string) *Reply {\n\tr.ContentType(ahttp.ContentTypeJavascript.String())\n\tr.Render(&jsonpRender{Data: data, Callback: callback})\n\treturn r\n}",
"func (v *Clipboard) native() *C.GtkClipboard {\n\tif v == nil {\n\t\treturn nil\n\t}\n\treturn (*C.GtkClipboard)(v.Native())\n}",
"func (*CMsgSizePopup) Descriptor() ([]byte, []int) {\n\treturn file_steam_htmlmessages_proto_rawDescGZIP(), []int{52}\n}"
] | [
"0.7245628",
"0.67946184",
"0.671131",
"0.6632335",
"0.63890284",
"0.58852094",
"0.58588827",
"0.5829165",
"0.57027376",
"0.5697189",
"0.5695752",
"0.5623324",
"0.5471221",
"0.54265916",
"0.5425965",
"0.5336367",
"0.5220583",
"0.51320916",
"0.5081396",
"0.48694453",
"0.4869443",
"0.4862005",
"0.4694318",
"0.46190685",
"0.45853195",
"0.45745683",
"0.45422015",
"0.44996348",
"0.44385904",
"0.44113487",
"0.4355177",
"0.43517572",
"0.43429074",
"0.43127164",
"0.42951638",
"0.4290073",
"0.42889687",
"0.42594817",
"0.42594725",
"0.42281136",
"0.41910055",
"0.41787332",
"0.41501492",
"0.41392922",
"0.41247913",
"0.4101847",
"0.40880063",
"0.4075095",
"0.4068951",
"0.40434566",
"0.3992103",
"0.39903787",
"0.39557835",
"0.39317125",
"0.3930848",
"0.39030138",
"0.38976806",
"0.38828918",
"0.38514236",
"0.3832508",
"0.38284832",
"0.3826524",
"0.3824534",
"0.3769019",
"0.3769019",
"0.37687197",
"0.37365267",
"0.37259427",
"0.37169856",
"0.37124673",
"0.37005037",
"0.36952183",
"0.36832872",
"0.36725685",
"0.3668044",
"0.3666276",
"0.36574352",
"0.3657381",
"0.3641232",
"0.3641232",
"0.36300737",
"0.36243197",
"0.3622541",
"0.36211237",
"0.3616029",
"0.36124146",
"0.36094257",
"0.3609249",
"0.3599086",
"0.35988864",
"0.3590089",
"0.35640186",
"0.35491174",
"0.35472023",
"0.3540134",
"0.35278735",
"0.35199794",
"0.35150108",
"0.35149348",
"0.3511931"
] | 0.8746411 | 0 |
Popdown is a wrapper around gtk_popover_popdown(). | Popdown — это обертка вокруг gtk_popover_popdown(). | func (v *Popover) Popdown() {
C.gtk_popover_popdown(v.native())
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *Popover) Popup() {\n\tC.gtk_popover_popup(v.native())\n}",
"func NewPopupMenu(parent Widget, b Base, mo ...MenuOption) *PopupMenu {\n\tpm := &PopupMenu{\n\t\tPanel: NewPanel(nil, b),\n\t}\n\tInitWidget(parent, pm)\n\tpad := pm.MyTheme().Pad\n\tbtH := b.Rect.H() - pad\n\tbtW := b.Rect.W() - pad*2\n\tpm.Rect = R(pm.Rect.Min.X, pm.Rect.Max.Y-pm.Rect.H()*float64(len(mo))-pad,\n\t\tpm.Rect.Max.X, pm.Rect.Max.Y)\n\ty := pm.Rect.H() - pad\n\tfor _, o := range mo {\n\t\tbt := NewPushButton(pm, Base{\n\t\t\tRect: R(pad, y-btH, pad+btW, y),\n\t\t\tText: o.Text,\n\t\t\tImage: o.Image,\n\t\t\tDisabled: o.Disabled,\n\t\t})\n\t\ty -= btH + pad\n\t\to := o\n\t\tbt.OnPress = func() {\n\t\t\tfmt.Printf(\"popup menu onpress before popdown o=%+v\\n\", o)\n\t\t\tbt.Surface.PopDownTo(pm)\n\t\t\tfmt.Printf(\"popup menu onpress after popdown o=%+v\\n\", o)\n\t\t\tif o.Handler != nil {\n\t\t\t\tfmt.Printf(\"popup menu onpress before handler\\n\")\n\t\t\t\tclose := o.Handler(pm)\n\t\t\t\tfmt.Printf(\"popup menu onpress after handler =%v\\n\", close)\n\t\t\t\tif close {\n\t\t\t\t\tpm.Surface.PopDownTo(nil)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tow := optWidget{\n\t\t\topt: o,\n\t\t\tw: bt,\n\t\t}\n\t\tpm.opts = append(pm.opts, ow)\n\t}\n\tpm.Surface.PopUp(pm)\n\treturn pm\n}",
"func (v *MenuButton) GetPopover() *Popover {\n\tc := C.gtk_menu_button_get_popover(v.native())\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn wrapPopover(glib.Take(unsafe.Pointer(c)))\n}",
"func Popup(prompt string, options ...string) (selection string, err error) {\n\t//selection, err = defaultDmenu().Popup(prompt, options...)\n\tdmenu := defaultDmenu()\n\tselection, err = dmenu.Popup(prompt, options...)\n\treturn\n}",
"func (v *MenuButton) SetPopover(popover *Popover) {\n\tC.gtk_menu_button_set_popover(v.native(), popover.toWidget())\n}",
"func MakePopover(ptr unsafe.Pointer) *NSPopover {\n\tif ptr == nil {\n\t\treturn nil\n\t}\n\treturn &NSPopover{\n\t\tNSResponder: *MakeResponder(ptr),\n\t}\n}",
"func (v *MenuButton) SetUsePopover(setting bool) {\n\tC.gtk_menu_button_set_use_popover(v.native(), gbool(setting))\n}",
"func Popup(name string) *PopupWidget {\n\treturn &PopupWidget{\n\t\tname: Context.FontAtlas.RegisterString(name),\n\t\tflags: 0,\n\t\tlayout: nil,\n\t}\n}",
"func (v *MenuButton) GetUsePopover() bool {\n\tc := C.gtk_menu_button_get_use_popover(v.native())\n\treturn gobool(c)\n}",
"func PopStyle() {\n\timgui.PopStyleVar()\n}",
"func (v *MenuButton) GetPopup() *Menu {\n\tc := C.gtk_menu_button_get_popup(v.native())\n\tif c == nil {\n\t\treturn nil\n\t}\n\tobj := glib.Take(unsafe.Pointer(c))\n\treturn wrapMenu(obj)\n}",
"func (d *Dmenu) Popup(prompt string, options ...string) (selection string, err error) {\n\tprocessedArgs := []string{}\n\tfor _, arg := range d.arguments {\n\t\tvar parg string\n\t\tif strings.Contains(arg, \"%s\") {\n\t\t\tparg = fmt.Sprintf(arg, prompt)\n\t\t} else {\n\t\t\tparg = arg\n\t\t}\n\n\t\tprocessedArgs = append(processedArgs, parg)\n\t}\n\tcmd := exec.Command(d.command, processedArgs...)\n\n\tstdin, err := cmd.StdinPipe()\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error getting pipe: %s\", err)\n\t}\n\n\tgo func(stdin io.WriteCloser) {\n\t\tdefer stdin.Close()\n\t\tio.WriteString(stdin, strings.Join(options, \"\\n\"))\n\t}(stdin)\n\n\tbyteOut, err := cmd.Output()\n\n\tif err != nil {\n\t\tif exiterr, ok := err.(*exec.ExitError); ok {\n\t\t\tif status, ok := exiterr.Sys().(syscall.WaitStatus); ok {\n\t\t\t\tif status.ExitStatus() == 1 {\n\t\t\t\t\terr = &EmptySelectionError{}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn \"\", err\n\t}\n\n\t// Cast and trim\n\tselection = strings.TrimSpace(string(byteOut))\n\n\treturn\n}",
"func NewPopTable(h SQLHandle) *PopTable {\n\treturn &PopTable{h}\n}",
"func NewPopup(p tview.Primitive) *Popup {\n\t_, _, width, height := p.GetRect()\n\tpopup := &Popup{\n\t\tflex: tview.NewFlex().\n\t\t\tAddItem(nil, 0, 1, false).\n\t\t\tAddItem(tview.NewFlex().\n\t\t\t\tSetDirection(tview.FlexRow).\n\t\t\t\tAddItem(nil, 0, 1, false).\n\t\t\t\tAddItem(p, height, 1, false).\n\t\t\t\tAddItem(nil, 0, 1, false), width, 1, false).\n\t\t\tAddItem(nil, 0, 1, false),\n\t}\n\tpopup.content = p\n\treturn popup\n}",
"func NewPopUpMenu(menu *fyne.Menu, c fyne.Canvas) *PopUpMenu {\n\tp := &PopUpMenu{Menu: NewMenu(menu), canvas: c}\n\tp.Menu.Resize(p.Menu.MinSize())\n\tp.Menu.customSized = true\n\to := widget.NewOverlayContainer(p.Menu, c, p.Dismiss)\n\to.Resize(o.MinSize())\n\tp.overlay = o\n\tp.OnDismiss = func() {\n\t\tp.Hide()\n\t}\n\treturn p\n}",
"func (wd *remoteWD) ButtonDown() error {\n\treturn wd.voidCommand(\"/session/%s/buttondown\", nil)\n}",
"func (v *MenuButton) SetPopup(menu IMenu) {\n\tC.gtk_menu_button_set_popup(v.native(), menu.toWidget())\n}",
"func (p *PodsWidget) SelectPageDown() {\n\tp.ScrollPageDown()\n}",
"func (v *ScaleButton) GetPopup() (*Widget, error) {\n\tc := C.gtk_scale_button_get_popup(v.native())\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\tobj := glib.Take(unsafe.Pointer(c))\n\treturn wrapWidget(obj), nil\n}",
"func (p *PageStack) StackPopped(o, top model.Component) {\n\to.Stop()\n\tp.StackTop(top)\n}",
"func (*CMsgDOTAPopup) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{8}\n}",
"func (canvas *Canvas) Pop() {\n\twriteCommand(canvas.contents, \"Q\")\n}",
"func Pop() Action {\n\treturn ActionPop{}\n}",
"func (c *Cursor) TopDown(listener Listener, dir Direction, breakmode Breakmode) interface{} {\n\tc.startNode = c.current\n\tT().Debugf(\"TopDown starting at node %v\", c.current.Symbol())\n\tvalue := c.traverseTopDown(listener, dir, breakmode, 0)\n\treturn value\n}",
"func (w *ListWidget) MoveDown() {\n\tw.ChangeSelection(w.CurrentSelection() + 1)\n}",
"func (list *List) HighlightDown() *List {\n\n\t// If there is no where to go\n\tif len(list.options) < list.Index+1 {\n\t\treturn list\n\t}\n\n\tlist.Cursor.EraseCurrentLine()\n\tlist.PrintOption(list.options[list.Index-1])\n\n\tlist.Cursor.MoveDown(1)\n\n\tlist.Cursor.EraseCurrentLine()\n\tlist.PrintHighlight(list.options[list.Index])\n\n\tlist.Index++\n\n\treturn list\n}",
"func PopSub() {\n\toutput.EmitLn(\"SUB (SP)+,D0\")\n}",
"func (h *handler) pageDown(g *gocui.Gui, v *gocui.View) error {\n\tif nextIdx := h.findNextPrevLine(h.maxLines - 1); nextIdx != -1 {\n\t\tlastInstr := h.lines[nextIdx].data.(*binch.Instruction)\n\t\tif nextInstr := h.project.FindNextInstruction(lastInstr.Address); nextInstr != nil {\n\t\t\th.drawFromTop(nextInstr.Address)\n\t\t}\n\t} else {\n\t\tlog.Panicln(\"pageDown fail\")\n\t}\n\treturn nil\n}",
"func (s *State) OpenPopup(bounds image.Rectangle, d Component) Popup {\n\tif s.root != nil {\n\t\ts.focused = d\n\t\ts.update = true\n\t\treturn s.root.OpenPopup(bounds.Add(s.bounds.Min), d)\n\t}\n\treturn nil\n}",
"func (t *tui) arrowDown(g *gotui.Gui, v *gotui.View) error {\n\tcx, cy := v.Cursor()\n\tlines := v.ViewBufferLines()\n\tlineCount := len(v.ViewBufferLines()) - 1\n\tif lineCount == -1 {\n\t\treturn nil\n\t}\n\tlastLineLen := len(lines[lineCount])\n\tif cx == lastLineLen || (cx == 0 && cy == 0 && !t.sent.onLast()) {\n\t\tif cy == lineCount {\n\t\t\tv.Clear()\n\t\t\tv.SetCursor(0, 0)\n\t\t\tif !t.sent.onLast() {\n\t\t\t\tfmt.Fprint(v, t.sent.Forward().Text)\n\t\t\t}\n\t\t} else {\n\t\t\tif lineCount == 0 {\n\t\t\t\tv.SetCursor(cx, cy+1)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif cy == lineCount {\n\t\t\tv.SetCursor(lastLineLen, cy)\n\t\t} else {\n\t\t\tv.SetCursor(cx, cy+1)\n\t\t}\n\t}\n\treturn nil\n}",
"func SetOnPop(fn func(v interface{})) {\n\n\texec.OnPop = fn\n}",
"func MRThumbDown(pid interface{}, id int) error {\n\t_, _, err := lab.AwardEmoji.CreateMergeRequestAwardEmoji(pid, id, &gitlab.CreateAwardEmojiOptions{\n\t\tName: \"thumbsdown\",\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (o *openList) Pop() interface{} {\n\topn := *o\n\tit := opn[len(opn)-1]\n\tit.pqindex = -1\n\t*o = opn[:len(opn)-1]\n\treturn it\n}",
"func PopStyleColor() {\n\timgui.PopStyleColor()\n}",
"func (view *DetailsView) CursorDown() error {\n\treturn CursorDown(view.gui, view.view)\n}",
"func (g *Godis) SPop(key string) string {\n\treturn g.cmdString(\"SPOP\", key)\n}",
"func (dw *DrawingWand) PopClipPath() {\n\tC.DrawPopClipPath(dw.dw)\n}",
"func (c *Client) SPop(_ context.Context, key string) *redis.StringCmd {\n\treturn c.cli.SPop(key)\n}",
"func PopItemWidth() {\n\timgui.PopItemWidth()\n}",
"func HeapPop(h heap.Interface) interface{} {\n\tvar result = make(chan interface{})\n\theapPopChan <- heapPopChanMsg{\n\t\th: h,\n\t\tresult: result,\n\t}\n\treturn <-result\n}",
"func (ctx *PQContext) Pop(params []string) apis.IResponse {\n\tvar err *mpqerr.ErrorResponse\n\tvar limit int64 = 1\n\tvar asyncId string\n\n\tpopWaitTimeout := ctx.pq.config.PopWaitTimeout\n\n\tfor len(params) > 0 {\n\t\tswitch params[0] {\n\t\tcase PRM_LIMIT:\n\t\t\tparams, limit, err = mpqproto.ParseInt64Param(params, 1, conf.CFG_PQ.MaxPopBatchSize)\n\t\tcase PRM_POP_WAIT:\n\t\t\tparams, popWaitTimeout, err = mpqproto.ParseInt64Param(params, 0, conf.CFG_PQ.MaxPopWaitTimeout)\n\t\tcase PRM_ASYNC:\n\t\t\tparams, asyncId, err = mpqproto.ParseItemId(params)\n\t\tdefault:\n\t\t\treturn mpqerr.UnknownParam(params[0])\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif len(asyncId) > 0 {\n\t\treturn ctx.asyncPop(asyncId, 0, popWaitTimeout, limit, false)\n\t} else {\n\t\treturn ctx.pq.Pop(0, popWaitTimeout, limit, false)\n\t}\n}",
"func (s *openCellStack) pop() (openCell, error) {\n\tif len(*s) == 0 {\n\t\treturn openCell{palletLength, palletWidth}, errStackEmpty\n\t}\n\tc := (*s)[len(*s)-1]\n\t(*s) = (*s)[:len(*s)-1]\n\treturn c, nil\n}",
"func (p *PopUpMenu) Show() {\n\tp.overlay.Show()\n\tp.Menu.Show()\n}",
"func NewPopTestSuite(packageName PackageName, opts ...PopTestSuiteOption) *PopTestSuite {\n\t// Create a standardized PopTestSuite object.\n\tpts := &PopTestSuite{\n\t\tPackageName: packageName,\n\t}\n\t// provide a way to enable pop debugging when running tests\n\tif envy.Get(\"POP_TEST_DEBUG\", \"\") != \"\" {\n\t\tpop.Debug = true\n\t}\n\n\t// Apply the user-supplied options to the PopTestSuite object.\n\tfor _, opt := range opts {\n\t\topt(pts)\n\t}\n\n\tif pts.useHighPrivsPSQLRole && pts.usePerTestTransaction {\n\t\tlog.Fatal(\"Cannot use both high priv psql and per test transaction\")\n\t}\n\n\tpts.getDbConnectionDetails()\n\n\tlog.Printf(\"package %s is attempting to connect to database %s\", packageName, pts.pgConnDetails.Database)\n\tpgConn, err := pop.NewConnection(pts.pgConnDetails)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tif err = pgConn.Open(); err != nil {\n\t\tlog.Panic(err)\n\t}\n\tpts.pgConn = pgConn\n\n\tif pts.usePerTestTransaction {\n\t\tpts.txnTestDb = make(map[string]*pop.Connection)\n\t\tpts.findOrCreatePerTestTransactionDb()\n\t\treturn pts\n\t}\n\n\t// set up database connections for non per test transactions\n\t// which may or may not be have useHighPrivsPSQLRole set\n\tpts.highPrivConn, err = pop.NewConnection(pts.highPrivConnDetails)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tif err = pts.highPrivConn.Open(); err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tpts.lowPrivConn, err = pop.NewConnection(pts.lowPrivConnDetails)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tif err := pts.lowPrivConn.Open(); err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tlog.Printf(\"attempting to clone database %s to %s... 
\", pts.dbNameTemplate, pts.lowPrivConnDetails.Database)\n\tif err := cloneDatabase(pgConn, pts.dbNameTemplate, pts.lowPrivConnDetails.Database); err != nil {\n\t\tlog.Panicf(\"failed to clone database '%s' to '%s': %#v\", pts.dbNameTemplate, pts.lowPrivConnDetails.Database, err)\n\t}\n\tlog.Println(\"success\")\n\n\t// The db is already truncated as part of the test setup\n\n\tif pts.useHighPrivsPSQLRole {\n\t\t// Disconnect the low privileged connection and replace its\n\t\t// connection and connection details with those of the high\n\t\t// privileged connection.\n\t\tif err := pts.lowPrivConn.Close(); err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\n\t\tpts.lowPrivConn = pts.highPrivConn\n\t\tpts.lowPrivConnDetails = pts.highPrivConnDetails\n\t}\n\n\treturn pts\n}",
"func PopFont() {\n\timgui.PopFont()\n}",
"func (x *CMsgDOTAPopup_PopupID) UnmarshalJSON(b []byte) error {\n\tnum, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CMsgDOTAPopup_PopupID(num)\n\treturn nil\n}",
"func Pop(h Interface) interface{} {\n\tn := h.Len() - 1\n\th.Swap(0, n)\n\tdown(h, 0, n)\n\treturn h.Pop()\n}",
"func OpenPopup(name string) {\n\timgui.OpenPopup(name)\n}",
"func (x* xmlWriter) tagPop() {\n\tif x.tagOpen {\n\t\tx._tagEnd(\"/>\", false)\n\t\tx.pop()\n\t\treturn\n\t}\n\tx.iFmt(\"</%s>\", x.pop())\n\tx.newLine()\n}",
"func (hw *HighlightedWriter) ScrollDown() {\n\thw.delegate.ScrollDown()\n}",
"func Pop(ctx echo.Context) error {\n\n\treq := types.PopRequest{}\n\n\terr := ctx.Bind(&req)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tif !registration.IsAgentRegistered(req.Token) {\n\t\treturn ctx.JSON(403, types.ValidateResponse{Success: false, Message: \"Security Token Not Recognized\"})\n\t}\n\n\tmsg, err := GetFromQueue(req.Queue)\n\n\tdata := types.Message{}\n\n\tjson.Unmarshal(msg, &data)\n\n\tresp := types.PopResponse{Message: data.Message, Queue: req.Queue}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ctx.JSON(200, resp)\n}",
"func (pq *MaxPQ) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\t*pq = old[0 : n-1]\n\treturn item\n}",
"func (me TAttlistOtherIDSource) IsPop() bool { return me.String() == \"POP\" }",
"func (r *CampaignRow) GetPopName() string { return *r.Data.PopName }",
"func (r *CampaignRow) GetPopName() string { return *r.Data.PopName }",
"func (dw *DrawingWand) PopPattern() {\n\tC.MagickDrawPopPattern(dw.dw)\n}",
"func (*CMsgMouseDown) Descriptor() ([]byte, []int) {\n\treturn file_steam_htmlmessages_proto_rawDescGZIP(), []int{3}\n}",
"func (p *Parser) pop(end xml.EndElement) (*Node, error) {\n\tif p.node.Parent == nil {\n\t\treturn nil, fmt.Errorf(\"xmlpicker: unexpected end element </%s>\", end.Name.Local)\n\t}\n\tpopped := p.node\n\tstart := popped.StartElement\n\tif start.Name.Local != end.Name.Local {\n\t\treturn nil, fmt.Errorf(\"xmlpicker: element <%s> closed by </%s>\", start.Name.Local, end.Name.Local)\n\t}\n\tif p.NSFlag != NSStrip && start.Name.Space != end.Name.Space {\n\t\treturn nil, fmt.Errorf(\"xmlpicker: element <%s> in space %s closed by </%s> in space %s\", start.Name.Local, start.Name.Space, end.Name.Local, end.Name.Space)\n\t}\n\tp.node = popped.Parent\n\treturn popped, nil\n}",
"func (pq *MinPQ) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\t*pq = old[0 : n-1]\n\treturn item\n}",
"func (ds *DrawStack) Pop() {\n\tds.toPop++\n}",
"func (*CMsgHidePopup) Descriptor() ([]byte, []int) {\n\treturn file_steam_htmlmessages_proto_rawDescGZIP(), []int{51}\n}",
"func (d *Deck) Pop() *Card {\n\tcard := *d.Cards[len(d.Cards)-1]\n\td.Cards = d.Cards[:len(d.Cards)-1]\n\treturn &card\n}",
"func (p printHandler) PopEvent(eventID string, elapsed time.Duration) {\n\tfmt.Printf(\"[%s] %v\\n\", eventID, elapsed)\n}",
"func (*CMsgShowPopup) Descriptor() ([]byte, []int) {\n\treturn file_steam_htmlmessages_proto_rawDescGZIP(), []int{50}\n}",
"func (hw *HighlightedWriter) EraseDown() {\n\thw.delegate.EraseDown()\n}",
"func (dw *DrawingWand) PopDefs() {\n\tC.MagickDrawPopDefs(dw.dw)\n}",
"func Pop(h *PriorityQueue) *Item {\n\tn := h.Len() - 1\n\th.Swap(0, n)\n\tdown(h, 0, n)\n\treturn h.Pop()\n}",
"func (r *CampaignRow) SetPopName(popName string) { r.Data.PopName = &popName }",
"func (r *CampaignRow) SetPopName(popName string) { r.Data.PopName = &popName }",
"func (self *SinglePad) ProcessButtonDown(buttonCode int, value interface{}) {\n self.Object.Call(\"processButtonDown\", buttonCode, value)\n}",
"func PopupModal(name string) *PopupModalWidget {\n\treturn &PopupModalWidget{\n\t\tname: Context.FontAtlas.RegisterString(name),\n\t\topen: nil,\n\t\tflags: WindowFlagsNoResize,\n\t\tlayout: nil,\n\t}\n}",
"func newPopChannel() popChannel {\n\t// The pop channel is stacked, so only a buffer of 1 is required\n\t// see http://gowithconfidence.tumblr.com/post/31426832143/stacked-channels\n\treturn make(chan []*URLContext, 1)\n}",
"func (v *View) PageDown() {\n\tif len(v.buf.lines)-(v.topline+v.height) > v.height {\n\t\tv.ScrollDown(v.height)\n\t} else {\n\t\tif len(v.buf.lines) >= v.height {\n\t\t\tv.topline = len(v.buf.lines) - v.height\n\t\t}\n\t}\n}",
"func LPop(conn redigo.Conn, key string, dest interface{}) (bool, error) {\n\treturn Result(conn.Do(\"LPOP\", key)).StringToJSON(dest)\n}",
"func (obj *stackframe) IsPop() bool {\n\treturn obj.isPop\n}",
"func (p *PopUpMenu) Move(pos fyne.Position) {\n\twidget.MoveWidget(&p.Base, p, p.adjustedPosition(pos, p.Size()))\n}",
"func (c *QueuedChan) Pop() interface{} {\n\tselect {\n\tcase i := <-c.popc:\n\t\treturn i\n\tcase <-c.close:\n\t\treturn nil\n\t}\n}",
"func (s *htmlState) pop(tag string) {\n\tn := len(s.openTags)\n\tif n == 0 {\n\t\tif s.ignore&issueStructure == 0 {\n\t\t\ts.err(fmt.Errorf(\"no open tags left to close %s\", tag))\n\t\t}\n\t\treturn\n\t}\n\tpop := s.openTags[n-1]\n\ts.openTags = s.openTags[:n-1]\n\tif s.ignore&issueStructure != 0 {\n\t\treturn\n\t}\n\tif pop != tag && !s.badNesting { // report broken structure just once.\n\t\ts.err(fmt.Errorf(\"tag '%s' closed by '%s'\", pop, tag))\n\t\ts.badNesting = true\n\t}\n}",
"func (c *Client) LPop(key string) (*string, error) {\n\treq := cmd([]string{\"lpop\", key})\n\tres, err := c.processRequest(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.bulkString(res)\n}",
"func (self *SinglePad) OnDownCallback() interface{}{\n return self.Object.Get(\"onDownCallback\")\n}",
"func (d *PrefsDialog) onColumnMoveDown() {\n\td.moveSelectedColumnRow(false)\n}",
"func (p *Proc) Pop() {\n\tp.stk.load()\n}",
"func PopStyleV(count int) {\n\timgui.PopStyleVarV(count)\n}",
"func (m Modifiers) OptionDown() bool {\n\treturn m&OptionModifier == OptionModifier\n}",
"func D_TestRunPop3(t *testing.T) {\n\t// PopCmd(\"pop3.163.com\", \"110\", \"midoks\", \"mm123123\")\n}",
"func (v *Layer) PageDown() error {\n\tstep := int(v.height()) + 1\n\ttargetLayerIndex := v.LayerIndex + step\n\n\tif targetLayerIndex > len(v.Layers) {\n\t\tstep -= targetLayerIndex - (len(v.Layers) - 1)\n\t}\n\n\tif step > 0 {\n\t\terr := CursorStep(v.gui, v.view, step)\n\t\tif err == nil {\n\t\t\treturn v.SetCursor(v.LayerIndex + step)\n\t\t}\n\t}\n\treturn nil\n}",
"func Pop[T any](h Interface[T]) T {\n\tn := h.Len() - 1\n\th.Swap(0, n)\n\tdown(h, 0, n)\n\treturn h.Pop()\n}",
"func (m *Mouse) RightBtnDown() {\n\tm.RightBtn.Down()\n\tm.WriteJSON()\n}",
"func (g *Grid) PartialDown() []Partial {\n\tvar r []Partial\n\n\tfor i := 0; i < g.Size; i++ {\n\t\tpartial := \"\"\n\t\tfor j := 0; j < g.Size; j++ {\n\t\t\tt, ok := g.grid[i][j].(charCell)\n\t\t\tif ok && !t.isDown {\n\t\t\t\tPanicIfFalse(t.isRight, \"either isDown or isRight should be set\")\n\t\t\t\tpartial += fmt.Sprintf(\"%c\", t.char)\n\t\t\t} else {\n\t\t\t\tif len(partial) > 1 {\n\t\t\t\t\tr = append(r, Partial{Partial: partial, X: i, Y: j - len(partial)})\n\t\t\t\t}\n\t\t\t\tpartial = \"\"\n\t\t\t}\n\t\t}\n\t\tif len(partial) > 1 {\n\t\t\tr = append(r, Partial{Partial: partial, X: i, Y: g.Size - len(partial)})\n\t\t}\n\t}\n\treturn r\n}",
"func (self *GameHeart) Downup(msg *HeartMessageType) {\n\n}",
"func (mod ModDownToSize) Apply(pop *Population) {\n\tvar offsprings = generateOffsprings(\n\t\tmod.NbrOffsprings,\n\t\tpop.Individuals,\n\t\tmod.SelectorA,\n\t\tmod.Crossover,\n\t\tpop.rng,\n\t)\n\t// Apply mutation to the offsprings\n\tif mod.Mutator != nil {\n\t\toffsprings.Mutate(mod.Mutator, mod.MutRate, pop.rng)\n\t}\n\toffsprings.Evaluate(pop.ff)\n\t// Merge the current population with the offsprings\n\toffsprings = append(offsprings, pop.Individuals...)\n\t// Select down to size\n\tvar selected, _ = mod.SelectorB.Apply(len(pop.Individuals), offsprings, pop.rng)\n\t// Replace the current population of individuals\n\tcopy(pop.Individuals, selected)\n}",
"func (self *TSPAlgorithm) RandomPop() *Population {\n\tp := Population{\n\t\tMutThreshold: 0.5,\n\t\tCrossThreshold: 0.95,\n\t}\n\n\tp.Chromosomes = make([]Chromosome, 0)\n\n\tfor i := 0; i < self.PopSize; i++ {\n\t\tnewChromo := &Chromosome{\n\t\t\tLocations: self.Locations,\n\t\t\tMatrix: &self.Matrix,\n\t\t\tId: i + 1,\n\t\t}\n\n\t\tp.Chromosomes = append(p.Chromosomes, *newChromo)\n\t}\n\n\t// Randomize\n\tfor i, _ := range p.Chromosomes {\n\t\tswap := rand.Intn(15)\n\n\t\tfor j := 0; j < swap; j++ {\n\t\t\tp.Chromosomes[i].RandSwap()\n\t\t}\n\t}\n\n\tp.IDCounter = self.PopSize + 1\n\n\treturn &p\n}",
"func (vm *VM) opPop(instr []uint16) int {\n\tif len(vm.stack) == 0 {\n\t\t// error\n\t\tvm.Status = \"opPop has empty stack!\"\n\t\treturn 0\n\t}\n\n\tv := vm.stack[len(vm.stack)-1]\n\tvm.stack = vm.stack[:len(vm.stack)-1]\n\ta, _, _ := vm.getAbc(instr)\n\tvm.registers[a] = v\n\treturn 2\n}",
"func (p *PopupWidget) Build() {\n\tif imgui.BeginPopup(p.name, int(p.flags)) {\n\t\tp.layout.Build()\n\t\timgui.EndPopup()\n\t}\n}",
"func (t *tui) arrowUp(g *gotui.Gui, v *gotui.View) error {\n\tcx, cy := v.Cursor()\n\tif cx == 0 {\n\t\tif cy == 0 {\n\t\t\tv.Clear()\n\t\t\thl := t.sent.Back()\n\t\t\tif hl != nil {\n\t\t\t\tfmt.Fprint(v, hl.Text)\n\t\t\t}\n\t\t} else {\n\t\t\tv.SetCursor(0, 0)\n\t\t}\n\t} else {\n\t\tv.SetCursor(cx-1, cy)\n\t}\n\treturn nil\n}",
"func (gdt *Array) PopBack() Variant {\n\targ0 := gdt.getBase()\n\n\tret := C.go_godot_array_pop_back(GDNative.api, arg0)\n\n\treturn Variant{base: &ret}\n\n}",
"func (stack *Item) Pop() (newstack *Item, top *Item) {\n\ttop = stack\n\tnewstack = stack.Next\n\treturn\n}",
"func (w *ListWidget) MovePageDown() {\n\ti := w.selected\n\n\tfor remainingLinesToPage := w.h; remainingLinesToPage > 0 && i < w.itemCount(); i++ {\n\t\titem := w.itemsToShow()[i]\n\t\tremainingLinesToPage -= strings.Count(item.Display, \"\\n\") + 1 // +1 as there is an implicit newline\n\t\tremainingLinesToPage-- // separator\n\t}\n\n\tw.ChangeSelection(i)\n}",
"func (s *Stack) Pop() DrawPather {\n\tif s.Len() == 0 {\n\t\treturn nil\n\t}\n\ttmp := (*s)[s.Len()-1]\n\t*s = (*s)[:s.Len()-1]\n\treturn tmp\n}",
"func (*Action_Dropdown) Descriptor() ([]byte, []int) {\n\treturn file_chat_v1_messages_proto_rawDescGZIP(), []int{1, 1}\n}"
] | [
"0.771435",
"0.6271339",
"0.6096663",
"0.60613847",
"0.58930135",
"0.5520053",
"0.5439196",
"0.51506114",
"0.51457214",
"0.510715",
"0.50609976",
"0.50515825",
"0.50288045",
"0.49635243",
"0.49324164",
"0.48336193",
"0.4772766",
"0.4770371",
"0.4765281",
"0.47626668",
"0.4761489",
"0.4730697",
"0.4711964",
"0.46971312",
"0.46874452",
"0.46448296",
"0.46147543",
"0.4581167",
"0.45637733",
"0.45567912",
"0.45150235",
"0.45109352",
"0.4510874",
"0.44901496",
"0.44790554",
"0.4456283",
"0.44421518",
"0.4428789",
"0.43820024",
"0.43816927",
"0.43740165",
"0.4355709",
"0.43366373",
"0.43336117",
"0.43032366",
"0.43014133",
"0.42981207",
"0.42911726",
"0.42894515",
"0.42890215",
"0.42729706",
"0.42704436",
"0.42527705",
"0.4252538",
"0.4252538",
"0.4242272",
"0.42315772",
"0.42186895",
"0.42122746",
"0.42107394",
"0.42092466",
"0.42034566",
"0.4195168",
"0.41899833",
"0.4182515",
"0.41795743",
"0.41667873",
"0.41667306",
"0.41667306",
"0.41459432",
"0.4140509",
"0.41383624",
"0.4137964",
"0.41337553",
"0.41296318",
"0.41264102",
"0.41206074",
"0.4117888",
"0.41109598",
"0.41064423",
"0.4104929",
"0.40928325",
"0.4091569",
"0.40837362",
"0.40832794",
"0.40828425",
"0.4078762",
"0.40676317",
"0.40669057",
"0.40666178",
"0.40632343",
"0.40511355",
"0.40494743",
"0.40433943",
"0.40397504",
"0.40372005",
"0.40359288",
"0.40320006",
"0.40299326",
"0.4026388"
] | 0.89984757 | 0 |
/ GtkFileChooser AddChoice is a wrapper around gtk_file_chooser_add_choice(). | GtkFileChooser AddChoice — это обертка вокруг gtk_file_chooser_add_choice(). | func (v *FileChooser) AddChoice(id, label string, options, optionLabels []string) {
cId := C.CString(id)
defer C.free(unsafe.Pointer(cId))
cLabel := C.CString(label)
defer C.free(unsafe.Pointer(cLabel))
if options == nil || optionLabels == nil {
C.gtk_file_chooser_add_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cLabel), nil, nil)
return
}
cOptions := C.make_strings(C.int(len(options) + 1))
for i, option := range options {
cstr := C.CString(option)
defer C.free(unsafe.Pointer(cstr))
C.set_string(cOptions, C.int(i), (*C.gchar)(cstr))
}
C.set_string(cOptions, C.int(len(options)), nil)
cOptionLabels := C.make_strings(C.int(len(optionLabels) + 1))
for i, optionLabel := range optionLabels {
cstr := C.CString(optionLabel)
defer C.free(unsafe.Pointer(cstr))
C.set_string(cOptionLabels, C.int(i), (*C.gchar)(cstr))
}
C.set_string(cOptionLabels, C.int(len(optionLabels)), nil)
C.gtk_file_chooser_add_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cLabel), cOptions, cOptionLabels)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *FileChooser) SetChoice(id, option string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tcOption := C.CString(option)\n\tdefer C.free(unsafe.Pointer(cOption))\n\tC.gtk_file_chooser_set_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cOption))\n}",
"func (v *FileChooser) GetChoice(id string) string {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tc := C.gtk_file_chooser_get_choice(v.native(), (*C.gchar)(cId))\n\treturn C.GoString(c)\n}",
"func (v *FileChooser) RemoveChoice(id string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tC.gtk_file_chooser_remove_choice(v.native(), (*C.gchar)(cId))\n}",
"func (cycle *Cycle) AddChoice(choice CycleChoice) *CycleItem {\n\treturn newCycleItem(cycle, choice)\n}",
"func runFileChooser(win *gtk.Window) (string, error) {\n\n\tvar fn string\n\n\topenFile, err := gtk.FileChooserDialogNewWith2Buttons(\"Open file\", win, gtk.FILE_CHOOSER_ACTION_OPEN,\n\t\t\"Cancel\", gtk.RESPONSE_CANCEL,\n\t\t\"Ok\", gtk.RESPONSE_OK)\n\tdefer openFile.Destroy()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\topenFile.SetDefaultSize(50, 50)\n\n\tres := openFile.Run()\n\n\tif res == int(gtk.RESPONSE_OK) {\n\t\tfn = openFile.FileChooser.GetFilename()\n\t}\n\n\treturn fn, nil\n}",
"func (fv *FileView) FileSelectAction(idx int) {\n\tif idx < 0 {\n\t\treturn\n\t}\n\tfv.SaveSortPrefs()\n\tfi := fv.Files[idx]\n\tfv.SelectedIdx = idx\n\tfv.SelFile = fi.Name\n\tsf := fv.SelField()\n\tsf.SetText(fv.SelFile)\n\tfv.WidgetSig.Emit(fv.This, int64(gi.WidgetSelected), fv.SelectedFile())\n}",
"func Choice(s *string, choices []string, title, id, class string, valid Validator) (jquery.JQuery, error) {\n\tj := jq(\"<select>\").AddClass(ClassPrefix + \"-choice\").AddClass(class)\n\tj.SetAttr(\"title\", title).SetAttr(\"id\", id)\n\tif *s == \"\" {\n\t\t*s = choices[0]\n\t}\n\tindex := -1\n\tfor i, c := range choices {\n\t\tif c == *s {\n\t\t\tindex = i\n\t\t}\n\t\tj.Append(jq(\"<option>\").SetAttr(\"value\", c).SetText(c))\n\t}\n\tif index == -1 {\n\t\treturn jq(), fmt.Errorf(\"Default of '%s' is not among valid choices\", *s)\n\t}\n\tj.SetData(\"prev\", index)\n\tj.SetProp(\"selectedIndex\", index)\n\tj.Call(jquery.CHANGE, func(event jquery.Event) {\n\t\tnewS := event.Target.Get(\"value\").String()\n\t\tnewIndex := event.Target.Get(\"selectedIndex\").Int()\n\t\tif valid != nil && !valid.Validate(newS) {\n\t\t\tnewIndex = int(j.Data(\"prev\").(float64))\n\t\t\tj.SetProp(\"selectedIndex\", newIndex)\n\t\t}\n\t\t*s = choices[int(newIndex)]\n\t\tj.SetData(\"prev\", newIndex)\n\t})\n\treturn j, nil\n}",
"func selectFileGUI(titleA string, filterNameA string, filterTypeA string) string {\n\tfileNameT, errT := dialog.File().Filter(filterNameA, filterTypeA).Title(titleA).Load()\n\n\tif errT != nil {\n\t\treturn tk.GenerateErrorStringF(\"failed: %v\", errT)\n\t}\n\n\treturn fileNameT\n}",
"func NewChoice() Choice {\n\treturn new(ChoiceImpl)\n}",
"func (s *BasevhdlListener) EnterChoice(ctx *ChoiceContext) {}",
"func (s *BasevhdlListener) EnterChoices(ctx *ChoicesContext) {}",
"func (c *Combobox) Append(text string) {\n\tctext := C.CString(text)\n\tC.uiComboboxAppend(c.c, ctext)\n\tfreestr(ctext)\n}",
"func NewChoice(allowedValues ...string) Choice {\n\treturn Choice{AllowedValues: allowedValues}\n}",
"func NewAddItemAccepted() *AddItemAccepted {\n\n\treturn &AddItemAccepted{}\n}",
"func (fv *FileView) SetSelFileAction(sel string) {\n\tfv.SelFile = sel\n\tsv := fv.FilesView()\n\tsv.SelectFieldVal(\"Name\", fv.SelFile)\n\tfv.SelectedIdx = sv.SelectedIdx\n\tsf := fv.SelField()\n\tsf.SetText(fv.SelFile)\n\tfv.WidgetSig.Emit(fv.This, int64(gi.WidgetSelected), fv.SelectedFile())\n}",
"func (c *ChoiceImpl) AddAction(name, description string, callback func()) error {\n\tif c.getActionByName(name) == nil {\n\t\ta := newAction(name, description, callback)\n\t\tc.actions = append(c.actions, a)\n\t\treturn nil\n\t}\n\treturn errors.New(\"An action with this name already exists\")\n}",
"func InputDialog(opt ...interface{}) string {\n b, _ := gtk.BuilderNewFromFile(\"glade/input-dialog.glade\")\n d := GetDialog(b, \"input_dialog\")\n entry := GetEntry(b, \"input_entry\")\n\n for i, v := range(opt) {\n if i % 2 == 0 {\n key := v.(string)\n switch key {\n case \"title\":\n d.SetTitle(opt[i+1].(string))\n case \"label\":\n l := GetLabel(b,\"input_label\")\n l.SetText(opt[i+1].(string))\n case \"password-mask\":\n entry.SetInvisibleChar(opt[i+1].(rune))\n entry.SetVisibility(false)\n case \"default\":\n entry.SetText(opt[i+1].(string))\n }\n }\n }\n\n output := \"\"\n entry.Connect(\"activate\", func (o *gtk.Entry) { d.Response(gtk.RESPONSE_OK) } )\n btok := GetButton(b, \"bt_ok\")\n btok.Connect(\"clicked\", func (b *gtk.Button) { d.Response(gtk.RESPONSE_OK) } )\n\n btcancel := GetButton(b, \"bt_cancel\")\n btcancel.Connect(\"clicked\", func (b *gtk.Button) { d.Response(gtk.RESPONSE_CANCEL) } )\n\n code := d.Run()\n if code == gtk.RESPONSE_OK {\n output, _ = entry.GetText()\n }\n\n d.Destroy()\n return output\n}",
"func (fv *FileView) FavSelect(idx int) {\n\tif idx < 0 || idx >= len(gi.Prefs.FavPaths) {\n\t\treturn\n\t}\n\tfi := gi.Prefs.FavPaths[idx]\n\tfv.DirPath, _ = homedir.Expand(fi.Path)\n\tfv.UpdateFilesAction()\n}",
"func NewFlagChoice(choices []string, chosen string) *FlagChoice {\n\treturn &FlagChoice{\n\t\tchoices: choices,\n\t\tchosen: chosen,\n\t}\n}",
"func (ptr *Application) onClickMenuFileNew() {\n\n\t// reset the text editor\n\tptr.textEditor.SetText(\"\")\n}",
"func (ptr *Application) onClickMenuFileSaveAs() {\n\t// if file exists, the below method will also prompt the\n\t// user for confirmation whether or not the intent is to\n\t// overwrite the intended file\n\tfilename, err := dialog.File().Filter(\n\t\t\"Select\",\n\t).Save()\n\tif err != nil || filename == \"\" {\n\t\treturn\n\t}\n\n\t// save the file\n\tptr.saveFile(filename)\n}",
"func importTrackCB() {\n\tvar impPath string\n\tfs := gtk.NewFileChooserDialog(\"Track to Import\",\n\t\twin,\n\t\tgtk.FILE_CHOOSER_ACTION_OPEN,\n\t\t\"_Cancel\", gtk.RESPONSE_CANCEL, \"_Import\", gtk.RESPONSE_ACCEPT)\n\tfs.SetCurrentFolder(settings.DataDir)\n\tfs.SetLocalOnly(true)\n\tff := gtk.NewFileFilter()\n\tff.AddPattern(\"*.csv\")\n\tfs.SetFilter(ff)\n\tres := fs.Run()\n\tif res == gtk.RESPONSE_ACCEPT {\n\t\timpPath = fs.GetFilename()\n\t\tif impPath != \"\" {\n\t\t\timp, err := os.Open(impPath)\n\t\t\tif err != nil {\n\t\t\t\tmessageDialog(win, gtk.MESSAGE_INFO, \"Could not open track CSV file.\")\n\t\t\t} else {\n\t\t\t\tdefer imp.Close()\n\t\t\t\tstat, err := imp.Stat()\n\t\t\t\tif err != nil || stat.Size() == 0 {\n\t\t\t\t\tmessageDialog(win, gtk.MESSAGE_ERROR, \"Invalid track CSV file\")\n\t\t\t\t} else {\n\t\t\t\t\tr := csv.NewReader(bufio.NewReader(imp))\n\t\t\t\t\tliveTrack = readTrack(r)\n\t\t\t\t\ttrackChart.track = liveTrack\n\t\t\t\t\ttrackChart.drawTrack()\n\t\t\t\t\tprofileChart.track = liveTrack\n\t\t\t\t\tprofileChart.drawProfile()\n\t\t\t\t\tnotebook.SetCurrentPage(trackPage)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfs.Destroy()\n}",
"func selectFileToSaveGUI(titleA string, filterNameA string, filterTypeA string) string {\n\tfileNameT, errT := dialog.File().Filter(filterNameA, filterTypeA).Title(titleA).Save()\n\n\tif errT != nil {\n\t\treturn tk.GenerateErrorStringF(\"failed: %v\", errT)\n\t}\n\n\treturn fileNameT\n}",
"func Choice(m string, exacts []string) string {\n\tfmt.Println(colors.Blue(prefix + \" \" + m + \": \"))\n\tret := make(chan string, 1)\n\tterminate := make(chan struct{})\n\tgo cho.Run(exacts, ret, terminate)\n\tselected := \"\"\nLOOP:\n\tfor {\n\t\tselect {\n\t\tcase selected = <-ret:\n\t\t\tbreak LOOP\n\t\tcase <-terminate:\n\t\t\tbreak LOOP\n\t\t}\n\t}\n\tif selected != \"\" {\n\t\tfmt.Println(selected)\n\t}\n\treturn selected\n}",
"func pickFile(dir, message string) string {\n\tfileName := \"\"\n\terr := survey.AskOne(\n\t\t&survey.Select{\n\t\t\tMessage: message,\n\t\t\tOptions: readDir(dir),\n\t\t},\n\t\t&fileName,\n\t\tsurvey.WithValidator(survey.Required),\n\t)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn fileName\n}",
"func (s *Script) AddDialog(d *Dialog) {\n\tif d.Text != \"\" {\n\t\ts.Dialog = append(s.Dialog, d)\n\t}\n}",
"func entryExtMaskEnterKeyPressed(e *gtk.Entry) {\n\tExtSliceToOpt()\n\tupdateTreeViewFilesDisplay()\n}",
"func newChoiceBuilder(choiceDef *ChoiceDef) ChoiceBuilder {\n\treturn &chosenBuilder{\n\t\tchoiceDef: choiceDef,\n\t}\n}",
"func numberPicker(theme gxui.Theme, overlay gxui.BubbleOverlay) gxui.Control {\n\tvar fm FileManager\n\tfm.Init()\n\tfm.SetPath(fm.RootPath)\n\n\tadapter := gxui.CreateDefaultAdapter()\n\tadapter.SetItems(fm.GetList())\n\n\tlayout := theme.CreateLinearLayout()\n\tlayout.SetDirection(gxui.TopToBottom)\n\n\tlayoutControl := theme.CreateLinearLayout()\n layoutControl.SetDirection(gxui.LeftToRight)\n\tlabelPath := theme.CreateLabel()\n\tlabelPath.SetText(\"Set root path: \")\n\tlayoutControl.AddChild(labelPath)\n\n\tinputPath := theme.CreateTextBox()\n\tlayoutControl.AddChild(inputPath)\n\n\tbtn := GetButton(\"OK\", theme)\n\tbtn.OnClick(func(gxui.MouseEvent) {\n\t\t_path := inputPath.Text()\n\t\tif fm.IsDir(_path) {\n\t\t\tfm.RootPath = _path\n\t\t\tfm.SetPath(fm.RootPath)\n\t\t\tadapter.SetItems(fm.GetList())\n\t\t}\n\t})\n\tlayoutControl.AddChild(btn)\n\n\tlayout.AddChild(layoutControl)\n\n\n\tlist := theme.CreateList()\n\tlist.SetAdapter(adapter)\n\tlist.SetOrientation(gxui.Vertical)\n\tlayout.AddChild(list)\n\n\tlayoutControlOpen := theme.CreateLinearLayout()\n layoutControlOpen.SetDirection(gxui.LeftToRight)\n\tlabelOpen := theme.CreateLabel()\n\tlabelOpen.SetText(\"Open: \")\n\tlayoutControlOpen.AddChild(labelOpen)\n\n\tbtnOpen := GetButton(\"OK\", theme)\n\tbtnOpen.OnClick(func(gxui.MouseEvent) {\n\t\t\n\t})\n\tlayoutControlOpen.AddChild(btnOpen)\n\n\tlayout.AddChild(layoutControlOpen)\n\n\tlist.OnItemClicked(func(ev gxui.MouseEvent, item gxui.AdapterItem) {\n\t\t//if dropList.Selected() != item {\n\t\t//\tdropList.Select(item)\n\t\t//}\n\t\tif ev.Button == gxui.MouseButtonRight {\n\t\t\tfm.Go(item.(string))\n\t\t\tadapter.SetItems(fm.GetList())\n\t\t\t\n\t\t}\n\t\t\n\t\t//selected.SetText(fmt.Sprintf(\"%s - %d\", item, adapter.ItemIndex(item)))\n\t})\n\n\treturn layout\n}",
"func Add(shortname, name, description string, defaultvalue interface{}) {\n\toptions = append(options, &Option{\n\t\tShortName: shortname,\n\t\tName: name,\n\t\tDescription: description,\n\t\tdefaultval: defaultvalue,\n\t})\n}",
"func selectDirectoryGUI(titleA string) string {\n\tdirectoryT, errT := dialog.Directory().Title(titleA).Browse()\n\n\tif errT != nil {\n\t\treturn tk.GenerateErrorStringF(\"failed: %v\", errT)\n\t}\n\n\treturn directoryT\n}",
"func (b *Builder) WriteChoice(choices []string) {\n\tfmt.Fprintf(&b.sb, \"${%d|\", b.nextTabStop())\n\tfor i, c := range choices {\n\t\tif i != 0 {\n\t\t\tb.sb.WriteByte(',')\n\t\t}\n\t\tchoiceReplacer.WriteString(&b.sb, c)\n\t}\n\tb.sb.WriteString(\"|}\")\n}",
"func createEntry() *CompletionEntry {\n\tentry := NewCompletionEntry([]string{\"zoo\", \"boo\"})\n\tentry.OnChanged = func(s string) {\n\t\tdata := []string{\"foo\", \"bar\", \"baz\"}\n\t\tentry.SetOptions(data)\n\t\tentry.ShowCompletion()\n\t}\n\treturn entry\n}",
"func (s *BasevhdlListener) EnterSelected_name_part(ctx *Selected_name_partContext) {}",
"func pickChapter(g *gocui.Gui, v *gocui.View) error {\n\tif err := openModal(g); err != nil {\n\t\treturn err\n\t}\n\n\tdone := make(chan bool)\n\ttimer := time.NewTimer(time.Second * time.Duration(downloadTimeoutSecond))\n\n\t// must run downloading process in\n\t// go routine or else the it will\n\t// block the openModal so loading modal\n\t// will not be shown to the user\n\tgo func() {\n\t\ts := trimViewLine(v)\n\t\tprepDownloadChapter(s)\n\t\tdone <- true\n\t}()\n\n\t// in case downloading takes longer than\n\t// downloadTimeoutSecond, close the modal\n\t// and continue to download in background\n\tgo func() {\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\tsetClosingMessage(g, \"continuing to download\\nin background...\")\n\t\t\treturn\n\t\tcase <-done:\n\t\t\tg.Update(func(g *gocui.Gui) error {\n\t\t\t\terr := closeModal(g)\n\t\t\t\treturn err\n\t\t\t})\n\t\t}\n\t}()\n\n\treturn nil\n}",
"func Add(path, branch string) func(*types.Cmd) {\n\treturn func(g *types.Cmd) {\n\t\tg.AddOptions(\"add\")\n\t\tg.AddOptions(path)\n\t\tif len(branch) > 0 {\n\t\t\tg.AddOptions(branch)\n\t\t}\n\t}\n}",
"func (s *BasevhdlListener) EnterSelected_name(ctx *Selected_nameContext) {}",
"func (ptr *Application) onClickMenuFileOpen() {\n\tfileName, err := dialog.File().Filter(\n\t\t\"Open file\",\n\t).Load()\n\tif err != nil || fileName == \"\" {\n\t\treturn\n\t}\n\n\t// open the file\n\tfileContents, err := ioutil.ReadFile(fileName)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// update current file information\n\tptr.currentFilename = fileName\n\tptr.currentFileBuffer = fileContents\n\n\t// display the contents\n\tptr.textEditor.SetText(string(ptr.currentFileBuffer))\n}",
"func AddClickToSubMenu(i github.Issue, m *systray.MenuItem) {\n\n\t<-m.ClickedCh\n\tstatus := OpenBrowser(*i.HTMLURL)\n\tif status != true {\n\t\tfmt.Println(\"something went wrong opening your browser.... use a Mac dummy\")\n\t}\n\n}",
"func (c *Config) promptChoice(prompt string, choices []string, args ...string) (string, error) {\n\tvar defaultValue *string\n\tswitch len(args) {\n\tcase 0:\n\t\t// Do nothing.\n\tcase 1:\n\t\tif !slices.Contains(choices, args[0]) {\n\t\t\treturn \"\", fmt.Errorf(\"%s: invalid default value\", args[0])\n\t\t}\n\t\tdefaultValue = &args[0]\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"want 2 or 3 arguments, got %d\", len(args)+2)\n\t}\n\tif c.interactiveTemplateFuncs.promptDefaults && defaultValue != nil {\n\t\treturn *defaultValue, nil\n\t}\n\treturn c.readChoice(prompt, choices, defaultValue)\n}",
"func (o *ColorPicker) X_AddPresetPressed() {\n\t//log.Println(\"Calling ColorPicker.X_AddPresetPressed()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"ColorPicker\", \"_add_preset_pressed\")\n\n\t// Call the parent method.\n\t// void\n\tretPtr := gdnative.NewEmptyVoid()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n}",
"func listViewFilesRowActivated(tw *gtk.TreeView) {\n\tvar err error\n\tvar value *glib.Value\n\tvar filename string\n\tvar iters []*gtk.TreeIter\n\tvar isTxt, gtLimit bool\n\n\tif iters = tvsList.GetSelectedIters(); len(iters) > 0 {\n\n\t\tif value, err = tvsList.ListStore.GetValue(iters[0], 3); err == nil { // Field 3: get full path\n\t\t\tfilename, err = value.GetString() // Get selected file path\n\t\t}\n\t\t// Check for text file\n\t\tif isTxt, gtLimit, err = IsTextFile(\n\t\t\tfilename,\n\t\t\topt.FileMinSizeLimit,\n\t\t\topt.FileMaxSizeLimit); err == nil {\n\n\t\t\tif isTxt && gtLimit {\n\t\t\t\tif textWinTextToShowBytes, err = ioutil.ReadFile(filename); err == nil {\n\n\t\t\t\t\tcurrFilename = filename // Filename passed to popup menu of the TextView\n\n\t\t\t\t\tshowTextWin(string(textWinTextToShowBytes), TruncatePath(filename, opt.FilePathLength))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tDlgErr(sts[\"missing\"], err)\n}",
"func AddOption(name string, def interface{}, proto, help, relevance string) {\n\tAdd(Option{Name: name,\n\t\tDefault: def,\n\t\tPrototype: proto,\n\t\tHelp: help,\n\t\tRelevance: relevance,\n\t})\n}",
"func (cycle *Cycle) Choose() {\n\tif !cycle.showing ||\n\t\tlen(cycle.items) == 0 ||\n\t\tcycle.selected < 0 ||\n\t\tcycle.selected >= len(cycle.items) {\n\n\t\treturn\n\t}\n\n\tcycle.items[cycle.selected].choose()\n\tcycle.Hide()\n}",
"func (ref FileView) Add(seqNum commit.SeqNum, version resource.Version) error {\n\tfile, ok := ref.repo.files[ref.file]\n\tif !ok {\n\t\tfile = newFileEntry()\n\t}\n\tview, ok := file.Views[ref.drive]\n\tif !ok {\n\t\tview = make(map[commit.SeqNum]resource.Version)\n\t\tfile.Views[ref.drive] = view\n\t}\n\tview[seqNum] = version\n\tref.repo.files[ref.file] = file\n\treturn nil\n}",
"func (f *FilePicker) SelectFile(fileName string) uiauto.Action {\n\treturn f.filesApp.SelectFile(fileName)\n}",
"func AddAccountDialog() *nodewith.Finder {\n\treturn nodewith.Name(\"Sign in to add a Google account\").Role(role.RootWebArea)\n}",
"func (o IFS) Choose() *Affine {\n\tr := rand.Intn(len(o.Choices))\n\treturn o.Choices[r]\n}",
"func handleFiles(_ *gtk.Button, win *gtk.Window) error {\n\tfp, err := runFileChooser(win)\n\tif err != nil {\n\t\tlog(ERR, fmt.Sprintf(\" selecting file: %s\\n\", err.Error()))\n\t\treturn err\n\t}\n\n\tcont, err := ioutil.ReadFile(fp)\n\tif err != nil {\n\t\tlog(ERR, fmt.Sprintf(\" reading file: '%s'\\n\", err.Error()))\n\t\treturn err\n\t}\n\n\tok, err := lib.IsTagNote(&cont)\n\tif err != nil {\n\t\tlog(ERR, fmt.Sprintf(\" checking file header: %s\\n\", err.Error()))\n\t\treturn err\n\t}\n\n\tchild, _ := win.GetChild()\n\twin.Remove(child)\n\n\tif ok {\n\t\ttagList, err := lib.TextToEnt(&cont)\n\t\tif err != nil {\n\t\t\tlog(ERR, fmt.Sprintf(\" splitting content: '%s'\", err.Error()))\n\t\t\treturn err\n\t\t}\n\t\tappShowTags(win, tagList)\n\t} else {\n\t\tappConsolatoryWin(win)\n\t}\n\n\treturn nil\n}",
"func selectCategory() string {\n\tfmt.Println(printAllCategories())\n\trequest := \"\\nChoose category to edit:\"\n\tamountOfChoices := 4\n\treturn getValidInput(request, amountOfChoices)\n}",
"func (w *Select) Item(value *bool, prompt string) *Select {\n\tw.lines = w.lines + 1\n\tw.Items = append(w.Items, SelectItemBoolVar(value, prompt))\n\treturn w\n}",
"func browseFolderCallback(hwnd win.HWND, msg uint32, lp, wp uintptr) uintptr {\n\tconst BFFM_SELCHANGED = 2\n\tif msg == BFFM_SELCHANGED {\n\t\t_, err := pathFromPIDL(lp)\n\t\tvar enabled uintptr\n\t\tif err == nil {\n\t\t\tenabled = 1\n\t\t}\n\n\t\tconst BFFM_ENABLEOK = win.WM_USER + 101\n\n\t\twin.SendMessage(hwnd, BFFM_ENABLEOK, 0, enabled)\n\t}\n\n\treturn 0\n}",
"func (f *FlagChoice) String() string {\n\treturn choiceList(f.choices...)\n}",
"func (cp CommandProperties) AddFile(value string) {\n\tcp.Add(\"file\", value)\n}",
"func (cli *CliPrompter) Choose(pr string, options []string) int {\n\tselected := \"\"\n\tprompt := &survey.Select{\n\t\tMessage: pr,\n\t\tOptions: options,\n\t}\n\t_ = survey.AskOne(prompt, &selected, survey.WithValidator(survey.Required))\n\n\t// return the selected element index\n\tfor i, option := range options {\n\t\tif selected == option {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn 0\n}",
"func (f *Filesystem) add(name string, fi hugofs.FileMetaInfo) (err error) {\n\tvar file File\n\n\tfile, err = f.SourceSpec.NewFileInfo(fi)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf.files = append(f.files, file)\n\n\treturn err\n}",
"func (f *FlagChoice) Set(value string) error {\n\tfor _, choice := range f.choices {\n\t\tif choice == value {\n\t\t\tf.chosen = value\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"%q is not a valid choice, must be: %s\", value, f.String())\n}",
"func ChoiceIndexCallback(title string, choices []string, def int, f func(int, int, int)) int {\n\tselection := def\n\tnc := len(choices) - 1\n\tif selection < 0 || selection > nc {\n\t\tselection = 0\n\t}\n\toffset := 0\n\tcx := 0\n\tfor {\n\t\tsx, sy := termbox.Size()\n\t\ttermbox.HideCursor()\n\t\ttermbox.Clear(termbox.ColorDefault, termbox.ColorDefault)\n\t\tPrintstring(title, 0, 0)\n\t\tfor selection < offset {\n\t\t\toffset -= 5\n\t\t\tif offset < 0 {\n\t\t\t\toffset = 0\n\t\t\t}\n\t\t}\n\t\tfor selection-offset >= sy-1 {\n\t\t\toffset += 5\n\t\t\tif offset >= nc {\n\t\t\t\toffset = nc\n\t\t\t}\n\t\t}\n\t\tfor i, s := range choices[offset:] {\n\t\t\tts, _ := trimString(s, cx)\n\t\t\tPrintstring(ts, 3, i+1)\n\t\t\tif cx > 0 {\n\t\t\t\tPrintstring(\"←\", 2, i+1)\n\t\t\t}\n\t\t}\n\t\tPrintstring(\">\", 1, (selection+1)-offset)\n\t\tif f != nil {\n\t\t\tf(selection, sx, sy)\n\t\t}\n\t\ttermbox.Flush()\n\t\tev := termbox.PollEvent()\n\t\tif ev.Type != termbox.EventKey {\n\t\t\tcontinue\n\t\t}\n\t\tkey := ParseTermboxEvent(ev)\n\t\tswitch key {\n\t\tcase \"C-v\":\n\t\t\tfallthrough\n\t\tcase \"next\":\n\t\t\tselection += sy - 5\n\t\t\tif selection >= len(choices) {\n\t\t\t\tselection = len(choices) - 1\n\t\t\t}\n\t\tcase \"M-v\":\n\t\t\tfallthrough\n\t\tcase \"prior\":\n\t\t\tselection -= sy - 5\n\t\t\tif selection < 0 {\n\t\t\t\tselection = 0\n\t\t\t}\n\t\tcase \"C-c\":\n\t\t\tfallthrough\n\t\tcase \"C-g\":\n\t\t\treturn def\n\t\tcase \"UP\", \"C-p\":\n\t\t\tif selection > 0 {\n\t\t\t\tselection--\n\t\t\t}\n\t\tcase \"DOWN\", \"C-n\":\n\t\t\tif selection < len(choices)-1 {\n\t\t\t\tselection++\n\t\t\t}\n\t\tcase \"LEFT\", \"C-b\":\n\t\t\tif cx > 0 {\n\t\t\t\tcx--\n\t\t\t}\n\t\tcase \"RIGHT\", \"C-f\":\n\t\t\tcx++\n\t\tcase \"C-a\", \"Home\":\n\t\t\tcx = 0\n\t\tcase \"M-<\":\n\t\t\tselection = 0\n\t\tcase \"M->\":\n\t\t\tselection = len(choices) - 1\n\t\tcase \"RET\":\n\t\t\treturn selection\n\t\t}\n\t}\n}",
"func (s *BasevhdlListener) ExitChoice(ctx *ChoiceContext) {}",
"func (s *BaseBundListener) EnterFile_term(ctx *File_termContext) {}",
"func (s *FileSet) Add(file string) {\n\ts.files[file] = true\n}",
"func (sd *SimpleDialog) Custom(owner walk.Form, widget Widget) (accepted bool, err error) {\n\tvar (\n\t\tdlg *walk.Dialog\n\t)\n\n\tif _, err := (Dialog{\n\t\tAssignTo: &dlg,\n\t\tLayout: VBox{Margins: Margins{}},\n\t\tChildren: []Widget{\n\t\t\twidget,\n\t\t\tComposite{\n\t\t\t\tLayout: HBox{Margins: Margins{}},\n\t\t\t\tChildren: []Widget{\n\t\t\t\t\tPushButton{\n\t\t\t\t\t\tText: i18n.Tr(\"widget.button.ok\"),\n\t\t\t\t\t\tOnClicked: func() {\n\t\t\t\t\t\t\t// some stuff here...\n\t\t\t\t\t\t\tdlg.Close(0)\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tPushButton{\n\t\t\t\t\t\tText: i18n.Tr(\"widget.button.cancel\"),\n\t\t\t\t\t\tOnClicked: func() {\n\t\t\t\t\t\t\tdlg.Close(0)\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tTitle: sd.Title,\n\t\tSize: sd.Size,\n\t\tFixedSize: sd.FixedSize,\n\t}).Run(owner); err != nil {\n\t\treturn false, err\n\t}\n\n\treturn\n}",
"func TestCompletionEntry_WithEmptyOptions(t *testing.T) {\n\tentry := createEntry()\n\twin := test.NewWindow(entry)\n\twin.Resize(fyne.NewSize(500, 300))\n\tdefer win.Close()\n\n\tentry.OnChanged = func(s string) {\n\t\tentry.SetOptions([]string{})\n\t\tentry.ShowCompletion()\n\t}\n\n\tentry.SetText(\"foo\")\n\tassert.Nil(t, entry.popupMenu) // popupMenu should not being created\n}",
"func (sp *StackPackage) AddUI(filepath string, ui string) {\n\tsp.UISpecs[filepath] = ui\n}",
"func appConsolatoryWin(win *gtk.Window) {\n\tlbl, err := gtk.LabelNew(\"There were no tags to be displayed, sorry!\")\n\tif err != nil {\n\t\tlog(ERR, fmt.Sprintf(\" creating consolatory label: %s\", err.Error()))\n\t\treturn\n\t}\n\n\tbtn, err := gtk.ButtonNewWithLabel(\"Try again\")\n\tif err != nil {\n\t\tlog(ERR, fmt.Sprintf(\" creating file reload button: %s\\n\", err.Error()))\n\t\treturn\n\t}\n\tbtn.Connect(\"clicked\", handleFiles, win)\n\n\tvbox, err := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 10)\n\tif err != nil {\n\t\tlog(ERR, fmt.Sprintf(\" creating vbox: %s\\n\", err.Error()))\n\t\treturn\n\t}\n\n\tvbox.Add(lbl)\n\tvbox.Add(btn)\n\twin.Add(vbox)\n\twin.ShowAll()\n}",
"func (c *Config) readChoice(prompt string, choices []string, defaultValue *string) (string, error) {\n\tswitch {\n\tcase c.noTTY:\n\t\tfullPrompt := prompt + \" (\" + strings.Join(choices, \"/\")\n\t\tif defaultValue != nil {\n\t\t\tfullPrompt += \", default \" + *defaultValue\n\t\t}\n\t\tfullPrompt += \")? \"\n\t\tabbreviations := chezmoi.UniqueAbbreviations(choices)\n\t\tfor {\n\t\t\tvalue, err := c.readLineRaw(fullPrompt)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tif value == \"\" && defaultValue != nil {\n\t\t\t\treturn *defaultValue, nil\n\t\t\t}\n\t\t\tif value, ok := abbreviations[value]; ok {\n\t\t\t\treturn value, nil\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tinitModel := chezmoibubbles.NewChoiceInputModel(prompt, choices, defaultValue)\n\t\tfinalModel, err := runCancelableModel(initModel)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn finalModel.Value(), nil\n\t}\n}",
"func (p *colorPicker) AddImage(image string) {\n\timageName := tag.StripTag(image, false)\n\tif _, ok := p.imageColors[imageName]; ok {\n\t\treturn\n\t}\n\tp.imageColors[imageName] = DefaultColorCodes[len(p.imageColors)%len(DefaultColorCodes)]\n}",
"func SelectionChanged(s *gtk.TreeSelection) {\n\t// Returns glib.List of gtk.TreePath pointers\n\trows := s.GetSelectedRows(ListStore)\n\titems := make([]string, 0, rows.Length())\n\n\tfor l := rows; l != nil; l = l.Next() {\n\t\tpath := l.Data().(*gtk.TreePath)\n\t\titer, _ := ListStore.GetIter(path)\n\t\tvalue, _ := ListStore.GetValue(iter, 0)\n\t\tstr, _ := value.GetString()\n\t\titems = append(items, str)\n\t}\n\n\tEntry.SetText(fmt.Sprint(items))\n}",
"func registerOnChosenReceived(self *State, message string) {\n\tif !validateChosen(message) || self.IsLeader {\n\t\treturn\n\t}\n\n\tfirstBracket := strings.Index(message, \"[\")\n\tsecondBracket := strings.Index(message, \"]\")\n\tself.Proposition.ChosenValue.CopyFromCommand(extractCommand(message[(firstBracket + 1):secondBracket]))\n\n\trestString := message[(secondBracket + 1):]\n\t_, index := GetKeyValuePair(restString)\n\n\tsynchronize(self, index)\n}",
"func (sa *SelectWithAdd) Run() (int, string, error) {\n\tif len(sa.Items) > 0 {\n\t\tnewItems := append([]string{sa.AddLabel}, sa.Items...)\n\n\t\tlist, err := list.New(newItems, 5)\n\t\tif err != nil {\n\t\t\treturn 0, \"\", err\n\t\t}\n\n\t\ts := Select{\n\t\t\tLabel: sa.Label,\n\t\t\tItems: newItems,\n\t\t\tIsVimMode: sa.IsVimMode,\n\t\t\tSize: 5,\n\t\t\tlist: list,\n\t\t}\n\t\ts.setKeys()\n\n\t\terr = s.prepareTemplates()\n\t\tif err != nil {\n\t\t\treturn 0, \"\", err\n\t\t}\n\n\t\tselected, value, err := s.innerRun(1, '+')\n\t\tif err != nil || selected != 0 {\n\t\t\treturn selected - 1, value, err\n\t\t}\n\n\t\t// XXX run through terminal for windows\n\t\tos.Stdout.Write([]byte(upLine(1) + \"\\r\" + clearLine))\n\t}\n\n\tp := Prompt{\n\t\tLabel: sa.AddLabel,\n\t\tValidate: sa.Validate,\n\t\tIsVimMode: sa.IsVimMode,\n\t}\n\tvalue, err := p.Run()\n\treturn SelectedAdd, value, err\n}",
"func editRuleLibItemSelected(myItem *widgets.QTreeWidgetItem, column int) {\n index := editRuleTree.IndexOfTopLevelItem(myItem)\n fullfilLineEditWithBgpFs(BgpFsActivLib[index])\n}",
"func (a *p) AddOption(items ...string) *p {\n\tif len(items) == 0 {\n\t\treturn a\n\t}\n\n\tif len(a.ItemS) == len(a.OptaS) { // add first trailing spacer\n\t\ta.addMark(-1, 0) // Note: DK starts marking with 0, and decrements. We start negative, using -1.\n\t}\n\n\tc := x.Index(len(a.OptaS)) // shall create a.OptaS[c]\n\tseen := make(map[string]int, len(items)) // to avoid duplicate items in this option.\n\n\tfor i, name := range items {\n\t\tif prev, ok := seen[name]; ok {\n\t\t\tdie(fmt.Sprintf(\"AddOption: duplicate item `%v`: first seen at %v, now at %v!\", name, prev, i))\n\t\t}\n\t\tseen[name] = i\n\n\t\ta.addCell(a.MustKnow(x.Name(name))) // append to Column(name-Index)\n\t}\n\n\ta.addMark(a.OptaS[c-1].Root-1, c) // add trailing spacer\n\ta.OptaS[c-1].Next = (c - 1) + x.Index(len(items)) // update preceding spacer\n\n\treturn a\n}",
"func (fn *formulaFuncs) CHOOSE(argsList *list.List) formulaArg {\n\tif argsList.Len() < 2 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, \"CHOOSE requires 2 arguments\")\n\t}\n\tidx, err := strconv.Atoi(argsList.Front().Value.(formulaArg).Value())\n\tif err != nil {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, \"CHOOSE requires first argument of type number\")\n\t}\n\tif argsList.Len() <= idx {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, \"index_num should be <= to the number of values\")\n\t}\n\targ := argsList.Front()\n\tfor i := 0; i < idx; i++ {\n\t\targ = arg.Next()\n\t}\n\treturn arg.Value.(formulaArg)\n}",
"func (s *BasevhdlListener) ExitChoices(ctx *ChoicesContext) {}",
"func (c *contentTypes) add(partName, contentType string) error {\n\t// Process descrived in ISO/IEC 29500-2 §10.1.2.3\n\tt, params, _ := mime.ParseMediaType(contentType)\n\tcontentType = mime.FormatMediaType(t, params)\n\n\text := strings.ToLower(path.Ext(partName))\n\tif len(ext) == 0 {\n\t\tc.addOverride(partName, contentType)\n\t\treturn nil\n\t}\n\text = ext[1:] // remove dot\n\tc.ensureDefaultsMap()\n\tcurrentType, ok := c.defaults[ext]\n\tif ok {\n\t\tif currentType != contentType {\n\t\t\tc.addOverride(partName, contentType)\n\t\t}\n\t} else {\n\t\tc.addDefault(ext, contentType)\n\t}\n\n\treturn nil\n}",
"func (c *walkerContext) addFile(path string, info os.FileInfo) {\n\tf := newFileInfo(path, info)\n\tc.current.Files = append(c.current.Files, f)\n}",
"func ChoiceIndex(title string, choices []string, def int) int {\n\treturn ChoiceIndexCallback(title, choices, def, nil)\n}",
"func (c *Client) Add(code, filepath string, content io.ReadCloser) error {\n\t// TODO: Decide, based on the code, which provider to choose\n\treturn c.googledrive.Add(code, filepath, content)\n}",
"func (d Client) CreateDialog(name string, filename string, data io.Reader) (string, error) {\n\treturn d.createOrUpdateDialog(\"\", name, filename, data)\n}",
"func choiceList(choices ...string) string {\n\tswitch len(choices) {\n\tcase 0:\n\t\treturn \"you have no choice\"\n\tcase 1:\n\t\treturn choices[0]\n\tcase 2:\n\t\tconst msg = \"%s or %s\"\n\t\treturn fmt.Sprintf(msg, choices[0], choices[1])\n\tdefault:\n\t\tvar buf strings.Builder\n\t\tfor i, choice := range choices {\n\t\t\tswitch i {\n\t\t\tcase 0:\n\t\t\t\tbuf.WriteString(choice)\n\t\t\tcase len(choices) - 1:\n\t\t\t\tbuf.WriteString(\", or \")\n\t\t\t\tbuf.WriteString(choice)\n\t\t\tdefault:\n\t\t\t\tbuf.WriteString(\", \")\n\t\t\t\tbuf.WriteString(choice)\n\t\t\t}\n\t\t}\n\t\treturn buf.String()\n\t}\n}",
"func (cli *CLI) Choose() (string, error) {\n\tcolorstring.Fprintf(cli.errStream, chooseText)\n\n\tnum, err := cli.AskNumber(4, 1)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// If user selects 3, should ask user GPL V2 or V3\n\tif num == 3 {\n\t\tvar buf bytes.Buffer\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"Which version do you want?\\n\")\n\t\tbuf.WriteString(\" 1) V2\\n\")\n\t\tbuf.WriteString(\" 2) V3\\n\")\n\t\tfmt.Fprintf(cli.errStream, buf.String())\n\n\t\tnum, err = cli.AskNumber(2, 1)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tnum += 4\n\t}\n\n\tvar key string\n\tswitch num {\n\tcase 1:\n\t\tkey = \"mit\"\n\tcase 2:\n\t\tkey = \"apache-2.0\"\n\tcase 4:\n\t\tkey = \"\"\n\tcase 5:\n\t\tkey = \"gpl-2.0\"\n\tcase 6:\n\t\tkey = \"gpl-3.0\"\n\tdefault:\n\t\t// Should not reach here\n\t\tpanic(\"Invalid number\")\n\t}\n\n\treturn key, nil\n}",
"func acceptCompletion(ed *Editor) {\n\tc := ed.completion\n\tif 0 <= c.selected && c.selected < len(c.candidates) {\n\t\ted.line, ed.dot = c.apply(ed.line, ed.dot)\n\t}\n\ted.mode = &ed.insert\n}",
"func addApp(search string, apps *applications) error {\n\t_, found := stringInSlice(search, (*apps)[\"default\"])\n\tif found == true {\n\t\tmsg := fmt.Sprintf(\"%s already exists in default opener group\", search)\n\t\treturn errors.New(msg)\n\t}\n\n\t(*apps)[\"default\"] = append((*apps)[\"default\"], search)\n\tmsg := fmt.Sprintf(\"Successfully added %s to default opener group\", search)\n\terr := rewriteApps(apps, msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func isChoiceOrCase(e *yang.Entry) bool {\n\treturn e.Kind == yang.CaseEntry || e.Kind == yang.ChoiceEntry\n}",
"func (fs *FileSet) Add(s string) {\n\t//找一个空channel,传入string\n\tif len(fs.fileChannels[fs.top]) == bufferSize {\n\t\tfs.fileChannels = append(fs.fileChannels, make(chan string, bufferSize))\n\t\tfs.top++\n\t}\n\tfs.fileChannels[fs.top] <- s\n}",
"func (pdu *Pdu) AddOption(key OptionKey, val interface{}) {\n var option Option\n var err error\n\tiv := reflect.ValueOf(val)\n\tif iv.Kind() == reflect.String {\n if key == C.COAP_OPTION_ETAG {\n option, err = key.Opaque(val.(string))\n if err != nil {\n log.Errorf(\"Binary read data failed: %+v\", err)\n }\n } else {\n option = key.String(val.(string))\n }\n\t} else if iv.Kind() == reflect.Uint8 || iv.Kind() == reflect.Uint16 || iv.Kind() == reflect.Uint32 {\n option, err = key.Uint(val)\n if err != nil {\n log.Errorf(\"Binary read data failed: %+v\", err)\n }\n } else {\n log.Warnf(\"Unsupported type of option value. Current value type: %+v\\n\", iv.Kind().String())\n return\n\t}\n\tpdu.Options = append(pdu.Options, option)\n}",
"func (s *SelectableAttribute) Chooser() BooleanAttribute {\n\treturn s.chooser\n}",
"func (c Clipboard) Add(s string) {\n\tc.Storage.Save(s)\n}",
"func NewAddItemOK() *AddItemOK {\n\n\treturn &AddItemOK{}\n}",
"func exportTrackCB() {\n\tvar expPath string\n\tfs := gtk.NewFileChooserDialog(\n\t\t\"File for Track Export\",\n\t\twin,\n\t\tgtk.FILE_CHOOSER_ACTION_SAVE, \"_Cancel\", gtk.RESPONSE_CANCEL, \"_Export\", gtk.RESPONSE_ACCEPT)\n\tfs.SetCurrentFolder(settings.DataDir)\n\tfs.SetLocalOnly(true)\n\tff := gtk.NewFileFilter()\n\tff.AddPattern(\"*.csv\")\n\tfs.SetFilter(ff)\n\tres := fs.Run()\n\tif res == gtk.RESPONSE_ACCEPT {\n\t\texpPath = fs.GetFilename()\n\t\tif expPath != \"\" {\n\t\t\texp, err := os.Create(expPath)\n\t\t\tif err != nil {\n\t\t\t\tmessageDialog(win, gtk.MESSAGE_INFO, \"Could not create CSV file.\")\n\t\t\t} else {\n\t\t\t\tdefer exp.Close()\n\t\t\t\tw := csv.NewWriter(exp)\n\t\t\t\tliveTrack.trackMu.RLock()\n\t\t\t\tfor _, k := range liveTrack.positions {\n\t\t\t\t\tw.Write(k.toStrings())\n\t\t\t\t}\n\t\t\t\tliveTrack.trackMu.RUnlock()\n\t\t\t\tw.Flush()\n\t\t\t}\n\t\t}\n\t}\n\tfs.Destroy()\n}",
"func (m *OpenFile) AddFilter(name, pattern string) *OpenFile {\n\tm.filters = append(m.filters, filter{name, pattern})\n\treturn m\n}",
"func userChoice() {\n\tfor {\n\t\tfmt.Println(\"\\nMenu Options:\")\n\t\tfmt.Println(\"1. Add Employee\")\n\t\tfmt.Println(\"2. Delete Employee\")\n\t\tfmt.Println(\"3. Search Employee\")\n\t\tfmt.Println(\"4. List All Employees\")\n\t\tfmt.Println(\"5. Save Employee Database\")\n\t\tfmt.Println(\"6. Exit Employee Database\")\n\t\tfmt.Print(\"Enter Your Choice: \")\n\t\treader := bufio.NewReader(os.Stdin)\n\t\tchoice, _, err := reader.ReadRune()\n\t\tif err != nil {\n \t\t\tfmt.Println(err)\n\t\t}\n\t\tswitch choice {\n\t\tcase ADD:\n\t\t\taddEmployee()\n\t\tcase DELETE:\n\t\t\tdeleteEmployee()\n\t\tcase SEARCH:\n\t\t\tsearchEmployee()\n\t\tcase LIST:\n\t\t\tlistEmployees()\n\t\tcase SAVE:\n\t\t\tsaveEmployees()\n\t\tcase EXIT:\n\t\t\tsaveEmployees()\n\t\t\tos.Exit(0);\n\t\tdefault:\n\t\t\tfmt.Println(\"Wrong choice\")\n\t\t} /* End of switch */\n\t} /* End of for() */\n}",
"func exportTrackImageCB() {\n\tvar expPath string\n\tfs := gtk.NewFileChooserDialog(\n\t\t\"File for Track Image\",\n\t\twin,\n\t\tgtk.FILE_CHOOSER_ACTION_SAVE, \"_Cancel\", gtk.RESPONSE_CANCEL, \"_Export\", gtk.RESPONSE_ACCEPT)\n\tfs.SetCurrentFolder(settings.DataDir)\n\tff := gtk.NewFileFilter()\n\tff.AddPattern(\"*.png\")\n\tfs.SetFilter(ff)\n\tres := fs.Run()\n\tif res == gtk.RESPONSE_ACCEPT {\n\t\texpPath = fs.GetFilename()\n\t\tif expPath != \"\" {\n\t\t\texp, err := os.Create(expPath)\n\t\t\tif err != nil {\n\t\t\t\tmessageDialog(win, gtk.MESSAGE_INFO, \"Could not create image file.\")\n\t\t\t} else {\n\t\t\t\tdefer exp.Close()\n\t\t\t\tif err := png.Encode(exp, trackChart.backingImage); err != nil {\n\t\t\t\t\tmessageDialog(win, gtk.MESSAGE_INFO, \"Could not write image file.\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfs.Destroy()\n}",
"func (c *Command) AddValueOption(name, value, description string) {\n\topt := Option{\n\t\tKey: name,\n\t\tValue: value,\n\t\tDescription: description,\n\t}\n\tc.Options = append(c.Options, opt)\n}",
"func (s *BasevhdlListener) EnterSelected_signal_assignment(ctx *Selected_signal_assignmentContext) {}",
"func (p *Merger) Add(file string) {\n\tp.files = append(p.files, file)\n}",
"func addOption(c *ConfigParser, section, option, value string) {\n\tc.sections[section].options[option] = value\n}",
"func (p *Package) AddOption(t *GlobalOption) {\n\tp.options = append(p.options, t)\n}",
"func (in *Choice) DeepCopy() *Choice {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Choice)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func New(prompt string, choices []*Choice) *Selection {\n\treturn &Selection{\n\t\tChoices: choices,\n\t\tPrompt: prompt,\n\t\tFilterPrompt: DefaultFilterPrompt,\n\t\tTemplate: DefaultTemplate,\n\t\tConfirmationTemplate: DefaultConfirmationTemplate,\n\t\tFilter: FilterContainsCaseInsensitive,\n\t\tFilterInputPlaceholderStyle: lipgloss.NewStyle().Foreground(lipgloss.Color(\"240\")),\n\t\tKeyMap: NewDefaultKeyMap(),\n\t\tFilterPlaceholder: DefaultFilterPlaceholder,\n\t\tExtendedTemplateScope: template.FuncMap{},\n\t\tOutput: os.Stdout,\n\t\tInput: os.Stdin,\n\t}\n}"
] | [
"0.67540985",
"0.6272586",
"0.5666724",
"0.53501856",
"0.5213042",
"0.5131987",
"0.5029371",
"0.49853897",
"0.49848685",
"0.4980705",
"0.49217358",
"0.48278055",
"0.48229167",
"0.48218247",
"0.46595183",
"0.4618801",
"0.4616831",
"0.45478475",
"0.45407698",
"0.45162332",
"0.4513119",
"0.44895467",
"0.4483588",
"0.44415537",
"0.4426831",
"0.44078875",
"0.44059056",
"0.4405005",
"0.44048342",
"0.43727246",
"0.43419793",
"0.43100652",
"0.4302463",
"0.42821136",
"0.42239836",
"0.4222806",
"0.42227888",
"0.42214668",
"0.42199725",
"0.42145678",
"0.4199051",
"0.41790387",
"0.41661564",
"0.41592774",
"0.4145671",
"0.41364017",
"0.41282687",
"0.4119848",
"0.41182366",
"0.4115181",
"0.4105169",
"0.40975466",
"0.40954486",
"0.40858442",
"0.40808162",
"0.40731263",
"0.40690598",
"0.4060945",
"0.40580633",
"0.40555254",
"0.40497747",
"0.40448225",
"0.40436065",
"0.40232816",
"0.40170002",
"0.4015269",
"0.4013371",
"0.4011264",
"0.39995658",
"0.39989224",
"0.3998111",
"0.3983421",
"0.39803854",
"0.39800793",
"0.39789525",
"0.3973423",
"0.39622703",
"0.3953337",
"0.39519563",
"0.3950318",
"0.39433077",
"0.39432043",
"0.39403677",
"0.3939977",
"0.39386725",
"0.3909854",
"0.3905742",
"0.39046586",
"0.39032578",
"0.38945687",
"0.38935605",
"0.3892145",
"0.3883747",
"0.38775378",
"0.38767067",
"0.3867089",
"0.38543344",
"0.38524434",
"0.3844965",
"0.3841543"
] | 0.8392795 | 0 |
RemoveChoice is a wrapper around gtk_file_chooser_remove_choice(). | RemoveChoice — это обертка вокруг gtk_file_chooser_remove_choice(). | func (v *FileChooser) RemoveChoice(id string) {
cId := C.CString(id)
defer C.free(unsafe.Pointer(cId))
C.gtk_file_chooser_remove_choice(v.native(), (*C.gchar)(cId))
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *FileChooser) GetChoice(id string) string {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tc := C.gtk_file_chooser_get_choice(v.native(), (*C.gchar)(cId))\n\treturn C.GoString(c)\n}",
"func (v *FileChooser) SetChoice(id, option string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tcOption := C.CString(option)\n\tdefer C.free(unsafe.Pointer(cOption))\n\tC.gtk_file_chooser_set_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cOption))\n}",
"func (v *FileChooser) AddChoice(id, label string, options, optionLabels []string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\n\tcLabel := C.CString(label)\n\tdefer C.free(unsafe.Pointer(cLabel))\n\n\tif options == nil || optionLabels == nil {\n\t\tC.gtk_file_chooser_add_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cLabel), nil, nil)\n\t\treturn\n\t}\n\n\tcOptions := C.make_strings(C.int(len(options) + 1))\n\tfor i, option := range options {\n\t\tcstr := C.CString(option)\n\t\tdefer C.free(unsafe.Pointer(cstr))\n\t\tC.set_string(cOptions, C.int(i), (*C.gchar)(cstr))\n\t}\n\tC.set_string(cOptions, C.int(len(options)), nil)\n\n\tcOptionLabels := C.make_strings(C.int(len(optionLabels) + 1))\n\tfor i, optionLabel := range optionLabels {\n\t\tcstr := C.CString(optionLabel)\n\t\tdefer C.free(unsafe.Pointer(cstr))\n\t\tC.set_string(cOptionLabels, C.int(i), (*C.gchar)(cstr))\n\t}\n\tC.set_string(cOptionLabels, C.int(len(optionLabels)), nil)\n\n\tC.gtk_file_chooser_add_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cLabel), cOptions, cOptionLabels)\n}",
"func removeSelectedAccountFromOSSettings(ctx context.Context, tconn *chrome.TestConn) error {\n\ttesting.ContextLog(ctx, \"Removing account\")\n\n\tui := uiauto.New(tconn).WithTimeout(DefaultUITimeout)\n\tremoveAccountButton := RemoveActionButton()\n\tif err := uiauto.Combine(\"Click Remove account\",\n\t\tui.WaitUntilExists(removeAccountButton),\n\t\tui.LeftClick(removeAccountButton),\n\t)(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to to click Remove account\")\n\t}\n\n\treturn nil\n}",
"func (m *Menu) RemoveMenuOption(index int) {\n\tcopy(m.Options[index:], m.Options[index+1:])\n\tm.Options = m.Options[:len(m.Options)-1]\n}",
"func RemoveActionButton() *nodewith.Finder {\n\treturn nodewith.Name(\"Remove this account\").Role(role.MenuItem)\n}",
"func (v *IconView) UnselectPath(path *TreePath) {\n\tC.gtk_icon_view_unselect_path(v.native(), path.native())\n}",
"func (fb *FlowBox) UnselectChild(child *FlowBoxChild) {\n\tC.gtk_flow_box_unselect_child(fb.native(), child.native())\n}",
"func UnMenuButton(cb func()) {\n\tjs.Global.Get(\"document\").Call(\"removeEventListener\", \"menubutton\", cb, false)\n}",
"func (m *_ChangeListRemoveError) GetErrorChoice() BACnetConfirmedServiceChoice {\n\treturn BACnetConfirmedServiceChoice_REMOVE_LIST_ELEMENT\n}",
"func performSuggestionRemovalDialogAction(tconn *chrome.TestConn, dialogButtonName string) uiauto.Action {\n\tui := uiauto.New(tconn)\n\treturn uiauto.Combine(\"press removal dialog button\",\n\t\tui.LeftClick(nodewith.Role(role.Button).Name(dialogButtonName).Ancestor(removalDialogFinder)),\n\t\tui.WaitUntilGone(removalDialogFinder))\n}",
"func radioMenuItemFinalizer(m *RadioMenuItem) {\n\truntime.SetFinalizer(m, func(m *RadioMenuItem) { gobject.Unref(m) })\n}",
"func (s *BasevhdlListener) ExitChoice(ctx *ChoiceContext) {}",
"func RemoveItem(defaults []string, name string) []string {\n\tvar pkgs []string\n\tfor _, pkg := range defaults {\n\t\tif pkg != name {\n\t\t\tpkgs = append(pkgs, pkg)\n\t\t}\n\t}\n\treturn pkgs\n}",
"func comboBoxFinalizer(cb *ComboBox) {\n\truntime.SetFinalizer(cb, func(cb *ComboBox) { gobject.Unref(cb) })\n}",
"func Remove(arg string) {\n\tif arg != \"\" {\n\t\terr := os.Remove(arg)\n\t\tutils.Check(err)\n\t} else {\n\t\tutils.CliErrorln(\"Filename must be included for remove command\") // If this function name changes,\n\t\t// CHANGE THIS STRING ERROR TOO\n\t}\n}",
"func radioButtonFinalizer(rb *RadioButton) {\n\truntime.SetFinalizer(rb, func(rb *RadioButton) { gobject.Unref(rb) })\n}",
"func (im InputMethod) Remove(tconn *chrome.TestConn) action.Action {\n\tf := func(ctx context.Context, fullyQualifiedIMEID string) error {\n\t\treturn RemoveInputMethod(ctx, tconn, fullyQualifiedIMEID)\n\t}\n\treturn im.actionWithFullyQualifiedID(tconn, f)\n}",
"func comboBoxTextFinalizer(ct *ComboBoxText) {\n\truntime.SetFinalizer(ct, func(ct *ComboBoxText) { gobject.Unref(ct) })\n}",
"func PromptDelete(username string) string {\n\tprompt := promptui.Select{\n\t\tLabel: \"Delete the user? (This will remove all their repositories, gists, applications, and personal settings)\",\n\t\tItems: []string{\"yes\", \"no\"},\n\t}\n\n\t_, result, err := prompt.Run()\n\tif err != nil {\n\t\tlog.Fatalf(\"Prompt failed %v\\n\", err)\n\t}\n\treturn result\n}",
"func (pdu *Pdu) RemoveOption(key OptionKey) {\n\topts := optsSorter{pdu.Options}\n\tpdu.Options = opts.Minus(key).opts\n}",
"func (s *ActionsService) RemoveSelectedRepoFromOrgSecret(ctx context.Context, org, name string, repo *Repository) (*Response, error) {\n\turl := fmt.Sprintf(\"orgs/%v/actions/secrets/%v/repositories/%v\", org, name, *repo.ID)\n\treturn s.removeSelectedRepoFromSecret(ctx, url)\n}",
"func Remove(confirm bool, name string) error {\n\tif !confirm {\n\t\treturn nil\n\t}\n\tif err := os.Remove(name); err != nil {\n\t\treturn fmt.Errorf(\"remove %q: %w\", name, err)\n\t}\n\treturn nil\n}",
"func (s *slot) remove(c interface{}) {\n\ts.mux.Lock()\n\tdefer s.mux.Unlock()\n\tdelete(s.elements, c)\n}",
"func handleFiles(_ *gtk.Button, win *gtk.Window) error {\n\tfp, err := runFileChooser(win)\n\tif err != nil {\n\t\tlog(ERR, fmt.Sprintf(\" selecting file: %s\\n\", err.Error()))\n\t\treturn err\n\t}\n\n\tcont, err := ioutil.ReadFile(fp)\n\tif err != nil {\n\t\tlog(ERR, fmt.Sprintf(\" reading file: '%s'\\n\", err.Error()))\n\t\treturn err\n\t}\n\n\tok, err := lib.IsTagNote(&cont)\n\tif err != nil {\n\t\tlog(ERR, fmt.Sprintf(\" checking file header: %s\\n\", err.Error()))\n\t\treturn err\n\t}\n\n\tchild, _ := win.GetChild()\n\twin.Remove(child)\n\n\tif ok {\n\t\ttagList, err := lib.TextToEnt(&cont)\n\t\tif err != nil {\n\t\t\tlog(ERR, fmt.Sprintf(\" splitting content: '%s'\", err.Error()))\n\t\t\treturn err\n\t\t}\n\t\tappShowTags(win, tagList)\n\t} else {\n\t\tappConsolatoryWin(win)\n\t}\n\n\treturn nil\n}",
"func _file_delete(call otto.FunctionCall) otto.Value {\n\tpath, _ := call.Argument(0).ToString()\n\n\terr := os.RemoveAll(path)\n\tif err != nil {\n\t\tjsThrow(call, err)\n\t}\n\treturn otto.Value{}\n}",
"func (g *Generator) RemoveSeccompSyscallByAction(action string) error {\n\tif g.spec == nil || g.spec.Linux == nil || g.spec.Linux.Seccomp == nil {\n\t\treturn nil\n\t}\n\n\tif err := checkSeccompSyscallAction(action); err != nil {\n\t\treturn err\n\t}\n\n\tvar r []rspec.Syscall\n\tfor _, syscall := range g.spec.Linux.Seccomp.Syscalls {\n\t\tif strings.Compare(action, string(syscall.Action)) != 0 {\n\t\t\tr = append(r, syscall)\n\t\t}\n\t}\n\tg.spec.Linux.Seccomp.Syscalls = r\n\treturn nil\n}",
"func Remove(name string) func(*types.Cmd) {\n\treturn func(g *types.Cmd) {\n\t\tg.AddOptions(\"remove\")\n\t\tg.AddOptions(name)\n\t}\n}",
"func (c *Client) UnselectAndExpunge() *Command {\n\tcmd := &unselectCommand{}\n\tc.beginCommand(\"CLOSE\", cmd).end()\n\treturn &cmd.cmd\n}",
"func (s *BasevhdlListener) ExitChoices(ctx *ChoicesContext) {}",
"func createBookConfirm(controls *ControlList, conf *cf.Config) {\n\tcw, ch := term.Size()\n\thalfWidth := cw / 2\n\tdlgWidth := (halfWidth - 5) * 2\n\tcontrols.askWindow = ui.AddWindow(5, ch/2-8, dlgWidth, 3, \"Remove book\")\n\tcontrols.askWindow.SetConstraints(dlgWidth, ui.KeepValue)\n\tcontrols.askWindow.SetModal(false)\n\tcontrols.askWindow.SetPack(ui.Vertical)\n\n\tui.CreateFrame(controls.askWindow, 1, 1, ui.BorderNone, ui.Fixed)\n\tfbtn := ui.CreateFrame(controls.askWindow, 1, 1, ui.BorderNone, 1)\n\tui.CreateFrame(fbtn, 1, 1, ui.BorderNone, ui.Fixed)\n\tcontrols.askLabel = ui.CreateLabel(fbtn, 10, 3, \"Remove book?\", 1)\n\tcontrols.askLabel.SetMultiline(true)\n\tui.CreateFrame(fbtn, 1, 1, ui.BorderNone, ui.Fixed)\n\n\tui.CreateFrame(controls.askWindow, 1, 1, ui.BorderNone, ui.Fixed)\n\tfrm1 := ui.CreateFrame(controls.askWindow, 16, 4, ui.BorderNone, ui.Fixed)\n\tui.CreateFrame(frm1, 1, 1, ui.BorderNone, 1)\n\tcontrols.askRemove = ui.CreateButton(frm1, ui.AutoSize, ui.AutoSize, \"Remove\", ui.Fixed)\n\tcontrols.askCancel = ui.CreateButton(frm1, ui.AutoSize, ui.AutoSize, \"Cancel\", ui.Fixed)\n\tcontrols.askCancel.OnClick(func(ev ui.Event) {\n\t\tcontrols.askWindow.SetModal(false)\n\t\tcontrols.askWindow.SetVisible(false)\n\t\tui.ActivateControl(controls.bookListWindow, controls.bookTable)\n\t})\n\n\tcontrols.askWindow.SetVisible(false)\n\tcontrols.askWindow.OnClose(func(ev ui.Event) bool {\n\t\tcontrols.askWindow.SetVisible(false)\n\t\tcontrols.bookListWindow.SetModal(true)\n\t\tui.ActivateControl(controls.bookListWindow, controls.bookTable)\n\t\treturn false\n\t})\n}",
"func Remove(name string) error",
"func RemoveAction(name string) {\n\tdelete(actions, name)\n}",
"func RemoveOrQuit(filename string) error {\n\tif !FileExists(filename) {\n\t\treturn nil\n\t}\n\n\tfmt.Printf(\"%s exists, overwrite it?\\nEnter to overwrite or Ctrl-C to cancel...\",\n\t\tcolor.New(color.BgRed, color.Bold).Render(filename))\n\tbufio.NewReader(os.Stdin).ReadBytes('\\n')\n\n\treturn os.Remove(filename)\n}",
"func UnSearchButton(cb func()) {\n\tjs.Global.Get(\"document\").Call(\"removeEventListener\", \"searchbutton\", cb, false)\n}",
"func removeApp(search string, apps *applications) error {\n\tif i, found := stringInSlice(search, (*apps)[\"default\"]); found == true {\n\t\t(*apps)[\"default\"] = append((*apps)[\"default\"][:i], (*apps)[\"default\"][i+1:]...)\n\t\tmsg := fmt.Sprintf(\"Successfully removed %s from default opener group\", search)\n\t\terr := rewriteApps(apps, msg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tmsg := fmt.Sprintf(\"Could not find %s in configuration\\n\", search)\n\t\treturn errors.New(msg)\n\t}\n\n\treturn nil\n}",
"func (i *bashInstaller) Remove(ctx context.Context, f resources.Feature, t resources.Targetable, v data.Map, s resources.FeatureSettings) (r resources.Results, ferr fail.Error) {\n\tr = nil\n\tdefer fail.OnPanic(&ferr)\n\n\tif ctx == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"ctx\")\n\t}\n\tif f == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"f\")\n\t}\n\tif t == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"t\")\n\t}\n\n\tif !f.(*Feature).Specs().IsSet(\"feature.install.bash.remove\") {\n\t\tmsg := `syntax error in Feature '%s' specification file (%s):\n\t\t\t\tno key 'feature.install.bash.remove' found`\n\t\treturn nil, fail.SyntaxError(msg, f.GetName(), f.GetDisplayFilename(ctx))\n\t}\n\n\tw, xerr := newWorker(ctx, f, t, installmethod.Bash, installaction.Remove, nil)\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\tdefer w.Terminate()\n\n\txerr = w.CanProceed(ctx, s)\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\tlogrus.Info(xerr.Error())\n\t\treturn nil, xerr\n\t}\n\n\tr, xerr = w.Proceed(ctx, v, s)\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\treturn r, fail.Wrap(xerr, \"failed to remove Feature '%s' from %s '%s'\", f.GetName(), t.TargetType(), t.GetName())\n\t}\n\n\treturn r, nil\n}",
"func (fb *FlowBox) UnselectAll() {\n\tC.gtk_flow_box_unselect_all(fb.native())\n}",
"func (c *Client) Remove(code, filepath string) error {\n\t// TODO: Decide, based on the code, which provider to choose\n\treturn c.googledrive.Remove(code, filepath)\n}",
"func (s *BasevhdlListener) ExitSelected_signal_assignment(ctx *Selected_signal_assignmentContext) {}",
"func (fs *bundleFs) Remove(name string) error {\n\treturn ErrReadOnly\n}",
"func (c *Action) RemoveMatcher(o string) {\n\tfmt.Fprintf(c.w, removeMatcherFmt, o)\n}",
"func (recv *ParamSpecPool) Remove(pspec *ParamSpec) {\n\tc_pspec := (*C.GParamSpec)(C.NULL)\n\tif pspec != nil {\n\t\tc_pspec = (*C.GParamSpec)(pspec.ToC())\n\t}\n\n\tC.g_param_spec_pool_remove((*C.GParamSpecPool)(recv.native), c_pspec)\n\n\treturn\n}",
"func Remove(file string) {\n\tknownFiles[file] = false\n\tdelete(allFiles, file)\n}",
"func (p *Merger) Remove(file string) string {\n\tfor i, f := range p.files {\n\t\tif f == file {\n\t\t\tremoved := p.files[i]\n\t\t\tp.files = append(p.files[:i], p.files[i+1:]...)\n\t\t\treturn removed\n\t\t}\n\t}\n\treturn \"\"\n}",
"func (tv *TextView) ClearSelected() {\n\ttv.WidgetBase.ClearSelected()\n\ttv.SelectReset()\n}",
"func selectFileGUI(titleA string, filterNameA string, filterTypeA string) string {\n\tfileNameT, errT := dialog.File().Filter(filterNameA, filterTypeA).Title(titleA).Load()\n\n\tif errT != nil {\n\t\treturn tk.GenerateErrorStringF(\"failed: %v\", errT)\n\t}\n\n\treturn fileNameT\n}",
"func (f *FileStore) Remove(collection, id string) error {\n\treturn os.Remove(filepath.Join(f.Base, collection, id+\".txt\"))\n}",
"func (file *Remote) Remove(p string) error {\n\tif p != \"\" {\n\t\tfile, err := file.walk(p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Close is not necessary. Remove is also a clunk.\n\n\t\treturn file.Remove(\"\")\n\t}\n\n\t_, err := file.client.Send(&Tremove{\n\t\tFID: file.fid,\n\t})\n\treturn err\n}",
"func Uninstall() error { return mageextras.Uninstall(\"factorio\") }",
"func RemoveMatchSignal(conn *dbus.Conn, s Signal, opts ...dbus.MatchOption) error {\n\treturn conn.RemoveMatchSignal(append([]dbus.MatchOption{\n\t\tdbus.WithMatchInterface(s.Interface()),\n\t\tdbus.WithMatchMember(s.Name()),\n\t}, opts...)...)\n}",
"func Remove(options types.RemoveOptions, config config.Store) error {\n\tapp := &AppImage{}\n\n\tindexFile := fmt.Sprintf(\"%s.json\", path.Join(config.IndexStore, options.Executable))\n\tlogger.Debugf(\"Checking if %s exists\", indexFile)\n\tif !helpers.CheckIfFileExists(indexFile) {\n\t\tfmt.Printf(\"%s is not installed \\n\", tui.Yellow(options.Executable))\n\t\treturn nil\n\t}\n\n\tbar := tui.NewProgressBar(7, \"r\")\n\n\tlogger.Debugf(\"Unmarshalling JSON from %s\", indexFile)\n\tindexBytes, err := ioutil.ReadFile(indexFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbar.Add(1)\n\n\terr = json.Unmarshal(indexBytes, app)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif app.IconPath != \"\" {\n\t\tlogger.Debugf(\"Removing thumbnail, %s\", app.IconPath)\n\t\tos.Remove(app.IconPath)\n\t}\n\tbar.Add(1)\n\n\tif app.IconPathHicolor != \"\" {\n\t\tlogger.Debugf(\"Removing symlink to hicolor theme, %s\", app.IconPathHicolor)\n\t\tos.Remove(app.IconPathHicolor)\n\t}\n\tbar.Add(1)\n\n\tif app.DesktopFile != \"\" {\n\t\tlogger.Debugf(\"Removing desktop file, %s\", app.DesktopFile)\n\t\tos.Remove(app.DesktopFile)\n\t}\n\tbar.Add(1)\n\n\tbinDir := path.Join(xdg.Home, \".local\", \"bin\")\n\tbinFile := path.Join(binDir, options.Executable)\n\n\tif helpers.CheckIfFileExists(binFile) {\n\t\tbinAbsPath, err := filepath.EvalSymlinks(binFile)\n\t\tif err == nil && strings.HasPrefix(binAbsPath, config.LocalStore) {\n\t\t\t// this link points to config.LocalStore, where all AppImages are stored\n\t\t\t// I guess we need to remove them, no asking and all\n\t\t\t// make sure we remove the file first to prevent conflicts in future\n\t\t\t_ = os.Remove(binFile)\n\t\t}\n\t}\n\tbar.Add(1)\n\n\tlogger.Debugf(\"Removing appimage, %s\", app.Filepath)\n\t_ = os.Remove(app.Filepath)\n\tbar.Add(1)\n\n\tlogger.Debugf(\"Removing index file, %s\", indexFile)\n\t_ = os.Remove(indexFile)\n\tbar.Add(1)\n\n\tbar.Finish()\n\tfmt.Printf(\"\\n\")\n\tfmt.Printf(\"✅ %s removed successfully\\n\", 
app.Executable)\n\tlogger.Debugf(\"Removing all files completed successfully\")\n\n\treturn bar.Finish()\n}",
"func triggerSuggestionRemovalAction(tconn *chrome.TestConn, resultFinder, removeButton *nodewith.Finder, tabletMode bool) uiauto.Action {\n\tui := uiauto.New(tconn)\n\n\t// To get the removal action button to show up:\n\t// in clamshel, hover the mouse over the result view;\n\t// in tablet mode, long press the result view\n\tif tabletMode {\n\t\treturn uiauto.Combine(\"activate remove button using touch\",\n\t\t\tfunc(ctx context.Context) error {\n\t\t\t\ttouchCtx, err := touch.New(ctx, tconn)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.Wrap(err, \"Fail to get touch screen\")\n\t\t\t\t}\n\t\t\t\tdefer touchCtx.Close()\n\n\t\t\t\treturn touchCtx.LongPress(resultFinder)(ctx)\n\t\t\t},\n\t\t\tui.WaitUntilExists(removeButton),\n\t\t\tui.LeftClick(removeButton),\n\t\t\tui.WaitUntilExists(removalDialogFinder))\n\t}\n\n\treturn uiauto.Combine(\"activate remove button using mouse\",\n\t\tui.MouseMoveTo(resultFinder, 10*time.Millisecond),\n\t\tui.WaitUntilExists(removeButton),\n\t\tui.LeftClick(removeButton),\n\t\tui.WaitUntilExists(removalDialogFinder))\n}",
"func (m mimeTypeFormats) Del(mimeType string) {\n\tdelete(m, mimeType)\n}",
"func removeFile(n string) {\n\terr := os.Remove(n)\n\tif err != nil {\n\t\tglog.Fatal(\"CleanupFiles \", err)\n\t}\n}",
"func Remove(pkgname string, p *confparse.IniParser) error {\n\ttestdb, testdir, err := getInstallConf(p)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdb, err := backends.NewBolt(path.Join(testdir, testdb))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\tlist, err := db.Get([]byte(pkgname))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = futils.RemoveList(testdir, strings.Split(string(list), \"\\n\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdb.Del([]byte(pkgname))\n\n\treturn nil\n}",
"func (store *dbStore) DeleteChoiceByID(id string) error {\r\n\r\n\tsqlStatement := fmt.Sprint(\"DELETE FROM movie where choice_id= \", id)\r\n\tfmt.Println(id)\r\n\r\n\tfmt.Println(sqlStatement)\r\n\t_, err := store.db.Query(sqlStatement)\r\n\tif err != nil {\r\n\t\tfmt.Printf(\"failed to execute delete movie query on the database: %v\", err)\r\n\t\treturn err\r\n\t}\r\n\r\n\tsqlStatement = fmt.Sprint(\"DELETE FROM restaurant where choice_id= \", id)\r\n\r\n\tfmt.Println(sqlStatement)\r\n\r\n\t_, err = store.db.Query(sqlStatement)\r\n\tif err != nil {\r\n\t\tfmt.Printf(\"failed to execute delete restaurant query on the database: %v\", err)\r\n\t\treturn err\r\n\t}\r\n\r\n\tsqlStatement = fmt.Sprint(\"DELETE FROM choice where id= \", id)\r\n\r\n\tfmt.Println(sqlStatement)\r\n\r\n\t_, err = store.db.Query(sqlStatement)\r\n\tif err != nil {\r\n\t\tfmt.Printf(\"failed to execute delete choice query on the database: %v\", err)\r\n\t\treturn err\r\n\t}\r\n\r\n\treturn nil\r\n}",
"func Choice(m string, exacts []string) string {\n\tfmt.Println(colors.Blue(prefix + \" \" + m + \": \"))\n\tret := make(chan string, 1)\n\tterminate := make(chan struct{})\n\tgo cho.Run(exacts, ret, terminate)\n\tselected := \"\"\nLOOP:\n\tfor {\n\t\tselect {\n\t\tcase selected = <-ret:\n\t\t\tbreak LOOP\n\t\tcase <-terminate:\n\t\t\tbreak LOOP\n\t\t}\n\t}\n\tif selected != \"\" {\n\t\tfmt.Println(selected)\n\t}\n\treturn selected\n}",
"func GuiTextBoxDelete(text string, length int, before bool) (int, string) {\n\tctext := C.CString(text)\n\tdefer C.free(unsafe.Pointer(ctext))\n\tres := C.GuiTextBoxDelete(ctext, C.int(int32(length)), C.bool(before))\n\treturn int(int32(res)), C.GoString(ctext)\n}",
"func (s *SignalMonkey) Remove(fn *SignalHandler) {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\tfor i, x := range s.handlers {\n\t\tif x.ID == fn.ID {\n\t\t\t// Delete preserving order from here:\n\t\t\t// https://code.google.com/p/go-wiki/wiki/SliceTricks\n\t\t\tcopy(s.handlers[i:], s.handlers[i+1:])\n\t\t\ts.handlers[len(s.handlers)-1] = nil // or the zero value of T\n\t\t\ts.handlers = s.handlers[:len(s.handlers)-1]\n\t\t}\n\t}\n}",
"func (ptr *Application) onClickMenuFileExit() {\n\n\t// confirm whether the user intended to exit Notepad or not if they\n\t// have unsaved changes\n\tif ptr.hasUnsavedChanges {\n\t\tconfirmationDialog := dialog.MsgBuilder{\n\t\t\tDlg: dialog.Dlg{Title: \"Exit Notepad\"},\n\t\t\tMsg: \"Discard changes & exit Notepad?\",\n\t\t}\n\t\tif !confirmationDialog.YesNo() {\n\t\t\treturn\n\t\t}\n\t}\n\n\tos.Exit(0)\n}",
"func ConfirmRemove(question string) {\n\t// Prompt the user to make sure he/she wants to do this\n\tfmt.Print(question + \" [y/N]: \")\n\tvar response string\n\n\treader := bufio.NewReader(os.Stdin)\n\tresponse, err := reader.ReadString('\\n')\n\tif err != nil {\n\t\tFatal(CLI_INPUT_ERROR, i18n.GetMessagePrinter().Sprintf(\"Error reading input, error %v\", err))\n\t}\n\tresponse = strings.TrimSuffix(response, \"\\n\")\n\tresponse = strings.ToLower(response)\n\n\tif strings.TrimSpace(response) != \"y\" {\n\t\ti18n.GetMessagePrinter().Printf(\"Exiting.\")\n\t\ti18n.GetMessagePrinter().Println()\n\t\tos.Exit(0)\n\t}\n}",
"func (g *GitLocal) Remove(dir, fileName string) error {\n\treturn g.GitCLI.Remove(dir, fileName)\n}",
"func (o *Permissao) UnsetChave() {\n\to.Chave.Unset()\n}",
"func (g *Generator) RemoveSeccompSyscall(name string, action string) error {\n\tif g.spec == nil || g.spec.Linux == nil || g.spec.Linux.Seccomp == nil {\n\t\treturn nil\n\t}\n\n\tif err := checkSeccompSyscallAction(action); err != nil {\n\t\treturn err\n\t}\n\n\tvar r []rspec.Syscall\n\tfor _, syscall := range g.spec.Linux.Seccomp.Syscalls {\n\t\tif !(strings.Compare(name, syscall.Name) == 0 &&\n\t\t\tstrings.Compare(action, string(syscall.Action)) == 0) {\n\t\t\tr = append(r, syscall)\n\t\t}\n\t}\n\tg.spec.Linux.Seccomp.Syscalls = r\n\treturn nil\n}",
"func (f *FileStore) Remove(paths ...string) {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\tvar active []TSMFile\n\tfor _, file := range f.files {\n\t\tkeep := true\n\t\tfor _, remove := range paths {\n\t\t\tif remove == file.Path() {\n\t\t\t\tkeep = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif keep {\n\t\t\tactive = append(active, file)\n\t\t}\n\t}\n\tf.files = active\n}",
"func (d *deferredConfirmations) remove(tag uint64) {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\tdc, found := d.confirmations[tag]\n\tif !found {\n\t\treturn\n\t}\n\tclose(dc.done)\n\tdelete(d.confirmations, tag)\n}",
"func (c *Client) Unselect() error {\n\tif c.c.State() != imap.SelectedState {\n\t\treturn client.ErrNoMailboxSelected\n\t}\n\n\tcmd := &Command{}\n\n\tif status, err := c.c.Execute(cmd, nil); err != nil {\n\t\treturn err\n\t} else if err := status.Err(); err != nil {\n\t\treturn err\n\t}\n\n\tc.c.SetState(imap.AuthenticatedState, nil)\n\treturn nil\n}",
"func (o *Cause) UnsetSuggestedAction() {\n\to.SuggestedAction.Unset()\n}",
"func (s *BasejossListener) ExitPartSel(ctx *PartSelContext) {}",
"func (m *ItemsMutator) ClearEnum() *ItemsMutator {\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\t_ = m.proxy.enum.Clear()\n\treturn m\n}",
"func imageMenuItemFinalizer(m *ImageMenuItem) {\n\truntime.SetFinalizer(m, func(m *ImageMenuItem) { gobject.Unref(m) })\n}",
"func (cb *configBased) Remove(aps []AvailablePlugin, id string) (AvailablePlugin, error) {\n\tap, err := cb.Select(aps, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdelete(cb.metricCache, id)\n\tdelete(cb.plugins, id)\n\treturn ap, nil\n}",
"func (ptr *Application) onClickMenuEditDelete() {\n\tptr.textEditor.Delete()\n}",
"func (a *Autocompleter) Delete() error {\n\tconn := a.pool.Get()\n\tdefer conn.Close()\n\n\t_, err := conn.Do(\"DEL\", a.name)\n\treturn err\n}",
"func RemoveSpinner(tag, text string, success bool) {\n\tif s, ok := spinners[tag]; ok {\n\t\tvar prefix string\n\t\tif success {\n\t\t\tprefix = color.GreenString(\"✔\")\n\t\t} else {\n\t\t\tprefix = color.RedString(\"𝗫\")\n\t\t}\n\n\t\ts.FinalMSG = fmt.Sprintf(\"%s %s\\n\", prefix, text)\n\t\ts.Stop()\n\t\tdelete(spinners, tag)\n\t}\n}",
"func (e *Exporter) removeInstallationReference(chiName string) {\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\tdelete(e.chInstallations, chiName)\n}",
"func (f *Formatter) RemoveProvider(n string) {\n\tdelete(f.providers, n)\n}",
"func (s *DefaultSelector) RemoveSource(name string) error {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tif _, ok := s.sources[name]; !ok {\n\t\treturn fmt.Errorf(\"unknown source %s\", name)\n\t}\n\tdelete(s.sources, name)\n\treturn nil\n}",
"func (f *Formatter) Remove(r terraform.StateResource) string {\n\tp := f.getProvider(r.ProviderName)\n\tif nil == p {\n\t\treturn \"\"\n\t}\n\treturn p.Remove(r)\n}",
"func (s *BasevhdlListener) ExitSelected_name_part(ctx *Selected_name_partContext) {}",
"func (c *Client) Unselect() *Command {\n\tcmd := &unselectCommand{}\n\tc.beginCommand(\"UNSELECT\", cmd).end()\n\treturn &cmd.cmd\n}",
"func UnVolumeDownButton(cb func()) {\n\tjs.Global.Get(\"document\").Call(\"removeEventListener\", \"volumedownbutton\", cb, false)\n}",
"func Remove(pkg string, flags *types.Flags) error {\n\tfmt.Println(\"remove is not working yet...\")\n\treturn nil\n}",
"func (_DappboxManager *DappboxManagerTransactor) RemoveFile(opts *bind.TransactOpts, dappboxAddress common.Address, fileNameMD5 string, fileNameSHA256 string, fileNameSHA1 string, folderAddress common.Address) (*types.Transaction, error) {\n\treturn _DappboxManager.contract.Transact(opts, \"RemoveFile\", dappboxAddress, fileNameMD5, fileNameSHA256, fileNameSHA1, folderAddress)\n}",
"func (reg *registry[Item]) remove(topic string, ch chan Item, drain_channel bool) {\n\treg.lock.Lock()\n\tdefer reg.lock.Unlock()\n\tif _, ok := reg.topics[topic]; !ok {\n\t\treturn\n\t}\n\n\tif _, ok := reg.topics[topic][ch]; !ok {\n\t\treturn\n\t}\n\n\tdelete(reg.topics[topic], ch)\n\tdelete(reg.revTopics[ch], topic)\n\n\tif len(reg.topics[topic]) == 0 {\n\t\tdelete(reg.topics, topic)\n\t}\n\n\tif len(reg.revTopics[ch]) == 0 {\n\t\tif drain_channel {\n\t\t\treg.drainAndScheduleCloseChannel(ch)\n\t\t}\n\t\tdelete(reg.revTopics, ch)\n\t}\n}",
"func (set StringSet) Remove(e string) {\n\tdelete(set, e)\n}",
"func (s *SetOfStr) Remove(x string) {\n\ts.mux.Lock()\n\tdefer s.mux.Unlock()\n\tdelete(s.set, x)\n}",
"func (tw *TimeWheel) Remove(c interface{}) {\n\ttw.mux.Lock()\n\tdefer tw.mux.Unlock()\n\tif v, ok := tw.indicator[c]; ok {\n\t\tv.remove(c)\n\t}\n}",
"func (self *WorkingTreeCommands) UnStageFile(fileNames []string, reset bool) error {\n\tfor _, name := range fileNames {\n\t\tvar cmdArgs []string\n\t\tif reset {\n\t\t\tcmdArgs = NewGitCmd(\"reset\").Arg(\"HEAD\", \"--\", name).ToArgv()\n\t\t} else {\n\t\t\tcmdArgs = NewGitCmd(\"rm\").Arg(\"--cached\", \"--force\", \"--\", name).ToArgv()\n\t\t}\n\n\t\terr := self.cmd.New(cmdArgs).Run()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (f *flagTrackerImpl) RemoveFlagValueChangeListener(listener <-chan interfaces.FlagValueChangeEvent) {\n\tf.lock.Lock()\n\tflagCh, ok := f.valueChangeSubscriptions[listener]\n\tdelete(f.valueChangeSubscriptions, listener)\n\tf.lock.Unlock()\n\n\tif ok {\n\t\tf.broadcaster.RemoveListener(flagCh)\n\t}\n}",
"func (c *QueuedChan) remove(cmd *queuedChanRemoveCmd) {\n\t// Get object count before remove.\n\tcount := c.List.Len()\n\t// Iterate list.\n\tfor i := c.Front(); i != nil; {\n\t\tvar re *list.Element\n\t\t// Filter object.\n\t\tok, cont := cmd.f(i.Value)\n\t\tif ok {\n\t\t\tre = i\n\t\t}\n\t\t// Next element.\n\t\ti = i.Next()\n\t\t// Remove element\n\t\tif nil != re {\n\t\t\tc.List.Remove(re)\n\t\t}\n\t\t// Continue\n\t\tif !cont {\n\t\t\tbreak\n\t\t}\n\t}\n\t// Update channel length\n\tatomic.StoreInt32(&c.len, int32(c.List.Len()))\n\t// Return removed object number.\n\tcmd.r <- count - c.List.Len()\n}",
"func (m *wasiSnapshotPreview1Impl) pathRemoveDirectory(pfd wasiFd, ppath list) (err wasiErrno) {\n\tpath, err := m.loadPath(ppath)\n\tif err != wasiErrnoSuccess {\n\t\treturn err\n\t}\n\n\tdir, err := m.files.getDirectory(pfd, wasiRightsPathRemoveDirectory)\n\tif err != wasiErrnoSuccess {\n\t\treturn err\n\t}\n\n\tif ferr := dir.Rmdir(path); ferr != nil {\n\t\treturn fileErrno(ferr)\n\t}\n\treturn wasiErrnoSuccess\n}",
"func (v *IconView) UnselectAll() {\n\tC.gtk_icon_view_unselect_all(v.native())\n}",
"func (s StringSet) Del(x string) { delete(s, x) }",
"func (b *Cowbuilder) RemoveDistribution(d deb.Codename, a deb.Architecture) error {\n\tb.acquire()\n\tdefer b.release()\n\n\timagePath, err := b.supportedDistributionPath(d, a)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Distribution %s architecture %s is not supported\", d, a)\n\t}\n\n\treturn os.RemoveAll(imagePath)\n}",
"func (g *Gui) DeleteView(name string) error {\n\tfor i, v := range g.views {\n\t\tif v.name == name {\n\t\t\tg.views = append(g.views[:i], g.views[i+1:]...)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn ErrUnknownView\n}",
"func (v *TreeStore) Remove(iter *TreeIter) bool {\n\tvar ti *C.GtkTreeIter\n\tif iter != nil {\n\t\tti = iter.native()\n\t}\n\treturn 0 != C.gtk_tree_store_remove(v.native(), ti)\n}",
"func (c *KeyStringValueChanger) Remove() error {\n\tif c.err != nil {\n\t\treturn c.err\n\t}\n\treturn c.node.remove()\n}",
"func remove(ymlfile string, packageName string) error {\n\tappFS := afero.NewOsFs()\n\tyf, _ := afero.ReadFile(appFS, ymlfile)\n\tfi, err := os.Stat(ymlfile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar out []byte\n\ti := 0\n\tlines := bytes.Split(yf, []byte(\"\\n\"))\n\tfor _, line := range lines {\n\t\ti++\n\t\t// trim the line to detect the start of the list of packages\n\t\t// but do not write the trimmed string as it may cause an\n\t\t// unneeded file diff to the yml file\n\t\tsline := bytes.TrimLeft(line, \" \")\n\t\tif bytes.HasPrefix(sline, []byte(\"- \"+packageName)) {\n\t\t\tcontinue\n\t\t}\n\t\tout = append(out, line...)\n\t\tif i < len(lines) {\n\t\t\tout = append(out, []byte(\"\\n\")...)\n\t\t}\n\t}\n\terr = afero.WriteFile(appFS, ymlfile, out, fi.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}"
] | [
"0.533146",
"0.5215007",
"0.5209094",
"0.49791583",
"0.49628338",
"0.49071372",
"0.48017108",
"0.478262",
"0.46966022",
"0.4593523",
"0.45922872",
"0.45756906",
"0.45345142",
"0.451024",
"0.45045552",
"0.44789824",
"0.4443359",
"0.44415736",
"0.44238982",
"0.44187573",
"0.4407801",
"0.44",
"0.43882132",
"0.43829113",
"0.43458688",
"0.43298683",
"0.4324845",
"0.43083587",
"0.430666",
"0.4300404",
"0.42872494",
"0.42612848",
"0.42497066",
"0.4238545",
"0.4234839",
"0.4223826",
"0.4222659",
"0.4209632",
"0.41951227",
"0.4175401",
"0.4170202",
"0.41678333",
"0.41546237",
"0.41534513",
"0.41507977",
"0.41492438",
"0.41439155",
"0.4143675",
"0.41277194",
"0.4126404",
"0.41247413",
"0.41202512",
"0.41188022",
"0.41174144",
"0.41129383",
"0.41089833",
"0.4098706",
"0.40953255",
"0.40948",
"0.4094037",
"0.40917018",
"0.40844667",
"0.40799814",
"0.407597",
"0.40759593",
"0.40714693",
"0.40686762",
"0.40578747",
"0.40536693",
"0.4049965",
"0.4042002",
"0.4038193",
"0.40361717",
"0.4035757",
"0.4034935",
"0.4026948",
"0.40182102",
"0.40171975",
"0.4017068",
"0.40149215",
"0.40140197",
"0.40134957",
"0.40125352",
"0.4007673",
"0.4006482",
"0.40054506",
"0.40027034",
"0.40007406",
"0.40004072",
"0.39941707",
"0.39929655",
"0.39883822",
"0.39861444",
"0.39856735",
"0.39736247",
"0.39710194",
"0.39603364",
"0.39581096",
"0.39532194",
"0.39526963"
] | 0.86442417 | 0 |
SetChoice is a wrapper around gtk_file_chooser_set_choice(). | SetChoice — это обертка вокруг gtk_file_chooser_set_choice(). | func (v *FileChooser) SetChoice(id, option string) {
cId := C.CString(id)
defer C.free(unsafe.Pointer(cId))
cOption := C.CString(option)
defer C.free(unsafe.Pointer(cOption))
C.gtk_file_chooser_set_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cOption))
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *FileChooser) AddChoice(id, label string, options, optionLabels []string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\n\tcLabel := C.CString(label)\n\tdefer C.free(unsafe.Pointer(cLabel))\n\n\tif options == nil || optionLabels == nil {\n\t\tC.gtk_file_chooser_add_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cLabel), nil, nil)\n\t\treturn\n\t}\n\n\tcOptions := C.make_strings(C.int(len(options) + 1))\n\tfor i, option := range options {\n\t\tcstr := C.CString(option)\n\t\tdefer C.free(unsafe.Pointer(cstr))\n\t\tC.set_string(cOptions, C.int(i), (*C.gchar)(cstr))\n\t}\n\tC.set_string(cOptions, C.int(len(options)), nil)\n\n\tcOptionLabels := C.make_strings(C.int(len(optionLabels) + 1))\n\tfor i, optionLabel := range optionLabels {\n\t\tcstr := C.CString(optionLabel)\n\t\tdefer C.free(unsafe.Pointer(cstr))\n\t\tC.set_string(cOptionLabels, C.int(i), (*C.gchar)(cstr))\n\t}\n\tC.set_string(cOptionLabels, C.int(len(optionLabels)), nil)\n\n\tC.gtk_file_chooser_add_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cLabel), cOptions, cOptionLabels)\n}",
"func (f *FlagChoice) Set(value string) error {\n\tfor _, choice := range f.choices {\n\t\tif choice == value {\n\t\t\tf.chosen = value\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"%q is not a valid choice, must be: %s\", value, f.String())\n}",
"func (v *FileChooser) GetChoice(id string) string {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tc := C.gtk_file_chooser_get_choice(v.native(), (*C.gchar)(cId))\n\treturn C.GoString(c)\n}",
"func Choice(s *string, choices []string, title, id, class string, valid Validator) (jquery.JQuery, error) {\n\tj := jq(\"<select>\").AddClass(ClassPrefix + \"-choice\").AddClass(class)\n\tj.SetAttr(\"title\", title).SetAttr(\"id\", id)\n\tif *s == \"\" {\n\t\t*s = choices[0]\n\t}\n\tindex := -1\n\tfor i, c := range choices {\n\t\tif c == *s {\n\t\t\tindex = i\n\t\t}\n\t\tj.Append(jq(\"<option>\").SetAttr(\"value\", c).SetText(c))\n\t}\n\tif index == -1 {\n\t\treturn jq(), fmt.Errorf(\"Default of '%s' is not among valid choices\", *s)\n\t}\n\tj.SetData(\"prev\", index)\n\tj.SetProp(\"selectedIndex\", index)\n\tj.Call(jquery.CHANGE, func(event jquery.Event) {\n\t\tnewS := event.Target.Get(\"value\").String()\n\t\tnewIndex := event.Target.Get(\"selectedIndex\").Int()\n\t\tif valid != nil && !valid.Validate(newS) {\n\t\t\tnewIndex = int(j.Data(\"prev\").(float64))\n\t\t\tj.SetProp(\"selectedIndex\", newIndex)\n\t\t}\n\t\t*s = choices[int(newIndex)]\n\t\tj.SetData(\"prev\", newIndex)\n\t})\n\treturn j, nil\n}",
"func (fv *FileView) SetSelFileAction(sel string) {\n\tfv.SelFile = sel\n\tsv := fv.FilesView()\n\tsv.SelectFieldVal(\"Name\", fv.SelFile)\n\tfv.SelectedIdx = sv.SelectedIdx\n\tsf := fv.SelField()\n\tsf.SetText(fv.SelFile)\n\tfv.WidgetSig.Emit(fv.This, int64(gi.WidgetSelected), fv.SelectedFile())\n}",
"func (cv *Choice) Set(value string) error {\n\tif indexof.String(cv.AllowedValues, value) == indexof.NotFound {\n\t\treturn fmt.Errorf(\n\t\t\t\"invalid flag value: %s, must be one of %s\",\n\t\t\tvalue,\n\t\t\tstrings.Join(cv.AllowedValues, \", \"),\n\t\t)\n\t}\n\n\tcv.Choice = &value\n\treturn nil\n}",
"func (v *FileChooser) RemoveChoice(id string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tC.gtk_file_chooser_remove_choice(v.native(), (*C.gchar)(cId))\n}",
"func Set(newChoices []string) {\n\tchoices = newChoices\n\tchoicelen = len(newChoices)\n}",
"func Choice(m string, exacts []string) string {\n\tfmt.Println(colors.Blue(prefix + \" \" + m + \": \"))\n\tret := make(chan string, 1)\n\tterminate := make(chan struct{})\n\tgo cho.Run(exacts, ret, terminate)\n\tselected := \"\"\nLOOP:\n\tfor {\n\t\tselect {\n\t\tcase selected = <-ret:\n\t\t\tbreak LOOP\n\t\tcase <-terminate:\n\t\t\tbreak LOOP\n\t\t}\n\t}\n\tif selected != \"\" {\n\t\tfmt.Println(selected)\n\t}\n\treturn selected\n}",
"func (p *PollAnswerVoters) SetChosen(value bool) {\n\tif value {\n\t\tp.Flags.Set(0)\n\t\tp.Chosen = true\n\t} else {\n\t\tp.Flags.Unset(0)\n\t\tp.Chosen = false\n\t}\n}",
"func RenderSetChoice(resp http.ResponseWriter, req *http.Request, ext string, mode primitive.Mode, fileSeeker io.ReadSeeker) {\n\n\top := []OptStruct{\n\t\t{20, mode},\n\t\t{30, mode},\n\t\t{40, mode},\n\t\t{50, mode},\n\t}\n\topFileList, err := GenImgList(ext, fileSeeker, op...)\n\tif err != nil {\n http.Error(resp, err.Error(), http.StatusInternalServerError)\n return\n\t}\n\thtmlist := `<html>\n <body>\n {{range .}}\n <a href=\"/modify/{{.Name}}?mode={{.Mode}}&n={{.Numshapes}}\">\n <img style =\"width 30%\" src=\"/pics/{{.Name}}\">\n {{end}}\n </body>\n </html>\n `\n\ttempl := template.Must(template.New(\"\").Parse(htmlist))\n\n\ttype Opts struct {\n\t\tName string\n\t\tMode primitive.Mode\n\t\tNumshapes int\n\t}\n\tvar opts []Opts\n\tfor index, val := range opFileList {\n\t\topts = append(opts, Opts{Name: filepath.Base(val), Mode: op[index].mode, Numshapes: op[index].num})\n\t}\n\n\t// err = templ.Execute(resp, opts)\n\t// if err != nil {\n\t// \tpanic(err)\n\t// }\n checkError(templ.Execute(resp,opts))\n}",
"func NewChoice() Choice {\n\treturn new(ChoiceImpl)\n}",
"func (c *Combobox) SetSelected(index int) {\n\tC.uiComboboxSetSelected(c.c, C.int(index))\n}",
"func (s *StringEnum) Set(arg string) error {\n\tif _, ok := s.choices[s.choiceMapper(arg)]; !ok {\n\t\tmsg := \"%w (valid choices: %v\"\n\t\tif s.caseInsensitive {\n\t\t\tmsg += \" [case insensitive]\"\n\t\t}\n\t\tmsg += \")\"\n\t\treturn fmt.Errorf(msg, ErrInvalidChoice, s.choiceNames)\n\t}\n\n\ts.val = arg\n\n\treturn nil\n}",
"func NewChoice(allowedValues ...string) Choice {\n\treturn Choice{AllowedValues: allowedValues}\n}",
"func (fv *FileView) FileSelectAction(idx int) {\n\tif idx < 0 {\n\t\treturn\n\t}\n\tfv.SaveSortPrefs()\n\tfi := fv.Files[idx]\n\tfv.SelectedIdx = idx\n\tfv.SelFile = fi.Name\n\tsf := fv.SelField()\n\tsf.SetText(fv.SelFile)\n\tfv.WidgetSig.Emit(fv.This, int64(gi.WidgetSelected), fv.SelectedFile())\n}",
"func (s *BasevhdlListener) EnterChoice(ctx *ChoiceContext) {}",
"func (f *FileDialog) SetFilter(filter storage.FileFilter) {\n\tf.filter = filter\n\tif f.dialog != nil {\n\t\tf.dialog.refreshDir(f.dialog.dir)\n\t}\n}",
"func (s *SelectableAttribute) SetSelected(b bool) {\n\ts.chooser.SetEqualer(BoolEqualer{B: b})\n}",
"func (c ChooserDef) SetState(state ChooserState) {\n\tc.ComponentDef.SetState(state)\n}",
"func (c *Config) promptChoice(prompt string, choices []string, args ...string) (string, error) {\n\tvar defaultValue *string\n\tswitch len(args) {\n\tcase 0:\n\t\t// Do nothing.\n\tcase 1:\n\t\tif !slices.Contains(choices, args[0]) {\n\t\t\treturn \"\", fmt.Errorf(\"%s: invalid default value\", args[0])\n\t\t}\n\t\tdefaultValue = &args[0]\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"want 2 or 3 arguments, got %d\", len(args)+2)\n\t}\n\tif c.interactiveTemplateFuncs.promptDefaults && defaultValue != nil {\n\t\treturn *defaultValue, nil\n\t}\n\treturn c.readChoice(prompt, choices, defaultValue)\n}",
"func (c *Client) SetVoteChoice(ctx context.Context, hash *chainhash.Hash,\n\tchoices map[string]string, tspendPolicy map[string]string, treasuryPolicy map[string]string) error {\n\n\t// Retrieve current voting preferences from VSP.\n\tstatus, err := c.status(ctx, hash)\n\tif err != nil {\n\t\tif errors.Is(err, errors.Locked) {\n\t\t\treturn err\n\t\t}\n\t\tc.log.Errorf(\"Could not check status of VSP ticket %s: %v\", hash, err)\n\t\treturn nil\n\t}\n\n\t// Check for any mismatch between the provided voting preferences and the\n\t// VSP preferences to determine if VSP needs to be updated.\n\tupdate := false\n\n\t// Check consensus vote choices.\n\tfor newAgenda, newChoice := range choices {\n\t\tvspChoice, ok := status.VoteChoices[newAgenda]\n\t\tif !ok {\n\t\t\tupdate = true\n\t\t\tbreak\n\t\t}\n\t\tif vspChoice != newChoice {\n\t\t\tupdate = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Check tspend policies.\n\tfor newTSpend, newChoice := range tspendPolicy {\n\t\tvspChoice, ok := status.TSpendPolicy[newTSpend]\n\t\tif !ok {\n\t\t\tupdate = true\n\t\t\tbreak\n\t\t}\n\t\tif vspChoice != newChoice {\n\t\t\tupdate = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Check treasury policies.\n\tfor newKey, newChoice := range treasuryPolicy {\n\t\tvspChoice, ok := status.TSpendPolicy[newKey]\n\t\tif !ok {\n\t\t\tupdate = true\n\t\t\tbreak\n\t\t}\n\t\tif vspChoice != newChoice {\n\t\t\tupdate = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !update {\n\t\tc.log.Debugf(\"VSP already has correct vote choices for ticket %s\", hash)\n\t\treturn nil\n\t}\n\n\tc.log.Debugf(\"Updating vote choices on VSP for ticket %s\", hash)\n\terr = c.setVoteChoices(ctx, hash, choices, tspendPolicy, treasuryPolicy)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (c *Client) SetVoteChoice(ctx context.Context, hash *chainhash.Hash,\n\tchoices []wallet.AgendaChoice, tspendPolicy map[string]string, treasuryPolicy map[string]string) error {\n\n\t// Retrieve current voting preferences from VSP.\n\tstatus, err := c.status(ctx, hash)\n\tif err != nil {\n\t\tif errors.Is(err, errors.Locked) {\n\t\t\treturn err\n\t\t}\n\t\tlog.Errorf(\"Could not check status of VSP ticket %s: %v\", hash, err)\n\t\treturn nil\n\t}\n\n\t// Check for any mismatch between the provided voting preferences and the\n\t// VSP preferences to determine if VSP needs to be updated.\n\tupdate := false\n\n\t// Check consensus vote choices.\n\tfor _, newChoice := range choices {\n\t\tvspChoice, ok := status.VoteChoices[newChoice.AgendaID]\n\t\tif !ok {\n\t\t\tupdate = true\n\t\t\tbreak\n\t\t}\n\t\tif vspChoice != newChoice.ChoiceID {\n\t\t\tupdate = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Apply the above changes to the two checks below.\n\n\t// Check tspend policies.\n\tfor newTSpend, newChoice := range tspendPolicy {\n\t\tvspChoice, ok := status.TSpendPolicy[newTSpend]\n\t\tif !ok {\n\t\t\tupdate = true\n\t\t\tbreak\n\t\t}\n\t\tif vspChoice != newChoice {\n\t\t\tupdate = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Check treasury policies.\n\tfor newKey, newChoice := range treasuryPolicy {\n\t\tvspChoice, ok := status.TSpendPolicy[newKey]\n\t\tif !ok {\n\t\t\tupdate = true\n\t\t\tbreak\n\t\t}\n\t\tif vspChoice != newChoice {\n\t\t\tupdate = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !update {\n\t\tlog.Debugf(\"VSP already has correct vote choices for ticket %s\", hash)\n\t\treturn nil\n\t}\n\n\tlog.Debugf(\"Updating vote choices on VSP for ticket %s\", hash)\n\terr = c.setVoteChoices(ctx, hash, choices, tspendPolicy, treasuryPolicy)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (_m *Prompter) Choice(prompt string, options []string) string {\n\tret := _m.Called(prompt, options)\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func(string, []string) string); ok {\n\t\tr0 = rf(prompt, options)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}",
"func selectFileGUI(titleA string, filterNameA string, filterTypeA string) string {\n\tfileNameT, errT := dialog.File().Filter(filterNameA, filterTypeA).Title(titleA).Load()\n\n\tif errT != nil {\n\t\treturn tk.GenerateErrorStringF(\"failed: %v\", errT)\n\t}\n\n\treturn fileNameT\n}",
"func (fa *FileAction) SetAction(action string) error {\n\tvar ok bool\n\tswitch action {\n\tcase SelectFile, GetFileStructure, AddFile, MoveFile, DeleteFile, StreamFile:\n\t\tok = true\n\tdefault:\n\t\tok = false\n\t}\n\n\tif !ok {\n\t\treturn fmt.Errorf(\"File Action not available: %v\", action)\n\t}\n\tfa.Action = action\n\treturn nil\n}",
"func (b *Builder) WriteChoice(choices []string) {\n\tfmt.Fprintf(&b.sb, \"${%d|\", b.nextTabStop())\n\tfor i, c := range choices {\n\t\tif i != 0 {\n\t\t\tb.sb.WriteByte(',')\n\t\t}\n\t\tchoiceReplacer.WriteString(&b.sb, c)\n\t}\n\tb.sb.WriteString(\"|}\")\n}",
"func (o IFS) Choose() *Affine {\n\tr := rand.Intn(len(o.Choices))\n\treturn o.Choices[r]\n}",
"func (s *BasevhdlListener) ExitChoice(ctx *ChoiceContext) {}",
"func (r *CheckGroup) SetSelected(options []string) {\n\t//if r.Selected == options {\n\t//\treturn\n\t//}\n\n\tr.Selected = options\n\n\tif r.OnChanged != nil {\n\t\tr.OnChanged(options)\n\t}\n\n\tr.Refresh()\n}",
"func (fv *FileView) FavSelect(idx int) {\n\tif idx < 0 || idx >= len(gi.Prefs.FavPaths) {\n\t\treturn\n\t}\n\tfi := gi.Prefs.FavPaths[idx]\n\tfv.DirPath, _ = homedir.Expand(fi.Path)\n\tfv.UpdateFilesAction()\n}",
"func (m *ManagementTemplateStep) SetAcceptedVersion(value ManagementTemplateStepVersionable)() {\n err := m.GetBackingStore().Set(\"acceptedVersion\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (c *Current) SetVersion(version string) {\n\tswitch c.selectedRadio {\n\tcase FocusOnInvolved, FocusOnCurrentNamespace:\n\t\tif curr.resourceVersion < version {\n\t\t\tcurr.resourceVersion = version\n\t\t}\n\tcase FocusOnAllNamespace:\n\t\tif curr.resourceVersionAllNamespace < version {\n\t\t\tcurr.resourceVersionAllNamespace = version\n\t\t}\n\t}\n}",
"func runFileChooser(win *gtk.Window) (string, error) {\n\n\tvar fn string\n\n\topenFile, err := gtk.FileChooserDialogNewWith2Buttons(\"Open file\", win, gtk.FILE_CHOOSER_ACTION_OPEN,\n\t\t\"Cancel\", gtk.RESPONSE_CANCEL,\n\t\t\"Ok\", gtk.RESPONSE_OK)\n\tdefer openFile.Destroy()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\topenFile.SetDefaultSize(50, 50)\n\n\tres := openFile.Run()\n\n\tif res == int(gtk.RESPONSE_OK) {\n\t\tfn = openFile.FileChooser.GetFilename()\n\t}\n\n\treturn fn, nil\n}",
"func (cli *CliPrompter) Choose(pr string, options []string) int {\n\tselected := \"\"\n\tprompt := &survey.Select{\n\t\tMessage: pr,\n\t\tOptions: options,\n\t}\n\t_ = survey.AskOne(prompt, &selected, survey.WithValidator(survey.Required))\n\n\t// return the selected element index\n\tfor i, option := range options {\n\t\tif selected == option {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn 0\n}",
"func pickFile(dir, message string) string {\n\tfileName := \"\"\n\terr := survey.AskOne(\n\t\t&survey.Select{\n\t\t\tMessage: message,\n\t\t\tOptions: readDir(dir),\n\t\t},\n\t\t&fileName,\n\t\tsurvey.WithValidator(survey.Required),\n\t)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn fileName\n}",
"func (v *RadioButton) SetGroup(group *glib.SList) {\n\tC.gtk_radio_button_set_group(v.native(), cGSList(group))\n}",
"func (c *Config) readChoice(prompt string, choices []string, defaultValue *string) (string, error) {\n\tswitch {\n\tcase c.noTTY:\n\t\tfullPrompt := prompt + \" (\" + strings.Join(choices, \"/\")\n\t\tif defaultValue != nil {\n\t\t\tfullPrompt += \", default \" + *defaultValue\n\t\t}\n\t\tfullPrompt += \")? \"\n\t\tabbreviations := chezmoi.UniqueAbbreviations(choices)\n\t\tfor {\n\t\t\tvalue, err := c.readLineRaw(fullPrompt)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tif value == \"\" && defaultValue != nil {\n\t\t\t\treturn *defaultValue, nil\n\t\t\t}\n\t\t\tif value, ok := abbreviations[value]; ok {\n\t\t\t\treturn value, nil\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tinitModel := chezmoibubbles.NewChoiceInputModel(prompt, choices, defaultValue)\n\t\tfinalModel, err := runCancelableModel(initModel)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn finalModel.Value(), nil\n\t}\n}",
"func (fb *FlowBox) SetSelectionMode(mode SelectionMode) {\n\tC.gtk_flow_box_set_selection_mode(fb.native(), C.GtkSelectionMode(mode))\n}",
"func (cycle *Cycle) Choose() {\n\tif !cycle.showing ||\n\t\tlen(cycle.items) == 0 ||\n\t\tcycle.selected < 0 ||\n\t\tcycle.selected >= len(cycle.items) {\n\n\t\treturn\n\t}\n\n\tcycle.items[cycle.selected].choose()\n\tcycle.Hide()\n}",
"func (f *FilePicker) SelectFile(fileName string) uiauto.Action {\n\treturn f.filesApp.SelectFile(fileName)\n}",
"func NewFlagChoice(choices []string, chosen string) *FlagChoice {\n\treturn &FlagChoice{\n\t\tchoices: choices,\n\t\tchosen: chosen,\n\t}\n}",
"func (s *BasevhdlListener) EnterChoices(ctx *ChoicesContext) {}",
"func (file *File) Select() {\n\tfile.mark = file.location\n}",
"func (o *MicrosoftGraphChoiceColumn) SetChoices(v []string) {\n\to.Choices = &v\n}",
"func (r *GetSpecType) SetConfigMethodChoiceToGlobalSpecType(o *GlobalSpecType) error {\n\tswitch of := r.ConfigMethodChoice.(type) {\n\tcase nil:\n\t\to.ConfigMethodChoice = nil\n\n\tcase *GetSpecType_PspSpec:\n\t\to.ConfigMethodChoice = &GlobalSpecType_PspSpec{PspSpec: of.PspSpec}\n\n\tcase *GetSpecType_Yaml:\n\t\to.ConfigMethodChoice = &GlobalSpecType_Yaml{Yaml: of.Yaml}\n\n\tdefault:\n\t\treturn fmt.Errorf(\"Unknown oneof field %T\", of)\n\t}\n\treturn nil\n}",
"func Choose(pr string, options []string) int {\n\treturn defaultPrompter.Choose(pr, options)\n}",
"func (cli *CLI) Choose() (string, error) {\n\tcolorstring.Fprintf(cli.errStream, chooseText)\n\n\tnum, err := cli.AskNumber(4, 1)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// If user selects 3, should ask user GPL V2 or V3\n\tif num == 3 {\n\t\tvar buf bytes.Buffer\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"Which version do you want?\\n\")\n\t\tbuf.WriteString(\" 1) V2\\n\")\n\t\tbuf.WriteString(\" 2) V3\\n\")\n\t\tfmt.Fprintf(cli.errStream, buf.String())\n\n\t\tnum, err = cli.AskNumber(2, 1)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tnum += 4\n\t}\n\n\tvar key string\n\tswitch num {\n\tcase 1:\n\t\tkey = \"mit\"\n\tcase 2:\n\t\tkey = \"apache-2.0\"\n\tcase 4:\n\t\tkey = \"\"\n\tcase 5:\n\t\tkey = \"gpl-2.0\"\n\tcase 6:\n\t\tkey = \"gpl-3.0\"\n\tdefault:\n\t\t// Should not reach here\n\t\tpanic(\"Invalid number\")\n\t}\n\n\treturn key, nil\n}",
"func (w *TextWidget) SetPrompt(g *gocui.Gui, prefix, content string, callback func(bool, string)) error {\n\tif w.view == nil {\n\t\treturn nil\n\t}\n\n\tgx := New(g)\n\n\toldfocus := g.CurrentView()\n\tg.Cursor = true\n\tgx.Focus(w.view)\n\n\tw.SetText(prefix)\n\tfmt.Fprintf(w.view, content)\n\n\teditor := PromptEditor(g, len(prefix), func(success bool, response string) {\n\t\tw.setEditor(nil)\n\t\tgx.Focus(oldfocus)\n\t\tg.Cursor = false\n\t\tcallback(success, response)\n\t})\n\n\tw.setEditor(editor)\n\n\treturn w.view.SetCursor(len(prefix)+len(content), 0)\n}",
"func pickChapter(g *gocui.Gui, v *gocui.View) error {\n\tif err := openModal(g); err != nil {\n\t\treturn err\n\t}\n\n\tdone := make(chan bool)\n\ttimer := time.NewTimer(time.Second * time.Duration(downloadTimeoutSecond))\n\n\t// must run downloading process in\n\t// go routine or else the it will\n\t// block the openModal so loading modal\n\t// will not be shown to the user\n\tgo func() {\n\t\ts := trimViewLine(v)\n\t\tprepDownloadChapter(s)\n\t\tdone <- true\n\t}()\n\n\t// in case downloading takes longer than\n\t// downloadTimeoutSecond, close the modal\n\t// and continue to download in background\n\tgo func() {\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\tsetClosingMessage(g, \"continuing to download\\nin background...\")\n\t\t\treturn\n\t\tcase <-done:\n\t\t\tg.Update(func(g *gocui.Gui) error {\n\t\t\t\terr := closeModal(g)\n\t\t\t\treturn err\n\t\t\t})\n\t\t}\n\t}()\n\n\treturn nil\n}",
"func (r *ReplaceSpecType) SetConfigMethodChoiceToGlobalSpecType(o *GlobalSpecType) error {\n\tswitch of := r.ConfigMethodChoice.(type) {\n\tcase nil:\n\t\to.ConfigMethodChoice = nil\n\n\tcase *ReplaceSpecType_PspSpec:\n\t\to.ConfigMethodChoice = &GlobalSpecType_PspSpec{PspSpec: of.PspSpec}\n\n\tcase *ReplaceSpecType_Yaml:\n\t\to.ConfigMethodChoice = &GlobalSpecType_Yaml{Yaml: of.Yaml}\n\n\tdefault:\n\t\treturn fmt.Errorf(\"Unknown oneof field %T\", of)\n\t}\n\treturn nil\n}",
"func SetOption(pkg, name string, fn func(r Runtime) values.Value) ScopeMutator {\n\treturn func(r Runtime, scope values.Scope) {\n\t\tv := fn(r)\n\t\tp, ok := scope.Lookup(pkg)\n\t\tif ok {\n\t\t\tif p, ok := p.(values.Package); ok {\n\t\t\t\tvalues.SetOption(p, name, v)\n\t\t\t}\n\t\t} else if r.IsPreludePackage(pkg) {\n\t\t\topt, ok := scope.Lookup(name)\n\t\t\tif ok {\n\t\t\t\tif opt, ok := opt.(*values.Option); ok {\n\t\t\t\t\topt.Value = v\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func (r *CreateSpecType) SetConfigMethodChoiceToGlobalSpecType(o *GlobalSpecType) error {\n\tswitch of := r.ConfigMethodChoice.(type) {\n\tcase nil:\n\t\to.ConfigMethodChoice = nil\n\n\tcase *CreateSpecType_PspSpec:\n\t\to.ConfigMethodChoice = &GlobalSpecType_PspSpec{PspSpec: of.PspSpec}\n\n\tcase *CreateSpecType_Yaml:\n\t\to.ConfigMethodChoice = &GlobalSpecType_Yaml{Yaml: of.Yaml}\n\n\tdefault:\n\t\treturn fmt.Errorf(\"Unknown oneof field %T\", of)\n\t}\n\treturn nil\n}",
"func choice(a string) string {\n\tif strings.EqualFold(a, \"r\") {\n\t\treturn \"Rock\"\n\t} else if strings.EqualFold(a, \"p\") {\n\t\treturn \"Paper\"\n\t} else if strings.EqualFold(a, \"s\") {\n\t\treturn \"Scissor\"\n\t} else {\n\t\treturn \"UNKNOWN\"\n\t}\n}",
"func (f *FileDialog) SetOnClosed(closed func()) {\n\tif f.dialog == nil {\n\t\treturn\n\t}\n\t// If there is already a callback set, remember it and call both.\n\toriginalCallback := f.onClosedCallback\n\n\tf.onClosedCallback = func(response bool) {\n\t\tclosed()\n\t\tif originalCallback != nil {\n\t\t\toriginalCallback(response)\n\t\t}\n\t}\n}",
"func (fv *FileView) SetPathFile(path, file, ext string) {\n\tfv.DirPath = path\n\tfv.SelFile = file\n\tfv.SetExt(ext)\n\tfv.UpdateFromPath()\n}",
"func (m *Model) Choice() (*Choice, error) {\n\tif m.Err != nil {\n\t\treturn nil, m.Err\n\t}\n\n\tif len(m.currentChoices) == 0 {\n\t\treturn nil, fmt.Errorf(\"no choices\")\n\t}\n\n\tif m.currentIdx < 0 || m.currentIdx >= len(m.currentChoices) {\n\t\treturn nil, fmt.Errorf(\"choice index out of bounds\")\n\t}\n\n\treturn m.currentChoices[m.currentIdx], nil\n}",
"func ChoiceIndex(title string, choices []string, def int) int {\n\treturn ChoiceIndexCallback(title, choices, def, nil)\n}",
"func (g Gnome2Setter) Set(filename string) error {\n\tpath, err := filepath.Abs(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn setWithCommand(\n\t\t\"gconftool-2\",\n\t\t\"--type=string\",\n\t\t\"--set\",\n\t\t\"/desktop/gnome/background/picture_filename\",\n\t\tpath,\n\t)\n}",
"func ChoiceIndexCallback(title string, choices []string, def int, f func(int, int, int)) int {\n\tselection := def\n\tnc := len(choices) - 1\n\tif selection < 0 || selection > nc {\n\t\tselection = 0\n\t}\n\toffset := 0\n\tcx := 0\n\tfor {\n\t\tsx, sy := termbox.Size()\n\t\ttermbox.HideCursor()\n\t\ttermbox.Clear(termbox.ColorDefault, termbox.ColorDefault)\n\t\tPrintstring(title, 0, 0)\n\t\tfor selection < offset {\n\t\t\toffset -= 5\n\t\t\tif offset < 0 {\n\t\t\t\toffset = 0\n\t\t\t}\n\t\t}\n\t\tfor selection-offset >= sy-1 {\n\t\t\toffset += 5\n\t\t\tif offset >= nc {\n\t\t\t\toffset = nc\n\t\t\t}\n\t\t}\n\t\tfor i, s := range choices[offset:] {\n\t\t\tts, _ := trimString(s, cx)\n\t\t\tPrintstring(ts, 3, i+1)\n\t\t\tif cx > 0 {\n\t\t\t\tPrintstring(\"←\", 2, i+1)\n\t\t\t}\n\t\t}\n\t\tPrintstring(\">\", 1, (selection+1)-offset)\n\t\tif f != nil {\n\t\t\tf(selection, sx, sy)\n\t\t}\n\t\ttermbox.Flush()\n\t\tev := termbox.PollEvent()\n\t\tif ev.Type != termbox.EventKey {\n\t\t\tcontinue\n\t\t}\n\t\tkey := ParseTermboxEvent(ev)\n\t\tswitch key {\n\t\tcase \"C-v\":\n\t\t\tfallthrough\n\t\tcase \"next\":\n\t\t\tselection += sy - 5\n\t\t\tif selection >= len(choices) {\n\t\t\t\tselection = len(choices) - 1\n\t\t\t}\n\t\tcase \"M-v\":\n\t\t\tfallthrough\n\t\tcase \"prior\":\n\t\t\tselection -= sy - 5\n\t\t\tif selection < 0 {\n\t\t\t\tselection = 0\n\t\t\t}\n\t\tcase \"C-c\":\n\t\t\tfallthrough\n\t\tcase \"C-g\":\n\t\t\treturn def\n\t\tcase \"UP\", \"C-p\":\n\t\t\tif selection > 0 {\n\t\t\t\tselection--\n\t\t\t}\n\t\tcase \"DOWN\", \"C-n\":\n\t\t\tif selection < len(choices)-1 {\n\t\t\t\tselection++\n\t\t\t}\n\t\tcase \"LEFT\", \"C-b\":\n\t\t\tif cx > 0 {\n\t\t\t\tcx--\n\t\t\t}\n\t\tcase \"RIGHT\", \"C-f\":\n\t\t\tcx++\n\t\tcase \"C-a\", \"Home\":\n\t\t\tcx = 0\n\t\tcase \"M-<\":\n\t\t\tselection = 0\n\t\tcase \"M->\":\n\t\t\tselection = len(choices) - 1\n\t\tcase \"RET\":\n\t\t\treturn selection\n\t\t}\n\t}\n}",
"func GuiTextBoxSetSelection(start int, length int) {\n\tC.GuiTextBoxSetSelection(C.int(int32(start)), C.int(int32(length)))\n}",
"func (m *Menu) SetSelected(i int) {\n\tm.selected = i\n}",
"func (q *Question) ReadChoice(prompt string, a []string) (answer string, err error) {\n\treturn q._baseReadChoice(prompt, a, 0)\n}",
"func redrawChoices() {\n\tfor ii := 0; ii < minInt(len(choices)-viewTopIndex, viewHeight); ii++ {\n\t\tredrawChoice(viewTopRow+ii, viewTopIndex+ii, false, isSelected[viewTopIndex+ii])\n\t}\n\tredrawChoice(cursorRow, cursorIndex, true, isSelected[cursorIndex])\n}",
"func (fv *FileView) SetExt(ext string) {\n\tif ext == \"\" {\n\t\tif fv.SelFile != \"\" {\n\t\t\text = strings.ToLower(filepath.Ext(fv.SelFile))\n\t\t}\n\t}\n\tfv.Ext = ext\n\texts := strings.Split(fv.Ext, \",\")\n\tfv.ExtMap = make(map[string]string, len(exts))\n\tfor _, ex := range exts {\n\t\tex = strings.TrimSpace(ex)\n\t\tif len(ex) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif ex[0] != '.' {\n\t\t\tex = \".\" + ex\n\t\t}\n\t\tfv.ExtMap[strings.ToLower(ex)] = ex\n\t}\n}",
"func (s *BasevhdlListener) ExitChoices(ctx *ChoicesContext) {}",
"func (e Enum) setValue(v any, key string) error {\n\ti, err := e.GetValue(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvValue := reflect.ValueOf(v).Elem()\n\tif !vValue.CanSet() {\n\t\tpanic(fmt.Errorf(\"flagenum: Cannot set supplied value, %v\", vValue))\n\t}\n\n\tiValue := reflect.ValueOf(i)\n\tif !vValue.Type().AssignableTo(iValue.Type()) {\n\t\tpanic(fmt.Errorf(\"flagenum: Enumeration type (%v) is incompatible with supplied value (%v)\",\n\t\t\tvValue.Type(), iValue.Type()))\n\t}\n\n\tvValue.Set(iValue)\n\treturn nil\n}",
"func numberPicker(theme gxui.Theme, overlay gxui.BubbleOverlay) gxui.Control {\n\tvar fm FileManager\n\tfm.Init()\n\tfm.SetPath(fm.RootPath)\n\n\tadapter := gxui.CreateDefaultAdapter()\n\tadapter.SetItems(fm.GetList())\n\n\tlayout := theme.CreateLinearLayout()\n\tlayout.SetDirection(gxui.TopToBottom)\n\n\tlayoutControl := theme.CreateLinearLayout()\n layoutControl.SetDirection(gxui.LeftToRight)\n\tlabelPath := theme.CreateLabel()\n\tlabelPath.SetText(\"Set root path: \")\n\tlayoutControl.AddChild(labelPath)\n\n\tinputPath := theme.CreateTextBox()\n\tlayoutControl.AddChild(inputPath)\n\n\tbtn := GetButton(\"OK\", theme)\n\tbtn.OnClick(func(gxui.MouseEvent) {\n\t\t_path := inputPath.Text()\n\t\tif fm.IsDir(_path) {\n\t\t\tfm.RootPath = _path\n\t\t\tfm.SetPath(fm.RootPath)\n\t\t\tadapter.SetItems(fm.GetList())\n\t\t}\n\t})\n\tlayoutControl.AddChild(btn)\n\n\tlayout.AddChild(layoutControl)\n\n\n\tlist := theme.CreateList()\n\tlist.SetAdapter(adapter)\n\tlist.SetOrientation(gxui.Vertical)\n\tlayout.AddChild(list)\n\n\tlayoutControlOpen := theme.CreateLinearLayout()\n layoutControlOpen.SetDirection(gxui.LeftToRight)\n\tlabelOpen := theme.CreateLabel()\n\tlabelOpen.SetText(\"Open: \")\n\tlayoutControlOpen.AddChild(labelOpen)\n\n\tbtnOpen := GetButton(\"OK\", theme)\n\tbtnOpen.OnClick(func(gxui.MouseEvent) {\n\t\t\n\t})\n\tlayoutControlOpen.AddChild(btnOpen)\n\n\tlayout.AddChild(layoutControlOpen)\n\n\tlist.OnItemClicked(func(ev gxui.MouseEvent, item gxui.AdapterItem) {\n\t\t//if dropList.Selected() != item {\n\t\t//\tdropList.Select(item)\n\t\t//}\n\t\tif ev.Button == gxui.MouseButtonRight {\n\t\t\tfm.Go(item.(string))\n\t\t\tadapter.SetItems(fm.GetList())\n\t\t\t\n\t\t}\n\t\t\n\t\t//selected.SetText(fmt.Sprintf(\"%s - %d\", item, adapter.ItemIndex(item)))\n\t})\n\n\treturn layout\n}",
"func (q *Question) _baseReadChoice(prompt string, a []string, defaultAnswer uint) (answer string, err error) {\n\t// Saves the value without ANSI to get it when it's set the answer by default.\n\tdef := a[defaultAnswer]\n\ta[defaultAnswer] = setBold + def + setOff\n\n\tline := q.getLine(prompt, strings.Join(a, \",\"), _DEFAULT_MULTIPLE)\n\n\tfor {\n\t\tanswer, err = line.Read()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif answer == \"\" {\n\t\t\treturn def, nil\n\t\t}\n\n\t\tfor _, v := range a {\n\t\t\tif answer == v {\n\t\t\t\treturn answer, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}",
"func ChooseWithDefault(pr string, defaultValue string, options []string) (string, error) {\n\treturn defaultPrompter.ChooseWithDefault(pr, defaultValue, options)\n}",
"func selectFileToSaveGUI(titleA string, filterNameA string, filterTypeA string) string {\n\tfileNameT, errT := dialog.File().Filter(filterNameA, filterTypeA).Title(titleA).Save()\n\n\tif errT != nil {\n\t\treturn tk.GenerateErrorStringF(\"failed: %v\", errT)\n\t}\n\n\treturn fileNameT\n}",
"func (fOpenCfg *FileOpenConfig) SetFileOpenType(fOpenType FileOpenType) error {\n\n ePrefix := \"FileOpenConfig.SetFileOpenType() \"\n\n err := fOpenType.IsValid()\n\n if err != nil {\n return fmt.Errorf(ePrefix+\"Input parameter 'fOpenType' is INVALID! fOpenType='%v' \",\n fOpenType.Value())\n }\n\n if fOpenCfg.fileOpenModes == nil {\n fOpenCfg.fileOpenModes = make([]FileOpenMode, 0)\n }\n\n if fOpenType == FileOpenType(0).TypeNone() {\n fOpenCfg.fileOpenModes = make([]FileOpenMode, 1)\n fOpenCfg.fileOpenModes[0] = FOpenMode.ModeNone()\n }\n\n fOpenCfg.fileOpenType = fOpenType\n\n fOpenCfg.isInitialized = true\n\n return nil\n}",
"func (m *AgreementAcceptance) SetState(value *AgreementAcceptanceState)() {\n m.state = value\n}",
"func (v *Button) SetImage(image IWidget) {\n\tC.gtk_button_set_image(v.native(), image.toWidget())\n}",
"func (v *TreeStore) SetValue(iter *TreeIter, column int, value interface{}) error {\n\tswitch value.(type) {\n\tcase *gdk.Pixbuf:\n\t\tpix := value.(*gdk.Pixbuf)\n\t\tC._gtk_tree_store_set(v.native(), iter.native(), C.gint(column), unsafe.Pointer(pix.Native()))\n\n\tdefault:\n\t\tgv := glib.GValue(value)\n\n\t\tC.gtk_tree_store_set_value(v.native(), iter.native(),\n\t\t\tC.gint(column),\n\t\t\t(*C.GValue)(C.gpointer(gv.Native())))\n\t}\n\treturn nil\n}",
"func selectDirectoryGUI(titleA string) string {\n\tdirectoryT, errT := dialog.Directory().Title(titleA).Browse()\n\n\tif errT != nil {\n\t\treturn tk.GenerateErrorStringF(\"failed: %v\", errT)\n\t}\n\n\treturn directoryT\n}",
"func RenderInitialChoices(resp http.ResponseWriter, req *http.Request, ext string, fileSeeker io.ReadSeeker) {\n\top := []OptStruct{\n\t\t{22, primitive.TriangleMode},\n\t\t{22, primitive.CircleMode},\n\t\t{22, primitive.ComboMode},\n\t\t{22, primitive.PolygonMode},\n\t}\n\topFileList, err := GenImgList(ext, fileSeeker, op...)\n\tif err != nil {\n http.Error(resp, err.Error(), http.StatusInternalServerError)\n return\n\t}\n\thtmlist := `<html>\n <body>\n {{range .}}\n <a href=\"/modify/{{.Name}}?mode={{.Mode}}\">\n <img style =\"width 30%\" src=\"/pics/{{.Name}}\">\n {{end}}\n </body>\n </html>\n `\n\ttempl := template.Must(template.New(\"\").Parse(htmlist))\n \ntype Opts struct {\n Name string\n Mode primitive.Mode\n}\n\tvar opts []Opts\n\tfor index, val := range opFileList {\n\t\topts = append(opts, Opts{Name: filepath.Base(val), Mode: op[index].mode})\n\t}\n\n\t// err = templ.Execute(resp, opts)\n\t// if err != nil {\n\t// \tpanic(err)\n // }\n checkError(templ.Execute(resp, opts))\n \n\n}",
"func (v *LinkButton) SetUri(uri string) {\n\tcstr := C.CString(uri)\n\tC.gtk_link_button_set_uri(v.native(), (*C.gchar)(cstr))\n}",
"func (*XMLDocument) SetOnselectionchange(onselectionchange func(window.Event)) {\n\tmacro.Rewrite(\"$_.onselectionchange = $1\", onselectionchange)\n}",
"func (in *Choice) DeepCopy() *Choice {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Choice)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func SetOption(args []string) {\n\topt := args[0]\n\tif len(args) < 2 {\n\t\t// clear value\n\t\tOptions[opt].Val = \"\"\n\t\treturn\n\t}\n\n\tval := args[1:] // in case val contains spaces\n\n\tif _, exist := Options[opt]; !exist {\n\t\tCliPrintError(\"No such option: %s\", strconv.Quote(opt))\n\t\treturn\n\t}\n\n\t// set\n\tOptions[opt].Val = strings.Join(val, \" \")\n}",
"func (li *List) setSelection(litem *ListItem, state bool, force bool, dispatch bool) {\n\n\tManager().SetKeyFocus(li)\n\t// If already at this state, nothing to do\n\tif litem.selected == state && !force {\n\t\treturn\n\t}\n\tlitem.SetSelected(state)\n\n\t// If single selection, deselects all other items\n\tif li.single {\n\t\tfor _, curr := range li.items {\n\t\t\tif curr.(*ListItem) != litem {\n\t\t\t\tcurr.(*ListItem).SetSelected(false)\n\t\t\t\tcurr.(*ListItem).SetHighlighted(false)\n\t\t\t}\n\t\t}\n\t}\n\tli.update()\n\tif dispatch {\n\t\tli.Dispatch(OnChange, nil)\n\t}\n}",
"func (v *Clipboard) SetText(text string) {\n\tcstr := C.CString(text)\n\tdefer C.free(unsafe.Pointer(cstr))\n\tC.gtk_clipboard_set_text(v.native(), (*C.gchar)(cstr),\n\t\tC.gint(len(text)))\n}",
"func (s *SelectableAttribute) Chooser() BooleanAttribute {\n\treturn s.chooser\n}",
"func (c *ChoiceImpl) AskUser() {\n\tchoice := \"\"\n\tfor !c.choiceIsValid(choice) {\n\t\tfmt.Println(\"----- Available options:\")\n\t\tc.displayActions()\n\t\tfmt.Println(\"----- Your choice:\")\n\t\tchoice = c.getUsersChoice()\n\t}\n\ta := c.getActionByName(choice)\n\ta.execute()\n}",
"func (cli *CliPrompter) ChooseWithDefault(pr string, defaultValue string, options []string) (string, error) {\n\tselected := \"\"\n\tprompt := &survey.Select{\n\t\tMessage: pr,\n\t\tOptions: options,\n\t\tDefault: defaultValue,\n\t}\n\t_ = survey.AskOne(prompt, &selected, survey.WithValidator(survey.Required))\n\n\t// return the selected element index\n\tfor i, option := range options {\n\t\tif selected == option {\n\t\t\treturn options[i], nil\n\t\t}\n\t}\n\treturn \"\", errors.New(\"bad input\")\n}",
"func (me *TViewBoxSpecType) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func (c *Checkbox) SetText(text string) {\n\tctext := C.CString(text)\n\tC.uiCheckboxSetText(c.c, ctext)\n\tfreestr(ctext)\n}",
"func (fv *FileView) SetExtAction(ext string) {\n\tfv.SetExt(ext)\n\tfv.SetFullReRender()\n\tfv.UpdateFiles()\n}",
"func (recv *Value) SetEnum(vEnum int32) {\n\tc_v_enum := (C.gint)(vEnum)\n\n\tC.g_value_set_enum((*C.GValue)(recv.native), c_v_enum)\n\n\treturn\n}",
"func SetProductFile(filePath string) int {\n\tcFilePath := goToCString(filePath)\n\tstatus := C.SetProductFile(cFilePath)\n\tfreeCString(cFilePath)\n\treturn int(status)\n}",
"func (litem *ListItem) SetSelected(state bool) {\n\n\tlitem.selected = state\n\t//litem.item.SetSelected2(state)\n}",
"func (mb *MenuButton) SetCallBack(callback Callback) {\n\tmb.callback = callback\n}",
"func (cv Choice) WithDefault(value string) Choice {\n\tcv.Set(value)\n\treturn cv\n}",
"func (g Gnome3Setter) Set(filename string) error {\n\tpath, err := filepath.Abs(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn setWithCommand(\n\t\t\"gsettings\",\n\t\t\"set\",\n\t\t\"org.gnome.desktop.background\",\n\t\t\"picture-uri\",\n\t\tfmt.Sprintf(\"file://%s\", path),\n\t)\n}",
"func askSelect(message string, options []string, response interface{}) error {\n\treturn survey.AskOne(&survey.Select{\n\t\tMessage: message,\n\t\tOptions: options,\n\t}, response, nil)\n}",
"func (b *Button) SetImage(t int, handle uintptr) uintptr {\n\treturn b.SendMessage(win.BM_SETIMAGE, uintptr(t), handle)\n}",
"func RequestChoiceIfEmpty(defaultValue string, message string, options ...string) string {\n\tif defaultValue != \"\" {\n\t\treturn defaultValue\n\t}\n\treturn RequestChoice(message, options...)\n}",
"func (*AnswerChoice) Descriptor() ([]byte, []int) {\n\treturn file_quiz_proto_rawDescGZIP(), []int{4}\n}",
"func (vl *VerbLevel) Set(value string) error {\n\t// int: level value.\n\tif iv, err := strconv.Atoi(value); err == nil {\n\t\tif iv > VerbCrazy.Int() {\n\t\t\t*vl = VerbCrazy\n\t\t} else if iv < 0 { // fallback to default level.\n\t\t\t*vl = DefaultVerb\n\t\t} else { // 0 - 5\n\t\t\t*vl = VerbLevel(iv)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t// string: level name.\n\t*vl = name2verbLevel(value)\n\treturn nil\n}"
] | [
"0.6469274",
"0.6148542",
"0.6068018",
"0.5769264",
"0.57536185",
"0.56970453",
"0.5626628",
"0.55424106",
"0.51889265",
"0.51361847",
"0.51076907",
"0.5051737",
"0.49173164",
"0.4893991",
"0.4842663",
"0.4834679",
"0.47279584",
"0.46964598",
"0.4682138",
"0.4653016",
"0.46258134",
"0.462052",
"0.4602431",
"0.45895332",
"0.4534642",
"0.4505106",
"0.44543216",
"0.43771532",
"0.43590632",
"0.43416277",
"0.43371588",
"0.43278313",
"0.4309242",
"0.4290017",
"0.42887387",
"0.4286901",
"0.42664492",
"0.42636845",
"0.4249235",
"0.4243678",
"0.42225006",
"0.42170876",
"0.42160907",
"0.42024148",
"0.41776416",
"0.41622543",
"0.41368714",
"0.41313824",
"0.41277272",
"0.4116558",
"0.41154155",
"0.4113371",
"0.40717593",
"0.40703687",
"0.406281",
"0.40570998",
"0.4057087",
"0.40470117",
"0.40166095",
"0.39989513",
"0.39983928",
"0.39799154",
"0.39794537",
"0.39691097",
"0.39661646",
"0.39280325",
"0.39257145",
"0.39238438",
"0.39235204",
"0.39048767",
"0.3900801",
"0.38926756",
"0.38912123",
"0.38902116",
"0.38806397",
"0.3874835",
"0.38695574",
"0.3868022",
"0.38585916",
"0.3858135",
"0.3857689",
"0.38565594",
"0.38532498",
"0.38497514",
"0.3844678",
"0.38396856",
"0.3822141",
"0.38210532",
"0.38164884",
"0.3809083",
"0.38065025",
"0.38060278",
"0.38049033",
"0.37979615",
"0.37902114",
"0.37863722",
"0.3782258",
"0.3772145",
"0.37706608",
"0.37671852"
] | 0.86250484 | 0 |
GetChoice is a wrapper around gtk_file_chooser_get_choice(). | GetChoice — это обертка вокруг gtk_file_chooser_get_choice(). | func (v *FileChooser) GetChoice(id string) string {
cId := C.CString(id)
defer C.free(unsafe.Pointer(cId))
c := C.gtk_file_chooser_get_choice(v.native(), (*C.gchar)(cId))
return C.GoString(c)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *FileChooser) SetChoice(id, option string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tcOption := C.CString(option)\n\tdefer C.free(unsafe.Pointer(cOption))\n\tC.gtk_file_chooser_set_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cOption))\n}",
"func Choice(m string, exacts []string) string {\n\tfmt.Println(colors.Blue(prefix + \" \" + m + \": \"))\n\tret := make(chan string, 1)\n\tterminate := make(chan struct{})\n\tgo cho.Run(exacts, ret, terminate)\n\tselected := \"\"\nLOOP:\n\tfor {\n\t\tselect {\n\t\tcase selected = <-ret:\n\t\t\tbreak LOOP\n\t\tcase <-terminate:\n\t\t\tbreak LOOP\n\t\t}\n\t}\n\tif selected != \"\" {\n\t\tfmt.Println(selected)\n\t}\n\treturn selected\n}",
"func (c *Config) readChoice(prompt string, choices []string, defaultValue *string) (string, error) {\n\tswitch {\n\tcase c.noTTY:\n\t\tfullPrompt := prompt + \" (\" + strings.Join(choices, \"/\")\n\t\tif defaultValue != nil {\n\t\t\tfullPrompt += \", default \" + *defaultValue\n\t\t}\n\t\tfullPrompt += \")? \"\n\t\tabbreviations := chezmoi.UniqueAbbreviations(choices)\n\t\tfor {\n\t\t\tvalue, err := c.readLineRaw(fullPrompt)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tif value == \"\" && defaultValue != nil {\n\t\t\t\treturn *defaultValue, nil\n\t\t\t}\n\t\t\tif value, ok := abbreviations[value]; ok {\n\t\t\t\treturn value, nil\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tinitModel := chezmoibubbles.NewChoiceInputModel(prompt, choices, defaultValue)\n\t\tfinalModel, err := runCancelableModel(initModel)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn finalModel.Value(), nil\n\t}\n}",
"func (v *FileChooser) AddChoice(id, label string, options, optionLabels []string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\n\tcLabel := C.CString(label)\n\tdefer C.free(unsafe.Pointer(cLabel))\n\n\tif options == nil || optionLabels == nil {\n\t\tC.gtk_file_chooser_add_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cLabel), nil, nil)\n\t\treturn\n\t}\n\n\tcOptions := C.make_strings(C.int(len(options) + 1))\n\tfor i, option := range options {\n\t\tcstr := C.CString(option)\n\t\tdefer C.free(unsafe.Pointer(cstr))\n\t\tC.set_string(cOptions, C.int(i), (*C.gchar)(cstr))\n\t}\n\tC.set_string(cOptions, C.int(len(options)), nil)\n\n\tcOptionLabels := C.make_strings(C.int(len(optionLabels) + 1))\n\tfor i, optionLabel := range optionLabels {\n\t\tcstr := C.CString(optionLabel)\n\t\tdefer C.free(unsafe.Pointer(cstr))\n\t\tC.set_string(cOptionLabels, C.int(i), (*C.gchar)(cstr))\n\t}\n\tC.set_string(cOptionLabels, C.int(len(optionLabels)), nil)\n\n\tC.gtk_file_chooser_add_choice(v.native(), (*C.gchar)(cId), (*C.gchar)(cLabel), cOptions, cOptionLabels)\n}",
"func getChoice(request events.APIGatewayProxyRequest, span opentracing.Span) (choice string) {\n\t_, winOk := request.QueryStringParameters[\"win\"]\n\tif winOk {\n\t\tspan.SetTag(\"WinFlag\", true)\n\t\treturn \"win\"\n\t} else {\n\t\tspan.SetTag(\"WinFlag\", false)\n\t}\n\tchoiceQP, qpOk := request.QueryStringParameters[\"choice\"]\n\tif !qpOk {\n\t\tspan.LogKV(\"event\", \"No choice query parameter provided.\")\n\t} else {\n\t\tspan.SetTag(\"choiceQueryParameter\", choiceQP)\n\t}\n\t_, validChoice := choiceToNum[choiceQP]\n\tif !validChoice {\n\t\trandomChoice := numToChoice[getRandomNumber()]\n\t\tinvalid := fmt.Sprintf(\"Invalid choice query parameter \\\"%s\\\". Using %s selected at random.\",\n\t\t\tchoiceQP, randomChoice)\n\t\tspan.LogKV(\"event\", invalid)\n\t\tspan.SetTag(\"randomChoice\", randomChoice)\n\t\tchoice = randomChoice\n\t} else {\n\t\tchoice = choiceQP\n\t}\n\treturn choice\n}",
"func selectFileGUI(titleA string, filterNameA string, filterTypeA string) string {\n\tfileNameT, errT := dialog.File().Filter(filterNameA, filterTypeA).Title(titleA).Load()\n\n\tif errT != nil {\n\t\treturn tk.GenerateErrorStringF(\"failed: %v\", errT)\n\t}\n\n\treturn fileNameT\n}",
"func runFileChooser(win *gtk.Window) (string, error) {\n\n\tvar fn string\n\n\topenFile, err := gtk.FileChooserDialogNewWith2Buttons(\"Open file\", win, gtk.FILE_CHOOSER_ACTION_OPEN,\n\t\t\"Cancel\", gtk.RESPONSE_CANCEL,\n\t\t\"Ok\", gtk.RESPONSE_OK)\n\tdefer openFile.Destroy()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\topenFile.SetDefaultSize(50, 50)\n\n\tres := openFile.Run()\n\n\tif res == int(gtk.RESPONSE_OK) {\n\t\tfn = openFile.FileChooser.GetFilename()\n\t}\n\n\treturn fn, nil\n}",
"func (v *FileChooser) RemoveChoice(id string) {\n\tcId := C.CString(id)\n\tdefer C.free(unsafe.Pointer(cId))\n\tC.gtk_file_chooser_remove_choice(v.native(), (*C.gchar)(cId))\n}",
"func (m *Model) Choice() (*Choice, error) {\n\tif m.Err != nil {\n\t\treturn nil, m.Err\n\t}\n\n\tif len(m.currentChoices) == 0 {\n\t\treturn nil, fmt.Errorf(\"no choices\")\n\t}\n\n\tif m.currentIdx < 0 || m.currentIdx >= len(m.currentChoices) {\n\t\treturn nil, fmt.Errorf(\"choice index out of bounds\")\n\t}\n\n\treturn m.currentChoices[m.currentIdx], nil\n}",
"func pickFile(dir, message string) string {\n\tfileName := \"\"\n\terr := survey.AskOne(\n\t\t&survey.Select{\n\t\t\tMessage: message,\n\t\t\tOptions: readDir(dir),\n\t\t},\n\t\t&fileName,\n\t\tsurvey.WithValidator(survey.Required),\n\t)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn fileName\n}",
"func (c *Config) promptChoice(prompt string, choices []string, args ...string) (string, error) {\n\tvar defaultValue *string\n\tswitch len(args) {\n\tcase 0:\n\t\t// Do nothing.\n\tcase 1:\n\t\tif !slices.Contains(choices, args[0]) {\n\t\t\treturn \"\", fmt.Errorf(\"%s: invalid default value\", args[0])\n\t\t}\n\t\tdefaultValue = &args[0]\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"want 2 or 3 arguments, got %d\", len(args)+2)\n\t}\n\tif c.interactiveTemplateFuncs.promptDefaults && defaultValue != nil {\n\t\treturn *defaultValue, nil\n\t}\n\treturn c.readChoice(prompt, choices, defaultValue)\n}",
"func NewChoice() Choice {\n\treturn new(ChoiceImpl)\n}",
"func (o IFS) Choose() *Affine {\n\tr := rand.Intn(len(o.Choices))\n\treturn o.Choices[r]\n}",
"func Choice(s *string, choices []string, title, id, class string, valid Validator) (jquery.JQuery, error) {\n\tj := jq(\"<select>\").AddClass(ClassPrefix + \"-choice\").AddClass(class)\n\tj.SetAttr(\"title\", title).SetAttr(\"id\", id)\n\tif *s == \"\" {\n\t\t*s = choices[0]\n\t}\n\tindex := -1\n\tfor i, c := range choices {\n\t\tif c == *s {\n\t\t\tindex = i\n\t\t}\n\t\tj.Append(jq(\"<option>\").SetAttr(\"value\", c).SetText(c))\n\t}\n\tif index == -1 {\n\t\treturn jq(), fmt.Errorf(\"Default of '%s' is not among valid choices\", *s)\n\t}\n\tj.SetData(\"prev\", index)\n\tj.SetProp(\"selectedIndex\", index)\n\tj.Call(jquery.CHANGE, func(event jquery.Event) {\n\t\tnewS := event.Target.Get(\"value\").String()\n\t\tnewIndex := event.Target.Get(\"selectedIndex\").Int()\n\t\tif valid != nil && !valid.Validate(newS) {\n\t\t\tnewIndex = int(j.Data(\"prev\").(float64))\n\t\t\tj.SetProp(\"selectedIndex\", newIndex)\n\t\t}\n\t\t*s = choices[int(newIndex)]\n\t\tj.SetData(\"prev\", newIndex)\n\t})\n\treturn j, nil\n}",
"func (cli *CLI) Choose() (string, error) {\n\tcolorstring.Fprintf(cli.errStream, chooseText)\n\n\tnum, err := cli.AskNumber(4, 1)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// If user selects 3, should ask user GPL V2 or V3\n\tif num == 3 {\n\t\tvar buf bytes.Buffer\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"Which version do you want?\\n\")\n\t\tbuf.WriteString(\" 1) V2\\n\")\n\t\tbuf.WriteString(\" 2) V3\\n\")\n\t\tfmt.Fprintf(cli.errStream, buf.String())\n\n\t\tnum, err = cli.AskNumber(2, 1)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tnum += 4\n\t}\n\n\tvar key string\n\tswitch num {\n\tcase 1:\n\t\tkey = \"mit\"\n\tcase 2:\n\t\tkey = \"apache-2.0\"\n\tcase 4:\n\t\tkey = \"\"\n\tcase 5:\n\t\tkey = \"gpl-2.0\"\n\tcase 6:\n\t\tkey = \"gpl-3.0\"\n\tdefault:\n\t\t// Should not reach here\n\t\tpanic(\"Invalid number\")\n\t}\n\n\treturn key, nil\n}",
"func get_cmd() string {\n\tui := &input.UI{\n\t\tWriter: os.Stdout,\n\t\tReader: os.Stdin,\n\t}\n\tquery := \"Select option\"\n\tcmd, _ := ui.Select(query, []string{\"LIST\", \"INFO\", \"PLAY\", \"STOP\", \"QUIT\"}, &input.Options{\n\t\tLoop: true,\n\t})\n\treturn cmd\n}",
"func InputDialog(opt ...interface{}) string {\n b, _ := gtk.BuilderNewFromFile(\"glade/input-dialog.glade\")\n d := GetDialog(b, \"input_dialog\")\n entry := GetEntry(b, \"input_entry\")\n\n for i, v := range(opt) {\n if i % 2 == 0 {\n key := v.(string)\n switch key {\n case \"title\":\n d.SetTitle(opt[i+1].(string))\n case \"label\":\n l := GetLabel(b,\"input_label\")\n l.SetText(opt[i+1].(string))\n case \"password-mask\":\n entry.SetInvisibleChar(opt[i+1].(rune))\n entry.SetVisibility(false)\n case \"default\":\n entry.SetText(opt[i+1].(string))\n }\n }\n }\n\n output := \"\"\n entry.Connect(\"activate\", func (o *gtk.Entry) { d.Response(gtk.RESPONSE_OK) } )\n btok := GetButton(b, \"bt_ok\")\n btok.Connect(\"clicked\", func (b *gtk.Button) { d.Response(gtk.RESPONSE_OK) } )\n\n btcancel := GetButton(b, \"bt_cancel\")\n btcancel.Connect(\"clicked\", func (b *gtk.Button) { d.Response(gtk.RESPONSE_CANCEL) } )\n\n code := d.Run()\n if code == gtk.RESPONSE_OK {\n output, _ = entry.GetText()\n }\n\n d.Destroy()\n return output\n}",
"func (cli *CliPrompter) Choose(pr string, options []string) int {\n\tselected := \"\"\n\tprompt := &survey.Select{\n\t\tMessage: pr,\n\t\tOptions: options,\n\t}\n\t_ = survey.AskOne(prompt, &selected, survey.WithValidator(survey.Required))\n\n\t// return the selected element index\n\tfor i, option := range options {\n\t\tif selected == option {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn 0\n}",
"func (p *PollAnswerVoters) GetChosen() (value bool) {\n\tif p == nil {\n\t\treturn\n\t}\n\treturn p.Flags.Has(0)\n}",
"func getValueFromPrompt(items []string, label string) (string, error) {\n\tprompt := promptui.Select{\n\t\tLabel: label,\n\t\tItems: items,\n\t}\n\n\t_, songName, err := prompt.Run()\n\n\treturn songName, err\n}",
"func (q *Question) ReadChoice(prompt string, a []string) (answer string, err error) {\n\treturn q._baseReadChoice(prompt, a, 0)\n}",
"func (d *Dmenu) Popup(prompt string, options ...string) (selection string, err error) {\n\tprocessedArgs := []string{}\n\tfor _, arg := range d.arguments {\n\t\tvar parg string\n\t\tif strings.Contains(arg, \"%s\") {\n\t\t\tparg = fmt.Sprintf(arg, prompt)\n\t\t} else {\n\t\t\tparg = arg\n\t\t}\n\n\t\tprocessedArgs = append(processedArgs, parg)\n\t}\n\tcmd := exec.Command(d.command, processedArgs...)\n\n\tstdin, err := cmd.StdinPipe()\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error getting pipe: %s\", err)\n\t}\n\n\tgo func(stdin io.WriteCloser) {\n\t\tdefer stdin.Close()\n\t\tio.WriteString(stdin, strings.Join(options, \"\\n\"))\n\t}(stdin)\n\n\tbyteOut, err := cmd.Output()\n\n\tif err != nil {\n\t\tif exiterr, ok := err.(*exec.ExitError); ok {\n\t\t\tif status, ok := exiterr.Sys().(syscall.WaitStatus); ok {\n\t\t\t\tif status.ExitStatus() == 1 {\n\t\t\t\t\terr = &EmptySelectionError{}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn \"\", err\n\t}\n\n\t// Cast and trim\n\tselection = strings.TrimSpace(string(byteOut))\n\n\treturn\n}",
"func (s *SelectableAttribute) Chooser() BooleanAttribute {\n\treturn s.chooser\n}",
"func (q *Question) _baseReadChoice(prompt string, a []string, defaultAnswer uint) (answer string, err error) {\n\t// Saves the value without ANSI to get it when it's set the answer by default.\n\tdef := a[defaultAnswer]\n\ta[defaultAnswer] = setBold + def + setOff\n\n\tline := q.getLine(prompt, strings.Join(a, \",\"), _DEFAULT_MULTIPLE)\n\n\tfor {\n\t\tanswer, err = line.Read()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif answer == \"\" {\n\t\t\treturn def, nil\n\t\t}\n\n\t\tfor _, v := range a {\n\t\t\tif answer == v {\n\t\t\t\treturn answer, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}",
"func GuiTextBoxGetSelection() Vector2 {\n\tres := C.GuiTextBoxGetSelection()\n\treturn newVector2FromPointer(unsafe.Pointer(&res))\n}",
"func (fv *FileView) FileSelectAction(idx int) {\n\tif idx < 0 {\n\t\treturn\n\t}\n\tfv.SaveSortPrefs()\n\tfi := fv.Files[idx]\n\tfv.SelectedIdx = idx\n\tfv.SelFile = fi.Name\n\tsf := fv.SelField()\n\tsf.SetText(fv.SelFile)\n\tfv.WidgetSig.Emit(fv.This, int64(gi.WidgetSelected), fv.SelectedFile())\n}",
"func (s *BasevhdlListener) ExitChoice(ctx *ChoiceContext) {}",
"func (s *BasevhdlListener) EnterChoice(ctx *ChoiceContext) {}",
"func NewChoice(allowedValues ...string) Choice {\n\treturn Choice{AllowedValues: allowedValues}\n}",
"func (cli *CliPrompter) ChooseWithDefault(pr string, defaultValue string, options []string) (string, error) {\n\tselected := \"\"\n\tprompt := &survey.Select{\n\t\tMessage: pr,\n\t\tOptions: options,\n\t\tDefault: defaultValue,\n\t}\n\t_ = survey.AskOne(prompt, &selected, survey.WithValidator(survey.Required))\n\n\t// return the selected element index\n\tfor i, option := range options {\n\t\tif selected == option {\n\t\t\treturn options[i], nil\n\t\t}\n\t}\n\treturn \"\", errors.New(\"bad input\")\n}",
"func (o *Outbound) Chooser() peer.Chooser {\n\treturn o.chooser\n}",
"func Choose(pr string, options []string) int {\n\treturn defaultPrompter.Choose(pr, options)\n}",
"func (ui *UI) GetChooseMainOption() string {\n\tprompt := promptui.Select{\n\t\tLabel: \"Select command\",\n\t\tItems: []string{\"containers\", \"exit\"},\n\t}\n\t_, result, err := prompt.Run()\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"^\") {\n\t\t\tui.logger.Errorf(\"Prompt failed %v\", err)\n\t\t}\n\t}\n\treturn result\n}",
"func pickChapter(g *gocui.Gui, v *gocui.View) error {\n\tif err := openModal(g); err != nil {\n\t\treturn err\n\t}\n\n\tdone := make(chan bool)\n\ttimer := time.NewTimer(time.Second * time.Duration(downloadTimeoutSecond))\n\n\t// must run downloading process in\n\t// go routine or else the it will\n\t// block the openModal so loading modal\n\t// will not be shown to the user\n\tgo func() {\n\t\ts := trimViewLine(v)\n\t\tprepDownloadChapter(s)\n\t\tdone <- true\n\t}()\n\n\t// in case downloading takes longer than\n\t// downloadTimeoutSecond, close the modal\n\t// and continue to download in background\n\tgo func() {\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\tsetClosingMessage(g, \"continuing to download\\nin background...\")\n\t\t\treturn\n\t\tcase <-done:\n\t\t\tg.Update(func(g *gocui.Gui) error {\n\t\t\t\terr := closeModal(g)\n\t\t\t\treturn err\n\t\t\t})\n\t\t}\n\t}()\n\n\treturn nil\n}",
"func selectDirectoryGUI(titleA string) string {\n\tdirectoryT, errT := dialog.Directory().Title(titleA).Browse()\n\n\tif errT != nil {\n\t\treturn tk.GenerateErrorStringF(\"failed: %v\", errT)\n\t}\n\n\treturn directoryT\n}",
"func (fv *FileView) SelectedFile() string {\n\treturn filepath.Join(fv.DirPath, fv.SelFile)\n}",
"func Get(opt GetOptions) ([]action.DownloadedFile, error) {\n\tcolor.Disable()\n\n\tgopt := action.GetOpt{}\n\tgopt.Files = opt.Files\n\tgopt.Tags = opt.Tags\n\tgopt.Service = opt.Service\n\tgopt.Output = opt.Output\n\n\tif len(opt.Catalog) == 0 {\n\t\tgopt.Catalog = catalog.DefaultName\n\t} else {\n\t\tgopt.Catalog = opt.Catalog\n\t}\n\n\tm := monitor.New(ioutil.Discard, true)\n\n\tfiles, err := action.Get(gopt, action.Dep{\n\t\tStderr: os.NewFile(0, os.DevNull),\n\t\tStdout: os.NewFile(0, os.DevNull),\n\n\t\tMonitor: &m,\n\t})\n\n\tif len(m.Errors) > 0 {\n\t\treturn files, m.Errors[0]\n\t}\n\n\treturn files, err\n}",
"func choice(a string) string {\n\tif strings.EqualFold(a, \"r\") {\n\t\treturn \"Rock\"\n\t} else if strings.EqualFold(a, \"p\") {\n\t\treturn \"Paper\"\n\t} else if strings.EqualFold(a, \"s\") {\n\t\treturn \"Scissor\"\n\t} else {\n\t\treturn \"UNKNOWN\"\n\t}\n}",
"func (e Enum) GetValue(key string) (any, error) {\n\tfor k, v := range e {\n\t\tif k == key {\n\t\t\treturn v, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"flagenum: Invalid value; must be one of [%s]\", e.Choices())\n}",
"func selectFileToSaveGUI(titleA string, filterNameA string, filterTypeA string) string {\n\tfileNameT, errT := dialog.File().Filter(filterNameA, filterTypeA).Title(titleA).Save()\n\n\tif errT != nil {\n\t\treturn tk.GenerateErrorStringF(\"failed: %v\", errT)\n\t}\n\n\treturn fileNameT\n}",
"func ChoiceIndexCallback(title string, choices []string, def int, f func(int, int, int)) int {\n\tselection := def\n\tnc := len(choices) - 1\n\tif selection < 0 || selection > nc {\n\t\tselection = 0\n\t}\n\toffset := 0\n\tcx := 0\n\tfor {\n\t\tsx, sy := termbox.Size()\n\t\ttermbox.HideCursor()\n\t\ttermbox.Clear(termbox.ColorDefault, termbox.ColorDefault)\n\t\tPrintstring(title, 0, 0)\n\t\tfor selection < offset {\n\t\t\toffset -= 5\n\t\t\tif offset < 0 {\n\t\t\t\toffset = 0\n\t\t\t}\n\t\t}\n\t\tfor selection-offset >= sy-1 {\n\t\t\toffset += 5\n\t\t\tif offset >= nc {\n\t\t\t\toffset = nc\n\t\t\t}\n\t\t}\n\t\tfor i, s := range choices[offset:] {\n\t\t\tts, _ := trimString(s, cx)\n\t\t\tPrintstring(ts, 3, i+1)\n\t\t\tif cx > 0 {\n\t\t\t\tPrintstring(\"←\", 2, i+1)\n\t\t\t}\n\t\t}\n\t\tPrintstring(\">\", 1, (selection+1)-offset)\n\t\tif f != nil {\n\t\t\tf(selection, sx, sy)\n\t\t}\n\t\ttermbox.Flush()\n\t\tev := termbox.PollEvent()\n\t\tif ev.Type != termbox.EventKey {\n\t\t\tcontinue\n\t\t}\n\t\tkey := ParseTermboxEvent(ev)\n\t\tswitch key {\n\t\tcase \"C-v\":\n\t\t\tfallthrough\n\t\tcase \"next\":\n\t\t\tselection += sy - 5\n\t\t\tif selection >= len(choices) {\n\t\t\t\tselection = len(choices) - 1\n\t\t\t}\n\t\tcase \"M-v\":\n\t\t\tfallthrough\n\t\tcase \"prior\":\n\t\t\tselection -= sy - 5\n\t\t\tif selection < 0 {\n\t\t\t\tselection = 0\n\t\t\t}\n\t\tcase \"C-c\":\n\t\t\tfallthrough\n\t\tcase \"C-g\":\n\t\t\treturn def\n\t\tcase \"UP\", \"C-p\":\n\t\t\tif selection > 0 {\n\t\t\t\tselection--\n\t\t\t}\n\t\tcase \"DOWN\", \"C-n\":\n\t\t\tif selection < len(choices)-1 {\n\t\t\t\tselection++\n\t\t\t}\n\t\tcase \"LEFT\", \"C-b\":\n\t\t\tif cx > 0 {\n\t\t\t\tcx--\n\t\t\t}\n\t\tcase \"RIGHT\", \"C-f\":\n\t\t\tcx++\n\t\tcase \"C-a\", \"Home\":\n\t\t\tcx = 0\n\t\tcase \"M-<\":\n\t\t\tselection = 0\n\t\tcase \"M->\":\n\t\t\tselection = len(choices) - 1\n\t\tcase \"RET\":\n\t\t\treturn selection\n\t\t}\n\t}\n}",
"func NewFlagChoice(choices []string, chosen string) *FlagChoice {\n\treturn &FlagChoice{\n\t\tchoices: choices,\n\t\tchosen: chosen,\n\t}\n}",
"func getCompletion(sh string, parent *cobra.Command) (string, error) {\n\tvar err error\n\tvar buf bytes.Buffer\n\n\tswitch sh {\n\tcase \"bash\":\n\t\terr = parent.GenBashCompletion(&buf)\n\tcase \"zsh\":\n\t\terr = parent.GenZshCompletion(&buf)\n\tcase \"fish\":\n\t\terr = parent.GenFishCompletion(&buf, true)\n\n\tdefault:\n\t\terr = errors.New(\"unsupported shell type (must be bash, zsh or fish): \" + sh)\n\t}\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn buf.String(), nil\n}",
"func RequestChoiceIfEmpty(defaultValue string, message string, options ...string) string {\n\tif defaultValue != \"\" {\n\t\treturn defaultValue\n\t}\n\treturn RequestChoice(message, options...)\n}",
"func (pm *PopupMenu) GetButton(id string) *PushButton {\n\tfor _, ow := range pm.opts {\n\t\tif ow.opt.ID == id {\n\t\t\tpb, ok := ow.w.(*PushButton)\n\t\t\tif !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn pb\n\t\t}\n\t}\n\treturn nil\n}",
"func get_song_selection() (int, string) {\n\tsongs := strings.Split(master_list, \"\\n\")\n\tvar ip string\n\n\tui := &input.UI{\n\t\tWriter: os.Stdout,\n\t\tReader: os.Stdin,\n\t}\n\tquery := \"Select a song\"\n\tid, _ := ui.Ask(query, &input.Options{\n\t\tValidateFunc: func(id string) error {\n\t\t\tfor _, s := range songs {\n\t\t\t\tsong_id := strings.Split(s, \":\")[0]\n\t\t\t\tif song_id == id {\n\t\t\t\t\tip = strings.SplitN(s, \":\", 3)[1][1:]\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"song id not here\")\n\t\t},\n\t\tLoop: true,\n\t})\n\tret, _ := strconv.ParseInt(id, 10, 32)\n\treturn int(ret), ip + \":\"\n}",
"func (o *os) GetImeSelection() gdnative.Vector2 {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.GetImeSelection()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"get_ime_selection\")\n\n\t// Call the parent method.\n\t// Vector2\n\tretPtr := gdnative.NewEmptyVector2()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewVector2FromPointer(retPtr)\n\treturn ret\n}",
"func (cv *Choice) String() string {\n\tif cv.Choice != nil {\n\t\treturn *cv.Choice\n\t}\n\treturn \"\"\n}",
"func (l *List) Choose(ctx context.Context, req *transport.Request) (peer peer.Peer, onFinish func(error), err error) {\n\treturn l.list.Choose(ctx, req)\n}",
"func (fb *FlowBox) GetSelectionMode() SelectionMode {\n\tc := C.gtk_flow_box_get_selection_mode(fb.native())\n\treturn SelectionMode(c)\n}",
"func (v Values) GetSelection(\n\tmodel SelectModel, name string) *Selection {\n\treturn model.ToSelection(v.Get(name))\n}",
"func (_m *Prompter) Choice(prompt string, options []string) string {\n\tret := _m.Called(prompt, options)\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func(string, []string) string); ok {\n\t\tr0 = rf(prompt, options)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}",
"func (*XMLDocument) GetSelection() (w *window.Selection) {\n\tmacro.Rewrite(\"$_.getSelection()\")\n\treturn w\n}",
"func (cycle *Cycle) Choose() {\n\tif !cycle.showing ||\n\t\tlen(cycle.items) == 0 ||\n\t\tcycle.selected < 0 ||\n\t\tcycle.selected >= len(cycle.items) {\n\n\t\treturn\n\t}\n\n\tcycle.items[cycle.selected].choose()\n\tcycle.Hide()\n}",
"func waitForFileToBeSelected(ctx context.Context, conn *chrome.Conn, f *filesapp.FilesApp) error {\n\tif err := conn.WaitForExprFailOnErrWithTimeout(ctx, \"window.document.title == 'awaiting drop.'\", 5*time.Second); err != nil {\n\t\treturn errors.Wrap(err, \"failed waiting for javascript to update window.document.title\")\n\t}\n\n\t// Get the listbox which has the list of files.\n\tlistBox, err := f.Root.DescendantWithTimeout(ctx, ui.FindParams{Role: ui.RoleTypeListBox}, 15*time.Second)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to find listbox\")\n\t}\n\tdefer listBox.Release(ctx)\n\n\t// Setup a watcher to wait for the selected files to stabilize.\n\tew, err := ui.NewWatcher(ctx, listBox, ui.EventTypeActiveDescendantChanged)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed getting a watcher for the files listbox\")\n\t}\n\tdefer ew.Release(ctx)\n\n\t// Check the listbox for any Activedescendantchanged events occurring in a 2 second interval.\n\t// If any events are found continue polling until 10s is reached.\n\tif err := testing.Poll(ctx, func(ctx context.Context) error {\n\t\treturn ew.EnsureNoEvents(ctx, 2*time.Second)\n\t}, &testing.PollOptions{Timeout: 10 * time.Second}); err != nil {\n\t\treturn errors.Wrapf(err, \"failed waiting %v for listbox to stabilize\", 10*time.Second)\n\t}\n\n\treturn nil\n}",
"func (s *Selection) RunPrompt() (*Choice, error) {\n\ttmpl, err := s.initConfirmationTemplate()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"initializing confirmation template: %w\", err)\n\t}\n\n\tm := NewModel(s)\n\n\tp := tea.NewProgram(m, tea.WithOutput(s.Output), tea.WithInput(s.Input))\n\tif err := p.Start(); err != nil {\n\t\treturn nil, fmt.Errorf(\"running prompt: %w\", err)\n\t}\n\n\tchoice, err := m.Choice()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reading choice: %w\", err)\n\t}\n\n\tif s.ConfirmationTemplate == \"\" {\n\t\treturn choice, nil\n\t}\n\n\tbuffer := &bytes.Buffer{}\n\n\terr = tmpl.Execute(buffer, map[string]interface{}{\n\t\t\"FinalChoice\": choice,\n\t\t\"Prompt\": m.Prompt,\n\t\t\"AllChoices\": m.Choices,\n\t\t\"NAllChoices\": len(m.Choices),\n\t\t\"TerminalWidth\": m.width,\n\t})\n\tif err != nil {\n\t\treturn choice, fmt.Errorf(\"execute confirmation template: %w\", err)\n\t}\n\n\t_, err = fmt.Fprint(s.Output, promptkit.Wrap(buffer.String(), m.width))\n\n\treturn choice, err\n}",
"func ChooseWithDefault(pr string, defaultValue string, options []string) (string, error) {\n\treturn defaultPrompter.ChooseWithDefault(pr, defaultValue, options)\n}",
"func (pf *File) GetEnum(name string) *Enum {\n\tfor _, enum := range pf.Enums {\n\t\tif enum.Name == name {\n\t\t\treturn enum\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (p *colorPicker) Pick(image string) Color {\n\tif c, present := p.imageColors[tag.StripTag(image, false)]; present {\n\t\treturn c\n\t}\n\n\t// If no mapping is found, don't add any color formatting\n\treturn None\n}",
"func (f *FilePicker) SelectFile(fileName string) uiauto.Action {\n\treturn f.filesApp.SelectFile(fileName)\n}",
"func (ui *UI) GetChooseContainer(containers []types.Container) *types.Container {\n\titems := []displayedContainer{}\n\ttemplates := &promptui.SelectTemplates{\n\t\tActive: `{{ printf \">\" | blue }} {{ .ShortID | bold }} ({{ .ImageName | bold }})`,\n\t\tInactive: \" {{ .ShortID }} ({{ .ImageName }})\",\n\t\tSelected: \" {{ .ShortID | bold }} {{ .ImageName | bold | blue }} selected\",\n\t\tDetails: `\n\t\t--------- Container ----------\n\t\t{{ \"ID:\" | faint }}\t{{ .ShortID }}\n\t\t{{ \"Image:\" | faint }}\t{{ .ImageName }}\n\t\t{{ \"Status:\" | faint }}\t{{ .CurrentStatus }}\n\t\t{{ \"State:\" | faint }}\t{{ .State }}`,\n\t}\n\tfor _, container := range containers {\n\t\titem := displayedContainer{\n\t\t\tcontainer.ID[:12],\n\t\t\tcontainer.Image,\n\t\t\tcontainer.Status,\n\t\t\tcontainer.State,\n\t\t}\n\t\titems = append(items, item)\n\t}\n\tprompt := promptui.Select{\n\t\tLabel: \"Select Container\",\n\t\tItems: items,\n\t\tTemplates: templates,\n\t}\n\n\tindex, _, err := prompt.Run()\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"^\") {\n\t\t\tui.logger.Errorf(\"Prompt failed %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\treturn &containers[index]\n}",
"func Choose(baseURL string, store ChooseDB, strategies strategy.Strategies, chooseTemplate, meTemplate tmpl) http.Handler {\n\treturn mux.Method{\n\t\t\"GET\": chooseProvider(baseURL, store, strategies, chooseTemplate, meTemplate),\n\t}\n}",
"func (b *Button) GetImage(t int) uintptr {\n\treturn b.SendMessage(win.BM_GETIMAGE, uintptr(t), 0)\n}",
"func get(stub shim.ChaincodeStubInterface, args []string) (string, error) {\n\tif len(args) != 1 {\n\t\treturn \"\", fmt.Errorf(\"Incorrect arguments. Expecting a key\")\n\t}\n\n\tvalue, err := stub.GetState(args[0])\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to get asset: %s with error: %s\", args[0], err)\n\t}\n\tif value == nil {\n\t\treturn \"\", fmt.Errorf(\"Asset not found: %s\", args[0])\n\t}\n\treturn string(value), nil\n}",
"func get(stub shim.ChaincodeStubInterface, args []string) (string, error) {\n\tif len(args) != 1 {\n\t\treturn \"\", fmt.Errorf(\"Incorrect arguments. Expecting a key\")\n\t}\n\n\tvalue, err := stub.GetState(args[0])\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to get asset: %s with error: %s\", args[0], err)\n\t}\n\tif value == nil {\n\t\treturn \"\", fmt.Errorf(\"Asset not found: %s\", args[0])\n\t}\n\n\treturn string(value), nil\n}",
"func (m *ManagementTemplateStep) GetAcceptedVersion()(ManagementTemplateStepVersionable) {\n val, err := m.GetBackingStore().Get(\"acceptedVersion\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(ManagementTemplateStepVersionable)\n }\n return nil\n}",
"func browseFolderCallback(hwnd win.HWND, msg uint32, lp, wp uintptr) uintptr {\n\tconst BFFM_SELCHANGED = 2\n\tif msg == BFFM_SELCHANGED {\n\t\t_, err := pathFromPIDL(lp)\n\t\tvar enabled uintptr\n\t\tif err == nil {\n\t\t\tenabled = 1\n\t\t}\n\n\t\tconst BFFM_ENABLEOK = win.WM_USER + 101\n\n\t\twin.SendMessage(hwnd, BFFM_ENABLEOK, 0, enabled)\n\t}\n\n\treturn 0\n}",
"func ChoiceIndex(title string, choices []string, def int) int {\n\treturn ChoiceIndexCallback(title, choices, def, nil)\n}",
"func (v *RadioButton) GetGroup() (*glib.SList, error) {\n\tc := C.gtk_radio_button_get_group(v.native())\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\treturn glib.WrapSList(uintptr(unsafe.Pointer(c))), nil\n}",
"func (a *Args) GetOpt(key string) string {\n\tfor _, b := range a.binOpts {\n\t\tif b.key == key {\n\t\t\treturn b.val\n\t\t}\n\t}\n\n\t// Invalid Option\n\tfmt.Fprintf(os.Stderr, \"Args - Missing option: %s\\n\", key)\n\tos.Exit(1)\n\n\t// Never Gets Here\n\treturn \"\"\n}",
"func (c *CompletionManager) GetSelectedSuggestion() (s Suggest, ok bool) {\n\tif c.selected == -1 {\n\t\treturn Suggest{}, false\n\t} else if c.selected < -1 {\n\t\tdebug.Assert(false, \"must not reach here\")\n\t\tc.selected = -1\n\t\treturn Suggest{}, false\n\t}\n\treturn c.tmp[c.selected], true\n}",
"func (f *FlagChoice) String() string {\n\treturn choiceList(f.choices...)\n}",
"func (selectManyConfig SelectManyConfig) GetTemplate(context *Context, metaType string) (assetfs.AssetInterface, error) {\n\tif metaType == \"form\" && selectManyConfig.SelectionTemplate != \"\" {\n\t\treturn context.Asset(selectManyConfig.SelectionTemplate)\n\t}\n\treturn nil, errors.New(\"not implemented\")\n}",
"func (fn *formulaFuncs) CHOOSE(argsList *list.List) formulaArg {\n\tif argsList.Len() < 2 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, \"CHOOSE requires 2 arguments\")\n\t}\n\tidx, err := strconv.Atoi(argsList.Front().Value.(formulaArg).Value())\n\tif err != nil {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, \"CHOOSE requires first argument of type number\")\n\t}\n\tif argsList.Len() <= idx {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, \"index_num should be <= to the number of values\")\n\t}\n\targ := argsList.Front()\n\tfor i := 0; i < idx; i++ {\n\t\targ = arg.Next()\n\t}\n\treturn arg.Value.(formulaArg)\n}",
"func GetFilterOption(ignoresRegex string) (BackupOption, error) {\n\texp, err := regexp.Compile(ignoresRegex)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\topt := func(action CopyAction) CopyAction {\n\t\tnewAct := func(oldPath, newPath string, info os.FileInfo) error {\n\n\t\t\tmatched := exp.Match([]byte(oldPath))\n\t\t\tif matched {\n\t\t\t\treturn fmt.Errorf(fileIgnored)\n\t\t\t}\n\t\t\treturn action(oldPath, newPath, info)\n\t\t}\n\t\treturn newAct\n\t}\n\treturn opt, nil\n}",
"func getLicenseFile(licenses []string, files []string) (string, error) {\n\tmatches := matchLicenseFile(licenses, files)\n\n\tswitch len(matches) {\n\tcase 0:\n\t\treturn \"\", ErrNoLicenseFile\n\tcase 1:\n\t\treturn matches[0], nil\n\tdefault:\n\t\treturn \"\", ErrMultipleLicenses\n\t}\n}",
"func (l *License) GuessFile(dir string) error {\n\tfiles, err := readDirectory(dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tmatch, err := getLicenseFile(DefaultLicenseFiles, files)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.File = filepath.Join(dir, match)\n\treturn nil\n}",
"func (e *EntryManager) GetSelectEntry() *Entry {\n\trow, _ := e.GetSelection()\n\tif len(e.entries) == 0 {\n\t\treturn nil\n\t}\n\tif row < 1 {\n\t\treturn nil\n\t}\n\n\tif row > len(e.entries) {\n\t\treturn nil\n\t}\n\treturn e.entries[row-1]\n}",
"func (ui *UI) GetCommandSelect() string {\n\tprompt := promptui.Select{\n\t\tLabel: \"Select Commnad\",\n\t\tItems: []string{\"exec\", \"logs\", \"stop\"},\n\t}\n\n\t_, result, err := prompt.Run()\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"^\") {\n\t\t\tui.logger.Errorf(\"Select containers dialog failed | %s\", err)\n\t\t}\n\t}\n\treturn result\n}",
"func (s *BasevhdlListener) ExitChoices(ctx *ChoicesContext) {}",
"func (v *MenuButton) GetPopup() *Menu {\n\tc := C.gtk_menu_button_get_popup(v.native())\n\tif c == nil {\n\t\treturn nil\n\t}\n\tobj := glib.Take(unsafe.Pointer(c))\n\treturn wrapMenu(obj)\n}",
"func (f *FileDescriptor) GetEnum(name string) *EnumDescriptor {\n\tfor _, e := range f.GetEnums() {\n\t\tif e.GetName() == name || e.GetLongName() == name {\n\t\t\treturn e\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (v *Button) GetImage() (*Widget, error) {\n\tc := C.gtk_button_get_image(v.native())\n\tif c == nil {\n\t\treturn nil, nilPtrErr\n\t}\n\tobj := glib.Take(unsafe.Pointer(c))\n\treturn wrapWidget(obj), nil\n}",
"func (e Enum) Choices() string {\n\tkeys := make([]string, 0, len(e))\n\tfor k := range e {\n\t\tif k == \"\" {\n\t\t\tk = `\"\"`\n\t\t}\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn strings.Join(keys, \", \")\n}",
"func (Choices) EnumDescriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{0}\n}",
"func GetFromStdin(params *GetFromStdinParams) *string {\n\tvar prompt survey.Prompt\n\tvar result *string\n\tcompiledRegex := DefaultValidationRegexPattern\n\tif params.ValidationRegexPattern != \"\" {\n\t\tcompiledRegex = regexp.MustCompile(params.ValidationRegexPattern)\n\t}\n\n\tif params.Options != nil {\n\t\tprompt = &survey.Select{\n\t\t\tMessage: params.Question,\n\t\t\tOptions: params.Options,\n\t\t\tDefault: params.DefaultValue,\n\t\t}\n\t} else if params.IsPassword {\n\t\tprompt = &survey.Password{\n\t\t\tMessage: params.Question,\n\t\t}\n\t} else {\n\t\tprompt = &survey.Input{\n\t\t\tMessage: params.Question,\n\t\t\tDefault: params.DefaultValue,\n\t\t}\n\t}\n\n\tquestion := []*survey.Question{\n\t\t{\n\t\t\tName: \"question\",\n\t\t\tPrompt: prompt,\n\t\t},\n\t}\n\n\tif params.Options != nil {\n\t\tquestion[0].Validate = func(val interface{}) error {\n\t\t\t// since we are validating an Input, the assertion will always succeed\n\t\t\tif str, ok := val.(string); !ok || compiledRegex.MatchString(str) == false {\n\t\t\t\treturn fmt.Errorf(\"Answer has to match pattern: %s\", compiledRegex.String())\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tfor result == nil {\n\t\t// Ask it\n\t\tanswers := struct {\n\t\t\tQuestion string\n\t\t}{}\n\t\terr := survey.Ask(question, &answers)\n\t\tif err != nil {\n\t\t\tif strings.HasPrefix(err.Error(), \"Answer has to match pattern\") {\n\t\t\t\tlog.Info(err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Keyboard interrupt\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\tresult = &answers.Question\n\t}\n\n\treturn result\n}",
"func (c *Controller) GetFile(key string) (multipart.File, *multipart.FileHeader, error) {\n\treturn c.Ctx.Request.FormFile(key)\n}",
"func (m *AgreementAcceptance) GetAgreementFileId()(*string) {\n return m.agreementFileId\n}",
"func (g *Group) pick(r *http.Request) *Backend {\n\tg.mu.RLock()\n\tdefer g.mu.RUnlock()\n\tif len(g.backends) == 0 {\n\t\treturn nil\n\t}\n\t// TODO(kr): do something smarter than rand\n\treturn g.backends[rand.Intn(len(g.backends))]\n}",
"func (s *BasevhdlListener) EnterChoices(ctx *ChoicesContext) {}",
"func (*AnswerChoice) Descriptor() ([]byte, []int) {\n\treturn file_quiz_proto_rawDescGZIP(), []int{4}\n}",
"func HandleFilterChoice() {\n\tvar userCommand int\n\tfmt.Scan(&userCommand)\n\n\tswitch userCommand {\n\tcase 1: // Deadline\n\t\tcontrollers.ShowFilteredOptions(1)\n\tcase 2: // Priority\n\t\tcontrollers.ShowFilteredOptions(2)\n\tcase 3: // Added time\n\t\tcontrollers.ShowFilteredOptions(3)\n\tdefault:\n\t\tfmt.Println(\"Returning to Main Menu...\")\n\t}\n}",
"func RenderSetChoice(resp http.ResponseWriter, req *http.Request, ext string, mode primitive.Mode, fileSeeker io.ReadSeeker) {\n\n\top := []OptStruct{\n\t\t{20, mode},\n\t\t{30, mode},\n\t\t{40, mode},\n\t\t{50, mode},\n\t}\n\topFileList, err := GenImgList(ext, fileSeeker, op...)\n\tif err != nil {\n http.Error(resp, err.Error(), http.StatusInternalServerError)\n return\n\t}\n\thtmlist := `<html>\n <body>\n {{range .}}\n <a href=\"/modify/{{.Name}}?mode={{.Mode}}&n={{.Numshapes}}\">\n <img style =\"width 30%\" src=\"/pics/{{.Name}}\">\n {{end}}\n </body>\n </html>\n `\n\ttempl := template.Must(template.New(\"\").Parse(htmlist))\n\n\ttype Opts struct {\n\t\tName string\n\t\tMode primitive.Mode\n\t\tNumshapes int\n\t}\n\tvar opts []Opts\n\tfor index, val := range opFileList {\n\t\topts = append(opts, Opts{Name: filepath.Base(val), Mode: op[index].mode, Numshapes: op[index].num})\n\t}\n\n\t// err = templ.Execute(resp, opts)\n\t// if err != nil {\n\t// \tpanic(err)\n\t// }\n checkError(templ.Execute(resp,opts))\n}",
"func SelectorCli(label string, options ...string) (string, error) {\n\ts := promptui.Select{\n\t\tLabel: label,\n\t\tItems: options,\n\t}\n\n\t_, result, err := s.Run()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn result, nil\n}",
"func (d *Abstraction) GetChannel(s string) chan *AbsSignal {\n\tif _, ok := d.Sigmap[s]; ok {\n\t\treturn d.Sigmap[s]\n\t}\n\treturn nil\n}",
"func (fv *FileView) SetSelFileAction(sel string) {\n\tfv.SelFile = sel\n\tsv := fv.FilesView()\n\tsv.SelectFieldVal(\"Name\", fv.SelFile)\n\tfv.SelectedIdx = sv.SelectedIdx\n\tsf := fv.SelField()\n\tsf.SetText(fv.SelFile)\n\tfv.WidgetSig.Emit(fv.This, int64(gi.WidgetSelected), fv.SelectedFile())\n}",
"func (pipe *PipeWS) GetOption(name string) (\n\tvalue interface{},\n\terr error) {\n\treturn nil, mangos.ErrBadOption\n}",
"func GetImageFlavor(label string, encryptionRequired bool, keyURL string, digest string) (*ImageFlavor, error) {\n\tlog.Trace(\"flavor/image_flavor:GetImageFlavor() Entering\")\n\tdefer log.Trace(\"flavor/image_flavor:GetImageFlavor() Leaving\")\n\tvar encryption *model.Encryption\n\n\tdescription := map[string]interface{}{\n\t\tmodel.Label: label,\n\t\tmodel.FlavorPart: \"IMAGE\",\n\t}\n\n\tmeta := model.Meta{\n\t\tDescription: description,\n\t}\n\tnewUuid, err := uuid.NewRandom()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create new UUID\")\n\t}\n\tmeta.ID = newUuid\n\n\tif encryptionRequired {\n\t\tencryption = &model.Encryption{\n\t\t\tKeyURL: keyURL,\n\t\t\tDigest: digest,\n\t\t}\n\t}\n\n\timageflavor := model.Image{\n\t\tMeta: meta,\n\t\tEncryptionRequired: encryptionRequired,\n\t\tEncryption: encryption,\n\t}\n\n\tflavor := ImageFlavor{\n\t\tImage: imageflavor,\n\t}\n\treturn &flavor, nil\n}",
"func New(prompt string, choices []*Choice) *Selection {\n\treturn &Selection{\n\t\tChoices: choices,\n\t\tPrompt: prompt,\n\t\tFilterPrompt: DefaultFilterPrompt,\n\t\tTemplate: DefaultTemplate,\n\t\tConfirmationTemplate: DefaultConfirmationTemplate,\n\t\tFilter: FilterContainsCaseInsensitive,\n\t\tFilterInputPlaceholderStyle: lipgloss.NewStyle().Foreground(lipgloss.Color(\"240\")),\n\t\tKeyMap: NewDefaultKeyMap(),\n\t\tFilterPlaceholder: DefaultFilterPlaceholder,\n\t\tExtendedTemplateScope: template.FuncMap{},\n\t\tOutput: os.Stdout,\n\t\tInput: os.Stdin,\n\t}\n}",
"func getArg(c *cli.Context, idx int, prompt string) (string, error) {\n\targ := c.Args().Get(0)\n\tif arg != \"\" {\n\t\treturn arg, nil\n\t}\n\tfmt.Printf(\"%s: \", prompt)\n\treader := bufio.NewReader(os.Stdin)\n\targ, err := reader.ReadString('\\n')\n\tif err != nil {\n\t\treturn arg, err\n\t}\n\targ = strings.TrimSpace(arg)\n\treturn arg, nil\n}"
] | [
"0.5884683",
"0.5797472",
"0.5483212",
"0.5472473",
"0.54234344",
"0.53660977",
"0.5348636",
"0.5299239",
"0.52480185",
"0.5243561",
"0.52337843",
"0.5179653",
"0.5166573",
"0.51321834",
"0.5097163",
"0.4941595",
"0.48885983",
"0.48517326",
"0.48319155",
"0.4736443",
"0.4735221",
"0.46647197",
"0.46459487",
"0.4637478",
"0.46300396",
"0.45709053",
"0.45704213",
"0.45670515",
"0.45023456",
"0.44920647",
"0.44913262",
"0.4484871",
"0.44758517",
"0.4461774",
"0.44547376",
"0.44520876",
"0.44465202",
"0.44356954",
"0.4403275",
"0.43874356",
"0.43610242",
"0.4331063",
"0.4326026",
"0.43253496",
"0.43195364",
"0.43187764",
"0.43029153",
"0.430027",
"0.43001515",
"0.42978463",
"0.4293386",
"0.4264958",
"0.42634627",
"0.42480138",
"0.4236517",
"0.42268234",
"0.4208934",
"0.41949394",
"0.4175037",
"0.4166031",
"0.41602215",
"0.41592154",
"0.41464671",
"0.41435945",
"0.41379336",
"0.41366917",
"0.41238803",
"0.41055197",
"0.4098921",
"0.40939498",
"0.40930903",
"0.40904522",
"0.40857533",
"0.40831617",
"0.4081507",
"0.4078293",
"0.40753195",
"0.40710637",
"0.40677053",
"0.40645283",
"0.40572834",
"0.40458366",
"0.4035927",
"0.40169784",
"0.4016101",
"0.40037963",
"0.4000175",
"0.39938807",
"0.3986406",
"0.3974765",
"0.39722723",
"0.3971844",
"0.39591655",
"0.3956864",
"0.39562577",
"0.39528435",
"0.39477178",
"0.39472213",
"0.39433858",
"0.39378786"
] | 0.8411069 | 0 |
/ GtkScrolledWindow GetMaxContentWidth is a wrapper around gtk_scrolled_window_get_max_content_width(). | GtkScrolledWindow GetMaxContentWidth — это обертка вокруг gtk_scrolled_window_get_max_content_width(). | func (v *ScrolledWindow) GetMaxContentWidth() int {
c := C.gtk_scrolled_window_get_max_content_width(v.native())
return int(c)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *ScrolledWindow) GetMaxContentHeight() int {\n\tc := C.gtk_scrolled_window_get_max_content_height(v.native())\n\treturn int(c)\n}",
"func (v *ScrolledWindow) SetMaxContentWidth(width int) {\n\tC.gtk_scrolled_window_set_max_content_width(v.native(), C.gint(width))\n}",
"func (v *ScrolledWindow) SetMaxContentHeight(width int) {\n\tC.gtk_scrolled_window_set_max_content_height(v.native(), C.gint(width))\n}",
"func (v *Entry) GetMaxWidthChars() int {\n\tc := C.gtk_entry_get_max_width_chars(v.native())\n\treturn int(c)\n}",
"func getContentWidth(pdf *gofpdf.Fpdf) float64 {\n\tmarginL, _, marginR, _ := pdf.GetMargins()\n\tpageW, _ := pdf.GetPageSize()\n\twidth := pageW - marginL - marginR\n\treturn width\n}",
"func (p *pageT) WidthMax(s string) {\n\tp.Style = css.NewStylesResponsive(p.Style)\n\tp.Style.Desktop.StyleBox.WidthMax = s\n\tp.Style.Mobile.StyleBox.WidthMax = \"calc(100% - 1.2rem)\" // 0.6rem margin-left and -right in mobile view\n}",
"func (w Widths) MaxWidth() (maxWidth, wideDepth int) {\n\tfor depth, width := range w {\n\t\tif width > maxWidth {\n\t\t\tmaxWidth = width\n\t\t\twideDepth = depth\n\t\t}\n\t}\n\treturn\n}",
"func (gr *groupT) WidthMax(s string) {\n\tgr.Style = css.NewStylesResponsive(gr.Style)\n\tgr.Style.Desktop.StyleBox.WidthMax = s\n\tgr.Style.Mobile.StyleBox.WidthMax = \"none\" // => 100% of page - page has margins; replaced desktop max-width\n}",
"func (v *ScrolledWindow) GetPropagateNaturalWidth() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_width(v.native())\n\treturn gobool(c)\n}",
"func (w *WidgetImplement) FixedWidth() int {\n\treturn w.fixedW\n}",
"func maxWidth(no_lines int, widthFromLineNo widthFunc) int {\n\tvar max int\n\tfor i := 0; i < no_lines; i++ {\n\t\tval := widthFromLineNo(i)\n\t\tif val > max {\n\t\t\tmax = val\n\t\t}\n\t}\n\treturn max\n}",
"func (fb *FlowBox) GetMaxChildrenPerLine() uint {\n\tc := C.gtk_flow_box_get_max_children_per_line(fb.native())\n\treturn uint(c)\n}",
"func SetScrollContentTrackerSize(sa *qtwidgets.QScrollArea) {\n\twgt := sa.Widget()\n\tsa.InheritResizeEvent(func(arg0 *qtgui.QResizeEvent) {\n\t\tosz := arg0.OldSize()\n\t\tnsz := arg0.Size()\n\t\tif false {\n\t\t\tlog.Println(osz.Width(), osz.Height(), nsz.Width(), nsz.Height())\n\t\t}\n\t\tif osz.Width() != nsz.Width() {\n\t\t\twgt.SetMaximumWidth(nsz.Width())\n\t\t}\n\t\t// this.ScrollArea_2.ResizeEvent(arg0)\n\t\targ0.Ignore() // I ignore, you handle it. replace explict call parent's\n\t})\n}",
"func (st *Settings) MaxWindowSize() uint32 {\n\treturn st.windowSize\n}",
"func (st *Settings) MaxWindowSize() uint32 {\n\treturn st.windowSize\n}",
"func (me XsdGoPkgHasElems_MaxWidth) MaxWidthDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"0\")\r\n\treturn *x\r\n}",
"func (w *LWindow) MWidth() int32 {\n\treturn w.mWidth\n}",
"func (v *Value) MaxWidth(offset, tab int) int {\n\t// simple values do not have tabulations\n\twidth := v.Width\n\tfor l := 0; l < len(v.Tabs); l++ {\n\t\twidth = max(width, v.LineWidth(l, offset, tab))\n\t}\n\treturn width\n}",
"func (me XsdGoPkgHasElem_MaxWidth) MaxWidthDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"0\")\r\n\treturn *x\r\n}",
"func (window Window) InnerWidth() int {\n\treturn window.Get(\"innerWidth\").Int()\n}",
"func (g *GitStatusWidget) GetWidth() int {\n\treturn g.renderer.GetWidth()\n}",
"func (f *Framebuffer) Width() int { return f.fb.Bounds().Max.X }",
"func (self *CellRenderer) GetFixedSize() (width, height int) {\n\tvar w C.gint\n\tvar h C.gint\n\tC.gtk_cell_renderer_get_fixed_size(self.object, &w, &h)\n\n\twidth = int(w)\n\theight = int(h)\n\treturn\n}",
"func (o *os) GetMaxWindowSize() gdnative.Vector2 {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.GetMaxWindowSize()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"get_max_window_size\")\n\n\t// Call the parent method.\n\t// Vector2\n\tretPtr := gdnative.NewEmptyVector2()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewVector2FromPointer(retPtr)\n\treturn ret\n}",
"func (s *ItemScroller) autoSize() {\n\n\tif s.maxAutoWidth == 0 && s.maxAutoHeight == 0 {\n\t\treturn\n\t}\n\n\tvar width float32\n\tvar height float32\n\tfor _, item := range s.items {\n\t\tpanel := item.GetPanel()\n\t\tif panel.Width() > width {\n\t\t\twidth = panel.Width()\n\t\t}\n\t\theight += panel.Height()\n\t}\n\n\t// If auto maximum width enabled\n\tif s.maxAutoWidth > 0 {\n\t\tif width <= s.maxAutoWidth {\n\t\t\ts.SetContentWidth(width)\n\t\t}\n\t}\n\t// If auto maximum height enabled\n\tif s.maxAutoHeight > 0 {\n\t\tif height <= s.maxAutoHeight {\n\t\t\ts.SetContentHeight(height)\n\t\t}\n\t}\n}",
"func (v *ScrolledWindow) GetPropagateNaturalHeight() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_height(v.native())\n\treturn gobool(c)\n}",
"func (s *Artwork) SetMaxWidth(v string) *Artwork {\n\ts.MaxWidth = &v\n\treturn s\n}",
"func (v *MailView) GetMaxLines() (int, error) {\n\treturn len(v.lines), nil\n}",
"func (o *Content) GetMaxConnsPerProcess() int32 {\n\tif o == nil || o.MaxConnsPerProcess.Get() == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.MaxConnsPerProcess.Get()\n}",
"func (self *TraitPixbuf) GetWidth() (return__ int) {\n\tvar __cgo__return__ C.int\n\t__cgo__return__ = C.gdk_pixbuf_get_width(self.CPointer)\n\treturn__ = int(__cgo__return__)\n\treturn\n}",
"func WindowContent(append ...bool) vecty.Markup {\n\treturn AddClass(wContent, append...)\n}",
"func (me *XsdGoPkgHasElems_MaxWidth) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElems_MaxWidth; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}",
"func (s *Sliding) MaxSize() int {\n\treturn s.maxSize\n}",
"func (v *TextView) GetBorderWindowSize(tp TextWindowType) int {\n\treturn int(C.gtk_text_view_get_border_window_size(v.native(), C.GtkTextWindowType(tp)))\n}",
"func (w *WidgetImplement) Width() int {\n\treturn w.w\n}",
"func (s *Thumbnails) SetMaxWidth(v string) *Thumbnails {\n\ts.MaxWidth = &v\n\treturn s\n}",
"func (w *Window) Width() int {\n\treturn int(C.ANativeWindow_getWidth(w.cptr()))\n}",
"func (v *Pixbuf) GetWidth() int {\n\treturn int(C.gdk_pixbuf_get_width(v.Native()))\n}",
"func (m *Model) GetMaxHeight() int {\n\treturn m.maxHeight\n}",
"func (k *Kernel) MaxX() int {\n\treturn k.Width\n}",
"func (win *Window) Maximize() {\n\twin.Candy().Guify(\"gtk_window_maximize\", win)\n}",
"func (p *partitionImpl) GetMaxRows() int {\n\treturn int(p.internalData.MaxRows)\n}",
"func (win *Window) Width() int {\n\tsize := C.sfRenderWindow_getSize(win.win)\n\treturn int(size.x)\n}",
"func (w *Whisper) MaxMessageSize() uint32 {\n\tval, _ := w.settings.Load(maxMsgSizeIdx)\n\treturn val.(uint32)\n}",
"func (w *Window) Width() int {\n\treturn w.width\n}",
"func (v *ScrolledWindow) SetPropagateNaturalWidth(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_width(v.native(), gbool(propagate))\n}",
"func (me *XsdGoPkgHasElem_MaxWidth) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElem_MaxWidth; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}",
"func (e Event) GetResizeWidth() int {\n\treturn int(C.caca_get_event_resize_width(e.Ev))\n}",
"func (r *SlidingWindow) Max() int {return r.base + len(r.values) - 1}",
"func (Empty) MaxHeight(width, height int) int {\n\treturn 1\n}",
"func (s *PresetWatermark) SetMaxWidth(v string) *PresetWatermark {\n\ts.MaxWidth = &v\n\treturn s\n}",
"func (s *ItemScroller) maxFirst() int {\n\n\t// Vertical scroller\n\tif s.vert {\n\t\tvar height float32\n\t\tpos := len(s.items) - 1\n\t\tif pos < 0 {\n\t\t\treturn 0\n\t\t}\n\t\tfor {\n\t\t\titem := s.items[pos]\n\t\t\theight += item.GetPanel().Height()\n\t\t\tif height > s.Height() {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tpos--\n\t\t\tif pos < 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treturn pos + 1\n\t}\n\n\t// Horizontal scroller\n\tvar width float32\n\tpos := len(s.items) - 1\n\tif pos < 0 {\n\t\treturn 0\n\t}\n\tfor {\n\t\titem := s.items[pos]\n\t\twidth += item.GetPanel().Width()\n\t\tif width > s.Width() {\n\t\t\tbreak\n\t\t}\n\t\tpos--\n\t\tif pos < 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn pos + 1\n}",
"func (m *MailTips) GetMaxMessageSize()(*int32) {\n val, err := m.GetBackingStore().Get(\"maxMessageSize\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*int32)\n }\n return nil\n}",
"func (m Logon) GetMaxMessageSize() (v int, err quickfix.MessageRejectError) {\n\tvar f field.MaxMessageSizeField\n\tif err = m.Get(&f); err == nil {\n\t\tv = f.Value()\n\t}\n\treturn\n}",
"func (x *TextDocumentContentChangeEvent_ContentChangeEvent) GetRangeLength() int32 {\n\tif x != nil {\n\t\treturn x.RangeLength\n\t}\n\treturn 0\n}",
"func (s *settings) GetMaxWriteSize() uint {\n\treturn s.wMaxSize\n}",
"func GetMaxColumns() int {\r\n\treturn converter.StrToInt(SysString(MaxColumns))\r\n}",
"func (sc *slidingCounter) Max() float64 {\n\n\tnow := time.Now().Unix()\n\n\tsc.mutex.RLock()\n\tdefer sc.mutex.RUnlock()\n\n\tvar max float64\n\n\tfor timestamp, window := range sc.windows {\n\t\tif timestamp >= now-sc.interval {\n\t\t\tif window.Value > max {\n\t\t\t\tmax = window.Value\n\t\t\t}\n\t\t}\n\t}\n\treturn max\n}",
"func (pb *Bar) SetMaxWidth(maxWidth int) *Bar {\n\tpb.mu.Lock()\n\tpb.maxWidth = maxWidth\n\tpb.mu.Unlock()\n\treturn pb\n}",
"func (b *Buffer) SizeMax() (int, int) {\n\treturn b.maxWidth, b.maxHeight\n}",
"func GxOuterWidth(value float64) *SimpleElement { return newSEFloat(\"gx:outerWidth\", value) }",
"func (gq *Dispatch) MaxLen() int {\n return gq.maxlength\n}",
"func (i *ImageBuf) XMax() int {\n\tret := int(C.ImageBuf_xmax(i.ptr))\n\truntime.KeepAlive(i)\n\treturn ret\n}",
"func (w *MainWindow) GetSize() (int, int) {\n\treturn w.glfwWindow.GetSize()\n}",
"func (pb *Bar) Width() (width int) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\twidth = defaultBarWidth\n\t\t}\n\t}()\n\tpb.mu.RLock()\n\twidth = pb.width\n\tmaxWidth := pb.maxWidth\n\tpb.mu.RUnlock()\n\tif width <= 0 {\n\t\tvar err error\n\t\tif width, err = terminalWidth(); err != nil {\n\t\t\treturn defaultBarWidth\n\t\t}\n\t}\n\tif maxWidth > 0 && width > maxWidth {\n\t\twidth = maxWidth\n\t}\n\treturn\n}",
"func (w *Window) Maximized() bool {\n\treturn w.maximized\n}",
"func (mw *MaxWriter) Max() int {\n\treturn mw.max\n}",
"func (s *Service) onWindowResize(channel chan os.Signal) {\n\t//stdScr, _ := gc.Init()\n\t//stdScr.ScrollOk(true)\n\t//gc.NewLines(true)\n\tfor {\n\t\t<-channel\n\t\t//gc.StdScr().Clear()\n\t\t//rows, cols := gc.StdScr().MaxYX()\n\t\tcols, rows := GetScreenSize()\n\t\ts.screenRows = rows\n\t\ts.screenCols = cols\n\t\ts.resizeWindows()\n\t\t//gc.End()\n\t\t//gc.Update()\n\t\t//gc.StdScr().Refresh()\n\t}\n}",
"func (win *Window) Height() int {\n\tsize := C.sfRenderWindow_getSize(win.win)\n\treturn int(size.y)\n}",
"func (w *WidgetImplement) FixedHeight() int {\n\treturn w.fixedH\n}",
"func (me XsdGoPkgHasElems_MaxSnippetLines) MaxSnippetLinesDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"2\")\r\n\treturn *x\r\n}",
"func (me XsdGoPkgHasElems_MaxHeight) MaxHeightDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"0\")\r\n\treturn *x\r\n}",
"func (w *Window) Height() int {\n\treturn int(C.ANativeWindow_getHeight(w.cptr()))\n}",
"func (c *Config) MaxHeight() int {\n\tc.Mutex.RLock()\n\tdefer c.Mutex.RUnlock()\n\treturn c.Raw.MaxHeight\n}",
"func (ch *clientSecureChannel) MaxMessageSize() uint32 {\n\treturn ch.maxMessageSize\n}",
"func MaxBytesHandler(h Handler, n int64) Handler {\n\treturn HandlerFunc(func(w ResponseWriter, r *Request) {\n\t\tr2 := *r\n\t\tr2.Body = MaxBytesReader(w, r.Body, n)\n\t\th.ServeHTTP(w, &r2)\n\t})\n}",
"func PopItemWidth() {\n\timgui.PopItemWidth()\n}",
"func (w *WebviewWindow) Size() (width int, height int) {\n\tif w.impl == nil {\n\t\treturn 0, 0\n\t}\n\treturn w.impl.size()\n}",
"func (opts CreateLargeOpts) LengthOfContent() (int64, error) {\n\treturn opts.ContentLength, nil\n}",
"func (reader *Reader) GetMaxRows() int {\n\treturn reader.MaxRows\n}",
"func (self *TraitPixbufAnimation) GetWidth() (return__ int) {\n\tvar __cgo__return__ C.int\n\t__cgo__return__ = C.gdk_pixbuf_animation_get_width(self.CPointer)\n\treturn__ = int(__cgo__return__)\n\treturn\n}",
"func (sf *TWindow) Maximized() bool {\n\treturn sf.maximized\n}",
"func GetWindowText(hwnd syscall.Handle, str *uint16, maxCount int32) (len int32, err error) {\n\tr0, _, e1 := syscall.Syscall(getWindowTextW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(str)), uintptr(maxCount))\n\tlen = int32(r0)\n\tif len == 0 {\n\t\tif e1 != 0 {\n\t\t\terr = error(e1)\n\t\t} else {\n\t\t\terr = syscall.EINVAL\n\t\t}\n\t}\n\treturn\n}",
"func (b *BoundingBox) MaxDim() int {\n\treturn b.LongestAxis()\n}",
"func (w *ScrollWidget) SetMax(max int) {\n\tw.max = max\n\tw.clampCurrent()\n}",
"func (arg1 *UConverter) GetMaxCharSize() int",
"func (s *Stat) MaxConns() int32 {\n\treturn s.s.MaxResources()\n}",
"func (me XsdGoPkgHasAttr_MaxLines_XsdtInt_2) MaxLinesDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"2\")\r\n\treturn *x\r\n}",
"func (f *Frame) MaxLine() int {\n\treturn f.maxlines\n}",
"func (v *Entry) SetMaxWidthChars(nChars int) {\n\tC.gtk_entry_set_max_width_chars(v.native(), C.gint(nChars))\n}",
"func (me XsdGoPkgHasElem_MaxHeight) MaxHeightDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"0\")\r\n\treturn *x\r\n}",
"func (me XsdGoPkgHasElem_MaxSnippetLines) MaxSnippetLinesDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"2\")\r\n\treturn *x\r\n}",
"func (b *BlockSplitterSimple) MaxSize() int64 {\n\treturn b.maxSize\n}",
"func (w *Window) Height() int {\n\treturn w.height\n}",
"func (s *VideoParameters) SetMaxWidth(v string) *VideoParameters {\n\ts.MaxWidth = &v\n\treturn s\n}",
"func (o *Content) GetMaxProcesses() int32 {\n\tif o == nil || o.MaxProcesses.Get() == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.MaxProcesses.Get()\n}",
"func (f *Framebuffer) Height() int { return f.fb.Bounds().Max.Y }",
"func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser {\n\treturn &maxBytesReader{respWriter: w, readCloser: r, bytesRemaining: n}\n}",
"func (fb *FlowBox) GetMinChildrenPerLine() uint {\n\tc := C.gtk_flow_box_get_min_children_per_line(fb.native())\n\treturn uint(c)\n}",
"func (me *XsdGoPkgHasElems_MaxSnippetLines) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElems_MaxSnippetLines; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}"
] | [
"0.7802157",
"0.7460506",
"0.66026855",
"0.6217259",
"0.584643",
"0.57835525",
"0.5716844",
"0.56006247",
"0.5530119",
"0.5475368",
"0.5411298",
"0.538574",
"0.53694135",
"0.53360945",
"0.53360945",
"0.5301836",
"0.52574384",
"0.52144426",
"0.5204811",
"0.51416606",
"0.5120838",
"0.5119741",
"0.5087488",
"0.50335157",
"0.50302064",
"0.5029908",
"0.5024163",
"0.50205815",
"0.49583992",
"0.4925541",
"0.49234462",
"0.49213174",
"0.490938",
"0.49038583",
"0.48488557",
"0.48484865",
"0.48480392",
"0.48331538",
"0.48244518",
"0.48236427",
"0.4796644",
"0.47856373",
"0.47772756",
"0.47754163",
"0.4774625",
"0.47601423",
"0.47505862",
"0.47237784",
"0.47187942",
"0.47128463",
"0.4708767",
"0.46919698",
"0.469095",
"0.46898538",
"0.46743664",
"0.46739495",
"0.46596548",
"0.4649409",
"0.46471497",
"0.46447363",
"0.46384957",
"0.4638242",
"0.46370506",
"0.46347556",
"0.46330276",
"0.46196422",
"0.46136573",
"0.46005213",
"0.45970035",
"0.45960668",
"0.45863986",
"0.45848474",
"0.45819688",
"0.4572614",
"0.45722878",
"0.45718482",
"0.45712623",
"0.45608884",
"0.45580134",
"0.45431814",
"0.45305997",
"0.45272973",
"0.452251",
"0.4521219",
"0.45200998",
"0.45171893",
"0.45162585",
"0.4503006",
"0.44998652",
"0.4498968",
"0.4498721",
"0.4498412",
"0.4495319",
"0.44840777",
"0.44822595",
"0.4474918",
"0.4466484",
"0.44645673",
"0.4449208",
"0.44485673"
] | 0.8826436 | 0 |
SetMaxContentWidth is a wrapper around gtk_scrolled_window_set_max_content_width(). | SetMaxContentWidth — это обертка вокруг gtk_scrolled_window_set_max_content_width(). | func (v *ScrolledWindow) SetMaxContentWidth(width int) {
C.gtk_scrolled_window_set_max_content_width(v.native(), C.gint(width))
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *ScrolledWindow) SetMaxContentHeight(width int) {\n\tC.gtk_scrolled_window_set_max_content_height(v.native(), C.gint(width))\n}",
"func (v *ScrolledWindow) GetMaxContentWidth() int {\n\tc := C.gtk_scrolled_window_get_max_content_width(v.native())\n\treturn int(c)\n}",
"func (v *ScrolledWindow) GetMaxContentHeight() int {\n\tc := C.gtk_scrolled_window_get_max_content_height(v.native())\n\treturn int(c)\n}",
"func SetScrollContentTrackerSize(sa *qtwidgets.QScrollArea) {\n\twgt := sa.Widget()\n\tsa.InheritResizeEvent(func(arg0 *qtgui.QResizeEvent) {\n\t\tosz := arg0.OldSize()\n\t\tnsz := arg0.Size()\n\t\tif false {\n\t\t\tlog.Println(osz.Width(), osz.Height(), nsz.Width(), nsz.Height())\n\t\t}\n\t\tif osz.Width() != nsz.Width() {\n\t\t\twgt.SetMaximumWidth(nsz.Width())\n\t\t}\n\t\t// this.ScrollArea_2.ResizeEvent(arg0)\n\t\targ0.Ignore() // I ignore, you handle it. replace explict call parent's\n\t})\n}",
"func (p *pageT) WidthMax(s string) {\n\tp.Style = css.NewStylesResponsive(p.Style)\n\tp.Style.Desktop.StyleBox.WidthMax = s\n\tp.Style.Mobile.StyleBox.WidthMax = \"calc(100% - 1.2rem)\" // 0.6rem margin-left and -right in mobile view\n}",
"func (v *ScrolledWindow) SetPropagateNaturalWidth(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_width(v.native(), gbool(propagate))\n}",
"func (gr *groupT) WidthMax(s string) {\n\tgr.Style = css.NewStylesResponsive(gr.Style)\n\tgr.Style.Desktop.StyleBox.WidthMax = s\n\tgr.Style.Mobile.StyleBox.WidthMax = \"none\" // => 100% of page - page has margins; replaced desktop max-width\n}",
"func (pb *Bar) SetMaxWidth(maxWidth int) *Bar {\n\tpb.mu.Lock()\n\tpb.maxWidth = maxWidth\n\tpb.mu.Unlock()\n\treturn pb\n}",
"func (w *ScrollWidget) SetMax(max int) {\n\tw.max = max\n\tw.clampCurrent()\n}",
"func (v *Entry) SetMaxWidthChars(nChars int) {\n\tC.gtk_entry_set_max_width_chars(v.native(), C.gint(nChars))\n}",
"func (wg *WidgetImplement) SetFixedWidth(w int) {\n\twg.fixedW = w\n}",
"func (wg *WidgetImplement) SetFixedSize(w, h int) {\n\twg.fixedW = w\n\twg.fixedH = h\n}",
"func (v *ScrolledWindow) SetPropagateNaturalHeight(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_height(v.native(), gbool(propagate))\n}",
"func (win *Window) SetDefaultSize(width, height int) {\n\twin.Candy().Guify(\"gtk_window_set_default_size\", win, width, height)\n}",
"func (c *HostClient) SetMaxConns(newMaxConns int) {\n\tc.connsLock.Lock()\n\tc.MaxConns = newMaxConns\n\tc.connsLock.Unlock()\n}",
"func (s *ItemScroller) autoSize() {\n\n\tif s.maxAutoWidth == 0 && s.maxAutoHeight == 0 {\n\t\treturn\n\t}\n\n\tvar width float32\n\tvar height float32\n\tfor _, item := range s.items {\n\t\tpanel := item.GetPanel()\n\t\tif panel.Width() > width {\n\t\t\twidth = panel.Width()\n\t\t}\n\t\theight += panel.Height()\n\t}\n\n\t// If auto maximum width enabled\n\tif s.maxAutoWidth > 0 {\n\t\tif width <= s.maxAutoWidth {\n\t\t\ts.SetContentWidth(width)\n\t\t}\n\t}\n\t// If auto maximum height enabled\n\tif s.maxAutoHeight > 0 {\n\t\tif height <= s.maxAutoHeight {\n\t\t\ts.SetContentHeight(height)\n\t\t}\n\t}\n}",
"func (w *WidgetBase) SetWidth(width int) {\n\tw.size.X = width\n\tif w.size.X != 0 {\n\t\tw.sizePolicyX = Minimum\n\t} else {\n\t\tw.sizePolicyX = Expanding\n\t}\n}",
"func (w *Window) SetMaximized(maximize bool) {\n\tif maximize == w.maximized {\n\t\treturn\n\t}\n\n\tif maximize {\n\t\tw.origX, w.origY = w.Pos()\n\t\tw.origWidth, w.origHeight = w.Size()\n\t\tw.maximized = true\n\t\tw.SetPos(0, 0)\n\t\twidth, height := ScreenSize()\n\t\tw.SetSize(width, height)\n\t} else {\n\t\tw.maximized = false\n\t\tw.SetPos(w.origX, w.origY)\n\t\tw.SetSize(w.origWidth, w.origHeight)\n\t}\n\tw.ResizeChildren()\n\tw.PlaceChildren()\n}",
"func (win *Window) Maximize() {\n\twin.Candy().Guify(\"gtk_window_maximize\", win)\n}",
"func (sf *TWindow) SetMaximized(maximize bool) {\n\tif maximize == sf.maximized {\n\t\treturn\n\t}\n\n\tif maximize {\n\t\tx, y := sf.pos.Get()\n\t\tsf.posOrig.X().Set(x)\n\t\tsf.posOrig.Y().Set(y)\n\t\tsf.origWidth, sf.origHeight = sf.Size()\n\t\tsf.maximized = true\n\t\tsf.SetPos(0, 0)\n\t\twidth, height := ScreenSize()\n\t\tsf.SetSize(width, height)\n\t} else {\n\t\tsf.maximized = false\n\t\tsf.SetPos(sf.posOrig.GetX(), sf.posOrig.GetY())\n\t\tsf.SetSize(sf.origWidth, sf.origHeight)\n\t}\n\tsf.ResizeChildren()\n\tsf.PlaceChildren()\n}",
"func (win *Window) SetWindowContentScaleCallback(callback WindowContentScaleCallback) WindowContentScaleCallback {\n\tcallbacks, exist := windowCallbacks[win]\n\tif !exist {\n\t\tcallbacks = new(WindowCallbacks)\n\t\twindowCallbacks[win] = callbacks\n\t}\n\n\tpreviousCallback := callbacks.ContentScaleCallback\n\tcallbacks.ContentScaleCallback = callback\n\n\tif callback != nil {\n\t\tC.goSetWindowContentScaleCallback(win.c())\n\t} else {\n\t\tC.goRemoveWindowContentScaleCallback(win.c())\n\t}\n\n\treturn previousCallback\n}",
"func (me XsdGoPkgHasElems_MaxWidth) MaxWidthDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"0\")\r\n\treturn *x\r\n}",
"func (st *Settings) SetMaxWindowSize(size uint32) {\n\tst.windowSize = size\n}",
"func (st *Settings) SetMaxWindowSize(size uint32) {\n\tst.windowSize = size\n}",
"func (wg *WidgetImplement) SetWidth(w int) {\n\twg.w = w\n}",
"func (s *Artwork) SetMaxWidth(v string) *Artwork {\n\ts.MaxWidth = &v\n\treturn s\n}",
"func (me XsdGoPkgHasElem_MaxWidth) MaxWidthDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"0\")\r\n\treturn *x\r\n}",
"func (v *Entry) GetMaxWidthChars() int {\n\tc := C.gtk_entry_get_max_width_chars(v.native())\n\treturn int(c)\n}",
"func (w *WidgetImplement) SetClampWidth(clamp bool) {\n\tw.clamp[0] = clamp\n}",
"func (m *MailTips) SetMaxMessageSize(value *int32)() {\n err := m.GetBackingStore().Set(\"maxMessageSize\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (s *Thumbnails) SetMaxWidth(v string) *Thumbnails {\n\ts.MaxWidth = &v\n\treturn s\n}",
"func (v *TextView) SetBorderWindowSize(tp TextWindowType, size int) {\n\tC.gtk_text_view_set_border_window_size(v.native(), C.GtkTextWindowType(tp), C.gint(size))\n}",
"func (s *ItemScroller) SetAutoWidth(maxWidth float32) {\n\n\ts.maxAutoWidth = maxWidth\n}",
"func getContentWidth(pdf *gofpdf.Fpdf) float64 {\n\tmarginL, _, marginR, _ := pdf.GetMargins()\n\tpageW, _ := pdf.GetPageSize()\n\twidth := pageW - marginL - marginR\n\treturn width\n}",
"func WindowContent(append ...bool) vecty.Markup {\n\treturn AddClass(wContent, append...)\n}",
"func (s *PresetWatermark) SetMaxWidth(v string) *PresetWatermark {\n\ts.MaxWidth = &v\n\treturn s\n}",
"func (win *Window) SetResizable(resizable bool) {\n\twin.Candy().Guify(\"gtk_window_set_resizable\", win, resizable)\n}",
"func (win *Window) ReshowWithInitialSize() {\n\twin.Candy().Guify(\"gtk_window_reshow_with_initial_size\", win)\n}",
"func (wp Wordpiece) SetMaxWordChars(c int) {\n\twp.maxWordChars = c\n}",
"func (client *GatewayClient) SetMaxConns(m int) {\n\tclient.maxOpen = m\n\n}",
"func (v *ScrolledWindow) GetPropagateNaturalWidth() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_width(v.native())\n\treturn gobool(c)\n}",
"func (me *XsdGoPkgHasElems_MaxWidth) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElems_MaxWidth; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}",
"func (s *Service) onWindowResize(channel chan os.Signal) {\n\t//stdScr, _ := gc.Init()\n\t//stdScr.ScrollOk(true)\n\t//gc.NewLines(true)\n\tfor {\n\t\t<-channel\n\t\t//gc.StdScr().Clear()\n\t\t//rows, cols := gc.StdScr().MaxYX()\n\t\tcols, rows := GetScreenSize()\n\t\ts.screenRows = rows\n\t\ts.screenCols = cols\n\t\ts.resizeWindows()\n\t\t//gc.End()\n\t\t//gc.Update()\n\t\t//gc.StdScr().Refresh()\n\t}\n}",
"func (w Widths) MaxWidth() (maxWidth, wideDepth int) {\n\tfor depth, width := range w {\n\t\tif width > maxWidth {\n\t\t\tmaxWidth = width\n\t\t\twideDepth = depth\n\t\t}\n\t}\n\treturn\n}",
"func (s *settings) SetMaxWriteSize(size uint) {\n\ts.wMaxSize = size\n}",
"func maxWidth(no_lines int, widthFromLineNo widthFunc) int {\n\tvar max int\n\tfor i := 0; i < no_lines; i++ {\n\t\tval := widthFromLineNo(i)\n\t\tif val > max {\n\t\t\tmax = val\n\t\t}\n\t}\n\treturn max\n}",
"func (s *VideoParameters) SetMaxWidth(v string) *VideoParameters {\n\ts.MaxWidth = &v\n\treturn s\n}",
"func (w *WidgetImplement) FixedWidth() int {\n\treturn w.fixedW\n}",
"func (st *Settings) MaxWindowSize() uint32 {\n\treturn st.windowSize\n}",
"func (st *Settings) MaxWindowSize() uint32 {\n\treturn st.windowSize\n}",
"func (w *Whisper) SetMaxMessageSize(size uint32) error {\n\tif size > MaxMessageSize {\n\t\treturn fmt.Errorf(\"message size too large [%d>%d]\", size, MaxMessageSize)\n\t}\n\tw.settings.Store(maxMsgSizeIdx, size)\n\treturn nil\n}",
"func (ufc *UIOverlayContainer) SetContent(npp Frame, setup []UILayoutElement) {\n\tif ufc._state != nil {\n\t\tfor _, v := range ufc._state {\n\t\t\tufc.ThisUILayoutElementComponentDetails.Detach(v)\n\t\t}\n\t}\n\tufc._state = setup\n\tufc._framing = npp\n\tufc.ThisUIPanelDetails.Clipping = npp.FyFClipping()\n\tfor _, v := range setup {\n\t\tufc.ThisUILayoutElementComponentDetails.Attach(v)\n\t}\n\tufc.FyLSubelementChanged()\n}",
"func (c *DirentCache) setMaxSize(max uint64) {\n\tc.mu.Lock()\n\tc.maxSize = max\n\tc.maybeShrink()\n\tc.mu.Unlock()\n}",
"func (w *LWindow) MWidth() int32 {\n\treturn w.mWidth\n}",
"func (s *RedisStore) SetMaxLength(l int) {\n\tif l >= 0 {\n\t\ts.maxLength = l\n\t}\n}",
"func (sf *TWindow) SetSizable(sizable bool) {\n\tsf.fixedSize = !sizable\n}",
"func (fb *FlowBox) SetMaxChildrenPerLine(n_children uint) {\n\tC.gtk_flow_box_set_max_children_per_line(fb.native(), C.guint(n_children))\n}",
"func (conn *Conn) SetMaxTopics(max int) {\n\tif max < 1 {\n\t\tmax = 50\n\t}\n\tconn.length = max\n}",
"func (m *WorkbookCommentReply) SetContent(value *string)() {\n m.content = value\n}",
"func (me *XsdGoPkgHasElem_MaxWidth) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElem_MaxWidth; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}",
"func (m Logon) SetMaxMessageSize(v int) {\n\tm.Set(field.NewMaxMessageSize(v))\n}",
"func (_m *RediStore) SetMaxLength(l int) {\n\t_m.Called(l)\n}",
"func (v *Value) MaxWidth(offset, tab int) int {\n\t// simple values do not have tabulations\n\twidth := v.Width\n\tfor l := 0; l < len(v.Tabs); l++ {\n\t\twidth = max(width, v.LineWidth(l, offset, tab))\n\t}\n\treturn width\n}",
"func (me XsdGoPkgHasAttr_MaxLines_XsdtInt_2) MaxLinesDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"2\")\r\n\treturn *x\r\n}",
"func (me XsdGoPkgHasElems_MaxSnippetLines) MaxSnippetLinesDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"2\")\r\n\treturn *x\r\n}",
"func (p *Policy) setMaxBlockSize(ic *interop.Context, args []stackitem.Item) stackitem.Item {\n\tvalue := uint32(toBigInt(args[0]).Int64())\n\tif value > payload.MaxSize {\n\t\tpanic(fmt.Errorf(\"MaxBlockSize cannot be more than the maximum payload size = %d\", payload.MaxSize))\n\t}\n\tok, err := p.checkValidators(ic)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif !ok {\n\t\treturn stackitem.NewBool(false)\n\t}\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\terr = p.setUint32WithKey(ic.DAO, maxBlockSizeKey, value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tp.isValid = false\n\treturn stackitem.NewBool(true)\n}",
"func (g *Grid) SetWidth(w int) { g.Width = w }",
"func (v *DCHttpClient) SetMaxConnsPerHost(size int) {\n\tv.Transport.MaxConnsPerHost = size\n}",
"func (store *RedisStore) SetMaxLength(l int) {\r\n\tif l >= 0 {\r\n\t\tstore.maxLength = l\r\n\t}\r\n}",
"func (win *Window) SetPolicy(allowShrink, allowGrow, autoShrink int) {\n\twin.Candy().Guify(\"gtk_window_set_policy\", win, allowShrink, allowGrow, autoShrink)\n}",
"func MaxOpenConns(moc int) Option {\n\treturn func(o *options) {\n\t\to.maxOpenConns = moc\n\t}\n}",
"func SetMaxDeltaSchemaCount(cnt int64) {\n\tatomic.StoreInt64(&maxDeltaSchemaCount, cnt)\n}",
"func (this *FeedableBuffer) Maximize() {\n\tthis.ExpandTo(this.maxByteCount)\n}",
"func (db *DB) SetMaxOpenConns(n int) {\n\tdb.master.SetMaxOpenConns(n)\n\tfor _, r := range db.readreplicas {\n\t\tr.SetMaxOpenConns(n)\n\t}\n}",
"func (me XsdGoPkgHasElem_MaxSnippetLines) MaxSnippetLinesDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"2\")\r\n\treturn *x\r\n}",
"func (label *LabelWidget) SetContent(content string) {\n\tlabel.content = content\n\tlabel.needsRepaint = true\n}",
"func (t *Terminal) Resize(s fyne.Size) {\n\tif s.Width == t.Size().Width && s.Height == t.Size().Height {\n\t\treturn\n\t}\n\tif s.Width < 20 { // not sure why we get tiny sizes\n\t\treturn\n\t}\n\tt.BaseWidget.Resize(s)\n\tt.content.Resize(s)\n\n\tcellSize := t.guessCellSize()\n\toldRows := int(t.config.Rows)\n\n\tt.config.Columns = uint(math.Floor(float64(s.Width) / float64(cellSize.Width)))\n\tt.config.Rows = uint(math.Floor(float64(s.Height) / float64(cellSize.Height)))\n\tif t.scrollBottom == 0 || t.scrollBottom == oldRows-1 {\n\t\tt.scrollBottom = int(t.config.Rows) - 1\n\t}\n\tt.onConfigure()\n\n\tgo t.updatePTYSize()\n}",
"func (self *CellRenderer) GetFixedSize() (width, height int) {\n\tvar w C.gint\n\tvar h C.gint\n\tC.gtk_cell_renderer_get_fixed_size(self.object, &w, &h)\n\n\twidth = int(w)\n\theight = int(h)\n\treturn\n}",
"func (o AutoscalingPolicyScaleInControlOutput) MaxScaledInReplicas() FixedOrPercentPtrOutput {\n\treturn o.ApplyT(func(v AutoscalingPolicyScaleInControl) *FixedOrPercent { return v.MaxScaledInReplicas }).(FixedOrPercentPtrOutput)\n}",
"func (w *Window) SetSize(width, height int) {\n\tif w.closed {\n\t\treturn\n\t}\n\n\tw.width, w.height = width, height\n\tif w.lockedSize {\n\t\tw.updateSizeHints()\n\t}\n\tw.win.Resize(width, height)\n\treturn\n}",
"func MaxMessageSize(size int64) Option {\n\tif size < 0 {\n\t\tpanic(\"size must be non-negative\")\n\t}\n\treturn func(ws *websocket) {\n\t\tws.options.maxMessageSize = size\n\t}\n}",
"func wmFreeformResize(ctx context.Context, tconn *chrome.TestConn, a *arc.ARC, d *ui.Device) error {\n\tact, err := arc.NewActivity(a, wm.Pkg24, wm.ResizableUnspecifiedActivity)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer act.Close()\n\tif err := act.StartWithDefaultOptions(ctx, tconn); err != nil {\n\t\treturn err\n\t}\n\tdefer act.Stop(ctx, tconn)\n\tif err := wm.WaitUntilActivityIsReady(ctx, tconn, act, d); err != nil {\n\t\treturn err\n\t}\n\n\twindow, err := ash.GetARCAppWindowInfo(ctx, tconn, act.PackageName())\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Resizable apps are launched in maximized in P.\n\tif window.State != ash.WindowStateNormal {\n\t\tif ws, err := ash.SetARCAppWindowState(ctx, tconn, act.PackageName(), ash.WMEventNormal); err != nil {\n\t\t\treturn err\n\t\t} else if ws != ash.WindowStateNormal {\n\t\t\treturn errors.Errorf(\"failed to set window state: got %s, want %s\", ws, ash.WindowStateNormal)\n\t\t}\n\t\tif err := ash.WaitForARCAppWindowState(ctx, tconn, act.PackageName(), ash.WindowStateNormal); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := ash.WaitWindowFinishAnimating(ctx, tconn, window.ID); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tdispMode, err := ash.PrimaryDisplayMode(ctx, tconn)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdispInfo, err := display.GetPrimaryInfo(ctx, tconn)\n\tif err != nil {\n\t\treturn err\n\t}\n\tmaxBounds := coords.ConvertBoundsFromDPToPX(dispInfo.Bounds, dispMode.DeviceScaleFactor)\n\n\tif ws, err := ash.SetARCAppWindowState(ctx, tconn, act.PackageName(), ash.WMEventNormal); err != nil {\n\t\treturn err\n\t} else if ws != ash.WindowStateNormal {\n\t\treturn errors.Errorf(\"failed to set window state: got %s, want %s\", ws, ash.WindowStateNormal)\n\t}\n\n\t// Now we grab the bounds from the restored app, and we try to resize it to its previous right margin.\n\torigBounds, err := act.WindowBounds(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// The -1 is needed to prevent injecting a 
touch event outside bounds.\n\tright := maxBounds.Left + maxBounds.Width - 1\n\ttesting.ContextLog(ctx, \"Resizing app to right margin = \", right)\n\tto := coords.NewPoint(right, origBounds.Top+origBounds.Height/2)\n\tif err := act.ResizeWindow(ctx, tconn, arc.BorderRight, to, 500*time.Millisecond); err != nil {\n\t\treturn err\n\t}\n\n\tbounds, err := act.WindowBounds(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// ResizeWindow() does not guarantee pixel-perfect resizing.\n\t// For this particular test, we are good as long as the window has been resized at least one pixel.\n\tif bounds.Width <= origBounds.Width {\n\t\ttesting.ContextLogf(ctx, \"Original bounds: %+v; resized bounds: %+v\", origBounds, bounds)\n\t\treturn errors.Errorf(\"invalid window width: got %d; want %d > %d\", bounds.Width, bounds.Width, origBounds.Width)\n\t}\n\treturn nil\n}",
"func (canvas *CanvasWidget) SetWidth(width float64) {\n\tcanvas.box.width = width\n\tcanvas.fixedWidth = true\n\tcanvas.RequestReflow()\n}",
"func (p *Page) MustWindowMaximize() *Page {\n\tp.e(p.SetWindow(&proto.BrowserBounds{\n\t\tWindowState: proto.BrowserWindowStateMaximized,\n\t}))\n\treturn p\n}",
"func SetMaxNumberEntries(sz int) {\n\tC.hepevt_set_max_number_entries(C.int(sz))\n}",
"func (o *VolumeInfinitevolAttributesType) SetMaxNamespaceConstituentSize(newValue SizeType) *VolumeInfinitevolAttributesType {\n\to.MaxNamespaceConstituentSizePtr = &newValue\n\treturn o\n}",
"func MaxBytesHandler(h Handler, n int64) Handler {\n\treturn HandlerFunc(func(w ResponseWriter, r *Request) {\n\t\tr2 := *r\n\t\tr2.Body = MaxBytesReader(w, r.Body, n)\n\t\th.ServeHTTP(w, &r2)\n\t})\n}",
"func (m *ItemsMutator) MaxLength(v int) *ItemsMutator {\n\tm.proxy.maxLength = &v\n\treturn m\n}",
"func limitNumClients(f http.HandlerFunc, maxClients int) http.HandlerFunc {\n\tsema := make(chan struct{}, maxClients)\n\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\tsema <- struct{}{}\n\t\tdefer func() { <-sema }()\n\t\tf(w, req)\n\t}\n}",
"func (s *Swarm64) ChangeMaxStart(maxstart float64) {\n\ts.xmaxstart = maxstart\n}",
"func (me *XsdGoPkgHasElems_MaxSnippetLines) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElems_MaxSnippetLines; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}",
"func SetMaxIdleConns(maxIdleConns int) {\n\tclient.SetMaxIdleConns(maxIdleConns)\n}",
"func (_m *SQLDatabase) SetMaxOpenConns(n int) {\n\t_m.Called(n)\n}",
"func MaxFadeExtent(value int) *SimpleElement { return newSEInt(\"maxFadeExtent\", value) }",
"func (opts CreateLargeOpts) LengthOfContent() (int64, error) {\n\treturn opts.ContentLength, nil\n}",
"func SetContent(x, y int, mainc rune, combc []rune, style tcell.Style) {\n\tif !Screen.CanDisplay(mainc, true) {\n\t\tmainc = '�'\n\t}\n\n\tScreen.SetContent(x, y, mainc, combc, style)\n\tif UseFake() && lastCursor.x == x && lastCursor.y == y {\n\t\tlastCursor.r = mainc\n\t\tlastCursor.style = style\n\t\tlastCursor.combc = combc\n\t}\n}",
"func MaxValSize(max int) Option {\n\treturn func(lc *loadingCache) error {\n\t\tlc.maxValueSize = max\n\t\treturn nil\n\t}\n}",
"func (client *GatewayClient) SetMaxIdleConns(m int) {\n\tclient.maxIdle = m\n\n\tpreviousIdleConns := client.idleConn\n\tclient.idleConn = make(chan *tls.Conn, m)\n\ti := 0\n\tfor c := range previousIdleConns {\n\t\tif i < m {\n\t\t\tclient.idleConn <- c\n\t\t\ti++\n\t\t} else {\n\t\t\t// close this idle connection\n\t\t\tclient.connCount--\n\t\t\tc.Close()\n\t\t}\n\t}\n\tclose(previousIdleConns)\n}",
"func (q *Queue) SetMaxLen(maxLen int) {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\tq.maxLen = maxLen\n}",
"func (win *Window) SetMaximizeCallback(callback WindowMaximizeCallback) WindowMaximizeCallback {\n\tcallbacks, exist := windowCallbacks[win]\n\tif !exist {\n\t\tcallbacks = new(WindowCallbacks)\n\t\twindowCallbacks[win] = callbacks\n\t}\n\n\tpreviousCallback := callbacks.MaximizeCallback\n\tcallbacks.MaximizeCallback = callback\n\n\tif callback != nil {\n\t\tC.goSetWindowMaximizeCallback(win.c())\n\t} else {\n\t\tC.goRemoveWindowMaximizeCallback(win.c())\n\t}\n\n\treturn previousCallback\n}"
] | [
"0.7728575",
"0.7647834",
"0.65633065",
"0.6101868",
"0.55851233",
"0.5574874",
"0.55399346",
"0.54410404",
"0.53355515",
"0.5113834",
"0.505512",
"0.49563086",
"0.49242747",
"0.48697284",
"0.48511386",
"0.48052323",
"0.47839496",
"0.47761905",
"0.4729902",
"0.47225168",
"0.47206512",
"0.4715658",
"0.4708544",
"0.4708544",
"0.46923223",
"0.4670486",
"0.46373826",
"0.4600696",
"0.45936435",
"0.4566099",
"0.45402616",
"0.45166832",
"0.45154002",
"0.4506303",
"0.44970232",
"0.44772005",
"0.44523853",
"0.4429479",
"0.4427718",
"0.44258302",
"0.43614694",
"0.4324603",
"0.43105912",
"0.43083516",
"0.4305787",
"0.42898297",
"0.42797884",
"0.42793086",
"0.42738026",
"0.42738026",
"0.42543343",
"0.4252208",
"0.42392388",
"0.42179722",
"0.42045847",
"0.41984144",
"0.41873664",
"0.41622844",
"0.41564888",
"0.4155135",
"0.41538814",
"0.41269583",
"0.41122562",
"0.41044277",
"0.40989605",
"0.4088588",
"0.40814978",
"0.40585077",
"0.4051298",
"0.40488765",
"0.40414363",
"0.40352222",
"0.40274778",
"0.40188313",
"0.4007019",
"0.40029174",
"0.39882478",
"0.39879912",
"0.39864808",
"0.39833748",
"0.39830422",
"0.39762485",
"0.39704978",
"0.39647797",
"0.39646563",
"0.39640293",
"0.39636022",
"0.39630193",
"0.39595956",
"0.39350152",
"0.3932592",
"0.39258474",
"0.3921016",
"0.39208037",
"0.3903169",
"0.39005494",
"0.38840917",
"0.38829747",
"0.38737765",
"0.38736582"
] | 0.8705722 | 0 |
GetMaxContentHeight is a wrapper around gtk_scrolled_window_get_max_content_height(). | GetMaxContentHeight является обёрткой вокруг gtk_scrolled_window_get_max_content_height(). | func (v *ScrolledWindow) GetMaxContentHeight() int {
c := C.gtk_scrolled_window_get_max_content_height(v.native())
return int(c)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *ScrolledWindow) GetMaxContentWidth() int {\n\tc := C.gtk_scrolled_window_get_max_content_width(v.native())\n\treturn int(c)\n}",
"func (v *ScrolledWindow) SetMaxContentHeight(width int) {\n\tC.gtk_scrolled_window_set_max_content_height(v.native(), C.gint(width))\n}",
"func (v *ScrolledWindow) SetMaxContentWidth(width int) {\n\tC.gtk_scrolled_window_set_max_content_width(v.native(), C.gint(width))\n}",
"func (g *GitStatusWidget) GetHeight() int {\n\treturn g.renderer.GetHeight()\n}",
"func (c *Config) MaxHeight() int {\n\tc.Mutex.RLock()\n\tdefer c.Mutex.RUnlock()\n\treturn c.Raw.MaxHeight\n}",
"func (v *Pixbuf) GetHeight() int {\n\treturn int(C.gdk_pixbuf_get_height(v.Native()))\n}",
"func (self *TraitPixbuf) GetHeight() (return__ int) {\n\tvar __cgo__return__ C.int\n\t__cgo__return__ = C.gdk_pixbuf_get_height(self.CPointer)\n\treturn__ = int(__cgo__return__)\n\treturn\n}",
"func (m *Model) GetMaxHeight() int {\n\treturn m.maxHeight\n}",
"func (Empty) MaxHeight(width, height int) int {\n\treturn 1\n}",
"func (panel *Panel) GetContentBody() string {\n\treturn tplBody\n}",
"func (win *Window) Height() int {\n\tsize := C.sfRenderWindow_getSize(win.win)\n\treturn int(size.y)\n}",
"func (me XsdGoPkgHasElems_MaxHeight) MaxHeightDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"0\")\r\n\treturn *x\r\n}",
"func (me XsdGoPkgHasElem_MaxHeight) MaxHeightDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"0\")\r\n\treturn *x\r\n}",
"func (self *TraitPixbufAnimation) GetHeight() (return__ int) {\n\tvar __cgo__return__ C.int\n\t__cgo__return__ = C.gdk_pixbuf_animation_get_height(self.CPointer)\n\treturn__ = int(__cgo__return__)\n\treturn\n}",
"func (w *Window) Height() int {\n\treturn int(C.ANativeWindow_getHeight(w.cptr()))\n}",
"func (e Event) GetResizeHeight() int {\n\treturn int(C.caca_get_event_resize_height(e.Ev))\n}",
"func (w *WidgetImplement) FixedHeight() int {\n\treturn w.fixedH\n}",
"func (rect *PdfRectangle) Height() float64 {\n\treturn math.Abs(rect.Ury - rect.Lly)\n}",
"func (c *Canvas) Height() int {\n\treturn c.maxCorner.Y - c.minCorner.Y + 1\n}",
"func (v *ScrolledWindow) GetPropagateNaturalHeight() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_height(v.native())\n\treturn gobool(c)\n}",
"func (fb *FlowBox) GetMaxChildrenPerLine() uint {\n\tc := C.gtk_flow_box_get_max_children_per_line(fb.native())\n\treturn uint(c)\n}",
"func (w *LWindow) MHeight() int32 {\n\treturn w.mHeight\n}",
"func (f *Framebuffer) Height() int { return f.fb.Bounds().Max.Y }",
"func (me *XsdGoPkgHasElems_MaxHeight) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElems_MaxHeight; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}",
"func (w *Window) Height() int {\n\treturn w.height\n}",
"func (o *os) GetMaxWindowSize() gdnative.Vector2 {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.GetMaxWindowSize()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"get_max_window_size\")\n\n\t// Call the parent method.\n\t// Vector2\n\tretPtr := gdnative.NewEmptyVector2()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewVector2FromPointer(retPtr)\n\treturn ret\n}",
"func (me *XsdGoPkgHasElem_MaxHeight) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElem_MaxHeight; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}",
"func (s *PageLayout) Height() float64 {\n\treturn s.height\n}",
"func (window Window) InnerHeight() int {\n\treturn window.Get(\"innerHeight\").Int()\n}",
"func (w *WidgetImplement) Height() int {\n\treturn w.h\n}",
"func (o *GetMessagesAllOf) GetContent() interface{} {\n\tif o == nil {\n\t\tvar ret interface{}\n\t\treturn ret\n\t}\n\treturn o.Content\n}",
"func (g *Grid) GetHeight() int {\n\treturn g.Height\n}",
"func (r *ImageRef) GetPageHeight() int {\n\treturn vipsGetPageHeight(r.image)\n}",
"func (self *CellRenderer) GetFixedSize() (width, height int) {\n\tvar w C.gint\n\tvar h C.gint\n\tC.gtk_cell_renderer_get_fixed_size(self.object, &w, &h)\n\n\twidth = int(w)\n\theight = int(h)\n\treturn\n}",
"func (r Rectangle) Height() float64 {\n\treturn r.Max.Y - r.Min.Y\n}",
"func (o *TileBounds) GetHeight() int32 {\n\tif o == nil || o.Height == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.Height\n}",
"func (rc Rectangle) Height() int {\n\treturn rc.Bottom - rc.Top\n}",
"func (data *Data) GetCntMinMaxHeight() (int, int, int, error) {\n\tdb, err := data.openDb()\n\tdefer data.closeDb(db)\n\tif err != nil {\n\t\tlog.Printf(\"data.openDb Error : %+v\", err)\n\t\treturn -1, -1, -1, err\n\t}\n\tvar cnt int\n\terr = db.QueryRow(\"SELECT COUNT(hash) FROM headers\").Scan(&cnt)\n\tif err != nil {\n\t\tlog.Printf(\"db.QueryRow Error : %+v\", err)\n\t\treturn -1, -1, -1, err\n\t}\n\tif cnt == 0 {\n\t\treturn cnt, -1, -1, nil\n\t}\n\tvar max int\n\tvar min int\n\terr = db.QueryRow(\"SELECT MIN(height), MAX(height) FROM headers\").Scan(&min, &max)\n\tif err != nil {\n\t\tlog.Printf(\"db.QueryRow Error : %+v\", err)\n\t\treturn -1, -1, -1, err\n\t}\n\treturn cnt, min, max, nil\n}",
"func (m *MailTips) GetMaxMessageSize()(*int32) {\n val, err := m.GetBackingStore().Get(\"maxMessageSize\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*int32)\n }\n return nil\n}",
"func (v *MailView) GetMaxLines() (int, error) {\n\treturn len(v.lines), nil\n}",
"func (m Logon) GetMaxMessageSize() (v int, err quickfix.MessageRejectError) {\n\tvar f field.MaxMessageSizeField\n\tif err = m.Get(&f); err == nil {\n\t\tv = f.Value()\n\t}\n\treturn\n}",
"func (b *BaseElement) GetHeight() int32 {\n\treturn b.h\n}",
"func (o *Content) GetMaxConnsPerProcess() int32 {\n\tif o == nil || o.MaxConnsPerProcess.Get() == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.MaxConnsPerProcess.Get()\n}",
"func (ch *clientSecureChannel) MaxMessageSize() uint32 {\n\treturn ch.maxMessageSize\n}",
"func (o *Content) GetMaxProcesses() int32 {\n\tif o == nil || o.MaxProcesses.Get() == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.MaxProcesses.Get()\n}",
"func (p *PdfiumImplementation) FPDFBitmap_GetHeight(request *requests.FPDFBitmap_GetHeight) (*responses.FPDFBitmap_GetHeight, error) {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tbitmapHandle, err := p.getBitmapHandle(request.Bitmap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\theight := C.FPDFBitmap_GetHeight(bitmapHandle.handle)\n\treturn &responses.FPDFBitmap_GetHeight{\n\t\tHeight: int(height),\n\t}, nil\n}",
"func (p *PdfiumImplementation) FPDF_GetPageHeight(request *requests.FPDF_GetPageHeight) (*responses.FPDF_GetPageHeight, error) {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tpageHandle, err := p.loadPage(request.Page)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\theight := C.FPDF_GetPageHeight(pageHandle.handle)\n\n\treturn &responses.FPDF_GetPageHeight{\n\t\tPage: pageHandle.index,\n\t\tHeight: float64(height),\n\t}, nil\n}",
"func (b *Buffer) SizeMax() (int, int) {\n\treturn b.maxWidth, b.maxHeight\n}",
"func (v *TextView) GetBorderWindowSize(tp TextWindowType) int {\n\treturn int(C.gtk_text_view_get_border_window_size(v.native(), C.GtkTextWindowType(tp)))\n}",
"func (pc PieChart) GetHeight() int {\n\tif pc.Height == 0 {\n\t\treturn DefaultChartWidth\n\t}\n\treturn pc.Height\n}",
"func (b *BBox) MaxY() int64 {\n\treturn int64(b.handle.yMax)\n}",
"func GetContent(url string, timeout uint) ([]byte, error) {\n\tresp, err := GetResp(url, timeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\treturn io.ReadAll(resp.Body)\n}",
"func (r *ImageRef) PageHeight() int {\n\treturn vipsGetPageHeight(r.image)\n}",
"func (msg *Message) GetContent() interface{} {\n\treturn msg.Content\n}",
"func (s *Thumbnails) SetMaxHeight(v string) *Thumbnails {\n\ts.MaxHeight = &v\n\treturn s\n}",
"func (p *Page) GetContent() string {\n\treturn p.Content\n}",
"func (b *Bound) Height() float64 {\n\treturn b.ne.Y() - b.sw.Y()\n}",
"func (st *Settings) MaxWindowSize() uint32 {\n\treturn st.windowSize\n}",
"func (st *Settings) MaxWindowSize() uint32 {\n\treturn st.windowSize\n}",
"func SetScrollContentTrackerSize(sa *qtwidgets.QScrollArea) {\n\twgt := sa.Widget()\n\tsa.InheritResizeEvent(func(arg0 *qtgui.QResizeEvent) {\n\t\tosz := arg0.OldSize()\n\t\tnsz := arg0.Size()\n\t\tif false {\n\t\t\tlog.Println(osz.Width(), osz.Height(), nsz.Width(), nsz.Height())\n\t\t}\n\t\tif osz.Width() != nsz.Width() {\n\t\t\twgt.SetMaximumWidth(nsz.Width())\n\t\t}\n\t\t// this.ScrollArea_2.ResizeEvent(arg0)\n\t\targ0.Ignore() // I ignore, you handle it. replace explict call parent's\n\t})\n}",
"func (s *Artwork) SetMaxHeight(v string) *Artwork {\n\ts.MaxHeight = &v\n\treturn s\n}",
"func GetHeight(ctx Context) (int64, bool) {\n\tval, ok := ctx.Value(contextKeyHeight).(int64)\n\treturn val, ok\n}",
"func Height() uint64 {\n\n\tglobalData.RLock()\n\tdefer globalData.RUnlock()\n\n\treturn globalData.height\n}",
"func (Adapter MockPage) GetContent() (string, error) {\n\treturn Adapter.FakeContent, Adapter.ContentError\n}",
"func (s *SimPDF) Height() float64 {\n\tif s.Page.IsLandscape {\n\t\treturn s.Page.Width\n\t}\n\treturn s.Page.Height\n}",
"func (t *Link) GetHeight() (v int64) {\n\treturn *t.height.nonNegativeInteger\n\n}",
"func (v *Entry) GetMaxWidthChars() int {\n\tc := C.gtk_entry_get_max_width_chars(v.native())\n\treturn int(c)\n}",
"func (r *RepositoryContentResponse) GetContent() *RepositoryContent {\n\tif r == nil {\n\t\treturn nil\n\t}\n\treturn r.Content\n}",
"func GetHeight(hostURL string, hostPort int) *bytes.Buffer {\n\treturn makeGetRequest(\"getheight\", hostURL, hostPort)\n}",
"func (b *BlockSplitterSimple) MaxSize() int64 {\n\treturn b.maxSize\n}",
"func (v *StackPanel) Height() int {\n\tif v.orientation == Vertical {\n\t\th := 0\n\t\tfor _, val := range v.heights {\n\t\t\th += val\n\t\t}\n\t\treturn h\n\t}\n\treturn v.height\n}",
"func NewMaxMessageSize(val int) MaxMessageSizeField {\n\treturn MaxMessageSizeField{quickfix.FIXInt(val)}\n}",
"func GetHeight() int {\n\treturn viper.GetInt(FlagHeight)\n}",
"func (v *Version) GetContent(ctx context.Context) ([]byte, error) {\n\tlock := v.Chart.Space.SpaceManager.Lock.Get(v.Chart.Space.Name(), v.Chart.Name(), v.Number())\n\tif !lock.RLock(v.Chart.Space.SpaceManager.LockTimeout) {\n\t\treturn nil, ErrorLocking.Format(\"version\", v.Chart.Space.Name()+\"/\"+v.Chart.Name()+\"/\"+v.Number())\n\t}\n\tdefer lock.RUnlock()\n\tif err := v.Validate(ctx); err != nil {\n\t\treturn nil, err\n\t}\n\tpath := path.Join(v.Prefix, chartPackageName)\n\tdata, err := v.Chart.Space.SpaceManager.Backend.GetContent(ctx, path)\n\tif err != nil {\n\t\treturn nil, ErrorContentNotFound.Format(v.Prefix)\n\t}\n\treturn data, nil\n}",
"func (cs ClientState) GetLatestHeight() uint64 {\n\treturn uint64(cs.Height)\n}",
"func Height() int {\n\tws, err := getWinsize()\n\tif err != nil {\n\t\treturn -1\n\t}\n\treturn int(ws.Row)\n}",
"func (b *IndexBuilder) ContentSize() uint32 {\n\t// Add the name too so we don't skip building index if we have\n\t// lots of empty files.\n\treturn b.contentEnd + b.nameEnd\n}",
"func (cd *ContinueDecompress) MaxMessageSize() int {\n\treturn cd.maxMessageSize\n}",
"func (t *Text) LinesHeight() int {\n\tpad := t.setter.opts.Padding\n\tif t.size.Y <= 0 {\n\t\treturn 0\n\t}\n\tif t.size.Y-2*pad <= 0 {\n\t\treturn t.size.Y\n\t}\n\ty := pad\n\tfor _, l := range t.lines {\n\t\th := l.h.Round()\n\t\tif y+h > t.size.Y-pad {\n\t\t\tbreak\n\t\t}\n\t\ty += h\n\t}\n\tif h := trailingNewlineHeight(t); h > 0 && y+h <= t.size.Y-pad {\n\t\ty += h\n\t}\n\treturn y + pad\n}",
"func (d *DHCPv4) MaxMessageSize() (uint16, error) {\n\treturn GetUint16(OptionMaximumDHCPMessageSize, d.Options)\n}",
"func (c RelativeConstraint) GetHeight() float32 {\n\treturn c.op(c.parent().GetHeight(), c.constant)\n}",
"func (r *RepositoryContent) GetSize() int {\n\tif r == nil || r.Size == nil {\n\t\treturn 0\n\t}\n\treturn *r.Size\n}",
"func (opts CreateLargeOpts) LengthOfContent() (int64, error) {\n\treturn opts.ContentLength, nil\n}",
"func (api *ContentsAPI) GetContent(path string) (*Contents, error) {\n\treturn api.GetContentByRef(path, \"\")\n}",
"func (s SectionHead) ContentLength() int {\n\treturn int(s.ByteLength) - binary.Size(s)\n}",
"func MaxValSize(max int) Option {\n\treturn func(lc *memoryCache) error {\n\t\tlc.maxValueSize = max\n\t\treturn nil\n\t}\n}",
"func GetContent(url string) ([]byte, error) {\n\tr, err := GetContentReader(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Close()\n\treturn ioutil.ReadAll(r)\n}",
"func (b *OGame) GetPageContent(vals url.Values) ([]byte, error) {\n\treturn b.WithPriority(taskRunner.Normal).GetPageContent(vals)\n}",
"func (k *Kernel) MaxY() int {\n\treturn k.Height\n}",
"func (p *Picture) GetHeight() int {\r\n\treturn p.pixelHeight\r\n}",
"func MaxValSize(max int) Option {\n\treturn func(lc *loadingCache) error {\n\t\tlc.maxValueSize = max\n\t\treturn nil\n\t}\n}",
"func (sc *slidingCounter) Max() float64 {\n\n\tnow := time.Now().Unix()\n\n\tsc.mutex.RLock()\n\tdefer sc.mutex.RUnlock()\n\n\tvar max float64\n\n\tfor timestamp, window := range sc.windows {\n\t\tif timestamp >= now-sc.interval {\n\t\t\tif window.Value > max {\n\t\t\t\tmax = window.Value\n\t\t\t}\n\t\t}\n\t}\n\treturn max\n}",
"func (c *DataCache) GetHeight() uint32 {\n\tc.mtx.RLock()\n\tdefer c.mtx.RUnlock()\n\treturn c.height\n}",
"func (w *Whisper) MaxMessageSize() uint32 {\n\tval, _ := w.settings.Load(maxMsgSizeIdx)\n\treturn val.(uint32)\n}",
"func (h *HTTP) FetchContent() ([]byte, error) {\n\tresp, err := h.FetchResponse()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"HTTP error %d in %s: %s\", resp.StatusCode, h.name, resp.Status)\n\t}\n\n\treturn ioutil.ReadAll(resp.Body)\n}",
"func (o *Content) HasMaxProcesses() bool {\n\tif o != nil && o.MaxProcesses.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func getContentWidth(pdf *gofpdf.Fpdf) float64 {\n\tmarginL, _, marginR, _ := pdf.GetMargins()\n\tpageW, _ := pdf.GetPageSize()\n\twidth := pageW - marginL - marginR\n\treturn width\n}",
"func (cc *ContinueCompress) MaxMessageSize() int {\n\treturn cc.maxMessageSize\n}",
"func (a *Animation) GetHeight() float64 {\n\tif a == nil || a.Height == nil {\n\t\treturn 0.0\n\t}\n\treturn *a.Height\n}",
"func (f *Face) Height() int {\n\treturn f.ypixels(int(f.handle.height))\n}"
] | [
"0.80224156",
"0.738089",
"0.6511789",
"0.575716",
"0.5734719",
"0.5732532",
"0.5663143",
"0.55172205",
"0.54255056",
"0.5353016",
"0.53288096",
"0.5287494",
"0.5258306",
"0.5254579",
"0.5203907",
"0.5163954",
"0.5143804",
"0.51205325",
"0.5093301",
"0.50751376",
"0.5039852",
"0.50293154",
"0.50232494",
"0.50126517",
"0.49878493",
"0.49854538",
"0.4975729",
"0.49506602",
"0.49327448",
"0.49326104",
"0.4931299",
"0.49140152",
"0.49071267",
"0.48915324",
"0.4878953",
"0.48735708",
"0.4835878",
"0.4820916",
"0.48152575",
"0.48136342",
"0.47977498",
"0.47854257",
"0.47692534",
"0.47630528",
"0.4762518",
"0.4749553",
"0.4732659",
"0.47256452",
"0.4719037",
"0.4703761",
"0.47025818",
"0.4687691",
"0.4674435",
"0.46668965",
"0.46535707",
"0.46327305",
"0.46283355",
"0.46245983",
"0.46245983",
"0.46241927",
"0.46175513",
"0.4606927",
"0.46067858",
"0.46007338",
"0.45959717",
"0.4589484",
"0.45881128",
"0.45874497",
"0.45824015",
"0.45693672",
"0.45677027",
"0.456648",
"0.45624888",
"0.4559078",
"0.4555476",
"0.45550692",
"0.45520267",
"0.45516488",
"0.45427206",
"0.4541161",
"0.4525813",
"0.45241705",
"0.4509971",
"0.4508825",
"0.4503322",
"0.4502993",
"0.44971365",
"0.44891065",
"0.44855464",
"0.4483766",
"0.4479248",
"0.4479159",
"0.44737756",
"0.4471669",
"0.44559172",
"0.44490772",
"0.44457135",
"0.44376856",
"0.44373876",
"0.44294703"
] | 0.9018182 | 0 |
SetMaxContentHeight is a wrapper around gtk_scrolled_window_set_max_content_height(). | SetMaxContentHeight — это обертка вокруг gtk_scrolled_window_set_max_content_height(). | func (v *ScrolledWindow) SetMaxContentHeight(width int) {
C.gtk_scrolled_window_set_max_content_height(v.native(), C.gint(width))
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *ScrolledWindow) SetMaxContentWidth(width int) {\n\tC.gtk_scrolled_window_set_max_content_width(v.native(), C.gint(width))\n}",
"func (v *ScrolledWindow) GetMaxContentHeight() int {\n\tc := C.gtk_scrolled_window_get_max_content_height(v.native())\n\treturn int(c)\n}",
"func (v *ScrolledWindow) GetMaxContentWidth() int {\n\tc := C.gtk_scrolled_window_get_max_content_width(v.native())\n\treturn int(c)\n}",
"func (w *ScrollWidget) SetMax(max int) {\n\tw.max = max\n\tw.clampCurrent()\n}",
"func SetScrollContentTrackerSize(sa *qtwidgets.QScrollArea) {\n\twgt := sa.Widget()\n\tsa.InheritResizeEvent(func(arg0 *qtgui.QResizeEvent) {\n\t\tosz := arg0.OldSize()\n\t\tnsz := arg0.Size()\n\t\tif false {\n\t\t\tlog.Println(osz.Width(), osz.Height(), nsz.Width(), nsz.Height())\n\t\t}\n\t\tif osz.Width() != nsz.Width() {\n\t\t\twgt.SetMaximumWidth(nsz.Width())\n\t\t}\n\t\t// this.ScrollArea_2.ResizeEvent(arg0)\n\t\targ0.Ignore() // I ignore, you handle it. replace explict call parent's\n\t})\n}",
"func (v *ScrolledWindow) SetPropagateNaturalHeight(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_height(v.native(), gbool(propagate))\n}",
"func (st *Settings) SetMaxWindowSize(size uint32) {\n\tst.windowSize = size\n}",
"func (st *Settings) SetMaxWindowSize(size uint32) {\n\tst.windowSize = size\n}",
"func (w *Window) SetMaximized(maximize bool) {\n\tif maximize == w.maximized {\n\t\treturn\n\t}\n\n\tif maximize {\n\t\tw.origX, w.origY = w.Pos()\n\t\tw.origWidth, w.origHeight = w.Size()\n\t\tw.maximized = true\n\t\tw.SetPos(0, 0)\n\t\twidth, height := ScreenSize()\n\t\tw.SetSize(width, height)\n\t} else {\n\t\tw.maximized = false\n\t\tw.SetPos(w.origX, w.origY)\n\t\tw.SetSize(w.origWidth, w.origHeight)\n\t}\n\tw.ResizeChildren()\n\tw.PlaceChildren()\n}",
"func (w *WidgetBase) SetHeight(height int) {\n\tw.size.Y = height\n\tif w.size.Y != 0 {\n\t\tw.sizePolicyY = Minimum\n\t} else {\n\t\tw.sizePolicyY = Expanding\n\t}\n}",
"func (win *Window) Maximize() {\n\twin.Candy().Guify(\"gtk_window_maximize\", win)\n}",
"func (sf *TWindow) SetMaximized(maximize bool) {\n\tif maximize == sf.maximized {\n\t\treturn\n\t}\n\n\tif maximize {\n\t\tx, y := sf.pos.Get()\n\t\tsf.posOrig.X().Set(x)\n\t\tsf.posOrig.Y().Set(y)\n\t\tsf.origWidth, sf.origHeight = sf.Size()\n\t\tsf.maximized = true\n\t\tsf.SetPos(0, 0)\n\t\twidth, height := ScreenSize()\n\t\tsf.SetSize(width, height)\n\t} else {\n\t\tsf.maximized = false\n\t\tsf.SetPos(sf.posOrig.GetX(), sf.posOrig.GetY())\n\t\tsf.SetSize(sf.origWidth, sf.origHeight)\n\t}\n\tsf.ResizeChildren()\n\tsf.PlaceChildren()\n}",
"func (Empty) MaxHeight(width, height int) int {\n\treturn 1\n}",
"func (m *MailTips) SetMaxMessageSize(value *int32)() {\n err := m.GetBackingStore().Set(\"maxMessageSize\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (me XsdGoPkgHasElems_MaxHeight) MaxHeightDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"0\")\r\n\treturn *x\r\n}",
"func MaxValSize(max int) Option {\n\treturn func(lc *memoryCache) error {\n\t\tlc.maxValueSize = max\n\t\treturn nil\n\t}\n}",
"func (w *WidgetImplement) SetClampHeight(clamp bool) {\n\tw.clamp[1] = clamp\n}",
"func (me XsdGoPkgHasElem_MaxHeight) MaxHeightDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"0\")\r\n\treturn *x\r\n}",
"func (c *HostClient) SetMaxConns(newMaxConns int) {\n\tc.connsLock.Lock()\n\tc.MaxConns = newMaxConns\n\tc.connsLock.Unlock()\n}",
"func (w *WidgetImplement) SetFixedHeight(h int) {\n\tw.fixedH = h\n}",
"func (c *Config) MaxHeight() int {\n\tc.Mutex.RLock()\n\tdefer c.Mutex.RUnlock()\n\treturn c.Raw.MaxHeight\n}",
"func MaxValSize(max int) Option {\n\treturn func(lc *loadingCache) error {\n\t\tlc.maxValueSize = max\n\t\treturn nil\n\t}\n}",
"func (t *Textarea) SetHeight(val int) {\n\tt.Call(\"setAttribute\", \"style\", \"height:\"+strconv.Itoa(val)+\"px\")\n}",
"func (s *ItemScroller) SetAutoHeight(maxHeight float32) {\n\n\ts.maxAutoHeight = maxHeight\n}",
"func NewMaxMessageSize(val int) MaxMessageSizeField {\n\treturn MaxMessageSizeField{quickfix.FIXInt(val)}\n}",
"func (w *WidgetImplement) SetHeight(h int) {\n\tw.h = h\n}",
"func (me *XsdGoPkgHasElems_MaxHeight) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElems_MaxHeight; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}",
"func MaxValSize(max int) Option {\n\treturn func(lc cacheWithOpts) error {\n\t\treturn lc.setMaxValSize(max)\n\t}\n}",
"func (wg *WidgetImplement) SetFixedSize(w, h int) {\n\twg.fixedW = w\n\twg.fixedH = h\n}",
"func (m Logon) SetMaxMessageSize(v int) {\n\tm.Set(field.NewMaxMessageSize(v))\n}",
"func (win *Window) SetDefaultSize(width, height int) {\n\twin.Candy().Guify(\"gtk_window_set_default_size\", win, width, height)\n}",
"func MaxBytes(m int64) optionSetter {\n\treturn func(o *options) error {\n\t\to.maxBytes = m\n\t\treturn nil\n\t}\n}",
"func (c *DirentCache) setMaxSize(max uint64) {\n\tc.mu.Lock()\n\tc.maxSize = max\n\tc.maybeShrink()\n\tc.mu.Unlock()\n}",
"func (s *Thumbnails) SetMaxHeight(v string) *Thumbnails {\n\ts.MaxHeight = &v\n\treturn s\n}",
"func (w *Whisper) SetMaxMessageSize(size uint32) error {\n\tif size > MaxMessageSize {\n\t\treturn fmt.Errorf(\"message size too large [%d>%d]\", size, MaxMessageSize)\n\t}\n\tw.settings.Store(maxMsgSizeIdx, size)\n\treturn nil\n}",
"func (s *Artwork) SetMaxHeight(v string) *Artwork {\n\ts.MaxHeight = &v\n\treturn s\n}",
"func (me *XsdGoPkgHasElem_MaxHeight) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElem_MaxHeight; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}",
"func (win *Window) SetWindowContentScaleCallback(callback WindowContentScaleCallback) WindowContentScaleCallback {\n\tcallbacks, exist := windowCallbacks[win]\n\tif !exist {\n\t\tcallbacks = new(WindowCallbacks)\n\t\twindowCallbacks[win] = callbacks\n\t}\n\n\tpreviousCallback := callbacks.ContentScaleCallback\n\tcallbacks.ContentScaleCallback = callback\n\n\tif callback != nil {\n\t\tC.goSetWindowContentScaleCallback(win.c())\n\t} else {\n\t\tC.goRemoveWindowContentScaleCallback(win.c())\n\t}\n\n\treturn previousCallback\n}",
"func (win *Window) SetMaximizeCallback(callback WindowMaximizeCallback) WindowMaximizeCallback {\n\tcallbacks, exist := windowCallbacks[win]\n\tif !exist {\n\t\tcallbacks = new(WindowCallbacks)\n\t\twindowCallbacks[win] = callbacks\n\t}\n\n\tpreviousCallback := callbacks.MaximizeCallback\n\tcallbacks.MaximizeCallback = callback\n\n\tif callback != nil {\n\t\tC.goSetWindowMaximizeCallback(win.c())\n\t} else {\n\t\tC.goRemoveWindowMaximizeCallback(win.c())\n\t}\n\n\treturn previousCallback\n}",
"func (fb *FlowBox) SetMaxChildrenPerLine(n_children uint) {\n\tC.gtk_flow_box_set_max_children_per_line(fb.native(), C.guint(n_children))\n}",
"func (canvas *CanvasWidget) SetHeight(height float64) {\n\tcanvas.box.height = height\n\tcanvas.fixedHeight = true\n\tcanvas.RequestReflow()\n}",
"func (st *Settings) MaxWindowSize() uint32 {\n\treturn st.windowSize\n}",
"func (st *Settings) MaxWindowSize() uint32 {\n\treturn st.windowSize\n}",
"func (s *VideoParameters) SetMaxHeight(v string) *VideoParameters {\n\ts.MaxHeight = &v\n\treturn s\n}",
"func (ufc *UIOverlayContainer) SetContent(npp Frame, setup []UILayoutElement) {\n\tif ufc._state != nil {\n\t\tfor _, v := range ufc._state {\n\t\t\tufc.ThisUILayoutElementComponentDetails.Detach(v)\n\t\t}\n\t}\n\tufc._state = setup\n\tufc._framing = npp\n\tufc.ThisUIPanelDetails.Clipping = npp.FyFClipping()\n\tfor _, v := range setup {\n\t\tufc.ThisUILayoutElementComponentDetails.Attach(v)\n\t}\n\tufc.FyLSubelementChanged()\n}",
"func (hpack *HPACK) SetMaxTableSize(size int) {\n\thpack.maxTableSize = size\n}",
"func (m *Model) GetMaxHeight() int {\n\treturn m.maxHeight\n}",
"func (s *PresetWatermark) SetMaxHeight(v string) *PresetWatermark {\n\ts.MaxHeight = &v\n\treturn s\n}",
"func (w *LWindow) MHeight() int32 {\n\treturn w.mHeight\n}",
"func MaxMessageSize(size int64) Option {\n\tif size < 0 {\n\t\tpanic(\"size must be non-negative\")\n\t}\n\treturn func(ws *websocket) {\n\t\tws.options.maxMessageSize = size\n\t}\n}",
"func (label *LabelWidget) SetContent(content string) {\n\tlabel.content = content\n\tlabel.needsRepaint = true\n}",
"func (e *Element) SetContent(content []byte) {\n\tif len(e.children) > 0 {\n\t\treturn\n\t}\n\te.content = content\n}",
"func (p *Policy) setMaxBlockSize(ic *interop.Context, args []stackitem.Item) stackitem.Item {\n\tvalue := uint32(toBigInt(args[0]).Int64())\n\tif value > payload.MaxSize {\n\t\tpanic(fmt.Errorf(\"MaxBlockSize cannot be more than the maximum payload size = %d\", payload.MaxSize))\n\t}\n\tok, err := p.checkValidators(ic)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif !ok {\n\t\treturn stackitem.NewBool(false)\n\t}\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\terr = p.setUint32WithKey(ic.DAO, maxBlockSizeKey, value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tp.isValid = false\n\treturn stackitem.NewBool(true)\n}",
"func (o AutoscalingPolicyScaleDownControlOutput) MaxScaledDownReplicas() FixedOrPercentPtrOutput {\n\treturn o.ApplyT(func(v AutoscalingPolicyScaleDownControl) *FixedOrPercent { return v.MaxScaledDownReplicas }).(FixedOrPercentPtrOutput)\n}",
"func (client *GatewayClient) SetMaxConns(m int) {\n\tclient.maxOpen = m\n\n}",
"func SetMaxNumberEntries(sz int) {\n\tC.hepevt_set_max_number_entries(C.int(sz))\n}",
"func MaxMsgSize(s int) server.Option {\n\treturn server.SetOption(maxMsgSizeKey{}, s)\n}",
"func (m *WorkbookCommentReply) SetContent(value *string)() {\n m.content = value\n}",
"func (conn *Conn) SetMaxTopics(max int) {\n\tif max < 1 {\n\t\tmax = 50\n\t}\n\tconn.length = max\n}",
"func (w *WidgetImplement) FixedHeight() int {\n\treturn w.fixedH\n}",
"func (c *Card) SetContent(obj fyne.CanvasObject) {\n\tc.Content = obj\n\n\tc.Refresh()\n}",
"func (s *settings) SetMaxWriteSize(size uint) {\n\ts.wMaxSize = size\n}",
"func (o *VolumeInfinitevolAttributesType) SetMaxDataConstituentSize(newValue SizeType) *VolumeInfinitevolAttributesType {\n\to.MaxDataConstituentSizePtr = &newValue\n\treturn o\n}",
"func (s *Server) SetMaxHeaderBytes(b int) {\n\ts.config.MaxHeaderBytes = b\n}",
"func (pb *Bar) SetMaxWidth(maxWidth int) *Bar {\n\tpb.mu.Lock()\n\tpb.maxWidth = maxWidth\n\tpb.mu.Unlock()\n\treturn pb\n}",
"func (o AutoscalingPolicyScaleDownControlResponsePtrOutput) MaxScaledDownReplicas() FixedOrPercentResponsePtrOutput {\n\treturn o.ApplyT(func(v *AutoscalingPolicyScaleDownControlResponse) *FixedOrPercentResponse {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.MaxScaledDownReplicas\n\t}).(FixedOrPercentResponsePtrOutput)\n}",
"func (ch *clientSecureChannel) MaxMessageSize() uint32 {\n\treturn ch.maxMessageSize\n}",
"func (s *ClampDirectionOffset) SetMax(max float64) *ClampDirectionOffset {\n\ts.max = max\n\treturn s\n}",
"func (s *ItemScroller) autoSize() {\n\n\tif s.maxAutoWidth == 0 && s.maxAutoHeight == 0 {\n\t\treturn\n\t}\n\n\tvar width float32\n\tvar height float32\n\tfor _, item := range s.items {\n\t\tpanel := item.GetPanel()\n\t\tif panel.Width() > width {\n\t\t\twidth = panel.Width()\n\t\t}\n\t\theight += panel.Height()\n\t}\n\n\t// If auto maximum width enabled\n\tif s.maxAutoWidth > 0 {\n\t\tif width <= s.maxAutoWidth {\n\t\t\ts.SetContentWidth(width)\n\t\t}\n\t}\n\t// If auto maximum height enabled\n\tif s.maxAutoHeight > 0 {\n\t\tif height <= s.maxAutoHeight {\n\t\t\ts.SetContentHeight(height)\n\t\t}\n\t}\n}",
"func MaxCacheSize(max int64) Option {\n\treturn func(lc *memoryCache) error {\n\t\tlc.maxCacheSize = max\n\t\treturn nil\n\t}\n}",
"func (o AutoscalingPolicyScaleDownControlResponseOutput) MaxScaledDownReplicas() FixedOrPercentResponseOutput {\n\treturn o.ApplyT(func(v AutoscalingPolicyScaleDownControlResponse) FixedOrPercentResponse {\n\t\treturn v.MaxScaledDownReplicas\n\t}).(FixedOrPercentResponseOutput)\n}",
"func (s *Slider) Max(max float32) *Slider {\n\ts.max = max\n\treturn s\n}",
"func (s *ItemScroller) setHScrollBar(state bool) {\n\n\t// Visible\n\tif state {\n\t\tvar scrollHeight float32 = 20\n\t\tif s.hscroll == nil {\n\t\t\ts.hscroll = NewHScrollBar(0, 0)\n\t\t\ts.hscroll.SetBorders(1, 0, 0, 0)\n\t\t\ts.hscroll.Subscribe(OnChange, s.onScrollBarEvent)\n\t\t\ts.Panel.Add(s.hscroll)\n\t\t}\n\t\ts.hscroll.SetSize(s.ContentWidth(), scrollHeight)\n\t\ts.hscroll.SetPositionX(0)\n\t\ts.hscroll.SetPositionY(s.ContentHeight() - scrollHeight)\n\t\ts.hscroll.recalc()\n\t\ts.hscroll.SetVisible(true)\n\t\t// Not visible\n\t} else {\n\t\tif s.hscroll != nil {\n\t\t\ts.hscroll.SetVisible(false)\n\t\t}\n\t}\n}",
"func (o *PollersPostParams) SetContent(content *models.Poller20PartialPoller) {\n\to.Content = content\n}",
"func MaxCacheSize(max int64) Option {\n\treturn func(lc cacheWithOpts) error {\n\t\treturn lc.setMaxCacheSize(max)\n\t}\n}",
"func (me XsdGoPkgHasAttr_MaxLines_XsdtInt_2) MaxLinesDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"2\")\r\n\treturn *x\r\n}",
"func (d *PerfData) SetMax(max string) {\n\tif !valueCheck.MatchString(max) {\n\t\tpanic(\"invalid value\")\n\t}\n\td.max = max\n\td.bits = d.bits | PDAT_MAX\n}",
"func NewMockisListenResponse_Content(ctrl *gomock.Controller) *MockisListenResponse_Content {\n\tmock := &MockisListenResponse_Content{ctrl: ctrl}\n\tmock.recorder = &MockisListenResponse_ContentMockRecorder{mock}\n\treturn mock\n}",
"func (rn *RangedNumber) SetMax(max int) *RangedNumber {\n\trn.max = max\n\tif rn.min > rn.max {\n\t\trn.max = rn.min\n\t\trn.min = max\n\t}\n\n\treturn rn\n}",
"func (v *TextView) SetBorderWindowSize(tp TextWindowType, size int) {\n\tC.gtk_text_view_set_border_window_size(v.native(), C.GtkTextWindowType(tp), C.gint(size))\n}",
"func NewContent(\n\tinitialContent lease.ReadProxy,\n\tclock timeutil.Clock) (mc Content) {\n\tmc = &mutableContent{\n\t\tclock: clock,\n\t\tinitialContent: initialContent,\n\t\tdirtyThreshold: initialContent.Size(),\n\t}\n\n\treturn\n}",
"func WindowContent(append ...bool) vecty.Markup {\n\treturn AddClass(wContent, append...)\n}",
"func (s *Set) SetMaxLineSize(i int) {\n\ts.MaxLineSize = i\n}",
"func MaxPageSize(m int) func(*ParquetWriter) error {\n\treturn func(p *ParquetWriter) error {\n\t\tp.max = m\n\t\treturn nil\n\t}\n}",
"func MaxRecvMsgSize(s int) client.Option {\n\treturn func(o *client.Options) {\n\t\tif o.Context == nil {\n\t\t\to.Context = context.Background()\n\t\t}\n\t\to.Context = context.WithValue(o.Context, maxRecvMsgSizeKey{}, s)\n\t}\n}",
"func (d *DHCPv4) MaxMessageSize() (uint16, error) {\n\treturn GetUint16(OptionMaximumDHCPMessageSize, d.Options)\n}",
"func (fb *FlowBox) GetMaxChildrenPerLine() uint {\n\tc := C.gtk_flow_box_get_max_children_per_line(fb.native())\n\treturn uint(c)\n}",
"func (p *PinnedItem) SetContent(content *PinnedItemContent) *PinnedItem {\n\tp.PinnedItemContent = content\n\treturn p\n}",
"func (d *DiscordWebhook) SetContent(content string) {\n\td.Content = content\n}",
"func (upu *UnsavedPostUpdate) SetContent(s string) *UnsavedPostUpdate {\n\tupu.mutation.SetContent(s)\n\treturn upu\n}",
"func SetMaxIdleConns(maxIdleConns int) {\n\tclient.SetMaxIdleConns(maxIdleConns)\n}",
"func (o AutoscalingPolicyScaleInControlOutput) MaxScaledInReplicas() FixedOrPercentPtrOutput {\n\treturn o.ApplyT(func(v AutoscalingPolicyScaleInControl) *FixedOrPercent { return v.MaxScaledInReplicas }).(FixedOrPercentPtrOutput)\n}",
"func (o AutoscalingPolicyScaleInControlResponsePtrOutput) MaxScaledInReplicas() FixedOrPercentResponsePtrOutput {\n\treturn o.ApplyT(func(v *AutoscalingPolicyScaleInControlResponse) *FixedOrPercentResponse {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.MaxScaledInReplicas\n\t}).(FixedOrPercentResponsePtrOutput)\n}",
"func MaxBytesHandler(h Handler, n int64) Handler {\n\treturn HandlerFunc(func(w ResponseWriter, r *Request) {\n\t\tr2 := *r\n\t\tr2.Body = MaxBytesReader(w, r.Body, n)\n\t\th.ServeHTTP(w, &r2)\n\t})\n}",
"func (pu *PostUpdate) SetContent(s string) *PostUpdate {\n\tpu.mutation.SetContent(s)\n\treturn pu\n}",
"func (pu *PostUpdate) SetContent(s string) *PostUpdate {\n\tpu.mutation.SetContent(s)\n\treturn pu\n}",
"func (u *GithubGistUpsertBulk) SetContent(v string) *GithubGistUpsertBulk {\n\treturn u.Update(func(s *GithubGistUpsert) {\n\t\ts.SetContent(v)\n\t})\n}",
"func (o *Content) SetMaxProcesses(v int32) {\n\to.MaxProcesses.Set(&v)\n}",
"func (win *Window) Height() int {\n\tsize := C.sfRenderWindow_getSize(win.win)\n\treturn int(size.y)\n}",
"func (v *ScrolledWindow) GetPropagateNaturalHeight() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_height(v.native())\n\treturn gobool(c)\n}"
] | [
"0.8023439",
"0.7892212",
"0.7063212",
"0.59386873",
"0.5932504",
"0.52396935",
"0.5048182",
"0.5048182",
"0.50402075",
"0.5032491",
"0.5004204",
"0.5002754",
"0.49823657",
"0.49187666",
"0.49154866",
"0.48963708",
"0.48863816",
"0.4872838",
"0.48663074",
"0.48459524",
"0.48410115",
"0.48086682",
"0.48009363",
"0.46959776",
"0.46896234",
"0.46866378",
"0.46846426",
"0.46737525",
"0.4654549",
"0.4651974",
"0.46448636",
"0.46269923",
"0.46149328",
"0.46068418",
"0.45934793",
"0.45927826",
"0.4581558",
"0.4543512",
"0.45262972",
"0.44706896",
"0.445977",
"0.44492963",
"0.44492963",
"0.4439194",
"0.4433498",
"0.4422241",
"0.44036117",
"0.43915558",
"0.43804714",
"0.4354259",
"0.43450323",
"0.43395567",
"0.4335974",
"0.4335628",
"0.43333265",
"0.43314517",
"0.43284938",
"0.43272853",
"0.43270668",
"0.4314515",
"0.43007663",
"0.42851096",
"0.42821103",
"0.42809525",
"0.42642978",
"0.42640007",
"0.42504212",
"0.42502663",
"0.42469904",
"0.42464346",
"0.42333785",
"0.423059",
"0.4225662",
"0.42239913",
"0.421106",
"0.42058417",
"0.42051828",
"0.4204017",
"0.42016026",
"0.41959056",
"0.41942292",
"0.41905728",
"0.41865307",
"0.41853392",
"0.41752148",
"0.4175065",
"0.41672614",
"0.41658175",
"0.41556093",
"0.4153752",
"0.4150305",
"0.41499788",
"0.41405642",
"0.41361314",
"0.41351768",
"0.41351768",
"0.4132325",
"0.41291118",
"0.4128421",
"0.41265848"
] | 0.87742 | 0 |
GetPropagateNaturalWidth is a wrapper around gtk_scrolled_window_get_propagate_natural_width(). | GetPropagateNaturalWidth — это обертка вокруг gtk_scrolled_window_get_propagate_natural_width(). | func (v *ScrolledWindow) GetPropagateNaturalWidth() bool {
c := C.gtk_scrolled_window_get_propagate_natural_width(v.native())
return gobool(c)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *ScrolledWindow) SetPropagateNaturalWidth(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_width(v.native(), gbool(propagate))\n}",
"func (v *ScrolledWindow) GetPropagateNaturalHeight() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_height(v.native())\n\treturn gobool(c)\n}",
"func (v *ScrolledWindow) SetPropagateNaturalHeight(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_height(v.native(), gbool(propagate))\n}",
"func (w *WidgetImplement) FixedWidth() int {\n\treturn w.fixedW\n}",
"func GxOuterWidth(value float64) *SimpleElement { return newSEFloat(\"gx:outerWidth\", value) }",
"func (c RelativeConstraint) GetWidth() float32 {\n\treturn c.op(c.parent().GetWidth(), c.constant)\n}",
"func (window Window) InnerWidth() int {\n\treturn window.Get(\"innerWidth\").Int()\n}",
"func (w *Window) Width() int {\n\treturn int(C.ANativeWindow_getWidth(w.cptr()))\n}",
"func (window Window) OuterWidth() int {\n\treturn window.Get(\"outerWidth\").Int()\n}",
"func (t *Text) Width(takable, taken float64) float64 {\n\tt.UpdateParagraph(takable)\n\tif t.Align != txt.Left {\n\t\treturn t.Paragraph.Width * t.Scl.X\n\t}\n\treturn t.Bounds().W() * t.Scl.X\n}",
"func (v *ScrolledWindow) GetMaxContentWidth() int {\n\tc := C.gtk_scrolled_window_get_max_content_width(v.native())\n\treturn int(c)\n}",
"func (b *Bound) Width() float64 {\n\treturn b.ne.X() - b.sw.X()\n}",
"func (w *sliderElement) MinIntrinsicWidth(base.Length) base.Length {\n\twidth, _ := w.handle.GetPreferredWidth()\n\tif limit := base.FromPixelsX(width); limit < 160*DIP {\n\t\treturn 160 * DIP\n\t}\n\treturn base.FromPixelsX(width)\n}",
"func (win *Window) Width() int {\n\tsize := C.sfRenderWindow_getSize(win.win)\n\treturn int(size.x)\n}",
"func (w *LWindow) MWidth() int32 {\n\treturn w.mWidth\n}",
"func (w *Window) Width() int {\n\treturn w.width\n}",
"func (b *Bound) GeoWidth(haversine ...bool) float64 {\n\tc := b.Center()\n\n\tA := &Point{b.sw[0], c[1]}\n\tB := &Point{b.ne[0], c[1]}\n\n\treturn A.GeoDistanceFrom(B, yesHaversine(haversine))\n}",
"func (t *Link) GetWidth() (v int64) {\n\treturn *t.width.nonNegativeInteger\n\n}",
"func maxWidth(no_lines int, widthFromLineNo widthFunc) int {\n\tvar max int\n\tfor i := 0; i < no_lines; i++ {\n\t\tval := widthFromLineNo(i)\n\t\tif val > max {\n\t\t\tmax = val\n\t\t}\n\t}\n\treturn max\n}",
"func (g *GitStatusWidget) GetWidth() int {\n\treturn g.renderer.GetWidth()\n}",
"func PropValWindow(reply *xproto.GetPropertyReply,\n\terr error) (xproto.Window, error) {\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif reply.Format != 32 {\n\t\treturn 0, fmt.Errorf(\"PropValId: Expected format 32 but got %d\",\n\t\t\treply.Format)\n\t}\n\treturn xproto.Window(xgb.Get32(reply.Value)), nil\n}",
"func (me XsdGoPkgHasElems_Width) WidthDefault() xsdt.Double {\r\n\tvar x = new(xsdt.Double)\r\n\tx.Set(\"1.0\")\r\n\treturn *x\r\n}",
"func (self *TraitPixbufAnimation) GetWidth() (return__ int) {\n\tvar __cgo__return__ C.int\n\t__cgo__return__ = C.gdk_pixbuf_animation_get_width(self.CPointer)\n\treturn__ = int(__cgo__return__)\n\treturn\n}",
"func (self *TraitPixbuf) GetWidth() (return__ int) {\n\tvar __cgo__return__ C.int\n\t__cgo__return__ = C.gdk_pixbuf_get_width(self.CPointer)\n\treturn__ = int(__cgo__return__)\n\treturn\n}",
"func GetApproximateTextWidth(text string, fontSize int) float64 {\n\tsize := 0.0\n\tfSize := float64(fontSize)\n\tif fontSize == 0 {\n\t\tfSize = 14.0 // default font size on ons site\n\t}\n\tspacing := SpaceBetweenCharacters * fSize // allow for some spacing between letters\n\tfor _, runeValue := range text {\n\t\truneSize, ok := characterWidths[runeValue]\n\t\tif ok {\n\t\t\truneSize = fSize * runeSize\n\t\t} else { // unknown character - assume it's quite wide\n\t\t\truneSize = fSize * 0.8\n\t\t}\n\t\tsize += runeSize + spacing\n\t}\n\treturn size\n}",
"func getWidth() int {\n\tstdoutFd := int(wrappedstreams.Stdout().Fd())\n\tstderrFd := int(wrappedstreams.Stderr().Fd())\n\n\tw := getWidthFd(stdoutFd)\n\tif w < 0 {\n\t\tw = getWidthFd(stderrFd)\n\t}\n\n\treturn w\n}",
"func (wg *WidgetImplement) SetFixedWidth(w int) {\n\twg.fixedW = w\n}",
"func (me XsdGoPkgHasElem_Width) WidthDefault() xsdt.Double {\r\n\tvar x = new(xsdt.Double)\r\n\tx.Set(\"1.0\")\r\n\treturn *x\r\n}",
"func (s *State) AdaptiveElWidth() int {\n\treturn s.adaptiveElWidth\n}",
"func (b *BaseElement) GetWidth() int32 {\n\treturn b.w\n}",
"func GetWidgetWidth(w Widget) (result float32) {\n\timgui.PushID(GenAutoID(\"GetWidgetWidthMeasurement\"))\n\tdefer imgui.PopID()\n\n\t// save cursor position before doing anything\n\tcurrentPos := GetCursorPos()\n\n\t// set cursor position to something really out of working space\n\tstartPos := image.Pt(getWidgetWidthTestingSpaceX, getWidgetWidthTestingSpaceY)\n\tSetCursorPos(startPos)\n\n\t// render widget in `dry` mode\n\timgui.PushStyleVarFloat(imgui.StyleVarAlpha, 0)\n\tw.Build()\n\timgui.PopStyleVar()\n\n\t// save widget's width\n\t// check cursor position\n\timgui.SameLine()\n\n\tspacingW, _ := GetItemSpacing()\n\tresult = float32(GetCursorPos().X-startPos.X) - spacingW\n\n\t// reset drawing cursor position\n\tSetCursorPos(currentPos)\n\n\treturn result\n}",
"func (w *WidgetImplement) Width() int {\n\treturn w.w\n}",
"func GxPhysicalWidth(value float64) *SimpleElement { return newSEFloat(\"gx:physicalWidth\", value) }",
"func (v *Pixbuf) GetWidth() int {\n\treturn int(C.gdk_pixbuf_get_width(v.Native()))\n}",
"func (v *TextView) GetBorderWindowSize(tp TextWindowType) int {\n\treturn int(C.gtk_text_view_get_border_window_size(v.native(), C.GtkTextWindowType(tp)))\n}",
"func (v *TextView) GetLeftMargin() int {\n\tc := C.gtk_text_view_get_left_margin(v.native())\n\treturn int(c)\n}",
"func (e Event) GetResizeWidth() int {\n\treturn int(C.caca_get_event_resize_width(e.Ev))\n}",
"func MonoWidth(input string) int {\n\n\ts := norm.NFD.String(input)\n\n\tcount := 0\n\tfor i := 0; i < len(s); {\n\t\td := norm.NFC.NextBoundaryInString(s[i:], true)\n\t\tcount += 1\n\t\ti += d\n\t}\n\n\treturn count\n}",
"func (t *Link) GetUnknownWidth() (v interface{}) {\n\treturn t.width.unknown_\n\n}",
"func (h *BSTHandler) CalculateTreeWidth() {\n\n}",
"func (b *BinP1D) XWidth() float64 {\n\treturn b.xrange.Max - b.xrange.Min\n}",
"func (v *TextView) GetPixelsInsideWrap() int {\n\tc := C.gtk_text_view_get_pixels_inside_wrap(v.native())\n\treturn int(c)\n}",
"func (win *Window) ReshowWithInitialSize() {\n\twin.Candy().Guify(\"gtk_window_reshow_with_initial_size\", win)\n}",
"func (o *os) GetBorderlessWindow() gdnative.Bool {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.GetBorderlessWindow()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"get_borderless_window\")\n\n\t// Call the parent method.\n\t// bool\n\tretPtr := gdnative.NewEmptyBool()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewBoolFromPointer(retPtr)\n\treturn ret\n}",
"func calcWidth(width int) int {\n\tspacing := colSpacing * len(colWidths)\n\tvar staticCols int\n\tfor _, w := range colWidths {\n\t\twidth -= w\n\t\tif w == 0 {\n\t\t\tstaticCols++\n\t\t}\n\t}\n\treturn (width - spacing) / staticCols\n}",
"func (b *BoundingBox2D) width() float64 {\n\n\treturn b.upperCorner.X - b.lowerCorner.X\n}",
"func (w Widths) MaxWidth() (maxWidth, wideDepth int) {\n\tfor depth, width := range w {\n\t\tif width > maxWidth {\n\t\t\tmaxWidth = width\n\t\t\twideDepth = depth\n\t\t}\n\t}\n\treturn\n}",
"func (w *WarpState) WindowSize() warp.Size {\n\treturn w.windowSize\n}",
"func (n *node) width() int32 {\n\t// find two non-nil nodes\n\tfor i := 0; i < 8; i++ {\n\t\tif n.leafs[i] != nil && n.leafs[i].Center() != nil {\n\t\t\tfor j := 0; j < 8; j++ {\n\t\t\t\tif n.leafs[j] != nil && n.leafs[j].Center() != nil {\n\t\t\t\t\tp1, p2 := n.leafs[i].Center(), n.leafs[j].Center()\n\t\t\t\t\t// calculate non-zero difference in one of the dimensions (any)\n\t\t\t\t\txwidth := math.Abs(float64(p1.X - p2.X))\n\t\t\t\t\tif xwidth > 0 {\n\t\t\t\t\t\treturn int32(xwidth)\n\t\t\t\t\t}\n\t\t\t\t\tywidth := math.Abs(float64(p1.Y - p2.Y))\n\t\t\t\t\tif ywidth > 0 {\n\t\t\t\t\t\treturn int32(xwidth)\n\t\t\t\t\t}\n\t\t\t\t\tzwidth := math.Abs(float64(p1.Z - p2.Z))\n\t\t\t\t\tif zwidth > 0 {\n\t\t\t\t\t\treturn int32(xwidth)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn 0\n}",
"func (p *pageT) WidthMax(s string) {\n\tp.Style = css.NewStylesResponsive(p.Style)\n\tp.Style.Desktop.StyleBox.WidthMax = s\n\tp.Style.Mobile.StyleBox.WidthMax = \"calc(100% - 1.2rem)\" // 0.6rem margin-left and -right in mobile view\n}",
"func (o *FilingSentiment) GetConstraining() float32 {\n\tif o == nil || o.Constraining == nil {\n\t\tvar ret float32\n\t\treturn ret\n\t}\n\treturn *o.Constraining\n}",
"func resolveMetricsWindow(logger moira.Logger, trigger moira.TriggerData, pkg NotificationPackage) (int64, int64) {\n\t// resolve default realtime window for any case\n\tnow := time.Now()\n\tdefaultFrom := roundToRetention(now.UTC().Add(-defaultTimeRange).Unix())\n\tdefaultTo := roundToRetention(now.UTC().Unix())\n\t// try to resolve package window, force default realtime window on fail for both local and remote triggers\n\tfrom, to, err := pkg.GetWindow()\n\tif err != nil {\n\t\tlogger.Warning().\n\t\t\tString(\"default_window\", defaultTimeRange.String()).\n\t\t\tError(err).\n\t\t\tMsg(\"Failed to get trigger package window, using default window\")\n\t\treturn defaultFrom, defaultTo\n\t}\n\t// round to the nearest retention to correctly fetch data from redis\n\tfrom = roundToRetention(from)\n\tto = roundToRetention(to)\n\t// package window successfully resolved, test it's wide and realtime metrics window\n\tfromTime, toTime := moira.Int64ToTime(from), moira.Int64ToTime(to)\n\tisWideWindow := toTime.Sub(fromTime).Minutes() >= defaultTimeRange.Minutes()\n\tisRealTimeWindow := now.UTC().Sub(fromTime).Minutes() <= defaultTimeRange.Minutes()\n\t// resolve remote trigger window.\n\t// window is wide: use package window to fetch limited historical data from graphite\n\t// window is not wide: use shifted window to fetch extended historical data from graphite\n\tif trigger.IsRemote {\n\t\tif isWideWindow {\n\t\t\treturn fromTime.Unix(), toTime.Unix()\n\t\t}\n\t\treturn toTime.Add(-defaultTimeRange + defaultTimeShift).Unix(), toTime.Add(defaultTimeShift).Unix()\n\t}\n\t// resolve local trigger window.\n\t// window is realtime: use shifted window to fetch actual data from redis\n\t// window is not realtime: force realtime window\n\tif isRealTimeWindow {\n\t\treturn toTime.Add(-defaultTimeRange + defaultTimeShift).Unix(), toTime.Add(defaultTimeShift).Unix()\n\t}\n\treturn defaultFrom, defaultTo\n}",
"func (rect *PdfRectangle) Width() float64 {\n\treturn math.Abs(rect.Urx - rect.Llx)\n}",
"func (m *ModuleBase) Width(takable, taken float64) float64 {\n\treturn taken\n}",
"func (v *IconView) GetItemWidth() int {\n\treturn int(C.gtk_icon_view_get_item_width(v.native()))\n}",
"func (c *Canvas) Width() int {\n\treturn c.maxCorner.X - c.minCorner.X + 1\n}",
"func Pwidth(wp, cw, defval float64) float64 {\n\tif wp == 0 {\n\t\treturn defval\n\t}\n\treturn (wp / 100) * cw\n}",
"func (g Grid) Width() int {\n\treturn g.width\n}",
"func (dim *Dimensions) Width() int64 {\n\treturn dim.width\n}",
"func GetTerminalWidthFunc(defaultWidth int) TerminalWidthGetter {\n\treturn func() int {\n\t\tterminalWidth, _, err := term.GetSize(int(os.Stdout.Fd()))\n\t\tif err == nil {\n\t\t\treturn terminalWidth\n\t\t}\n\t\treturn defaultWidth\n\t}\n}",
"func (object Object) Width(value int64) Object {\n\treturn object.SimpleValue(as.PropertyWidth, value)\n}",
"func getContentWidth(pdf *gofpdf.Fpdf) float64 {\n\tmarginL, _, marginR, _ := pdf.GetMargins()\n\tpageW, _ := pdf.GetPageSize()\n\twidth := pageW - marginL - marginR\n\treturn width\n}",
"func GetWindowText(hwnd syscall.Handle, str *uint16, maxCount int32) (len int32, err error) {\n\tr0, _, e1 := syscall.Syscall(getWindowTextW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(str)), uintptr(maxCount))\n\tlen = int32(r0)\n\tif len == 0 {\n\t\tif e1 != 0 {\n\t\t\terr = error(e1)\n\t\t} else {\n\t\t\terr = syscall.EINVAL\n\t\t}\n\t}\n\treturn\n}",
"func (l *Ledger) GetIrreversibleSlideWindow() int64 {\n\tdefaultIrreversibleSlideWindow := l.GenesisBlock.GetConfig().GetIrreversibleSlideWindow()\n\treturn defaultIrreversibleSlideWindow\n}",
"func (b *Border) Width(width float32) *Border {\n\tb.width = b.th.TextSize.Scale(width)\n\treturn b\n}",
"func (n *node) confine(adjustment float64) float64 {\n\tconfined := math.Max(1/DiffConfineFactor, adjustment)\n\tconfined = math.Min(DiffConfineFactor, confined)\n\n\treturn confined\n}",
"func (b *Border) Width(width float32) *Border {\n\tb.width = b.Theme.TextSize.Scale(width)\n\treturn b\n}",
"func PopItemWidth() {\n\timgui.PopItemWidth()\n}",
"func nodeWidth(n uint) uint {\n\treturn 2*(n-1) + 1\n}",
"func (v *ScrolledWindow) SetMaxContentWidth(width int) {\n\tC.gtk_scrolled_window_set_max_content_width(v.native(), C.gint(width))\n}",
"func (pb *Bar) Width() (width int) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\twidth = defaultBarWidth\n\t\t}\n\t}()\n\tpb.mu.RLock()\n\twidth = pb.width\n\tmaxWidth := pb.maxWidth\n\tpb.mu.RUnlock()\n\tif width <= 0 {\n\t\tvar err error\n\t\tif width, err = terminalWidth(); err != nil {\n\t\t\treturn defaultBarWidth\n\t\t}\n\t}\n\tif maxWidth > 0 && width > maxWidth {\n\t\twidth = maxWidth\n\t}\n\treturn\n}",
"func (v *Entry) GetMaxWidthChars() int {\n\tc := C.gtk_entry_get_max_width_chars(v.native())\n\treturn int(c)\n}",
"func GET_WHEEL_DELTA_WPARAM(wParam WPARAM) int16 {\n\treturn int16(HIWORD(uint32(wParam)))\n}",
"func (s *PageLayout) Width() float64 {\n\treturn s.width\n}",
"func (gr *groupT) WidthMax(s string) {\n\tgr.Style = css.NewStylesResponsive(gr.Style)\n\tgr.Style.Desktop.StyleBox.WidthMax = s\n\tgr.Style.Mobile.StyleBox.WidthMax = \"none\" // => 100% of page - page has margins; replaced desktop max-width\n}",
"func (a *Animation) GetWidth() float64 {\n\tif a == nil || a.Width == nil {\n\t\treturn 0.0\n\t}\n\treturn *a.Width\n}",
"func (w *WorkerContainer) Conns() uint32 {\n\treturn atomic.LoadUint32(&w.conns)\n}",
"func (s *Stream) incrSendWindow(hdr header, flags uint16) error {\n\tif err := s.processFlags(flags); err != nil {\n\t\treturn err\n\t}\n\n\t// Increase window, unblock a sender\n\tatomic.AddUint32(&s.sendWindow, hdr.Length())\n\tasyncNotify(s.sendNotifyCh)\n\treturn nil\n}",
"func minWindow1(s string, t string) string {\n\tresult := \"\"\n\tletterCount := make(map[byte]int, len(s))\n\tfor i := 0; i < len(t); i++ {\n\t\tletterCount[t[i]]++\n\t}\n\n\tminLen := math.MaxInt64\n\tleftBorder := 0\n\tcnt := 0\n\tfor i := 0; i < len(s); i++ {\n\t\tletterCount[s[i]]--\n\t\tif letterCount[s[i]] >= 0 { cnt++ }\n\n\t\tfor cnt == len(t) {\n\t\t\tif minLen > (i - leftBorder + 1) {\n\t\t\t\tminLen = i - leftBorder + 1\n\t\t\t\tresult = s[leftBorder:i+1]\n\t\t\t}\n\n\t\t\tletterCount[s[leftBorder]]++\n\t\t\tif letterCount[s[leftBorder]] > 0 { cnt-- }\n\n\t\t\tleftBorder++\n\t\t}\n\t}\n\n\treturn result\n}",
"func LevelWidth(n Nodes, l Level) Nodes {\n\tif l < 0 {\n\t\tpanic(\"can't see below\")\n\t}\n\tfor l > 0 {\n\t\tn = (n + 1) / 2\n\t\tl--\n\t}\n\treturn n\n}",
"func (m Model) GetWidth() int {\n\treturn m.Width\n}",
"func Width() int {\n\tws, err := getWinsize()\n\n\tif err != nil {\n\t\treturn -1\n\t}\n\n\treturn int(ws.Col)\n}",
"func (v *StackPanel) Width() int {\n\tif v.orientation == Horizontal {\n\t\tw := 0\n\t\tfor _, val := range v.widths {\n\t\t\tw += val\n\t\t}\n\t\treturn w\n\t}\n\treturn v.width\n}",
"func (n *windowNode) computeWindows() error {\n\trowCount := n.wrappedRenderVals.Len()\n\tif rowCount == 0 {\n\t\treturn nil\n\t}\n\n\twindowCount := len(n.funcs)\n\tacc := n.windowsAcc.Wtxn(n.planner.session)\n\n\twinValSz := uintptr(rowCount) * unsafe.Sizeof([]parser.Datum{})\n\twinAllocSz := uintptr(rowCount*windowCount) * unsafe.Sizeof(parser.Datum(nil))\n\tif err := acc.Grow(int64(winValSz + winAllocSz)); err != nil {\n\t\treturn err\n\t}\n\n\tn.windowValues = make([][]parser.Datum, rowCount)\n\twindowAlloc := make([]parser.Datum, rowCount*windowCount)\n\tfor i := range n.windowValues {\n\t\tn.windowValues[i] = windowAlloc[i*windowCount : (i+1)*windowCount]\n\t}\n\n\tvar scratchBytes []byte\n\tvar scratchDatum []parser.Datum\n\tfor windowIdx, windowFn := range n.funcs {\n\t\tpartitions := make(map[string][]parser.IndexedRow)\n\n\t\tif len(windowFn.partitionIdxs) == 0 {\n\t\t\t// If no partition indexes are included for the window function, all\n\t\t\t// rows are added to the same partition, which need to be pre-allocated.\n\t\t\tsz := int64(uintptr(rowCount) * unsafe.Sizeof(parser.IndexedRow{}))\n\t\t\tif err := acc.Grow(sz); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tpartitions[\"\"] = make([]parser.IndexedRow, rowCount)\n\t\t}\n\n\t\tif n := len(windowFn.partitionIdxs); n > cap(scratchDatum) {\n\t\t\tsz := int64(uintptr(n) * unsafe.Sizeof(parser.Datum(nil)))\n\t\t\tif err := acc.Grow(sz); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tscratchDatum = make([]parser.Datum, n)\n\t\t} else {\n\t\t\tscratchDatum = scratchDatum[:n]\n\t\t}\n\n\t\t// Partition rows into separate partitions based on hash values of the\n\t\t// window function's PARTITION BY attribute.\n\t\t//\n\t\t// TODO(nvanbenschoten) Window functions with the same window definition\n\t\t// can share partition and sorting work.\n\t\t// See Cao et al. 
[http://vldb.org/pvldb/vol5/p1244_yucao_vldb2012.pdf]\n\t\tfor rowI := 0; rowI < rowCount; rowI++ {\n\t\t\trow := n.wrappedRenderVals.At(rowI)\n\t\t\trowWindowDef := n.wrappedWindowDefVals.At(rowI)\n\t\t\tentry := parser.IndexedRow{Idx: rowI, Row: row}\n\t\t\tif len(windowFn.partitionIdxs) == 0 {\n\t\t\t\t// If no partition indexes are included for the window function, all\n\t\t\t\t// rows are added to the same partition.\n\t\t\t\tpartitions[\"\"][rowI] = entry\n\t\t\t} else {\n\t\t\t\t// If the window function has partition indexes, we hash the values of each\n\t\t\t\t// of these indexes for each row, and partition based on this hashed value.\n\t\t\t\tfor i, idx := range windowFn.partitionIdxs {\n\t\t\t\t\tscratchDatum[i] = rowWindowDef[idx]\n\t\t\t\t}\n\n\t\t\t\tencoded, err := sqlbase.EncodeDTuple(scratchBytes, scratchDatum)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tsz := int64(uintptr(len(encoded)) + unsafe.Sizeof(entry))\n\t\t\t\tif err := acc.Grow(sz); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tpartitions[string(encoded)] = append(partitions[string(encoded)], entry)\n\t\t\t\tscratchBytes = encoded[:0]\n\t\t\t}\n\t\t}\n\n\t\t// For each partition, perform necessary sorting based on the window function's\n\t\t// ORDER BY attribute. After this, perform the window function computation for\n\t\t// each tuple and save the result in n.windowValues.\n\t\t//\n\t\t// TODO(nvanbenschoten)\n\t\t// - Investigate inter- and intra-partition parallelism\n\t\t// - Investigate more efficient aggregation techniques\n\t\t// * Removable Cumulative\n\t\t// * Segment Tree\n\t\t// See Leis et al. [http://www.vldb.org/pvldb/vol8/p1058-leis.pdf]\n\t\tfor _, partition := range partitions {\n\t\t\t// TODO(nvanbenschoten) Handle framing here. Right now we only handle the default\n\t\t\t// framing option of RANGE UNBOUNDED PRECEDING. 
With ORDER BY, this sets the frame\n\t\t\t// to be all rows from the partition start up through the current row's last ORDER BY\n\t\t\t// peer. Without ORDER BY, all rows of the partition are included in the window frame,\n\t\t\t// since all rows become peers of the current row. Once we add better framing support,\n\t\t\t// we should flesh this logic out more.\n\t\t\tbuiltin := windowFn.expr.GetWindowConstructor()()\n\n\t\t\t// Since we only support two types of window frames (see TODO above), we only\n\t\t\t// need two possible types of peerGroupChecker's to help determine peer groups\n\t\t\t// for given tuples.\n\t\t\tvar peerGrouper peerGroupChecker\n\t\t\tif windowFn.columnOrdering != nil {\n\t\t\t\t// If an ORDER BY clause is provided, order the partition and use the\n\t\t\t\t// sorter as our peerGroupChecker.\n\t\t\t\tsorter := &partitionSorter{\n\t\t\t\t\trows: partition,\n\t\t\t\t\twindowDefVals: n.wrappedWindowDefVals,\n\t\t\t\t\tordering: windowFn.columnOrdering,\n\t\t\t\t}\n\t\t\t\t// The sort needs to be deterministic because multiple window functions with\n\t\t\t\t// syntactically equivalent ORDER BY clauses in their window definitions\n\t\t\t\t// need to be guaranteed to be evaluated in the same order, even if the\n\t\t\t\t// ORDER BY *does not* uniquely determine an ordering. 
In the future, this\n\t\t\t\t// could be guaranteed by only performing a single pass over a sorted partition\n\t\t\t\t// for functions with syntactically equivalent PARTITION BY and ORDER BY clauses.\n\t\t\t\tsort.Sort(sorter)\n\t\t\t\tpeerGrouper = sorter\n\t\t\t} else {\n\t\t\t\t// If no ORDER BY clause is provided, all rows in the partition are peers.\n\t\t\t\tpeerGrouper = allPeers{}\n\t\t\t}\n\n\t\t\t// Iterate over peer groups within partition using a window frame.\n\t\t\tframe := parser.WindowFrame{\n\t\t\t\tRows: partition,\n\t\t\t\tArgIdxStart: windowFn.argIdxStart,\n\t\t\t\tArgCount: windowFn.argCount,\n\t\t\t\tRowIdx: 0,\n\t\t\t}\n\t\t\tfor frame.RowIdx < len(partition) {\n\t\t\t\t// Compute the size of the current peer group.\n\t\t\t\tframe.FirstPeerIdx = frame.RowIdx\n\t\t\t\tframe.PeerRowCount = 1\n\t\t\t\tfor ; frame.FirstPeerIdx+frame.PeerRowCount < len(partition); frame.PeerRowCount++ {\n\t\t\t\t\tcur := frame.FirstPeerIdx + frame.PeerRowCount\n\t\t\t\t\tif !peerGrouper.InSameGroup(cur, cur-1) {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Perform calculations on each row in the current peer group.\n\t\t\t\tfor ; frame.RowIdx < frame.FirstPeerIdx+frame.PeerRowCount; frame.RowIdx++ {\n\t\t\t\t\tres, err := builtin.Compute(frame)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\t// This may overestimate, because WindowFuncs may perform internal caching.\n\t\t\t\t\tsz := res.Size()\n\t\t\t\t\tif err := acc.Grow(int64(sz)); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\t// Save result into n.windowValues, indexed by original row index.\n\t\t\t\t\tvalRowIdx := partition[frame.RowIdx].Idx\n\t\t\t\t\tn.windowValues[valRowIdx][windowIdx] = res\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Done using window definition values, release memory.\n\tn.wrappedWindowDefVals.Close()\n\tn.wrappedWindowDefVals = nil\n\n\treturn nil\n}",
"func PropValWindows(reply *xproto.GetPropertyReply,\n\terr error) ([]xproto.Window, error) {\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif reply.Format != 32 {\n\t\treturn nil, fmt.Errorf(\"PropValIds: Expected format 32 but got %d\",\n\t\t\treply.Format)\n\t}\n\n\tids := make([]xproto.Window, reply.ValueLen)\n\tvals := reply.Value\n\tfor i := 0; len(vals) >= 4; i++ {\n\t\tids[i] = xproto.Window(xgb.Get32(vals))\n\t\tvals = vals[4:]\n\t}\n\treturn ids, nil\n}",
"func (n *windowNode) populateValues() error {\n\tacc := n.windowsAcc.Wtxn(n.planner.session)\n\trowCount := n.wrappedRenderVals.Len()\n\tn.values.rows = NewRowContainer(\n\t\tn.planner.session.TxnState.makeBoundAccount(), n.values.columns, rowCount,\n\t)\n\n\trow := make(parser.DTuple, len(n.windowRender))\n\tfor i := 0; i < rowCount; i++ {\n\t\twrappedRow := n.wrappedRenderVals.At(i)\n\n\t\tn.curRowIdx = i // Point all windowFuncHolders to the correct row values.\n\t\tcurColIdx := 0\n\t\tcurFnIdx := 0\n\t\tfor j := range row {\n\t\t\tif curWindowRender := n.windowRender[j]; curWindowRender == nil {\n\t\t\t\t// If the windowRender at this index is nil, propagate the datum\n\t\t\t\t// directly from the wrapped planNode. It wasn't changed by windowNode.\n\t\t\t\trow[j] = wrappedRow[curColIdx]\n\t\t\t\tcurColIdx++\n\t\t\t} else {\n\t\t\t\t// If the windowRender is not nil, ignore 0 or more columns from the wrapped\n\t\t\t\t// planNode. These were used as arguments to window functions all beneath\n\t\t\t\t// a single windowRender.\n\t\t\t\t// SELECT rank() over () from t; -> ignore 0 from wrapped values\n\t\t\t\t// SELECT (rank() over () + avg(b) over ()) from t; -> ignore 1 from wrapped values\n\t\t\t\t// SELECT (avg(a) over () + avg(b) over ()) from t; -> ignore 2 from wrapped values\n\t\t\t\tfor ; curFnIdx < len(n.funcs); curFnIdx++ {\n\t\t\t\t\twindowFn := n.funcs[curFnIdx]\n\t\t\t\t\tif windowFn.argIdxStart != curColIdx {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tcurColIdx += windowFn.argCount\n\t\t\t\t}\n\t\t\t\t// Instead, we evaluate the current window render, which depends on at least\n\t\t\t\t// one window function, at the given row.\n\t\t\t\tres, err := curWindowRender.Eval(&n.planner.evalCtx)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\trow[j] = res\n\t\t\t}\n\t\t}\n\n\t\tif _, err := n.values.rows.AddRow(row); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Done using the output of computeWindows, release memory and clear\n\t// 
accounts.\n\tn.wrappedRenderVals.Close()\n\tn.wrappedRenderVals = nil\n\tn.wrappedIndexedVarVals.Close()\n\tn.wrappedIndexedVarVals = nil\n\tn.windowValues = nil\n\tacc.Close()\n\n\treturn nil\n}",
"func (s *SimPDF) Width() float64 {\n\tif s.Page.IsLandscape {\n\t\treturn s.Page.Height\n\t}\n\treturn s.Page.Width\n}",
"func (fb *FlowBox) GetMinChildrenPerLine() uint {\n\tc := C.gtk_flow_box_get_min_children_per_line(fb.native())\n\treturn uint(c)\n}",
"func (v *TextView) GetPixelsAboveLines() int {\n\tc := C.gtk_text_view_get_pixels_above_lines(v.native())\n\treturn int(c)\n}",
"func minSizeEffective(item LayoutItem) Size {\n\tgeometry := item.Geometry()\n\n\tvar s Size\n\tif msh, ok := item.(MinSizer); ok {\n\t\ts = msh.MinSize()\n\t} else if is, ok := item.(IdealSizer); ok {\n\t\ts = is.IdealSize()\n\t}\n\n\tsize := maxSize(geometry.MinSize, s)\n\n\tmax := geometry.MaxSize\n\tif max.Width > 0 && size.Width > max.Width {\n\t\tsize.Width = max.Width\n\t}\n\tif max.Height > 0 && size.Height > max.Height {\n\t\tsize.Height = max.Height\n\t}\n\n\treturn size\n}",
"func (r Rectangle) Width() float64 {\n\treturn r.Max.X - r.Min.X\n}",
"func (s *CountMinSketch) Width() uint {\n\treturn s.w\n}",
"func (o *WorkbookChart) GetWidth() AnyOfnumberstringstring {\n\tif o == nil || o.Width == nil {\n\t\tvar ret AnyOfnumberstringstring\n\t\treturn ret\n\t}\n\treturn *o.Width\n}",
"func minWindow(s string, t string) string {\n \n}",
"func LineWidth(width float32) {\n\tsyscall.Syscall(gpLineWidth, 1, uintptr(math.Float32bits(width)), 0, 0)\n}",
"func (s *Scroll) Window() sparta.Window {\n\treturn s.win\n}",
"func (joint B2WheelJoint) GetLowerLimit() float64 {\n\treturn joint.M_lowerTranslation\n}",
"func (r Rect) W() float64 {\n\treturn r.Max.X - r.Min.X\n}",
"func (r Rect) W() float64 {\n\treturn r.Max.X - r.Min.X\n}",
"func (status *InstanceStatus) PropagateAutoscalingStatus(app *App, hpa *autoscalingv1.HorizontalPodAutoscaler) {\n\t// hpa is nil when autoscaling is disabled, or maxreplicas hasn't been set, or no rules specified by user.\n\tif hpa == nil {\n\t\treturn\n\t}\n\n\tif hpa.Status.CurrentCPUUtilizationPercentage == nil {\n\t\t// hpa is not ready yet\n\t\treturn\n\t}\n\n\t// Set instance status for autoscaling rule\n\tstatus.AutoscalingStatus = []AutoscalingRuleStatus{\n\t\t{\n\t\t\t// Rules is guaranteed to not be empty here because hpa is not nil.\n\t\t\tAppAutoscalingRule: app.Spec.Instances.Autoscaling.Rules[0],\n\t\t\tCurrent: AutoscalingRuleMetricValueStatus{\n\t\t\t\tAverageValue: resource.NewQuantity(int64(*hpa.Status.CurrentCPUUtilizationPercentage), resource.DecimalSI),\n\t\t\t},\n\t\t},\n\t}\n}"
] | [
"0.81382954",
"0.74754125",
"0.6816626",
"0.5524392",
"0.55122465",
"0.53927827",
"0.51792467",
"0.5166886",
"0.510769",
"0.5031366",
"0.48824355",
"0.48683724",
"0.47451147",
"0.47078547",
"0.46988234",
"0.4590461",
"0.45584512",
"0.4545798",
"0.44968885",
"0.44810912",
"0.44612533",
"0.44388798",
"0.4431155",
"0.4413729",
"0.44012958",
"0.43999803",
"0.43937293",
"0.43793204",
"0.4377281",
"0.43678764",
"0.43491295",
"0.43381414",
"0.43372354",
"0.4280027",
"0.4273655",
"0.42420977",
"0.4219347",
"0.42128748",
"0.42045763",
"0.41627103",
"0.41436407",
"0.4129937",
"0.41259852",
"0.41203302",
"0.41198325",
"0.4117093",
"0.41001827",
"0.41000643",
"0.40768763",
"0.407667",
"0.40723222",
"0.4064654",
"0.40597075",
"0.40582484",
"0.40485737",
"0.40481415",
"0.4047917",
"0.4047624",
"0.40473467",
"0.4042705",
"0.4037631",
"0.40070656",
"0.40024036",
"0.39949575",
"0.39871714",
"0.39798802",
"0.397791",
"0.39763787",
"0.39750266",
"0.39748344",
"0.39697734",
"0.3967661",
"0.39539698",
"0.3945641",
"0.39447328",
"0.39325276",
"0.39262688",
"0.392077",
"0.39137018",
"0.3910601",
"0.3910364",
"0.3905198",
"0.3892346",
"0.3889667",
"0.38875258",
"0.38845167",
"0.3871653",
"0.38665715",
"0.38647726",
"0.38509932",
"0.38491723",
"0.38335797",
"0.38316178",
"0.38282216",
"0.38269827",
"0.38267165",
"0.38177115",
"0.38048407",
"0.38048407",
"0.38022012"
] | 0.87654483 | 0 |
SetPropagateNaturalWidth is a wrapper around gtk_scrolled_window_set_propagate_natural_width(). | SetPropagateNaturalWidth является обёрткой вокруг gtk_scrolled_window_set_propagate_natural_width(). | func (v *ScrolledWindow) SetPropagateNaturalWidth(propagate bool) {
C.gtk_scrolled_window_set_propagate_natural_width(v.native(), gbool(propagate))
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *ScrolledWindow) SetPropagateNaturalHeight(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_height(v.native(), gbool(propagate))\n}",
"func (v *ScrolledWindow) GetPropagateNaturalWidth() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_width(v.native())\n\treturn gobool(c)\n}",
"func (v *ScrolledWindow) GetPropagateNaturalHeight() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_height(v.native())\n\treturn gobool(c)\n}",
"func (wg *WidgetImplement) SetFixedWidth(w int) {\n\twg.fixedW = w\n}",
"func (v *ScrolledWindow) SetMaxContentWidth(width int) {\n\tC.gtk_scrolled_window_set_max_content_width(v.native(), C.gint(width))\n}",
"func (w *WidgetImplement) FixedWidth() int {\n\treturn w.fixedW\n}",
"func GxOuterWidth(value float64) *SimpleElement { return newSEFloat(\"gx:outerWidth\", value) }",
"func (win *Window) ReshowWithInitialSize() {\n\twin.Candy().Guify(\"gtk_window_reshow_with_initial_size\", win)\n}",
"func (w *WidgetBase) SetWidth(width int) {\n\tw.size.X = width\n\tif w.size.X != 0 {\n\t\tw.sizePolicyX = Minimum\n\t} else {\n\t\tw.sizePolicyX = Expanding\n\t}\n}",
"func (window Window) InnerWidth() int {\n\treturn window.Get(\"innerWidth\").Int()\n}",
"func (t *Text) Width(takable, taken float64) float64 {\n\tt.UpdateParagraph(takable)\n\tif t.Align != txt.Left {\n\t\treturn t.Paragraph.Width * t.Scl.X\n\t}\n\treturn t.Bounds().W() * t.Scl.X\n}",
"func (w *Window) Width() int {\n\treturn int(C.ANativeWindow_getWidth(w.cptr()))\n}",
"func (v *Entry) SetMaxWidthChars(nChars int) {\n\tC.gtk_entry_set_max_width_chars(v.native(), C.gint(nChars))\n}",
"func (w *sliderElement) MinIntrinsicWidth(base.Length) base.Length {\n\twidth, _ := w.handle.GetPreferredWidth()\n\tif limit := base.FromPixelsX(width); limit < 160*DIP {\n\t\treturn 160 * DIP\n\t}\n\treturn base.FromPixelsX(width)\n}",
"func (window Window) OuterWidth() int {\n\treturn window.Get(\"outerWidth\").Int()\n}",
"func (wg *WidgetImplement) SetWidth(w int) {\n\twg.w = w\n}",
"func (s *ItemScroller) SetAutoWidth(maxWidth float32) {\n\n\ts.maxAutoWidth = maxWidth\n}",
"func (s *Stream) incrSendWindow(hdr header, flags uint16) error {\n\tif err := s.processFlags(flags); err != nil {\n\t\treturn err\n\t}\n\n\t// Increase window, unblock a sender\n\tatomic.AddUint32(&s.sendWindow, hdr.Length())\n\tasyncNotify(s.sendNotifyCh)\n\treturn nil\n}",
"func TestServerZeroWindowAdjust(t *testing.T) {\n\tconn := dial(exitStatusZeroHandler, t)\n\tdefer conn.Close()\n\tsession, err := conn.NewSession()\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to request new session: %v\", err)\n\t}\n\tdefer session.Close()\n\n\tif err := session.Shell(); err != nil {\n\t\tt.Fatalf(\"Unable to execute command: %v\", err)\n\t}\n\n\t// send a bogus zero sized window update\n\tsession.clientChan.sendWindowAdj(0)\n\n\terr = session.Wait()\n\tif err != nil {\n\t\tt.Fatalf(\"expected nil but got %v\", err)\n\t}\n}",
"func (g *Grid) SetWidth(w int) { g.Width = w }",
"func (w *WidgetImplement) SetClampWidth(clamp bool) {\n\tw.clamp[0] = clamp\n}",
"func (c *connectionFlowController) EnsureMinimumWindowSize(inc protocol.ByteCount) {\n\tc.mutex.Lock()\n\tif inc > c.receiveWindowSize {\n\t\tc.logger.Debugf(\"Increasing receive flow control window for the connection to %d kB, in response to stream flow control window increase\", c.receiveWindowSize/(1<<10))\n\t\tnewSize := utils.Min(inc, c.maxReceiveWindowSize)\n\t\tif delta := newSize - c.receiveWindowSize; delta > 0 && c.allowWindowIncrease(delta) {\n\t\t\tc.receiveWindowSize = newSize\n\t\t}\n\t\tc.startNewAutoTuningEpoch(time.Now())\n\t}\n\tc.mutex.Unlock()\n}",
"func (v *TextView) SetBorderWindowSize(tp TextWindowType, size int) {\n\tC.gtk_text_view_set_border_window_size(v.native(), C.GtkTextWindowType(tp), C.gint(size))\n}",
"func WrapWidth(w float64) MeshOption {\n\treturn func(t *Mesh) error {\n\t\tt.renderAutoWrap = true\n\t\tt.renderWrapWidth = w\n\n\t\treturn nil\n\t}\n}",
"func (b *Bar) SetWidth(n int) *Bar {\n\tif n < 2 || isClosed(b.done) {\n\t\treturn b\n\t}\n\tb.widthCh <- n\n\treturn b\n}",
"func (fb *FlowBox) SetMinChildrenPerLine(n_children uint) {\n\tC.gtk_flow_box_set_min_children_per_line(fb.native(), C.guint(n_children))\n}",
"func (win *Window) SetPolicy(allowShrink, allowGrow, autoShrink int) {\n\twin.Candy().Guify(\"gtk_window_set_policy\", win, allowShrink, allowGrow, autoShrink)\n}",
"func (v *ScrolledWindow) GetMaxContentWidth() int {\n\tc := C.gtk_scrolled_window_get_max_content_width(v.native())\n\treturn int(c)\n}",
"func NaturalBreaks(data []float64, nClasses int) []float64 {\n\t// sort data in numerical order, since this is expected by the matrices function\n\tdata = sortData(data)\n\n\t// sanity check\n\tuniq := deduplicate(data)\n\tif nClasses >= len(uniq) {\n\t\treturn uniq\n\t}\n\n\t// get our basic matrices (we only need lower class limits here)\n\tlowerClassLimits, _ := getMatrices(data, nClasses)\n\n\t// extract nClasses out of the computed matrices\n\treturn breaks(data, lowerClassLimits, nClasses)\n}",
"func (win *Window) Width() int {\n\tsize := C.sfRenderWindow_getSize(win.win)\n\treturn int(size.x)\n}",
"func (gr *groupT) WidthMax(s string) {\n\tgr.Style = css.NewStylesResponsive(gr.Style)\n\tgr.Style.Desktop.StyleBox.WidthMax = s\n\tgr.Style.Mobile.StyleBox.WidthMax = \"none\" // => 100% of page - page has margins; replaced desktop max-width\n}",
"func (label *LabelWidget) SetWidth(width float64) {\n\tlabel.box.width = width\n\tlabel.fixedWidth = true\n\tlabel.RequestReflow()\n}",
"func (tv *TextView) ResizeIfNeeded(nwSz image.Point) bool {\n\tif nwSz == tv.LinesSize {\n\t\treturn false\n\t}\n\t// fmt.Printf(\"%v needs resize: %v\\n\", tv.Nm, nwSz)\n\ttv.LinesSize = nwSz\n\tdiff := tv.SetSize()\n\tif !diff {\n\t\t// fmt.Printf(\"%v resize no setsize: %v\\n\", tv.Nm, nwSz)\n\t\treturn false\n\t}\n\tly := tv.ParentLayout()\n\tif ly != nil {\n\t\ttv.SetFlag(int(TextViewInReLayout))\n\t\tly.GatherSizes() // can't call Size2D b/c that resets layout\n\t\tly.Layout2DTree()\n\t\ttv.SetFlag(int(TextViewRenderScrolls))\n\t\ttv.ClearFlag(int(TextViewInReLayout))\n\t\t// fmt.Printf(\"resized: %v\\n\", tv.LayData.AllocSize)\n\t}\n\treturn true\n}",
"func (p *pageT) WidthMax(s string) {\n\tp.Style = css.NewStylesResponsive(p.Style)\n\tp.Style.Desktop.StyleBox.WidthMax = s\n\tp.Style.Mobile.StyleBox.WidthMax = \"calc(100% - 1.2rem)\" // 0.6rem margin-left and -right in mobile view\n}",
"func maxWidth(no_lines int, widthFromLineNo widthFunc) int {\n\tvar max int\n\tfor i := 0; i < no_lines; i++ {\n\t\tval := widthFromLineNo(i)\n\t\tif val > max {\n\t\t\tmax = val\n\t\t}\n\t}\n\treturn max\n}",
"func (w *LWindow) MWidth() int32 {\n\treturn w.mWidth\n}",
"func (w *Wrapper) NaturalJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"NATURAL JOIN\", condition)\n\treturn w\n}",
"func (s *ItemScroller) autoSize() {\n\n\tif s.maxAutoWidth == 0 && s.maxAutoHeight == 0 {\n\t\treturn\n\t}\n\n\tvar width float32\n\tvar height float32\n\tfor _, item := range s.items {\n\t\tpanel := item.GetPanel()\n\t\tif panel.Width() > width {\n\t\t\twidth = panel.Width()\n\t\t}\n\t\theight += panel.Height()\n\t}\n\n\t// If auto maximum width enabled\n\tif s.maxAutoWidth > 0 {\n\t\tif width <= s.maxAutoWidth {\n\t\t\ts.SetContentWidth(width)\n\t\t}\n\t}\n\t// If auto maximum height enabled\n\tif s.maxAutoHeight > 0 {\n\t\tif height <= s.maxAutoHeight {\n\t\t\ts.SetContentHeight(height)\n\t\t}\n\t}\n}",
"func (s *ItemScroller) vRecalc() {\n\n\t// Checks if scroll bar should be visible or not\n\tscroll := false\n\tif s.first > 0 {\n\t\tscroll = true\n\t} else {\n\t\tvar posY float32\n\t\tfor _, item := range s.items[s.first:] {\n\t\t\tposY += item.Height()\n\t\t\tif posY > s.height {\n\t\t\t\tscroll = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\ts.setVScrollBar(scroll)\n\n\t// Compute size of scroll button\n\tif scroll && s.autoButtonSize {\n\t\tvar totalHeight float32\n\t\tfor _, item := range s.items {\n\t\t\t// TODO OPTIMIZATION\n\t\t\t// Break when the view/content proportion becomes smaller than the minimum button size\n\t\t\ttotalHeight += item.Height()\n\t\t}\n\t\ts.vscroll.SetButtonSize(s.height * s.height / totalHeight)\n\t}\n\n\t// Items width\n\twidth := s.ContentWidth()\n\tif scroll {\n\t\twidth -= s.vscroll.Width()\n\t}\n\n\tvar posY float32\n\t// Sets positions of all items\n\tfor pos, ipan := range s.items {\n\t\titem := ipan.GetPanel()\n\t\tif pos < s.first {\n\t\t\titem.SetVisible(false)\n\t\t\tcontinue\n\t\t}\n\t\t// If item is after last visible, sets not visible\n\t\tif posY > s.height {\n\t\t\titem.SetVisible(false)\n\t\t\tcontinue\n\t\t}\n\t\t// Sets item position\n\t\titem.SetVisible(true)\n\t\titem.SetPosition(0, posY)\n\t\tif s.adjustItem {\n\t\t\titem.SetWidth(width)\n\t\t}\n\t\tposY += ipan.Height()\n\t}\n\n\t// Set scroll bar value if recalc was not due by scroll event\n\tif scroll && !s.scrollBarEvent {\n\t\ts.vscroll.SetValue(float32(s.first) / float32(s.maxFirst()))\n\t}\n\ts.scrollBarEvent = false\n}",
"func AlignManually(alignmentType AlignmentType, widget Widget, widgetWidth float32, forceApplyWidth bool) Widget {\n\treturn Custom(func() {\n\t\tspacingX, _ := GetItemSpacing()\n\t\tavailableW, _ := GetAvailableRegion()\n\n\t\tvar dummyX float32\n\n\t\tswitch alignmentType {\n\t\tcase AlignLeft:\n\t\t\twidget.Build()\n\t\t\treturn\n\t\tcase AlignCenter:\n\t\t\tdummyX = (availableW-widgetWidth)/2 - spacingX\n\t\t\tif dummyX < 0 {\n\t\t\t\tdummyX = 0\n\t\t\t}\n\t\tcase AlignRight:\n\t\t\tdummyX = availableW - widgetWidth - spacingX\n\t\t\tif dummyX < 0 {\n\t\t\t\tdummyX = 0\n\t\t\t}\n\t\t}\n\n\t\tDummy(dummyX, 0).Build()\n\n\t\tif forceApplyWidth {\n\t\t\tPushItemWidth(widgetWidth)\n\t\t\tdefer PopItemWidth()\n\t\t}\n\n\t\timgui.SameLine()\n\t\twidget.Build()\n\t})\n}",
"func (p *ControlPanel) RunWindow(opts *widget.RunWindowOptions) error {\n\tvar (\n\t\tnwo *screen.NewWindowOptions\n\t\tt *theme.Theme\n\t)\n\tif opts != nil {\n\t\tnwo = &opts.NewWindowOptions\n\t\tt = &opts.Theme\n\t}\n\tvar err error\n\tp.w, err = p.s.NewWindow(nwo)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer p.w.Release()\n\n\tpaintPending := false\n\n\tgef := gesture.EventFilter{EventDeque: p.w}\n\tfor {\n\t\te := p.w.NextEvent()\n\n\t\tif e = gef.Filter(e); e == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch e := e.(type) {\n\t\tcase lifecycle.Event:\n\t\t\tp.root.OnLifecycleEvent(e)\n\t\t\tif e.To == lifecycle.StageDead {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\tcase gesture.Event, mouse.Event:\n\t\t\tp.root.OnInputEvent(e, image.Point{})\n\n\t\tcase paint.Event:\n\t\t\tctx := &node.PaintContext{\n\t\t\t\tTheme: t,\n\t\t\t\tScreen: p.s,\n\t\t\t\tDrawer: p.w,\n\t\t\t\tSrc2Dst: f64.Aff3{\n\t\t\t\t\t1, 0, 0,\n\t\t\t\t\t0, 1, 0,\n\t\t\t\t},\n\t\t\t}\n\t\t\tif err := p.root.Paint(ctx, image.Point{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tp.w.Publish()\n\t\t\tpaintPending = false\n\n\t\tcase size.Event:\n\t\t\tif dpi := float64(e.PixelsPerPt) * unit.PointsPerInch; dpi != t.GetDPI() {\n\t\t\t\tnewT := new(theme.Theme)\n\t\t\t\tif t != nil {\n\t\t\t\t\t*newT = *t\n\t\t\t\t}\n\t\t\t\tnewT.DPI = dpi\n\t\t\t\tt = newT\n\t\t\t}\n\n\t\t\twindowSize := e.Size()\n\t\t\tp.root.Measure(t, windowSize.X, windowSize.Y)\n\t\t\tp.root.Wrappee().Rect = e.Bounds()\n\t\t\tp.root.Layout(t)\n\t\t\t// TODO: call Mark(node.MarkNeedsPaint)?\n\n\t\tcase panelUpdate:\n\n\t\tcase error:\n\t\t\treturn e\n\t\t}\n\n\t\tif !paintPending && p.root.Wrappee().Marks.NeedsPaint() {\n\t\t\tpaintPending = true\n\t\t\tp.w.Send(paint.Event{})\n\t\t}\n\t}\n}",
"func (o *DcimRacksListParams) SetOuterWidthn(outerWidthn *string) {\n\to.OuterWidthn = outerWidthn\n}",
"func (sd *SelectDataset) NaturalLeftJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalLeftJoinType, table))\n}",
"func WindowSize(s int) ConfigFunc {\n\treturn func(c *Config) {\n\t\tc.WindowSize = s\n\t}\n}",
"func (v *TextView) SetLeftMargin(margin int) {\n\tC.gtk_text_view_set_left_margin(v.native(), C.gint(margin))\n}",
"func ApplyOfficialWindowScaling(storedValue int, rescaleSlope, rescaleIntercept, windowWidth, windowCenter float64, bitsAllocated uint16) uint16 {\n\t// 1: StoredValue to ModalityValue\n\tvar modalityValue float64\n\tif rescaleSlope == 0 {\n\t\t// Via https://dgobbi.github.io/vtk-dicom/doc/api/image_display.html :\n\t\t// For modalities such as ultrasound and MRI that do not have any units,\n\t\t// the RescaleSlope and RescaleIntercept are absent and the Modality\n\t\t// Values are equal to the Stored Values.\n\t\tmodalityValue = float64(storedValue)\n\t} else {\n\t\t// Otherwise, we can apply the rescale slope and intercept to the stored\n\t\t// value.\n\t\tmodalityValue = float64(storedValue)*rescaleSlope + rescaleIntercept\n\t}\n\n\t// 2: ModalityValue to WindowedValue\n\n\t// The key here is that we're using bitsAllocated (e.g., 16 bits) instead of\n\t// bitsStored (e.g., 11 bits)\n\tvar grayLevels float64\n\tswitch bitsAllocated {\n\t// Precompute common cases so you're not exponentiating in the hot path\n\tcase 16:\n\t\tgrayLevels = 65536\n\tcase 8:\n\t\tgrayLevels = 256\n\tdefault:\n\t\tgrayLevels = math.Pow(2, float64(bitsAllocated))\n\t}\n\n\t// We are creating a 16-bit image, so we need to scale the modality value to\n\t// the range of 0-65535. Particularly if we're using 8-bit, then we need to\n\t// scale the 0-255 range to 0-65535, otherwise the images will look black.\n\tsixteenBitCorrection := math.MaxUint16 / uint16(grayLevels-1)\n\n\t// Via https://dgobbi.github.io/vtk-dicom/doc/api/image_display.html : For\n\t// ultrasound (and for 8-bit images in general) the WindowWidth and\n\t// WindowCenter may be absent from the file. 
If absent, they can be assumed\n\t// to be 256 and 128 respectively, which provides an 8-bit identity mapping.\n\t// Here, instead of assuming 8 bit, we use the grayLevels value.\n\tif windowWidth == 0 && windowCenter == 0 {\n\t\twindowWidth = grayLevels\n\t\twindowCenter = grayLevels / 2\n\t}\n\n\tw := windowWidth - 1.0\n\tc := windowCenter - 0.5\n\n\t// Below the lower bound of our window, draw black\n\tif modalityValue <= c-0.5*w {\n\t\treturn 0\n\t}\n\n\t// Above the upper bound of our window, draw white\n\tif modalityValue > c+0.5*w {\n\t\treturn uint16(grayLevels-1.0) * sixteenBitCorrection\n\t}\n\n\t// Within the window, return a scaled value\n\treturn uint16(((modalityValue-c)/w+0.5)*(grayLevels-1.0)) * sixteenBitCorrection\n\n}",
"func (t *Link) SetWidth(v int64) {\n\tt.width = &widthIntermediateType{nonNegativeInteger: &v}\n\n}",
"func OptionSetWidth(s int) Option {\n\treturn func(p *ProgressBar) {\n\t\tp.config.width = s\n\t}\n}",
"func (n *windowNode) populateValues() error {\n\tacc := n.windowsAcc.Wtxn(n.planner.session)\n\trowCount := n.wrappedRenderVals.Len()\n\tn.values.rows = NewRowContainer(\n\t\tn.planner.session.TxnState.makeBoundAccount(), n.values.columns, rowCount,\n\t)\n\n\trow := make(parser.DTuple, len(n.windowRender))\n\tfor i := 0; i < rowCount; i++ {\n\t\twrappedRow := n.wrappedRenderVals.At(i)\n\n\t\tn.curRowIdx = i // Point all windowFuncHolders to the correct row values.\n\t\tcurColIdx := 0\n\t\tcurFnIdx := 0\n\t\tfor j := range row {\n\t\t\tif curWindowRender := n.windowRender[j]; curWindowRender == nil {\n\t\t\t\t// If the windowRender at this index is nil, propagate the datum\n\t\t\t\t// directly from the wrapped planNode. It wasn't changed by windowNode.\n\t\t\t\trow[j] = wrappedRow[curColIdx]\n\t\t\t\tcurColIdx++\n\t\t\t} else {\n\t\t\t\t// If the windowRender is not nil, ignore 0 or more columns from the wrapped\n\t\t\t\t// planNode. These were used as arguments to window functions all beneath\n\t\t\t\t// a single windowRender.\n\t\t\t\t// SELECT rank() over () from t; -> ignore 0 from wrapped values\n\t\t\t\t// SELECT (rank() over () + avg(b) over ()) from t; -> ignore 1 from wrapped values\n\t\t\t\t// SELECT (avg(a) over () + avg(b) over ()) from t; -> ignore 2 from wrapped values\n\t\t\t\tfor ; curFnIdx < len(n.funcs); curFnIdx++ {\n\t\t\t\t\twindowFn := n.funcs[curFnIdx]\n\t\t\t\t\tif windowFn.argIdxStart != curColIdx {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tcurColIdx += windowFn.argCount\n\t\t\t\t}\n\t\t\t\t// Instead, we evaluate the current window render, which depends on at least\n\t\t\t\t// one window function, at the given row.\n\t\t\t\tres, err := curWindowRender.Eval(&n.planner.evalCtx)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\trow[j] = res\n\t\t\t}\n\t\t}\n\n\t\tif _, err := n.values.rows.AddRow(row); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Done using the output of computeWindows, release memory and clear\n\t// 
accounts.\n\tn.wrappedRenderVals.Close()\n\tn.wrappedRenderVals = nil\n\tn.wrappedIndexedVarVals.Close()\n\tn.wrappedIndexedVarVals = nil\n\tn.windowValues = nil\n\tacc.Close()\n\n\treturn nil\n}",
"func (pb *Bar) SetMaxWidth(maxWidth int) *Bar {\n\tpb.mu.Lock()\n\tpb.maxWidth = maxWidth\n\tpb.mu.Unlock()\n\treturn pb\n}",
"func (s *Service) onWindowResize(channel chan os.Signal) {\n\t//stdScr, _ := gc.Init()\n\t//stdScr.ScrollOk(true)\n\t//gc.NewLines(true)\n\tfor {\n\t\t<-channel\n\t\t//gc.StdScr().Clear()\n\t\t//rows, cols := gc.StdScr().MaxYX()\n\t\tcols, rows := GetScreenSize()\n\t\ts.screenRows = rows\n\t\ts.screenCols = cols\n\t\ts.resizeWindows()\n\t\t//gc.End()\n\t\t//gc.Update()\n\t\t//gc.StdScr().Refresh()\n\t}\n}",
"func (status *InstanceStatus) PropagateAutoscalingStatus(app *App, hpa *autoscalingv1.HorizontalPodAutoscaler) {\n\t// hpa is nil when autoscaling is disabled, or maxreplicas hasn't been set, or no rules specified by user.\n\tif hpa == nil {\n\t\treturn\n\t}\n\n\tif hpa.Status.CurrentCPUUtilizationPercentage == nil {\n\t\t// hpa is not ready yet\n\t\treturn\n\t}\n\n\t// Set instance status for autoscaling rule\n\tstatus.AutoscalingStatus = []AutoscalingRuleStatus{\n\t\t{\n\t\t\t// Rules is guaranteed to not be empty here because hpa is not nil.\n\t\t\tAppAutoscalingRule: app.Spec.Instances.Autoscaling.Rules[0],\n\t\t\tCurrent: AutoscalingRuleMetricValueStatus{\n\t\t\t\tAverageValue: resource.NewQuantity(int64(*hpa.Status.CurrentCPUUtilizationPercentage), resource.DecimalSI),\n\t\t\t},\n\t\t},\n\t}\n}",
"func (v *TextView) SetPixelsInsideWrap(px int) {\n\tC.gtk_text_view_set_pixels_inside_wrap(v.native(), C.gint(px))\n}",
"func PropValWindow(reply *xproto.GetPropertyReply,\n\terr error) (xproto.Window, error) {\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif reply.Format != 32 {\n\t\treturn 0, fmt.Errorf(\"PropValId: Expected format 32 but got %d\",\n\t\t\treply.Format)\n\t}\n\treturn xproto.Window(xgb.Get32(reply.Value)), nil\n}",
"func (o *DcimRacksListParams) SetOuterWidthGte(outerWidthGte *string) {\n\to.OuterWidthGte = outerWidthGte\n}",
"func (n *node) confine(adjustment float64) float64 {\n\tconfined := math.Max(1/DiffConfineFactor, adjustment)\n\tconfined = math.Min(DiffConfineFactor, confined)\n\n\treturn confined\n}",
"func (sf *TWindow) SetSizable(sizable bool) {\n\tsf.fixedSize = !sizable\n}",
"func (s *ItemScroller) hRecalc() {\n\n\t// Checks if scroll bar should be visible or not\n\tscroll := false\n\tif s.first > 0 {\n\t\tscroll = true\n\t} else {\n\t\tvar posX float32\n\t\tfor _, item := range s.items[s.first:] {\n\t\t\tposX += item.GetPanel().Width()\n\t\t\tif posX > s.width {\n\t\t\t\tscroll = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\ts.setHScrollBar(scroll)\n\n\t// Compute size of scroll button\n\tif scroll && s.autoButtonSize {\n\t\tvar totalWidth float32\n\t\tfor _, item := range s.items {\n\t\t\t// TODO OPTIMIZATION\n\t\t\t// Break when the view/content proportion becomes smaller than the minimum button size\n\t\t\ttotalWidth += item.GetPanel().Width()\n\t\t}\n\t\ts.hscroll.SetButtonSize(s.width * s.width / totalWidth)\n\t}\n\n\t// Items height\n\theight := s.ContentHeight()\n\tif scroll {\n\t\theight -= s.hscroll.Height()\n\t}\n\n\tvar posX float32\n\t// Sets positions of all items\n\tfor pos, ipan := range s.items {\n\t\titem := ipan.GetPanel()\n\t\t// If item is before first visible, sets not visible\n\t\tif pos < s.first {\n\t\t\titem.SetVisible(false)\n\t\t\tcontinue\n\t\t}\n\t\t// If item is after last visible, sets not visible\n\t\tif posX > s.width {\n\t\t\titem.SetVisible(false)\n\t\t\tcontinue\n\t\t}\n\t\t// Sets item position\n\t\titem.SetVisible(true)\n\t\titem.SetPosition(posX, 0)\n\t\tif s.adjustItem {\n\t\t\titem.SetHeight(height)\n\t\t}\n\t\tposX += item.Width()\n\t}\n\n\t// Set scroll bar value if recalc was not due by scroll event\n\tif scroll && !s.scrollBarEvent {\n\t\ts.hscroll.SetValue(float32(s.first) / float32(s.maxFirst()))\n\t}\n\ts.scrollBarEvent = false\n}",
"func (o *os) CenterWindow() {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.CenterWindow()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"center_window\")\n\n\t// Call the parent method.\n\t// void\n\tretPtr := gdnative.NewEmptyVoid()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n}",
"func setNoWrap(panel gwu.Panel) {\n\tcount := panel.CompsCount()\n\tfor i := count - 1; i >= 0; i-- {\n\t\tpanel.CompAt(i).Style().SetWhiteSpace(gwu.WhiteSpaceNowrap)\n\t}\n}",
"func Natural(value float64) bool {\n\treturn Whole(value) && value > 0\n}",
"func (me XsdGoPkgHasElems_Width) WidthDefault() xsdt.Double {\r\n\tvar x = new(xsdt.Double)\r\n\tx.Set(\"1.0\")\r\n\treturn *x\r\n}",
"func (b *Border) Width(width float32) *Border {\n\tb.width = b.th.TextSize.Scale(width)\n\treturn b\n}",
"func (b *Border) Width(width float32) *Border {\n\tb.width = b.Theme.TextSize.Scale(width)\n\treturn b\n}",
"func AllNaturalBreaks(data []float64, maxClasses int) [][]float64 {\n\t// sort data in numerical order, since this is expected by the matrices function\n\tdata = sortData(data)\n\n\t// sanity check\n\tuniq := deduplicate(data)\n\tif maxClasses > len(uniq) {\n\t\tmaxClasses = len(uniq)\n\t}\n\n\t// get our basic matrices (we only need lower class limits here)\n\tlowerClassLimits, _ := getMatrices(data, maxClasses)\n\n\t// extract nClasses out of the computed matrices\n\tallBreaks := [][]float64{}\n\tfor i := 2; i <= maxClasses; i++ {\n\t\tnClasses := breaks(data, lowerClassLimits, i)\n\t\tif i == len(uniq) {\n\t\t\tnClasses = uniq\n\t\t}\n\t\tallBreaks = append(allBreaks, nClasses)\n\t}\n\treturn allBreaks\n}",
"func (sc *Session) sendWindowUpdate(st *stream, n int) {\n\tsc.serveG.check()\n\t// \"The legal range for the increment to the flow control\n\t// window is 1 to 2^31-1 (2,147,483,647) octets.\"\n\t// A Go Read call on 64-bit machines could in theory read\n\t// a larger Read than this. Very unlikely, but we handle it here\n\t// rather than elsewhere for now.\n\tconst maxUint31 = 1<<31 - 1\n\tfor n >= maxUint31 {\n\t\tsc.sendWindowUpdate32(st, maxUint31)\n\t\tn -= maxUint31\n\t}\n\tsc.sendWindowUpdate32(st, int32(n))\n}",
"func (win *Window) SetKeepAbove(setting bool) {\n\twin.Candy().Guify(\"gtk_window_set_keep_above\", win, setting)\n}",
"func updateWindow(ctx context.Context, a api.FullNode, w CidWindow, t int, ch chan CidWindow) (CidWindow, error) {\n\thead, err := a.ChainHead(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\twindow := appendCIDsToWindow(w, head.Cids(), t)\n\tch <- window\n\treturn window, nil\n}",
"func (b *Bound) Width() float64 {\n\treturn b.ne.X() - b.sw.X()\n}",
"func WithNonMonotonic(nm bool) CounterOptionApplier {\n\treturn counterOptionWrapper{\n\t\tF: func(d *Descriptor) {\n\t\t\td.alternate = nm\n\t\t},\n\t}\n}",
"func LineWidth(width float32) {\n\tsyscall.Syscall(gpLineWidth, 1, uintptr(math.Float32bits(width)), 0, 0)\n}",
"func (me XsdGoPkgHasElem_Width) WidthDefault() xsdt.Double {\r\n\tvar x = new(xsdt.Double)\r\n\tx.Set(\"1.0\")\r\n\treturn *x\r\n}",
"func (v *TextView) SetPixelsAboveLines(px int) {\n\tC.gtk_text_view_set_pixels_above_lines(v.native(), C.gint(px))\n}",
"func (w *Window) updateDimensions() {\n\tif w.windowLayout == nil {\n\t\treturn\n\t}\n\n\tw.window.SetFixedHeight(w.windowLayout.SizeHint().Height())\n}",
"func (t *Link) SetUnknownWidth(i interface{}) {\n\tif t.unknown_ == nil {\n\t\tt.unknown_ = make(map[string]interface{})\n\t}\n\ttmp := &widthIntermediateType{}\n\ttmp.unknown_ = i\n\tt.width = tmp\n\n}",
"func (s *Scroll) SetWindow(win sparta.Window) {\n\ts.win = win\n}",
"func MonoWidth(input string) int {\n\n\ts := norm.NFD.String(input)\n\n\tcount := 0\n\tfor i := 0; i < len(s); {\n\t\td := norm.NFC.NextBoundaryInString(s[i:], true)\n\t\tcount += 1\n\t\ti += d\n\t}\n\n\treturn count\n}",
"func adjustStringWidth(s string, width int) string {\n\ts = strings.TrimLeft(s, \" \")\n\tif len(s) < width {\n\t\tdiff := width - len(s)\n\t\tfor i := 0; i < diff; i++ {\n\t\t\ts += \" \"\n\t\t}\n\t}\n\treturn s\n}",
"func (n *windowNode) computeWindows() error {\n\trowCount := n.wrappedRenderVals.Len()\n\tif rowCount == 0 {\n\t\treturn nil\n\t}\n\n\twindowCount := len(n.funcs)\n\tacc := n.windowsAcc.Wtxn(n.planner.session)\n\n\twinValSz := uintptr(rowCount) * unsafe.Sizeof([]parser.Datum{})\n\twinAllocSz := uintptr(rowCount*windowCount) * unsafe.Sizeof(parser.Datum(nil))\n\tif err := acc.Grow(int64(winValSz + winAllocSz)); err != nil {\n\t\treturn err\n\t}\n\n\tn.windowValues = make([][]parser.Datum, rowCount)\n\twindowAlloc := make([]parser.Datum, rowCount*windowCount)\n\tfor i := range n.windowValues {\n\t\tn.windowValues[i] = windowAlloc[i*windowCount : (i+1)*windowCount]\n\t}\n\n\tvar scratchBytes []byte\n\tvar scratchDatum []parser.Datum\n\tfor windowIdx, windowFn := range n.funcs {\n\t\tpartitions := make(map[string][]parser.IndexedRow)\n\n\t\tif len(windowFn.partitionIdxs) == 0 {\n\t\t\t// If no partition indexes are included for the window function, all\n\t\t\t// rows are added to the same partition, which need to be pre-allocated.\n\t\t\tsz := int64(uintptr(rowCount) * unsafe.Sizeof(parser.IndexedRow{}))\n\t\t\tif err := acc.Grow(sz); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tpartitions[\"\"] = make([]parser.IndexedRow, rowCount)\n\t\t}\n\n\t\tif n := len(windowFn.partitionIdxs); n > cap(scratchDatum) {\n\t\t\tsz := int64(uintptr(n) * unsafe.Sizeof(parser.Datum(nil)))\n\t\t\tif err := acc.Grow(sz); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tscratchDatum = make([]parser.Datum, n)\n\t\t} else {\n\t\t\tscratchDatum = scratchDatum[:n]\n\t\t}\n\n\t\t// Partition rows into separate partitions based on hash values of the\n\t\t// window function's PARTITION BY attribute.\n\t\t//\n\t\t// TODO(nvanbenschoten) Window functions with the same window definition\n\t\t// can share partition and sorting work.\n\t\t// See Cao et al. 
[http://vldb.org/pvldb/vol5/p1244_yucao_vldb2012.pdf]\n\t\tfor rowI := 0; rowI < rowCount; rowI++ {\n\t\t\trow := n.wrappedRenderVals.At(rowI)\n\t\t\trowWindowDef := n.wrappedWindowDefVals.At(rowI)\n\t\t\tentry := parser.IndexedRow{Idx: rowI, Row: row}\n\t\t\tif len(windowFn.partitionIdxs) == 0 {\n\t\t\t\t// If no partition indexes are included for the window function, all\n\t\t\t\t// rows are added to the same partition.\n\t\t\t\tpartitions[\"\"][rowI] = entry\n\t\t\t} else {\n\t\t\t\t// If the window function has partition indexes, we hash the values of each\n\t\t\t\t// of these indexes for each row, and partition based on this hashed value.\n\t\t\t\tfor i, idx := range windowFn.partitionIdxs {\n\t\t\t\t\tscratchDatum[i] = rowWindowDef[idx]\n\t\t\t\t}\n\n\t\t\t\tencoded, err := sqlbase.EncodeDTuple(scratchBytes, scratchDatum)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tsz := int64(uintptr(len(encoded)) + unsafe.Sizeof(entry))\n\t\t\t\tif err := acc.Grow(sz); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tpartitions[string(encoded)] = append(partitions[string(encoded)], entry)\n\t\t\t\tscratchBytes = encoded[:0]\n\t\t\t}\n\t\t}\n\n\t\t// For each partition, perform necessary sorting based on the window function's\n\t\t// ORDER BY attribute. After this, perform the window function computation for\n\t\t// each tuple and save the result in n.windowValues.\n\t\t//\n\t\t// TODO(nvanbenschoten)\n\t\t// - Investigate inter- and intra-partition parallelism\n\t\t// - Investigate more efficient aggregation techniques\n\t\t// * Removable Cumulative\n\t\t// * Segment Tree\n\t\t// See Leis et al. [http://www.vldb.org/pvldb/vol8/p1058-leis.pdf]\n\t\tfor _, partition := range partitions {\n\t\t\t// TODO(nvanbenschoten) Handle framing here. Right now we only handle the default\n\t\t\t// framing option of RANGE UNBOUNDED PRECEDING. 
With ORDER BY, this sets the frame\n\t\t\t// to be all rows from the partition start up through the current row's last ORDER BY\n\t\t\t// peer. Without ORDER BY, all rows of the partition are included in the window frame,\n\t\t\t// since all rows become peers of the current row. Once we add better framing support,\n\t\t\t// we should flesh this logic out more.\n\t\t\tbuiltin := windowFn.expr.GetWindowConstructor()()\n\n\t\t\t// Since we only support two types of window frames (see TODO above), we only\n\t\t\t// need two possible types of peerGroupChecker's to help determine peer groups\n\t\t\t// for given tuples.\n\t\t\tvar peerGrouper peerGroupChecker\n\t\t\tif windowFn.columnOrdering != nil {\n\t\t\t\t// If an ORDER BY clause is provided, order the partition and use the\n\t\t\t\t// sorter as our peerGroupChecker.\n\t\t\t\tsorter := &partitionSorter{\n\t\t\t\t\trows: partition,\n\t\t\t\t\twindowDefVals: n.wrappedWindowDefVals,\n\t\t\t\t\tordering: windowFn.columnOrdering,\n\t\t\t\t}\n\t\t\t\t// The sort needs to be deterministic because multiple window functions with\n\t\t\t\t// syntactically equivalent ORDER BY clauses in their window definitions\n\t\t\t\t// need to be guaranteed to be evaluated in the same order, even if the\n\t\t\t\t// ORDER BY *does not* uniquely determine an ordering. 
In the future, this\n\t\t\t\t// could be guaranteed by only performing a single pass over a sorted partition\n\t\t\t\t// for functions with syntactically equivalent PARTITION BY and ORDER BY clauses.\n\t\t\t\tsort.Sort(sorter)\n\t\t\t\tpeerGrouper = sorter\n\t\t\t} else {\n\t\t\t\t// If no ORDER BY clause is provided, all rows in the partition are peers.\n\t\t\t\tpeerGrouper = allPeers{}\n\t\t\t}\n\n\t\t\t// Iterate over peer groups within partition using a window frame.\n\t\t\tframe := parser.WindowFrame{\n\t\t\t\tRows: partition,\n\t\t\t\tArgIdxStart: windowFn.argIdxStart,\n\t\t\t\tArgCount: windowFn.argCount,\n\t\t\t\tRowIdx: 0,\n\t\t\t}\n\t\t\tfor frame.RowIdx < len(partition) {\n\t\t\t\t// Compute the size of the current peer group.\n\t\t\t\tframe.FirstPeerIdx = frame.RowIdx\n\t\t\t\tframe.PeerRowCount = 1\n\t\t\t\tfor ; frame.FirstPeerIdx+frame.PeerRowCount < len(partition); frame.PeerRowCount++ {\n\t\t\t\t\tcur := frame.FirstPeerIdx + frame.PeerRowCount\n\t\t\t\t\tif !peerGrouper.InSameGroup(cur, cur-1) {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Perform calculations on each row in the current peer group.\n\t\t\t\tfor ; frame.RowIdx < frame.FirstPeerIdx+frame.PeerRowCount; frame.RowIdx++ {\n\t\t\t\t\tres, err := builtin.Compute(frame)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\t// This may overestimate, because WindowFuncs may perform internal caching.\n\t\t\t\t\tsz := res.Size()\n\t\t\t\t\tif err := acc.Grow(int64(sz)); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\t// Save result into n.windowValues, indexed by original row index.\n\t\t\t\t\tvalRowIdx := partition[frame.RowIdx].Idx\n\t\t\t\t\tn.windowValues[valRowIdx][windowIdx] = res\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Done using window definition values, release memory.\n\tn.wrappedWindowDefVals.Close()\n\tn.wrappedWindowDefVals = nil\n\n\treturn nil\n}",
"func (o *FilingSentiment) SetConstraining(v float32) {\n\to.Constraining = &v\n}",
"func (o *PluginDnsClient) SetTruncated() {}",
"func GxPhysicalWidth(value float64) *SimpleElement { return newSEFloat(\"gx:physicalWidth\", value) }",
"func (c RelativeConstraint) GetWidth() float32 {\n\treturn c.op(c.parent().GetWidth(), c.constant)\n}",
"func wmFreeformResize(ctx context.Context, tconn *chrome.TestConn, a *arc.ARC, d *ui.Device) error {\n\tact, err := arc.NewActivity(a, wm.Pkg24, wm.ResizableUnspecifiedActivity)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer act.Close()\n\tif err := act.StartWithDefaultOptions(ctx, tconn); err != nil {\n\t\treturn err\n\t}\n\tdefer act.Stop(ctx, tconn)\n\tif err := wm.WaitUntilActivityIsReady(ctx, tconn, act, d); err != nil {\n\t\treturn err\n\t}\n\n\twindow, err := ash.GetARCAppWindowInfo(ctx, tconn, act.PackageName())\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Resizable apps are launched in maximized in P.\n\tif window.State != ash.WindowStateNormal {\n\t\tif ws, err := ash.SetARCAppWindowState(ctx, tconn, act.PackageName(), ash.WMEventNormal); err != nil {\n\t\t\treturn err\n\t\t} else if ws != ash.WindowStateNormal {\n\t\t\treturn errors.Errorf(\"failed to set window state: got %s, want %s\", ws, ash.WindowStateNormal)\n\t\t}\n\t\tif err := ash.WaitForARCAppWindowState(ctx, tconn, act.PackageName(), ash.WindowStateNormal); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := ash.WaitWindowFinishAnimating(ctx, tconn, window.ID); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tdispMode, err := ash.PrimaryDisplayMode(ctx, tconn)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdispInfo, err := display.GetPrimaryInfo(ctx, tconn)\n\tif err != nil {\n\t\treturn err\n\t}\n\tmaxBounds := coords.ConvertBoundsFromDPToPX(dispInfo.Bounds, dispMode.DeviceScaleFactor)\n\n\tif ws, err := ash.SetARCAppWindowState(ctx, tconn, act.PackageName(), ash.WMEventNormal); err != nil {\n\t\treturn err\n\t} else if ws != ash.WindowStateNormal {\n\t\treturn errors.Errorf(\"failed to set window state: got %s, want %s\", ws, ash.WindowStateNormal)\n\t}\n\n\t// Now we grab the bounds from the restored app, and we try to resize it to its previous right margin.\n\torigBounds, err := act.WindowBounds(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// The -1 is needed to prevent injecting a 
touch event outside bounds.\n\tright := maxBounds.Left + maxBounds.Width - 1\n\ttesting.ContextLog(ctx, \"Resizing app to right margin = \", right)\n\tto := coords.NewPoint(right, origBounds.Top+origBounds.Height/2)\n\tif err := act.ResizeWindow(ctx, tconn, arc.BorderRight, to, 500*time.Millisecond); err != nil {\n\t\treturn err\n\t}\n\n\tbounds, err := act.WindowBounds(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// ResizeWindow() does not guarantee pixel-perfect resizing.\n\t// For this particular test, we are good as long as the window has been resized at least one pixel.\n\tif bounds.Width <= origBounds.Width {\n\t\ttesting.ContextLogf(ctx, \"Original bounds: %+v; resized bounds: %+v\", origBounds, bounds)\n\t\treturn errors.Errorf(\"invalid window width: got %d; want %d > %d\", bounds.Width, bounds.Width, origBounds.Width)\n\t}\n\treturn nil\n}",
"func (sf *TWindow) Sizable() bool {\n\treturn !sf.fixedSize\n}",
"func (s *State) AdaptiveElWidth() int {\n\treturn s.adaptiveElWidth\n}",
"func (o *NewWindowOptions) Fixup() {\n\tsc := TheApp.Screen(0)\n\tscsz := sc.Geometry.Size() // window coords size\n\n\tif o.Size.X <= 0 {\n\t\to.StdPixels = false\n\t\to.Size.X = int(0.8 * float32(scsz.X) * sc.DevicePixelRatio)\n\t}\n\tif o.Size.Y <= 0 {\n\t\to.StdPixels = false\n\t\to.Size.Y = int(0.8 * float32(scsz.Y) * sc.DevicePixelRatio)\n\t}\n\n\to.Size, o.Pos = sc.ConstrainWinGeom(o.Size, o.Pos)\n\tif o.Pos.X == 0 && o.Pos.Y == 0 {\n\t\twsz := sc.WinSizeFmPix(o.Size)\n\t\tdialog, modal, _, _ := WindowFlagsToBool(o.Flags)\n\t\tnw := TheApp.NWindows()\n\t\tif nw > 0 {\n\t\t\tlastw := TheApp.Window(nw - 1)\n\t\t\tlsz := lastw.WinSize()\n\t\t\tlp := lastw.Position()\n\n\t\t\tnwbig := wsz.X > lsz.X || wsz.Y > lsz.Y\n\n\t\t\tif modal || dialog || !nwbig { // place centered on top of current\n\t\t\t\tctrx := lp.X + (lsz.X / 2)\n\t\t\t\tctry := lp.Y + (lsz.Y / 2)\n\t\t\t\to.Pos.X = ctrx - wsz.X/2\n\t\t\t\to.Pos.Y = ctry - wsz.Y/2\n\t\t\t} else { // cascade to right\n\t\t\t\to.Pos.X = lp.X + lsz.X // tile to right -- could depend on orientation\n\t\t\t\to.Pos.Y = lp.Y + 72 // and move down a bit\n\t\t\t}\n\t\t} else { // center in screen\n\t\t\to.Pos.X = scsz.X/2 - wsz.X/2\n\t\t\to.Pos.Y = scsz.Y/2 - wsz.Y/2\n\t\t}\n\t\to.Size, o.Pos = sc.ConstrainWinGeom(o.Size, o.Pos) // make sure ok\n\t}\n}",
"func (canvas *CanvasWidget) SetWidth(width float64) {\n\tcanvas.box.width = width\n\tcanvas.fixedWidth = true\n\tcanvas.RequestReflow()\n}",
"func WordWrap(allWords []string, maxLen int) []string {\n\tvar (\n\t\tlines []string\n\t\tcurLen int\n\t\twords []string\n\t)\n\tfor _, word := range allWords {\n\t\t// curLen + len(words) + len(word) is the length of the current\n\t\t// line including spaces\n\t\tif curLen+len(words)+len(word) > maxLen {\n\t\t\t// we have our line. That does not include the current word\n\t\t\tlines = append(lines, strings.Join(words, \" \"))\n\t\t\t// reset the current line, add the current word\n\t\t\twords = []string{word}\n\t\t\tcurLen = len(word)\n\t\t} else {\n\t\t\twords = append(words, word)\n\t\t\tcurLen += len(word)\n\t\t}\n\t}\n\tif len(words) > 0 {\n\t\t// there's one last line to add\n\t\tlines = append(lines, strings.Join(words, \" \"))\n\t}\n\tfor idx, line := range lines {\n\t\tif len(line) > maxLen {\n\t\t\t// truncate\n\t\t\tlines[idx] = line[:maxLen]\n\t\t}\n\t}\n\treturn lines\n}",
"func (this *WeightedSum) ivweightedSumPropagate(varid core.VarId, evt *core.ChangeEvent) {\n\n\tthis.checkXmultCeqY(varid, evt)\n\n\tthis.ivsumPropagate(varid, evt)\n}",
"func (b *Bound) GeoWidth(haversine ...bool) float64 {\n\tc := b.Center()\n\n\tA := &Point{b.sw[0], c[1]}\n\tB := &Point{b.ne[0], c[1]}\n\n\treturn A.GeoDistanceFrom(B, yesHaversine(haversine))\n}",
"func (dnd *Dnd) setupWindowProperty() error {\n\tdata := []byte{xproto.AtomBitmap, 0, 0, 0}\n\tcookie := xproto.ChangePropertyChecked(\n\t\tdnd.conn,\n\t\txproto.PropModeAppend, // mode\n\t\tdnd.win,\n\t\tDndAtoms.XdndAware, // atom\n\t\txproto.AtomAtom, // type\n\t\t32, // format: xprop says that it should be 32 bit\n\t\tuint32(len(data))/4,\n\t\tdata)\n\treturn cookie.Check()\n}",
"func MaybeIncreaseBufferSize(w *fsnotify.Watcher) {\n\tw.SetBufferSize(DesiredWindowsBufferSize())\n}",
"func (w *Window) Width() int {\n\treturn w.width\n}",
"func (o AiFeatureStoreOnlineServingConfigOutput) FixedNodeCount() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v AiFeatureStoreOnlineServingConfig) *int { return v.FixedNodeCount }).(pulumi.IntPtrOutput)\n}",
"func (b *BaseElement) OnWindowResized(w, h int32) {\n\tif b.Events.OnWindowResized != nil {\n\t\tb.Events.OnWindowResized(w, h)\n\t}\n}",
"func (win *Window) SetTransientFor(parent Window) {\n\twin.Candy().Guify(\"gtk_window_set_transient_for\", win, parent)\n}",
"func (o AiFeatureStoreOnlineServingConfigPtrOutput) FixedNodeCount() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *AiFeatureStoreOnlineServingConfig) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.FixedNodeCount\n\t}).(pulumi.IntPtrOutput)\n}",
"func (p *pageT) WidthDefault() {\n\tp.Style = css.NewStylesResponsive(p.Style)\n\tif p.Style.Desktop.StyleBox.Margin == \"\" && p.Style.Mobile.StyleBox.Margin == \"\" {\n\t\tp.Style.Desktop.StyleBox.Margin = \"1.2rem auto 0 auto\"\n\t\tp.Style.Mobile.StyleBox.Margin = \"0.8rem auto 0 auto\"\n\t}\n}",
"func (m *MainWindow) initializeSidebar() {\n\tsidebar_vbox := gtk.NewVBox(false, 0)\n\n\tserver_info_frame := gtk.NewFrame(ctx.Translator.Translate(\"Server information\", nil))\n\tsidebar_vbox.PackStart(server_info_frame, true, true, 5)\n\tsi_vbox := gtk.NewVBox(false, 0)\n\tserver_info_frame.Add(si_vbox)\n\n\t// Scrolled thing.\n\tsi_scroll := gtk.NewScrolledWindow(nil, nil)\n\tsi_scroll.SetPolicy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n\tsi_vbox.PackStart(si_scroll, true, true, 5)\n\n\t// Server's information.\n\tm.server_info = gtk.NewTreeView()\n\tm.server_info.SetModel(m.server_info_store)\n\n\tkey_column := gtk.NewTreeViewColumnWithAttributes(ctx.Translator.Translate(\"Key\", nil), gtk.NewCellRendererText(), \"markup\", 0)\n\tm.server_info.AppendColumn(key_column)\n\n\tvalue_column := gtk.NewTreeViewColumnWithAttributes(ctx.Translator.Translate(\"Value\", nil), gtk.NewCellRendererText(), \"markup\", 1)\n\tm.server_info.AppendColumn(value_column)\n\n\tsi_scroll.Add(m.server_info)\n\n\t// Players information.\n\tplayers_info_frame := gtk.NewFrame(ctx.Translator.Translate(\"Players\", nil))\n\tsidebar_vbox.PackStart(players_info_frame, true, true, 5)\n\n\tpi_scroll := gtk.NewScrolledWindow(nil, nil)\n\tpi_scroll.SetPolicy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n\tplayers_info_frame.Add(pi_scroll)\n\n\tm.players_info = gtk.NewTreeView()\n\tm.players_info.SetModel(m.players_info_store)\n\tpi_scroll.Add(m.players_info)\n\n\tname_column := gtk.NewTreeViewColumnWithAttributes(ctx.Translator.Translate(\"Player name\", nil), gtk.NewCellRendererText(), \"markup\", 0)\n\tm.players_info.AppendColumn(name_column)\n\n\tfrags_column := gtk.NewTreeViewColumnWithAttributes(ctx.Translator.Translate(\"Frags\", nil), gtk.NewCellRendererText(), \"markup\", 1)\n\tm.players_info.AppendColumn(frags_column)\n\n\tping_column := gtk.NewTreeViewColumnWithAttributes(ctx.Translator.Translate(\"Ping\", nil), gtk.NewCellRendererText(), \"markup\", 
2)\n\tm.players_info.AppendColumn(ping_column)\n\n\t// Show CVars button.\n\tshow_cvars_button := gtk.NewButtonWithLabel(ctx.Translator.Translate(\"Show CVars\", nil))\n\tshow_cvars_button.SetTooltipText(ctx.Translator.Translate(\"Show server's CVars\", nil))\n\tshow_cvars_button.Clicked(m.showServerCVars)\n\tsidebar_vbox.PackStart(show_cvars_button, false, true, 5)\n\n\t// Quick connect frame.\n\tquick_connect_frame := gtk.NewFrame(ctx.Translator.Translate(\"Quick connect\", nil))\n\tsidebar_vbox.PackStart(quick_connect_frame, false, true, 5)\n\tqc_vbox := gtk.NewVBox(false, 0)\n\tquick_connect_frame.Add(qc_vbox)\n\n\t// Server address.\n\tsrv_tooltip := ctx.Translator.Translate(\"Server address we will connect to\", nil)\n\tsrv_label := gtk.NewLabel(ctx.Translator.Translate(\"Server address:\", nil))\n\tsrv_label.SetTooltipText(srv_tooltip)\n\tqc_vbox.PackStart(srv_label, false, true, 5)\n\n\tm.qc_server_address = gtk.NewEntry()\n\tm.qc_server_address.SetTooltipText(srv_tooltip)\n\tqc_vbox.PackStart(m.qc_server_address, false, true, 5)\n\n\t// Password.\n\tpass_tooltip := ctx.Translator.Translate(\"Password we will use for server\", nil)\n\tpass_label := gtk.NewLabel(ctx.Translator.Translate(\"Password:\", nil))\n\tpass_label.SetTooltipText(pass_tooltip)\n\tqc_vbox.PackStart(pass_label, false, true, 5)\n\n\tm.qc_password = gtk.NewEntry()\n\tm.qc_password.SetTooltipText(pass_tooltip)\n\tqc_vbox.PackStart(m.qc_password, false, true, 5)\n\n\t// Nickname\n\tnick_tooltip := ctx.Translator.Translate(\"Nickname we will use\", nil)\n\tnick_label := gtk.NewLabel(ctx.Translator.Translate(\"Nickname:\", nil))\n\tnick_label.SetTooltipText(nick_tooltip)\n\tqc_vbox.PackStart(nick_label, false, true, 5)\n\n\tm.qc_nickname = gtk.NewEntry()\n\tm.qc_nickname.SetTooltipText(nick_tooltip)\n\tqc_vbox.PackStart(m.qc_nickname, false, true, 5)\n\n\tm.hpane.Add2(sidebar_vbox)\n}"
] | [
"0.7961128",
"0.79388314",
"0.6707606",
"0.5346279",
"0.47825405",
"0.4755107",
"0.46057013",
"0.45764875",
"0.45117897",
"0.4315776",
"0.42900914",
"0.4247731",
"0.42211503",
"0.42068702",
"0.42030233",
"0.4134348",
"0.41119507",
"0.4108613",
"0.4049972",
"0.40450892",
"0.40437365",
"0.39934456",
"0.3975168",
"0.39693403",
"0.39659613",
"0.39636934",
"0.39481375",
"0.39280483",
"0.39222252",
"0.39031982",
"0.39001748",
"0.38987264",
"0.3897588",
"0.3896837",
"0.38800997",
"0.3862602",
"0.38603702",
"0.38492012",
"0.38470387",
"0.38305214",
"0.38260594",
"0.38225687",
"0.3798621",
"0.37940165",
"0.37907603",
"0.3787526",
"0.37746534",
"0.37680444",
"0.3753093",
"0.37436572",
"0.37372148",
"0.3733609",
"0.37220305",
"0.3713886",
"0.37073788",
"0.3705136",
"0.3702468",
"0.36954018",
"0.3688314",
"0.36881492",
"0.36828455",
"0.36773685",
"0.36764345",
"0.36756176",
"0.3665066",
"0.36611786",
"0.36577755",
"0.36557454",
"0.36506423",
"0.36481592",
"0.36291635",
"0.3626794",
"0.36132956",
"0.36119327",
"0.36118254",
"0.36115295",
"0.36084917",
"0.36061296",
"0.35959196",
"0.35950565",
"0.3589969",
"0.3585758",
"0.35857397",
"0.3578339",
"0.3574258",
"0.3572419",
"0.35702693",
"0.3570116",
"0.35649407",
"0.35623074",
"0.3561689",
"0.35529307",
"0.3551185",
"0.35507774",
"0.35494152",
"0.3548222",
"0.35347617",
"0.35095894",
"0.3507484",
"0.35060948"
] | 0.91999835 | 0 |
GetPropagateNaturalHeight is a wrapper around gtk_scrolled_window_get_propagate_natural_height(). | GetPropagateNaturalHeight — это обертка вокруг gtk_scrolled_window_get_propagate_natural_height(). | func (v *ScrolledWindow) GetPropagateNaturalHeight() bool {
c := C.gtk_scrolled_window_get_propagate_natural_height(v.native())
return gobool(c)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *ScrolledWindow) SetPropagateNaturalHeight(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_height(v.native(), gbool(propagate))\n}",
"func (v *ScrolledWindow) GetPropagateNaturalWidth() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_width(v.native())\n\treturn gobool(c)\n}",
"func (v *ScrolledWindow) SetPropagateNaturalWidth(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_width(v.native(), gbool(propagate))\n}",
"func (window Window) InnerHeight() int {\n\treturn window.Get(\"innerHeight\").Int()\n}",
"func (w *WidgetImplement) FixedHeight() int {\n\treturn w.fixedH\n}",
"func (c RelativeConstraint) GetHeight() float32 {\n\treturn c.op(c.parent().GetHeight(), c.constant)\n}",
"func (w *Window) Height() int {\n\treturn int(C.ANativeWindow_getHeight(w.cptr()))\n}",
"func (v *ScrolledWindow) GetMaxContentHeight() int {\n\tc := C.gtk_scrolled_window_get_max_content_height(v.native())\n\treturn int(c)\n}",
"func (p *Protocol) GetEpochHeight(epochNum uint64) uint64 {\n\tif epochNum == 0 {\n\t\treturn 0\n\t}\n\tdardanellesEpoch := p.GetEpochNum(p.dardanellesHeight)\n\tif !p.dardanellesOn || epochNum <= dardanellesEpoch {\n\t\treturn (epochNum-1)*p.numDelegates*p.numSubEpochs + 1\n\t}\n\tdardanellesEpochHeight := p.GetEpochHeight(dardanellesEpoch)\n\treturn dardanellesEpochHeight + (epochNum-dardanellesEpoch)*p.numDelegates*p.numSubEpochsDardanelles\n}",
"func (b *Bound) Height() float64 {\n\treturn b.ne.Y() - b.sw.Y()\n}",
"func (window Window) OuterHeight() int {\n\treturn window.Get(\"outerHeight\").Int()\n}",
"func (t *Link) GetHeight() (v int64) {\n\treturn *t.height.nonNegativeInteger\n\n}",
"func updateHeight(n *node) {\n\tn.H = math.Max(height(child(n, 0)), height(child(n, 1))+1)\n}",
"func (v *TextView) GetBorderWindowSize(tp TextWindowType) int {\n\treturn int(C.gtk_text_view_get_border_window_size(v.native(), C.GtkTextWindowType(tp)))\n}",
"func (self *TraitPixbufAnimation) GetHeight() (return__ int) {\n\tvar __cgo__return__ C.int\n\t__cgo__return__ = C.gdk_pixbuf_animation_get_height(self.CPointer)\n\treturn__ = int(__cgo__return__)\n\treturn\n}",
"func (g *GitStatusWidget) GetHeight() int {\n\treturn g.renderer.GetHeight()\n}",
"func (win *Window) Height() int {\n\tsize := C.sfRenderWindow_getSize(win.win)\n\treturn int(size.y)\n}",
"func (t *Text) LinesHeight() int {\n\tpad := t.setter.opts.Padding\n\tif t.size.Y <= 0 {\n\t\treturn 0\n\t}\n\tif t.size.Y-2*pad <= 0 {\n\t\treturn t.size.Y\n\t}\n\ty := pad\n\tfor _, l := range t.lines {\n\t\th := l.h.Round()\n\t\tif y+h > t.size.Y-pad {\n\t\t\tbreak\n\t\t}\n\t\ty += h\n\t}\n\tif h := trailingNewlineHeight(t); h > 0 && y+h <= t.size.Y-pad {\n\t\ty += h\n\t}\n\treturn y + pad\n}",
"func (self *TraitPixbuf) GetHeight() (return__ int) {\n\tvar __cgo__return__ C.int\n\t__cgo__return__ = C.gdk_pixbuf_get_height(self.CPointer)\n\treturn__ = int(__cgo__return__)\n\treturn\n}",
"func (v *ScrolledWindow) GetMaxContentWidth() int {\n\tc := C.gtk_scrolled_window_get_max_content_width(v.native())\n\treturn int(c)\n}",
"func (v *Pixbuf) GetHeight() int {\n\treturn int(C.gdk_pixbuf_get_height(v.Native()))\n}",
"func (w *Window) Height() int {\n\treturn w.height\n}",
"func (w *LWindow) MHeight() int32 {\n\treturn w.mHeight\n}",
"func (_Depositmanager *DepositmanagerCallerSession) DepositSubtreeHeight() (*big.Int, error) {\n\treturn _Depositmanager.Contract.DepositSubtreeHeight(&_Depositmanager.CallOpts)\n}",
"func (_Depositmanager *DepositmanagerSession) DepositSubtreeHeight() (*big.Int, error) {\n\treturn _Depositmanager.Contract.DepositSubtreeHeight(&_Depositmanager.CallOpts)\n}",
"func (n *NodeMetastate) Height() uint64 {\n\treturn n.LedgerHeight\n}",
"func (_Depositmanager *DepositmanagerCaller) DepositSubtreeHeight(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Depositmanager.contract.Call(opts, out, \"depositSubtreeHeight\")\n\treturn *ret0, err\n}",
"func (b *BaseElement) GetHeight() int32 {\n\treturn b.h\n}",
"func (s *SkipList) getHeight() uint32 {\n\t// Height can be modified concurrently, so we need to load it atomically.\n\treturn atomic.LoadUint32(&s.height)\n}",
"func (g *Grid) GetHeight() int {\n\treturn g.Height\n}",
"func (n *Network) LatestHeight() (int64, error) {\n\tif len(n.Validators) == 0 {\n\t\treturn 0, errors.New(\"no validators available\")\n\t}\n\n\tstatus, err := n.Validators[0].RPCClient.Status(context.Background())\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn status.SyncInfo.LatestBlockHeight, nil\n}",
"func (t *Link) GetUnknownHeight() (v interface{}) {\n\treturn t.height.unknown_\n\n}",
"func (env *Environment) getHeight(latestHeight int64, heightPtr *int64) (int64, error) {\n\tif heightPtr != nil {\n\t\theight := *heightPtr\n\t\tif height <= 0 {\n\t\t\treturn 0, fmt.Errorf(\"%w (requested height: %d)\", coretypes.ErrZeroOrNegativeHeight, height)\n\t\t}\n\t\tif height > latestHeight {\n\t\t\treturn 0, fmt.Errorf(\"%w (requested height: %d, blockchain height: %d)\",\n\t\t\t\tcoretypes.ErrHeightExceedsChainHead, height, latestHeight)\n\t\t}\n\t\tbase := env.BlockStore.Base()\n\t\tif height < base {\n\t\t\treturn 0, fmt.Errorf(\"%w (requested height: %d, base height: %d)\", coretypes.ErrHeightNotAvailable, height, base)\n\t\t}\n\t\treturn height, nil\n\t}\n\treturn latestHeight, nil\n}",
"func (cs ClientState) GetLatestHeight() uint64 {\n\treturn uint64(cs.Height)\n}",
"func (o *os) GetVirtualKeyboardHeight() gdnative.Int {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.GetVirtualKeyboardHeight()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"get_virtual_keyboard_height\")\n\n\t// Call the parent method.\n\t// int\n\tretPtr := gdnative.NewEmptyInt()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewIntFromPointer(retPtr)\n\treturn ret\n}",
"func (v *TextView) GetPixelsBelowLines() int {\n\tc := C.gtk_text_view_get_pixels_below_lines(v.native())\n\treturn int(c)\n}",
"func GetHeight() int {\n\treturn viper.GetInt(FlagHeight)\n}",
"func (t *Text) Height(takable, taken float64) float64 {\n\treturn t.Bounds().H() * t.Scl.Y\n}",
"func (v *TextView) GetVisibleRect() *gdk.Rectangle {\n\tvar rect C.GdkRectangle\n\tC.gtk_text_view_get_visible_rect(v.native(), &rect)\n\treturn gdk.WrapRectangle(uintptr(unsafe.Pointer(&rect)))\n}",
"func (rect *PdfRectangle) Height() float64 {\n\treturn math.Abs(rect.Ury - rect.Lly)\n}",
"func (e Event) GetResizeHeight() int {\n\treturn int(C.caca_get_event_resize_height(e.Ev))\n}",
"func (r *ImageRef) GetPageHeight() int {\n\treturn vipsGetPageHeight(r.image)\n}",
"func CalculateNecessaryHeight(width int, text string) int {\n\tsplitLines := strings.Split(text, \"\\n\")\n\n\twrappedLines := 0\n\tfor _, line := range splitLines {\n\t\tif len(line) >= width {\n\t\t\twrappedLines = wrappedLines + ((len(line) - (len(line) % width)) / width)\n\t\t}\n\t}\n\n\treturn len(splitLines) + wrappedLines\n\n}",
"func (r *ImageRef) PageHeight() int {\n\treturn vipsGetPageHeight(r.image)\n}",
"func (b *BoundingBox2D) height() float64 {\n\n\treturn b.upperCorner.Y - b.lowerCorner.Y\n}",
"func (p *Visitor) LedgerHeight() uint64 {\n\treturn atomic.LoadUint64(&p.lastCommittedBlock) + 1\n}",
"func (a *Animation) GetHeight() float64 {\n\tif a == nil || a.Height == nil {\n\t\treturn 0.0\n\t}\n\treturn *a.Height\n}",
"func (w *WidgetImplement) Height() int {\n\treturn w.h\n}",
"func (me *XsdGoPkgHasElem_MaxHeight) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElem_MaxHeight; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}",
"func GetProcessedHeight(clientStore sdk.KVStore, height exported.Height) (exported.Height, bool) {\n\tkey := ProcessedHeightKey(height)\n\tbz := clientStore.Get(key)\n\tif bz == nil {\n\t\treturn nil, false\n\t}\n\tprocessedHeight, err := clienttypes.ParseHeight(string(bz))\n\tif err != nil {\n\t\treturn nil, false\n\t}\n\treturn processedHeight, true\n}",
"func (me *XsdGoPkgHasElems_MaxHeight) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElems_MaxHeight; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}",
"func (b *Bound) GeoHeight() float64 {\n\treturn 111131.75 * b.Height()\n}",
"func (u *Upstream) GetHeight() uint64 {\n\treturn u.blockHeight\n}",
"func injectHeightNotifier(content []byte, name string) []byte {\n\tcontent = append([]byte(`<div id=\"wrapper\">`), content...)\n\t// From https://stackoverflow.com/a/44547866 and extended to also pass\n\t// back the element id, as we can have multiple pages.\n\treturn append(content, []byte(fmt.Sprintf(`</div><script type=\"text/javascript\">\nwindow.addEventListener(\"load\", function(){\n if(window.self === window.top) return; // if w.self === w.top, we are not in an iframe\n send_height_to_parent_function = function(){\n var height = document.getElementById(\"wrapper\").offsetHeight;\n parent.postMessage({\"height\" : height , \"id\": \"%s\"}, \"*\");\n }\n send_height_to_parent_function(); //whenever the page is loaded\n window.addEventListener(\"resize\", send_height_to_parent_function); // whenever the page is resized\n var observer = new MutationObserver(send_height_to_parent_function); // whenever DOM changes PT1\n var config = { attributes: true, childList: true, characterData: true, subtree:true}; // PT2\n observer.observe(window.document, config); // PT3\n});\n</script>`, name))...)\n}",
"func (w *Window) updateDimensions() {\n\tif w.windowLayout == nil {\n\t\treturn\n\t}\n\n\tw.window.SetFixedHeight(w.windowLayout.SizeHint().Height())\n}",
"func (o *DynamicFont) GetFallbackCount() gdnative.Int {\n\t//log.Println(\"Calling DynamicFont.GetFallbackCount()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"DynamicFont\", \"get_fallback_count\")\n\n\t// Call the parent method.\n\t// int\n\tretPtr := gdnative.NewEmptyInt()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewIntFromPointer(retPtr)\n\treturn ret\n}",
"func (bp RPCBlockProvider) GetBlockHeight() int {\r\n\tvar block int\r\n\terr := bp.Client.Call(\"BlockPropagationHandler.GetBlockHeight\", 0, &block)\r\n\tif err != nil {\r\n\t\tlog.Print(err)\r\n\t}\r\n\treturn block\r\n}",
"func getHeight() int {\n\tfor {\n\t\theight := go50.GetInt(\"Height: \")\n\t\tif height > 0 && height < 24 {\n\t\t\treturn height\n\t\t}\n\t}\n}",
"func MaxN(window int) func(s []float64) []float64 {\n\treturn func(s []float64) []float64 {\n\t\tmax := make([]float64, 0)\n\t\ti := 0\n\t\tfor _, v := range s {\n\t\t\tif i < window {\n\t\t\t\tfor j := 0; j < i; j++ {\n\t\t\t\t\tv = math.Max(v, max[j])\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor j := i - window; j < i; j++ {\n\t\t\t\t\tv = math.Max(v, max[j])\n\t\t\t\t}\n\t\t\t}\n\t\t\tmax = append(max, v)\n\t\t}\n\n\t\treturn max\n\t}\n}",
"func (r Rectangle) Height() float64 {\n\treturn r.Max.Y - r.Min.Y\n}",
"func (c *Config) MaxHeight() int {\n\tc.Mutex.RLock()\n\tdefer c.Mutex.RUnlock()\n\treturn c.Raw.MaxHeight\n}",
"func (g *Graph) Height() uint64 {\n\tg.RLock()\n\theight := g.height\n\tg.RUnlock()\n\n\treturn height\n}",
"func GetChainHeight(cliCtx context.CLIContext) (int64, error) {\n\tnode, err := cliCtx.GetNode()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tstatus, err := node.Status()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\theight := status.SyncInfo.LatestBlockHeight\n\treturn height, nil\n}",
"func (p *Protocol) GetEpochNum(height uint64) uint64 {\n\tif height == 0 {\n\t\treturn 0\n\t}\n\tif !p.dardanellesOn || height <= p.dardanellesHeight {\n\t\treturn (height-1)/p.numDelegates/p.numSubEpochs + 1\n\t}\n\tdardanellesEpoch := p.GetEpochNum(p.dardanellesHeight)\n\tdardanellesEpochHeight := p.GetEpochHeight(dardanellesEpoch)\n\treturn dardanellesEpoch + (height-dardanellesEpochHeight)/p.numDelegates/p.numSubEpochsDardanelles\n}",
"func Height() uint64 {\n\n\tglobalData.RLock()\n\tdefer globalData.RUnlock()\n\n\treturn globalData.height\n}",
"func (d Doc) DefaultLineHeight() float64 {\n\treturn d.capValue * float64(d.fontSize) / 2000.0 * d.lineSpread\n}",
"func (v *Layer) height() uint {\n\t_, height := v.view.Size()\n\treturn uint(height - 1)\n}",
"func (o *os) GetBorderlessWindow() gdnative.Bool {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.GetBorderlessWindow()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"get_borderless_window\")\n\n\t// Call the parent method.\n\t// bool\n\tretPtr := gdnative.NewEmptyBool()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewBoolFromPointer(retPtr)\n\treturn ret\n}",
"func (commit *Commit) Height() int64 {\n\tif len(commit.Precommits) == 0 {\n\t\treturn 0\n\t}\n\treturn commit.FirstPrecommit().Vote.Height\n}",
"func (w *WidgetImplement) SetFixedHeight(h int) {\n\tw.fixedH = h\n}",
"func (s *ItemScroller) hRecalc() {\n\n\t// Checks if scroll bar should be visible or not\n\tscroll := false\n\tif s.first > 0 {\n\t\tscroll = true\n\t} else {\n\t\tvar posX float32\n\t\tfor _, item := range s.items[s.first:] {\n\t\t\tposX += item.GetPanel().Width()\n\t\t\tif posX > s.width {\n\t\t\t\tscroll = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\ts.setHScrollBar(scroll)\n\n\t// Compute size of scroll button\n\tif scroll && s.autoButtonSize {\n\t\tvar totalWidth float32\n\t\tfor _, item := range s.items {\n\t\t\t// TODO OPTIMIZATION\n\t\t\t// Break when the view/content proportion becomes smaller than the minimum button size\n\t\t\ttotalWidth += item.GetPanel().Width()\n\t\t}\n\t\ts.hscroll.SetButtonSize(s.width * s.width / totalWidth)\n\t}\n\n\t// Items height\n\theight := s.ContentHeight()\n\tif scroll {\n\t\theight -= s.hscroll.Height()\n\t}\n\n\tvar posX float32\n\t// Sets positions of all items\n\tfor pos, ipan := range s.items {\n\t\titem := ipan.GetPanel()\n\t\t// If item is before first visible, sets not visible\n\t\tif pos < s.first {\n\t\t\titem.SetVisible(false)\n\t\t\tcontinue\n\t\t}\n\t\t// If item is after last visible, sets not visible\n\t\tif posX > s.width {\n\t\t\titem.SetVisible(false)\n\t\t\tcontinue\n\t\t}\n\t\t// Sets item position\n\t\titem.SetVisible(true)\n\t\titem.SetPosition(posX, 0)\n\t\tif s.adjustItem {\n\t\t\titem.SetHeight(height)\n\t\t}\n\t\tposX += item.Width()\n\t}\n\n\t// Set scroll bar value if recalc was not due by scroll event\n\tif scroll && !s.scrollBarEvent {\n\t\ts.hscroll.SetValue(float32(s.first) / float32(s.maxFirst()))\n\t}\n\ts.scrollBarEvent = false\n}",
"func (o *os) GetMaxWindowSize() gdnative.Vector2 {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.GetMaxWindowSize()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"get_max_window_size\")\n\n\t// Call the parent method.\n\t// Vector2\n\tretPtr := gdnative.NewEmptyVector2()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewVector2FromPointer(retPtr)\n\treturn ret\n}",
"func (commit *Commit) GetHeight() int64 {\n\treturn commit.Height\n}",
"func (commit *Commit) GetHeight() int64 {\n\treturn commit.Height\n}",
"func (lc *FilChain) GetHeight(ctx context.Context) (uint64, error) {\n\th, err := lc.api.ChainHead(ctx)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"get head from lotus node: %s\", err)\n\t}\n\treturn uint64(h.Height()), nil\n}",
"func (self *CellRenderer) GetFixedSize() (width, height int) {\n\tvar w C.gint\n\tvar h C.gint\n\tC.gtk_cell_renderer_get_fixed_size(self.object, &w, &h)\n\n\twidth = int(w)\n\theight = int(h)\n\treturn\n}",
"func getHeight(latestHeight int64, heightPtr *int64) (int64, error) {\n\tif heightPtr != nil {\n\t\theight := *heightPtr\n\t\tif height <= 0 {\n\t\t\treturn 0, fmt.Errorf(\"height must be greater than 0, but got %d\", height)\n\t\t}\n\t\tif height > latestHeight {\n\t\t\treturn 0, fmt.Errorf(\"height %d must be less than or equal to the current blockchain height %d\",\n\t\t\t\theight, latestHeight)\n\t\t}\n\t\tbase := env.BlockStore.Base()\n\t\tif height < base {\n\t\t\treturn 0, fmt.Errorf(\"height %d is not available, lowest height is %d\",\n\t\t\t\theight, base)\n\t\t}\n\t\treturn height, nil\n\t}\n\treturn latestHeight, nil\n}",
"func (n *Node) Height() int {\n\tif len(n.Children) == 0 {\n\t\treturn 0\n\t}\n\tvar max int\n\tfor _, c := range n.Children {\n\t\th := c.Height()\n\t\tif max < h {\n\t\t\tmax = h\n\t\t}\n\t}\n\treturn max + 1\n}",
"func (g Grid) Height() int {\n\treturn g.height\n}",
"func (s *TXPoolServer) getHeight() uint32 {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\treturn s.height\n}",
"func (mTrx *_MyTrx) Height() int64 {\n\treturn atomic.LoadInt64(&mTrx.height)\n}",
"func (window Window) ScrollY() int {\n\treturn window.Get(\"scrollY\").Int()\n}",
"func (rt *recvTxOut) Height() int32 {\n\theight := int32(-1)\n\tif rt.block != nil {\n\t\theight = rt.block.Height\n\t}\n\treturn height\n}",
"func (l *Ledger) GetIrreversibleSlideWindow() int64 {\n\tdefaultIrreversibleSlideWindow := l.GenesisBlock.GetConfig().GetIrreversibleSlideWindow()\n\treturn defaultIrreversibleSlideWindow\n}",
"func (m *Model) GetMaxHeight() int {\n\treturn m.maxHeight\n}",
"func (lc *LotusChain) GetHeight(ctx context.Context) (uint64, error) {\n\th, err := lc.api.ChainHead(ctx)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"get head from lotus node: %s\", err)\n\t}\n\treturn uint64(h.Height()), nil\n}",
"func (v *TextView) GetPixelsAboveLines() int {\n\tc := C.gtk_text_view_get_pixels_above_lines(v.native())\n\treturn int(c)\n}",
"func PropValWindow(reply *xproto.GetPropertyReply,\n\terr error) (xproto.Window, error) {\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif reply.Format != 32 {\n\t\treturn 0, fmt.Errorf(\"PropValId: Expected format 32 but got %d\",\n\t\t\treply.Format)\n\t}\n\treturn xproto.Window(xgb.Get32(reply.Value)), nil\n}",
"func (ws *workingSet) Height() (uint64, error) {\n\treturn ws.height, nil\n}",
"func (s *PageLayout) Height() float64 {\n\treturn s.height\n}",
"func (v *StackPanel) Height() int {\n\tif v.orientation == Vertical {\n\t\th := 0\n\t\tfor _, val := range v.heights {\n\t\t\th += val\n\t\t}\n\t\treturn h\n\t}\n\treturn v.height\n}",
"func (dim *Dimensions) Height() int64 {\n\treturn dim.height\n}",
"func (e *EthManager) getNetworkHeight() (response int64, err error) {\n\tbody := `{\"jsonrpc\":\"2.0\",\"method\":\"eth_blockNumber\",\"params\":[],\"id\":42}`\n\ttarget := ethBlockNumber{}\n\tif err = requestAndParseJSON(e.ethJsonRPC, body, &target); err != nil {\n\t\tlog.Printf(\"Query Error: %v\", err)\n\t\treturn -1, err\n\t}\n\n\tresponse, err = strconv.ParseInt(target.Result[2:], 16, 0)\n\tif err != nil {\n\t\tlog.Printf(\"Parse Error: %v\", err)\n\t\treturn -1, err\n\t}\n\n\t// won't log non error responses as this is a very frequent query\n\treturn\n}",
"func GetHeightFromIterationKey(iterKey []byte) exported.Height {\n\tbigEndianBytes := iterKey[len([]byte(KeyIterateConsensusStatePrefix)):]\n\trevisionBytes := bigEndianBytes[0:8]\n\theightBytes := bigEndianBytes[8:]\n\trevision := binary.BigEndian.Uint64(revisionBytes)\n\theight := binary.BigEndian.Uint64(heightBytes)\n\treturn clienttypes.NewHeight(revision, height)\n}",
"func (window Window) InnerWidth() int {\n\treturn window.Get(\"innerWidth\").Int()\n}",
"func (object Object) Height(value int64) Object {\n\treturn object.SimpleValue(as.PropertyHeight, value)\n}",
"func (o *WorkbookChart) GetHeight() AnyOfnumberstringstring {\n\tif o == nil || o.Height == nil {\n\t\tvar ret AnyOfnumberstringstring\n\t\treturn ret\n\t}\n\treturn *o.Height\n}",
"func GetWindowText(hwnd syscall.Handle, str *uint16, maxCount int32) (len int32, err error) {\n\tr0, _, e1 := syscall.Syscall(getWindowTextW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(str)), uintptr(maxCount))\n\tlen = int32(r0)\n\tif len == 0 {\n\t\tif e1 != 0 {\n\t\t\terr = error(e1)\n\t\t} else {\n\t\t\terr = syscall.EINVAL\n\t\t}\n\t}\n\treturn\n}",
"func Height() int {\n\tws, err := getWinsize()\n\tif err != nil {\n\t\treturn -1\n\t}\n\treturn int(ws.Row)\n}",
"func (d Dispatcher) LatestBlockHeight() (int, error) {\n\tblock, err := d.GetBC().GetLatestBlock()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn int(block.GetHeight()), nil\n}"
] | [
"0.7887868",
"0.74162084",
"0.6701644",
"0.5282257",
"0.51819026",
"0.5121254",
"0.501534",
"0.4822917",
"0.47664776",
"0.4716606",
"0.46773928",
"0.46360207",
"0.46136078",
"0.45566",
"0.45110223",
"0.45107284",
"0.44809556",
"0.4468844",
"0.4460032",
"0.4425723",
"0.4416638",
"0.44122142",
"0.4399586",
"0.43993717",
"0.43485212",
"0.43359566",
"0.43278587",
"0.42698163",
"0.42442557",
"0.42281184",
"0.42197645",
"0.4219191",
"0.4218309",
"0.42157862",
"0.42050365",
"0.42024297",
"0.41917074",
"0.41906318",
"0.41879004",
"0.41757324",
"0.41343313",
"0.41327047",
"0.40962648",
"0.40892443",
"0.40855587",
"0.40779692",
"0.4076409",
"0.40730914",
"0.40644068",
"0.40459943",
"0.40439418",
"0.40427056",
"0.4042122",
"0.4040144",
"0.40398046",
"0.4039066",
"0.40379512",
"0.40245676",
"0.40230897",
"0.4015996",
"0.40049094",
"0.39840588",
"0.39716953",
"0.39691374",
"0.39634565",
"0.39556053",
"0.39536494",
"0.3950783",
"0.39496425",
"0.39359665",
"0.39291504",
"0.39268395",
"0.39245057",
"0.39245057",
"0.390508",
"0.38992688",
"0.38945225",
"0.38945174",
"0.3887766",
"0.38820538",
"0.38672337",
"0.38645172",
"0.38629058",
"0.38598287",
"0.38561016",
"0.38527358",
"0.3848467",
"0.38367712",
"0.38255265",
"0.3824432",
"0.38138247",
"0.38123122",
"0.38119453",
"0.3805694",
"0.37996662",
"0.37954783",
"0.37948817",
"0.3789593",
"0.37856805",
"0.37832612"
] | 0.88503075 | 0 |
SetPropagateNaturalHeight is a wrapper around gtk_scrolled_window_set_propagate_natural_height(). | SetPropagateNaturalHeight — это обертка вокруг gtk_scrolled_window_set_propagate_natural_height(). | func (v *ScrolledWindow) SetPropagateNaturalHeight(propagate bool) {
C.gtk_scrolled_window_set_propagate_natural_height(v.native(), gbool(propagate))
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v *ScrolledWindow) GetPropagateNaturalHeight() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_height(v.native())\n\treturn gobool(c)\n}",
"func (v *ScrolledWindow) SetPropagateNaturalWidth(propagate bool) {\n\tC.gtk_scrolled_window_set_propagate_natural_width(v.native(), gbool(propagate))\n}",
"func (v *ScrolledWindow) GetPropagateNaturalWidth() bool {\n\tc := C.gtk_scrolled_window_get_propagate_natural_width(v.native())\n\treturn gobool(c)\n}",
"func (w *WidgetImplement) SetFixedHeight(h int) {\n\tw.fixedH = h\n}",
"func updateHeight(n *node) {\n\tn.H = math.Max(height(child(n, 0)), height(child(n, 1))+1)\n}",
"func (v *ScrolledWindow) SetMaxContentHeight(width int) {\n\tC.gtk_scrolled_window_set_max_content_height(v.native(), C.gint(width))\n}",
"func (w *WidgetImplement) FixedHeight() int {\n\treturn w.fixedH\n}",
"func (v *ScrolledWindow) SetMaxContentWidth(width int) {\n\tC.gtk_scrolled_window_set_max_content_width(v.native(), C.gint(width))\n}",
"func (w *Window) updateDimensions() {\n\tif w.windowLayout == nil {\n\t\treturn\n\t}\n\n\tw.window.SetFixedHeight(w.windowLayout.SizeHint().Height())\n}",
"func (window Window) InnerHeight() int {\n\treturn window.Get(\"innerHeight\").Int()\n}",
"func (w *WidgetBase) SetHeight(height int) {\n\tw.size.Y = height\n\tif w.size.Y != 0 {\n\t\tw.sizePolicyY = Minimum\n\t} else {\n\t\tw.sizePolicyY = Expanding\n\t}\n}",
"func (v *TextView) SetBorderWindowSize(tp TextWindowType, size int) {\n\tC.gtk_text_view_set_border_window_size(v.native(), C.GtkTextWindowType(tp), C.gint(size))\n}",
"func (tv *TextView) ResizeIfNeeded(nwSz image.Point) bool {\n\tif nwSz == tv.LinesSize {\n\t\treturn false\n\t}\n\t// fmt.Printf(\"%v needs resize: %v\\n\", tv.Nm, nwSz)\n\ttv.LinesSize = nwSz\n\tdiff := tv.SetSize()\n\tif !diff {\n\t\t// fmt.Printf(\"%v resize no setsize: %v\\n\", tv.Nm, nwSz)\n\t\treturn false\n\t}\n\tly := tv.ParentLayout()\n\tif ly != nil {\n\t\ttv.SetFlag(int(TextViewInReLayout))\n\t\tly.GatherSizes() // can't call Size2D b/c that resets layout\n\t\tly.Layout2DTree()\n\t\ttv.SetFlag(int(TextViewRenderScrolls))\n\t\ttv.ClearFlag(int(TextViewInReLayout))\n\t\t// fmt.Printf(\"resized: %v\\n\", tv.LayData.AllocSize)\n\t}\n\treturn true\n}",
"func (s *ItemScroller) hRecalc() {\n\n\t// Checks if scroll bar should be visible or not\n\tscroll := false\n\tif s.first > 0 {\n\t\tscroll = true\n\t} else {\n\t\tvar posX float32\n\t\tfor _, item := range s.items[s.first:] {\n\t\t\tposX += item.GetPanel().Width()\n\t\t\tif posX > s.width {\n\t\t\t\tscroll = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\ts.setHScrollBar(scroll)\n\n\t// Compute size of scroll button\n\tif scroll && s.autoButtonSize {\n\t\tvar totalWidth float32\n\t\tfor _, item := range s.items {\n\t\t\t// TODO OPTIMIZATION\n\t\t\t// Break when the view/content proportion becomes smaller than the minimum button size\n\t\t\ttotalWidth += item.GetPanel().Width()\n\t\t}\n\t\ts.hscroll.SetButtonSize(s.width * s.width / totalWidth)\n\t}\n\n\t// Items height\n\theight := s.ContentHeight()\n\tif scroll {\n\t\theight -= s.hscroll.Height()\n\t}\n\n\tvar posX float32\n\t// Sets positions of all items\n\tfor pos, ipan := range s.items {\n\t\titem := ipan.GetPanel()\n\t\t// If item is before first visible, sets not visible\n\t\tif pos < s.first {\n\t\t\titem.SetVisible(false)\n\t\t\tcontinue\n\t\t}\n\t\t// If item is after last visible, sets not visible\n\t\tif posX > s.width {\n\t\t\titem.SetVisible(false)\n\t\t\tcontinue\n\t\t}\n\t\t// Sets item position\n\t\titem.SetVisible(true)\n\t\titem.SetPosition(posX, 0)\n\t\tif s.adjustItem {\n\t\t\titem.SetHeight(height)\n\t\t}\n\t\tposX += item.Width()\n\t}\n\n\t// Set scroll bar value if recalc was not due by scroll event\n\tif scroll && !s.scrollBarEvent {\n\t\ts.hscroll.SetValue(float32(s.first) / float32(s.maxFirst()))\n\t}\n\ts.scrollBarEvent = false\n}",
"func (w *Window) Height() int {\n\treturn int(C.ANativeWindow_getHeight(w.cptr()))\n}",
"func (win *Window) ReshowWithInitialSize() {\n\twin.Candy().Guify(\"gtk_window_reshow_with_initial_size\", win)\n}",
"func (t *Text) LinesHeight() int {\n\tpad := t.setter.opts.Padding\n\tif t.size.Y <= 0 {\n\t\treturn 0\n\t}\n\tif t.size.Y-2*pad <= 0 {\n\t\treturn t.size.Y\n\t}\n\ty := pad\n\tfor _, l := range t.lines {\n\t\th := l.h.Round()\n\t\tif y+h > t.size.Y-pad {\n\t\t\tbreak\n\t\t}\n\t\ty += h\n\t}\n\tif h := trailingNewlineHeight(t); h > 0 && y+h <= t.size.Y-pad {\n\t\ty += h\n\t}\n\treturn y + pad\n}",
"func (fb *FlowBox) SetMaxChildrenPerLine(n_children uint) {\n\tC.gtk_flow_box_set_max_children_per_line(fb.native(), C.guint(n_children))\n}",
"func (w *WidgetImplement) SetClampHeight(clamp bool) {\n\tw.clamp[1] = clamp\n}",
"func (v *ScrolledWindow) GetMaxContentHeight() int {\n\tc := C.gtk_scrolled_window_get_max_content_height(v.native())\n\treturn int(c)\n}",
"func (b *BaseElement) OnWindowResized(w, h int32) {\n\tif b.Events.OnWindowResized != nil {\n\t\tb.Events.OnWindowResized(w, h)\n\t}\n}",
"func (win *Window) SetKeepBelow(setting bool) {\n\twin.Candy().Guify(\"gtk_window_set_keep_below\", win, setting)\n}",
"func (win *Window) Maximize() {\n\twin.Candy().Guify(\"gtk_window_maximize\", win)\n}",
"func (s *ItemScroller) SetAutoHeight(maxHeight float32) {\n\n\ts.maxAutoHeight = maxHeight\n}",
"func (w *Window) SetMaximized(maximize bool) {\n\tif maximize == w.maximized {\n\t\treturn\n\t}\n\n\tif maximize {\n\t\tw.origX, w.origY = w.Pos()\n\t\tw.origWidth, w.origHeight = w.Size()\n\t\tw.maximized = true\n\t\tw.SetPos(0, 0)\n\t\twidth, height := ScreenSize()\n\t\tw.SetSize(width, height)\n\t} else {\n\t\tw.maximized = false\n\t\tw.SetPos(w.origX, w.origY)\n\t\tw.SetSize(w.origWidth, w.origHeight)\n\t}\n\tw.ResizeChildren()\n\tw.PlaceChildren()\n}",
"func MaxN(window int) func(s []float64) []float64 {\n\treturn func(s []float64) []float64 {\n\t\tmax := make([]float64, 0)\n\t\ti := 0\n\t\tfor _, v := range s {\n\t\t\tif i < window {\n\t\t\t\tfor j := 0; j < i; j++ {\n\t\t\t\t\tv = math.Max(v, max[j])\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor j := i - window; j < i; j++ {\n\t\t\t\t\tv = math.Max(v, max[j])\n\t\t\t\t}\n\t\t\t}\n\t\t\tmax = append(max, v)\n\t\t}\n\n\t\treturn max\n\t}\n}",
"func NaturalBreaks(data []float64, nClasses int) []float64 {\n\t// sort data in numerical order, since this is expected by the matrices function\n\tdata = sortData(data)\n\n\t// sanity check\n\tuniq := deduplicate(data)\n\tif nClasses >= len(uniq) {\n\t\treturn uniq\n\t}\n\n\t// get our basic matrices (we only need lower class limits here)\n\tlowerClassLimits, _ := getMatrices(data, nClasses)\n\n\t// extract nClasses out of the computed matrices\n\treturn breaks(data, lowerClassLimits, nClasses)\n}",
"func injectHeightNotifier(content []byte, name string) []byte {\n\tcontent = append([]byte(`<div id=\"wrapper\">`), content...)\n\t// From https://stackoverflow.com/a/44547866 and extended to also pass\n\t// back the element id, as we can have multiple pages.\n\treturn append(content, []byte(fmt.Sprintf(`</div><script type=\"text/javascript\">\nwindow.addEventListener(\"load\", function(){\n if(window.self === window.top) return; // if w.self === w.top, we are not in an iframe\n send_height_to_parent_function = function(){\n var height = document.getElementById(\"wrapper\").offsetHeight;\n parent.postMessage({\"height\" : height , \"id\": \"%s\"}, \"*\");\n }\n send_height_to_parent_function(); //whenever the page is loaded\n window.addEventListener(\"resize\", send_height_to_parent_function); // whenever the page is resized\n var observer = new MutationObserver(send_height_to_parent_function); // whenever DOM changes PT1\n var config = { attributes: true, childList: true, characterData: true, subtree:true}; // PT2\n observer.observe(window.document, config); // PT3\n});\n</script>`, name))...)\n}",
"func (s *ItemScroller) vRecalc() {\n\n\t// Checks if scroll bar should be visible or not\n\tscroll := false\n\tif s.first > 0 {\n\t\tscroll = true\n\t} else {\n\t\tvar posY float32\n\t\tfor _, item := range s.items[s.first:] {\n\t\t\tposY += item.Height()\n\t\t\tif posY > s.height {\n\t\t\t\tscroll = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\ts.setVScrollBar(scroll)\n\n\t// Compute size of scroll button\n\tif scroll && s.autoButtonSize {\n\t\tvar totalHeight float32\n\t\tfor _, item := range s.items {\n\t\t\t// TODO OPTIMIZATION\n\t\t\t// Break when the view/content proportion becomes smaller than the minimum button size\n\t\t\ttotalHeight += item.Height()\n\t\t}\n\t\ts.vscroll.SetButtonSize(s.height * s.height / totalHeight)\n\t}\n\n\t// Items width\n\twidth := s.ContentWidth()\n\tif scroll {\n\t\twidth -= s.vscroll.Width()\n\t}\n\n\tvar posY float32\n\t// Sets positions of all items\n\tfor pos, ipan := range s.items {\n\t\titem := ipan.GetPanel()\n\t\tif pos < s.first {\n\t\t\titem.SetVisible(false)\n\t\t\tcontinue\n\t\t}\n\t\t// If item is after last visible, sets not visible\n\t\tif posY > s.height {\n\t\t\titem.SetVisible(false)\n\t\t\tcontinue\n\t\t}\n\t\t// Sets item position\n\t\titem.SetVisible(true)\n\t\titem.SetPosition(0, posY)\n\t\tif s.adjustItem {\n\t\t\titem.SetWidth(width)\n\t\t}\n\t\tposY += ipan.Height()\n\t}\n\n\t// Set scroll bar value if recalc was not due by scroll event\n\tif scroll && !s.scrollBarEvent {\n\t\ts.vscroll.SetValue(float32(s.first) / float32(s.maxFirst()))\n\t}\n\ts.scrollBarEvent = false\n}",
"func (d Doc) DefaultLineHeight() float64 {\n\treturn d.capValue * float64(d.fontSize) / 2000.0 * d.lineSpread\n}",
"func (v *TextView) SetPixelsBelowLines(px int) {\n\tC.gtk_text_view_set_pixels_below_lines(v.native(), C.gint(px))\n}",
"func TestServerZeroWindowAdjust(t *testing.T) {\n\tconn := dial(exitStatusZeroHandler, t)\n\tdefer conn.Close()\n\tsession, err := conn.NewSession()\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to request new session: %v\", err)\n\t}\n\tdefer session.Close()\n\n\tif err := session.Shell(); err != nil {\n\t\tt.Fatalf(\"Unable to execute command: %v\", err)\n\t}\n\n\t// send a bogus zero sized window update\n\tsession.clientChan.sendWindowAdj(0)\n\n\terr = session.Wait()\n\tif err != nil {\n\t\tt.Fatalf(\"expected nil but got %v\", err)\n\t}\n}",
"func (w *ScrollWidget) SetMax(max int) {\n\tw.max = max\n\tw.clampCurrent()\n}",
"func (sf *TWindow) SetSizable(sizable bool) {\n\tsf.fixedSize = !sizable\n}",
"func (label *LabelWidget) SetHeight(height float64) {\n\tlabel.box.height = height\n\tlabel.fixedHeight = true\n\tlabel.RequestReflow()\n}",
"func (w *WidgetImplement) SetHeight(h int) {\n\tw.h = h\n}",
"func (sf *TWindow) SetMaximized(maximize bool) {\n\tif maximize == sf.maximized {\n\t\treturn\n\t}\n\n\tif maximize {\n\t\tx, y := sf.pos.Get()\n\t\tsf.posOrig.X().Set(x)\n\t\tsf.posOrig.Y().Set(y)\n\t\tsf.origWidth, sf.origHeight = sf.Size()\n\t\tsf.maximized = true\n\t\tsf.SetPos(0, 0)\n\t\twidth, height := ScreenSize()\n\t\tsf.SetSize(width, height)\n\t} else {\n\t\tsf.maximized = false\n\t\tsf.SetPos(sf.posOrig.GetX(), sf.posOrig.GetY())\n\t\tsf.SetSize(sf.origWidth, sf.origHeight)\n\t}\n\tsf.ResizeChildren()\n\tsf.PlaceChildren()\n}",
"func (m *Model) SetHeight(height int) {\n\t// Make space for the description string.\n\tm.height = clamp(height, 2, m.maxHeight)\n\tfor _, l := range m.valueLists {\n\t\tl.SetHeight(m.height - 1)\n\t\t// Force recomputing the keybindings, which\n\t\t// is dependent on the page size.\n\t\tl.SetFilteringEnabled(true)\n\t}\n}",
"func (v *ScrolledWindow) GetMaxContentWidth() int {\n\tc := C.gtk_scrolled_window_get_max_content_width(v.native())\n\treturn int(c)\n}",
"func (l *Label) setHeight(g *Graph) bool {\n // keep track of whether or not the height has changed\n changed := false\n \n Assert (nilLabel, l != nil)\n Assert(nilGraph, g != nil)\n Assert(nilLabelStore, g.labelStore != nil)\n \n var newHeight uint8\n \n // get the heights of the right and left nodes\n left, _ := l.left(g) // TODO do not ignore this error\n right, _ := l.right(g) // TODO do not ignore this error\n \n lh := left.height() \n rh := right.height() \n \n // determine the new height\n if (lh < rh) {\n newHeight = uint8(1 + rh)\n } else {\n newHeight = uint8(1 + lh)\n }\n \n if int8(newHeight) != l.height() {\n changed = true\n }\n \n l.h = newHeight\n \n return changed\n}",
"func (sf *TWindow) Sizable() bool {\n\treturn !sf.fixedSize\n}",
"func (w *LWindow) MHeight() int32 {\n\treturn w.mHeight\n}",
"func (window Window) OuterHeight() int {\n\treturn window.Get(\"outerHeight\").Int()\n}",
"func (v *View) ScrollDown(n int) {\n\t// Try to scroll by n but if it would overflow, scroll by 1\n\tif v.Topline+n <= v.Buf.NumLines {\n\t\tv.Topline += n\n\t} else if v.Topline < v.Buf.NumLines-1 {\n\t\tv.Topline++\n\t}\n}",
"func (me *XsdGoPkgHasElems_MaxHeight) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElems_MaxHeight; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}",
"func (wg *WidgetImplement) SetFixedWidth(w int) {\n\twg.fixedW = w\n}",
"func AllNaturalBreaks(data []float64, maxClasses int) [][]float64 {\n\t// sort data in numerical order, since this is expected by the matrices function\n\tdata = sortData(data)\n\n\t// sanity check\n\tuniq := deduplicate(data)\n\tif maxClasses > len(uniq) {\n\t\tmaxClasses = len(uniq)\n\t}\n\n\t// get our basic matrices (we only need lower class limits here)\n\tlowerClassLimits, _ := getMatrices(data, maxClasses)\n\n\t// extract nClasses out of the computed matrices\n\tallBreaks := [][]float64{}\n\tfor i := 2; i <= maxClasses; i++ {\n\t\tnClasses := breaks(data, lowerClassLimits, i)\n\t\tif i == len(uniq) {\n\t\t\tnClasses = uniq\n\t\t}\n\t\tallBreaks = append(allBreaks, nClasses)\n\t}\n\treturn allBreaks\n}",
"func (h *Handle) NeighSet(neigh *Neigh) error {\n\treturn h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE)\n}",
"func (v *View) ScrollDown(n int) {\n\t// Try to scroll by n but if it would overflow, scroll by 1\n\tif v.topline+n <= len(v.buf.lines)-v.height {\n\t\tv.topline += n\n\t} else if v.topline < len(v.buf.lines)-v.height {\n\t\tv.topline++\n\t}\n}",
"func (me *XsdGoPkgHasElem_MaxHeight) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElem_MaxHeight; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}",
"func (fb *FlowBox) SetMinChildrenPerLine(n_children uint) {\n\tC.gtk_flow_box_set_min_children_per_line(fb.native(), C.guint(n_children))\n}",
"func (win *Window) SetKeepAbove(setting bool) {\n\twin.Candy().Guify(\"gtk_window_set_keep_above\", win, setting)\n}",
"func (s *ItemScroller) autoSize() {\n\n\tif s.maxAutoWidth == 0 && s.maxAutoHeight == 0 {\n\t\treturn\n\t}\n\n\tvar width float32\n\tvar height float32\n\tfor _, item := range s.items {\n\t\tpanel := item.GetPanel()\n\t\tif panel.Width() > width {\n\t\t\twidth = panel.Width()\n\t\t}\n\t\theight += panel.Height()\n\t}\n\n\t// If auto maximum width enabled\n\tif s.maxAutoWidth > 0 {\n\t\tif width <= s.maxAutoWidth {\n\t\t\ts.SetContentWidth(width)\n\t\t}\n\t}\n\t// If auto maximum height enabled\n\tif s.maxAutoHeight > 0 {\n\t\tif height <= s.maxAutoHeight {\n\t\t\ts.SetContentHeight(height)\n\t\t}\n\t}\n}",
"func (s *Scroll) SetWindow(win sparta.Window) {\n\ts.win = win\n}",
"func (this *WeightedSum) ivweightedSumPropagate(varid core.VarId, evt *core.ChangeEvent) {\n\n\tthis.checkXmultCeqY(varid, evt)\n\n\tthis.ivsumPropagate(varid, evt)\n}",
"func (c *HostClient) SetMaxConns(newMaxConns int) {\n\tc.connsLock.Lock()\n\tc.MaxConns = newMaxConns\n\tc.connsLock.Unlock()\n}",
"func (win *Window) Height() int {\n\tsize := C.sfRenderWindow_getSize(win.win)\n\treturn int(size.y)\n}",
"func (fb *FlowBox) SetHAdjustment(adjustment *Adjustment) {\n\tC.gtk_flow_box_set_hadjustment(fb.native(), adjustment.native())\n}",
"func (wg *WidgetImplement) SetFixedSize(w, h int) {\n\twg.fixedW = w\n\twg.fixedH = h\n}",
"func (win *Window) SetPolicy(allowShrink, allowGrow, autoShrink int) {\n\twin.Candy().Guify(\"gtk_window_set_policy\", win, allowShrink, allowGrow, autoShrink)\n}",
"func (w *Wrapper) NaturalJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"NATURAL JOIN\", condition)\n\treturn w\n}",
"func (p *Protocol) GetEpochHeight(epochNum uint64) uint64 {\n\tif epochNum == 0 {\n\t\treturn 0\n\t}\n\tdardanellesEpoch := p.GetEpochNum(p.dardanellesHeight)\n\tif !p.dardanellesOn || epochNum <= dardanellesEpoch {\n\t\treturn (epochNum-1)*p.numDelegates*p.numSubEpochs + 1\n\t}\n\tdardanellesEpochHeight := p.GetEpochHeight(dardanellesEpoch)\n\treturn dardanellesEpochHeight + (epochNum-dardanellesEpoch)*p.numDelegates*p.numSubEpochsDardanelles\n}",
"func AbsEndRun(window fyne.Window, c *fyne.Container, w, h int) {\n\twindow.Resize(fyne.NewSize(float32(w), float32(h)))\n\twindow.SetFixedSize(true)\n\twindow.SetPadded(false)\n\twindow.SetContent(c)\n\twindow.ShowAndRun()\n}",
"func (b *Bound) Height() float64 {\n\treturn b.ne.Y() - b.sw.Y()\n}",
"func (v *TextView) SetPixelsAboveLines(px int) {\n\tC.gtk_text_view_set_pixels_above_lines(v.native(), C.gint(px))\n}",
"func Natural(value float64) bool {\n\treturn Whole(value) && value > 0\n}",
"func (this *FeedableBuffer) Maximize() {\n\tthis.ExpandTo(this.maxByteCount)\n}",
"func UpdateMaxNodesCount(nodesCount int) {\n\tmaxNodesCount.Set(float64(nodesCount))\n}",
"func (n *node) confine(adjustment float64) float64 {\n\tconfined := math.Max(1/DiffConfineFactor, adjustment)\n\tconfined = math.Min(DiffConfineFactor, confined)\n\n\treturn confined\n}",
"func (v *TextView) GetBorderWindowSize(tp TextWindowType) int {\n\treturn int(C.gtk_text_view_get_border_window_size(v.native(), C.GtkTextWindowType(tp)))\n}",
"func (wh *WholeNet) Backpropagate(target *t.Tensor) {\n\tlastLayer := (*wh).Layers[len((*wh).Layers)-1].GetOutput()\n\n\tdifference := lastLayer.Sub(target)\n\t(*wh).Layers[len((*wh).Layers)-1].CalculateGradients(&difference)\n\n\tfor i := len((*wh).Layers) - 2; i >= 0; i-- {\n\t\tgrad := (*wh).Layers[i+1].GetGradients()\n\t\t(*wh).Layers[i].CalculateGradients(&grad)\n\t}\n\tfor i := range (*wh).Layers {\n\t\t(*wh).Layers[i].UpdateWeights()\n\t}\n}",
"func (w *WidgetImplement) OnPerformLayout(self Widget, ctx *canvas.Context) {\n\tif w.layout != nil {\n\t\tw.layout.OnPerformLayout(self, ctx)\n\t} else {\n\t\tfor _, child := range w.children {\n\t\t\tprefW, prefH := child.PreferredSize(child, ctx)\n\t\t\tfixW, fixH := child.FixedSize()\n\t\t\tw := toI(fixW > 0, fixW, prefW)\n\t\t\th := toI(fixH > 0, fixH, prefH)\n\t\t\tchild.SetSize(w, h)\n\t\t\tchild.OnPerformLayout(child, ctx)\n\t\t}\n\t}\n}",
"func WithMaxRound(round types.RoundID) OptionFunc {\n\treturn func(wc *WeakCoin) {\n\t\twc.config.MaxRound = round\n\t}\n}",
"func newAdjustmentFromNative(obj unsafe.Pointer) interface{} {\n\ta := &Adjustment{}\n\ta.object = C.to_GtkAdjustment(obj)\n\n\tif gobject.IsObjectFloating(a) {\n\t\tgobject.RefSink(a)\n\t} else {\n\t\tgobject.Ref(a)\n\t}\n\tadjustmentFinalizer(a)\n\n\treturn a\n}",
"func SetMaxNumberEntries(sz int) {\n\tC.hepevt_set_max_number_entries(C.int(sz))\n}",
"func WindowSize(s int) ConfigFunc {\n\treturn func(c *Config) {\n\t\tc.WindowSize = s\n\t}\n}",
"func (v *View) PageDown() {\n\tif len(v.buf.lines)-(v.topline+v.height) > v.height {\n\t\tv.ScrollDown(v.height)\n\t} else {\n\t\tif len(v.buf.lines) >= v.height {\n\t\t\tv.topline = len(v.buf.lines) - v.height\n\t\t}\n\t}\n}",
"func WithMaxStaleness(ms time.Duration) Option {\n\treturn func(rp *ReadPref) error {\n\t\trp.maxStaleness = ms\n\t\trp.maxStalenessSet = true\n\t\treturn nil\n\t}\n}",
"func (s *Service) onWindowResize(channel chan os.Signal) {\n\t//stdScr, _ := gc.Init()\n\t//stdScr.ScrollOk(true)\n\t//gc.NewLines(true)\n\tfor {\n\t\t<-channel\n\t\t//gc.StdScr().Clear()\n\t\t//rows, cols := gc.StdScr().MaxYX()\n\t\tcols, rows := GetScreenSize()\n\t\ts.screenRows = rows\n\t\ts.screenCols = cols\n\t\ts.resizeWindows()\n\t\t//gc.End()\n\t\t//gc.Update()\n\t\t//gc.StdScr().Refresh()\n\t}\n}",
"func SetMaxDepthDifference(maxDifference float64) {\n\tHeightMininum = maxDifference\n}",
"func SetScrollContentTrackerSize(sa *qtwidgets.QScrollArea) {\n\twgt := sa.Widget()\n\tsa.InheritResizeEvent(func(arg0 *qtgui.QResizeEvent) {\n\t\tosz := arg0.OldSize()\n\t\tnsz := arg0.Size()\n\t\tif false {\n\t\t\tlog.Println(osz.Width(), osz.Height(), nsz.Width(), nsz.Height())\n\t\t}\n\t\tif osz.Width() != nsz.Width() {\n\t\t\twgt.SetMaximumWidth(nsz.Width())\n\t\t}\n\t\t// this.ScrollArea_2.ResizeEvent(arg0)\n\t\targ0.Ignore() // I ignore, you handle it. replace explict call parent's\n\t})\n}",
"func (s *ItemScroller) setHScrollBar(state bool) {\n\n\t// Visible\n\tif state {\n\t\tvar scrollHeight float32 = 20\n\t\tif s.hscroll == nil {\n\t\t\ts.hscroll = NewHScrollBar(0, 0)\n\t\t\ts.hscroll.SetBorders(1, 0, 0, 0)\n\t\t\ts.hscroll.Subscribe(OnChange, s.onScrollBarEvent)\n\t\t\ts.Panel.Add(s.hscroll)\n\t\t}\n\t\ts.hscroll.SetSize(s.ContentWidth(), scrollHeight)\n\t\ts.hscroll.SetPositionX(0)\n\t\ts.hscroll.SetPositionY(s.ContentHeight() - scrollHeight)\n\t\ts.hscroll.recalc()\n\t\ts.hscroll.SetVisible(true)\n\t\t// Not visible\n\t} else {\n\t\tif s.hscroll != nil {\n\t\t\ts.hscroll.SetVisible(false)\n\t\t}\n\t}\n}",
"func (me XsdGoPkgHasAttr_MaxLines_XsdtInt_2) MaxLinesDefault() xsdt.Int {\r\n\tvar x = new(xsdt.Int)\r\n\tx.Set(\"2\")\r\n\treturn *x\r\n}",
"func (v *View) ScrollUp(n int) {\n\t// Try to scroll by n but if it would overflow, scroll by 1\n\tif v.Topline-n >= 0 {\n\t\tv.Topline -= n\n\t} else if v.Topline > 0 {\n\t\tv.Topline--\n\t}\n}",
"func WithNonMonotonic(nm bool) CounterOptionApplier {\n\treturn counterOptionWrapper{\n\t\tF: func(d *Descriptor) {\n\t\t\td.alternate = nm\n\t\t},\n\t}\n}",
"func nlimit(n int) int {\n\tif n > maxNewlines {\n\t\tn = maxNewlines\n\t}\n\treturn n\n}",
"func (syncer *MerkleSyncer) updateHeight() {\n\tatomic.AddUint64(&syncer.height, 1)\n}",
"func (canvas *CanvasWidget) SetHeight(height float64) {\n\tcanvas.box.height = height\n\tcanvas.fixedHeight = true\n\tcanvas.RequestReflow()\n}",
"func (t *Link) SetUnknownHeight(i interface{}) {\n\tif t.unknown_ == nil {\n\t\tt.unknown_ = make(map[string]interface{})\n\t}\n\ttmp := &heightIntermediateType{}\n\ttmp.unknown_ = i\n\tt.height = tmp\n\n}",
"func (t *Terminal) Resize(s fyne.Size) {\n\tif s.Width == t.Size().Width && s.Height == t.Size().Height {\n\t\treturn\n\t}\n\tif s.Width < 20 { // not sure why we get tiny sizes\n\t\treturn\n\t}\n\tt.BaseWidget.Resize(s)\n\tt.content.Resize(s)\n\n\tcellSize := t.guessCellSize()\n\toldRows := int(t.config.Rows)\n\n\tt.config.Columns = uint(math.Floor(float64(s.Width) / float64(cellSize.Width)))\n\tt.config.Rows = uint(math.Floor(float64(s.Height) / float64(cellSize.Height)))\n\tif t.scrollBottom == 0 || t.scrollBottom == oldRows-1 {\n\t\tt.scrollBottom = int(t.config.Rows) - 1\n\t}\n\tt.onConfigure()\n\n\tgo t.updatePTYSize()\n}",
"func PushDownWindowAggregateMax() BoolFlag {\n\treturn pushDownWindowAggregateMax\n}",
"func WithMonotonic(m bool) GaugeOptionApplier {\n\treturn gaugeOptionWrapper{\n\t\tF: func(d *Descriptor) {\n\t\t\td.alternate = m\n\t\t},\n\t}\n}",
"func (win *Window) SetMaximizeCallback(callback WindowMaximizeCallback) WindowMaximizeCallback {\n\tcallbacks, exist := windowCallbacks[win]\n\tif !exist {\n\t\tcallbacks = new(WindowCallbacks)\n\t\twindowCallbacks[win] = callbacks\n\t}\n\n\tpreviousCallback := callbacks.MaximizeCallback\n\tcallbacks.MaximizeCallback = callback\n\n\tif callback != nil {\n\t\tC.goSetWindowMaximizeCallback(win.c())\n\t} else {\n\t\tC.goRemoveWindowMaximizeCallback(win.c())\n\t}\n\n\treturn previousCallback\n}",
"func (cs *ConsensusState) updateHeight(height int64) {\n\tcs.Height = height\n}",
"func (t *Link) SetHeight(v int64) {\n\tt.height = &heightIntermediateType{nonNegativeInteger: &v}\n\n}",
"func (o *WaitListParams) SetHeight(height *uint64) {\n\to.Height = height\n}",
"func (v *View) ScrollUp(n int) {\n\t// Try to scroll by n but if it would overflow, scroll by 1\n\tif v.topline-n >= 0 {\n\t\tv.topline -= n\n\t} else if v.topline > 0 {\n\t\tv.topline--\n\t}\n}",
"func (n *windowNode) populateValues() error {\n\tacc := n.windowsAcc.Wtxn(n.planner.session)\n\trowCount := n.wrappedRenderVals.Len()\n\tn.values.rows = NewRowContainer(\n\t\tn.planner.session.TxnState.makeBoundAccount(), n.values.columns, rowCount,\n\t)\n\n\trow := make(parser.DTuple, len(n.windowRender))\n\tfor i := 0; i < rowCount; i++ {\n\t\twrappedRow := n.wrappedRenderVals.At(i)\n\n\t\tn.curRowIdx = i // Point all windowFuncHolders to the correct row values.\n\t\tcurColIdx := 0\n\t\tcurFnIdx := 0\n\t\tfor j := range row {\n\t\t\tif curWindowRender := n.windowRender[j]; curWindowRender == nil {\n\t\t\t\t// If the windowRender at this index is nil, propagate the datum\n\t\t\t\t// directly from the wrapped planNode. It wasn't changed by windowNode.\n\t\t\t\trow[j] = wrappedRow[curColIdx]\n\t\t\t\tcurColIdx++\n\t\t\t} else {\n\t\t\t\t// If the windowRender is not nil, ignore 0 or more columns from the wrapped\n\t\t\t\t// planNode. These were used as arguments to window functions all beneath\n\t\t\t\t// a single windowRender.\n\t\t\t\t// SELECT rank() over () from t; -> ignore 0 from wrapped values\n\t\t\t\t// SELECT (rank() over () + avg(b) over ()) from t; -> ignore 1 from wrapped values\n\t\t\t\t// SELECT (avg(a) over () + avg(b) over ()) from t; -> ignore 2 from wrapped values\n\t\t\t\tfor ; curFnIdx < len(n.funcs); curFnIdx++ {\n\t\t\t\t\twindowFn := n.funcs[curFnIdx]\n\t\t\t\t\tif windowFn.argIdxStart != curColIdx {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tcurColIdx += windowFn.argCount\n\t\t\t\t}\n\t\t\t\t// Instead, we evaluate the current window render, which depends on at least\n\t\t\t\t// one window function, at the given row.\n\t\t\t\tres, err := curWindowRender.Eval(&n.planner.evalCtx)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\trow[j] = res\n\t\t\t}\n\t\t}\n\n\t\tif _, err := n.values.rows.AddRow(row); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Done using the output of computeWindows, release memory and clear\n\t// 
accounts.\n\tn.wrappedRenderVals.Close()\n\tn.wrappedRenderVals = nil\n\tn.wrappedIndexedVarVals.Close()\n\tn.wrappedIndexedVarVals = nil\n\tn.windowValues = nil\n\tacc.Close()\n\n\treturn nil\n}",
"func (v *Label) SetLines(lines int) {\n\tC.gtk_label_set_lines(v.native(), C.gint(lines))\n}",
"func (m *MainWindow) initializeSidebar() {\n\tsidebar_vbox := gtk.NewVBox(false, 0)\n\n\tserver_info_frame := gtk.NewFrame(ctx.Translator.Translate(\"Server information\", nil))\n\tsidebar_vbox.PackStart(server_info_frame, true, true, 5)\n\tsi_vbox := gtk.NewVBox(false, 0)\n\tserver_info_frame.Add(si_vbox)\n\n\t// Scrolled thing.\n\tsi_scroll := gtk.NewScrolledWindow(nil, nil)\n\tsi_scroll.SetPolicy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n\tsi_vbox.PackStart(si_scroll, true, true, 5)\n\n\t// Server's information.\n\tm.server_info = gtk.NewTreeView()\n\tm.server_info.SetModel(m.server_info_store)\n\n\tkey_column := gtk.NewTreeViewColumnWithAttributes(ctx.Translator.Translate(\"Key\", nil), gtk.NewCellRendererText(), \"markup\", 0)\n\tm.server_info.AppendColumn(key_column)\n\n\tvalue_column := gtk.NewTreeViewColumnWithAttributes(ctx.Translator.Translate(\"Value\", nil), gtk.NewCellRendererText(), \"markup\", 1)\n\tm.server_info.AppendColumn(value_column)\n\n\tsi_scroll.Add(m.server_info)\n\n\t// Players information.\n\tplayers_info_frame := gtk.NewFrame(ctx.Translator.Translate(\"Players\", nil))\n\tsidebar_vbox.PackStart(players_info_frame, true, true, 5)\n\n\tpi_scroll := gtk.NewScrolledWindow(nil, nil)\n\tpi_scroll.SetPolicy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n\tplayers_info_frame.Add(pi_scroll)\n\n\tm.players_info = gtk.NewTreeView()\n\tm.players_info.SetModel(m.players_info_store)\n\tpi_scroll.Add(m.players_info)\n\n\tname_column := gtk.NewTreeViewColumnWithAttributes(ctx.Translator.Translate(\"Player name\", nil), gtk.NewCellRendererText(), \"markup\", 0)\n\tm.players_info.AppendColumn(name_column)\n\n\tfrags_column := gtk.NewTreeViewColumnWithAttributes(ctx.Translator.Translate(\"Frags\", nil), gtk.NewCellRendererText(), \"markup\", 1)\n\tm.players_info.AppendColumn(frags_column)\n\n\tping_column := gtk.NewTreeViewColumnWithAttributes(ctx.Translator.Translate(\"Ping\", nil), gtk.NewCellRendererText(), \"markup\", 
2)\n\tm.players_info.AppendColumn(ping_column)\n\n\t// Show CVars button.\n\tshow_cvars_button := gtk.NewButtonWithLabel(ctx.Translator.Translate(\"Show CVars\", nil))\n\tshow_cvars_button.SetTooltipText(ctx.Translator.Translate(\"Show server's CVars\", nil))\n\tshow_cvars_button.Clicked(m.showServerCVars)\n\tsidebar_vbox.PackStart(show_cvars_button, false, true, 5)\n\n\t// Quick connect frame.\n\tquick_connect_frame := gtk.NewFrame(ctx.Translator.Translate(\"Quick connect\", nil))\n\tsidebar_vbox.PackStart(quick_connect_frame, false, true, 5)\n\tqc_vbox := gtk.NewVBox(false, 0)\n\tquick_connect_frame.Add(qc_vbox)\n\n\t// Server address.\n\tsrv_tooltip := ctx.Translator.Translate(\"Server address we will connect to\", nil)\n\tsrv_label := gtk.NewLabel(ctx.Translator.Translate(\"Server address:\", nil))\n\tsrv_label.SetTooltipText(srv_tooltip)\n\tqc_vbox.PackStart(srv_label, false, true, 5)\n\n\tm.qc_server_address = gtk.NewEntry()\n\tm.qc_server_address.SetTooltipText(srv_tooltip)\n\tqc_vbox.PackStart(m.qc_server_address, false, true, 5)\n\n\t// Password.\n\tpass_tooltip := ctx.Translator.Translate(\"Password we will use for server\", nil)\n\tpass_label := gtk.NewLabel(ctx.Translator.Translate(\"Password:\", nil))\n\tpass_label.SetTooltipText(pass_tooltip)\n\tqc_vbox.PackStart(pass_label, false, true, 5)\n\n\tm.qc_password = gtk.NewEntry()\n\tm.qc_password.SetTooltipText(pass_tooltip)\n\tqc_vbox.PackStart(m.qc_password, false, true, 5)\n\n\t// Nickname\n\tnick_tooltip := ctx.Translator.Translate(\"Nickname we will use\", nil)\n\tnick_label := gtk.NewLabel(ctx.Translator.Translate(\"Nickname:\", nil))\n\tnick_label.SetTooltipText(nick_tooltip)\n\tqc_vbox.PackStart(nick_label, false, true, 5)\n\n\tm.qc_nickname = gtk.NewEntry()\n\tm.qc_nickname.SetTooltipText(nick_tooltip)\n\tqc_vbox.PackStart(m.qc_nickname, false, true, 5)\n\n\tm.hpane.Add2(sidebar_vbox)\n}"
] | [
"0.8024622",
"0.7986467",
"0.6738286",
"0.48992896",
"0.46789613",
"0.45104513",
"0.44237876",
"0.43279177",
"0.42821398",
"0.42359063",
"0.41540527",
"0.4000865",
"0.39987636",
"0.397609",
"0.39755943",
"0.39618266",
"0.39565128",
"0.3954699",
"0.39486593",
"0.39300218",
"0.3919906",
"0.39164233",
"0.39049605",
"0.38074327",
"0.37919587",
"0.3772034",
"0.37640107",
"0.37321723",
"0.37288117",
"0.37184706",
"0.3710474",
"0.3704771",
"0.36775517",
"0.36725175",
"0.36694705",
"0.36542833",
"0.3634152",
"0.3626809",
"0.36108017",
"0.3595091",
"0.35924673",
"0.35637718",
"0.3561461",
"0.3558079",
"0.3556662",
"0.3551393",
"0.35470152",
"0.35462645",
"0.35355553",
"0.35283792",
"0.35242385",
"0.35151252",
"0.34993306",
"0.34949544",
"0.34920505",
"0.3479313",
"0.34644112",
"0.3459414",
"0.34541786",
"0.34478474",
"0.34473425",
"0.34470138",
"0.34436092",
"0.34434816",
"0.34422895",
"0.34412178",
"0.3438194",
"0.3438064",
"0.34358144",
"0.34306887",
"0.34234542",
"0.34189036",
"0.34172365",
"0.341685",
"0.34111375",
"0.34086567",
"0.33986938",
"0.33971453",
"0.33878395",
"0.33844122",
"0.338411",
"0.33826143",
"0.3382605",
"0.33772534",
"0.33708617",
"0.33675802",
"0.3361676",
"0.33601463",
"0.3353844",
"0.33449438",
"0.3335192",
"0.3333231",
"0.33089995",
"0.33083972",
"0.33070737",
"0.3298121",
"0.32930812",
"0.32929182",
"0.32870975",
"0.3285696"
] | 0.91302776 | 0 |
Post is post message to slack | Post — отправить сообщение в slack | func (s *Slack) Post(msg Message) (err error) {
b, err := json.Marshal(msg)
if err != nil {
return err
}
buf := bytes.NewBuffer(b)
req, err := http.NewRequest("POST", s.URL, buf)
if err != nil {
return errors.Wrap(err, "Can't make new request")
}
req.Header.Set("User-Agent", "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5")
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json")
if s.Verbose {
if curl, err := http2curl.GetCurlCommand(req); err == nil {
fmt.Fprintf(os.Stderr, "[CURL]: %v", curl)
}
}
client := http.Client{Timeout: s.Timeout}
res, err := client.Do(req)
if err != nil {
return errors.Wrap(err, "Can't post request")
}
defer func() {
if err := res.Body.Close(); err != nil {
fmt.Fprintf(os.Stderr, "[WARN]: %v", errors.Wrap(err, "Can't close response body"))
}
}()
if res.StatusCode != 200 {
return errors.New("Slack response status is not 2xx")
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (s SlackReporter) Post(ctx context.Context, msg string) (string, error) {\n\t_, ts, err := s.api.PostMessageContext(ctx, s.channel, slack.MsgOptionText(msg, false))\n\treturn ts, err\n}",
"func (t *FakeSlackChat) postMessage(msg Message) error {\n\treturn nil\n}",
"func (s Slack) PostMessage(title string,message string)(errs []error) {\n\tfield1 := slack.Field{Title: title, Value: message}\n\t//field2 := slack.Field{Title: \"AnythingKey\", Value: \"AnythingValue\"}\n\n\tattachment := slack.Attachment{}\n\t//attachment.AddField(field1).AddField(field2)\n\tattachment.AddField(field1)\n\tcolor := \"good\"\n\tattachment.Color = &color\n\tpayload := slack.Payload {\n\t\tUsername: s.UserName,\n\t\tChannel: s.Channel,\n\t\tAttachments: []slack.Attachment{attachment},\n\t}\n\terr := slack.Send(s.WebhookUrl, \"\", payload)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (b *Bot) post(message map[string]interface{}, reply *domain.WorkReply, data *domain.Context, sub *subscription) error {\n\tmessage[\"text\"] = mainMessageFormatted()\n\tmessage[\"as_user\"] = true\n\tvar err error\n\t_, err = sub.s.Do(\"POST\", \"chat.postMessage\", message)\n\treturn err\n}",
"func (m *Module) Post(txt string, params *PostMessageParameters) {\n\tm.PostC(m.config.SlackConfig.Channel, txt, params)\n}",
"func SlackPost(username string, icon string, text string, hookurl string) (err error) {\n\tapiURL, err := url.ParseRequestURI(hookurl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tquery := url.Values{}\n\tapiURL.RawQuery = query.Encode()\n\tdata, _ := json.Marshal(map[string]string{\n\t\t\"text\": text,\n\t\t\"icon_emoji\": icon,\n\t\t\"username\": username,\n\t})\n\tclient := &http.Client{}\n\tr, err := http.NewRequest(\"POST\", hookurl, strings.NewReader(string(data)))\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.Header.Set(\"Content-Type\", \"application/json\")\n\t_, err = client.Do(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (s *instance) postMessage(jsondata []byte) {\r\n\tif s.client != nil {\r\n\t\tchannelID, timestamp, err := s.client.PostMessage(s.config.Channel, slack.MsgOptionText(string(jsondata), false), slack.MsgOptionUsername(\"g0-h0m3\"), slack.MsgOptionAsUser(true))\r\n\t\tif err == nil {\r\n\t\t\ts.service.Logger.LogInfo(s.name, fmt.Sprintf(\"message '%s' send (%s, %s)\", string(jsondata), channelID, timestamp))\r\n\t\t} else {\r\n\t\t\ts.service.Logger.LogError(s.name, fmt.Sprintf(\"message '%s' not send (%s, %s)\", string(jsondata), s.config.Channel, timestamp))\r\n\t\t\ts.service.Logger.LogError(s.name, err.Error())\r\n\t\t}\r\n\t} else {\r\n\t\ts.service.Logger.LogError(s.name, \"service not connected\")\r\n\t}\r\n}",
"func (hc *HipChat2) Post(message string) bool {\n\tif hc.client == nil {\n\t\thc.client = hc.newClient()\n\t\tif hc.client == nil {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tmsg := &hipchat.NotificationRequest{\n\t\tColor: \"purple\",\n\t\tMessage: message,\n\t\tNotify: true,\n\t\tMessageFormat: \"text\",\n\t}\n\n\tif _, err := hc.client.Room.Notification(hc.RoomID, msg); err != nil {\n\t\tlog.Errorf(\"Failed post message...: %s\", msg.Message)\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func SendMessageToSlack(message string) {\n\tfmt.Println(\"Sending message to slack...\")\n\n\thttp.Post(url, \"\")\n\n\treturn nil\n}",
"func HandlePost(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tvar configuration = config.GetConfig()\n\tvar api = configuration.Slack.Client\n\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(r.Body)\n\tbody := buf.String()\n\n\t// Check if the request is valid and coming from Events API\n\t// TODO: Update tokens auth to OAuth Flow\n\teventsAPIEvent, e := slackevents.ParseEvent(json.RawMessage(body),\n\t\tslackevents.OptionVerifyToken(\n\t\t\t&slackevents.TokenComparator{VerificationToken: configuration.Slack.VerificationToken},\n\t\t),\n\t)\n\tif e != nil {\n\t\tlog.Printf(\"Error parsing slack event %+v\\n\", e)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\n\t// Slack URL verification method\n\tif eventsAPIEvent.Type == slackevents.URLVerification {\n\t\thandleURLVerificationEvent(w, body)\n\t}\n\n\t// Slack messages in channel\n\tif eventsAPIEvent.Type == slackevents.CallbackEvent {\n\t\thandleCallbackEvent(w, api, eventsAPIEvent)\n\t}\n}",
"func Post(w http.ResponseWriter, r *http.Request) {\n\tvar errs []string\n\tvar errStr, userIDStr, message string\n\n\tif userIDStr, errStr = processFormField(r, \"userID\"); len(errStr) != 0 {\n\t\terrs = append(errs, errStr)\n\t}\n\n\tuserID, err := gocql.ParseUUID(userIDStr)\n\tif err != nil {\n\t\terrs = append(errs, \"parameter 'userID' not a UUID\")\n\t}\n\n\tif message, errStr = processFormField(r, \"message\"); len(errStr) != 0 {\n\t\terrs = append(errs, errStr)\n\t}\n\n\tgocqlUUID := gocql.TimeUUID()\n\n\tvar created bool = false\n\tif len(errs) == 0 {\n\t\tquery := \"INSERT INTO messages (id, user_id, message) VALUES (?, ?, ?)\"\n\t\terr := db.Session.Query(\n\t\t\tquery,\n\t\t\tgocqlUUID, userID, message,\n\t\t).Exec()\n\t\tif err != nil {\n\t\t\terrs = append(errs, err.Error())\n\t\t} else {\n\t\t\tcreated = true\n\t\t}\n\t}\n\n\tif created {\n\t\t// send message to stream\n\t\tglobalMessages, err := stream.Client.FlatFeed(\"akshit\", \"global\")\n\t\tlog.Print(\"stream error\", err)\n\t\tif err == nil {\n\t\t\t_, err := globalMessages.AddActivity(getstream.Activity{\n\t\t\t\tActor: userID.String(),\n\t\t\t\tVerb: \"post\",\n\t\t\t\tObject: gocqlUUID.String(),\n\t\t\t})\n\t\t\tlog.Print(\"stream error 2 \", err)\n\t\t}\n\t\tjson.NewEncoder(w).Encode(NewMessageResponse{ID: gocqlUUID})\n\t} else {\n\t\tjson.NewEncoder(w).Encode(ErrorResponse{Errors: errs})\n\t}\n}",
"func sendSlackMessage(message slack.Message) {\n\tuserData := SlackAPI.GetUserInfo(message.User)\n\ttimestampSplit := strings.Split(message.Ts, \".\")\n\ttimestampInt, err := strconv.ParseInt(timestampSplit[0], 10, 64)\n\ttimestamp := time.Unix(timestampInt, 0)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\terr = SlackAPI.PostMessage(conf.ChannelToMessage, conf.SlackMessageText)\n\tif err != nil {\n\t\tspew.Dump(err)\n\t}\n\n\terr = SlackAPI.PostMessage(conf.ChannelToMessage, \"> <@\"+userData.User.ID+\"> - \"+timestamp.Format(\"03:04:05 PM\")+\": \\n> \"+message.Text)\n\tif err != nil {\n\t\tspew.Dump(err)\n\t}\n}",
"func sendMessage(recipient string, reviewUrl string) {\n\tb := new(bytes.Buffer)\n\tjson.NewEncoder(b).Encode(SlackMessage{Channel: recipient, Text: reviewUrl})\n\tresp, _ := http.Post(os.Getenv(\"SLACKURL\"), \"application/json; charset=utf-8\", b)\n\n\tdefer resp.Body.Close()\n\tioutil.ReadAll(resp.Body)\n}",
"func (t *Tracker) send(color, message string) error {\n\tenv := os.Getenv(\"ENV\")\n\t// If no ENV is specified, assume we are in development mode, so we don't want to flood Slack uselessly.\n\tif env == \"\" {\n\t\treturn nil\n\t}\n\n\t_, perr := poster.Post(\n\t\tt.WebHook,\n\t\tmap[string]interface{}{\n\t\t\t\"text\" : fmt.Sprintf(\"%s - %s\", t.Application, env),\n\t\t\t\"attachments\": []map[string]interface{}{\n\t\t\t\t{\n\t\t\t\t\t\"color\": color,\n\t\t\t\t\t\"text\": fmt.Sprintf(\n\t\t\t\t\t\t\"*Message*\\n%s\\n\\n*Stack*\\n```%s```\\n\\n*Time*\\n%s\",\n\t\t\t\t\t\tmessage,\n\t\t\t\t\t\tstring(debug.Stack()),\n\t\t\t\t\t\ttime.Now().Format(\"2006-01-02 03:04:05\"),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\t// An unexpected error happened when sending our message to Slack.\n\treturn perr\n}",
"func MsgSlack(ctx *Context, msg string) error {\n\tcfg := ctx.Config\n\twebhookURL := \"https://hooks.slack.com/services/\" + cfg.Slacks[*ctx.ArgProfileName].Key\n\n\tlog.Printf(\"webhook: %s\", webhookURL)\n\n\tslackBody, _ := json.Marshal(slackRequestBody{Text: msg})\n\treq, err := http.NewRequest(http.MethodPost, webhookURL, bytes.NewBuffer(slackBody))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\n\tclient := &http.Client{Timeout: 10 * time.Second}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(resp.Body)\n\tif buf.String() != \"ok\" {\n\t\treturn errors.New(\"Non-ok response returned from Slack\")\n\t}\n\treturn nil\n}",
"func postWebhook(webhookUrl string, webhookType string, p []byte) {\n\n\tvar payloadName string\n\tswitch webhookType {\n\tcase \"slack\":\n\t\tpayloadName = \"payload\"\n\tcase \"discord\":\n\t\tpayloadName = \"payload_json\"\n\t}\n\tresp, _ := http.PostForm(\n\t\twebhookUrl,\n\t\turl.Values{payloadName: {string(p)}},\n\t)\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tfmt.Println(string(body))\n}",
"func (c Client) Post(method string, params url.Values) (*jsontree.JsonTree, error) {\n\tparams[\"token\"] = []string{c.Token}\n\tresp, err := http.PostForm(fmt.Sprintf(\"https://slack.com/api/%s\", method), params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\ttree := jsontree.New()\n\terr = tree.UnmarshalJSON(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tok, err := tree.Get(\"ok\").Boolean()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !ok {\n\t\tmessage, _ := tree.Get(\"error\").String()\n\t\treturn nil, fmt.Errorf(\"Error: %s\", message)\n\t}\n\n\treturn tree, nil\n}",
"func (s *SlackSvc) SendMessage(report ReportPayload) error {\n\tattachments := make([]map[string]interface{}, 1)\n\tattachments[0] = map[string]interface{}{\"text\": fmt.Sprintf(\"Howdy! Here's a list of *%d* PRs waiting to be reviewed and merged:\", len(report.PRs))}\n\tfor _, v := range report.PRs {\n\t\tattachments = append(attachments, map[string]interface{}{\"text\": v.ToString()})\n\t}\n\n\tif len(report.Reminders) > 0 {\n\t\tfor _, v := range report.Reminders {\n\t\t\tattachments = append(attachments, map[string]interface{}{\"text\": v.Text})\n\t\t}\n\t}\n\n\tmessage := map[string]interface{}{\n\t\t\"channel\": s.channelID,\n\t\t\"username\": s.user,\n\t\t\"icon_emoji\": \":robot_face:\",\n\t\t\"attachments\": attachments,\n\t}\n\n\tpayload, err := json.Marshal(message)\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"Failed to serialize Slack payload\")\n\t\treturn err\n\t}\n\n\tresp, err := s.client.Post(s.webhook, \"application/json\", bytes.NewReader(payload))\n\tif err != nil {\n\t\tlog.Error().Err(err).Msgf(\"Failed to serialize Slack payload: %v\", err)\n\t\treturn err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tlog.Info().Msgf(\"Message successfully sent to channel %s\", s.channelID)\n\treturn nil\n}",
"func (mm mattermostMessage) sendMessage() {\n\tpost := &model.Post{}\n\tpost.ChannelId = mm.Event.Broadcast.ChannelId\n\t// Create file if message is too large\n\tif len(mm.Response) >= 3990 {\n\t\tres, resp := client.UploadFileAsRequestBody([]byte(mm.Response), mm.Event.Broadcast.ChannelId, mm.Request)\n\t\tif resp.Error != nil {\n\t\t\tlogging.Logger.Error(\"Error occured while uploading file. Error: \", resp.Error)\n\t\t}\n\t\tpost.FileIds = []string{string(res.FileInfos[0].Id)}\n\t} else if len(mm.Response) == 0 {\n\t\tlogging.Logger.Info(\"Invalid request. Dumping the response\")\n\t\treturn\n\t} else {\n\t\tpost.Message = \"```\\n\" + mm.Response + \"\\n```\"\n\t}\n\n\t// Create a post in the Channel\n\tif _, resp := client.CreatePost(post); resp.Error != nil {\n\t\tlogging.Logger.Error(\"Failed to send message. Error: \", resp.Error)\n\t}\n}",
"func PostInitSlackMessage(webhook string) {\n\tmsg := &slack.WebhookMessage{\n\t\tUsername: Username,\n\t\tIconEmoji: IconEmoji,\n\t\tText: \"DocNoc has started scanning\",\n\t}\n\tif err := slack.PostWebhook(webhook, msg); err != nil {\n\t\tfmt.Println(\"🔥: Can't post init message to slack. Operating in headless state\", err)\n\t}\n}",
"func (b *Bot) PostMessage(text, channel string) {\n\tb.rtm.SendMessage(b.rtm.NewOutgoingMessage(text, channel))\n}",
"func PostToNewAccounts(msg string) error {\n\tctx := context.Background()\n\tclient := &http.Client{}\n\tu := url.URL{\n\t\tScheme: \"https\",\n\t\tHost: \"hooks.slack.com\",\n\t\tPath: \"services/TJ42GDSA0/BL63K1C57/T2byQxw0oXiRqUOGCEbwP5TG\",\n\t}\n\n\tbuf := &bytes.Buffer{}\n\tslackMsg := slackMessage{\n\t\tText: msg,\n\t}\n\tif err := json.NewEncoder(buf).Encode(&slackMsg); err != nil {\n\t\treturn errors.Wrap(err, \"encoding slack message\")\n\t}\n\n\toperation := func() error {\n\t\treq, err := http.NewRequest(http.MethodPost, u.String(), buf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq.Header.Set(\"Content-type\", \"application/json\")\n\n\t\tresp, err := client.Do(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer check.Err(resp.Body.Close)\n\n\t\tif resp.StatusCode == http.StatusBadGateway {\n\t\t\treturn fmt.Errorf(\"server: temporary error\")\n\t\t} else if resp.StatusCode >= 300 {\n\t\t\tb, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn backoff.Permanent(fmt.Errorf(\"server: %v\", string(b)))\n\t\t}\n\n\t\treturn nil\n\t}\n\tif err := backoff.RetryNotify(operation,\n\t\tbackoff.WithContext(backoff.WithMaxRetries(backoff.NewExponentialBackOff(), maxRetries), ctx),\n\t\tfunc(err error, t time.Duration) {\n\t\t\tlog.Sugar.Errorw(\"error posting to new-accounts in slack, retrying\",\n\t\t\t\t\"err\", err.Error(),\n\t\t\t)\n\t\t}); err != nil {\n\t\treturn errors.Wrap(err, \"posting to new-accounts in slack\")\n\t}\n\n\treturn nil\n}",
"func (s SlackReporter) PostThread(ctx context.Context, timeStamp, msg string) error {\n\t_, _, err := s.api.PostMessageContext(ctx, s.channel, slack.MsgOptionText(msg, false), slack.MsgOptionTS(timeStamp))\n\treturn err\n}",
"func SpeakPost(ctx *iris.Context) {\n\n //Get our Json values\n testField := ctx.FormValue(\"statement\")\n\n //Place quotes around the testfield\n testField = fmt.Sprintf(\"\\\"%s\\\"\", testField);\n\n //Log the event\n\tspeakLog := fmt.Sprintf(\"/speak post | Speaking the following statement: %s\\n\", testField)\n fmt.Printf(speakLog)\n\n //Run the espeak command, and catch any errors\n //exec.Command(comman, commandArguments)\n cmd := exec.Command(\"./speak.sh\", testField);\n err := cmd.Start()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n //Send Okay and respond\n ctx.JSON(iris.StatusOK, iris.Map{\"message\": fmt.Sprintf(\"Success! Speaking the following statement: %s\", testField)})\n}",
"func messageHandler(w http.ResponseWriter, r *http.Request) {\n\tu := r.PostFormValue(\"username\")\n\tm := r.PostFormValue(\"message\")\n\tlog.Printf(\"Howdy mam, username is: %q, message is: %q\\n\", u, m)\n\tfmt.Fprintf(w, \"I am not ready yet, mam\\n\")\n\t// TODO: need to check for post, username and message.\n\t// TODO: need to send the request to slack.\n}",
"func Post(ongoing model.Ongoing) {\n\n\turlImage := \"https://shikimori.one\" + ongoing.Anime.Image.Original\n\n\tpathImage := getImageAndSave(urlImage, ongoing.Anime.ID)\n\n\tcmd := exec.Command(\"notify-send\", \"-i\", pathImage, fmt.Sprintf(`%v Вышла серия номер %d`, ongoing.Anime.Russian, ongoing.NextEpisode))\n\n\tcmd.Run()\n}",
"func PostMessage(channel, username, text string) (res *http.Response, err error) {\n\tm := Message{\n\t\tChannel: channel,\n\t\tUsername: username,\n\t\tText: text,\n\t}\n\tb, _ := json.Marshal(m)\n\n\treq, err := http.NewRequest(\"POST\", apiURL, bytes.NewBuffer(b))\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tclient := &http.Client{}\n\tres, err = client.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\treturn\n}",
"func sendMessage(api *slack.Client, ephemeral bool, channel, userID, text, threadTimeStamp, wsToken string, attachments []slack.Attachment) error {\n\t// send ephemeral message is indicated\n\tif ephemeral {\n\t\tvar opt slack.MsgOption\n\t\tif len(attachments) > 0 {\n\t\t\topt = slack.MsgOptionAttachments(attachments[0]) // only handling attachments messages with single attachments\n\t\t\t_, err := api.PostEphemeral(channel, userID, opt)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\t// send standard message\n\tpmp := slack.PostMessageParameters{\n\t\tAsUser: true,\n\t\tThreadTimestamp: threadTimeStamp,\n\t}\n\t// check if message was a link to set link attachment\n\tif text != \"\" && strings.Contains(text, \"http\") {\n\t\tif isValidURL(text) {\n\t\t\tif len(attachments) > 0 {\n\t\t\t\tattachments[0].ImageURL = text\n\t\t\t} else {\n\t\t\t\tattachments = []slack.Attachment{\n\t\t\t\t\t{\n\t\t\t\t\t\tImageURL: text,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tattachments[0].ImageURL = text\n\t\t\t}\n\t\t}\n\t}\n\t// include attachments if any\n\tif len(attachments) > 0 {\n\t\tpmp.Attachments = attachments\n\t}\n\t_, _, err := api.PostMessage(channel, text, pmp)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func PostActionMessage(webhook, cN, cID, action string, errType bool) {\n\tif errType {\n\t\tIO.Println(fmt.Sprintf(\"\\t🔥 Failed to %s container with ID: %s\", action, cID))\n\t} else {\n\t\tIO.Println(fmt.Sprintf(\"\\t🚒 %s container with ID: %s\", action, cID))\n\t}\n\n\tif webhook != \"\" {\n\t\tvar text, color string\n\t\tif errType {\n\t\t\ttext = fmt.Sprintf(\"Failed to %s container `%s`\", action, cN)\n\t\t\tcolor = \"warning\"\n\t\t} else {\n\t\t\ttext = fmt.Sprintf(\"%s container `%s`\", action, cN)\n\t\t\tcolor = \"good\"\n\t\t}\n\t\tslack.PostWebhook(webhook, &slack.WebhookMessage{\n\t\t\tUsername: Username,\n\t\t\tIconEmoji: IconEmoji,\n\t\t\tAttachments: []slack.Attachment{\n\t\t\t\tslack.Attachment{\n\t\t\t\t\tTitle: fmt.Sprintf(\":package: Container %s\", cN),\n\t\t\t\t\tText: text,\n\t\t\t\t\tFooter: cID,\n\t\t\t\t\tColor: color,\n\t\t\t\t\tMarkdownIn: []string{\"text\"},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\n}",
"func testPostWebhook(p bytes.Buffer) {\n\tresp, _ := http.PostForm(\n\t\t\"\",\n\t\turl.Values{\"payload_json\": {p.String()}},\n\t)\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tfmt.Println(string(body))\n}",
"func PostMessage(c *gin.Context) {\n\tbody := c.Request.Body\n\n\tvalue, err := ioutil.ReadAll(body)\n\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n\tc.JSON(200, gin.H{\n\t\t\"message\": string(value),\n\t})\n}",
"func (s *SlackNotify) Send(v ...interface{}) error {\n\tif s.URL == \"\" {\n\t\treturn nil\n\t}\n\tpayload, err := json.Marshal(slackMsg{Text: fmt.Sprint(s.prefix, v)})\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, _ := http.NewRequest(\"POST\", s.URL, bytes.NewBuffer(payload))\n\treq.Header.Add(\"content-type\", \"application/json\")\n\tres, err := s.c.Do(req)\n\tdefer res.Body.Close()\n\tif res.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"Error posting to slack\")\n\t}\n\treturn nil\n}",
"func SendSlackWebhook(message string) {\n\twebhookURL := getWebhookURLFromEnvironmentVariable()\n\tif webhookURL == \"none\" {\n\t\treturn\n\t}\n\tslackBody, _ := json.Marshal(slackRequestBody{Text: message})\n\n\treq, err := http.NewRequest(http.MethodPost, webhookURL, bytes.NewBuffer(slackBody))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\n\tclient := &http.Client{Timeout: 10 * time.Second}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(resp.Body)\n\tif buf.String() != \"ok\" {\n\t\tfmt.Println(errors.New(\"Non-ok response returned from Slack\"))\n\t}\n}",
"func notifyAdmin(token, channelID, message string) {\n\n\tif token == \"\" || channelID == \"\" {\n\t\treturn\n\t}\n\n\tapi := slack.New(token)\n\n\t// If you set debugging, it will log all requests to the console\n\t// Useful when encountering issues\n\tapi.SetDebug(true)\n\n\t// Ref: https://github.com/nlopes/slack/blob/master/examples/messages/messages.go#L26:2\n\tparams := slack.PostMessageParameters{}\n\tchannelID, timestamp, err := api.PostMessage(channelID, message, params)\n\tif err != nil {\n\t\tfmt.Printf(\"%s\\n\", err)\n\t\treturn\n\t}\n\tfmt.Printf(\"Message successfully sent to channel %s at %s\", channelID, timestamp)\n\n}",
"func registerMessage(template, channelID, username, filename, url string) error {\n\tmessage := fmt.Sprintf(template, username, filename, url)\n\t_, _, err := slackClient.PostMessage(channelID, message, slackParams)\n\tcheckError(err)\n\treturn err\n}",
"func endpointHandler(w http.ResponseWriter, r *http.Request) {\n\tvar data support.TelegramObject\n\tdefer r.Body.Close()\n\n\terr := data.DecodeJSON(r.Body)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tif debug {\n\t\tlog.Printf(\"got message -> %+v\\n\", data)\n\t}\n\n\tif data.Message.Chat.ID == botSession.ChatID {\n\t\tlog.Println(\"got URL from command /post on group \", botSession.GroupHandle, \"posting on reddit...\")\n\n\t\turl, err := splitPostCommand(data.Message.Text)\n\n\t\tif debug {\n\t\t\tlog.Println(\"splitted url:\", url)\n\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tvalidURL, err := support.ValidateURL(url)\n\t\tif err != nil {\n\t\t\tif err = data.ReplyBackToChat(\"Invalid URL :(\"); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tif debug {\n\t\t\tlog.Println(\"validated url:\", validURL)\n\t\t}\n\n\t\tredditSession, err := reddit.NewSession(redditUsername, redditPassword, redditClientID, redditClientSecret)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tif err = redditSession.Post(validURL); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tif err = data.ReplyBackToChat(\"Posted!\"); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t}\n}",
"func PostMessage(c *gin.Context) {\n\tmessage := c.PostForm(\"message\")\n\tname := c.DefaultPostForm(\"name\", \"Guest\")\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"message\": message,\n\t\t\"name\": name,\n\t})\n}",
"func sendVideoMessage(slackClient *slack.RTM, slackChannel string) {\n\tpostMessageParameters:= slack.PostMessageParameters{UnfurlMedia: true, UnfurlLinks: true}\n\tslackClient.PostMessage(slackChannel, slack.MsgOptionText(\"https://www.youtube.com/watch?v=Rh64GkNRDNU&ab_channel=MarySpender\", true), slack.MsgOptionPostMessageParameters(postMessageParameters))\n}",
"func (mm *mattermostMessage) handleMessage(b *MMBot) {\n\tpost := model.PostFromJson(strings.NewReader(mm.Event.Data[\"post\"].(string)))\n\tchannelType := mmChannelType(mm.Event.Data[\"channel_type\"].(string))\n\tif channelType == mmChannelPrivate || channelType == mmChannelPublic {\n\t\t// Message posted in a channel\n\t\t// Serve only if starts with mention\n\t\tif !strings.HasPrefix(post.Message, \"@\"+BotName+\" \") {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Check if message posted in authenticated channel\n\tif mm.Event.Broadcast.ChannelId == b.getChannel().Id {\n\t\tmm.IsAuthChannel = true\n\t}\n\tlogging.Logger.Debugf(\"Received mattermost event: %+v\", mm.Event.Data)\n\n\t// Trim the @BotKube prefix if exists\n\tmm.Request = strings.TrimPrefix(post.Message, \"@\"+BotName+\" \")\n\n\te := execute.NewDefaultExecutor(mm.Request, b.AllowKubectl, b.ClusterName, b.ChannelName, mm.IsAuthChannel)\n\tmm.Response = e.Execute()\n\tmm.sendMessage()\n}",
"func (c *ChanPost) slckSend(rtm *slack.RTM) {\n\tlog.Println(\"Message from bot received\")\n\tmessage := \"Infra Announce: \" + c.text + \" \" +\n\t\ttime.Unix(int64(c.date), 0).Format(\"Mon Jan _2 15:04\") +\n\t\t\"(from telegram channel)\"\n\trtm.SendMessage(rtm.NewOutgoingMessage(message, config.sChat))\n}",
"func (a *App) postMessage(w http.ResponseWriter, r *http.Request) {\n\tm := message{}\n\tbody, err := ioutil.ReadAll(r.Body)\n\n\tif err != nil {\n\t\tSendJSON(w, message{message: \"Error reading post message\"}, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr = m.postMessage(a.DB, string(body))\n\n\tif err != nil {\n\t\tSendJSON(w, message{message: \"DB post error\"}, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tSendJSON(w, m, http.StatusOK)\n}",
"func (r *PostingBot) Post(p *reddit.Post) error {\n\tswitch {\n\tcase p.NSFW:\n\t\t// We hide NSFW content\n\t\tmsg := tgbotapi.NewMessage(r.Config.ChatID, fmt.Sprintf(\"Uh oh, nsfw content! 🔞\\n%s\", p.URL))\n\t\tmsg.DisableWebPagePreview = true\n\t\tmsg.ReplyMarkup = utility.SetupInlineKeyboard(p.Subreddit, p.Permalink)\n\t\tr.TBot.Send(msg)\n\tcase p.Media.RedditVideo.IsGIF:\n\t\tmsg := tgbotapi.NewDocumentUpload(r.Config.ChatID, p.URL)\n\t\tmsg.ReplyMarkup = utility.SetupInlineKeyboard(p.Subreddit, p.Permalink)\n\t\tr.TBot.Send(msg)\n\tcase strings.Contains(p.URL, \".jpg\") || strings.Contains(p.URL, \".png\"):\n\t\tmsg := tgbotapi.NewPhotoUpload(r.Config.ChatID, \"\")\n\t\tmsg.FileID = p.URL\n\t\tmsg.UseExisting = true\n\t\tmsg.ReplyMarkup = utility.SetupInlineKeyboard(p.Subreddit, p.Permalink)\n\t\tr.TBot.Send(msg)\n\tdefault:\n\t\tif r.Config.VideoDownload {\n\t\t\tfileName, err := video.GetVideo(p.URL)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\tvideoPath := r.Config.DownloadPath + fileName\n\n\t\t\tmsg := tgbotapi.NewVideoUpload(r.Config.ChatID, videoPath)\n\t\t\tmsg.ReplyMarkup = utility.SetupInlineKeyboard(p.Subreddit, p.Permalink)\n\n\t\t\tr.TBot.Send(msg)\n\t\t\tos.Remove(videoPath)\n\t\t} else {\n\t\t\tmsg := tgbotapi.NewMessage(r.Config.ChatID, p.URL)\n\t\t\tr.TBot.Send(msg)\n\t\t}\n\t}\n\treturn nil\n}",
"func (b *Bslack) sendWebhook(msg config.Message) error {\n\t// Skip events.\n\tif msg.Event != \"\" {\n\t\treturn nil\n\t}\n\n\tif b.GetBool(useNickPrefixConfig) {\n\t\tmsg.Text = msg.Username + msg.Text\n\t}\n\n\tif msg.Extra != nil {\n\t\t// This sends a message only if we received a config.EVENT_FILE_FAILURE_SIZE.\n\t\tfor _, rmsg := range helper.HandleExtra(&msg, b.General) {\n\t\t\trmsg := rmsg // scopelint\n\t\t\ticonURL := config.GetIconURL(&rmsg, b.GetString(iconURLConfig))\n\t\t\tmatterMessage := matterhook.OMessage{\n\t\t\t\tIconURL: iconURL,\n\t\t\t\tChannel: msg.Channel,\n\t\t\t\tUserName: rmsg.Username,\n\t\t\t\tText: rmsg.Text,\n\t\t\t}\n\t\t\tif err := b.mh.Send(matterMessage); err != nil {\n\t\t\t\tb.Log.Errorf(\"Failed to send message: %v\", err)\n\t\t\t}\n\t\t}\n\n\t\t// Webhook doesn't support file uploads, so we add the URL manually.\n\t\tfor _, f := range msg.Extra[\"file\"] {\n\t\t\tfi, ok := f.(config.FileInfo)\n\t\t\tif !ok {\n\t\t\t\tb.Log.Errorf(\"Received a file with unexpected content: %#v\", f)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif fi.URL != \"\" {\n\t\t\t\tmsg.Text += \" \" + fi.URL\n\t\t\t}\n\t\t}\n\t}\n\n\t// If we have native slack_attachments add them.\n\tvar attachs []slack.Attachment\n\tfor _, attach := range msg.Extra[sSlackAttachment] {\n\t\tattachs = append(attachs, attach.([]slack.Attachment)...)\n\t}\n\n\ticonURL := config.GetIconURL(&msg, b.GetString(iconURLConfig))\n\tmatterMessage := matterhook.OMessage{\n\t\tIconURL: iconURL,\n\t\tAttachments: attachs,\n\t\tChannel: msg.Channel,\n\t\tUserName: msg.Username,\n\t\tText: msg.Text,\n\t}\n\tif msg.Avatar != \"\" {\n\t\tmatterMessage.IconURL = msg.Avatar\n\t}\n\tif err := b.mh.Send(matterMessage); err != nil {\n\t\tb.Log.Errorf(\"Failed to send message via webhook: %#v\", err)\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (s *Slack) Send(color, msg string, v ...interface{}) error {\n\tb, err := json.Marshal(&payload{\n\t\tChannel: s.channel,\n\t\tUsername: s.username,\n\t\tIconURL: s.iconURL,\n\t\tAttachments: []attachment{\n\t\t\t{\n\t\t\t\tColor: color,\n\t\t\t\tText: fmt.Sprintf(msg, v...),\n\t\t\t},\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.infof(\"payload: %s\", b)\n\tr, err := http.Post(s.webhookURL, \"application/json\", bytes.NewReader(b))\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.infof(\"response: %s\", r.Status)\n\n\tif r.StatusCode >= 400 {\n\t\treturn &ResponseError{r}\n\t}\n\treturn nil\n}",
"func (sn *SlackMessage) Notify(slackURL string) error {\n\tbp, err := json.Marshal(sn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = http.Post(slackURL, \"application/json\", bytes.NewBuffer(bp))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (h *slackNotificationPostHandler) Run(ctx context.Context) gimlet.Responder {\n\tattachments := []message.SlackAttachment{}\n\tfor _, a := range h.APISlack.Attachments {\n\t\ti, err := a.ToService()\n\t\tif err != nil {\n\t\t\treturn gimlet.MakeJSONInternalErrorResponder(errors.Wrap(err, \"API error converting from model.APISlackAttachment to message.SlackAttachment\"))\n\t\t}\n\t\tattachment, ok := i.(*message.SlackAttachment)\n\t\tif !ok {\n\t\t\treturn gimlet.MakeJSONErrorResponder(gimlet.ErrorResponse{\n\t\t\t\tStatusCode: http.StatusInternalServerError,\n\t\t\t\tMessage: fmt.Sprintf(\"Unexpected type %T for message.SlackAttachment\", i),\n\t\t\t})\n\t\t}\n\t\tattachments = append(attachments, *attachment)\n\t}\n\ttarget := utility.FromStringPtr(h.APISlack.Target)\n\tmsg := utility.FromStringPtr(h.APISlack.Msg)\n\n\th.composer = message.NewSlackMessage(level.Notice, target, msg, attachments)\n\ts, err := h.environment.GetSender(evergreen.SenderSlack)\n\tif err != nil {\n\t\treturn gimlet.MakeJSONInternalErrorResponder(errors.Wrap(err, \"Error fetching sender key for evergreen.SenderSlack\"))\n\t}\n\n\th.sender = s\n\th.sender.Send(h.composer)\n\n\treturn gimlet.NewJSONResponse(struct{}{})\n}",
"func send(api *slack.Client, message models.Message, bot *models.Bot) {\n\tusers, err := getSlackUsers(api, message)\n\tif err != nil {\n\t\tbot.Log.Errorf(\"Problem sending message: %s\", err.Error())\n\t}\n\tif message.DirectMessageOnly {\n\t\terr := handleDirectMessage(api, message, bot)\n\t\tif err != nil {\n\t\t\tbot.Log.Errorf(\"Problem sending message: %s\", err.Error())\n\t\t}\n\t} else {\n\t\terr := handleNonDirectMessage(api, users, message, bot)\n\t\tif err != nil {\n\t\t\tbot.Log.Errorf(\"Problem sending message: %s\", err.Error())\n\t\t}\n\t}\n}",
"func (m *Module) PostC(channel, txt string, params *PostMessageParameters) {\n\tvar p PostMessageParameters\n\tif params != nil {\n\t\tp = *params\n\t}\n\tif m.LogMessages {\n\t\tm.Logger.Infof(\"[%s] %s\", m.config.SlackConfig.Channel, txt)\n\t}\n\t_, _, err := m.client.PostMessage(channel, txt, p)\n\tif err != nil {\n\t\tm.Logger.Error(errors.Wrap(err))\n\t}\n}",
"func SendSlack(temp string, data interface{}) error {\n\tmessageLock.Lock()\n\tbuf := new(bytes.Buffer)\n\tslackTemp, _ := template.New(\"slack\").Parse(temp)\n\tslackTemp.Execute(buf, data)\n\tslackMessages = append(slackMessages, buf.String())\n\tmessageLock.Unlock()\n\treturn nil\n}",
"func SendSlackNotification(title string, cobraCmd *cobra.Command, output string, mgr credential.Manager) error {\n\tuserID, err := mgr.GetUserIDByParseToken()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"while parsinng oauth2 token\")\n\t}\n\n\tslackhookUrl := GlobalOpts.SlackAPIURL()\n\ttext_msg := \"New \" + title + \" is triggerred on \" + getClusterType() + \" by \" + userID\n\ttriggeredCmd := revertUpgradeOpts(title, cobraCmd)\n\tattachment := Attachment{\n\t\tColor: SLACK_COLOR,\n\t\tText: triggeredCmd + \"\\n\" + output,\n\t}\n\n\tslackBody, _ := json.Marshal(SlackRequestBody{Text: text_msg, Icon_emoji: ICON_EMOJI, Username: SLACK_USER_NAME,\n\t\tAttachments: []Attachment{attachment}})\n\treq, err := http.NewRequest(http.MethodPost, slackhookUrl, bytes.NewBuffer(slackBody))\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\tclient := &http.Client{Timeout: 10 * time.Second}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Drain response body and close, return error to context if there isn't any.\n\tdefer func() {\n\t\tderr := drainResponseBody(resp.Body)\n\t\tif err == nil {\n\t\t\terr = derr\n\t\t}\n\t\tcerr := resp.Body.Close()\n\t\tif err == nil {\n\t\t\terr = cerr\n\t\t}\n\t}()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"calling %s returned %s status\", slackhookUrl, resp.Status)\n\t}\n\n\tbodyBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"while decoding slack response body\")\n\t}\n\tbodyString := string(bodyBytes)\n\tif bodyString != \"ok\" {\n\t\treturn fmt.Errorf(\"non-ok response returned from Slack\")\n\t}\n\treturn nil\n}",
"func (m *MatterMail) postMessage(client *model.Client, channelID string, message string, fileIds []string) error {\n\tpost := &model.Post{ChannelId: channelID, Message: message}\n\n\tif len(fileIds) > 0 {\n\t\tpost.FileIds = fileIds\n\t}\n\n\tres, err := client.CreatePost(post)\n\tif res == nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func SlackSendMsg(slackAPI *slack.Client, MsgText string) error {\n\tif _, _, err := slackAPI.PostMessage(viper.GetString(\"slack_channel_id\"), slack.MsgOptionText(MsgText, false)); err != nil {\n\t\tlog.Errorf(\"[ERROR] %s\\n\", err)\n\t\treturn err\n\t}\n\tlog.Infof(\"[Slack] Send %s\", MsgText)\n\treturn nil\n}",
"func (c *Consumer) CreatePost(ctx context.Context, m gosqs.Message) error {\n\tvar p Post\n\tif err := m.Decode(&p); err != nil {\n\t\treturn err\n\t}\n\n\t// send a message to the same queue\n\tc.MessageSelf(ctx, \"some_new_message\", &p)\n\n\t//forward the message to another queue\n\tc.Message(ctx, \"notification-worker\", \"some_new_message\", &p)\n\n\treturn nil\n}",
"func (c *Client) postMessage(ctx context.Context, username, iconurl, channel, text string) error {\n\tif iconurl != \"\" {\n\t\ticonurl = \"icon_url=\" + iconurl\n\t}\n\tvar resp ResponseHeader\n\treturn rpc(ctx, c, &resp, \"chat.postMessage\",\n\t\t\"username=\"+username,\n\t\ticonurl,\n\t\t\"as_user=false\",\n\t\t\"channel=\"+channel,\n\t\t\"text=\"+text)\n}",
"func (post *Post) Send(app *state.AppState) error {\n\tsession := app.MgoSession.Clone()\n\tdefer session.Close()\n\n\tchannel, err := GetChannelByID(app, post.ChannelID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbot, err := GetBotById(app, channel.BotID.Hex())\n\tif err != nil {\n\t\treturn err\n\t}\n\tbotAPI, err := tg.NewBotAPI(bot.Token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcoll := session.DB(dbName).C(\"posts\")\n\tchange := mgo.Change{\n\t\tUpdate: bson.M{\"$set\": bson.M{\"isSent\": true}},\n\t\tReturnNew: false,\n\t}\n\tvar prevPostState Post\n\t_, err = coll.FindId(post.ID).Apply(change, &prevPostState)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// return if post already processed by something\n\tif prevPostState.IsSent {\n\t\treturn nil\n\t}\n\n\tmsg := tg.NewMessage(channel.Chat.ID, post.Text)\n\tmsg.ReplyMarkup = post.BuildReplyMarkup()\n\tmsg.DisableNotification = !post.WithNotification\n\tmsg.DisableWebPagePreview = !post.WithPreview\n\tif post.Mode == tg.ModeMarkdown {\n\t\tmsg.ParseMode = tg.ModeMarkdown\n\t} else {\n\t\tmsg.ParseMode = \"\"\n\t}\n\n\t_, err = botAPI.Send(msg)\n\tif err != nil {\n\t\tisInvalidSyntax := strings.Contains(err.Error(), \"can't parse entities in message text\")\n\n\t\t// If syntax incorrect mark as sent to prevent retrying\n\t\tset := bson.M{\"isSent\": false, \"error\": \"\"}\n\t\tif isInvalidSyntax {\n\t\t\tset = bson.M{\"isSent\": true, \"error\": \"Invalid markdown syntax\"}\n\t\t}\n\n\t\tif err := coll.UpdateId(post.ID, bson.M{\"$set\": set}); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (s *SlcLogger) sendNotification(logLevel logLevel, color string, message interface{}, titleParam []string) error {\n\n\tvar text string\n\tif t, ok := message.(error); ok {\n\t\ttext = t.Error()\n\t} else if t, ok := message.(string); ok {\n\t\ttext = t\n\t} else {\n\t\treturn &SlcErr{errors.New(\"the type of message parameter should be string or error\"), 0}\n\t}\n\n\tif logLevel < s.LogLevel {\n\t\treturn nil\n\t}\n\tslackChannel := s.getTargetChannel(logLevel)\n\n\tpayload, err := s.buildPayload(slackChannel, color, text, titleParam)\n\tif err != nil {\n\t\treturn &SlcErr{err, 0}\n\t}\n\n\treq, err := http.NewRequest(\"POST\", s.WebHookURL, bytes.NewBuffer(payload))\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tif err != nil {\n\t\treturn &SlcErr{err, 0}\n\t}\n\tctx := context.Background()\n\treq.WithContext(ctx)\n\n\tresp, err := http.DefaultClient.Do(req)\n\n\tdefer func() {\n\t\tif resp != nil {\n\t\t\t_, _ = io.Copy(ioutil.Discard, resp.Body)\n\t\t\t_ = resp.Body.Close()\n\t\t}\n\t}()\n\n\tif err != nil {\n\t\treturn &SlcErr{err, 0}\n\t}\n\n\tif resp.StatusCode >= 400 {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\treturn &SlcErr{errors.New(string(body)), resp.StatusCode}\n\t}\n\n\treturn nil\n}",
"func (m *Slack) Send(mes *message.Message) error {\n\topts := createSlackMessageOptions(mes.Text, mes.Image, mes.Fields, mes.Level)\n\n\t_channel, _timestamp, _text, err := m.api.SendMessage(m.channel, opts...)\n\n\tm.logger.Debug(\"send slack message\",\n\t\tzap.String(\"channel\", _channel),\n\t\tzap.String(\"timestamp\", _timestamp),\n\t\tzap.String(\"text\", _text),\n\t\tzap.Error(err),\n\t)\n\n\treturn err\n}",
"func sendEchoMessage(slackClient *slack.RTM, message, slackChannel string) {\n\tsplitMessage := strings.Fields(strings.ToLower(message))\n\tslackClient.SendMessage(slackClient.NewOutgoingMessage(strings.Join(splitMessage[1:], \" \"), slackChannel))\n}",
"func (s *Slack) SendMessage(message string) error {\n\t_, _, err := s.client.PostMessage(s.channelID, slack.MsgOptionText(message, false))\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"client.PostMessage(): failed to post message\")\n\t}\n\n\treturn nil\n}",
"func (c *Client) WebhookPost(falcopayload types.FalcoPayload) {\n\terr := c.Post(falcopayload)\n\tif err != nil {\n\t\tc.Stats.Webhook.Add(Error, 1)\n\t\tc.PromStats.Outputs.With(map[string]string{\"destination\": \"webhook\", \"status\": Error}).Inc()\n\t} else {\n\t\tc.Stats.Webhook.Add(OK, 1)\n\t\tc.PromStats.Outputs.With(map[string]string{\"destination\": \"webhook\", \"status\": OK}).Inc()\n\t}\n\n\tc.Stats.Webhook.Add(Total, 1)\n}",
"func (r *searchBot) Post(p *reddit.Post) error {\n\tif strings.Contains(p.SelfText, r.searchText) {\n\t\t<-time.After(2 * time.Second) // Buffer\n\t\tpost := datamanager.PostMessage{URL: p.URL, Text: p.SelfText}\n\t\tmsg, err := json.Marshal(post)\n\t\tif err != nil {\n\t\t\tlogger.Error(fmt.Sprintf(\"Error converting to JSON for Reddit post %s\", p.URL))\n\t\t}\n\t\tpubErr := r.distClient.Channel.Publish(\n\t\t\tconfig.DefaultExchange(),\n\t\t\tr.distClient.Queue.Name,\n\t\t\tfalse,\n\t\t\tfalse,\n\t\t\tdistributed.PublishBody(msg),\n\t\t)\n\t\tif pubErr != nil {\n\t\t\tlogger.Error(fmt.Sprintf(\"Error publishing message to queue %s\", r.distClient.Queue.Name))\n\t\t}\n\t}\n\treturn nil\n}",
"func Send(config config.Config, text string) {\n\tapi := slack.New(config.Slack.Token)\n\tparams := slack.PostMessageParameters{}\n\tparams.IconEmoji = config.Slack.IconEmoji\n\tparams.Username = config.Slack.Username\n\tchannelID, timestamp, err := api.PostMessage(config.Slack.Channel, text, params)\n\tif err != nil {\n\t\tfmt.Printf(\"%s\\n\", err)\n\t\treturn\n\t}\n\tfmt.Printf(\"Message successfully sent to channel %s at %s\", channelID, timestamp)\n}",
"func (player *Athelete) MediaPost(t, m string, v bool) {\n\tMessage := &Message{\n\t\tFrom: player.GetName(),\n\t\tTitle: t,\n\t\tMessage: m,\n\t\tVisible: v,\n\t}\n\tif v == true {\n\t\t//IMPLEMENT LEAGUE BOARD\n\t\tplayer.League.MessBoard = append(player.League.MessBoard, Message)\n\t} else if v == false {\n\t\tplayer.Team.MessBoard = append(player.Team.MessBoard, Message)\n\t}\n\n}",
"func sendNotificationToSlack(payloadJSONEncoded []byte, response chan<- *http.Response) {\n\tfmt.Println(\"Sending notification to Slack...\")\n\n\t// Récupération des paramètres\n\t// ---------------------------\n\thookURL = config.SlackHookURL\n\thookPayload = config.SlackHookPayload\n\n\t// Envoi de la requête\n\t// -------------------\n\tresponse <- sendNotificationToApplication(hookURL, hookPayload, payloadJSONEncoded)\n}",
"func (s *Slack) Send(pending []reviewit.MergeRequest) error {\n\ts.pending = pending\n\ts.build()\n\tbuf, err := json.Marshal(&s.msg)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"failure to Marshal\")\n\t}\n\treq, _ := http.NewRequest(\"POST\", s.WebhookURL, bytes.NewReader(buf))\n\tif _, err := http.DefaultClient.Do(req); err != nil {\n\t\treturn errors.WithMessage(err, \"slack post message failed\")\n\t}\n\treturn nil\n}",
"func (s DialogflowServer) DialogPostMessage(w http.ResponseWriter, r *http.Request) {\r\n\tvars := mux.Vars(r)\r\n\tsessionID := vars[\"sessionID\"]\r\n\r\n\tlog.Printf(\"Route: %s , SessionID: %s\\n\", r.URL, sessionID)\r\n\r\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\r\n\tif r.Method == http.MethodOptions {\r\n\t\treturn\r\n\t}\r\n\r\n\tbody, err := ioutil.ReadAll(r.Body)\r\n\tif err != nil {\r\n\t\thttp.Error(w, \"Error reading request body\",\r\n\t\t\thttp.StatusInternalServerError)\r\n\t}\r\n\r\n\tvar m DialogflowMessage\r\n\terr = json.Unmarshal(body, &m)\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\r\n\trequest := dialogflowpb.DetectIntentRequest{\r\n\t\tSession: fmt.Sprintf(\"projects/%s/agent/sessions/%s\", s.projectID, sessionID),\r\n\t\tQueryInput: &dialogflowpb.QueryInput{\r\n\t\t\tInput: &dialogflowpb.QueryInput_Text{\r\n\t\t\t\tText: &dialogflowpb.TextInput{\r\n\t\t\t\t\tText: m.Message,\r\n\t\t\t\t\tLanguageCode: s.lang,\r\n\t\t\t\t},\r\n\t\t\t},\r\n\t\t},\r\n\t}\r\n\r\n\tresponse, err := s.sessionClient.DetectIntent(s.ctx, &request)\r\n\tif err != nil {\r\n\t\tlog.Fatalf(\"Error in communication with Dialogflow %s\", err.Error())\r\n\t\treturn\r\n\t}\r\n\r\n\tw.Header().Set(\"Content-Type\", \"application/json\")\r\n\tjson.NewEncoder(w).Encode(response)\r\n}",
"func PostWebhooks(diagnostic structs.DiagnosticSpec, status string, promotestatus string, isCron bool, result structs.ResultSpec) error {\n\t// No configured webhook destinations\n\tif diagnostic.WebhookURLs == \"\" {\n\t\treturn nil\n\t}\n\n\t// Assemble the webhook payload\n\tpayload := WebhookPayload{\n\t\tJob: diagnostic.JobSpace + \"/\" + diagnostic.Job,\n\t\tTarget: diagnostic.App + \"-\" + diagnostic.Space,\n\t\tStatus: status,\n\t\tIsPreview: diagnostic.IsPreview,\n\t\tIsCron: isCron,\n\t\tLogURL: os.Getenv(\"LOG_URL\") + \"/logs/\" + diagnostic.RunID,\n\t\tKibanaURL: os.Getenv(\"KIBANA_URL\") + \"/app/kibana#/doc/logs/logs/run/?id=\" + diagnostic.RunID,\n\t\tArtifactsURL: os.Getenv(\"ARTIFACTS_URL\") + \"/v1/artifacts/\" + diagnostic.RunID + \"/\",\n\t\tRerunURL: os.Getenv(\"RERUN_URL\") + \"?space=\" + diagnostic.Space + \"&app=\" + diagnostic.App + \"&action=\" + diagnostic.Action + \"&result=\" + diagnostic.Result + \"&releaseid=\" + diagnostic.ReleaseID + \"&buildid=\" + diagnostic.BuildID,\n\t\tCommitAuthor: diagnostic.CommitAuthor,\n\t\tStartTime: result.Payload.StartTime,\n\t\tStopTime: result.Payload.StopTime,\n\t\tRunDurationMs: result.Payload.BuildTimeMillis,\n\t\tPromotionResults: PromotionResults{},\n\t}\n\n\tif diagnostic.GithubVersion != \"\" {\n\t\tpayload.GithubVersion = diagnostic.GithubVersion\n\t}\n\n\tif status != \"success\" {\n\t\tpayload.PromotionResults.Message = \"No promotion triggered - tests not successful\"\n\t} else if diagnostic.PipelineName == \"manual\" {\n\t\tpayload.PromotionResults.Message = \"No promotion triggered - set to manual\"\n\t\tpayload.PromotionResults.Pipeline = diagnostic.PipelineName\n\t} else {\n\t\tpayload.PromotionResults.Message = \"Promotion was triggered with result \" + promotestatus\n\t\tpayload.PromotionResults.Status = promotestatus\n\t\tpayload.PromotionResults.From = diagnostic.TransitionFrom\n\t\tpayload.PromotionResults.To = diagnostic.TransitionTo\n\t\tpayload.PromotionResults.Pipeline = 
diagnostic.PipelineName\n\t}\n\n\t// Send message to each hook URL\n\tfor _, hookURL := range strings.Split(diagnostic.WebhookURLs, \",\") {\n\t\tpayloadBytes, err := json.Marshal(payload)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn err\n\t\t}\n\n\t\tif !strings.HasPrefix(hookURL, \"http://\") && !strings.HasPrefix(hookURL, \"https://\") {\n\t\t\thookURL = \"https://\" + hookURL\n\t\t}\n\n\t\treq, err := http.NewRequest(\"POST\", hookURL, bytes.NewBuffer(payloadBytes))\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn err\n\t\t}\n\t\treq.Header.Add(\"Content-type\", \"application/json\")\n\n\t\tclient := http.Client{}\n\t\t_, err = client.Do(req)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn err\n\t\t}\n\n\t\t// If we ever want to save the result and do something with it\n\t\t// defer resp.Body.Close()\n\t\t// bodybytes, err := ioutil.ReadAll(resp.Body)\n\t\t// if err != nil {\n\t\t// \tfmt.Println(err)\n\t\t// \treturn err\n\t\t// }\n\t\t// fmt.Println(string(bodybytes))\n\t\t// fmt.Println(resp.status)\n\t}\n\treturn nil\n}",
"func makePost(client *mm.Client, channelId string, message string) *mm.Post {\n\tpost := &mm.Post{}\n\tpost.ChannelId = channelId\n\tpost.Message = message\n\treturn post\n}",
"func (s *SlackService) SendMessage(channel string, message string) {\n\t// https://godoc.org/github.com/nlopes/slack#PostMessageParameters\n\tpostParams := slack.PostMessageParameters{\n\t\tAsUser: true,\n\t}\n\n\t// https://godoc.org/github.com/nlopes/slack#Client.PostMessage\n\ts.Client.PostMessage(channel, message, postParams)\n}",
"func SendSlackNotification(webhookURL string, msg data.SlackRequestBody) error {\n\n\tslackBody, _ := json.Marshal(msg)\n\n\tlog.Printf(\"Sending message to Slack:\\n %v\\n\", string(slackBody))\n\n\treq, err := http.NewRequest(http.MethodPost, webhookURL, bytes.NewBuffer(slackBody))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\n\tclient := &http.Client{Timeout: 10 * time.Second}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(resp.Body)\n\tif buf.String() != \"ok\" {\n\t\treturn errors.New(\"Non-ok response returned from Slack\")\n\t}\n\treturn nil\n}",
"func echoMessage(slackClient *slack.RTM, message, slackChannel string) {\n\tsplitMessage := strings.Fields(strings.ToLower(message))\n\n\tslackClient.SendMessage(slackClient.NewOutgoingMessage(strings.Join(splitMessage[1:], \" \"), slackChannel))\n}",
"func (d *DingTalkClient) SendMessage(msg DingTalkMessage) error {\n\n\tvar message string\n\tswitch msg.Type {\n\tcase \"text\":\n\t\tmessage = fmt.Sprintf(`{\"msgtype\": \"text\",\"text\": {\"content\": \"监控报警: %s\"}}`, msg.Message)\n\tcase \"markdown\":\n\t\tmessage = fmt.Sprintf(`{\"msgtype\": \"markdown\",\"markdown\":{\"title\": 监控报警: \"%s\", \"text\": \"%s\"}}`, msg.Title, msg.Message)\n\tdefault:\n\t\tmessage = fmt.Sprintf(`{\"msgtype\": \"text\",\"text\": {\"content\": \"监控报警: %s\"}}`, msg.Message)\n\t}\n\n\tclient := &http.Client{}\n\trequest, _ := http.NewRequest(\"POST\", d.RobotURL, bytes.NewBuffer([]byte(message)))\n\trequest.Header.Set(\"Content-type\", \"application/json\")\n\tresponse, err := client.Do(request)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"访问钉钉URL(%s) 出错了: %s\", d.RobotURL, err)\n\t}\n\tif response.StatusCode != 200 {\n\t\tbody, _ := ioutil.ReadAll(response.Body)\n\t\treturn fmt.Errorf(\"访问钉钉URL(%s) 出错了: %s\", d.RobotURL, string(body))\n\t}\n\treturn nil\n}",
"func (s *Slack) SendEvent(event events.Event) error {\n\tlog.Logger.Info(fmt.Sprintf(\">> Sending to slack: %+v\", event))\n\n\tapi := slack.New(s.Token)\n\tparams := slack.PostMessageParameters{\n\t\tAsUser: true,\n\t}\n\tattachment := slack.Attachment{\n\t\tFields: []slack.AttachmentField{\n\t\t\t{\n\t\t\t\tTitle: \"Kind\",\n\t\t\t\tValue: event.Kind,\n\t\t\t\tShort: true,\n\t\t\t},\n\t\t\t{\n\n\t\t\t\tTitle: \"Name\",\n\t\t\t\tValue: event.Name,\n\t\t\t\tShort: true,\n\t\t\t},\n\t\t},\n\t\tFooter: \"BotKube\",\n\t}\n\n\t// Add timestamp\n\tts := json.Number(strconv.FormatInt(event.TimeStamp.Unix(), 10))\n\tif ts > \"0\" {\n\t\tattachment.Ts = ts\n\t}\n\n\tif event.Namespace != \"\" {\n\t\tattachment.Fields = append(attachment.Fields, slack.AttachmentField{\n\t\t\tTitle: \"Namespace\",\n\t\t\tValue: event.Namespace,\n\t\t\tShort: true,\n\t\t})\n\t}\n\n\tif event.Reason != \"\" {\n\t\tattachment.Fields = append(attachment.Fields, slack.AttachmentField{\n\t\t\tTitle: \"Reason\",\n\t\t\tValue: event.Reason,\n\t\t\tShort: true,\n\t\t})\n\t}\n\n\tif len(event.Messages) > 0 {\n\t\tmessage := \"\"\n\t\tfor _, m := range event.Messages {\n\t\t\tmessage = message + m\n\t\t}\n\t\tattachment.Fields = append(attachment.Fields, slack.AttachmentField{\n\t\t\tTitle: \"Message\",\n\t\t\tValue: message,\n\t\t})\n\t}\n\n\tif event.Action != \"\" {\n\t\tattachment.Fields = append(attachment.Fields, slack.AttachmentField{\n\t\t\tTitle: \"Action\",\n\t\t\tValue: event.Action,\n\t\t})\n\t}\n\n\tif len(event.Recommendations) > 0 {\n\t\trec := \"\"\n\t\tfor _, r := range event.Recommendations {\n\t\t\trec = rec + r\n\t\t}\n\t\tattachment.Fields = append(attachment.Fields, slack.AttachmentField{\n\t\t\tTitle: \"Recommendations\",\n\t\t\tValue: rec,\n\t\t})\n\t}\n\n\t// Add clustername in the message\n\tattachment.Fields = append(attachment.Fields, slack.AttachmentField{\n\t\tTitle: \"Cluster\",\n\t\tValue: s.ClusterName,\n\t})\n\n\tattachment.Color = 
attachmentColor[event.Level]\n\tparams.Attachments = []slack.Attachment{attachment}\n\n\tchannelID, timestamp, err := api.PostMessage(s.Channel, \"\", params)\n\tif err != nil {\n\t\tlog.Logger.Errorf(\"Error in sending slack message %s\", err.Error())\n\t\treturn err\n\t}\n\n\tlog.Logger.Infof(\"Message successfully sent to channel %s at %s\", channelID, timestamp)\n\treturn nil\n}",
"func manageSlack() {\n\tfor msg := range slackRtm.IncomingEvents {\n\t\tfmt.Print(\"Slack Event Received: \")\n\t\tswitch ev := msg.Data.(type) {\n\t\tcase *slack.HelloEvent:\n\t\t\t// Ignore hello\n\t\tcase *slack.ConnectedEvent:\n\t\t\tfmt.Println(\"Infos:\", ev.Info)\n\t\t\tfmt.Println(\"Connection counter:\", ev.ConnectionCount)\n\t\tcase *slack.MessageEvent:\n\t\t\tslackUser, err := slackApi.GetUserInfo(ev.User)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Error retrieving slack user: %s\\n\", err)\n\t\t\t} else {\n\t\t\t\tif slackUser.Name != \"mumblerelay\" {\n\t\t\t\t\t//Mumble accepts HTML, Slack (just API; IRC uses plain text) uses their own weird formatting. Lets fix that.\n\t\t\t\t\tvar re = regexp.MustCompile(`<(http[\\$\\+\\!\\*\\'\\(\\)\\,\\?\\=%\\_a-zA-Z0-9\\/\\.:-]*)\\|?(.*)?>`)\n\t\t\t\t\ttext := re.ReplaceAllString(ev.Text, `<a href=\"$1\">$2</a>`)\n\t\t\t\t\ttext = strings.Replace(text, `\"></a>`, `\">Link has no title? I didn't know Slack would even do that...</a>`, -1)\n\t\t\t\t\tmsg := slackUser.Name + \": \" + text\n\t\t\t\t\tmumbleClient.Self.Channel.Send(msg, false)\n\t\t\t\t\tfmt.Println(msg)\n\t\t\t\t}\n\t\t\t}\n\t\tcase *slack.PresenceChangeEvent:\n\t\t\tfmt.Printf(\"Presence Change: %v\\n\", ev)\n\t\tcase *slack.LatencyReport:\n\t\t\tfmt.Printf(\"Current latency: %v\\n\", ev.Value)\n\t\tcase *slack.RTMError:\n\t\t\tfmt.Printf(\"Error: %s\\n\", ev.Error())\n\t\tcase *slack.DisconnectedEvent:\n\t\t\t//Nothing yet...\n\t\tcase *slack.FileCommentEditedEvent:\n\t\t\t//Maybe\n\t\t\tfmt.Printf(\"FileCommentEdited: %v\\n\", msg.Data)\n\t\tcase *slack.FilePublicEvent:\n\t\t\t//Maybe\n\t\t\tfmt.Printf(\"FilePublic: %v\\n\", msg.Data)\n\t\tcase *slack.FileSharedEvent:\n\t\t\t//Maybe\n\t\t\tfmt.Printf(\"FileShared: %v\\n\", msg.Data)\n\t\tcase *slack.ChannelJoinedEvent:\n\t\t\t//Maybe\n\t\t\t//Perhaps when I join a non-configured channel, I complain and leave..\n\t\t\t//Or better yet, enter spy mode; Read history, members and future messages 
:P\n\t\t\tfmt.Printf(\"ChannelJoined: %v\\n\", msg.Data)\n\t\tcase *slack.ReactionAddedEvent:\n\t\t\t//Maybe\n\t\t\tfmt.Printf(\"ReactionAdded: %v\\n\", msg.Data)\n\t\tcase *slack.MessageTooLongEvent:\n\t\t\t//Maybe\n\t\t\tfmt.Printf(\"MessageTooLong: %v\\n\", msg.Data)\n\t\tcase *slack.FileCommentAddedEvent:\n\t\t\tfmt.Printf(\"File Comment Added: %s: %s\\n\", ev.Comment.User, ev.Comment.Comment)\n\t\tcase *slack.InvalidAuthEvent:\n\t\t\tfmt.Printf(\"Invalid credentials\")\n\t\t\treturn\n\t\tdefault:\n\t\t\t// Ignore other events..\n\t\t\tfmt.Printf(\"Unexpected: %v\\n\", msg.Data)\n\t\t}\n\t}\n}",
"func (s *Slack) SendMessage(msg string) error {\n\tlog.Logger.Info(fmt.Sprintf(\">> Sending to slack: %+v\", msg))\n\n\tapi := slack.New(s.Token)\n\tparams := slack.PostMessageParameters{\n\t\tAsUser: true,\n\t}\n\n\tchannelID, timestamp, err := api.PostMessage(s.Channel, msg, params)\n\tif err != nil {\n\t\tlog.Logger.Errorf(\"Error in sending slack message %s\", err.Error())\n\t\treturn err\n\t}\n\n\tlog.Logger.Infof(\"Message successfully sent to channel %s at %s\", channelID, timestamp)\n\treturn nil\n}",
"func (bot *SlackBot) sendMessage(msg Message) error {\n\tmsg.ID = atomic.AddUint64(&counter, 1)\n\terr := websocket.JSON.Send(bot.ws, msg)\n\treturn err\n}",
"func (cli *chatLogInteral) postInternal(llp LogLineParsed) {\n\n\t// Forward Alert\nSubLoop:\n\tfor _, subs := range cli.subbedToChat {\n\t\t// Check if Channel is Subbed to this Topic\n\t\tfor _, t := range subs.Subbed {\n\t\t\tif t == llp.Cat {\n\t\t\t\tselect {\n\t\t\t\tcase subs.C <- llp:\n\t\t\t\tdefault: // Non blocking\n\t\t\t\t}\n\t\t\t\tcontinue SubLoop\n\t\t\t}\n\t\t}\n\n\t\t// Not Subbed to Topic\n\t}\n}",
"func slackMeetup(w http.ResponseWriter, r *http.Request) {\n\ttext := r.FormValue(\"text\")\n\ttrigger := r.FormValue(\"trigger_word\")\n\n\ttext = strings.Replace(text, trigger, \"\", -1)\n\n\tevents := filterEventsByKeyword(text)\n\tvar response string\n\n\tif len(events) > 0 {\n\t\te := events[0]\n\t\tt := time.Unix(0, e.Time*int64(time.Millisecond)).In(time.Local)\n\t\tlayout := \"Jan 2, 2006 at 3:04pm (MST)\"\n\t\tresponse = fmt.Sprint(e.Name, \" | \", t.Format(layout), \" @ \", e.Venue.Name, \" | \", e.EventUrl)\n\t} else {\n\t\tresponse = \"No matching meetup found.\"\n\t}\n\n\tdata := struct {\n\t\tText string `json:\"text\"`\n\t}{response}\n\n\toutput, _ := json.Marshal(data)\n\tlog.Println(string(output))\n\tw.Write(output)\n}",
"func postDiscordMessage(session *discordgo.Session, channelID, msg string) {\n\t_, err := session.ChannelMessageSend(channelID, msg)\n\tif err != nil {\n\t\t// See if the error is a common one that we recognize.\n\t\tif err.Error() == msgTooLongErr {\n\t\t\terrMsg := \"The generated message was too long. Discord doesn't let messages that are\" +\n\t\t\t\t\" longer than 2000 characters go through.\"\n\t\t\tsession.ChannelMessageSend(channelID, errMsg)\n\t\t\treturn\n\t\t}\n\t\t// We didn't recognize the error at this point. Post a general response.\n\t\terrMsg := fmt.Sprintf(\"Something went wrong: %v\", err)\n\t\tsession.ChannelMessageSend(channelID, errMsg)\n\t}\n}",
"func HandlePostMessage(w http.ResponseWriter, r *http.Request) {\n\tvar res []byte\n\tb, _ := readRequestParams(r.Body)\n\tm, _ := initChannels(b, channels...)\n\terr := sendMessages(m)\n\tif err != nil {\n\t\terr := err.Error()\n\t\tres, _ = json.Marshal(MsgResBody{Sent: false, Error: &err})\n\t} else {\n\t\tres, _ = json.Marshal(MsgResBody{Sent: true, Error: nil})\n\t}\n\n\tw.Write(res)\n}",
"func NotifySlack(token string, channelID string, url string) error {\n\tapi := slack.New(token)\n\t_, _, err := api.PostMessage(channelID, slack.MsgOptionText(url, false))\n\treturn err\n}",
"func (h *Hookbot) ServePublish(w http.ResponseWriter, r *http.Request) {\n\n\ttopic := Topic(r)\n\n\tvar (\n\t\tbody []byte\n\t\terr error\n\t)\n\n\tbody, err = ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Println(\"Error in ServePublish reading body:\", err)\n\t\thttp.Error(w, \"500 Internal Server Error\",\n\t\t\thttp.StatusInternalServerError)\n\t\treturn\n\t}\n\n\textraMetadata := r.URL.Query()[\"extra-metadata\"]\n\tif len(extraMetadata) > 0 {\n\t\tswitch extraMetadata[0] {\n\t\tcase \"github\":\n\n\t\t\tbody, err = json.Marshal(map[string]interface{}{\n\t\t\t\t\"Signature\": r.Header.Get(\"X-Hub-Signature\"),\n\t\t\t\t\"Event\": r.Header.Get(\"X-GitHub-Event\"),\n\t\t\t\t\"Delivery\": r.Header.Get(\"X-GitHub-Delivery\"),\n\t\t\t\t\"Payload\": body,\n\t\t\t})\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Error in ServePublish serializing payload:\", err)\n\t\t\t\thttp.Error(w, \"500 Internal Server Error\",\n\t\t\t\t\thttp.StatusInternalServerError)\n\t\t\t}\n\n\t\tdefault:\n\t\t\thttp.Error(w, \"400 Bad Request (bad ?extra-metadata=)\",\n\t\t\t\thttp.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t}\n\n\tlog.Printf(\"Publish %q\", topic)\n\n\tok := h.Publish(Message{Topic: topic, Body: body})\n\n\tif !ok {\n\t\thttp.Error(w, \"Timeout in send\", http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\n\tfmt.Fprintln(w, \"OK\")\n}",
"func handleAppMentionEvent(event *slackevents.AppMentionEvent, client *slack.Client) error {\n\n\t// Grab the user name based on the ID of the one who mentioned the bot\n\tuser, err := client.GetUserInfo(event.User)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Check if the user said Hello to the bot\n\ttext := strings.ToLower(event.Text)\n\n\t// Create the attachment and assigned based on the message\n\tattachment := slack.Attachment{}\n\t// Add Some default context like user who mentioned the bot\n\tattachment.Fields = []slack.AttachmentField{\n\t\t{\n\t\t\tTitle: \"Date\",\n\t\t\tValue: time.Now().String(),\n\t\t}, {\n\t\t\tTitle: \"Initializer\",\n\t\t\tValue: user.Name,\n\t\t},\n\t}\n\tif strings.Contains(text, \"hello\") {\n\t\t// Greet the user\n\t\tattachment.Text = fmt.Sprintf(\"Hello %s\", user.Name)\n\t\tattachment.Pretext = \"Greetings\"\n\t\tattachment.Color = \"#4af030\"\n\t} else {\n\t\t// Send a message to the user\n\t\tattachment.Text = fmt.Sprintf(\"How can I help you %s?\", user.Name)\n\t\tattachment.Pretext = \"How can I be of service\"\n\t\tattachment.Color = \"#3d3d3d\"\n\t}\n\t// Send the message to the channel\n\t// The Channel is available in the event message\n\t_, _, err = client.PostMessage(event.Channel, slack.MsgOptionAttachments(attachment))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to post message: %w\", err)\n\t}\n\treturn nil\n}",
"func (s *Slack) Message(msg string, v ...interface{}) error {\n\treturn s.Send(\"\", msg, v...)\n}",
"func (handler *MessageHandler) PostMessage(w http.ResponseWriter, r *http.Request) {\n\ttokenString := r.Header.Get(\"Authorization\")\n\n\tvar message entities.Message\n\terr := json.NewDecoder(r.Body).Decode(&message)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(entities.Error{\n\t\t\tError: err.Error(),\n\t\t})\n\t\treturn\n\t}\n\n\tmessageID, timestamp, err := handler.usecases.PostMessage(tokenString, message)\n\tif err != nil && err.Error() == \"not authenticated\" {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tjson.NewEncoder(w).Encode(entities.Error{\n\t\t\tError: \"not authenticated\",\n\t\t})\n\t\treturn\n\t}\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(entities.Error{\n\t\t\tError: err.Error(),\n\t\t})\n\t\treturn\n\t}\n\tnewMessageOutput := struct {\n\t\tID uint `json:\"id\"`\n\t\tTimestamp time.Time `json:\"timestamp\"`\n\t}{\n\t\tID: messageID,\n\t\tTimestamp: timestamp,\n\t}\n\tjson.NewEncoder(w).Encode(&newMessageOutput)\n\tw.WriteHeader(http.StatusOK)\n}",
"func (p *Plugin) PostToChannelByIDAsBot(channelID, message string) error {\n\t_, appError := p.API.CreatePost(&model.Post{\n\t\tUserId: p.BotUserID,\n\t\tChannelId: channelID,\n\t\tMessage: message,\n\t})\n\tif appError != nil {\n\t\treturn appError\n\t}\n\n\treturn nil\n}",
"func SendMessage(version, build string) error {\n\texpanded, err := homedir.Expand(configPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := os.Stat(expanded); os.IsNotExist(err) {\n\t\treturn errgo.Mask(ErrNotConfigured, errgo.Any)\n\t}\n\n\tslackConfiguration := SlackConfiguration{\n\t\tNotificationUsername: \"KochoBot\",\n\t\tEmojiIcon: \":robot_face:\",\n\t}\n\n\tconfigFile, err := os.Open(expanded)\n\tif err != nil {\n\t\treturn errgo.WithCausef(err, ErrInvalidConfiguration, \"couldn't open Slack configuration file\")\n\t}\n\tdefer configFile.Close()\n\n\tif err := json.NewDecoder(configFile).Decode(&slackConfiguration); err != nil {\n\t\treturn errgo.WithCausef(err, ErrInvalidConfiguration, \"couldn't decode Slack configuration\")\n\t}\n\n\tclient := slack.New(slackConfiguration.Token)\n\n\tparams := slack.PostMessageParameters{}\n\tparams.Attachments = []slack.Attachment{\n\t\tslack.Attachment{\n\t\t\tColor: \"#2484BE\",\n\t\t\tText: fmt.Sprintf(\"*Kocho*: %s ran `%s`\", slackConfiguration.Username, strings.Join(os.Args, \" \")),\n\t\t\tFields: []slack.AttachmentField{\n\t\t\t\tslack.AttachmentField{\n\t\t\t\t\tTitle: \"Kocho Version\",\n\t\t\t\t\tValue: version,\n\t\t\t\t\tShort: true,\n\t\t\t\t},\n\t\t\t\tslack.AttachmentField{\n\t\t\t\t\tTitle: \"Kocho Build\",\n\t\t\t\t\tValue: build,\n\t\t\t\t\tShort: true,\n\t\t\t\t},\n\t\t\t},\n\t\t\tMarkdownIn: []string{\"text\"},\n\t\t},\n\t}\n\tparams.Username = slackConfiguration.NotificationUsername\n\tparams.IconEmoji = slackConfiguration.EmojiIcon\n\n\tif _, _, err := client.PostMessage(slackConfiguration.NotificationChannel, \"\", params); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (t *Typetalk) ChatPostMessage(ctx context.Context, message string) (*typetalk.PostedMessageResult, *typetalkShared.Response, error) {\n\treturn t.Client.Messages.PostMessage(ctx, t.TopicID, message, nil)\n}",
"func (n Notification) Post(obj *mgh.MungeObject) error {\n\treturn obj.WriteComment(n.String())\n}",
"func PushNote(title string, body string, token string, device string) error {\n\t// curl --header 'Access-Token: <your_access_token_here>' \\\n\t// --header 'Content-Type: application/json' \\\n\t// --data-binary '{\"body\":\"Space Elevator, Mars Hyperloop, Space Model S (Model Space?)\",\"title\":\"Space Travel Ideas\",\"type\":\"note\"}' \\\n\t// --request POST \\\n\t// https://api.pushbullet.com/v2/pushes\n\n\tuserDevice, err := GetDefaultDevice(device, token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpbURL := \"https://api.pushbullet.com\"\n\theaders := []header{\n\t\t{\"Access-Token\", token},\n\t\t{\"Content-Type\", \"application/json\"},\n\t}\n\tcontent := pushBody{\n\t\ttitle,\n\t\tbody,\n\t\t\"note\",\n\t\tuserDevice.Iden,\n\t}\n\n\treqContent, err := json.Marshal(content)\n\tres, err := makeRequest(pbURL+\"/v2/pushes\", \"POST\", headers, bytes.NewBuffer(reqContent))\n\tdefer res.Body.Close()\n\treturn err\n}",
"func post(body map[string]interface{}) {\n\tif len(Token) == 0 {\n\t\tstderr(\"Token is empty\")\n\t\treturn\n\t}\n\n\tjsonBody, err := json.Marshal(body)\n\tif err != nil {\n\t\tstderr(fmt.Sprintf(\"Rollbar payload couldn't be encoded: %s\", err.Error()))\n\t\treturn\n\t}\n\n\tresp, err := http.Post(Endpoint, \"application/json\", bytes.NewReader(jsonBody))\n\tdefer resp.Body.Close()\n\tif err != nil {\n\t\tstderr(fmt.Sprintf(\"Rollbar POST failed: %s\", err.Error()))\n\t} else if resp.StatusCode != 200 {\n\t\tstderr(fmt.Sprintf(\"Rollbar response: %s\", resp.Status))\n\t}\n}",
"func hookPerformWebPOST(url string, data interface{}) error {\n\t// Generate a buffer for us to store some JSON\n\tb := new(bytes.Buffer)\n\n\t// Take the data we have received and encode it in JSON to POST\n\tjson.NewEncoder(b).Encode(data)\n\n\t// It's always important to log.\n\tlog.WithFields(log.Fields{\n\t\t\"url\": url,\n\t\t\"data\": b,\n\t}).Debug(\"POSTing to webhook\")\n\n\t// POST up our data and then return if we got an error or not.\n\tres, err := http.Post(url, \"application/json; charset=utf-8\", b)\n\n\tlog.WithFields(log.Fields{\n\t\t\"url\": url,\n\t\t\"code\": res.StatusCode,\n\t\t\"status\": res.Status,\n\t}).Debug(\"Response received from webhook\")\n\n\treturn err\n}",
"func sendAnonymousMessage(username, message string) error {\n\turl := os.Getenv(webhookConfig)\n\tpayload, err := json.Marshal(slackMsg{\n\t\tText: message,\n\t\tChannel: username,\n\t\tUsername: fmt.Sprintf( animals[rand.Intn(len(animals))]),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = http.Post(url, \"application/json\", bytes.NewBuffer(payload))\n\treturn err\n}",
"func sendPush(apiKey string, name string, url string, newStatus string, oldStatus string) {\n\tlogging.MustGetLogger(\"\").Debug(\"Sending Push about \\\"\" + url + \"\\\"...\")\n\n\tpb := pushbullet.New(apiKey)\n\n\tpush := requests.NewLink()\n\tpush.Title = GetConfiguration().Application.Title + \" - Status Change\"\n\tpush.Body = name + \" went from \\\"\" + oldStatus + \"\\\" to \\\"\" + newStatus + \"\\\".\"\n\tpush.Url = url\n\n\t_, err := pb.PostPushesLink(push)\n\tif err != nil {\n\t\tlogging.MustGetLogger(\"\").Error(\"Unable to send Push: \", err)\n\t}\n}",
"func PostEvent(client *http.Client, hookURL string, message EventMessage) (*http.Response, error) {\n\tbuf := new(bytes.Buffer)\n\terr := json.NewEncoder(buf).Encode(message)\n\tif err != nil {\n\t\tlog.Printf(\"Encode json failed: %+v\\n\", err)\n\t\treturn nil, err\n\t}\n\tresp, err := client.Post(hookURL, \"application/json; charset=utf-8\", buf)\n\treturn resp, err\n}",
"func handlePostMessage(w http.ResponseWriter, r *http.Request) {\n\t//TODO do some sanity checks on the body or message passed in.\n\t//TODO use this as part of validation if r.Header.Get(\"Content-Type\") == \"application/x-www-form-urlencoded\" {\n\tfmt.Printf(\"Got input new method.\\n\")\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tmessage := string(body)\n\tfmt.Printf(\"Message sent in \" + message + \"\\n\")\n\t//Add message to the map....\n\tvar messageid string = addToMessageMap(message)\n\tfmt.Printf(\"Message ID \" + messageid + \"\\n\")\n\n\t//return json object with message id\n\n\tmis := messageIdStruct{messageid, message}\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tif err := json.NewEncoder(w).Encode(mis); err != nil {\n\t\tpanic(err)\n\t}\n}",
"func messageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {\r\n // Ignore all messages created by the bot itself\r\n // This isn't required in this specific example but it's a good practice.\r\n if m.Author.ID == s.State.User.ID {\r\n return\r\n }\r\n // If the message is \"ping\" reply with \"Pong!\"\r\n if m.Content == \"ping\" {\r\n s.ChannelMessageSend(m.ChannelID, \"Pong!\")\r\n }\r\n\r\n // If the message is \"pong\" reply with \"Ping!\"\r\n if m.Content == \"pong\" {\r\n s.ChannelMessageSend(m.ChannelID, \"Ping!\")\r\n }\r\n\r\n // If the message is \"getmyid\" reply with \"Ping!\"\r\n if m.Content == \"getchannelid\" {\r\n s.ChannelMessageSend(m.ChannelID, \"ChannelID:\" + m.ChannelID)\r\n }\r\n\r\n if m.Content == \"getpushurl\" {\r\n s.ChannelMessageSend(m.ChannelID, sendMessageURLGen(m.ChannelID))\r\n }\r\n}",
"func (c *EventController) Post(ctx *app.PostEventContext) error {\n\teid := uuid.NewV4().String()\n\tocctime := time.Now()\n\tif ctx.Payload.Occtime != nil {\n\t\tocctime = *ctx.Payload.Occtime\n\t}\n\tvar data interface{}\n\tif ctx.Payload.Params != nil {\n\t\tdata = *ctx.Payload.Params\n\t}\n\n\tevt := engine.Event{\n\t\tEid: eid,\n\t\tEtype: ctx.Payload.Etype,\n\t\tAction: ctx.Payload.Action,\n\t\tOccTime: occtime,\n\t\tFrom: ctx.Payload.From,\n\t\tData: data,\n\t}\n\t// Put your logic here\n\tengine.Put(evt)\n\t// EventController_Put: end_implement\n\tres := &app.AntResult{Eid: &eid}\n\treturn ctx.OK(res)\n}",
"func (d *Deogracias) Post(p *reddit.Post) error {\n\terr := d.bot.Reply(p.Name, d.getPostQuote())\n\tif err != nil {\n\t\tlog.Println(errors.WithStack(errors.Errorf(\"failed to make post reply: %v\", err)))\n\t}\n\treturn nil\n}",
"func sendButtonMessage(senderID int64, text string) {\n\trecipient := new(Recipient)\n\trecipient.ID = senderID\n\tbuttonMessage := new(ButtonMessage)\n\tbuttonMessage.Recipient = *recipient\n\tbuttonMessage.ButtonMessageBody.Attachment.Type = \"template\"\n\tbuttonMessage.ButtonMessageBody.Attachment.Payload.TemplateType = \"button\"\n\tbuttonMessage.ButtonMessageBody.Attachment.Payload.Text = text\n\tbuttonMessage.ButtonMessageBody.Attachment.Payload.Buttons.Type = \"web_url\"\n\tbuttonMessage.ButtonMessageBody.Attachment.Payload.Buttons.Url = \"https://still-bayou-19762.herokuapp.com/\"\n\tbuttonMessage.ButtonMessageBody.Attachment.Payload.Buttons.Title = \"May 13\"\n\tlog.Print(buttonMessage)\n\tbuttonMessageBody, err := json.Marshal(buttonMessage)\n\tlog.Print(buttonMessageBody)\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\treq, err := http.NewRequest(\"POST\", FacebookEndPoint, bytes.NewBuffer(buttonMessageBody))\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\tvalues := url.Values{}\n\tvalues.Add(\"access_token\", accessToken)\n\treq.URL.RawQuery = values.Encode()\n\treq.Header.Add(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tclient := &http.Client{Timeout: time.Duration(30 * time.Second)}\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\tdefer res.Body.Close()\n\tvar result map[string]interface{}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\tif err := json.Unmarshal(body, &result); err != nil {\n\t\tlog.Print(err)\n\t}\n\tlog.Print(result)\n}"
] | [
"0.76651305",
"0.7442603",
"0.71486974",
"0.71397513",
"0.71380925",
"0.70558584",
"0.7021113",
"0.6846197",
"0.6742398",
"0.6505753",
"0.650403",
"0.6456163",
"0.63341874",
"0.63200605",
"0.6228203",
"0.62201923",
"0.61911505",
"0.6082172",
"0.6075067",
"0.60693914",
"0.6054156",
"0.60284084",
"0.60275143",
"0.60164434",
"0.5992761",
"0.5987074",
"0.5971948",
"0.59535545",
"0.59267896",
"0.592586",
"0.59144145",
"0.589187",
"0.58902436",
"0.58862007",
"0.5866575",
"0.58536565",
"0.5852144",
"0.585074",
"0.58442205",
"0.5821414",
"0.57997364",
"0.5798188",
"0.57973397",
"0.57961005",
"0.5773535",
"0.5770005",
"0.5768645",
"0.5760105",
"0.5753005",
"0.57427377",
"0.572583",
"0.5681387",
"0.5680179",
"0.5659953",
"0.565788",
"0.5653832",
"0.5633531",
"0.5627167",
"0.56271404",
"0.56227934",
"0.561682",
"0.5614212",
"0.56134677",
"0.56130975",
"0.5611089",
"0.56009203",
"0.5579959",
"0.55797225",
"0.5578415",
"0.5568151",
"0.55473214",
"0.55233115",
"0.5520833",
"0.5516489",
"0.5505784",
"0.5504449",
"0.5483549",
"0.54628664",
"0.5462299",
"0.54433703",
"0.5431114",
"0.54105467",
"0.53861415",
"0.5381168",
"0.5369228",
"0.53646237",
"0.53544354",
"0.53470343",
"0.53432614",
"0.5331731",
"0.53292453",
"0.53250057",
"0.53242564",
"0.5317173",
"0.5297283",
"0.52965456",
"0.5290303",
"0.52797174",
"0.52770376",
"0.5270181"
] | 0.7452913 | 1 |
Get takes name of the cloudwatchEventTarget, and returns the corresponding cloudwatchEventTarget object, and an error if there is any. | Get получает имя целевого объекта cloudwatchEventTarget и возвращает соответствующий объект cloudwatchEventTarget и ошибку, если она есть. | func (c *FakeCloudwatchEventTargets) Get(name string, options v1.GetOptions) (result *v1alpha1.CloudwatchEventTarget, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(cloudwatcheventtargetsResource, c.ns, name), &v1alpha1.CloudwatchEventTarget{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.CloudwatchEventTarget), err
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func GetEventTarget(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *EventTargetState, opts ...pulumi.ResourceOption) (*EventTarget, error) {\n\tvar resource EventTarget\n\terr := ctx.ReadResource(\"aws:cloudwatch/eventTarget:EventTarget\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (e *EventAPI) Get(name string) (*EventType, error) {\n\teventType := &EventType{}\n\terr := e.client.httpGET(e.backOffConf.create(), e.eventURL(name), eventType, \"unable to request event types\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn eventType, nil\n}",
"func (m *DeviceManagementTroubleshootingEvent) GetEventName()(*string) {\n val, err := m.GetBackingStore().Get(\"eventName\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}",
"func (c *OutputEventContext) Get(eventName string) (*protocol.Event, error) {\n\te, ok := c.Context[eventName]\n\tif !ok {\n\t\terr := fmt.Errorf(\"cannot find the event name in OutputEventContext : %s\", eventName)\n\t\treturn nil, err\n\t}\n\treturn e, nil\n}",
"func (c *FakeCloudwatchEventTargets) Delete(name string, options *v1.DeleteOptions) error {\n\t_, err := c.Fake.\n\t\tInvokes(testing.NewDeleteAction(cloudwatcheventtargetsResource, c.ns, name), &v1alpha1.CloudwatchEventTarget{})\n\n\treturn err\n}",
"func NewEventTarget(ctx *pulumi.Context,\n\tname string, args *EventTargetArgs, opts ...pulumi.ResourceOption) (*EventTarget, error) {\n\tif args == nil || args.Arn == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Arn'\")\n\t}\n\tif args == nil || args.Rule == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Rule'\")\n\t}\n\tif args == nil {\n\t\targs = &EventTargetArgs{}\n\t}\n\tvar resource EventTarget\n\terr := ctx.RegisterResource(\"aws:cloudwatch/eventTarget:EventTarget\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (t EventType) GetName() string {\n\treturn C.GoString((*C.char)(C.gst_event_type_get_name(C.GstEventType(t))))\n}",
"func (r *ProjectsTraceSinksService) Get(name string) *ProjectsTraceSinksGetCall {\n\tc := &ProjectsTraceSinksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}",
"func (o *EventTypeIn) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}",
"func (w *Watcher) GetTarget(targetName string) (*Target, error) {\n\tmutableMutex.Lock()\n\tdefer mutableMutex.Unlock()\n\tif w.TargetMap == nil {\n\t\tw.TargetMap = make(map[string]*Target)\n\t}\n\ttarget, ok := w.TargetMap[targetName]\n\tif !ok {\n\t\treturn nil, errors.Errorf(\"not exist domain\")\n\t}\n\treturn target, nil\n}",
"func (c *FakeCloudwatchEventTargets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewPatchSubresourceAction(cloudwatcheventtargetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}",
"func (c *Channel) Get(eventID string) (Event, error) {\n\ttxn := c.db.NewTransaction(false)\n\tdefer txn.Discard()\n\n\te, err := getEvent(txn, c.topic, eventID, c.name)\n\tif err != nil {\n\t\treturn Event{}, err\n\t}\n\n\treturn *e, nil\n}",
"func (a ProblemAdapter) GetEvent() string {\n\treturn a.cloudEvent.GetType()\n}",
"func (d *DevClient) GetTrigger(systemKey, name string) (map[string]interface{}, error) {\n\treturn d.GetEventHandler(systemKey, name)\n}",
"func (s CloudEventResource) GetName() string {\n\treturn s.Name\n}",
"func (tc *Target) EventType(space string, name eventpkg.TypeName) *eventpkg.Type {\n\ttc.eventTypeCache.RLock()\n\tdefer tc.eventTypeCache.RUnlock()\n\treturn tc.eventTypeCache.cache[libkv.EventTypeKey{Space: space, Name: name}]\n}",
"func (s googleCloudStorageTargetNamespaceLister) Get(name string) (*v1alpha1.GoogleCloudStorageTarget, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"googlecloudstoragetarget\"), name)\n\t}\n\treturn obj.(*v1alpha1.GoogleCloudStorageTarget), nil\n}",
"func (m *AppliedAuthenticationEventListener) GetEventType()(*AuthenticationEventType) {\n val, err := m.GetBackingStore().Get(\"eventType\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*AuthenticationEventType)\n }\n return nil\n}",
"func (c *FakeCloudwatchEventTargets) Update(cloudwatchEventTarget *v1alpha1.CloudwatchEventTarget) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewUpdateAction(cloudwatcheventtargetsResource, c.ns, cloudwatchEventTarget), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}",
"func (m *UserSimulationEventInfo) GetEventName()(*string) {\n return m.eventName\n}",
"func (c *FakeCloudwatchEventTargets) Create(cloudwatchEventTarget *v1alpha1.CloudwatchEventTarget) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewCreateAction(cloudwatcheventtargetsResource, c.ns, cloudwatchEventTarget), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}",
"func (s *StopEvent) Name() string {\n\treturn s.name\n}",
"func (e *UnknownEvent) GetEventName() string {\n\treturn \"unknown\"\n}",
"func (c *Event) Name() string {\n\treturn c.name\n}",
"func NameOfEvent(t uint32) string {\n\tswitch t {\n\tcase SessionCreate:\n\t\treturn \"SessionCreate\"\n\tcase SessionDestroy:\n\t\treturn \"SessionDestroy\"\n\tcase TopicPublish:\n\t\treturn \"TopicPublish\"\n\tcase TopicSubscribe:\n\t\treturn \"TopicSubscribe\"\n\tcase TopicUnsubscribe:\n\t\treturn \"TopicUnsubscribe\"\n\tcase QutoChange:\n\t\treturn \"QutoChange\"\n\tcase SessionResume:\n\t\treturn \"SessionResume\"\n\tcase AuthChange:\n\t\treturn \"AuthChange\"\n\tdefault:\n\t\treturn \"Unknown\"\n\t}\n}",
"func Get(c *gophercloud.ServiceClient, stackName, stackID, resourceName, eventID string) (r GetResult) {\n\tresp, err := c.Get(getURL(c, stackName, stackID, resourceName, eventID), &r.Body, nil)\n\t_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)\n\treturn\n}",
"func (s knativeEventingNamespaceLister) Get(name string) (*v1beta1.KnativeEventing, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1beta1.Resource(\"knativeeventing\"), name)\n\t}\n\treturn obj.(*v1beta1.KnativeEventing), nil\n}",
"func (s *Structured) GetName() string {\n\treturn s.cloudEvent.Source\n}",
"func (o *WebhooksJsonWebhook) GetEvent() string {\n\tif o == nil || o.Event == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Event\n}",
"func (e *Event) Get(name string) uint64 {\n\tif idx, has := e.Type.Arg(name); has && idx <= len(e.Args) {\n\t\treturn e.Args[idx]\n\t}\n\treturn 0\n}",
"func (m *DomainDnsSrvRecord) GetNameTarget()(*string) {\n val, err := m.GetBackingStore().Get(\"nameTarget\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}",
"func (m *EventLog) Get(key string) interface{} {\n\treturn m.Payload()[key]\n}",
"func (m *Reminder) GetEventWebLink()(*string) {\n val, err := m.GetBackingStore().Get(\"eventWebLink\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}",
"func GetGoCallback(name string) GoCallback {\n\tcallback, ok := goCallbacks[name]\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn callback\n}",
"func (t Tags) Get(name string) string {\n\treturn t[name]\n}",
"func Get(name string) (AlertClient, error) {\n\tlock.RLock()\n\tdefer lock.RUnlock()\n\n\tif v, ok := instances[name]; ok {\n\t\treturn v, nil\n\t}\n\treturn nil, ErrAlertClientNotFound\n}",
"func (n *Node) GetEvent(ctx context.Context, req *api.EventRequest) (*wire.Event, error) {\n\tif err := checkSource(ctx); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// food for discovery\n\thost := api.GrpcPeerHost(ctx)\n\tn.CheckPeerIsKnown(host, nil)\n\n\tvar eventHash hash.Event\n\n\tif req.Hash != nil {\n\t\teventHash.SetBytes(req.Hash)\n\t} else {\n\t\tcreator := hash.HexToPeer(req.PeerID)\n\t\th := n.store.GetEventHash(creator, req.Index)\n\t\tif h == nil {\n\t\t\treturn nil, status.Error(codes.NotFound, fmt.Sprintf(\"event not found: %s-%d\", req.PeerID, req.Index))\n\t\t}\n\t\teventHash = *h\n\t}\n\n\tevent := n.store.GetWireEvent(eventHash)\n\tif event == nil {\n\t\treturn nil, status.Error(codes.NotFound, fmt.Sprintf(\"event not found: %s\", eventHash.Hex()))\n\t}\n\n\treturn event, nil\n}",
"func (el *EventListener) GetEventName() string {\n\treturn el.EventName\n}",
"func (c *EventService) Get(id string) (Event, *http.Response, error) {\n\toutput := &struct {\n\t\tData Event `json:\"data\"`\n\t}{}\n\tpath := fmt.Sprintf(\"%s/%s\", c.endpoint, id)\n\tresp, err := doGet(c.sling, path, output)\n\treturn output.Data, resp, err\n}",
"func (e *TracepointEvent) GetString(name string) string {\n\tv, _ := e.tp.format.decodeString(e.data, name)\n\treturn v\n}",
"func (dep *Deps) Get(name string) T {\n\n\treturn dep.deps[name]\n\n}",
"func (el *EventListener) GetEventSourceName() string {\n\treturn el.EventSourceName\n}",
"func (es *EventService) Get(eventID string) (e Event, err error) {\n\t// GET: /event/:eventID\n\tvar req *http.Request\n\treq, err = es.c.NewRequest(\"GET\", \"/event/\"+eventID, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresp := struct {\n\t\tStatus string\n\t\tData Event\n\t\tMessage string\n\t}{}\n\n\terr = es.c.Do(req, &resp)\n\treturn resp.Data, err\n}",
"func (e *Event) GetTypeName() string {\n\treturn e.GetType().GetName()\n}",
"func (m *UserExperienceAnalyticsAnomalyDevice) GetDeviceName()(*string) {\n val, err := m.GetBackingStore().Get(\"deviceName\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}",
"func (s tektonListenerNamespaceLister) Get(name string) (*v1alpha1.TektonListener, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"tektonlistener\"), name)\n\t}\n\treturn obj.(*v1alpha1.TektonListener), nil\n}",
"func (o *CreateEventPayloadActions) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}",
"func (s eventProviderNamespaceLister) Get(name string) (*v1alpha1.EventProvider, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"eventprovider\"), name)\n\t}\n\treturn obj.(*v1alpha1.EventProvider), nil\n}",
"func getMessageName(eventStruct interface{}) string {\n\tif t := reflect.TypeOf(eventStruct); t.Kind() == reflect.Ptr {\n\t\treturn t.Elem().Name()\n\t} else {\n\t\treturn t.Name()\n\t}\n}",
"func (s *Service) GetNode(name string) *EventNode {\n\tfor _, node := range s.eventDefinition.EventDestinations {\n\t\tif node.Name == name {\n\t\t\treturn node\n\t\t}\n\t}\n\treturn nil\n}",
"func (r *DeviceManagementTroubleshootingEventRequest) Get(ctx context.Context) (resObj *DeviceManagementTroubleshootingEvent, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}",
"func (t *Tuner) getEvent() (*C.struct_dvb_frontend_event, error) {\n\tev := new(C.struct_dvb_frontend_event)\n\terr := ioctl(t.fe.Fd(), C.iFE_GET_EVENT, unsafe.Pointer(ev))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ev, nil\n}",
"func (c *Collection) GetEvent(\n\tkey, typ string, ts time.Time, ordinal int64, value interface{},\n) (*Event, error) {\n\tevent := &Event{\n\t\tCollection: c,\n\t\tKey: key,\n\t\tOrdinal: ordinal,\n\t\tTimestamp: ts,\n\t\tType: typ,\n\t}\n\n\t// Perform the actual GET\n\tpath := fmt.Sprintf(\"%s/%s/events/%s/%d/%d\", c.Name, key, typ,\n\t\tts.UnixNano()/1000000, ordinal)\n\tvar responseData jsonEvent\n\t_, err := c.client.jsonReply(\"GET\", path, nil, 200, &responseData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Move the data from the returned values into the Event object.\n\tevent.Value = responseData.Value\n\tevent.Ref = responseData.Path.Ref\n\tsecs := responseData.Timestamp / 1000\n\tnsecs := (responseData.Timestamp % 1000) * 1000000\n\tevent.Timestamp = time.Unix(secs, nsecs)\n\tevent.Ordinal = responseData.Ordinal\n\n\t// If the user provided us a place to unmarshal the 'value' field into\n\t// we do that here.\n\tif value != nil {\n\t\treturn event, event.Unmarshal(value)\n\t}\n\n\t// Success\n\treturn event, nil\n}",
"func (o *StorageNetAppCloudTargetAllOf) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}",
"func (m *CalendarGroup) GetName()(*string) {\n val, err := m.GetBackingStore().Get(\"name\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}",
"func (client *Client) Target(kind string, name string) Target {\n\tclient.mutex.RLock()\n\tdefer client.mutex.RUnlock()\n\n\tfor _, target := range client.targets {\n\t\tif target.Kind() == kind && strings.EqualFold(name, target.Name()) {\n\t\t\treturn target\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (e *EventAPI) Delete(name string) error {\n\treturn e.client.httpDELETE(e.backOffConf.create(), e.eventURL(name), \"unable to delete event type\")\n}",
"func Get(v reflect.Value, name string) reflect.Value {\n\tif v.Kind() == reflect.Interface {\n\t\tv = v.Elem()\n\t}\n\t// dereference pointers\n\tfor v.Kind() == reflect.Ptr {\n\t\tv = v.Elem()\n\t}\n\tswitch v.Kind() {\n\tcase reflect.Struct:\n\t\ttyp := v.Type()\n\t\tfor i := 0; i < typ.NumField(); i++ {\n\t\t\tf := typ.Field(i)\n\t\t\tif f.PkgPath != \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tns, mapped := Field(f)\n\t\t\tif (f.Anonymous && !mapped) || ns == \"-\" {\n\t\t\t\tnV := Get(v.Field(i), name)\n\t\t\t\tif nV.IsValid() {\n\t\t\t\t\treturn nV\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ns == name {\n\t\t\t\treturn v.Field(i)\n\t\t\t}\n\t\t}\n\tcase reflect.Map:\n\t\treturn v.MapIndex(reflect.ValueOf(name))\n\t}\n\treturn reflect.Value{}\n}",
"func (e *BasicEvent) Name() string {\n\treturn e.name\n}",
"func GetEventDataStore(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *EventDataStoreState, opts ...pulumi.ResourceOption) (*EventDataStore, error) {\n\tvar resource EventDataStore\n\terr := ctx.ReadResource(\"aws-native:cloudtrail:EventDataStore\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (o *EventAttributes) GetEvt() Event {\n\tif o == nil || o.Evt == nil {\n\t\tvar ret Event\n\t\treturn ret\n\t}\n\treturn *o.Evt\n}",
"func (r *Record) Get(name string) (Value, bool) {\n\tvalue, ok := r.nameValueMap[name]\n\treturn value, ok\n}",
"func (box *EventBox) Get(id uint64) (*Event, error) {\n\tobject, err := box.Box.Get(id)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if object == nil {\n\t\treturn nil, nil\n\t}\n\treturn object.(*Event), nil\n}",
"func (m *Descriptor) GetMessage(name string) *Descriptor {\n\tfor _, msg := range m.GetMessages() {\n\t\t// can lookup by name or message prefixed name (qualified)\n\t\tif msg.GetName() == name || msg.GetLongName() == name {\n\t\t\treturn msg\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (o *SyntheticMonitorUpdate) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}",
"func (s *StatusEvent) GetName() string {\n\tif s == nil || s.Name == nil {\n\t\treturn \"\"\n\t}\n\treturn *s.Name\n}",
"func (hc *Actions) Get(name string) (*release.Release, error) {\n\tactGet := action.NewGet(hc.Config)\n\treturn actGet.Run(name)\n}",
"func GetWebhook(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *WebhookState, opts ...pulumi.ResourceOption) (*Webhook, error) {\n\tvar resource Webhook\n\terr := ctx.ReadResource(\"datadog:index/webhook:Webhook\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (h *EventsHandlers)Get(e interface{})[]IAppEventHandler{\n\th.RLock()\n\tdefer h.RUnlock()\n\tif old, ok := h.handlers[e]; ok{\n\t\treturn old\n\t}\n\treturn nil\n}",
"func (e *BasicEvent) Get(key string) any {\n\tif v, ok := e.data[key]; ok {\n\t\treturn v\n\t}\n\n\treturn nil\n}",
"func (d *V8interceptor) GetByname(name string, object *V8value, retval **V8value, exception *string) int32 {\n\tname_ := C.cef_string_userfree_alloc()\n\tsetCEFStr(name, name_)\n\tdefer func() {\n\t\tC.cef_string_userfree_free(name_)\n\t}()\n\tretval_ := (*retval).toNative()\n\texception_ := C.cef_string_userfree_alloc()\n\tsetCEFStr(*exception, exception_)\n\tdefer func() {\n\t\t*exception = cefstrToString(exception_)\n\t\tC.cef_string_userfree_free(exception_)\n\t}()\n\treturn int32(C.gocef_v8interceptor_get_byname(d.toNative(), (*C.cef_string_t)(name_), object.toNative(), &retval_, (*C.cef_string_t)(exception_), d.get_byname))\n}",
"func (f *FileDescriptor) GetMessage(name string) *Descriptor {\n\tfor _, m := range f.GetMessages() {\n\t\tif m.GetName() == name || m.GetLongName() == name {\n\t\t\treturn m\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (t *Timeline) GetEvent() string {\n\tif t == nil || t.Event == nil {\n\t\treturn \"\"\n\t}\n\treturn *t.Event\n}",
"func (obs *Observer) Get(name string, options metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) {\n\treturn obs.client.Namespace(obs.namespace).Get(name, options, subresources...)\n}",
"func (domain *Domain) GetEvent(uuid string) (*Event, error) {\n\t// determine event\n\tdomain.EventsX.RLock()\n\tevent, ok := domain.Events[uuid]\n\tdomain.EventsX.RUnlock()\n\n\tif !ok {\n\t\treturn nil, errors.New(\"event not found\")\n\t}\n\n\t// success\n\treturn event, nil\n}",
"func (r *DeviceManagementAutopilotEventRequest) Get(ctx context.Context) (resObj *DeviceManagementAutopilotEvent, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}",
"func derefEvent(e Event) Event {\n\tswitch e := e.(type) {\n\tcase *SwitchScenesEvent:\n\t\treturn *e\n\tcase *ScenesChangedEvent:\n\t\treturn *e\n\tcase *SceneCollectionChangedEvent:\n\t\treturn *e\n\tcase *SceneCollectionListChangedEvent:\n\t\treturn *e\n\tcase *SwitchTransitionEvent:\n\t\treturn *e\n\tcase *TransitionListChangedEvent:\n\t\treturn *e\n\tcase *TransitionDurationChangedEvent:\n\t\treturn *e\n\tcase *TransitionBeginEvent:\n\t\treturn *e\n\tcase *ProfileChangedEvent:\n\t\treturn *e\n\tcase *ProfileListChangedEvent:\n\t\treturn *e\n\tcase *StreamStartingEvent:\n\t\treturn *e\n\tcase *StreamStartedEvent:\n\t\treturn *e\n\tcase *StreamStoppingEvent:\n\t\treturn *e\n\tcase *StreamStoppedEvent:\n\t\treturn *e\n\tcase *StreamStatusEvent:\n\t\treturn *e\n\tcase *RecordingStartingEvent:\n\t\treturn *e\n\tcase *RecordingStartedEvent:\n\t\treturn *e\n\tcase *RecordingStoppingEvent:\n\t\treturn *e\n\tcase *RecordingStoppedEvent:\n\t\treturn *e\n\tcase *RecordingPausedEvent:\n\t\treturn *e\n\tcase *RecordingResumedEvent:\n\t\treturn *e\n\tcase *ReplayStartingEvent:\n\t\treturn *e\n\tcase *ReplayStartedEvent:\n\t\treturn *e\n\tcase *ReplayStoppingEvent:\n\t\treturn *e\n\tcase *ReplayStoppedEvent:\n\t\treturn *e\n\tcase *ExitingEvent:\n\t\treturn *e\n\tcase *HeartbeatEvent:\n\t\treturn *e\n\tcase *BroadcastCustomMessageEvent:\n\t\treturn *e\n\tcase *SourceCreatedEvent:\n\t\treturn *e\n\tcase *SourceDestroyedEvent:\n\t\treturn *e\n\tcase *SourceVolumeChangedEvent:\n\t\treturn *e\n\tcase *SourceMuteStateChangedEvent:\n\t\treturn *e\n\tcase *SourceAudioSyncOffsetChangedEvent:\n\t\treturn *e\n\tcase *SourceAudioMixersChangedEvent:\n\t\treturn *e\n\tcase *SourceRenamedEvent:\n\t\treturn *e\n\tcase *SourceFilterAddedEvent:\n\t\treturn *e\n\tcase *SourceFilterRemovedEvent:\n\t\treturn *e\n\tcase *SourceFilterVisibilityChangedEvent:\n\t\treturn *e\n\tcase *SourceFiltersReorderedEvent:\n\t\treturn *e\n\tcase *SourceOrderChangedEvent:\n\t\treturn *e\n\tcase 
*SceneItemAddedEvent:\n\t\treturn *e\n\tcase *SceneItemRemovedEvent:\n\t\treturn *e\n\tcase *SceneItemVisibilityChangedEvent:\n\t\treturn *e\n\tcase *SceneItemTransformChangedEvent:\n\t\treturn *e\n\tcase *SceneItemSelectedEvent:\n\t\treturn *e\n\tcase *SceneItemDeselectedEvent:\n\t\treturn *e\n\tcase *PreviewSceneChangedEvent:\n\t\treturn *e\n\tcase *StudioModeSwitchedEvent:\n\t\treturn *e\n\tdefault:\n\t\treturn nil\n\t}\n}",
"func (pgs *PGStorage) GetEvent(eventUUID uuid.UUID) (event.Event, error) {\n\tsql := \"select uuid, title, datetime, duration, description, userid, notify from events where uuid = $1\"\n\tres := make(map[string]interface{})\n\terr := pgs.DB.QueryRowxContext(pgs.Ctx, sql, eventUUID.String()).MapScan(res)\n\tif err != nil {\n\t\treturn event.Event{}, err\n\t}\n\te := event.Event{\n\t\tUUID: eventUUID,\n\t\tTitle: res[\"title\"].(string),\n\t\tDatetime: res[\"datetime\"].(time.Time),\n\t\tDuration: res[\"duration\"].(string),\n\t\tDesc: res[\"description\"].(string),\n\t\tUser: res[\"userid\"].(string),\n\t\tNotify: res[\"notify\"].(bool),\n\t}\n\treturn e, nil\n}",
"func (store Manager) GetEvent(id string) (Event, error) {\n\t//\tOur return item\n\tretval := Event{}\n\n\terr := store.systemdb.View(func(tx *buntdb.Tx) error {\n\t\titem, err := tx.Get(GetKey(\"Event\", id))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(item) > 0 {\n\t\t\t//\tUnmarshal data into our item\n\t\t\tval := []byte(item)\n\t\t\tif err := json.Unmarshal(val, &retval); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\t//\tIf there was an error, report it:\n\tif err != nil {\n\t\treturn retval, fmt.Errorf(\"problem getting the event: %s\", err)\n\t}\n\n\t//\tReturn our data:\n\treturn retval, nil\n}",
"func (m *ChatMessageAttachment) GetName()(*string) {\n val, err := m.GetBackingStore().Get(\"name\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}",
"func (ns *Namespace) GetSource(gvk schema.GroupVersionKind, name string) *unstructured.Unstructured {\n\tpos := ns.GetPropagatedObjects(gvk)\n\tfor _, po := range pos {\n\t\tif po.GetName() == name {\n\t\t\treturn po\n\t\t}\n\t}\n\treturn nil\n}",
"func (c *AuditEventClient) Get(ctx context.Context, guid string) (*resource.AuditEvent, error) {\n\tvar a resource.AuditEvent\n\terr := c.client.get(ctx, path.Format(\"/v3/audit_events/%s\", guid), &a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &a, nil\n}",
"func (a Event_Payload) Get(fieldName string) (value interface{}, found bool) {\n\tif a.AdditionalProperties != nil {\n\t\tvalue, found = a.AdditionalProperties[fieldName]\n\t}\n\treturn\n}",
"func (s redisTriggerNamespaceLister) Get(name string) (*v1beta1.RedisTrigger, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1beta1.Resource(\"redistrigger\"), name)\n\t}\n\treturn obj.(*v1beta1.RedisTrigger), nil\n}",
"func (b Build) GetEvent() *api.Event {\n\treturn b.event\n}",
"func (crawl *Crawl) GetHandler(name interface{}) Handler {\n\tcrawl.mutex.RLock()\n\tdefer crawl.mutex.RUnlock()\n\treturn crawl.handlers[name]\n}",
"func derefEvent(e Event) Event {\n\tswitch e := e.(type) {\n\tcase *SwitchScenesEvent:\n\t\treturn *e\n\tcase *ScenesChangedEvent:\n\t\treturn *e\n\tcase *SceneCollectionChangedEvent:\n\t\treturn *e\n\tcase *SceneCollectionListChangedEvent:\n\t\treturn *e\n\tcase *SwitchTransitionEvent:\n\t\treturn *e\n\tcase *TransitionListChangedEvent:\n\t\treturn *e\n\tcase *TransitionDurationChangedEvent:\n\t\treturn *e\n\tcase *TransitionBeginEvent:\n\t\treturn *e\n\tcase *ProfileChangedEvent:\n\t\treturn *e\n\tcase *ProfileListChangedEvent:\n\t\treturn *e\n\tcase *StreamStartingEvent:\n\t\treturn *e\n\tcase *StreamStartedEvent:\n\t\treturn *e\n\tcase *StreamStoppingEvent:\n\t\treturn *e\n\tcase *StreamStoppedEvent:\n\t\treturn *e\n\tcase *StreamStatusEvent:\n\t\treturn *e\n\tcase *RecordingStartingEvent:\n\t\treturn *e\n\tcase *RecordingStartedEvent:\n\t\treturn *e\n\tcase *RecordingStoppingEvent:\n\t\treturn *e\n\tcase *RecordingStoppedEvent:\n\t\treturn *e\n\tcase *ReplayStartingEvent:\n\t\treturn *e\n\tcase *ReplayStartedEvent:\n\t\treturn *e\n\tcase *ReplayStoppingEvent:\n\t\treturn *e\n\tcase *ReplayStoppedEvent:\n\t\treturn *e\n\tcase *ExitingEvent:\n\t\treturn *e\n\tcase *HeartbeatEvent:\n\t\treturn *e\n\tcase *SourceOrderChangedEvent:\n\t\treturn *e\n\tcase *SceneItemAddedEvent:\n\t\treturn *e\n\tcase *SceneItemRemovedEvent:\n\t\treturn *e\n\tcase *SceneItemVisibilityChangedEvent:\n\t\treturn *e\n\tcase *PreviewSceneChangedEvent:\n\t\treturn *e\n\tcase *StudioModeSwitchedEvent:\n\t\treturn *e\n\tdefault:\n\t\treturn nil\n\t}\n}",
"func GetKeyValueExpireViaName(iName string) (*KeyValueExpire, error) {\n\tvar _KeyValueExpire = &KeyValueExpire{Name: iName}\n\thas, err := Engine.Get(_KeyValueExpire)\n\tif has {\n\t\treturn _KeyValueExpire, err\n\t} else {\n\t\treturn nil, err\n\t}\n}",
"func (b EmployeeDeletedEvent) EventName() string {\n\treturn b.Event\n}",
"func (r *ProjectsMetricDescriptorsService) Get(name string) *ProjectsMetricDescriptorsGetCall {\n\tc := &ProjectsMetricDescriptorsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}",
"func (r *ProjectsLocationsProcessesRunsLineageEventsService) Get(name string) *ProjectsLocationsProcessesRunsLineageEventsGetCall {\n\tc := &ProjectsLocationsProcessesRunsLineageEventsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}",
"func (s *CloudService) GetByName(name string) (*getting.Cloud, error) {\n\tcount := 0\n\tindex := 0\n\titems, err := s.GetAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i, item := range items {\n\t\tif name == *item.Name {\n\t\t\tindex = i\n\t\t\tcount++\n\t\t}\n\t}\n\tif count == 0 {\n\t\treturn nil, errors.New(\"Cloud not found\")\n\t} else if count > 1 {\n\t\treturn nil, errors.New(\"More than one Cloud found by this name, use GetByID instead\")\n\t}\n\treturn items[index], nil\n}",
"func (d *DI) Get(name string) interface{} {\n\td.mutex.RLock()\n\tv := d.store[name]\n\td.mutex.RUnlock()\n\treturn v\n}",
"func getFieldValue(e reflect.Value, name string) interface{} {\n\tswitch e.Kind() {\n\tcase reflect.Ptr:\n\t\telem := e.Elem()\n\t\treturn getFieldValue(elem, name)\n\tcase reflect.Struct:\n\t\tf := e.FieldByName(strings.Title(name))\n\t\tif f.IsValid() {\n\t\t\treturn f.Interface()\n\t\t}\n\tcase reflect.Map:\n\t\tm, _ := e.Interface().(map[string]interface{})\n\t\treturn m[name]\n\t}\n\treturn nil\n}",
"func (engine *ECE) RetrieveEvent(reqId string) *Event {\n\tengine.RLock()\n\te, exists := engine.Events[reqId]\n\tengine.RUnlock()\n\n\tif exists {\n\t\treturn e\n\t}\n\n\treturn nil\n}",
"func (s targetNamespaceLister) Get(name string) (*v1alpha1.Target, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"target\"), name)\n\t}\n\treturn obj.(*v1alpha1.Target), nil\n}",
"func Get(name string) *Logger {\n\treturn loggers[name]\n}",
"func (t *Target) GetValue(name string) string {\n\treturn t.labels.Get(name)\n}",
"func (c *FakeAWSSNSTargets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.AWSSNSTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewGetAction(awssnstargetsResource, c.ns, name), &v1alpha1.AWSSNSTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.AWSSNSTarget), err\n}",
"func (e *EventParser) GetBitbucketCloudPullEventType(eventTypeHeader string, sha string, pr string) models.PullRequestEventType {\n\tswitch eventTypeHeader {\n\tcase bitbucketcloud.PullCreatedHeader:\n\t\tlastBitbucketSha.Add(pr, sha)\n\t\treturn models.OpenedPullEvent\n\tcase bitbucketcloud.PullUpdatedHeader:\n\t\tlastSha, _ := lastBitbucketSha.Get(pr)\n\t\tif sha == lastSha {\n\t\t\t// No change, ignore\n\t\t\treturn models.OtherPullEvent\n\t\t}\n\t\tlastBitbucketSha.Add(pr, sha)\n\t\treturn models.UpdatedPullEvent\n\tcase bitbucketcloud.PullFulfilledHeader, bitbucketcloud.PullRejectedHeader:\n\t\treturn models.ClosedPullEvent\n\t}\n\treturn models.OtherPullEvent\n}"
] | [
"0.72349846",
"0.60446095",
"0.5682909",
"0.54533374",
"0.5292888",
"0.5286852",
"0.52845055",
"0.51380754",
"0.51060545",
"0.5100123",
"0.49953666",
"0.49781477",
"0.4978093",
"0.49507922",
"0.49051094",
"0.48928928",
"0.4878934",
"0.48690373",
"0.48504093",
"0.48341855",
"0.48302954",
"0.4774812",
"0.47597277",
"0.47561",
"0.4737843",
"0.47368854",
"0.47326946",
"0.47318193",
"0.4702869",
"0.47004792",
"0.46971053",
"0.46969885",
"0.469356",
"0.46752384",
"0.4648671",
"0.46459308",
"0.46454394",
"0.46365753",
"0.46216327",
"0.45898575",
"0.4567365",
"0.45666626",
"0.45638457",
"0.45501667",
"0.45167488",
"0.45086628",
"0.44899186",
"0.44868094",
"0.44737318",
"0.4464219",
"0.44599128",
"0.44492722",
"0.4447878",
"0.44311395",
"0.44306734",
"0.44193646",
"0.44167453",
"0.44123593",
"0.4411542",
"0.4396613",
"0.43874782",
"0.43808848",
"0.43713176",
"0.43671682",
"0.4363337",
"0.43624514",
"0.43539387",
"0.43509743",
"0.4345924",
"0.43457192",
"0.43426266",
"0.43399456",
"0.43395478",
"0.43394813",
"0.43322703",
"0.43315062",
"0.43249848",
"0.43246126",
"0.43208012",
"0.43195018",
"0.43167838",
"0.43166974",
"0.43058062",
"0.43052024",
"0.42996928",
"0.4294583",
"0.4294274",
"0.42930076",
"0.42927662",
"0.4288362",
"0.4287482",
"0.4287287",
"0.428574",
"0.42823574",
"0.4277565",
"0.42764813",
"0.427329",
"0.4269642",
"0.42634568",
"0.42626032"
] | 0.7304512 | 0 |
List takes label and field selectors, and returns the list of CloudwatchEventTargets that match those selectors. | Список принимает метки и селекторы полей и возвращает список CloudwatchEventTargets, соответствующих этим селекторам. | func (c *FakeCloudwatchEventTargets) List(opts v1.ListOptions) (result *v1alpha1.CloudwatchEventTargetList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(cloudwatcheventtargetsResource, cloudwatcheventtargetsKind, c.ns, opts), &v1alpha1.CloudwatchEventTargetList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1alpha1.CloudwatchEventTargetList{ListMeta: obj.(*v1alpha1.CloudwatchEventTargetList).ListMeta}
for _, item := range obj.(*v1alpha1.CloudwatchEventTargetList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c *FakeAWSSNSTargets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AWSSNSTargetList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(awssnstargetsResource, awssnstargetsKind, c.ns, opts), &v1alpha1.AWSSNSTargetList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1alpha1.AWSSNSTargetList{ListMeta: obj.(*v1alpha1.AWSSNSTargetList).ListMeta}\n\tfor _, item := range obj.(*v1alpha1.AWSSNSTargetList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}",
"func (w *Watcher) GetTargetNameList() ([]string) {\n\tmutableMutex.Lock()\n\tdefer mutableMutex.Unlock()\n\tif w.TargetMap == nil {\n\t\tw.TargetMap = make(map[string]*Target)\n\t}\n\ttargetNameList := make([]string, 0, len(w.TargetMap))\n\tfor tn := range w.TargetMap {\n\t\ttargetNameList = append(targetNameList, tn)\n\t}\n\treturn targetNameList\n}",
"func (s *targetLister) List(selector labels.Selector) (ret []*v1alpha1.Target, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.Target))\n\t})\n\treturn ret, err\n}",
"func (s *googleCloudStorageTargetLister) List(selector labels.Selector) (ret []*v1alpha1.GoogleCloudStorageTarget, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.GoogleCloudStorageTarget))\n\t})\n\treturn ret, err\n}",
"func (c *FakeAzureEventHubsSources) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AzureEventHubsSourceList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(azureeventhubssourcesResource, azureeventhubssourcesKind, c.ns, opts), &v1alpha1.AzureEventHubsSourceList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1alpha1.AzureEventHubsSourceList{ListMeta: obj.(*v1alpha1.AzureEventHubsSourceList).ListMeta}\n\tfor _, item := range obj.(*v1alpha1.AzureEventHubsSourceList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}",
"func (es *EventService) List(q *EventListRequest) (el EventList, err error) {\n\t// GET: /events\n\tvar req *http.Request\n\treq, err = es.c.NewRequest(\"GET\", \"/events\", q)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = es.c.Do(req, &el)\n\treturn\n}",
"func (c *RPCClient) ListTargets() ([]api.Target, error) {\n\tout := &ListTargetsOut{}\n\terr := c.call(\"ListTargets\", ListTargetsIn{}, out)\n\treturn out.Targets, err\n}",
"func (h *Handler) ListByLabel(labels string) ([]*unstructured.Unstructured, error) {\n\tlistOptions := h.Options.ListOptions.DeepCopy()\n\tlistOptions.LabelSelector = labels\n\n\tif err := h.getGVRAndNamespaceScope(); err != nil {\n\t\treturn nil, err\n\t}\n\tif h.isNamespaced {\n\t\treturn extractList(h.dynamicClient.Resource(h.gvr).Namespace(h.namespace).List(h.ctx, *listOptions))\n\t}\n\treturn extractList(h.dynamicClient.Resource(h.gvr).List(h.ctx, *listOptions))\n}",
"func (t *Target) List(verbose bool, nameWidth int) {\n\tif !verbose {\n\t\tif strings.HasPrefix(t.Name, \"_\") {\n\t\t\t// skip targets in non verbose mode as hidden.\n\t\t\treturn\n\t\t}\n\t\tpadWidth := nameWidth - len(t.Name)\n\t\tpaddedName := color.Yellow(t.Name)\n\t\tif padWidth > 0 {\n\t\t\tpaddedName += strings.Repeat(\" \", padWidth)\n\t\t}\n\t\tout := fmt.Sprintf(\"%s %s\\n\", paddedName, strings.TrimSpace(t.Description))\n\t\t_, err := t.W.Write([]byte(out))\n\t\tif err != nil {\n\t\t\tlog.Println(color.Red(err.Error()))\n\t\t}\n\t\treturn\n\t}\n\n\t// target name\n\tout := fmt.Sprintf(\"%s: \\n\", color.Yellow(t.Name))\n\n\t// target description\n\tif t.Description != \"\" {\n\t\tout += fmt.Sprintf(\" - description: %s\\n\", strings.TrimSpace(t.Description))\n\t}\n\n\t// target before\n\tif len(t.Before) > 0 {\n\t\tbeforeList := \" - before: \" + strings.Join(t.Before, \", \")\n\t\tout += fmt.Sprintln(beforeList)\n\t}\n\n\t// target after\n\tif len(t.After) > 0 {\n\t\tafterList := \" - after: \" + strings.Join(t.After, \", \")\n\t\tout += fmt.Sprintln(afterList)\n\t}\n\n\t// target command\n\tout += fmt.Sprintf(\" - cmd:\\n \")\n\tout += fmt.Sprintln(strings.Replace(t.Cmd, \"\\n\", \"\\n \", -1))\n\t_, err := t.W.Write([]byte(out))\n\tif err != nil {\n\t\tlog.Println(color.Red(err.Error()))\n\t}\n}",
"func (c *FakeRedisTriggers) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RedisTriggerList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(redistriggersResource, redistriggersKind, c.ns, opts), &v1beta1.RedisTriggerList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1beta1.RedisTriggerList{ListMeta: obj.(*v1beta1.RedisTriggerList).ListMeta}\n\tfor _, item := range obj.(*v1beta1.RedisTriggerList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}",
"func (c *FakeGoogleCloudPubSubSources) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.GoogleCloudPubSubSourceList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(googlecloudpubsubsourcesResource, googlecloudpubsubsourcesKind, c.ns, opts), &v1alpha1.GoogleCloudPubSubSourceList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1alpha1.GoogleCloudPubSubSourceList{ListMeta: obj.(*v1alpha1.GoogleCloudPubSubSourceList).ListMeta}\n\tfor _, item := range obj.(*v1alpha1.GoogleCloudPubSubSourceList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}",
"func (c *FakeCloudwatchEventTargets) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(cloudwatcheventtargetsResource, c.ns, opts))\n\n}",
"func (client *Client) ListTargets(request *ListTargetsRequest) (_result *ListTargetsResponse, _err error) {\n\truntime := &util.RuntimeOptions{}\n\t_result = &ListTargetsResponse{}\n\t_body, _err := client.ListTargetsWithOptions(request, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_result = _body\n\treturn _result, _err\n}",
"func listTargetsHandler(w http.ResponseWriter, r *http.Request) {\n\n}",
"func (c *AuditEventClient) List(ctx context.Context, opts *AuditEventListOptions) ([]*resource.AuditEvent, *Pager, error) {\n\tif opts == nil {\n\t\topts = NewAuditEventListOptions()\n\t}\n\tvar res resource.AuditEventList\n\terr := c.client.get(ctx, path.Format(\"/v3/audit_events?%s\", opts.ToQueryString()), &res)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpager := NewPager(res.Pagination)\n\treturn res.Resources, pager, nil\n}",
"func (client *Client) ListTargetsWithOptions(request *ListTargetsRequest, runtime *util.RuntimeOptions) (_result *ListTargetsResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_result = &ListTargetsResponse{}\n\t_body, _err := client.DoRequest(tea.String(\"listTargets\"), tea.String(\"HTTP\"), tea.String(\"POST\"), tea.String(\"/openapi/listTargets\"), nil, tea.ToMap(request), runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}",
"func (e *event) List() []string {\n\te.RLock()\n\tdefer e.RUnlock()\n\tlist := make([]string, 0, len(e.events))\n\tfor name := range e.events {\n\t\tlist = append(list, name)\n\t}\n\treturn list\n}",
"func List() *Event {\n\treturn &Event{}\n}",
"func (s googleCloudStorageTargetNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.GoogleCloudStorageTarget, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.GoogleCloudStorageTarget))\n\t})\n\treturn ret, err\n}",
"func (h *Handler) ListByLabel(labels string) ([]*corev1.Node, error) {\n\tlistOptions := h.Options.ListOptions.DeepCopy()\n\tlistOptions.LabelSelector = labels\n\tnodeList, err := h.clientset.CoreV1().Nodes().List(h.ctx, *listOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn extractList(nodeList), nil\n}",
"func (c *FakeTraefikServices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.TraefikServiceList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(traefikservicesResource, traefikservicesKind, c.ns, opts), &v1alpha1.TraefikServiceList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1alpha1.TraefikServiceList{ListMeta: obj.(*v1alpha1.TraefikServiceList).ListMeta}\n\tfor _, item := range obj.(*v1alpha1.TraefikServiceList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}",
"func (tc *Configs) List(verbose bool, fuzzy string) {\n\tfilePrefix, fuzzyTarget := splitTarget(fuzzy)\nLOOP_FILES:\n\tfor _, tf := range tc.Files {\n\t\t// If a file prefix is provided check this file matches.\n\t\tif filePrefix != \"\" && tf.Basename() != filePrefix {\n\t\t\tcontinue LOOP_FILES\n\t\t}\n\n\t\ttargetNameWidth := 0\n\t\ttargetNames := []string{}\n\tLOOP_TARGETS:\n\t\tfor k := range tf.Targets {\n\t\t\tif len(k) > targetNameWidth {\n\t\t\t\ttargetNameWidth = len(k)\n\t\t\t}\n\t\t\tif fuzzyTarget != \"\" {\n\t\t\t\tif ok, _ := filepath.Match(fuzzyTarget, k); !ok {\n\t\t\t\t\tcontinue LOOP_TARGETS\n\t\t\t\t}\n\t\t\t}\n\t\t\ttargetNames = append(targetNames, k)\n\t\t}\n\t\tsort.Strings(targetNames)\n\t\tbasename := tf.Basename()\n\t\ttc.StdOut.Write([]byte(color.Green(fmt.Sprintf(\"(%s) %s\\n\", basename, tf.Filepath))))\n\t\tfor _, targetName := range targetNames {\n\t\t\tif target, ok := tc.Target(basename + \":\" + targetName); ok {\n\t\t\t\ttarget.List(verbose, targetNameWidth)\n\t\t\t}\n\t\t}\n\t\ttc.StdOut.Write([]byte(color.Green(\"---\\n\\n\")))\n\t}\n}",
"func (c *CloudWatchEvents) ListTargetsByRuleRequest(input *ListTargetsByRuleInput) (req *request.Request, output *ListTargetsByRuleOutput) {\n\top := &request.Operation{\n\t\tName: opListTargetsByRule,\n\t\tHTTPMethod: \"POST\",\n\t\tHTTPPath: \"/\",\n\t}\n\n\tif input == nil {\n\t\tinput = &ListTargetsByRuleInput{}\n\t}\n\n\treq = c.newRequest(op, input, output)\n\toutput = &ListTargetsByRuleOutput{}\n\treq.Data = output\n\treturn\n}",
"func (nlbHandler *GCPNLBHandler) listTargetPools(regionID string, filter string) (*compute.TargetPoolList, error) {\n\n\t// path param\n\tprojectID := nlbHandler.Credential.ProjectID\n\n\tresp, err := nlbHandler.Client.TargetPools.List(projectID, regionID).Do()\n\tif err != nil {\n\t\treturn &compute.TargetPoolList{}, err\n\t}\n\n\tfor _, item := range resp.Items {\n\t\tcblogger.Info(item)\n\t}\n\n\treturn resp, nil\n\n}",
"func (c *FakeListeners) List(ctx context.Context, opts v1.ListOptions) (result *networkextensionv1.ListenerList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(listenersResource, listenersKind, c.ns, opts), &networkextensionv1.ListenerList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &networkextensionv1.ListenerList{ListMeta: obj.(*networkextensionv1.ListenerList).ListMeta}\n\tfor _, item := range obj.(*networkextensionv1.ListenerList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}",
"func (e *Event) List(c echo.Context, p *takrib.Pagination) ([]takrib.Event, error) {\n\tau := e.rbac.User(c)\n\tq, err := query.List(au)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn e.udb.List(e.db, q, p)\n}",
"func List() []string {\n\tvar ret []string\n\tfor k := range loggers {\n\t\tret = append(ret, k)\n\t}\n\treturn ret\n}",
"func (s targetNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Target, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.Target))\n\t})\n\treturn ret, err\n}",
"func (CloudWatchEventSource) Values() []CloudWatchEventSource {\n\treturn []CloudWatchEventSource{\n\t\t\"EC2\",\n\t\t\"CODE_DEPLOY\",\n\t\t\"HEALTH\",\n\t\t\"RDS\",\n\t}\n}",
"func (c *TestClient) ListTargetInstances(project, zone string, opts ...ListCallOption) ([]*compute.TargetInstance, error) {\n\tif c.ListTargetInstancesFn != nil {\n\t\treturn c.ListTargetInstancesFn(project, zone, opts...)\n\t}\n\treturn c.client.ListTargetInstances(project, zone, opts...)\n}",
"func (n *GlobalNotification) Targets() []string {\r\n\treturn make([]string, 0)\r\n}",
"func (d *Dao) AllTargets(c context.Context, state int) (res []*model.Target, err error) {\n\tvar rows *xsql.Rows\n\tif rows, err = d.db.Query(c, _allTargetsSQL, state); err != nil {\n\t\tlog.Error(\"d.AllTargets.Query error(%+v), sql(%s)\", err, _allTargetsSQL)\n\t\treturn\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar t = &model.Target{}\n\t\tif err = rows.Scan(&t.ID, &t.SubEvent, &t.Event, &t.Product, &t.Source, &t.GroupIDs, &t.Threshold, &t.Duration, &t.State, &t.Ctime, &t.Mtime); err != nil {\n\t\t\tlog.Error(\"d.AllTargets.Scan error(%+v), sql(%s)\", err, _allTargetsSQL)\n\t\t\treturn\n\t\t}\n\t\tif t.GroupIDs != \"\" {\n\t\t\tvar gids []int64\n\t\t\tif gids, err = xstr.SplitInts(t.GroupIDs); err != nil {\n\t\t\t\tlog.Error(\"d.Product.SplitInts error(%+v), group ids(%s)\", err, t.GroupIDs)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif t.Groups, err = d.Groups(c, gids); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tres = append(res, t)\n\t}\n\terr = rows.Err()\n\treturn\n}",
"func (e EmptyTargetsNotaryRepository) ListTargets(...data.RoleName) ([]*client.TargetWithRole, error) {\n\treturn []*client.TargetWithRole{}, nil\n}",
"func list_targets(w rest.ResponseWriter, r *rest.Request) {\n\tarch := r.PathParam(\"arch\")\n\tsoftware := r.PathParam(\"software\")\n\tversion := r.PathParam(\"version\")\n\n\t// Doesn't need to be cached, as its calls are already cached.\n\ttargets := BasicResults{}\n\tlatest_date := time.Time{}\n\ttarget_path := get_target_path(arch, version)\n\tfiles := get_files(cache_instance, db, target_path)\n\tfor _, file := range files {\n\t\tarchive := new(Archive)\n\t\tarchive = archive.Init(file.Path)\n\t\tif archive.Software == software {\n\t\t\tparsed_time, err := time.Parse(\"2006-01-02\", archive.Date)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tparsed_time = time.Time{}\n\t\t\t}\n\t\t\tif parsed_time.After(latest_date) {\n\t\t\t\tlatest_date = parsed_time\n\t\t\t}\n\t\t\ttargets = append(targets, BasicResult{archive.Tag, archive.Date})\n\t\t}\n\t}\n\ttargets = append(targets, BasicResult{\"latest\", latest_date.Format(\"2006-01-02\")})\n\n\t// Sort the targets by date descending.\n\tsort.Sort(targets)\n\n\tw.WriteJson(targets)\n}",
"func (r *EventTagsService) List(profileId int64) *EventTagsListCall {\n\tc := &EventTagsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.profileId = profileId\n\treturn c\n}",
"func (c *FakeEndpointsServices) List(opts v1.ListOptions) (result *v1alpha1.EndpointsServiceList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(endpointsservicesResource, endpointsservicesKind, c.ns, opts), &v1alpha1.EndpointsServiceList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1alpha1.EndpointsServiceList{ListMeta: obj.(*v1alpha1.EndpointsServiceList).ListMeta}\n\tfor _, item := range obj.(*v1alpha1.EndpointsServiceList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}",
"func eventFilterList(amount int) string {\n\tvar eventTypes []string\n\tfor i := 0; i < amount; i++ {\n\t\teventTypes = append(eventTypes, fmt.Sprintf(\":eventType%d\", i))\n\t}\n\treturn \"(\" + strings.Join(eventTypes, \", \") + \")\"\n}",
"func (l LoadedNotaryRepository) ListTargets(roles ...data.RoleName) ([]*client.TargetWithRole, error) {\n\tfilteredTargets := []*client.TargetWithRole{}\n\tfor _, tgt := range loadedTargets {\n\t\tif len(roles) == 0 || (len(roles) > 0 && roles[0] == tgt.Role.Name) {\n\t\t\tfilteredTargets = append(filteredTargets, &client.TargetWithRole{Target: tgt.Target, Role: tgt.Role.Name})\n\t\t}\n\t}\n\treturn filteredTargets, nil\n}",
"func (lw *EventListWatch) List(options api.ListOptions) (runtime.Object, error) {\n\treturn lw.ListFunc(options)\n}",
"func (l LoadedWithNoSignersNotaryRepository) ListTargets(roles ...data.RoleName) ([]*client.TargetWithRole, error) {\n\tfilteredTargets := []*client.TargetWithRole{}\n\tfor _, tgt := range loadedTargets {\n\t\tif len(roles) == 0 || (len(roles) > 0 && roles[0] == tgt.Role.Name) {\n\t\t\tfilteredTargets = append(filteredTargets, &client.TargetWithRole{Target: tgt.Target, Role: tgt.Role.Name})\n\t\t}\n\t}\n\treturn filteredTargets, nil\n}",
"func (e *EventAPI) List() ([]*EventType, error) {\n\teventTypes := []*EventType{}\n\terr := e.client.httpGET(e.backOffConf.create(), e.eventBaseURL(), &eventTypes, \"unable to request event types\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn eventTypes, nil\n}",
"func (c *FakeECSDeployments) List(opts v1.ListOptions) (result *ecskube.ECSDeploymentList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(ecsdeploymentsResource, ecsdeploymentsKind, c.ns, opts), &ecskube.ECSDeploymentList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &ecskube.ECSDeploymentList{}\n\tfor _, item := range obj.(*ecskube.ECSDeploymentList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}",
"func (re *stubRegistrationService) ListBySelector(ctx context.Context, request common.Selector) (reply common.RegistrationEntries, err error) {\n\treturn reply, err\n}",
"func (c *EventService) List(params *EventParams) ([]Event, *http.Response, error) {\n\toutput := &struct {\n\t\tData []Event `json:\"data\"`\n\t}{}\n\tresp, err := doList(c.sling, c.endpoint, params, output)\n\treturn output.Data, resp, err\n}",
"func (svc *Service) List(ownerID string, search string, maxResults int) ([]*calendar.Event, error) {\n\tmsEvents := &types.Events{}\n\turl := fmt.Sprintf(\"%s&$top=%v&$filter=startswith(subject,'%s')\", deltaLink(ownerID), maxResults, search)\n\t// fmt.Println(url)\n\t_, err := svc.client.Get(url, msEvents)\n\tif err != nil {\n\t\treturn []*calendar.Event{}, errors.Wrap(err, \"Unable to perform list\")\n\t}\n\n\tevents := []*calendar.Event{}\n\tfor _, msEvent := range msEvents.Events {\n\t\tevent := convertToGoogleEvent(msEvent)\n\t\tevents = append(events, event)\n\t}\n\n\treturn events, nil\n}",
"func (c *FakeRobots) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RobotList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(robotsResource, robotsKind, c.ns, opts), &v1alpha1.RobotList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1alpha1.RobotList{ListMeta: obj.(*v1alpha1.RobotList).ListMeta}\n\tfor _, item := range obj.(*v1alpha1.RobotList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}",
"func (c *FakeRuleEndpoints) List(ctx context.Context, opts v1.ListOptions) (result *rulesv1.RuleEndpointList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(ruleendpointsResource, ruleendpointsKind, c.ns, opts), &rulesv1.RuleEndpointList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &rulesv1.RuleEndpointList{ListMeta: obj.(*rulesv1.RuleEndpointList).ListMeta}\n\tfor _, item := range obj.(*rulesv1.RuleEndpointList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}",
"func targetsForEndpoints(endpoints []util.Endpoint) []kongstate.Target {\n\ttargets := []kongstate.Target{}\n\tfor _, endpoint := range endpoints {\n\t\ttarget := kongstate.Target{\n\t\t\tTarget: kong.Target{\n\t\t\t\tTarget: kong.String(endpoint.Address + \":\" + endpoint.Port),\n\t\t\t},\n\t\t}\n\t\ttargets = append(targets, target)\n\t}\n\treturn targets\n}",
"func (c *FakeIotDpses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IotDpsList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(iotdpsesResource, iotdpsesKind, c.ns, opts), &v1alpha1.IotDpsList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1alpha1.IotDpsList{ListMeta: obj.(*v1alpha1.IotDpsList).ListMeta}\n\tfor _, item := range obj.(*v1alpha1.IotDpsList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}",
"func (l Leftovers) List(filter string, regex bool) {\n\tl.logger.NoConfirm()\n\tvar deletables []common.Deletable\n\n\tfor _, r := range l.resources {\n\t\tlist, err := r.List(filter, regex)\n\t\tif err != nil {\n\t\t\tl.logger.Println(color.YellowString(err.Error()))\n\t\t}\n\n\t\tdeletables = append(deletables, list...)\n\t}\n\n\tfor _, d := range deletables {\n\t\tl.logger.Println(fmt.Sprintf(\"[%s: %s]\", d.Type(), d.Name()))\n\t}\n}",
"func (l Leftovers) List(filter string, regex bool) {\n\tl.logger.NoConfirm()\n\n\tvar deletables []common.Deletable\n\n\tfor _, r := range l.resources {\n\t\tlist, err := r.List(filter, regex)\n\t\tif err != nil {\n\t\t\tl.logger.Println(color.YellowString(err.Error()))\n\t\t}\n\n\t\tdeletables = append(deletables, list...)\n\t}\n\n\tfor _, d := range deletables {\n\t\tl.logger.Println(fmt.Sprintf(\"[%s: %s]\", d.Type(), d.Name()))\n\t}\n}",
"func (r *ProjectsLogServicesSinksService) List(projectsId string, logServicesId string) *ProjectsLogServicesSinksListCall {\n\treturn &ProjectsLogServicesSinksListCall{\n\t\ts: r.s,\n\t\tprojectsId: projectsId,\n\t\tlogServicesId: logServicesId,\n\t\tcaller_: googleapi.JSONCall{},\n\t\tparams_: make(map[string][]string),\n\t\tpathTemplate_: \"v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks\",\n\t}\n}",
"func (k *Client) ListActiveTargets(upstream string) (*TargetListResponse, error) {\n\tres, err := k.get(\"/upstreams/\"+upstream+\"/targets/active\", nil)\n\tif err != nil {\n\t\tlogrus.WithError(err).WithFields(logrus.Fields{\n\t\t\t\"upstream\": upstream,\n\t\t}).Error(\"unable to fetch upstream targets\")\n\t\treturn nil, err\n\t}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tlogrus.WithError(err).WithFields(logrus.Fields{\n\t\t\t\"upstream\": upstream,\n\t\t}).Error(\"unable to fetch upstream targets\")\n\t\treturn nil, err\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode >= 400 {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"upstream\": upstream,\n\t\t}).Error(\"unable to fetch upstream targets\")\n\t\treturn nil, errors.New(res.Status)\n\t}\n\t// FIXME\n\t// We have to verify the result size before unmarshal\n\t// because Kong API is not consitent and return an empty\n\t// object instead of an empty array if no results.\n\tvar lightResult LightTargetListResponse\n\terr = json.Unmarshal(body, &lightResult)\n\tif err != nil {\n\t\tlogrus.WithError(err).WithFields(logrus.Fields{\n\t\t\t\"upstream\": upstream,\n\t\t}).Error(\"unable to decode response\")\n\t\treturn nil, err\n\t}\n\tvar result TargetListResponse\n\tif lightResult.Total == 0 {\n\t\tresult = TargetListResponse{\n\t\t\tTotal: 0,\n\t\t\tData: make([]TargetResponse, 0, 0),\n\t\t}\n\t} else {\n\t\terr = json.Unmarshal(body, &result)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).WithFields(logrus.Fields{\n\t\t\t\t\"upstream\": upstream,\n\t\t\t}).Error(\"unable to decode response\")\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &result, nil\n}",
"func (*TagsService) List(tagsFields []TagsField, tagsArgs ...TagsArgument) Query {\n\tif len(tagsFields) == 0 {\n\t\treturn Query{\n\t\t\tname: \"tags\",\n\t\t\tfields: []field{\n\t\t\t\tTagsIDField().field,\n\t\t\t},\n\t\t}\n\t}\n\n\tvar fields []field\n\tfor _, tf := range tagsFields {\n\t\tfields = append(fields, tf.field)\n\t}\n\tvar args []argument\n\tfor _, ta := range tagsArgs {\n\t\targs = append(args, ta.arg)\n\t}\n\treturn Query{\n\t\tname: \"tags\",\n\t\tfields: fields,\n\t\targs: args,\n\t}\n}",
"func (client *Client) Targets(kinds ...string) []Target {\n\tif len(kinds) == 0 {\n\t\tclient.mutex.Lock()\n\t\ttargets := make([]Target, len(client.targets))\n\t\tcopy(targets, client.targets)\n\t\tclient.mutex.Unlock()\n\n\t\treturn targets\n\t}\n\n\tclient.mutex.Lock()\n\ttargets := make([]Target, 0, len(client.targets))\n\tfor _, target := range client.targets {\n\t\tfor _, kind := range kinds {\n\t\t\tif target.Kind() == kind {\n\t\t\t\ttargets = append(targets, target)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tclient.mutex.Unlock()\n\n\treturn targets\n}",
"func (c *CloudWatchEvents) ListRuleNamesByTargetRequest(input *ListRuleNamesByTargetInput) (req *request.Request, output *ListRuleNamesByTargetOutput) {\n\top := &request.Operation{\n\t\tName: opListRuleNamesByTarget,\n\t\tHTTPMethod: \"POST\",\n\t\tHTTPPath: \"/\",\n\t}\n\n\tif input == nil {\n\t\tinput = &ListRuleNamesByTargetInput{}\n\t}\n\n\treq = c.newRequest(op, input, output)\n\toutput = &ListRuleNamesByTargetOutput{}\n\treq.Data = output\n\treturn\n}",
"func SelectorLabels(name, instance string) map[string]string {\n\treturn map[string]string{\n\t\tApplicationNameLabelKey: name,\n\t\tApplicationInstanceLabelKey: instance,\n\t}\n}",
"func (NotificationTarget) Values() []NotificationTarget {\n\treturn []NotificationTarget{\n\t\t\"EventBridge\",\n\t\t\"SNS\",\n\t\t\"SQS\",\n\t}\n}",
"func Targets(t []string) RequestOption {\n\treturn func(o *RequestOptions) {\n\t\to.Targets = t\n\t}\n}",
"func (s *tektonListenerLister) List(selector labels.Selector) (ret []*v1alpha1.TektonListener, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.TektonListener))\n\t})\n\treturn ret, err\n}",
"func (s *eventProviderLister) List(selector labels.Selector) (ret []*v1alpha1.EventProvider, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.EventProvider))\n\t})\n\treturn ret, err\n}",
"func (dtm *DfgetTaskManager) List(ctx context.Context, filter map[string]string) (dfgetTaskList []*types.DfGetTask, err error) {\n\treturn nil, nil\n}",
"func findFirewallRulesByTarget(rules []*gkeCompute.Firewall, clusterName string) []*gkeCompute.Firewall {\n\tvar firewalls []*gkeCompute.Firewall\n\tfor _, r := range rules {\n\t\tif r != nil {\n\t\t\tif strings.Contains(r.Description, kubernetesIO) {\n\t\t\t\tfor _, tag := range r.TargetTags {\n\t\t\t\t\tlog.Debugf(\"firewall rule[%s] target tag: %s\", r.Name, tag)\n\t\t\t\t\tif strings.HasPrefix(tag, targetPrefix+clusterName) {\n\t\t\t\t\t\tlog.Debugf(\"append firewall list[%s]\", r.Name)\n\t\t\t\t\t\tfirewalls = append(firewalls, r)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn firewalls\n}",
"func GetTargets(addrList string) []*Target {\r\n\taddrs := strings.Split(addrList, \",\")\r\n\ttargets := make([]*Target, len(addrs))\r\n\tfor i, addr := range addrs {\r\n\t\taddr = strings.TrimPrefix(addr, \"http://\") // convert to correct format\r\n\t\ttargets[i] = &Target{addr, \"http\"}\r\n\t}\r\n\treturn targets\r\n}",
"func (d *Dao) TargetsByQuery(c context.Context, where string) (res []*model.Target, err error) {\n\tvar rows *xsql.Rows\n\tif rows, err = d.db.Query(c, _targetQuerySQL+where); err != nil {\n\t\tlog.Error(\"d.TargetsByQuery.Query error(%+v), sql(%s)\", err, _targetQuerySQL+where)\n\t\treturn\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar t = &model.Target{}\n\t\tif err = rows.Scan(&t.ID, &t.SubEvent, &t.Event, &t.Product, &t.Source, &t.GroupIDs, &t.Threshold, &t.Duration, &t.State, &t.Ctime, &t.Mtime); err != nil {\n\t\t\tlog.Error(\"d.TargetsByQuery.Scan error(%+v), sql(%s)\", err, _targetQuerySQL+where)\n\t\t\treturn\n\t\t}\n\t\tif t.GroupIDs != \"\" {\n\t\t\tvar gids []int64\n\t\t\tif gids, err = xstr.SplitInts(t.GroupIDs); err != nil {\n\t\t\t\tlog.Error(\"d.Product.SplitInts error(%+v), group ids(%s)\", err, t.GroupIDs)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif t.Groups, err = d.Groups(c, gids); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tres = append(res, t)\n\t}\n\treturn\n}",
"func getLabelSelectors() string {\n\treturn \"!ephemeral-enforcer\"\n}",
"func (instance *Host) ListLabels(ctx context.Context) (_ map[string]string, ferr fail.Error) {\n\tdefer fail.OnPanic(&ferr)\n\n\tif instance == nil || valid.IsNil(instance) {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\tif ctx == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"ctx\")\n\t}\n\n\tvar labelsV1 *propertiesv1.HostLabels\n\txerr := instance.Alter(ctx, func(_ data.Clonable, props *serialize.JSONProperties) fail.Error {\n\t\treturn props.Alter(hostproperty.LabelsV1, func(clonable data.Clonable) fail.Error {\n\t\t\tvar ok bool\n\t\t\tlabelsV1, ok = clonable.(*propertiesv1.HostLabels)\n\t\t\tif !ok {\n\t\t\t\treturn fail.InconsistentError(\"'*propertiesv1.HostTags' expected, '%s' provided\", reflect.TypeOf(clonable).String())\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\t})\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\n\treturn labelsV1.ByID, nil\n}",
"func GetTargetsList(baseURL string) {\n\ttargetURL := baseURL + \"?name=\" + tl.Name\n\tfmt.Println(\"==> GET\", targetURL)\n\n\t// Read beegosessionID from .cookie.yaml\n\tc, err := utils.CookieLoad()\n\tif err != nil {\n\t\tfmt.Println(\"Error:\", err)\n\t\treturn\n\t}\n\n\tutils.Request.Get(targetURL).\n\t\tSet(\"Cookie\", \"harbor-lang=zh-cn; beegosessionID=\"+c.BeegosessionID).\n\t\tEnd(utils.PrintStatus)\n}",
"func (op *metadataLookup) selectList() error {\n\tqueryStmt := op.executeCtx.Query\n\tif queryStmt.AllFields {\n\t\tfields, err := op.metadata.GetAllFields(queryStmt.Namespace, queryStmt.MetricName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, fieldMeta := range fields {\n\t\t\top.planField(nil, fieldMeta)\n\t\t}\n\t\treturn nil\n\t}\n\tselectItems := queryStmt.SelectItems\n\tif len(selectItems) == 0 {\n\t\treturn constants.ErrEmptySelectList\n\t}\n\n\tfor _, selectItem := range selectItems {\n\t\top.field(nil, selectItem)\n\t\tif op.err != nil {\n\t\t\treturn op.err\n\t\t}\n\t}\n\treturn nil\n}",
"func (c *FakeKconfigs) List(opts v1.ListOptions) (result *v1alpha1.KconfigList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(kconfigsResource, kconfigsKind, c.ns, opts), &v1alpha1.KconfigList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1alpha1.KconfigList{ListMeta: obj.(*v1alpha1.KconfigList).ListMeta}\n\tfor _, item := range obj.(*v1alpha1.KconfigList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}",
"func (obs *Observer) List(opts metav1.ListOptions) (*unstructured.UnstructuredList, error) {\n\treturn obs.client.Namespace(obs.namespace).List(opts)\n}",
"func (s *NamespaceWebhook) ObjectSelector() *metav1.LabelSelector { return nil }",
"func OnList(c *grumble.Context) error {\n\tlen := len(config.AppConfig.Plans)\n\tif len == 0 {\n\t\tfmt.Println(\"No plans available. Try \\\"read\\\".\")\n\t\treturn nil\n\t}\n\n\tfor i, plan := range config.AppConfig.Plans {\n\t\tfmt.Println(i+1, plan.Name)\n\t\tfor i, task := range plan.Tasks {\n\t\t\tif task.GetDescription() != \"\" {\n\t\t\t\tfmt.Println(\" \", strconv.Itoa(i+1)+\".\", task.GetDescription())\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (c *Client) ListEvents(namespace string, opts metav1.ListOptions) (*corev1.EventList, error) {\n\tif err := c.initClient(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.kubernetes.CoreV1().Events(namespace).List(opts)\n}",
"func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefaultPort bool, targets []*Target, lb *labels.Builder) ([]*Target, []error) {\n\ttargets = targets[:0]\n\tfailures := []error{}\n\n\tfor i, tlset := range tg.Targets {\n\t\tlb.Reset(labels.EmptyLabels())\n\n\t\tfor ln, lv := range tlset {\n\t\t\tlb.Set(string(ln), string(lv))\n\t\t}\n\t\tfor ln, lv := range tg.Labels {\n\t\t\tif _, ok := tlset[ln]; !ok {\n\t\t\t\tlb.Set(string(ln), string(lv))\n\t\t\t}\n\t\t}\n\n\t\tlset, origLabels, err := PopulateLabels(lb, cfg, noDefaultPort)\n\t\tif err != nil {\n\t\t\tfailures = append(failures, errors.Wrapf(err, \"instance %d in group %s\", i, tg))\n\t\t}\n\t\tif !lset.IsEmpty() || !origLabels.IsEmpty() {\n\t\t\ttargets = append(targets, NewTarget(lset, origLabels, cfg.Params))\n\t\t}\n\t}\n\treturn targets, failures\n}",
"func List() []string {\n\tvar keys []string\n\tfor k := range loggers {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}",
"func (c *FakeTZCronJobs) List(opts v1.ListOptions) (result *v1alpha1.TZCronJobList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(tzcronjobsResource, tzcronjobsKind, c.ns, opts), &v1alpha1.TZCronJobList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1alpha1.TZCronJobList{ListMeta: obj.(*v1alpha1.TZCronJobList).ListMeta}\n\tfor _, item := range obj.(*v1alpha1.TZCronJobList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}",
"func collectMatchingEvents(ctx context.Context, kubeClient *knativetest.KubeClient, namespace string, kinds map[string][]string) ([]*corev1.Event, error) {\n\tvar events []*corev1.Event\n\n\twatchEvents, err := kubeClient.CoreV1().Events(namespace).Watch(ctx, metav1.ListOptions{})\n\t// close watchEvents channel\n\tdefer watchEvents.Stop()\n\tif err != nil {\n\t\treturn events, err\n\t}\n\n\t// create timer to not wait for events longer than 5 seconds\n\ttimer := time.NewTimer(5 * time.Second)\n\n\tfor {\n\t\tselect {\n\t\tcase wevent := <-watchEvents.ResultChan():\n\t\t\tevent := wevent.Object.(*corev1.Event)\n\t\t\tif val, ok := kinds[event.InvolvedObject.Kind]; ok {\n\t\t\t\tfor _, expectedName := range val {\n\t\t\t\t\tif event.InvolvedObject.Name == expectedName {\n\t\t\t\t\t\tevents = append(events, event)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-timer.C:\n\t\t\treturn events, nil\n\t\t}\n\t}\n}",
"func (r *ProjectsTraceSinksService) List(parent string) *ProjectsTraceSinksListCall {\n\tc := &ProjectsTraceSinksListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.parent = parent\n\treturn c\n}",
"func (r *Trail) EventSelectors() pulumi.ArrayOutput {\n\treturn (pulumi.ArrayOutput)(r.s.State[\"eventSelectors\"])\n}",
"func (c *OperatorDNS) List() (srvRecords map[string][]SrvRecord, err error) {\n\treturn nil, ErrNotImplemented\n}",
"func (s *redisTriggerLister) List(selector labels.Selector) (ret []*v1beta1.RedisTrigger, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1beta1.RedisTrigger))\n\t})\n\treturn ret, err\n}",
"func ListLogEvents(group, name, region string) error {\n\tsvc := assertCloudWatch(region)\n\tstartTime := time.Now().Add(-10 * time.Minute)\n\teventParams := &cloudwatchlogs.GetLogEventsInput{\n\t\tLogGroupName: aws.String(group),\n\t\tLogStreamName: aws.String(name),\n\t\tStartTime: aws.Int64(startTime.Unix() * 1000),\n\t}\n\teventPage := 0\n\teventErr := svc.GetLogEventsPages(eventParams, func(events *cloudwatchlogs.GetLogEventsOutput, lastPage bool) bool {\n\t\teventPage++\n\t\tfor _, event := range events.Events {\n\t\t\tval := time.Unix(*event.Timestamp/1000, 0)\n\t\t\tfmt.Println(fmt.Sprintf(\"[%v] %v\", val.Format(\"2006-01-02 15:04:05 MST\"), *event.Message))\n\t\t}\n\t\treturn eventPage <= 3\n\t})\n\tif eventErr != nil {\n\t\treturn eventErr\n\t}\n\treturn nil\n}",
"func ListProjectEvents(id string) error {\n\tclient, err := NewExtPacketClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tevents, _, err := client.Events.ListProjectEvents(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te := MarshallAndPrint(events)\n\treturn e\n}",
"func List(client *gophercloud.ServiceClient, stackName, stackID string, opts ListOptsBuilder) pagination.Pager {\n\turl := listURL(client, stackName, stackID)\n\tif opts != nil {\n\t\tquery, err := opts.ToStackEventListQuery()\n\t\tif err != nil {\n\t\t\treturn pagination.Pager{Err: err}\n\t\t}\n\t\turl += query\n\t}\n\treturn pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {\n\t\tp := EventPage{pagination.MarkerPageBase{PageResult: r}}\n\t\tp.MarkerPageBase.Owner = p\n\t\treturn p\n\t})\n}",
"func getLabelSelectorListOpts(m *v1alpha1.PerconaServerMongoDB, replset *v1alpha1.ReplsetSpec) *metav1.ListOptions {\n\tlabelSelector := labels.SelectorFromSet(labelsForPerconaServerMongoDB(m, replset)).String()\n\treturn &metav1.ListOptions{LabelSelector: labelSelector}\n}",
"func (e *VisitorService) List(opts *ListOptions) ([]Visitor, *Response, error) {\n\tendpoint := \"/data/visitors\"\n\tvisitors := new([]Visitor)\n\tresp, err := e.client.getRequestListDecode(endpoint, visitors, opts)\n\treturn *visitors, resp, err\n}",
"func (c *FakeTTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.TTemplateList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(ttemplatesResource, ttemplatesKind, c.ns, opts), &v1alpha1.TTemplateList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1alpha1.TTemplateList{ListMeta: obj.(*v1alpha1.TTemplateList).ListMeta}\n\tfor _, item := range obj.(*v1alpha1.TTemplateList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}",
"func (c *CloudWatchEvents) ListTargetsByRule(input *ListTargetsByRuleInput) (*ListTargetsByRuleOutput, error) {\n\treq, out := c.ListTargetsByRuleRequest(input)\n\terr := req.Send()\n\treturn out, err\n}",
"func (t *Target) runTargetList(targets []string) (int, string, error) {\n\tfor _, target := range targets {\n\t\tif target == t.Name {\n\t\t\tcontinue\n\t\t}\n\t\tif t, ok := t.targetConfigs.Target(target); ok {\n\t\t\tstatus, out, err := t.Run()\n\t\t\tif status != 0 || err != nil {\n\t\t\t\treturn status, out, err\n\t\t\t}\n\t\t}\n\t}\n\treturn 0, \"\", nil\n}",
"func (a *EventsApiService) ListEvents(ctx _context.Context, localVarOptionals *ListEventsOpts) (EventsList, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue EventsList\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/events\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.SourceServicename.IsSet() {\n\t\tlocalVarQueryParams.Add(\"source_servicename\", parameterToString(localVarOptionals.SourceServicename.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.SourceHostid.IsSet() {\n\t\tlocalVarQueryParams.Add(\"source_hostid\", parameterToString(localVarOptionals.SourceHostid.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.EventType.IsSet() {\n\t\tlocalVarQueryParams.Add(\"event_type\", parameterToString(localVarOptionals.EventType.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.ResourceType.IsSet() {\n\t\tlocalVarQueryParams.Add(\"resource_type\", parameterToString(localVarOptionals.ResourceType.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.ResourceId.IsSet() {\n\t\tlocalVarQueryParams.Add(\"resource_id\", parameterToString(localVarOptionals.ResourceId.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Level.IsSet() {\n\t\tlocalVarQueryParams.Add(\"level\", parameterToString(localVarOptionals.Level.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Since.IsSet() {\n\t\tlocalVarQueryParams.Add(\"since\", parameterToString(localVarOptionals.Since.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Before.IsSet() 
{\n\t\tlocalVarQueryParams.Add(\"before\", parameterToString(localVarOptionals.Before.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Page.IsSet() {\n\t\tlocalVarQueryParams.Add(\"page\", parameterToString(localVarOptionals.Page.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Limit.IsSet() {\n\t\tlocalVarQueryParams.Add(\"limit\", parameterToString(localVarOptionals.Limit.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif localVarOptionals != nil && localVarOptionals.XAnchoreAccount.IsSet() {\n\t\tlocalVarHeaderParams[\"x-anchore-account\"] = parameterToString(localVarOptionals.XAnchoreAccount.Value(), \"\")\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := 
GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}",
"func (d *Deployment) List(namespace string, labelSelector map[string]string) (*appsv1.DeploymentList, error) {\n\toptions := metav1.ListOptions{}\n\n\tif len(labelSelector) > 0 {\n\t\tkvs := make([]string, 0, len(labelSelector))\n\t\tfor key, value := range labelSelector {\n\t\t\tkvs = append(kvs, fmt.Sprintf(\"%s=%s\", key, value))\n\t\t}\n\n\t\tselector, err := labels.Parse(strings.Join(kvs, \",\"))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Errorf(\"failed to parse label selector, %v\", err)\n\t\t}\n\n\t\toptions.LabelSelector = selector.String()\n\t}\n\n\tdeployList, err := d.cs.AppsV1().Deployments(namespace).List(context.Background(), options)\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn nil, k8serror.ErrNotFound\n\t\t}\n\t\treturn nil, err\n\t}\n\n\treturn deployList, nil\n}",
"func (s *kogitoSourceLister) List(selector labels.Selector) (ret []*v1alpha1.KogitoSource, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.KogitoSource))\n\t})\n\treturn ret, err\n}",
"func getTargets() ([]string, error) {\n\tclient := http.Client{}\n\treq, err := http.NewRequest(\n\t\t\"GET\",\n\t\t\"http://\"+endpoint+\"/targets\",\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\tvar challenge Challenge\n\terr = json.Unmarshal(body, &challenge)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\tdata, err := base64.StdEncoding.DecodeString(challenge.Challenge)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\treturn strings.Split(string(data), \"\\n\"), nil\n}",
"func (t *TargetController) GetAll(c *gin.Context) {\n\tt.db.Init()\n\tdefer t.db.Free()\n\tpageSizeString := c.DefaultQuery(\"pageSize\", \"10\")\n\tpageIndexString := c.DefaultQuery(\"pageIndex\", \"0\")\n\tpageSize, err := strconv.ParseInt(pageSizeString, 10, 64)\n\tif err != nil || pageSize > 100 || pageSize < 1 {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\"message\": \"Invalid page size.\",\n\t\t})\n\t\treturn\n\t}\n\tpageIndex, err := strconv.ParseInt(pageIndexString, 10, 64)\n\tif err != nil || pageIndex < 0 {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\"message\": \"Invalid page index.\",\n\t\t})\n\t\treturn\n\t}\n\tcontains := c.Query(\"contains\")\n\tif contains != \"\" && !IsValidIDFragment(contains) {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\"message\": \"Invalid contains expression.\",\n\t\t})\n\t\treturn\n\t}\n\ttargets, err := t.db.GetAllTargets(pageSize, pageIndex, contains)\n\tif err != nil {\n\t\tt.log.Error(err)\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"message\": \"Invalid operation. Try again.\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, []*types.Target(targets))\n}",
"func SupportSelectors(flagSet *pflag.FlagSet, p *[]string) {\n\tflagSet.StringArrayVarP(p, \"selector\", \"l\", []string{}, \"filter results by a set of comma-separated label selectors\")\n}",
"func (s *awsElasticsearchDomainLister) List(selector labels.Selector) (ret []*v1.AwsElasticsearchDomain, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1.AwsElasticsearchDomain))\n\t})\n\treturn ret, err\n}",
"func NewTargets(c *Configuration) ([]*Target, error) {\n\tfor _, rule := range c.Rules {\n\t\terr := rule.setup()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trule.tags = c.Tags\n\t}\n\n\ttargets := []*Target{}\n\tfor ruleName, addrs := range c.Targets {\n\t\trule, ok := c.Rules[ruleName]\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"unknown rule %s\", ruleName)\n\t\t}\n\t\tfor _, addr := range addrs {\n\t\t\ttargets = append(targets, &Target{\n\t\t\t\tName: addr,\n\t\t\t\tAddr: addr,\n\t\t\t\tRule: rule,\n\t\t\t})\n\t\t}\n\t}\n\treturn targets, nil\n}",
"func (client *ClientImpl) ListEventTypes(ctx context.Context, args ListEventTypesArgs) (*[]EventTypeDescriptor, error) {\n\trouteValues := make(map[string]string)\n\tif args.PublisherId == nil || *args.PublisherId == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.PublisherId\"}\n\t}\n\trouteValues[\"publisherId\"] = *args.PublisherId\n\n\tlocationId, _ := uuid.Parse(\"db4777cd-8e08-4a84-8ba3-c974ea033718\")\n\tresp, err := client.Client.Send(ctx, http.MethodGet, locationId, \"7.1-preview.1\", routeValues, nil, nil, \"\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue []EventTypeDescriptor\n\terr = client.Client.UnmarshalCollectionBody(resp, &responseValue)\n\treturn &responseValue, err\n}",
"func (o OfflineNotaryRepository) ListTargets(...data.RoleName) ([]*client.TargetWithRole, error) {\n\treturn nil, storage.ErrOffline{}\n}"
] | [
"0.61464715",
"0.56628346",
"0.56543076",
"0.54902506",
"0.5228712",
"0.52107954",
"0.51932913",
"0.5172048",
"0.5141491",
"0.51324284",
"0.5112907",
"0.5101192",
"0.50956565",
"0.5068237",
"0.50504357",
"0.50469285",
"0.50433314",
"0.502446",
"0.49555793",
"0.49431434",
"0.4890991",
"0.48637363",
"0.48377275",
"0.4837639",
"0.48279905",
"0.4820106",
"0.4819277",
"0.48138517",
"0.47858772",
"0.47808796",
"0.47775558",
"0.47714558",
"0.47456992",
"0.4741946",
"0.47233582",
"0.47221497",
"0.47219318",
"0.4720415",
"0.471284",
"0.47070622",
"0.470582",
"0.4701967",
"0.46998334",
"0.46949494",
"0.469097",
"0.46909356",
"0.46846756",
"0.46703893",
"0.46664444",
"0.46609202",
"0.46569017",
"0.46565476",
"0.46399862",
"0.46365407",
"0.46266747",
"0.46183187",
"0.46066663",
"0.45967364",
"0.4593124",
"0.4592424",
"0.4580714",
"0.45771426",
"0.45750618",
"0.45736378",
"0.4553976",
"0.4538368",
"0.45294258",
"0.452352",
"0.45183286",
"0.44921914",
"0.44899762",
"0.4488049",
"0.44810113",
"0.44742554",
"0.44694737",
"0.44682884",
"0.44679326",
"0.44665366",
"0.4465299",
"0.44428277",
"0.44373694",
"0.4436916",
"0.44307053",
"0.44261205",
"0.44249293",
"0.44227755",
"0.4416238",
"0.4409937",
"0.44081956",
"0.44050202",
"0.44027027",
"0.43999338",
"0.43974254",
"0.43959203",
"0.43933827",
"0.43892038",
"0.4383154",
"0.43764722",
"0.43760303",
"0.43742332"
] | 0.77347326 | 0 |
Watch returns a watch.Interface that watches the requested cloudwatchEventTargets. | Watch возвращает интерфейс watch.Interface, который наблюдает за запрошенными cloudwatchEventTargets. | func (c *FakeCloudwatchEventTargets) Watch(opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(cloudwatcheventtargetsResource, c.ns, opts))
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c *ConsulClient) Watch(ctx context.Context, wh *WatchConfig) (IWatcher, error) {\n\tregistryOperationCount.WithLabelValues(env, \"Watch\").Inc()\n\n\tstartTime := time.Now()\n\tdefer func() {\n\t\tregistryOperationTimeTaken.WithLabelValues(env, \"Watch\").Observe(time.Now().Sub(startTime).Seconds())\n\t}()\n\n\tparams := map[string]interface{}{}\n\n\tif wh.WatchType == \"key\" {\n\t\tparams[\"type\"] = wh.WatchType\n\t\tparams[\"key\"] = wh.WatchPath\n\t} else if wh.WatchType == \"keyprefix\" {\n\t\tparams[\"type\"] = wh.WatchType\n\t\tparams[\"prefix\"] = wh.WatchPath\n\t}\n\n\tplan, err := watch.Parse(params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcwh := NewConsulWatcher(ctx, wh, plan, c.client)\n\n\treturn cwh, nil\n}",
"func (c *FakeAWSSNSTargets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(awssnstargetsResource, c.ns, opts))\n\n}",
"func Watch(ctx context.Context, watcher watch.Interface) (chan *Target, chan *Target, chan *Target) {\n\tadded := make(chan *Target)\n\tfinished := make(chan *Target)\n\tdeleted := make(chan *Target)\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase e := <-watcher.ResultChan():\n\t\t\t\tif e.Object == nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tpod := e.Object.(*v1.Pod)\n\n\t\t\t\tswitch e.Type {\n\t\t\t\tcase watch.Added:\n\t\t\t\t\tif pod.Status.Phase != v1.PodRunning {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, container := range pod.Spec.Containers {\n\t\t\t\t\t\tadded <- NewTarget(pod.Namespace, pod.Name, container.Name)\n\t\t\t\t\t}\n\t\t\t\tcase watch.Modified:\n\t\t\t\t\tswitch pod.Status.Phase {\n\t\t\t\t\tcase v1.PodRunning:\n\t\t\t\t\t\tfor _, container := range pod.Spec.Containers {\n\t\t\t\t\t\t\tadded <- NewTarget(pod.Namespace, pod.Name, container.Name)\n\t\t\t\t\t\t}\n\t\t\t\t\tcase v1.PodSucceeded, v1.PodFailed:\n\t\t\t\t\t\tfor _, container := range pod.Spec.Containers {\n\t\t\t\t\t\t\tfinished <- NewTarget(pod.Namespace, pod.Name, container.Name)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase watch.Deleted:\n\t\t\t\t\tfor _, container := range pod.Spec.Containers {\n\t\t\t\t\t\tdeleted <- NewTarget(pod.Namespace, pod.Name, container.Name)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase <-ctx.Done():\n\t\t\t\twatcher.Stop()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn added, finished, deleted\n}",
"func (obs *Observer) Watch(opts metav1.ListOptions) (watch.Interface, error) {\n\treturn obs.client.Namespace(obs.namespace).Watch(opts)\n}",
"func (k *kubernetes) Watch(opts ...router.WatchOption) (router.Watcher, error) {\n\treturn &watcher{\n\t\tevents: make(chan *router.Event),\n\t}, nil\n}",
"func watch(k *kite.Client, eventType string, eventId string, interval time.Duration) error {\n\teventArgs := kloud.EventArgs([]kloud.EventArg{\n\t\tkloud.EventArg{\n\t\t\tType: eventType,\n\t\t\tEventId: eventId,\n\t\t},\n\t})\n\n\tfor {\n\t\tresp, err := k.TellWithTimeout(\"event\", defaultTellTimeout, eventArgs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar events []kloud.EventResponse\n\t\tif err := resp.Unmarshal(&events); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(events) == 0 {\n\t\t\treturn errors.New(\"incoming event response is not an array\")\n\t\t}\n\n\t\tif events[0].Error != nil {\n\t\t\treturn events[0].Error\n\t\t}\n\n\t\tDefaultUi.Info(fmt.Sprintf(\"%s ==> %s [Status: %s Percentage: %d]\",\n\t\t\tfmt.Sprint(time.Now())[:19],\n\t\t\tevents[0].Event.Message,\n\t\t\tevents[0].Event.Status,\n\t\t\tevents[0].Event.Percentage,\n\t\t))\n\n\t\tif events[0].Event.Error != \"\" {\n\t\t\terr := errors.New(events[0].Event.Error)\n\t\t\tDefaultUi.Error(err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tif events[0].Event.Percentage == 100 {\n\t\t\treturn nil\n\t\t}\n\n\t\ttime.Sleep(interval)\n\t}\n}",
"func (c *googleCloudStorageSources) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"googlecloudstoragesources\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}",
"func (c *FakeListeners) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(listenersResource, c.ns, opts))\n\n}",
"func (t *FakeObjectTracker) Watch(gvr schema.GroupVersionResource, name string) (watch.Interface, error) {\n\tif t.fakingOptions.failAll != nil {\n\t\terr := t.fakingOptions.failAll.RunFakeInvocations()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn t.delegatee.Watch(gvr, name)\n}",
"func (w *Watcher) Watch(\n\tctx context.Context,\n\teventCh chan<- Event,\n) error {\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tdefer close(eventCh)\n\n\terrCh := make(chan error, 3)\n\ttaggedEventCh := make(chan Event)\n\n\tgo func() {\n\t\tif err := w.watchTagged(ctx, taggedEventCh); err != nil {\n\t\t\terrCh <- err\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tif err := w.watchTags(ctx, taggedEventCh, eventCh); err != nil {\n\t\t\terrCh <- err\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tif err := w.watchUnsorted(ctx, eventCh); err != nil {\n\t\t\terrCh <- err\n\t\t}\n\t}()\n\n\treturn <-errCh\n}",
"func Watch(paths ...string) (*Watcher, error) {\n\tevent := make(chan EventItem)\n\terr := watch(paths, event)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Watcher{\n\t\tEvent: event,\n\t}, nil\n}",
"func (c *kongs) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"kongs\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tWatch()\n}",
"func (c *interacts) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"interacts\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tWatch()\n}",
"func (k *Kubernetes) Watch(qname string) error {\n\treturn k.APIConn.Watch(qname)\n}",
"func (s *ConfigService) Watch(d time.Duration)",
"func Watch(ctx context.Context, i v1.PodInterface, podFilter *regexp.Regexp,\n\tcontainerFilter *regexp.Regexp, containerExcludeFilter *regexp.Regexp,\n\tcontainerState ContainerState, labelSelector labels.Selector) (chan *Target, chan *Target, error) {\n\n\tlogger := requestctx.Logger(ctx).WithName(\"pod-watch\").V(4)\n\n\tlogger.Info(\"create\")\n\twatcher, err := i.Watch(ctx, metav1.ListOptions{Watch: true, LabelSelector: labelSelector.String()})\n\tif err != nil {\n\t\tfmt.Printf(\"err.Error() = %+v\\n\", err.Error())\n\t\treturn nil, nil, errors.Wrap(err, \"failed to set up watch\")\n\t}\n\n\tadded := make(chan *Target)\n\tremoved := make(chan *Target)\n\n\tgo func() {\n\t\tlogger.Info(\"await events\")\n\t\tdefer func() {\n\t\t\tlogger.Info(\"event processing ends\")\n\t\t}()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase e := <-watcher.ResultChan():\n\t\t\t\tlogger.Info(\"received event\")\n\n\t\t\t\tif e.Object == nil {\n\t\t\t\t\tlogger.Info(\"event error, no object\")\n\t\t\t\t\t// Closed because of error\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tpod, ok := e.Object.(*corev1.Pod)\n\t\t\t\tif !ok {\n\t\t\t\t\tlogger.Info(\"event error, object not a pod\")\n\t\t\t\t\t// Not a Pod\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif !podFilter.MatchString(pod.Name) {\n\t\t\t\t\tlogger.Info(\"filtered\", \"pod\", pod.Name, \"filter\", podFilter.String())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tswitch e.Type {\n\t\t\t\tcase watch.Added, watch.Modified:\n\t\t\t\t\tlogger.Info(\"pod added/modified\", \"name\", pod.Name)\n\n\t\t\t\t\tvar statuses []corev1.ContainerStatus\n\t\t\t\t\tstatuses = append(statuses, pod.Status.InitContainerStatuses...)\n\t\t\t\t\tstatuses = append(statuses, pod.Status.ContainerStatuses...)\n\n\t\t\t\t\tfor _, c := range statuses {\n\t\t\t\t\t\tif !containerFilter.MatchString(c.Name) {\n\t\t\t\t\t\t\tlogger.Info(\"filtered\", \"container\", c.Name, \"filter\", containerFilter.String())\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif 
containerExcludeFilter != nil && containerExcludeFilter.MatchString(c.Name) {\n\t\t\t\t\t\t\tlogger.Info(\"excluded\", \"container\", c.Name, \"exclude-filter\", containerExcludeFilter.String())\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif c.State.Running != nil || c.State.Terminated != nil { // There are logs to read\n\t\t\t\t\t\t\tlogger.Info(\"report added\", \"container\", c.Name, \"pod\", pod.Name, \"namespace\", pod.Namespace)\n\t\t\t\t\t\t\tadded <- &Target{\n\t\t\t\t\t\t\t\tNamespace: pod.Namespace,\n\t\t\t\t\t\t\t\tPod: pod.Name,\n\t\t\t\t\t\t\t\tContainer: c.Name,\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase watch.Deleted:\n\t\t\t\t\tlogger.Info(\"pod deleted\", \"name\", pod.Name)\n\n\t\t\t\t\tvar containers []corev1.Container\n\t\t\t\t\tcontainers = append(containers, pod.Spec.Containers...)\n\t\t\t\t\tcontainers = append(containers, pod.Spec.InitContainers...)\n\n\t\t\t\t\tfor _, c := range containers {\n\t\t\t\t\t\tif !containerFilter.MatchString(c.Name) {\n\t\t\t\t\t\t\tlogger.Info(\"filtered\", \"container\", c.Name, \"filter\", containerFilter.String())\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif containerExcludeFilter != nil && containerExcludeFilter.MatchString(c.Name) {\n\t\t\t\t\t\t\tlogger.Info(\"excluded\", \"container\", c.Name, \"exclude-filter\", containerExcludeFilter.String())\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tlogger.Info(\"report removed\", \"container\", c.Name, \"pod\", pod.Name, \"namespace\", pod.Namespace)\n\t\t\t\t\t\tremoved <- &Target{\n\t\t\t\t\t\t\tNamespace: pod.Namespace,\n\t\t\t\t\t\t\tPod: pod.Name,\n\t\t\t\t\t\t\tContainer: c.Name,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase <-ctx.Done():\n\t\t\t\tlogger.Info(\"received stop request\")\n\t\t\t\twatcher.Stop()\n\t\t\t\tclose(added)\n\t\t\t\tclose(removed)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tlogger.Info(\"pass watch report channels\")\n\treturn added, removed, nil\n}",
"func (r workloadEndpoints) Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error) {\r\n\treturn r.client.resources.Watch(ctx, opts, apiv3.KindWorkloadEndpoint, nil)\r\n}",
"func (fk *FakeRouter) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {\n\tpanic(\"not implemented\")\n}",
"func (s *svc) Watch(opts ...router.WatchOption) (router.Watcher, error) {\n\trsp, err := s.router.Watch(context.Background(), &pb.WatchRequest{}, s.callOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\toptions := router.WatchOptions{\n\t\tService: \"*\",\n\t}\n\tfor _, o := range opts {\n\t\to(&options)\n\t}\n\treturn newWatcher(rsp, options)\n}",
"func (c *externalInterfaces) Watch(opts metav1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"externalinterfaces\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch()\n}",
"func (c *cronFederatedHPAs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"cronfederatedhpas\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}",
"func (c *klusterlets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tResource(\"klusterlets\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}",
"func (c *demos) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"demos\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch()\n}",
"func (c *AnalyticsController) runWatches() {\n\tlastResourceVersion := big.NewInt(0)\n\tcurrentResourceVersion := big.NewInt(0)\n\twatchListItems := WatchFuncList(c.kclient, c.client)\n\tfor name := range watchListItems {\n\n\t\t// assign local variable (not in range operator above) so that each\n\t\t// goroutine gets the correct watch function required\n\t\twfnc := watchListItems[name]\n\t\tn := name\n\t\tbackoff := 1 * time.Second\n\n\t\tgo wait.Until(func() {\n\t\t\t// any return from this func only exits that invocation of the func.\n\t\t\t// wait.Until will call it again after its sync period.\n\t\t\twatchLog := log.WithFields(log.Fields{\n\t\t\t\t\"watch\": n,\n\t\t\t})\n\t\t\twatchLog.Infof(\"starting watch\")\n\t\t\tw, err := wfnc.watchFunc(metav1.ListOptions{})\n\t\t\tif err != nil {\n\t\t\t\twatchLog.Errorf(\"error creating watch: %v\", err)\n\t\t\t}\n\n\t\t\twatchLog.Debugf(\"backing off watch for %v seconds\", backoff)\n\t\t\ttime.Sleep(backoff)\n\t\t\tbackoff = backoff * 2\n\t\t\tif backoff > 60*time.Second {\n\t\t\t\tbackoff = 60 * time.Second\n\t\t\t}\n\n\t\t\tif w == nil {\n\t\t\t\twatchLog.Errorln(\"watch function nil, watch not created, returning\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase event, ok := <-w.ResultChan():\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\twatchLog.Warnln(\"watch channel closed unexpectedly, attempting to re-establish\")\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tif event.Type == watch.Error {\n\t\t\t\t\t\twatchLog.Errorf(\"watch channel returned error: %s\", spew.Sdump(event))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t// success means the watch is working.\n\t\t\t\t\t// reset the backoff back to 1s for this watch\n\t\t\t\t\tbackoff = 1 * time.Second\n\n\t\t\t\t\tif event.Type == watch.Added || event.Type == watch.Deleted {\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\twatchLog.Errorf(\"Unable to create object meta for %v: %v\", event.Object, 
err)\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tm, err := meta.Accessor(event.Object)\n\t\t\t\t\t\t// if both resource versions can be converted to numbers\n\t\t\t\t\t\t// and if the current resource version is lower than the\n\t\t\t\t\t\t// last recorded resource version for this resource type\n\t\t\t\t\t\t// then skip the event\n\t\t\t\t\t\tc.mutex.RLock()\n\t\t\t\t\t\tif _, ok := lastResourceVersion.SetString(c.watchResourceVersions[n], 10); ok {\n\t\t\t\t\t\t\tif _, ok = currentResourceVersion.SetString(m.GetResourceVersion(), 10); ok {\n\t\t\t\t\t\t\t\tif lastResourceVersion.Cmp(currentResourceVersion) == 1 {\n\t\t\t\t\t\t\t\t\twatchLog.Debugf(\"ResourceVersion %v is to old (%v)\",\n\t\t\t\t\t\t\t\t\t\tcurrentResourceVersion, c.watchResourceVersions[n])\n\t\t\t\t\t\t\t\t\tc.mutex.RUnlock()\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tc.mutex.RUnlock()\n\n\t\t\t\t\t\t// each watch is a separate go routine\n\t\t\t\t\t\tc.mutex.Lock()\n\t\t\t\t\t\tc.watchResourceVersions[n] = m.GetResourceVersion()\n\t\t\t\t\t\tc.mutex.Unlock()\n\n\t\t\t\t\t\tanalytic, err := newEvent(c.typer, event.Object, event.Type)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\twatchLog.Errorf(\"unexpected error creating analytic from watch event %#v\", event.Object)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t// additional info will be set to the analytic and\n\t\t\t\t\t\t\t// an instance queued for all destinations\n\t\t\t\t\t\t\terr := c.AddEvent(analytic)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\twatchLog.Errorf(\"error adding event: %v - %v\", err, analytic)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}, 1*time.Millisecond, c.stopChannel)\n\t}\n}",
"func (c *cloudFormationTemplates) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"cloudformationtemplates\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tWatch()\n}",
"func (h *HealthImpl) Watch(*v1.HealthCheckRequest, v1.Health_WatchServer) error {\n\treturn nil\n}",
"func (s *V3Backend) Watch(ctx context.Context, key string) <-chan *args.ChangeEvent {\n\twatchChan := s.Client.Watch(ctx, key, etcd.WithPrefix())\n\ts.changeChan = make(chan *args.ChangeEvent)\n\ts.done = make(chan struct{})\n\n\ts.wg.Add(1)\n\tgo func() {\n\t\tvar resp etcd.WatchResponse\n\t\tvar ok bool\n\t\tdefer s.wg.Done()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase resp, ok = <-watchChan:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif resp.Canceled {\n\t\t\t\t\ts.changeChan <- NewChangeError(errors.Wrap(resp.Err(),\n\t\t\t\t\t\t\"V3Backend.Watch(): ETCD server cancelled watch\"))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tfor _, event := range resp.Events {\n\t\t\t\t\ts.changeChan <- NewChangeEvent(event)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\treturn s.changeChan\n}",
"func (c *Client) Watch(gvr schema.GroupVersionResource, opts ...ListOption) (w watch.Interface, err error) {\n\trestClient, err := c.rest(gvr.GroupVersion())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctx, cancel := context.WithTimeout(c.ctx, c.timeout)\n\tlistOpts := ListOptions{Raw: &metav1.ListOptions{Watch: true}}\n\tlistOpts.ApplyOptions(opts)\n\tw, err = restClient.Get().\n\t\tTimeout(c.timeout).\n\t\tNamespaceIfScoped(listOpts.Namespace, listOpts.Namespace != \"\").\n\t\tResource(gvr.Resource).\n\t\tVersionedParams(listOpts.AsListOptions(), scheme.ParameterCodec).\n\t\tWatch(ctx)\n\tif err != nil {\n\t\tcancel()\n\t\treturn nil, err\n\t}\n\treturn &watcher{Interface: w, cancel: cancel}, nil\n}",
"func (c *nodes) Watch(opts api.ListOptions) (watch.Interface, error) {\n\treturn c.r.Get().\n\t\tPrefix(\"watch\").\n\t\tNamespace(api.NamespaceAll).\n\t\tResource(c.resourceName()).\n\t\tVersionedParams(&opts, api.ParameterCodec).\n\t\tWatch()\n}",
"func (c *globalThreatFeeds) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tResource(\"globalthreatfeeds\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}",
"func (h *HealthImpl) Watch(in *grpc_health_v1.HealthCheckRequest, stream grpc_health_v1.Health_WatchServer) error {\n\treturn nil\n}",
"func (s *HealthServer) Watch(in *healthpb.HealthCheckRequest, srv healthpb.Health_WatchServer) error {\n\treturn status.Error(codes.Unimplemented, \"Watch is not implemented\")\n}",
"func (c *scheduledJobs) Watch(opts api.ListOptions) (watch.Interface, error) {\n\treturn c.r.Get().\n\t\tPrefix(\"watch\").\n\t\tNamespace(c.ns).\n\t\tResource(\"scheduledjobs\").\n\t\tVersionedParams(&opts, api.ParameterCodec).\n\t\tWatch()\n}",
"func (r *Registry) Watch(ctx context.Context, serviceName string) (registry.Watcher, error) {\n\treturn newWatcher(ctx, r.opt.Namespace, serviceName, r.consumer)\n}",
"func (rr *Registry) Watch(ctx context.Context) ([]*WatchEvent, <-chan *WatchEvent, error) {\n\trr.mu.Lock()\n\tdefer rr.mu.Unlock()\n\n\tprefix := rr.prefixPath()\n\n\tgetRes, err := rr.kv.Get(ctx, prefix, etcdv3.WithPrefix())\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tcurrentEvents := make([]*WatchEvent, 0, len(getRes.Kvs))\n\tfor _, kv := range getRes.Kvs {\n\t\treg, err := rr.unmarshalRegistration(kv)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\twev := &WatchEvent{\n\t\t\tKey: string(kv.Key),\n\t\t\tReg: reg,\n\t\t\tType: Create,\n\t\t}\n\t\tcurrentEvents = append(currentEvents, wev)\n\t}\n\n\t// Channel to publish registry changes.\n\twatchEvents := make(chan *WatchEvent)\n\n\t// Write a change or exit the watcher.\n\tput := func(we *WatchEvent) {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase watchEvents <- we:\n\t\t}\n\t}\n\tputTerminalError := func(we *WatchEvent) {\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\trecover()\n\t\t\t}()\n\t\t\tselect {\n\t\t\tcase <-time.After(10 * time.Minute):\n\t\t\tcase watchEvents <- we:\n\t\t\t}\n\t\t}()\n\t}\n\t// Create a watch-event from an event.\n\tcreateWatchEvent := func(ev *etcdv3.Event) *WatchEvent {\n\t\twev := &WatchEvent{Key: string(ev.Kv.Key)}\n\t\tif ev.IsCreate() {\n\t\t\twev.Type = Create\n\t\t} else if ev.IsModify() {\n\t\t\twev.Type = Modify\n\t\t} else {\n\t\t\twev.Type = Delete\n\t\t\t// Create base registration from just key.\n\t\t\treg := &Registration{}\n\t\t\tgraphType, graphName, err := rr.graphTypeAndNameFromKey(string(ev.Kv.Key))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\treg.Type = graphType\n\t\t\treg.Name = graphName\n\t\t\twev.Reg = reg\n\t\t\t// Need to return now because\n\t\t\t// delete events don't contain\n\t\t\t// any data to unmarshal.\n\t\t\treturn wev\n\t\t}\n\t\treg, err := rr.unmarshalRegistration(ev.Kv)\n\t\tif err != nil {\n\t\t\twev.Error = fmt.Errorf(\"%v: failed unmarshaling value: '%s'\", err, 
ev.Kv.Value)\n\t\t} else {\n\t\t\twev.Reg = reg\n\t\t}\n\t\treturn wev\n\t}\n\n\t// Watch deltas in etcd, with the give prefix, starting\n\t// at the revision of the get call above.\n\tdeltas := rr.client.Watch(ctx, prefix, etcdv3.WithPrefix(), etcdv3.WithRev(getRes.Header.Revision+1))\n\tgo func() {\n\t\tdefer close(watchEvents)\n\t\tfor {\n\t\t\tdelta, open := <-deltas\n\t\t\tif !open {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\tdefault:\n\t\t\t\t\tputTerminalError(&WatchEvent{Error: ErrWatchClosedUnexpectedly})\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif delta.Err() != nil {\n\t\t\t\tputTerminalError(&WatchEvent{Error: delta.Err()})\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, event := range delta.Events {\n\t\t\t\tput(createWatchEvent(event))\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn currentEvents, watchEvents, nil\n}",
"func (c *ioTConfigs) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"iotconfigs\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch()\n}",
"func (c *concurrencyControls) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"concurrencycontrols\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}",
"func (c *gitTracks) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"gittracks\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch()\n}",
"func New() *CloudWatch {\n\treturn &CloudWatch{\n\t\tCacheTTL: config.Duration(time.Hour),\n\t\tRateLimit: 25,\n\t\tTimeout: config.Duration(time.Second * 5),\n\t\tBatchSize: 500,\n\t}\n}",
"func (c *FakeRedisTriggers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(redistriggersResource, c.ns, opts))\n\n}",
"func (s *CAServer) Watch(_ *ghc.HealthCheckRequest, _ ghc.Health_WatchServer) error {\n\treturn nil\n}",
"func (c *snapshotRules) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"snapshotrules\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}",
"func (c *previewFeatures) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tResource(\"previewfeatures\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}",
"func (f Forwarder) Watch() {\n\thandler := func(i interface{}) {\n\t\tvar container string\n\t\tvar message string\n\t\tvar context map[string]interface{}\n\t\tdata := i.(map[string]interface{})\n\t\tmetadata := data[\"metadata\"].(map[string]interface{})\n\n\t\ttmp, ok := metadata[\"context\"]\n\t\tif ok {\n\t\t\tcontext = tmp.(map[string]interface{})\n\t\t}\n\n\t\ttmp, ok = context[\"container\"]\n\t\tif ok {\n\t\t\tcontainer = tmp.(string)\n\t\t}\n\n\t\t_, ok = f.Forwards[container]\n\t\tif ok {\n\t\t\ttmp, ok := metadata[\"message\"]\n\t\t\tif ok {\n\t\t\t\tmessage = tmp.(string)\n\t\t\t}\n\t\t\tswitch message {\n\t\t\tcase ContainerStarted:\n\t\t\t\tgo func() {\n\t\t\t\t\t// Wait a few seconds for the newly running container to get an IP address\n\t\t\t\t\ttime.Sleep(2 * time.Second)\n\t\t\t\t\tf.ForwardContainer(container)\n\t\t\t\t}()\n\n\t\t\tcase ContainerStopped:\n\t\t\t\tf.ReverseContainer(container)\n\t\t\t}\n\t\t}\n\n\t}\n\n\tf.Monitor([]string{}, handler)\n}",
"func (srv *HealthServer) Watch(*grpc_health_v1.HealthCheckRequest, grpc_health_v1.Health_WatchServer) error {\n\treturn nil\n}",
"func Watch(ctx context.Context, cliEngine *engine.Engine, task string, t ox.Task) {\n\ttaskCtx, cancel := context.WithCancel(ctx)\n\n\tfiles, err := getWatcherFiles(t.Sources, t.Dir)\n\tif err != nil {\n\t\tutils.PrintError(err)\n\t\treturn\n\t}\n\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tutils.PrintError(err)\n\t\treturn\n\t}\n\tdefer watcher.Close()\n\n\tfor _, file := range files {\n\t\terr = watcher.Add(file)\n\t\tif err != nil {\n\t\t\tutils.PrintError(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\trunOnWatch := func() {\n\t\terr := cliEngine.Run(taskCtx, task)\n\t\tif err != nil {\n\t\t\tutils.PrintError(err)\n\t\t}\n\t}\n\n\tgo runOnWatch()\n\n\tfor {\n\t\tselect {\n\t\tcase event := <-watcher.Events:\n\t\t\tswitch {\n\t\t\tcase event.Op&fsnotify.Write == fsnotify.Write:\n\t\t\t\tfallthrough\n\t\t\tcase event.Op&fsnotify.Create == fsnotify.Create:\n\t\t\t\tfallthrough\n\t\t\tcase event.Op&fsnotify.Remove == fsnotify.Remove:\n\t\t\t\tfallthrough\n\t\t\tcase event.Op&fsnotify.Rename == fsnotify.Rename:\n\t\t\t\tcancel()\n\t\t\t\ttaskCtx, cancel = context.WithCancel(ctx)\n\t\t\t\tgo runOnWatch()\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\tcancel()\n\t\t\treturn\n\t\tcase err := <-watcher.Errors:\n\t\t\tutils.PrintError(err)\n\t\t\tcancel()\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (c *FakeTraefikServices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(traefikservicesResource, c.ns, opts))\n\n}",
"func New(s *session.Session) *CloudWatch {\n\treturn &CloudWatch{\n\t\tcwClient: cloudwatch.New(s),\n\t\trgClient: resourcegroups.New(s),\n\t}\n}",
"func (w *TaskWatcher) Watch(ctx context.Context) <-chan *TaskEvent {\n\tc := make(chan *TaskEvent, w.cfg.ChannelSize)\n\tgo w.watch(ctx, c)\n\treturn c\n}",
"func (c *sandboxes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tResource(\"sandboxes\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}",
"func (g *Gulf) Watch(patterns []string, tasks ...string) {}",
"func (s *Spec) Watch() error {\n\tif s.stopWatching != nil {\n\t\tlog.WithFields(s.Fields()).Debug(\"already watching\")\n\t\treturn nil\n\t}\n\n\tselectors := strings.Join(s.Details.Selector, \",\")\n\n\topts := metav1.ListOptions{}\n\topts.LabelSelector = selectors\n\twatcher, err := cluster.Client.CoreV1().Pods(s.Details.Namespace).Watch(opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.WithFields(s.Fields()).Debug(\"watching for updates\")\n\n\ts.stopWatching = make(chan bool)\n\tgo func() {\n\t\tdefer watcher.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-s.stopWatching:\n\t\t\t\tlog.WithFields(s.Fields()).Debug(\"stopping watch\")\n\t\t\t\treturn\n\n\t\t\tcase event := <-watcher.ResultChan():\n\t\t\t\t// For whatever reason under the sun, if the watcher looses the\n\t\t\t\t// connection with the cluster, it ends up sending empty events\n\t\t\t\t// as fast as possible. We want to just kill this when that's the\n\t\t\t\t// case.\n\t\t\t\tif event.Type == \"\" && event.Object == nil {\n\t\t\t\t\tlog.WithFields(s.Fields()).Error(\"lost connection to cluster\")\n\t\t\t\t\tSignalLoss <- true\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif event.Object == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif err := s.handleEvent(event); err != nil {\n\t\t\t\t\tlog.WithFields(s.Fields()).Error(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}",
"func (c *FakeRuleEndpoints) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(ruleendpointsResource, c.ns, opts))\n\n}",
"func (c *awsMediaStoreContainers) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"awsmediastorecontainers\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tWatch()\n}",
"func (c *FakeGoogleCloudPubSubSources) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(googlecloudpubsubsourcesResource, c.ns, opts))\n\n}",
"func NewWatch() *Watch {\n\tr := &Watch{\n\t\tActions: make(map[string]WatcherAction, 0),\n\t}\n\n\treturn r\n}",
"func Watch(s Selector, a Action) (*Watcher, error) {\n\tfsw, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tw := &Watcher{\n\t\tfsw: fsw,\n\t\tcache: make(map[string]string),\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event, ok := <-w.fsw.Events:\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Println(\"There was an error in an event consumer [events].\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\te := Event{event}\n\t\t\t\tcached := w.getCache(e.Name)\n\t\t\t\tw.setCache(e.Name, a(e, cached))\n\t\t\tcase err, ok := <-w.fsw.Errors:\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Println(\"There was an error in an event consumer [errs].\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlog.Println(\"error:\", err)\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor _, name := range s() {\n\t\terr = w.fsw.Add(name)\n\t}\n\n\treturn w, err\n}",
"func v1Trigger(c *gin.Context) {\n\t/**\n\t * @I Implement authentication of the caller\n\t * @I Does the id need any escaping?\n\t * @I Ensure the caller has the permissions to trigger evaluation of a Watch\n\t * @I Investigate whether we need our own response status codes\n\t */\n\n\t// The \"ids\" parameter is required. We allow for multiple comma-separated\n\t// string IDs, so we need to convert them to an array of integer IDs.\n\t// We want to make sure that the caller makes the request they want to without\n\t// mistakes, so we do not trigger any Watches if there is any error, even in\n\t// one of the IDs.\n\t// @I Refactor converting a comma-separated list of string IDs to an array of\n\t// integer IDs into a utility function\n\tsIDs := c.Param(\"ids\")\n\taIDsInt, err := util.StringToIntegers(sIDs, \",\")\n\tif err != nil {\n\t\tc.JSON(\n\t\t\thttp.StatusNotFound,\n\t\t\tgin.H{\n\t\t\t\t\"status\": http.StatusNotFound,\n\t\t\t},\n\t\t)\n\t\treturn\n\t}\n\n\t// Get the Watches with the requested IDs from storage.\n\tstorage := c.MustGet(\"storage\").(storage.Storage)\n\n\tvar watches []*common.Watch\n\tfor iID := range aIDsInt {\n\t\twatch, err := storage.Get(iID)\n\t\tif err != nil {\n\t\t\t// Return a Not Found response if there is no Watch with such ID.\n\t\t\tif watch == nil {\n\t\t\t\tc.JSON(\n\t\t\t\t\thttp.StatusNotFound,\n\t\t\t\t\tgin.H{\n\t\t\t\t\t\t\"status\": http.StatusNotFound,\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t// We could trigger the Watch at this point, however we prefer to check\n\t\t// that all Watches exist first.\n\t\twatches = append(watches, watch)\n\t}\n\n\t// Trigger execution of the Watches.\n\t// We only need to acknowledge that the Watches were triggered; we don't have to\n\t// for the execution to finish as this can take time.\n\twatchAPIConfig := c.MustGet(\"config\").(config.Config)\n\tsdkConfig := sdk.Config{\n\t\twatchAPIConfig.ActionAPI.BaseURL,\n\t\twatchAPIConfig.ActionAPI.Version,\n\t}\n\tfor 
_, pointer := range watches {\n\t\tgo func() {\n\t\t\twatch := *pointer\n\t\t\tactionsIds := watch.Do()\n\t\t\tif len(actionsIds) == 0 {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// @I Trigger all Watch Actions in one request\n\t\t\tfor _, actionID := range actionsIds {\n\t\t\t\tgo func() {\n\t\t\t\t\terr := sdk.TriggerByID(actionID, sdkConfig)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t// @I Investigate log management strategy for all services\n\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t}\n\t\t}()\n\t}\n\n\t// All good.\n\tc.JSON(\n\t\thttp.StatusOK,\n\t\tgin.H{\n\t\t\t\"status\": http.StatusOK,\n\t\t},\n\t)\n}",
"func (c *volumeSnapshotSchedules) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"volumesnapshotschedules\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}",
"func (m *Manager) Watch(mObj *models.CrudWatcherCreateArgs, client crude.Watcher) (string, error) {\n\tm.InWObj = mObj\n\treturn m.RetWID, m.RetWErr\n}",
"func (s *Server) Watch(in *grpc_health_v1.HealthCheckRequest, server grpc_health_v1.Health_WatchServer) error {\n\tresp := &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}\n\treturn server.Send(resp)\n}",
"func Watch(strinput2 string, tc net.Conn, watcherport int, dest *string) {\n\tnetwork.SendDataMessage(&tc, 4, 8, watcherport, strinput2)\n\tgo ListentoWatcherport(watcherport, dest)\n}",
"func (c *ClusterResourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {\n\treturn c.clientCache.ClusterOrDie(logicalcluster.Wildcard).Resource(c.resource).Watch(ctx, opts)\n}",
"func (c *stewards) Watch(opts metav1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"stewards\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch()\n}",
"func (c *aITrainingJobs) Watch(opts metav1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"aitrainingjobs\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch()\n}",
"func (b *StatefulSetBox) Watch(namespace, labelSelector string, timeoutSeconds *int64) (watch.Interface, error) {\n\t// labelSelector: example \"app\", \"app=test-app\"\n\topt := metav1.ListOptions{TimeoutSeconds: timeoutSeconds, LabelSelector: labelSelector}\n\tw, err := b.clientset.AppsV1().StatefulSets(namespace).Watch(opt)\n\treturn w, err\n}",
"func (s *Session) Watch(watchObject ...map[string]bool) {\n\tobjectString := \"\"\n\tif len(watchObject) == 1 {\n\t\tvar values []string\n\t\tfor k, v := range watchObject[0] {\n\t\t\tvalues = append(values, fmt.Sprintf(`\"%s\":%v`, k, v))\n\t\t}\n\t\tobjectString = fmt.Sprintf(`={%s}`, strings.Join(values, \",\"))\n\t}\n\ts.SendCommand(WatchCommand + objectString)\n}",
"func watch(url string, etcdcli *etcdutil.EtcdClient) {\n\n\t// test given host provides a remote api.\n\ttestUrl := url + \"/images/json\"\n\tif _, ret := apiwatch.GetContent(testUrl); ret == false {\n\t\tglog.Errorf(\"cloud not access test endpoint %s. It might not provide a docker remote api.\", testUrl)\n\t\tos.Exit(1)\n\t}\n\n\t// watch http streaming on /events.\n\teventUrl := url + \"/events\"\n\tglog.Infof(\"start watching docker api: %s\", eventUrl)\n\n\tapiwatch.ReadStream(eventUrl, func(id string, status string) {\n\t\tinspectUrl := url + \"/containers/\" + id + \"/json\"\n\n\t\tswitch status {\n\t\tcase \"start\":\n\t\t\tglog.Infof(\"inspect: %s\\n\", inspectUrl)\n\t\t\tdata, _ := apiwatch.GetContent(inspectUrl)\n\t\t\tcontainerInfo := apiwatch.JsonToMap(data)\n\t\t\tconfig, _ := containerInfo[\"Config\"].(map[string]interface{})\n\n\t\t\tnetworkSettings, _ := containerInfo[\"NetworkSettings\"].(map[string]interface{})\n\t\t\tregisterIp(config[\"Hostname\"].(string), networkSettings[\"IPAddress\"].(string), etcdcli)\n\t\tcase \"stop\":\n\t\t\tglog.Infof(\"inspect: %s\\n\", inspectUrl)\n\t\t\tdata, _ := apiwatch.GetContent(inspectUrl)\n\t\t\tcontainerInfo := apiwatch.JsonToMap(data)\n\t\t\tconfig, _ := containerInfo[\"Config\"].(map[string]interface{})\n\n\t\t\tunregisterIp(config[\"Hostname\"].(string), etcdcli)\n\t\tdefault:\n\t\t}\n\t})\n}",
"func New() ResourceWatcher {\n\treturn ResourceWatcher{\n\t\twatched: make(map[types.NamespacedName][]types.NamespacedName),\n\t}\n}",
"func (mw *MultiWatcher) Watch(ctx context.Context) {\n\twg := sync.WaitGroup{}\n\twg.Add(len(mw.watchers))\n\tfor _, w := range mw.watchers {\n\t\tgo func(w *Watcher) {\n\t\t\tdefer wg.Done()\n\t\t\tw.Watch(ctx)\n\t\t}(w)\n\t}\n\twg.Wait()\n}",
"func NewWatcher(conn Searcher, dur time.Duration, logger *log.Logger) (*Watcher, error) {\n\tif dur == 0 {\n\t\tdur = defaultDuration\n\t}\n\n\tif logger == nil {\n\t\tlogger = log.New(os.Stdout, \"\", log.LstdFlags)\n\t}\n\n\tw := Watcher{\n\t\tconn: conn,\n\t\tduration: dur,\n\t\tlogger: logger,\n\t\twatches: make([]*Watch, 0),\n\t}\n\n\treturn &w, nil\n}",
"func (c *configAuditReports) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"configauditreports\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}",
"func (w *Watcher) Watch() {\n\tch := make(chan struct{})\n\tgo func(stopCh <-chan struct{}) {\n\t\tw.Informer.Informer().AddEventHandler(w.ResourceEventHandlerFuncs)\n\t\tw.Informer.Informer().Run(stopCh)\n\t}(ch)\n\t<-w.StopChannel\n\tclose(ch)\n\tlogrus.Info(\"stoping watcher for \", w.GroupVersionResource)\n}",
"func (api *hostAPI) Watch(handler HostHandler) error {\n\tapi.ct.startWorkerPool(\"Host\")\n\treturn api.ct.WatchHost(handler)\n}",
"func (m *MPD) Watch(ctx context.Context) Watch {\n\treturn goWatch(ctx, m.url)\n}",
"func Watch(path ...string) (Watcher, error) {\n\treturn conf.Watch(path...)\n}",
"func (self *manager) WatchForEvents(request *events.Request) (*events.EventChannel, error) {\n\treturn self.eventHandler.WatchEvents(request)\n}",
"func (c *FakeProjects) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(core.NewWatchAction(projectsResource, c.ns, opts))\n\n}",
"func (cs *checkoutService) Watch(req *healthpb.HealthCheckRequest, server healthpb.Health_WatchServer) error {\n\treturn nil\n}",
"func (w *Watcher) Watch() {\n\tfor {\n\t\tselect {\n\t\tcase ev := <-w.watcher.Event:\n\t\t\tfor _, handler := range w.modifiedHandlers {\n\t\t\t\tif strings.HasPrefix(ev.Name, handler.path) {\n\t\t\t\t\tfmt.Println(handler)\n\t\t\t\t\thandler.callback(ev.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t\tlog.Println(\"event:\", ev)\n\t\t\tlog.Println(\"handlers:\", w.modifiedHandlers)\n\t\t\t//case addreq :=\n\t\tcase err := <-w.watcher.Error:\n\t\t\tlog.Println(\"error:\", err)\n\t\t}\n\t}\n}",
"func (inn *LocalNode) consulWatch(conf *Config) error {\n\tfilter := map[string]interface{}{\n\t\t\"type\": \"service\",\n\t\t\"service\": conf.Consul.Service,\n\t}\n\n\tpl, err := consulwatch.Parse(filter)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpl.Handler = inn.serviceHandler\n\treturn pl.RunWithConfig(conf.Consul.Address, &conf.Consul.Config)\n}",
"func (w *ClusterDynamicClient) Watch(opts metav1.ListOptions) (watch.Interface, error) {\n\treturn w.dClient.Resource(w.resource).Namespace(w.namespace).Watch(w.ctx, opts)\n}",
"func (c *awsServiceDiscoveryServices) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"awsservicediscoveryservices\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tWatch()\n}",
"func (m *EtcdManager) Watch(key string, opts ...clientv3.OpOption) <-chan clientv3.WatchResponse {\n\treturn m.cli.Watch(context.Background(), key, opts...)\n}",
"func (f *extendedPodFactory) ListWatch(customResourceClient interface{}, ns string, fieldSelector string) cache.ListerWatcher {\n\tclient := customResourceClient.(clientset.Interface)\n\treturn &cache.ListWatch{\n\t\tListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {\n\t\t\treturn client.CoreV1().Pods(ns).List(context.TODO(), opts)\n\t\t},\n\t\tWatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {\n\t\t\treturn client.CoreV1().Pods(ns).Watch(context.TODO(), opts)\n\t\t},\n\t}\n}",
"func (c *tiKVGroups) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"tikvgroups\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch()\n}",
"func (st *fakeConn) Watch(ctx context.Context, filePath string) (current *WatchData, changes <-chan *WatchData, err error) {\n\treturn current, changes, err\n}",
"func (c *rpcServices) Watch(opts metav1.ListOptions) (watch.Interface, error) {\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"rpcservices\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tWatch()\n}",
"func (api *configurationsnapshotAPI) Watch(handler ConfigurationSnapshotHandler) error {\n\tapi.ct.startWorkerPool(\"ConfigurationSnapshot\")\n\treturn api.ct.WatchConfigurationSnapshot(handler)\n}",
"func (c *staticFabricNetworkAttachments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"staticfabricnetworkattachments\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}",
"func (service *DaemonHeartbeat) Watch(*grpc_health_v1.HealthCheckRequest, grpc_health_v1.Health_WatchServer) error {\n\treturn nil\n}",
"func (c *clusterVulnerabilityReports) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tResource(\"clustervulnerabilityreports\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(ctx)\n}",
"func (c *backingservices) Watch(opts kapi.ListOptions) (watch.Interface, error) {\n\treturn c.r.Get().\n\t\tNamespace(c.ns).\n\t\tPrefix(\"watch\").\n\t\tResource(\"backingservices\").\n\t\tVersionedParams(&opts, kapi.ParameterCodec).\n\t\tWatch()\n}",
"func (c *FakeECSDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(ecsdeploymentsResource, c.ns, opts))\n\n}",
"func (s *stateManager) Watch(watcher *AllocationWatcher) func() {\n\tstopChan := make(chan interface{})\n\ts.stopChan = append(s.stopChan, stopChan)\n\tctx := context.Background()\n\n\tkey := fmt.Sprintf(\"%s/allocations\", etcdPrefix)\n\twatchChan := s.cli.Watch(ctx, key, clientv3.WithPrefix(), clientv3.WithPrevKV())\n\n\tstopFunc := func() {\n\t\tstopChan <- true\n\t}\n\n\t// Start a new thread and watch for changes in etcd\n\tgo s.watchChannel(watchChan, stopChan, watcher)\n\n\treturn stopFunc\n}",
"func (c *kuberhealthyChecks) Watch(opts metav1.ListOptions) (watch.Interface, error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"khchecks\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tWatch(context.TODO())\n}",
"func (cmw *SecretWatcher) watch() error {\n\tsec, err := cmw.kubeClient.Secrets(cmw.namespace).Get(cmw.name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsel := generic.ObjectMetaFieldsSet(&sec.ObjectMeta, true)\n\tw, err := cmw.kubeClient.Secrets(cmw.namespace).Watch(api.ListOptions{\n\t\tFieldSelector: sel.AsSelector(),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmw.w = w\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event, ok := <-w.ResultChan():\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif event.Type != watch.Added {\n\t\t\t\t\tcmw.OnEvent()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}",
"func (m *Manager) Watch() error {\n\tfor req, channel := range m.changesChannels {\n\t\tif err := m.startWatchingFlow(req, channel); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (watcher *GitWatcher) watchLoop() {\n\tc := watcher.KubeClient\n\tns := watcher.Namespace\n\tw, err := c.Extensions().Deployments(ns).Watch(*watcher.ListOpts)\n\tif err != nil {\n\t\tprintError(err)\n\t}\n\tkubectl.WatchLoop(w, func(e watch.Event) error {\n\t\to := e.Object\n\t\tswitch o := o.(type) {\n\t\tcase *extensions.Deployment:\n\t\t\twatcher.CheckC <- o\n\t\tdefault:\n\t\t\tutil.Warnf(\"Unknown watch object type %v\\n\", o)\n\t\t}\n\t\treturn nil\n\t})\n}",
"func (c *FakeKconfigs) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(kconfigsResource, c.ns, opts))\n\n}"
] | [
"0.5909597",
"0.59083927",
"0.5884586",
"0.58768004",
"0.58135056",
"0.5808285",
"0.5632079",
"0.5628538",
"0.5565414",
"0.55580974",
"0.5540909",
"0.5540064",
"0.55053604",
"0.55044025",
"0.55031914",
"0.5497162",
"0.54961246",
"0.5490093",
"0.5444556",
"0.5431618",
"0.5420408",
"0.5418779",
"0.54105276",
"0.5396375",
"0.53775835",
"0.53772193",
"0.536893",
"0.5358953",
"0.53499526",
"0.5348972",
"0.53407836",
"0.5335128",
"0.5327566",
"0.5321739",
"0.53141207",
"0.5312987",
"0.53027195",
"0.53026253",
"0.52891433",
"0.52725303",
"0.5255772",
"0.5251981",
"0.5247656",
"0.52442753",
"0.5221991",
"0.52133435",
"0.5199371",
"0.5194138",
"0.51865613",
"0.5185779",
"0.51644915",
"0.5137732",
"0.51328963",
"0.5125818",
"0.5118756",
"0.5118321",
"0.50858617",
"0.5082259",
"0.5078108",
"0.5069619",
"0.50672626",
"0.50648123",
"0.50585246",
"0.50523853",
"0.50375193",
"0.50319135",
"0.50229007",
"0.50061774",
"0.50026155",
"0.4998762",
"0.4995775",
"0.49842614",
"0.4981646",
"0.49617863",
"0.49551943",
"0.4948122",
"0.49477625",
"0.4940457",
"0.49367863",
"0.4929746",
"0.49285692",
"0.49262688",
"0.49203494",
"0.49073344",
"0.49000624",
"0.48969886",
"0.48964828",
"0.48934254",
"0.48887238",
"0.4885322",
"0.48805565",
"0.48758",
"0.4874977",
"0.487414",
"0.48731765",
"0.48712346",
"0.4862879",
"0.48627776",
"0.48619696",
"0.4860093"
] | 0.74523795 | 0 |
Create takes the representation of a cloudwatchEventTarget and creates it. Returns the server's representation of the cloudwatchEventTarget, and an error, if there is any. | Create принимает представление cloudwatchEventTarget и создаёт его. Возвращает представление cloudwatchEventTarget, предоставленное сервером, и ошибку, если она есть. | func (c *FakeCloudwatchEventTargets) Create(cloudwatchEventTarget *v1alpha1.CloudwatchEventTarget) (result *v1alpha1.CloudwatchEventTarget, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(cloudwatcheventtargetsResource, c.ns, cloudwatchEventTarget), &v1alpha1.CloudwatchEventTarget{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.CloudwatchEventTarget), err
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func NewEventTarget(ctx *pulumi.Context,\n\tname string, args *EventTargetArgs, opts ...pulumi.ResourceOption) (*EventTarget, error) {\n\tif args == nil || args.Arn == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Arn'\")\n\t}\n\tif args == nil || args.Rule == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Rule'\")\n\t}\n\tif args == nil {\n\t\targs = &EventTargetArgs{}\n\t}\n\tvar resource EventTarget\n\terr := ctx.RegisterResource(\"aws:cloudwatch/eventTarget:EventTarget\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (e *EventAPI) Create(eventType *EventType) error {\n\tconst errMsg = \"unable to create event type\"\n\n\tresponse, err := e.client.httpPOST(e.backOffConf.create(), e.eventBaseURL(), eventType, errMsg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != http.StatusCreated {\n\t\tbuffer, err := io.ReadAll(response.Body)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"%s: unable to read response body\", errMsg)\n\t\t}\n\t\treturn decodeResponseToError(buffer, errMsg)\n\t}\n\n\treturn nil\n}",
"func CreateCloudEvent(cloudEventVersion string) *event.Event {\n\tcloudEvent := event.New(cloudEventVersion)\n\tcloudEvent.SetID(EventId)\n\tcloudEvent.SetType(EventType)\n\tcloudEvent.SetSource(EventSource)\n\tcloudEvent.SetDataContentType(EventDataContentType)\n\tcloudEvent.SetSubject(EventSubject)\n\tcloudEvent.SetDataSchema(EventDataSchema)\n\tcloudEvent.SetExtension(constants.ExtensionKeyPartitionKey, PartitionKey)\n\t_ = cloudEvent.SetData(EventDataContentType, EventDataJson)\n\treturn &cloudEvent\n}",
"func (s *TargetCRUD) Create(arg ...crud.Arg) (crud.Arg, error) {\n\tevent := eventFromArg(arg[0])\n\ttarget := targetFromStuct(event)\n\tprint.CreatePrintln(\"creating target\", *target.Target.Target,\n\t\t\"on upstream\", *target.Upstream.ID)\n\treturn target, nil\n}",
"func (c *FakeCloudwatchEventTargets) Update(cloudwatchEventTarget *v1alpha1.CloudwatchEventTarget) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewUpdateAction(cloudwatcheventtargetsResource, c.ns, cloudwatchEventTarget), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}",
"func (c *FakeCloudwatchEventTargets) Get(name string, options v1.GetOptions) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewGetAction(cloudwatcheventtargetsResource, c.ns, name), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}",
"func (c *FakeCloudwatchEventTargets) Delete(name string, options *v1.DeleteOptions) error {\n\t_, err := c.Fake.\n\t\tInvokes(testing.NewDeleteAction(cloudwatcheventtargetsResource, c.ns, name), &v1alpha1.CloudwatchEventTarget{})\n\n\treturn err\n}",
"func createEvent(action string) *Event {\n\treturn &Event{\n\t\tID: uuid.Generate().String(),\n\t\tTimestamp: time.Now(),\n\t\tAction: action,\n\t}\n}",
"func (s *server) CreateEvent(context.Context, *pb.CreateEventRequest) (*pb.CreateEventResponse, error) {\n\treturn nil, nil\n}",
"func (s *Service) CreateEvent(ctx context.Context, req *request.CreateEvent) (*response.Message, error) {\n\t// TODO\n\treturn nil, nil\n}",
"func NewCloudPcAuditEvent()(*CloudPcAuditEvent) {\n m := &CloudPcAuditEvent{\n Entity: *NewEntity(),\n }\n return m\n}",
"func (es *EntityEventService) Create(campID int, entID int, evt SimpleEntityEvent) (*EntityEvent, error) {\n\tvar err error\n\tend := EndpointCampaign\n\n\tif end, err = end.id(campID); err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid Campaign ID: %w\", err)\n\t}\n\tend = end.concat(endpointEntity)\n\n\tif end, err = end.id(entID); err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid Entity ID: %w\", err)\n\t}\n\tend = end.concat(es.end)\n\n\tb, err := json.Marshal(evt)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot marshal SimpleEntityEvent: %w\", err)\n\t}\n\n\tvar wrap struct {\n\t\tData *EntityEvent `json:\"data\"`\n\t}\n\n\tif err = es.client.post(end, bytes.NewReader(b), &wrap); err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot create EntityEvent for Campaign (ID: %d): %w\", campID, err)\n\t}\n\n\treturn wrap.Data, nil\n}",
"func (h *eventServiceHTTPHandler) CreateEvent(c echo.Context) error {\n\tlogCtx := fmt.Sprintf(\"%T.CreateEvent\", *h)\n\n\tparams := model.CreateEventReq{}\n\tif err := c.Bind(¶ms); err != nil {\n\t\thelper.Log(logrus.ErrorLevel, err.Error(), logCtx, \"error_bind_params\")\n\t\treturn helper.NewResponse(http.StatusBadRequest, http.StatusBadRequest, err.Error(), nil).WriteResponse(c)\n\t}\n\n\tif err := sanitizer.ValidateCreateEvent(¶ms); err != nil {\n\t\thelper.Log(logrus.ErrorLevel, err.Error(), logCtx, \"error_validate_params\")\n\t\treturn helper.NewResponse(http.StatusBadRequest, http.StatusBadRequest, err.Error(), nil).WriteResponse(c)\n\t}\n\n\tresp, err := h.eventUseCase.CreateEvent(¶ms)\n\tif err != nil {\n\t\thelper.Log(logrus.ErrorLevel, err.Error(), logCtx, \"error_create_event\")\n\t\treturn helper.NewResponse(http.StatusInternalServerError, http.StatusInternalServerError, err.Error(), nil).WriteResponse(c)\n\t}\n\n\tdata := make(map[string]interface{})\n\tdata[\"event\"] = resp\n\treturn helper.NewResponse(http.StatusCreated, http.StatusCreated, \"Success\", data).WriteResponse(c)\n}",
"func (c *EventClient) Create() *EventCreate {\n\tmutation := newEventMutation(c.config, OpCreate)\n\treturn &EventCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}\n}",
"func CreateEvent(request protocols.Request, outputEvent output.InternalEvent, isResponseDebug bool) *output.InternalWrappedEvent {\n\treturn CreateEventWithAdditionalOptions(request, outputEvent, isResponseDebug, nil)\n}",
"func CreateEvent(name string) *corev1.Event {\n\treturn &corev1.Event{\n\t\tTypeMeta: genTypeMeta(gvk.Event),\n\t\tObjectMeta: genObjectMeta(name, true),\n\t}\n}",
"func (s service) Create(ctx context.Context, email, component, environment, message string, data map[string]string) (*models.Event, error) {\n\tval, _ := json.Marshal(data)\n\te := &models.Event{\n\t\tEmail: email,\n\t\tComponent: component,\n\t\tEnvironment: environment,\n\t\tMessage: message,\n\t\tData: datatypes.JSON([]byte(val)),\n\t}\n\tevent, err := e.Create(s.DB)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event, nil\n}",
"func New(t Type, opts ...Option) (Event, error) {\n\tfactory, ok := eventFactories[t]\n\tif !ok {\n\t\treturn nil, errors.New(\"unknown event type\")\n\t}\n\n\treturn factory(opts...), nil\n}",
"func GetEventTarget(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *EventTargetState, opts ...pulumi.ResourceOption) (*EventTarget, error) {\n\tvar resource EventTarget\n\terr := ctx.ReadResource(\"aws:cloudwatch/eventTarget:EventTarget\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func NewCloudEvent(req *CloudEvent, metadata map[string]string) (map[string]interface{}, error) {\n\tif contribContenttype.IsCloudEventContentType(req.DataContentType) {\n\t\treturn contribPubsub.FromCloudEvent(req.Data, req.Topic, req.Pubsub, req.TraceID, req.TraceState)\n\t}\n\n\t// certain metadata beginning with \"cloudevent.\" are considered overrides to the cloudevent envelope\n\t// we ignore any error here as the original cloud event envelope is still valid\n\t_ = mapstructure.WeakDecode(metadata, req) // allows ignoring of case\n\n\t// the final cloud event envelope contains both \"traceid\" and \"traceparent\" set to the same value (req.TraceID)\n\t// eventually \"traceid\" will be deprecated as it was superseded by \"traceparent\"\n\t// currently \"traceparent\" is not set by the pubsub component and can only set by the user via metadata override\n\t// therefore, if an override is set for \"traceparent\", we use it, otherwise we use the original or overridden \"traceid\" value\n\tif req.TraceParent != \"\" {\n\t\treq.TraceID = req.TraceParent\n\t}\n\treturn contribPubsub.NewCloudEventsEnvelope(req.ID, req.Source, req.Type,\n\t\t\"\", req.Topic, req.Pubsub, req.DataContentType, req.Data, req.TraceID, req.TraceState), nil\n}",
"func (e *Event) Create(c echo.Context, req takrib.Event) (*takrib.Event, error) {\n\treturn e.udb.Create(e.db, req)\n}",
"func (s *Server) CreateEvent(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\n\tb, err := ioutil.ReadAll(r.Body)\n\tdefer r.Body.Close()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tvar event Event\n\tif err = json.Unmarshal(b, &event); err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\teventUUID, err := uuid.NewUUID()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\teventID := eventUUID.String()\n\n\tevent.ID = eventID\n\n\terr1 := s.database.CreateEvent(ctx, &event)\n\tif httperr.HandleError(w, err, http.StatusInternalServerError) {\n\t\ts.logger.For(ctx).Error(\"request failed\", zap.Error(err1))\n\t\treturn\n\t}\n\n\tresponse := CreatePostResponse{\"success\"}\n\tjsonResponse, err := json.Marshal(response)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(jsonResponse)\n}",
"func (c *EventService) Create(input *EventCreateInput) (CreateResult, *http.Response, error) {\n\treturn doCreate(c.sling, c.endpoint, input)\n}",
"func (s *gRPCServer) Create(ctx context.Context, req *pb.CreateEventRequest) (*pb.CreateEventResponse, error) {\n\t_, resp, err := s.create.ServeGRPC(ctx, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.(*pb.CreateEventResponse), nil\n}",
"func (ec *EventController) Create(ctx context.Context, event *Event) error {\n\t_, err := ec.collection.InsertOne(ctx, *event)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (c *Client) CreateEvent(event *corev1.Event) (*corev1.Event, error) {\n\tif err := c.initClient(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.kubernetes.CoreV1().Events(event.Namespace).Create(event)\n}",
"func NewEvent(timestampMs int64, message string) *Event {\n\tevent := &Event{\n\t\tInputLogEvent: &cloudwatchlogs.InputLogEvent{\n\t\t\tTimestamp: aws.Int64(timestampMs),\n\t\t\tMessage: aws.String(message)},\n\t}\n\treturn event\n}",
"func (c *FakeCloudwatchEventTargets) List(opts v1.ListOptions) (result *v1alpha1.CloudwatchEventTargetList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(cloudwatcheventtargetsResource, cloudwatcheventtargetsKind, c.ns, opts), &v1alpha1.CloudwatchEventTargetList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1alpha1.CloudwatchEventTargetList{ListMeta: obj.(*v1alpha1.CloudwatchEventTargetList).ListMeta}\n\tfor _, item := range obj.(*v1alpha1.CloudwatchEventTargetList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}",
"func New(message, detail interface{}) (CustomEvent, error) {\n\tvar event CustomEvent\n\tvar jsObj js.Value\n\n\tif objGo, ok := detail.(baseobject.ObjectFrom); ok {\n\t\tjsObj = objGo.JSObject()\n\t} else {\n\t\tjsObj = js.ValueOf(detail)\n\t}\n\n\tif eventi := GetInterface(); !eventi.IsNull() {\n\t\tevent.BaseObject = event.SetObject(eventi.New(js.ValueOf(message), js.ValueOf(map[string]interface{}{\"detail\": jsObj})))\n\t\treturn event, nil\n\t}\n\treturn event, ErrNotImplemented\n}",
"func (c *DefaultApiService) CreateSink(params *CreateSinkParams) (*EventsV1Sink, error) {\n\tpath := \"/v1/Sinks\"\n\n\tdata := url.Values{}\n\theaders := make(map[string]interface{})\n\n\tif params != nil && params.Description != nil {\n\t\tdata.Set(\"Description\", *params.Description)\n\t}\n\tif params != nil && params.SinkConfiguration != nil {\n\t\tv, err := json.Marshal(params.SinkConfiguration)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdata.Set(\"SinkConfiguration\", string(v))\n\t}\n\tif params != nil && params.SinkType != nil {\n\t\tdata.Set(\"SinkType\", *params.SinkType)\n\t}\n\n\tresp, err := c.requestHandler.Post(c.baseURL+path, data, headers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tps := &EventsV1Sink{}\n\tif err := json.NewDecoder(resp.Body).Decode(ps); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps, err\n}",
"func New() Event {\n\treturn Event{}\n}",
"func New(eventType Type, srv server.Server, bootType, script string, params map[string]interface{}) Event {\n\tvar event Event\n\n\tevent.Type = eventType\n\tevent.Date = time.Now()\n\tevent.Server = srv\n\tevent.BootType = bootType\n\tevent.Script = script\n\tevent.Params = params\n\n\tevent.setMessage()\n\n\treturn event\n}",
"func (a *Auditor) NewEvent(tenantID, userID, eventName, eventResult, resourceCategory, resourceType, resourceID, resourceName string, body string) {\n\ttenantName := tenantID\n\tuserName, err := cas.GetUserNameByID(userID)\n\tif err != nil {\n\t\tuserName = userID\n\t\tklog.Errorf(\"cannot get username with id %s: %+v\", userID, err)\n\t}\n\te := event{\n\t\tdata: AutoGenerated{\n\t\t\tTenantID: tenantID,\n\t\t\tTenantName: tenantName,\n\t\t\tUserID: userID,\n\t\t\tUserName: userName,\n\t\t\tRecordTime: time.Now().Format(\"2006-01-02 15:04:05\"),\n\t\t\tEventName: eventName,\n\t\t\tEventResult: eventResult,\n\t\t\tResourceCategory: resourceCategory,\n\t\t\tResourceType: resourceType,\n\t\t\tResourceID: resourceID,\n\t\t\tResourceName: resourceName,\n\t\t\tRequestBody: body,\n\t\t},\n\t\tretry: 2,\n\t}\n\tklog.V(5).Infof(\"new audit log: %+v\", e.data)\n\ta.queue <- e\n}",
"func (s *eventServer) Create(ctx context.Context, in *eventPb.CreateRequest) (*eventPb.CreateReply, error) {\n\tid := uuid.Must(uuid.NewV4()).String()\n\n\tidentity := &identityPb.Identity{}\n\tproto.Unmarshal(in.Data, identity)\n\tdata[in.ProcessId] = identity\n\tfmt.Printf(\"Create Event Called %s: %s\\n\", id, data[in.ProcessId].PhoneNumber)\n\treturn &eventPb.CreateReply{Id: id}, nil\n}",
"func newEvent(name string, args ...interface{}) (*Event, error) {\n\tid, err := uuid.NewV4()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\theader := &EventHeader{Id: id.String(), Version: ProtocolVersion}\n\n\te := Event{\n\t\tHeader: header,\n\t\tName: name,\n\t\tArgs: args,\n\t}\n\n\treturn &e, nil\n}",
"func (c *RabbitEventStoreClient) CreateEvent(event framework.Event) (newEvent framework.Event, err error) {\n\terr = c.client.Send(Request{\n\t\tAction: \"CreateEvent\",\n\t\tData: event,\n\t}, &newEvent)\n\n\treturn newEvent, err\n}",
"func NewEvent(data map[string]interface{}) (Event, error) {\n\treturn parseToEventType(data)\n}",
"func (c *FakeAWSSNSTargets) Create(ctx context.Context, aWSSNSTarget *v1alpha1.AWSSNSTarget, opts v1.CreateOptions) (result *v1alpha1.AWSSNSTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewCreateAction(awssnstargetsResource, c.ns, aWSSNSTarget), &v1alpha1.AWSSNSTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.AWSSNSTarget), err\n}",
"func NewEvent(host, token string) *EventCollectorClient {\n\te := &EventCollectorClient{}\n\n\t// Start generating the new request\n\treq, err := http.NewRequest(\"POST\", host+\"/services/collector/event\", nil)\n\tif err != nil {\n\t\tfmt.Printf(err.Error())\n\t\treturn e\n\t}\n\treq.Header.Set(\"User-Agent\", \"Go-Splunk/\"+Version)\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", \"Splunk \"+token)\n\n\t// Set the request within Client\n\te.Request = req\n\treturn e\n}",
"func (c *FakeCloudwatchEventTargets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewPatchSubresourceAction(cloudwatcheventtargetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}",
"func (b Build) CreateEvent(c *gin.Context) {\n\tbuild, err := b.unauthOne(c)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif !b.canPostEvent(c, build) {\n\t\tc.AbortWithStatus(403)\n\t\treturn\n\t}\n\n\tevent := models.PostBatchEvent{}\n\tc.BindJSON(&event)\n\n\tif !sugar.ValidateRequest(c, event) {\n\t\treturn\n\t}\n\n\tcurrentStatus := build.Status()\n\n\tif !models.CanTransition(currentStatus, event.Status) {\n\t\tsugar.ErrResponse(c, 400, fmt.Sprintf(\"%s not valid when current status is %s\", event.Status, currentStatus))\n\t\treturn\n\t}\n\n\t_, isUser := middleware.CheckUser(c)\n\tif event.Status == models.StatusTerminated && isUser {\n\t\tsugar.ErrResponse(c, 400, fmt.Sprintf(\"Users cannot post TERMINATED events, please upgrade to reco v0.3.1 or above\"))\n\t}\n\n\tnewEvent, err := BatchService{AWS: b.AWS}.AddEvent(&build.BatchJob, event)\n\n\tif event.Status == \"CREATING_IMAGE\" {\n\t\terr = db.Model(&build).Update(\"FPGAImage\", event.Message).Error\n\t}\n\n\tif err != nil {\n\t\tsugar.InternalError(c, err)\n\t\treturn\n\t}\n\teventMessage := \"Build entered state:\" + event.Status\n\tsugar.EnqueueEvent(b.Events, c, eventMessage, build.Project.UserID, map[string]interface{}{\"build_id\": build.ID, \"project_name\": build.Project.Name, \"message\": event.Message})\n\tsugar.SuccessResponse(c, 200, newEvent)\n}",
"func (x *fastReflection_EventCreateClass) New() protoreflect.Message {\n\treturn new(fastReflection_EventCreateClass)\n}",
"func EventCreate(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\teventId, _ := strconv.ParseInt(vars[\"eventId\"], 10, 64)\n\n\tevent, err := event.EventGetById(userId)\n\n\tif err == nil {\n\t\tresponse.Success(w, event)\n\t} else {\n\t\tresponse.Fail(w, http.StatusNotFound, err.Error())\n\t}\n}",
"func NewEvent(action string, version int8, parent uuid.UUID, key []byte, data []byte) Event {\n\tid := uuid.Must(uuid.NewV4())\n\tif key == nil {\n\t\tkey = id.Bytes()\n\t}\n\n\t// Fix: unexpected end of JSON input\n\tif len(data) == 0 {\n\t\tdata = []byte(\"null\")\n\t}\n\n\tevent := Event{\n\t\tParent: parent,\n\t\tID: id,\n\t\tHeaders: make(map[string]string),\n\t\tAction: action,\n\t\tData: data,\n\t\tKey: key,\n\t\tStatus: StatusOK,\n\t\tVersion: version,\n\t\tCtx: context.Background(),\n\t}\n\n\treturn event\n}",
"func Create(c *golangsdk.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {\n\tb, err := opts.ToListenerCreateMap()\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn\n\t}\n\t_, r.Err = c.Post(rootURL(c), b, &r.Body, &golangsdk.RequestOpts{\n\t\tOkCodes: []int{200},\n\t})\n\treturn\n}",
"func New(client kubernetes.Interface) *Event {\n\tbroadcaster := record.NewBroadcaster()\n\tbroadcaster.StartLogging(glog.V(4).Infof)\n\tbroadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: corev1.New(client.CoreV1().RESTClient()).Events(namespace)})\n\treturn &Event{\n\t\trecorder: broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: component}),\n\t}\n}",
"func (s *WebhooksServiceOp) Create(webhook Webhook, options ...interface{}) (Webhook, error) {\n\tvar webhookResponse GetWebhookResponse\n\tjsonBody, err := json.Marshal(webhook)\n\tif err != nil {\n\t\treturn webhookResponse.Data, err\n\t}\n\treqBody := bytes.NewReader(jsonBody)\n\tbody, reqErr := s.client.DoRequest(http.MethodPost, \"/v3/hooks\", reqBody)\n\tif reqErr != nil {\n\t\treturn webhookResponse.Data, reqErr\n\t}\n\n\tjsonErr := json.Unmarshal(body, &webhookResponse)\n\tif jsonErr != nil {\n\t\treturn webhookResponse.Data, jsonErr\n\t}\n\n\treturn webhookResponse.Data, nil\n}",
"func (els *EventLocalStore) Create(e *entities.Event) error {\n\tels.mutex.Lock()\n\tels.events[e.ID] = e\n\tels.mutex.Unlock()\n\n\treturn nil\n}",
"func HandleCreateEvent(w rest.ResponseWriter, req *rest.Request) {\n\tif err := RequireWriteKey(w, req); err != nil {\n\t\trest.Error(w, err.Error(), err.(StatusError).Code)\n\t\treturn\n\t}\n\n\tproject := currentProject(req)\n\tevent := req.PathParam(\"event_name\")\n\n\tvar data CreateSingleEventParams\n\tvar err error\n\tif err = eventData(req, &data); err != nil {\n\t\trest.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tevents := make(map[string][]interface{})\n\tevents[event] = []interface{}{data}\n\n\tresult, err := createEvent(project, event, data)\n\n\tif err != nil {\n\t\trest.Error(w, err.Error(), http.StatusBadRequest)\n\t} else {\n\t\tw.WriteJson(result)\n\t}\n}",
"func New(secret string, h Func) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tb, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"reading body\")\n\t\t\tresponse.InternalServerError(w)\n\t\t\treturn\n\t\t}\n\n\t\tsignature := r.Header.Get(\"Stripe-Signature\")\n\t\te, err := webhook.ConstructEvent(b, signature, secret)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"constructing event\")\n\t\t\tresponse.InternalServerError(w)\n\t\t\treturn\n\t\t}\n\n\t\tctx := log.WithFields(log.Fields{\n\t\t\t\"event_id\": e.ID,\n\t\t\t\"event_type\": e.Type,\n\t\t})\n\n\t\tctx.Info(\"handling stripe event\")\n\t\tif err := h(&e); err != nil {\n\t\t\tctx.WithError(err).Error(\"handling stripe event\")\n\t\t\tresponse.InternalServerError(w)\n\t\t\treturn\n\t\t}\n\n\t\tctx.Info(\"handled stripe event\")\n\t\tresponse.OK(w)\n\t})\n}",
"func CreateEvent(client *http.Client, c *config.Config) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t// Verify the JWT token since it's a protected route.\n\t\ttokenCookie, err := r.Cookie(c.Jwt.CookieName)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to get cookie: %v\", err)\n\t\t\thttp.Error(w, err.Error(), http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\t\t_, err = jwt.VerifyToken(tokenCookie.Value, &c.Jwt)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to verify token: %v\", err)\n\t\t\thttp.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\t\t// Create the request that's gonna call our event service.\n\t\treq, err := http.NewRequest(http.MethodPost, fmt.Sprintf(\"http://%v:%v\", c.Service.Event.Host, c.Service.Event.Port), r.Body)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to create new request: %v\", err)\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\t// Make the request.\n\t\tres, err := client.Do(req)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to do request: %v\", err)\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\t// Read the response body (the created event if the request was successful).\n\t\tb, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to read response body: %v\", err)\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\t// Respond with the received response (the status code is hopefully 201 Status Created).\n\t\tw.Header().Set(CONTENT_TYPE, res.Header.Get(CONTENT_TYPE))\n\t\tw.WriteHeader(res.StatusCode)\n\t\tw.Write(b)\n\t}\n}",
"func (pgs *PGStorage) CreateEvent(e event.Event) (event.Event, error) {\n\tsql := `insert into events(uuid, title, datetime, duration, description, userid, notify) \n\t\tvalues(:uuid, :title, :datetime, :duration, :description, :userid, :notify)`\n\teventUUID := uuid.New()\n\t_, err := pgs.DB.NamedExecContext(pgs.Ctx, sql, map[string]interface{}{\n\t\t\"uuid\": eventUUID.String(),\n\t\t\"title\": e.Title,\n\t\t\"datetime\": e.Datetime,\n\t\t\"duration\": e.Duration,\n\t\t\"description\": e.Desc,\n\t\t\"userid\": e.User,\n\t\t\"notify\": e.Notify,\n\t})\n\tif err != nil {\n\t\treturn event.Event{}, err\n\t}\n\te.UUID = eventUUID\n\treturn e, nil\n}",
"func (c Create) BuildEvent(context.Context) (goes.EventData, interface{}, error) {\n\treturn CreatedV1{\n\t\tID: \"0563019f-ade9-4cb1-81a7-4f1bb3213cb0\",\n\t\tFirstName: c.FirstName,\n\t\tLastName: c.LastName,\n\t}, nil, nil\n}",
"func (w *WebhookServiceOp) Create(webhook Webhook) (*Webhook, error) {\n\tpath := fmt.Sprintf(\"%s\", webhooksBasePath)\n\tresource := new(Webhook)\n\terr := w.client.Post(path, webhook, &resource)\n\treturn resource, err\n}",
"func (s *baseStore[T, E, TPtr, EPtr]) Create(ctx context.Context, object TPtr) error {\n\teventPtr := s.newObjectEvent(ctx, CreateEvent)\n\teventPtr.SetObject(*object)\n\tif err := s.createObjectEvent(ctx, eventPtr); err != nil {\n\t\treturn err\n\t}\n\t*object = eventPtr.Object()\n\treturn nil\n}",
"func (s *DirectMessageService) EventsCreate(event *DirectMessageEventMessage) (*DirectMessageEventsCreateResponse, *http.Response, error) {\n\tapiParams := &directMessageEventsCreateParams{\n\t\tEvent: &directMessageEventsCreateEvent{\n\t\t\tType: \"message_create\",\n\t\t\tMessage: &directMessageEventsCreateMessage{\n\t\t\t\tTarget: &directMessageEventsCreateTarget{\n\t\t\t\t\tRecipientID: event.Target.RecipientID,\n\t\t\t\t},\n\t\t\t\tData: &directMessageEventsCreateData{\n\t\t\t\t\tText: event.Data.Text,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tapiError := new(APIError)\n\teventResponse := new(DirectMessageEventsCreateResponse)\n\tresp, err := s.sling.New().Post(\"events/new.json\").BodyJSON(apiParams).Receive(eventResponse, apiError)\n\treturn eventResponse, resp, relevantError(err, *apiError)\n}",
"func EventCreate(ctx *gin.Context) {\n\tvar user *model.User\n\tif userInterface, exists := ctx.Get(\"User\"); !exists {\n\t\tmisc.ReturnStandardError(ctx, http.StatusForbidden, \"you have to be a registered user to create event\")\n\t\treturn\n\t} else {\n\t\tuser = userInterface.(*model.User)\n\t}\n\tevent := &model.Event{}\n\tphysicalLocation := &model.PhysicalLocation{}\n\tonlineLocation := &model.OnlineLocation{}\n\tif err := jsonapi.UnmarshalPayload(ctx.Request.Body, event); err != nil {\n\t\tmisc.ReturnStandardError(ctx, http.StatusBadRequest, \"cannot unmarshal JSON of request: \"+err.Error())\n\t\treturn\n\t} else if event.Title == nil ||\n\t\tevent.TimeBegin == nil ||\n\t\tevent.TimeEnd == nil ||\n\t\tevent.Type == nil ||\n\t\treflect.ValueOf(event.Location).IsNil() {\n\t\tmisc.ReturnStandardError(ctx, http.StatusBadRequest, \"not all fields required are provided\")\n\t\treturn\n\t} else if eventType, exists := event.Location.(map[string]interface{})[\"type\"]; !exists || (eventType != \"physical\" && eventType != \"online\") {\n\t\tmisc.ReturnStandardError(ctx, http.StatusBadRequest, \"illegal event type\")\n\t\treturn\n\t} else if eventType == \"physical\" &&\n\t\t(mapstructure.Decode(event.Location, physicalLocation) != nil ||\n\t\t\tphysicalLocation.Address == \"\" ||\n\t\t\tphysicalLocation.ZipCode == \"\") {\n\t\tmisc.ReturnStandardError(ctx, http.StatusBadRequest, \"illegal physical location\")\n\t\treturn\n\t} else if eventType == \"online\" &&\n\t\t(mapstructure.Decode(event.Location, onlineLocation) != nil ||\n\t\t\tonlineLocation.Platform == \"\" ||\n\t\t\tonlineLocation.Link == \"\") {\n\t\tmisc.ReturnStandardError(ctx, http.StatusBadRequest, \"illegal online location\")\n\t\treturn\n\t}\n\tevent.OrganizerID = &user.ID\n\tevent.Organizer = user\n\timages := event.Images\n\tdb := ctx.MustGet(\"DB\").(*gorm.DB)\n\ttx := db.Begin()\n\t// we must omit images as inspection has to be gone through before they are linked\n\tif err := 
tx.Omit(\"Images\").Save(event).Error; err != nil {\n\t\tmisc.ReturnStandardError(ctx, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\t// link images to this event\n\teventString := \"events\"\n\tfor _, image := range images {\n\t\tif image.ID <= 0 {\n\t\t\tmisc.ReturnStandardError(ctx, http.StatusBadRequest, \"invalid image file ID\")\n\t\t} else if err := db.Where(image).Find(image).Error; errors.Is(err, gorm.ErrRecordNotFound) {\n\t\t\tmisc.ReturnStandardError(ctx, http.StatusNotFound, \"image specified not found\")\n\t\t} else if err != nil {\n\t\t\tmisc.ReturnStandardError(ctx, http.StatusInternalServerError, err.Error())\n\t\t} else if *image.Status != \"active\" || *image.Type != \"images\" {\n\t\t\tmisc.ReturnStandardError(ctx, http.StatusBadRequest, \"image specified is not active or is not an image\")\n\t\t} else if image.LinkType != nil {\n\t\t\tmisc.ReturnStandardError(ctx, http.StatusBadRequest, \"image has been linked to some other resource object\")\n\t\t} else if err := db.Model(&image).Updates(model.File{LinkID: &event.ID, LinkType: &eventString}).Error; err != nil {\n\t\t\tmisc.ReturnStandardError(ctx, http.StatusInternalServerError, err.Error())\n\t\t} else {\n\t\t\tcontinue\n\t\t}\n\t\t// roll back the action of event creation if any of the images cannot pass integrity check\n\t\ttx.Rollback()\n\t\treturn\n\t}\n\tif err := tx.Commit().Error; err != nil {\n\t\ttx.Rollback()\n\t\tmisc.ReturnStandardError(ctx, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\tctx.Status(http.StatusCreated)\n\tif err := jsonapi.MarshalPayload(ctx.Writer, event); err != nil {\n\t\tmisc.ReturnStandardError(ctx, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n}",
"func newEvent(id string, img model.Image) Event {\n\treturn Event{\n\t\tSource: \"kyma-showcase\",\n\t\tSpecVersion: \"1.0\",\n\t\tEventTypeVersion: \"v1\",\n\t\tData: img.ID,\n\t\tId: id,\n\t\tDataContentType: \"application/json\",\n\t\tEventType: eventType,\n\t}\n}",
"func NewEvent(id, eventType string, version int, payload []byte) *Event {\n\tpayloadStr := string(payload)\n\treturn &Event{\n\t\tAggregateID: id,\n\t\tEventType: eventType,\n\t\tVersion: version,\n\t\tEventAt: time.Now(),\n\t\tPayload: &payloadStr,\n\t}\n}",
"func (s *server) createEvent(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tdefer r.Body.Close()\n\n\t// Read the body out into a buffer.\n\tbuf, err := ioutil.ReadAll(r.Body)\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, \"%s\", err)\n\t\treturn\n\t}\n\n\t// Read the body as generic JSON, so we can perform JDDF validation on it.\n\t//\n\t// If the request body is invalid JSON, send the user a 400 Bad Request.\n\tvar eventRaw interface{}\n\tif err := json.Unmarshal(buf, &eventRaw); err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(w, \"%s\", err)\n\t\treturn\n\t}\n\n\t// Validate the event (in eventRaw) against our schema for JDDF events.\n\t//\n\t// In practice, there will never be errors arising here -- see the jddf-go\n\t// docs for details, but basically jddf.Validator.Validate can only error if\n\t// you use \"ref\" in a cyclic manner in your schemas.\n\t//\n\t// Therefore, we ignore the possibility of an error here.\n\tvalidator := jddf.Validator{}\n\tvalidationResult, _ := validator.Validate(s.EventSchema, eventRaw)\n\n\t// If there were validation errors, then we write them out to the response\n\t// body, and send the user a 400 Bad Request.\n\tif len(validationResult.Errors) != 0 {\n\t\tencoder := json.NewEncoder(w)\n\t\tif err := encoder.Encode(validationResult.Errors); err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tfmt.Fprintf(w, \"%s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// If we made it here, the request body contained JSON that passed our schema.\n\t// Let's now write it into the database.\n\t//\n\t// The events table has a \"payload\" column of type \"jsonb\". In Golang-land,\n\t// you can send that to Postgres by just using []byte. 
The user's request\n\t// payload is already in that format, so we'll use that.\n\t_, err = s.DB.ExecContext(r.Context(), `\n\t\tinsert into events (payload) values ($1)\n\t`, buf)\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, \"%s\", err)\n\t\treturn\n\t}\n\n\t// We're done!\n\tw.WriteHeader(http.StatusOK)\n\tfmt.Fprintf(w, \"%s\", buf)\n}",
"func (wh *Webhook) Create(accountId string, data map[string]interface{}, extraHeaders map[string]string) (map[string]interface{}, error) {\n\tif accountId != \"\" {\n\t\turl := fmt.Sprintf(\"/%s%s/%s%s\", constants.VERSION_V2, constants.ACCOUNT_URL, url.PathEscape(accountId), constants.WEBHOOK)\n\t\treturn wh.Request.Post(url, data, extraHeaders)\n\t}\n\turl := fmt.Sprintf(\"/%s%s\", constants.VERSION_V1, constants.WEBHOOK)\n\treturn wh.Request.Post(url, data, extraHeaders)\n}",
"func (command HelloWorldResource) Create(ctx context.Context, awsConfig awsv2.Config,\n\tevent *CloudFormationLambdaEvent,\n\tlogger *zerolog.Logger) (map[string]interface{}, error) {\n\trequest := HelloWorldResourceRequest{}\n\trequestPropsErr := json.Unmarshal(event.ResourceProperties, &request)\n\tif requestPropsErr != nil {\n\t\treturn nil, requestPropsErr\n\t}\n\tlogger.Info().Msgf(\"create: Hello %s\", request.Message)\n\treturn map[string]interface{}{\n\t\t\"Resource\": \"Created message: \" + request.Message,\n\t}, nil\n}",
"func NewEvent(x, y float64, button, event string) Event {\n\treturn Event{\n\t\tPoint2: floatgeom.Point2{x, y},\n\t\tButton: button,\n\t\tEvent: event,\n\t}\n}",
"func NewCustomEvent(eventType model.EventType, marshaler easyjson.Marshaler, tags ...string) *CustomEvent {\n\treturn NewCustomEventLazy(eventType, func() easyjson.Marshaler {\n\t\treturn marshaler\n\t}, tags...)\n}",
"func (c *Client) TimestampCreate(ctx context.Context, hash string, options *CreateOptions) (*TimestampResponse, error) {\n\n\ttype createRequest struct {\n\t\tComment string `json:\"comment,omitempty\"`\n\t\tHash string `json:\"hash\"`\n\t\tNotifications []Notification `json:\"notifications,omitempty\"`\n\t\tURL string `json:\"url,omitempty\"`\n\t}\n\n\tcReq := &createRequest{\n\t\tHash: hash,\n\t}\n\n\tif options != nil {\n\t\tcReq.Comment = options.Comment\n\t\tcReq.URL = options.URL\n\t\tcReq.Notifications = options.Notifications\n\t}\n\n\tpayload, err := json.Marshal(cReq)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tendpoint := path.Join(\"timestamp\", \"create\")\n\twrapper, err := c.Request(ctx, http.MethodPost, endpoint, bytes.NewReader(payload))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &TimestampResponse{}\n\terr = json.Unmarshal(wrapper.Data, resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}",
"func (c *EcomClient) CreateWebhook(ctx context.Context, p *CreateWebhookRequest) (*WebhookResponse, error) {\n\trequest, err := json.Marshal(&p)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%w: client: json marshal\", err)\n\t}\n\n\turl := c.endpoint + \"/webhooks\"\n\tbody := strings.NewReader(string(request))\n\tres, err := c.request(http.MethodPost, url, body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%w: request\", err)\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode >= 400 {\n\t\tvar e badRequestResponse\n\t\tdec := json.NewDecoder(res.Body)\n\t\tdec.DisallowUnknownFields()\n\t\tif err := dec.Decode(&e); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"%w: client decode\", err)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"status: %d, code: %s, message: %s\", e.Status, e.Code, e.Message)\n\t}\n\n\tvar webhook WebhookResponse\n\tif err = json.NewDecoder(res.Body).Decode(&webhook); err != nil {\n\t\treturn nil, fmt.Errorf(\"%w: decode\", err)\n\t}\n\treturn &webhook, nil\n}",
"func (t *TargetToExtent) Create(server *Server) (*http.Response, error) {\n\tendpoint := \"/api/v1.0/services/iscsi/targettoextent/\"\n\tvar targetToExtent TargetToExtent\n\tvar e interface{}\n\tresp, err := server.getSlingConnection().Post(endpoint).BodyJSON(t).Receive(&targetToExtent, &e)\n\tif err != nil {\n\t\tglog.Warningln(err)\n\t\treturn resp, err\n\t}\n\n\tif resp.StatusCode != 201 {\n\t\tbody, _ := json.Marshal(e)\n\t\treturn resp, fmt.Errorf(\"Error creating TargetToExtent for %+v - message: %s, status: %d\", *t, string(body), resp.StatusCode)\n\t}\n\n\tt.CopyFrom(&targetToExtent)\n\n\treturn resp, nil\n}",
"func CreateLambdaCloudwatchAlarm(region string, functionName string, metricName string, namespace string, threshold float64, action string) bool {\n\tawsSession, _ := InitAwsSession(region)\n\tsvc := cloudwatch.New(awsSession)\n\tinput := &cloudwatch.PutMetricAlarmInput{\n\t\tAlarmName: aws.String(fmt.Sprintf(\"%v on %v\", metricName, functionName)),\n\t\tComparisonOperator: aws.String(cloudwatch.ComparisonOperatorGreaterThanOrEqualToThreshold),\n\t\tEvaluationPeriods: aws.Int64(1),\n\t\tMetricName: aws.String(metricName),\n\t\tNamespace: aws.String(namespace),\n\t\tPeriod: aws.Int64(60),\n\t\tStatistic: aws.String(cloudwatch.StatisticSum),\n\t\tThreshold: aws.Float64(threshold),\n\t\tActionsEnabled: aws.Bool(true),\n\t\tAlarmDescription: aws.String(fmt.Sprintf(\"%v on %v greater than %v\", metricName, functionName, threshold)),\n\n\t\tDimensions: []*cloudwatch.Dimension{\n\t\t\t{\n\t\t\t\tName: aws.String(\"FunctionName\"),\n\t\t\t\tValue: aws.String(functionName),\n\t\t\t},\n\t\t},\n\n\t\tAlarmActions: []*string{\n\t\t\taws.String(action),\n\t\t},\n\t}\n\n\t// Debug input\n\t// fmt.Println(input)\n\n\t_, err := svc.PutMetricAlarm(input)\n\tif err != nil {\n\t\tfmt.Println(\"Error\", err)\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func NewEvent(x, y float64, button Button, event string) Event {\n\treturn Event{\n\t\tPoint2: floatgeom.Point2{x, y},\n\t\tButton: button,\n\t\tEvent: event,\n\t}\n}",
"func (a *Client) CreateAnomalyDetectionMetricEvent(params *CreateAnomalyDetectionMetricEventParams, authInfo runtime.ClientAuthInfoWriter) (*CreateAnomalyDetectionMetricEventCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateAnomalyDetectionMetricEventParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"createAnomalyDetectionMetricEvent\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/anomalyDetection/metricEvents\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &CreateAnomalyDetectionMetricEventReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*CreateAnomalyDetectionMetricEventCreated)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for createAnomalyDetectionMetricEvent: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func NewWebhook(url string, filterFnString string, timeout uint64) (*Webhook, error) {\n\n\tvar err error\n\n\tif url == \"\" {\n\t\terr = errors.New(\"url parameter must be defined for webhook events.\")\n\t\treturn nil, err\n\t}\n\n\twh := &Webhook{\n\t\turl: url,\n\t}\n\tif filterFnString != \"\" {\n\t\twh.filter = NewJSEventFunction(filterFnString)\n\t}\n\n\tif timeout != 0 {\n\t\twh.timeout = time.Duration(timeout) * time.Second\n\t} else {\n\t\twh.timeout = time.Duration(kDefaultWebhookTimeout) * time.Second\n\t}\n\n\treturn wh, err\n}",
"func (pr *PrMock) CreatePullRequestEvent(action string) *gogh.PullRequestEvent {\n\treturn &gogh.PullRequestEvent{\n\t\tAction: utils.String(action),\n\t\tNumber: pr.PullRequest.Number,\n\t\tPullRequest: pr.PullRequest,\n\t\tRepo: pr.PullRequest.Base.Repo,\n\t\tSender: pr.PullRequest.User,\n\t}\n}",
"func (b *bridge) createEvent(action string) *Event {\n\tevent := createEvent(action)\n\tevent.Source = b.source\n\tevent.Actor = b.actor\n\tevent.Request = b.request\n\n\treturn event\n}",
"func (c *SmartThingsClient) DeviceCreateEvent(deviceID string, command ...DeviceEvent) error {\n\ts := make([]DeviceEvent, 0)\n\tfor _, c := range command {\n\t\ts = append(s, c)\n\t}\n\tb := &DeviceEventList{\n\t\tDeviceEvents: s,\n\t}\n\treq, err := c.newRequest(http.MethodPost, fmt.Sprintf(\"/v1/devices/%s/events\", deviceID), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = c.do(req, nil)\n\treturn err\n}",
"func (a *mongoDbAdapter) makeCloudEvent(data bson.M) (*cloudevents.Event, error) {\n\t// Create Event.\n\tevent := cloudevents.NewEvent(cloudevents.VersionV1)\n\n\t// Set cloud event specs and attributes. TODO: issue #43\n\t// \t\tID -> id of mongo change object\n\t// \t\tSource -> database/collection.\n\t// \t\tType -> type of change either insert, delete or update.\n\t//\t\tData -> data payload containing either id only for\n\t// deletion or full object for other changes.\n\tevent.SetID(data[\"_id\"].(bson.M)[\"_data\"].(string))\n\tevent.SetSource(a.ceSource)\n\t// event.SetSource(data[\"ns\"].(bson.M)[\"db\"].(string) + \"/\" + data[\"ns\"].(bson.M)[\"coll\"].(string))\n\tevent.SetType(data[\"operationType\"].(string))\n\n\t// Add payload if replace or insert, else add document key.\n\tif data[\"operationType\"].(string) == \"delete\" {\n\t\tevent.SetData(cloudevents.ApplicationJSON, data[\"documentKey\"].(bson.M))\n\t} else {\n\t\tevent.SetData(cloudevents.ApplicationJSON, data[\"fullDocument\"].(bson.M))\n\t}\n\treturn &event, nil\n}",
"func CreateEventRecorder(kubeClient kubernetes.Interface) kube_record.EventRecorder {\n\teventBroadcaster := kube_record.NewBroadcaster()\n\teventBroadcaster.StartLogging(glog.Infof)\n\n\tif _, isfake := kubeClient.(*fake.Clientset); !isfake {\n\t\teventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events(\"\")})\n\t}\n\treturn eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: \"hostport-manager\"})\n}",
"func NewCustomEvent(userID, sessionID, context string) CustomEvent {\n\treturn CustomEvent{\n\t\tUserID: userID,\n\t\tSessionID: sessionID,\n\t\tContext: context,\n\t}\n}",
"func (w Webhook) Create(ctx context.Context, webhook *postmand.Webhook) error {\n\tquery, args := insertQuery(\"webhooks\", webhook)\n\t_, err := w.db.ExecContext(ctx, query, args...)\n\treturn err\n}",
"func NewCustomEvent(tp EventType, structure *Structure) *Event {\n\te := C.gst_event_new_custom(C.GstEventType(tp), structure.g())\n\tif e == nil {\n\t\treturn nil\n\t}\n\tr := new(Event)\n\tr.SetPtr(glib.Pointer(e))\n\treturn r\n}",
"func (c *FakeCloudwatchEventTargets) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(cloudwatcheventtargetsResource, c.ns, opts))\n\n}",
"func (service EventService) CreateEvent(Event models.EventModel) ([]byte, error) {\n\texisting := service.repository.CheckEventExists(Event)\n\tif existing.ID == 0 {\n\t\tnewEvent := service.repository.CreateEvent(Event)\n\t\treturn json.Marshal(newEvent)\n\t}\n\treturn []byte(\"\"), exceptions.EventExistsException()\n}",
"func NewTektonCloudEventData(taskRun *v1alpha1.TaskRun) TektonCloudEventData {\n\treturn TektonCloudEventData{\n\t\tTaskRun: taskRun,\n\t}\n}",
"func (r *PropertiesConversionEventsService) Create(parent string, googleanalyticsadminv1alphaconversionevent *GoogleAnalyticsAdminV1alphaConversionEvent) *PropertiesConversionEventsCreateCall {\n\tc := &PropertiesConversionEventsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.parent = parent\n\tc.googleanalyticsadminv1alphaconversionevent = googleanalyticsadminv1alphaconversionevent\n\treturn c\n}",
"func (c *gitlabClient) CreateEvents(context.Context, string, []interface{}) ([]sdk.VCSCreateEvent, error) {\n\treturn nil, fmt.Errorf(\"Not implemented on Gitlab\")\n}",
"func New(name string, data M) *BasicEvent {\n\treturn NewBasic(name, data)\n}",
"func (o *CloudTargetCreateParams) WithHTTPClient(client *http.Client) *CloudTargetCreateParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (r *ProjectsTraceSinksService) Create(parent string, tracesink *TraceSink) *ProjectsTraceSinksCreateCall {\n\tc := &ProjectsTraceSinksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.parent = parent\n\tc.tracesink = tracesink\n\treturn c\n}",
"func Create(c *eclcloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {\n\tb, err := opts.ToServerCreateMap()\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn\n\t}\n\t_, r.Err = c.Post(createURL(c), b, &r.Body, &eclcloud.RequestOpts{\n\t\tOkCodes: []int{200},\n\t})\n\treturn\n}",
"func createRecorder(kubecli *kubernetes.Clientset) record.EventRecorder {\n\t// Create a new broadcaster which will send events we generate to the apiserver\n\teventBroadcaster := record.NewBroadcaster()\n\teventBroadcaster.StartLogging(glog.Infof)\n\teventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{\n\t\tInterface: v1core.New(kubecli.CoreV1().RESTClient()).Events(apiv1.NamespaceAll),\n\t})\n\t// this EventRecorder can be used to send events to this EventBroadcaster\n\t// with the given event source.\n\treturn eventBroadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{Component: \"kubeturbo\"})\n}",
"func NewEvent(args interface{}) Event {\n\tea := reflect.Indirect(reflect.ValueOf(args))\n\treturn Event{\n\t\tName: ea.Type().Name(),\n\t\tAt: time.Now().UTC(),\n\t\tArgs: ea.Interface(),\n\t}\n}",
"func NewCloudEventResource(r *PipelineResource) (*CloudEventResource, error) {\n\tif r.Spec.Type != PipelineResourceTypeCloudEvent {\n\t\treturn nil, fmt.Errorf(\"CloudEventResource: Cannot create a Cloud Event resource from a %s Pipeline Resource\", r.Spec.Type)\n\t}\n\tvar targetURI string\n\tvar targetURISpecified bool\n\n\tfor _, param := range r.Spec.Params {\n\t\tif strings.EqualFold(param.Name, \"TargetURI\") {\n\t\t\ttargetURI = param.Value\n\t\t\tif param.Value != \"\" {\n\t\t\t\ttargetURISpecified = true\n\t\t\t}\n\t\t}\n\t}\n\n\tif !targetURISpecified {\n\t\treturn nil, fmt.Errorf(\"CloudEventResource: Need URI to be specified in order to create a CloudEvent resource %s\", r.Name)\n\t}\n\treturn &CloudEventResource{\n\t\tName: r.Name,\n\t\tType: r.Spec.Type,\n\t\tTargetURI: targetURI,\n\t}, nil\n}",
"func (c *ApiService) CreateWebhook(ServiceSid string, params *CreateWebhookParams) (*VerifyV2Webhook, error) {\n\tpath := \"/v2/Services/{ServiceSid}/Webhooks\"\n\tpath = strings.Replace(path, \"{\"+\"ServiceSid\"+\"}\", ServiceSid, -1)\n\n\tdata := url.Values{}\n\theaders := make(map[string]interface{})\n\n\tif params != nil && params.FriendlyName != nil {\n\t\tdata.Set(\"FriendlyName\", *params.FriendlyName)\n\t}\n\tif params != nil && params.EventTypes != nil {\n\t\tfor _, item := range *params.EventTypes {\n\t\t\tdata.Add(\"EventTypes\", item)\n\t\t}\n\t}\n\tif params != nil && params.WebhookUrl != nil {\n\t\tdata.Set(\"WebhookUrl\", *params.WebhookUrl)\n\t}\n\tif params != nil && params.Status != nil {\n\t\tdata.Set(\"Status\", *params.Status)\n\t}\n\tif params != nil && params.Version != nil {\n\t\tdata.Set(\"Version\", *params.Version)\n\t}\n\n\tresp, err := c.requestHandler.Post(c.baseURL+path, data, headers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tps := &VerifyV2Webhook{}\n\tif err := json.NewDecoder(resp.Body).Decode(ps); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps, err\n}",
"func (f *Person) createEvent(name string, year int) models.Event {\n\tvar city external.City\n\tif len(f.events) == 0 {\n\t\tcity = external.RandomCity()\n\t} else {\n\t\trecentEvent := f.events[len(f.events)-1]\n\t\tcity = external.RandomCloseCity(recentEvent.Latitude, recentEvent.Longitude)\n\t}\n\tevent := models.Event{\n\t\tEventID: util.RandomID(),\n\t\tPersonID: f.model.PersonID,\n\t\tLatitude: city.Latitude,\n\t\tLongitude: city.Longitude,\n\t\tCountry: city.Country,\n\t\tCity: city.City,\n\t\tEventType: name,\n\t\tYear: year,\n\t}\n\tf.events = append(f.events, event)\n\treturn event\n}",
"func createRecorder(kubecli *kubernetes.Clientset) record.EventRecorder {\n\t// Create a new broadcaster which will send events we generate to the apiserver\n\teventBroadcaster := record.NewBroadcaster()\n\teventBroadcaster.StartLogging(glog.Infof)\n\teventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{\n\t\tInterface: v1core.New(kubecli.Core().RESTClient()).Events(apiv1.NamespaceAll)})\n\t// this EventRecorder can be used to send events to this EventBroadcaster\n\t// with the given event source.\n\treturn eventBroadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{Component: \"kubeturbo\"})\n}",
"func (s *WebhookServiceOp) Create(ctx context.Context, webhook Webhook) (*Webhook, error) {\n\tpath := fmt.Sprintf(\"%s.json\", webhooksBasePath)\n\twrappedData := WebhookResource{Webhook: &webhook}\n\tresource := new(WebhookResource)\n\terr := s.client.Post(ctx, path, wrappedData, resource)\n\treturn resource.Webhook, err\n}",
"func (r *ProjectsLogServicesSinksService) Create(projectsId string, logServicesId string, logsink *LogSink) *ProjectsLogServicesSinksCreateCall {\n\treturn &ProjectsLogServicesSinksCreateCall{\n\t\ts: r.s,\n\t\tprojectsId: projectsId,\n\t\tlogServicesId: logServicesId,\n\t\tlogsink: logsink,\n\t\tcaller_: googleapi.JSONCall{},\n\t\tparams_: make(map[string][]string),\n\t\tpathTemplate_: \"v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks\",\n\t}\n}",
"func (c *V2) Create(endpoint string, model models.Model) (*http.Response, *gabs.Container, error) {\n\tjsonPayload, err := c.PrepareModel(model)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tlog.Println(\"[DEBUG] CREATE Payload: \", jsonPayload.String())\n\n\treq, err := c.PrepareRequest(http.MethodPost, endpoint, jsonPayload, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresponse, err := c.Do(req)\n\tif err != nil {\n\t\treturn response, nil, err\n\t}\n\n\tcontainer, err := GetContainer(response)\n\tif err != nil {\n\t\treturn response, nil, err\n\t}\n\treturn response, container, nil\n}",
"func (c *DefaultApiService) CreateSinkValidate(Sid string, params *CreateSinkValidateParams) (*EventsV1SinkSinkValidate, error) {\n\tpath := \"/v1/Sinks/{Sid}/Validate\"\n\tpath = strings.Replace(path, \"{\"+\"Sid\"+\"}\", Sid, -1)\n\n\tdata := url.Values{}\n\theaders := make(map[string]interface{})\n\n\tif params != nil && params.TestId != nil {\n\t\tdata.Set(\"TestId\", *params.TestId)\n\t}\n\n\tresp, err := c.requestHandler.Post(c.baseURL+path, data, headers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tps := &EventsV1SinkSinkValidate{}\n\tif err := json.NewDecoder(resp.Body).Decode(ps); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps, err\n}",
"func hNewEvent(c echo.Context) error {\n\tvar e httpError\n\n\tif (len(c.FormValue(\"code\")) == 0) || (len(c.FormValue(\"title\")) == 0) {\n\t\treturn c.JSON(http.StatusNotAcceptable, \"input information is not valid\")\n\t}\n\tuserCODE := c.FormValue(\"code\")\n\n\t// read from token user id\n\tvar tokenUserID int64\n\ttokenUserID = 1\n\n\tu, errGet := blog.GetUserByCode(tokenUserID, userCODE)\n\tif errGet != nil {\n\t\te.TheError = \"user code \" + userCODE + \" not found.\"\n\t\treturn c.JSON(http.StatusNotFound, e)\n\t}\n\tvar ev Event\n\tev.OpenedByUserID = u.ID\n\tev.Contents = c.FormValue(\"content\")\n\tev.Title = c.FormValue(\"title\")\n\n\terrAdd := blog.AddEvent(&ev)\n\tif errAdd != nil {\n\t\te.TheError = errAdd.Error()\n\t\treturn c.JSON(http.StatusInternalServerError, e)\n\t}\n\tfname, errUpload := lowlevelUploadFile(c, u.ID, ev.ID)\n\tif errUpload != nil {\n\t\te.TheError = \"could not upload file: \" + errUpload.Error()\n\t\treturn c.JSON(http.StatusInternalServerError, e)\n\t}\n\te.TheError = \"OK\" + \" - \" + fname\n\treturn c.JSON(http.StatusOK, e)\n}",
"func (x *fastReflection_EventCreateBatch) New() protoreflect.Message {\n\treturn new(fastReflection_EventCreateBatch)\n}"
] | [
"0.687223",
"0.5633696",
"0.5631251",
"0.538129",
"0.5321042",
"0.529999",
"0.5294296",
"0.5265981",
"0.5259876",
"0.5250571",
"0.52373767",
"0.52258664",
"0.52252287",
"0.52154547",
"0.5198454",
"0.5183503",
"0.5154956",
"0.5070703",
"0.50693345",
"0.5064346",
"0.5045063",
"0.5028351",
"0.50175047",
"0.501624",
"0.49743232",
"0.49694666",
"0.4968072",
"0.49677986",
"0.49392426",
"0.48700213",
"0.48367006",
"0.48077118",
"0.47747523",
"0.4771383",
"0.4767434",
"0.4753219",
"0.47443905",
"0.47423786",
"0.47312295",
"0.47311044",
"0.471264",
"0.4699825",
"0.46918404",
"0.4678747",
"0.4672303",
"0.46663183",
"0.46660897",
"0.46656394",
"0.46649957",
"0.46617705",
"0.4650362",
"0.4633898",
"0.46184286",
"0.46116936",
"0.46049657",
"0.45986843",
"0.45841494",
"0.45840976",
"0.4562924",
"0.452426",
"0.45241666",
"0.45202488",
"0.4513365",
"0.44966713",
"0.44960818",
"0.44864455",
"0.44771236",
"0.44731748",
"0.44709426",
"0.4467563",
"0.44583565",
"0.44539988",
"0.44533214",
"0.44519097",
"0.44517222",
"0.44484675",
"0.44336203",
"0.44324479",
"0.44161308",
"0.4410667",
"0.44099662",
"0.4400633",
"0.43890125",
"0.4387581",
"0.43837088",
"0.4371499",
"0.43670204",
"0.43663692",
"0.43553016",
"0.43543574",
"0.43443355",
"0.43440518",
"0.43435705",
"0.43382955",
"0.43343925",
"0.43303156",
"0.43218884",
"0.43075365",
"0.43029174",
"0.43024352"
] | 0.7889584 | 0 |
Update takes the representation of a cloudwatchEventTarget and updates it. Returns the server's representation of the cloudwatchEventTarget, and an error, if there is any. | Update принимает представление cloudwatchEventTarget и обновляет его. Возвращает представление cloudwatchEventTarget, предоставленное сервером, и ошибку, если она есть. | func (c *FakeCloudwatchEventTargets) Update(cloudwatchEventTarget *v1alpha1.CloudwatchEventTarget) (result *v1alpha1.CloudwatchEventTarget, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(cloudwatcheventtargetsResource, c.ns, cloudwatchEventTarget), &v1alpha1.CloudwatchEventTarget{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.CloudwatchEventTarget), err
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c *FakeCloudwatchEventTargets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewPatchSubresourceAction(cloudwatcheventtargetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}",
"func (c *Collection) UpdateEvent(\n\tkey, typ string, ts time.Time, ordinal int64, value interface{},\n) (*Event, error) {\n\theaders := map[string]string{\"Content-Type\": \"application/json\"}\n\treturn c.innerUpdateEvent(key, typ, ts, ordinal, value, headers)\n}",
"func (e *Event) Update(c echo.Context, req *Update) (*takrib.Event, error) {\n\t// if err := e.rbac.EnforceEvent(c, req.ID); err != nil {\n\t// \treturn nil, err\n\t// }\n\n\tevent, err := e.udb.View(e.db, req.ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstructs.Merge(event, req)\n\tif err := e.udb.Update(e.db, event); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn event, nil\n}",
"func (e *EventAPI) Update(eventType *EventType) error {\n\tconst errMsg = \"unable to update event type\"\n\n\tresponse, err := e.client.httpPUT(e.backOffConf.create(), e.eventURL(eventType.Name), eventType, errMsg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != http.StatusOK {\n\t\tbuffer, err := io.ReadAll(response.Body)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"%s: unable to read response body\", errMsg)\n\t\t}\n\t\treturn decodeResponseToError(buffer, \"unable to update event type\")\n\t}\n\n\treturn nil\n}",
"func UpdateEvent(c *gin.Context) {\n\tvar inp model.Event\n\n\tc.BindJSON(&inp)\n\tc.JSON(http.StatusOK, serviceEvent.UpdateEvent(&inp))\n}",
"func (e *PrecisionTiming) Update(e2 Event) error {\n\tif e.Type() != e2.Type() {\n\t\treturn fmt.Errorf(\"statsd event type conflict: %s vs %s \", e.String(), e2.String())\n\t}\n\tp := e2.Payload().(PrecisionTiming)\n\te.Count += p.Count\n\te.Value += p.Value\n\te.Min = time.Duration(minInt64(int64(e.Min), int64(p.Min)))\n\te.Max = time.Duration(maxInt64(int64(e.Max), int64(p.Min)))\n\treturn nil\n}",
"func (r *DeviceManagementAutopilotEventRequest) Update(ctx context.Context, reqObj *DeviceManagementAutopilotEvent) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}",
"func (c *FakeCloudwatchEventTargets) Get(name string, options v1.GetOptions) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewGetAction(cloudwatcheventtargetsResource, c.ns, name), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}",
"func (r *DeviceManagementTroubleshootingEventRequest) Update(ctx context.Context, reqObj *DeviceManagementTroubleshootingEvent) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}",
"func (r *EventTagsService) Update(profileId int64, eventtag *EventTag) *EventTagsUpdateCall {\n\tc := &EventTagsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.profileId = profileId\n\tc.eventtag = eventtag\n\treturn c\n}",
"func (s *TargetCRUD) Update(arg ...crud.Arg) (crud.Arg, error) {\n\tevent := eventFromArg(arg[0])\n\ttarget := targetFromStuct(event)\n\toldTarget, ok := event.OldObj.(*state.Target)\n\tif !ok {\n\t\tpanic(\"unexpected type, expected *state.Target\")\n\t}\n\tprint.DeletePrintln(\"deleting target\", *oldTarget.Target.Target,\n\t\t\"from upstream\", *oldTarget.Upstream.ID)\n\tprint.CreatePrintln(\"creating target\", *target.Target.Target,\n\t\t\"on upstream\", *target.Upstream.ID)\n\treturn target, nil\n}",
"func (es *EntityEventService) Update(campID int, entID int, evtID int, evt SimpleEntityEvent) (*EntityEvent, error) {\n\tvar err error\n\tend := EndpointCampaign\n\n\tif end, err = end.id(campID); err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid Campaign ID: %w\", err)\n\t}\n\tend = end.concat(endpointEntity)\n\n\tif end, err = end.id(entID); err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid Entity ID: %w\", err)\n\t}\n\tend = end.concat(es.end)\n\n\tif end, err = end.id(evtID); err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid EntityEvent ID: %w\", err)\n\t}\n\n\tb, err := json.Marshal(evt)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot marshal SimpleEntityEvent: %w\", err)\n\t}\n\n\tvar wrap struct {\n\t\tData *EntityEvent `json:\"data\"`\n\t}\n\n\tif err = es.client.put(end, bytes.NewReader(b), &wrap); err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot update EntityEvent for Campaign (ID: %d): '%w'\", campID, err)\n\t}\n\n\treturn wrap.Data, nil\n}",
"func (c *Collection) innerUpdateEvent(\n\tkey, typ string, ts time.Time, ordinal int64, value interface{},\n\theaders map[string]string,\n) (*Event, error) {\n\tevent := &Event{\n\t\tCollection: c,\n\t\tKey: key,\n\t\tOrdinal: ordinal,\n\t\tTimestamp: ts,\n\t\tType: typ,\n\t}\n\n\t// Encode the JSON message into a raw value that we can return to the\n\t// client if necessary.\n\tif rawMsg, err := json.Marshal(value); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tevent.Value = json.RawMessage(rawMsg)\n\t}\n\n\t// Perform the actual PUT\n\tpath := fmt.Sprintf(\"%s/%s/events/%s/%d/%d\", c.Name, key, typ,\n\t\tts.UnixNano()/1000000, ordinal)\n\tresp, err := c.client.emptyReply(\"PUT\", path, headers,\n\t\tbytes.NewBuffer(event.Value), 204)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get the Location header and parse it. The Header will give us the\n\t// Ordinal.\n\tlocation := resp.Header.Get(\"Location\")\n\tif location == \"\" {\n\t\treturn nil, fmt.Errorf(\"Missing Location header.\")\n\t} else if parts := strings.Split(location, \"/\"); len(parts) != 8 {\n\t\treturn nil, fmt.Errorf(\"Malformed Location header.\")\n\t} else if ts, err := strconv.ParseInt(parts[6], 10, 64); err != nil {\n\t\treturn nil, fmt.Errorf(\"Malformed Timestamp in the Location header.\")\n\t} else if ord, err := strconv.ParseInt(parts[7], 10, 64); err != nil {\n\t\treturn nil, fmt.Errorf(\"Malformed Ordinal in the Location header.\")\n\t} else {\n\t\tsecs := ts / 1000\n\t\tnsecs := (ts % 1000) * 1000000\n\t\tevent.Timestamp = time.Unix(secs, nsecs)\n\t\tevent.Ordinal = ord\n\t}\n\n\t// Get the Ref via the Etag header.\n\tif etag := resp.Header.Get(\"Etag\"); etag == \"\" {\n\t\treturn nil, fmt.Errorf(\"Missing ETag header.\")\n\t} else if parts := strings.Split(etag, `\"`); len(parts) != 3 {\n\t\treturn nil, fmt.Errorf(\"Malformed ETag header.\")\n\t} else {\n\t\tevent.Ref = parts[1]\n\t}\n\n\t// Success\n\treturn event, nil\n}",
"func (s *S3Sink) UpdateEvents(eNew *v1.Event, eOld *v1.Event) {\n\ts.eventCh.In() <- NewEventData(eNew, eOld)\n}",
"func GetEventTarget(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *EventTargetState, opts ...pulumi.ResourceOption) (*EventTarget, error) {\n\tvar resource EventTarget\n\terr := ctx.ReadResource(\"aws:cloudwatch/eventTarget:EventTarget\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (c *FakeCloudwatchEventTargets) UpdateStatus(cloudwatchEventTarget *v1alpha1.CloudwatchEventTarget) (*v1alpha1.CloudwatchEventTarget, error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewUpdateSubresourceAction(cloudwatcheventtargetsResource, \"status\", c.ns, cloudwatchEventTarget), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}",
"func (e *Timing) Update(e2 Event) error {\n\tif e.Type() != e2.Type() {\n\t\treturn fmt.Errorf(\"statsd event type conflict: %s vs %s \", e.String(), e2.String())\n\t}\n\tp := e2.Payload().(map[string]float64)\n\te.Value += p[\"val\"]\n\te.Values = append(e.Values, p[\"val\"])\n\tif e.Count == 0 { // Count will only be 0 after Reset()\n\t\te.Min = p[\"min\"]\n\t\te.Max = p[\"max\"]\n\t} else {\n\t\te.Min = minFloat64(e.Min, p[\"min\"])\n\t\te.Max = maxFloat64(e.Max, p[\"max\"])\n\t}\n\te.Count += p[\"cnt\"]\n\te.Tags = []string{}\n\treturn nil\n}",
"func (c *FakeCloudwatchEventTargets) Create(cloudwatchEventTarget *v1alpha1.CloudwatchEventTarget) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewCreateAction(cloudwatcheventtargetsResource, c.ns, cloudwatchEventTarget), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}",
"func (o *UpdateEventParams) WithHTTPClient(client *http.Client) *UpdateEventParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (c *k8sClient) OnUpdate(oldObj, newObj interface{}) {\n\tif !isResourceChanged(oldObj, newObj) {\n\t\treturn\n\t}\n\n\tselect {\n\tcase c.eventCh <- newObj:\n\tdefault:\n\t}\n}",
"func (es *EventService) Update(eventID string, e *Event) error {\n\t// PUT: /event/:eventID\n\tif e == nil {\n\t\treturn fmt.Errorf(\"nil Event\")\n\t}\n\treq, err := es.c.NewRequest(\"PUT\", \"/event/\"+eventID, e)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: return any response?\n\treturn es.c.Do(req, nil)\n}",
"func (svc *Service) Update(ownerID string, event *calendar.Event) (*calendar.Event, error) {\n\tmsEvent := convertToOutlookEvent(event)\n\t_, err := svc.client.Patch(fmt.Sprintf(\"%s/%s\", eventsURL(ownerID), event.Id), msEvent)\n\tif err != nil {\n\t\treturn &calendar.Event{}, errors.Wrap(err, \"Unable to perform Update\")\n\t}\n\treturn event, nil\n}",
"func NewEventTarget(ctx *pulumi.Context,\n\tname string, args *EventTargetArgs, opts ...pulumi.ResourceOption) (*EventTarget, error) {\n\tif args == nil || args.Arn == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Arn'\")\n\t}\n\tif args == nil || args.Rule == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Rule'\")\n\t}\n\tif args == nil {\n\t\targs = &EventTargetArgs{}\n\t}\n\tvar resource EventTarget\n\terr := ctx.RegisterResource(\"aws:cloudwatch/eventTarget:EventTarget\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func UpdateEvent(event *entity.Event) (*entity.Event, error){\n\touput,err:= json.MarshalIndent(event,\"\",\"\\t\\t\")\n\t\n\tclient := &http.Client{}\n\tURL := fmt.Sprintf(\"%s%s/%d\",baseEventURL,\"update\",event.ID)\n\treq,_ := http.NewRequest(\"PUT\",URL,bytes.NewBuffer(ouput))\n\n\t//DO return an http response\n\tres,err := client.Do(req)\n\t\n\tif err != nil {\n\t\treturn nil,err\n\t}\n\teventt := &entity.Event{}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = json.Unmarshal(body,eventt)\n\tif err != nil{\n\t\treturn nil,err\n\t}\n\treturn eventt,nil\n}",
"func (c *FakeCloudwatchEventTargets) List(opts v1.ListOptions) (result *v1alpha1.CloudwatchEventTargetList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(cloudwatcheventtargetsResource, cloudwatcheventtargetsKind, c.ns, opts), &v1alpha1.CloudwatchEventTargetList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1alpha1.CloudwatchEventTargetList{ListMeta: obj.(*v1alpha1.CloudwatchEventTargetList).ListMeta}\n\tfor _, item := range obj.(*v1alpha1.CloudwatchEventTargetList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}",
"func (c *FakeCloudwatchEventTargets) Delete(name string, options *v1.DeleteOptions) error {\n\t_, err := c.Fake.\n\t\tInvokes(testing.NewDeleteAction(cloudwatcheventtargetsResource, c.ns, name), &v1alpha1.CloudwatchEventTarget{})\n\n\treturn err\n}",
"func Update(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) {\n\t// Add your code here:\n\t// * Make API calls (use req.Session)\n\t// * Mutate the model\n\t// * Check/set any callback context (req.CallbackContext / response.CallbackContext)\n\n\t// Construct a new handler.ProgressEvent and return it\n\tresponse := handler.ProgressEvent{\n\t\tOperationStatus: handler.Success,\n\t\tMessage: \"Update complete\",\n\t\tResourceModel: currentModel,\n\t}\n\n\treturn response, nil\n\n\t// Not implemented, return an empty handler.ProgressEvent\n\t// and an error\n\treturn handler.ProgressEvent{}, errors.New(\"Not implemented: Update\")\n}",
"func (s *baseStore[T, E, TPtr, EPtr]) Update(ctx context.Context, object T) error {\n\teventPtr := s.newObjectEvent(ctx, UpdateEvent)\n\teventPtr.SetObject(object)\n\treturn s.createObjectEvent(ctx, eventPtr)\n}",
"func (t *FakeObjectTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {\n\tvar err error\n\tif t.fakingOptions.failAll != nil {\n\t\terr = t.fakingOptions.failAll.RunFakeInvocations()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif t.fakingOptions.failAt != nil {\n\t\tif gvr.Resource == \"nodes\" {\n\t\t\terr = t.fakingOptions.failAt.Node.Update.RunFakeInvocations()\n\t\t} else if gvr.Resource == \"machines\" {\n\t\t\terr = t.fakingOptions.failAt.Machine.Update.RunFakeInvocations()\n\t\t} else if gvr.Resource == \"machinedeployments\" {\n\t\t\terr = t.fakingOptions.failAt.MachineDeployment.Update.RunFakeInvocations()\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr = t.delegatee.Update(gvr, obj, ns)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif t.FakeWatcher == nil {\n\t\treturn errors.New(\"error sending event on a tracker with no watch support\")\n\t}\n\n\tif t.IsStopped() {\n\t\treturn errors.New(\"error sending event on a stopped tracker\")\n\t}\n\n\tt.FakeWatcher.Modify(obj)\n\treturn nil\n}",
"func UpdateEvent(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tif key, ok := vars[\"eventId\"]; ok {\n\t\tvar event Event\n\n\t\tdecoder := json.NewDecoder(r.Body)\n\t\terr := decoder.Decode(&event)\n\n\t\t// Get EID from request URL\n\t\tif string(event.EID) != key {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tfmt.Fprintf(w, `{\"error\": \"Bad Request\"}`)\n\t\t\treturn\n\t\t}\n\n\t\tif err != nil {\n\t\t\tLogError(w, err)\n\t\t\treturn\n\t\t}\n\n\t\t// Validate input\n\t\tif err := event.Validate(); err != nil {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tfmt.Fprintf(w, `{\"error\": \"Bad Request\"}`)\n\t\t\treturn\n\t\t}\n\n\t\tif err := event.Save(); err != nil {\n\t\t\tLogError(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(http.StatusAccepted)\n\t\tfmt.Fprintf(w, `{\"eid\": %d}`, event.EID)\n\t} else {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, `{\"error\": \"Not Found\"}`)\n\t}\n}",
"func (c *FakeCloudwatchEventTargets) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(cloudwatcheventtargetsResource, c.ns, opts))\n\n}",
"func (c *EventClient) Update() *EventUpdate {\n\tmutation := newEventMutation(c.config, OpUpdate)\n\treturn &EventUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}\n}",
"func (i *IpScheduler) OnUpdate(old, new interface{}) {}",
"func (endpointgroup *EndpointGroup) Update() error {\n\t// Get a representation of the object's original state so we can find what\n\t// to update.\n\toriginal := new(EndpointGroup)\n\terr := original.UnmarshalJSON(endpointgroup.rawData)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treadWriteFields := []string{\n\t\t\"AccessState\",\n\t\t\"Endpoints\",\n\t\t\"GroupType\",\n\t\t\"Preferred\",\n\t\t\"TargetEndpointGroupIdentifier\",\n\t}\n\n\toriginalElement := reflect.ValueOf(original).Elem()\n\tcurrentElement := reflect.ValueOf(endpointgroup).Elem()\n\n\treturn endpointgroup.Entity.Update(originalElement, currentElement, readWriteFields)\n}",
"func (e *EventHandler) OnUpdate(oldObj, newObj interface{}) {\n\tu := event.UpdateEvent{}\n\n\tif o, ok := oldObj.(client.Object); ok {\n\t\tu.ObjectOld = o\n\t} else {\n\t\tlog.Error(nil, \"OnUpdate missing ObjectOld\",\n\t\t\t\"object\", oldObj, \"type\", fmt.Sprintf(\"%T\", oldObj))\n\t\treturn\n\t}\n\n\t// Pull Object out of the object\n\tif o, ok := newObj.(client.Object); ok {\n\t\tu.ObjectNew = o\n\t} else {\n\t\tlog.Error(nil, \"OnUpdate missing ObjectNew\",\n\t\t\t\"object\", newObj, \"type\", fmt.Sprintf(\"%T\", newObj))\n\t\treturn\n\t}\n\n\tfor _, p := range e.predicates {\n\t\tif !p.Update(u) {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Invoke update handler\n\tctx, cancel := context.WithCancel(e.ctx)\n\tdefer cancel()\n\te.handler.Update(ctx, u, e.queue)\n}",
"func (c *runtimeTypedObjectClient) Update(\n\tctx context.Context, obj APIObject) error {\n\treturn c.rtc.Update(ctx, obj)\n}",
"func (r *UserExperienceAnalyticsMetricRequest) Update(ctx context.Context, reqObj *UserExperienceAnalyticsMetric) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}",
"func (f UpdateHandlerFunc) Handle(ctx context.Context, u tg.UpdatesClass) error {\n\treturn f(ctx, u)\n}",
"func (els *EventLocalStore) Update(e *entities.Event) error {\n\tels.mutex.Lock()\n\tdefer els.mutex.Unlock()\n\n\tif _, ok := els.events[e.ID]; ok {\n\t\tels.events[e.ID].Author = e.Author\n\t\tels.events[e.ID].Title = e.Title\n\t\tels.events[e.ID].Description = e.Description\n\t\tels.events[e.ID].Start = e.Start\n\t\tels.events[e.ID].End = e.End\n\n\t\treturn nil\n\t}\n\n\treturn event.ErrEventNotFound\n}",
"func (c *peer) Update(obj *api.Peer) error {\n\tobj.UpdatedAt = time.Now().UTC().Format(time.RFC3339)\n\tjsonData, err := json.Marshal(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.store.Set(path.Join(c.prefix, obj.UID), jsonData)\n}",
"func UpdateEvent(q Query, c chan error) {\n\tresult, err := Session.Run(`\n\t\tMATCH(n:EVENT)\n\t\tWHERE n.`+q.Key+`=$val\n\t\tSET n.`+q.ChangeKey+`=$val1\n\t\tRETURN n.`+q.ChangeKey+`\n\t`, map[string]interface{}{\n\t\t\"val\": q.Value,\n\t\t\"val1\": q.ChangeValue,\n\t})\n\n\tif err != nil {\n\t\tc <- err\n\t\treturn\n\t}\n\tc <- nil\n\n\tresult.Next()\n\tlog.Printf(\"Updated %s to %s\", q.Key, result.Record().GetByIndex(0).(string))\n\n\tif err = result.Err(); err != nil {\n\t\tc <- err\n\t\treturn\n\t}\n}",
"func (pgs *PGStorage) UpdateEvent(eventUUID uuid.UUID, e event.Event) (event.Event, error) {\n\tsql := `update events set \n\t\ttitle = :title, \n\t\tdatetime = :datetime, \n\t\tduration = :duration, \n\t\tdescription = :description, \n\t\tuserid = :userid, \n\t\tnotify = :notify\n\twhere uuid = :uuid`\n\t_, err := pgs.DB.NamedExecContext(pgs.Ctx, sql, map[string]interface{}{\n\t\t\"uuid\": eventUUID.String(),\n\t\t\"title\": e.Title,\n\t\t\"datetime\": e.Datetime,\n\t\t\"duration\": e.Duration,\n\t\t\"description\": e.Desc,\n\t\t\"userid\": e.User,\n\t\t\"notify\": e.Notify,\n\t})\n\tif err != nil {\n\t\treturn event.Event{}, err\n\t}\n\treturn e, nil\n}",
"func (es *EventSyncer) handleUpdateEvent(old, new interface{}) {\n\tnewEvent := new.(*corev1.Event)\n\toldEvent := old.(*corev1.Event)\n\tif newEvent.ResourceVersion == oldEvent.ResourceVersion {\n\t\t// Periodic resync will send update events for all known Deployments.\n\t\t// Two different versions of the same Deployment will always have different RVs.\n\t\treturn\n\t}\n\n\tes.addKindAndVersion(newEvent)\n\n\tgo syncToNode(watch.Modified, util.ResourceEvent, newEvent)\n\n\tsyncToStorage(es.ctx, watch.Modified, util.ResourceEvent, newEvent)\n}",
"func (c *Client) Update() goa.Endpoint {\n\treturn func(ctx context.Context, v interface{}) (interface{}, error) {\n\t\tinv := goagrpc.NewInvoker(\n\t\t\tBuildUpdateFunc(c.grpccli, c.opts...),\n\t\t\tEncodeUpdateRequest,\n\t\t\tnil)\n\t\tres, err := inv.Invoke(ctx, v)\n\t\tif err != nil {\n\t\t\treturn nil, goa.Fault(err.Error())\n\t\t}\n\t\treturn res, nil\n\t}\n}",
"func (c *Client) Update(id string, event Event, target string, enabled string, conditions []Condition) error {\n\treq := CreateBody{Event: string(event), Target: target, Enabled: enabled, Conditions: conditions}\n\tbody, err := json.Marshal(req)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treadbody := bytes.NewReader(body)\n\t_, err = c.put(c.url.String()+\"/\"+id, readbody)\n\treturn err\n}",
"func (cs *CSRSyncer) handleUpdateEvent(old, new interface{}) {\n\tnewCSR := new.(*v1beta1.CertificateSigningRequest)\n\toldCSR := old.(*v1beta1.CertificateSigningRequest)\n\tif newCSR.ResourceVersion == oldCSR.ResourceVersion {\n\t\t// Periodic resync will send update events for all known Deployments.\n\t\t// Two different versions of the same Deployment will always have different RVs.\n\t\treturn\n\t}\n\n\tcs.addKindAndVersion(newCSR)\n\tklog.V(4).Infof(\"update csr: %s\", newCSR.Name)\n\n\tgo syncToNode(watch.Modified, util.ResourceCSR, newCSR)\n\n\tsyncToStorage(cs.ctx, watch.Modified, util.ResourceCSR, newCSR)\n}",
"func Update(k8sClient client.Client, obj client.Object) error {\n\treturn k8sClient.Update(\n\t\tcontext.TODO(),\n\t\tobj,\n\t)\n}",
"func Update(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) {\n\t// Add your code here:\n\t// * Make API calls (use req.Session)\n\t// * Mutate the model\n\t// * Check/set any callback context (req.CallbackContext / response.CallbackContext)\n\tiotSvc := iot.New(req.Session)\n\n\t_, err := iotSvc.UpdateCertificate(&iot.UpdateCertificateInput{\n\t\tCertificateId: currentModel.Id,\n\t\tNewStatus: currentModel.Status,\n\t})\n\n\tif err != nil {\n\t\taerr, ok := err.(awserr.Error)\n\t\tif ok {\n\t\t\tfmt.Printf(\"%v\", aerr)\n\t\t}\n\t\tresponse := handler.ProgressEvent{\n\t\t\tOperationStatus: handler.Failed,\n\t\t\tMessage: aerr.Error(),\n\t\t}\n\t\treturn response, nil\n\n\t}\n\t// Not implemented, return an empty handler.ProgressEvent\n\t// and an error\n\tresponse := handler.ProgressEvent{\n\t\tOperationStatus: handler.Success,\n\t\tMessage: \"Certificate updated successfully\",\n\t\tResourceModel: currentModel,\n\t}\n\treturn response, nil\n}",
"func (StatusUpdatePredicate) Update(e event.UpdateEvent) bool {\n\tlog := log.Logger(context.Background(), \"controllers.common\", \"Update\")\n\n\tif e.ObjectOld == nil {\n\t\tlog.Error(nil, \"Update event has no old runtime object to update\", \"event\", e)\n\t\treturn false\n\t}\n\tif e.ObjectNew == nil {\n\t\tlog.Error(nil, \"Update event has no new runtime object for update\", \"event\", e)\n\t\treturn false\n\t}\n\n\t//Better way to do it is to get GVK from ObjectKind but Kind is dropped during decode.\n\t//For more details, check the status of the issue here\n\t//https://github.com/kubernetes/kubernetes/issues/80609\n\n\t// Try to type caste to WavefrontAlert first if it doesn't work move to namespace type casting\n\tif oldWFAlertObj, ok := e.ObjectOld.(*alertmanagerv1alpha1.WavefrontAlert); ok {\n\t\tnewWFAlertObj := e.ObjectNew.(*alertmanagerv1alpha1.WavefrontAlert)\n\t\tif !reflect.DeepEqual(oldWFAlertObj.Status, newWFAlertObj.Status) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif oldAlertsConfigObj, ok := e.ObjectOld.(*alertmanagerv1alpha1.AlertsConfig); ok {\n\t\tnewAlertsConfigObj := e.ObjectNew.(*alertmanagerv1alpha1.AlertsConfig)\n\t\tif !reflect.DeepEqual(oldAlertsConfigObj.Status, newAlertsConfigObj.Status) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func (p *PersistableEvent) Update(objectstorage.StorableObject) {\n\tpanic(\"should not be updated\")\n}",
"func (s *Dao) Update(ctx context.Context, kv *model.KVDoc) error {\n\tkeyKv := key.KV(kv.Domain, kv.Project, kv.ID)\n\tresp, err := etcdadpt.Get(ctx, keyKv)\n\tif err != nil {\n\t\topenlog.Error(err.Error())\n\t\treturn err\n\t}\n\tif resp == nil {\n\t\treturn datasource.ErrRecordNotExists\n\t}\n\n\tvar old model.KVDoc\n\terr = json.Unmarshal(resp.Value, &old)\n\tif err != nil {\n\t\topenlog.Error(err.Error())\n\t\treturn err\n\t}\n\told.LabelFormat = kv.LabelFormat\n\told.Value = kv.Value\n\told.Status = kv.Status\n\told.Checker = kv.Checker\n\told.UpdateTime = kv.UpdateTime\n\told.UpdateRevision = kv.UpdateRevision\n\n\tbytes, err := json.Marshal(old)\n\tif err != nil {\n\t\topenlog.Error(err.Error())\n\t\treturn err\n\t}\n\terr = etcdadpt.PutBytes(ctx, keyKv, bytes)\n\tif err != nil {\n\t\topenlog.Error(err.Error())\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (s *Service) Update(ctx context.Context, cusVisitAssoc types.CusVisitAssoc) error {\n\tif err := validation.ValidateStruct(&cusVisitAssoc,\n\t\tvalidation.Field(&cusVisitAssoc.ID, validation.Required),\n\t\tvalidation.Field(&cusVisitAssoc.VisitID, validation.Required),\n\t); err != nil {\n\t\treturn err\n\t} // not empty\n\treturn s.repo.Update(ctx, cusVisitAssoc)\n}",
"func (ue *UserEvent) Update(ctx context.Context) *spanner.Mutation {\n\tvalues, _ := ue.columnsToValues(UserEventWritableColumns())\n\treturn spanner.Update(\"UserEvent\", UserEventWritableColumns(), values)\n}",
"func (s *GenericWatchStorage) Update(obj runtime.Object) error {\n\ts.watcher.Suspend(watcher.FileEventModify)\n\treturn s.Storage.Update(obj)\n}",
"func (c *endpointCache) Update(event Event) {\n\tc.mtx.Lock()\n\tdefer c.mtx.Unlock()\n\n\t// Happy path.\n\tif event.Err == nil {\n\t\tc.updateCache(event.Instances)\n\t\tc.err = nil\n\t\treturn\n\t}\n\n\t// Sad path. Something's gone wrong in sd.\n\tc.logger.Log(\"err\", event.Err)\n\tif !c.options.invalidateOnError {\n\t\treturn // keep returning the last known endpoints on error\n\t}\n\tif c.err != nil {\n\t\treturn // already in the error state, do nothing & keep original error\n\t}\n\tc.err = event.Err\n\t// set new deadline to invalidate Endpoints unless non-error Event is received\n\tc.invalidateDeadline = c.timeNow().Add(c.options.invalidateTimeout)\n\treturn\n}",
"func (sn *ViewUpdateNotification) Handle(elem interface{}) {\n\tif elemEvent, ok := elem.(ViewUpdate); ok {\n\t\tif sn.validation != nil && sn.validation(elemEvent) {\n\t\t\tsn.do(func() {\n\t\t\t\tfor _, sub := range sn.subs {\n\t\t\t\t\tsub.Receive(elemEvent)\n\t\t\t\t}\n\t\t\t})\n\n\t\t\treturn\n\t\t}\n\n\t\tsn.do(func() {\n\t\t\tfor _, sub := range sn.subs {\n\t\t\t\tsub.Receive(elemEvent)\n\t\t\t}\n\t\t})\n\t}\n}",
"func (w *watcher) sendEvent(evType kvstore.WatchEventType, key string, value []byte, version int64) {\n\tf := w.f\n\n\tobj, err := f.codec.Decode(value, nil)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to decode %v with error %v\", string(value), err)\n\t\tw.sendError(err)\n\t\treturn\n\t}\n\n\terr = f.objVersioner.SetVersion(obj, uint64(version))\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to set version %v with error: %v\", version, err)\n\t\tw.sendError(err)\n\t\treturn\n\t}\n\n\te := &kvstore.WatchEvent{\n\t\tType: evType,\n\t\tObject: obj,\n\t\tKey: key,\n\t}\n\n\tif len(w.outCh) == outCount {\n\t\tlog.Warnf(\"Number of buffered watch events hit max count of %v\", outCount)\n\t}\n\n\tselect {\n\tcase w.outCh <- e:\n\tcase <-w.ctx.Done():\n\t}\n}",
"func (uu UpdateUser) UpdatedEvent(userID uuid.UUID) event.Event {\n\tparams := EventParamsUpdated{\n\t\tUserID: userID,\n\t\tUpdateUser: UpdateUser{\n\t\t\tEnabled: uu.Enabled,\n\t\t},\n\t}\n\n\trawParams, err := params.Marshal()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn event.Event{\n\t\tSource: EventSource,\n\t\tType: EventUpdated,\n\t\tRawParams: rawParams,\n\t}\n}",
"func SendCloudEvent(sinkURI, eventID, eventSourceURI string, data []byte, eventType TektonEventType, logger *zap.SugaredLogger, cloudEventClient CEClient) (cloudevents.Event, error) {\n\tvar event cloudevents.Event\n\n\tcloudEventSource := types.ParseURLRef(eventSourceURI)\n\tif cloudEventSource == nil {\n\t\tlogger.Errorf(\"Invalid eventSourceURI: %s\", eventSourceURI)\n\t\treturn event, fmt.Errorf(\"invalid eventSourceURI: %s\", eventSourceURI)\n\t}\n\n\tevent = cloudevents.Event{\n\t\tContext: cloudevents.EventContextV02{\n\t\t\tID: eventID,\n\t\t\tType: string(eventType),\n\t\t\tSource: *cloudEventSource,\n\t\t\tTime: &types.Timestamp{Time: time.Now()},\n\t\t\tExtensions: nil,\n\t\t}.AsV02(),\n\t\tData: data,\n\t}\n\tctxt := cecontext.WithTarget(context.TODO(), sinkURI)\n\t_, err := cloudEventClient.Send(ctxt, event)\n\tif err != nil {\n\t\tlogger.Errorf(\"Error sending the cloud-event: %s\", err)\n\t\treturn event, err\n\t}\n\treturn event, nil\n}",
"func (h *DaprHandler) ObjectUpdated(old interface{}, new interface{}) {\n}",
"func (r *ProjectsLogServicesSinksService) Update(projectsId string, logServicesId string, sinksId string, logsink *LogSink) *ProjectsLogServicesSinksUpdateCall {\n\treturn &ProjectsLogServicesSinksUpdateCall{\n\t\ts: r.s,\n\t\tprojectsId: projectsId,\n\t\tlogServicesId: logServicesId,\n\t\tsinksId: sinksId,\n\t\tlogsink: logsink,\n\t\tcaller_: googleapi.JSONCall{},\n\t\tparams_: make(map[string][]string),\n\t\tpathTemplate_: \"v1beta3/projects/{projectsId}/logServices/{logServicesId}/sinks/{sinksId}\",\n\t}\n}",
"func (a *apiEndpoint) Update() error {\n\ta.e.mutex.RLock()\n\ta.SetEntry(\"server\", api.String(a.e.poolEntry.Desc))\n\ta.SetEntry(\"status\", api.String(a.e.status.String()))\n\tif a.e.lastErr != nil {\n\t\ta.SetEntry(\"last_error\", api.String(a.e.lastErr.Error()))\n\t\ta.SetEntry(\"last_error_time\", api.String(a.e.lastErrTime.Format(time.RFC3339)))\n\t} else {\n\t\ta.SetEntry(\"last_error\", api.Null)\n\t\ta.SetEntry(\"last_error_time\", api.Null)\n\t}\n\ta.SetEntry(\"pendingPayloads\", api.Number(a.e.NumPending()))\n\ta.SetEntry(\"publishedLines\", api.Number(a.e.LineCount()))\n\ta.SetEntry(\"averageLatency\", api.Float(a.e.AverageLatency()/time.Millisecond))\n\ta.e.mutex.RUnlock()\n\n\treturn nil\n}",
"func (er *EventRouter) updateEvent(objOld interface{}, objNew interface{}) {\n\teOld := objOld.(*v1.Event)\n\teNew := objNew.(*v1.Event)\n\tprometheusEvent(eNew, er)\n\tif eOld.ResourceVersion != eNew.ResourceVersion {\n\t\terr := store.DefaultMongoStore.Insert(er.Clustername, eNew)\n\t\tif nil != err {\n\t\t\tlog.Warn(\"insert event %s error %s\", eNew.Name, err.Error())\n\t\t}\n\t}\n}",
"func (s serviceimpl) UpdateEvent(srv *calendar.Service, calendarID string, eventID string, name string, dateRange ...*DateRange) (*calendar.Event, error) {\n\tevt, err := srv.Events.Get(calendarID, eventID).Do()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn nil, err\n\t}\n\tevt.Summary = name\n\tif len(dateRange) > 0 {\n\t\tevt.Start.DateTime = dateRange[0].Start\n\t\tevt.End.DateTime = dateRange[0].End\n\t}\n\n\tevt, err = srv.Events.Update(calendarID, eventID, evt).Do()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn nil, err\n\t}\n\n\treturn evt, nil\n}",
"func (o Occurrence) Updated(occurrence model.Occurrence) event.Domain {\n\treturn *event.NewDomainEvent(event.DomainArgs{\n\t\tCaller: tracker,\n\t\tAggregateName: occurrenceName,\n\t\tAction: update,\n\t\tAggregateID: occurrence.ID,\n\t\tBody: occurrence,\n\t})\n}",
"func (api *objectAPI) Update(obj *objstore.Object) error {\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = apicl.ObjstoreV1().Object().Update(context.Background(), obj)\n\t\treturn err\n\t}\n\n\tapi.ct.handleObjectEvent(&kvstore.WatchEvent{Object: obj, Type: kvstore.Updated})\n\treturn nil\n}",
"func (ch *ClickHouse) Update(object map[string]interface{}) error {\n\treturn ch.SyncStore(nil, []map[string]interface{}{object}, \"\", true)\n}",
"func (w *WebhookServiceOp) Update(webhook *Webhook) (*Webhook, error) {\n\tpath := fmt.Sprintf(\"%s/%d\", webhooksBasePath, webhook.ID)\n\tresource := new(Webhook)\n\terr := w.client.Put(path, webhook, &resource)\n\n\treturn resource, err\n}",
"func UpdateTargetHandler(w http.ResponseWriter, r *http.Request) {\n\tenv := envFromRequest(r)\n\n\tip, _, err := net.SplitHostPort(r.RemoteAddr)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := r.ParseForm(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tmac, scriptName, environment, params := parsePostForm(r.PostForm)\n\tif mac == \"\" || scriptName == \"\" {\n\t\thttp.Error(w, \"MAC address and target must not be empty\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tserver := server.New(mac, ip, \"\")\n\tinputErr, err := polling.UpdateTarget(\n\t\tenv.Logger, env.ServerStates, env.Templates, env.EventLog, env.BaseURL, server,\n\t\tscriptName, environment, params)\n\n\tif err != nil {\n\t\tif inputErr {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t} else {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\thttp.Redirect(w, r, \"/\", http.StatusFound)\n}",
"func (s *GRPCServer) WatchUpdate(context.Context, *dashboard.WatchRequest) (*dashboard.Empty, error) {\n\tpanic(\"not implemented\")\n}",
"func (s *Store) UpdateEvent(ctx context.Context, event *corev2.Event) (*corev2.Event, *corev2.Event, error) {\n\tif event == nil || event.Check == nil {\n\t\treturn nil, nil, &store.ErrNotValid{Err: errors.New(\"event has no check\")}\n\t}\n\n\tif err := event.Check.Validate(); err != nil {\n\t\treturn nil, nil, &store.ErrNotValid{Err: err}\n\t}\n\n\tif err := event.Entity.Validate(); err != nil {\n\t\treturn nil, nil, &store.ErrNotValid{Err: err}\n\t}\n\n\tctx = store.NamespaceContext(ctx, event.Entity.Namespace)\n\n\tprevEvent, err := s.GetEventByEntityCheck(\n\t\tctx, event.Entity.Name, event.Check.Name,\n\t)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// Maintain check history.\n\tif prevEvent != nil {\n\t\tif !prevEvent.HasCheck() {\n\t\t\treturn nil, nil, &store.ErrNotValid{Err: errors.New(\"invalid previous event\")}\n\t\t}\n\n\t\tevent.Check.MergeWith(prevEvent.Check)\n\t}\n\n\tupdateOccurrences(event.Check)\n\n\tpersistEvent := event\n\n\tif event.HasMetrics() {\n\t\t// Taking pains to not modify our input, set metrics to nil so they are\n\t\t// not persisted.\n\t\tnewEvent := *event\n\t\tpersistEvent = &newEvent\n\t\tpersistEvent.Metrics = nil\n\t}\n\n\t// Truncate check output if the output is larger than MaxOutputSize\n\tif size := event.Check.MaxOutputSize; size > 0 && int64(len(event.Check.Output)) > size {\n\t\t// Taking pains to not modify our input, set a bound on the check\n\t\t// output size.\n\t\tnewEvent := *persistEvent\n\t\tpersistEvent = &newEvent\n\t\tcheck := *persistEvent.Check\n\t\tcheck.Output = check.Output[:size]\n\t\tpersistEvent.Check = &check\n\t}\n\n\tif persistEvent.Timestamp == 0 {\n\t\t// If the event is being created for the first time, it may not include\n\t\t// a timestamp. 
Use the current time.\n\t\tpersistEvent.Timestamp = time.Now().Unix()\n\t}\n\n\t// Handle expire on resolve silenced entries\n\tif err := handleExpireOnResolveEntries(ctx, persistEvent, s); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// update the history\n\t// marshal the new event and store it.\n\teventBytes, err := proto.Marshal(persistEvent)\n\tif err != nil {\n\t\treturn nil, nil, &store.ErrEncode{Err: err}\n\t}\n\n\tcmp := namespaceExistsForResource(event.Entity)\n\treq := clientv3.OpPut(getEventPath(event), string(eventBytes))\n\tres, err := s.client.Txn(ctx).If(cmp).Then(req).Commit()\n\tif err != nil {\n\t\treturn nil, nil, &store.ErrInternal{Message: err.Error()}\n\t}\n\tif !res.Succeeded {\n\t\treturn nil, nil, &store.ErrNamespaceMissing{Namespace: event.Entity.Namespace}\n\t}\n\n\treturn event, prevEvent, nil\n}",
"func (f *EventFilter) Update(from *EventFilter, fields ...string) error {\n\tfor _, field := range fields {\n\t\tswitch field {\n\t\tcase \"Action\":\n\t\t\tf.Action = from.Action\n\t\tcase \"When\":\n\t\t\tf.When = from.When\n\t\tcase \"Expressions\":\n\t\t\tf.Expressions = append(f.Expressions[0:0], from.Expressions...)\n\t\tcase \"RuntimeAssets\":\n\t\t\tf.RuntimeAssets = append(f.RuntimeAssets[0:0], from.RuntimeAssets...)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unsupported field: %q\", f)\n\t\t}\n\t}\n\treturn nil\n}",
"func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) {\n\tif r.UpdateFunc != nil {\n\t\tr.UpdateFunc(oldObj, newObj)\n\t}\n}",
"func (ctrler CtrlDefReactor) OnObjectUpdate(oldObj *Object, newObj *objstore.Object) error {\n\tlog.Info(\"OnObjectUpdate is not implemented\")\n\treturn nil\n}",
"func newValueMetric(e *events.Envelope) *Event {\n\tvar m = e.GetValueMetric()\n\tvar r = LabelSet{\n\t\t\"cf_origin\": \"firehose\",\n\t\t\"deployment\": e.GetDeployment(),\n\t\t\"event_type\": e.GetEventType().String(),\n\t\t\"job\": e.GetJob(),\n\t\t\"job_index\": e.GetIndex(),\n\t\t\"origin\": e.GetOrigin(),\n\t}\n\tmsg := fmt.Sprintf(\"%s = %g (%s)\", m.GetName(), m.GetValue(), m.GetUnit())\n\treturn &Event{\n\t\tLabels: r,\n\t\tMsg: msg,\n\t}\n}",
"func (handler *ObjectWebHandler) Update(w http.ResponseWriter, r *http.Request) {\n\trespondWithError(w, http.StatusNotImplemented, \"Not implemented\", nil)\n}",
"func (h GuildMemberUpdateHandler) Event() api.GatewayEventType {\n\treturn api.GatewayEventGuildMemberUpdate\n}",
"func (c *DefaultApiService) UpdateSink(Sid string, params *UpdateSinkParams) (*EventsV1Sink, error) {\n\tpath := \"/v1/Sinks/{Sid}\"\n\tpath = strings.Replace(path, \"{\"+\"Sid\"+\"}\", Sid, -1)\n\n\tdata := url.Values{}\n\theaders := make(map[string]interface{})\n\n\tif params != nil && params.Description != nil {\n\t\tdata.Set(\"Description\", *params.Description)\n\t}\n\n\tresp, err := c.requestHandler.Post(c.baseURL+path, data, headers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tps := &EventsV1Sink{}\n\tif err := json.NewDecoder(resp.Body).Decode(ps); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps, err\n}",
"func getProxyUpdateEvent(msg events.PubSubMessage) *proxyUpdateEvent {\n\tswitch msg.Kind {\n\tcase\n\t\t//\n\t\t// K8s native resource events\n\t\t//\n\t\t// Endpoint event\n\t\tannouncements.EndpointAdded, announcements.EndpointDeleted, announcements.EndpointUpdated,\n\t\t// k8s Ingress event\n\t\tannouncements.IngressAdded, announcements.IngressDeleted, announcements.IngressUpdated,\n\t\t//\n\t\t// OSM resource events\n\t\t//\n\t\t// Egress event\n\t\tannouncements.EgressAdded, announcements.EgressDeleted, announcements.EgressUpdated,\n\t\t// IngressBackend event\n\t\tannouncements.IngressBackendAdded, announcements.IngressBackendDeleted, announcements.IngressBackendUpdated,\n\t\t// MulticlusterService event\n\t\tannouncements.MultiClusterServiceAdded, announcements.MultiClusterServiceDeleted, announcements.MultiClusterServiceUpdated,\n\t\t//\n\t\t// SMI resource events\n\t\t//\n\t\t// SMI HTTPRouteGroup event\n\t\tannouncements.RouteGroupAdded, announcements.RouteGroupDeleted, announcements.RouteGroupUpdated,\n\t\t// SMI TCPRoute event\n\t\tannouncements.TCPRouteAdded, announcements.TCPRouteDeleted, announcements.TCPRouteUpdated,\n\t\t// SMI TrafficSplit event\n\t\tannouncements.TrafficSplitAdded, announcements.TrafficSplitDeleted, announcements.TrafficSplitUpdated,\n\t\t// SMI TrafficTarget event\n\t\tannouncements.TrafficTargetAdded, announcements.TrafficTargetDeleted, announcements.TrafficTargetUpdated,\n\t\t//\n\t\t// Proxy events\n\t\t//\n\t\tannouncements.ProxyUpdate:\n\t\treturn &proxyUpdateEvent{\n\t\t\tmsg: msg,\n\t\t\ttopic: announcements.ProxyUpdate.String(),\n\t\t}\n\n\tcase announcements.MeshConfigUpdated:\n\t\tprevMeshConfig, okPrevCast := msg.OldObj.(*v1alpha1.MeshConfig)\n\t\tnewMeshConfig, okNewCast := msg.NewObj.(*v1alpha1.MeshConfig)\n\t\tif !okPrevCast || !okNewCast {\n\t\t\tlog.Error().Msgf(\"Expected MeshConfig type, got previous=%T, new=%T\", okPrevCast, okNewCast)\n\t\t\treturn nil\n\t\t}\n\n\t\tprevSpec := 
prevMeshConfig.Spec\n\t\tnewSpec := newMeshConfig.Spec\n\t\t// A proxy config update must only be triggered when a MeshConfig field that maps to a proxy config\n\t\t// changes.\n\t\tif prevSpec.Traffic.EnableEgress != newSpec.Traffic.EnableEgress ||\n\t\t\tprevSpec.Traffic.EnablePermissiveTrafficPolicyMode != newSpec.Traffic.EnablePermissiveTrafficPolicyMode ||\n\t\t\tprevSpec.Observability.Tracing != newSpec.Observability.Tracing ||\n\t\t\tprevSpec.Traffic.InboundExternalAuthorization.Enable != newSpec.Traffic.InboundExternalAuthorization.Enable ||\n\t\t\t// Only trigger an update on InboundExternalAuthorization field changes if the new spec has the 'Enable' flag set to true.\n\t\t\t(newSpec.Traffic.InboundExternalAuthorization.Enable && (prevSpec.Traffic.InboundExternalAuthorization != newSpec.Traffic.InboundExternalAuthorization)) ||\n\t\t\tprevSpec.FeatureFlags != newSpec.FeatureFlags {\n\t\t\treturn &proxyUpdateEvent{\n\t\t\t\tmsg: msg,\n\t\t\t\ttopic: announcements.ProxyUpdate.String(),\n\t\t\t}\n\t\t}\n\t\treturn nil\n\n\tcase announcements.PodUpdated:\n\t\t// Only trigger a proxy update for proxies associated with this pod based on the proxy UUID\n\t\tprevPod, okPrevCast := msg.OldObj.(*corev1.Pod)\n\t\tnewPod, okNewCast := msg.NewObj.(*corev1.Pod)\n\t\tif !okPrevCast || !okNewCast {\n\t\t\tlog.Error().Msgf(\"Expected *Pod type, got previous=%T, new=%T\", okPrevCast, okNewCast)\n\t\t\treturn nil\n\t\t}\n\t\tprevMetricAnnotation := prevPod.Annotations[constants.PrometheusScrapeAnnotation]\n\t\tnewMetricAnnotation := newPod.Annotations[constants.PrometheusScrapeAnnotation]\n\t\tif prevMetricAnnotation != newMetricAnnotation {\n\t\t\tproxyUUID := newPod.Labels[constants.EnvoyUniqueIDLabelName]\n\t\t\treturn &proxyUpdateEvent{\n\t\t\t\tmsg: msg,\n\t\t\t\ttopic: GetPubSubTopicForProxyUUID(proxyUUID),\n\t\t\t}\n\t\t}\n\t\treturn nil\n\n\tdefault:\n\t\treturn nil\n\t}\n}",
"func (o *UpdateEventParams) WithBody(body *models.Event) *UpdateEventParams {\n\to.SetBody(body)\n\treturn o\n}",
"func (manager *Manager) OnUpdateEndpoint(endpoint *k8sTypes.CiliumEndpoint) {\n\tid := types.NamespacedName{\n\t\tName: endpoint.GetName(),\n\t\tNamespace: endpoint.GetNamespace(),\n\t}\n\n\tmanager.pendingEndpointEventsLock.Lock()\n\tmanager.pendingEndpointEvents[id] = endpoint\n\tmanager.pendingEndpointEventsLock.Unlock()\n\n\tmanager.endpointEventsQueue.Add(id)\n}",
"func (r *ImpossibleTravelRiskEventRequest) Update(ctx context.Context, reqObj *ImpossibleTravelRiskEvent) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}",
"func (c Client) Update(input *UpdateWebhookInput) (*UpdateWebhookResponse, error) {\n\treturn c.UpdateWithContext(context.Background(), input)\n}",
"func (o *UpdateEventParams) WithContext(ctx context.Context) *UpdateEventParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func (s *Stream) Update(ctx context.Context, c Collector) (err error) {\n\tupdate := newTaskLogger(s.stdout).Task(fmt.Sprintf(\"DRIVE %s\", s.drive)).Task(\"UPDATE\")\n\n\tupdate.Log(\"Started %s\\n\", time.Now().Format(time.RFC3339))\n\tdefer func(e *error) {\n\t\tif *e != nil {\n\t\t\tupdate.Log(\"ERROR: %v\\n\", *e)\n\t\t\tupdate.Log(\"Aborted %s | %s\\n\", time.Now().Format(time.RFC3339), update.Duration())\n\t\t} else {\n\t\t\tupdate.Log(\"Finished %s | %s\\n\", time.Now().Format(time.RFC3339), update.Duration())\n\t\t}\n\t}(&err)\n\n\tif err = s.collect(ctx, c, update); err != nil {\n\t\treturn err\n\t}\n\n\treturn s.buildCommits(ctx, update)\n}",
"func UpdateEvent(id bson.ObjectId, event Event) Event {\n\tsession, _ := mgo.Dial(\"127.0.0.1\")\n\tdefer session.Close()\n\tsession.SetMode(mgo.Monotonic, true)\n\tdb := session.DB(\"insapp\").C(\"event\")\n\teventID := bson.M{\"_id\": id}\n\tchange := bson.M{\"$set\": bson.M{\n\t\t\"name\"\t\t\t\t\t:\tevent.Name,\n\t\t\"description\"\t\t:\tevent.Description,\n\t\t\"status\"\t\t\t\t:\tevent.Status,\n\t\t\"image\"\t\t\t\t\t:\tevent.Image,\n\t\t\"palette\"\t\t\t\t:\tevent.Palette,\n\t\t\"selectedcolor\"\t:\tevent.SelectedColor,\n\t\t\"datestart\"\t\t\t:\tevent.DateStart,\n\t\t\"dateend\"\t\t\t\t:\tevent.DateEnd,\n\t\t\"bgcolor\"\t\t\t\t:\tevent.BgColor,\n\t\t\"fgcolor\"\t\t\t\t: event.FgColor,\n\t}}\n\tdb.Update(eventID, change)\n\tvar result Event\n\tdb.Find(bson.M{\"_id\": id}).One(&result)\n\treturn result\n}",
"func (t *Target) Update(newTarget *Target) {\n\tmutableMutex.Lock()\n defer mutableMutex.Unlock()\n\tt.Protocol = newTarget.Protocol\n\tt.Dest = newTarget.Dest\n\tt.TCPTLS = newTarget.TCPTLS\n\tt.HTTPMethod = newTarget.HTTPMethod\n\tt.HTTPStatusList = newTarget.HTTPStatusList\n\tt.Regexp = newTarget.Regexp\n\tt.ResSize = newTarget.ResSize\n\tt.Retry = newTarget.Retry\n\tt.RetryWait = newTarget.RetryWait\n\tt.Timeout = newTarget.Timeout\n\tt.TLSSkipVerify = newTarget.TLSSkipVerify\n}",
"func (command HelloWorldResource) Update(ctx context.Context, awsConfig awsv2.Config,\n\tevent *CloudFormationLambdaEvent,\n\tlogger *zerolog.Logger) (map[string]interface{}, error) {\n\trequest := HelloWorldResourceRequest{}\n\trequestPropsErr := json.Unmarshal(event.ResourceProperties, &request)\n\tif requestPropsErr != nil {\n\t\treturn nil, requestPropsErr\n\t}\n\tlogger.Info().Msgf(\"update: %s\", request.Message)\n\treturn nil, nil\n}",
"func (br *BGPReflector) Update(event controller.Event, txn controller.UpdateOperations) (changeDescription string, err error) {\n\n\tif bgpRouteUpdate, isBGPRouteUpdate := event.(*BGPRouteUpdate); isBGPRouteUpdate {\n\t\tbr.Log.Debugf(\"BGP route update: %v\", bgpRouteUpdate)\n\n\t\t// add / delete route on VPP\n\t\tkey, route := br.vppRoute(bgpRouteUpdate.DstNetwork, bgpRouteUpdate.GwAddr)\n\t\tif bgpRouteUpdate.Type == RouteAdd {\n\t\t\ttxn.Put(key, route)\n\t\t\tchangeDescription = \"BGP route Add\"\n\t\t} else {\n\t\t\ttxn.Delete(key)\n\t\t\tchangeDescription = \"BGP route Delete\"\n\t\t}\n\t}\n\treturn\n}",
"func (r *PodsIncomingReflector) HandleEvent(e interface{}) {\n\tevent := e.(watch.Event)\n\tpod, ok := event.Object.(*corev1.Pod)\n\tif !ok {\n\t\tklog.Error(\"INCOMING REFLECTION: cannot cast object to pod\")\n\t\treturn\n\t}\n\n\tif pod == nil {\n\t\tklog.V(4).Info(\"INCOMING REFLECTION: received nil pod to process\")\n\t\treturn\n\t}\n\n\tklog.V(3).Infof(\"INCOMING REFLECTION: received %v for pod %v\", event.Type, pod.Name)\n\n\tr.PushToInforming(pod)\n}",
"func (f *Flow) Update(packet *Packet, opts *Opts) {\n\tnow := UnixMilli(packet.GoPacket.Metadata().CaptureInfo.Timestamp)\n\tf.Last = now\n\tf.Metric.Last = now\n\n\tif opts.LayerKeyMode == L3PreferredKeyMode {\n\t\t// use the ethernet length as we want to get the full size and we want to\n\t\t// rely on the l3 address order.\n\t\tlength := packet.Length\n\t\tif length == 0 {\n\t\t\tif ethernetPacket := getLinkLayer(packet); ethernetPacket != nil {\n\t\t\t\tlength = getLinkLayerLength(ethernetPacket)\n\t\t\t}\n\t\t}\n\n\t\tf.updateMetricsWithNetworkLayer(packet, length)\n\t} else {\n\t\tif updated := f.updateMetricsWithLinkLayer(packet); !updated {\n\t\t\tf.updateMetricsWithNetworkLayer(packet, 0)\n\t\t}\n\t}\n\n\tf.updateRTT(packet)\n\n\t// depends on options\n\tif f.TCPMetric != nil {\n\t\tf.updateTCPMetrics(packet)\n\t}\n\tif (opts.ExtraLayers & DNSLayer) != 0 {\n\t\tif layer := packet.Layer(layers.LayerTypeDNS); layer != nil {\n\t\t\tf.updateDNSLayer(layer, packet.GoPacket.Metadata().CaptureInfo.Timestamp)\n\t\t}\n\t}\n}",
"func (r *ProjectsLogsSinksService) Update(projectsId string, logsId string, sinksId string, logsink *LogSink) *ProjectsLogsSinksUpdateCall {\n\treturn &ProjectsLogsSinksUpdateCall{\n\t\ts: r.s,\n\t\tprojectsId: projectsId,\n\t\tlogsId: logsId,\n\t\tsinksId: sinksId,\n\t\tlogsink: logsink,\n\t\tcaller_: googleapi.JSONCall{},\n\t\tparams_: make(map[string][]string),\n\t\tpathTemplate_: \"v1beta3/projects/{projectsId}/logs/{logsId}/sinks/{sinksId}\",\n\t}\n}",
"func Update(c client.Client, obj runtime.Object) error {\n\tkind := obj.GetObjectKind().GroupVersionKind().Kind\n\n\t// Only allow updates on specic kinds.\n\tif kind != \"ConfigMap\" {\n\t\treturn errors.New(\"update not supported for this object kind\")\n\t}\n\n\tif err := c.Update(context.Background(), obj); err != nil {\n\t\treturn fmt.Errorf(\"failed to update %s: %v\", kind, err)\n\t}\n\treturn nil\n}",
"func (s *store) OnUpdate(oldObj, newObj interface{}) {\n\toldPod, ok1 := oldObj.(*api.Pod)\n\tnewPod, ok2 := newObj.(*api.Pod)\n\tif !ok1 || !ok2 {\n\t\tlog.Errorf(\"Expected Pod but OnUpdate handler received %+v %+v\", oldObj, newObj)\n\t\treturn\n\t}\n\n\tif oldPod.Status.PodIP != newPod.Status.PodIP {\n\t\ts.OnDelete(oldPod)\n\t\ts.OnAdd(newPod)\n\t}\n}",
"func (s *WebhooksServiceOp) Update(webhook Webhook, options ...interface{}) (Webhook, error) {\n\tvar webhookResponse GetWebhookResponse\n\tjsonBody, err := json.Marshal(webhook)\n\tif err != nil {\n\t\treturn webhookResponse.Data, err\n\t}\n\treqBody := bytes.NewReader(jsonBody)\n\tbody, reqErr := s.client.DoRequest(http.MethodPut, fmt.Sprintf(\"/v3/hooks/%d\", webhook.ID), reqBody)\n\tif reqErr != nil {\n\t\treturn webhookResponse.Data, reqErr\n\t}\n\n\tjsonErr := json.Unmarshal(body, &webhookResponse)\n\tif jsonErr != nil {\n\t\treturn webhookResponse.Data, jsonErr\n\t}\n\n\treturn webhookResponse.Data, nil\n}",
"func (sp *ServiceProcessor) Update(event controller.Event) error {\n\tif ksChange, isKSChange := event.(*controller.KubeStateChange); isKSChange {\n\t\treturn sp.propagateDataChangeEv(ksChange)\n\t}\n\n\tif addPod, isAddPod := event.(*podmanager.AddPod); isAddPod {\n\t\treturn sp.ProcessNewPod(addPod.Pod.Namespace, addPod.Pod.Name)\n\t}\n\tif deletePod, isDeletePod := event.(*podmanager.DeletePod); isDeletePod {\n\t\treturn sp.ProcessDeletingPod(deletePod.Pod.Namespace, deletePod.Pod.Name)\n\t}\n\n\tif _, isNodeUpdate := event.(*nodesync.NodeUpdate); isNodeUpdate {\n\t\treturn sp.renderNodePorts()\n\t}\n\n\treturn nil\n}",
"func (pe *WzPingEvent) Update(msg *wzlib_transport.WzGenericMessage) {\n\tuid := msg.Payload[wzlib_transport.PAYLOAD_SYSTEM_ID].(string)\n\tif pe.uid != uid {\n\t\treturn\n\t}\n\n\tpingId, ok := msg.Payload[wzlib_transport.PAYLOAD_PING_ID]\n\tif !ok {\n\t\tpe.GetLogger().Errorln(\"Ping message contains no 'ping.id' section!\")\n\t} else {\n\t\tpingStatItf, ok := pe.pings.Get(pingId.(string))\n\t\tpingStat := pingStatItf.(*WzPingStat)\n\t\tif !ok {\n\t\t\tpe.GetLogger().Errorln(\"Unable to find ping ID for\", pingId)\n\t\t} else {\n\t\t\tpingStat.Ticks = time.Now().Unix() - pingStat.Ticks\n\t\t\tpingStat.Responded = true\n\t\t}\n\t}\n}",
"func (c *MetricCollector) Update(ctx context.Context, metric *Metric) (*Metric, error) {\n\tc.collectionsMutex.Lock()\n\tdefer c.collectionsMutex.Unlock()\n\n\tkey := NewMetricKey(metric.Namespace, metric.Name)\n\tif collection, exists := c.collections[key]; exists {\n\t\tcollection.metric = metric\n\t\treturn collection.metric.DeepCopy(), nil\n\t}\n\treturn nil, k8serrors.NewNotFound(kpa.Resource(\"Metrics\"), key)\n}",
"func (r *DeviceHealthScriptDeviceStateRequest) Update(ctx context.Context, reqObj *DeviceHealthScriptDeviceState) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}",
"func (r *DeviceHealthScriptDeviceStateRequest) Update(ctx context.Context, reqObj *DeviceHealthScriptDeviceState) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}"
] | [
"0.5389559",
"0.5306597",
"0.51904404",
"0.515413",
"0.5118005",
"0.5084545",
"0.50144255",
"0.49988312",
"0.4901944",
"0.48523718",
"0.483984",
"0.48148668",
"0.4811473",
"0.47936144",
"0.47582054",
"0.47557688",
"0.4751602",
"0.47373387",
"0.46917793",
"0.46882197",
"0.4630409",
"0.45665073",
"0.45600775",
"0.45559874",
"0.44874272",
"0.4484724",
"0.44804913",
"0.4476848",
"0.4467734",
"0.446562",
"0.44222078",
"0.43961412",
"0.43947494",
"0.43847638",
"0.43725422",
"0.43682176",
"0.43459013",
"0.43257058",
"0.4323969",
"0.43237996",
"0.43177798",
"0.4274234",
"0.4265449",
"0.42307928",
"0.42283267",
"0.42193687",
"0.4217768",
"0.42141625",
"0.41826412",
"0.41670316",
"0.4146307",
"0.41429093",
"0.4135115",
"0.4131156",
"0.40986776",
"0.4097153",
"0.40946868",
"0.40942353",
"0.40916345",
"0.40864825",
"0.40832",
"0.407994",
"0.40794954",
"0.4073989",
"0.4064043",
"0.406131",
"0.4058489",
"0.40425506",
"0.40402585",
"0.40319553",
"0.40302578",
"0.40268233",
"0.40226558",
"0.40144104",
"0.4008464",
"0.40071732",
"0.4006675",
"0.4003437",
"0.3997828",
"0.39976394",
"0.39919174",
"0.39886042",
"0.39852947",
"0.3979595",
"0.39789408",
"0.397639",
"0.3975948",
"0.39642754",
"0.39576438",
"0.39441174",
"0.39405307",
"0.3940058",
"0.39385852",
"0.39352337",
"0.39318398",
"0.39318076",
"0.39307123",
"0.39261103",
"0.39250004",
"0.39250004"
] | 0.7178435 | 0 |
Delete takes name of the cloudwatchEventTarget and deletes it. Returns an error if one occurs. | Delete удаляет имя целевого объекта cloudwatchEventTarget и удаляет его. Возвращает ошибку, если она возникает. | func (c *FakeCloudwatchEventTargets) Delete(name string, options *v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(cloudwatcheventtargetsResource, c.ns, name), &v1alpha1.CloudwatchEventTarget{})
return err
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (e *EventAPI) Delete(name string) error {\n\treturn e.client.httpDELETE(e.backOffConf.create(), e.eventURL(name), \"unable to delete event type\")\n}",
"func (w *Watcher) DeleteTarget(targetName string) (error) {\n\tmutableMutex.Lock()\n\tdefer mutableMutex.Unlock()\n\tif w.TargetMap == nil {\n\t\tw.TargetMap = make(map[string]*Target)\n\t}\n\t_, ok := w.TargetMap[targetName]\n\tif !ok {\n\t\treturn errors.Errorf(\"not exist domain\")\n\t}\n\tdelete(w.TargetMap, targetName)\n\treturn nil\n}",
"func Delete(name string) error {\n\tprofile := &performancev2.PerformanceProfile{}\n\tif err := testclient.Client.Get(context.TODO(), types.NamespacedName{Name: name}, profile); err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\tif err := testclient.Client.Delete(context.TODO(), profile); err != nil {\n\t\treturn err\n\t}\n\tkey := client.ObjectKey{\n\t\tName: name,\n\t}\n\treturn WaitForDeletion(key, 2*time.Minute)\n}",
"func (r *ScanRequest) Delete(*cloudformationevt.Event, *runtime.Context) error {\n return nil\n}",
"func Delete(name string) error {\n\treturn <-delete(name)\n}",
"func (s *serverMetricsRecorder) DeleteNamedMetric(name string) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tdelete(s.state.NamedMetrics, name)\n}",
"func (r *ProjectsTraceSinksService) Delete(nameid string) *ProjectsTraceSinksDeleteCall {\n\tc := &ProjectsTraceSinksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.nameid = nameid\n\treturn c\n}",
"func (app *frame) Delete(name string) error {\n\tif app.isStopped {\n\t\treturn nil\n\t}\n\n\tif _, ok := app.variables[name]; !ok {\n\t\tstr := fmt.Sprintf(\"variable: the name variable (%s) is not defined\", name)\n\t\treturn errors.New(str)\n\t}\n\n\tdelete(app.variables, name)\n\treturn nil\n}",
"func (r *DeviceManagementAutopilotEventRequest) Delete(ctx context.Context) error {\n\treturn r.JSONRequest(ctx, \"DELETE\", \"\", nil, nil)\n}",
"func Delete(name string) {\n\tkt.Remove(name)\n}",
"func (ep *eventsProvider) DeleteByName(name string) error {\n\tindices, _ := ep.findByName(name)\n\tif len(indices) == 0 {\n\t\treturn nil\n\t}\n\n\tfor len(indices) != 0 {\n\t\tep.mutex.Lock()\n\t\tep.Data = append(ep.Data[:indices[0]], ep.Data[indices[0]+1:]...)\n\t\tep.mutex.Unlock()\n\t\tindices, _ = ep.findByName(name)\n\t}\n\n\treturn nil\n}",
"func (c *FakeCloudwatchEventTargets) Get(name string, options v1.GetOptions) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewGetAction(cloudwatcheventtargetsResource, c.ns, name), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}",
"func (r *ProjectsLocationsProcessesRunsLineageEventsService) Delete(name string) *ProjectsLocationsProcessesRunsLineageEventsDeleteCall {\n\tc := &ProjectsLocationsProcessesRunsLineageEventsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}",
"func deleteEvent(key model.Key) api.WatchEvent {\n\treturn api.WatchEvent{\n\t\tType: api.WatchDeleted,\n\t\tOld: &model.KVPair{\n\t\t\tKey: key,\n\t\t\tValue: uuid.NewString(),\n\t\t\tRevision: uuid.NewString(),\n\t\t},\n\t}\n}",
"func (svc *Service) Delete(ownerID string, eventID string) error {\n\t_, err := svc.client.Delete(fmt.Sprintf(\"%s/%s\", eventsURL(ownerID), eventID))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Unable to perform Update\")\n\t}\n\treturn nil\n}",
"func (r *DeviceManagementTroubleshootingEventRequest) Delete(ctx context.Context) error {\n\treturn r.JSONRequest(ctx, \"DELETE\", \"\", nil, nil)\n}",
"func (s *TargetCRUD) Delete(arg ...crud.Arg) (crud.Arg, error) {\n\tevent := eventFromArg(arg[0])\n\ttarget := targetFromStuct(event)\n\tprint.DeletePrintln(\"deleting target\", *target.Target.Target,\n\t\t\"from upstream\", *target.Upstream.ID)\n\treturn target, nil\n}",
"func (c *k8sClient) OnDelete(obj interface{}) {\n\tselect {\n\tcase c.eventCh <- obj:\n\tdefault:\n\t}\n}",
"func Delete(c *golangsdk.ServiceClient, id string) (r DeleteResult) {\n\turl := resourceURL(c, id)\n\t//fmt.Printf(\"Delete listener url: %s.\\n\", url)\n\t_, r.Err = c.Delete(url, &golangsdk.RequestOpts{\n\t\tOkCodes: []int{204},\n\t})\n\treturn\n}",
"func (c *peer) Delete(name string) error {\n\tkey := path.Join(c.prefix, name)\n\treturn c.store.Del(key)\n}",
"func (s *serverMetricsRecorder) DeleteNamedUtilization(name string) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tdelete(s.state.Utilization, name)\n}",
"func (c *client) Delete(ctx context.Context, name string) error {\n\tlcn, err := c.Get(ctx, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(*lcn) == 0 {\n\t\treturn fmt.Errorf(\"Location [%s] not found\", name)\n\t}\n\n\trequest, err := c.getLocationRequest(wssdcloudcommon.Operation_DELETE, name, &(*lcn)[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = c.LocationAgentClient.Invoke(ctx, request)\n\n\treturn err\n}",
"func (es *EventService) Delete(eventID string) error {\n\t// DELETE: /event/:eventID\n\treq, err := es.c.NewRequest(\"DELETE\", \"/event/\"+eventID, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn es.c.Do(req, nil)\n}",
"func (c *googleCloudStorageSources) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {\n\treturn c.client.Delete().\n\t\tNamespace(c.ns).\n\t\tResource(\"googlecloudstoragesources\").\n\t\tName(name).\n\t\tBody(&opts).\n\t\tDo(ctx).\n\t\tError()\n}",
"func (c *client) Delete(ctx context.Context, group, name string) error {\n\tvm, err := c.Get(ctx, group, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(*vm) == 0 {\n\t\treturn fmt.Errorf(\"Virtual Machine [%s] not found\", name)\n\t}\n\n\trequest, err := c.getVirtualMachineRequest(wssdcloudproto.Operation_DELETE, group, name, &(*vm)[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = c.VirtualMachineAgentClient.Invoke(ctx, request)\n\n\treturn err\n}",
"func Delete(client *gophercloud.ServiceClient, name string) os.DeleteResult {\n\treturn os.Delete(client, name)\n}",
"func DeleteMessage(regInfo *RegistrationInfo, receiptHandle *string) error {\n\tsvc := getSQSClient(regInfo)\n\n\tlogging.Debug(\"Deleting the event from SQS.\", nil)\n\tparams := &sqs.DeleteMessageInput{\n\t\tQueueUrl: ®Info.ActionQueueEndpoint,\n\t\tReceiptHandle: receiptHandle,\n\t}\n\t_, err := svc.DeleteMessage(params)\n\n\tif err != nil {\n\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t// Message from an error.\n\t\tlogging.Error(\"Could not delete the event.\", logging.Fields{\"error\": err})\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (t Timers) Delete(k string) {\n\tdelete(t, k)\n}",
"func (c *ControlPlaneClient) Delete(ctx context.Context, location, name string) error {\n\treturn c.internal.Delete(ctx, location, name)\n}",
"func (els *EventLocalStore) Delete(id string) error {\n\tels.mutex.Lock()\n\tdefer els.mutex.Unlock()\n\n\tif _, ok := els.events[id]; ok {\n\t\tdelete(els.events, id)\n\t\treturn nil\n\t}\n\n\treturn event.ErrEventNotFound\n}",
"func DeleteFor(source collection.Schema, name resource.FullName, v resource.Version) Event {\n\treturn DeleteForResource(source, &resource.Instance{\n\t\tMetadata: resource.Metadata{\n\t\t\tFullName: name,\n\t\t\tVersion: v,\n\t\t},\n\t})\n}",
"func (km *Keystore) Delete(name string) error {\n\tif err := validateName(name); err != nil {\n\t\treturn err\n\t}\n\treturn km.ds.Delete(ds.NewKey(name))\n}",
"func (c *client) Delete(ctx context.Context, group, name string) error {\n\tid, err := c.Get(ctx, group, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(*id) == 0 {\n\t\treturn fmt.Errorf(\"Identity [%s] not found\", name)\n\t}\n\n\trequest, err := getIdentityRequest(wssdcloudcommon.Operation_DELETE, name, &(*id)[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = c.IdentityAgentClient.Invoke(ctx, request)\n\treturn err\n}",
"func (vfs *volatileVFS) Delete(name string) C.int {\n\tvfs.mu.Lock()\n\tdefer vfs.mu.Unlock()\n\n\tfile, ok := vfs.files[name]\n\tif !ok {\n\t\tvfs.errno = C.ENOENT\n\t\treturn C.SQLITE_IOERR_DELETE_NOENT\n\t}\n\n\t// Check that there are no consumers of this file.\n\tfor iFd := range vfs.fds {\n\t\tif vfs.fds[iFd] == file {\n\t\t\tvfs.errno = C.EBUSY\n\t\t\treturn C.SQLITE_IOERR_DELETE\n\t\t}\n\t}\n\n\tdelete(vfs.files, name)\n\n\treturn C.SQLITE_OK\n}",
"func Delete(rw http.ResponseWriter, r *http.Request) {\n\t// Get userID and eventID\n\tuserID := r.Header.Get(\"userid\")\n\turlParams := strings.Replace(r.URL.String(), \"/api/deleteEvent/\", \"\", 1)\n\teventID := strings.Split(urlParams, \"/\")[0]\n\n\t// eventID must not be empty\n\tif strings.TrimSpace(eventID) == \"\" {\n\t\tlog.Printf(\"Missing event id to delete\\n\")\n\t\trw.WriteHeader(http.StatusBadRequest)\n\t\trw.Write([]byte(\"Event ID must be provided\"))\n\t\treturn\n\t}\n\n\t// Delete from DB\n\tquery := `DELETE\n\t\tFROM events\n\t\tWHERE id = $1\n\t\tAND owner_id = $2`\n\n\tres, err := conn.DB.Exec(query, eventID, userID)\n\tif err != nil {\n\t\tlog.Printf(\"Error deleting event: %s\\n\", err)\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\trw.Write([]byte(\"Error deleting event\"))\n\t\treturn\n\t}\n\n\tif count, err := res.RowsAffected(); err != nil || count == 0 {\n\t\tlog.Printf(\"Error deleting event, count: %d, err: %s\\n\", count, err)\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\trw.Write([]byte(\"Error deleting event\"))\n\t\treturn\n\t}\n\n\tlog.Println(\"Event deleted\")\n\trw.WriteHeader(http.StatusOK)\n\trw.Write([]byte(\"Event deleted\"))\n}",
"func (e *Event) Delete(c echo.Context, id int) error {\n\tevent, err := e.udb.View(e.db, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// if err := e.rbac.IsLowerRole(c, event.Role.AccessLevel); err != nil {\n\t// \treturn err\n\t// }\n\treturn e.udb.Delete(e.db, event)\n}",
"func DeleteEvent(q Query, c chan error) {\n\tresult, err := Session.Run(`\n\t\tMATCH(n:EVENT)-[r]->(a)\n\t\tWHERE n.`+q.Key+`=$val\n\t\tDETACH DELETE n, a\n\t`, map[string]interface{}{\n\t\t\"val\": q.Value,\n\t})\n\tif err != nil {\n\t\tc <- err\n\t}\n\n\tresult.Next()\n\tlog.Println(result.Record())\n\n\tif err = result.Err(); err != nil {\n\t\tc <- err\n\t\treturn\n\t}\n\tlog.Println(\"Event deleted\")\n\tc <- nil\n\treturn\n}",
"func Deleteevent(collection *mongo.Collection, st string) {\n\n\tfilter := bson.D{primitive.E{Key: \"eventsname\", Value: st}}\n\tdeleteResult, err := collection.DeleteOne(context.TODO(), filter)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"Deleted %v documents in the events collection\\n\", deleteResult.DeletedCount)\n}",
"func (b *Bucket) Delete(_ context.Context, name string) error {\n\tdelete(b.objects, name)\n\treturn nil\n}",
"func (s *Synk) Delete(ctx context.Context, name string) error {\n\tpolicy := metav1.DeletePropagationForeground\n\tdeleteOpts := metav1.DeleteOptions{PropagationPolicy: &policy}\n\treturn s.client.Resource(resourceSetGVR).DeleteCollection(ctx, deleteOpts, metav1.ListOptions{\n\t\tLabelSelector: fmt.Sprintf(\"name=%s\", name),\n\t})\n}",
"func (c *client) Delete(ctx context.Context, group, name string) error {\n\tvnet, err := c.Get(ctx, group, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(*vnet) == 0 {\n\t\treturn fmt.Errorf(\"Virtual Network [%s] not found\", name)\n\t}\n\n\trequest, err := getVirtualNetworkRequest(wssdcloudcommon.Operation_DELETE, group, name, &(*vnet)[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = c.VirtualNetworkAgentClient.Invoke(ctx, request)\n\n\treturn err\n}",
"func Remove(name string) error",
"func (b *Bucket) Delete(ctx context.Context, name string) error {\n\treturn b.bkt.Object(name).Delete(ctx)\n}",
"func (t *FakeObjectTracker) Delete(gvr schema.GroupVersionResource, ns, name string) error {\n\treturn nil\n}",
"func (d *Double) Delete(stackName string) error {\n\treturn d.DeleteFn(stackName)\n}",
"func (c *EventClient) Delete() *EventDelete {\n\tmutation := newEventMutation(c.config, OpDelete)\n\treturn &EventDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}\n}",
"func (e *EnqueueRequestForNamespaces) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) {\n\te.add(ctx, evt.Object, q)\n}",
"func (e *EventMapper) DeleteEvent() {\n\n}",
"func (hs *PeerHandleMapSync) Delete(name string) {\n\ths.Lock()\n\t// TODO-WORKSHOP-STEP-5: This code should remove the handle from the PeerHandleMap based on the key name\n\ths.Unlock()\n\tfmt.Println(\"UserHandle Removed for \", name)\n}",
"func (c *ResourcesHandler) Delete(event.DeleteEvent, workqueue.RateLimitingInterface) {}",
"func (c *client) Delete(_ context.Context, request *blobstore.DeleteRequest) (*blobstore.DeleteResponse, error) {\n\tif err := os.Remove(c.bodyPath(request.Key)); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := os.Remove(c.tagsPath(request.Key)); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &blobstore.DeleteResponse{}, nil\n}",
"func (c *FakeAWSSNSTargets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {\n\t_, err := c.Fake.\n\t\tInvokes(testing.NewDeleteActionWithOptions(awssnstargetsResource, c.ns, name, opts), &v1alpha1.AWSSNSTarget{})\n\n\treturn err\n}",
"func (ep *eventsProvider) Delete(id string) error {\n\tind, _ := ep.findByID(id)\n\tif ind == -1 {\n\t\treturn provider.ErrNotExistingEvent\n\t}\n\n\tep.mutex.Lock()\n\tdefer ep.mutex.Unlock()\n\tep.Data = append(ep.Data[:ind], ep.Data[ind+1:]...)\n\n\treturn nil\n}",
"func (l *Listeners) Delete(id xid.ID) {\n\n\tl.Lock()\n\tdelete(l.listeners, id)\n\tl.Unlock()\n}",
"func (s *fsStore) Delete(typ namespace.Type, name string) error {\n\tif !s.Exists(typ, name) {\n\t\treturn store.ErrNotExists\n\t}\n\ttrgt := s.targetPath(name, typ)\n\terr := unix.Unmount(trgt, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn os.Remove(trgt)\n}",
"func (r provisionTokenClient) Delete(ctx context.Context, name string) error {\n\tteleportClient, err := r.TeleportClientAccessor(ctx)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\treturn trace.Wrap(teleportClient.DeleteToken(ctx, name))\n}",
"func (e *ContainerService) Delete(name string) (err error) {\n\turl := fmt.Sprintf(\"containers/%s\", name)\n\terr = e.client.magicRequestDecoder(\"DELETE\", url, nil, nil)\n\treturn\n}",
"func (r *AWSMachineTemplateWebhook) ValidateDelete(_ context.Context, _ runtime.Object) error {\n\treturn nil\n}",
"func (s impl) Delete(ctx context.Context, name string) error {\n\toperation, err := s.service.SslCertificates.Delete(s.projectID, name).Do()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn s.waitFor(ctx, operation.Name)\n}",
"func (w *wireguardServerConfig) Delete(name string) error {\n\treturn w.store.Del(path.Join(w.prefix, name))\n}",
"func (r *ProjectsMetricDescriptorsService) Delete(name string) *ProjectsMetricDescriptorsDeleteCall {\n\tc := &ProjectsMetricDescriptorsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}",
"func DeleteKeyValueExpireViaName(iName string) (err error) {\n\tvar has bool\n\tvar _KeyValueExpire = &KeyValueExpire{Name: iName}\n\tif has, err = Engine.Get(_KeyValueExpire); (has == true) && (err == nil) {\n\t\tif row, err := Engine.Where(\"name = ?\", iName).Delete(new(KeyValueExpire)); (err != nil) || (row <= 0) {\n\t\t\treturn err\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn\n}",
"func (c *MetricCollector) Delete(ctx context.Context, namespace, name string) error {\n\tc.collectionsMutex.Lock()\n\tdefer c.collectionsMutex.Unlock()\n\n\tc.logger.Debugf(\"Stopping metric collection of %s/%s\", namespace, name)\n\n\tkey := NewMetricKey(namespace, name)\n\tif _, ok := c.collections[key]; ok {\n\t\tdelete(c.collections, key)\n\t}\n\treturn nil\n}",
"func Delete(ctx context.Context, name string) error {\n\tname, ok := extractName(ctx)\n\tif !ok {\n\t\treturn ErrNoTreeAttached\n\t}\n\tmu.Lock()\n\tsvr, ok := trees[name]\n\tmu.Unlock()\n\tif !ok {\n\t\tpanic(\"oversight tree not found\")\n\t}\n\tsvr.Delete(name)\n\treturn nil\n}",
"func (c *Collection) DeleteEvent(\n\tkey, typ string, ts time.Time, ordinal int64,\n) error {\n\tpath := fmt.Sprintf(\"%s/%s/events/%s/%d/%d?purge=true\",\n\t\tc.Name, key, typ, ts.UnixNano()/1000000, ordinal)\n\t_, err := c.client.emptyReply(\"DELETE\", path, nil, nil, 204)\n\treturn err\n}",
"func GetEventTarget(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *EventTargetState, opts ...pulumi.ResourceOption) (*EventTarget, error) {\n\tvar resource EventTarget\n\terr := ctx.ReadResource(\"aws:cloudwatch/eventTarget:EventTarget\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (w *Webhook) Delete() error {\n\treturn w.DeleteContext(context.TODO())\n}",
"func Delete(name string) error {\n\tlink, err := netlink.LinkByName(name)\n\tif err != nil {\n\t\tif _, ok := err.(netlink.LinkNotFoundError); ok {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\treturn netlink.LinkDel(link)\n}",
"func (signup *EventSignup) OnDeleted(container *ioccontainer.Container) error {\n\terr := signup.Validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar eventRepository EventRepository\n\tcontainer.Make(&eventRepository)\n\n\tevent, err := eventRepository.GetEventByID(signup.EventID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn signup.sendNotification(event, \"member_signed_out\", container)\n}",
"func (m *MongoStore) DeleteName(name string) bool {\n\ts := m.session.Clone()\n\tdefer s.Close()\n\tc := s.DB(\"kittenserver\").C(\"kittens\")\n\terr := c.Remove((bson.M{\"name\": name}))\n\treturn err == nil\n}",
"func (d *DevClient) DeleteEventHandler(systemKey, name string) error {\n\treturn deleteEventHandler(d, _EVENTS_HDLRS_PREAMBLE+systemKey+\"/\"+name)\n}",
"func (d *DevClient) DeleteTrigger(systemKey, name string) error {\n\treturn d.DeleteEventHandler(systemKey, name)\n}",
"func (b *Bucket) Delete(_ context.Context, name string) error {\n\treturn b.client.DeleteObject(b.name, name)\n}",
"func skykeydeletenamecmd(name string) {\n\terr := httpClient.SkykeyDeleteByNamePost(name)\n\tif err != nil {\n\t\tdie(err)\n\t}\n\n\tfmt.Println(\"Skykey Deleted!\")\n}",
"func Delete(deleteSnapshotURL string, snapshotName string) error {\n\tlogger.Infof(\"Deleting snapshot %s\", snapshotName)\n\tformData := url.Values{\n\t\t\"snapshot\": {snapshotName},\n\t}\n\n\tu, err := url.Parse(deleteSnapshotURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := http.PostForm(u.String(), formData)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsnap := snapshot{}\n\terr = json.Unmarshal(body, &snap)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif snap.Status == \"ok\" {\n\t\tlogger.Infof(\"Snapshot %s deleted\", snapshotName)\n\t\treturn nil\n\t} else if snap.Status == \"error\" {\n\t\treturn errors.New(snap.Msg)\n\t} else {\n\t\treturn fmt.Errorf(\"Unkown status: %v\", snap.Status)\n\t}\n}",
"func (command HelloWorldResource) Delete(ctx context.Context, awsConfig awsv2.Config,\n\tevent *CloudFormationLambdaEvent,\n\tlogger *zerolog.Logger) (map[string]interface{}, error) {\n\trequest := HelloWorldResourceRequest{}\n\n\trequestPropsErr := json.Unmarshal(event.ResourceProperties, &request)\n\tif requestPropsErr != nil {\n\t\treturn nil, requestPropsErr\n\t}\n\tlogger.Info().Msgf(\"delete: %s\", request.Message)\n\treturn nil, nil\n}",
"func (r *EventTagsService) Delete(profileId int64, id int64) *EventTagsDeleteCall {\n\tc := &EventTagsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.profileId = profileId\n\tc.id = id\n\treturn c\n}",
"func (pgs *PGStorage) DeleteEvent(eventUUID uuid.UUID) error {\n\tsql := \"delete from events where uuid = $1\"\n\t_, err := pgs.DB.ExecContext(pgs.Ctx, sql, eventUUID.String())\n\treturn err\n}",
"func (m *Medium) Delete(name string) error {\n\tt, ok := m.topics[name]\n\tif !ok {\n\t\treturn fmt.Errorf(\"Cannot delete. No topic named %s.\", name)\n\t}\n\tt.Close()\n\tm.mx.Lock()\n\tdelete(m.topics, name)\n\tm.mx.Unlock()\n\treturn nil\n}",
"func (s *Storage) DeleteRequest(id string) error {\n\t// TODO: Unimplemented\n\treturn nil\n}",
"func (s *FilesystemStore) Delete(c *kelly.Context, name string) error {\n\tsession, error := s.Get(c, name)\n\tif error != nil {\n\t\treturn error\n\t}\n\tsession.Options.MaxAge = -1\n\treturn s.Save(c, session)\n}",
"func (c *GalleryImageClient) Delete(ctx context.Context, location, name string) error {\n\treturn c.internal.Delete(ctx, location, name)\n}",
"func (dl *DataLayer) DeleteEvent(id int64) error {\n\t_, err := dl.db.Connection().Exec(deleteEventQuery, id)\n\treturn err\n}",
"func (s *serverMetricsRecorder) DeleteRequestCost(name string) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tdelete(s.state.RequestCost, name)\n}",
"func (c Claims) Del(name string) { delete(c, name) }",
"func (ue *UserEvent) Delete(ctx context.Context) *spanner.Mutation {\n\tvalues, _ := ue.columnsToValues(UserEventPrimaryKeys())\n\treturn spanner.Delete(\"UserEvent\", spanner.Key(values))\n}",
"func (rl *Instance) DelEvent(keyPress string) {\n\tdelete(rl.evtKeyPress, keyPress)\n}",
"func DeleteKeyValueViaName(iName string) (err error) {\n\tvar has bool\n\tvar _KeyValue = &KeyValue{Name: iName}\n\tif has, err = Engine.Get(_KeyValue); (has == true) && (err == nil) {\n\t\tif row, err := Engine.Where(\"name = ?\", iName).Delete(new(KeyValue)); (err != nil) || (row <= 0) {\n\t\t\treturn err\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn\n}",
"func (f *LogFile) DeleteMeasurement(name []byte) error {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\te := LogEntry{Flag: LogEntryMeasurementTombstoneFlag, Name: name}\n\tif err := f.appendEntry(&e); err != nil {\n\t\treturn err\n\t}\n\tf.execEntry(&e)\n\n\t// Flush buffer and sync to disk.\n\treturn f.FlushAndSync()\n}",
"func (r *InspectOperationsService) Delete(name string) *InspectOperationsDeleteCall {\n\tc := &InspectOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}",
"func (c *kongs) Delete(name string, options *v1.DeleteOptions) error {\n\treturn c.client.Delete().\n\t\tNamespace(c.ns).\n\t\tResource(\"kongs\").\n\t\tName(name).\n\t\tBody(options).\n\t\tDo().\n\t\tError()\n}",
"func (h *Handler) Delete(bucket, name string) error {\n\treq := h.Client.DeleteObjectRequest(&s3.DeleteObjectInput{\n\t\tBucket: &bucket,\n\t\tKey: &name,\n\t})\n\n\tresp, err := req.Send(context.Background())\n\tif err != nil {\n\t\tklog.Error(\"Failed to send Delete request. error: \", err)\n\t\treturn err\n\t}\n\n\tklog.V(10).Info(\"Delete Success\", resp)\n\n\treturn nil\n}",
"func (c *Collection) Delete(k string) {\n\tc.itemsLock.Lock()\n\tdefer func() {\n\t\tc.itemsLock.Unlock()\n\t\tc.events <- &event.Event{\n\t\t\tTopic: c.Topic(TopicCollectionGone),\n\t\t\tType: event.Type(\"CollectionGone\"),\n\t\t\tID: c.EventID(k),\n\t\t\tMessage: \"Removed from collection\",\n\t\t}\n\t}()\n\tdelete(c.items, k)\n}",
"func (store *Engine) DeleteRequest(requestID string) error {\n\t_, err := store.api.\n\t\tURL(\"/workflow-engine/api/v1/requests/%s\", requestID).\n\t\tDelete()\n\n\treturn err\n}",
"func (r *ProjectsOccurrencesService) Delete(name string) *ProjectsOccurrencesDeleteCall {\n\tc := &ProjectsOccurrencesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}",
"func (c *SubresourceClient) Delete(namespace, name string) (e error) {\n\tif c.Error != \"\" {\n\t\te = fmt.Errorf(c.Error)\n\t}\n\treturn\n}",
"func (r *ProjectsLocationsDataExchangesService) Delete(name string) *ProjectsLocationsDataExchangesDeleteCall {\n\tc := &ProjectsLocationsDataExchangesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}",
"func TestDelete(t *testing.T) {\n\tlocalStore := NewEventLocalStore()\n\n\teventTest1 := &entities.Event{ID: \"id1\"}\n\n\terr := localStore.Create(eventTest1)\n\tassert.NoError(t, err)\n\n\terr = localStore.Delete(\"id1\")\n\tassert.NoError(t, err)\n\n\t// If Event Not Found\n\terr = localStore.Delete(\"\")\n\tassert.Error(t, err)\n\tassert.Equal(t, err, event.ErrEventNotFound)\n}",
"func (c *Firewall) Delete(e ...interface{}) error {\n\tnames, nErr := toNames(e)\n\treturn c.ns.Delete(\"\", \"\", c.pather(), names, nErr)\n}",
"func (s *baseStore[T, E, TPtr, EPtr]) Delete(ctx context.Context, id int64) error {\n\teventPtr := s.newObjectEvent(ctx, DeleteEvent)\n\teventPtr.SetObjectID(id)\n\treturn s.createObjectEvent(ctx, eventPtr)\n}"
] | [
"0.6356107",
"0.57694995",
"0.5659969",
"0.56342745",
"0.5527379",
"0.5476646",
"0.54259104",
"0.5411367",
"0.53984815",
"0.539605",
"0.5359793",
"0.5313885",
"0.52959067",
"0.5263439",
"0.5223835",
"0.51805806",
"0.51521486",
"0.51521426",
"0.51359683",
"0.5103432",
"0.50982624",
"0.50549585",
"0.50073564",
"0.50058484",
"0.4982527",
"0.4975987",
"0.49668816",
"0.49429354",
"0.49229488",
"0.49039418",
"0.4901857",
"0.4893772",
"0.4891697",
"0.48887163",
"0.48878372",
"0.48802406",
"0.48570284",
"0.48514974",
"0.48361745",
"0.48278424",
"0.4821357",
"0.48189235",
"0.48166552",
"0.4801795",
"0.47981977",
"0.4790594",
"0.47874326",
"0.4782709",
"0.47526518",
"0.47518823",
"0.4746338",
"0.47441825",
"0.4720074",
"0.47182497",
"0.4711298",
"0.4707917",
"0.47029877",
"0.4700136",
"0.469894",
"0.46850818",
"0.46835926",
"0.46814305",
"0.4676738",
"0.46756268",
"0.46699747",
"0.46550468",
"0.46533492",
"0.4650932",
"0.4649804",
"0.4638397",
"0.4635854",
"0.4634693",
"0.4633859",
"0.46337646",
"0.46322948",
"0.46295518",
"0.4628367",
"0.46196073",
"0.46043286",
"0.460052",
"0.45977747",
"0.45920822",
"0.45893013",
"0.45752734",
"0.4575008",
"0.4574601",
"0.45735598",
"0.45698702",
"0.4567352",
"0.45623115",
"0.45610112",
"0.4558743",
"0.45558193",
"0.45525086",
"0.45362085",
"0.45357656",
"0.4528602",
"0.45276424",
"0.45256478",
"0.45251498"
] | 0.703014 | 0 |
Patch applies the patch and returns the patched cloudwatchEventTarget. | Патч применяет патч и возвращает отремонтированный cloudwatchEventTarget. | func (c *FakeCloudwatchEventTargets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.CloudwatchEventTarget, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(cloudwatcheventtargetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.CloudwatchEventTarget{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.CloudwatchEventTarget), err
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c *FakeCloudwatchEventTargets) Update(cloudwatchEventTarget *v1alpha1.CloudwatchEventTarget) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewUpdateAction(cloudwatcheventtargetsResource, c.ns, cloudwatchEventTarget), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}",
"func GetEventTarget(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *EventTargetState, opts ...pulumi.ResourceOption) (*EventTarget, error) {\n\tvar resource EventTarget\n\terr := ctx.ReadResource(\"aws:cloudwatch/eventTarget:EventTarget\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (iface *Iface) patch(dev Patchable) {\n\tiface.patched = dev\n}",
"func NewEventTarget(ctx *pulumi.Context,\n\tname string, args *EventTargetArgs, opts ...pulumi.ResourceOption) (*EventTarget, error) {\n\tif args == nil || args.Arn == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Arn'\")\n\t}\n\tif args == nil || args.Rule == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Rule'\")\n\t}\n\tif args == nil {\n\t\targs = &EventTargetArgs{}\n\t}\n\tvar resource EventTarget\n\terr := ctx.RegisterResource(\"aws:cloudwatch/eventTarget:EventTarget\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func Patch(t testing.TB, dest, value interface{}) {\n\tNew(t).Patch(dest, value)\n}",
"func PatchFunc(target, repl any) *Patch {\n\tassertSameFuncType(target, repl)\n\ttargetVal := reflect.ValueOf(target)\n\treplVal := reflect.ValueOf(repl)\n\treturn patchFunc(targetVal, replVal)\n}",
"func Patch(pkgName, typeName, methodName string, patchFunc interface{}) {\n\t// find addr of the func\n\tsymbolName := getSymbolName(pkgName, typeName, methodName)\n\taddr := symbolTable[symbolName]\n\toriginalBytes := replaceFunction(addr, (uintptr)(getPtr(reflect.ValueOf(patchFunc))))\n\tpatchRecord[addr] = originalBytes\n}",
"func Patch(dest, value interface{}) Restorer {\n\tdestv := reflect.ValueOf(dest).Elem()\n\toldv := reflect.New(destv.Type()).Elem()\n\toldv.Set(destv)\n\tvaluev := reflect.ValueOf(value)\n\tif !valuev.IsValid() {\n\t\t// This isn't quite right when the destination type is not\n\t\t// nilable, but it's better than the complex alternative.\n\t\tvaluev = reflect.Zero(destv.Type())\n\t}\n\tdestv.Set(valuev)\n\treturn func() {\n\t\tdestv.Set(oldv)\n\t}\n}",
"func (c *FakeListeners) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkextensionv1.Listener, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewPatchSubresourceAction(listenersResource, c.ns, name, pt, data, subresources...), &networkextensionv1.Listener{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*networkextensionv1.Listener), err\n}",
"func (c *FakeCloudwatchEventTargets) Get(name string, options v1.GetOptions) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewGetAction(cloudwatcheventtargetsResource, c.ns, name), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}",
"func (c *FakeCloudwatchEventTargets) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(cloudwatcheventtargetsResource, c.ns, opts))\n\n}",
"func (r *ProjectsTraceSinksService) Patch(nameid string, tracesink *TraceSink) *ProjectsTraceSinksPatchCall {\n\tc := &ProjectsTraceSinksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.nameid = nameid\n\tc.tracesink = tracesink\n\treturn c\n}",
"func (c *FakeCloudwatchEventTargets) Create(cloudwatchEventTarget *v1alpha1.CloudwatchEventTarget) (result *v1alpha1.CloudwatchEventTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewCreateAction(cloudwatcheventtargetsResource, c.ns, cloudwatchEventTarget), &v1alpha1.CloudwatchEventTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.CloudwatchEventTarget), err\n}",
"func (s *GenericWatchStorage) Patch(key storage.ObjectKey, patch []byte) error {\n\ts.watcher.Suspend(watcher.FileEventModify)\n\treturn s.Storage.Patch(key, patch)\n}",
"func (p *Patch) Patch() error {\n\tif p == nil {\n\t\treturn errors.New(\"patch is nil\")\n\t}\n\tif err := isPatchable(p.target, p.redirection); err != nil {\n\t\treturn err\n\t}\n\tif err := applyPatch(p); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (r *EventTagsService) Patch(profileId int64, id int64, eventtag *EventTag) *EventTagsPatchCall {\n\tc := &EventTagsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.profileId = profileId\n\tc.urlParams_.Set(\"id\", fmt.Sprint(id))\n\tc.eventtag = eventtag\n\treturn c\n}",
"func (a *APITest) Patch(url string) *Request {\n\ta.request.method = http.MethodPatch\n\ta.request.url = url\n\treturn a.request\n}",
"func (fkw *FakeClientWrapper) Patch(ctx context.Context, obj runtime.Object,\n\tpatch k8sCl.Patch, opts ...k8sCl.PatchOption) error {\n\treturn fkw.client.Patch(ctx, obj, patch, opts...)\n}",
"func (tr *Transport) Patch(url string, fn HandlerFunc, options ...HandlerOption) {\n\ttr.mux.Handler(net_http.MethodPatch, url, encapsulate(fn, tr.options, options))\n}",
"func Patch(route string, do interface{}) *handler {\n\treturn handlerByMethod(&route, do, \"PATCH\")\n}",
"func (c *FakeAWSSNSTargets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AWSSNSTarget, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewPatchSubresourceAction(awssnstargetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.AWSSNSTarget{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.AWSSNSTarget), err\n}",
"func Patch(base []byte, v interface{}) ([]byte, error) {\n\t//Clone struct v, v shoud not be modified\n\trv := reflect.ValueOf(v)\n\tfor rv.Kind() == reflect.Ptr {\n\t\trv = rv.Elem()\n\t}\n\tpv := reflect.New(rv.Type())\n\tpv.Elem().Set(rv)\n\n\tnv := pv.Interface()\n\n\t//unmarshal base\n\ttable, err := toml.Parse(base)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := toml.UnmarshalTable(table, nv); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn Marshal(nv)\n}",
"func (o ExternalMetricSourcePatchOutput) Target() MetricTargetPatchPtrOutput {\n\treturn o.ApplyT(func(v ExternalMetricSourcePatch) *MetricTargetPatch { return v.Target }).(MetricTargetPatchPtrOutput)\n}",
"func patch(c client.Client, opCond *operators.OperatorCondition, newCond meta.Condition) error {\n\tnewCond.LastTransitionTime = meta.Now()\n\tpatchData, err := json.Marshal([]*patcher.JSONPatch{\n\t\tpatcher.NewJSONPatch(\"add\", \"/spec/conditions\", []meta.Condition{newCond})})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to generate patch request body for Condition %v: %w\", newCond, err)\n\t}\n\tif err = c.Patch(context.TODO(), opCond, client.RawPatch(types.JSONPatchType, patchData)); err != nil {\n\t\treturn fmt.Errorf(\"unable to apply patch %s to OperatorCondition %s: %w\", patchData, opCond.GetName(), err)\n\t}\n\treturn nil\n}",
"func (o ObjectMetricSourcePatchOutput) Target() CrossVersionObjectReferencePatchPtrOutput {\n\treturn o.ApplyT(func(v ObjectMetricSourcePatch) *CrossVersionObjectReferencePatch { return v.Target }).(CrossVersionObjectReferencePatchPtrOutput)\n}",
"func (f5 *f5LTM) patch(url string, payload interface{}, result interface{}) error {\n\treturn f5.restRequestPayload(\"PATCH\", url, payload, result)\n}",
"func (client *MockClient) Patch(context ctx.Context, object ctrlClient.Object, patch ctrlClient.Patch, options ...ctrlClient.PatchOption) error {\n\treturn fmt.Errorf(\"Not implemented\")\n}",
"func (o ContainerResourceMetricSourcePatchOutput) Target() MetricTargetPatchPtrOutput {\n\treturn o.ApplyT(func(v ContainerResourceMetricSourcePatch) *MetricTargetPatch { return v.Target }).(MetricTargetPatchPtrOutput)\n}",
"func (o ObjectMetricSourcePatchOutput) Target() MetricTargetPatchPtrOutput {\n\treturn o.ApplyT(func(v ObjectMetricSourcePatch) *MetricTargetPatch { return v.Target }).(MetricTargetPatchPtrOutput)\n}",
"func (ch *CloudwatchHook) GetHook() (func(zapcore.Entry) error, error) {\n\n\tvar cloudwatchWriter = func(e zapcore.Entry) error {\n\t\tif !ch.isAcceptedLevel(e.Level) {\n\t\t\treturn nil\n\t\t}\n\n\t\tevent := &cloudwatchlogs.InputLogEvent{\n\t\t\tMessage: aws.String(fmt.Sprintf(\"[%s] %s\", e.LoggerName, e.Message)),\n\t\t\tTimestamp: aws.Int64(int64(time.Nanosecond) * time.Now().UnixNano() / int64(time.Millisecond)),\n\t\t}\n\t\tparams := &cloudwatchlogs.PutLogEventsInput{\n\t\t\tLogEvents: []*cloudwatchlogs.InputLogEvent{event},\n\t\t\tLogGroupName: aws.String(ch.GroupName),\n\t\t\tLogStreamName: aws.String(ch.StreamName),\n\t\t\tSequenceToken: ch.nextSequenceToken,\n\t\t}\n\n\t\tif ch.Async {\n\t\t\tgo ch.sendEvent(params)\n\t\t\treturn nil\n\t\t}\n\n\t\treturn ch.sendEvent(params)\n\t}\n\n\tch.svc = cloudwatchlogs.New(session.New(ch.AWSConfig))\n\n\tlgresp, err := ch.svc.DescribeLogGroups(&cloudwatchlogs.DescribeLogGroupsInput{LogGroupNamePrefix: aws.String(ch.GroupName), Limit: aws.Int64(1)})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(lgresp.LogGroups) < 1 {\n\t\t// we need to create this log group\n\t\t_, err := ch.svc.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{LogGroupName: aws.String(ch.GroupName)})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tresp, err := ch.svc.DescribeLogStreams(&cloudwatchlogs.DescribeLogStreamsInput{\n\t\tLogGroupName: aws.String(ch.GroupName), // Required\n\t\tLogStreamNamePrefix: aws.String(ch.StreamName),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// grab the next sequence token\n\tif len(resp.LogStreams) > 0 {\n\t\tch.nextSequenceToken = resp.LogStreams[0].UploadSequenceToken\n\t\treturn cloudwatchWriter, nil\n\t}\n\n\t// create stream if it doesn't exist. 
the next sequence token will be null\n\t_, err = ch.svc.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{\n\t\tLogGroupName: aws.String(ch.GroupName),\n\t\tLogStreamName: aws.String(ch.StreamName),\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cloudwatchWriter, nil\n}",
"func (r *Route) Patch(handler http.Handler) *Route {\n\tr.handlers[http.MethodPatch] = handler\n\treturn r\n}",
"func (o *PatchRetryEventUsingPATCHParams) WithHTTPClient(client *http.Client) *PatchRetryEventUsingPATCHParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (b *Builder) PatchOffset(patchedOffset, patch dwarf.Offset) {\n\tinfoBytes := b.info.Bytes()\n\tbuf := new(bytes.Buffer)\n\tbinary.Write(buf, binary.LittleEndian, patch)\n\tcopy(infoBytes[patchedOffset:], buf.Bytes())\n}",
"func (g *Cloud) PatchFirewall(f *compute.Firewall) error {\n\tctx, cancel := cloud.ContextWithCallTimeout()\n\tdefer cancel()\n\n\tmc := newFirewallMetricContext(\"Patch\")\n\treturn mc.Observe(g.c.Firewalls().Patch(ctx, meta.GlobalKey(f.Name), f))\n}",
"func (r *Reconciler) patch(\n\tctx context.Context, object client.Object,\n\tpatch client.Patch, options ...client.PatchOption,\n) error {\n\toptions = append([]client.PatchOption{r.Owner}, options...)\n\treturn r.Client.Patch(ctx, object, patch, options...)\n}",
"func (o ResourceMetricSourcePatchOutput) Target() MetricTargetPatchPtrOutput {\n\treturn o.ApplyT(func(v ResourceMetricSourcePatch) *MetricTargetPatch { return v.Target }).(MetricTargetPatchPtrOutput)\n}",
"func (o ObjectMetricStatusPatchOutput) Target() CrossVersionObjectReferencePatchPtrOutput {\n\treturn o.ApplyT(func(v ObjectMetricStatusPatch) *CrossVersionObjectReferencePatch { return v.Target }).(CrossVersionObjectReferencePatchPtrOutput)\n}",
"func getProxyUpdateEvent(msg events.PubSubMessage) *proxyUpdateEvent {\n\tswitch msg.Kind {\n\tcase\n\t\t//\n\t\t// K8s native resource events\n\t\t//\n\t\t// Endpoint event\n\t\tannouncements.EndpointAdded, announcements.EndpointDeleted, announcements.EndpointUpdated,\n\t\t// k8s Ingress event\n\t\tannouncements.IngressAdded, announcements.IngressDeleted, announcements.IngressUpdated,\n\t\t//\n\t\t// OSM resource events\n\t\t//\n\t\t// Egress event\n\t\tannouncements.EgressAdded, announcements.EgressDeleted, announcements.EgressUpdated,\n\t\t// IngressBackend event\n\t\tannouncements.IngressBackendAdded, announcements.IngressBackendDeleted, announcements.IngressBackendUpdated,\n\t\t// MulticlusterService event\n\t\tannouncements.MultiClusterServiceAdded, announcements.MultiClusterServiceDeleted, announcements.MultiClusterServiceUpdated,\n\t\t//\n\t\t// SMI resource events\n\t\t//\n\t\t// SMI HTTPRouteGroup event\n\t\tannouncements.RouteGroupAdded, announcements.RouteGroupDeleted, announcements.RouteGroupUpdated,\n\t\t// SMI TCPRoute event\n\t\tannouncements.TCPRouteAdded, announcements.TCPRouteDeleted, announcements.TCPRouteUpdated,\n\t\t// SMI TrafficSplit event\n\t\tannouncements.TrafficSplitAdded, announcements.TrafficSplitDeleted, announcements.TrafficSplitUpdated,\n\t\t// SMI TrafficTarget event\n\t\tannouncements.TrafficTargetAdded, announcements.TrafficTargetDeleted, announcements.TrafficTargetUpdated,\n\t\t//\n\t\t// Proxy events\n\t\t//\n\t\tannouncements.ProxyUpdate:\n\t\treturn &proxyUpdateEvent{\n\t\t\tmsg: msg,\n\t\t\ttopic: announcements.ProxyUpdate.String(),\n\t\t}\n\n\tcase announcements.MeshConfigUpdated:\n\t\tprevMeshConfig, okPrevCast := msg.OldObj.(*v1alpha1.MeshConfig)\n\t\tnewMeshConfig, okNewCast := msg.NewObj.(*v1alpha1.MeshConfig)\n\t\tif !okPrevCast || !okNewCast {\n\t\t\tlog.Error().Msgf(\"Expected MeshConfig type, got previous=%T, new=%T\", okPrevCast, okNewCast)\n\t\t\treturn nil\n\t\t}\n\n\t\tprevSpec := 
prevMeshConfig.Spec\n\t\tnewSpec := newMeshConfig.Spec\n\t\t// A proxy config update must only be triggered when a MeshConfig field that maps to a proxy config\n\t\t// changes.\n\t\tif prevSpec.Traffic.EnableEgress != newSpec.Traffic.EnableEgress ||\n\t\t\tprevSpec.Traffic.EnablePermissiveTrafficPolicyMode != newSpec.Traffic.EnablePermissiveTrafficPolicyMode ||\n\t\t\tprevSpec.Observability.Tracing != newSpec.Observability.Tracing ||\n\t\t\tprevSpec.Traffic.InboundExternalAuthorization.Enable != newSpec.Traffic.InboundExternalAuthorization.Enable ||\n\t\t\t// Only trigger an update on InboundExternalAuthorization field changes if the new spec has the 'Enable' flag set to true.\n\t\t\t(newSpec.Traffic.InboundExternalAuthorization.Enable && (prevSpec.Traffic.InboundExternalAuthorization != newSpec.Traffic.InboundExternalAuthorization)) ||\n\t\t\tprevSpec.FeatureFlags != newSpec.FeatureFlags {\n\t\t\treturn &proxyUpdateEvent{\n\t\t\t\tmsg: msg,\n\t\t\t\ttopic: announcements.ProxyUpdate.String(),\n\t\t\t}\n\t\t}\n\t\treturn nil\n\n\tcase announcements.PodUpdated:\n\t\t// Only trigger a proxy update for proxies associated with this pod based on the proxy UUID\n\t\tprevPod, okPrevCast := msg.OldObj.(*corev1.Pod)\n\t\tnewPod, okNewCast := msg.NewObj.(*corev1.Pod)\n\t\tif !okPrevCast || !okNewCast {\n\t\t\tlog.Error().Msgf(\"Expected *Pod type, got previous=%T, new=%T\", okPrevCast, okNewCast)\n\t\t\treturn nil\n\t\t}\n\t\tprevMetricAnnotation := prevPod.Annotations[constants.PrometheusScrapeAnnotation]\n\t\tnewMetricAnnotation := newPod.Annotations[constants.PrometheusScrapeAnnotation]\n\t\tif prevMetricAnnotation != newMetricAnnotation {\n\t\t\tproxyUUID := newPod.Labels[constants.EnvoyUniqueIDLabelName]\n\t\t\treturn &proxyUpdateEvent{\n\t\t\t\tmsg: msg,\n\t\t\t\ttopic: GetPubSubTopicForProxyUUID(proxyUUID),\n\t\t\t}\n\t\t}\n\t\treturn nil\n\n\tdefault:\n\t\treturn nil\n\t}\n}",
"func (r *FakeClient) Patch(\n\tctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption,\n) error {\n\t// TODO (covariance) implement me!\n\tpanic(\"not implemented\")\n}",
"func (o HttpRuleOutput) Patch() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v HttpRule) *string { return v.Patch }).(pulumi.StringPtrOutput)\n}",
"func Patch(visitor ast.Visitor) Option {\n\treturn func(c *conf.Config) {\n\t\tc.Visitors = append(c.Visitors, visitor)\n\t}\n}",
"func (a *App) Patch(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"patching in testing mode. Get ready to send multipart-form data\"))\n}",
"func (router *Router) Patch(path string, handler Handle) *Router {\n\trouter.Mux.PATCH(path, handleProxy(handler))\n\treturn router\n}",
"func (o ExternalMetricSourcePatchPtrOutput) Target() MetricTargetPatchPtrOutput {\n\treturn o.ApplyT(func(v *ExternalMetricSourcePatch) *MetricTargetPatch {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Target\n\t}).(MetricTargetPatchPtrOutput)\n}",
"func patch(opts *tf.ContextOpts) (*tf.State, error) {\n\tif opts.Destroy {\n\t\t// Need walkDestroy to implement this\n\t\tpanic(\"tfx: patch does not support pure destroy operations\")\n\t}\n\n\t// Create context with a copy of the original state\n\torig, state := opts.State, opts.State.DeepCopy()\n\topts.State = state\n\tc, err := tf.NewContext(opts)\n\tif opts.State = orig; err != nil {\n\t\treturn nil, err\n\t}\n\n\t// HACK: Get contextComponentFactory\n\tcomps := (&tf.ContextGraphWalker{Context: c}).\n\t\tEnterPath(tf.RootModulePath).(*tf.BuiltinEvalContext).Components\n\n\t// Build patch graph\n\tgraph, err := (&patchGraphBuilder{tf.ApplyGraphBuilder{\n\t\tDiff: opts.Diff,\n\t\tState: state,\n\t\tProviders: comps.ResourceProviders(),\n\t\tProvisioners: comps.ResourceProvisioners(),\n\t\tTargets: opts.Targets,\n\t\tDestroy: opts.Destroy,\n\t\tValidate: true,\n\t}}).Build(tf.RootModulePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// HACK: Get walkApply value\n\twalkApplyOnce.Do(func() {\n\t\tvar c tf.Context // Avoid deep copy of the real state\n\t\twalkApply.Operation = c.Interpolater().Operation\n\t})\n\n\t// Walk the graph\n\tw := &patchGraphWalker{ContextGraphWalker: tf.ContextGraphWalker{\n\t\tContext: c,\n\t\tOperation: walkApply.Operation,\n\t\tStopContext: context.Background(),\n\t}}\n\tif err = graph.Walk(w); len(w.ValidationErrors) > 0 {\n\t\terr = multierror.Append(err, w.ValidationErrors...)\n\t}\n\n\t// Stop providers and provisioners\n\tfor _, p := range w.rootCtx.ProviderCache {\n\t\tp.Stop()\n\t}\n\tfor _, p := range w.rootCtx.ProvisionerCache {\n\t\tp.Stop()\n\t}\n\treturn state, err\n}",
"func Struct(target, patch interface{}) (changed bool, err error) {\n\n\tvar dst = structs.New(target)\n\tvar fields = structs.New(patch).Fields() // work stack\n\n\tfor N := len(fields); N > 0; N = len(fields) {\n\t\tvar srcField = fields[N-1] // pop the top\n\t\tfields = fields[:N-1]\n\n\t\tif !srcField.IsExported() {\n\t\t\tcontinue // skip unexported fields\n\t\t}\n\t\tif srcField.IsEmbedded() {\n\t\t\t// add the embedded fields into the work stack\n\t\t\tfields = append(fields, srcField.Fields()...)\n\t\t\tcontinue\n\t\t}\n\t\tif srcField.IsZero() {\n\t\t\tcontinue // skip zero-value fields\n\t\t}\n\n\t\tvar name = srcField.Name()\n\n\t\tvar dstField, ok = dst.FieldOk(name)\n\t\tif !ok {\n\t\t\tcontinue // skip non-existing fields\n\t\t}\n\t\tvar srcValue = reflect.ValueOf(srcField.Value())\n\t\tsrcValue = reflect.Indirect(srcValue)\n\t\tif skind, dkind := srcValue.Kind(), dstField.Kind(); skind != dkind {\n\t\t\terr = fmt.Errorf(\"field `%v` types mismatch while patching: %v vs %v\", name, dkind, skind)\n\t\t\treturn\n\t\t}\n\n\t\tif !reflect.DeepEqual(srcValue.Interface(), dstField.Value()) {\n\t\t\tchanged = true\n\t\t}\n\n\t\terr = dstField.Set(srcValue.Interface())\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}",
"func (this *Route) PATCH(handlers ...HTTPHandler) *Route {\n\treturn this.addHandler(\"patch\", handlers...)\n}",
"func ApplyPatch(cur, patch io.Reader) (io.Reader, error) {\n\tbuf := &bytes.Buffer{}\n\terr := binarydist.Patch(cur, buf, patch)\n\n\treturn buf, err\n}",
"func Patch(path string, fn http.HandlerFunc, c ...alice.Constructor) {\n\trecord(\"PATCH\", path)\n\n\tinfoMutex.Lock()\n\tr.PATCH(path, Handler(alice.New(c...).ThenFunc(fn)))\n\tinfoMutex.Unlock()\n}",
"func modifiedEvent(key model.Key) api.WatchEvent {\n\treturn api.WatchEvent{\n\t\tType: api.WatchModified,\n\t\tNew: &model.KVPair{\n\t\t\tKey: key,\n\t\t\tValue: uuid.NewString(),\n\t\t\tRevision: uuid.NewString(),\n\t\t},\n\t}\n}",
"func Patch(expectations []*RunExpectation) func(t *testing.T) {\n\t_mocker = &runMocker{\n\t\texpectations: expectations,\n\t\tptr: 0,\n\t\tlength: len(expectations),\n\t}\n\n\treturn func(t *testing.T) {\n\t\tif expectation := _mocker.expectation(); expectation != nil {\n\t\t\tt.Errorf(\"execkit-mock: missing call: %v\", expectation)\n\t\t\tt.FailNow()\n\t\t}\n\t\t_mocker = nil\n\t}\n}",
"func (o PodsMetricSourcePatchOutput) Target() MetricTargetPatchPtrOutput {\n\treturn o.ApplyT(func(v PodsMetricSourcePatch) *MetricTargetPatch { return v.Target }).(MetricTargetPatchPtrOutput)\n}",
"func (p *Patch) Patch() {\n\tp.patched = true\n\tif p.funcInfo != nil {\n\t\tp.applyFunc()\n\t} else if p.varInfo != nil {\n\t\tp.applyVar()\n\t}\n}",
"func (o ContainerResourceMetricSourcePatchPtrOutput) Target() MetricTargetPatchPtrOutput {\n\treturn o.ApplyT(func(v *ContainerResourceMetricSourcePatch) *MetricTargetPatch {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Target\n\t}).(MetricTargetPatchPtrOutput)\n}",
"func RawPatch(patchType types.PatchType, data []byte) Patch {\n\treturn &patch{patchType, data}\n}",
"func (client MockStatusClient) Patch(context ctx.Context, object ctrlClient.Object, patch ctrlClient.Patch, options ...ctrlClient.PatchOption) error {\n\treturn fmt.Errorf(\"not implemented\")\n}",
"func (o ObjectMetricSourcePatchPtrOutput) Target() MetricTargetPatchPtrOutput {\n\treturn o.ApplyT(func(v *ObjectMetricSourcePatch) *MetricTargetPatch {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Target\n\t}).(MetricTargetPatchPtrOutput)\n}",
"func Patch() int {\n\treturn patch\n}",
"func (c *FakeAwsIamGroupPolicyAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *aws_v1.AwsIamGroupPolicyAttachment, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewPatchSubresourceAction(awsiamgrouppolicyattachmentsResource, c.ns, name, data, subresources...), &aws_v1.AwsIamGroupPolicyAttachment{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*aws_v1.AwsIamGroupPolicyAttachment), err\n}",
"func NewPatchRetryEventUsingPATCHParamsWithHTTPClient(client *http.Client) *PatchRetryEventUsingPATCHParams {\n\tvar ()\n\treturn &PatchRetryEventUsingPATCHParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *PatchRetryEventUsingPATCHParams) WithTimeout(timeout time.Duration) *PatchRetryEventUsingPATCHParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (c *awsOrganizationsPolicyAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.AwsOrganizationsPolicyAttachment, err error) {\n\tresult = &v1.AwsOrganizationsPolicyAttachment{}\n\terr = c.client.Patch(pt).\n\t\tNamespace(c.ns).\n\t\tResource(\"awsorganizationspolicyattachments\").\n\t\tSubResource(subresources...).\n\t\tName(name).\n\t\tBody(data).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}",
"func (c *FakeAwsApiGatewayVpcLinks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *aws_v1.AwsApiGatewayVpcLink, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewPatchSubresourceAction(awsapigatewayvpclinksResource, c.ns, name, data, subresources...), &aws_v1.AwsApiGatewayVpcLink{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*aws_v1.AwsApiGatewayVpcLink), err\n}",
"func (_this *IntersectionObserverEntry) Target() *dom.Element {\n\tvar ret *dom.Element\n\tvalue := _this.Value_JS.Get(\"target\")\n\tret = dom.ElementFromJS(value)\n\treturn ret\n}",
"func (e *Entity) Patch(uri string, payload interface{}) error {\n\theader := make(map[string]string)\n\tif e.etag != \"\" {\n\t\theader[\"If-Match\"] = e.etag\n\t}\n\n\tresp, err := e.client.PatchWithHeaders(uri, payload, header)\n\tif err == nil {\n\t\treturn resp.Body.Close()\n\t}\n\treturn err\n}",
"func patchResource(mapping *meta.RESTMapping, config *rest.Config, group string,\n\tversion string, namespace string, data []byte) error {\n\trestClient, err := getRESTClient(config, group, version)\n\tif err != nil {\n\t\treturn &kfapis.KfError{\n\t\t\tCode: int(kfapis.INVALID_ARGUMENT),\n\t\t\tMessage: fmt.Sprintf(\"patchResource error: %v\", err),\n\t\t}\n\t}\n\n\tif _, err = restClient.\n\t\tPatch(k8stypes.JSONPatchType).\n\t\tResource(mapping.Resource.Resource).\n\t\tNamespaceIfScoped(namespace, mapping.Scope.Name() == \"namespace\").\n\t\tBody(data).\n\t\tDo().\n\t\tGet(); err == nil {\n\t\treturn nil\n\t} else {\n\t\treturn &kfapis.KfError{\n\t\t\tCode: int(kfapis.INVALID_ARGUMENT),\n\t\t\tMessage: fmt.Sprintf(\"patchResource error: %v\", err),\n\t\t}\n\t}\n}",
"func (o ObjectMetricSourcePatchPtrOutput) Target() CrossVersionObjectReferencePatchPtrOutput {\n\treturn o.ApplyT(func(v *ObjectMetricSourcePatch) *CrossVersionObjectReferencePatch {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Target\n\t}).(CrossVersionObjectReferencePatchPtrOutput)\n}",
"func mutatingWatcherFor(source watch.Interface, mutator func(runtime.Object) error) watch.Interface {\n\tw := mutatingWatcher{\n\t\tmutator: mutator,\n\t\tsource: source,\n\t\toutput: make(chan watch.Event),\n\t\twg: &sync.WaitGroup{},\n\t}\n\tw.wg.Add(1)\n\tgo func(input <-chan watch.Event, output chan<- watch.Event) {\n\t\tdefer w.wg.Done()\n\t\tfor event := range input {\n\t\t\tif err := mutator(event.Object); err != nil {\n\t\t\t\toutput <- watch.Event{\n\t\t\t\t\tType: watch.Error,\n\t\t\t\t\tObject: &errors.NewInternalError(fmt.Errorf(\"failed to mutate object in watch event: %v\", err)).ErrStatus,\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\toutput <- event\n\t\t\t}\n\t\t}\n\t}(source.ResultChan(), w.output)\n\treturn &w\n}",
"func (client ApplicationsClient) PatchResponder(resp *http.Response) (result autorest.Response, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),\n\t\tautorest.ByClosing())\n\tresult.Response = resp\n\treturn\n}",
"func EventOriginal(val string) zap.Field {\n\treturn zap.String(FieldEventOriginal, val)\n}",
"func (r *volumeReactor) modifyVolumeEvent(volume *v1.PersistentVolume) {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tr.volumes[volume.Name] = volume\n\t// Generate deletion event. Cloned volume is needed to prevent races (and we\n\t// would get a clone from etcd too).\n\tif r.fakeVolumeWatch != nil {\n\t\tclone, _ := api.Scheme.DeepCopy(volume)\n\t\tvolumeClone := clone.(*v1.PersistentVolume)\n\t\tr.fakeVolumeWatch.Modify(volumeClone)\n\t}\n}",
"func (o ObjectMetricStatusPatchPtrOutput) Target() CrossVersionObjectReferencePatchPtrOutput {\n\treturn o.ApplyT(func(v *ObjectMetricStatusPatch) *CrossVersionObjectReferencePatch {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Target\n\t}).(CrossVersionObjectReferencePatchPtrOutput)\n}",
"func (o ResourceMetricSourcePatchPtrOutput) Target() MetricTargetPatchPtrOutput {\n\treturn o.ApplyT(func(v *ResourceMetricSourcePatch) *MetricTargetPatch {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Target\n\t}).(MetricTargetPatchPtrOutput)\n}",
"func patchEtcd(ctx context.Context, log *logrus.Entry, etcdcli operatorv1client.EtcdInterface, e *operatorv1.Etcd, patch string) error {\n\tlog.Infof(\"Preparing to patch etcd %s with %s\", e.Name, patch)\n\t// must be removed to force redeployment\n\te.CreationTimestamp = metav1.Time{\n\t\tTime: time.Now(),\n\t}\n\te.ResourceVersion = \"\"\n\te.UID = \"\"\n\n\tbuf := &bytes.Buffer{}\n\terr := codec.NewEncoder(buf, &codec.JsonHandle{}).Encode(e)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = etcdcli.Patch(ctx, e.Name, types.MergePatchType, buf.Bytes(), metav1.PatchOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Infof(\"Patched etcd %s with %s\", e.Name, patch)\n\n\treturn nil\n}",
"func Watch(ctx context.Context, i v1.PodInterface, podFilter *regexp.Regexp,\n\tcontainerFilter *regexp.Regexp, containerExcludeFilter *regexp.Regexp,\n\tcontainerState ContainerState, labelSelector labels.Selector) (chan *Target, chan *Target, error) {\n\n\tlogger := requestctx.Logger(ctx).WithName(\"pod-watch\").V(4)\n\n\tlogger.Info(\"create\")\n\twatcher, err := i.Watch(ctx, metav1.ListOptions{Watch: true, LabelSelector: labelSelector.String()})\n\tif err != nil {\n\t\tfmt.Printf(\"err.Error() = %+v\\n\", err.Error())\n\t\treturn nil, nil, errors.Wrap(err, \"failed to set up watch\")\n\t}\n\n\tadded := make(chan *Target)\n\tremoved := make(chan *Target)\n\n\tgo func() {\n\t\tlogger.Info(\"await events\")\n\t\tdefer func() {\n\t\t\tlogger.Info(\"event processing ends\")\n\t\t}()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase e := <-watcher.ResultChan():\n\t\t\t\tlogger.Info(\"received event\")\n\n\t\t\t\tif e.Object == nil {\n\t\t\t\t\tlogger.Info(\"event error, no object\")\n\t\t\t\t\t// Closed because of error\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tpod, ok := e.Object.(*corev1.Pod)\n\t\t\t\tif !ok {\n\t\t\t\t\tlogger.Info(\"event error, object not a pod\")\n\t\t\t\t\t// Not a Pod\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif !podFilter.MatchString(pod.Name) {\n\t\t\t\t\tlogger.Info(\"filtered\", \"pod\", pod.Name, \"filter\", podFilter.String())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tswitch e.Type {\n\t\t\t\tcase watch.Added, watch.Modified:\n\t\t\t\t\tlogger.Info(\"pod added/modified\", \"name\", pod.Name)\n\n\t\t\t\t\tvar statuses []corev1.ContainerStatus\n\t\t\t\t\tstatuses = append(statuses, pod.Status.InitContainerStatuses...)\n\t\t\t\t\tstatuses = append(statuses, pod.Status.ContainerStatuses...)\n\n\t\t\t\t\tfor _, c := range statuses {\n\t\t\t\t\t\tif !containerFilter.MatchString(c.Name) {\n\t\t\t\t\t\t\tlogger.Info(\"filtered\", \"container\", c.Name, \"filter\", containerFilter.String())\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif 
containerExcludeFilter != nil && containerExcludeFilter.MatchString(c.Name) {\n\t\t\t\t\t\t\tlogger.Info(\"excluded\", \"container\", c.Name, \"exclude-filter\", containerExcludeFilter.String())\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif c.State.Running != nil || c.State.Terminated != nil { // There are logs to read\n\t\t\t\t\t\t\tlogger.Info(\"report added\", \"container\", c.Name, \"pod\", pod.Name, \"namespace\", pod.Namespace)\n\t\t\t\t\t\t\tadded <- &Target{\n\t\t\t\t\t\t\t\tNamespace: pod.Namespace,\n\t\t\t\t\t\t\t\tPod: pod.Name,\n\t\t\t\t\t\t\t\tContainer: c.Name,\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase watch.Deleted:\n\t\t\t\t\tlogger.Info(\"pod deleted\", \"name\", pod.Name)\n\n\t\t\t\t\tvar containers []corev1.Container\n\t\t\t\t\tcontainers = append(containers, pod.Spec.Containers...)\n\t\t\t\t\tcontainers = append(containers, pod.Spec.InitContainers...)\n\n\t\t\t\t\tfor _, c := range containers {\n\t\t\t\t\t\tif !containerFilter.MatchString(c.Name) {\n\t\t\t\t\t\t\tlogger.Info(\"filtered\", \"container\", c.Name, \"filter\", containerFilter.String())\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif containerExcludeFilter != nil && containerExcludeFilter.MatchString(c.Name) {\n\t\t\t\t\t\t\tlogger.Info(\"excluded\", \"container\", c.Name, \"exclude-filter\", containerExcludeFilter.String())\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tlogger.Info(\"report removed\", \"container\", c.Name, \"pod\", pod.Name, \"namespace\", pod.Namespace)\n\t\t\t\t\t\tremoved <- &Target{\n\t\t\t\t\t\t\tNamespace: pod.Namespace,\n\t\t\t\t\t\t\tPod: pod.Name,\n\t\t\t\t\t\t\tContainer: c.Name,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase <-ctx.Done():\n\t\t\t\tlogger.Info(\"received stop request\")\n\t\t\t\twatcher.Stop()\n\t\t\t\tclose(added)\n\t\t\t\tclose(removed)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tlogger.Info(\"pass watch report channels\")\n\treturn added, removed, nil\n}",
"func (node *Node) PATCH(functions ...interface{}) *Handler {\n\n\thandler := &Handler{}\n\n\tif len(functions) > 0 { handler.function = functions[0].(func (req web.RequestInterface) *web.ResponseStatus) }\n\n\tnode.addHandler(\"PATCH\", handler)\n\n\treturn handler\n}",
"func (x *fastReflection_ValidatorSlashEventRecord) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {\n\tswitch fd.FullName() {\n\tcase \"cosmos.distribution.v1beta1.ValidatorSlashEventRecord.validator_slash_event\":\n\t\tif x.ValidatorSlashEvent == nil {\n\t\t\tx.ValidatorSlashEvent = new(ValidatorSlashEvent)\n\t\t}\n\t\treturn protoreflect.ValueOfMessage(x.ValidatorSlashEvent.ProtoReflect())\n\tcase \"cosmos.distribution.v1beta1.ValidatorSlashEventRecord.validator_address\":\n\t\tpanic(fmt.Errorf(\"field validator_address of message cosmos.distribution.v1beta1.ValidatorSlashEventRecord is not mutable\"))\n\tcase \"cosmos.distribution.v1beta1.ValidatorSlashEventRecord.height\":\n\t\tpanic(fmt.Errorf(\"field height of message cosmos.distribution.v1beta1.ValidatorSlashEventRecord is not mutable\"))\n\tcase \"cosmos.distribution.v1beta1.ValidatorSlashEventRecord.period\":\n\t\tpanic(fmt.Errorf(\"field period of message cosmos.distribution.v1beta1.ValidatorSlashEventRecord is not mutable\"))\n\tdefault:\n\t\tif fd.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: cosmos.distribution.v1beta1.ValidatorSlashEventRecord\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message cosmos.distribution.v1beta1.ValidatorSlashEventRecord does not contain field %s\", fd.FullName()))\n\t}\n}",
"func (m *MobileAppTroubleshootingEventsMobileAppTroubleshootingEventItemRequestBuilder) Patch(ctx context.Context, body iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.MobileAppTroubleshootingEventable, requestConfiguration *MobileAppTroubleshootingEventsMobileAppTroubleshootingEventItemRequestBuilderPatchRequestConfiguration)(iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.MobileAppTroubleshootingEventable, error) {\n requestInfo, err := m.ToPatchRequestInformation(ctx, body, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.CreateMobileAppTroubleshootingEventFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.MobileAppTroubleshootingEventable), nil\n}",
"func (c *FakeRuleEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rulesv1.RuleEndpoint, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewPatchSubresourceAction(ruleendpointsResource, c.ns, name, pt, data, subresources...), &rulesv1.RuleEndpoint{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*rulesv1.RuleEndpoint), err\n}",
"func (r *OrganizationsEnvironmentsTraceConfigOverridesService) Patch(name string, googlecloudapigeev1traceconfigoverride *GoogleCloudApigeeV1TraceConfigOverride) *OrganizationsEnvironmentsTraceConfigOverridesPatchCall {\n\tc := &OrganizationsEnvironmentsTraceConfigOverridesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\tc.googlecloudapigeev1traceconfigoverride = googlecloudapigeev1traceconfigoverride\n\treturn c\n}",
"func NewPatch() *Patch {\n\treturn &Patch{\n\t\tpatchOps: []*patchOp{},\n\t}\n}",
"func (r *versionResolver) Patch(ctx context.Context, obj *restModel.APIVersion) (*restModel.APIPatch, error) {\n\tif !evergreen.IsPatchRequester(*obj.Requester) {\n\t\treturn nil, nil\n\t}\n\tapiPatch, err := data.FindPatchById(*obj.Id)\n\tif err != nil {\n\t\treturn nil, InternalServerError.Send(ctx, fmt.Sprintf(\"Couldn't find a patch with id '%s': %s\", *obj.Id, err.Error()))\n\t}\n\treturn apiPatch, nil\n}",
"func PatchMethodByReflect(target reflect.Method, redirection interface{}) (*Patch, error) {\n\treturn PatchMethodByReflectValue(target.Func, redirection)\n}",
"func (this WebhookFunc) Handle(log logger.LogContext, version string, obj runtime.RawExtension) (runtime.Object, error) {\n\treturn this(log, version, obj)\n}",
"func (c *FakeKubeletConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *machineconfigurationopenshiftiov1.KubeletConfig, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewRootPatchSubresourceAction(kubeletconfigsResource, name, pt, data, subresources...), &machineconfigurationopenshiftiov1.KubeletConfig{})\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*machineconfigurationopenshiftiov1.KubeletConfig), err\n}",
"func (h *HttpHandlerFactory) NewUpdateHandler(mutator WebhookUpdateHandler) http.HandlerFunc {\n\tmutateFunc := func(ctx context.Context, review v1beta1.AdmissionReview) ([]PatchOperation, error) {\n\t\t// Decode the new updated CR from the request.\n\t\tobject, err := mutator.Decode(review.Request.Object)\n\t\tif err != nil {\n\t\t\treturn nil, microerror.Mask(err)\n\t\t}\n\n\t\townerClusterGetter := func(objectMeta metav1.ObjectMetaAccessor) (capi.Cluster, bool, error) {\n\t\t\townerCluster, ok, err := generic.TryGetOwnerCluster(ctx, h.ctrlClient, object)\n\t\t\tif err != nil {\n\t\t\t\treturn capi.Cluster{}, false, microerror.Mask(err)\n\t\t\t}\n\n\t\t\treturn ownerCluster, ok, nil\n\t\t}\n\n\t\t// Check if the CR should be mutated by the azure-admission-controller.\n\t\tok, err := filter.IsObjectReconciledByLegacyRelease(ctx, h.logger, h.ctrlReader, object, ownerClusterGetter)\n\t\tif err != nil {\n\t\t\treturn nil, microerror.Mask(err)\n\t\t}\n\n\t\tvar patch []PatchOperation\n\n\t\tif ok {\n\t\t\t// Decode the old CR from the request (before the update).\n\t\t\toldObject, err := mutator.Decode(review.Request.OldObject)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, microerror.Mask(err)\n\t\t\t}\n\n\t\t\t// Mutate the CR and get patch for those mutations.\n\t\t\tpatch, err = mutator.OnUpdateMutate(ctx, oldObject, object)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, microerror.Mask(err)\n\t\t\t}\n\t\t}\n\n\t\treturn patch, nil\n\t}\n\n\treturn h.newHttpHandler(mutator, mutateFunc)\n}",
"func Patch(path string, fn http.HandlerFunc, c ...alice.Constructor) {\n\tinfoMutex.Lock()\n\trecord(\"PATCH\", path)\n\tr.Patch(path, alice.New(c...).ThenFunc(fn).(http.HandlerFunc))\n\tinfoMutex.Unlock()\n}",
"func (s *externalServiceClient) Patch(o *ExternalService, patchType types.PatchType, data []byte, subresources ...string) (*ExternalService, error) {\n\tobj, err := s.objectClient.Patch(o.Name, o, patchType, data, subresources...)\n\treturn obj.(*ExternalService), err\n}",
"func PatchMethodByReflectValue(target reflect.Value, redirection interface{}) (*Patch, error) {\n\ttValue := &target\n\trValue := getValueFrom(redirection)\n\tif err := isPatchable(tValue, &rValue); err != nil {\n\t\treturn nil, err\n\t}\n\tpatch := &Patch{target: tValue, redirection: &rValue}\n\tif err := applyPatch(patch); err != nil {\n\t\treturn nil, err\n\t}\n\treturn patch, nil\n}",
"func PatchMethodWithMakeFuncValue(target reflect.Value, fn func(args []reflect.Value) (results []reflect.Value)) (*Patch, error) {\n\treturn PatchMethodByReflectValue(target, reflect.MakeFunc(target.Type(), fn))\n}",
"func (patchwork *Patchwork) Patch(p func(repo github.Repository, directory string)) {\n\tpatchwork.patch = p\n}",
"func (o PodsMetricSourcePatchPtrOutput) Target() MetricTargetPatchPtrOutput {\n\treturn o.ApplyT(func(v *PodsMetricSourcePatch) *MetricTargetPatch {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Target\n\t}).(MetricTargetPatchPtrOutput)\n}",
"func SchedulePatch(ctx context.Context, env evergreen.Environment, patchId string, version *model.Version, patchUpdateReq model.PatchUpdate) (int, error) {\n\tvar err error\n\tp, err := patch.FindOneId(patchId)\n\tif err != nil {\n\t\treturn http.StatusInternalServerError, errors.Wrapf(err, \"loading patch '%s'\", patchId)\n\t}\n\tif p == nil {\n\t\treturn http.StatusBadRequest, errors.Errorf(\"patch '%s' not found\", patchId)\n\t}\n\n\tif p.IsCommitQueuePatch() {\n\t\treturn http.StatusBadRequest, errors.New(\"can't schedule commit queue patch\")\n\t}\n\tprojectRef, err := model.FindMergedProjectRef(p.Project, p.Version, true)\n\tif err != nil {\n\t\treturn http.StatusInternalServerError, errors.Wrapf(err, \"finding project ref '%s' for version '%s'\", p.Project, p.Version)\n\t}\n\tif projectRef == nil {\n\t\treturn http.StatusInternalServerError, errors.Errorf(\"project '%s' for version '%s' not found\", p.Project, p.Version)\n\t}\n\n\tstatusCode, err := model.ConfigurePatch(ctx, env.Settings(), p, version, projectRef, patchUpdateReq)\n\tif err != nil {\n\t\treturn statusCode, err\n\t}\n\tif p.Version != \"\" { // if the version already exists, no more to do\n\t\treturn http.StatusOK, nil\n\t}\n\n\t// create a separate context from the one the caller has so that the caller\n\t// can't interrupt the db operations here\n\tnewCxt := context.Background()\n\t// Process additional patch trigger aliases added via UI.\n\t// Child patches created with the CLI --trigger-alias flag go through a separate flow, so ensure that new child patches are also created before the parent is finalized.\n\tif err := ProcessTriggerAliases(ctx, p, projectRef, env, patchUpdateReq.PatchTriggerAliases); err != nil {\n\t\treturn http.StatusInternalServerError, errors.Wrap(err, \"processing patch trigger aliases\")\n\t}\n\tif len(patchUpdateReq.PatchTriggerAliases) > 0 {\n\t\tp.Triggers.Aliases = patchUpdateReq.PatchTriggerAliases\n\t\tif err = p.SetTriggerAliases(); err != nil {\n\t\t\treturn 
http.StatusInternalServerError, errors.Wrapf(err, \"attaching trigger aliases '%s'\", p.Id.Hex())\n\t\t}\n\t}\n\t_, err = model.FinalizePatch(newCxt, p, p.GetRequester(), \"\")\n\tif err != nil {\n\t\treturn http.StatusInternalServerError, errors.Wrap(err, \"finalizing patch\")\n\t}\n\n\tif p.IsGithubPRPatch() {\n\t\tjob := NewGithubStatusUpdateJobForNewPatch(p.Id.Hex())\n\t\tif err := evergreen.GetEnvironment().LocalQueue().Put(newCxt, job); err != nil {\n\t\t\treturn http.StatusInternalServerError, errors.Wrap(err, \"adding GitHub status update job to queue\")\n\t\t}\n\t}\n\treturn http.StatusOK, nil\n}",
"func mutate(newObj runtime.Object) (admission.PatchOps, error) {\n\tsupportBundle := newObj.(*longhorn.SupportBundle)\n\tvar patchOps admission.PatchOps\n\n\tpatchOp, err := common.GetLonghornFinalizerPatchOpIfNeeded(supportBundle)\n\tif err != nil {\n\t\terr := errors.Wrapf(err, \"failed to get finalizer patch for supportBundle %v\", supportBundle.Name)\n\t\treturn nil, werror.NewInvalidError(err.Error(), \"\")\n\t}\n\tif patchOp != \"\" {\n\t\tpatchOps = append(patchOps, patchOp)\n\t}\n\n\treturn patchOps, nil\n}",
"func (l *Library) Patch(src *Library) {\n\tif src.Name != \"\" {\n\t\tl.Name = src.Name\n\t}\n\tif src.Description != \"\" {\n\t\tl.Description = src.Description\n\t}\n\tif src.Version != \"\" {\n\t\tl.Version = src.Version\n\t}\n}",
"func (r *DeviceManagementAutopilotEventRequest) Update(ctx context.Context, reqObj *DeviceManagementAutopilotEvent) error {\n\treturn r.JSONRequest(ctx, \"PATCH\", \"\", reqObj, nil)\n}",
"func (wc *watchChan) transform(e *event) (res *watch.Event) {\n\tcurObj, oldObj, err := wc.prepareObjs(e)\n\tif err != nil {\n\t\tlogrus.Errorf(\"failed to prepare current and previous objects: %v\", err)\n\t\twc.sendError(err)\n\t\treturn nil\n\t}\n\tswitch {\n\tcase e.isProgressNotify:\n\t\tobj := wc.watcher.newFunc()\n\t\t// todo: update object version\n\t\tres = &watch.Event{\n\t\t\tType: watch.Bookmark,\n\t\t\tObject: obj,\n\t\t}\n\tcase e.isDeleted:\n\t\tres = &watch.Event{\n\t\t\tType: watch.Deleted,\n\t\t\tObject: oldObj,\n\t\t}\n\tcase e.isCreated:\n\t\tres = &watch.Event{\n\t\t\tType: watch.Added,\n\t\t\tObject: curObj,\n\t\t}\n\tdefault:\n\t\t// TODO: emit ADDED if the modified object causes it to actually pass the filter but the previous one did not\n\t\tres = &watch.Event{\n\t\t\tType: watch.Modified,\n\t\t\tObject: curObj,\n\t\t}\n\t}\n\treturn res\n}",
"func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) {\n\tmx.handle(mPATCH, pattern, handlerFn)\n}",
"func PATCH(c *httputil.Client, data DataMultipartWriter, v interface{}, url string) error {\n\treturn Do(c, \"PATCH\", data, v, url)\n}",
"func NewPatchModifier(log *logrus.Entry) (*PatchModifier, error) {\n\treturn &PatchModifier{\n\t\tLog: log,\n\t}, nil\n}"
] | [
"0.5266063",
"0.49771908",
"0.48856008",
"0.47132525",
"0.4605321",
"0.45569208",
"0.4526433",
"0.44783974",
"0.44632348",
"0.44467813",
"0.44410396",
"0.434499",
"0.4281584",
"0.42695108",
"0.42690158",
"0.42472798",
"0.4209925",
"0.4201461",
"0.41979274",
"0.4197556",
"0.4176965",
"0.41743794",
"0.4171769",
"0.41709194",
"0.41606873",
"0.41560796",
"0.41479373",
"0.41453838",
"0.41304782",
"0.41158414",
"0.41154101",
"0.41039342",
"0.40952057",
"0.40905294",
"0.40882108",
"0.40789366",
"0.40770546",
"0.40720868",
"0.4064988",
"0.40599176",
"0.40581384",
"0.4052163",
"0.4038417",
"0.4032913",
"0.4027298",
"0.40267572",
"0.402609",
"0.40239665",
"0.40235123",
"0.40178734",
"0.40159088",
"0.40060574",
"0.4003291",
"0.3991215",
"0.3988769",
"0.39810666",
"0.39792064",
"0.39718002",
"0.39688116",
"0.3966847",
"0.39610395",
"0.39604837",
"0.3957443",
"0.3953222",
"0.39488837",
"0.39468217",
"0.39460576",
"0.39451298",
"0.39443287",
"0.39423868",
"0.39375004",
"0.3931743",
"0.39299905",
"0.39276633",
"0.39253882",
"0.39240125",
"0.39230698",
"0.39128482",
"0.3911989",
"0.3909487",
"0.39045385",
"0.3887611",
"0.38868013",
"0.38842788",
"0.3883352",
"0.38803998",
"0.38768098",
"0.38756272",
"0.38650835",
"0.38614938",
"0.3848636",
"0.38483286",
"0.3848036",
"0.38396147",
"0.3838598",
"0.3837033",
"0.38315803",
"0.38218674",
"0.38131875",
"0.38130844"
] | 0.7371751 | 0 |
JSON jsonified content message. | Сообщение с jsonified содержимым JSON. | func (c contentMessage) JSON() string {
c.Status = "success"
jsonMessageBytes, e := json.MarshalIndent(c, "", " ")
fatalIf(probe.NewError(e), "Unable to marshal into JSON.")
return string(jsonMessageBytes)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (msg *Message) JsonContent() (string, error) {\n\t//if msg.Content == nil {\n\t//\treturn \"\", nil\n\t//}\n\tb, err := json.Marshal(msg.Content)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(b), nil\n}",
"func (m lockCmdMessage) JSON() string {\n\tmsgBytes, e := json.MarshalIndent(m, \"\", \" \")\n\tfatalIf(probe.NewError(e), \"Unable to marshal into JSON.\")\n\treturn string(msgBytes)\n}",
"func (msg *Any) JSON() []byte {\n\tjsonBytes, err := json.Marshal(*msg)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn jsonBytes\n}",
"func (a *AppController) JSON() {\n\tm := messagePool.Get().(*models.Message)\n\tm.Message = helloWorldMsg\n\ta.Reply().JSON(m)\n}",
"func (s SizeMessage) JSON() string {\n\treturn strutil.JSON(s)\n}",
"func JsonMessage(writer http.ResponseWriter, info_code int, info_message string) {\n\twriter.WriteHeader(info_code)\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\twriter.Write([]byte(\"{\\\"message\\\": \\\"\" + info_message + \"\\\"}\"))\n}",
"func (msg *Message) Json() (string, error) {\n\tb, err := json.Marshal(msg)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(b), nil\n}",
"func JSON(ctx *fasthttp.RequestCtx) {\n\tmessage := acquireMessage()\n\tmessage.Message = helloWorldStr\n\tdata, _ := json.Marshal(message)\n\n\tctx.Response.Header.SetContentType(contentTypeJSON)\n\tctx.Response.SetBody(data)\n\n\treleaseMessage(message)\n}",
"func JSON(ctx *fasthttp.RequestCtx) {\n\tmessage := acquireMessage()\n\tmessage.Message = helloWorldStr\n\tdata, _ := json.Marshal(message)\n\n\tctx.Response.Header.SetContentType(contentTypeJSON)\n\tctx.Response.SetBody(data)\n\n\treleaseMessage(message)\n}",
"func (h aliasMessage) JSON() string {\n\th.Status = \"success\"\n\tjsonMessageBytes, e := json.MarshalIndent(h, \"\", \" \")\n\tfatalIf(probe.NewError(e), \"Unable to marshal into JSON.\")\n\n\treturn string(jsonMessageBytes)\n}",
"func (l legalHoldInfoMessage) JSON() string {\n\tmsgBytes, e := json.MarshalIndent(l, \"\", \" \")\n\tfatalIf(probe.NewError(e), \"Unable to marshal into JSON.\")\n\treturn string(msgBytes)\n}",
"func (u eventAddMessage) JSON() string {\n\tu.Status = \"success\"\n\teventAddMessageJSONBytes, e := json.MarshalIndent(u, \"\", \" \")\n\tfatalIf(probe.NewError(e), \"Unable to marshal into JSON.\")\n\treturn string(eventAddMessageJSONBytes)\n}",
"func jsonHandler(w http.ResponseWriter, r *http.Request) {\r\n w.Header().Set(\"Content-Type\", \"application/json\")\r\n json.NewEncoder(w).Encode(&Message{helloWorldString})\r\n}",
"func (f findMSG) JSON() string {\n\tf.Path = \"path\"\n\tjsonMessageBytes, e := json.Marshal(f)\n\tfatalIf(probe.NewError(e), \"Unable to marshal into JSON.\")\n\n\treturn string(jsonMessageBytes)\n}",
"func (s policyMessage) JSON() string {\n\tpolicyJSONBytes, e := json.MarshalIndent(s, \"\", \" \")\n\tfatalIf(probe.NewError(e), \"Unable to marshal into JSON.\")\n\n\treturn string(policyJSONBytes)\n}",
"func (d *InfoOutput) JSON() ([]byte, error) {\n\treturn json.Marshal(d.reply)\n}",
"func (g *Game) getMessageJson() []byte {\n\te := g.Messages.Pop()\n\tif e == nil {\n\t\treturn nil\n\t}\n\tm := e.Value.(*TextMessage)\n\tb, err := json.Marshal(m)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn b\n}",
"func (msg *Int64) JSON() []byte {\n\tjsonBytes, err := json.Marshal(*msg)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn jsonBytes\n}",
"func (c *EzClient) JSON(j interface{}) *EzClient {\n\tb, err := json.Marshal(j)\n\tif err == nil {\n\t\tc.body = bytes.NewReader(b)\n\t}\n\n\treturn c\n}",
"func (u configExportMessage) JSON() string {\n\tu.Status = \"success\"\n\tstatusJSONBytes, e := json.MarshalIndent(u, \"\", \" \")\n\tfatalIf(probe.NewError(e), \"Unable to marshal into JSON.\")\n\n\treturn string(statusJSONBytes)\n}",
"func (s policyLinksMessage) JSON() string {\n\tpolicyJSONBytes, e := json.MarshalIndent(s, \"\", \" \")\n\tfatalIf(probe.NewError(e), \"Unable to marshal into JSON.\")\n\n\treturn string(policyJSONBytes)\n}",
"func (msg *Error) JSON() any {\n\tjsonData := H{}\n\tif msg.Meta != nil {\n\t\tvalue := reflect.ValueOf(msg.Meta)\n\t\tswitch value.Kind() {\n\t\tcase reflect.Struct:\n\t\t\treturn msg.Meta\n\t\tcase reflect.Map:\n\t\t\tfor _, key := range value.MapKeys() {\n\t\t\t\tjsonData[key.String()] = value.MapIndex(key).Interface()\n\t\t\t}\n\t\tdefault:\n\t\t\tjsonData[\"meta\"] = msg.Meta\n\t\t}\n\t}\n\tif _, ok := jsonData[\"error\"]; !ok {\n\t\tjsonData[\"error\"] = msg.Error()\n\t}\n\treturn jsonData\n}",
"func (r *Request) dumpJson() []byte {\n\tpayload, _ := json.Marshal(r.Message)\n\treturn payload\n}",
"func (r *Reply) JSON(data interface{}) *Reply {\n\tr.ContentType(ahttp.ContentTypeJSON.String())\n\tr.Render(&jsonRender{Data: data})\n\treturn r\n}",
"func (this Message) String() string {\n\tstr, _ := json.Marshal(this)\n\treturn string(str)\n}",
"func ATJsonMessage(writer http.ResponseWriter, info_code int, info_message string) {\n\twriter.WriteHeader(info_code)\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\tobj := model.ATResultList{ ResultList: make([]model.ATResult,0) }\n\tobj.ResultList = append(obj.ResultList, model.ATResult{Text: info_message,\n\t\tTimestamp: util.GetTimeNowSting(), Topic: \"K/AI\"})\n\tjson_bytes, _ := json.Marshal(obj)\n\twriter.Write(json_bytes)\n}",
"func MessageToJSON(msg Message) string {\n\tvar jl string\n\tb, err := json.Marshal(msg)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tjl = string(b)\n\treturn jl\n}",
"func (m *Meta) JSON() string {\n\tj, _ := json.MarshalIndent(m, \"\", \" \")\n\treturn string(j)\n}",
"func (message *Message) Bytes() ([]byte, error) {\n\treturn json.Marshal(*message) // Return encoded\n}",
"func (m Message) String() string {\n\tjm, _ := json.Marshal(m)\n\treturn string(jm)\n}",
"func (h *Host) JSON() []byte {\n\tb, _ := json.MarshalIndent(h, \"\", \" \")\n\treturn b\n}",
"func ToJSONString(message MessageInterface) string {\n\tarr := ToJSONByteSlice(message)\n\treturn string(arr)\n}",
"func (msg *Message) MarshalJSON() (out []byte, err error) {\n\tmsg.buf.Reset()\n\n\terr = msg.buf.WriteByte('{')\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(msg.channel) > 0 {\n\t\terr = bufWriteKV(&msg.buf, `\"channel\"`, []byte(msg.channel),\n\t\t\t':', '\"', '\"')\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\terr = msg.buf.WriteByte(',')\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif len(msg.username) > 0 {\n\t\terr = bufWriteKV(&msg.buf, `\"username\"`, []byte(msg.username),\n\t\t\t':', '\"', '\"')\n\t} else {\n\t\terr = bufWriteKV(&msg.buf, `\"username\"`, []byte(msg.hostname),\n\t\t\t':', '\"', '\"')\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\terr = msg.buf.WriteByte(',')\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif msg.attc != nil {\n\t\tvar attc []byte\n\n\t\tattc, err = msg.attc.MarshalJSON()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t_, _ = msg.buf.WriteString(`\"attachments\":[`)\n\t\t_, _ = msg.buf.Write(attc)\n\t\t_ = msg.buf.WriteByte(']')\n\t} else {\n\t\terr = msg.writeText()\n\t}\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = msg.buf.WriteByte('}')\n\tout = msg.buf.Bytes()\n\n\treturn\n}",
"func (m Messages) String() string {\n\tjm, _ := json.Marshal(m)\n\treturn string(jm)\n}",
"func (c *Controller) Json(data interface{}, status int, msg ...string) {\n ctx := c.Context()\n message := App().Status().Text(status, ctx.Header(\"Accept-Language\", \"\"), msg...)\n r := render.NewJson(map[string]interface{}{\n \"status\": status,\n \"message\": message,\n \"data\": data,\n })\n\n ctx.PushLog(\"status\", status)\n ctx.SetHeader(\"Content-Type\", r.ContentType())\n ctx.End(r.HttpCode(), r.Content())\n}",
"func JSON(e interface{}) []byte {\n\tcontents, err := json.Marshal(e)\n\tif err != nil {\n\t\treturn []byte{}\n\t}\n\n\treturn contents\n}",
"func (e *HTTPError) JSON() []byte {\n\tval, _ := json.Marshal(e)\n\n\treturn val\n}",
"func JsonContent(next http.Handler) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tnext.ServeHTTP(w, r)\n\t}\n}",
"func writeMessage(data interface{}, w io.Writer) error {\n\n\tresBytes, err := jsoniter.Marshal(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn encodeByteSlice(w, resBytes)\n}",
"func (c *Controller) Json(data interface{}, status int, msg ...string) {\n\tctx := c.Context()\n\tmessage := App().Status().Text(status, ctx.Header(\"Accept-Language\", \"\"), msg...)\n\tr := render.NewJson(map[string]interface{}{\n\t\t\"status\": status,\n\t\t\"message\": message,\n\t\t\"data\": data,\n\t})\n\n\tctx.PushLog(\"status\", status)\n\tctx.SetHeader(\"Content-Type\", r.ContentType())\n\thttpStatus := r.HttpCode()\n\tif ctx.Status() > 0 && ctx.Status() != httpStatus {\n\t\thttpStatus = ctx.Status()\n\t}\n\tctx.End(httpStatus, r.Content())\n}",
"func JSON(data interface{}, args ...interface{}) string {\n\tw := Writer{\n\t\tOptions: ojg.DefaultOptions,\n\t\tWidth: 80,\n\t\tMaxDepth: 3,\n\t\tSEN: false,\n\t}\n\tw.config(args)\n\tb, _ := w.encode(data)\n\n\treturn string(b)\n}",
"func (msg *Message) Content() []byte {\n\treturn msg.content\n}",
"func JSONHandler(ctx *atreugo.RequestCtx) error {\n\tmessage := AcquireMessage()\n\tmessage.Message = helloWorldStr\n\terr := ctx.JSONResponse(message)\n\n\tReleaseMessage(message)\n\n\treturn err\n}",
"func responderMensajeJson(writer http.ResponseWriter, r *http.Request, code int, codigoDeMensaje string){\n\tresponderJSON(writer, code, map[string]string{\"codigo\": codigoDeMensaje, \"mensaje\": getMensaje(codigoDeMensaje)})\n}",
"func (c *Ctx) JSON(data interface{}) error {\n\traw, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.Response.SetBodyRaw(raw)\n\tc.Response.Header.SetContentType(MIMEApplicationJSON)\n\treturn nil\n}",
"func (m *Message) String() string {\n\tjsonBytes, err := json.MarshalIndent(m, \"\", \" \")\n\t// jsonBytes, err := json.Marshal(m)\n\tif err != nil {\n\t\treturn fmt.Sprintf(\"message error - fail to marshal message to bytes, error: %v\", err)\n\t}\n\treturn string(jsonBytes)\n}",
"func JSON(j interface{}) string {\n\tvar buf bytes.Buffer\n\tencoder := json.NewEncoder(&buf)\n\tencoder.SetEscapeHTML(false)\n\tencoder.SetIndent(\"\", \"\")\n\terr := encoder.Encode(j)\n\n\tif err == nil {\n\t\treturn strings.TrimSuffix(buf.String(), \"\\n\")\n\t}\n\n\t// j could not be serialized to json, so let's log the error and return a\n\t// helpful-ish value\n\tError().logGenericArgs(\"error serializing value to json\", err, nil, 1)\n\treturn fmt.Sprintf(\"<error: %v>\", err)\n}",
"func (message *Message) ToJson(writer io.Writer) error {\n\tencoder := json.NewEncoder(writer)\n\tencodedMessage := encoder.Encode(message)\n\treturn encodedMessage\n}",
"func (e *Response) Json() (ret []byte) {\n\tvar err error\n\tif ret, err = json.Marshal(e); err != nil {\n\t\t//e.Error = fmt.Errorf(\"Error marshal json: %s\", err.Error())\n\t\treturn\n\t}\n\treturn\n}",
"func (v Message) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer18(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (m *GCMMessage) ToJSON() (string, error) {\n\tb, err := json.Marshal(m)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(b), nil\n}",
"func (h *Handler) JSON(c *fiber.Ctx, status int, data interface{}) error {\n\tif err := c.Status(status).JSON(data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (v FetchMessages) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer24(w, v)\n}",
"func (v Messages) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer17(w, v)\n}",
"func (b Block) Message() []byte {\n\n\tm := Message{}\n\tm.Type = 0\n\t// same as func (m *Message) HandleTimeStamp()\n\t// b.Time = b.Time*2/1000 - 946684800\n\tb.Time = b.Time\n\tm.Data = b\n\n\tms := []Message{}\n\tms = append(ms, m)\n\tresult, _ := json.Marshal(ms)\n\treturn result\n}",
"func (j *Job) Json() ([]byte, error) {\n\tjson, err := json.MarshalIndent(j, \"\", \"\\t\")\n\tif err != nil {\n\t\treturn []byte(\"\"), maskAny(err)\n\t}\n\treturn json, nil\n}",
"func (c *Client) send(ctx context.Context, session string, content Content) error {\n\t_dt := c.Now\n\tif _dt == nil {\n\t\t_dt = dt.Now\n\t}\n\t_m := newMsg(session, content, _dt)\n\tm := newJSONMsg(_m)\n\tb, err := json.Marshal(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbody := bytes.NewReader(b)\n\tu := c.url(\"/rex/v0/messages\")\n\tresp, err := c.http().Post(u, \"application/json\", body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\tb, _ := ioutil.ReadAll(resp.Body)\n\t\treturn fmt.Errorf(\"%v %s %s: %s\", resp.Status, \"POST\", u, b)\n\t}\n\treturn nil\n}",
"func (c *connection) writeJson(message msg) error {\n\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\tfmt.Println(\"=>\", message)\n\treturn c.ws.WriteJSON(message)\n}",
"func (v Messages) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer17(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func JSON() (ret httprpc.Codec) {\n\treturn Danger(\n\t\tfunc(w io.Writer) DangerEncoder {\n\t\t\treturn json.NewEncoder(w)\n\t\t},\n\t\tfunc(r io.Reader) DangerDecoder {\n\t\t\treturn json.NewDecoder(r)\n\t\t},\n\t)\n}",
"func (s *ResponseModifier) JSON(data interface{}) error {\n\tbuf := &bytes.Buffer{}\n\n\tswitch data.(type) {\n\tcase string:\n\t\tbuf.WriteString(data.(string))\n\tcase []byte:\n\t\tbuf.Write(data.([]byte))\n\tdefault:\n\t\tif err := json.NewEncoder(buf).Encode(data); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ts.Response.Body = ioutil.NopCloser(buf)\n\ts.Response.ContentLength = int64(buf.Len())\n\ts.Response.Header.Set(\"Content-Type\", \"application/json\")\n\treturn nil\n}",
"func (ctx *Context) Json(data interface{}) {\n\tdata, err := json.Marshal(data)\n\tif err != nil {\n\t\tfmt.Println(\"[ERROR] \" + err.Error())\n\t}\n\tctx.writer.Header().Set(\"Content-Type\", \"application/json\")\n\tfmt.Fprintf(ctx.writer, \"%s\", data)\n}",
"func jsonError(w http.ResponseWriter, serverMsg string, clientMsg string) {\n\tlog.Error(serverMsg)\n\tpayload := Message{\n\t\tError: clientMsg,\n\t}\n\tresJSON, err := json.Marshal(payload)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Failed to marshal result : %v\", err)\n\t\thttpError(w, msg, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tfmt.Fprintf(w, \"%s\\n\", string(resJSON))\n\treturn\n}",
"func (msg *Message) _marshalJSON() (out []byte, err error) {\n\tstr := `{`\n\n\tif len(msg.channel) > 0 {\n\t\tstr += `\"channel\":\"` + msg.channel + `\",`\n\t}\n\n\tif len(msg.username) > 0 {\n\t\tstr += `\"username\":\"` + msg.username + `\",`\n\t} else {\n\t\tstr += `\"username\":\"` + msg.hostname + `\",`\n\t}\n\n\tif msg.attc != nil {\n\t\tvar attc []byte\n\n\t\tattc, err = msg.attc.MarshalJSON()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tstr += `\"attachments\":[` + string(attc) + `]`\n\t} else {\n\t\tstr += `\"text\":\"` + msg.getText() + `\"`\n\t}\n\n\tstr += `}`\n\tout = []byte(str)\n\n\treturn\n}",
"func jsonResponse(rw http.ResponseWriter, code int, msg string) {\n\trw.Header().Set(\"Content-Type\", \"application/json\")\n\trw.WriteHeader(code)\n\trw.Write([]byte(fmt.Sprintf(`{\"message\":\"%s\"}`, msg)))\n}",
"func (cm Message) Serialize() ([]byte, error) {\n\treturn []byte(cm.Content), nil\n}",
"func (c *echoContext) JSON(code int, d interface{}) error {\n\treturn c.ctx.JSON(code, d)\n}",
"func (em Message) Serialize() ([]byte, error) {\n\tmsg, err := json.Marshal(em)\n\treturn msg, err\n}",
"func (m *DynamicMessage) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(m.data)\n}",
"func (self *monitoringData) JSON() (jsonString string, e error) {\n\tb, err := json.Marshal(*self)\n\treturn string(b), err\n}",
"func (m *Message) MarshalJSON() ([]byte, error) {\n\tmessageLocation := time.FixedZone(\"\", m.TZ)\n\tutc := time.Unix(m.Time, 0)\n\tmessageTime := utc.In(messageLocation).Format(time.RFC3339)\n\treturn json.Marshal(&struct {\n\t\tID string `json:\"id\"`\n\t\tName string `json:\"name\"`\n\t\tEmail string `json:\"email\"`\n\t\tText string `json:\"text\"`\n\t\tTime string `json:\"time\"`\n\t}{\n\t\tID: m.ID,\n\t\tName: m.Name,\n\t\tEmail: m.Email,\n\t\tText: m.Text,\n\t\tTime: messageTime,\n\t})\n}",
"func (cmd *command) json() []byte {\n\tj, _ := json.Marshal(cmd)\n\treturn append(j, '\\r', '\\n')\n}",
"func (c *T) JSON() cast.JSON {\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn cast.JSON{\n\t\t\"id\": c.ID,\n\t\t\"cardid\": c.Proto.ID,\n\t\t\"name\": c.Proto.Name,\n\t\t\"costs\": c.Proto.Costs.JSON(),\n\t\t\"text\": c.Proto.Text,\n\t\t\"username\": c.Username,\n\t\t\"image\": c.Proto.Image,\n\t\t\"type\": c.Proto.Type.String(),\n\t\t\"powers\": c.Proto.Powers.JSON(),\n\t}\n}",
"func (a EnrichedMessage) String() string {\n\tbytes, _ := json.Marshal(a)\n\n\treturn string(bytes)\n}",
"func Json(w http.ResponseWriter, data interface{}) {\n\tjs, err := json.Marshal(data)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(js)\n}",
"func (mp JSONMsgPacker) PackMsg(msg interface{}, buf []byte) ([]byte, error) {\n\tbuffer := bytes.NewBuffer(buf)\n\tjsonEncoder := json.NewEncoder(buffer)\n\terr := jsonEncoder.Encode(msg)\n\tif err != nil {\n\t\treturn buf, err\n\t}\n\tbuf = buffer.Bytes()\n\treturn buf[:len(buf)-1], nil // encoder always put '\\n' at the end, we trim it\n}",
"func GetJson(w http.ResponseWriter, Status string, Msg string, httpStatus int) string {\n\tmsgJsonStruct := &JsonMsg{Status, Msg}\n\tmsgJson, errj := json.Marshal(msgJsonStruct)\n\tif errj != nil {\n\t\tmsg := `{\"status\":\"error\",\"message\":\"We could not generate the json error!\"}`\n\t\treturn msg\n\t}\n\treturn string(msgJson)\n}",
"func (ctx *Context) JSON(code int, obj interface{}) (err error) {\n\tw := ctx.ResponseWriter\n\tw.Header().Set(HeaderContentType, MIMEApplicationJSONCharsetUTF8)\n\tw.WriteHeader(code)\n\treturn json.NewEncoder(w).Encode(obj)\n}",
"func (e *Ender) Message() []byte {\n\tvar msg []byte\n\tbroadcastEndMsg := queues.MessageBroadcastEnd{\n\t\tRoomID: e.roomModel.GetID(),\n\t\tBroadcastID: e.broadcastModel.GetID(),\n\t\tEndTime: time.Now().Unix(),\n\t}\n\n\tdata := struct {\n\t\tType int `json:\"type\"`\n\t\tData interface{} `json:\"data\"`\n\t}{\n\t\te.config.TypeID,\n\t\tbroadcastEndMsg,\n\t}\n\n\tmsg, _ = json.Marshal(data)\n\treturn msg\n}",
"func (e *Entity) JSON() string {\n\tif e == nil {\n\t\treturn \"{}\"\n\t}\n\treturn e.data.JSON()\n}",
"func JSON(w http.ResponseWriter, data interface{}) {\n\tresponse, err := json.Marshal(data)\n\tif err != nil {\n\t\tlog.Println(\"Failed to generate json \")\n\t}\n\tfmt.Fprint(w, string(response))\n}",
"func (s *Server) JSON(w http.ResponseWriter, status int, data interface{}) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\tjson.NewEncoder(w).Encode(data)\n}",
"func (c *Context) JSON(code int, data interface{}) {\n\tc.SetHeader(\"Content-Type\", \"application/json\")\n\tc.Status(code)\n\tencoder := json.NewEncoder(c.Writer)\n\tif err := encoder.Encode(data); err != nil {\n\t\thttp.Error(c.Writer, err.Error(), 500)\n\t}\n}",
"func (j *Message) MarshalJSON() ([]byte, error) {\n\tvar buf fflib.Buffer\n\tif j == nil {\n\t\tbuf.WriteString(\"null\")\n\t\treturn buf.Bytes(), nil\n\t}\n\terr := j.MarshalJSONBuf(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}",
"func (jf *JFile) JSON() ([]byte, error) {\n\treturn jf.rootnode.PrettyJSON()\n}",
"func (r *Response) Message(msg string) JResponseWriter {\n\treturn r.Field(fieldMsg, msg)\n}",
"func (q *Query) JSON() (string, error) {\n\tstrCh, err := q.StringChan()\n\tif err != nil {\n\t\treturn \"\", errors.WithStack(err)\n\t}\n\ts := StringFromChan(strCh)\n\treturn fmt.Sprintf(`{\"query\":\"%s\"}`, strings.Replace(s, `\"`, `\\\"`, -1)), nil\n}",
"func (fr FlushResult) JSON() string {\n\tj, err := json.Marshal(fr)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(j)\n}",
"func (messages *Messages) ToJson(writer io.Writer) error {\n\tencoder := json.NewEncoder(writer)\n\tencodedMessages := encoder.Encode(messages)\n\treturn encodedMessages\n}",
"func (schedule Schedule) JSON() []byte {\n\tb, err := json.Marshal(&schedule)\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\treturn b\n}",
"func (m Message) MarshalJSON() ([]byte, error) {\n\ttype Message struct {\n\t\tSig hexutil.Bytes `json:\"sig,omitempty\"`\n\t\tTTL uint32 `json:\"ttl\"`\n\t\tTimestamp uint32 `json:\"timestamp\"`\n\t\tTopic TopicType `json:\"topic\"`\n\t\tPayload hexutil.Bytes `json:\"payload\"`\n\t\tPadding hexutil.Bytes `json:\"padding\"`\n\t\tPoW float64 `json:\"pow\"`\n\t\tHash hexutil.Bytes `json:\"hash\"`\n\t\tDst hexutil.Bytes `json:\"recipientPublicKey,omitempty\"`\n\t}\n\tvar enc Message\n\tenc.Sig = m.Sig\n\tenc.TTL = m.TTL\n\tenc.Timestamp = m.Timestamp\n\tenc.Topic = m.Topic\n\tenc.Payload = m.Payload\n\tenc.Padding = m.Padding\n\tenc.PoW = m.PoW\n\tenc.Hash = m.Hash\n\tenc.Dst = m.Dst\n\treturn json.Marshal(&enc)\n}",
"func MessagesHandler(w http.ResponseWriter, r *http.Request) {\n\t//fmt.Fprintln(w, \"no message to show\")\n\t// specify to the client that the request should be respect the JSON format\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\t// Encode the messages to send back some JSON to the client\n\tif err := json.NewEncoder(w).Encode(listMessages); err != nil {\n\t\tpanic(err)\n\t}\n}",
"func RenderJSON(w http.ResponseWriter, msg interface{}) {\n\tswitch v := msg.(type) {\n\tcase *JSONResponse:\n\t\tif _, ok := v.data[\"error\"]; !ok {\n\t\t\tv.data[\"error\"] = nil\n\t\t}\n\t\tw.WriteHeader(v.status)\n\t\tmsg = v.data\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(msg)\n}",
"func (l Locale) JSON() (string, error) {\n\tb, err := json.Marshal(l.Data)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(b), nil\n}",
"func sendJSON(data interface{}, w http.ResponseWriter) {\n\tcontent, err := json.MarshalIndent(data, \"\", \" \")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Add(\"Content-Type\", \"application/json; charset=utf-8\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(content)\n}",
"func (v FetchMessages) MarshalJSON() ([]byte, error) {\n\tw := jwriter.Writer{}\n\teasyjson42239ddeEncodeGithubComKhliengDispatchServer24(&w, v)\n\treturn w.Buffer.BuildBytes(), w.Error\n}",
"func (m *ConnectedMessage) ToJson() string {\n\tvar msg, _ = json.Marshal(m)\n\treturn string(msg)\n}",
"func (c *client) SendJSONMessage(msg interface{}) error {\n\treturn c.ws.WriteJSON(msg)\n}",
"func Serialize(msg Message) ([]byte, error) {\n\tvar b bytes.Buffer\n\tencoder := json.NewEncoder(&b)\n\terr := encoder.Encode(msg)\n\treturn b.Bytes(), err\n}"
] | [
"0.81981593",
"0.7193156",
"0.71297085",
"0.70724344",
"0.69950885",
"0.69847697",
"0.6969687",
"0.6947664",
"0.6947664",
"0.6902756",
"0.6756524",
"0.67506754",
"0.6678022",
"0.6643379",
"0.6576812",
"0.6566346",
"0.64627314",
"0.644587",
"0.6444574",
"0.6435986",
"0.6433906",
"0.64311713",
"0.6396124",
"0.6376626",
"0.6276202",
"0.62450135",
"0.6237225",
"0.6219143",
"0.62158734",
"0.62118524",
"0.6197187",
"0.61526227",
"0.6149122",
"0.6147296",
"0.61006314",
"0.6085855",
"0.6085102",
"0.6075197",
"0.60412395",
"0.60378146",
"0.6032388",
"0.60305935",
"0.6028207",
"0.60224605",
"0.5998671",
"0.5981975",
"0.5962032",
"0.59566134",
"0.59243715",
"0.59234095",
"0.5907936",
"0.5902254",
"0.5899456",
"0.5888897",
"0.58804035",
"0.5855274",
"0.58505416",
"0.58497745",
"0.5846182",
"0.58257324",
"0.58245015",
"0.582373",
"0.5820732",
"0.5812327",
"0.58051467",
"0.5805024",
"0.5800801",
"0.5798897",
"0.57967794",
"0.57865405",
"0.57844037",
"0.57829005",
"0.57784665",
"0.57772684",
"0.5776667",
"0.57591087",
"0.57567704",
"0.5756695",
"0.57478875",
"0.57454014",
"0.5743144",
"0.57420754",
"0.5740254",
"0.5736799",
"0.5735449",
"0.5733128",
"0.5724327",
"0.5712897",
"0.57090276",
"0.5696268",
"0.5695495",
"0.56915283",
"0.56913775",
"0.5684121",
"0.5682352",
"0.56821984",
"0.5681602",
"0.5681314",
"0.5679459"
] | 0.82813597 | 1 |
parseContent parse client Content container into printer struct. | parseContent преобразует клиентский контейнер Content в структуру printer. | func parseContent(c *clientContent) contentMessage {
content := contentMessage{}
content.Time = c.Time.Local()
// guess file type.
content.Filetype = func() string {
if c.Type.IsDir() {
return "folder"
}
return "file"
}()
content.Size = c.Size
md5sum := strings.TrimPrefix(c.ETag, "\"")
md5sum = strings.TrimSuffix(md5sum, "\"")
content.ETag = md5sum
// Convert OS Type to match console file printing style.
content.Key = getKey(c)
return content
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func ParseContent(content string, params Params) (string, error) {\n\tcheckAndInitDefaultView()\n\treturn defaultViewObj.ParseContent(content, params)\n}",
"func (c *GRPCClient) Content(ctx context.Context, contentPath string) (component.ContentResponse, error) {\n\tvar contentResponse component.ContentResponse\n\n\terr := c.run(func() error {\n\t\treq := &dashboard.ContentRequest{\n\t\t\tPath: contentPath,\n\t\t}\n\n\t\tresp, err := c.client.Content(ctx, req)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"grpc client content\")\n\t\t}\n\n\t\tif err := json.Unmarshal(resp.ContentResponse, &contentResponse); err != nil {\n\t\t\treturn errors.Wrap(err, \"unmarshal content response\")\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn component.ContentResponse{}, err\n\t}\n\n\treturn contentResponse, nil\n}",
"func ParseText(content []byte) []interface{} {\n jsonObject := []interface{}{}\n if err := json.Unmarshal(content, &jsonObject); err != nil {\n panic(err)\n }\n return parse(jsonObject)\n}",
"func ParseContent(text []byte) (*Appcast, error) {\n\tvar appcast = New()\n\terr := xml.Unmarshal(text, appcast)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn appcast, nil\n}",
"func (view *View) ParseContent(ctx context.Context, content string, params ...Params) (string, error) {\n\tvar usedParams Params\n\tif len(params) > 0 {\n\t\tusedParams = params[0]\n\t}\n\treturn view.ParseOption(ctx, Option{\n\t\tContent: content,\n\t\tOrphan: false,\n\t\tParams: usedParams,\n\t})\n}",
"func (d *GetResult) Content(valuePtr interface{}) error {\n\treturn DefaultDecode(d.contents, d.flags, valuePtr)\n}",
"func (r *Response) ParseTplContent(content string, params ...gview.Params) (string, error) {\n\treturn r.Request.GetView().ParseContent(r.Request.Context(), content, r.buildInVars(params...))\n}",
"func (f *File) ParseContent(doc *Doc) (err error) {\n\tcontent, err := f.Open(\"content.xml\")\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer content.Close()\n\n\td := xml.NewDecoder(content)\n\terr = d.Decode(doc)\n\treturn\n}",
"func (p *ParseData) Content() string {\n\treturn p.content\n}",
"func (resp *Response) Content() ([]byte, error) {\n\tbuf := bufferpool.Get()\n\tdefer buf.Free()\n\terr := drainBody(resp.Body, buf)\n\treturn buf.Bytes(), err\n}",
"func (_BaseContentSpace *BaseContentSpaceFilterer) ParseCreateContent(log types.Log) (*BaseContentSpaceCreateContent, error) {\n\tevent := new(BaseContentSpaceCreateContent)\n\tif err := _BaseContentSpace.contract.UnpackLog(event, \"CreateContent\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func LoadContent (data []byte, unsafe...bool) (*Parser, error) {\n if j, e := gjson.LoadContent(data, unsafe...); e == nil {\n return &Parser{j}, nil\n } else {\n return nil, e\n }\n}",
"func (o FileContentBufferResponseOutput) Content() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FileContentBufferResponse) string { return v.Content }).(pulumi.StringOutput)\n}",
"func (o FileContentBufferResponsePtrOutput) Content() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FileContentBufferResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Content\n\t}).(pulumi.StringPtrOutput)\n}",
"func GetContent(fullUrl string) (*Content, string, error) {\n\n\t// My own Cient with my own Transport\n\t// Just to abort very slow responses\n\ttransport := http.Transport{\n\t\tDial: func(network, addr string) (net.Conn, error) {\n\t\t\treturn net.DialTimeout(network, addr, time.Duration(10*time.Second))\n\t\t},\n\t}\n\n\tclient := http.Client{\n\t\tTransport: &transport,\n\t}\n\n\tresp, err := client.Get(fullUrl)\n\tif err != nil {\n\t\treturn nil, \"\", errors.New(\n\t\t\tfmt.Sprintf(\"Desculpe, ocorreu ao tentar recuperar a pagina referente a URL passada. %s.\", err))\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, \"\", errors.New(\n\t\t\tfmt.Sprintf(\"Desculpe, mas a pagina passada respondeu indevidamente. O Status Code recebido foi: %d.\", resp.StatusCode))\n\t}\n\n\treader, err := charset.NewReader(resp.Body, resp.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\treturn nil, \"\", errors.New(\n\t\t\tfmt.Sprintf(\"Erro ao decodificar o charset da pagina. %s.\", err))\n\t}\n\n\tcontent := &Content{}\n\timageUrl := \"\"\n\n\t// This function create a Tokenizer for an io.Reader, obs. HTML should be UTF-8\n\tz := html.NewTokenizer(reader)\n\tfor {\n\t\ttokenType := z.Next()\n\n\t\tif tokenType == html.ErrorToken {\n\t\t\tif z.Err() == io.EOF { // EVERTHINGS WORKS WELL!\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// Ops, we've got something wrong, it isn't an EOF token\n\t\t\treturn nil, \"\", errors.New(\n\t\t\t\tfmt.Sprintf(\"Desculpe, mas ocorreu um erro ao extrair as tags HTML da pagina passada. 
%s.\", z.Err()))\n\t\t}\n\n\t\tswitch tokenType {\n\t\tcase html.StartTagToken, html.SelfClosingTagToken:\n\n\t\t\ttoken := z.Token()\n\t\t\t// Check if it is an title tag opennig, it's the fastest way to compare bytes\n\t\t\tif token.Data == \"title\" {\n\t\t\t\t// log.Printf(\"TAG: '%v'\\n\", token.Data)\n\t\t\t\tnextTokenType := z.Next()\n\t\t\t\tif nextTokenType == html.TextToken {\n\t\t\t\t\tnextToken := z.Token()\n\t\t\t\t\tcontent.Title = strings.TrimSpace(nextToken.Data)\n\t\t\t\t\t// log.Println(\"<title> = \" + content.Title)\n\t\t\t\t}\n\n\t\t\t} else if token.Data == \"meta\" {\n\t\t\t\tkey := \"\"\n\t\t\t\tvalue := \"\"\n\n\t\t\t\t// log.Printf(\"NewMeta: %s : \", token.String())\n\n\t\t\t\t// Extracting this meta data information\n\t\t\t\tfor _, attr := range token.Attr {\n\t\t\t\t\tswitch attr.Key {\n\t\t\t\t\tcase \"property\", \"name\":\n\t\t\t\t\t\tkey = attr.Val\n\t\t\t\t\tcase \"content\":\n\t\t\t\t\t\tvalue = attr.Val\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tswitch key {\n\n\t\t\t\tcase \"title\", \"og:title\", \"twitter:title\":\n\t\t\t\t\tif strings.TrimSpace(value) != \"\" {\n\t\t\t\t\t\tcontent.Title = strings.TrimSpace(value)\n\t\t\t\t\t\t// log.Printf(\"Title: %s\\n\", strings.TrimSpace(value))\n\t\t\t\t\t}\n\n\t\t\t\tcase \"og:site_name\", \"twitter:domain\":\n\t\t\t\t\tif strings.TrimSpace(value) != \"\" {\n\t\t\t\t\t\t//content.SiteName = strings.TrimSpace(value)\n\t\t\t\t\t\t//log.Printf(\"Site Name: %s\\n\", strings.TrimSpace(value))\n\t\t\t\t\t}\n\n\t\t\t\tcase \"description\", \"og:description\", \"twitter:description\":\n\t\t\t\t\tif strings.TrimSpace(value) != \"\" {\n\t\t\t\t\t\tcontent.Description = strings.TrimSpace(value)\n\t\t\t\t\t\t// log.Printf(\"Description: %s\\n\", strings.TrimSpace(value))\n\t\t\t\t\t}\n\t\t\t\tcase \"og:image\", \"twitter:image\", \"twitter:image:src\":\n\t\t\t\t\tif strings.TrimSpace(value) != \"\" {\n\t\t\t\t\t\timageUrl = strings.TrimSpace(value)\n\t\t\t\t\t\t// log.Printf(\"Image: %s\\n\", 
strings.TrimSpace(value))\n\t\t\t\t\t}\n\t\t\t\tcase \"og:url\", \"twitter:url\":\n\t\t\t\t\tif strings.TrimSpace(value) != \"\" {\n\t\t\t\t\t\t// Not used, cause user could use a redirect service\n\t\t\t\t\t\t// fullUrl = strings.TrimSpace(value)\n\t\t\t\t\t\t// log.Printf(\"Url: %s\\n\", strings.TrimSpace(value))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Limiting the size of Title and Description to 250 characters\n\tif len(content.Title) > 250 {\n\t\tcontent.Title = content.Title[0:250]\n\t}\n\tif len(content.Description) > 250 {\n\t\tcontent.Description = content.Description[0:250]\n\t}\n\t// If content description is empty, lets full fill with something\n\tif len(content.Description) == 0 {\n\t\tcontent.Description = \"Veja o conteudo completo...\"\n\t}\n\n\t// Adding the host of this content\n\tcontent.Host = resp.Request.URL.Host\n\n\tlog.Printf(\"Title: %s\\n description: %s\\n host:%s\\n imageUrl:%s\\n\",\n\t\tcontent.Title, content.Description, content.Host, imageUrl)\n\n\treturn content, imageUrl, nil\n}",
"func (o FileContentBufferOutput) Content() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FileContentBuffer) *string { return v.Content }).(pulumi.StringPtrOutput)\n}",
"func (jv *Viewer) Content(content interface{}) error {\n\n\tjson, err := toJSON(content)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error converting %v to json: %s\", content, err.Error())\n\t}\n\twriter := colorwriter.New(\n\t\tcolorMap,\n\t\ttermbox.Attribute(jv.theme.Bg))\n\tformatter := jsonfmt.New(json, writer)\n\tif err := formatter.Format(); err != nil {\n\t\treturn err\n\t}\n\tformattedJSON := writer.Lines\n\n\tjv.tree = jsontree.New(formattedJSON)\n\tfor index := 0; index < len(formattedJSON); index++ {\n\t\tjv.tree.ToggleLine(index)\n\t}\n\treturn nil\n}",
"func (t *Template) Parse(content interface{}) {\n\n\tif t.compiled == nil {\n\t\tlog.Println(\"there is no compiled template\")\n\t\treturn\n\t}\n\n\tvar buffer bytes.Buffer\n\n\tif err := t.compiled.Execute(&buffer, content); err != nil {\n\t\tlog.Println(\"error parsing template \", err)\n\t\treturn\n\t}\n\n\tt.BodyContent = buffer.String()\n}",
"func (view *View) doParseContent(ctx context.Context, content string, params Params) (string, error) {\n\t// It's not necessary continuing parsing if template content is empty.\n\tif content == \"\" {\n\t\treturn \"\", nil\n\t}\n\tvar (\n\t\terr error\n\t\tkey = fmt.Sprintf(\"%s_%v_%v\", templateNameForContentParsing, view.config.Delimiters, view.config.AutoEncode)\n\t\ttpl = templates.GetOrSetFuncLock(key, func() interface{} {\n\t\t\tif view.config.AutoEncode {\n\t\t\t\treturn htmltpl.New(templateNameForContentParsing).Delims(\n\t\t\t\t\tview.config.Delimiters[0],\n\t\t\t\t\tview.config.Delimiters[1],\n\t\t\t\t).Funcs(view.funcMap)\n\t\t\t}\n\t\t\treturn texttpl.New(templateNameForContentParsing).Delims(\n\t\t\t\tview.config.Delimiters[0],\n\t\t\t\tview.config.Delimiters[1],\n\t\t\t).Funcs(view.funcMap)\n\t\t})\n\t)\n\t// Using memory lock to ensure concurrent safety for content parsing.\n\thash := strconv.FormatUint(ghash.DJB64([]byte(content)), 10)\n\tgmlock.LockFunc(\"gview.ParseContent:\"+hash, func() {\n\t\tif view.config.AutoEncode {\n\t\t\ttpl, err = tpl.(*htmltpl.Template).Parse(content)\n\t\t} else {\n\t\t\ttpl, err = tpl.(*texttpl.Template).Parse(content)\n\t\t}\n\t})\n\tif err != nil {\n\t\terr = gerror.Wrapf(err, `template parsing failed`)\n\t\treturn \"\", err\n\t}\n\t// Note that the template variable assignment cannot change the value\n\t// of the existing `params` or view.data because both variables are pointers.\n\t// It needs to merge the values of the two maps into a new map.\n\tvariables := gutil.MapMergeCopy(params)\n\tif len(view.data) > 0 {\n\t\tgutil.MapMerge(variables, view.data)\n\t}\n\tview.setI18nLanguageFromCtx(ctx, variables)\n\n\tbuffer := bytes.NewBuffer(nil)\n\tif view.config.AutoEncode {\n\t\tvar newTpl *htmltpl.Template\n\t\tnewTpl, err = tpl.(*htmltpl.Template).Clone()\n\t\tif err != nil {\n\t\t\terr = gerror.Wrapf(err, `template clone failed`)\n\t\t\treturn \"\", err\n\t\t}\n\t\tif err = newTpl.Execute(buffer, variables); err 
!= nil {\n\t\t\terr = gerror.Wrapf(err, `template parsing failed`)\n\t\t\treturn \"\", err\n\t\t}\n\t} else {\n\t\tif err = tpl.(*texttpl.Template).Execute(buffer, variables); err != nil {\n\t\t\terr = gerror.Wrapf(err, `template parsing failed`)\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\t// TODO any graceful plan to replace \"<no value>\"?\n\tresult := gstr.Replace(buffer.String(), \"<no value>\", \"\")\n\tresult = view.i18nTranslate(ctx, result, variables)\n\treturn result, nil\n}",
"func parseContent(content string) ([]byte, error) {\n\t// Decode and replace all occurrences of hexadecimal content.\n\tvar errpanic error\n\tdefer func() {\n\t\tr := recover()\n\t\tif r != nil {\n\t\t\terrpanic = fmt.Errorf(\"recovered from panic: %v\", r)\n\t\t}\n\t}()\n\n\tif containsUnescaped(content) {\n\t\treturn nil, fmt.Errorf(\"invalid special characters escaping\")\n\t}\n\n\tb := escapeContent.ReplaceAllString(content, \"$1\")\n\n\tb = hexRE.ReplaceAllStringFunc(b,\n\t\tfunc(h string) string {\n\t\t\tr, err := hex.DecodeString(strings.Replace(strings.Trim(h, \"|\"), \" \", \"\", -1))\n\t\t\tif err != nil {\n\t\t\t\tpanic(\"invalid hexRE regexp\")\n\t\t\t}\n\t\t\treturn string(r)\n\t\t})\n\treturn []byte(b), errpanic\n}",
"func LoadContent(data interface{}, unsafe...bool) (*Parser, error) {\n if j, e := gjson.LoadContent(data, unsafe...); e == nil {\n return &Parser{j}, nil\n } else {\n return nil, e\n }\n}",
"func contentHandler(w http.ResponseWriter, r *http.Request) {\n\n\t//Get the filename from the url:\n\tdataFileLoc := r.URL.Path[len(\"/kanban-board/content/\"):] + \".json\"\n\n\tlog.Info(\"Request for file: \" + contentFolderLoc + dataFileLoc)\n\tdat, err := ioutil.ReadFile(contentFolderLoc + dataFileLoc)\n\tif err != nil {\n\t\t//Return a 404 for errrors.\n\t\thttp.NotFound(w, r)\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\n\tvar myPageData PageDataModel\n\tif err = json.Unmarshal(dat, &myPageData); err != nil {\n\t\thttp.Error(w, \"Error processing page\", 500)\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\n\thtmlString, err := makeHTML(myPageData)\n\tif err != nil {\n\t\thttp.Error(w, \"Error processing page\", 500)\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\n\tfmt.Fprintf(w, htmlString)\n}",
"func parseContent(n *html.Node) []*ContentSegment {\n\tsegments := make([]*ContentSegment, 0)\n\n\tvar parser func(*html.Node, Attribution)\n\tparser = func(node *html.Node, attribution Attribution) {\n\t\tif node.Type == html.ElementNode {\n\t\t\tswitch node.Data {\n\t\t\tcase \"em\":\n\t\t\t\tattribution = attribution | AttributionEmphasis\n\t\t\tcase \"strong\":\n\t\t\t\tattribution = attribution | AttributionBold\n\t\t\tcase \"code\":\n\t\t\t\tattribution = attribution | AttributionCode\n\t\t\tcase \"a\":\n\t\t\t\tattribution |= AttributeAnchor\n\t\t\t\tseg := ContentSegment{\n\t\t\t\t\tRaw: node.FirstChild.Data,\n\t\t\t\t\tAttribution: attribution,\n\t\t\t\t}\n\t\t\t\tseg.Context = make(map[string]string)\n\t\t\t\tfor _, att := range node.Attr {\n\t\t\t\t\tif att.Key == \"href\" {\n\t\t\t\t\t\tseg.Context[\"href\"] = att.Val\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tsegments = append(segments, &seg)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor c := node.FirstChild; c != nil; c = c.NextSibling {\n\t\t\t\tparser(c, attribution)\n\t\t\t}\n\t\t} else if node.Type == html.TextNode {\n\t\t\tseg := ContentSegment{\n\t\t\t\tRaw: node.Data,\n\t\t\t\tAttribution: attribution,\n\t\t\t}\n\t\t\tsegments = append(segments, &seg)\n\t\t}\n\t}\n\tparser(n, AttributionPlain)\n\n\t// for _, l := range segments {\n\t// \tlog.Printf(\"Segment %v\\n\", l)\n\t// }\n\n\treturn segments\n}",
"func generateContentMessages(clntURL ClientURL, ctnts []*ClientContent, printAllVersions bool) (msgs []contentMessage) {\n\tprefixPath := clntURL.Path\n\tprefixPath = filepath.ToSlash(prefixPath)\n\tif !strings.HasSuffix(prefixPath, \"/\") {\n\t\tprefixPath = prefixPath[:strings.LastIndex(prefixPath, \"/\")+1]\n\t}\n\tprefixPath = strings.TrimPrefix(prefixPath, \"./\")\n\n\tnrVersions := len(ctnts)\n\n\tfor i, c := range ctnts {\n\t\t// Convert any os specific delimiters to \"/\".\n\t\tcontentURL := filepath.ToSlash(c.URL.Path)\n\t\t// Trim prefix path from the content path.\n\t\tc.URL.Path = strings.TrimPrefix(contentURL, prefixPath)\n\n\t\tcontentMsg := contentMessage{}\n\t\tcontentMsg.Time = c.Time.Local()\n\n\t\t// guess file type.\n\t\tcontentMsg.Filetype = func() string {\n\t\t\tif c.Type.IsDir() {\n\t\t\t\treturn \"folder\"\n\t\t\t}\n\t\t\treturn \"file\"\n\t\t}()\n\n\t\tcontentMsg.Size = c.Size\n\t\tmd5sum := strings.TrimPrefix(c.ETag, \"\\\"\")\n\t\tmd5sum = strings.TrimSuffix(md5sum, \"\\\"\")\n\t\tcontentMsg.ETag = md5sum\n\t\t// Convert OS Type to match console file printing style.\n\t\tcontentMsg.Key = getKey(c)\n\t\tcontentMsg.VersionID = c.VersionID\n\t\tcontentMsg.IsDeleteMarker = c.IsDeleteMarker\n\t\tcontentMsg.VersionOrd = nrVersions - i\n\t\t// URL is empty by default\n\t\t// Set it to either relative dir (host) or public url (remote)\n\t\tcontentMsg.URL = clntURL.String()\n\n\t\tmsgs = append(msgs, contentMsg)\n\n\t\tif !printAllVersions {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}",
"func parse(message def.Message, messageTemplate interface{}) bool {\n\tlog.Infoln(message.Content())\n\tr := strings.NewReader(message.Content())\n\td := json.NewDecoder(r)\n\t//err := json.Unmarshal([]byte(message.Content()), &messageTemplate)\n\terr := d.Decode(&messageTemplate)\n\tif err != nil {\n\t\tlog.Errorf(\"JSON Unmarshal error: '%s'\\nFrom message (type %d) of client #%d: '%s'\", err, message.ClientID(), message.Type(), message.Content())\n\t\tlog.Errorf(\"JSON Unmarshal error of content: '%s'\", message.Content())\n\t\treturn false\n\t}\n\treturn true\n}",
"func (s *GRPCServer) Content(ctx context.Context, req *dashboard.ContentRequest) (*dashboard.ContentResponse, error) {\n\tservice, ok := s.Impl.(ModuleService)\n\tif !ok {\n\t\treturn nil, errors.Errorf(\"plugin is not a module, it's a %T\", s.Impl)\n\t}\n\n\tcontentResponse, err := service.Content(ctx, req.Path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontentResponseBytes, err := json.Marshal(&contentResponse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &dashboard.ContentResponse{\n\t\tContentResponse: contentResponseBytes,\n\t}, nil\n}",
"func (se *SNEntry) getContent() string {\n\tvar content string\n\n\tfor _, c := range se.Components {\n\t\t// Skip all components that aren't plain text.\n\t\tif c.Type != \"text\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t// We don't want to create too long messages, so if we have\n\t\t// more than 200 characters in the string we'll skip the\n\t\t// remaining text components.\n\t\tif len(content) > 200 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Append the text to our content string\n\t\tcontent += \" \" + c.Text.Value\n\t}\n\n\treturn content\n}",
"func Parse(content []byte) []map[string][]string {\n\ts := &scanner{\n\t\ts: bufio.NewScanner(bytes.NewReader(content)),\n\t\tused: true,\n\t}\n\tp := &parser{\n\t\tdata: make([]map[string][]string, 0),\n\t\tcurrentKey: \"\",\n\t\tisNewItem: true,\n\t}\n\n\tfor s.Scan() {\n\t\ttext := s.Text()\n\t\tif isComment(text) {\n\t\t\tp.isNewItem = true\n\t\t\tcontinue\n\t\t}\n\n\t\tif strings.HasPrefix(text, \" \") {\n\t\t\tp.parse([]string{text})\n\t\t\tcontinue\n\t\t}\n\t\tp.parse(strings.Split(text, \":\"))\n\t}\n\n\treturn p.data\n}",
"func NewContent (rawContent map[string]interface{}) Content {\n if nil == rawContent {\n return Content{}\n }\n\n id, _ := rawContent[\"id\"].(string)\n parentId, _ := rawContent[\"parentId\"].(string)\n body, _ := rawContent[\"body\"].(string)\n title, _ := rawContent[\"title\"].(string)\n rawCreated, _ := rawContent[\"created\"].(string)\n rawModified, _ := rawContent[\"modified\"].(string)\n launchInNewWindow, _ := rawContent[\"launchInNewWindow\"].(bool)\n reviewable, _ := rawContent[\"reviewable\"].(bool)\n\n created, _ := time.Parse (time.RFC3339, rawCreated)\n modified, _ := time.Parse (time.RFC3339, rawModified)\n\n hasChildren, _ := rawContent[\"hasChildren\"].(bool)\n hasGradebookColumns, _ := rawContent[\"hasGradebookColumns\"].(bool)\n hasAssociatedGroups, _ := rawContent[\"hasAssociatedGroups\"].(bool)\n\n contentHandler, _ := rawContent[\"contentHandler\"].(map[string]interface{})\n\n return Content {\n Id: id,\n ParentId: parentId,\n Title: title,\n Body: body,\n Created: created,\n Modified: modified,\n Position: _parsePosition (rawContent[\"position\"]),\n HasChildren: hasChildren,\n HasGradebookColumn: hasGradebookColumns,\n HasAssociatedGroups: hasAssociatedGroups,\n LaunchInNewWindow: launchInNewWindow,\n Reviewable: reviewable,\n ContentHandler: NewContentHandler (contentHandler),\n Availability:\n _parseAvailability (rawContent[\"availability\"].(map[string]interface{})),\n }\n}",
"func getContentMarketing(wg *sync.WaitGroup) {\n\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(\"GET\", contentMarketingURL, nil)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tdefer func() {\n\t\tresp.Body.Close()\n\t\twg.Done()\n\t}()\n\n\terr = json.NewDecoder(resp.Body).Decode(&contentMarketing)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n}",
"func (P *Parser) stringifyContent(content interface{}) (string, error) {\n\tswitch c := content.(type) {\n\tcase string:\n\t\treturn c, nil\n\tcase []byte:\n\t\treturn string(c), nil\n\tcase *bytes.Buffer:\n\t\tif c != nil {\n\t\t\treturn c.String(), nil\n\t\t}\n\tcase io.Reader:\n\t\tvar buf bytes.Buffer\n\t\tif _, err := io.Copy(&buf, c); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn buf.String(), nil\n\t}\n\treturn \"\", fmt.Errorf(\"unsupported content type %T\", content)\n}",
"func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {\n reader, err := d.Reader(ctx, path, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ioutil.ReadAll(reader)\n}",
"func (r ResponseAPI) GetContent() echo.Map {\n\treturn r.Content\n}",
"func GetContent(host, path string, requiredCode int) ([]byte, error) {\n\tresp, err := GetRequest(host, path)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\tdata, err := out(resp, requiredCode)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\treturn data, nil\n}",
"func (p *Parser) Parse(_ string, fileContent []byte) ([]model.Document, error) {\n\tvar documents []model.Document\n\tcom, err := dockerfile.ParseReader(bytes.NewReader(fileContent))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to parse Dockerfile\")\n\t}\n\n\tdoc := &model.Document{}\n\n\tvar resource Resource\n\tresource.CommandList = com\n\n\tj, err := json.Marshal(resource)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to Marshal Dockerfile\")\n\t}\n\n\tif err := json.Unmarshal(j, &doc); err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to Unmarshal Dockerfile\")\n\t}\n\n\tdocuments = append(documents, *doc)\n\n\treturn documents, nil\n}",
"func ParseContent(content string, match string) []string {\n\t// TODO: 따옴표로 감싸면 콤마 무시\n\tif content == \"!\"+match {\n\t\treturn []string{}\n\t}\n\n\treturn MapTrim(strings.Split(strings.Replace(content, \"!\"+match, \"\", 1), \",\"))\n}",
"func (p *Packet) Content() []byte {\n\treturn p._pkt.body.anon0[:p._pkt.body.length]\n}",
"func (c *HostConfig) Content() string {\n\treturn c.ContentBuffer().String()\n}",
"func (e *Engine) Parse(content []byte) ([]string, error) {\n\treturn e.parser.Parse(content)\n}",
"func Parse(content []byte) (resources []*Node, err error) {\n\tobj, err := hcl.ParseBytes(content)\n\tif err != nil {\n\t\treturn resources, err\n\t}\n\n\tast.Walk(obj.Node, func(n ast.Node) (ast.Node, bool) {\n\t\tbaseItem, ok := n.(*ast.ObjectItem)\n\t\tif !ok {\n\t\t\treturn n, true\n\t\t}\n\n\t\titem := NewNode(baseItem)\n\n\t\tif itemErr := item.Validate(); itemErr != nil {\n\t\t\terr = multierror.Append(err, itemErr)\n\t\t\treturn n, false\n\t\t}\n\n\t\tresources = append(resources, item)\n\n\t\treturn n, false\n\t})\n\n\treturn resources, err\n}",
"func NewContents (rawContents []map[string]interface{}) []Content {\n newContents := make ([]Content, len (rawContents))\n\n for i, rawContent := range rawContents {\n newContents[i] = NewContent (rawContent)\n }\n\n return newContents\n}",
"func (_BaseAccessWallet *BaseAccessWalletCaller) ContentObjects(opts *bind.CallOpts) (struct {\n\tCategory uint8\n\tLength *big.Int\n}, error) {\n\tvar out []interface{}\n\terr := _BaseAccessWallet.contract.Call(opts, &out, \"contentObjects\")\n\n\toutstruct := new(struct {\n\t\tCategory uint8\n\t\tLength *big.Int\n\t})\n\tif err != nil {\n\t\treturn *outstruct, err\n\t}\n\n\toutstruct.Category = *abi.ConvertType(out[0], new(uint8)).(*uint8)\n\toutstruct.Length = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int)\n\n\treturn *outstruct, err\n\n}",
"func (c *Client) LoadFromContent(content []byte) error {\n\treturn yaml.Unmarshal(content, c)\n}",
"func (s *schema) Content() []byte {\n\treturn s.content\n}",
"func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {\n\tpath = path[1:]\n\tbaseUrl := qiniu.MakeBaseUrl(d.Config.Domain,path)\n\tfmt.Print(baseUrl)\n\tres, err := http.Get(baseUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcontent, err := ioutil.ReadAll(res.Body)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn content, nil\n}",
"func formatContent(content string) string {\n\t// TODO: for extremely large JSON it would be better to do this with a buffer\n\tret := \"\"\n\tfor i, line := range strings.Split(strings.TrimSuffix(content, \"\\n\"), \"\\n\") {\n\t\tif i == 0 {\n\t\t\tret += fmt.Sprintf(\"%s\\n\", line)\n\t\t} else {\n\t\t\tret += fmt.Sprintf(\"\\t %s\\n\", line)\n\n\t\t}\n\t}\n\treturn ret\n}",
"func (o *GetMessagesAllOf) GetContent() interface{} {\n\tif o == nil {\n\t\tvar ret interface{}\n\t\treturn ret\n\t}\n\treturn o.Content\n}",
"func handleParse(h Handler, content string) ([]string, error) {\n\tvar (\n\t\tp CmdParser\n\t\tok bool\n\t)\n\tif p, ok = h.(CmdParser); !ok {\n\t\treturn cmdParserDefault(content), nil\n\t}\n\n\treturn p.Parse(content)\n}",
"func (_BaseAccessControlGroup *BaseAccessControlGroupCaller) ContentObjects(opts *bind.CallOpts) (struct {\n\tCategory uint8\n\tLength *big.Int\n}, error) {\n\tvar out []interface{}\n\terr := _BaseAccessControlGroup.contract.Call(opts, &out, \"contentObjects\")\n\n\toutstruct := new(struct {\n\t\tCategory uint8\n\t\tLength *big.Int\n\t})\n\tif err != nil {\n\t\treturn *outstruct, err\n\t}\n\n\toutstruct.Category = *abi.ConvertType(out[0], new(uint8)).(*uint8)\n\toutstruct.Length = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int)\n\n\treturn *outstruct, err\n\n}",
"func GetAllContent() []Content {\n\t// Do the connection and select database.\n\tdb := MongoDBConnect()\n\n\t// Do the query to a collection on database.\n\tc, err := db.Collection(\"sample_content\").Find(nil, bson.D{})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdefer c.Close(nil)\n\n\tvar content []Content\n\n\t// Start looping on the query result.\n\tfor c.Next(context.TODO()) {\n\t\teachContent := Content{}\n\n\t\terr := c.Decode(&eachContent)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tcontent = append(content, eachContent)\n\t}\n\n\treturn content\n}",
"func ContentPrint(path string) {\n\tfile2, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeAppend)\n\tdefer file2.Close()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tstats, statsErr := file2.Stat()\n\tif statsErr != nil {\n\t\tfmt.Println(\"erro\")\n\t}\n\n\tvar sizes int64 = stats.Size()\n\tbytess := make([]byte, sizes)\n\n\tbufr := bufio.NewReader(file2)\n\t_, err = bufr.Read(bytess)\n\n\tfmt.Println(bytess)\n}",
"func (s *Conn) GetMsgContent(link *Link) (content []byte, err error) {\n\ts.Lock()\n\tdefer s.Unlock()\n\tbuf := pool.BufPoolCopy.Get().([]byte)\n\tif n, err := s.ReadFrom(buf, link.De, link.Crypt, link.Rate); err == nil {\n\t\tcontent = buf[:n]\n\t}\n\treturn\n}",
"func (c *Communication) parseText(ms []Message) []*Frame {\n\tfs := make([]*Frame, len(ms))\n\tfor i, m := range ms {\n\t\tif s, ok := m.Content().(string); ok {\n\t\t\tfs[i] = NewFrame(TextMessageType, s)\n\t\t} else { fs[i] = NewFrame(TextMessageType, \"\"); }\n\n\t\tfs[i].AddMeta(Sender, m.SenderId())\n\t\tif m.IsGroupMessage() {\n\t\t\tfs[i].AddMeta(Group, m.GroupName())\n\t\t}\n\t}\n\n\treturn fs\n}",
"func (v *Version) GetContent(ctx context.Context) ([]byte, error) {\n\tlock := v.Chart.Space.SpaceManager.Lock.Get(v.Chart.Space.Name(), v.Chart.Name(), v.Number())\n\tif !lock.RLock(v.Chart.Space.SpaceManager.LockTimeout) {\n\t\treturn nil, ErrorLocking.Format(\"version\", v.Chart.Space.Name()+\"/\"+v.Chart.Name()+\"/\"+v.Number())\n\t}\n\tdefer lock.RUnlock()\n\tif err := v.Validate(ctx); err != nil {\n\t\treturn nil, err\n\t}\n\tpath := path.Join(v.Prefix, chartPackageName)\n\tdata, err := v.Chart.Space.SpaceManager.Backend.GetContent(ctx, path)\n\tif err != nil {\n\t\treturn nil, ErrorContentNotFound.Format(v.Prefix)\n\t}\n\treturn data, nil\n}",
"func streamContent(w io.Writer) {\n\t// For simplicity, let's use a simple loop here\n\tfor i := 0; i < 10; i++ {\n\t\t// Generate a chunk of data\n\t\tchunk := []byte(fmt.Sprintf(\"Chunk %d\\n\", i))\n\n\t\t// Write the chunk to the writer\n\t\t_, err := w.Write(chunk)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Manually close the connection after streaming is complete\n\tif closer, ok := w.(io.Closer); ok {\n\t\tcloser.Close()\n\t}\n}",
"func (ps *ProxySettings) Content(ctx context.Context, config *Config) (*Config, error) {\n\tif err := ps.ui.WaitUntilExists(config.HostNode())(ctx); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to ensure node %q exists and is shown on the screen\", config.HostName())\n\t}\n\n\tinfoHostNode, err := ps.ui.Info(ctx, config.HostNode())\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to get node info for field %q\", config.HostName())\n\t}\n\n\tif err := ps.ui.WaitUntilExists(config.PortNode())(ctx); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to ensure node %q exists and is shown on the screen\", config.HostName())\n\t}\n\n\tinfoPortNode, err := ps.ui.Info(ctx, config.PortNode())\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to get node info for field %q\", config.PortName())\n\t}\n\n\treturn &Config{\n\t\tProtocol: config.Protocol,\n\t\tHost: infoHostNode.Value,\n\t\tPort: infoPortNode.Value,\n\t}, nil\n}",
"func ContentText(inStream io.Reader) ([]string, error) {\n\tscanner := bufio.NewScanner(inStream)\n\tlist := make([]string, 0)\n\tfor scanner.Scan() {\n\t\tlist = append(list, scanner.Text())\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn list, nil\n}",
"func ReadContent(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t// Get the parameter.\n\tslug := ps.ByName(\"slug\")\n\n\t// Do the connection and select database.\n\tdb := MongoDBConnect()\n\n\tresult := Content{}\n\n\t// Do the query to a collection on database.\n\tif err := db.Collection(\"sample_content\").FindOne(nil, bson.D{{\"slug\", slug}}).Decode(&result); err != nil {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\t// Get content file (in markdown format).\n\tfileContent, err := ioutil.ReadFile(\"web/content/samples/\" + result.ContentFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Prepare renderer.\n\tcr := NewChromaRenderer(\"paraiso-light\")\n\tcontent := string(blackfriday.Run(fileContent, blackfriday.WithRenderer(cr)))\n\n\t// Prepare data structure for data passed to template.\n\ttype TemplateData struct {\n\t\tContent template.HTML\n\t\tSlug string\n\t\tEnv string\n\t}\n\n\ttemplateData := TemplateData{Content: template.HTML(content), Slug: slug, Env: os.Getenv(\"IGO_ENV\")}\n\n\t// Parse templates.\n\tvar templates = template.Must(template.New(\"\").ParseFiles(\"web/templates/_base.html\", \"web/templates/read-content.html\"))\n\n\t// Execute template.\n\ttemplates.ExecuteTemplate(w, \"_base.html\", templateData)\n}",
"func (o FileContentBufferPtrOutput) Content() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FileContentBuffer) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Content\n\t}).(pulumi.StringPtrOutput)\n}",
"func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {\n\tdefer debugTime()()\n\treader, err := d.shell.Cat(d.fullPath(path))\n\tif err != nil {\n\t\tif strings.HasPrefix(err.Error(), \"no link named\") {\n\t\t\treturn nil, storagedriver.PathNotFoundError{Path: path}\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tcontent, err := ioutil.ReadAll(reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Debugf(\"Got content %s: %s\", path, content)\n\n\treturn content, nil\n}",
"func (m *WorkbookCommentReply) GetContent()(*string) {\n return m.content\n}",
"func (sys *HTTPConsoleLoggerSys) Content() (logs []log.Entry) {\n\tsys.RLock()\n\tsys.logBuf.Do(func(p interface{}) {\n\t\tif p != nil {\n\t\t\tlg, ok := p.(log.Info)\n\t\t\tif ok {\n\t\t\t\tif (lg.Entry != log.Entry{}) {\n\t\t\t\t\tlogs = append(logs, lg.Entry)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\tsys.RUnlock()\n\n\treturn\n}",
"func (api *API) getContentPrivateHandler(w http.ResponseWriter, req *http.Request) {\n\tctx := req.Context()\n\tvars := mux.Vars(req)\n\tid := vars[\"id\"]\n\tlogdata := log.Data{\n\t\t\"request_id\": ctx.Value(dprequest.RequestIdKey),\n\t\t\"content_id\": id,\n\t\t\"function\": \"getContentPrivateHandler\",\n\t}\n\n\t// get type from query parameters, or default value\n\tqueryTypeFlags := getContentTypeParameter(req.URL.Query())\n\tif queryTypeFlags == 0 {\n\t\thandleError(ctx, w, apierrors.ErrContentUnrecognisedParameter, logdata)\n\t\treturn\n\t}\n\n\t// check topic from mongoDB by id\n\terr := api.dataStore.Backend.CheckTopicExists(id)\n\tif err != nil {\n\t\thandleError(ctx, w, err, logdata)\n\t\treturn\n\t}\n\n\t// get content from mongoDB by id\n\tcontent, err := api.dataStore.Backend.GetContent(id, queryTypeFlags)\n\tif err != nil {\n\t\t// no content found\n\t\thandleError(ctx, w, err, logdata)\n\t\treturn\n\t}\n\n\t// User has valid authentication to get raw full content document(s)\n\n\tif content.Current == nil {\n\t\t// TODO\n\t\t/*\n\t\t\tIn the future: when the API becomes more than read-only\n\t\t\tWhen a document is first created, it will only have 'next' until it is published, when it gets 'current' populated.\n\t\t\tSo current == nil is not an error.\n\n\t\t\tFor now we return an error because we dont have publishing steps.\n\t\t*/\n\t\thandleError(ctx, w, apierrors.ErrInternalServer, logdata)\n\t\treturn\n\t}\n\tif content.Next == nil {\n\t\thandleError(ctx, w, apierrors.ErrInternalServer, logdata)\n\t\treturn\n\t}\n\n\tcurrentResult := getRequiredItems(queryTypeFlags, content.Current, content.ID)\n\n\t// The 'Next' type items may have a different length to the current, so we do the above again, but for Next\n\tnextResult := getRequiredItems(queryTypeFlags, content.Next, content.ID)\n\n\tif currentResult.TotalCount == 0 && nextResult.TotalCount == 0 {\n\t\thandleError(ctx, w, apierrors.ErrContentNotFound, logdata)\n\t\treturn\n\t}\n\n\tvar 
result models.PrivateContentResponseAPI\n\tresult.Next = nextResult\n\tresult.Current = currentResult\n\n\tif err := WriteJSONBody(ctx, result, w, logdata); err != nil {\n\t\t// WriteJSONBody has already logged the error\n\t\treturn\n\t}\n\tlog.Event(ctx, \"request successful\", log.INFO, logdata) // NOTE: name of function is in logdata\n}",
"func Read() []string {\n\treturn content.Items\n}",
"func (_AccessIndexor *AccessIndexorCaller) ContentObjects(opts *bind.CallOpts) (struct {\n\tCategory uint8\n\tLength *big.Int\n}, error) {\n\tvar out []interface{}\n\terr := _AccessIndexor.contract.Call(opts, &out, \"contentObjects\")\n\n\toutstruct := new(struct {\n\t\tCategory uint8\n\t\tLength *big.Int\n\t})\n\tif err != nil {\n\t\treturn *outstruct, err\n\t}\n\n\toutstruct.Category = *abi.ConvertType(out[0], new(uint8)).(*uint8)\n\toutstruct.Length = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int)\n\n\treturn *outstruct, err\n\n}",
"func content(lines []string) []byte {\n\treturn []byte(strings.Join(lines, \"\\n\") + \"\\n\")\n}",
"func xmlParserContents(ctx context.Context, rfC <-chan *resourceFile) (<-chan *rdpb.Resource, <-chan error) {\n\tresC := make(chan *rdpb.Resource)\n\terrC := make(chan error)\n\tgo func() {\n\t\tdefer close(resC)\n\t\tdefer close(errC)\n\t\tfor rf := range rfC {\n\t\t\tif !syncParseContents(respipe.PrefixErr(ctx, fmt.Sprintf(\"%s xml-parse: \", rf.pathInfo.Path)), rf.pathInfo, bytes.NewReader(rf.contents), resC, errC) {\n\t\t\t\t// ctx must have been canceled - exit.\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn resC, errC\n}",
"func contentExtractor(contents []byte, src string, vars []string) bytes.Buffer {\n\t// Create a parser\n\tprog, err := parser.ParseProgram([]byte(src), nil)\n\tif err != nil {\n\t\tFatalf(\"Failed to parse the program: %s\", src)\n\t}\n\n\t// The configuration\n\tvar buf bytes.Buffer\n\tconfig := &interp.Config{\n\t\tStdin: bytes.NewReader([]byte(contents)),\n\t\tVars: vars,\n\t\tOutput: &buf,\n\t}\n\n\t// Execute the program\n\t_, err = interp.ExecProgram(prog, config)\n\tif err != nil {\n\t\tFatalf(\"Failure in executing the goawk script: %v\", err)\n\t}\n\treturn buf\n}",
"func PbContents(cs []*table.Content) []*Content {\n\tif cs == nil {\n\t\treturn make([]*Content, 0)\n\t}\n\n\tresult := make([]*Content, 0)\n\tfor _, c := range cs {\n\t\tresult = append(result, PbContent(c))\n\t}\n\n\treturn result\n}",
"func (r *Document) Content() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"content\"])\n}",
"func (p ListParserImpl) Parse(content string) []model.Word {\n\tresult := []model.Word{}\n\tdoc, err := goquery.NewDocumentFromReader(strings.NewReader(content))\n\tif err != nil {\n\t\treturn result\n\t}\n\n\twords := doc.Find(WORDS_SECTION).First()\n\n\twords.Find(WORD_BLOCK).Each(func(_ int, node *goquery.Selection) {\n\t\ttitle, _ := node.Attr(\"title\")\n\t\twordsList := strings.Split(title, \" \")\n\t\tword := wordsList[len(wordsList)-1]\n\t\tword = strings.Trim(word, \"\\n\")\n\t\tif len(word) > 3 {\n\t\t\turl, _ := node.Attr(\"href\")\n\t\t\turl = strings.Trim(url, \"\\n\")\n\t\t\twordObject := model.Word{\n\t\t\t\tName: word,\n\t\t\t\tURL: url,\n\t\t\t}\n\t\t\tresult = append(result, wordObject)\n\t\t}\n\t})\n\n\treturn result\n}",
"func (obj *request) Content() Content {\n\treturn obj.content\n}",
"func (d *Dao) Content(c context.Context, oid, dmid int64) (ct *model.Content, err error) {\n\tct = &model.Content{}\n\trow := d.dmMetaReader.QueryRow(c, fmt.Sprintf(_contentSQL, d.hitContent(oid)), dmid)\n\tif err = row.Scan(&ct.ID, &ct.FontSize, &ct.Color, &ct.Mode, &ct.IP, &ct.Plat, &ct.Msg, &ct.Ctime, &ct.Mtime); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\tct = nil\n\t\t\terr = nil\n\t\t} else {\n\t\t\tlog.Error(\"row.Scan() error(%v)\", err)\n\t\t}\n\t}\n\treturn\n}",
"func parseFileContent(s *setup, content []byte) error {\n\tdecoder := s.conf.FileDecoder\n\tif decoder == nil {\n\t\t// Look for the config file extension to determine the encoding.\n\t\tswitch path.Ext(s.configFilePath) {\n\t\tcase \"json\":\n\t\t\tdecoder = DecoderJSON\n\t\tcase \"toml\":\n\t\t\tdecoder = DecoderTOML\n\t\tcase \"yaml\", \"yml\":\n\t\t\tdecoder = DecoderYAML\n\t\tdefault:\n\t\t\tdecoder = DecoderTryAll\n\t\t}\n\t}\n\n\tm, err := decoder(content)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to parse file at %v: %v\",\n\t\t\ts.configFilePath, err)\n\t}\n\n\t// Parse the map for the options.\n\tif err := parseMapOpts(m, s.opts); err != nil {\n\t\treturn fmt.Errorf(\"error loading config vars from config file: %v\", err)\n\t}\n\n\treturn nil\n}",
"func (_BaseLibrary *BaseLibraryFilterer) ParseContentObjectCreated(log types.Log) (*BaseLibraryContentObjectCreated, error) {\n\tevent := new(BaseLibraryContentObjectCreated)\n\tif err := _BaseLibrary.contract.UnpackLog(event, \"ContentObjectCreated\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func ReadMessageContent(s Stream) (content []byte, err error) {\n\tvar (\n\t\tr = bufio.NewReader(s)\n\t)\n\ttimeoutDuration := 1 * time.Second\n\tif err = s.SetReadDeadline(time.Now().Add(timeoutDuration)); err != nil {\n\t\tlog.Error(\"cannot reset deadline for message header read\", \"error\", err)\n\t\treturn\n\t}\n\t//// Read 1 byte for message type\n\tif _, err = r.ReadByte(); err != nil {\n\t\tlog.Error(\"failed to read p2p message type field\", \"error\", err)\n\t\treturn\n\t}\n\t// TODO: check on msgType and take actions accordingly\n\t//// Read 4 bytes for message size\n\tfourBytes := make([]byte, 4)\n\tif _, err = io.ReadFull(r, fourBytes); err != nil {\n\t\tlog.Error(\"failed to read p2p message size field\", \"error\", err)\n\t\treturn\n\t}\n\n\tcontentLength := int(binary.BigEndian.Uint32(fourBytes))\n\tcontentBuf := make([]byte, contentLength)\n\ttimeoutDuration = 20 * time.Second\n\tif err = s.SetReadDeadline(time.Now().Add(timeoutDuration)); err != nil {\n\t\tlog.Error(\"cannot reset deadline for message content read\", \"error\", err)\n\t\treturn\n\t}\n\tif _, err = io.ReadFull(r, contentBuf); err != nil {\n\t\tlog.Error(\"failed to read p2p message contents\", \"error\", err)\n\t\treturn\n\t}\n\tcontent = contentBuf\n\treturn\n}",
"func (pb *PostBody) Content() string {\n\tif pb == nil {\n\t\treturn \"\"\n\t}\n\treturn pb.body\n}",
"func (r *Response) Content() (string, error) {\n\tb, err := r.Body()\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\treturn string(b), nil\n}",
"func (d *Dao) Contents(c context.Context, oid int64, dmids []int64) (res []*model.Content, err error) {\n\tvar (\n\t\twg errgroup.Group\n\t\tlock sync.Mutex\n\t)\n\tpageNum := len(dmids) / _pagesize\n\tif len(dmids)%_pagesize > 0 {\n\t\tpageNum = pageNum + 1\n\t}\n\tfor i := 0; i < pageNum; i++ {\n\t\tstart := i * _pagesize\n\t\tend := (i + 1) * _pagesize\n\t\tif end > len(dmids) {\n\t\t\tend = len(dmids)\n\t\t}\n\t\twg.Go(func() (err error) {\n\t\t\trows, err := d.dmMetaReader.Query(c, fmt.Sprintf(_contentsSQL, d.hitContent(oid), xstr.JoinInts(dmids[start:end])))\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"db.Query(%s) error(%v)\", fmt.Sprintf(_contentsSQL, d.hitContent(oid), xstr.JoinInts(dmids)), err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer rows.Close()\n\t\t\tfor rows.Next() {\n\t\t\t\tct := &model.Content{}\n\t\t\t\tif err = rows.Scan(&ct.ID, &ct.FontSize, &ct.Color, &ct.Mode, &ct.IP, &ct.Plat, &ct.Msg, &ct.Ctime, &ct.Mtime); err != nil {\n\t\t\t\t\tlog.Error(\"rows.Scan() error(%v)\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlock.Lock()\n\t\t\t\tres = append(res, ct)\n\t\t\t\tlock.Unlock()\n\t\t\t}\n\t\t\terr = rows.Err()\n\t\t\treturn\n\t\t})\n\t}\n\tif err = wg.Wait(); err != nil {\n\t\tlog.Error(\"wg.Wait() error(%v)\", err)\n\t}\n\treturn\n}",
"func (client *Client) GetContent(path string) *VoidResponse {\n\tendpoint := client.baseURL + fmt.Sprintf(EndpointGetContent, client.accessToken, path)\n\trequest := gorequest.New().Get(endpoint).Set(UserAgentHeader, UserAgent+\"/\"+Version)\n\n\treturn &VoidResponse{\n\t\tClient: client,\n\t\tRequest: request,\n\t}\n}",
"func (h *Handler) ClientAccessPolicyXMLContent() []byte {\n\treturn h.clientAccessPolicyXMLContent\n}",
"func FetchContent(path string) (interface{}, error) {\n\treturn util.LoadFile(path)\n}",
"func RequestContent(method string, url string, data ...interface{}) string {\n\treturn client.New().RequestContent(method, url, data...)\n}",
"func ParseJSON(content []byte) (*Container, error) {\n\tjsonContainer, err := gabs.ParseJSON(content)\n\treturn &Container{JSONContainer: jsonContainer}, err\n}",
"func PbContent(c *table.Content) *Content {\n\tif c == nil {\n\t\treturn nil\n\t}\n\n\treturn &Content{\n\t\tId: c.ID,\n\t\tSpec: PbContentSpec(c.Spec),\n\t\tAttachment: PbContentAttachment(c.Attachment),\n\t\tRevision: pbbase.PbCreatedRevision(c.Revision),\n\t}\n}",
"func (s *Service) GetContent(c context.Context, likeSubType int, likes map[int64]*model.Like, ids []int64, wids []int64, mids []int64) (err error) {\n\tswitch likeSubType {\n\tcase model.PICTURE, model.PICTURELIKE, model.DRAWYOO, model.DRAWYOOLIKE, model.TEXT, model.TEXTLIKE, model.QUESTION:\n\t\terr = s.accountAndContent(c, ids, mids, likes)\n\tcase model.VIDEO, model.VIDEOLIKE, model.ONLINEVOTE, model.VIDEO2, model.PHONEVIDEO, model.SMALLVIDEO:\n\t\terr = s.archiveWithTag(c, wids, likes)\n\tcase model.ARTICLE:\n\t\terr = s.articles(c, wids, likes)\n\tcase model.MUSIC:\n\t\terr = s.musicsAndAct(c, wids, mids, likes)\n\tdefault:\n\t\terr = ecode.RequestErr\n\t}\n\treturn\n}",
"func (msg *Message) GetContent() interface{} {\n\treturn msg.Content\n}",
"func (o ApiImportPtrOutput) ContentFormat() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ApiImport) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.ContentFormat\n\t}).(pulumi.StringPtrOutput)\n}",
"func (_BaseLibrary *BaseLibraryFilterer) ParseApproveContent(log types.Log) (*BaseLibraryApproveContent, error) {\n\tevent := new(BaseLibraryApproveContent)\n\tif err := _BaseLibrary.contract.UnpackLog(event, \"ApproveContent\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func (msg *Message) Content() []byte {\n\treturn msg.content\n}",
"func (obj *Variable) GetContent(ctx context.Context) (*AlfaNumString, error) {\n\tresult := &struct {\n\t\tContent *AlfaNumString `json:\"qContent\"`\n\t}{}\n\terr := obj.RPC(ctx, \"GetContent\", result)\n\treturn result.Content, err\n}",
"func NegotiateContent(c echo.Context) Content {\n\theader := c.Request().Header.Get(\"Accept\")\n\tif header == \"\" {\n\t\treturn JSON // default\n\t}\n\n\taccept := goautoneg.ParseAccept(header)\n\tif len(accept) == 0 {\n\t\treturn JSON // default\n\t}\n\n\t// use the first element, because this has the highest priority\n\tswitch accept[0].SubType {\n\tcase \"html\":\n\t\treturn HTML\n\tcase \"json\":\n\t\treturn JSON\n\tcase \"plain\":\n\t\treturn TEXT\n\tdefault:\n\t\treturn JSON\n\t}\n}",
"func (r *regulator) GetContent(ctx context.Context, path string) ([]byte, error) {\n\tr.enter()\n\tdefer r.exit()\n\n\treturn r.StorageDriver.GetContent(ctx, path)\n}",
"func (s ShowApp) GetContent() map[string]interface{} {\n\treturn map[string]interface{}{}\n}",
"func (fh *FilesystemHandler) GetContainerContents(container *models.SimpleContainer) error {\n\n\tfullPath := fh.generateFullPath(container)\n\tdir, err := os.OpenFile(fullPath, os.O_RDONLY, 0)\n\tif err != nil {\n\t\tlog.Fatal(\"ERR OpenFile \", err)\n\t}\n\n\tfileInfos, err := dir.Readdir(0)\n\tif err != nil {\n\t\tlog.Fatal(\"ERR ReadDir\", err)\n\t}\n\n\tfor _, f := range fileInfos {\n\n\t\t// determine if file or directory.\n\t\t// do we go recursive?\n\t\tif f.IsDir() {\n\t\t\tsc := models.NewSimpleContainer()\n\t\t\tsc.Name = f.Name()\n\t\t\tsc.Origin = models.Filesystem\n\t\t\tsc.ParentContainer = container\n\t\t\tsc.Populated = false\n\t\t\tsc.IsRootContainer = false\n\t\t\tfh.GetContainerContents(sc)\n\t\t\tcontainer.ContainerSlice = append(container.ContainerSlice, sc)\n\n\t\t} else {\n\t\t\tb := models.SimpleBlob{}\n\t\t\tb.Name = f.Name()\n\t\t\tb.ParentContainer = container\n\t\t\tb.Origin = models.Filesystem\n\t\t\tb.URL = filepath.Join(fh.generateFullPath(container), b.Name)\n\t\t\tcontainer.BlobSlice = append(container.BlobSlice, &b)\n\n\t\t}\n\t}\n\tcontainer.Populated = true\n\n\treturn nil\n}",
"func (_BaseContent *BaseContentFilterer) ParseContentObjectCreate(log types.Log) (*BaseContentContentObjectCreate, error) {\n\tevent := new(BaseContentContentObjectCreate)\n\tif err := _BaseContent.contract.UnpackLog(event, \"ContentObjectCreate\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}",
"func ParseContentString(text string) (*Appcast, error) {\n\tvar appcast = New()\n\tif err := xml.Unmarshal([]byte(text), appcast); err != nil {\n\t\treturn nil, err\n\t}\n\treturn appcast, nil\n}",
"func ContentDecoder(contentType string) func(r io.Reader) Decoder {\n\tswitch contentType {\n\tcase \"application/json\":\n\t\treturn func(r io.Reader) Decoder { return json.NewDecoder(r) }\n\tcase \"application/xml\", \"text/xml\":\n\t\treturn func(r io.Reader) Decoder { return xml.NewDecoder(r) }\n\tcase \"application/x-www-form-urlencoded\", \"multipart/form-data\":\n\t\treturn func(r io.Reader) Decoder { return form.NewDecoder(r) }\n\tdefault:\n\t\treturn func(r io.Reader) Decoder { return json.NewDecoder(r) }\n\t}\n}",
"func (o LookupDocumentResultOutput) Content() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupDocumentResult) string { return v.Content }).(pulumi.StringOutput)\n}",
"func NewContent(toRead interface{}) *Content {\n\treturn &Content{read: toRead, eof: false}\n}"
] | [
"0.57283705",
"0.56128794",
"0.55986345",
"0.55372673",
"0.54029614",
"0.5345638",
"0.5332285",
"0.5297525",
"0.5254313",
"0.5217522",
"0.51418024",
"0.5105939",
"0.509732",
"0.50940305",
"0.5086418",
"0.5086146",
"0.50813574",
"0.50558066",
"0.50556326",
"0.50510985",
"0.50381976",
"0.5036844",
"0.50088984",
"0.5005999",
"0.50050104",
"0.5003388",
"0.49737796",
"0.49687752",
"0.49628815",
"0.4957375",
"0.49499902",
"0.49493366",
"0.4915526",
"0.4909735",
"0.48736137",
"0.48725843",
"0.4871402",
"0.4857189",
"0.48482195",
"0.48469415",
"0.4823832",
"0.48157775",
"0.48028404",
"0.48009917",
"0.47999138",
"0.47791368",
"0.47769597",
"0.47660735",
"0.4759403",
"0.47577474",
"0.47565818",
"0.4753495",
"0.47530136",
"0.47451478",
"0.47422272",
"0.47324726",
"0.4722217",
"0.47094464",
"0.47020376",
"0.4659754",
"0.4654114",
"0.46355176",
"0.46340975",
"0.46284562",
"0.46261543",
"0.46255526",
"0.46245244",
"0.46184024",
"0.46177518",
"0.46130514",
"0.4612625",
"0.46105963",
"0.46078616",
"0.46044636",
"0.45995834",
"0.45983407",
"0.45972654",
"0.45860955",
"0.45778236",
"0.45529553",
"0.45519978",
"0.45404127",
"0.45309827",
"0.452757",
"0.45239112",
"0.45196354",
"0.45143345",
"0.44995764",
"0.44953468",
"0.44910264",
"0.44909567",
"0.44891688",
"0.44876194",
"0.44847643",
"0.44774032",
"0.44762656",
"0.44761902",
"0.4474418",
"0.44701836",
"0.44604078"
] | 0.728402 | 0 |
BeforeNow checks a mm/dd/yyyy string to determine if it is before now. | BeforeNow проверяет строку в формате mm/dd/yyyy, чтобы определить, является ли она датой, предшествующей текущему моменту. | func BeforeNow(d string) bool {
f := strings.FieldsFunc(d, func(r rune) bool { return r == '/' })
t := time.Date(atoi(f[2]), time.Month(atoi(f[0])), atoi(f[1]), 0, 0, 0, 0, time.UTC)
return t.Before(time.Now())
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func ISODateStringBeforeToday(datetime string) (bool, error) {\n\tdate, err := IsoDateFormatter(datetime)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\ttoday := time.Date(time.Now().Year(), time.Now().Month(), time.Now().Day(), 0, 0, 0, 0, date.Location())\n\tif date.Before(today) {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}",
"func (t UnixTime) Before(t2 UnixTime) bool {\n\treturn time.Time(t).Before(time.Time(t2))\n}",
"func (d Date) Before(d2 Date) bool {\n\tif d.Year != d2.Year {\n\t\treturn d.Year < d2.Year\n\t}\n\tif d.Month != d2.Month {\n\t\treturn d.Month < d2.Month\n\t}\n\treturn d.Day < d2.Day\n}",
"func (h *ValidationHelper) Before(t time.Time) bool {\n\treturn h.now().Before(t.Add(-h.leeway))\n}",
"func ExampleTime_Before() {\n\tgt1 := gtime.New(\"2018-08-07\")\n\tgt2 := gtime.New(\"2018-08-08\")\n\n\tfmt.Println(gt1.Before(gt2))\n\n\t// Output:\n\t// true\n}",
"func (d Date) Before(t Date) bool {\n\treturn t.After(d)\n}",
"func (dt DateTime) Before(u DateTime) bool {\n\n\treturn dt.src.Before(u.src)\n}",
"func TestBackupBefore(t *testing.T) {\n\tb := backup{t: time.Unix(2, 0)}\n\tif b.before(time.Unix(1, 0)) {\n\t\tt.Errorf(\"b.before(time.Unix(1, 0)) returns false\")\n\t}\n\n\tif b.before(time.Unix(2, 0)) {\n\t\tt.Errorf(\"b.before(time.Unix(2, 0)) returns false\")\n\t}\n}",
"func ShouldHappenBefore(actual interface{}, expected ...interface{}) string {\n\tif fail := need(1, expected); fail != success {\n\t\treturn fail\n\t}\n\tactualTime, firstOk := actual.(time.Time)\n\texpectedTime, secondOk := expected[0].(time.Time)\n\n\tif !firstOk || !secondOk {\n\t\treturn shouldUseTimes\n\t}\n\n\tif !actualTime.Before(expectedTime) {\n\t\treturn fmt.Sprintf(shouldHaveHappenedBefore, actualTime, expectedTime, actualTime.Sub(expectedTime))\n\t}\n\n\treturn success\n}",
"func TimestampBefore(ts *tspb.Timestamp, uts *tspb.Timestamp) bool {\n\treturn ts.GetSeconds() < uts.GetSeconds() || ts.GetSeconds() == uts.GetSeconds() && ts.GetNanos() < uts.GetNanos()\n}",
"func (t Timestamp) Before(u Timestamp) bool {\n\treturn time.Time(t).Before(time.Time(u))\n}",
"func IsDateBeforeUTCToday(requestedDate time.Time) (isBefore bool) {\n\tlocation, err := time.LoadLocation(\"UTC\")\n\tif err != nil {\n\t\treturn true\n\t}\n\tutcDate := time.Now().In(location)\n\t// Can't do the direct time comparison (time.Before() time.After())\n\t// because the actual timestamp doesn't matter, just the year/month/day\n\tisBeforeUTC := requestedDate.Year() <= utcDate.Year() && requestedDate.Month() <= utcDate.Month() && requestedDate.Day() < utcDate.Day()\n\ttoLog(\"IsDateBeforeUTCToday\", \"Requested: \"+requestedDate.Format(time.RFC822)+\", UTC Date: \"+utcDate.Format(time.RFC822)+\" -> BEFORE: \"+strconv.FormatBool(isBeforeUTC))\n\treturn isBeforeUTC\n}",
"func (t *timeDataType) Before(after time.Time) *timeDataType {\n\treturn t.Validate(func(t time.Time) error {\n\t\tif t.Before(after) {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"time was not before %s\", after.Format(time.RFC3339))\n\t})\n}",
"func (m *dateBefore) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o BucketLifecycleRuleConditionOutput) CreatedBefore() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BucketLifecycleRuleCondition) *string { return v.CreatedBefore }).(pulumi.StringPtrOutput)\n}",
"func CheckDateBeforeOrEqual(dateFrom string, dateTo string, layoutFormatDate string) bool {\n\tdateFromTime, _ := time.Parse(layoutFormatDate, dateFrom)\n\tdateToTime, _ := time.Parse(layoutFormatDate, dateTo)\n\n\tif dateFromTime.Before(dateToTime) || dateFromTime.Equal(dateToTime) {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o BucketLifecycleRuleItemConditionPtrOutput) CreatedBefore() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BucketLifecycleRuleItemCondition) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.CreatedBefore\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o BucketLifecycleRuleItemConditionOutput) CreatedBefore() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BucketLifecycleRuleItemCondition) *string { return v.CreatedBefore }).(pulumi.StringPtrOutput)\n}",
"func (o BucketLifecycleRuleItemConditionResponseOutput) CreatedBefore() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BucketLifecycleRuleItemConditionResponse) string { return v.CreatedBefore }).(pulumi.StringOutput)\n}",
"func ShouldHappenOnOrBefore(actual interface{}, expected ...interface{}) string {\n\tif fail := need(1, expected); fail != success {\n\t\treturn fail\n\t}\n\tactualTime, firstOk := actual.(time.Time)\n\texpectedTime, secondOk := expected[0].(time.Time)\n\n\tif !firstOk || !secondOk {\n\t\treturn shouldUseTimes\n\t}\n\n\tif actualTime.Equal(expectedTime) {\n\t\treturn success\n\t}\n\treturn ShouldHappenBefore(actualTime, expectedTime)\n}",
"func Before(t time.Time) manifest.Filter {\n\treturn func(r *manifest.Resource) bool {\n\t\treturn r.Time.Before(t)\n\t}\n}",
"func (o *FiltersApiLog) GetQueryDateBeforeOk() (string, bool) {\n\tif o == nil || o.QueryDateBefore == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.QueryDateBefore, true\n}",
"func BusinessDaysBefore(t time.Time, bds int) time.Time {\n\treturn travelBusinessDays(t, bds, false)\n}",
"func (m *dateBefore) DateBefore() float64 {\n\treturn m.dateBeforeField\n}",
"func dateLessThan(t1 time.Time, t2 time.Time) bool {\n\td1 := time.Date(t1.Year(), t1.Month(), t1.Day(), 0, 0, 0, 0, t1.Location())\n\td2 := time.Date(t2.Year(), t2.Month(), t2.Day(), 0, 0, 0, 0, t2.Location())\n\treturn d1.Unix() < d2.Unix()\n}",
"func currentDateEarlierThenOldDate(currentDate string, oldDate string) (bool, error) {\n\tconst shortDate = \"2006-01-02\"\n\tparsedCurrentDate, err := time.Parse(shortDate, currentDate)\n\tif err != nil {\n\t\treturn true, err // Defaults to true so entries in IoDevice Map are not changed\n\t}\n\tparsedOldDate, err := time.Parse(shortDate, oldDate)\n\tif err != nil {\n\t\treturn true, err // Defaults to true so entries in IoDevice Map are not changed\n\t}\n\n\tif parsedCurrentDate.After(parsedOldDate) {\n\t\treturn false, err\n\t}\n\treturn true, err\n}",
"func (o BucketLifecycleRuleConditionOutput) NoncurrentTimeBefore() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BucketLifecycleRuleCondition) *string { return v.NoncurrentTimeBefore }).(pulumi.StringPtrOutput)\n}",
"func (p PartitionRange) SinceBefore(clock PartitionClock) bool {\n\tfor vbNo, seqRange := range p.seqRanges {\n\t\tif seqRange.Since < clock.GetSequence(vbNo) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (v VectorClock) Before(o VectorClock) bool {\n\treturn v.compareTo(o) < 0\n}",
"func (o BucketLifecycleRuleItemConditionPtrOutput) NoncurrentTimeBefore() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BucketLifecycleRuleItemCondition) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.NoncurrentTimeBefore\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o BucketLifecycleRuleItemConditionOutput) NoncurrentTimeBefore() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BucketLifecycleRuleItemCondition) *string { return v.NoncurrentTimeBefore }).(pulumi.StringPtrOutput)\n}",
"func GetQueryBeforeSince(ctx *context.APIContext) (before, since int64, err error) {\n\tqCreatedBefore, err := prepareQueryArg(ctx, \"before\")\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tqCreatedSince, err := prepareQueryArg(ctx, \"since\")\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tbefore, err = parseTime(qCreatedBefore)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tsince, err = parseTime(qCreatedSince)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\treturn before, since, nil\n}",
"func (o *CalendareventsIdJsonEventReminders) SetBefore(v string) {\n\to.Before = &v\n}",
"func (s *ListContextsInput) SetCreatedBefore(v time.Time) *ListContextsInput {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func (t TimeValue) IsBefore(expected interface{}) bool {\n\treturn t.isBefore(NewTimeValue(expected))\n}",
"func CreatedAtLT(v time.Time) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldLT(FieldCreatedAt, v))\n}",
"func (o BucketLifecycleRuleItemConditionResponseOutput) NoncurrentTimeBefore() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BucketLifecycleRuleItemConditionResponse) string { return v.NoncurrentTimeBefore }).(pulumi.StringOutput)\n}",
"func (m *dateBefore) SetDateBefore(val float64) {\n\tm.dateBeforeField = val\n}",
"func (o *CalendareventsIdJsonEventReminders) GetBeforeOk() (*string, bool) {\n\tif o == nil || o.Before == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Before, true\n}",
"func (s *DatastoreFilter) SetCreatedBefore(v time.Time) *DatastoreFilter {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func (current GoVersion) Before(target GoVersion) bool {\n\treturn current.compare(target) == -1\n}",
"func (s *ListPipelinesInput) SetCreatedBefore(v time.Time) *ListPipelinesInput {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func (s *ReferenceStoreFilter) SetCreatedBefore(v time.Time) *ReferenceStoreFilter {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func CreatedAtLT(v time.Time) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldCreatedAt), v))\n\t})\n}",
"func CreatedAtLT(v time.Time) predicate.Step {\n\treturn predicate.Step(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldCreatedAt), v))\n\t})\n}",
"func CreatedAtLT(v time.Time) predicate.Ethnicity {\n\treturn predicate.Ethnicity(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldCreatedAt), v))\n\t})\n}",
"func (s *SequenceStoreFilter) SetCreatedBefore(v time.Time) *SequenceStoreFilter {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func CreatedAtLT(v time.Time) predicate.MetaSchema {\n\treturn predicate.MetaSchema(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldCreatedAt), v))\n\t},\n\t)\n}",
"func Date(s string) bool { return validateDate(s) }",
"func (s *QueryFilters) SetCreatedBefore(v time.Time) *QueryFilters {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func (s *ListTrialComponentsInput) SetCreatedBefore(v time.Time) *ListTrialComponentsInput {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func (s *ReferenceFilter) SetCreatedBefore(v time.Time) *ReferenceFilter {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func (s *ReadSetUploadPartListFilter) SetCreatedBefore(v time.Time) *ReadSetUploadPartListFilter {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func CreatedAtLT(v time.Time) predicate.Block {\n\treturn predicate.Block(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldCreatedAt), v))\n\t})\n}",
"func (s *ListArtifactsInput) SetCreatedBefore(v time.Time) *ListArtifactsInput {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func (g *GitLocal) GetRevisionBeforeDate(dir string, t time.Time) (string, error) {\n\treturn g.GitCLI.GetRevisionBeforeDate(dir, t)\n}",
"func (s *ListLineageGroupsInput) SetCreatedBefore(v time.Time) *ListLineageGroupsInput {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func CreatedAtLT(v time.Time) predicate.Order {\n\treturn predicate.Order(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldCreatedAt), v))\n\t})\n}",
"func (s *ImportReferenceFilter) SetCreatedBefore(v time.Time) *ImportReferenceFilter {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func CreatedAtLT(v time.Time) predicate.K8sEvent {\n\treturn predicate.K8sEvent(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldCreatedAt), v))\n\t})\n}",
"func (s *ListExperimentsInput) SetCreatedBefore(v time.Time) *ListExperimentsInput {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func (s *ListAssociationsInput) SetCreatedBefore(v time.Time) *ListAssociationsInput {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func (s *ListPipelineExecutionsInput) SetCreatedBefore(v time.Time) *ListPipelineExecutionsInput {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func (s *ActivateReadSetFilter) SetCreatedBefore(v time.Time) *ActivateReadSetFilter {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func (s *ListTrialsInput) SetCreatedBefore(v time.Time) *ListTrialsInput {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func CreatedAtLT(v time.Time) predicate.GameServer {\n\treturn predicate.GameServer(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldCreatedAt), v))\n\t})\n}",
"func CreatedAtLT(v time.Time) predicate.OrderItem {\n\treturn predicate.OrderItem(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldCreatedAt), v))\n\t})\n}",
"func CreatedAtLT(v time.Time) predicate.Job {\n\treturn predicate.Job(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldCreatedAt), v))\n\t})\n}",
"func (o *FiltersApiLog) GetQueryDateBefore() string {\n\tif o == nil || o.QueryDateBefore == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.QueryDateBefore\n}",
"func (s *ListActionsInput) SetCreatedBefore(v time.Time) *ListActionsInput {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func (s *ReadSetFilter) SetCreatedBefore(v time.Time) *ReadSetFilter {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func (s *ImportReadSetFilter) SetCreatedBefore(v time.Time) *ImportReadSetFilter {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func TestNowf(t *testing.T) {\n\tyear := strconv.Itoa(time.Now().Year())\n\ttestee := &Time{}\n\tif testee.Nowf(\"2006\") != year {\n\t\tt.Error(\"Failed: something wrong with getting time \")\n\t}\n}",
"func (o *AdminSearchUsersV2Params) SetBefore(before *string) {\n\to.Before = before\n}",
"func CompareOneHourBefore(lastSearch time.Time) bool {\n\ttoday := time.Now()\n\tcomparator := lastSearch.Add(1 * time.Hour)\n\tif today.Before(comparator) {\n\t\treturn false\n\t} else {\n\t\treturn true\n\t}\n}",
"func (skeleton *Skeleton) BeforeCreate() error {\n\ttimeNow := time.Now().UTC().Format(time.RFC3339)\n\tformattedTime, err := time.Parse(time.RFC3339, timeNow)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to parse time. %v\", err)\n\t}\n\tskeleton.UpdatedAt = &formattedTime\n\tskeleton.CreatedAt = &formattedTime\n\treturn nil\n}",
"func CreatedAtLT(v time.Time) predicate.Permission {\n\treturn predicate.Permission(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldCreatedAt), v))\n\t})\n}",
"func (h *ValidationHelper) ValidateNotBefore(nbf *Time) error {\n\t// 'nbf' claim is not set. ignore.\n\tif nbf == nil {\n\t\treturn nil\n\t}\n\n\t// Nbf hasn't been reached\n\tif h.Before(nbf.Time) {\n\t\tdelta := nbf.Time.Sub(h.now())\n\t\treturn &TokenNotValidYetError{At: h.now(), EarlyBy: delta}\n\t}\n\t// Nbf has been reached. valid.\n\treturn nil\n}",
"func IsLessThan(timeLeft time.Time, timeRight time.Time) bool {\n\tdurDelta := timeLeft.Sub(timeRight)\n\tif durZero, _ := time.ParseDuration(\"0ns\"); durDelta < durZero {\n\t\treturn true\n\t}\n\treturn false\n}",
"func IsDate(srcDate string) bool {\n\t_, err := strutil.ToTime(srcDate)\n\treturn err == nil\n}",
"func (me TxsdPostOfficeSequenceChoicePostOfficeNumberIndicatorOccurrence) IsBefore() bool {\r\n\treturn me == \"Before\"\r\n}",
"func CreatedAtLT(v time.Time) predicate.Delivery {\n\treturn predicate.Delivery(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldCreatedAt), v))\n\t})\n}",
"func CreatedAtLT(v time.Time) predicate.OrderLineItem {\n\treturn predicate.OrderLineItem(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldCreatedAt), v))\n\t})\n}",
"func (s *ExportReadSetFilter) SetCreatedBefore(v time.Time) *ExportReadSetFilter {\n\ts.CreatedBefore = &v\n\treturn s\n}",
"func NowDate() time.Time {\n\treturn ExtractDateFromDatetime(time.Now())\n}",
"func doFindOlder(createTime time.Time, pattern string) bool {\n\ti, err := TimeHelper(pattern)\n\tnow := time.Now()\n\tfatalIf(probe.NewError(err), \"Error parsing string passed to flag older\")\n\n\t//find all time in which the time in which the object was just created is after the current time\n\tt := time.Date(now.Year(), now.Month(), now.Day()-i, now.Hour(), now.Minute(), 0, 0, time.UTC)\n\treturn createTime.Before(t)\n}",
"func (o *CalendareventsIdJsonEventReminders) GetBefore() string {\n\tif o == nil || o.Before == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Before\n}",
"func (g *GitLocal) GetRevisionBeforeDateText(dir string, dateText string) (string, error) {\n\treturn g.GitCLI.GetRevisionBeforeDateText(dir, dateText)\n}",
"func DatetimeLT(v time.Time) predicate.CarRepairrecord {\n\treturn predicate.CarRepairrecord(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldDatetime), v))\n\t})\n}",
"func ValidateDateStr(str string) bool {\n\tvar re = regexp.MustCompile(`^\\d{4}[\\-\\/\\s]?((((0[13578])|(1[02]))[\\-\\/\\s]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\\-\\/\\s]?(([0-2][0-9])|(30)))|(02[\\-\\/\\s]?[0-2][0-9]))$`)\n\tif len(re.FindStringIndex(str)) > 0 {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (qs SysDBQuerySet) CreatedAtLt(createdAt time.Time) SysDBQuerySet {\n\treturn qs.w(qs.db.Where(\"created_at < ?\", createdAt))\n}",
"func CreatedAtLT(v time.Time) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldCreatedAt), v))\n\t})\n}",
"func CreatedAtLT(v time.Time) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldCreatedAt), v))\n\t})\n}",
"func TestInitialDateTimeChecks4(t *testing.T) {\n\n\texpected := true\n\n\tcurrDate := time.Now().Add(24 * time.Hour).Format(\"2006-01-02\")\n\tactual, _ := restaurantBooking.InitialDateTimeChecks(currDate, \"15:00\")\n\n\tif actual != expected {\n\t\tt.Fail()\n\t}\n\n}",
"func (o *DataExportQuery) SetCreatedBefore(v int32) {\n\to.CreatedBefore = &v\n}",
"func CheckDateBoundariesStr(startdate, enddate string) bool {\n\n\tlayout := \"2006-01-02T15:04:05.000Z\"\n\n\ttstart, err := time.Parse(layout, startdate)\n\tif err != nil {\n\t\treturn false //, fmt.Errorf(\"cannot parse startdate: %v\", err)\n\t}\n\ttend, err := time.Parse(layout, enddate)\n\tif err != nil {\n\t\treturn false //, fmt.Errorf(\"cannot parse enddate: %v\", err)\n\t}\n\n\tif tstart.Before(tend) {\n\t\treturn false //, fmt.Errorf(\"startdate < enddate - please set proper data boundaries\")\n\t}\n\treturn true //, err\n}",
"func (i Interval) Before(o *Interval) bool {\n\treturn i.Start < o.Start\n}",
"func HasPassed(date string) bool {\n\tlayout := \"January 2, 2006 15:04:05\"\n\treturn time.Now().After(ConvertToTime(layout, date))\n}",
"func (util *TimeUtil) NowDate() string {\n\treturn time.Now().Format(\"2006-01-02\")\n}",
"func (w *Wrapper) Now(formats ...string) function {\n\tquery := \"NOW() \"\n\tunitMap := map[string]string{\n\t\t\"Y\": \"YEAR\",\n\t\t\"M\": \"MONTH\",\n\t\t\"D\": \"DAY\",\n\t\t\"W\": \"WEEK\",\n\t\t\"h\": \"HOUR\",\n\t\t\"m\": \"MINUTE\",\n\t\t\"s\": \"SECOND\",\n\t}\n\tfor _, v := range formats {\n\t\toperator := string(v[0])\n\t\tinterval := v[1 : len(v)-1]\n\t\tunit := string(v[len(v)-1])\n\t\tquery += fmt.Sprintf(\"%s INTERVAL %s %s \", operator, interval, unitMap[unit])\n\t}\n\treturn w.Func(strings.TrimSpace(query))\n}"
] | [
"0.6379344",
"0.5772175",
"0.57539195",
"0.5719056",
"0.5665737",
"0.5650461",
"0.5630643",
"0.5603314",
"0.55422056",
"0.55358124",
"0.55042726",
"0.5495403",
"0.54829955",
"0.5410564",
"0.5358622",
"0.53530425",
"0.53356326",
"0.53226084",
"0.526414",
"0.5255999",
"0.5244797",
"0.5238911",
"0.5232905",
"0.5213636",
"0.51849914",
"0.5150107",
"0.5088221",
"0.5055265",
"0.5018673",
"0.500169",
"0.50010645",
"0.4996675",
"0.4983924",
"0.4942143",
"0.49362773",
"0.49337888",
"0.49149498",
"0.4884484",
"0.48430595",
"0.48338607",
"0.48262632",
"0.4820645",
"0.48155162",
"0.48063895",
"0.47976267",
"0.47810507",
"0.47740456",
"0.47726157",
"0.47667736",
"0.47640362",
"0.47609448",
"0.47572744",
"0.47495204",
"0.47468475",
"0.47437045",
"0.47430888",
"0.47424245",
"0.47406656",
"0.47341734",
"0.47336298",
"0.47235835",
"0.47152847",
"0.47116768",
"0.47042042",
"0.47037685",
"0.47027776",
"0.47007856",
"0.46905598",
"0.4689768",
"0.46800286",
"0.46795583",
"0.46718872",
"0.46711093",
"0.46541044",
"0.46405926",
"0.46322125",
"0.46319464",
"0.46311033",
"0.46223488",
"0.46180877",
"0.4617995",
"0.46160024",
"0.46157092",
"0.45678845",
"0.45550528",
"0.4547182",
"0.45086667",
"0.4504967",
"0.44985864",
"0.4497661",
"0.4469566",
"0.44634157",
"0.44634157",
"0.44613957",
"0.44443667",
"0.44361067",
"0.4435609",
"0.44307914",
"0.44198963",
"0.44143122"
] | 0.83958715 | 0 |
////////////////////////////////////////////////////////////////////////////////// // IsAttachment return true if content is attachment | ////////////////////////////////////////////////////////////////////////////////// // IsAttachment возвращает true, если содержимое является вложением | func (c *Content) IsAttachment() bool {
return c.Type == CONTENT_TYPE_ATTACHMENT
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (p *Part) IsAttachment() bool {\n\tif p.gmimePart == nil {\n\t\treturn false\n\t}\n\tif !gobool(C.gmime_is_part(p.gmimePart)) || gobool(C.gmime_is_multi_part(p.gmimePart)) {\n\t\treturn false\n\t}\n\tif gobool(C.g_mime_part_is_attachment((*C.GMimePart)(unsafe.Pointer(p.gmimePart)))) {\n\t\treturn true\n\t}\n\tif len(p.Filename()) > 0 {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (s *Store) IsAttachment(clientID, psychologistID string) (bool, error) {\n\n\tif strings.TrimSpace(clientID) == \"\" {\n\t\treturn false, errors.New(\"clientID is empty\")\n\t}\n\n\tif strings.TrimSpace(psychologistID) == \"\" {\n\t\treturn false, errors.New(\"psychologistID is empty\")\n\t}\n\n\tvar count int64\n\n\terr := s.db.SQL.Get(&count, `\n\tselect count(c.id) from clients c\n\t where c.client_public_id = $1 and c.psychologist_public_id = $2`, clientID, psychologistID)\n\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"an error occurred while check attachment client from psychologist\")\n\t}\n\n\tif count <= 0 {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}",
"func isAttachmentDownload(ctx *macaron.Context) bool {\n\treturn strings.HasPrefix(ctx.Req.URL.Path, \"/attachments/\") && ctx.Req.Method == \"GET\"\n}",
"func (message *Message) HasAttachments() bool {\n\treturn message.GetInteger(3591) & 0x10 != 0\n}",
"func (e *Entry) HasAttachment() bool {\n\treturn e.Attachment.Name != \"\"\n}",
"func (attachStatus *AttachmentStatus) ShouldSend() bool {\n\treturn *attachStatus == AttachmentAttached\n}",
"func (me TxsdImpactSimpleContentExtensionType) IsFile() bool { return me.String() == \"file\" }",
"func (m *SendDocument) IsMultipart() bool {\n\treturn m.File != nil && m.ThumbFile != nil\n}",
"func (m *SendVoice) IsMultipart() bool {\n\treturn m.File != nil\n}",
"func (m *EditMessageMedia) IsMultipart() bool {\n\treturn m.File != nil && m.ThumbFile != nil\n}",
"func (m *SendAnimation) IsMultipart() bool {\n\treturn m.File != nil && m.ThumbFile != nil\n}",
"func (m *SendVideoNote) IsMultipart() bool {\n\treturn m.File != nil && m.ThumbFile != nil\n}",
"func (r *AttachmentOriginal) HasAttachmentID() bool {\n\treturn r.hasAttachmentID\n}",
"func (o *Post) HasAttachments() bool {\n\tif o != nil && o.Attachments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (m *SendPhoto) IsMultipart() bool {\n\treturn m.File != nil\n}",
"func (m *SendAudio) IsMultipart() bool {\n\treturn m.File != nil && m.ThumbFile != nil\n}",
"func (r *AttachmentOriginal) HasDownload() bool {\n\treturn r.hasDownload\n}",
"func (r *AttachmentPreview) HasAttachmentID() bool {\n\treturn r.hasAttachmentID\n}",
"func (o *TaskRequest) HasAttachments() bool {\n\tif o != nil && o.Attachments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func CfnElasticLoadBalancerAttachment_IsCfnResource(construct constructs.IConstruct) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_opsworks.CfnElasticLoadBalancerAttachment\",\n\t\t\"isCfnResource\",\n\t\t[]interface{}{construct},\n\t\t&returns,\n\t)\n\n\treturn returns\n}",
"func (o *InlineResponse20049Post) HasAttachments() bool {\n\tif o != nil && o.Attachments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (me TxsdNodeRoleSimpleContentExtensionCategory) IsFile() bool { return me.String() == \"file\" }",
"func (m *SendMediaGroup) IsMultipart() bool {\n\treturn false\n}",
"func (me TxsdMimeTypeSequenceType) IsImage() bool { return me.String() == \"image\" }",
"func (m *SendVideo) IsMultipart() bool {\n\treturn m.File != nil && m.ThumbFile != nil\n}",
"func (o *InlineResponse200115) HasAttachments() bool {\n\tif o != nil && o.Attachments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func CfnElasticLoadBalancerAttachment_IsCfnElement(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_opsworks.CfnElasticLoadBalancerAttachment\",\n\t\t\"isCfnElement\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}",
"func (mgr *WatchController) isReconcileAttachments(\n\twatch *unstructured.Unstructured,\n\tsyncRequest *SyncHookRequest,\n\tsyncResponse *SyncHookResponse,\n) bool {\n\tif watch.GetDeletionTimestamp() == nil || // condition 1\n\t\tmgr.finalizer.ShouldFinalize(watch) || // condition 2\n\t\t(watch.GetDeletionTimestamp() != nil && // condition 3\n\t\t\tsyncRequest.Finalizing &&\n\t\t\t!syncResponse.Finalized) {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (c *Content) IsDraft() bool {\n\treturn c.Status == CONTENT_STATUS_DRAFT\n}",
"func (me TxsdRecordPatternSimpleContentExtensionType) IsBinary() bool { return me.String() == \"binary\" }",
"func (me TxsdCounterSimpleContentExtensionType) IsFlow() bool { return me.String() == \"flow\" }",
"func (me TxsdAddressSimpleContentExtensionCategory) IsEMail() bool { return me.String() == \"e-mail\" }",
"func (me TxsdImpactSimpleContentExtensionType) IsExtortion() bool { return me.String() == \"extortion\" }",
"func (b *Blob) IsForeignLayer() bool {\n\treturn b.ContentType == schema2.MediaTypeForeignLayer\n}",
"func (m *AttachmentItem) GetIsInline()(*bool) {\n return m.isInline\n}",
"func IsBinaryContentType(contentType string) bool {\n\treturn isContentType(contentType, \"application/octet-stream\")\n}",
"func (m *ContentAttachment) ContentAttachment() *table.ContentAttachment {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\treturn &table.ContentAttachment{\n\t\tBizID: m.BizId,\n\t\tAppID: m.AppId,\n\t\tConfigItemID: m.ConfigItemId,\n\t}\n}",
"func (n *dnode) IsFile() bool {\n\treturn n.dnType == dnTypeBlob\n}",
"func CfnElasticLoadBalancerAttachment_IsConstruct(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_opsworks.CfnElasticLoadBalancerAttachment\",\n\t\t\"isConstruct\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}",
"func (r *AttachmentOriginal) GetDownload() bool {\n\treturn r.Download\n}",
"func (me TxsdNodeRoleSimpleContentExtensionCategory) IsMail() bool { return me.String() == \"mail\" }",
"func (me TxsdCounterSimpleContentExtensionType) IsMessage() bool { return me.String() == \"message\" }",
"func (s SourceFilesystems) IsContent(filename string) bool {\n\treturn s.Content.Contains(filename)\n}",
"func (contentType ContentType) Is(mimeType string) bool {\n\tmediaType, _, err := mime.ParseMediaType(mimeType)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn string(contentType) == mediaType\n}",
"func PrepareAttachment(f io.Reader) (io.Reader, error) {\n\t//read f and redurns the message, m as type Message\n\tm, err := mail.ReadMessage(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\theader := m.Header\n\n\t//find media type\n\tmediaType, params, err := mime.ParseMediaType(header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"PrepareAttachment: error parsing media type\")\n\t}\n\n\t//if file is multipart\n\tif strings.HasPrefix(mediaType, \"multipart/\") {\n\t\tmr := multipart.NewReader(m.Body, params[\"boundary\"])\n\n\t\tfor {\n\t\t\tp, err := mr.NextPart()\n\t\t\tif err == io.EOF {\n\t\t\t\treturn nil, fmt.Errorf(\"PrepareAttachment: EOF before valid attachment\")\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// need to add checks to ensure base64\n\t\t\tpartType, _, err := mime.ParseMediaType(p.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"PrepareAttachment: error parsing media type of part\")\n\t\t\t}\n\n\t\t\t// if gzip\n\t\t\tif strings.HasPrefix(partType, \"application/gzip\") ||\n\t\t\t\tstrings.HasPrefix(partType, \"application/x-gzip\") ||\n\t\t\t\tstrings.HasPrefix(partType, \"application/gzip-compressed\") ||\n\t\t\t\tstrings.HasPrefix(partType, \"application/gzipped\") ||\n\t\t\t\tstrings.HasPrefix(partType, \"application/x-gunzip\") ||\n\t\t\t\tstrings.HasPrefix(partType, \"application/x-gzip-compressed\") ||\n\t\t\t\tstrings.HasPrefix(partType, \"gzip/document\") {\n\n\t\t\t\tdecodedBase64 := base64.NewDecoder(base64.StdEncoding, p)\n\t\t\t\tdecompressed, err := gzip.NewReader(decodedBase64)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn decompressed, nil\n\t\t\t}\n\n\t\t\t// if zip\n\t\t\tif strings.HasPrefix(partType, \"application/zip\") || // google style\n\t\t\t\tstrings.HasPrefix(partType, \"application/x-zip-compressed\") { // yahoo style\n\n\t\t\t\tdecodedBase64 := base64.NewDecoder(base64.StdEncoding, 
p)\n\t\t\t\tdecompressed, err := ExtractZipFile(decodedBase64)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn decompressed, nil\n\t\t\t}\n\n\t\t\t// if xml\n\t\t\tif strings.HasPrefix(partType, \"text/xml\") {\n\t\t\t\treturn p, nil\n\t\t\t}\n\n\t\t\t// if application/octetstream, check filename\n\t\t\tif strings.HasPrefix(partType, \"application/octet-stream\") {\n\t\t\t\tif strings.HasSuffix(p.FileName(), \".zip\") {\n\t\t\t\t\tdecodedBase64 := base64.NewDecoder(base64.StdEncoding, p)\n\t\t\t\t\tdecompressed, err := ExtractZipFile(decodedBase64)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\treturn decompressed, nil\n\t\t\t\t}\n\n\t\t\t\tif strings.HasSuffix(p.FileName(), \".gz\") {\n\t\t\t\t\tdecodedBase64 := base64.NewDecoder(base64.StdEncoding, p)\n\t\t\t\t\tdecompressed, _ := gzip.NewReader(decodedBase64)\n\n\t\t\t\t\treturn decompressed, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// if gzip\n\tif strings.HasPrefix(mediaType, \"application/gzip\") || // proper :)\n\t\tstrings.HasPrefix(mediaType, \"application/x-gzip\") || // gmail attachment\n\t\tstrings.HasPrefix(mediaType, \"application/gzip-compressed\") ||\n\t\tstrings.HasPrefix(mediaType, \"application/gzipped\") ||\n\t\tstrings.HasPrefix(mediaType, \"application/x-gunzip\") ||\n\t\tstrings.HasPrefix(mediaType, \"application/x-gzip-compressed\") ||\n\t\tstrings.HasPrefix(mediaType, \"gzip/document\") {\n\n\t\tdecodedBase64 := base64.NewDecoder(base64.StdEncoding, m.Body)\n\t\tdecompressed, _ := gzip.NewReader(decodedBase64)\n\n\t\treturn decompressed, nil\n\n\t}\n\n\t// if zip\n\tif strings.HasPrefix(mediaType, \"application/zip\") || // google style\n\t\tstrings.HasPrefix(mediaType, \"application/x-zip-compressed\") { // yahoo style\n\t\tdecodedBase64 := base64.NewDecoder(base64.StdEncoding, m.Body)\n\t\tdecompressed, err := ExtractZipFile(decodedBase64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn decompressed, 
nil\n\t}\n\n\t// if xml\n\tif strings.HasPrefix(mediaType, \"text/xml\") {\n\t\treturn m.Body, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"prepareAttachment: reached the end, no attachment found\")\n}",
"func (o *ObjectInfo) IsEncryptedMultipart() bool {\n\t_, ok := o.UserDefined[ReservedMetadataPrefix+\"Encrypted-Multipart\"]\n\treturn ok\n}",
"func (r *Reply) IsContentTypeSet() bool {\n\treturn len(r.ContType) > 0\n}",
"func (o *Post) HasHasAttachments() bool {\n\tif o != nil && o.HasAttachments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o *Post) GetAttachmentsOk() ([]MicrosoftGraphAttachment, bool) {\n\tif o == nil || o.Attachments == nil {\n\t\tvar ret []MicrosoftGraphAttachment\n\t\treturn ret, false\n\t}\n\treturn *o.Attachments, true\n}",
"func (me TdtypeType) IsFile() bool { return me.String() == \"file\" }",
"func (fa FileAttributes) IsArchive() bool {\n\treturn fa&32 > 0\n}",
"func (p *Part) IsText() bool {\n\treturn gobool(C.gmime_is_text_part(p.gmimePart))\n}",
"func (f *Firework) Attached() bool {\n\treturn f.attached\n}",
"func PbContentAttachment(at *table.ContentAttachment) *ContentAttachment {\n\tif at == nil {\n\t\treturn nil\n\t}\n\n\treturn &ContentAttachment{\n\t\tBizId: at.BizID,\n\t\tAppId: at.AppID,\n\t\tConfigItemId: at.ConfigItemID,\n\t}\n}",
"func (data EditMessageData) NeedsMultipart() bool {\n\treturn len(data.Files) > 0\n}",
"func (me TxsdMimeTypeSequenceType) IsVideo() bool { return me.String() == \"video\" }",
"func (me TxsdCounterSimpleContentExtensionType) IsByte() bool { return me.String() == \"byte\" }",
"func (e PartialContent) IsPartialContent() {}",
"func (o AttachmentAccepterOutput) AttachmentType() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *AttachmentAccepter) pulumi.StringOutput { return v.AttachmentType }).(pulumi.StringOutput)\n}",
"func (o *TaskRequest) HasAttachmentOptions() bool {\n\tif o != nil && o.AttachmentOptions != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func TestAttachments(t *testing.T) {\n\te := testutils.TestEmail()\n\n\tattachment := testutils.TestAttachment(t)\n\te.Attachments = append(e.Attachments, attachment)\n\n\tparams, err := b.paramsForEmail(e)\n\tif err != nil {\n\t\tt.FailNow()\n\t}\n\n\t// the sendgrid backend will have read the contents of the reader,\n\t// so we need to seek back to the beginning\n\toffset, err := attachment.Data.(io.ReadSeeker).Seek(0, 0)\n\tif err != nil {\n\t\tt.FailNow()\n\t} else if offset != int64(0) {\n\t\tt.FailNow()\n\t}\n\n\t// check that the file data is there\n\tfileData, err := ioutil.ReadAll(attachment.Data)\n\tif err != nil {\n\t\tt.FailNow()\n\t}\n\tif params.Get(fmt.Sprintf(\"files[%v]\", e.Attachments[0].Name)) != string(fileData) {\n\t\tt.FailNow()\n\t}\n}",
"func (*Content_AttachmentContent) Descriptor() ([]byte, []int) {\n\treturn file_chat_v1_messages_proto_rawDescGZIP(), []int{6, 2}\n}",
"func (me TxsdCounterSimpleContentExtensionType) IsAlert() bool { return me.String() == \"alert\" }",
"func (m *ChatMessageAttachment) GetContentType()(*string) {\n val, err := m.GetBackingStore().Get(\"contentType\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}",
"func (object Object) Attachment(value interface{}) Object {\n\treturn object.Property(as.PropertyAttachment, value)\n}",
"func (input *BeegoInput) IsUpload() bool {\n\treturn strings.Contains(input.Header(\"Content-Type\"), \"multipart/form-data\")\n}",
"func IsErrAttachmentNotExist(err error) bool {\n\t_, ok := err.(ErrAttachmentNotExist)\n\treturn ok\n}",
"func (t *Link) IsMediaType() (ok bool) {\n\treturn t.mediaType != nil && t.mediaType.mimeMediaTypeValue != nil\n\n}",
"func (me TxsdImpactSimpleContentExtensionType) IsPolicy() bool { return me.String() == \"policy\" }",
"func (a *Attachment) MarshalJSON() ([]byte, error) {\n\tvar err error\n\tswitch {\n\tcase len(a.Content) != 0:\n\t\ta.setMetadata()\n\tcase a.outputStub || a.Follows:\n\t\terr = a.readMetadata()\n\tdefault:\n\t\terr = a.readContent()\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tatt := struct {\n\t\tAttachment\n\t\tContent *[]byte `json:\"data,omitempty\"` // nolint: govet\n\t\tStub *bool `json:\"stub,omitempty\"` // nolint: govet\n\t\tFollows *bool `json:\"follows,omitempty\"` // nolint: govet\n\t}{\n\t\tAttachment: *a,\n\t}\n\tswitch {\n\tcase a.outputStub:\n\t\tatt.Stub = &a.outputStub\n\tcase a.Follows:\n\t\tatt.Follows = &a.Follows\n\tcase len(a.Content) > 0:\n\t\tatt.Content = &a.Content\n\t}\n\treturn json.Marshal(att)\n}",
"func (t *Transport) Able() bool {\n\treturn t.blob.Able() && t.pubsub.Able()\n}",
"func IsAttachableVolume(volumeSpec *volume.Spec, volumePluginMgr *volume.VolumePluginMgr) bool {\n\tattachableVolumePlugin, _ := volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)\n\tif attachableVolumePlugin != nil {\n\t\tvolumeAttacher, err := attachableVolumePlugin.NewAttacher()\n\t\tif err == nil && volumeAttacher != nil {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func isMultipartObject(storage StorageAPI, bucket, object string) bool {\n\t_, err := storage.StatFile(bucket, pathJoin(object, multipartMetaFile))\n\tif err != nil {\n\t\tif err == errFileNotFound {\n\t\t\treturn false\n\t\t}\n\t\terrorIf(err, \"Failed to stat file \"+bucket+pathJoin(object, multipartMetaFile))\n\t\treturn false\n\t}\n\treturn true\n}",
"func (me TxsdCounterSimpleContentExtensionType) IsHost() bool { return me.String() == \"host\" }",
"func (o *CloudVolumeInstanceAttachment) HasAttachTime() bool {\n\tif o != nil && o.AttachTime != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (m *Messenger) Attachment(to Recipient, dataType AttachmentType, url string, messagingType MessagingType, tags ...string) error {\n\tresponse := &Response{\n\t\ttoken: m.token,\n\t\tto: to,\n\t}\n\n\treturn response.Attachment(dataType, url, messagingType, tags...)\n}",
"func (me TxsdPresentationAttributesTextContentElementsDirection) IsInherit() bool {\n\treturn me.String() == \"inherit\"\n}",
"func (na *cnmNetworkAllocator) IsAttachmentAllocated(node *api.Node, networkAttachment *api.NetworkAttachment) bool {\n\tif node == nil {\n\t\treturn false\n\t}\n\n\tif networkAttachment == nil || networkAttachment.Network == nil {\n\t\treturn false\n\t}\n\n\t// If the node is not found in the allocated set, then it is\n\t// not allocated.\n\tif _, ok := na.nodes[node.ID]; !ok {\n\t\treturn false\n\t}\n\n\t// If the network is not found in the allocated set, then it is\n\t// not allocated.\n\tif _, ok := na.nodes[node.ID][networkAttachment.Network.ID]; !ok {\n\t\treturn false\n\t}\n\n\t// If the network is not allocated, the node cannot be allocated.\n\tlocalNet, ok := na.networks[networkAttachment.Network.ID]\n\tif !ok {\n\t\treturn false\n\t}\n\n\t// Addresses empty, not allocated.\n\tif len(networkAttachment.Addresses) == 0 {\n\t\treturn false\n\t}\n\n\t// The allocated IP address not found in local endpoint state. Not allocated.\n\tif _, ok := localNet.endpoints[networkAttachment.Addresses[0]]; !ok {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func (lm LinksManager) DownloadContent(url *url.URL, resp *http.Response, typ resource.Type) (bool, resource.Attachment, []resource.Issue) {\n\tif !lm.LinkSettings.DownloadLinkDestinationAttachments {\n\t\treturn false, nil, nil\n\t}\n\treturn resource.DownloadFile(lm, url, resp, typ)\n}",
"func (me TxsdMimeTypeSequenceType) IsAudio() bool { return me.String() == \"audio\" }",
"func (p Page) Attachment() Attachment {\n\treturn p.DownloadedAttachment\n}",
"func (r *AttachmentPreview) HasExt() bool {\n\treturn r.hasExt\n}",
"func (o *InlineResponse20051TodoItems) HasAttachmentsCount() bool {\n\tif o != nil && o.AttachmentsCount != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (me TxsdImpactSimpleContentExtensionType) IsUser() bool { return me.String() == \"user\" }",
"func (ctx *Context) IsUpload() bool {\r\n\treturn strings.Contains(ctx.HeaderParam(HeaderContentType), MIMEMultipartForm)\r\n}",
"func (o *TaskRequest) GetAttachmentsOk() (*TaskRequestAttachments, bool) {\n\tif o == nil || o.Attachments == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Attachments, true\n}",
"func (m *AttachmentItem) GetAttachmentType()(*AttachmentType) {\n return m.attachmentType\n}",
"func (t *MailTestSuite) Attachment(name string) Attachment {\n\tpath := t.base + string(os.PathSeparator) + DataPath + string(os.PathSeparator) + name\n\tfile, err := ioutil.ReadFile(path)\n\n\tif err != nil {\n\t\tt.Fail(\"error getting attachment with the path: \"+path, err)\n\t}\n\n\treturn Attachment{\n\t\tFilename: name,\n\t\tBytes: file,\n\t}\n}",
"func (r *AttachmentOriginal) HasSign() bool {\n\treturn r.hasSign\n}",
"func (attachment *ReaderAttachment) Attach(mimeWriter *multipart.Writer) error {\n\n\thdr := textproto.MIMEHeader{}\n\thdr.Add(\"Content-Type\", attachment.ContentType)\n\thdr.Add(\"Content-Disposition\", fmt.Sprintf(`attachment; filename=\"%s\"`, attachment.Filename))\n\thdr.Add(\"Content-Transfer-Encoding\", \"base64\")\n\tpart, err := mimeWriter.CreatePart(hdr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tenc := base64.NewEncoder(base64.StdEncoding, part)\n\tio.Copy(enc, attachment.Content)\n\tenc.Close()\n\tif rc, ok := attachment.Content.(io.ReadCloser); ok {\n\t\trc.Close()\n\t}\n\treturn nil\n}",
"func (m *AttachmentItem) GetContentType()(*string) {\n return m.contentType\n}",
"func (o *Post) GetHasAttachmentsOk() (bool, bool) {\n\tif o == nil || o.HasAttachments == nil {\n\t\tvar ret bool\n\t\treturn ret, false\n\t}\n\treturn *o.HasAttachments, true\n}",
"func (obj *content) IsNormal() bool {\n\treturn obj.normal != nil\n}",
"func (input *Input) IsUpload() bool {\n\treturn strings.Contains(input.Header(\"Content-Type\"), \"multipart/form-data\")\n}",
"func (me TxsdImpactSimpleContentExtensionType) IsDos() bool { return me.String() == \"dos\" }",
"func (m *ChatMessage) GetAttachments()([]ChatMessageAttachmentable) {\n return m.attachments\n}",
"func (me TAttlistCommentsCorrectionsRefType) IsAssociatedPublication() bool {\n\treturn me.String() == \"AssociatedPublication\"\n}",
"func (me TxsdPresentationAttributesTextContentElementsTextAnchor) IsInherit() bool {\n\treturn me.String() == \"inherit\"\n}",
"func (m *Metadata) IsFile() bool {\n\treturn (strings.ToLower(m.Tag) == MetadataTypeFile)\n}",
"func (c *Controller) IsAttached(isAttachedRequest k8sresources.FlexVolumeIsAttachedRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"controller-isAttached-start\")\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Not supported\",\n\t}\n}"
] | [
"0.8364052",
"0.712331",
"0.6851076",
"0.6623727",
"0.65944177",
"0.63823783",
"0.6326309",
"0.62711173",
"0.61442995",
"0.61011577",
"0.60864383",
"0.60676914",
"0.60450274",
"0.59972024",
"0.5980794",
"0.5883554",
"0.5855307",
"0.583973",
"0.58200735",
"0.58034015",
"0.57583386",
"0.5731236",
"0.57289475",
"0.5728218",
"0.5723137",
"0.5686201",
"0.5665809",
"0.56380385",
"0.5595032",
"0.55459005",
"0.55456525",
"0.5533175",
"0.5521235",
"0.55169576",
"0.55116314",
"0.54625976",
"0.5443489",
"0.540581",
"0.5393773",
"0.53909683",
"0.53886425",
"0.53824013",
"0.53822666",
"0.53745455",
"0.5364253",
"0.5358638",
"0.53304446",
"0.53118575",
"0.5294223",
"0.5280117",
"0.5276216",
"0.52729684",
"0.5261174",
"0.52593416",
"0.52540314",
"0.5247269",
"0.5235302",
"0.5232965",
"0.5205006",
"0.51975894",
"0.5195634",
"0.51890296",
"0.51830447",
"0.5178482",
"0.5170242",
"0.51581186",
"0.5149956",
"0.51447415",
"0.5138088",
"0.5130533",
"0.5130515",
"0.5129325",
"0.5123215",
"0.5113815",
"0.5089166",
"0.50694335",
"0.506196",
"0.5061885",
"0.5053847",
"0.5052783",
"0.5036337",
"0.50352275",
"0.5033726",
"0.5033435",
"0.5029186",
"0.5026592",
"0.5026171",
"0.502115",
"0.5018039",
"0.5009573",
"0.50078684",
"0.5005492",
"0.50054365",
"0.49976853",
"0.499121",
"0.49841577",
"0.49765426",
"0.49723363",
"0.49704477",
"0.49648845"
] | 0.9088709 | 0 |
IsComment return true if content is comment | IsComment возвращает true, если содержимое является комментарием | func (c *Content) IsComment() bool {
return c.Type == CONTENT_TYPE_COMMENT
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (t Type) IsComment() bool {\n\treturn comm_start < t && t < comm_end\n}",
"func isComment(s state) bool {\n\tswitch s {\n\tcase stateHTMLCmt, stateJSBlockCmt, stateJSLineCmt, stateCSSBlockCmt, stateCSSLineCmt:\n\t\treturn true\n\t}\n\treturn false\n}",
"func (t Token) IsComment() bool {\n\treturn t.Kind == TBlockComment || t.Kind == TLineComment\n}",
"func IsComment(s string) bool {\n\treturn len(s) == 0 || s[0] == '#'\n}",
"func (l *line) isComment() bool {\n\treturn len(l.tokens) > 0 && l.tokens[0] == slash\n}",
"func (p *Lexer) Comment() bool {\n\tc, _ := p.Byte()\n\n\tif c == '#' {\n\t\tc, _ = p.Byte()\n\t\tif IsSpaceChar(c) {\n\t\t\tfor {\n\t\t\t\tc, _ = p.Byte()\n\t\t\t\tif IsEndChar(c) || IsBreakChar(c) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\t\tp.UnreadByte()\n\t}\n\tp.UnreadByte()\n\treturn false\n}",
"func (l *line) isHTMLComment() bool {\n\treturn len(l.tokens) > 0 && l.tokens[0] == slash+slash\n}",
"func (me TAttlistCommentsCorrectionsRefType) IsCommentIn() bool { return me.String() == \"CommentIn\" }",
"func (me TAttlistCommentsCorrectionsRefType) IsCommentOn() bool { return me.String() == \"CommentOn\" }",
"func (tb *TextBuf) InComment(pos TextPos) bool {\n\tcs := tb.CommentStart(pos.Ln)\n\tif cs < 0 {\n\t\treturn false\n\t}\n\treturn pos.Ch > cs\n}",
"func hasComment(x Expr, text string) bool {\n\tif x == nil {\n\t\treturn false\n\t}\n\tfor _, com := range x.Comment().Before {\n\t\tif strings.Contains(strings.ToLower(com.Token), text) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (o *Comment) HasContent() bool {\n\tif o != nil && o.Content != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (p *Doc) Comment(key, comments string) bool {\n\te, ok := p.props[key]\n\tif !ok {\n\t\treturn false\n\t}\n\n\t// 如果所有注释为空\n\tif comments == \"\" {\n\t\tp.lines.InsertBefore(&line{typo: '#', value: \"#\"}, e)\n\t\treturn true\n\t}\n\n\t// 创建一个新的Scanner\n\tscanner := bufio.NewScanner(strings.NewReader(comments))\n\tfor scanner.Scan() {\n\t\tp.lines.InsertBefore(&line{typo: '#', value: \"#\" + scanner.Text()}, e)\n\t}\n\n\treturn true\n}",
"func (this *Space) HasComment() bool {\n\tif this == nil {\n\t\treturn false\n\t}\n\tfor _, s := range this.Space {\n\t\tif isComment(s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func is_mysql_comment(cs string, len, pos int) bool {\n\t/* so far...\n\t * cs[pos] == '/' && cs[pos+1] == '*'\n\t */\n\n\tif pos+2 >= len {\n\t\t/* not a mysql comment */\n\t\treturn false\n\t}\n\n\tif cs[pos+2] != '!' {\n\t\t/* not a mysql comment */\n\t\treturn false\n\t}\n\n\t/*\n\t * this is a mysql comment\n\t * got \"/x!\"\n\t */\n\treturn true\n}",
"func isCommentLine(line string) bool {\n\treturn strings.HasPrefix(line, \"#\")\n}",
"func CommentIsInChangedLines(c context.Context, trackComment *track.Comment, changedLines ChangedLinesInfo) bool {\n\tvar data tricium.Data_Comment\n\tif trackComment.Comment == nil {\n\t\tlogging.Errorf(c, \"Got a comment with a nil Comment field: %+v\", trackComment)\n\t\treturn false\n\t}\n\n\tif err := jsonpb.UnmarshalString(string(trackComment.Comment), &data); err != nil {\n\t\tlogging.WithError(err).Errorf(c, \"Failed to unmarshal comment.\")\n\t\treturn false\n\t}\n\n\tif len(data.Path) == 0 {\n\t\treturn true // This is a comment on the commit message, which is always kept.\n\t}\n\n\tif data.StartLine == 0 {\n\t\treturn true // File-level comment, should be kept.\n\t}\n\n\t// If the file has changed lines tracked, pass over comments that aren't in the diff.\n\tif lines, ok := changedLines[data.Path]; ok {\n\t\tstart, end := int(data.StartLine), int(data.EndLine)\n\t\tif end > start && data.EndChar == 0 {\n\t\t\tend-- // None of data.EndLine is included in the comment.\n\t\t}\n\t\tif end == 0 {\n\t\t\tend = start // Line comment.\n\t\t}\n\t\tif isInChangedLines(start, end, lines) {\n\t\t\treturn true\n\t\t}\n\t\tlogging.Debugf(c, \"Filtering out comment on lines [%d, %d].\", start, end)\n\t\treturn false\n\t}\n\tlogging.Debugf(c, \"File %q is not in changed lines.\", data.Path)\n\treturn false\n}",
"func checkComment(l *Linter, n ast.Node, name string, comment string, startWithIs bool) {\n\tprefix := name + \" \"\n\tif !strings.HasPrefix(comment, prefix) {\n\t\tif comment == \"\" {\n\t\t\tl.addError(n, \"missing comment for %q\", name)\n\t\t\treturn\n\t\t}\n\t\tl.addError(n, \"comment for %q must start with %q\", name, prefix)\n\t\treturn\n\t}\n\tif !strings.HasSuffix(strings.TrimSpace(comment), \".\") {\n\t\tl.addError(n, \"comment for %q must end with a period\", name)\n\t\treturn\n\t}\n\tif !startWithIs {\n\t\treturn\n\t}\n\tif strings.HasPrefix(comment, prefix+\"is \") || strings.HasPrefix(comment, prefix+\"are \") {\n\t\treturn\n\t}\n\tl.addError(n, \"comment for %q must start with %q\", name, prefix+\"is/are \")\n}",
"func (o *MicrosoftGraphWorkbookComment) HasContent() bool {\n\tif o != nil && o.Content != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func isCommentOrEmptyLine(text string) bool {\n\twhiteSpace := strings.Replace(text, \" \", \"\", -1)\n\twhiteSpace = strings.Replace(whiteSpace, \"\\t\", \"\", -1)\n\treturn whiteSpace == \"\" || (strings.Contains(text, \"//\") && whiteSpace[0:2] == \"//\")\n}",
"func (c *Comment) Valid() error {\n\treturn utils.ValidText(\"task comment\", c.Content)\n}",
"func (o *StorageNetAppCifsShareAllOf) HasComment() bool {\n\tif o != nil && o.Comment != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func CommentTypeIsRef(t CommentType) bool {\n\treturn t == CommentTypeCommentRef || t == CommentTypePullRef || t == CommentTypeIssueRef\n}",
"func (o *DeployKey) HasComment() bool {\n\tif o != nil && o.Comment != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (d UserData) HasComment() bool {\n\treturn d.ModelData.Has(models.NewFieldName(\"Comment\", \"comment\"))\n}",
"func (o *ReservationModel) HasComment() bool {\n\tif o != nil && o.Comment != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func commentFinder() func(string) bool {\n\tcommentSectionInProgress := false\n\treturn func(line string) bool {\n\t\tif comment.FindString(line) != \"\" {\n\t\t\t// \"//\" Comment line found.\n\t\t\treturn true\n\t\t}\n\t\t// If the current line is at the start `/*` of a multi-line comment,\n\t\t// set a flag to remember we're within a multi-line comment.\n\t\tif commentStart.FindString(line) != \"\" {\n\t\t\tcommentSectionInProgress = true\n\t\t\treturn true\n\t\t}\n\t\t// At the end `*/` of a multi-line comment, clear the flag.\n\t\tif commentEnd.FindString(line) != \"\" {\n\t\t\tcommentSectionInProgress = false\n\t\t\treturn true\n\t\t}\n\t\t// The current line is within a `/*...*/` section.\n\t\tif commentSectionInProgress {\n\t\t\treturn true\n\t\t}\n\t\t// Anything else is not a comment region.\n\t\treturn false\n\t}\n}",
"func (n *NotCommentToken) Content() []byte {\n\treturn n.c\n}",
"func (lx *Lexer) comment() Token {\n\tlx.consume()\n\n\tr, _ := lx.nextChar()\n\tlx.token.writeRune(r)\n\tlx.token.Type = Comment\n\n\tif r == '/' {\n\t\tlx.endOfLineComment()\n\t} else if r == '*' {\n\t\tlx.traditionalComment()\n\t}\n\treturn lx.returnAndReset()\n}",
"func (un *Decoder) Comment(c xml.Comment) error { return nil }",
"func (v Notes) EditComment(params NotesEditCommentParams) (bool, error) {\n\tr, err := v.API.Request(\"notes.editComment\", params)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn decodeBoolIntResponse(r)\n}",
"func (c *CommentBlock) Content() []byte {\n\treturn c.content\n}",
"func (this Comment) GetContent() string {\n\tif len(this) == 0 {\n\t\treturn \"\"\n\t}\n\ts := []byte(this)\n\tif isLineComment(string(this)) {\n\t\treturn string(s[2 : len(s)-1])\n\t}\n\treturn string(s[2 : len(s)-2])\n}",
"func (o *V0037JobProperties) HasComment() bool {\n\tif o != nil && o.Comment != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o *CaCertificateCreateReqWeb) GetCommentOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Comment, true\n}",
"func (s Settings) Commented() bool {\n\treturn s.comment\n}",
"func (v Notes) DeleteComment(params NotesDeleteCommentParams) (bool, error) {\n\tr, err := v.API.Request(\"notes.deleteComment\", params)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn decodeBoolIntResponse(r)\n}",
"func (p PRMirror) AddComment(id int, comment string) bool {\n\tissueComment := github.IssueComment{}\n\tissueComment.Body = &comment\n\n\t_, _, err := p.GitHubClient.Issues.CreateComment(*p.Context, p.Configuration.DownstreamOwner, p.Configuration.DownstreamRepo, id, &issueComment)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while adding a comment to issue#:%d - %s\", id, err.Error())\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func (p *printer) commentsHaveNewline(list []*ast.Comment) bool {\n\t// len(list) > 0\n\tline := p.lineFor(list[0].Pos())\n\tfor i, c := range list {\n\t\tif i > 0 && p.lineFor(list[i].Pos()) != line {\n\t\t\t// not all comments on the same line\n\t\t\treturn true\n\t\t}\n\t\tif t := c.Text; len(t) >= 2 && (t[1] == '/' || strings.Contains(t, \"\\n\")) {\n\t\t\treturn true\n\t\t}\n\t}\n\t_ = line\n\treturn false\n}",
"func (p *Parser) lexComment(l *lex.Lexer) lex.StateFn {\n\t// Find out which kind of comment we have, so we know how to deal with it.\n\tc := p.Commenters.First(l.Input(0))\n\n\tl.Inc(len(c.Begin))\n\tvar end = c.End\n\tif end == \"\" {\n\t\tend = \"\\n\"\n\t}\n\tfor !l.Consume(end) && l.Next() != lex.EOF {\n\t\t// absorb as long as we don't hit EOF or end-of-comment\n\t}\n\tif c.End == \"\" {\n\t\tl.Dec(1)\n\t}\n\n\tif c.Strip {\n\t\tl.Ignore()\n\t} else {\n\t\tl.Emit(typeComment)\n\t}\n\t// If we exited because of EOF, then Peek will also return EOF.\n\tif l.Peek() == lex.EOF {\n\t\tl.Emit(lex.TypeEOF)\n\t\treturn nil\n\t}\n\treturn p.lexText\n}",
"func (c *CommentBlock) ContentString() string {\n\treturn string(c.content)\n}",
"func (o *Comment) GetContentOk() (*string, bool) {\n\tif o == nil || o.Content == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Content, true\n}",
"func (c CommentCommand) IsAutoplan() bool {\n\treturn false\n}",
"func (n *NotCommentToken) ContentString() string {\n\treturn string(n.Content())\n}",
"func updateComment(username string, comment string) bool {\n\tlog.Printf(\"Updating comment for %s to %s\", username, comment)\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", fmt.Sprintf(changeCommentCommand, comment, username))\n\tif _, err := cmd.CombinedOutput(); err != nil {\n\t\tlog.Printf(\"Error: Can't update comment for %s: %s\", username, err)\n\t\treturn false\n\t}\n\treturn true\n}",
"func TestCommentsStandalone(t *testing.T) {\n\ttemplate := \"Begin.\\n{{! Comment Block! }}\\nEnd.\\n\"\n\texpected := \"Begin.\\nEnd.\\n\"\n\tactual := Render(template)\n\n\tif actual != expected {\n\t\tt.Errorf(\"returned %#v, expected %#v\", actual, expected)\n\t}\n}",
"func (d UserData) Comment() string {\n\tval := d.ModelData.Get(models.NewFieldName(\"Comment\", \"comment\"))\n\tif !d.Has(models.NewFieldName(\"Comment\", \"comment\")) {\n\t\treturn *new(string)\n\t}\n\treturn val.(string)\n}",
"func (o *VersionedConnection) HasComments() bool {\n\tif o != nil && o.Comments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o *StorageNetAppCifsShareAllOf) GetCommentOk() (*string, bool) {\n\tif o == nil || o.Comment == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Comment, true\n}",
"func Comment(s string) string {\n\treturn fmt.Sprintf(\"<!-- %s -->\", s)\n}",
"func (o *MicrosoftGraphWorkbookComment) HasContentType() bool {\n\tif o != nil && o.ContentType != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func processComment(l *lexer) processResult {\n\trune := l.next()\n\tif rune == '/' {\n\t\trune := l.next()\n\t\tif rune == '/' {\n\t\t\t// eat up until \\n\n\t\t\tfor l.next() != '\\n' {\n\t\t\t}\n\t\t\tl.ignore()\n\t\t\treturn resultMatch\n\t\t} else {\n\t\t\tl.backup()\n\t\t\tl.backup()\n\t\t\treturn resultNoMatch\n\t\t}\n\t} else {\n\t\tl.backup()\n\t\treturn resultNoMatch\n\t}\n}",
"func hasComments(list *ListExpr) (line, suffix bool) {\n\tcom := list.Comment()\n\tif len(com.Before) > 0 || len(com.After) > 0 || len(list.End.Before) > 0 {\n\t\tline = true\n\t}\n\tif len(com.Suffix) > 0 {\n\t\tsuffix = true\n\t}\n\tfor _, elem := range list.List {\n\t\tcom := elem.Comment()\n\t\tif len(com.Before) > 0 {\n\t\t\tline = true\n\t\t}\n\t\tif len(com.Suffix) > 0 {\n\t\t\tsuffix = true\n\t\t}\n\t}\n\treturn\n}",
"func EqualsComments(a, b Comments) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (o *MicrosoftGraphWorkbookComment) GetContentOk() (string, bool) {\n\tif o == nil || o.Content == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.Content, true\n}",
"func addComment(gh *octokat.Client, repo octokat.Repo, prNum, comment, commentType string) error {\n\t// get the comments\n\tcomments, err := gh.Comments(repo, prNum, &octokat.Options{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// check if we already made the comment\n\tfor _, c := range comments {\n\t\t// if we already made the comment return nil\n\t\tif strings.ToLower(c.User.Login) == \"gordontheturtle\" && strings.Contains(c.Body, commentType) {\n\t\t\tlogrus.Debugf(\"Already made comment about %q on PR %s\", commentType, prNum)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// add the comment because we must not have already made it\n\tif _, err := gh.AddComment(repo, prNum, comment); err != nil {\n\t\treturn err\n\t}\n\n\tlogrus.Infof(\"Would have added comment about %q PR %s\", commentType, prNum)\n\treturn nil\n}",
"func (o *NiaapiRevisionInfoAllOf) HasRevisionComment() bool {\n\tif o != nil && o.RevisionComment != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func Comment(ctx context.Context, cfg *v1.Config, pr int, contents []byte) error {\n\tc := newClient(ctx, cfg.Github)\n\treturn c.CommentOnPR(pr, string(contents))\n}",
"func (c CommentCommand) IsForSpecificProject() bool {\n\treturn c.RepoRelDir != \"\" || c.Workspace != \"\" || c.ProjectName != \"\"\n}",
"func (m *MessageReplies) GetComments() (value bool) {\n\tif m == nil {\n\t\treturn\n\t}\n\treturn m.Flags.Has(0)\n}",
"func (p *PKGBUILD) AppendComment(comment string) (ok bool) {\n\tbegin := position.Position{Line: 1}\n\tif last := len(p.atoms) - 1; last >= 0 {\n\t\tbegin = atom.GetEnd(p.atoms[last]).Next('\\n')\n\t}\n\tp.atoms.Push(newComment(comment, begin))\n\treturn true\n}",
"func (prrce *PullRequestReviewCommentEvent) Comment() string {\n\tcomment := prrce.raw.Payload[\"comment\"].(map[string]interface{})\n\treturn comment[\"body\"].(string)\n}",
"func (h *HSTIMParser) ParseComment(msg []byte) (HSTIMComment, error) {\n\tc := HSTIMComment{}\n\t// Verify the first character is `C`, which indicates it's a comment\n\tif (msg[0]) != 'C' {\n\t\treturn c, &ParseError{fmt.Sprintf(\"Not a comment\")}\n\t}\n\n\t// Slice off everything before <CR><EXT>\n\tmsgStr := removeTrailingValues(msg)\n\n\tsplits := strings.Split(msgStr[2:], \"|\")\n\tif len(splits) != 3 {\n\t\treturn c, &ParseError{fmt.Sprintf(\"Expected 3 fields, got %d\", len(splits))}\n\t}\n\n\tif splits[0] != \"1\" {\n\t\treturn c, &ParseError{\"Sequence number should always be 1\"}\n\t}\n\tc.SequenceNumber = 1\n\tc.TestMode = splits[2]\n\n\treturn c, nil\n}",
"func TestCommentsInline(t *testing.T) {\n\ttemplate := \"12345{{! Comment Block! }}67890\"\n\texpected := \"1234567890\"\n\tactual := Render(template)\n\n\tif actual != expected {\n\t\tt.Errorf(\"returned %#v, expected %#v\", actual, expected)\n\t}\n}",
"func (o *StorageNetAppCifsShareAllOf) GetComment() string {\n\tif o == nil || o.Comment == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Comment\n}",
"func TestFetchComment(t *testing.T) {\n\tinputs := []string{\n\t\t\"/ inline comment\",\n\t\t`*\n\t\t\tmultiline comment\n\t\t*/`,\n\t}\n\n\texpects := []string{\n\t\t\"// inline comment\",\n\t\t`/*\n\t\t\tmultiline comment\n\t\t*/`,\n\t}\n\n\tfor i, input := range inputs {\n\t\tlex := NewLexer(bytes.NewReader([]byte(input)))\n\t\tif err := lex.fetchComment(); err != nil {\n\t\t\tt.Error(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\ttoken := lex.tokens[0]\n\n\t\tif token.Type() != TokenComment {\n\t\t\tt.Errorf(\"unexpected token type '%s', expecting type '%s'\", token.TypeString(), tokenTypeMap[TokenComment])\n\t\t\treturn\n\t\t}\n\n\t\texpected := expects[i]\n\t\tif token.String() != expected {\n\t\t\tt.Errorf(\"unexpected '%s', expecting '%s'\", token.String(), expected)\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (t *TypeField) Comment() Expr {\n\treturn t.CommentExpr\n}",
"func lexComment(lx *lexer) stateFn {\r\n\tr := lx.peek()\r\n\tif isNL(r) || r == eof {\r\n\t\tlx.emit(itemText)\r\n\t\treturn lx.pop()\r\n\t}\r\n\tlx.next()\r\n\treturn lexComment\r\n}",
"func commentText(src []byte) (text string) {\n\ti := bytes.Index(src, tagBegin);\n\tj := bytes.Index(src, tagEnd);\n\tif i >= 0 && j >= i+len(tagBegin) {\n\t\ttext = string(bytes.TrimSpace(src[i+len(tagBegin) : j]))\n\t}\n\treturn;\n}",
"func (s *Scanner) scanComment() Token {\n\t// Create a buffer and read the current character into it.\n\tvar buf bytes.Buffer\n\tbuf.WriteRune(s.read())\n\n\t// Read every subsequent character into the buffer until either\n\t// newline or EOF is encountered.\n\tfor {\n\t\tch := s.read()\n\t\tif ch == rune(0) {\n\t\t\tbreak\n\t\t}\n\n\t\tif ch == rune(0) || ch == '\\n' {\n\t\t\ts.unread()\n\t\t\tbreak\n\t\t}\n\n\t\tbuf.WriteRune(ch)\n\t}\n\n\ts.tok = COMMENT\n\ts.lit = buf.String()\n\treturn COMMENT\n}",
"func (o *DeployKey) GetCommentOk() (*string, bool) {\n\tif o == nil || o.Comment == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Comment, true\n}",
"func (o *VersionedControllerService) HasComments() bool {\n\tif o != nil && o.Comments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (s *Scanner) scanComment() *Token {\n\tvar buf bytes.Buffer\n\tstartPos := s.pos()\n\n\tfor {\n\t\tch0 := s.read()\n\t\tif ch0 == eof {\n\t\t\tbreak\n\t\t} else if ch0 == '-' {\n\t\t\tch1 := s.read()\n\t\t\tch2 := s.read()\n\t\t\tif ch1 == '-' && ch2 == '>' {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\ts.unread(2)\n\t\t\t}\n\t\t}\n\n\t\tbuf.WriteRune(ch0)\n\t}\n\n\treturn &Token{Type: CommentToken, Data: buf.String(), Pos: startPos}\n}",
"func CheckChunk(regex ReviewRegex,\n chunk reviewdata.DiffChunk,\n commentChan chan <- reviewdata.Comment) {\n matchRegex := regexp.MustCompile(strings.Join(regex.Line.Match, \"|\"))\n excludeRegex := regexp.MustCompile(strings.Join(regex.Line.Exclude, \"|\"))\n var comment reviewdata.Comment\n\n for _, line := range chunk.Lines {\n if (matchRegex.MatchString(line.RhText) &&\n (len(regex.Line.Exclude) == 0 ||\n !excludeRegex.MatchString(line.RhText))) {\n if (comment.NumLines == 0) {\n // First match we've found\n comment.NumLines = 1\n comment.Line = line.ReviewLine\n comment.Text = regex.Comment.SingleLine\n comment.RaiseIssue = regex.Comment.RaiseIssue\n fmt.Println(1)\n } else {\n // Previously matched something\n comment.NumLines += 1\n comment.Text = regex.Comment.MultiLine\n fmt.Println(2)\n }\n } else {\n // Didn't natch. If we previously did, send the comment\n if (comment.NumLines > 0) {\n commentChan <- comment\n comment.NumLines = 0\n fmt.Println(3)\n }\n }\n }\n\n // Checked everything. If we've previously set up a comment, send it\n if (comment.NumLines > 0) {\n commentChan <- comment\n fmt.Printf(\"commented: %s\\n\", comment.Text)\n }\n fmt.Println(5)\n}",
"func ToComment(content string) *ast.CommentGroup {\n\tcontent = strings.TrimRight(content, \"\\n\")\n\tif content == \"\" {\n\t\treturn nil\n\t}\n\n\tvar comments []*ast.Comment\n\tfor i, commentLine := range strings.Split(content, \"\\n\") {\n\t\tfmtStr := \"// %s\"\n\t\tif i == 0 {\n\t\t\tfmtStr = fmt.Sprintf(\"\\n%s\", fmtStr)\n\t\t}\n\t\tcomments = append(comments, &ast.Comment{\n\t\t\tText: fmt.Sprintf(fmtStr, commentLine),\n\t\t})\n\t}\n\n\treturn &ast.CommentGroup{\n\t\tList: comments,\n\t}\n}",
"func (cs *CommentService) Comment(id uint) (*entity.Comment, []error) {\n\tcmnt, errs := cs.commentRepo.Comment(id)\n\tif len(errs) > 0 {\n\t\treturn nil, errs\n\t}\n\treturn cmnt, errs\n}",
"func (a *TypeAlias) Comment() Expr {\n\treturn a.CommentExpr\n}",
"func (o *RowCommentCreate) GetCommentOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Comment, true\n}",
"func (cs *CommentService) Comment(id uint) (*entity.Comment, []error) {\n\tcomment, errs := cs.cmtRepo.Comment(id)\n\n\tif len(errs) > 0 {\n\t\treturn nil, errs\n\t}\n\treturn comment, nil\n}",
"func (o *ReservationModel) GetCommentOk() (*string, bool) {\n\tif o == nil || o.Comment == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Comment, true\n}",
"func (n *node) Comment() string {\n\treturn n.comment\n}",
"func (cr *MockCommentGormRepo) Comment(id uint) (*entity.Comment, []error) {\n\tcomment := entity.MockComment\n\tif id != 1 {\n\t\treturn nil, []error{errors.New(\"Not found\")}\n\t}\n\treturn &comment, nil\n}",
"func (c Config) StartComment() string {\n\tswitch c.Markup {\n\tcase stentor.MarkupMD:\n\t\treturn stentor.CommentMD\n\tcase stentor.MarkupRST:\n\t\treturn stentor.CommentRST\n\tdefault:\n\t\treturn \"\"\n\t}\n}",
"func (this *Space) HasAttachedComment() bool {\n\treturn len(this.GetAttachedComment()) > 0\n}",
"func (c *CommentBlock) CommentMethod() CommentType {\n\treturn c.ct\n}",
"func (client *Client) Comment(refType string, refId int64, text string, params map[string]interface{}) (*Comment, error) {\n\tpath := fmt.Sprintf(\"/comment/%s/%d/\", refType, refId)\n\tif params == nil {\n\t\tparams = map[string]interface{}{}\n\t}\n\tparams[\"value\"] = text\n\n\tcomment := &Comment{}\n\terr := client.RequestWithParams(\"POST\", path, nil, params, comment)\n\treturn comment, err\n}",
"func (o *Comment) GetContent() string {\n\tif o == nil || o.Content == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Content\n}",
"func (m Matcher) MatchComment(pattern string, alternatives ...string) Matcher {\n\treturn m\n}",
"func UpdateComment(c *gin.Context, in *updateCommentIn) (*task.Comment, error) {\n\tmetadata.AddActionMetadata(c, metadata.TaskID, in.TaskID)\n\tmetadata.AddActionMetadata(c, metadata.CommentID, in.CommentID)\n\n\tdbp, err := zesty.NewDBProvider(utask.DBName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tt, err := task.LoadFromPublicID(dbp, in.TaskID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcomment, err := task.LoadCommentFromPublicID(dbp, in.CommentID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treqUsername := auth.GetIdentity(c)\n\n\tadmin := auth.IsAdmin(c) == nil\n\tisCommentAuthor := reqUsername == comment.Username\n\n\tif !isCommentAuthor && !admin {\n\t\treturn nil, errors.Forbiddenf(\"Not allowed to update comment\")\n\t} else if !isCommentAuthor {\n\t\tmetadata.SetSUDO(c)\n\t}\n\n\tif t.Resolution != nil {\n\t\tres, err := resolution.LoadFromPublicID(dbp, *t.Resolution)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tmetadata.AddActionMetadata(c, metadata.ResolutionID, res.PublicID)\n\t}\n\n\tif comment.TaskID != t.ID {\n\t\treturn nil, errors.BadRequestf(\"Comment and task don't match\")\n\t}\n\n\terr = comment.Update(dbp, in.Content)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn comment, nil\n}",
"func lexComment(l *lexer) stateFn {\n\t// Ignore leading spaces and #.\n\tl.ignore()\n\tfor {\n\t\tr := l.next()\n\t\tif unicode.IsSpace(r) || r == '#' {\n\t\t\tl.ignore()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tl.backup()\n\n\tfor {\n\t\tswitch l.next() {\n\t\tcase '\\r', '\\n':\n\t\t\tl.emit(itemComment, false)\n\t\t\treturn lexRule\n\t\tcase eof:\n\t\t\tl.backup()\n\t\t\tl.emit(itemComment, false)\n\t\t\treturn lexRule\n\t\t}\n\t}\n}",
"func (p *printer) commentBefore(next token.Position) bool {\n\treturn p.commentOffset < next.Offset && (!p.impliedSemi || !p.commentNewline)\n}",
"func (o *CaCertificateCreateReqWeb) GetComment() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Comment\n}",
"func (b *Service) CommentDelete(ctx context.Context, TeamID string, UserID string, EventValue string) ([]byte, error, bool) {\n\tvar c struct {\n\t\tCommentId string `json:\"commentId\"`\n\t}\n\terr := json.Unmarshal([]byte(EventValue), &c)\n\tif err != nil {\n\t\treturn nil, err, false\n\t}\n\n\terr = b.CheckinService.CheckinCommentDelete(ctx, c.CommentId)\n\tif err != nil {\n\t\treturn nil, err, false\n\t}\n\n\tmsg := createSocketEvent(\"comment_deleted\", \"\", \"\")\n\n\treturn msg, nil, false\n}",
"func (dw *DrawingWand) Comment(comment string) {\n\tcscomment := C.CString(comment)\n\tdefer C.free(unsafe.Pointer(cscomment))\n\tC.MagickDrawComment(dw.dw, cscomment)\n}",
"func (o *ViewProjectActivePages) HasComments() bool {\n\tif o != nil && o.Comments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func CommentGroupsContain(comments []*ast.CommentGroup, text string) bool {\n\tif len(text) == 0 {\n\t\treturn false\n\t}\n\tfor _, cg := range comments {\n\t\tif strings.Contains(cg.Text(), text) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (o *StorageNetAppCifsShareAllOf) SetComment(v string) {\n\to.Comment = &v\n}",
"func (o *V0037JobProperties) GetCommentOk() (*string, bool) {\n\tif o == nil || o.Comment == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Comment, true\n}",
"func (o SnapshotImportClientDataOutput) Comment() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v SnapshotImportClientData) *string { return v.Comment }).(pulumi.StringPtrOutput)\n}",
"func (m *WorkbookCommentReply) GetContent()(*string) {\n return m.content\n}"
] | [
"0.8070134",
"0.78601515",
"0.7623746",
"0.7510202",
"0.7351018",
"0.7310748",
"0.69591284",
"0.6869606",
"0.6863134",
"0.6551534",
"0.64144266",
"0.6250492",
"0.6241997",
"0.62200755",
"0.61377007",
"0.6091443",
"0.6089278",
"0.6064151",
"0.6043184",
"0.60294676",
"0.6027368",
"0.60007584",
"0.5996023",
"0.5842577",
"0.5825427",
"0.5789278",
"0.57775295",
"0.576302",
"0.57193345",
"0.5697661",
"0.5691122",
"0.5678965",
"0.5675053",
"0.5658156",
"0.562943",
"0.5624114",
"0.5614079",
"0.5590876",
"0.5584946",
"0.5566082",
"0.5555664",
"0.55468464",
"0.55354655",
"0.55066985",
"0.5502754",
"0.5490264",
"0.548683",
"0.5486106",
"0.54854333",
"0.5484161",
"0.5475235",
"0.5453085",
"0.543578",
"0.5425426",
"0.5402112",
"0.53968614",
"0.5395518",
"0.5393594",
"0.53925055",
"0.53905",
"0.53892183",
"0.538353",
"0.5382821",
"0.53745985",
"0.53673476",
"0.5365497",
"0.53576255",
"0.53462464",
"0.5338473",
"0.5326633",
"0.5314753",
"0.5310521",
"0.53103274",
"0.5299515",
"0.52981824",
"0.52926636",
"0.5282296",
"0.5282062",
"0.52798635",
"0.5272139",
"0.527047",
"0.52694786",
"0.5262093",
"0.5258468",
"0.5250328",
"0.52494353",
"0.52471167",
"0.5244262",
"0.5240245",
"0.5239332",
"0.5238144",
"0.5229526",
"0.52277756",
"0.5224175",
"0.52158153",
"0.52118313",
"0.51999056",
"0.5188717",
"0.51852244",
"0.51838666"
] | 0.85867846 | 0 |
IsPage return true if content is page | IsPage возвращает true, если содержимое является страницей | func (c *Content) IsPage() bool {
return c.Type == CONTENT_TYPE_PAGE
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c *Container) IsPage() bool {\n\treturn c.Title != \"\"\n}",
"func (si *StructInfo) IsPage() bool {\n\treturn si.Kind == core.PAGE\n}",
"func (p Page) inPage(s string) bool {\n\tfor _, v := range p.Links {\n\t\tif s == v.Url.String() || v.Url.String()+\"/\" == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (p Page) IsHTML() bool {\n\treturn p.Type().MediaType() == \"text/html\"\n}",
"func (p *Paginator) IsCurrentPage(page int) bool {\n\treturn p.CurrentPage() == page\n}",
"func (p *Pagination) IsCurrentPage(page int) bool {\n\treturn p.CurrentPage() == page\n}",
"func (o *Bundles) HasCurrentPage() bool {\n\tif o != nil && o.CurrentPage != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (me TxsdCounterSimpleContentExtensionType) IsSite() bool { return me.String() == \"site\" }",
"func (o *SpansListRequestAttributes) HasPage() bool {\n\treturn o != nil && o.Page != nil\n}",
"func (o *Origin1) GetPageOk() (*PageType, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Page, true\n}",
"func IsAjaxPage(vals url.Values) bool {\n\tpage := getPageName(vals)\n\tajax := vals.Get(\"ajax\")\n\tasJson := vals.Get(\"asJson\")\n\treturn page == FetchEventboxAjaxPageName ||\n\t\tpage == FetchResourcesAjaxPageName ||\n\t\tpage == GalaxyContentAjaxPageName ||\n\t\tpage == EventListAjaxPageName ||\n\t\tpage == AjaxChatAjaxPageName ||\n\t\tpage == NoticesAjaxPageName ||\n\t\tpage == RepairlayerAjaxPageName ||\n\t\tpage == TechtreeAjaxPageName ||\n\t\tpage == PhalanxAjaxPageName ||\n\t\tpage == ShareReportOverlayAjaxPageName ||\n\t\tpage == JumpgatelayerAjaxPageName ||\n\t\tpage == FederationlayerAjaxPageName ||\n\t\tpage == UnionchangeAjaxPageName ||\n\t\tpage == ChangenickAjaxPageName ||\n\t\tpage == PlanetlayerAjaxPageName ||\n\t\tpage == TraderlayerAjaxPageName ||\n\t\tpage == PlanetRenameAjaxPageName ||\n\t\tpage == RightmenuAjaxPageName ||\n\t\tpage == AllianceOverviewAjaxPageName ||\n\t\tpage == SupportAjaxPageName ||\n\t\tpage == BuffActivationAjaxPageName ||\n\t\tpage == AuctioneerAjaxPageName ||\n\t\tpage == HighscoreContentAjaxPageName ||\n\t\tajax == \"1\" ||\n\t\tasJson == \"1\"\n}",
"func (p Pagination) IsCurrent(page int) bool {\n\treturn page == p.CurrentPage\n}",
"func (p Page) IsValid() bool {\n\treturn p.valid\n}",
"func (o *OriginCollection) GetPageOk() (*PageType, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Page, true\n}",
"func (o *PaginationProperties) HasPage() bool {\n\tif o != nil && o.Page != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (r *RoleList) HasPage() bool {\n\treturn r.hasPage\n}",
"func hasLandingPage(collection model.Pages, dir *model.Page) bool {\n\thasLanding := false\n\tfor _, page := range collection {\n\t\tif page.Type == \"file\" && page.Slug == dir.Slug {\n\t\t\thasLanding = true\n\t\t\tbreak\n\t\t}\n\t}\n\treturn hasLanding\n}",
"func (p *Pagination) Show() bool {\n\treturn p.NumberOfPages() > 1\n}",
"func (o *AclBindingListPage) HasPage() bool {\n\tif o != nil && o.Page != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (p *Paginator) HasPages() bool {\n\treturn p.PageNums() > 1\n}",
"func (e PartialContent) IsPartialContent() {}",
"func PageHandler(w http.ResponseWriter, r *http.Request) (handled bool) {\n\tlog.Println(\"PageHandler called in example plugin\")\n\treturn false\n}",
"func (o *InlineResponse20014Projects) HasStartPage() bool {\n\tif o != nil && o.StartPage != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (p *Paginator) IsActive(page int) bool {\n\treturn p.Page() == page\n}",
"func (o *SimpleStringWeb) HasContent() bool {\n\tif o != nil && o.Content != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (page *Page) AddPage(link *Page) bool {\n\tfor _, l := range page.Pages {\n\t\tif l == link {\n\t\t\treturn false\n\t\t}\n\t}\n\tpage.Pages = append(page.Pages, link)\n\treturn true\n}",
"func (p Page) Type() Type {\n\treturn p.PageType\n}",
"func (p *Paginator) Show() bool {\n\treturn p.NumberOfPages() > 1\n}",
"func (c *ColumnChunkMetaData) HasIndexPage() bool { return c.columnMeta.IsSetIndexPageOffset() }",
"func (s SourceFilesystems) IsContent(filename string) bool {\n\treturn s.Content.Contains(filename)\n}",
"func (c Page) Page() revel.Result {\n\n\tc.RenderArgs[\"Site\"] = site.Site\n\n\t// Create PageData\n\tpdata := site.LoadPage(c.Params.Route.Get(\"section\"), c.Params.Route.Get(\"page\"))\n\tc.RenderArgs[\"Page\"] = pdata\n\n\tif pdata.Error != nil {\n\t\treturn c.NotFound(\"missing secton\")\n\t}\n\n\tc.RenderArgs[\"Section\"] = site.Site.Sections[pdata.Section]\n\n\treturn c.Render()\n\n}",
"func (b *OGame) GetPageContent(vals url.Values) ([]byte, error) {\n\treturn b.WithPriority(taskRunner.Normal).GetPageContent(vals)\n}",
"func (resp *PharosResponse) HasNextPage() bool {\n\treturn resp.Next != nil && *resp.Next != \"\"\n}",
"func (p *Page) Valid() bool {\n\tif p.Limit > 0 {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (p *GetContentModerationPaginator) HasMorePages() bool {\n\treturn p.firstPage || p.nextToken != nil\n}",
"func (o *DeliveryGetOriginsResponse) HasPageInfo() bool {\n\tif o != nil && o.PageInfo != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (p *Pagination) IsLastPage() bool {\n\treturn p.CurrentPage >= p.TotalPages\n}",
"func (o *ViewMetaPage) HasPageSize() bool {\n\tif o != nil && o.PageSize != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (p *Pagination) isFirst() bool {\n\treturn p.PageNumber == 1\n}",
"func (p nullPage) IsEmpty() (bool, error) {\n\treturn true, nil\n}",
"func (m *PrinterDefaults) GetFitPdfToPage()(*bool) {\n val, err := m.GetBackingStore().Get(\"fitPdfToPage\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*bool)\n }\n return nil\n}",
"func isParagraph(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"p\"\n}",
"func (o *Origin1) GetPage() PageType {\n\tif o == nil {\n\t\tvar ret PageType\n\t\treturn ret\n\t}\n\n\treturn o.Page\n}",
"func (o *ViewSampleProject) HasContent() bool {\n\tif o != nil && o.Content != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func IsPrintable(header http.Header) bool {\n\tmimeType := header.Get(http.CanonicalHeaderKey(\"content-type\"))\n\tmimeType = strings.SplitN(mimeType, \";\", 2)[0]\n\tmimeType = strings.TrimSpace(mimeType)\n\n\tif mimeType == \"\" {\n\t\treturn true\n\t}\n\n\treturn PrintableTypes[mimeType]\n}",
"func (pb *PageBuffer) End() bool {\n return pb.is_end\n}",
"func (p *Pagination) hasMoreThanOnePage() bool {\n\treturn p.Limit < p.Total\n}",
"func (m Model) OnLastPage() bool {\n\treturn m.Page == m.TotalPages-1\n}",
"func (o *Bundles) GetCurrentPageOk() (*int32, bool) {\n\tif o == nil || o.CurrentPage == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CurrentPage, true\n}",
"func (p *Pages) Has(pageName string) bool {\n\tvar pageHandle = cleanAllSlashes(handlePath(pageName))\n\tvar prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))\n\tp.sl.RLock()\n\tif _, exists := p.managers[prefixPage]; exists {\n\t\tp.sl.RUnlock()\n\t\treturn true\n\t}\n\tif _, exists := p.managers[pageHandle]; exists {\n\t\tp.sl.RUnlock()\n\t\treturn true\n\t}\n\tp.sl.RUnlock()\n\treturn false\n}",
"func (o *TlsDeliveryProfile) GetPageOk() (*PageType, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Page, true\n}",
"func (me TAttlistLocationLabelType) IsChapter() bool { return me.String() == \"chapter\" }",
"func (w *W3CNode) IsDocument() bool {\n\tparent := w.ParentNode().(*W3CNode)\n\treturn w.ParentNode() != nil && parent.HTMLNode() == w.HTMLNode()\n}",
"func (o *CdnGetScopeRulesResponse) HasPageInfo() bool {\n\tif o != nil && o.PageInfo != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (p *BastionHostsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.BastionHostListResult.NextLink == nil || len(*p.current.BastionHostListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (me TxsdCounterSimpleContentExtensionType) IsHost() bool { return me.String() == \"host\" }",
"func (o *PaginationProperties) GetPageOk() (*string, bool) {\n\tif o == nil || o.Page == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Page, true\n}",
"func (p *ManagementClientGetBastionShareableLinkPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.BastionShareableLinkListResult.NextLink == nil || len(*p.current.BastionShareableLinkListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.getBastionShareableLinkHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (c cell) IsHyperLink() bool {\n\t_, ok := c.contentHandler.(*HyperLink)\n\treturn ok\n}",
"func (c *Collection) IsPostsCollection() bool { return c.Name == postsName }",
"func (o *BookmarkRunNoContent) IsRedirect() bool {\n\treturn false\n}",
"func (r ResourcePage) IsEmpty() (bool, error) {\n\tif r.StatusCode == 204 {\n\t\treturn true, nil\n\t}\n\n\tresources, err := ExtractResources(r)\n\treturn len(resources) == 0, err\n}",
"func (*HTML) isOutput() {\n}",
"func (p *Page) GetContent() string {\n\treturn p.Content\n}",
"func (me TxsdNodeRoleSimpleContentExtensionCategory) IsPrint() bool { return me.String() == \"print\" }",
"func (q *QuestionnaireT) IsInNavigation(pageIdx int) bool {\n\n\tif pageIdx < 0 || pageIdx > len(q.Pages)-1 {\n\t\treturn false\n\t}\n\n\tif q.Pages[pageIdx].NoNavigation {\n\t\treturn false\n\t}\n\n\tif fc, ok := naviFuncs[q.Pages[pageIdx].NavigationCondition]; ok {\n\t\treturn fc(q, pageIdx)\n\t}\n\n\treturn true\n}",
"func (s *Site) URLPage(urlpath string) (p Document, found bool) {\n\tp, found = s.Routes[urlpath]\n\tif !found {\n\t\tp, found = s.Routes[filepath.Join(urlpath, \"index.html\")]\n\t}\n\tif !found {\n\t\tp, found = s.Routes[filepath.Join(urlpath, \"index.htm\")]\n\t}\n\treturn\n}",
"func (ctx *Context) IsIframe() bool {\n\treturn ctx.Query(constant.IframeKey) == \"true\" || ctx.Headers(constant.IframeKey) == \"true\"\n}",
"func (me TxsdCounterSimpleContentExtensionType) IsSession() bool { return me.String() == \"session\" }",
"func (this *domainController) Page(ctx *context.Context) {\n\tdefer hret.HttpPanic()\n\n\tif !hrpc.BasicAuth(ctx.Request) {\n\t\thret.Error(ctx.ResponseWriter, 403, i18n.NoAuth(ctx.Request))\n\t\treturn\n\t}\n\n\trst, err := groupcache.GetStaticFile(\"DomainPage\")\n\tif err != nil {\n\t\thret.Error(ctx.ResponseWriter, 404, i18n.Get(ctx.Request, \"as_of_date_page_not_exist\"))\n\t\treturn\n\t}\n\n\tctx.ResponseWriter.Write(rst)\n}",
"func (r ContainerPage) IsEmpty() (bool, error) {\n\tcontainers, err := ExtractContainers(r)\n\treturn len(containers) == 0, err\n}",
"func StreamPage(qw *quicktemplate.Writer, p BasePage) {\n\t//line templates/basepage.qtpl:13\n\tqw.N().S(`\n<html>\n\t<head>\n\t\t<title>`)\n\t//line templates/basepage.qtpl:16\n\tp.StreamTitle(qw)\n\t//line templates/basepage.qtpl:16\n\tqw.N().S(`</title>\n\t</head>\n\t<body>\n\t\t<div>\n\t\t\t<a href=\"/\">return to main page</a>\n\t\t</div>\n\t\t`)\n\t//line templates/basepage.qtpl:22\n\tp.StreamBody(qw)\n\t//line templates/basepage.qtpl:22\n\tqw.N().S(`\n\t</body>\n</html>\n`)\n//line templates/basepage.qtpl:25\n}",
"func (p *StoragesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.StorageResourceCollection.NextLink == nil || len(*p.current.StorageResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (*offsetPageInfoImpl) HasNextPage(p graphql.ResolveParams) (bool, error) {\n\tpage := p.Source.(offsetPageInfo)\n\treturn (page.offset + page.limit) < page.totalCount, nil\n}",
"func (p *Pagination) HasOtherPages() bool {\n\treturn p.HasPrev() || p.HasNext()\n}",
"func (p *ListPagesByContactPaginator) HasMorePages() bool {\n\treturn p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)\n}",
"func (p *TestPager) Check() (bool, error) {\n return false, nil\n}",
"func (p *ListTestCasesPaginator) HasMorePages() bool {\n\treturn p.firstPage || p.nextToken != nil\n}",
"func (me TpubStatusInt) IsPubmed() bool { return me.String() == \"pubmed\" }",
"func (p *Paginator) Page() int {\n\tif p.page != 0 {\n\t\treturn p.page\n\t}\n\tif p.Request.Form == nil {\n\t\tp.Request.ParseForm()\n\t}\n\tp.page, _ = strconv.Atoi(p.Request.Form.Get(\"p\"))\n\tif p.page > p.PageNums() {\n\t\tp.page = p.PageNums()\n\t}\n\tif p.page <= 0 {\n\t\tp.page = 1\n\t}\n\treturn p.page\n}",
"func (ctx *Context) IsPost() bool {\r\n\treturn ctx.Is(\"POST\")\r\n}",
"func (p *ListStepsPaginator) HasMorePages() bool {\n\treturn p.firstPage || p.nextToken != nil\n}",
"func (o *GetVersionStagesNoContent) IsRedirect() bool {\n\treturn false\n}",
"func (me TxsdCounterSimpleContentExtensionType) IsFlow() bool { return me.String() == \"flow\" }",
"func IsHugePageResourceName(name corev1.ResourceName) bool {\n\treturn strings.HasPrefix(string(name), corev1.ResourceHugePagesPrefix)\n}",
"func (masterPage *MasterPage) CurrentPage() Page {\n\treturn masterPage.subPages.Top()\n}",
"func (p Page) Len() int { return len(p) }",
"func (r *Repository) GetHasPages() bool {\n\tif r == nil || r.HasPages == nil {\n\t\treturn false\n\t}\n\treturn *r.HasPages\n}",
"func (o *MicrosoftGraphVisualInfo) HasContent() bool {\n\tif o != nil && o.Content != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (c *Content) IsDraft() bool {\n\treturn c.Status == CONTENT_STATUS_DRAFT\n}",
"func (p *BlobContainersClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListContainerItems.NextLink == nil || len(*p.current.ListContainerItems.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (o *Bundles) GetCurrentPage() int32 {\n\tif o == nil || o.CurrentPage == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.CurrentPage\n}",
"func (is *MenuPage) WillHide() {\n}",
"func (m *Metadata) IsFile() bool {\n\treturn (strings.ToLower(m.Tag) == MetadataTypeFile)\n}",
"func (me TxsdRecordPatternSimpleContentExtensionType) IsXpath() bool { return me.String() == \"xpath\" }",
"func (o *PaginationProperties) HasNextPage() bool {\n\tif o != nil && o.NextPage != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (p *ManagementClientGetActiveSessionsPager) NextPage(ctx context.Context) bool {\n\tif !p.second {\n\t\tp.second = true\n\t\treturn true\n\t} else if !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.BastionActiveSessionListResult.NextLink == nil || len(*p.current.BastionActiveSessionListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, *p.current.BastionActiveSessionListResult.NextLink)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.getActiveSessionsHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (r *RMFileInfo) PageIterate() (pageNo, pdfPageNo int, inserted, isTemplate bool, reader *io.ReadSeeker) {\n\tpageNo = r.thisPageNo\n\tr.thisPageNo++\n\n\t// if there is only a template, always return the first page\n\tif r.pdfPath == \"\" {\n\t\tpdfPageNo = 0\n\t\tisTemplate = true\n\t\treader = &r.templateReader\n\t\treturn\n\t}\n\n\t// older remarkable bundles don't report inserted pages; ignore\n\thasRedir := func() bool { return len(r.RedirectionPageMap) > 0 }()\n\n\t// return the template if this is an inserted page\n\tif hasRedir && r.RedirectionPageMap[pageNo] == -1 {\n\t\tpdfPageNo = 0\n\t\tinserted = true\n\t\tisTemplate = true\n\t\treader = &r.templateReader\n\t\treturn\n\t}\n\n\t// remaining target is the annotated file\n\treader = &r.pdfReader\n\n\t// if the annotated pdf has inserted pages, calculate the offset of\n\t// the original pdf to use\n\tif hasRedir && r.PageCount != r.OriginalPageCount {\n\t\tpdfPageNo = pageNo\n\t\tfor i := 0; i <= pageNo; i++ {\n\t\t\tif r.RedirectionPageMap[i] == -1 {\n\t\t\t\tpdfPageNo--\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\t// fall through: the annotated pdf has no inserted pages\n\tpdfPageNo = pageNo\n\treturn\n\n}",
"func (o *FileDto) HasContent() bool {\n\tif o != nil && o.Content != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (c *ColumnChunkMetaData) HasDictionaryPage() bool {\n\treturn c.columnMeta.IsSetDictionaryPageOffset()\n}"
] | [
"0.8182451",
"0.72458345",
"0.6284937",
"0.62021935",
"0.6069874",
"0.5863453",
"0.5849004",
"0.5813375",
"0.5769847",
"0.57443935",
"0.5733453",
"0.57127947",
"0.56711227",
"0.5647718",
"0.5588009",
"0.55775696",
"0.55400574",
"0.553223",
"0.55277514",
"0.5482119",
"0.5480724",
"0.5475604",
"0.5438447",
"0.5397574",
"0.53849566",
"0.5368285",
"0.535076",
"0.52992684",
"0.529924",
"0.5244348",
"0.520858",
"0.5174433",
"0.51427084",
"0.5120175",
"0.5096488",
"0.50958157",
"0.5074591",
"0.5064051",
"0.50515974",
"0.50464004",
"0.5041564",
"0.5030359",
"0.50188375",
"0.5003556",
"0.49948055",
"0.49636385",
"0.4936882",
"0.49321434",
"0.49127424",
"0.48978177",
"0.48937938",
"0.4887442",
"0.48770073",
"0.48761925",
"0.48748463",
"0.4871288",
"0.48700267",
"0.4869964",
"0.48631534",
"0.48621634",
"0.4858934",
"0.48565283",
"0.48529318",
"0.4843857",
"0.4837699",
"0.48203316",
"0.48202932",
"0.4815682",
"0.48112497",
"0.48078084",
"0.4803781",
"0.4800516",
"0.4787012",
"0.47759795",
"0.47593257",
"0.47584662",
"0.47579843",
"0.47579294",
"0.4754432",
"0.4750444",
"0.47486192",
"0.4747975",
"0.4741044",
"0.4731108",
"0.47275096",
"0.47207075",
"0.47187707",
"0.47118792",
"0.4702171",
"0.47021416",
"0.47015458",
"0.46936926",
"0.46912372",
"0.46844232",
"0.46820432",
"0.46819338",
"0.46794716",
"0.46737614",
"0.46722847",
"0.46692005"
] | 0.8657112 | 0 |
IsTrashed return true if content is trashed | IsTrashed возвращает true, если содержимое помечено как удалённое | func (c *Content) IsTrashed() bool {
return c.Status == CONTENT_STATUS_TRASHED
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *TrashStructureApplication) HasTrashed() bool {\n\tif o != nil && !IsNil(o.Trashed) {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o *TrashStructureApplication) GetTrashed() bool {\n\tif o == nil || IsNil(o.Trashed) {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.Trashed\n}",
"func (o *TrashStructureApplication) GetTrashedOk() (*bool, bool) {\n\tif o == nil || IsNil(o.Trashed) {\n\t\treturn nil, false\n\t}\n\treturn o.Trashed, true\n}",
"func (o *TrashStructureApplication) SetTrashed(v bool) {\n\to.Trashed = &v\n}",
"func ItemTrashed(r *provider.DeleteResponse, req *provider.DeleteRequest, spaceOwner, executant *user.UserId) events.ItemTrashed {\n\topaqueID := utils.ReadPlainFromOpaque(r.Opaque, \"opaque_id\")\n\treturn events.ItemTrashed{\n\t\tSpaceOwner: spaceOwner,\n\t\tExecutant: executant,\n\t\tRef: req.Ref,\n\t\tID: &provider.ResourceId{\n\t\t\tStorageId: req.Ref.GetResourceId().GetStorageId(),\n\t\t\tSpaceId: req.Ref.GetResourceId().GetSpaceId(),\n\t\t\tOpaqueId: opaqueID,\n\t\t},\n\t\tTimestamp: utils.TSNow(),\n\t}\n}",
"func (e *Entry) IsDeleted() bool {\n\treturn e.Latest().GetTombstone()\n}",
"func (me TxsdSpace) IsPreserve() bool { return me.String() == \"preserve\" }",
"func (s *Subtitle) IsDeleted() bool {\n\treturn s.Num == -1\n}",
"func (d dynamicSystemView) Tainted() bool {\n\treturn d.mountEntry.Tainted\n}",
"func (m *UserModel) WithTrashed() *UserModel {\n\treturn m.WithoutGlobalScopes(\"soft_delete\")\n}",
"func (obj *InstallPhase) IsDeleted() bool {\n\treturn obj.GetDeletionTimestamp() != nil\n}",
"func (*TruncateTable) IsTemporary() bool {\n\treturn false\n}",
"func (e *ExternalService) IsDeleted() bool { return !e.DeletedAt.IsZero() }",
"func (s *shard) IsFlushing() bool { return s.isFlushing.Load() }",
"func (s *shard) IsFlushing() bool { return s.isFlushing.Load() }",
"func (m *Model) IsSoftDelete() bool {\n\treturn m.SoftDelete\n}",
"func (obj *ECDSCluster) IsDeleted() bool {\n\treturn obj.GetDeletionTimestamp() != nil\n}",
"func IsObjectShardingTable(tableName string) bool {\n\tif IsObjectInstShardingTable(tableName) {\n\t\treturn true\n\t}\n\treturn IsObjectInstAsstShardingTable(tableName)\n}",
"func (m *TrxMgr) isProcessingTrx(trx *prototype.SignedTransaction) *TrxEntry {\n\tm.waitingLock.RLock()\n\tdefer m.waitingLock.RUnlock()\n\tm.fetchedLock.RLock()\n\tdefer m.fetchedLock.RUnlock()\n\treturn m.isProcessingNoLock(trx)\n}",
"func (m *BlockDeletionMark) ThanosDeletionMark() *metadata.DeletionMark {\n\treturn &metadata.DeletionMark{\n\t\tID: m.ID,\n\t\tVersion: metadata.DeletionMarkVersion1,\n\t\tDeletionTime: m.DeletionTime,\n\t}\n}",
"func (fs *AzureDataLakeGen2FileSystem) IsBeingDeleted() bool {\n\treturn !fs.ObjectMeta.DeletionTimestamp.IsZero()\n}",
"func IsObjectInstAsstShardingTable(tableName string) bool {\n\t// check object instance association table, cc_InstAsst_{Specifier}_{ObjectID}\n\treturn strings.HasPrefix(tableName, BKObjectInstAsstShardingTablePrefix)\n}",
"func (r *Repo) IsDeleted() bool { return !r.DeletedAt.IsZero() }",
"func (shelf *Shelf) IsTopWasted() bool {\n\tif shelf.Len() == 0 {\n\t\treturn false\n\t}\n\n\treturn shelf.queue[0].IsWasted(shelf.decayModifier)\n}",
"func (level DepthOfMarketLevel) IsDeleted() bool {\n\treturn level.GetQty() == 0\n}",
"func (me TxsdIncidentPurpose) IsTraceback() bool { return me.String() == \"traceback\" }",
"func (r *DarwinReference) GetToc(tx *bolt.Tx, toc string) (*Toc, bool) {\n\tloc, exists := r.GetTocBucket(tx.Bucket([]byte(\"DarwinToc\")), toc)\n\treturn loc, exists\n}",
"func (me TxsdInvoiceType) IsTd() bool { return me.String() == \"TD\" }",
"func isTombstone(b []byte) bool {\n\treturn len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone\n}",
"func isTombstone(b []byte) bool {\n\treturn len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone\n}",
"func (node *CreateTable) IsTemporary() bool {\n\treturn node.Temp\n}",
"func (w *writer) IsDrained() bool {\n\tw.mutex.Lock()\n\tdefer w.mutex.Unlock()\n\treturn w.isClosed && w.size == 0\n}",
"func (app *Application) HasSeenTranc(trancHash string) bool{\n\tif _, ok := app.seenTranc[trancHash]; ok {\n\t\treturn true;\n\t}\n\treturn false;\n}",
"func (me TxsdPaymentMechanism) IsTr() bool { return me.String() == \"TR\" }",
"func (tail *Tail) isFileDeleted() bool {\n\treturn false\n}",
"func (obj *content) IsTor() bool {\n\treturn obj.tor != nil\n}",
"func (obj *transaction) IsTable() bool {\n\treturn obj.table != nil\n}",
"func (m *TestModel) IsDeleted() bool {\n\treturn m.Deleted\n}",
"func (me TisoLanguageCodes) IsTr() bool { return me.String() == \"TR\" }",
"func (m *Metadata) IsDeleted() bool {\n\treturn (strings.ToLower(m.Tag) == MetadataTypeDeleted)\n}",
"func (me TxsdComponentTransferFunctionAttributesType) IsTable() bool { return me.String() == \"table\" }",
"func (me TxsdMovementStatus) IsT() bool { return me.String() == \"T\" }",
"func (gWal *GenericWAL) IsLostOwnership() bool {\n // FIXME (bvk) This is not thread-safe.\n return gWal.lostOwnership\n}",
"func (o *StoragePhysicalDisk) HasThermal() bool {\n\tif o != nil && o.Thermal != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (s *Trie) Stash(rollbackCache bool) error {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\ts.Root = s.prevRoot\n\tif rollbackCache {\n\t\t// Making a temporary liveCache requires it to be copied, so it's quicker\n\t\t// to just load the cache from DB if a block state root was incorrect.\n\t\ts.db.liveCache = make(map[Hash][][]byte)\n\t\tch := make(chan error, 1)\n\t\ts.loadCache(s.Root, nil, 0, s.TrieHeight, ch)\n\t\terr := <-ch\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\ts.db.liveCache = make(map[Hash][][]byte)\n\t}\n\ts.db.updatedNodes = make(map[Hash][][]byte)\n\t// also stash past tries created by Atomic update\n\tfor i := len(s.pastTries) - 1; i >= 0; i-- {\n\t\tif bytes.Equal(s.pastTries[i], s.Root) {\n\t\t\tbreak\n\t\t} else {\n\t\t\t// remove from past tries\n\t\t\ts.pastTries = s.pastTries[:len(s.pastTries)-1]\n\t\t}\n\t}\n\treturn nil\n}",
"func (tbl RecordTable) IsTx() bool {\n\treturn tbl.db.IsTx()\n}",
"func (t *Testzzz) Deleted() bool {\n\treturn t._deleted\n}",
"func (i *MockOtherDataItem) IsDeleted() bool {\n\treturn i.Deleted\n}",
"func (dct *DjangoContentType) Deleted() bool {\n\treturn dct._deleted\n}",
"func (me TxsdPresentationAttributesTextElementsWritingMode) IsTb() bool { return me.String() == \"tb\" }",
"func (s *schedule) IsEnqueueFault() bool {\n\treturn !s.append\n}",
"func WasDeleted(o metav1.Object) bool {\n\treturn !o.GetDeletionTimestamp().IsZero()\n}",
"func (v *RawWriteCFValue) IsRollback() bool {\n\treturn v.GetWriteType() == WriteTypeRollback\n}",
"func (i *Image) SquashedTree() *tree.FileTree {\n\treturn i.Layers[len(i.Layers)-1].SquashedTree\n}",
"func TestCheckLeashed(t *testing.T) {\n\terr := initDB()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tm, err := mysql.New(\"localhost\", port, \"root\", password, \"chaosmonkey\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tins, loc, appCfg := testSetup(t)\n\n\ttrm := c.Termination{Instance: ins, Time: time.Now(), Leashed: true}\n\n\t// First check should succeed\n\terr = m.Check(trm, appCfg, endHour, loc)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttrm = c.Termination{Instance: ins, Time: time.Now(), Leashed: false}\n\n\t// Second check should fail\n\terr = m.Check(trm, appCfg, endHour, loc)\n\n\tif err != nil {\n\t\tt.Fatalf(\"Should have allowed an unleashed termination after leashed: %v\", err)\n\t}\n}",
"func (me TxsdPresentationAttributesGraphicsDisplay) IsTableRow() bool {\n\treturn me.String() == \"table-row\"\n}",
"func (this *KeyspaceTerm) IsUnderHash() bool {\n\treturn (this.property & TERM_UNDER_HASH) != 0\n}",
"func (res ExecResult) Crashed() bool {\n\treturn (res.ExitType == Exited && res.ExitCode != 0) || res.ExitType == Signaled\n}",
"func (m *FeedMutation) TranscriptCleared() bool {\n\t_, ok := m.clearedFields[feed.FieldTranscript]\n\treturn ok\n}",
"func (conn *extHost) isDrained(replySeqNo int64) bool {\n\tconn.lk.RLock()\n\tif conn.state != extHostCloseWrite {\n\t\tconn.lk.RUnlock()\n\t\treturn false\n\t}\n\n\tret := false\n\tif atomic.LoadInt64(&conn.seqNo) == replySeqNo {\n\t\tconn.logger.WithFields(bark.Fields{\n\t\t\t`drainedSeqNo`: replySeqNo,\n\t\t}).Info(\"extent drained completely\")\n\t\tret = true\n\t}\n\n\tconn.lk.RUnlock()\n\treturn ret\n}",
"func (me TxsdFeTurbulenceTypeStitchTiles) IsNoStitch() bool { return me.String() == \"noStitch\" }",
"func (c *ClusterInstallation) IsDeleted() bool {\n\treturn c.DeleteAt != 0\n}",
"func (me TxsdPresentationAttributesGraphicsDisplay) IsTable() bool { return me.String() == \"table\" }",
"func (w *worker) isTTDReached(header *model.Header) bool {\n\ttd, ttd := w.chain.GetTd(header.ParentHash, header.Number.Uint64()-1), w.chain.Config().TerminalTotalDifficulty\n\treturn td != nil && ttd != nil && td.Cmp(ttd) >= 0\n}",
"func (ss *SuffixSnapshot) isTombstone(value string) bool {\n\treturn IsTombstone(value)\n}",
"func (a *_Atom) isHydroxyl() bool {\n\treturn a.atNum == 8 && a.hCount == 1\n}",
"func (tttp *TriageTimeTableProvider) Deleted() bool {\n\treturn tttp._deleted\n}",
"func (o *TenantWithOfferWeb) HasDeletionTs() bool {\n\tif o != nil && o.DeletionTs != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (me TComparator) IsDoesNotExist() bool { return me.String() == \"DoesNotExist\" }",
"func (me TxsdFeTurbulenceTypeStitchTiles) IsStitch() bool { return me.String() == \"stitch\" }",
"func (me TxsdShow) IsEmbed() bool { return me.String() == \"embed\" }",
"func (tbl AssociationTable) IsTx() bool {\n\treturn tbl.db.IsTx()\n}",
"func IsObjectInstShardingTable(tableName string) bool {\n\t// check object instance table, cc_ObjectBase_{Specifier}_{ObjectID}\n\treturn strings.HasPrefix(tableName, BKObjectInstShardingTablePrefix)\n}",
"func DeleteStash(data interface{}) error {\n\tapi, m, err := findDcFromInterface(data)\n\n\tp, ok := m[\"payload\"].(map[string]interface{})\n\tif !ok {\n\t\tlogger.Warningf(\"Could not assert data interface %+v\", data)\n\t\treturn errors.New(\"Could not assert data interface\")\n\t}\n\n\terr = api.DeleteStash(p[\"path\"].(string))\n\tif err != nil {\n\t\tlogger.Warning(err)\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (me TxsdMovementType) IsGc() bool { return me.String() == \"GC\" }",
"func (c *Config) IsStash() bool {\n\treturn c.Stash.Server != \"\"\n}",
"func (i *MockDataItem) IsDeleted() bool {\n\treturn i.Deleted\n}",
"func (e *FileEvent) IsDelete() bool { return (e.flags & EventFlagItemRemoved) == EventFlagItemRemoved}",
"func (me TxsdShow) IsEmbed() bool { return me == \"embed\" }",
"func (obj *DeletePhase) IsDeleted() bool {\n\treturn obj.GetDeletionTimestamp() != nil\n}",
"func (l *Ledger) IsTxInTrunk(txid []byte) bool {\n\tvar blk *pb.InternalBlock\n\tvar err error\n\ttable := l.ConfirmedTable\n\tpbTxBuf, kvErr := table.Get(txid)\n\tif kvErr != nil {\n\t\treturn false\n\t}\n\trealTx := &pb.Transaction{}\n\tpbErr := proto.Unmarshal(pbTxBuf, realTx)\n\tif pbErr != nil {\n\t\tl.xlog.Warn(\"IsTxInTrunk error\", \"txid\", utils.F(txid), \"pbErr\", pbErr)\n\t\treturn false\n\t}\n\tblkInCache, exist := l.blockCache.Get(string(realTx.Blockid))\n\tif exist {\n\t\tblk = blkInCache.(*pb.InternalBlock)\n\t} else {\n\t\tblk, err = l.queryBlock(realTx.Blockid, false)\n\t\tif err != nil {\n\t\t\tl.xlog.Warn(\"IsTxInTrunk error\", \"blkid\", utils.F(realTx.Blockid), \"kvErr\", err)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn blk.InTrunk\n}",
"func isMarkableForTermination(nodeInfo *CacheEntry) bool {\n\t// If the last mark was an hour ago, mark again\n\t// Zero value for time.Time is 0001-01-01, so first mark is also executed\n\tlastMarked := nodeInfo.LastMarkedForTermination\n\treturn lastMarked.UTC().Add(time.Hour).Before(time.Now().UTC())\n}",
"func (me TxsdPresentationAttributesTextContentElementsDirection) IsLtr() bool {\n\treturn me.String() == \"ltr\"\n}",
"func (me TxsdTaxAccountingBasis) IsT() bool { return me.String() == \"T\" }",
"func (t *TextBlock) Undo() {\n\tif t.cache.Len() > 0 {\n\t\tt.currentText = t.cache.Pop()\n\t\treturn\n\t}\n\tfmt.Println(\"Cache is empty\")\n}",
"func (me TxsdPaymentMechanism) IsTb() bool { return me.String() == \"TB\" }",
"func (m *kubePackage) Truth() starlark.Bool { return starlark.True }",
"func (r TransactionResult) Reverted() bool {\n\treturn !r.Succeeded()\n}",
"func (obj *content) IsNormal() bool {\n\treturn obj.normal != nil\n}",
"func (inst *Installer) Aborted() bool {\n\treturn inst.aborted\n}",
"func (node *DropTable) IsTemporary() bool {\n\treturn node.Temp\n}",
"func (b *base) Deleted() bool { return b.Deletedx }",
"func (tbl DbCompoundTable) IsTx() bool {\n\t_, ok := tbl.db.(*sql.Tx)\n\treturn ok\n}",
"func (d *Deletable) IsDeleted() bool {\n\treturn d.Deleted\n}",
"func StockKeepingUnitContentExists(exec boil.Executor, iD string) (bool, error) {\n\tvar exists bool\n\tsql := \"select exists(select 1 from \\\"stock_keeping_unit_content\\\" where \\\"id\\\"=$1 limit 1)\"\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, iD)\n\t}\n\trow := exec.QueryRow(sql, iD)\n\n\terr := row.Scan(&exists)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"db: unable to check if stock_keeping_unit_content exists\")\n\t}\n\n\treturn exists, nil\n}",
"func (eth *Backend) Synced() (bool, error) {\n\t// node.SyncProgress will return nil both before syncing has begun and\n\t// after it has finished. In order to discern when syncing has begun,\n\t// check that the best header came in under MaxBlockInterval.\n\tsp, err := eth.node.syncProgress(eth.rpcCtx)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif sp != nil {\n\t\treturn false, nil\n\t}\n\tbh, err := eth.node.bestHeader(eth.rpcCtx)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\t// Time in the header is in seconds.\n\tnowInSecs := time.Now().Unix() / 1000\n\ttimeDiff := nowInSecs - int64(bh.Time)\n\treturn timeDiff < dexeth.MaxBlockInterval, nil\n}",
"func (t *Table) TrickNew() bool {\n\tfor _, c := range t.trick {\n\t\tif c != nil {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (entry *LogEntry) IsActive() bool {\n\treturn !entry.IsRemoved() && !entry.IsDeleted()\n}",
"func IsWordOnTimer(m *discordgo.Message, db *sql.DB) bool {\n\ttokens := strings.Split(m.Content, \" \")\n\tfor i, v := range removeableWordsMap {\n\t\tfor j := 0; j < len(tokens); j++ {\n\t\t\tif _, ok := removeableWordsMap[i]; !ok {\n\t\t\t\tfmt.Println(\"[ERROR] Attempt to access index out of bounds during removable search\")\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tif strings.EqualFold(v, tokens[j]) {\n\t\t\t\tfmt.Printf(\"\\n[LOG] Message queued to be erased: %s\", m.Content)\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func (d *cephobject) Unmount() (bool, error) {\n\treturn false, nil\n}"
] | [
"0.67762494",
"0.67077285",
"0.6215999",
"0.5996488",
"0.5649976",
"0.533048",
"0.53276575",
"0.5198325",
"0.5166646",
"0.51392007",
"0.50936794",
"0.5033882",
"0.49881992",
"0.49692014",
"0.49692014",
"0.48998713",
"0.48027912",
"0.4781283",
"0.47776878",
"0.47514683",
"0.47429937",
"0.47319824",
"0.47279334",
"0.47137713",
"0.4677803",
"0.46655387",
"0.4649003",
"0.46465757",
"0.46401623",
"0.46401623",
"0.46373236",
"0.4635405",
"0.46341896",
"0.46334904",
"0.46330032",
"0.46299037",
"0.4611835",
"0.45998433",
"0.45819205",
"0.4575252",
"0.4571637",
"0.45656866",
"0.4563601",
"0.45481983",
"0.45433235",
"0.45377016",
"0.4531389",
"0.45218444",
"0.45205927",
"0.45170334",
"0.45122534",
"0.45049843",
"0.44835368",
"0.44722706",
"0.4469306",
"0.44605055",
"0.44549173",
"0.44531202",
"0.44494212",
"0.44492775",
"0.44424692",
"0.44337776",
"0.44326153",
"0.4432037",
"0.44303653",
"0.44301748",
"0.44287777",
"0.44280794",
"0.44235265",
"0.44167376",
"0.44094655",
"0.44062486",
"0.4401107",
"0.43971556",
"0.43896347",
"0.43895197",
"0.43848646",
"0.4384284",
"0.43731534",
"0.43716547",
"0.43649918",
"0.4359489",
"0.4350639",
"0.4341427",
"0.43349752",
"0.43297118",
"0.43195736",
"0.43173316",
"0.43151826",
"0.43134448",
"0.43088254",
"0.43029377",
"0.4291288",
"0.4284965",
"0.42823786",
"0.42798534",
"0.42785868",
"0.42736137",
"0.42653334",
"0.42596042"
] | 0.8638603 | 0 |
IsDraft return true if content is draft | IsDraft возвращает true, если содержимое является черновиком | func (c *Content) IsDraft() bool {
return c.Status == CONTENT_STATUS_DRAFT
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (m *ModelStatus) IsDraft() bool {\n\treturn m.Status == Draft\n}",
"func IsDraft(source string) bool {\n\treturn strings.Contains(filepath.Base(filepath.Dir(source)), \"drafts\")\n}",
"func (r *RepositoryRelease) GetDraft() bool {\n\tif r == nil || r.Draft == nil {\n\t\treturn false\n\t}\n\treturn *r.Draft\n}",
"func Draft(v bool) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldEQ(FieldDraft, v))\n}",
"func (c *Client) ShowDraft(ctx context.Context, path string, contest *int, contestSlug *string, page *int, pageSize *int, sort *string, task *int, taskSlug *string, user *int) (*http.Response, error) {\n\treq, err := c.NewShowDraftRequest(ctx, path, contest, contestSlug, page, pageSize, sort, task, taskSlug, user)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.Client.Do(ctx, req)\n}",
"func (u *User) IsEditor() bool {\n\treturn u.UserGroupID == EDITOR\n}",
"func (c *Client) GetDraft(ctx context.Context, path string) (*http.Response, error) {\n\treq, err := c.NewGetDraftRequest(ctx, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.Client.Do(ctx, req)\n}",
"func (obj *content) IsNormal() bool {\n\treturn obj.normal != nil\n}",
"func (s *Service) ChangeDraft(ctx context.Context, draftID, companyID string, post *job.Posting) (string, error) {\n\tspan := s.tracer.MakeSpan(ctx, \"ChangeDraft\")\n\tdefer span.Finish()\n\n\t// get userID\n\tuserID, err := s.authRPC.GetUserID(ctx)\n\tif err != nil {\n\t\ts.tracer.LogError(span, err)\n\t\treturn \"\", err\n\t}\n\n\terr = post.SetCompanyID(companyID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = post.SetID(draftID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// check admin level\n\tallowed := s.checkAdminLevel(\n\t\tctx,\n\t\tpost.GetCompanyID(),\n\t\tcompanyadmin.AdminLevelAdmin,\n\t\tcompanyadmin.AdminLevelJob,\n\t)\n\tif !allowed {\n\t\treturn \"\", errors.New(\"not_allowed\")\n\t}\n\n\tpost.SetUserID(userID)\n\t// id := post.GenerateID()\n\n\tif !post.JobMetadata.Anonymous {\n\t\tpost.CompanyDetails = &company.Details{\n\t\t\t// TODO: company avatar, URL, Industry, subindustry\n\t\t}\n\t\tpost.CompanyDetails.SetCompanyID(post.GetCompanyID())\n\t}\n\n\tpost.CreatedAt = time.Now()\n\n\tpost.Status = job.StatusDraft\n\t// post.JobPriority = post.JobMetadata.JobPlan.GetPriority()\n\n\t// if post.JobDetails.SalaryMin > 0 && post.JobDetails.SalaryInterval != \"\" {\n\t// \tpost.NormalizedSalaryMin = float32(post.JobDetails.SalaryMin) / float32(post.JobDetails.SalaryInterval.GetHours()) // TODO also convert currency\n\t// }\n\n\t// if post.JobDetails.SalaryMax > 0 && post.JobDetails.SalaryInterval != \"\" {\n\t// \tpost.NormalizedSalaryMax = float32(post.JobDetails.SalaryMax) / float32(post.JobDetails.SalaryInterval.GetHours()) // TODO also convert currency\n\t// }\n\n\terr = s.jobs.UpdateJobPosting(ctx, post)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn post.GetID(), nil\n}",
"func (s *MessagesSendMessageRequest) GetClearDraft() (value bool) {\n\tif s == nil {\n\t\treturn\n\t}\n\treturn s.Flags.Has(7)\n}",
"func (m *Member) IsPublished() bool { return m.Published }",
"func (msgr *Messenger) IsEditable(id string) bool {\n\ti, err := message.ParseID(id)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tmsgr.messageMutex.Lock()\n\tdefer msgr.messageMutex.Unlock()\n\n\tm, ok := msgr.messages[i]\n\tif ok {\n\t\t// Editable if same author.\n\t\treturn m.Author().Name().String() == msgr.channel.user.String()\n\t}\n\n\treturn false\n}",
"func (e PartialContent) IsPartialContent() {}",
"func (me TPubStatusUnion4) IsRevised() bool { return me.String() == \"revised\" }",
"func (me THITReviewStatus) IsMarkedForReview() bool { return me.String() == \"MarkedForReview\" }",
"func ShowDraftPath() string {\n\n\treturn fmt.Sprintf(\"/sao/v1/drafts/\")\n}",
"func (m *MailTips) GetIsModerated()(*bool) {\n val, err := m.GetBackingStore().Get(\"isModerated\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*bool)\n }\n return nil\n}",
"func (rc *BypassRevisionCache) Peek(docID, revID string, copyType BodyCopyType) (docRev DocumentRevision, found bool) {\n\treturn DocumentRevision{}, false\n}",
"func (c *Content) IsAttachment() bool {\n\treturn c.Type == CONTENT_TYPE_ATTACHMENT\n}",
"func WhereDraft(q *query.Query) *query.Query {\n\treturn q.Where(\"status = ?\", Draft)\n}",
"func (s *Sender) saveDraft(ctx context.Context, req *tg.MessagesSaveDraftRequest) error {\n\t_, err := s.raw.MessagesSaveDraft(ctx, req)\n\treturn err\n}",
"func (m *ServiceUpdateMessageViewpoint) GetIsArchived()(*bool) {\n val, err := m.GetBackingStore().Get(\"isArchived\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*bool)\n }\n return nil\n}",
"func (m *ModelStatus) IsPublished() bool {\n\treturn m.Status >= Published // NB >=\n}",
"func (a *DraftsApiService) EditDraftExecute(r ApiEditDraftRequest) (JsonSuccess, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPatch\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue JsonSuccess\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"DraftsApiService.EditDraft\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/drafts/{draft_id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"draft_id\"+\"}\", _neturl.PathEscape(parameterToString(r.draftId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\tif r.draft == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"draft is required and must be specified\")\n\t}\n\n\tlocalVarQueryParams.Add(\"draft\", parameterToString(*r.draft, \"\"))\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, 
err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v JsonError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}",
"func (s *Service) SaveDraft(ctx context.Context, companyID string, post *job.Posting) (string, error) {\n\tspan := s.tracer.MakeSpan(ctx, \"SaveDraft\")\n\tdefer span.Finish()\n\n\t// get userID\n\tuserID, err := s.authRPC.GetUserID(ctx)\n\tif err != nil {\n\t\ts.tracer.LogError(span, err)\n\t\treturn \"\", err\n\t}\n\n\t// check admin level\n\tallowed := s.checkAdminLevel(\n\t\tctx,\n\t\tcompanyID,\n\t\tcompanyadmin.AdminLevelAdmin,\n\t\tcompanyadmin.AdminLevelJob,\n\t)\n\tif !allowed {\n\t\treturn \"\", errors.New(\"not_allowed\")\n\t}\n\n\tpost.SetCompanyID(companyID)\n\tpost.SetUserID(userID)\n\tid := post.GenerateID()\n\n\tif !post.JobMetadata.Anonymous {\n\t\tpost.CompanyDetails = &company.Details{\n\t\t\t// TODO: company avatar, URL, Industry, subindustry\n\t\t}\n\t\tpost.CompanyDetails.SetCompanyID(companyID)\n\t}\n\n\tpost.CreatedAt = time.Now()\n\n\tpost.Status = job.StatusDraft\n\t// post.JobPriority = post.JobMetadata.JobPlan.GetPriority()\n\n\t// if post.JobDetails.SalaryMin > 0 && post.JobDetails.SalaryInterval != \"\" {\n\t// \tpost.NormalizedSalaryMin = float32(post.JobDetails.SalaryMin) / float32(post.JobDetails.SalaryInterval.GetHours()) // TODO also convert currency\n\t// }\n\n\t// if post.JobDetails.SalaryMax > 0 && post.JobDetails.SalaryInterval != \"\" {\n\t// \tpost.NormalizedSalaryMax = float32(post.JobDetails.SalaryMax) / float32(post.JobDetails.SalaryInterval.GetHours()) // TODO also convert currency\n\t// }\n\n\terr = s.jobs.PostJob(ctx, post)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn id, nil\n}",
"func (u *Update) IsEdited() bool {\n\treturn u.EditedMessage != nil\n}",
"func (t *Link) IsPreviewObject(index int) (ok bool) {\n\treturn t.preview[index].Object != nil\n\n}",
"func (o RunBookOutput) Draft() RunBookDraftPtrOutput {\n\treturn o.ApplyT(func(v *RunBook) RunBookDraftPtrOutput { return v.Draft }).(RunBookDraftPtrOutput)\n}",
"func isValidPatchDocument(document Document) bool {\n var existingContent = false\n\n if document.Content != nil {\n existingContent = document.Content.Header != nil || document.Content.Data != nil\n }\n\n return existingContent || document.Title != nil || document.Signee != nil\n}",
"func (_BaseContent *BaseContentCaller) CanEdit(opts *bind.CallOpts) (bool, error) {\n\tvar out []interface{}\n\terr := _BaseContent.contract.Call(opts, &out, \"canEdit\")\n\n\tif err != nil {\n\t\treturn *new(bool), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(bool)).(*bool)\n\n\treturn out0, err\n\n}",
"func (body *Body) IsOpen() bool {\n\treturn body.isOpen\n}",
"func (s *Subtitle) IsDeleted() bool {\n\treturn s.Num == -1\n}",
"func Draft(individuals <-chan individual) <-chan individual {\r\n\tout := make(chan individual)\r\n\tgo func() {\r\n\t\tdefer close(out)\r\n\t\tfor draft := range individuals {\r\n\t\t\tif draft.Age >= 18 && draft.Age <= 35 {\r\n\t\t\t\tout <- draft\r\n\t\t\t}\r\n\t\t}\r\n\t}()\r\n\treturn out\r\n}",
"func (me THITStatus) IsReviewable() bool { return me.String() == \"Reviewable\" }",
"func isCompleteDocument(document Document) bool {\n if document.Content != nil {\n return document.Title != nil && document.Content.Header != nil &&\n document.Content.Data != nil && document.Signee != nil\n } else {\n return false\n }\n}",
"func (r *Repo) IsDeleted() bool { return !r.DeletedAt.IsZero() }",
"func DraftEQ(v bool) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldEQ(FieldDraft, v))\n}",
"func (_BaseContentType *BaseContentTypeCaller) CanEdit(opts *bind.CallOpts) (bool, error) {\n\tvar out []interface{}\n\terr := _BaseContentType.contract.Call(opts, &out, \"canEdit\")\n\n\tif err != nil {\n\t\treturn *new(bool), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(bool)).(*bool)\n\n\treturn out0, err\n\n}",
"func GetBlogPostDraftCount(ctx context.Context) (int, error) {\n\tquery := datastore.NewQuery(blogPostVersionKind).\n\t\tProject(\"PostID\").\n\t\tDistinct().\n\t\tOrder(\"PostID\").\n\t\tOrder(\"-Published\").\n\t\tOrder(\"-DateCreated\").\n\t\tFilter(\"Published=\", false)\n\n\tvar x []BlogPostVersion\n\tkeys, err := query.GetAll(ctx, &x)\n\n\treturn len(keys), err\n}",
"func (m *BookingBusiness) GetIsPublished()(*bool) {\n val, err := m.GetBackingStore().Get(\"isPublished\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*bool)\n }\n return nil\n}",
"func (m *RetentionLabelSettings) GetIsContentUpdateAllowed()(*bool) {\n val, err := m.GetBackingStore().Get(\"isContentUpdateAllowed\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*bool)\n }\n return nil\n}",
"func (me TPubStatusUnion6) IsRetracted() bool { return me.String() == \"retracted\" }",
"func (pr LocalPackageReference) IsPreview() bool {\n\treturn ContainsPreviewVersionLabel(strings.ToLower(pr.apiVersion))\n}",
"func (me TpubStatusInt) IsPubmed() bool { return me.String() == \"pubmed\" }",
"func (me TAttlistAuthorListType) IsEditors() bool { return me.String() == \"editors\" }",
"func (obj *content) IsTor() bool {\n\treturn obj.tor != nil\n}",
"func (r *BackupItem) IsArchived() bool {\n\treturn r.Status&StatusArchived == StatusArchived\n}",
"func (r *BackupItem) IsArchived() bool {\n\treturn r.Status&StatusArchived == StatusArchived\n}",
"func (o *ViewMilestone) HasIsDeleted() bool {\n\tif o != nil && o.IsDeleted != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (e *Entry) IsDeleted() bool {\n\treturn e.Latest().GetTombstone()\n}",
"func (_BaseContentSpace *BaseContentSpaceCaller) CanEdit(opts *bind.CallOpts) (bool, error) {\n\tvar out []interface{}\n\terr := _BaseContentSpace.contract.Call(opts, &out, \"canEdit\")\n\n\tif err != nil {\n\t\treturn *new(bool), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(bool)).(*bool)\n\n\treturn out0, err\n\n}",
"func (c *Content) IsTrashed() bool {\n\treturn c.Status == CONTENT_STATUS_TRASHED\n}",
"func (_options *DeleteConfigOptions) SetDraftOnly(draftOnly bool) *DeleteConfigOptions {\n\t_options.DraftOnly = core.BoolPtr(draftOnly)\n\treturn _options\n}",
"func (t *Link) IsPreviewLink(index int) (ok bool) {\n\treturn t.preview[index].Link != nil\n\n}",
"func (v *Version) IsRevision() bool {\n\treturn v.isRevision\n}",
"func (o *ViewMilestone) GetIsDeleted() bool {\n\tif o == nil || o.IsDeleted == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.IsDeleted\n}",
"func (me TAttlistCommentsCorrectionsRefType) IsRepublishedIn() bool {\n\treturn me.String() == \"RepublishedIn\"\n}",
"func (me TAttlistCommentsCorrectionsRefType) IsRepublishedFrom() bool {\n\treturn me.String() == \"RepublishedFrom\"\n}",
"func (_BaseContentFactoryExt *BaseContentFactoryExtTransactor) IsContract(opts *bind.TransactOpts, addr common.Address) (*types.Transaction, error) {\n\treturn _BaseContentFactoryExt.contract.Transact(opts, \"isContract\", addr)\n}",
"func (rev PlannerRevision) IsInitial() bool { return rev.value == \"\" }",
"func (mt *ComJossemargtSaoDraft) Validate() (err error) {\n\n\tif mt.Href == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"href\"))\n\t}\n\tif ok := goa.ValidatePattern(`[_a-zA-Z0-9\\-]+`, mt.ContestSlug); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`response.contestSlug`, mt.ContestSlug, `[_a-zA-Z0-9\\-]+`))\n\t}\n\tif ok := goa.ValidatePattern(`[_a-zA-Z0-9\\-]+`, mt.TaskSlug); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`response.taskSlug`, mt.TaskSlug, `[_a-zA-Z0-9\\-]+`))\n\t}\n\treturn\n}",
"func (mt *ComJossemargtSaoDraft) Validate() (err error) {\n\n\tif mt.Href == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"href\"))\n\t}\n\tif ok := goa.ValidatePattern(`[_a-zA-Z0-9\\-]+`, mt.ContestSlug); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`response.contestSlug`, mt.ContestSlug, `[_a-zA-Z0-9\\-]+`))\n\t}\n\tif ok := goa.ValidatePattern(`[_a-zA-Z0-9\\-]+`, mt.TaskSlug); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`response.taskSlug`, mt.TaskSlug, `[_a-zA-Z0-9\\-]+`))\n\t}\n\treturn\n}",
"func (m *sdt) IsEffective() *bool {\n\treturn m.isEffectiveField\n}",
"func (s *Space) IsArchived() bool {\n\treturn s.Type == SPACE_STATUS_ARCHIVED\n}",
"func (ref *UIElement) IsEdited() bool {\n\tret, _ := ref.BoolAttr(EditedAttribute)\n\treturn ret\n}",
"func (c *Content) IsPage() bool {\n\treturn c.Type == CONTENT_TYPE_PAGE\n}",
"func (s *TradeStage) IsDeletedOrClosed() bool {\n\tdelReq, errs := s.GetLastDeletionRequest()\n\tif errs == nil && delReq.Status == ApprovalApproved {\n\t\treturn true\n\t}\n\tcloseReq, errs := s.GetLastClosingRequest()\n\treturn errs == nil && closeReq.Status == ApprovalApproved\n}",
"func ReadPostsDraft() []models.PostsModel {\n\tdb, err := driver.Connect()\n\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn nil\n\t}\n\n\tdefer db.Close()\n\n\tvar result []models.PostsModel\n\n\titems, err := db.Query(\"select id, title, content, category, status from posts where status='Draft'\")\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn nil\n\t}\n\n\tfmt.Printf(\"%T\\n\", items)\n\n\tfor items.Next() {\n\t\tvar each = models.PostsModel{}\n\t\tvar err = items.Scan(&each.Id, &each.Title, &each.Content, &each.Category, &each.Status)\n\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\treturn nil\n\t\t}\n\n\t\tresult = append(result, each)\n\n\t}\n\n\tif err = items.Err(); err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn nil\n\t}\n\n\treturn result\n}",
"func (me THITReviewStatus) IsNotReviewed() bool { return me.String() == \"NotReviewed\" }",
"func (c *Client) NewShowDraftRequest(ctx context.Context, path string, contest *int, contestSlug *string, page *int, pageSize *int, sort *string, task *int, taskSlug *string, user *int) (*http.Request, error) {\n\tscheme := c.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"http\"\n\t}\n\tu := url.URL{Host: c.Host, Scheme: scheme, Path: path}\n\tvalues := u.Query()\n\tif contest != nil {\n\t\ttmp32 := strconv.Itoa(*contest)\n\t\tvalues.Set(\"contest\", tmp32)\n\t}\n\tif contestSlug != nil {\n\t\tvalues.Set(\"contest_slug\", *contestSlug)\n\t}\n\tif page != nil {\n\t\ttmp33 := strconv.Itoa(*page)\n\t\tvalues.Set(\"page\", tmp33)\n\t}\n\tif pageSize != nil {\n\t\ttmp34 := strconv.Itoa(*pageSize)\n\t\tvalues.Set(\"page_size\", tmp34)\n\t}\n\tif sort != nil {\n\t\tvalues.Set(\"sort\", *sort)\n\t}\n\tif task != nil {\n\t\ttmp35 := strconv.Itoa(*task)\n\t\tvalues.Set(\"task\", tmp35)\n\t}\n\tif taskSlug != nil {\n\t\tvalues.Set(\"task_slug\", *taskSlug)\n\t}\n\tif user != nil {\n\t\ttmp36 := strconv.Itoa(*user)\n\t\tvalues.Set(\"user\", tmp36)\n\t}\n\tu.RawQuery = values.Encode()\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}",
"func (me TAttlistMedlineCitationStatus) IsInDataReview() bool { return me.String() == \"In-Data-Review\" }",
"func (r *RPC) ArtDraft(c context.Context, arg *model.ArgAidMid, res *model.Draft) (err error) {\n\tvar v *model.Draft\n\tif v, err = r.s.ArtDraft(c, arg.Aid, arg.Mid); err == nil && v != nil {\n\t\t*res = *v\n\t}\n\treturn\n}",
"func (me TReviewActionStatus) IsIntended() bool { return me.String() == \"Intended\" }",
"func (o *WebModifyAccepted) IsSuccess() bool {\n\treturn true\n}",
"func (me TPubStatusUnion2) IsEpublish() bool { return me.String() == \"epublish\" }",
"func (entry *UtxoEntry) isModified() bool {\n\treturn entry.state&utxoStateModified == utxoStateModified\n}",
"func (me TxsdImpactSimpleContentExtensionType) IsAdmin() bool { return me.String() == \"admin\" }",
"func (a *DraftsApiService) DeleteDraftExecute(r ApiDeleteDraftRequest) (JsonSuccess, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodDelete\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue JsonSuccess\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"DraftsApiService.DeleteDraft\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/drafts/{draft_id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"draft_id\"+\"}\", _neturl.PathEscape(parameterToString(r.draftId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := 
_ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v JsonError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}",
"func (m *Model) IsSoftDelete() bool {\n\treturn m.SoftDelete\n}",
"func (me TpubStatusInt) IsPremedline() bool { return me.String() == \"premedline\" }",
"func Show(w http.ResponseWriter, r *http.Request) {\n\n\tuser := utils.GetAuthenticatedUser(r)\n\n\tif !user.Permissions.Glsa.View {\n\t\tauthentication.AccessDenied(w, r)\n\t\treturn\n\t}\n\n\tvar drafts []*models.Glsa\n\terr := user.CanAccess(connection.DB.Model(&drafts).\n\t\tWhere(\"type = ?\", \"draft\").\n\t\tRelation(\"Bugs\").\n\t\tRelation(\"Creator\").\n\t\tRelation(\"Comments\")).\n\t\tSelect()\n\n\tif err != nil {\n\t\tlogger.Info.Println(\"Error during draft selection\")\n\t\tlogger.Info.Println(err)\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tfor _, draft := range drafts {\n\t\tdraft.ComputeStatus(user)\n\t}\n\n\trenderDraftsTemplate(w, user, drafts)\n}",
"func (r *Repository) IsMarkdowned() bool {\n\tif r.isMd != nil {\n\t\treturn *r.isMd\n\t}\n\tres := stringIsMarkdown(r.FullDescription)\n\tr.isMd = &res\n\treturn res\n}",
"func (me TAttlistMedlineCitationStatus) IsPublisher() bool { return me.String() == \"Publisher\" }",
"func (w *W3CNode) IsDocument() bool {\n\tparent := w.ParentNode().(*W3CNode)\n\treturn w.ParentNode() != nil && parent.HTMLNode() == w.HTMLNode()\n}",
"func (m *TestModel) IsDeleted() bool {\n\treturn m.Deleted\n}",
"func (me TpubStatusInt) IsPubmedr() bool { return me.String() == \"pubmedr\" }",
"func DraftNEQ(v bool) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldNEQ(FieldDraft, v))\n}",
"func (me TAttlistJournalIssueCitedMedium) IsPrint() bool { return me.String() == \"Print\" }",
"func (sdk Sdk) IsArchived() bool {\n\treturn sdk.archiveFile() != \"\"\n}",
"func (m *Member) IsPrivate() bool { return !m.Published }",
"func (ctx *Context) IsPost() bool {\r\n\treturn ctx.Is(\"POST\")\r\n}",
"func (a *DraftsApiService) GetDraftsExecute(r ApiGetDraftsRequest) (JsonSuccess, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue JsonSuccess\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"DraftsApiService.GetDrafts\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/drafts\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif 
err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}",
"func draftTx(utxos []*Utxo, payToAddresses []*PayToAddress, opReturns []OpReturnData,\n\tprivateKey *bsvec.PrivateKey, standardRate, dataRate *bt.Fee) (uint64, error) {\n\n\t// Create the \"Draft tx\"\n\ttx, err := CreateTx(utxos, payToAddresses, opReturns, privateKey)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// Calculate the fees for the \"Draft tx\"\n\t// todo: hack to add 1 extra sat - ensuring that fee is over the minimum with rounding issues in WOC and other systems\n\tfee := CalculateFeeForTx(tx, standardRate, dataRate) + 1\n\treturn fee, nil\n}",
"func (me TPubStatusUnion3) IsPpublish() bool { return me.String() == \"ppublish\" }",
"func (r *Summary) IsAbandoned() bool {\n\treturn r.Request.TargetRef == \"\"\n}",
"func (r *Summary) IsOpen() bool {\n\treturn !r.Submitted && !r.IsAbandoned()\n}",
"func (me TAttlistMedlineCitationStatus) IsCompleted() bool { return me.String() == \"Completed\" }",
"func (me THITStatus) IsReviewing() bool { return me.String() == \"Reviewing\" }",
"func (me TReviewActionStatus) IsFailed() bool { return me.String() == \"Failed\" }",
"func (resp *ActionExportCreateResponse) IsBlocking() bool {\n\treturn resp.Response.Meta != nil && resp.Response.Meta.ActionStateId > 0\n}"
] | [
"0.76661915",
"0.72118825",
"0.62594146",
"0.60783345",
"0.57742465",
"0.5612218",
"0.5590993",
"0.5470714",
"0.53782576",
"0.5354119",
"0.5348146",
"0.5320405",
"0.5315136",
"0.52890354",
"0.5246484",
"0.52413845",
"0.52356356",
"0.5234108",
"0.52093893",
"0.52065074",
"0.5161753",
"0.51418304",
"0.5128882",
"0.5114721",
"0.5107894",
"0.5078718",
"0.5057712",
"0.50510424",
"0.50496167",
"0.5043748",
"0.50139475",
"0.49958795",
"0.49907595",
"0.4965508",
"0.49383593",
"0.49336424",
"0.4914338",
"0.4912065",
"0.49115703",
"0.4910062",
"0.49097976",
"0.4906811",
"0.4900717",
"0.48857707",
"0.4882155",
"0.48783454",
"0.48680982",
"0.48680982",
"0.48652562",
"0.48512384",
"0.48376867",
"0.48307",
"0.4826266",
"0.48229787",
"0.48197457",
"0.47974092",
"0.47838768",
"0.47806984",
"0.4777947",
"0.4774078",
"0.47677866",
"0.47677866",
"0.47642863",
"0.47544014",
"0.475215",
"0.47409853",
"0.47405693",
"0.47355437",
"0.47354683",
"0.47267443",
"0.47153732",
"0.470505",
"0.47036615",
"0.4678246",
"0.4677655",
"0.46551028",
"0.46540335",
"0.4653721",
"0.46507987",
"0.46480048",
"0.46427518",
"0.46366692",
"0.46345145",
"0.46338516",
"0.46308988",
"0.4622108",
"0.46202976",
"0.46148282",
"0.46119213",
"0.46077845",
"0.4607412",
"0.46053672",
"0.45966354",
"0.45930216",
"0.4585301",
"0.45849982",
"0.45830736",
"0.45781296",
"0.45733798",
"0.45685565"
] | 0.86880225 | 0 |
IsGlobal return true if space is global | IsGlobal возвращает true, если пространство глобальное | func (s *Space) IsGlobal() bool {
return s.Type == SPACE_TYPE_GLOBAL
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (TypesObject) IsGlobal() bool { return boolResult }",
"func (o *RiskRulesListAllOfData) HasIsGlobal() bool {\n\tif o != nil && !IsNil(o.IsGlobal) {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func IsObjectGlobal(obj *metav1.ObjectMeta) bool {\n\tif obj.Annotations == nil {\n\t\treturn false\n\t}\n\n\tif obj.Annotations[util.GlobalLabel] == \"true\" {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o *RiskRulesListAllOfData) GetIsGlobal() bool {\n\tif o == nil || IsNil(o.IsGlobal) {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.IsGlobal\n}",
"func Global() Scope {\n\treturn globalScope\n}",
"func FieldNameIsGlobal(name string) bool {\n\tfor _, n := range GlobalFieldNames() {\n\t\tif n == name {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (o *RiskRulesListAllOfData) SetIsGlobal(v bool) {\n\to.IsGlobal = &v\n}",
"func (t *Table) HasGlobalTs() bool {\n\treturn t.globalTs != 0\n}",
"func (o *RiskRulesListAllOfData) GetIsGlobalOk() (*bool, bool) {\n\tif o == nil || IsNil(o.IsGlobal) {\n\t\treturn nil, false\n\t}\n\treturn o.IsGlobal, true\n}",
"func getGlobalInfo() (globalInfo map[string]interface{}) {\n\tglobalInfo = map[string]interface{}{/*\n\t\t\"isDistXL\": globalIsDistXL,\n\t\t\"isXL\": globalIsXL,\n\t\t\"isBrowserEnabled\": globalIsBrowserEnabled,\n\t\t\"isWorm\": globalWORMEnabled,\n\t\t\"isEnvBrowser\": globalIsEnvBrowser,\n\t\t\"isEnvCreds\": globalIsEnvCreds,\n\t\t\"isEnvRegion\": globalIsEnvRegion,\n\t\t\"isSSL\": globalIsSSL,\n\t\t\"serverRegion\": globalServerRegion,\n\t\t// Add more relevant global settings here.*/\n\t}\n\n\treturn globalInfo\n}",
"func (o *WafDdosSettings) HasGlobalThreshold() bool {\n\tif o != nil && o.GlobalThreshold != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func checkGlobalSet(t *testing.T, expError bool, fabMode, vlans, vxlans string) {\n\tgl := client.Global{\n\t\tName: \"global\",\n\t\tNetworkInfraType: fabMode,\n\t\tVlans: vlans,\n\t\tVxlans: vxlans,\n\t}\n\terr := contivClient.GlobalPost(&gl)\n\tif err != nil && !expError {\n\t\tt.Fatalf(\"Error setting global {%+v}. Err: %v\", gl, err)\n\t} else if err == nil && expError {\n\t\tt.Fatalf(\"Set global {%+v} succeded while expecing error\", gl)\n\t} else if err == nil {\n\t\t// verify global state\n\t\tgotGl, err := contivClient.GlobalGet(\"global\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error getting global object. Err: %v\", err)\n\t\t}\n\n\t\t// verify expected values\n\t\tif gotGl.NetworkInfraType != fabMode || gotGl.Vlans != vlans || gotGl.Vxlans != vxlans {\n\t\t\tt.Fatalf(\"Error Got global state {%+v} does not match expected %s, %s, %s\", gotGl, fabMode, vlans, vxlans)\n\t\t}\n\n\t\t// verify the state created\n\t\tgCfg := &gstate.Cfg{}\n\t\tgCfg.StateDriver = stateStore\n\t\terr = gCfg.Read(\"\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error reading global cfg state. Err: %v\", err)\n\t\t}\n\n\t\tif gCfg.Auto.VLANs != vlans || gCfg.Auto.VXLANs != vxlans {\n\t\t\tt.Fatalf(\"global config Vlan/Vxlan ranges %s/%s are not same as %s/%s\",\n\t\t\t\tgCfg.Auto.VLANs, gCfg.Auto.VXLANs, vlans, vxlans)\n\t\t}\n\n\t\t// verify global oper state\n\t\tgOper := &gstate.Oper{}\n\t\tgOper.StateDriver = stateStore\n\t\terr = gOper.Read(\"\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error reading global oper state. Err: %v\", err)\n\t\t}\n\n\t\t// verify vxlan resources\n\t\tvxlanRsrc := &resources.AutoVXLANCfgResource{}\n\t\tvxlanRsrc.StateDriver = stateStore\n\t\tif err := vxlanRsrc.Read(\"global\"); err != nil {\n\t\t\tt.Fatalf(\"Error reading vxlan resource. 
Err: %v\", err)\n\t\t}\n\n\t\t// verify vlan resource\n\t\tvlanRsrc := &resources.AutoVLANCfgResource{}\n\t\tvlanRsrc.StateDriver = stateStore\n\t\tif err := vlanRsrc.Read(\"global\"); err != nil {\n\t\t\tt.Fatalf(\"Error reading vlan resource. Err: %v\", err)\n\t\t}\n\t}\n}",
"func (g *Group) IsOfSystem() bool {\n\t//loadConfig()\n\n\tif g.GID > config.login.SYS_GID_MIN && g.GID < config.login.SYS_GID_MAX {\n\t\treturn true\n\t}\n\treturn false\n}",
"func Is(name string, value any) bool {\n\tif global == nil {\n\t\treturn false\n\t}\n\n\treturn global.Is(name, value)\n}",
"func (v Global) Equal(other Global) bool {\n\treturn v.addressLocal.Equal(other.addressLocal) && v.addressBase.Equal(other.addressBase)\n}",
"func (L *State) GetGlobal(name string) {\n\tCk := C.CString(name)\n\tdefer C.free(unsafe.Pointer(Ck))\n\tC.lua_getglobal(L.s, Ck)\n}",
"func GlobalUser() Name {\n\treturn \"global\"\n}",
"func (m HMSketch) global() hist.Histogram {\n\thMux.Lock()\n\tlocation := hash(\"__global__\", \"__global__\")\n\tval := m.Registers[m.Index[location]]\n\thMux.Unlock()\n\treturn val\n}",
"func (p *Project) Global() []Project {\n\treturn global\n}",
"func GlobalScope() *Scope {\n\treturn globalScope\n}",
"func (p *Status) Global() []Status {\n\treturn globalstatus\n}",
"func (c GlobalConfig) IsNode() bool {\n\treturn RunMode(c.OBSMode).IsNode()\n}",
"func (ppc *PermissionsPolicyCreate) SetIsGlobal(b bool) *PermissionsPolicyCreate {\n\tppc.mutation.SetIsGlobal(b)\n\treturn ppc\n}",
"func IsSpaceRoot(r *Node) bool {\n\tpath := r.InternalPath()\n\tif spaceNameBytes, err := xattr.Get(path, xattrs.SpaceNameAttr); err == nil {\n\t\tif string(spaceNameBytes) != \"\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (v Global) IsEmpty() bool {\n\treturn v.addressLocal.IsEmpty() && v.addressBase.IsEmpty()\n}",
"func (id *authIdentity) IsGlobalRORole() bool {\n\treturn format.ContainsString(id.roles, GlobalReadOnlyRole)\n}",
"func (metadata *metadataImpl) IsGlobalDomainEnabled() bool {\n\treturn metadata.enableGlobalDomain()\n}",
"func (m *Module) NewGlobal(name string, contentType types.Type) *Global {\n\tg := NewGlobal(name, contentType)\n\tm.Globals = append(m.Globals, g)\n\treturn g\n}",
"func WithGlobal(isGlobal bool) Option {\n\treturn func(o *options) {\n\t\to.isGlobal = isGlobal\n\t}\n}",
"func NewGlobal() *Global {\n\tthis := Global{}\n\tvar logLev string = \"INF\"\n\tthis.LogLev = &logLev\n\tvar cfgPath string = \"/opt/netconfd/version/etc/netconf.json\"\n\tthis.CfgPath = &cfgPath\n\treturn &this\n}",
"func (n *Node) IsSpaceRoot(ctx context.Context) bool {\n\t_, err := n.Xattr(ctx, prefixes.SpaceNameAttr)\n\treturn err == nil\n}",
"func (s *System) IsPublic() bool { return s.Name == PublicSystem.Name || s.Name == PublicCDSystem.Name }",
"func (ev *Evaler) Global() map[string]Variable {\n\treturn map[string]Variable(ev.global)\n}",
"func (ev *Evaler) Global() map[string]Variable {\n\treturn map[string]Variable(ev.global)\n}",
"func (p *Person) Global() []Person {\n\treturn globalperson\n}",
"func (c GlobalConfig) IsOBSMaster() bool {\n\treturn RunMode(c.OBSMode).IsOBSMaster()\n}",
"func (me TxsdRegistryHandleSimpleContentExtensionRegistry) IsLocal() bool {\n\treturn me.String() == \"local\"\n}",
"func SetGlobalVariables() {\n\tglobalVars = true\n}",
"func (m *ApiMeta) GlobalFlagSet(cmd string) *flag.FlagSet {\n\tf := flag.NewFlagSet(cmd, flag.ContinueOnError)\n\n\tf.StringVar(&m.gateEndpoint, \"gate-endpoint\", \"http://localhost:8084\",\n\t\t\"Gate (API server) endpoint\")\n\n\tf.Usage = func() {}\n\n\treturn f\n}",
"func (o *Global) HasMgmt() bool {\n\tif o != nil && o.Mgmt != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (obj *Global) IsPersonalMode(ctx context.Context) (bool, error) {\n\tresult := &struct {\n\t\tReturn bool `json:\"qReturn\"`\n\t}{}\n\terr := obj.RPC(ctx, \"IsPersonalMode\", result)\n\treturn result.Return, err\n}",
"func suToGlobal(su schema.Unit) bool {\n\tu := job.Unit{\n\t\tUnit: *schema.MapSchemaUnitOptionsToUnitFile(su.Options),\n\t}\n\treturn u.IsGlobal()\n}",
"func (eb *Bus) GlobalBind(fn Bindable, name string) {\n\teb.Bind(fn, name, 0)\n}",
"func (sv *globalSystemVariables) GetGlobal(name string) (sql.SystemVariable, interface{}, bool) {\n\tsv.mutex.RLock()\n\tdefer sv.mutex.RUnlock()\n\tname = strings.ToLower(name)\n\tv, ok := systemVars[name]\n\tif !ok {\n\t\treturn sql.SystemVariable{}, nil, false\n\t}\n\n\tif v.ValueFunction != nil {\n\t\tresult, err := v.ValueFunction()\n\t\tif err != nil {\n\t\t\tlogrus.StandardLogger().Warnf(\"unable to get value for system variable %s: %s\", name, err.Error())\n\t\t\treturn v, nil, true\n\t\t}\n\t\treturn v, result, true\n\t}\n\n\t// convert any set types to strings\n\tsysVal := sv.sysVarVals[name]\n\tif sysType, ok := v.Type.(sql.SetType); ok {\n\t\tif sv, ok := sysVal.Val.(uint64); ok {\n\t\t\tvar err error\n\t\t\tsysVal.Val, err = sysType.BitsToString(sv)\n\t\t\tif err != nil {\n\t\t\t\treturn sql.SystemVariable{}, nil, false\n\t\t\t}\n\t\t}\n\t}\n\treturn v, sysVal.Val, true\n}",
"func GlobalMemoryStatusEx(lpBuffer *MEMORYSTATUSEX) bool {\n\tret1 := syscall3(globalMemoryStatusEx, 1,\n\t\tuintptr(unsafe.Pointer(lpBuffer)),\n\t\t0,\n\t\t0)\n\treturn ret1 != 0\n}",
"func RegisterGlobal() {\n\tvalidate.AddGlobalMessages(Data)\n}",
"func RegisterGlobal() {\n\tvalidate.AddGlobalMessages(Data)\n}",
"func TestGlobalSetting(t *testing.T) {\n\t// try basic modification\n\tcheckGlobalSet(t, false, \"default\", \"1-4094\", \"1-10000\")\n\t// set aci mode\n\tcheckGlobalSet(t, false, \"aci\", \"1-4094\", \"1-10000\")\n\n\t// modify vlan/vxlan range\n\tcheckGlobalSet(t, false, \"default\", \"1-1000\", \"1001-2000\")\n\n\t// try invalid fabric mode\n\tcheckGlobalSet(t, true, \"xyz\", \"1-4094\", \"1-10000\")\n\n\t// try invalid vlan/vxlan range\n\tcheckGlobalSet(t, true, \"default\", \"1-5000\", \"1-10000\")\n\tcheckGlobalSet(t, true, \"default\", \"0-4094\", \"1-10000\")\n\tcheckGlobalSet(t, true, \"default\", \"1\", \"1-10000\")\n\tcheckGlobalSet(t, true, \"default\", \"1?2\", \"1-10000\")\n\tcheckGlobalSet(t, true, \"default\", \"abcd\", \"1-10000\")\n\tcheckGlobalSet(t, true, \"default\", \"1-4094\", \"1-100000\")\n\tcheckGlobalSet(t, true, \"default\", \"1-4094\", \"1-20000\")\n\n\t// reset back to default values\n\tcheckGlobalSet(t, false, \"default\", \"1-4094\", \"1-10000\")\n}",
"func (c GlobalConfig) IsOBS() bool {\n\treturn RunMode(c.OBSMode).IsOBS()\n}",
"func ExposeGlobal(id string, x interface{}) {\n\tglobal.define(sym(id), wrapGo(x))\n}",
"func isNamespaced(r corev3.Resource) bool {\n\tgr, ok := r.(corev3.GlobalResource)\n\tif !ok {\n\t\treturn true\n\t}\n\treturn !gr.IsGlobalResource()\n}",
"func globalIntern(s string) string {\n\tglobalInternMu.Lock()\n\tdefer globalInternMu.Unlock()\n\treturn globalInternMap.Intern(s)\n}",
"func Global() Value {\n\tpanic(message)\n}",
"func (c Config) GetGlobal(option string) (string, error) {\n\tif globals, ok := c[Globals]; ok {\n\t\tif settings, ok := globals.(map[string]string); ok {\n\t\t\tif value, ok := settings[option]; ok {\n\t\t\t\treturn value, nil\n\t\t\t}\n\t\t\treturn \"\", fmt.Errorf(\"global setting for %s not found\", option)\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"no global config options found\")\n\n}",
"func (b *BaseDevice) GlobalBounds() Rect {\n\tvar origin = b.Origin()\n\tvar bounds Rect\n\tbounds.SetXYWH(origin.X, origin.Y, b.Width(), b.Height())\n\treturn bounds\n}",
"func CreateGlobal(stateDriver core.StateDriver, gc *intent.ConfigGlobal) error {\n\tlog.Infof(\"Received global create with intent {%v}\", gc)\n\tvar err error\n\tgcfgUpdateList := []string{}\n\n\tmasterGc := &mastercfg.GlobConfig{}\n\tmasterGc.StateDriver = stateDriver\n\tmasterGc.Read(\"global\")\n\n\tgstate.GlobalMutex.Lock()\n\tdefer gstate.GlobalMutex.Unlock()\n\tgCfg := &gstate.Cfg{}\n\tgCfg.StateDriver = stateDriver\n\tgCfg.Read(\"global\")\n\n\t// check for valid values\n\tif gc.NwInfraType != \"\" {\n\t\tswitch gc.NwInfraType {\n\t\tcase \"default\", \"aci\", \"aci-opflex\":\n\t\t\t// These values are acceptable.\n\t\tdefault:\n\t\t\treturn errors.New(\"Invalid fabric mode\")\n\t\t}\n\t\tmasterGc.NwInfraType = gc.NwInfraType\n\t}\n\tif gc.VLANs != \"\" {\n\t\t_, err := netutils.ParseTagRanges(gc.VLANs, \"vlan\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgCfg.Auto.VLANs = gc.VLANs\n\t\tgcfgUpdateList = append(gcfgUpdateList, \"vlan\")\n\t}\n\n\tif gc.VXLANs != \"\" {\n\t\t_, err = netutils.ParseTagRanges(gc.VXLANs, \"vxlan\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgCfg.Auto.VXLANs = gc.VXLANs\n\t\tgcfgUpdateList = append(gcfgUpdateList, \"vxlan\")\n\t}\n\n\tif gc.FwdMode != \"\" {\n\t\tmasterGc.FwdMode = gc.FwdMode\n\t}\n\n\tif gc.ArpMode != \"\" {\n\t\tmasterGc.ArpMode = gc.ArpMode\n\t}\n\n\tif gc.PvtSubnet != \"\" {\n\t\tmasterGc.PvtSubnet = gc.PvtSubnet\n\t}\n\n\tif len(gcfgUpdateList) > 0 {\n\t\t// Delete old state\n\n\t\tgOper := &gstate.Oper{}\n\t\tgOper.StateDriver = stateDriver\n\t\terr = gOper.Read(\"\")\n\t\tif err == nil {\n\t\t\tfor _, res := range gcfgUpdateList {\n\t\t\t\terr = gCfg.UpdateResources(res)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\n\t\t\tfor _, res := range gcfgUpdateList {\n\t\t\t\t// setup resources\n\t\t\t\terr = gCfg.Process(res)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"Error updating the config %+v. 
Error: %s\", gCfg, err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\terr = gCfg.Write()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error updating global config.Error: %s\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn masterGc.Write()\n}",
"func (c *client) Global() Interface {\n\treturn &internalClient{\n\t\tsvc: c.services,\n\t}\n}",
"func (l Location) IsSystem() bool {\n\treturn l.Station == nil && l.Structure == nil\n}",
"func (c *Config) isGlobalReceiver(label map[string]string) bool {\n\n\tif c.globalReceiverSelector != nil {\n\t\tfor k, expected := range c.globalReceiverSelector.MatchLabels {\n\t\t\tif v, exists := label[k]; exists && v == expected {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}",
"func Global(file string) error {\n\tconfig, err := Read(file)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tglobal = config\n\n\treturn nil\n}",
"func DeleteGlobal(stateDriver core.StateDriver) error {\n\tmasterGc := &mastercfg.GlobConfig{}\n\tmasterGc.StateDriver = stateDriver\n\terr := masterGc.Read(\"\")\n\tif err == nil {\n\t\terr = masterGc.Clear()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Setup global state\n\tgCfg := &gstate.Cfg{}\n\tgCfg.StateDriver = stateDriver\n\terr = gCfg.Read(\"\")\n\tif err == nil {\n\t\terr = gCfg.DeleteResources(\"vlan\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = gCfg.DeleteResources(\"vxlan\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = gCfg.Clear()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Delete old state\n\tgOper := &gstate.Oper{}\n\tgOper.StateDriver = stateDriver\n\terr = gOper.Read(\"\")\n\tif err == nil {\n\t\terr = gOper.Clear()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func getGlobalObject() Uint32_t {\n\tif Jerry_value_is_null(globalObj) {\n\t\tglobalObj = Jerry_get_global_object()\n\t}\n\treturn globalObj\n}",
"func NewGlobal() *Environment {\n\treturn New(nil)\n}",
"func GetGlobalStatus() models.ControllerStatuses {\n\treturn globalStatus.GetStatusModel()\n}",
"func GetGlobalStatus() models.ControllerStatuses {\n\treturn globalStatus.GetStatusModel()\n}",
"func Global() *rand.Rand {\n\tgMu.Lock()\n\trng := gRand\n\tgMu.Unlock()\n\treturn rng\n}",
"func (s *Swarm32) GlobalPosition() []float32 {\n\treturn s.globalposition\n}",
"func NewGlobalCfg(allowRepoCfg bool, mergeableReq bool, approvedReq bool) GlobalCfg {\n\treturn NewGlobalCfgFromArgs(GlobalCfgArgs{\n\t\tAllowRepoCfg: allowRepoCfg,\n\t\tMergeableReq: mergeableReq,\n\t\tApprovedReq: approvedReq,\n\t})\n}",
"func (c *Compiler) getGlobalInfo(g *ssa.Global) globalInfo {\n\tinfo := globalInfo{}\n\tif strings.HasPrefix(g.Name(), \"C.\") {\n\t\t// Created by CGo: such a name cannot be created by regular C code.\n\t\tinfo.linkName = g.Name()[2:]\n\t\tinfo.extern = true\n\t} else {\n\t\t// Pick the default linkName.\n\t\tinfo.linkName = g.RelString(nil)\n\t\t// Check for //go: pragmas, which may change the link name (among\n\t\t// others).\n\t\tdoc := c.astComments[info.linkName]\n\t\tif doc != nil {\n\t\t\tinfo.parsePragmas(doc)\n\t\t}\n\t}\n\treturn info\n}",
"func (o PriorityClassOutput) GlobalDefault() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v *PriorityClass) pulumi.BoolOutput { return v.GlobalDefault }).(pulumi.BoolOutput)\n}",
"func (this *ExDomain) IsGround() bool {\n\treturn this.Min == this.Max\n}",
"func HaveIPv6GlobalAddress() (bool, error) {\n\tifs, err := net.Interfaces()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tfor i := range ifs {\n\t\tiface := &ifs[i]\n\t\tif !isUp(iface) || isLoopback(iface) {\n\t\t\tcontinue\n\t\t}\n\t\taddrs, err := iface.Addrs()\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, a := range addrs {\n\t\t\tipnet, ok := a.(*net.IPNet)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif ipnet.IP.To4() != nil || !ipnet.IP.IsGlobalUnicast() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}",
"func (c *Compiler) getGlobal(g *ssa.Global) llvm.Value {\n\tinfo := c.getGlobalInfo(g)\n\tllvmGlobal := c.mod.NamedGlobal(info.linkName)\n\tif llvmGlobal.IsNil() {\n\t\tllvmType := c.getLLVMType(g.Type().(*types.Pointer).Elem())\n\t\tllvmGlobal = llvm.AddGlobal(c.mod, llvmType, info.linkName)\n\t\tif !info.extern {\n\t\t\tllvmGlobal.SetInitializer(llvm.ConstNull(llvmType))\n\t\t\tllvmGlobal.SetLinkage(llvm.InternalLinkage)\n\t\t}\n\t}\n\treturn llvmGlobal\n}",
"func (rg *CFGoReadGlobals) Read(stateNode *snreader.StateNode, input snreader.InputItf) (isEnd bool, err error) {\n\tlex := read(input)\n\n\tif ignoreWithoutBreakline(lex) {\n\t\treturn false, nil\n\t}\n\n\tif rg.first {\n\t\trg.first, rg.varType = false, lex.Value\n\t\treturn false, nil\n\t}\n\n\tif rg.mutiStatus == mutiStatusUnknown {\n\t\tif lex.Equal(ConstLeftParentheses) {\n\t\t\trg.mutiStatus = mutiStatusMuti\n\t\t\treturn false, nil\n\t\t}\n\n\t\trg.mutiStatus = mutiStatusSingle\n\t}\n\n\trg.countLonlyBrackets(lex)\n\n\tif rg.mutiStatus == mutiStatusMuti && rg.lleftPNum == -1 {\n\t\tif rg.gName == \"\" {\n\t\t\treturn true, nil\n\t\t}\n\n\t\treturn true, rg.addGlobal(lex)\n\t}\n\n\tif lex.Equal(ConstBreakLine) {\n\t\tif rg.lleftCNum != 0 {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tif rg.gName == \"\" && rg.mutiStatus == mutiStatusMuti {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn rg.mutiStatus == mutiStatusSingle, rg.addGlobal(lex)\n\t}\n\n\tif rg.gName == \"\" {\n\t\tif !lex_pgl.IsIdent(lex) {\n\t\t\treturn true, onErr(rg, lex, \"var name want a ident\")\n\t\t}\n\n\t\trg.gName = lex.Value\n\t\treturn false, nil\n\t}\n\n\trg.gCode = append(rg.gCode, lex)\n\treturn false, nil\n}",
"func (this *KeyspaceTerm) IsSystem() bool {\n\treturn this.path != nil && this.path.IsSystem()\n}",
"func (o *SyntheticsGlobalVariableParseTestOptions) HasLocalVariableName() bool {\n\treturn o != nil && o.LocalVariableName != nil\n}",
"func UpdateGlobal(stateDriver core.StateDriver, gc *intent.ConfigGlobal) error {\n\tlog.Infof(\"Received global update with intent {%v}\", gc)\n\tvar err error\n\tgcfgUpdateList := []string{}\n\n\tmasterGc := &mastercfg.GlobConfig{}\n\tmasterGc.StateDriver = stateDriver\n\tmasterGc.Read(\"global\")\n\n\tgstate.GlobalMutex.Lock()\n\tdefer gstate.GlobalMutex.Unlock()\n\n\tgCfg := &gstate.Cfg{}\n\tgCfg.StateDriver = stateDriver\n\tgCfg.Read(\"global\")\n\n\t_, vlansInUse := gCfg.GetVlansInUse()\n\t_, vxlansInUse := gCfg.GetVxlansInUse()\n\n\t// check for valid values\n\tif gc.NwInfraType != \"\" {\n\t\tswitch gc.NwInfraType {\n\t\tcase \"default\", \"aci\", \"aci-opflex\":\n\t\t\t// These values are acceptable.\n\t\tdefault:\n\t\t\treturn errors.New(\"Invalid fabric mode\")\n\t\t}\n\t\tmasterGc.NwInfraType = gc.NwInfraType\n\t}\n\tif gc.VLANs != \"\" {\n\n\t\tif !gCfg.CheckInBitRange(gc.VLANs, vlansInUse, \"vlan\") {\n\t\t\treturn fmt.Errorf(\"cannot update the vlan range due to existing vlans %s\", vlansInUse)\n\t\t}\n\t\t_, err := netutils.ParseTagRanges(gc.VLANs, \"vlan\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgCfg.Auto.VLANs = gc.VLANs\n\t\tgcfgUpdateList = append(gcfgUpdateList, \"vlan\")\n\t}\n\n\tif gc.VXLANs != \"\" {\n\t\tif !gCfg.CheckInBitRange(gc.VXLANs, vxlansInUse, \"vxlan\") {\n\t\t\treturn fmt.Errorf(\"cannot update the vxlan range due to existing vxlans %s\", vxlansInUse)\n\t\t}\n\n\t\t_, err = netutils.ParseTagRanges(gc.VXLANs, \"vxlan\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgCfg.Auto.VXLANs = gc.VXLANs\n\t\tgcfgUpdateList = append(gcfgUpdateList, \"vxlan\")\n\t}\n\n\tif gc.FwdMode != \"\" {\n\t\tmasterGc.FwdMode = gc.FwdMode\n\t}\n\n\tif gc.ArpMode != \"\" {\n\t\tmasterGc.ArpMode = gc.ArpMode\n\t}\n\n\tif gc.PvtSubnet != \"\" {\n\t\tmasterGc.PvtSubnet = gc.PvtSubnet\n\t}\n\n\tif len(gcfgUpdateList) > 0 {\n\t\t// Delete old state\n\n\t\tgOper := &gstate.Oper{}\n\t\tgOper.StateDriver = stateDriver\n\t\terr = 
gOper.Read(\"\")\n\t\tif err == nil {\n\t\t\tfor _, res := range gcfgUpdateList {\n\t\t\t\terr = gCfg.UpdateResources(res)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\terr = gCfg.Write()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error updating global config.Error: %s\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn masterGc.Write()\n}",
"func Bool() bool {\n\treturn global.Bool()\n}",
"func (o *StorageHitachiPortAllOf) HasIpv6GlobalAddress() bool {\n\tif o != nil && o.Ipv6GlobalAddress != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (m *PoolModule) globals() []*modulestypes.VMMember {\n\treturn []*modulestypes.VMMember{}\n}",
"func HasProp(name string) bool {\n\tif global == nil {\n\t\treturn false\n\t}\n\n\treturn global.HasProp(name)\n}",
"func (m *Manager) Exists(globalID string) bool {\n\tcount, _ := m.collection.Find(bson.M{\"globalid\": globalID}).Count()\n\n\treturn count == 1\n}",
"func (m *mqService) SetQOSGlobal(global bool) ConfigFunc {\n\treturn func(qos *ConfigQOS) {\n\t\tqos.Global = global\n\t}\n}",
"func GetGlobalFilter() *GlobalFilter {\n\tgfOnce.Do(func() {\n\t\tGfi = GetNewGlobalFilter()\n\t})\n\treturn Gfi\n}",
"func Contains(key string) bool {\n\t_, contains := getFromMode(key)\n\tif !contains {\n\t\t_, contains = getFromGlobal(key)\n\t}\n\treturn contains\n}",
"func GlobalAffinity(key string, weight int32) *Affinity {\n\treturn &Affinity{\n\t\tScope: &resmgr.Expression{\n\t\t\tOp: resmgr.AlwaysTrue, // evaluate against all containers\n\t\t},\n\t\tMatch: &resmgr.Expression{\n\t\t\tKey: key,\n\t\t\tOp: resmgr.Exists,\n\t\t},\n\t\tWeight: weight,\n\t}\n}",
"func CheckGlobalVarFlags() []error {\n\tfmt.Fprint(os.Stdout, \" ↳ checking flags from global vars\\n\")\n\terrors := []error{}\n\tpflag.CommandLine.VisitAll(func(f *pflag.Flag) {\n\t\terrors = append(errors, fmt.Errorf(\"flag %q is invalid, please don't register any flag under the global variable \\\"CommandLine\\\"\", f.Name))\n\t})\n\treturn errors\n}",
"func (c *Config) IsGogs() bool {\n\treturn c.Gogs.Server != \"\"\n}",
"func RegisterGlobalStatSlot(slot base.StatSlot) {\n\tfor _, s := range globalStatSlot {\n\t\tif s.Name() == slot.Name() {\n\t\t\treturn\n\t\t}\n\t}\n\tglobalStatSlot = append(globalStatSlot, slot)\n}",
"func GetGlobalConfig(key string) interface{} {\n\tglobalConfig.RLock()\n\tvalue := globalConfig.gMap[key]\n\tglobalConfig.RUnlock()\n\treturn value\n}",
"func (m *RPCModule) globals() []*types.VMMember {\n\treturn []*types.VMMember{}\n}",
"func (m *DHTModule) globals() []*modulestypes.VMMember {\n\treturn []*modulestypes.VMMember{}\n}",
"func (obj *object) IsGround() bool {\n\treturn obj.ground == 2*len(obj.keys)\n}",
"func HasSection(section string) bool {\n\tif global == nil {\n\t\treturn false\n\t}\n\n\treturn global.HasSection(section)\n}",
"func IsHot() bool {\n\treturn js.Global.Get(\"hmrTrue\") != nil\n}",
"func (gfn *globalGFN) active() bool {\n\treturn gfn.lookup.Load() || gfn.timedLookup.Load()\n}",
"func (Var) IsGround() bool {\n\treturn false\n}",
"func (c Config) SetGlobal(option, value string) {\n\tif globals, ok := c[Globals]; ok {\n\t\tif settings, ok := globals.(map[string]string); ok {\n\t\t\tsettings[option] = value\n\t\t}\n\t}\n}",
"func (g *Group) IsRoot() bool {\n\treturn g == g.db.root\n}",
"func (o *Global) HasCfgPath() bool {\n\tif o != nil && o.CfgPath != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}"
] | [
"0.8007148",
"0.6786136",
"0.66457736",
"0.6634526",
"0.6249949",
"0.6210618",
"0.60924387",
"0.6089236",
"0.6016408",
"0.58948666",
"0.5890985",
"0.5815646",
"0.57908267",
"0.57907593",
"0.5763864",
"0.5744893",
"0.573173",
"0.56694597",
"0.563088",
"0.5604707",
"0.5591501",
"0.5560916",
"0.55558574",
"0.5534069",
"0.55292934",
"0.55116874",
"0.5465284",
"0.5461675",
"0.5443514",
"0.5441363",
"0.5414738",
"0.539372",
"0.53819513",
"0.53819513",
"0.53705585",
"0.53497887",
"0.5338675",
"0.53330666",
"0.53312945",
"0.5315481",
"0.5313717",
"0.53061914",
"0.52847546",
"0.5276259",
"0.5267005",
"0.5265382",
"0.5265382",
"0.5262785",
"0.5256729",
"0.524963",
"0.524641",
"0.5237319",
"0.5230826",
"0.5215597",
"0.5213414",
"0.52010673",
"0.51977277",
"0.5187198",
"0.5184759",
"0.5158595",
"0.51535285",
"0.51454145",
"0.5131184",
"0.5121408",
"0.5121408",
"0.51183295",
"0.50792426",
"0.50785524",
"0.5076095",
"0.5072171",
"0.50693434",
"0.50573117",
"0.50500524",
"0.5031997",
"0.50266427",
"0.50154203",
"0.50147176",
"0.5012861",
"0.50085944",
"0.49983406",
"0.4995693",
"0.4991699",
"0.49823466",
"0.49808386",
"0.49786085",
"0.49750242",
"0.49743977",
"0.49507248",
"0.49481508",
"0.49481326",
"0.49436828",
"0.4933824",
"0.49330315",
"0.49328044",
"0.49268943",
"0.49259827",
"0.49240467",
"0.4914362",
"0.4908129",
"0.49047327"
] | 0.8851892 | 0 |
IsPersonal return true if space is personal | IsPersonal возвращает true, если пространство личное | func (s *Space) IsPersonal() bool {
return s.Type == SPACE_TYPE_PERSONAL
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (obj *Global) IsPersonalMode(ctx context.Context) (bool, error) {\n\tresult := &struct {\n\t\tReturn bool `json:\"qReturn\"`\n\t}{}\n\terr := obj.RPC(ctx, \"IsPersonalMode\", result)\n\treturn result.Return, err\n}",
"func (s *System) IsPublic() bool { return s.Name == PublicSystem.Name || s.Name == PublicCDSystem.Name }",
"func (me TxsdContactType) IsPerson() bool { return me.String() == \"person\" }",
"func (me TxsdMarkerTypeMarkerUnits) IsUserSpace() bool { return me.String() == \"userSpace\" }",
"func (me TxsdContactRole) IsTech() bool { return me.String() == \"tech\" }",
"func (me TrestrictionType) IsPublic() bool { return me.String() == \"public\" }",
"func (me TartIdTypeInt) IsBookaccession() bool { return me.String() == \"bookaccession\" }",
"func (n UsernsMode) IsPrivate() bool {\n\treturn !n.IsHost()\n}",
"func (me TrestrictionType) IsPrivate() bool { return me.String() == \"private\" }",
"func (me TAttlistGeneralNoteOwner) IsPip() bool { return me.String() == \"PIP\" }",
"func (me TxsdClipPathTypeClipPathUnits) IsUserSpace() bool { return me.String() == \"userSpace\" }",
"func (me TxsdPaymentMechanism) IsPr() bool { return me.String() == \"PR\" }",
"func (me TxsdContactType) IsOrganization() bool { return me.String() == \"organization\" }",
"func (me TxsdSpace) IsDefault() bool { return me.String() == \"default\" }",
"func (me TpubStatusInt) IsPubmedr() bool { return me.String() == \"pubmedr\" }",
"func (me TxsdSystemCategory) IsInfrastructure() bool { return me.String() == \"infrastructure\" }",
"func (n UTSMode) IsPrivate() bool {\n\treturn !n.IsHost()\n}",
"func isPrivate(ns specs.LinuxNamespaceType, mode string) bool {\n\tswitch ns {\n\tcase specs.IPCNamespace:\n\t\treturn mode == \"private\"\n\tcase specs.NetworkNamespace, specs.PIDNamespace:\n\t\treturn !(isHost(mode) || isContainer(mode))\n\tcase specs.UserNamespace, specs.UTSNamespace:\n\t\treturn !(isHost(mode))\n\t}\n\treturn false\n}",
"func WiFiSecurityTypePWpaPersonal() *WiFiSecurityType {\n\tv := WiFiSecurityTypeVWpaPersonal\n\treturn &v\n}",
"func (me TxsdMarkerTypeMarkerUnits) IsUserSpaceOnUse() bool { return me.String() == \"userSpaceOnUse\" }",
"func (me TAttlistMedlineCitationOwner) IsPip() bool { return me.String() == \"PIP\" }",
"func (me TSAFTPTSourceBilling) IsP() bool { return me.String() == \"P\" }",
"func (me TAttlistDescriptorNameType) IsGeographic() bool { return me.String() == \"Geographic\" }",
"func isPublic(name string) bool {\n\tup := bytes.ToUpper([]byte(name))\n\treturn []byte(name)[0] == up[0]\n}",
"func (me TSAFTPTSourcePayment) IsP() bool { return me.String() == \"P\" }",
"func (s *Drive) Local() bool { return s.config.OAuth.ClientID == \"\" }",
"func (domain Domain) IsPrivate() bool {\n\treturn domain.Type == constant.PrivateDomain\n}",
"func (me TxsdImpactSimpleContentExtensionType) IsUser() bool { return me.String() == \"user\" }",
"func (me TpubStatusInt) IsPmcr() bool { return me.String() == \"pmcr\" }",
"func (me TxsdWorkType) IsOu() bool { return me.String() == \"OU\" }",
"func (me TAttlistKeywordListOwner) IsPip() bool { return me.String() == \"PIP\" }",
"func (me TAttlistELocationIDEIdType) IsPii() bool { return me.String() == \"pii\" }",
"func (h *Headers) IsPublic() bool {\n\treturn h.public\n}",
"func (n PidMode) IsPrivate() bool {\n\treturn !(n.IsHost() || n.IsContainer())\n}",
"func (m *Member) IsPrivate() bool { return !m.Published }",
"func (stringEntry *String) IsPersistant() bool {\n\treturn stringEntry.isPersistant\n}",
"func (c CgroupnsMode) IsPrivate() bool {\n\treturn c == CgroupnsModePrivate\n}",
"func (m *Member) IsGuest() bool { return m.Role == MemberRoleGuest }",
"func (me TxsdContactRole) IsCreator() bool { return me.String() == \"creator\" }",
"func (me TxsdTaxAccountingBasis) IsP() bool { return me.String() == \"P\" }",
"func (me TxsdCounterSimpleContentExtensionType) IsOrganization() bool {\n\treturn me.String() == \"organization\"\n}",
"func (me TxsdPaymentMechanism) IsOu() bool { return me.String() == \"OU\" }",
"func (n IpcMode) IsPrivate() bool {\n\treturn n == IPCModePrivate\n}",
"func (domain Domain) IsShared() bool {\n\treturn domain.Type == constant.SharedDomain\n}",
"func isProvisioned(ctx context.Context) (bool, error) {\n\treturn len(service.DefaultPermissions.FindRulesByRoleID(permissions.EveryoneRoleID)) > 0 &&\n\t\tlen(service.DefaultPermissions.FindRulesByRoleID(permissions.AdminsRoleID)) > 0, nil\n}",
"func (me TAttlistMedlineCitationStatus) IsPublisher() bool { return me.String() == \"Publisher\" }",
"func (me TAttlistArticlePubModel) IsPrintElectronic() bool { return me.String() == \"Print-Electronic\" }",
"func (h *Headers) IsPrivate() bool {\n\treturn h.private\n}",
"func (me TxsdWorkType) IsPf() bool { return me.String() == \"PF\" }",
"func (s *Space) IsGlobal() bool {\n\treturn s.Type == SPACE_TYPE_GLOBAL\n}",
"func (s *Service) IsManaged(ctx context.Context) (bool, error) {\n\tzoneSpec, _, _ := s.Scope.PrivateDNSSpec()\n\tif zoneSpec == nil {\n\t\treturn false, errors.Errorf(\"no private dns zone spec available\")\n\t}\n\n\tresult, err := s.zoneGetter.Get(ctx, zoneSpec)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tzone, ok := result.(privatedns.PrivateZone)\n\tif !ok {\n\t\treturn false, errors.Errorf(\"%T is not a privatedns.PrivateZone\", zone)\n\t}\n\n\ttags := converters.MapToTags(zone.Tags)\n\treturn tags.HasOwned(s.Scope.ClusterName()), nil\n}",
"func (n UsernsMode) IsHost() bool {\n\treturn n == \"host\"\n}",
"func (me TpubStatusInt) IsPubmed() bool { return me.String() == \"pubmed\" }",
"func (me TxsdContactRole) IsAdmin() bool { return me.String() == \"admin\" }",
"func (m *MMSObjectPolicyManager) serveOrg(polOrg string) bool {\n\tm.spMapLock.Lock()\n\tdefer m.spMapLock.Unlock()\n\n\tfor _, sp := range m.ServedOrgs {\n\t\tif sp.BusinessPolOrg == polOrg {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (me TAttlistArticlePubModel) IsElectronicPrint() bool { return me.String() == \"Electronic-Print\" }",
"func (me TpubStatusInt) IsPmc() bool { return me.String() == \"pmc\" }",
"func (me TxsdImpactSimpleContentExtensionType) IsAdmin() bool { return me.String() == \"admin\" }",
"func (user *User) IsLocal() bool {\n\treturn user.Role == UserRoleLocal\n}",
"func (s *Drive) Persistent() bool { return s.config.OAuth.ClientID != \"\" }",
"func (m *Member) IsOwner() bool { return m.Role == MemberRoleOwner }",
"func (me TSAFTPTSourcePayment) IsM() bool { return me.String() == \"M\" }",
"func (p *provision) isCloud() bool {\n\treturn p.IsGCPManaged && p.runtimeType == \"CLOUD\"\n}",
"func hasProdaccess(hostname string) bool {\n\treturn (hostname == \"avellanos\" || hostname == \"montero\")\n}",
"func (me TAttlistArticlePubModel) IsElectronic() bool { return me.String() == \"Electronic\" }",
"func (p *Provider) IsPrivate() bool {\n\treturn p.key != nil\n}",
"func (me TAttlistArticlePubModel) IsPrint() bool { return me.String() == \"Print\" }",
"func (m *AndroidManagedStoreApp) GetIsPrivate()(*bool) {\n val, err := m.GetBackingStore().Get(\"isPrivate\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*bool)\n }\n return nil\n}",
"func (u User) IsSystem() bool {\n\treturn u.HasRole(SystemRole)\n}",
"func (gt GtwyMgr) IsPermitted(ctx context.Context, appcontext, remoteAddress string) (bool, error) {\n\tif EnvDebugOn {\n\t\tlblog.LogEvent(\"GtwyMgr\", \"IsPermitted\", \"info\", \"start\")\n\t}\n\n\t//check the approval list\n\tq := datastore.NewQuery(gt.bc.GetConfigValue(ctx, \"EnvGtwayDsKind\")).\n\t\tNamespace(gt.bc.GetConfigValue(ctx, \"EnvGtwayDsNamespace\")).\n\t\tFilter(\"appcontext =\", appcontext).\n\t\tFilter(\"remoteaddress =\", remoteAddress).\n\t\tKeysOnly()\n\n\t//get the count\n\tn, err := gt.ds.Count(ctx, q)\n\t//if there was an error return it and false\n\tif err != nil {\n\t\tif err != datastore.ErrNoSuchEntity {\n\t\t\treturn false, err\n\t\t}\n\t\treturn false, nil\n\t}\n\n\t//return false if the count was zero\n\tif n == 0 {\n\t\treturn false, nil\n\t}\n\n\tif EnvDebugOn {\n\t\tlblog.LogEvent(\"GtwyMgr\", \"IsPermitted\", \"info\", strconv.Itoa(n))\n\t\tlblog.LogEvent(\"GtwyMgr\", \"IsPermitted\", \"info\", \"end\")\n\t}\n\n\t//otherwise the address is valid\n\treturn true, nil\n}",
"func IsCorp(hostname string) bool {\n\treturn (hostname == \"avellanos\" || hostname == \"montero\" || hostname == \"monygham\")\n}",
"func (m *MMSObjectPolicyManager) hasOrg(org string) bool {\n\tif _, ok := m.orgMap[org]; ok {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (me TxsdSpace) IsPreserve() bool { return me.String() == \"preserve\" }",
"func IsPublic(request *http.Request) bool {\n\tif request == nil {\n\t\treturn true\n\t}\n\t//when condition is true then it is public\n\treturn request.Header.Get(headerXPublic) == \"true\"\n}",
"func isPublic(c echo.Context) bool {\n\tpublic, ok := c.Get(publicContextKey).(bool)\n\tif ok && public {\n\t\treturn true\n\t}\n\n\t// TODO Allow select methods on public urls. For now only support GET to these URLs\n\tif c.Request().Method != http.MethodGet {\n\t\treturn false\n\t}\n\n\trequestPath := c.Request().URL.Path\n\tglog.V(2).Infof(\"Checking if prefix '%s' is public\", requestPath)\n\n\tif PublicPathsWatcher == nil {\n\t\tglog.Errorf(\"Failed to initialize public-path manager\")\n\t\treturn false\n\t}\n\n\tpublicPathPrefixes := PublicPathsWatcher.GetPublicPaths()\n\tfor _, prefix := range publicPathPrefixes {\n\t\tif strings.HasPrefix(requestPath, prefix) {\n\t\t\tc.Set(publicContextKey, true)\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (o *StoragePhysicalDiskAllOf) HasSecured() bool {\n\tif o != nil && o.Secured != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (me TxsdNodeRoleSimpleContentExtensionCategory) IsP2P() bool { return me.String() == \"p2p\" }",
"func (me TpubStatusInt) IsMedliner() bool { return me.String() == \"medliner\" }",
"func (t Token) Prod() bool {\n\treturn !strings.HasPrefix(string(t), \"test_\") &&\n\t\t!strings.HasPrefix(string(t), \"api_sandbox.\")\n}",
"func (me TSAFTPTSourceBilling) IsM() bool { return me.String() == \"M\" }",
"func (o *Transaction) HasPersonalFinanceCategory() bool {\n\tif o != nil && o.PersonalFinanceCategory.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (s *Service) IsManaged(ctx context.Context) (bool, error) {\n\treturn true, nil\n}",
"func (t *Task) IsPrivate() bool {\n\t// I like the idea of not needing to put an astrick next to a task\n\t// ... Descriptions automagically qualify for \"important tasks\"\n\t// No descriptions means it's filler, or private\n\t// Summaries WITH private: true are private\n\tif t.Summary == \"\" || t.Private {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func amAdmin() bool {\n\t_, err := os.Open(\"\\\\\\\\.\\\\PHYSICALDRIVE0\")\n\tif err != nil {\n\t\treturn false\t//not admin\n\t}\n\treturn true\t\t//admin\n}",
"func (fe *FileEncryptionProperties) IsUtilized() bool { return fe.utilized }",
"func (u *User) IsLocal() bool {\n\treturn u.LoginSource <= 0\n}",
"func (_ERC20Pausable *ERC20PausableCaller) IsPauser(opts *bind.CallOpts, account common.Address) (bool, error) {\n\tvar out []interface{}\n\terr := _ERC20Pausable.contract.Call(opts, &out, \"isPauser\", account)\n\n\tif err != nil {\n\t\treturn *new(bool), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(bool)).(*bool)\n\n\treturn out0, err\n\n}",
"func (typ *Type) IsPublic() bool {\n\treturn isPublicName(typ.Name)\n}",
"func (h BareTlfHandle) IsPublic() bool {\n\treturn len(h.Readers) == 1 && h.Readers[0].Equal(keybase1.PublicUID)\n}",
"func (me TxsdContactRole) IsIrt() bool { return me.String() == \"irt\" }",
"func (n NetworkMode) IsPrivate() bool {\n\treturn !(n.IsHost() || n.IsContainer())\n}",
"func isPublic(myString string) bool {\n\ta := []rune(myString)\n\ta[0] = unicode.ToUpper(a[0])\n\treturn myString == string(a)\n}",
"func (void *VoidResponse) SetIsPersonal(personal bool) *VoidResponse {\n\tbody := JSON{\n\t\t\"is_personal\": personal,\n\t}\n\tvoid.Request = void.Request.Send(body)\n\n\treturn void\n}",
"func (s UserSet) IsPublic() bool {\n\tres := s.Collection().Call(\"IsPublic\")\n\tresTyped, _ := res.(bool)\n\treturn resTyped\n}",
"func IsConfidential() (bool){\n\tviper.SetConfigName(\"core\")\n\tviper.SetConfigType(\"yaml\")\n\tviper.AddConfigPath(\"/opt/gopath/src/github.com/hyperledger/fabric/peer\")\n\terr := viper.ReadInConfig() // Find and read the config file\n\tif err != nil { // Handle errors reading the config file\n\t\tpanic(fmt.Errorf(\"Fatal error config file [%s] \\n\", err))\n\t}\n\n\n\treturn viper.GetBool(\"security.privacy\")\n}",
"func (me TAttlistIssnIssnType) IsPrint() bool { return me.String() == \"Print\" }",
"func (me TxsdAddressSimpleContentExtensionCategory) IsMac() bool { return me.String() == \"mac\" }",
"func (me TxsdNodeRoleSimpleContentExtensionCategory) IsMail() bool { return me.String() == \"mail\" }",
"func (me TReviewPolicyLevel) IsAssignment() bool { return me.String() == \"Assignment\" }",
"func (c *Container) IsSpace() bool {\n\treturn c.Key != \"\"\n}"
] | [
"0.70180583",
"0.6078018",
"0.58026356",
"0.5692048",
"0.56086373",
"0.55681205",
"0.55415803",
"0.5512311",
"0.54638976",
"0.5388905",
"0.5357469",
"0.52480066",
"0.5236056",
"0.52012086",
"0.5195337",
"0.51941645",
"0.5180246",
"0.51634693",
"0.51496786",
"0.510989",
"0.5101718",
"0.5078444",
"0.506352",
"0.5047095",
"0.50364375",
"0.50275946",
"0.5015909",
"0.50047237",
"0.49950314",
"0.4994225",
"0.49928153",
"0.4979531",
"0.497502",
"0.49598962",
"0.49536142",
"0.49467856",
"0.49427083",
"0.49350557",
"0.4913251",
"0.49035236",
"0.49035078",
"0.4903402",
"0.48969546",
"0.48957703",
"0.488685",
"0.48849034",
"0.48814416",
"0.48790127",
"0.48717684",
"0.48713845",
"0.48701695",
"0.4854097",
"0.48248547",
"0.48215464",
"0.48138806",
"0.48130912",
"0.48129612",
"0.4810557",
"0.47807503",
"0.47760966",
"0.47692603",
"0.47680128",
"0.47659948",
"0.4764458",
"0.4763444",
"0.47625482",
"0.47624573",
"0.47526306",
"0.47525427",
"0.4746882",
"0.47443432",
"0.47404268",
"0.47358152",
"0.47330272",
"0.47270295",
"0.4707939",
"0.47066265",
"0.47056285",
"0.46988833",
"0.46855876",
"0.46838167",
"0.4683797",
"0.46827364",
"0.46671107",
"0.46653",
"0.46642926",
"0.46598196",
"0.46586242",
"0.4657797",
"0.46570107",
"0.4654399",
"0.4648663",
"0.46442586",
"0.4641929",
"0.4640386",
"0.46341017",
"0.46302345",
"0.46290642",
"0.46286738",
"0.46280304"
] | 0.8510974 | 0 |
IsArchived return true if space is archived | IsArchived возвращает true, если пространство архивировано | func (s *Space) IsArchived() bool {
return s.Type == SPACE_STATUS_ARCHIVED
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (sdk Sdk) IsArchived() bool {\n\treturn sdk.archiveFile() != \"\"\n}",
"func (r *BackupItem) IsArchived() bool {\n\treturn r.Status&StatusArchived == StatusArchived\n}",
"func (r *BackupItem) IsArchived() bool {\n\treturn r.Status&StatusArchived == StatusArchived\n}",
"func (r *RoleList) HasArchived() bool {\n\treturn r.hasArchived\n}",
"func (o *ShortenBitlinkBodyAllOf) HasArchived() bool {\n\tif o != nil && o.Archived != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o *DataExportQuery) HasArchived() bool {\n\tif o != nil && o.Archived != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (s *CreateWaveOutput) SetIsArchived(v bool) *CreateWaveOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (s *StopReplicationOutput) SetIsArchived(v bool) *StopReplicationOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (s *StartReplicationOutput) SetIsArchived(v bool) *StartReplicationOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (s *StickerSet) GetIsArchived() (value bool) {\n\tif s == nil {\n\t\treturn\n\t}\n\treturn s.IsArchived\n}",
"func (s *RetryDataReplicationOutput) SetIsArchived(v bool) *RetryDataReplicationOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (s *UpdateWaveOutput) SetIsArchived(v bool) *UpdateWaveOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (s *StickerSet) GetArchived() (value bool) {\n\tif s == nil {\n\t\treturn\n\t}\n\treturn s.Flags.Has(1)\n}",
"func (r *Repository) GetArchived() bool {\n\tif r == nil || r.Archived == nil {\n\t\treturn false\n\t}\n\treturn *r.Archived\n}",
"func (s *CreateApplicationOutput) SetIsArchived(v bool) *CreateApplicationOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (s *FinalizeCutoverOutput) SetIsArchived(v bool) *FinalizeCutoverOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (o *ShortenBitlinkBodyAllOf) GetArchived() bool {\n\tif o == nil || o.Archived == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.Archived\n}",
"func (s *ListWavesRequestFilters) SetIsArchived(v bool) *ListWavesRequestFilters {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (s *Wave) SetIsArchived(v bool) *Wave {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (s *PauseReplicationOutput) SetIsArchived(v bool) *PauseReplicationOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (s *UpdateSourceServerReplicationTypeOutput) SetIsArchived(v bool) *UpdateSourceServerReplicationTypeOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (s *DisconnectFromServiceOutput) SetIsArchived(v bool) *DisconnectFromServiceOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (s *SourceServer) SetIsArchived(v bool) *SourceServer {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (s *DescribeSourceServersRequestFilters) SetIsArchived(v bool) *DescribeSourceServersRequestFilters {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (p *ProjectCard) GetArchived() bool {\n\tif p == nil || p.Archived == nil {\n\t\treturn false\n\t}\n\treturn *p.Archived\n}",
"func (s *ResumeReplicationOutput) SetIsArchived(v bool) *ResumeReplicationOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (s *UpdateApplicationOutput) SetIsArchived(v bool) *UpdateApplicationOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (s *Application) SetIsArchived(v bool) *Application {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (s *ChangeServerLifeCycleStateOutput) SetIsArchived(v bool) *ChangeServerLifeCycleStateOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (s *ListApplicationsRequestFilters) SetIsArchived(v bool) *ListApplicationsRequestFilters {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (fa FileAttributes) IsArchive() bool {\n\treturn fa&32 > 0\n}",
"func (p *ProjectCardOptions) GetArchived() bool {\n\tif p == nil || p.Archived == nil {\n\t\treturn false\n\t}\n\treturn *p.Archived\n}",
"func (s *ArchiveWaveOutput) SetIsArchived(v bool) *ArchiveWaveOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (o *DataExportQuery) SetArchived(v string) {\n\to.Archived = &v\n}",
"func (b *FollowUpBuilder) Archived(value bool) *FollowUpBuilder {\n\tb.archived = value\n\tb.bitmap_ |= 8\n\treturn b\n}",
"func (o *ShortenBitlinkBodyAllOf) SetArchived(v bool) {\n\to.Archived = &v\n}",
"func (s *UnarchiveWaveOutput) SetIsArchived(v bool) *UnarchiveWaveOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (o *ShortenBitlinkBodyAllOf) GetArchivedOk() (*bool, bool) {\n\tif o == nil || o.Archived == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Archived, true\n}",
"func (s *UnarchiveApplicationOutput) SetIsArchived(v bool) *UnarchiveApplicationOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (o *DataExportQuery) GetArchivedOk() (*string, bool) {\n\tif o == nil || o.Archived == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Archived, true\n}",
"func (m *ServiceUpdateMessageViewpoint) GetIsArchived()(*bool) {\n val, err := m.GetBackingStore().Get(\"isArchived\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*bool)\n }\n return nil\n}",
"func (s *ArchiveApplicationOutput) SetIsArchived(v bool) *ArchiveApplicationOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (c *AdsListCall) Archived(archived bool) *AdsListCall {\n\tc.urlParams_.Set(\"archived\", fmt.Sprint(archived))\n\treturn c\n}",
"func (s *StickerSet) SetArchived(value bool) {\n\tif value {\n\t\ts.Flags.Set(1)\n\t\ts.Archived = true\n\t} else {\n\t\ts.Flags.Unset(1)\n\t\ts.Archived = false\n\t}\n}",
"func (c *CreativesListCall) Archived(archived bool) *CreativesListCall {\n\tc.urlParams_.Set(\"archived\", fmt.Sprint(archived))\n\treturn c\n}",
"func (c *PlacementGroupsListCall) Archived(archived bool) *PlacementGroupsListCall {\n\tc.urlParams_.Set(\"archived\", fmt.Sprint(archived))\n\treturn c\n}",
"func (c *PlacementsListCall) Archived(archived bool) *PlacementsListCall {\n\tc.urlParams_.Set(\"archived\", fmt.Sprint(archived))\n\treturn c\n}",
"func (o *DataExportQuery) GetArchived() string {\n\tif o == nil || o.Archived == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Archived\n}",
"func (c *AdvertiserLandingPagesListCall) Archived(archived bool) *AdvertiserLandingPagesListCall {\n\tc.urlParams_.Set(\"archived\", fmt.Sprint(archived))\n\treturn c\n}",
"func (g *Game) Archive(user *apigateway.AuthenticatedUser) bool {\n\tif g.db.State != models.StateFinished {\n\t\treturn false\n\t}\n\n\tif !g.userIsInGame(user) {\n\t\treturn false\n\t}\n\n\tg.db.State = models.StateArchived\n\n\treturn g.save(context.TODO())\n}",
"func (r *RoleList) GetArchived() uint {\n\treturn r.Archived\n}",
"func (m *Manifest) Archive(id uuid.UUID, txes ...*sqlx.Tx) (*db.Manifest, error) {\n\tconn := prepConn(m.Conn, txes...)\n\n\tu, err := db.FindManifest(conn, id.String())\n\tif err != nil {\n\t\treturn nil, terror.New(err, \"\")\n\t}\n\n\tif u.Archived {\n\t\treturn u, nil\n\t}\n\n\tu.Archived = true\n\tu.ArchivedAt = null.TimeFrom(time.Now())\n\t_, err = u.Update(conn, boil.Whitelist(db.ManifestColumns.Archived, db.ManifestColumns.ArchivedAt))\n\tif err != nil {\n\t\treturn nil, terror.New(err, \"\")\n\t}\n\treturn u, nil\n}",
"func (o *GetV1MembershipsParams) SetArchived(archived *bool) {\n\to.Archived = archived\n}",
"func (c *CampaignsListCall) Archived(archived bool) *CampaignsListCall {\n\tc.urlParams_.Set(\"archived\", fmt.Sprint(archived))\n\treturn c\n}",
"func (s *MarkAsArchivedOutput) SetIsArchived(v bool) *MarkAsArchivedOutput {\n\ts.IsArchived = &v\n\treturn s\n}",
"func (r ApiGetBitlinksByGroupRequest) Archived(archived string) ApiGetBitlinksByGroupRequest {\n\tr.archived = &archived\n\treturn r\n}",
"func (mr *MockRepoClientMockRecorder) IsArchived() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"IsArchived\", reflect.TypeOf((*MockRepoClient)(nil).IsArchived))\n}",
"func (m *ItemVariant) Archive() (err error) {\n\tm.IsArchived = 1\n\tif err = m.Save(\"is_archived\"); err == nil {\n\t\t// kalau semua item_variant dari item ini di archive maka item nya juga harus di archive\n\t\tvar t int64\n\t\to := orm.NewOrm()\n\t\tif e := o.Raw(\"select count(*) from item_variant where item_id = ? and is_archived = 0\", m.Item.ID).QueryRow(&t); e == nil && t == 0 {\n\t\t\to.Raw(\"update item set is_archived = 1 where id = ?\", m.Item.ID).Exec()\n\t\t}\n\t}\n\n\treturn\n}",
"func (o *GetSearchEmployeesParams) SetArchived(archived *bool) {\n\to.Archived = archived\n}",
"func (o *GetSearchClinicsParams) SetArchived(archived *bool) {\n\to.Archived = archived\n}",
"func (m *MockRepoClient) IsArchived() (bool, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"IsArchived\")\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (a *BooleanArchive) ArchiveLen() int {\n\treturn len(a.archiveArray)\n}",
"func (m *ServiceUpdateMessageViewpoint) SetIsArchived(value *bool)() {\n err := m.GetBackingStore().Set(\"isArchived\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (p *ProjectCardListOptions) GetArchivedState() string {\n\tif p == nil || p.ArchivedState == nil {\n\t\treturn \"\"\n\t}\n\treturn *p.ArchivedState\n}",
"func (account *Account) Archived(params ...interface{}) *FeedMedia {\r\n\tinsta := account.inst\r\n\r\n\tmedia := &FeedMedia{}\r\n\tmedia.inst = insta\r\n\tmedia.endpoint = urlUserArchived\r\n\r\n\tfor _, param := range params {\r\n\t\tswitch s := param.(type) {\r\n\t\tcase string:\r\n\t\t\tmedia.timestamp = s\r\n\t\t}\r\n\t}\r\n\r\n\treturn media\r\n}",
"func (r *Repo) IsDeleted() bool { return !r.DeletedAt.IsZero() }",
"func (s *Transaction) Archive(id uuid.UUID, txes ...*sql.Tx) (*db.Transaction, error) {\n\tu, err := db.FindTransaction(s.Conn, id.String())\n\tif err != nil {\n\t\treturn nil, terror.New(err, \"\")\n\t}\n\n\tif u.Archived {\n\t\treturn u, nil\n\t}\n\n\tu.Archived = true\n\tu.ArchivedAt = null.TimeFrom(time.Now())\n\t_, err = u.Update(s.Conn, boil.Whitelist(db.TransactionColumns.Archived, db.TransactionColumns.ArchivedAt))\n\tif err != nil {\n\t\treturn nil, terror.New(err, \"\")\n\t}\n\treturn u, nil\n}",
"func (s *Service) arcExist(aid int64) bool {\n\tvar (\n\t\tres *ugcmdl.Archive\n\t\terr error\n\t)\n\tif res, err = s.dao.ParseArc(ctx, aid); err != nil || res == nil {\n\t\treturn false\n\t}\n\tif res.Deleted == _deleted {\n\t\treturn false\n\t}\n\treturn true\n}",
"func ArchInBackup(arch Archive, backup *Backup) bool {\n\tbackupStart := backup.MongoMeta.Before.LastMajTS\n\tbackupEnd := backup.MongoMeta.After.LastMajTS\n\treturn TimestampInInterval(arch.Start, backupStart, backupEnd) ||\n\t\tTimestampInInterval(arch.End, backupStart, backupEnd) ||\n\t\tTimestampInInterval(backupStart, arch.Start, arch.End) ||\n\t\tTimestampInInterval(backupEnd, arch.Start, arch.End)\n}",
"func (p PostgresStapleStorer) Archive(email string, stapleID int) error {\n\tconn, err := p.connect()\n\tif err != nil {\n\t\treturn err\n\t}\n\tctx := context.Background()\n\tdefer conn.Close(ctx)\n\t_, err = conn.Exec(ctx, \"update staples set archived = true where user_email = $1 and id = $2\", email, stapleID)\n\treturn err\n}",
"func isArchiveFormatRar(archivePath string, password string) error {\n\treadCloser, err := rardecode.OpenReader(archivePath, password)\n\tif err == nil {\n\t\t_ = readCloser.Close()\n\t}\n\treturn err\n}",
"func (o *ArchivedAnalysis) HasArchiveSizeBytes() bool {\n\tif o != nil && o.ArchiveSizeBytes != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func ArchivedKey(qname string) string {\n\treturn fmt.Sprintf(\"asynq:{%s}:archived\", qname)\n}",
"func (svc *LouisService) Archive(imageKey string) error {\n\n\tfiles, err := svc.ctx.Storage.ListFiles(imageKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar objectsToDelete = make([]storage.ObjectID, 0)\n\tvar originalKey storage.ObjectID\n\tvar realExists = false\n\tfor _, file := range files {\n\t\tif strings.HasSuffix(*file.Key, RealTransformName+\".\"+ImageExtension) {\n\t\t\trealExists = true\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasSuffix(*file.Key, OriginalTransformName+\".\"+ImageExtension) {\n\t\t\toriginalKey = file\n\t\t\tcontinue\n\t\t}\n\t\tobjectsToDelete = append(objectsToDelete, file)\n\t}\n\tif realExists && originalKey != nil {\n\t\tobjectsToDelete = append(objectsToDelete, originalKey)\n\t}\n\n\tif len(objectsToDelete) > 0 {\n\t\terr = svc.ctx.Storage.DeleteFiles(objectsToDelete)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn svc.ctx.DB.DeleteImage(imageKey)\n}",
"func (a *Archive) Empty() bool {\n\treturn len(a.Files) == 0\n}",
"func (c *Client) archiveWorkspaceTemplateDB(namespace, uid string) (archived bool, err error) {\n\tresult, err := sb.Update(\"workspace_templates\").\n\t\tSet(\"is_archived\", true).\n\t\tWhere(sq.Eq{\n\t\t\t\"uid\": uid,\n\t\t\t\"namespace\": namespace,\n\t\t\t\"is_archived\": false,\n\t\t}).\n\t\tRunWith(c.DB).\n\t\tExec()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\trowsAffected, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif rowsAffected == 0 {\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}",
"func (d *Dao) Archives(c context.Context, aids []int64, ip string) (a map[int64]*api.Arc, err error) {\n\tvar arg = &archive.ArgAids2{Aids: aids, RealIP: ip}\n\tif a, err = d.arc.Archives3(c, arg); err != nil {\n\t\tlog.Error(\"rpc archive (%v) error(%v)\", aids, err)\n\t\terr = ecode.CreativeArcServiceErr\n\t}\n\treturn\n}",
"func (me TxsdSpace) IsPreserve() bool { return me.String() == \"preserve\" }",
"func Archive() {\n\tcount.Archive()\n}",
"func IsPurgeable(candidate time.Time, before time.Duration, since time.Duration) bool {\n\tnow := time.Now().In(time.UTC)\n\tstart := time.Time{}\n\tend := now\n\tif before == 0 && since == 0 {\n\t\treturn true\n\t} else if before != 0 && since != 0 {\n\t\treturn false\n\t}\n\tif before != 0 {\n\t\tend = now.Add(-before)\n\t}\n\tif since != 0 {\n\t\tstart = now.Add(-since)\n\t}\n\tif candidate.After(start) && candidate.Before(end) {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (s *Service) diffArc(old *ugcmdl.ArchDatabus, new *ugcmdl.ArchDatabus) (diff bool) {\n\tdiff = (old.Title != new.Title)\n\tdiff = diff || (old.Content != new.Content)\n\tdiff = diff || (old.PubTime != new.PubTime)\n\tdiff = diff || (old.TypeID != new.TypeID)\n\tdiff = diff || (old.Cover != new.Cover)\n\tdiff = diff || (s.getPTypeName(old.TypeID) != s.getPTypeName(new.TypeID))\n\treturn\n}",
"func (me TGetReviewableHITsSortProperty) IsExpiration() bool { return me.String() == \"Expiration\" }",
"func isArchiveItemHeader(line string, prefix string, suffix string, format string) bool {\n\tif !strings.HasPrefix(line, prefix) {\n\t\treturn false\n\t}\n\tif !strings.HasSuffix(line, suffix) {\n\t\treturn false\n\t}\n\t_, err := time.Parse(format, stripPrefixSuffix(line, prefix, suffix))\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (c *Client) MarkAsArchived(ctx context.Context, params *MarkAsArchivedInput, optFns ...func(*Options)) (*MarkAsArchivedOutput, error) {\n\tif params == nil {\n\t\tparams = &MarkAsArchivedInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"MarkAsArchived\", params, optFns, addOperationMarkAsArchivedMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*MarkAsArchivedOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}",
"func (p *Policy) handleArchiving(ctx context.Context, storage logical.Storage) error {\n\t// We need to move keys that are no longer accessible to archivedKeys, and keys\n\t// that now need to be accessible back here.\n\t//\n\t// For safety, because there isn't really a good reason to, we never delete\n\t// keys from the archive even when we move them back.\n\n\t// Check if we have the latest minimum version in the current set of keys\n\t_, keysContainsMinimum := p.Keys[strconv.Itoa(p.MinDecryptionVersion)]\n\n\t// Sanity checks\n\tswitch {\n\tcase p.MinDecryptionVersion < 1:\n\t\treturn fmt.Errorf(\"minimum decryption version of %d is less than 1\", p.MinDecryptionVersion)\n\tcase p.LatestVersion < 1:\n\t\treturn fmt.Errorf(\"latest version of %d is less than 1\", p.LatestVersion)\n\tcase !keysContainsMinimum && p.ArchiveVersion != p.LatestVersion:\n\t\treturn fmt.Errorf(\"need to move keys from archive but archive version not up-to-date\")\n\tcase p.ArchiveVersion > p.LatestVersion:\n\t\treturn fmt.Errorf(\"archive version of %d is greater than the latest version %d\",\n\t\t\tp.ArchiveVersion, p.LatestVersion)\n\tcase p.MinEncryptionVersion > 0 && p.MinEncryptionVersion < p.MinDecryptionVersion:\n\t\treturn fmt.Errorf(\"minimum decryption version of %d is greater than minimum encryption version %d\",\n\t\t\tp.MinDecryptionVersion, p.MinEncryptionVersion)\n\tcase p.MinDecryptionVersion > p.LatestVersion:\n\t\treturn fmt.Errorf(\"minimum decryption version of %d is greater than the latest version %d\",\n\t\t\tp.MinDecryptionVersion, p.LatestVersion)\n\t}\n\n\tarchive, err := p.LoadArchive(ctx, storage)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !keysContainsMinimum {\n\t\t// Need to move keys *from* archive\n\t\tfor i := p.MinDecryptionVersion; i <= p.LatestVersion; i++ {\n\t\t\tp.Keys[strconv.Itoa(i)] = archive.Keys[i-p.MinAvailableVersion]\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t// Need to move keys *to* archive\n\n\t// We need a size that is equivalent to the 
latest version (number of keys)\n\t// but adding one since slice numbering starts at 0 and we're indexing by\n\t// key version\n\tif len(archive.Keys)+p.MinAvailableVersion < p.LatestVersion+1 {\n\t\t// Increase the size of the archive slice\n\t\tnewKeys := make([]KeyEntry, p.LatestVersion-p.MinAvailableVersion+1)\n\t\tcopy(newKeys, archive.Keys)\n\t\tarchive.Keys = newKeys\n\t}\n\n\t// We are storing all keys in the archive, so we ensure that it is up to\n\t// date up to p.LatestVersion\n\tfor i := p.ArchiveVersion + 1; i <= p.LatestVersion; i++ {\n\t\tarchive.Keys[i-p.MinAvailableVersion] = p.Keys[strconv.Itoa(i)]\n\t\tp.ArchiveVersion = i\n\t}\n\n\t// Trim the keys if required\n\tif p.ArchiveMinVersion < p.MinAvailableVersion {\n\t\tarchive.Keys = archive.Keys[p.MinAvailableVersion-p.ArchiveMinVersion:]\n\t\tp.ArchiveMinVersion = p.MinAvailableVersion\n\t}\n\n\terr = p.storeArchive(ctx, storage, archive)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Perform deletion afterwards so that if there is an error saving we\n\t// haven't messed with the current policy\n\tfor i := p.LatestVersion - len(p.Keys) + 1; i < p.MinDecryptionVersion; i++ {\n\t\tdelete(p.Keys, strconv.Itoa(i))\n\t}\n\n\treturn nil\n}",
"func NewArchiver(logger log.Logger) archiversvc.Service {\n\treturn &archiversvcsvc{\n\t\tlogger: logger,\n\t\tdb: &Archive{RWMutex: &sync.RWMutex{}},\n\t}\n}",
"func (me TGetReviewableHITsSortProperty) IsCreationTime() bool { return me.String() == \"CreationTime\" }",
"func isUnarchivingChannelEnabled() bool {\n\tif os.Getenv(\"ENABLE_UNARCHIVING_CHANNEL\") == \"true\" {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}",
"func (repo *Repository) Archive(ctx context.Context, claims auth.Claims, req ChecklistArchiveRequest, now time.Time) error {\n\tspan, ctx := tracer.StartSpanFromContext(ctx, \"internal.checklist.Archive\")\n\tdefer span.Finish()\n\n\t// Validate the request.\n\tv := webcontext.Validator()\n\terr := v.Struct(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Ensure the claims can modify the checklist specified in the request.\n\terr = repo.CanModifyChecklist(ctx, claims, req.ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If now empty set it to the current time.\n\tif now.IsZero() {\n\t\tnow = time.Now()\n\t}\n\n\t// Always store the time as UTC.\n\tnow = now.UTC()\n\t// Postgres truncates times to milliseconds when storing. We and do the same\n\t// here so the value we return is consistent with what we store.\n\tnow = now.Truncate(time.Millisecond)\n\t// Build the update SQL statement.\n\tquery := sqlbuilder.NewUpdateBuilder()\n\tquery.Update(checklistTableName)\n\tquery.Set(\n\t\tquery.Assign(\"archived_at\", now),\n\t)\n\n\tquery.Where(query.Equal(\"id\", req.ID))\n\t// Execute the query with the provided context.\n\tsql, args := query.Build()\n\tsql = repo.DbConn.Rebind(sql)\n\t_, err = repo.DbConn.ExecContext(ctx, sql, args...)\n\tif err != nil {\n\t\terr = errors.Wrapf(err, \"query - %s\", query.String())\n\t\terr = errors.WithMessagef(err, \"archive checklist %s failed\", req.ID)\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (ks *VRF) Archive(key secp256k1.PublicKey) (err error) {\n\tks.lock.Lock()\n\tdefer ks.lock.Unlock()\n\tif key == zeroPublicKey {\n\t\treturn fmt.Errorf(\"cannot delete the empty public key\")\n\t}\n\tif _, found := ks.keys[key]; found {\n\t\terr = ks.forget(key) // Destroy in-memory representation of key\n\t\tdelete(ks.keys, key)\n\t}\n\tmatches, err := ks.get(key)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"while checking for existence of key %s in DB\", key.String())\n\t} else if len(matches) == 0 {\n\t\treturn ErrAttemptToDeleteNonExistentKeyFromDB\n\t}\n\terr2 := ks.orm.ArchiveEncryptedSecretVRFKey(&vrfkey.EncryptedVRFKey{PublicKey: key})\n\treturn multierr.Append(err, err2)\n}",
"func (bgpr BlobsGetPropertiesResponse) ArchiveStatus() string {\n\treturn bgpr.rawResponse.Header.Get(\"x-ms-archive-status\")\n}",
"func (c CachedObject) IsExpired() bool {\r\n\r\n\telapsed := time.Now().Sub(c.CreatedAt.Add(time.Hour * getExpiryTimeInHrs()))\r\n\r\n\tif elapsed > 0.0 {\r\n\t\treturn true\r\n\t}\r\n\r\n\treturn false\r\n}",
"func ArchiveArchiverPath() string {\n\treturn \"/archive\"\n}",
"func (me TSearchHITsSortProperty) IsExpiration() bool { return me.String() == \"Expiration\" }",
"func isArchiveFormatZip(archivePath string) error {\n\treadCloser, err := zip.OpenReader(archivePath)\n\tif err == nil {\n\t\t_ = readCloser.Close()\n\t}\n\treturn err\n}",
"func (a *BooleanArchive) Len() int {\n\treturn a.size\n}",
"func (r *RoleList) RawArchived() string {\n\treturn r.rawArchived\n}",
"func (me TSearchHITsSortProperty) IsCreationTime() bool { return me.String() == \"CreationTime\" }",
"func (upload *Upload) IsExpired() bool {\n\tif upload.ExpireAt != nil {\n\t\tif time.Now().After(*upload.ExpireAt) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func IsCreated(name string) bool {\n\treturn exec.Command(execName, \"inspect\", \"vm\", name).Run() == nil\n}"
] | [
"0.7998938",
"0.7904601",
"0.7904601",
"0.68943787",
"0.6770506",
"0.67099184",
"0.6696357",
"0.66641325",
"0.6639419",
"0.66274315",
"0.6606585",
"0.65943253",
"0.6559072",
"0.6552962",
"0.6517748",
"0.6511882",
"0.64961284",
"0.6490567",
"0.6486505",
"0.6484248",
"0.64326704",
"0.6421908",
"0.64202094",
"0.6409425",
"0.6397907",
"0.6388498",
"0.6384348",
"0.6376828",
"0.63089687",
"0.6270892",
"0.62568367",
"0.62180126",
"0.6118926",
"0.61046416",
"0.6091664",
"0.6090562",
"0.6082604",
"0.6074421",
"0.6040612",
"0.6031975",
"0.5997631",
"0.5990044",
"0.59836966",
"0.59586686",
"0.5948055",
"0.59215873",
"0.58920836",
"0.58829427",
"0.5844561",
"0.5836523",
"0.58020025",
"0.575106",
"0.5703843",
"0.5672779",
"0.5632328",
"0.5594819",
"0.55461246",
"0.55305034",
"0.5447593",
"0.5439517",
"0.525149",
"0.5218389",
"0.5192503",
"0.51921856",
"0.5172773",
"0.51475525",
"0.5140687",
"0.5099644",
"0.5068607",
"0.49979028",
"0.4983336",
"0.49645463",
"0.49638626",
"0.49617326",
"0.49529782",
"0.49317315",
"0.4894447",
"0.48888665",
"0.48789167",
"0.48683837",
"0.48680657",
"0.48448524",
"0.48349768",
"0.482418",
"0.4821425",
"0.47935596",
"0.47918546",
"0.47830155",
"0.47676235",
"0.4757821",
"0.47228503",
"0.47014505",
"0.4695662",
"0.46798056",
"0.46511257",
"0.46341503",
"0.46050787",
"0.45909014",
"0.45895723",
"0.4580624"
] | 0.8670008 | 0 |
IsPage return true if container is page | IsPage возвращает true, если контейнер является страницей | func (c *Container) IsPage() bool {
return c.Title != ""
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c *Content) IsPage() bool {\n\treturn c.Type == CONTENT_TYPE_PAGE\n}",
"func (si *StructInfo) IsPage() bool {\n\treturn si.Kind == core.PAGE\n}",
"func (p *Paginator) IsCurrentPage(page int) bool {\n\treturn p.CurrentPage() == page\n}",
"func (p *Pagination) IsCurrentPage(page int) bool {\n\treturn p.CurrentPage() == page\n}",
"func (t *Type) IsContainer() bool {\n\t_, ok := frugalContainerTypes[t.Name]\n\treturn ok\n}",
"func (p Pagination) IsCurrent(page int) bool {\n\treturn page == p.CurrentPage\n}",
"func (eReference *eReferenceImpl) IsContainer() bool {\n\tpanic(\"IsContainer not implemented\")\n}",
"func (n PidMode) IsContainer() bool {\n\t_, ok := containerID(string(n))\n\treturn ok\n}",
"func (p Page) inPage(s string) bool {\n\tfor _, v := range p.Links {\n\t\tif s == v.Url.String() || v.Url.String()+\"/\" == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (b *BaseElement) IsContainer() bool {\n\treturn false\n}",
"func (n NetworkMode) IsContainer() bool {\n\t_, ok := containerID(string(n))\n\treturn ok\n}",
"func (p *Pagination) Show() bool {\n\treturn p.NumberOfPages() > 1\n}",
"func (p *Paginator) IsActive(page int) bool {\n\treturn p.Page() == page\n}",
"func (o *PaginationProperties) HasPage() bool {\n\tif o != nil && o.Page != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o *Bundles) HasCurrentPage() bool {\n\tif o != nil && o.CurrentPage != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (p *Paginator) HasPages() bool {\n\treturn p.PageNums() > 1\n}",
"func (r ContainerPage) IsEmpty() (bool, error) {\n\tcontainers, err := ExtractContainers(r)\n\treturn len(containers) == 0, err\n}",
"func (n IpcMode) IsContainer() bool {\n\t_, ok := containerID(string(n))\n\treturn ok\n}",
"func (o *SpansListRequestAttributes) HasPage() bool {\n\treturn o != nil && o.Page != nil\n}",
"func (o *AclBindingListPage) HasPage() bool {\n\tif o != nil && o.Page != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (p *Paginator) Show() bool {\n\treturn p.NumberOfPages() > 1\n}",
"func InContainer() (bool, error) {\n\tns, err := GetContainerID()\n\treturn ns != \"\", err\n}",
"func (p Page) IsValid() bool {\n\treturn p.valid\n}",
"func (p *BlobContainersClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListContainerItems.NextLink == nil || len(*p.current.ListContainerItems.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (r *RoleList) HasPage() bool {\n\treturn r.hasPage\n}",
"func (p Page) IsHTML() bool {\n\treturn p.Type().MediaType() == \"text/html\"\n}",
"func (p *ServiceListContainersSegmentPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListContainersSegmentResponse.NextMarker == nil || len(*p.current.ListContainersSegmentResponse.NextMarker) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.con.Pipeline().Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listContainersSegmentHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (o *InlineResponse20014Projects) HasStartPage() bool {\n\tif o != nil && o.StartPage != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (v *Variant) IsContainer() bool {\n\treturn gobool(C.g_variant_is_container(v.native()))\n}",
"func (p *Pagination) IsLastPage() bool {\n\treturn p.CurrentPage >= p.TotalPages\n}",
"func (c CgroupSpec) IsContainer() bool {\n\t_, ok := containerID(string(c))\n\treturn ok\n}",
"func (p *Pagination) isFirst() bool {\n\treturn p.PageNumber == 1\n}",
"func (o *ViewMetaPage) HasPageSize() bool {\n\tif o != nil && o.PageSize != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func hasLandingPage(collection model.Pages, dir *model.Page) bool {\n\thasLanding := false\n\tfor _, page := range collection {\n\t\tif page.Type == \"file\" && page.Slug == dir.Slug {\n\t\t\thasLanding = true\n\t\t\tbreak\n\t\t}\n\t}\n\treturn hasLanding\n}",
"func isContainer(mode string) bool {\n\tparts := strings.SplitN(mode, \":\", 2)\n\treturn len(parts) > 1 && parts[0] == \"container\"\n}",
"func IsContainerized() (bool, error) {\n\t// TODO: Implement jail detection for freeBSD\n\treturn false, errors.New(\"cannot detect if we are in container\")\n}",
"func (c *Container) isRun() bool {\n\tif c == nil {\n\t\tpanic(\"calling isRun on nil container\")\n\t}\n\treturn c.typeID == ContainerRun\n}",
"func (o *StorageNetAppCloudTargetAllOf) HasContainer() bool {\n\tif o != nil && o.Container != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (c *Collection) IsPostsCollection() bool { return c.Name == postsName }",
"func (page *Page) AddPage(link *Page) bool {\n\tfor _, l := range page.Pages {\n\t\tif l == link {\n\t\t\treturn false\n\t\t}\n\t}\n\tpage.Pages = append(page.Pages, link)\n\treturn true\n}",
"func IsContainerized() (bool, error) {\n\tb, err := os.ReadFile(proc1Cgroup)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tfor _, line := range bytes.Split(b, []byte{'\\n'}) {\n\t\tif len(line) > 0 && !bytes.HasSuffix(line, []byte(\":/\")) && !bytes.HasSuffix(line, []byte(\":/init.scope\")) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}",
"func (p *ContainerListBlobHierarchySegmentPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListBlobsHierarchySegmentResponse.NextMarker == nil || len(*p.current.ListBlobsHierarchySegmentResponse.NextMarker) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.con.Pipeline().Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listBlobHierarchySegmentHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (o *OriginCollection) GetPageOk() (*PageType, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Page, true\n}",
"func (q *QuestionnaireT) IsInNavigation(pageIdx int) bool {\n\n\tif pageIdx < 0 || pageIdx > len(q.Pages)-1 {\n\t\treturn false\n\t}\n\n\tif q.Pages[pageIdx].NoNavigation {\n\t\treturn false\n\t}\n\n\tif fc, ok := naviFuncs[q.Pages[pageIdx].NavigationCondition]; ok {\n\t\treturn fc(q, pageIdx)\n\t}\n\n\treturn true\n}",
"func (p *ManagementClientGetActiveSessionsPager) NextPage(ctx context.Context) bool {\n\tif !p.second {\n\t\tp.second = true\n\t\treturn true\n\t} else if !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.BastionActiveSessionListResult.NextLink == nil || len(*p.current.BastionActiveSessionListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, *p.current.BastionActiveSessionListResult.NextLink)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.getActiveSessionsHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *StoragesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.StorageResourceCollection.NextLink == nil || len(*p.current.StorageResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p nullPage) IsEmpty() (bool, error) {\n\treturn true, nil\n}",
"func (p *TestPager) Check() (bool, error) {\n return false, nil\n}",
"func IsAjaxPage(vals url.Values) bool {\n\tpage := getPageName(vals)\n\tajax := vals.Get(\"ajax\")\n\tasJson := vals.Get(\"asJson\")\n\treturn page == FetchEventboxAjaxPageName ||\n\t\tpage == FetchResourcesAjaxPageName ||\n\t\tpage == GalaxyContentAjaxPageName ||\n\t\tpage == EventListAjaxPageName ||\n\t\tpage == AjaxChatAjaxPageName ||\n\t\tpage == NoticesAjaxPageName ||\n\t\tpage == RepairlayerAjaxPageName ||\n\t\tpage == TechtreeAjaxPageName ||\n\t\tpage == PhalanxAjaxPageName ||\n\t\tpage == ShareReportOverlayAjaxPageName ||\n\t\tpage == JumpgatelayerAjaxPageName ||\n\t\tpage == FederationlayerAjaxPageName ||\n\t\tpage == UnionchangeAjaxPageName ||\n\t\tpage == ChangenickAjaxPageName ||\n\t\tpage == PlanetlayerAjaxPageName ||\n\t\tpage == TraderlayerAjaxPageName ||\n\t\tpage == PlanetRenameAjaxPageName ||\n\t\tpage == RightmenuAjaxPageName ||\n\t\tpage == AllianceOverviewAjaxPageName ||\n\t\tpage == SupportAjaxPageName ||\n\t\tpage == BuffActivationAjaxPageName ||\n\t\tpage == AuctioneerAjaxPageName ||\n\t\tpage == HighscoreContentAjaxPageName ||\n\t\tajax == \"1\" ||\n\t\tasJson == \"1\"\n}",
"func (p *Paginator) Page() int {\n\tif p.page != 0 {\n\t\treturn p.page\n\t}\n\tif p.Request.Form == nil {\n\t\tp.Request.ParseForm()\n\t}\n\tp.page, _ = strconv.Atoi(p.Request.Form.Get(\"p\"))\n\tif p.page > p.PageNums() {\n\t\tp.page = p.PageNums()\n\t}\n\tif p.page <= 0 {\n\t\tp.page = 1\n\t}\n\treturn p.page\n}",
"func (t *Tailer) isContainerEntry(entry *sdjournal.JournalEntry) bool {\n\t_, exists := entry.Fields[containerIDKey]\n\treturn exists\n}",
"func (p *Page) Valid() bool {\n\tif p.Limit > 0 {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (c Page) Page() revel.Result {\n\n\tc.RenderArgs[\"Site\"] = site.Site\n\n\t// Create PageData\n\tpdata := site.LoadPage(c.Params.Route.Get(\"section\"), c.Params.Route.Get(\"page\"))\n\tc.RenderArgs[\"Page\"] = pdata\n\n\tif pdata.Error != nil {\n\t\treturn c.NotFound(\"missing secton\")\n\t}\n\n\tc.RenderArgs[\"Section\"] = site.Site.Sections[pdata.Section]\n\n\treturn c.Render()\n\n}",
"func (o *Partition) GetIsContainer(ctx context.Context) (isContainer bool, err error) {\n\terr = o.object.CallWithContext(ctx, \"org.freedesktop.DBus.Properties.Get\", 0, InterfacePartition, \"IsContainer\").Store(&isContainer)\n\treturn\n}",
"func (p Page) Type() Type {\n\treturn p.PageType\n}",
"func (c *ColumnChunkMetaData) HasIndexPage() bool { return c.columnMeta.IsSetIndexPageOffset() }",
"func (p *ManagementClientGetBastionShareableLinkPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.BastionShareableLinkListResult.NextLink == nil || len(*p.current.BastionShareableLinkListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.getBastionShareableLinkHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *Pagination) hasMoreThanOnePage() bool {\n\treturn p.Limit < p.Total\n}",
"func (m Model) OnLastPage() bool {\n\treturn m.Page == m.TotalPages-1\n}",
"func (*offsetPageInfoImpl) HasNextPage(p graphql.ResolveParams) (bool, error) {\n\tpage := p.Source.(offsetPageInfo)\n\treturn (page.offset + page.limit) < page.totalCount, nil\n}",
"func (o *DeliveryGetOriginsResponse) HasPageInfo() bool {\n\tif o != nil && o.PageInfo != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func inContainer() opt.Bool {\n\tif runtime.GOOS != \"linux\" {\n\t\treturn \"\"\n\t}\n\tvar ret opt.Bool\n\tret.Set(false)\n\tif _, err := os.Stat(\"/.dockerenv\"); err == nil {\n\t\tret.Set(true)\n\t\treturn ret\n\t}\n\tif _, err := os.Stat(\"/run/.containerenv\"); err == nil {\n\t\t// See https://github.com/cri-o/cri-o/issues/5461\n\t\tret.Set(true)\n\t\treturn ret\n\t}\n\tlineread.File(\"/proc/1/cgroup\", func(line []byte) error {\n\t\tif mem.Contains(mem.B(line), mem.S(\"/docker/\")) ||\n\t\t\tmem.Contains(mem.B(line), mem.S(\"/lxc/\")) {\n\t\t\tret.Set(true)\n\t\t\treturn io.EOF // arbitrary non-nil error to stop loop\n\t\t}\n\t\treturn nil\n\t})\n\tlineread.File(\"/proc/mounts\", func(line []byte) error {\n\t\tif mem.Contains(mem.B(line), mem.S(\"lxcfs /proc/cpuinfo fuse.lxcfs\")) {\n\t\t\tret.Set(true)\n\t\t\treturn io.EOF\n\t\t}\n\t\treturn nil\n\t})\n\treturn ret\n}",
"func (p *Pages) Has(pageName string) bool {\n\tvar pageHandle = cleanAllSlashes(handlePath(pageName))\n\tvar prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))\n\tp.sl.RLock()\n\tif _, exists := p.managers[prefixPage]; exists {\n\t\tp.sl.RUnlock()\n\t\treturn true\n\t}\n\tif _, exists := p.managers[pageHandle]; exists {\n\t\tp.sl.RUnlock()\n\t\treturn true\n\t}\n\tp.sl.RUnlock()\n\treturn false\n}",
"func (p *DeploymentsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.DeploymentResourceCollection.NextLink == nil || len(*p.current.DeploymentResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (o *ViewMetaPage) HasPageOffset() bool {\n\tif o != nil && o.PageOffset != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o *CdnGetScopeRulesResponse) HasPageInfo() bool {\n\tif o != nil && o.PageInfo != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o *Bitlinks) HasPagination() bool {\n\tif o != nil && o.Pagination != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (m *PrinterDefaults) GetFitPdfToPage()(*bool) {\n val, err := m.GetBackingStore().Get(\"fitPdfToPage\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*bool)\n }\n return nil\n}",
"func (o *Origin1) GetPageOk() (*PageType, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Page, true\n}",
"func (p *BuildServiceClientListBuildServicesPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.BuildServiceCollection.NextLink == nil || len(*p.current.BuildServiceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listBuildServicesHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *CustomDomainsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.CustomDomainResourceCollection.NextLink == nil || len(*p.current.CustomDomainResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (me TxsdCounterSimpleContentExtensionType) IsSession() bool { return me.String() == \"session\" }",
"func (p *DeploymentsClientListForClusterPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.DeploymentResourceCollection.NextLink == nil || len(*p.current.DeploymentResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listForClusterHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *StorageTargetsClientListByCachePager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.StorageTargetsResult.NextLink == nil || len(*p.current.StorageTargetsResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByCacheHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (is *MenuPage) WillHide() {\n}",
"func (p *Pagination) isLast() bool {\n\tif p.Total == 0 {\n\t\treturn true\n\t}\n\treturn p.Total > (p.PageNumber-1)*p.PageSize && !p.HasNext()\n}",
"func (p *ContainerListBlobFlatSegmentPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListBlobsFlatSegmentResponse.NextMarker == nil || len(*p.current.ListBlobsFlatSegmentResponse.NextMarker) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.con.Pipeline().Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listBlobFlatSegmentHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func detectContainer() bool {\n\tif runtime.GOOS != \"linux\" {\n\t\treturn false\n\t}\n\n\tfile, err := os.Open(\"/proc/1/cgroup\")\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer file.Close()\n\n\ti := 0\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\ti++\n\t\tif i > 1000 {\n\t\t\treturn false\n\t\t}\n\n\t\tline := scanner.Text()\n\t\tparts := strings.SplitN(line, \":\", 3)\n\t\tif len(parts) < 3 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif strings.Contains(parts[2], \"docker\") ||\n\t\t\tstrings.Contains(parts[2], \"lxc\") ||\n\t\t\tstrings.Contains(parts[2], \"moby\") {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (p *SmartGroupsClientGetAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.SmartGroupsList.NextLink == nil || len(*p.current.SmartGroupsList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.getAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (*offsetPageInfoImpl) HasPreviousPage(p graphql.ResolveParams) (bool, error) {\n\tpage := p.Source.(offsetPageInfo)\n\treturn page.offset > 0, nil\n}",
"func (p *BastionHostsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.BastionHostListResult.NextLink == nil || len(*p.current.BastionHostListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *APIPortalCustomDomainsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.APIPortalCustomDomainResourceCollection.NextLink == nil || len(*p.current.APIPortalCustomDomainResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func imageIsParent(store storage.Store, topLayer string) (bool, error) {\n\tchildren, err := getChildren(store, topLayer)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn len(children) > 0, nil\n}",
"func (o *PaginationProperties) HasNextPage() bool {\n\tif o != nil && o.NextPage != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (p *RecordSetsClientListByTypePager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RecordSetListResult.NextLink == nil || len(*p.current.RecordSetListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByTypeHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p ServicePage) IsEmpty() (bool, error) {\n\tservices, err := ExtractServices(p)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\treturn len(services) == 0, nil\n}",
"func (pager *ProjectsPager) HasNext() bool {\n\treturn pager.hasNext\n}",
"func (p *OperationClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.OperationList.NextLink == nil || len(*p.current.OperationList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (spec *SourceSpec) IsContainerBuild() bool {\n\treturn spec.ContainerImage.Image != \"\"\n}",
"func (p *Pagination) hasNext() bool {\n\tif p.CurrentPage*p.Limit >= p.Total {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (view *DetailsView) IsVisible() bool {\n\tif view == nil {\n\t\treturn false\n\t}\n\treturn true\n}",
"func PageHandler(w http.ResponseWriter, r *http.Request) (handled bool) {\n\tlog.Println(\"PageHandler called in example plugin\")\n\treturn false\n}",
"func (p *ServicesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ServiceResourceList.NextLink == nil || len(*p.current.ServiceResourceList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (b BaseAppOption) IsManyPerContainerType() {}",
"func (p *ServiceTagInformationClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ServiceTagInformationListResult.NextLink == nil || len(*p.current.ServiceTagInformationListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ConfigurationServicesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ConfigurationServiceResourceCollection.NextLink == nil || len(*p.current.ConfigurationServiceResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *BuildServiceBuilderClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.BuilderResourceCollection.NextLink == nil || len(*p.current.BuilderResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (c *Container) isArray() bool {\n\tif c == nil {\n\t\tpanic(\"calling isArray on nil container\")\n\t}\n\treturn c.typeID == ContainerArray\n}",
"func (p *GatewayCustomDomainsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.GatewayCustomDomainResourceCollection.NextLink == nil || len(*p.current.GatewayCustomDomainResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *AppsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AppResourceCollection.NextLink == nil || len(*p.current.AppResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}"
] | [
"0.7772381",
"0.72924674",
"0.632932",
"0.61344856",
"0.610035",
"0.60268337",
"0.6011801",
"0.58766586",
"0.5786298",
"0.57827115",
"0.57642055",
"0.5759891",
"0.566043",
"0.56425154",
"0.56420946",
"0.5632037",
"0.5631244",
"0.5612727",
"0.5582618",
"0.5552817",
"0.55508125",
"0.55412984",
"0.5514536",
"0.54886127",
"0.54673713",
"0.54460883",
"0.54145193",
"0.5385984",
"0.5329364",
"0.532783",
"0.5288585",
"0.526911",
"0.52689385",
"0.52688444",
"0.5268536",
"0.5249099",
"0.52339494",
"0.5223381",
"0.5206846",
"0.51774204",
"0.51340187",
"0.50956583",
"0.5091584",
"0.5089482",
"0.5038107",
"0.502906",
"0.5023562",
"0.50196236",
"0.5015",
"0.5014822",
"0.50041664",
"0.50034827",
"0.49934283",
"0.49932694",
"0.4978898",
"0.49695393",
"0.49543983",
"0.4949705",
"0.49483737",
"0.4939361",
"0.49306762",
"0.49038556",
"0.48995695",
"0.48943815",
"0.48901647",
"0.48805463",
"0.48788267",
"0.4857029",
"0.48562545",
"0.48548484",
"0.4852946",
"0.48496076",
"0.48469788",
"0.48451343",
"0.4829508",
"0.48187542",
"0.4818075",
"0.48148733",
"0.47999486",
"0.4794854",
"0.47917855",
"0.4787408",
"0.477824",
"0.47727877",
"0.47592992",
"0.47583613",
"0.47552857",
"0.4749606",
"0.47423005",
"0.47405085",
"0.47395974",
"0.47349742",
"0.4733931",
"0.47295636",
"0.4727889",
"0.47233707",
"0.47216886",
"0.4720963",
"0.47192878",
"0.471832"
] | 0.8593985 | 0 |
IsSpace return true if container is space | IsSpace возвращает true, если контейнер является пространством | func (c *Container) IsSpace() bool {
return c.Key != ""
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func isSpace(c byte) bool {\n\treturn c == space\n}",
"func isSpace(b byte) bool {\n\tswitch b {\n\tcase 32, 12, 9:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}",
"func IsSpaceRoot(r *Node) bool {\n\tpath := r.InternalPath()\n\tif spaceNameBytes, err := xattr.Get(path, xattrs.SpaceNameAttr); err == nil {\n\t\tif string(spaceNameBytes) != \"\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (o *IpamNetworkDataData) HasSpaceIsTemplate() bool {\n\tif o != nil && o.SpaceIsTemplate != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func IsContainerized() (bool, error) {\n\t// TODO: Implement jail detection for freeBSD\n\treturn false, errors.New(\"cannot detect if we are in container\")\n}",
"func (me TxsdClipPathTypeClipPathUnits) IsUserSpace() bool { return me.String() == \"userSpace\" }",
"func (o *IpamNetworkDataData) HasSpaceDescription() bool {\n\tif o != nil && o.SpaceDescription != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (n *Node) IsSpaceRoot(ctx context.Context) bool {\n\t_, err := n.Xattr(ctx, prefixes.SpaceNameAttr)\n\treturn err == nil\n}",
"func (me Tokens) HasSpaces() bool {\n\tfor i := 1; i < len(me); i++ {\n\t\tif diff := me[i].Pos.Off0 - (me[i-1].Pos.Off0 + len(me[i-1].Lexeme)); diff > 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func isContainer(mode string) bool {\n\tparts := strings.SplitN(mode, \":\", 2)\n\treturn len(parts) > 1 && parts[0] == \"container\"\n}",
"func isSpace(c rune) bool {\n\treturn c == ' ' || c == '\\t'\n}",
"func (o *IpamNetworkDataData) HasSpaceName() bool {\n\tif o != nil && o.SpaceName != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (adapter *LevelAdapter) IsCyberspace() (result bool) {\n\tif properties := adapter.properties(); properties != nil {\n\t\tresult = *properties.CyberspaceFlag\n\t}\n\treturn\n}",
"func (me TxsdTspanTypeLengthAdjust) IsSpacing() bool { return me.String() == \"spacing\" }",
"func (o *IpamNetworkDataData) HasSpaceClassName() bool {\n\tif o != nil && o.SpaceClassName != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (me TxsdMarkerTypeMarkerUnits) IsUserSpace() bool { return me.String() == \"userSpace\" }",
"func isContainerMetric(e *loggregator_v2.Envelope) bool {\n\tgauge := e.GetGauge()\n\tif len(gauge.Metrics) != 5 {\n\t\treturn false\n\t}\n\trequired := []string{\n\t\t\"cpu\",\n\t\t\"memory\",\n\t\t\"disk\",\n\t\t\"memory_quota\",\n\t\t\"disk_quota\",\n\t}\n\n\tfor _, req := range required {\n\t\tif _, found := gauge.Metrics[req]; !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (t *Type) IsContainer() bool {\n\t_, ok := frugalContainerTypes[t.Name]\n\treturn ok\n}",
"func (s SpaceUnit) Space(SpaceUnit, int8) MetricUnit {\n\tpanic(\"Cannot add another space unit\")\n}",
"func IsSpace(rune int) bool {\n\tif rune <= 0xFF {\t// quick Latin-1 check\n\t\tswitch rune {\n\t\tcase '\\t', '\\n', '\\v', '\\f', '\\r', ' ', 0x85, 0xA0:\n\t\t\treturn true\n\t\t}\n\t\treturn false;\n\t}\n\treturn Is(White_Space, rune);\n}",
"func (s *Attribute) Space() *Dataspace {\n\thid := C.H5Aget_space(s.id)\n\tif int(hid) > 0 {\n\t\treturn newDataspace(hid)\n\t}\n\treturn nil\n}",
"func (space *Space) IsNil() bool {\n\treturn space.Kind == 0\n}",
"func IsSpaceChar(c byte) bool {\n\treturn c == 32 || c == 9\n}",
"func isSpace(r rune) bool {\n\t// https://github.com/toml-lang/toml#spec\n\t// Whitespace means tab (0x09) or space (0x20)\n\treturn r == 0x09 || r == 0x20\n}",
"func (n NetworkMode) IsContainer() bool {\n\t_, ok := containerID(string(n))\n\treturn ok\n}",
"func (c CountUnit) Space(s SpaceUnit, dimension int8) MetricUnit {\n\treturn (&metricUnit{uint32(c)}).Space(s, dimension)\n}",
"func IsSpace(r rune) bool",
"func IsContainerized() (bool, error) {\n\tb, err := os.ReadFile(proc1Cgroup)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tfor _, line := range bytes.Split(b, []byte{'\\n'}) {\n\t\tif len(line) > 0 && !bytes.HasSuffix(line, []byte(\":/\")) && !bytes.HasSuffix(line, []byte(\":/init.scope\")) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}",
"func isSpace(r rune) bool {\n\tswitch r {\n\tcase ' ', '\\t', '\\n', '\\v', '\\f', '\\r':\n\t\treturn true\n\t}\n\treturn false\n}",
"func (s *Dataset) Space() *Dataspace {\n\thid := C.H5Dget_space(s.id)\n\tif int(hid) > 0 {\n\t\treturn newDataspace(hid)\n\t}\n\treturn nil\n}",
"func (o *IpamNetworkDataData) HasSpaceId() bool {\n\tif o != nil && o.SpaceId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func isSpace(r rune) bool {\n\treturn r == ' ' || r == '\\t'\n}",
"func isSpace(r rune) bool {\n\treturn r == ' ' || r == '\\t'\n}",
"func isSpace(r rune) bool {\n\treturn r == ' ' || r == '\\t'\n}",
"func isSpace(r rune) bool {\n\treturn r == ' ' || r == '\\t'\n}",
"func isSpace(r rune) bool {\n\treturn r == ' ' || r == '\\t'\n}",
"func (o *IpamNetworkDataData) HasSpaceParentSpaceId() bool {\n\tif o != nil && o.SpaceParentSpaceId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (pod Pod) IsMemBound() bool {\n\tfor _, container := range pod.Containers {\n\t\tif container.IsMemBound() {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (t TimeUnit) Space(s SpaceUnit, dimension int8) MetricUnit {\n\treturn (&metricUnit{uint32(t)}).Space(s, dimension)\n}",
"func (l *Lexer) isSpace(ch byte) bool {\n\treturn ch == ' '\n}",
"func (n PidMode) IsContainer() bool {\n\t_, ok := containerID(string(n))\n\treturn ok\n}",
"func isSpace(r rune) bool {\n\tswitch r {\n\tcase '\\t', '\\v', '\\f', '\\r', ' ', 0x85, 0xA0:\n\t\treturn true\n\t}\n\treturn false\n}",
"func (sp booleanSpace) Size() int {\n\treturn 2\n}",
"func (o *StorageNetAppCloudTargetAllOf) HasIpspace() bool {\n\tif o != nil && o.Ipspace != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o *StorageNetAppCloudTargetAllOf) HasContainer() bool {\n\tif o != nil && o.Container != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func isContainerTerminated(info *cadvisorapiv2.ContainerInfo) bool {\n\tif !info.Spec.HasCpu && !info.Spec.HasMemory && !info.Spec.HasNetwork {\n\t\treturn true\n\t}\n\tcstat, found := latestContainerStats(info)\n\tif !found {\n\t\treturn true\n\t}\n\tif cstat.Network != nil {\n\t\tiStats := cadvisorInfoToNetworkStats(info)\n\t\tif iStats != nil {\n\t\t\tfor _, iStat := range iStats.Interfaces {\n\t\t\t\tif *iStat.RxErrors != 0 || *iStat.TxErrors != 0 || *iStat.RxBytes != 0 || *iStat.TxBytes != 0 {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif cstat.CpuInst == nil || cstat.Memory == nil {\n\t\treturn true\n\t}\n\treturn cstat.CpuInst.Usage.Total == 0 && cstat.Memory.RSS == 0\n}",
"func (o *IpamNetworkDataData) GetSpaceIsTemplate() string {\n\tif o == nil || o.SpaceIsTemplate == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.SpaceIsTemplate\n}",
"func (n IpcMode) IsContainer() bool {\n\t_, ok := containerID(string(n))\n\treturn ok\n}",
"func isSpace(r rune) bool {\n\treturn r == ' ' || r == '\\t' || r == '\\n'\n}",
"func (o *IpamNetworkDataData) HasSpaceClassParameters() bool {\n\tif o != nil && o.SpaceClassParameters != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (r *Room) spaceFree(loc Point) bool {\n\tif loc.x < 0 || loc.x > 4 || loc.y < 0 || loc.y > 8 {\n\t\treturn false\n\t}\n\n\tif r.Grid[loc.x][loc.y].Solid {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func isSpace(b ...byte) bool {\n\n\treturn bytes.Contains([]byte(\" \\n\\r\\t\"), b)\n}",
"func (info *LevelInformation) IsCyberspace() bool {\n\treturn info.CyberspaceFlag != 0\n}",
"func (b *BaseElement) IsContainer() bool {\n\treturn false\n}",
"func (c *CircBuf) Space() int {\n\treturn (c.tail - c.head - 1) & (len(c.buf) - 1)\n}",
"func (o *HyperflexSnapshotStatus) HasUsedSpace() bool {\n\tif o != nil && o.UsedSpace != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o *IpamAliasEditInput) HasSpaceName() bool {\n\tif o != nil && o.SpaceName != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (h *Heap) IsEmpty() bool {\n\tif len(h.slice) == 0 {\n\t\treturn true\n\t}\n\n\tif (h.trimValue != -1) && (h.slice[0].distance >= h.trimValue) {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func isSpace(r rune) bool {\n\treturn r == ' ' || r == '\\t' || r == '\\n' || r == '\\r' || r == '\\f' || r == '\\v'\n}",
"func (v *Variant) IsContainer() bool {\n\treturn gobool(C.g_variant_is_container(v.native()))\n}",
"func isSpace(r rune) bool {\n\tswitch {\n\tcase r == 0x85:\n\t\treturn false\n\tcase\n\t\tunicode.IsSpace(r),\n\t\tr == '\\uFEFF': // zero width non-breaking space\n\t\treturn true\n\n\tdefault:\n\t\treturn false\n\t}\n}",
"func (o *Partition) GetIsContainer(ctx context.Context) (isContainer bool, err error) {\n\terr = o.object.CallWithContext(ctx, \"org.freedesktop.DBus.Properties.Get\", 0, InterfacePartition, \"IsContainer\").Store(&isContainer)\n\treturn\n}",
"func (o *StorageHyperFlexStorageContainer) HasProvisionedCapacity() bool {\n\tif o != nil && o.ProvisionedCapacity != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o *IpamNetworkDataData) HasParentSpaceName() bool {\n\tif o != nil && o.ParentSpaceName != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (c CgroupSpec) IsContainer() bool {\n\t_, ok := containerID(string(c))\n\treturn ok\n}",
"func (eReference *eReferenceImpl) IsContainer() bool {\n\tpanic(\"IsContainer not implemented\")\n}",
"func isSpaceChar(r rune) bool {\n\treturn r == ' ' || r == '\\t' || r == '\\r' || r == '\\n'\n}",
"func (me TxsdClipPathTypeClipPathUnits) IsUserSpaceOnUse() bool {\n\treturn me.String() == \"userSpaceOnUse\"\n}",
"func (o *StorageNetAppCifsShareAllOf) HasStorageContainer() bool {\n\tif o != nil && o.StorageContainer != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func IsDimension(t Type) bool {\n\tif v, ok := t.(*Operator); ok {\n\t\treturn v.Name == dimensionName\n\t}\n\treturn false\n}",
"func (h *Heap) IsEmpty() bool { return h.count == 0 }",
"func isSpace(b byte) bool {\n\treturn b == ' ' || b == '\\f' || b == '\\n' || b == '\\r' || b == '\\t' || b == '\\v'\n}",
"func (s SetOfSpaces) Contains(space int) bool {\r\n\tfor _, index := range s.indices {\r\n\t\tif index == space {\r\n\t\t\treturn true\r\n\t\t}\r\n\t}\r\n\treturn false\r\n}",
"func (r ContainerPage) IsEmpty() (bool, error) {\n\tcontainers, err := ExtractContainers(r)\n\treturn len(containers) == 0, err\n}",
"func isSpace(r rune) bool {\r\n\treturn r == ' ' || r == '\\t' || r == '\\n' || r == ','\r\n}",
"func (o *IpamAliasEditInput) HasSpaceId() bool {\n\tif o != nil && o.SpaceId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (sp *Space) Length() int {\n\treturn len(*sp)\n}",
"func (u *User) HaveSpaceFor(realpath string) bool {\n\tif u.StorageQuota.HaveSpace(fs.GetFileSize(realpath)) {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}",
"func (t Token) IsLength() bool {\n\tif t.TokenType == css.DimensionToken {\n\t\treturn true\n\t} else if t.TokenType == css.NumberToken && t.Data[0] == '0' {\n\t\treturn true\n\t} else if t.TokenType == css.FunctionToken {\n\t\tfun := ToHash(t.Data[:len(t.Data)-1])\n\t\tif fun == Calc || fun == Min || fun == Max || fun == Clamp || fun == Attr || fun == Var || fun == Env {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (o *IpamNetworkDataData) HasParentSpaceId() bool {\n\tif o != nil && o.ParentSpaceId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (me TxsdMovementType) IsGc() bool { return me.String() == \"GC\" }",
"func isSpace(r rune) bool {\n\tif r <= '\\u00FF' {\n\t\tswitch r {\n\t\tcase ' ', '\\t', '\\v', '\\f':\n\t\t\treturn true\n\t\tcase '\\u0085', '\\u00A0':\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\tif '\\u2000' <= r && r <= '\\u200a' {\n\t\treturn true\n\t}\n\tswitch r {\n\tcase '\\u1680', '\\u2028', '\\u2029', '\\u202f', '\\u205f', '\\u3000':\n\t\treturn true\n\t}\n\treturn false\n}",
"func IsSpaces(str []byte) bool {\n\tfor _, c := range str {\n\t\tif c != ' ' {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func (s *Spaces) Contains(spaceName string) bool {\n\tspaceNameToUpper := strings.ToUpper(spaceName)\n\tfor _, v := range s.Spaces {\n\t\tif strings.ToUpper(v) == spaceNameToUpper {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (wb *WidgetBase) AlwaysConsumeSpace() bool {\n\treturn wb.alwaysConsumeSpace\n}",
"func (sm *SpaceManager) Space(ctx context.Context, space string) (storage.Space, error) {\n\tif !validateName(space) {\n\t\treturn nil, ErrorInvalidParam.Format(\"space\", space)\n\t}\n\treturn NewSpace(sm, space)\n}",
"func (obj *Object) HasDims() bool {\n\treturn obj.ListObject() != nil || (obj.HyperCube() != nil && len(obj.HyperCube().DimensionInfo) > 0)\n}",
"func (o *DnsZoneDataData) HasZoneSpaceName() bool {\n\tif o != nil && o.ZoneSpaceName != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (me TxsdMarkerTypeMarkerUnits) IsUserSpaceOnUse() bool { return me.String() == \"userSpaceOnUse\" }",
"func (vec Vector2) IsUnit() bool {\n\treturn vec.Len() == 1\n}",
"func (space Space) Dim(data []core.Elemt) int {\n\treturn space.vspace.Dim(data)\n}",
"func CheckIsSpace(s string) bool {\n\tspace := true\n\n\tfor _, x := range s {\n\t\tif !unicode.IsSpace(x) {\n\t\t\tspace = false\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn space\n}",
"func HasSpace(s string) bool {\n\tif len(s) == 0 {\n\t\treturn false\n\t}\n\tfor _, r := range s {\n\t\tif unicode.IsSpace(r) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (n *Node) IsDir(ctx context.Context) bool {\n\tattr, _ := n.XattrInt32(ctx, prefixes.TypeAttr)\n\treturn attr == int32(provider.ResourceType_RESOURCE_TYPE_CONTAINER)\n}",
"func isWhitespace(ch rune) bool {\n\treturn ch == ' ' || ch == '\\t'\n}",
"func (t *Type) IsRegularMemory() bool",
"func (dist Beta) Space() RealSpace {\n\treturn dist.space\n}",
"func (o *IpamNetworkDataData) SetSpaceIsTemplate(v string) {\n\to.SpaceIsTemplate = &v\n}",
"func (pod Pod) IsCpuBound() bool {\n\tfor _, container := range pod.Containers {\n\t\tif container.IsCpuBound() {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (g *Group) IsFull() bool {\n\treturn g.Size() >= g.MaxSize\n}"
] | [
"0.6675399",
"0.63208467",
"0.62378913",
"0.61830646",
"0.6046376",
"0.6005166",
"0.59704506",
"0.5963759",
"0.59173125",
"0.590388",
"0.5838448",
"0.58182573",
"0.5815866",
"0.5789589",
"0.5774671",
"0.57681674",
"0.5735624",
"0.5731149",
"0.5728202",
"0.57250816",
"0.57173115",
"0.5705963",
"0.56934917",
"0.568219",
"0.566712",
"0.566218",
"0.5661247",
"0.5659956",
"0.5658281",
"0.56569105",
"0.5656165",
"0.56448156",
"0.56448156",
"0.56448156",
"0.56448156",
"0.56448156",
"0.563795",
"0.5625201",
"0.5621792",
"0.5592632",
"0.55911756",
"0.5587459",
"0.5586323",
"0.5549792",
"0.5539093",
"0.5517834",
"0.5500408",
"0.54800737",
"0.5466013",
"0.5459126",
"0.54579794",
"0.54517376",
"0.5451426",
"0.54417974",
"0.5413032",
"0.54117143",
"0.5406505",
"0.5403045",
"0.5380143",
"0.5337673",
"0.53376025",
"0.5335052",
"0.53267956",
"0.53076184",
"0.5296922",
"0.52765137",
"0.5261227",
"0.526096",
"0.52572453",
"0.52534205",
"0.52452725",
"0.524462",
"0.5236087",
"0.5236026",
"0.52303493",
"0.5197583",
"0.5192101",
"0.5176605",
"0.516905",
"0.5167984",
"0.51594543",
"0.5150859",
"0.51485676",
"0.51438236",
"0.5138062",
"0.5138017",
"0.5086993",
"0.5084447",
"0.50730944",
"0.5070843",
"0.506718",
"0.5065266",
"0.50642115",
"0.5061593",
"0.5054326",
"0.505145",
"0.5034944",
"0.50320035",
"0.5023447",
"0.502041"
] | 0.8235814 | 0 |
UnmarshalJSON is custom container ID unmarshaler | UnmarshalJSON — это пользовательский десериализатор идентификатора контейнера | func (c *ContainerID) UnmarshalJSON(b []byte) error {
switch {
case len(b) == 0:
// nop
case b[0] == '"':
*c = ContainerID(strings.Replace(string(b), "\"", "", -1))
default:
*c = ContainerID(string(b))
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c *Container) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &c.ID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (j *JobID) UnmarshalJSON(b []byte) error {\n\tvar u UUID\n\tif err := json.Unmarshal(b, &u); err != nil {\n\t\treturn err\n\t}\n\t*j = JobID(u)\n\treturn nil\n}",
"func (id *ID) UnmarshalJSON(b []byte) error {\n\ts := string(b)\n\tif s == \"null\" {\n\t\t*id = nilID\n\t\treturn nil\n\t}\n\treturn id.UnmarshalText(b[1 : len(b)-1])\n}",
"func (uid *MyULID) UnmarshalJSON(data []byte) error {\n\tvar s string\n\terr := json.Unmarshal(data, &s)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttmp, err := ulid.Parse(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*uid = MyULID(string(tmp[:]))\n\treturn nil\n}",
"func (v *AdScriptID) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonC5a4559bDecodeGithubComChromedpCdprotoPage108(&r, v)\n\treturn r.Error()\n}",
"func (id *KeyID) UnmarshalJSON(data []byte) error {\n\t// need to strip leading and trailing double quotes\n\tif data[0] != '\"' || data[len(data)-1] != '\"' {\n\t\treturn fmt.Errorf(\"KeyID is not quoted\")\n\t}\n\tdata = data[1 : len(data)-1]\n\t*id = make([]byte, hex.DecodedLen(len(data)))\n\t_, err := hex.Decode(*id, data)\n\treturn err\n}",
"func (o *Kanban) UnmarshalJSON(data []byte) error {\n\tkv := make(map[string]interface{})\n\tif err := json.Unmarshal(data, &kv); err != nil {\n\t\treturn err\n\t}\n\to.FromMap(kv)\n\tif idstr, ok := kv[\"id\"].(string); ok {\n\t\to.ID = idstr\n\t}\n\treturn nil\n}",
"func (j *QueueId) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (o *Echo) UnmarshalJSON(data []byte) error {\n\tkv := make(map[string]interface{})\n\tif err := json.Unmarshal(data, &kv); err != nil {\n\t\treturn err\n\t}\n\to.FromMap(kv)\n\tif idstr, ok := kv[\"id\"].(string); ok {\n\t\to.ID = idstr\n\t}\n\treturn nil\n}",
"func (x *XID) UnmarshalJSON(b []byte) (err error) {\n\tif string(b) == \"null\" {\n\t\t*x = nilXID\n\t\treturn\n\t}\n\t*x, err = ParseXID(b2s(b[1 : len(b)-1]))\n\treturn\n}",
"func (d *IdentityDocument) UnmarshalJSON(data []byte) error {\n\ttype identityDocument IdentityDocument\n\tvar doc identityDocument\n\terr := json.Unmarshal(data, &doc)\n\n\tif err == nil {\n\t\t*d = IdentityDocument(doc)\n\t} else {\n\t\t// the id is surrounded by \"\\\" characters, so strip them\n\t\td.ID = string(data[1 : len(data)-1])\n\t}\n\n\treturn nil\n}",
"func (o *ExportData) UnmarshalJSON(data []byte) error {\n\tkv := make(map[string]interface{})\n\tif err := json.Unmarshal(data, &kv); err != nil {\n\t\treturn err\n\t}\n\to.FromMap(kv)\n\tif idstr, ok := kv[\"id\"].(string); ok {\n\t\to.ID = idstr\n\t}\n\treturn nil\n}",
"func (o *Array_ID_A) UnmarshalJSON(b []byte) error {\n\treturn o.DeserializeJSON(b)\n}",
"func (t *TrustedIDProvider) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &t.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &t.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &t.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &t.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (tid *TransactionID) UnmarshalJSON(b []byte) error {\n\treturn (*crypto.Hash)(tid).UnmarshalJSON(b)\n}",
"func (c *ContainerNetworkInterface) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"etag\":\n\t\t\terr = unpopulate(val, \"Etag\", &c.Etag)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &c.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &c.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &c.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &c.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (sc *StorageContainer) UnmarshalJSON(body []byte) error {\n\tvar m map[string]*json.RawMessage\n\terr := json.Unmarshal(body, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, v := range m {\n\t\tswitch k {\n\t\tcase \"properties\":\n\t\t\tif v != nil {\n\t\t\t\tvar storageContainerProperties StorageContainerProperties\n\t\t\t\terr = json.Unmarshal(*v, &storageContainerProperties)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsc.StorageContainerProperties = &storageContainerProperties\n\t\t\t}\n\t\tcase \"id\":\n\t\t\tif v != nil {\n\t\t\t\tvar ID string\n\t\t\t\terr = json.Unmarshal(*v, &ID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsc.ID = &ID\n\t\t\t}\n\t\tcase \"name\":\n\t\t\tif v != nil {\n\t\t\t\tvar name string\n\t\t\t\terr = json.Unmarshal(*v, &name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsc.Name = &name\n\t\t\t}\n\t\tcase \"type\":\n\t\t\tif v != nil {\n\t\t\t\tvar typeVar string\n\t\t\t\terr = json.Unmarshal(*v, &typeVar)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsc.Type = &typeVar\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (sid *SpanID) UnmarshalJSON(data []byte) error {\n\tsid.id = [8]byte{}\n\treturn unmarshalJSON(sid.id[:], data)\n}",
"func (id *JobID) UnmarshalJSON(b []byte) error {\n\t// Sanity check. +2 for wrapping quotes.\n\tif len(b) != jobIDStrLen+2 {\n\t\treturn errors.New(\"invalid uuid string\")\n\t}\n\n\t// Remove wrapping quotes from before converting to uuid,\n\t// i.e. `\"de305d54-75b4-431b-adb2-eb6b9e546014\"` --> `de305d54-75b4-431b-adb2-eb6b9e546014`\n\tu, err := uuid.FromString(string(b[1 : len(b)-1]))\n\tif err != nil {\n\t\treturn err\n\t}\n\t*id = JobID(u)\n\treturn nil\n}",
"func (e *ExpressRouteConnectionID) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", e, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &e.ID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", e, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (z *Int) UnmarshalJSON(text []byte) error {}",
"func (d *Dir) UnmarshalJSON(b []byte) error {\n\tvar j string\n\terr := json.Unmarshal(b, &j)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Note that if the string cannot be found then it will be set to the zero value, 'Created' in this case.\n\t*d = toID[j]\n\treturn nil\n}",
"func (id *NodeID) UnmarshalJSON(data []byte) error {\n\tvar unquoted string\n\terr := json.Unmarshal(data, &unquoted)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*id, err = NodeIDFromString(unquoted)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (u *UID) UnmarshalJSON(data []byte) error {\n\tu.Str = string(data[1 : len(data)-1])\n\treturn nil\n}",
"func (bid *BlockID) UnmarshalJSON(b []byte) error {\n\treturn (*crypto.Hash)(bid).UnmarshalJSON(b)\n}",
"func (fcid *FileContractID) UnmarshalJSON(b []byte) error {\n\treturn (*crypto.Hash)(fcid).UnmarshalJSON(b)\n}",
"func (t *TaxID) UnmarshalJSON(data []byte) error {\n\tif id, ok := ParseID(data); ok {\n\t\tt.ID = id\n\t\treturn nil\n\t}\n\n\ttype taxID TaxID\n\tvar v taxID\n\tif err := json.Unmarshal(data, &v); err != nil {\n\t\treturn err\n\t}\n\n\t*t = TaxID(v)\n\treturn nil\n}",
"func (v *Kick) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer20(&r, v)\n\treturn r.Error()\n}",
"func (t *TenantIDDescription) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &t.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tenantId\":\n\t\t\terr = unpopulate(val, \"TenantID\", &t.TenantID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (oid *OutputID) UnmarshalJSON(b []byte) error {\n\treturn (*crypto.Hash)(oid).UnmarshalJSON(b)\n}",
"func (j *CreateNhAssetOperation) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (a *ActivityReferenceIDAdded) UnmarshalJSON(b []byte) error {\n\tvar helper activityReferenceIDAddedUnmarshalHelper\n\tif err := json.Unmarshal(b, &helper); err != nil {\n\t\treturn err\n\t}\n\t*a = ActivityReferenceIDAdded(helper.Attributes)\n\treturn nil\n}",
"func (e *ExpressRouteCircuitPeeringID) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", e, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &e.ID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", e, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (id *ScheduleID) UnmarshalJSON(data []byte) error {\n\tscheduleID, err := ScheduleIDFromString(strings.Replace(string(data), \"\\\"\", \"\", 2))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tid.Shard = scheduleID.Shard\n\tid.Realm = scheduleID.Realm\n\tid.Schedule = scheduleID.Schedule\n\tid.checksum = scheduleID.checksum\n\n\treturn nil\n}",
"func (containerType *ContainerType) UnmarshalJSON(b []byte) error {\n\tstrType := string(b)\n\n\tswitch strType {\n\tcase \"null\":\n\t\t*containerType = ContainerNormal\n\t\tseelog.Warn(\"Unmarshalled nil ContainerType as Normal\")\n\t\treturn nil\n\t// 'true' or 'false' for compatibility with state version <= 5\n\tcase \"true\":\n\t\t*containerType = ContainerEmptyHostVolume\n\t\treturn nil\n\tcase \"false\":\n\t\t*containerType = ContainerNormal\n\t\treturn nil\n\t}\n\n\tif len(strType) < 2 {\n\t\t*containerType = ContainerNormal\n\t\treturn errors.New(\"invalid length set for ContainerType: \" + string(b))\n\t}\n\tif b[0] != '\"' || b[len(b)-1] != '\"' {\n\t\t*containerType = ContainerNormal\n\t\treturn errors.New(\"invalid value set for ContainerType, must be a string or null; got \" + string(b))\n\t}\n\tstrType = string(b[1 : len(b)-1])\n\n\tcontType, ok := stringToContainerType[strType]\n\tif !ok {\n\t\t*containerType = ContainerNormal\n\t\treturn errors.New(\"unrecognized ContainerType: \" + strType)\n\t}\n\t*containerType = contType\n\treturn nil\n}",
"func (a *AssetContainerSas) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"assetContainerSasUrls\":\n\t\t\terr = unpopulate(val, \"AssetContainerSasUrls\", &a.AssetContainerSasUrls)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (v *parentIDQuery) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson390b7126DecodeGithubComChancedPicker15(&r, v)\n\treturn r.Error()\n}",
"func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {\n\tf.d.jsonUnmarshalV(tm)\n}",
"func (fi *FlexInt) UnmarshalJSON(b []byte) error {\n\tif b[0] != '\"' {\n\t\treturn json.Unmarshal(b, (*int)(fi))\n\t}\n\tvar s string\n\tif err := json.Unmarshal(b, &s); err != nil {\n\t\treturn err\n\t}\n\ti, err := strconv.Atoi(s)\n\tif err != nil {\n\t\treturn nil\n\t}\n\t*fi = FlexInt(i)\n\treturn nil\n}",
"func (m *MediaServiceIdentity) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", m, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"principalId\":\n\t\t\terr = unpopulate(val, \"PrincipalID\", &m.PrincipalID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tenantId\":\n\t\t\terr = unpopulate(val, \"TenantID\", &m.TenantID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &m.Type)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"userAssignedIdentities\":\n\t\t\terr = unpopulate(val, \"UserAssignedIdentities\", &m.UserAssignedIdentities)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", m, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (p *PaymentID) UnmarshalJSON(data []byte) error {\n\tvar str string\n\terr := json.Unmarshal(data, &str)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*p, err = ParsePaymentIDStr(str)\n\treturn err\n}",
"func (u *UID) UnmarshalJSON(data []byte) error {\n\t// treat empty strings as null\n\tif len(data) == 0 || string(data) == \"\" || string(data) == \"\\\"\\\"\" || string(data) == \"\\\"null\\\"\" {\n\t\tu.NullUUID = uuid.NullUUID{Valid: false}\n\t\treturn nil\n\t}\n\n\treturn json.Unmarshal(data, &u.NullUUID)\n}",
"func (this *ContainerImageConfiguration) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (o *OrchestratorIdentity) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", o, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"principalId\":\n\t\t\terr = unpopulate(val, \"PrincipalID\", &o.PrincipalID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tenantId\":\n\t\t\terr = unpopulate(val, \"TenantID\", &o.TenantID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &o.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", o, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (v *PlantainerShadowMetadataSt) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson5bd79fa1DecodeMevericcoreMcplantainer9(&r, v)\n\treturn r.Error()\n}",
"func IntUnmarshalJSON(z *big.Int, text []byte) error",
"func (u *UUID) UnmarshalJSON(d []byte) error {\n\tcontent, err := strconv.Unquote(string(d))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn u.UnmarshalJSON([]byte(content))\n}",
"func (i *Identity) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"principalId\":\n\t\t\terr = unpopulate(val, \"PrincipalID\", &i.PrincipalID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tenantId\":\n\t\t\terr = unpopulate(val, \"TenantID\", &i.TenantID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &i.Type)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"userAssignedIdentities\":\n\t\t\terr = unpopulate(val, \"UserAssignedIdentities\", &i.UserAssignedIdentities)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (i *Identity) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"principalId\":\n\t\t\terr = unpopulate(val, \"PrincipalID\", &i.PrincipalID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tenantId\":\n\t\t\terr = unpopulate(val, \"TenantID\", &i.TenantID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &i.Type)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"userAssignedIdentities\":\n\t\t\terr = unpopulate(val, \"UserAssignedIdentities\", &i.UserAssignedIdentities)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (v *idsQuery) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson390b7126DecodeGithubComChancedPicker27(&r, v)\n\treturn r.Error()\n}",
"func jsonToSingleContainer(json types.ContainerJSON) types.Container {\n\treturn types.Container{\n\t\tID: json.ID,\n\t\tNames: []string{json.Name},\n\t\tLabels: json.Config.Labels,\n\t\tState: json.State.Status,\n\t\tStatus: json.State.Status,\n\t}\n}",
"func (u *UUID) UnmarshalJSON(b []byte) error {\n\tvar s string\n\tif err := json.Unmarshal(b, &s); err != nil {\n\t\treturn err\n\t}\n\treturn u.UnmarshalText([]byte(s))\n}",
"func (l *ListContainerSasInput) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"expiryTime\":\n\t\t\terr = unpopulateTimeRFC3339(val, \"ExpiryTime\", &l.ExpiryTime)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"permissions\":\n\t\t\terr = unpopulate(val, \"Permissions\", &l.Permissions)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (e *Unsigned) UnmarshalJSON(b []byte) error {\n\to := outEnvelope{}\n\terr := json.Unmarshal(b, &o)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar body identity.Mutable\n\n\tt := o.ID.Type()\n\tswitch t {\n\tcase 0x01:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.UserV1{}\n\t\tcase 2:\n\t\t\tbody = &primitive.User{}\n\t\t}\n\tcase 0x03:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.Service{}\n\t\t}\n\tcase 0x04:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.Project{}\n\t\t}\n\tcase 0x05:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.Environment{}\n\t\t}\n\tcase 0x0d:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.Org{}\n\t\t}\n\tcase 0x0e:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.Membership{}\n\t\t}\n\tcase 0x0f:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.Team{}\n\t\t}\n\tcase 0x10:\n\t\tswitch o.Version {\n\t\tcase 2:\n\t\t\tbody = &primitive.Token{}\n\t\t}\n\tcase 0x11:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.Policy{}\n\t\t}\n\tcase 0x12:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.PolicyAttachment{}\n\t\t}\n\tcase 0x13:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.OrgInvite{}\n\t\t}\n\tcase 0x17:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.Machine{}\n\t\t}\n\tcase 0x18:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.MachineToken{}\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"Unknown primitive type id: %#02x\", t)\n\t}\n\n\tif body == nil {\n\t\treturn fmt.Errorf(\"Unknown schema version %d for primitive type id: %#02x\", o.Version, t)\n\t}\n\n\terr = json.Unmarshal(o.Body, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.ID = o.ID\n\te.Version = o.Version\n\n\te.Body = body\n\n\treturn nil\n}",
"func unmarshalJSON(i *big.Int, bz []byte) error {\n\tvar text string\n\terr := json.Unmarshal(bz, &text)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn i.UnmarshalText([]byte(text))\n}",
"func (i *ImageTemplateManagedImageSource) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn err\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"imageId\":\n\t\t\terr = unpopulate(val, &i.ImageID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, &i.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (m *TamIdentifiers) UnmarshalJSON(raw []byte) error {\n\t// AO0\n\tvar dataAO0 struct {\n\t\tName string `json:\"Name,omitempty\"`\n\n\t\tValue string `json:\"Value,omitempty\"`\n\t}\n\tif err := swag.ReadJSON(raw, &dataAO0); err != nil {\n\t\treturn err\n\t}\n\n\tm.Name = dataAO0.Name\n\n\tm.Value = dataAO0.Value\n\n\treturn nil\n}",
"func (this *NamespacedName) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (i *Invoice) UnmarshalJSON(data []byte) error {\n\tif id, ok := ParseID(data); ok {\n\t\ti.ID = id\n\t\treturn nil\n\t}\n\n\ttype invoice Invoice\n\tvar v invoice\n\tif err := json.Unmarshal(data, &v); err != nil {\n\t\treturn err\n\t}\n\n\t*i = Invoice(v)\n\treturn nil\n}",
"func (u *UUID) UnmarshalJSON(b []byte) error {\n\tif len(b) != 38 || b[0] != '\"' || b[37] != '\"' {\n\t\treturn ErrInvalidUUID\n\t}\n\tid, err := Parse(b[1:37])\n\tif err != nil {\n\t\treturn err\n\t}\n\t*u = id\n\treturn nil\n}",
"func (f *FlexString) UnmarshalJSON(b []byte) error {\n\tif b[0] != '\"' {\n\t\tvar i int\n\t\terr := json.Unmarshal(b, &i)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*f = FlexString(strconv.Itoa(i))\n\t\treturn nil\n\t}\n\treturn json.Unmarshal(b, (*string)(f))\n}",
"func (v *VirtualHubID) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", v, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &v.ID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", v, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (a *OIDCIdentity) Unmarshal(bytes []byte) error {\n\treturn trace.Wrap(json.Unmarshal(bytes, a))\n}",
"func (d *LegacyDec) UnmarshalJSON(bz []byte) error {\n\tif d.i == nil {\n\t\td.i = new(big.Int)\n\t}\n\n\tvar text string\n\terr := json.Unmarshal(bz, &text)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: Reuse dec allocation\n\tnewDec, err := LegacyNewDecFromStr(text)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.i = newDec.i\n\treturn nil\n}",
"func UnmarshalJSON(b []byte, discriminator string, f Factory) (interface{}, error) {\n\tm := make(map[string]interface{})\n\terr := json.Unmarshal(b, &m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Decode(m, discriminator, f)\n}",
"func (c *CreateOrUpdateTrustedIDProviderProperties) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"idProvider\":\n\t\t\terr = unpopulate(val, \"IDProvider\", &c.IDProvider)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (m *NavBarContainerStruct) UnmarshalJSON(bytes []byte) error{\n var tmp NavBarContainerStruct\n\n if err := json.Unmarshal(bytes, &tmp); err != nil {\n return err\n }\n m.NavBar = tmp.NavBar\n m.NavBar.ComponentData.NavBarItems = tmp.NavBarItems\n m.NavBarItems = tmp.NavBarItems\n return nil\n}",
"func (r *RegisteredAsn) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", r, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &r.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &r.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &r.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &r.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", r, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (this *BaseKubernetesContainerConfiguration) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (u *UserAssignedIdentity) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"clientId\":\n\t\t\terr = unpopulate(val, \"ClientID\", &u.ClientID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"principalId\":\n\t\t\terr = unpopulate(val, \"PrincipalID\", &u.PrincipalID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (u *UserAssignedIdentity) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"clientId\":\n\t\t\terr = unpopulate(val, \"ClientID\", &u.ClientID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"principalId\":\n\t\t\terr = unpopulate(val, \"PrincipalID\", &u.PrincipalID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (u *UserAssignedIdentity) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"clientId\":\n\t\t\terr = unpopulate(val, \"ClientID\", &u.ClientID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"principalId\":\n\t\t\terr = unpopulate(val, \"PrincipalID\", &u.PrincipalID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (j *LuaInt) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (i *ImageTemplateManagedImageDistributor) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn err\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"artifactTags\":\n\t\t\terr = unpopulate(val, &i.ArtifactTags)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"imageId\":\n\t\t\terr = unpopulate(val, &i.ImageID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"location\":\n\t\t\terr = unpopulate(val, &i.Location)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"runOutputName\":\n\t\t\terr = unpopulate(val, &i.RunOutputName)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, &i.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func parsePIDFromJSON(j []byte, runtime string) (int, error) {\n\n\tvar pid int\n\n\t// in crio, pid is present inside pid attribute of inspect output\n\t// in containerd, pid is present inside `info.pid` of inspect output\n\tif runtime == \"containerd\" {\n\t\tvar resp InspectResponse\n\t\tif err := json.Unmarshal(j, &resp); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tpid = resp.Info.PID\n\t} else if runtime == \"crio\" {\n\t\tvar resp InfoDetails\n\t\tif err := json.Unmarshal(j, &resp); err != nil {\n\t\t\treturn 0, errors.Errorf(\"[cri]: Could not find pid field in json: %s\", string(j))\n\t\t}\n\t\tpid = resp.PID\n\t} else {\n\t\treturn 0, errors.Errorf(\"[cri]: No supported container runtime, runtime: %v\", runtime)\n\t}\n\n\tif pid == 0 {\n\t\treturn 0, errors.Errorf(\"[cri]: No running target container found, pid: %v\", string(pid))\n\t}\n\n\treturn pid, nil\n}",
"func (x *CMsgDOTAPopup_PopupID) UnmarshalJSON(b []byte) error {\n\tnum, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CMsgDOTAPopup_PopupID(num)\n\treturn nil\n}",
"func (f *RetireConnectionIDFrame) UnmarshalJSONObject(dec *gojay.Decoder, key string) error {\n\tif key == \"sequence_number\" {\n\t\treturn dec.Uint64(&f.SequenceNumber)\n\t}\n\treturn nil\n}",
"func (u *UserAssignedManagedIdentity) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"clientId\":\n\t\t\terr = unpopulate(val, \"ClientID\", &u.ClientID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"principalId\":\n\t\t\terr = unpopulate(val, \"PrincipalID\", &u.PrincipalID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (s *SystemAssignedServiceIdentity) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"principalId\":\n\t\t\terr = unpopulate(val, \"PrincipalID\", &s.PrincipalID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tenantId\":\n\t\t\terr = unpopulate(val, \"TenantID\", &s.TenantID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &s.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (o *Orchestrator) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", o, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &o.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"identity\":\n\t\t\terr = unpopulate(val, \"Identity\", &o.Identity)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"kind\":\n\t\t\terr = unpopulate(val, \"Kind\", &o.Kind)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"location\":\n\t\t\terr = unpopulate(val, \"Location\", &o.Location)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &o.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &o.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tags\":\n\t\t\terr = unpopulate(val, \"Tags\", &o.Tags)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &o.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", o, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (m *TimeUnit) UnmarshalJSON(b []byte) error {\n\tvar j string\n\terr := json.Unmarshal(b, &j)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Note that if the string cannot be found then it will be set to the zero value, 'Created' in this case.\n\t*m = toTimeUnitID[j]\n\treturn nil\n}",
"func (s *StorageInformation) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &s.ID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (o *ProjectWebhook) UnmarshalJSON(data []byte) error {\n\tkv := make(map[string]interface{})\n\tif err := json.Unmarshal(data, &kv); err != nil {\n\t\treturn err\n\t}\n\to.FromMap(kv)\n\tif idstr, ok := kv[\"id\"].(string); ok {\n\t\to.ID = idstr\n\t}\n\treturn nil\n}",
"func (c *ContainerNetworkInterfaceConfiguration) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"etag\":\n\t\t\terr = unpopulate(val, \"Etag\", &c.Etag)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &c.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &c.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &c.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &c.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (j *BootInitiationRespPacket) UnmarshalJSON(input []byte) error {\n\tfs := fflib.NewFFLexer(input)\n\treturn j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)\n}",
"func (k *KustoPoolDatabasesClientCreateOrUpdateResult) UnmarshalJSON(data []byte) error {\n\tres, err := unmarshalDatabaseClassification(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tk.DatabaseClassification = res\n\treturn nil\n}",
"func (i *Issuer) UnmarshalJSON(data []byte) error {\n\tvar name Name\n\tif err := name.UnmarshalJSON(data); err != nil {\n\t\treturn err\n\t}\n\t*i = Issuer(name)\n\treturn nil\n}",
"func (id *ID) Unmarshal(dec *marshal.Decoder) {\n\tb := dec.Byte()\n\tswitch b {\n\tcase 0:\n\t\t*id = ID(dec.Varint())\n\tcase 1:\n\t\t*id = Intern(dec.Symbol())\n\tdefault:\n\t\tlog.Panicf(\"unmarshal symbol.id: corrupt data %v\", b)\n\t}\n}",
"func (id *UUID) UnmarshalJSON(data []byte) error {\n\t// Data is expected to be a json string, like: \"819c4ff4-31b4-4519-5d24-3c4a129b8649\"\n\tif len(data) < 2 || data[0] != '\"' || data[len(data)-1] != '\"' {\n\t\treturn fmt.Errorf(\"invalid UUID in JSON, %v is not a valid JSON string\", string(data))\n\t}\n\n\t// Grab string value without the surrounding \" characters\n\tvalue := string(data[1 : len(data)-1])\n\tparsed, err := ParseUUID(value)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid UUID in JSON, %v: %v\", value, err)\n\t}\n\n\t// Dereference pointer value and store parsed\n\t*id = parsed\n\treturn nil\n}",
"func (u *UID) UnmarshalJSON(b []byte) error {\n\tif !utf8.Valid(b) {\n\t\treturn fmt.Errorf(\"invalid UID string: %s\", b)\n\t}\n\n\tuid, err := strconv.Unquote(string(b))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*u = UID(uid)\n\treturn nil\n}",
"func (c *IntString) UnmarshalJSON(bytes []byte) error {\n\tr := make([]byte, 0)\n\tfor _, b := range bytes {\n\t\tswitch string(b) {\n\t\tcase \"\\\"\":\n\t\tdefault:\n\t\t\tr = append(r, b)\n\t\t}\n\t}\n\t*c = (IntString)(r)\n\treturn nil\n}",
"func (v *GetAdScriptIDParams) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonC5a4559bDecodeGithubComChromedpCdprotoPage59(&r, v)\n\treturn r.Error()\n}",
"func (v *TransactionsSinceIDRequest) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonE82c8e88DecodeGithubComKamaiuOandaGoModel(&r, v)\n\treturn r.Error()\n}",
"func (e *Signed) UnmarshalJSON(b []byte) error {\n\to := outEnvelope{}\n\terr := json.Unmarshal(b, &o)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar body identity.Immutable\n\n\tt := o.ID.Type()\n\tswitch t {\n\tcase 0x06:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.PublicKey{}\n\t\t}\n\tcase 0x07:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.PrivateKey{}\n\t\t}\n\tcase 0x08:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.Claim{}\n\t\t}\n\tcase 0x09:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.KeyringV1{}\n\t\tcase 2:\n\t\t\tbody = &primitive.Keyring{}\n\t\t}\n\tcase 0x0a:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.KeyringMemberV1{}\n\t\tcase 2:\n\t\t\tbody = &primitive.KeyringMember{}\n\t\t}\n\tcase 0x0b:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.CredentialV1{}\n\t\tcase 2:\n\t\t\tbody = &primitive.Credential{}\n\t\t}\n\tcase 0x15:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.KeyringMemberClaim{}\n\t\t}\n\tcase 0x16:\n\t\tswitch o.Version {\n\t\tcase 1:\n\t\t\tbody = &primitive.MEKShare{}\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"Unknown primitive type id: %#02x\", t)\n\t}\n\n\tif body == nil {\n\t\treturn fmt.Errorf(\"Unknown schema version %d for primitive type id: %#02x\", o.Version, t)\n\t}\n\n\terr = json.Unmarshal(o.Body, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.ID = o.ID\n\te.Version = o.Version\n\te.Signature = o.Signature\n\te.Body = body\n\n\treturn nil\n}",
"func (this *Service) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (v *Part) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer12(&r, v)\n\treturn r.Error()\n}",
"func (i *Intangible) UnmarshalJSON(body []byte) error {\n\tvar m map[string]*json.RawMessage\n\terr := json.Unmarshal(body, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, v := range m {\n\t\tswitch k {\n\t\tcase \"name\":\n\t\t\tif v != nil {\n\t\t\t\tvar name string\n\t\t\t\terr = json.Unmarshal(*v, &name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.Name = &name\n\t\t\t}\n\t\tcase \"url\":\n\t\t\tif v != nil {\n\t\t\t\tvar URL string\n\t\t\t\terr = json.Unmarshal(*v, &URL)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.URL = &URL\n\t\t\t}\n\t\tcase \"image\":\n\t\t\tif v != nil {\n\t\t\t\tvar imageVar ImageObject\n\t\t\t\terr = json.Unmarshal(*v, &imageVar)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.Image = &imageVar\n\t\t\t}\n\t\tcase \"description\":\n\t\t\tif v != nil {\n\t\t\t\tvar description string\n\t\t\t\terr = json.Unmarshal(*v, &description)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.Description = &description\n\t\t\t}\n\t\tcase \"entityPresentationInfo\":\n\t\t\tif v != nil {\n\t\t\t\tvar entityPresentationInfo EntitiesEntityPresentationInfo\n\t\t\t\terr = json.Unmarshal(*v, &entityPresentationInfo)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.EntityPresentationInfo = &entityPresentationInfo\n\t\t\t}\n\t\tcase \"bingId\":\n\t\t\tif v != nil {\n\t\t\t\tvar bingID string\n\t\t\t\terr = json.Unmarshal(*v, &bingID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.BingID = &bingID\n\t\t\t}\n\t\tcase \"contractualRules\":\n\t\t\tif v != nil {\n\t\t\t\tcontractualRules, err := unmarshalBasicContractualRulesContractualRuleArray(*v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.ContractualRules = &contractualRules\n\t\t\t}\n\t\tcase \"webSearchUrl\":\n\t\t\tif v != nil {\n\t\t\t\tvar webSearchURL string\n\t\t\t\terr = json.Unmarshal(*v, &webSearchURL)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.WebSearchURL = &webSearchURL\n\t\t\t}\n\t\tcase \"id\":\n\t\t\tif v != nil {\n\t\t\t\tvar ID string\n\t\t\t\terr = json.Unmarshal(*v, &ID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.ID = &ID\n\t\t\t}\n\t\tcase \"_type\":\n\t\t\tif v != nil {\n\t\t\t\tvar typeVar TypeBasicResponseBase\n\t\t\t\terr = json.Unmarshal(*v, &typeVar)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.Type = typeVar\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (this *Quantity) UnmarshalJSON(b []byte) error {\n\treturn CommonUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (v *MACCommandIdentifier) UnmarshalJSON(b []byte) error {\n\tif bt, ok := unmarshalJSONString(b); ok {\n\t\treturn v.UnmarshalText(bt)\n\t}\n\ti, err := unmarshalEnumFromNumber(\"MACCommandIdentifier\", MACCommandIdentifier_name, b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*v = MACCommandIdentifier(i)\n\treturn nil\n}",
"func (v *EventContextCreated) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonC5a4559bDecodeGithubComChromedpCdprotoWebaudio3(&r, v)\n\treturn r.Error()\n}"
] | [
"0.7148999",
"0.6506939",
"0.6456671",
"0.626328",
"0.62423307",
"0.62230057",
"0.62131965",
"0.62050635",
"0.61372226",
"0.6101458",
"0.6020385",
"0.60104495",
"0.5999803",
"0.59816074",
"0.5956228",
"0.5951397",
"0.59462184",
"0.5914333",
"0.59087616",
"0.5903828",
"0.5874978",
"0.58515394",
"0.58384144",
"0.58218277",
"0.58130175",
"0.57958853",
"0.57921165",
"0.5761687",
"0.57532996",
"0.5752603",
"0.57493794",
"0.57391036",
"0.5724651",
"0.57108915",
"0.5696814",
"0.5663221",
"0.56572175",
"0.5654916",
"0.56540674",
"0.56516004",
"0.56408614",
"0.5636818",
"0.5634443",
"0.56306756",
"0.56247836",
"0.5605456",
"0.5602584",
"0.560076",
"0.560076",
"0.5598185",
"0.55845463",
"0.55799437",
"0.5576217",
"0.5574511",
"0.55714923",
"0.5568862",
"0.5558053",
"0.555223",
"0.5550268",
"0.55430007",
"0.5529326",
"0.55237025",
"0.5519857",
"0.55169016",
"0.5515196",
"0.5490712",
"0.54841",
"0.5483804",
"0.5480973",
"0.54794025",
"0.54794025",
"0.54794025",
"0.54757714",
"0.5475621",
"0.5474887",
"0.54736245",
"0.5473523",
"0.5472396",
"0.54691166",
"0.54689",
"0.54654384",
"0.5463835",
"0.5463796",
"0.54605687",
"0.5460482",
"0.5457678",
"0.54553473",
"0.54537344",
"0.5453514",
"0.5453319",
"0.5445702",
"0.5440861",
"0.5440032",
"0.5438039",
"0.5430982",
"0.5430595",
"0.54285574",
"0.5424692",
"0.54235655",
"0.54224676"
] | 0.74064636 | 0 |
Deprecated: Use ListRepositoryReq.ProtoReflect.Descriptor instead. | Устарело: используйте ListRepositoryReq.ProtoReflect.Descriptor вместо этого. | func (*ListRepositoryReq) Descriptor() ([]byte, []int) {
return file_api_ops_proto_rawDescGZIP(), []int{3}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (*ListMetadataRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{23}\n}",
"func (*ListRepositoriesRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{12}\n}",
"func (*ListRefsRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{15}\n}",
"func (*ListRepositoriesRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_artifactregistry_v1_repository_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{5}\n}",
"func (*ListModelVersionsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{88}\n}",
"func (*ListNotificationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{63}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_versions_v1_versions_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_rpc_accord_proto_rawDescGZIP(), []int{7}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_store_store_proto_rawDescGZIP(), []int{12}\n}",
"func (*ListRepositoryRes) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{0}\n}",
"func (*UpdateRepoReq) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{5}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_wallet_proto_rawDescGZIP(), []int{7}\n}",
"func (*DescribeRepositoryReq) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{13}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{14}\n}",
"func (*ListPodsRequest) Descriptor() ([]byte, []int) {\n\treturn file_viz_proto_rawDescGZIP(), []int{7}\n}",
"func (*GetRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{13}\n}",
"func (*ListIpPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_containerregistry_v1_registry_service_proto_rawDescGZIP(), []int{11}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_mods_v1_mods_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListMetadataResponse) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{24}\n}",
"func (*IntegrationChangeHistoryListReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{29}\n}",
"func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}",
"func (*ListCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{164}\n}",
"func (*ComputeRepositoryDiffRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{22}\n}",
"func (*ListProjectsRequest) Descriptor() ([]byte, []int) {\n\treturn file_web_proto_rawDescGZIP(), []int{0}\n}",
"func (*RepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_service_proto_rawDescGZIP(), []int{10}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_v1_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_weather_proto_rawDescGZIP(), []int{10}\n}",
"func (*ListModelTypesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{96}\n}",
"func (*GetListRequest) Descriptor() ([]byte, []int) {\n\treturn file_parser_company_proto_rawDescGZIP(), []int{14}\n}",
"func (*ListRefsResponse) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{16}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_task_v1_task_proto_rawDescGZIP(), []int{5}\n}",
"func (*FindRemoteRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_remote_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListModelReferencesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{105}\n}",
"func (*ListReleaseReq) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{13}\n}",
"func (*ListCredentialsRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{22}\n}",
"func (*ListVersionsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{9}\n}",
"func (*ListNodeSelectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_spire_server_datastore_datastore_proto_rawDescGZIP(), []int{23}\n}",
"func (*ReleaseNameListRequest) Descriptor() ([]byte, []int) {\n\treturn file_release_proto_rawDescGZIP(), []int{21}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_bucketsd_pb_bucketsd_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListStorageRequest) Descriptor() ([]byte, []int) {\n\treturn file_console_proto_rawDescGZIP(), []int{11}\n}",
"func (*ReleaseListRequest) Descriptor() ([]byte, []int) {\n\treturn file_release_proto_rawDescGZIP(), []int{16}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListRepositoriesRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{12, 0}\n}",
"func (*ListWorkflowVersionsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{140}\n}",
"func (*ListChannelMessagesRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{56}\n}",
"func (*DeleteRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{15}\n}",
"func (*GetRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_artifactregistry_v1_repository_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListInstancesRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{9}\n}",
"func (*UpdatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{9}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_fabl_v1_item_service_proto_rawDescGZIP(), []int{6}\n}",
"func (*ListPrefixRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListWeaveScopePodsRequest) Descriptor() ([]byte, []int) {\n\treturn file_alameda_api_v1alpha1_datahub_weavescope_services_proto_rawDescGZIP(), []int{1}\n}",
"func (*GetListServersRequest) Descriptor() ([]byte, []int) {\n\treturn file_services_core_protobuf_servers_proto_rawDescGZIP(), []int{18}\n}",
"func (*ListProjectsRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_project_api_ocp_project_api_proto_rawDescGZIP(), []int{0}\n}",
"func (*GetIosPlistRequest) Descriptor() ([]byte, []int) {\n\treturn file_release_proto_rawDescGZIP(), []int{9}\n}",
"func (*SimpleListRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{6}\n}",
"func (*ListRegistrationEntriesRequest) Descriptor() ([]byte, []int) {\n\treturn file_spire_server_datastore_datastore_proto_rawDescGZIP(), []int{46}\n}",
"func (*ListSubscriptionRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_subscription_proto_rawDescGZIP(), []int{4}\n}",
"func (*MemberLevelListReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{47}\n}",
"func (*SyncPinnedRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_pinnedRepository_pinnedRepository_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListMessagesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{14}\n}",
"func (*ListNetworkOperationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_vpc_v1_network_service_proto_rawDescGZIP(), []int{15}\n}",
"func (*ListNetworkTargetsRequest) Descriptor() ([]byte, []int) {\n\treturn file_packetbroker_api_routing_v2_service_proto_rawDescGZIP(), []int{4}\n}",
"func (*PeopleListRequest) Descriptor() ([]byte, []int) {\n\treturn file_sil_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListReleaseNameRequest) Descriptor() ([]byte, []int) {\n\treturn file_release_proto_rawDescGZIP(), []int{18}\n}",
"func (*ListChartReq) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{27}\n}",
"func (*ListPeopleRequest) Descriptor() ([]byte, []int) {\n\treturn file_people_proto_rawDescGZIP(), []int{3}\n}",
"func (*GrowthChangeHistoryListReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{20}\n}",
"func (*WaitListsRequest) Descriptor() ([]byte, []int) {\n\treturn file_resources_proto_rawDescGZIP(), []int{77}\n}",
"func (*ProductsListRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_proto_productslist_products_list_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListCommitsRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{16}\n}",
"func (*UserListRequest) Descriptor() ([]byte, []int) {\n\treturn file_presence_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListVariableReq) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{11}\n}",
"func (*PollCredentialOffersRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{30}\n}",
"func (*ListReportRequest) Descriptor() ([]byte, []int) {\n\treturn file_report_proto_rawDescGZIP(), []int{9}\n}",
"func (*DetachMetadataRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{22}\n}",
"func (*ResolveVersionRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{25}\n}",
"func (*MemberListReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListNetworksRequest) Descriptor() ([]byte, []int) {\n\treturn file_packetbroker_api_iam_v1_service_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListTagsRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{28}\n}",
"func (*DeleteRefRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{14}\n}",
"func (*ListLimitsRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_limit_service_proto_rawDescGZIP(), []int{3}\n}",
"func (*RevokeTokensRequest) Descriptor() ([]byte, []int) {\n\treturn file_token_proto_rawDescGZIP(), []int{17}\n}",
"func (*SelectorVerificationsReq) Descriptor() ([]byte, []int) {\n\treturn file_proto_selector_verification_msgs_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListRevokedTokensRequest) Descriptor() ([]byte, []int) {\n\treturn file_token_proto_rawDescGZIP(), []int{13}\n}",
"func (*ListControllerPlanningsRequest) Descriptor() ([]byte, []int) {\n\treturn file_alameda_api_v1alpha1_datahub_plannings_services_proto_rawDescGZIP(), []int{8}\n}",
"func (*ListTodoRequest) Descriptor() ([]byte, []int) {\n\treturn file_todo_proto_rawDescGZIP(), []int{7}\n}",
"func (*PatchAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListConnectionsRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{20}\n}",
"func (*ListServicesRequest) Descriptor() ([]byte, []int) {\n\treturn file_viz_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListDeviceRequest) Descriptor() ([]byte, []int) {\n\treturn file_device_proto_rawDescGZIP(), []int{0}\n}",
"func (*GetWatchlistRequest) Descriptor() ([]byte, []int) {\n\treturn file_golang_pkg_proto_movies_movies_proto_rawDescGZIP(), []int{1}\n}",
"func (*UpdateIpPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_containerregistry_v1_registry_service_proto_rawDescGZIP(), []int{10}\n}",
"func (*ListGuildChannelsRequest) Descriptor() ([]byte, []int) {\n\treturn file_discord_v1_cache_proto_rawDescGZIP(), []int{2}\n}",
"func (*GetClientListRequest) Descriptor() ([]byte, []int) {\n\treturn file_messaging_proto_rawDescGZIP(), []int{4}\n}"
] | [
"0.73137987",
"0.72216994",
"0.71340483",
"0.71158785",
"0.69697946",
"0.6960516",
"0.6959861",
"0.6948829",
"0.6934028",
"0.6908448",
"0.69048285",
"0.690034",
"0.68986243",
"0.689512",
"0.68887913",
"0.68837196",
"0.68708044",
"0.68625325",
"0.68586475",
"0.6857217",
"0.68469566",
"0.68315244",
"0.68184614",
"0.6817829",
"0.6816957",
"0.68059987",
"0.67956644",
"0.67865884",
"0.67798525",
"0.67768294",
"0.677461",
"0.6766642",
"0.6763213",
"0.67548347",
"0.6753135",
"0.6748962",
"0.67484576",
"0.67477757",
"0.6747773",
"0.6746616",
"0.67423314",
"0.67410284",
"0.67340887",
"0.6729393",
"0.6725142",
"0.6724164",
"0.6723736",
"0.671546",
"0.6714432",
"0.67060906",
"0.6702911",
"0.6702277",
"0.66980755",
"0.6697367",
"0.6684613",
"0.66797566",
"0.66759795",
"0.667422",
"0.6673895",
"0.66738915",
"0.66646856",
"0.6664298",
"0.6664133",
"0.6658782",
"0.6648001",
"0.66479003",
"0.6646913",
"0.66453505",
"0.6643581",
"0.6643048",
"0.66347784",
"0.6632129",
"0.66292876",
"0.66215074",
"0.66198397",
"0.66181207",
"0.6616842",
"0.6611718",
"0.6607748",
"0.6603575",
"0.65967786",
"0.6595983",
"0.6595492",
"0.6594901",
"0.65942943",
"0.6590991",
"0.6584193",
"0.65840226",
"0.6581837",
"0.6581565",
"0.65814227",
"0.65801805",
"0.65770453",
"0.6573808",
"0.6570653",
"0.65698993",
"0.6568904",
"0.6568293",
"0.6567575",
"0.65675193"
] | 0.75993085 | 0 |
Deprecated: Use ListRepositoryRes.ProtoReflect.Descriptor instead. | Устарело: используйте ListRepositoryRes.ProtoReflect.Descriptor вместо этого. | func (*ListRepositoryRes) Descriptor() ([]byte, []int) {
return file_api_ops_proto_rawDescGZIP(), []int{4}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (*ListRepositoryReq) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListMetadataRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{23}\n}",
"func (*ListMetadataResponse) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{24}\n}",
"func (*ListRefsResponse) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{16}\n}",
"func (*ListRepositoriesRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_artifactregistry_v1_repository_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListRepositoriesRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{12}\n}",
"func (*ListRefsRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{15}\n}",
"func (*List) Descriptor() ([]byte, []int) {\n\treturn file_proto_ssql_proto_rawDescGZIP(), []int{11}\n}",
"func (*ListRepositoriesResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_artifactregistry_v1_repository_proto_rawDescGZIP(), []int{2}\n}",
"func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListRepositoriesRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{12, 0}\n}",
"func (*FindRepositories) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{34}\n}",
"func (*Repository) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{9}\n}",
"func (RepositoryVisibilityEnum_RepositoryVisibility) EnumDescriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{8, 0}\n}",
"func (*ListReleaseRes) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{14}\n}",
"func (*Ref) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{13}\n}",
"func (*FindRepositories_Response) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{34, 0}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{15}\n}",
"func (*RepositoryVisibilityEnum) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{8}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_store_store_proto_rawDescGZIP(), []int{13}\n}",
"func (*ListVariableRes) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{12}\n}",
"func (*FindRemoteRepositoryResponse) Descriptor() ([]byte, []int) {\n\treturn file_remote_proto_rawDescGZIP(), []int{3}\n}",
"func (*SetRepository) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{14}\n}",
"func (*ListInstancesResponse) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{10}\n}",
"func (*IntegrationChangeHistoryListResp) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{31}\n}",
"func (*ListCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{164}\n}",
"func (*ListIpPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_containerregistry_v1_registry_service_proto_rawDescGZIP(), []int{11}\n}",
"func (*ListOptions) Descriptor() ([]byte, []int) {\n\treturn file_proto_store_store_proto_rawDescGZIP(), []int{11}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{1}\n}",
"func (*SyncPinnedRepositoryResponse) Descriptor() ([]byte, []int) {\n\treturn file_pinnedRepository_pinnedRepository_proto_rawDescGZIP(), []int{2}\n}",
"func (*Description) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{7}\n}",
"func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}",
"func (*RegistrationListRes) Descriptor() ([]byte, []int) {\n\treturn file_registration_proto_rawDescGZIP(), []int{24}\n}",
"func (*QueryPlanStatusResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{25}\n}",
"func (*InstanceMetadata) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{20}\n}",
"func (*ListNotification) Descriptor() ([]byte, []int) {\n\treturn file_infra_grpc_notification_proto_rawDescGZIP(), []int{1}\n}",
"func (*Instance) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{7}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_versions_v1_versions_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_versions_v1_versions_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListInstancesRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{9}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_task_v1_task_proto_rawDescGZIP(), []int{6}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_mods_v1_mods_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_weather_proto_rawDescGZIP(), []int{17}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_bucketsd_pb_bucketsd_proto_rawDescGZIP(), []int{3}\n}",
"func (Repository_Format) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_devtools_artifactregistry_v1_repository_proto_rawDescGZIP(), []int{0, 0}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_wallet_proto_rawDescGZIP(), []int{8}\n}",
"func (*FlagsListResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_resources_proto_rawDescGZIP(), []int{22}\n}",
"func (*RepositoryDraft) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_reference_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_rpc_accord_proto_rawDescGZIP(), []int{7}\n}",
"func (*DescribeRepositoryReq) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{13}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{3}\n}",
"func (*RegisterInstanceResponse) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{8}\n}",
"func (*GetListResponse) Descriptor() ([]byte, []int) {\n\treturn file_parser_company_proto_rawDescGZIP(), []int{15}\n}",
"func (*ComputeRepositoryDiffRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{22}\n}",
"func (*ListArtifactStructType) Descriptor() ([]byte, []int) {\n\treturn file_ml_metadata_proto_metadata_store_proto_rawDescGZIP(), []int{16}\n}",
"func (*ListNotificationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{63}\n}",
"func (*GetRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{13}\n}",
"func (*ListPodsRequest) Descriptor() ([]byte, []int) {\n\treturn file_viz_proto_rawDescGZIP(), []int{7}\n}",
"func (*Repository) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_artifactregistry_v1_repository_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListProjectsResponse) Descriptor() ([]byte, []int) {\n\treturn file_web_proto_rawDescGZIP(), []int{1}\n}",
"func (*PortList) Descriptor() ([]byte, []int) {\n\treturn file_rpc_proto_rawDescGZIP(), []int{2}\n}",
"func (*DescribeInstanceResponse) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{29}\n}",
"func (*ListModelReferencesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{105}\n}",
"func (*ListPodsResponse) Descriptor() ([]byte, []int) {\n\treturn file_viz_proto_rawDescGZIP(), []int{8}\n}",
"func (*ListModelVersionsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{88}\n}",
"func (*ListNodeSelectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_spire_server_datastore_datastore_proto_rawDescGZIP(), []int{23}\n}",
"func (*SubscriptionList) Descriptor() ([]byte, []int) {\n\treturn file_proto_gnmi_gnmi_proto_rawDescGZIP(), []int{12}\n}",
"func (*ListResonse) Descriptor() ([]byte, []int) {\n\treturn file_cache_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListNodeSelectorsResponse) Descriptor() ([]byte, []int) {\n\treturn file_spire_server_datastore_datastore_proto_rawDescGZIP(), []int{24}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_v1_proto_rawDescGZIP(), []int{7}\n}",
"func (*Type_ListType) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 0}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_mods_v1_mods_proto_rawDescGZIP(), []int{0}\n}",
"func (*UpdateIpPermissionMetadata) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_containerregistry_v1_registry_service_proto_rawDescGZIP(), []int{14}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_store_store_proto_rawDescGZIP(), []int{12}\n}",
"func (*RunList) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{6}\n}",
"func (*FindRemoteRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_remote_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListProjectsRequest) Descriptor() ([]byte, []int) {\n\treturn file_web_proto_rawDescGZIP(), []int{0}\n}",
"func (*CodeLens) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{164}\n}",
"func (*ReleaseListResponse) Descriptor() ([]byte, []int) {\n\treturn file_release_proto_rawDescGZIP(), []int{17}\n}",
"func (*GetRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_artifactregistry_v1_repository_proto_rawDescGZIP(), []int{3}\n}",
"func (*SubscriptionList) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{103}\n}",
"func (*SetRepository_Response) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{14, 0}\n}",
"func (*Resource) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{4}\n}",
"func (*RepositoryNamedIdentification) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{10}\n}",
"func (*ListService) Descriptor() ([]byte, []int) {\n\treturn file_v1_proto_rawDescGZIP(), []int{6}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_fabl_v1_item_service_proto_rawDescGZIP(), []int{7}\n}",
"func (*DetachMetadataRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{22}\n}",
"func (*UserListByIdsRep) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{12}\n}",
"func (*MemberLevelListResp) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{49}\n}",
"func (*ReadTensorboardUsageResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_tensorboard_service_proto_rawDescGZIP(), []int{7}\n}",
"func (*OperationsListResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_resources_proto_rawDescGZIP(), []int{51}\n}",
"func (*ListRegistrationEntriesResponse) Descriptor() ([]byte, []int) {\n\treturn file_spire_server_datastore_datastore_proto_rawDescGZIP(), []int{47}\n}",
"func (*LeaderboardList) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{50}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_weather_proto_rawDescGZIP(), []int{10}\n}",
"func (*GetListServersRequest) Descriptor() ([]byte, []int) {\n\treturn file_services_core_protobuf_servers_proto_rawDescGZIP(), []int{18}\n}"
] | [
"0.702423",
"0.6973052",
"0.69654864",
"0.6897099",
"0.68097436",
"0.67948705",
"0.6777074",
"0.6746349",
"0.67213786",
"0.668595",
"0.6660488",
"0.66343534",
"0.65886915",
"0.65775126",
"0.65540314",
"0.65510374",
"0.6541655",
"0.651642",
"0.6510909",
"0.6510901",
"0.650161",
"0.6494593",
"0.64935195",
"0.64923024",
"0.6469338",
"0.64681154",
"0.6467863",
"0.64619243",
"0.646142",
"0.64480144",
"0.644486",
"0.6444526",
"0.64363545",
"0.64333355",
"0.64314693",
"0.6430751",
"0.6430325",
"0.6429717",
"0.6428138",
"0.6427618",
"0.6427161",
"0.64264375",
"0.642475",
"0.6423276",
"0.6421993",
"0.64219487",
"0.64209074",
"0.64196736",
"0.64149845",
"0.6411303",
"0.64061755",
"0.64050573",
"0.640342",
"0.6400732",
"0.63982075",
"0.6392678",
"0.6389589",
"0.63869905",
"0.63855505",
"0.6384645",
"0.6381376",
"0.6377445",
"0.6374867",
"0.63743293",
"0.6367827",
"0.6366193",
"0.63604945",
"0.63599765",
"0.6358973",
"0.6358797",
"0.6351663",
"0.63514686",
"0.6348778",
"0.6347781",
"0.634558",
"0.6344705",
"0.63433456",
"0.63409036",
"0.633264",
"0.6331624",
"0.6330982",
"0.63234377",
"0.6322296",
"0.6320312",
"0.6318112",
"0.6317079",
"0.6316992",
"0.631463",
"0.6311785",
"0.6308693",
"0.6305048",
"0.63047165",
"0.63024676",
"0.63024193",
"0.6300963",
"0.6300939",
"0.6295663",
"0.62945855",
"0.6290005",
"0.62890077"
] | 0.734723 | 0 |
Deprecated: Use ListJobReq.ProtoReflect.Descriptor instead. | Устарело: используйте ListJobReq.ProtoReflect.Descriptor вместо этого. | func (*ListJobReq) Descriptor() ([]byte, []int) {
return file_api_ops_proto_rawDescGZIP(), []int{7}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (*ListJobsRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_manager_http_grpc_job_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListBatchJobResultsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{7}\n}",
"func (*GetJobsRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{5}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{5}\n}",
"func (*ListMessagesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{14}\n}",
"func (*ListScheduledWorkloadsRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_protoc_api_list_scheduled_workloads_request_message_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_task_v1_task_proto_rawDescGZIP(), []int{5}\n}",
"func (*RunDisconnectedServicesJobReq) Descriptor() ([]byte, []int) {\n\treturn file_external_applications_applications_proto_rawDescGZIP(), []int{15}\n}",
"func (*ListJobsResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_manager_http_grpc_job_proto_rawDescGZIP(), []int{4}\n}",
"func (*AddBatchJobOperationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{5}\n}",
"func (*ListTimersRequest) Descriptor() ([]byte, []int) {\n\treturn file_list_timers_proto_rawDescGZIP(), []int{0}\n}",
"func (*RejectedJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{12}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{14}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_fabl_v1_item_service_proto_rawDescGZIP(), []int{6}\n}",
"func (*ListProofRequestsRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{34}\n}",
"func (*RunDeleteDisconnectedServicesJobReq) Descriptor() ([]byte, []int) {\n\treturn file_external_applications_applications_proto_rawDescGZIP(), []int{19}\n}",
"func (*MutateBatchJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{0}\n}",
"func (*WaitListsRequest) Descriptor() ([]byte, []int) {\n\treturn file_resources_proto_rawDescGZIP(), []int{77}\n}",
"func (*BatchRequest_Request) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_buildbucket_proto_builds_service_proto_rawDescGZIP(), []int{3, 0}\n}",
"func (*BatchRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_buildbucket_proto_builds_service_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{3}\n}",
"func (*RevokeJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{20}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{0}\n}",
"func (*RunBatchJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListJobRes) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{8}\n}",
"func (*CancelJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_jobmanager_proto_rawDescGZIP(), []int{3}\n}",
"func (*SimpleListRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{6}\n}",
"func (*WatchProvisioningApprovalRequestsRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_devices_proto_v1alpha_provisioning_approval_request_service_proto_rawDescGZIP(), []int{7}\n}",
"func (*PatchTasksRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{154}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_rpc_accord_proto_rawDescGZIP(), []int{7}\n}",
"func (*ClientBatchListRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_client_batch_pb2_client_batch_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListMetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_analysis_proto_v1_metrics_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListProvisioningApprovalRequestsRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_devices_proto_v1alpha_provisioning_approval_request_service_proto_rawDescGZIP(), []int{3}\n}",
"func (*ApprovedJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{8}\n}",
"func (*DeleteJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{18}\n}",
"func (*ListThingsRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_service_proto_rawDescGZIP(), []int{4}\n}",
"func (*CancelClusJobsRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{12}\n}",
"func (*ListTasksRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{153}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_versions_v1_versions_proto_rawDescGZIP(), []int{0}\n}",
"func (*JobRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_jobproc_worker_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{2}\n}",
"func (*WaitListRequest) Descriptor() ([]byte, []int) {\n\treturn file_resources_proto_rawDescGZIP(), []int{78}\n}",
"func (*ListServicesRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{4}\n}",
"func (*AddJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_manager_http_grpc_job_proto_rawDescGZIP(), []int{1}\n}",
"func (*PostConceptMappingJobsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{44}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_v1_proto_rawDescGZIP(), []int{1}\n}",
"func (*GetClientListRequest) Descriptor() ([]byte, []int) {\n\treturn file_messaging_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListModelVersionsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{88}\n}",
"func (*GetJobStateRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_jobmanager_proto_rawDescGZIP(), []int{2}\n}",
"func (*CancelledJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{14}\n}",
"func (*GetListRequest) Descriptor() ([]byte, []int) {\n\treturn file_parser_company_proto_rawDescGZIP(), []int{14}\n}",
"func (*ListModelReferencesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{105}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_store_store_proto_rawDescGZIP(), []int{12}\n}",
"func (*DeleteJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListConversationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{6}\n}",
"func (*BatchUpdateReferencesRequest_Request) Descriptor() ([]byte, []int) {\n\treturn file_pkg_proto_icas_icas_proto_rawDescGZIP(), []int{1, 0}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListNotificationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{63}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_bucketsd_pb_bucketsd_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_wallet_proto_rawDescGZIP(), []int{7}\n}",
"func (*ListTaskRequest) Descriptor() ([]byte, []int) {\n\treturn file_protos_task_task_proto_rawDescGZIP(), []int{0}\n}",
"func (*RunJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_jobmanager_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListModelEvaluationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_automl_v1_service_proto_rawDescGZIP(), []int{19}\n}",
"func (*ListPodsRequest) Descriptor() ([]byte, []int) {\n\treturn file_viz_proto_rawDescGZIP(), []int{7}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_mods_v1_mods_proto_rawDescGZIP(), []int{0}\n}",
"func (*GetJobsReply) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{7}\n}",
"func (*GrowthChangeHistoryListReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{20}\n}",
"func (*ListChartReq) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{27}\n}",
"func (*BatchGetProvisioningApprovalRequestsRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_devices_proto_v1alpha_provisioning_approval_request_service_proto_rawDescGZIP(), []int{1}\n}",
"func (*GetMengerListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_menger_menger_proto_rawDescGZIP(), []int{13}\n}",
"func (*WatchProvisioningApprovalRequestRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_devices_proto_v1alpha_provisioning_approval_request_service_proto_rawDescGZIP(), []int{5}\n}",
"func (*MutateBatchJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{2}\n}",
"func (*Job) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_scheduler_appengine_messages_config_proto_rawDescGZIP(), []int{4}\n}",
"func (*ScheduleWorkloadRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_protoc_api_schedule_workload_request_message_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListVersionsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{9}\n}",
"func (*BatchUpdateIngressRulesRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{22}\n}",
"func (*ProductsListRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_proto_productslist_products_list_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListRulesRequest) Descriptor() ([]byte, []int) {\n\treturn file_plugin_proto_rawDescGZIP(), []int{0}\n}",
"func (*RevokeJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{21}\n}",
"func (*PatchKeysRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{74}\n}",
"func (*WatchLimitsRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_limit_service_proto_rawDescGZIP(), []int{7}\n}",
"func (*MutateBatchJobResult) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListLimitsRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_limit_service_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListReportRequest) Descriptor() ([]byte, []int) {\n\treturn file_report_proto_rawDescGZIP(), []int{9}\n}",
"func (*ListKeysRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{70}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_weather_proto_rawDescGZIP(), []int{10}\n}",
"func (*MemberTaskListReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{119}\n}",
"func (*PluginListRequest) Descriptor() ([]byte, []int) {\n\treturn file_plugin_proto_rawDescGZIP(), []int{7}\n}",
"func (*ListBatchJobResultsResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{8}\n}",
"func (*ListLeaderboardRecordsRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{61}\n}",
"func (*ListIngressRulesRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{20}\n}",
"func (*ListApikeysRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListTodoRequest) Descriptor() ([]byte, []int) {\n\treturn file_todo_proto_rawDescGZIP(), []int{7}\n}",
"func (*CancelAllJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_jobmanager_proto_rawDescGZIP(), []int{4}\n}",
"func (*RefreshCallQueueRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_RefreshCallQueueProtocol_proto_rawDescGZIP(), []int{0}\n}",
"func (*IntegrationChangeHistoryListReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{29}\n}",
"func (*GetJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_web_proto_rawDescGZIP(), []int{6}\n}",
"func (*WatchLimitRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_limit_service_proto_rawDescGZIP(), []int{5}\n}",
"func (*GetListServersRequest) Descriptor() ([]byte, []int) {\n\treturn file_services_core_protobuf_servers_proto_rawDescGZIP(), []int{18}\n}",
"func (*TasksListRequest) Descriptor() ([]byte, []int) {\n\treturn file_infra_grpc_task_proto_rawDescGZIP(), []int{2}\n}"
] | [
"0.7615721",
"0.7097827",
"0.7036084",
"0.6987018",
"0.6931635",
"0.6919937",
"0.69092584",
"0.68779176",
"0.6857533",
"0.68378437",
"0.6824317",
"0.681413",
"0.6808692",
"0.68012184",
"0.67888135",
"0.678158",
"0.6770477",
"0.67660964",
"0.67263854",
"0.6692397",
"0.6688013",
"0.66863364",
"0.6683824",
"0.6682858",
"0.6674408",
"0.6661591",
"0.66446936",
"0.6623658",
"0.66172194",
"0.66162056",
"0.6613643",
"0.6595268",
"0.6591197",
"0.6584725",
"0.6569804",
"0.6569292",
"0.6564783",
"0.6556901",
"0.655035",
"0.65500337",
"0.65488464",
"0.65375876",
"0.6532796",
"0.65299845",
"0.6525475",
"0.6523994",
"0.65227425",
"0.6521638",
"0.6521205",
"0.6500985",
"0.6495546",
"0.64931995",
"0.64905715",
"0.64886767",
"0.648388",
"0.6483382",
"0.6479993",
"0.64771616",
"0.6472035",
"0.6471805",
"0.64524364",
"0.64484143",
"0.64463687",
"0.6444814",
"0.6443936",
"0.644214",
"0.6439938",
"0.64398116",
"0.64354676",
"0.64342487",
"0.64278626",
"0.64250606",
"0.6424359",
"0.6424018",
"0.6415698",
"0.64135796",
"0.6412668",
"0.6412542",
"0.6410519",
"0.64072984",
"0.6404831",
"0.64013255",
"0.6398987",
"0.63945204",
"0.6391098",
"0.63903326",
"0.6388762",
"0.6381386",
"0.63803273",
"0.63753235",
"0.6373257",
"0.63730216",
"0.6371386",
"0.6366825",
"0.6366025",
"0.6365325",
"0.6364127",
"0.63602644",
"0.6357536",
"0.6350838"
] | 0.75819296 | 1 |
Deprecated: Use ListJobRes.ProtoReflect.Descriptor instead. | Устарело: используйте ListJobRes.ProtoReflect.Descriptor вместо этого. | func (*ListJobRes) Descriptor() ([]byte, []int) {
return file_api_ops_proto_rawDescGZIP(), []int{8}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (*ListJobsRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_manager_http_grpc_job_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListJobReq) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{7}\n}",
"func (*ListJobsResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_manager_http_grpc_job_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListBatchJobResultsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{7}\n}",
"func (*MutateBatchJobResult) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListBatchJobResultsResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{8}\n}",
"func (*RevokeJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{21}\n}",
"func (*GetJobsReply) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{7}\n}",
"func (*BatchJobResult) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{9}\n}",
"func (*JobResources) Descriptor() ([]byte, []int) {\n\treturn file_toit_model_job_proto_rawDescGZIP(), []int{3}\n}",
"func (*RunDisconnectedServicesJobReq) Descriptor() ([]byte, []int) {\n\treturn file_external_applications_applications_proto_rawDescGZIP(), []int{15}\n}",
"func (*ListScheduledWorkloadsRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_protoc_api_list_scheduled_workloads_request_message_proto_rawDescGZIP(), []int{0}\n}",
"func (*MutateBatchJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListTimersRequest) Descriptor() ([]byte, []int) {\n\treturn file_list_timers_proto_rawDescGZIP(), []int{0}\n}",
"func (*RefreshCallQueueResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_RefreshCallQueueProtocol_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListMessagesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{14}\n}",
"func (*Job) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_scheduler_appengine_messages_config_proto_rawDescGZIP(), []int{4}\n}",
"func (*GetJobsRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{5}\n}",
"func (*RunDeleteDisconnectedServicesJobReq) Descriptor() ([]byte, []int) {\n\treturn file_external_applications_applications_proto_rawDescGZIP(), []int{19}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_task_v1_task_proto_rawDescGZIP(), []int{5}\n}",
"func (*RejectedJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{13}\n}",
"func (*RejectedJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{12}\n}",
"func (*AddJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_manager_http_grpc_job_proto_rawDescGZIP(), []int{2}\n}",
"func (*DeleteJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{19}\n}",
"func (*ListMessagesResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{15}\n}",
"func (*AddBatchJobOperationsResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{6}\n}",
"func (*BatchJobOperation) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{1}\n}",
"func (*JobReply) Descriptor() ([]byte, []int) {\n\treturn file_proto_jobmanager_proto_rawDescGZIP(), []int{1}\n}",
"func (*ApprovedJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{9}\n}",
"func (*Job) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{6}\n}",
"func (*ListMetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_analysis_proto_v1_metrics_proto_rawDescGZIP(), []int{0}\n}",
"func (*DeleteJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_grpc_proto_rawDescGZIP(), []int{4}\n}",
"func (*AddBatchJobOperationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{5}\n}",
"func (*QueryPlanStatusResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{25}\n}",
"func (*JobDetail) Descriptor() ([]byte, []int) {\n\treturn file_proto_jobproc_worker_proto_rawDescGZIP(), []int{3}\n}",
"func (*RevokeJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{20}\n}",
"func (StandardPTransforms_DeprecatedPrimitives) EnumDescriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{4, 1}\n}",
"func (*RunDisconnectedServicesJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_external_applications_applications_proto_rawDescGZIP(), []int{16}\n}",
"func (*BatchDedicatedResources) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1beta1_machine_resources_proto_rawDescGZIP(), []int{3}\n}",
"func (*RunBatchJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{0}\n}",
"func (*CancelledJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{15}\n}",
"func (*Notification_BenchmarkJobMessage) Descriptor() ([]byte, []int) {\n\treturn file_isuxportal_resources_notification_proto_rawDescGZIP(), []int{0, 0}\n}",
"func (*PeriodicJobInfo) Descriptor() ([]byte, []int) {\n\treturn file_external_applications_applications_proto_rawDescGZIP(), []int{12}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_fabl_v1_item_service_proto_rawDescGZIP(), []int{6}\n}",
"func (*Job) Descriptor() ([]byte, []int) {\n\treturn file_proto_jobproc_worker_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListTimersResponse) Descriptor() ([]byte, []int) {\n\treturn file_list_timers_proto_rawDescGZIP(), []int{1}\n}",
"func (*RunDeleteDisconnectedServicesJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_external_applications_applications_proto_rawDescGZIP(), []int{20}\n}",
"func (*MutateBatchJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_services_batch_job_service_proto_rawDescGZIP(), []int{0}\n}",
"func (*WaitListsRequest) Descriptor() ([]byte, []int) {\n\treturn file_resources_proto_rawDescGZIP(), []int{77}\n}",
"func (*PatchTasksRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{154}\n}",
"func (TryJob_Result) EnumDescriptor() ([]byte, []int) {\n\treturn file_rpc_proto_rawDescGZIP(), []int{1, 0}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{14}\n}",
"func (*JobReply) Descriptor() ([]byte, []int) {\n\treturn file_job_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListTasksRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{153}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_task_v1_task_proto_rawDescGZIP(), []int{6}\n}",
"func (*JobId) Descriptor() ([]byte, []int) {\n\treturn file_proto_jobproc_worker_proto_rawDescGZIP(), []int{0}\n}",
"func (*List) Descriptor() ([]byte, []int) {\n\treturn file_proto_ssql_proto_rawDescGZIP(), []int{11}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{3}\n}",
"func (*FinishedInvocationList) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_scheduler_appengine_internal_db_proto_rawDescGZIP(), []int{1}\n}",
"func (*GetJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_web_proto_rawDescGZIP(), []int{7}\n}",
"func (*RefreshCallQueueRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_RefreshCallQueueProtocol_proto_rawDescGZIP(), []int{0}\n}",
"func (*FlagsListResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_resources_proto_rawDescGZIP(), []int{22}\n}",
"func (*PostConceptMappingJobsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{44}\n}",
"func (*CreateJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_grpc_proto_rawDescGZIP(), []int{2}\n}",
"func (*ProposeJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{17}\n}",
"func (*JobMessage) Descriptor() ([]byte, []int) {\n\treturn file_pkg_manager_http_grpc_job_proto_rawDescGZIP(), []int{0}\n}",
"func (*CancelPlanResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{23}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_rpc_accord_proto_rawDescGZIP(), []int{7}\n}",
"func (*GetListServersRequest) Descriptor() ([]byte, []int) {\n\treturn file_services_core_protobuf_servers_proto_rawDescGZIP(), []int{18}\n}",
"func (*JobInfo) Descriptor() ([]byte, []int) {\n\treturn file_toit_model_job_proto_rawDescGZIP(), []int{22}\n}",
"func (*ReceiveBenchmarkJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_xsuportal_services_bench_receiving_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListModelReferencesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{105}\n}",
"func (*BatchRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_buildbucket_proto_builds_service_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListNotification) Descriptor() ([]byte, []int) {\n\treturn file_infra_grpc_notification_proto_rawDescGZIP(), []int{1}\n}",
"func (JobType) EnumDescriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{5}\n}",
"func (*ListRulesRequest) Descriptor() ([]byte, []int) {\n\treturn file_plugin_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListTaskRequest) Descriptor() ([]byte, []int) {\n\treturn file_protos_task_task_proto_rawDescGZIP(), []int{0}\n}",
"func (*ApiWarning) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_resources_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListLeaderboardRecordsRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{61}\n}",
"func (Retry_Conf_Grpc_RetryOn) EnumDescriptor() ([]byte, []int) {\n\treturn file_api_mesh_v1alpha1_retry_proto_rawDescGZIP(), []int{0, 0, 3, 0}\n}",
"func (*ChangeReport) Descriptor() ([]byte, []int) {\n\treturn file_google_api_servicemanagement_v1_resources_proto_rawDescGZIP(), []int{6}\n}",
"func (*ListMetricsResponse) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_analysis_proto_v1_metrics_proto_rawDescGZIP(), []int{1}\n}",
"func (*MultiConceptMappingJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{47}\n}",
"func (*Job) Descriptor() ([]byte, []int) {\n\treturn file_job_proto_rawDescGZIP(), []int{0}\n}",
"func (*WaitListResponse) Descriptor() ([]byte, []int) {\n\treturn file_resources_proto_rawDescGZIP(), []int{79}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_versions_v1_versions_proto_rawDescGZIP(), []int{0}\n}",
"func (*TryJob) Descriptor() ([]byte, []int) {\n\treturn file_rpc_proto_rawDescGZIP(), []int{1}\n}",
"func (*CancelClusJobsReply) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_clusrun_proto_rawDescGZIP(), []int{13}\n}",
"func (*BatchResponse) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_buildbucket_proto_builds_service_proto_rawDescGZIP(), []int{4}\n}",
"func (*JobFeatures) Descriptor() ([]byte, []int) {\n\treturn file_toit_model_job_proto_rawDescGZIP(), []int{4}\n}",
"func (*DeleteJobRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{18}\n}",
"func (*PluginListRequest) Descriptor() ([]byte, []int) {\n\treturn file_plugin_proto_rawDescGZIP(), []int{7}\n}",
"func (*RegistrationListRes) Descriptor() ([]byte, []int) {\n\treturn file_registration_proto_rawDescGZIP(), []int{24}\n}",
"func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}",
"func (*NewJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_versiontracker_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListServicesRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{4}\n}",
"func (*Job) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{5}\n}",
"func (TryJob_Status) EnumDescriptor() ([]byte, []int) {\n\treturn file_rpc_proto_rawDescGZIP(), []int{1, 1}\n}"
] | [
"0.71542174",
"0.7084673",
"0.70369273",
"0.6963372",
"0.6837766",
"0.67902213",
"0.6754233",
"0.67203486",
"0.66875696",
"0.66731966",
"0.66695327",
"0.66659844",
"0.6660195",
"0.66439104",
"0.66189617",
"0.66168547",
"0.6597521",
"0.6573736",
"0.65519154",
"0.6539392",
"0.65354234",
"0.6526745",
"0.652067",
"0.65187734",
"0.6516527",
"0.6502582",
"0.6483641",
"0.64575934",
"0.6445682",
"0.64313823",
"0.64292455",
"0.6424732",
"0.6422292",
"0.64098275",
"0.6406528",
"0.6393624",
"0.6392143",
"0.6387365",
"0.6380746",
"0.6376143",
"0.6376087",
"0.6373523",
"0.63664836",
"0.6362359",
"0.63576347",
"0.6356432",
"0.6333596",
"0.63301146",
"0.63277704",
"0.63277185",
"0.6327642",
"0.6318567",
"0.6317996",
"0.6313141",
"0.6304122",
"0.6303218",
"0.62903035",
"0.62896764",
"0.6289289",
"0.62891555",
"0.6287979",
"0.6282203",
"0.6281544",
"0.6280076",
"0.6276984",
"0.62714726",
"0.6270108",
"0.6270082",
"0.62657535",
"0.625994",
"0.6252817",
"0.6246291",
"0.6238839",
"0.6237658",
"0.6233476",
"0.6228247",
"0.62274253",
"0.622541",
"0.6221088",
"0.6219295",
"0.62171584",
"0.62131876",
"0.62090135",
"0.62088805",
"0.6205791",
"0.6205725",
"0.62041235",
"0.6204002",
"0.6199773",
"0.6194993",
"0.6190925",
"0.6189228",
"0.61854905",
"0.618235",
"0.6181854",
"0.61810565",
"0.61803544",
"0.6179427",
"0.6178322",
"0.6174971"
] | 0.73013926 | 0 |
Deprecated: Use ListVariableReq.ProtoReflect.Descriptor instead. | Устарело: используйте ListVariableReq.ProtoReflect.Descriptor вместо этого. | func (*ListVariableReq) Descriptor() ([]byte, []int) {
return file_api_ops_proto_rawDescGZIP(), []int{11}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (*ListVariableRes) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{12}\n}",
"func (*Variable) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_clouddebugger_v2_data_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{0}\n}",
"func (*CMsg_CVars_CVar) Descriptor() ([]byte, []int) {\n\treturn file_csgo_netmessages_proto_rawDescGZIP(), []int{7, 0}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{5}\n}",
"func (*CMsg_CVars_CVar) Descriptor() ([]byte, []int) {\n\treturn file_netmessages_proto_rawDescGZIP(), []int{7, 0}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{14}\n}",
"func (*ListMessagesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{14}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_task_v1_task_proto_rawDescGZIP(), []int{5}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_rpc_accord_proto_rawDescGZIP(), []int{7}\n}",
"func (*CMsg_CVars) Descriptor() ([]byte, []int) {\n\treturn file_csgo_netmessages_proto_rawDescGZIP(), []int{7}\n}",
"func (*Variable) Descriptor() ([]byte, []int) {\n\treturn file_proto_v1_synthetics_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_fabl_v1_item_service_proto_rawDescGZIP(), []int{6}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_weather_proto_rawDescGZIP(), []int{10}\n}",
"func (*CMsg_CVars) Descriptor() ([]byte, []int) {\n\treturn file_netmessages_proto_rawDescGZIP(), []int{7}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_versions_v1_versions_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_v1_proto_rawDescGZIP(), []int{1}\n}",
"func (*MemberListReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_store_store_proto_rawDescGZIP(), []int{12}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_wallet_proto_rawDescGZIP(), []int{7}\n}",
"func (*ListModelReferencesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{105}\n}",
"func (*ListNotificationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{63}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListConceptLanguagesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{49}\n}",
"func (*MemberTagListReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{110}\n}",
"func (*ListMetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_analysis_proto_v1_metrics_proto_rawDescGZIP(), []int{0}\n}",
"func (*MemberLevelListReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{47}\n}",
"func (*CMsg_CVars) Descriptor() ([]byte, []int) {\n\treturn file_artifact_networkbasetypes_proto_rawDescGZIP(), []int{4}\n}",
"func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_mods_v1_mods_proto_rawDescGZIP(), []int{0}\n}",
"func (*MemberReceiveAddressListReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{83}\n}",
"func (*SimpleListRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{6}\n}",
"func (*ListDataAttributeBindingsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dataplex_v1_data_taxonomy_proto_rawDescGZIP(), []int{18}\n}",
"func (*ListTodoRequest) Descriptor() ([]byte, []int) {\n\treturn file_todo_proto_rawDescGZIP(), []int{7}\n}",
"func (*CMsg_CVars_CVar) Descriptor() ([]byte, []int) {\n\treturn file_artifact_networkbasetypes_proto_rawDescGZIP(), []int{4, 0}\n}",
"func (*FeedbackRequest) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{10}\n}",
"func (*PatchConceptLanguagesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{50}\n}",
"func (*ProductsListRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_proto_productslist_products_list_proto_rawDescGZIP(), []int{0}\n}",
"func (*PeopleListRequest) Descriptor() ([]byte, []int) {\n\treturn file_sil_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{164}\n}",
"func (*MemberRuleSettingListReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{92}\n}",
"func (*Variable) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{9}\n}",
"func (*ListAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{2}\n}",
"func (*UserListRequest) Descriptor() ([]byte, []int) {\n\treturn file_presence_proto_rawDescGZIP(), []int{1}\n}",
"func (*WatchlistRequest) Descriptor() ([]byte, []int) {\n\treturn file_golang_pkg_proto_movies_movies_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListTimersRequest) Descriptor() ([]byte, []int) {\n\treturn file_list_timers_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListConversationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{6}\n}",
"func (*DeleteFeedbackRequest) Descriptor() ([]byte, []int) {\n\treturn file_feedbackreq_proto_rawDescGZIP(), []int{6}\n}",
"func (*BatchUpdateReferencesRequest_Request) Descriptor() ([]byte, []int) {\n\treturn file_pkg_proto_icas_icas_proto_rawDescGZIP(), []int{1, 0}\n}",
"func (*ListCredentialsRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{22}\n}",
"func (*ListProofRequestsRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{34}\n}",
"func (*Type_ListType) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 0}\n}",
"func (*ListChartReq) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{27}\n}",
"func (*ListConceptsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{31}\n}",
"func (*ListRefsRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{15}\n}",
"func (*ListServicesRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListChecksRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListIpPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_containerregistry_v1_registry_service_proto_rawDescGZIP(), []int{11}\n}",
"func (*UpgradeRuntimeRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_notebooks_v1_managed_service_proto_rawDescGZIP(), []int{9}\n}",
"func (*MemberStatisticsInfoListReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{101}\n}",
"func (*WaitListsRequest) Descriptor() ([]byte, []int) {\n\treturn file_resources_proto_rawDescGZIP(), []int{77}\n}",
"func (*ListServicesRequest) Descriptor() ([]byte, []int) {\n\treturn file_viz_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListDeviceRequest) Descriptor() ([]byte, []int) {\n\treturn file_device_proto_rawDescGZIP(), []int{0}\n}",
"func (*GetWatchlistRequest) Descriptor() ([]byte, []int) {\n\treturn file_golang_pkg_proto_movies_movies_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListTeamsRequest) Descriptor() ([]byte, []int) {\n\treturn file_mods_v1_mods_proto_rawDescGZIP(), []int{18}\n}",
"func (*ListModelTypesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{96}\n}",
"func (*UpdatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{9}\n}",
"func (*ListWeaveScopePodsRequest) Descriptor() ([]byte, []int) {\n\treturn file_alameda_api_v1alpha1_datahub_weavescope_services_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListLimitsRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_limit_service_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListenRequest) Descriptor() ([]byte, []int) {\n\treturn file_faultinjector_proto_rawDescGZIP(), []int{8}\n}",
"func (*MemberTaskListReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{119}\n}",
"func (*UserListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_hezzel_proto_rawDescGZIP(), []int{5}\n}",
"func (*ListTagsRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{28}\n}",
"func (*ListChannelMessagesRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{56}\n}",
"func (*ListModelsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{78}\n}",
"func (*ListPodsRequest) Descriptor() ([]byte, []int) {\n\treturn file_viz_proto_rawDescGZIP(), []int{7}\n}",
"func (*ListTasksRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{153}\n}",
"func (*ListModelVersionsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{88}\n}",
"func (*ListUserFriendReq) Descriptor() ([]byte, []int) {\n\treturn file_v1_friend_friend_proto_rawDescGZIP(), []int{6}\n}",
"func (*ListenRequest) Descriptor() ([]byte, []int) {\n\treturn file_threads_proto_rawDescGZIP(), []int{46}\n}",
"func (*PatchConceptsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{34}\n}",
"func (*GetClientListRequest) Descriptor() ([]byte, []int) {\n\treturn file_messaging_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListServicesRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_manage_grpc_service_proto_rawDescGZIP(), []int{0}\n}",
"func (*PatchAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListNodeSelectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_spire_server_datastore_datastore_proto_rawDescGZIP(), []int{23}\n}",
"func (*WatchLimitsRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_limit_service_proto_rawDescGZIP(), []int{7}\n}",
"func (*ListValue) Descriptor() ([]byte, []int) {\n\treturn file_chameleon_api_http_data_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListThingsRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_service_proto_rawDescGZIP(), []int{4}\n}",
"func (*GetListRequest) Descriptor() ([]byte, []int) {\n\treturn file_parser_company_proto_rawDescGZIP(), []int{14}\n}",
"func (*ListNodePlanningsRequest) Descriptor() ([]byte, []int) {\n\treturn file_alameda_api_v1alpha1_datahub_plannings_services_proto_rawDescGZIP(), []int{14}\n}",
"func (*ListKnowledgeGraphsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{42}\n}",
"func (*GetListServersRequest) Descriptor() ([]byte, []int) {\n\treturn file_services_core_protobuf_servers_proto_rawDescGZIP(), []int{18}\n}",
"func (*ListScopesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{108}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_bucketsd_pb_bucketsd_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListCertificateV1Request) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_certificate_api_ocp_certificate_api_proto_rawDescGZIP(), []int{6}\n}",
"func (*GetMengerListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_menger_menger_proto_rawDescGZIP(), []int{13}\n}",
"func (*ListStorageRequest) Descriptor() ([]byte, []int) {\n\treturn file_console_proto_rawDescGZIP(), []int{11}\n}",
"func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}"
] | [
"0.7003119",
"0.6571559",
"0.65231967",
"0.65141094",
"0.6512945",
"0.6493738",
"0.6483414",
"0.64811575",
"0.64797944",
"0.64721453",
"0.6448194",
"0.64414454",
"0.64365965",
"0.6429342",
"0.64083",
"0.63977855",
"0.63758576",
"0.63715404",
"0.63642275",
"0.6352802",
"0.635117",
"0.6336102",
"0.632986",
"0.62984383",
"0.6291143",
"0.62903583",
"0.62833977",
"0.6282236",
"0.6275633",
"0.62753654",
"0.62699443",
"0.6264017",
"0.62538284",
"0.62310135",
"0.6225462",
"0.62169725",
"0.6216825",
"0.62155837",
"0.62101793",
"0.62034404",
"0.620161",
"0.6186752",
"0.6182777",
"0.6181709",
"0.618055",
"0.617811",
"0.61717963",
"0.6170321",
"0.6166951",
"0.6161556",
"0.6159441",
"0.6158322",
"0.6157138",
"0.61512077",
"0.61464804",
"0.61425894",
"0.61400104",
"0.6138167",
"0.6133802",
"0.6133737",
"0.61330277",
"0.6121504",
"0.6120058",
"0.611995",
"0.6116482",
"0.6114691",
"0.61141825",
"0.611207",
"0.6111583",
"0.61106527",
"0.61077636",
"0.6105521",
"0.6102609",
"0.61025876",
"0.609972",
"0.60994554",
"0.60975933",
"0.609561",
"0.6093459",
"0.6089733",
"0.60876995",
"0.60871285",
"0.60861725",
"0.6083695",
"0.6080672",
"0.60792196",
"0.60770977",
"0.60726094",
"0.6069936",
"0.60695094",
"0.60674155",
"0.60644865",
"0.6060987",
"0.60590196",
"0.60585874",
"0.60571975",
"0.6057014",
"0.6056346",
"0.6052754",
"0.60525143"
] | 0.76881313 | 0 |
Deprecated: Use ListVariableRes.ProtoReflect.Descriptor instead. | Устарело: используйте ListVariableRes.ProtoReflect.Descriptor вместо этого. | func (*ListVariableRes) Descriptor() ([]byte, []int) {
return file_api_ops_proto_rawDescGZIP(), []int{12}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (*ListVariableReq) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{11}\n}",
"func (*Variable) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_clouddebugger_v2_data_proto_rawDescGZIP(), []int{3}\n}",
"func (*Type_ListType) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 0}\n}",
"func (*List) Descriptor() ([]byte, []int) {\n\treturn file_proto_ssql_proto_rawDescGZIP(), []int{11}\n}",
"func (*Variable) Descriptor() ([]byte, []int) {\n\treturn file_proto_v1_synthetics_proto_rawDescGZIP(), []int{2}\n}",
"func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}",
"func (*ListValue) Descriptor() ([]byte, []int) {\n\treturn file_chameleon_api_http_data_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{0}\n}",
"func (*CMsg_CVars_CVar) Descriptor() ([]byte, []int) {\n\treturn file_csgo_netmessages_proto_rawDescGZIP(), []int{7, 0}\n}",
"func (*CMsg_CVars) Descriptor() ([]byte, []int) {\n\treturn file_csgo_netmessages_proto_rawDescGZIP(), []int{7}\n}",
"func (*Decl) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListValue) Descriptor() ([]byte, []int) {\n\treturn file_proto_value_value_proto_rawDescGZIP(), []int{4}\n}",
"func (*CMsg_CVars_CVar) Descriptor() ([]byte, []int) {\n\treturn file_netmessages_proto_rawDescGZIP(), []int{7, 0}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_rpc_accord_proto_rawDescGZIP(), []int{7}\n}",
"func (*Variable) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{9}\n}",
"func (*FlagsListResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_resources_proto_rawDescGZIP(), []int{22}\n}",
"func (*CMsg_CVars) Descriptor() ([]byte, []int) {\n\treturn file_netmessages_proto_rawDescGZIP(), []int{7}\n}",
"func (x *fastReflection_EvidenceList) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_EvidenceList\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListMessagesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{14}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_task_v1_task_proto_rawDescGZIP(), []int{5}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{14}\n}",
"func (*CMsg_CVars) Descriptor() ([]byte, []int) {\n\treturn file_artifact_networkbasetypes_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListModelReferencesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{105}\n}",
"func (*PortList) Descriptor() ([]byte, []int) {\n\treturn file_rpc_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListMetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_analysis_proto_v1_metrics_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_weather_proto_rawDescGZIP(), []int{10}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_url_proto_rawDescGZIP(), []int{4}\n}",
"func (*GetDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{164}\n}",
"func (*ListDeleted) Descriptor() ([]byte, []int) {\n\treturn file_lists_events_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_versions_v1_versions_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListService) Descriptor() ([]byte, []int) {\n\treturn file_v1_proto_rawDescGZIP(), []int{6}\n}",
"func (*ListRefsRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{15}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_fabl_v1_item_service_proto_rawDescGZIP(), []int{6}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{15}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_task_v1_task_proto_rawDescGZIP(), []int{6}\n}",
"func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}",
"func (*Reference) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{3}\n}",
"func (*Vector) Descriptor() ([]byte, []int) {\n\treturn file_mitre_cvss_v3_cvss_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListOptions) Descriptor() ([]byte, []int) {\n\treturn file_proto_store_store_proto_rawDescGZIP(), []int{11}\n}",
"func (StatusMessage_Reference) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_devtools_clouddebugger_v2_data_proto_rawDescGZIP(), []int{1, 0}\n}",
"func (*ListUserFriendRsp_List) Descriptor() ([]byte, []int) {\n\treturn file_v1_friend_friend_proto_rawDescGZIP(), []int{7, 0}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_mods_v1_mods_proto_rawDescGZIP(), []int{0}\n}",
"func (*CMsg_CVars_CVar) Descriptor() ([]byte, []int) {\n\treturn file_artifact_networkbasetypes_proto_rawDescGZIP(), []int{4, 0}\n}",
"func (*RefreshCallQueueResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_RefreshCallQueueProtocol_proto_rawDescGZIP(), []int{1}\n}",
"func (*VfList) Descriptor() ([]byte, []int) {\n\treturn file_config_devmodel_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_v1_proto_rawDescGZIP(), []int{1}\n}",
"func (*RenewDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{6}\n}",
"func (*PortList) Descriptor() ([]byte, []int) {\n\treturn file_portdomain_proto_rawDescGZIP(), []int{2}\n}",
"func (*FlexibleRuleOperandInfo) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_common_user_lists_proto_rawDescGZIP(), []int{8}\n}",
"func (*CancelPlanResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{23}\n}",
"func (*ListConceptLanguagesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{49}\n}",
"func (*Vector) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{2}\n}",
"func (StandardPTransforms_DeprecatedPrimitives) EnumDescriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{4, 1}\n}",
"func (*ListAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_store_store_proto_rawDescGZIP(), []int{12}\n}",
"func (*ListChartReq) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{27}\n}",
"func (*MemberListReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{2}\n}",
"func (*MemberListResp) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{4}\n}",
"func (*ListNotification) Descriptor() ([]byte, []int) {\n\treturn file_infra_grpc_notification_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListIpPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_containerregistry_v1_registry_service_proto_rawDescGZIP(), []int{11}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_weather_proto_rawDescGZIP(), []int{17}\n}",
"func (*CMsgVector) Descriptor() ([]byte, []int) {\n\treturn file_csgo_netmessages_proto_rawDescGZIP(), []int{0}\n}",
"func (*VectorClock) Descriptor() ([]byte, []int) {\n\treturn file_pkg_proto_l3_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListMessagesResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{15}\n}",
"func (*UserListLogicalRuleInfo) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_common_user_lists_proto_rawDescGZIP(), []int{12}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_wallet_proto_rawDescGZIP(), []int{7}\n}",
"func (*EventsListProto) Descriptor() ([]byte, []int) {\n\treturn file_inotify_proto_rawDescGZIP(), []int{9}\n}",
"func (*ListTimersRequest) Descriptor() ([]byte, []int) {\n\treturn file_list_timers_proto_rawDescGZIP(), []int{0}\n}",
"func (*CancelDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{8}\n}",
"func (*ListRefsResponse) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{16}\n}",
"func (*QueryPlanStatusResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{25}\n}",
"func (*ApiWarning) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1_cloud_sql_resources_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListNodeSelectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_spire_server_datastore_datastore_proto_rawDescGZIP(), []int{23}\n}",
"func (*ListMetricsResponse) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_analysis_proto_v1_metrics_proto_rawDescGZIP(), []int{1}\n}",
"func (*ListValue) Descriptor() ([]byte, []int) {\n\treturn file_ocis_messages_settings_v0_settings_proto_rawDescGZIP(), []int{14}\n}",
"func (*ApiWarning) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_resources_proto_rawDescGZIP(), []int{1}\n}",
"func (*Vector) Descriptor() ([]byte, []int) {\n\treturn file_msgdata_proto_rawDescGZIP(), []int{5}\n}",
"func (*Listen) Descriptor() ([]byte, []int) {\n\treturn file_pkg_smgrpc_smgrpc_proto_rawDescGZIP(), []int{4}\n}",
"func (*VariableID) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{10}\n}",
"func (*FriendList) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{38}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{5}\n}",
"func (*ListMetadataRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{23}\n}",
"func (*ListNotificationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{63}\n}",
"func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_user_proto_rawDescGZIP(), []int{3}\n}",
"func (*CSVCMsg_GameEventListDescriptorT) Descriptor() ([]byte, []int) {\n\treturn file_netmessages_proto_rawDescGZIP(), []int{44, 1}\n}",
"func (*SubscriptionList) Descriptor() ([]byte, []int) {\n\treturn file_proto_gnmi_gnmi_proto_rawDescGZIP(), []int{12}\n}",
"func (*CSVCMsg_GameEventListDescriptorT) Descriptor() ([]byte, []int) {\n\treturn file_csgo_netmessages_proto_rawDescGZIP(), []int{44, 1}\n}",
"func (*Preferences) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v2_services_reach_plan_service_proto_rawDescGZIP(), []int{8}\n}",
"func (*List) Descriptor() ([]byte, []int) {\n\treturn file_google_actions_sdk_v2_conversation_prompt_content_list_proto_rawDescGZIP(), []int{0}\n}",
"func (*ListTeamsRequest) Descriptor() ([]byte, []int) {\n\treturn file_mods_v1_mods_proto_rawDescGZIP(), []int{18}\n}",
"func (*RefreshNamenodesResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{3}\n}",
"func (*ListTeamsResponse_TeamListItem) Descriptor() ([]byte, []int) {\n\treturn file_xsuportal_services_audience_team_list_proto_rawDescGZIP(), []int{0, 0}\n}",
"func (*MemberRuleSettingListResp) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{94}\n}",
"func (*GetDatanodeInfoResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{13}\n}",
"func (*ListResponse) Descriptor() ([]byte, []int) {\n\treturn file_fabl_v1_item_service_proto_rawDescGZIP(), []int{7}\n}",
"func (*ListRulesRequest) Descriptor() ([]byte, []int) {\n\treturn file_plugin_proto_rawDescGZIP(), []int{0}\n}"
] | [
"0.7172866",
"0.66695154",
"0.6534382",
"0.650128",
"0.6460396",
"0.63638353",
"0.6329906",
"0.6309684",
"0.6288664",
"0.62838805",
"0.6276277",
"0.62503725",
"0.62489116",
"0.6238479",
"0.62189096",
"0.62014437",
"0.62013793",
"0.6194213",
"0.6187662",
"0.6171217",
"0.61705434",
"0.61689913",
"0.6162572",
"0.61600405",
"0.61512583",
"0.61435163",
"0.6138212",
"0.6131955",
"0.6124012",
"0.61189973",
"0.61151296",
"0.61108553",
"0.6109564",
"0.60937804",
"0.60934526",
"0.60930437",
"0.6085666",
"0.60823274",
"0.60820955",
"0.607129",
"0.6068712",
"0.6065399",
"0.60629493",
"0.6059853",
"0.6058581",
"0.60557574",
"0.60543764",
"0.6051109",
"0.6049606",
"0.6047818",
"0.60445976",
"0.6043466",
"0.6043102",
"0.60417366",
"0.60409397",
"0.6037396",
"0.60239476",
"0.6016687",
"0.6013249",
"0.6012516",
"0.6008534",
"0.6005499",
"0.60042626",
"0.59979004",
"0.5997046",
"0.59968936",
"0.5996778",
"0.5996466",
"0.59953004",
"0.5995163",
"0.59950835",
"0.59936404",
"0.5992359",
"0.59920996",
"0.5989054",
"0.59866107",
"0.5979301",
"0.59772193",
"0.5974964",
"0.59741604",
"0.5972864",
"0.5972625",
"0.5964673",
"0.5957267",
"0.59531915",
"0.5950476",
"0.5948025",
"0.5940354",
"0.5938543",
"0.5938112",
"0.5936863",
"0.5936408",
"0.59361494",
"0.5936116",
"0.5934483",
"0.59296674",
"0.5927911",
"0.59277153",
"0.59270763",
"0.59256524"
] | 0.7408062 | 0 |
Deprecated: Use DescribeRepositoryReq.ProtoReflect.Descriptor instead. | Устарело: используйте DescribeRepositoryReq.ProtoReflect.Descriptor вместо этого. | func (*DescribeRepositoryReq) Descriptor() ([]byte, []int) {
return file_api_ops_proto_rawDescGZIP(), []int{13}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (*GetRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_artifactregistry_v1_repository_proto_rawDescGZIP(), []int{3}\n}",
"func (*ComputeRepositoryDiffRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{22}\n}",
"func (*GetRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{13}\n}",
"func (*RepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_service_proto_rawDescGZIP(), []int{10}\n}",
"func (*UpdateRepoReq) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{5}\n}",
"func (*FindRemoteRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_remote_proto_rawDescGZIP(), []int{2}\n}",
"func (*ListRepositoryReq) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{3}\n}",
"func (*DeleteRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{15}\n}",
"func (*DescribeInstanceRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{28}\n}",
"func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}",
"func (*DetachMetadataRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{22}\n}",
"func (*ListMetadataRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{23}\n}",
"func (*ResolveVersionRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{25}\n}",
"func (*DescribeClientRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{30}\n}",
"func (*RefreshRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{16}\n}",
"func (*ListRepositoriesRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_artifactregistry_v1_repository_proto_rawDescGZIP(), []int{1}\n}",
"func (*DescribeRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{4}\n}",
"func (*GetVersionRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{9}\n}",
"func (*SyncPinnedRepositoryRequest) Descriptor() ([]byte, []int) {\n\treturn file_pinnedRepository_pinnedRepository_proto_rawDescGZIP(), []int{1}\n}",
"func (*GetRepositoryRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{13, 0}\n}",
"func (*ListRepositoriesRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{12}\n}",
"func (*ComputeRepositoryDiffRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{22, 0}\n}",
"func (*DeleteRepositoryRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{15, 0}\n}",
"func (*PackageRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{6}\n}",
"func (*UpdateRegistryRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_containerregistry_v1_registry_service_proto_rawDescGZIP(), []int{5}\n}",
"func (*AttachMetadataRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{21}\n}",
"func (*RevertRepositoryCommitsRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{33}\n}",
"func (*RefreshProjectRequest) Descriptor() ([]byte, []int) {\n\treturn file_web_proto_rawDescGZIP(), []int{2}\n}",
"func (*DeleteRefRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{14}\n}",
"func (*GetRegistryRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_containerregistry_v1_registry_service_proto_rawDescGZIP(), []int{0}\n}",
"func (*FindRemoteRepositoryResponse) Descriptor() ([]byte, []int) {\n\treturn file_remote_proto_rawDescGZIP(), []int{3}\n}",
"func (*UpdateIpPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_containerregistry_v1_registry_service_proto_rawDescGZIP(), []int{10}\n}",
"func (*StatusRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_google_cloudprober_servers_grpc_proto_grpcservice_proto_rawDescGZIP(), []int{1}\n}",
"func (*Repository) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{9}\n}",
"func (*ChangeUpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_githubcard_proto_rawDescGZIP(), []int{23}\n}",
"func (*PatchAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{4}\n}",
"func (*DeleteRegistryRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_containerregistry_v1_registry_service_proto_rawDescGZIP(), []int{7}\n}",
"func (*UpdatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{9}\n}",
"func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}",
"func (*DiffRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP(), []int{2}\n}",
"func (*UpdateNetworkRequest) Descriptor() ([]byte, []int) {\n\treturn file_packetbroker_api_iam_v1_service_proto_rawDescGZIP(), []int{8}\n}",
"func (*OutdatedRequest) Descriptor() ([]byte, []int) {\n\treturn file_cc_arduino_cli_commands_v1_commands_proto_rawDescGZIP(), []int{12}\n}",
"func (*GetChangesRequest) Descriptor() ([]byte, []int) {\n\treturn file_internal_pb_watcher_proto_rawDescGZIP(), []int{1}\n}",
"func (*UpdateEntityRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dataplex_v1_metadata_proto_rawDescGZIP(), []int{1}\n}",
"func (*NewVersionRequest) Descriptor() ([]byte, []int) {\n\treturn file_versiontracker_proto_rawDescGZIP(), []int{1}\n}",
"func (*UpdateRemoteMirrorRequest) Descriptor() ([]byte, []int) {\n\treturn file_remote_proto_rawDescGZIP(), []int{0}\n}",
"func (*DescribePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{6}\n}",
"func (*GetPeerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{6}\n}",
"func (*SelectorVerificationReq) Descriptor() ([]byte, []int) {\n\treturn file_proto_selector_verification_msgs_proto_rawDescGZIP(), []int{0}\n}",
"func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}",
"func (*Repository) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_artifactregistry_v1_repository_proto_rawDescGZIP(), []int{0}\n}",
"func (*ReadTensorboardUsageRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_tensorboard_service_proto_rawDescGZIP(), []int{6}\n}",
"func (*TelemetryRequest) Descriptor() ([]byte, []int) {\n\treturn file_interservice_license_control_license_control_proto_rawDescGZIP(), []int{11}\n}",
"func (*SetRepository) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{14}\n}",
"func (*IssueCredentialRequest) Descriptor() ([]byte, []int) {\n\treturn file_messages_proto_rawDescGZIP(), []int{11}\n}",
"func (*MergeRepositoryCommitsRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{32}\n}",
"func (*CodeLensRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{163}\n}",
"func (*PatchAnnotationsStatusRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{5}\n}",
"func (*ProjectUpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{70}\n}",
"func (*GetServiceRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{6}\n}",
"func (*SelectorVerificationsReq) Descriptor() ([]byte, []int) {\n\treturn file_proto_selector_verification_msgs_proto_rawDescGZIP(), []int{2}\n}",
"func (*CodeLensResolveRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{33}\n}",
"func (*UpgradeRuntimeRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_notebooks_v1_managed_service_proto_rawDescGZIP(), []int{9}\n}",
"func (*PatchKeysRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{74}\n}",
"func (*UpdateTensorboardRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_tensorboard_service_proto_rawDescGZIP(), []int{4}\n}",
"func (*ReportRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_api_servicecontrol_v1_service_controller_proto_rawDescGZIP(), []int{2}\n}",
"func (*GetCollectorRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{163}\n}",
"func (*PatchWorkflowVersionsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{143}\n}",
"func (*DidChangeConfigurationRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{72}\n}",
"func (*GetInstanceURLRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{26}\n}",
"func (*ChangeRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{2}\n}",
"func (*ChangeRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{2}\n}",
"func (*VersionRequest) Descriptor() ([]byte, []int) {\n\treturn file_provider_v1alpha1_service_proto_rawDescGZIP(), []int{0}\n}",
"func (*RenameRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{194}\n}",
"func (*ProjectUpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{73}\n}",
"func (*RefreshRuntimeTokenInternalRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_notebooks_v1_managed_service_proto_rawDescGZIP(), []int{12}\n}",
"func (*ChangeRequest) Descriptor() ([]byte, []int) {\n\treturn file_authorization_proto_rawDescGZIP(), []int{0}\n}",
"func (*GetEntityRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dataplex_v1_metadata_proto_rawDescGZIP(), []int{5}\n}",
"func (*GetTensorboardRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_tensorboard_service_proto_rawDescGZIP(), []int{1}\n}",
"func (*SetRepository_Response) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{14, 0}\n}",
"func (*GrantedProjectSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{114}\n}",
"func (*GrantedProjectSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{111}\n}",
"func (*DeleteCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{162}\n}",
"func (*UpdateNetworkRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_vpc_v1_network_service_proto_rawDescGZIP(), []int{5}\n}",
"func (*QueryPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_permission_pb_request_proto_rawDescGZIP(), []int{0}\n}",
"func (*PatchModelVersionsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{86}\n}",
"func (*GetConceptLanguageRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{48}\n}",
"func (*DefinitionRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{129}\n}",
"func (*ContractQueryRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{22}\n}",
"func (*DetachTagsRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{19}\n}",
"func (*SyncPinnedRepositoryResponse) Descriptor() ([]byte, []int) {\n\treturn file_pinnedRepository_pinnedRepository_proto_rawDescGZIP(), []int{2}\n}",
"func (*RepositoryNamedIdentification) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{10}\n}",
"func (*ListRepositoriesRequest_Response) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{12, 0}\n}",
"func (*DescribeProjectRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_project_api_ocp_project_api_proto_rawDescGZIP(), []int{8}\n}",
"func (*UpdateArtifactRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_metadata_service_proto_rawDescGZIP(), []int{11}\n}",
"func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_interservice_license_control_license_control_proto_rawDescGZIP(), []int{9}\n}",
"func (*UpdateRuntimeRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_notebooks_v1_managed_service_proto_rawDescGZIP(), []int{11}\n}",
"func (*DescribeInstanceResponse) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{29}\n}",
"func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{7}\n}",
"func (*ApplyRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP(), []int{0}\n}"
] | [
"0.73243886",
"0.72970176",
"0.7288239",
"0.71888715",
"0.7183465",
"0.7175035",
"0.713901",
"0.71179855",
"0.69901067",
"0.6977314",
"0.6969404",
"0.692119",
"0.69194776",
"0.6916009",
"0.6915778",
"0.6902419",
"0.6895223",
"0.6869635",
"0.68641007",
"0.6858219",
"0.68566984",
"0.6856575",
"0.6831967",
"0.6829509",
"0.6825475",
"0.681544",
"0.6812004",
"0.679574",
"0.67941856",
"0.67840755",
"0.6764977",
"0.676405",
"0.6749076",
"0.6737332",
"0.67368275",
"0.6731883",
"0.6731832",
"0.67251706",
"0.6719052",
"0.6712237",
"0.67118704",
"0.6709382",
"0.67091745",
"0.6707253",
"0.66995454",
"0.66977316",
"0.66976875",
"0.66796815",
"0.66794395",
"0.66773254",
"0.6671631",
"0.6670288",
"0.6669743",
"0.66694003",
"0.6665093",
"0.66612023",
"0.6658953",
"0.6658508",
"0.6655267",
"0.66539234",
"0.665298",
"0.6651917",
"0.6647821",
"0.6646552",
"0.66443926",
"0.6643315",
"0.66413873",
"0.66389126",
"0.6638807",
"0.66375387",
"0.66372836",
"0.66372836",
"0.66364366",
"0.66333765",
"0.66331947",
"0.6630622",
"0.66305476",
"0.6627455",
"0.66265756",
"0.66257346",
"0.66244465",
"0.66212463",
"0.6620096",
"0.6618237",
"0.6610441",
"0.6609914",
"0.6607445",
"0.6604237",
"0.6603787",
"0.66033703",
"0.6601688",
"0.65991336",
"0.6598866",
"0.6597676",
"0.6595485",
"0.6592487",
"0.65923274",
"0.65915793",
"0.6591478",
"0.6591014"
] | 0.7458026 | 0 |