code (string, lengths 31–2.05k) | label_name (string, 5 classes) | label (int64, range 0–4) |
|---|---|---|
func (ar *AnswerActivityRepo) sendCancelAcceptAnswerNotification(
ctx context.Context, op *schema.AcceptAnswerOperationInfo) {
for _, act := range op.Activities {
msg := &schema.NotificationMsg{
ReceiverUserID: act.ActivityUserID,
Type: schema.NotificationTypeAchievement,
ObjectID: op.AnswerObjectID,
}
if act.ActivityUserID == op.QuestionObjectID {
msg.TriggerUserID = op.AnswerObjectID
msg.ObjectType = constant.QuestionObjectType
} else {
msg.TriggerUserID = op.QuestionObjectID
msg.ObjectType = constant.AnswerObjectType
}
if msg.TriggerUserID != msg.ReceiverUserID {
ar.notificationQueueService.Send(ctx, msg)
}
}
} | Base | 1 |
func Satitize(data *imagedata.ImageData) (*imagedata.ImageData, error) {
r := bytes.NewReader(data.Data)
l := xml.NewLexer(parse.NewInput(r))
buf, cancel := imagedata.BorrowBuffer()
ignoreTag := 0
for {
tt, tdata := l.Next()
if ignoreTag > 0 {
switch tt {
case xml.ErrorToken:
cancel()
return nil, l.Err()
case xml.EndTagToken, xml.StartTagCloseVoidToken:
ignoreTag--
case xml.StartTagToken:
ignoreTag++
}
continue
}
switch tt {
case xml.ErrorToken:
if l.Err() != io.EOF {
cancel()
return nil, l.Err()
}
newData := imagedata.ImageData{
Data: buf.Bytes(),
Type: data.Type,
}
newData.SetCancel(cancel)
return &newData, nil
case xml.StartTagToken:
if strings.ToLower(string(l.Text())) == "script" {
ignoreTag++
continue
}
buf.Write(tdata)
case xml.AttributeToken:
if _, unsafe := unsafeAttrs[strings.ToLower(string(l.Text()))]; unsafe {
continue
}
buf.Write(tdata)
default:
buf.Write(tdata)
}
}
} | Base | 1 |
func (hs *HTTPServer) GetPluginMarkdown(c *models.ReqContext) response.Response {
pluginID := web.Params(c.Req)[":pluginId"]
name := web.Params(c.Req)[":name"]
content, err := hs.pluginMarkdown(c.Req.Context(), pluginID, name)
if err != nil {
var notFound plugins.NotFoundError
if errors.As(err, &notFound) {
return response.Error(404, notFound.Error(), nil)
}
return response.Error(500, "Could not get markdown file", err)
}
// fallback try readme
if len(content) == 0 {
content, err = hs.pluginMarkdown(c.Req.Context(), pluginID, "help")
if err != nil {
return response.Error(501, "Could not get markdown file", err)
}
}
resp := response.Respond(http.StatusOK, content)
resp.SetHeader("Content-Type", "text/plain; charset=utf-8")
return resp
} | Base | 1 |
func NewHandler() *Handler {
return &Handler{
clusterService: cluster.NewService(),
userService: user.NewService(),
roleService: role.NewService(),
rolebindingService: rolebinding.NewService(),
ldapService: ldap.NewService(),
jwtSigner: jwt.NewSigner(jwt.HS256, JwtSigKey, jwtMaxAge),
}
} | Base | 1 |
func ReadConfig(c *config.Config, path ...string) error {
v := viper.New()
v.SetConfigName("app")
v.SetConfigType("yaml")
for i := range path {
configFilePaths = append(configFilePaths, path[i])
}
for i := range configFilePaths {
realDir := file.ReplaceHomeDir(configFilePaths[i])
if exists := fileutil.Exist(realDir); !exists {
fmt.Println(fmt.Sprintf(configNotFoundSkipErr, realDir))
continue
}
v.AddConfigPath(realDir)
if err := v.ReadInConfig(); err != nil {
fmt.Println(fmt.Sprintf(configReadErr, realDir, err.Error()))
continue
}
if err := v.MergeInConfig(); err != nil {
fmt.Println(fmt.Sprintf(configMergeErr, configFilePaths))
}
}
var configMap map[string]interface{}
if err := v.Unmarshal(&configMap); err != nil {
return err
}
str, err := json.Marshal(&configMap)
if err != nil {
return err
}
if err := json.Unmarshal(str, &c); err != nil {
return nil
}
return nil
} | Base | 1 |
func AddV1Route(app iris.Party) {
v1Party := app.Party("/v1")
v1Party.Use(langHandler())
v1Party.Use(pageHandler())
session.Install(v1Party)
mfa.Install(v1Party)
authParty := v1Party.Party("")
authParty.Use(WarpedJwtHandler())
authParty.Use(authHandler())
authParty.Use(resourceExtractHandler())
authParty.Use(roleHandler())
authParty.Use(roleAccessHandler())
authParty.Use(resourceNameInvalidHandler())
authParty.Use(logHandler())
authParty.Get("/", apiResourceHandler(authParty))
user.Install(authParty)
cluster.Install(authParty)
role.Install(authParty)
system.Install(v1Party)
proxy.Install(authParty)
ws.Install(authParty)
chart.Install(authParty)
webkubectl.Install(authParty, v1Party)
ldap.Install(authParty)
imagerepo.Install(authParty)
file.Install(authParty)
} | Class | 2 |
func Serve(ctx context.Context, artifactPath string, addr string, port string) context.CancelFunc {
serverContext, cancel := context.WithCancel(ctx)
logger := common.Logger(serverContext)
if artifactPath == "" {
return cancel
}
router := httprouter.New()
logger.Debugf("Artifacts base path '%s'", artifactPath)
fs := os.DirFS(artifactPath)
uploads(router, MkdirFsImpl{artifactPath, fs})
downloads(router, fs)
server := &http.Server{
Addr: fmt.Sprintf("%s:%s", addr, port),
ReadHeaderTimeout: 2 * time.Second,
Handler: router,
}
// run server
go func() {
logger.Infof("Start server on http://%s:%s", addr, port)
if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
logger.Fatal(err)
}
}()
// wait for cancel to gracefully shutdown server
go func() {
<-serverContext.Done()
if err := server.Shutdown(ctx); err != nil {
logger.Errorf("Failed shutdown gracefully - force shutdown: %v", err)
server.Close()
}
}()
return cancel
} | Base | 1 |
func (fsys MkdirFsImpl) OpenAtEnd(name string) (fs.File, error) {
file, err := os.OpenFile(fsys.dir+"/"+name, os.O_CREATE|os.O_RDWR, 0644)
if err != nil {
return nil, err
}
_, err = file.Seek(0, os.SEEK_END)
if err != nil {
return nil, err
}
return file, nil
} | Base | 1 |
func (fsys MkdirFsImpl) MkdirAll(path string, perm fs.FileMode) error {
return os.MkdirAll(fsys.dir+"/"+path, perm)
} | Base | 1 |
func uploads(router *httprouter.Router, fsys MkdirFS) {
router.POST("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
runID := params.ByName("runId")
json, err := json.Marshal(FileContainerResourceURL{
FileContainerResourceURL: fmt.Sprintf("http://%s/upload/%s", req.Host, runID),
})
if err != nil {
panic(err)
}
_, err = w.Write(json)
if err != nil {
panic(err)
}
})
router.PUT("/upload/:runId", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
itemPath := req.URL.Query().Get("itemPath")
runID := params.ByName("runId")
if req.Header.Get("Content-Encoding") == "gzip" {
itemPath += gzipExtension
}
filePath := fmt.Sprintf("%s/%s", runID, itemPath)
err := fsys.MkdirAll(path.Dir(filePath), os.ModePerm)
if err != nil {
panic(err)
}
file, err := func() (fs.File, error) {
contentRange := req.Header.Get("Content-Range")
if contentRange != "" && !strings.HasPrefix(contentRange, "bytes 0-") {
return fsys.OpenAtEnd(filePath)
}
return fsys.Open(filePath)
}()
if err != nil {
panic(err)
}
defer file.Close()
writer, ok := file.(io.Writer)
if !ok {
panic(errors.New("File is not writable"))
}
if req.Body == nil {
panic(errors.New("No body given"))
}
_, err = io.Copy(writer, req.Body)
if err != nil {
panic(err)
}
json, err := json.Marshal(ResponseMessage{
Message: "success",
})
if err != nil {
panic(err)
}
_, err = w.Write(json)
if err != nil {
panic(err)
}
})
router.PATCH("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
json, err := json.Marshal(ResponseMessage{
Message: "success",
})
if err != nil {
panic(err)
}
_, err = w.Write(json)
if err != nil {
panic(err)
}
})
} | Base | 1 |
err := fs.WalkDir(fsys, dirPath, func(path string, entry fs.DirEntry, err error) error {
if !entry.IsDir() {
rel, err := filepath.Rel(dirPath, path)
if err != nil {
panic(err)
}
// if it was upload as gzip
rel = strings.TrimSuffix(rel, gzipExtension)
files = append(files, ContainerItem{
Path: fmt.Sprintf("%s/%s", itemPath, rel),
ItemType: "file",
ContentLocation: fmt.Sprintf("http://%s/artifact/%s/%s/%s", req.Host, container, itemPath, rel),
})
}
return nil
}) | Base | 1 |
func (fsys MkdirFsImpl) Open(name string) (fs.File, error) {
return os.OpenFile(fsys.dir+"/"+name, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)
} | Base | 1 |
func TestListArtifactContainer(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{
"1/some/file": {
Data: []byte(""),
},
})
router := httprouter.New()
downloads(router, memfs)
req, _ := http.NewRequest("GET", "http://localhost/download/1?itemPath=some/file", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.FailNow(fmt.Sprintf("Wrong status: %d", status))
}
response := ContainerItemResponse{}
err := json.Unmarshal(rr.Body.Bytes(), &response)
if err != nil {
panic(err)
}
assert.Equal(1, len(response.Value))
assert.Equal("some/file/.", response.Value[0].Path)
assert.Equal("file", response.Value[0].ItemType)
assert.Equal("http://localhost/artifact/1/some/file/.", response.Value[0].ContentLocation)
} | Base | 1 |
func TestDownloadArtifactFile(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{
"1/some/file": {
Data: []byte("content"),
},
})
router := httprouter.New()
downloads(router, memfs)
req, _ := http.NewRequest("GET", "http://localhost/artifact/1/some/file", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.FailNow(fmt.Sprintf("Wrong status: %d", status))
}
data := rr.Body.Bytes()
assert.Equal("content", string(data))
} | Base | 1 |
func (fsys MapFsImpl) MkdirAll(path string, perm fs.FileMode) error {
// mocked no-op
return nil
} | Base | 1 |
func TestListArtifacts(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{
"1/file.txt": {
Data: []byte(""),
},
})
router := httprouter.New()
downloads(router, memfs)
req, _ := http.NewRequest("GET", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.FailNow(fmt.Sprintf("Wrong status: %d", status))
}
response := NamedFileContainerResourceURLResponse{}
err := json.Unmarshal(rr.Body.Bytes(), &response)
if err != nil {
panic(err)
}
assert.Equal(1, response.Count)
assert.Equal("file.txt", response.Value[0].Name)
assert.Equal("http://localhost/download/1", response.Value[0].FileContainerResourceURL)
} | Base | 1 |
func TestFinalizeArtifactUpload(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
router := httprouter.New()
uploads(router, MapFsImpl{memfs})
req, _ := http.NewRequest("PATCH", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.Fail("Wrong status")
}
response := ResponseMessage{}
err := json.Unmarshal(rr.Body.Bytes(), &response)
if err != nil {
panic(err)
}
assert.Equal("success", response.Message)
} | Base | 1 |
func (file WritableFile) Write(data []byte) (int, error) {
file.fsys[file.path].Data = data
return len(data), nil
} | Base | 1 |
func (fsys MapFsImpl) Open(path string) (fs.File, error) {
var file = fstest.MapFile{
Data: []byte("content2"),
}
fsys.MapFS[path] = &file
result, err := fsys.MapFS.Open(path)
return WritableFile{result, fsys.MapFS, path}, err
} | Base | 1 |
func (fsys MapFsImpl) OpenAtEnd(path string) (fs.File, error) {
var file = fstest.MapFile{
Data: []byte("content2"),
}
fsys.MapFS[path] = &file
result, err := fsys.MapFS.Open(path)
return WritableFile{result, fsys.MapFS, path}, err
} | Base | 1 |
func TestArtifactUploadBlob(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
router := httprouter.New()
uploads(router, MapFsImpl{memfs})
req, _ := http.NewRequest("PUT", "http://localhost/upload/1?itemPath=some/file", strings.NewReader("content"))
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.Fail("Wrong status")
}
response := ResponseMessage{}
err := json.Unmarshal(rr.Body.Bytes(), &response)
if err != nil {
panic(err)
}
assert.Equal("success", response.Message)
assert.Equal("content", string(memfs["1/some/file"].Data))
} | Base | 1 |
func TestNewArtifactUploadPrepare(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
router := httprouter.New()
uploads(router, MapFsImpl{memfs})
req, _ := http.NewRequest("POST", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.Fail("Wrong status")
}
response := FileContainerResourceURL{}
err := json.Unmarshal(rr.Body.Bytes(), &response)
if err != nil {
panic(err)
}
assert.Equal("http://localhost/upload/1", response.FileContainerResourceURL)
} | Base | 1 |
func FromBytes(size int, bits []byte) Bitfield {
bf := NewBitfield(size)
start := len(bf) - len(bits)
if start < 0 {
panic("bitfield too small")
}
copy(bf[start:], bits)
return bf
} | Base | 1 |
func NewBitfield(size int) Bitfield {
if size%8 != 0 {
panic("Bitfield size must be a multiple of 8")
}
return make([]byte, size/8)
} | Base | 1 |
func BenchmarkBitfield(t *testing.B) {
bf := NewBitfield(benchmarkSize)
t.ResetTimer()
for i := 0; i < t.N; i++ {
if bf.Bit(i % benchmarkSize) {
t.Fatal("bad", i)
}
bf.SetBit(i % benchmarkSize)
bf.UnsetBit(i % benchmarkSize)
bf.SetBit(i % benchmarkSize)
bf.UnsetBit(i % benchmarkSize)
bf.SetBit(i % benchmarkSize)
bf.UnsetBit(i % benchmarkSize)
bf.SetBit(i % benchmarkSize)
if !bf.Bit(i % benchmarkSize) {
t.Fatal("bad", i)
}
bf.UnsetBit(i % benchmarkSize)
bf.SetBit(i % benchmarkSize)
bf.UnsetBit(i % benchmarkSize)
bf.SetBit(i % benchmarkSize)
bf.UnsetBit(i % benchmarkSize)
bf.SetBit(i % benchmarkSize)
bf.UnsetBit(i % benchmarkSize)
if bf.Bit(i % benchmarkSize) {
t.Fatal("bad", i)
}
}
} | Base | 1 |
func TestExhaustive24(t *testing.T) {
bf := NewBitfield(24)
max := 1 << 24
bint := new(big.Int)
bts := make([]byte, 4)
for j := 0; j < max; j++ {
binary.BigEndian.PutUint32(bts, uint32(j))
bint.SetBytes(bts[1:])
bf.SetBytes(nil)
for i := 0; i < 24; i++ {
if bf.Bit(i) {
t.Fatalf("bit %d should have been false", i)
}
if bint.Bit(i) == 1 {
bf.SetBit(i)
bf.SetBit(i)
} else {
bf.UnsetBit(i)
bf.UnsetBit(i)
}
if bf.Bit(i) != (bint.Bit(i) == 1) {
t.Fatalf("bit %d should have been true", i)
}
}
if !bytes.Equal(bint.Bytes(), bf.Bytes()) {
t.Logf("%v %v", bint.Bytes(), bf.Bytes())
t.Fatal("big int and bitfield not equal")
}
for i := 0; i < 24; i++ {
if (bint.Bit(i) == 1) != bf.Bit(i) {
t.Fatalf("bit %d wrong", i)
}
}
for i := 0; i < 24; i++ {
if bf.OnesBefore(i) != bits.OnesCount32(uint32(j)<<(32-uint(i))) {
t.Fatalf("wrong bit count")
}
if bf.OnesAfter(i) != bits.OnesCount32(uint32(j)>>uint(i)) {
t.Fatalf("wrong bit count")
}
if bf.Ones() != bits.OnesCount32(uint32(j)) {
t.Fatalf("wrong bit count")
}
}
}
} | Base | 1 |
func TestBitfield(t *testing.T) {
bf := NewBitfield(128)
if bf.OnesBefore(20) != 0 {
t.Fatal("expected no bits set")
}
bf.SetBit(10)
if bf.OnesBefore(20) != 1 {
t.Fatal("expected 1 bit set")
}
bf.SetBit(12)
if bf.OnesBefore(20) != 2 {
t.Fatal("expected 2 bit set")
}
bf.SetBit(30)
if bf.OnesBefore(20) != 2 {
t.Fatal("expected 2 bit set")
}
bf.SetBit(100)
if bf.OnesBefore(20) != 2 {
t.Fatal("expected 2 bit set")
}
bf.UnsetBit(10)
if bf.OnesBefore(20) != 1 {
t.Fatal("expected 1 bit set")
}
bint := new(big.Int).SetBytes(bf.Bytes())
for i := 0; i < 128; i++ {
if bf.Bit(i) != (bint.Bit(i) == 1) {
t.Fatalf("expected bit %d to be %v", i, bf.Bit(i))
}
}
} | Base | 1 |
func BenchmarkOnes(t *testing.B) {
bf := NewBitfield(benchmarkSize)
t.ResetTimer()
for i := 0; i < t.N; i++ {
for j := 0; j*4 < benchmarkSize; j++ {
if bf.Ones() != j {
t.Fatal("bad", i)
}
bf.SetBit(j * 4)
}
for j := 0; j*4 < benchmarkSize; j++ {
bf.UnsetBit(j * 4)
}
}
} | Base | 1 |
func BenchmarkBytes(t *testing.B) {
bfa := NewBitfield(211)
bfb := NewBitfield(211)
for j := 0; j*4 < 211; j++ {
bfa.SetBit(j * 4)
}
t.ResetTimer()
for i := 0; i < t.N; i++ {
bfb.SetBytes(bfa.Bytes())
}
} | Base | 1 |
func NewUnixFSHAMTShard(ctx context.Context, substrate dagpb.PBNode, data data.UnixFSData, lsys *ipld.LinkSystem) (ipld.Node, error) {
if err := validateHAMTData(data); err != nil {
return nil, err
}
shardCache := make(map[ipld.Link]*_UnixFSHAMTShard, substrate.FieldLinks().Length())
bf := bitField(data)
return &_UnixFSHAMTShard{
ctx: ctx,
_substrate: substrate,
data: data,
lsys: lsys,
shardCache: shardCache,
bitfield: bf,
cachedLength: -1,
}, nil
} | Class | 2 |
func bitField(nd data.UnixFSData) bitfield.Bitfield {
bf := bitfield.NewBitfield(int(nd.FieldFanout().Must().Int()))
bf.SetBytes(nd.FieldData().Must().Bytes())
return bf
} | Class | 2 |
func (s *shard) serialize(ls *ipld.LinkSystem) (ipld.Link, uint64, error) {
ufd, err := BuildUnixFS(func(b *Builder) {
DataType(b, data.Data_HAMTShard)
HashType(b, s.hasher)
Data(b, s.bitmap())
Fanout(b, uint64(s.size))
})
if err != nil {
return nil, 0, err
}
pbb := dagpb.Type.PBNode.NewBuilder()
pbm, err := pbb.BeginMap(2)
if err != nil {
return nil, 0, err
}
if err = pbm.AssembleKey().AssignString("Data"); err != nil {
return nil, 0, err
}
if err = pbm.AssembleValue().AssignBytes(data.EncodeUnixFSData(ufd)); err != nil {
return nil, 0, err
}
if err = pbm.AssembleKey().AssignString("Links"); err != nil {
return nil, 0, err
}
lnkBuilder := dagpb.Type.PBLinks.NewBuilder()
lnks, err := lnkBuilder.BeginList(int64(len(s.children)))
if err != nil {
return nil, 0, err
}
// sorting happens in codec-dagpb
var totalSize uint64
for idx, e := range s.children {
var lnk dagpb.PBLink
if e.shard != nil {
ipldLnk, sz, err := e.shard.serialize(ls)
if err != nil {
return nil, 0, err
}
totalSize += sz
fullName := s.formatLinkName("", idx)
lnk, err = BuildUnixFSDirectoryEntry(fullName, int64(sz), ipldLnk)
if err != nil {
return nil, 0, err
}
} else {
fullName := s.formatLinkName(e.Name.Must().String(), idx)
sz := e.Tsize.Must().Int()
totalSize += uint64(sz)
lnk, err = BuildUnixFSDirectoryEntry(fullName, sz, e.Hash.Link())
}
if err != nil {
return nil, 0, err
}
if err := lnks.AssembleValue().AssignNode(lnk); err != nil {
return nil, 0, err
}
}
if err := lnks.Finish(); err != nil {
return nil, 0, err
}
pbm.AssembleValue().AssignNode(lnkBuilder.Build())
if err := pbm.Finish(); err != nil {
return nil, 0, err
}
node := pbb.Build()
lnk, sz, err := sizedStore(ls, fileLinkProto, node)
if err != nil {
return nil, 0, err
}
return lnk, totalSize + sz, nil
} | Class | 2 |
func (s *shard) bitmap() []byte {
bm := bitfield.NewBitfield(s.size)
for i := 0; i < s.size; i++ {
if _, ok := s.children[i]; ok {
bm.SetBit(i)
}
}
return bm.Bytes()
} | Class | 2 |
func makeDirWidth(ds format.DAGService, size, width int) ([]string, *legacy.Shard, error) {
ctx := context.Background()
s, _ := legacy.NewShard(ds, width)
var dirs []string
for i := 0; i < size; i++ {
dirs = append(dirs, fmt.Sprintf("DIRNAME%d", i))
}
shuffle(time.Now().UnixNano(), dirs)
for i := 0; i < len(dirs); i++ {
nd := ft.EmptyDirNode()
ds.Add(ctx, nd)
err := s.Set(ctx, dirs[i], nd)
if err != nil {
return nil, nil, err
}
}
return dirs, s, nil
} | Class | 2 |
func TestBasicSet(t *testing.T) {
ds, lsys := mockDag()
for _, w := range []int{128, 256, 512, 1024, 2048, 4096} {
t.Run(fmt.Sprintf("BasicSet%d", w), func(t *testing.T) {
names, s, err := makeDirWidth(ds, 1000, w)
require.NoError(t, err)
ctx := context.Background()
legacyNode, err := s.Node()
require.NoError(t, err)
nd, err := lsys.Load(ipld.LinkContext{Ctx: ctx}, cidlink.Link{Cid: legacyNode.Cid()}, dagpb.Type.PBNode)
require.NoError(t, err)
hamtShard, err := hamt.AttemptHAMTShardFromNode(ctx, nd, lsys)
require.NoError(t, err)
for _, d := range names {
_, err := hamtShard.LookupByString(d)
require.NoError(t, err)
}
})
}
} | Class | 2 |
func bitField(nd data.UnixFSData) (bitfield.Bitfield, error) {
fanout := int(nd.FieldFanout().Must().Int())
if fanout > maximumHamtWidth {
return nil, fmt.Errorf("hamt witdh (%d) exceed maximum allowed (%d)", fanout, maximumHamtWidth)
}
bf := bitfield.NewBitfield(fanout)
bf.SetBytes(nd.FieldData().Must().Bytes())
return bf, nil
} | Class | 2 |
func NewHandler(appLister applisters.ApplicationLister, namespace string, enabledNamespaces []string, db db.ArgoDB, enf *rbac.Enforcer, cache *servercache.Cache,
appResourceTree AppResourceTreeFn, allowedShells []string) *terminalHandler {
return &terminalHandler{
appLister: appLister,
db: db,
enf: enf,
cache: cache,
appResourceTreeFn: appResourceTree,
allowedShells: allowedShells,
namespace: namespace,
enabledNamespaces: enabledNamespaces,
}
} | Base | 1 |
func newTerminalSession(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*terminalSession, error) {
conn, err := upgrader.Upgrade(w, r, responseHeader)
if err != nil {
return nil, err
}
session := &terminalSession{
wsConn: conn,
tty: true,
sizeChan: make(chan remotecommand.TerminalSize),
doneChan: make(chan struct{}),
}
return session, nil
} | Base | 1 |
func Test_nativeHelmChart_ExtractChart(t *testing.T) {
client := NewClient("https://argoproj.github.io/argo-helm", Creds{}, false, "")
path, closer, err := client.ExtractChart("argo-cd", "0.7.1", false)
assert.NoError(t, err)
defer io.Close(closer)
info, err := os.Stat(path)
assert.NoError(t, err)
assert.True(t, info.IsDir())
} | Class | 2 |
func Test_nativeHelmChart_ExtractChart_insecure(t *testing.T) {
client := NewClient("https://argoproj.github.io/argo-helm", Creds{InsecureSkipVerify: true}, false, "")
path, closer, err := client.ExtractChart("argo-cd", "0.7.1", false)
assert.NoError(t, err)
defer io.Close(closer)
info, err := os.Stat(path)
assert.NoError(t, err)
assert.True(t, info.IsDir())
} | Class | 2 |
func safeAddr(ctx context.Context, resolver *net.Resolver, hostport string, opts ...Option) (string, error) {
c := basicConfig()
for _, opt := range opts {
opt(c)
}
host, port, err := net.SplitHostPort(hostport)
if err != nil {
return "", err
}
ip := net.ParseIP(host)
if ip != nil {
if ip.To4() != nil && c.isIPForbidden(ip) {
return "", fmt.Errorf("bad ip is detected: %v", ip)
}
return net.JoinHostPort(ip.String(), port), nil
}
if c.isHostForbidden(host) {
return "", fmt.Errorf("bad host is detected: %v", host)
}
r := resolver
if r == nil {
r = net.DefaultResolver
}
addrs, err := r.LookupIPAddr(ctx, host)
if err != nil || len(addrs) <= 0 {
return "", err
}
safeAddrs := make([]net.IPAddr, 0, len(addrs))
for _, addr := range addrs {
// only support IPv4 address
if addr.IP.To4() == nil {
continue
}
if c.isIPForbidden(addr.IP) {
return "", fmt.Errorf("bad ip is detected: %v", addr.IP)
}
safeAddrs = append(safeAddrs, addr)
}
if len(safeAddrs) == 0 {
return "", fmt.Errorf("fail to lookup ip addr: %v", host)
}
return net.JoinHostPort(safeAddrs[0].IP.String(), port), nil
} | Base | 1 |
func (fs *Filesystem) Writefile(p string, r io.Reader) error {
cleaned, err := fs.SafePath(p)
if err != nil {
return err
}
var currentSize int64
// If the file does not exist on the system already go ahead and create the pathway
// to it and an empty file. We'll then write to it later on after this completes.
stat, err := os.Stat(cleaned)
if err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "server/filesystem: writefile: failed to stat file")
} else if err == nil {
if stat.IsDir() {
return errors.WithStack(&Error{code: ErrCodeIsDirectory, resolved: cleaned})
}
currentSize = stat.Size()
}
br := bufio.NewReader(r)
// Check that the new size we're writing to the disk can fit. If there is currently
// a file we'll subtract that current file size from the size of the buffer to determine
// the amount of new data we're writing (or amount we're removing if smaller).
if err := fs.HasSpaceFor(int64(br.Size()) - currentSize); err != nil {
return err
}
// Touch the file and return the handle to it at this point. This will create the file,
// any necessary directories, and set the proper owner of the file.
file, err := fs.Touch(cleaned, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
if err != nil {
return err
}
defer file.Close()
buf := make([]byte, 1024*4)
sz, err := io.CopyBuffer(file, r, buf)
// Adjust the disk usage to account for the old size and the new size of the file.
fs.addDisk(sz - currentSize)
return fs.Chown(cleaned)
} | Base | 1 |
func (e *Engine) PeerDisconnected(p peer.ID) {
e.lock.Lock()
defer e.lock.Unlock()
ledger, ok := e.ledgerMap[p]
if ok {
ledger.lk.RLock()
entries := ledger.Entries()
ledger.lk.RUnlock()
for _, entry := range entries {
e.peerLedger.CancelWant(p, entry.Cid)
}
}
delete(e.ledgerMap, p)
e.scoreLedger.PeerDisconnected(p)
} | Base | 1 |
func (e *Engine) findOrCreate(p peer.ID) *ledger {
// Take a read lock (as it's less expensive) to check if we have a ledger
// for the peer
e.lock.RLock()
l, ok := e.ledgerMap[p]
e.lock.RUnlock()
if ok {
return l
}
// There's no ledger, so take a write lock, then check again and create the
// ledger if necessary
e.lock.Lock()
defer e.lock.Unlock()
l, ok = e.ledgerMap[p]
if !ok {
l = newLedger(p)
e.ledgerMap[p] = l
}
return l
} | Base | 1 |
func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) {
l := e.findOrCreate(p)
l.lk.Lock()
defer l.lk.Unlock()
// Remove sent blocks from the want list for the peer
for _, block := range m.Blocks() {
e.scoreLedger.AddToSentBytes(l.Partner, len(block.RawData()))
l.wantList.RemoveType(block.Cid(), pb.Message_Wantlist_Block)
}
// Remove sent block presences from the want list for the peer
for _, bp := range m.BlockPresences() {
// Don't record sent data. We reserve that for data blocks.
if bp.Type == pb.Message_Have {
l.wantList.RemoveType(bp.Cid, pb.Message_Wantlist_Have)
}
}
} | Base | 1 |
func (e *Engine) Peers() []peer.ID {
e.lock.RLock()
defer e.lock.RUnlock()
response := make([]peer.ID, 0, len(e.ledgerMap))
for _, ledger := range e.ledgerMap {
response = append(response, ledger.Partner)
}
return response
} | Base | 1 |
func (e *Engine) PeerConnected(p peer.ID) {
e.lock.Lock()
defer e.lock.Unlock()
_, ok := e.ledgerMap[p]
if !ok {
e.ledgerMap[p] = newLedger(p)
}
e.scoreLedger.PeerConnected(p)
} | Base | 1 |
func (e *Engine) WantlistForPeer(p peer.ID) []wl.Entry {
partner := e.findOrCreate(p)
partner.lk.Lock()
entries := partner.wantList.Entries()
partner.lk.Unlock()
return entries
} | Base | 1 |
func (e *Engine) ReceivedBlocks(from peer.ID, blks []blocks.Block) {
if len(blks) == 0 {
return
}
l := e.findOrCreate(from)
// Record how many bytes were received in the ledger
l.lk.Lock()
defer l.lk.Unlock()
for _, blk := range blks {
log.Debugw("Bitswap engine <- block", "local", e.self, "from", from, "cid", blk.Cid(), "size", len(blk.RawData()))
e.scoreLedger.AddToReceivedBytes(l.Partner, len(blk.RawData()))
}
} | Base | 1 |
func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) {
test.Flaky(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sanfrancisco := newTestEngine(ctx, "sf")
seattle := newTestEngine(ctx, "sea")
m := message.New(true)
sanfrancisco.Engine.MessageSent(seattle.Peer, m)
seattle.Engine.MessageReceived(ctx, sanfrancisco.Peer, m)
if seattle.Peer == sanfrancisco.Peer {
t.Fatal("Sanity Check: Peers have same Key!")
}
if !peerIsPartner(seattle.Peer, sanfrancisco.Engine) {
t.Fatal("Peer wasn't added as a Partner")
}
if !peerIsPartner(sanfrancisco.Peer, seattle.Engine) {
t.Fatal("Peer wasn't added as a Partner")
}
seattle.Engine.PeerDisconnected(sanfrancisco.Peer)
if peerIsPartner(sanfrancisco.Peer, seattle.Engine) {
t.Fatal("expected peer to be removed")
}
} | Base | 1 |
func (l *peerLedger) Wants(p peer.ID, k cid.Cid) {
m, ok := l.cids[k]
if !ok {
m = make(map[peer.ID]struct{})
l.cids[k] = m
}
m[p] = struct{}{}
} | Base | 1 |
func (l *peerLedger) Peers(k cid.Cid) []peer.ID {
m, ok := l.cids[k]
if !ok {
return nil
}
peers := make([]peer.ID, 0, len(m))
for p := range m {
peers = append(peers, p)
}
return peers
} | Base | 1 |
func newPeerLedger() *peerLedger {
return &peerLedger{cids: make(map[cid.Cid]map[peer.ID]struct{})}
} | Base | 1 |
func (l *peerLedger) CancelWant(p peer.ID, k cid.Cid) {
m, ok := l.cids[k]
if !ok {
return
}
delete(m, p)
if len(m) == 0 {
delete(l.cids, k)
}
} | Base | 1 |
func (t *Teler) checkCommonWebAttack(r *http.Request) error {
// Decode the URL-encoded request URI of the URL
uri := toURLDecode(r.URL.RequestURI())
// Declare byte slice for request body.
var body string
// Initialize buffer to hold request body.
buf := &bytes.Buffer{}
// Use io.Copy to copy the request body to the buffer.
_, err := io.Copy(buf, r.Body)
if err == nil {
// If the read not fails, replace the request body
// with a new io.ReadCloser that reads from the buffer.
r.Body = io.NopCloser(buf)
// Convert the buffer to a string.
body = buf.String()
}
// Decode the URL-encoded of body
body = toURLDecode(body)
// Iterate over the filters in the CommonWebAttack data stored in the t.threat.cwa.Filters field
for _, filter := range t.threat.cwa.Filters {
// Initialize a variable to track whether a match is found
var match bool
// Check the type of the filter's pattern
switch pattern := filter.pattern.(type) {
case *regexp.Regexp: // If the pattern is a regex
match = pattern.MatchString(uri) || pattern.MatchString(body)
case *pcre.Matcher: // If the pattern is a PCRE expr
match = pattern.MatchString(uri, 0) || pattern.MatchString(body, 0)
default: // If the pattern is of an unknown type, skip to the next iteration
continue
}
// If the pattern matches the request URI or body, return an error indicating a common web attack has been detected
if match {
return errors.New(filter.Description)
}
}
// Return nil if no match is found
return nil
} | Base | 1 |
func (ea *ExternalAuth) AuthPlain(username, password string) ([]string, error) {
accountName, ok := auth.CheckDomainAuth(username, ea.perDomain, ea.domains)
if !ok {
return nil, module.ErrUnknownCredentials
}
// TODO: Extend process protocol to support multiple authorization identities.
return []string{username}, AuthUsingHelper(ea.helperPath, accountName, password)
} | Class | 2 |
func (a *Auth) AuthPlain(username, password string) ([]string, error) {
if a.useHelper {
if err := external.AuthUsingHelper(a.helperPath, username, password); err != nil {
return nil, err
}
}
err := runPAMAuth(username, password)
if err != nil {
return nil, err
}
return []string{username}, nil
} | Class | 2 |
func (a *Auth) AuthPlain(username, password string) ([]string, error) {
key, err := precis.UsernameCaseMapped.CompareKey(username)
if err != nil {
return nil, err
}
identities := make([]string, 0, 1)
if len(a.userTbls) != 0 {
for _, tbl := range a.userTbls {
repl, ok, err := tbl.Lookup(key)
if err != nil {
return nil, err
}
if !ok {
continue
}
if repl != "" {
identities = append(identities, repl)
} else {
identities = append(identities, key)
}
if a.onlyFirstID && len(identities) != 0 {
break
}
}
if len(identities) == 0 {
return nil, errors.New("plain_separate: unknown credentials")
}
}
var (
lastErr error
ok bool
)
for _, pass := range a.passwd {
passIDs, err := pass.AuthPlain(username, password)
if err != nil {
lastErr = err
continue
}
if len(a.userTbls) == 0 {
identities = append(identities, passIDs...)
}
ok = true
}
if !ok {
return nil, lastErr
}
return identities, nil
} | Class | 2 |
func TestPlainSplit_NoUser(t *testing.T) {
a := Auth{
passwd: []module.PlainAuth{
mockAuth{
db: map[string][]string{
"user1": []string{"user1a", "user1b"},
},
},
},
}
ids, err := a.AuthPlain("user1", "aaa")
if err != nil {
t.Fatal("Unexpected error:", err)
}
if !reflect.DeepEqual(ids, []string{"user1a", "user1b"}) {
t.Fatal("Wrong ids returned:", ids)
}
} | Class | 2 |
func TestPlainSplit_NoUser_MultiPass(t *testing.T) {
a := Auth{
passwd: []module.PlainAuth{
mockAuth{
db: map[string][]string{
"user2": []string{"user2a", "user2b"},
},
},
mockAuth{
db: map[string][]string{
"user1": []string{"user1a", "user1b"},
},
},
},
}
ids, err := a.AuthPlain("user1", "aaa")
if err != nil {
t.Fatal("Unexpected error:", err)
}
if !reflect.DeepEqual(ids, []string{"user1a", "user1b"}) {
t.Fatal("Wrong ids returned:", ids)
}
} | Class | 2 |
func TestPlainSplit_MultiUser_Pass(t *testing.T) {
a := Auth{
userTbls: []module.Table{
mockTable{
db: map[string]string{
"userWH": "user1",
},
},
mockTable{
db: map[string]string{
"user1": "user2",
},
},
},
passwd: []module.PlainAuth{
mockAuth{
db: map[string][]string{
"user2": []string{"user2a", "user2b"},
},
},
mockAuth{
db: map[string][]string{
"user1": []string{"user1a", "user1b"},
},
},
},
}
ids, err := a.AuthPlain("user1", "aaa")
if err != nil {
t.Fatal("Unexpected error:", err)
}
if !reflect.DeepEqual(ids, []string{"user2"}) {
t.Fatal("Wrong ids returned:", ids)
}
} | Class | 2 |
func (m mockAuth) AuthPlain(username, _ string) ([]string, error) {
ids, ok := m.db[username]
if !ok {
return nil, errors.New("invalid creds")
}
return ids, nil
} | Class | 2 |
func TestPlainSplit_UserPass(t *testing.T) {
a := Auth{
userTbls: []module.Table{
mockTable{
db: map[string]string{
"user1": "user2",
},
},
},
passwd: []module.PlainAuth{
mockAuth{
db: map[string][]string{
"user2": []string{"user2a", "user2b"},
},
},
mockAuth{
db: map[string][]string{
"user1": []string{"user1a", "user1b"},
},
},
},
}
ids, err := a.AuthPlain("user1", "aaa")
if err != nil {
t.Fatal("Unexpected error:", err)
}
if !reflect.DeepEqual(ids, []string{"user2"}) {
t.Fatal("Wrong ids returned:", ids)
}
} | Class | 2 |
func filterIdentity(accounts []string, identity string) ([]string, error) {
if identity == "" {
return accounts, nil
}
matchFound := false
for _, acc := range accounts {
if precis.UsernameCaseMapped.Compare(acc, identity) {
accounts = []string{identity}
matchFound = true
break
}
}
if !matchFound {
return nil, errors.New("auth: invalid credentials")
}
return accounts, nil
} | Class | 2 |
return sasl.NewLoginServer(func(username, password string) error {
accounts, err := s.AuthPlain(username, password)
if err != nil {
s.Log.Error("authentication failed", err, "username", username, "src_ip", remoteAddr)
return errors.New("auth: invalid credentials")
}
return successCb(accounts)
}) | Class | 2 |
func (s *SASLAuth) AuthPlain(username, password string) ([]string, error) {
if len(s.Plain) == 0 {
return nil, ErrUnsupportedMech
}
var lastErr error
accounts := make([]string, 0, 1)
for _, p := range s.Plain {
pAccs, err := p.AuthPlain(username, password)
if err != nil {
lastErr = err
continue
}
if s.OnlyFirstID {
return pAccs, nil
}
accounts = append(accounts, pAccs...)
}
if len(accounts) == 0 {
return nil, fmt.Errorf("no auth. provider accepted creds, last err: %w", lastErr)
}
return accounts, nil
} | Class | 2 |
srv := a.CreateSASL("XWHATEVER", &net.TCPAddr{}, func([]string) error { return nil }) | Class | 2 |
srv := a.CreateSASL("PLAIN", &net.TCPAddr{}, func(passed []string) error {
ids = passed
return nil
}) | Class | 2 |
func (m mockAuth) AuthPlain(username, _ string) ([]string, error) {
ids, ok := m.db[username]
if !ok {
return nil, errors.New("invalid creds")
}
return ids, nil
} | Class | 2 |
func (mockAuth) SASLMechanisms() []string {
return []string{sasl.Plain, sasl.Login}
} | Class | 2 |
func (a *Auth) AuthPlain(username, password string) ([]string, error) {
if a.useHelper {
return []string{username}, external.AuthUsingHelper(a.helperPath, username, password)
}
ent, err := Lookup(username)
if err != nil {
return nil, err
}
if !ent.IsAccountValid() {
return nil, fmt.Errorf("shadow: account is expired")
}
if !ent.IsPasswordValid() {
return nil, fmt.Errorf("shadow: password is expired")
}
if err := ent.VerifyPassword(password); err != nil {
if err == ErrWrongPassword {
return nil, module.ErrUnknownCredentials
}
return nil, err
}
return []string{username}, nil
} | Class | 2 |
func SASLAuthDirective(m *config.Map, node *config.Node) (interface{}, error) { | Class | 2 |
func (d *Dummy) AuthPlain(username, _ string) ([]string, error) {
return []string{username}, nil
} | Class | 2 |
func (d *Dummy) SASLMechanisms() []string {
return []string{sasl.Plain, sasl.Login}
} | Class | 2 |
func (store *Storage) AuthPlain(username, password string) ([]string, error) {
// TODO: Pass session context there.
defer trace.StartRegion(context.Background(), "sql/AuthPlain").End()
accountName, err := prepareUsername(username)
if err != nil {
return nil, err
}
password, err = precis.OpaqueString.CompareKey(password)
if err != nil {
return nil, err
}
// TODO: Make go-imap-sql CheckPlain return an actual error.
if !store.Back.CheckPlain(accountName, password) {
return nil, module.ErrUnknownCredentials
}
return []string{username}, nil
} | Class | 2 |
func Create(filePath string) (*os.File, error) {
if exist, err := IsPathExist(filePath); err != nil {
return nil, err
} else if exist {
return os.Create(filePath)
}
if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil {
return nil, err
}
return os.Create(filePath)
} | Base | 1 |
func RemoveFile(path string) error {
err := os.Remove(path)
return err
} | Base | 1 |
func IsPathExist(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
} | Base | 1 |
func BytesToFile(filePath string, data []byte) error {
exist, _ := IsPathExist(filePath)
if !exist {
if err := CreateFile(filePath); err != nil {
return err
}
}
return ioutil.WriteFile(filePath, data, 0644)
} | Base | 1 |
func unzipFile(file *zip.File, dstDir string) error {
// create the directory of file
filePath := path.Join(dstDir, file.Name)
if file.FileInfo().IsDir() {
if err := os.MkdirAll(filePath, os.ModePerm); err != nil {
return err
}
return nil
}
if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil {
return err
}
// open the file
r, err := file.Open()
if err != nil {
return err
}
defer r.Close()
// create the file
w, err := os.Create(filePath)
if err != nil {
return err
}
defer w.Close()
// save the decompressed file content
_, err = io.Copy(w, r)
return err
} | Base | 1 |
corsHandler := gh.CORS(gh.AllowCredentials(), gh.AllowedHeaders([]string{"x-requested-with", "content-type"}), gh.AllowedMethods([]string{"GET", "POST", "HEAD", "DELETE"}), gh.AllowedOriginValidator(func(origin string) bool {
if strings.Contains(origin, "localhost") ||
strings.HasSuffix(origin, "play-with-docker.com") ||
strings.HasSuffix(origin, "play-with-kubernetes.com") ||
strings.HasSuffix(origin, "docker.com") ||
strings.HasSuffix(origin, "play-with-go.dev") {
return true
}
return false
}), gh.AllowedOrigins([]string{})) | Base | 1 |
func NewIdpAuthnRequest(idp *IdentityProvider, r *http.Request) (*IdpAuthnRequest, error) {
req := &IdpAuthnRequest{
IDP: idp,
HTTPRequest: r,
Now: TimeNow(),
}
switch r.Method {
case "GET":
compressedRequest, err := base64.StdEncoding.DecodeString(r.URL.Query().Get("SAMLRequest"))
if err != nil {
return nil, fmt.Errorf("cannot decode request: %s", err)
}
req.RequestBuffer, err = ioutil.ReadAll(flate.NewReader(bytes.NewReader(compressedRequest)))
if err != nil {
return nil, fmt.Errorf("cannot decompress request: %s", err)
}
req.RelayState = r.URL.Query().Get("RelayState")
case "POST":
if err := r.ParseForm(); err != nil {
return nil, err
}
var err error
req.RequestBuffer, err = base64.StdEncoding.DecodeString(r.PostForm.Get("SAMLRequest"))
if err != nil {
return nil, err
}
req.RelayState = r.PostForm.Get("RelayState")
default:
return nil, fmt.Errorf("method not allowed")
}
return req, nil
} | Base | 1 |
func (sp *ServiceProvider) ValidateLogoutResponseRedirect(queryParameterData string) error {
retErr := &InvalidResponseError{
Now: TimeNow(),
}
rawResponseBuf, err := base64.StdEncoding.DecodeString(queryParameterData)
if err != nil {
retErr.PrivateErr = fmt.Errorf("unable to parse base64: %s", err)
return retErr
}
retErr.Response = string(rawResponseBuf)
gr, err := ioutil.ReadAll(flate.NewReader(bytes.NewBuffer(rawResponseBuf)))
if err != nil {
retErr.PrivateErr = err
return retErr
}
if err := xrv.Validate(bytes.NewReader(gr)); err != nil {
return err
}
doc := etree.NewDocument()
if err := doc.ReadFromBytes(rawResponseBuf); err != nil {
retErr.PrivateErr = err
return retErr
}
if err := sp.validateSignature(doc.Root()); err != nil {
retErr.PrivateErr = err
return retErr
}
var resp LogoutResponse
if err := unmarshalElement(doc.Root(), &resp); err != nil {
retErr.PrivateErr = err
return retErr
}
if err := sp.validateLogoutResponse(&resp); err != nil {
return err
}
return nil
} | Base | 1 |
func authentication(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
authenticationHandler(w, r)
next.ServeHTTP(w, r)
})
} | Class | 2 |
func authenticationWithStore(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
store := helpers.Store(r)
db.StoreSession(store, r.URL.String(), func() {
authenticationHandler(w, r)
})
next.ServeHTTP(w, r)
})
} | Class | 2 |
func getSystemInfo(w http.ResponseWriter, r *http.Request) {
//updateAvailable, err := util.CheckUpdate()
//if err != nil {
// helpers.WriteError(w, err)
// return
//}
body := map[string]interface{}{
"version": util.Version,
//"update": updateAvailable,
"ansible": util.AnsibleVersion(),
"demo": util.Config.DemoMode,
}
helpers.WriteJSON(w, http.StatusOK, body)
} | Class | 2 |
func (v *V002Entry) Unmarshal(pe models.ProposedEntry) error {
it, ok := pe.(*models.Intoto)
if !ok {
return errors.New("cannot unmarshal non Intoto v0.0.2 type")
}
var err error
if err := types.DecodeEntry(it.Spec, &v.IntotoObj); err != nil {
return err
}
// field validation
if err := v.IntotoObj.Validate(strfmt.Default); err != nil {
return err
}
if string(v.IntotoObj.Content.Envelope.Payload) == "" {
return nil
}
env := &dsse.Envelope{
Payload: string(v.IntotoObj.Content.Envelope.Payload),
PayloadType: *v.IntotoObj.Content.Envelope.PayloadType,
}
allPubKeyBytes := make([][]byte, 0)
for _, sig := range v.IntotoObj.Content.Envelope.Signatures {
env.Signatures = append(env.Signatures, dsse.Signature{
KeyID: sig.Keyid,
Sig: string(sig.Sig),
})
allPubKeyBytes = append(allPubKeyBytes, sig.PublicKey)
}
if _, err := verifyEnvelope(allPubKeyBytes, env); err != nil {
return err
}
v.env = *env
decodedPayload, err := base64.StdEncoding.DecodeString(string(v.IntotoObj.Content.Envelope.Payload))
if err != nil {
return fmt.Errorf("could not decode envelope payload: %w", err)
}
h := sha256.Sum256(decodedPayload)
v.IntotoObj.Content.PayloadHash = &models.IntotoV002SchemaContentPayloadHash{
Algorithm: swag.String(models.IntotoV002SchemaContentPayloadHashAlgorithmSha256),
Value: swag.String(hex.EncodeToString(h[:])),
}
return nil
} | Base | 1 |
func (v V002Entry) Verifier() (pki.PublicKey, error) {
if v.IntotoObj.Content == nil || v.IntotoObj.Content.Envelope == nil {
return nil, errors.New("intoto v0.0.2 entry not initialized")
}
sigs := v.IntotoObj.Content.Envelope.Signatures
if len(sigs) == 0 {
return nil, errors.New("no signatures found on intoto entry")
}
return x509.NewPublicKey(bytes.NewReader(v.IntotoObj.Content.Envelope.Signatures[0].PublicKey))
} | Base | 1 |
func (v V002Entry) Insertable() (bool, error) {
if v.IntotoObj.Content == nil {
return false, errors.New("missing content property")
}
if v.IntotoObj.Content.Envelope == nil {
return false, errors.New("missing envelope property")
}
if len(v.IntotoObj.Content.Envelope.Payload) == 0 {
return false, errors.New("missing envelope content")
}
if v.IntotoObj.Content.Envelope.PayloadType == nil || len(*v.IntotoObj.Content.Envelope.PayloadType) == 0 {
return false, errors.New("missing payloadType content")
}
if len(v.IntotoObj.Content.Envelope.Signatures) == 0 {
return false, errors.New("missing signatures content")
}
for _, sig := range v.IntotoObj.Content.Envelope.Signatures {
if len(sig.Sig) == 0 {
return false, errors.New("missing signature content")
}
if len(sig.PublicKey) == 0 {
return false, errors.New("missing publicKey content")
}
}
if v.env.Payload == "" || v.env.PayloadType == "" || len(v.env.Signatures) == 0 {
return false, errors.New("invalid DSSE envelope")
}
return true, nil
} | Base | 1 |
func createRekorEnvelope(dsseEnv *dsse.Envelope, pub [][]byte) *models.IntotoV002SchemaContentEnvelope {
env := &models.IntotoV002SchemaContentEnvelope{}
b64 := strfmt.Base64([]byte(dsseEnv.Payload))
env.Payload = b64
env.PayloadType = &dsseEnv.PayloadType
for i, sig := range dsseEnv.Signatures {
env.Signatures = append(env.Signatures, &models.IntotoV002SchemaContentEnvelopeSignaturesItems0{
Keyid: sig.KeyID,
Sig: strfmt.Base64([]byte(sig.Sig)),
PublicKey: strfmt.Base64(pub[i]),
})
}
return env
} | Base | 1 |
func (*FailedEventsManagerT) SaveFailedRecordIDs(taskRunIDFailedEventsMap map[string][]*FailedEventRowT, txn *sql.Tx) {
if !failedKeysEnabled {
return
}
for taskRunID, failedEvents := range taskRunIDFailedEventsMap {
table := fmt.Sprintf(`%s_%s`, failedKeysTablePrefix, taskRunID)
sqlStatement := fmt.Sprintf(`CREATE TABLE IF NOT EXISTS %s (
destination_id TEXT NOT NULL,
record_id JSONB NOT NULL,
created_at TIMESTAMP NOT NULL);`, table)
_, err := txn.Exec(sqlStatement)
if err != nil {
_ = txn.Rollback()
panic(err)
}
insertQuery := fmt.Sprintf(`INSERT INTO %s VALUES($1, $2, $3);`, table)
stmt, err := txn.Prepare(insertQuery)
if err != nil {
_ = txn.Rollback()
panic(err)
}
createdAt := time.Now()
for _, failedEvent := range failedEvents {
if len(failedEvent.RecordID) == 0 || !json.Valid(failedEvent.RecordID) {
pkgLogger.Infof("skipped adding invalid recordId: %s, to failed keys table: %s", failedEvent.RecordID, table)
continue
}
_, err = stmt.Exec(failedEvent.DestinationID, failedEvent.RecordID, createdAt)
if err != nil {
panic(err)
}
}
stmt.Close()
}
} | Base | 1 |
func (fem *FailedEventsManagerT) FetchFailedRecordIDs(taskRunID string) []*FailedEventRowT {
if !failedKeysEnabled {
return []*FailedEventRowT{}
}
failedEvents := make([]*FailedEventRowT, 0)
var rows *sql.Rows
var err error
table := fmt.Sprintf(`%s_%s`, failedKeysTablePrefix, taskRunID)
sqlStatement := fmt.Sprintf(`SELECT %[1]s.destination_id, %[1]s.record_id
FROM %[1]s `, table)
rows, err = fem.dbHandle.Query(sqlStatement)
if err != nil {
pkgLogger.Errorf("Failed to fetch from table %s with error: %v", taskRunID, err)
return failedEvents
}
defer rows.Close()
for rows.Next() {
var failedEvent FailedEventRowT
err := rows.Scan(&failedEvent.DestinationID, &failedEvent.RecordID)
if err != nil {
panic(err)
}
failedEvents = append(failedEvents, &failedEvent)
}
return failedEvents
} | Base | 1 |
func handle() {
// startReaper()
fluid.LogVersion()
if pprofAddr != "" {
newPprofServer(pprofAddr)
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Port: 9443,
})
if err != nil {
panic(fmt.Sprintf("csi: unable to create controller manager due to error %v", err))
}
config := config.Config{
NodeId: nodeID,
Endpoint: endpoint,
PruneFs: pruneFs,
PrunePath: prunePath,
}
if err = csi.SetupWithManager(mgr, config); err != nil {
panic(fmt.Sprintf("unable to set up manager due to error %v", err))
}
ctx := ctrl.SetupSignalHandler()
if err = mgr.Start(ctx); err != nil {
panic(fmt.Sprintf("unable to start controller recover due to error %v", err))
}
} | Class | 2 |
func NewDriver(nodeID, endpoint string, client client.Client, apiReader client.Reader) *driver {
glog.Infof("Driver: %v version: %v", driverName, version)
proto, addr := utils.SplitSchemaAddr(endpoint)
glog.Infof("protocol: %v addr: %v", proto, addr)
if !strings.HasPrefix(addr, "/") {
addr = fmt.Sprintf("/%s", addr)
}
socketDir := filepath.Dir(addr)
err := os.MkdirAll(socketDir, 0755)
if err != nil {
glog.Errorf("failed due to %v", err)
os.Exit(1)
}
csiDriver := csicommon.NewCSIDriver(driverName, version, nodeID)
csiDriver.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME})
csiDriver.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER})
return &driver{
nodeId: nodeID,
endpoint: endpoint,
csiDriver: csiDriver,
client: client,
apiReader: apiReader,
}
} | Class | 2 |
func (d *driver) newNodeServer() *nodeServer {
return &nodeServer{
nodeId: d.nodeId,
DefaultNodeServer: csicommon.NewDefaultNodeServer(d.csiDriver),
client: d.client,
apiReader: d.apiReader,
}
} | Class | 2 |
func (ns *nodeServer) prepareSessMgr(workDir string) error {
sessMgrLabelKey := common.SessMgrNodeSelectorKey
var labelsToModify common.LabelsToModify
labelsToModify.Add(sessMgrLabelKey, "true")
node, err := ns.getNode()
if err != nil {
return errors.Wrapf(err, "can't get node %s", ns.nodeId)
}
_, err = utils.ChangeNodeLabelWithPatchMode(ns.client, node, labelsToModify)
if err != nil {
return errors.Wrapf(err, "error when patching labels on node %s", ns.nodeId)
}
// check sessmgrd.sock file existence
sessMgrSockFilePath := filepath.Join(workDir, common.SessMgrSockFile)
glog.Infof("Checking existence of file %s", sessMgrSockFilePath)
retryLimit := 30
var i int
for i = 0; i < retryLimit; i++ {
if _, err := os.Stat(sessMgrSockFilePath); err == nil {
break
}
// err != nil
if !os.IsNotExist(err) {
glog.Errorf("fail to os.Stat sessmgr socket file %s", sessMgrSockFilePath)
}
time.Sleep(1 * time.Second)
}
if i >= retryLimit {
return errors.New("timeout waiting for SessMgr Pod to be ready")
}
return nil
} | Class | 2 |
func (ns *nodeServer) getNode() (node *v1.Node, err error) {
// Default to allow patch stale node info
if envVar, found := os.LookupEnv(AllowPatchStaleNodeEnv); !found || envVar == "true" {
if ns.node != nil {
glog.V(3).Infof("Found cached node %s", ns.node.Name)
return ns.node, nil
}
}
if node, err = kubeclient.GetNode(ns.apiReader, ns.nodeId); err != nil {
return nil, err
}
glog.V(1).Infof("Got node %s from api server", node.Name)
ns.node = node
return ns.node, nil
} | Class | 2 |
func Register(mgr manager.Manager, cfg config.Config) error {
csiDriver := NewDriver(cfg.NodeId, cfg.Endpoint, mgr.GetClient(), mgr.GetAPIReader())
if err := mgr.Add(csiDriver); err != nil {
return err
}
return nil
} | Class | 2 |
func AccountPostLogin(w http.ResponseWriter, r *http.Request) {
account, err := (&models.Account{Context: ctx.Context}).FromBody(r)
if err != nil {
ctx.HandleStatus(w, r, err.Error(), http.StatusBadRequest)
return
}
var a1 = &models.Account{Context: ctx.Context}
a1.FromData(account)
a1, err = a1.Get()
if err != nil {
ctx.HandleStatus(w, r, err.Error(), http.StatusBadRequest)
return
}
account, err = a1.ValidatePassword(account.Password, "Password")
if err != nil {
ctx.HandleStatus(w, r, "Invalid username or password!", http.StatusForbidden)
return
}
session, err := (&models.Session{Context: ctx.Context, Unique: account.Unique}).Post()
if err != nil {
ctx.HandleStatus(w, r, err.Error(), http.StatusBadRequest)
return
}
expiry := time.Now().Add(time.Hour * 15) // TODO: Change this once we've implemented refreshing
SetAuthCookie(w, types.CookieSessionID, session.Key.ID.String(), expiry)
SetAuthCookie(w, types.CookieRefreshToken, session.Refresh, expiry)
ctx.HandleJson(w, r, account.CopyPublic(), http.StatusOK)
} | Class | 2 |
func authenticateDNSToken(tokenString string) bool {
tokens := strings.Split(tokenString, " ")
if len(tokens) < 2 {
return false
}
return tokens[1] == servercfg.GetDNSKey()
} | Base | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.