// NOTE: removed non-Go artifact lines ("text", "stringlengths 11", "4.05M",
// "|", "|---|") introduced by text extraction; they are not valid Go source.
package pathfileops
import (
"os"
"testing"
)
// TestFileMgr_MoveFileToNewDir_01 exercises the success path of
// FileMgr.MoveFileToNewDir(). A fixture file is copied into the
// 'checkfiles' directory, moved to the 'createFilesTest' directory,
// and the test verifies the file exists at the destination and no
// longer exists at the source. Fixture files are deleted on every
// exit path to keep the test directories clean.
func TestFileMgr_MoveFileToNewDir_01(t *testing.T) {
	fh := FileHelper{}
	setupSrcFile := fh.AdjustPathSlash("../../logTest/FileMgmnt/TestFile003.txt")
	srcFile := fh.AdjustPathSlash("../../checkfiles/TestFile003.txt")
	destDir := fh.AdjustPathSlash("../../createFilesTest")
	setupDestFile := fh.AdjustPathSlash("../../createFilesTest/TestFile003.txt")
	// Test setup: make sure the destination file does not already exist.
	if fh.DoesFileExist(setupDestFile) {
		err := fh.DeleteDirFile(setupDestFile)
		if err != nil {
			t.Errorf("Error returned by fh.DeleteDirFile(setupDestFile).\n"+
				"setupDestFile='%v'\nError:'%v'",
				setupDestFile, err.Error())
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
		if fh.DoesFileExist(setupDestFile) {
			t.Errorf("Error: Attempted Deletion Failed!!\n"+
				"Destination file STILL EXISTS!\n"+
				"setupDestFile='%v'", setupDestFile)
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
	}
	// Test setup: make sure the source file does not already exist.
	if fh.DoesFileExist(srcFile) {
		err := fh.DeleteDirFile(srcFile)
		if err != nil {
			// Fix: message previously misspelled "STILL" as "SILL".
			t.Errorf("Error: Attempted Deletion Failed!!\n"+
				"'srcFile' STILL EXISTS!!\n"+
				"srcFile='%v'\n"+
				"Error:'%v'\n",
				srcFile, err.Error())
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
		if fh.DoesFileExist(srcFile) {
			t.Errorf("Error: Attempted Deletion Failed!\n"+
				"'srcFile' STILL EXISTS!\n"+
				"srcFile='%v'\n", srcFile)
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
	}
	// Stage the source fixture file.
	err := fh.CopyFileByIo(setupSrcFile, srcFile)
	if err != nil {
		t.Errorf("Error returned by fh.CopyFileByIo(setupSrcFile, srcFile)\n"+
			"setupSrcFile='%v'\nsrcFile='%v'\nError='%v'\n",
			setupSrcFile, srcFile, err.Error())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	if !fh.DoesFileExist(srcFile) {
		t.Errorf("Attempt copy operation failed!!\n"+
			"Source File does NOT EXIST!!\n"+
			"srcFile='%v'\n", srcFile)
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	srcFileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(srcFile)
	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromPathFileNameExtStr(srcFile).\n"+
			"srcFile='%v'\nError='%v'\n",
			srcFile, err.Error())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	// Operation under test.
	newFMgr, err := srcFileMgr.MoveFileToNewDir(destDir)
	if err != nil {
		t.Errorf("Error returned by srcFileMgr.MoveFileToNewDir(destDir).\n"+
			"destDir='%v'\nError='%v'\n",
			destDir, err.Error())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	// Verify the moved file exists at the destination...
	if !fh.DoesFileExist(newFMgr.absolutePathFileName) {
		t.Errorf("Error: Move Operation Failed!\nDestination File DOES NOT EXIST!\n"+
			"Destination File (newFMgr)='%v'", newFMgr.absolutePathFileName)
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	doesExist, err := newFMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned by newFMgr.DoesThisFileExist().\n"+
			"newFMgr='%v'\nError='%v'\n",
			newFMgr.absolutePathFileName, err.Error())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	if !doesExist {
		t.Errorf("Error: After Move Operation Destination File DOES NOT EXIST!\n"+
			"Destination File (newFMgr)='%v'",
			newFMgr.absolutePathFileName)
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	// ...and no longer exists at the source.
	doesExist, err = fh.DoesThisFileExist(srcFile)
	if err != nil {
		t.Errorf("Error returned by fh.DoesThisFileExist(srcFile)\n"+
			"srcFile='%v'\nError='%v'\n", srcFile, err.Error())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	if doesExist {
		t.Errorf("Error: After move operation, the source file still exists!\n"+
			"srcFile='%v'\n", srcFile)
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	// Final cleanup.
	err = fh.DeleteDirFile(srcFile)
	if err != nil {
		t.Errorf("Error returned by fh.DeleteDirFile(srcFile)\n"+
			"srcFile='%v'\n"+
			"Error='%v'\n",
			srcFile, err.Error())
	}
	err = fh.DeleteDirFile(setupDestFile)
	if err != nil {
		t.Errorf("Error returned by fh.DeleteDirFile(setupDestFile)\n"+
			"setupDestFile='%v'\n"+
			"Error='%v'\n",
			setupDestFile, err.Error())
	}
}
// TestFileMgr_MoveFileToNewDir_02 verifies that MoveFileToNewDir()
// returns an error when the source FileMgr has been invalidated
// (isInitialized == false). Fixture files are staged as in the
// success-path test and removed before exit.
func TestFileMgr_MoveFileToNewDir_02(t *testing.T) {
	fh := FileHelper{}
	setupSrcFile := fh.AdjustPathSlash("../../logTest/FileMgmnt/TestFile003.txt")
	srcFile := fh.AdjustPathSlash("../../checkfiles/TestFile003.txt")
	destDir := fh.AdjustPathSlash("../../createFilesTest")
	setupDestFile := fh.AdjustPathSlash("../../createFilesTest/TestFile003.txt")
	// Test setup: delete any pre-existing destination file.
	if fh.DoesFileExist(setupDestFile) {
		err := fh.DeleteDirFile(setupDestFile)
		if err != nil {
			t.Errorf("Error returned by fh.DeleteDirFile(setupDestFile)\n"+
				"setupDestFile='%v'\nError='%v'\n",
				setupDestFile, err.Error())
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
		if fh.DoesFileExist(setupDestFile) {
			t.Errorf("Error: Destination File STILL EXISTS!\n"+
				"setupDestFile='%v'\n", setupDestFile)
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
	}
	// Test setup: delete any pre-existing source file.
	if fh.DoesFileExist(srcFile) {
		err := fh.DeleteDirFile(srcFile)
		if err != nil {
			t.Errorf("Error deleting source file\n"+
				"Error returned by fh.DeleteDirFile(srcFile)\n"+
				"'srcFile' STILL EXISTS!\n"+
				"srcFile='%v'\nError:'%v'\n",
				srcFile, err.Error())
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
		if fh.DoesFileExist(srcFile) {
			t.Errorf("Error: Attempted Deletion Failed!\n"+
				"'srcFile' STILL EXISTS!\n"+
				"srcFile='%v'\n", srcFile)
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
	}
	err := fh.CopyFileByIo(setupSrcFile, srcFile)
	if err != nil {
		// Fix: the original message omitted the returned error value.
		t.Errorf("Error returned by fh.CopyFileByIo(setupSrcFile, srcFile)\n"+
			"setupSrcFile='%v'\n"+
			"srcFile='%v'\n"+
			"Error='%v'\n",
			setupSrcFile,
			srcFile,
			err.Error())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	if !fh.DoesFileExist(srcFile) {
		t.Errorf("Copy operation FAILED!\n"+
			"Destination file DOES NOT EXIST!!\n"+
			"Destination file='%v'\n", srcFile)
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	srcFileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(srcFile)
	if err != nil {
		t.Errorf("Error returned from FileMgr{}."+
			"NewFromPathFileNameExtStr(srcFile).\n"+
			"srcFile='%v'\nError='%v'\n",
			srcFile, err.Error())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	// Invalidate the FileMgr; the move should now fail.
	srcFileMgr.isInitialized = false
	_, err = srcFileMgr.MoveFileToNewDir(destDir)
	if err == nil {
		t.Error("Expected error return from srcFileMgr.MoveFileToNewDir(destDir)\n" +
			"because srcFileMgr is invalid.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
	// Final cleanup.
	err = fh.DeleteDirFile(srcFile)
	if err != nil {
		t.Errorf("Error returned by fh.DeleteDirFile(srcFile)\n"+
			"srcFile='%v'\n"+
			"Error='%v'\n",
			srcFile, err.Error())
	}
	err = fh.DeleteDirFile(setupDestFile)
	if err != nil {
		t.Errorf("Error returned by fh.DeleteDirFile(setupDestFile)\n"+
			"setupDestFile='%v'\n"+
			"Error='%v'\n",
			setupDestFile, err.Error())
	}
}
// TestFileMgr_MoveFileToNewDir_03 verifies that MoveFileToNewDir()
// returns an error when the destination directory parameter is an
// empty string. Fixture files are staged and removed on every exit
// path.
func TestFileMgr_MoveFileToNewDir_03(t *testing.T) {
	fh := FileHelper{}
	setupSrcFile := fh.AdjustPathSlash("../../logTest/FileMgmnt/TestFile003.txt")
	srcFile := fh.AdjustPathSlash("../../checkfiles/TestFile003.txt")
	setupDestFile := fh.AdjustPathSlash("../../createFilesTest/TestFile003.txt")
	// Test setup: delete any pre-existing destination file.
	if fh.DoesFileExist(setupDestFile) {
		err := fh.DeleteDirFile(setupDestFile)
		if err != nil {
			t.Errorf("Error on DeleteDirFile() deleting destination file.\n"+
				"Destination File='%v'\n"+
				"Error='%v'\n",
				setupDestFile, err.Error())
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
		if fh.DoesFileExist(setupDestFile) {
			t.Errorf("Error - destination file, STILL EXISTS!\n"+
				"Destination File='%v'\n", setupDestFile)
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
	}
	// Test setup: delete any pre-existing source file.
	if fh.DoesFileExist(srcFile) {
		err := fh.DeleteDirFile(srcFile)
		if err != nil {
			t.Errorf("Error on DeleteDirFile() deleting source file.\n"+
				"Source File='%v'\nError:'%v'\n",
				srcFile, err.Error())
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
		if fh.DoesFileExist(srcFile) {
			t.Errorf("Error - Failed to Delete 'srcFile' STILL EXISTS!\n"+
				"srcFile='%v'\n", srcFile)
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
	}
	err := fh.CopyFileByIo(setupSrcFile, srcFile)
	if err != nil {
		t.Errorf("Error returned by fh.CopyFileByIo(setupSrcFile, srcFile)\n"+
			"setupSrcFile='%v'\nsrcFile='%v'\nError='%v'\n",
			setupSrcFile, srcFile, err.Error())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	if !fh.DoesFileExist(srcFile) {
		t.Errorf("Test Setup Error: Source File does NOT EXIST!!\n"+
			"srcFile='%v'\n", srcFile)
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	srcFileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(srcFile)
	if err != nil {
		t.Errorf("Error returned from FileMgr{}."+
			"NewFromPathFileNameExtStr(srcFile).\n"+
			"srcFile='%v'\nError='%v'\n",
			srcFile, err.Error())
		// Fix: clean up staged fixture files before returning; the
		// original left srcFile behind on this error path.
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	// Operation under test: an empty destination string must error.
	_, err = srcFileMgr.MoveFileToNewDir("")
	if err == nil {
		t.Error("Expected error return from srcFileMgr.MoveFileToNewDir(\"\") " +
			"because the input parameter is an empty string.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
	// Final cleanup.
	err = fh.DeleteDirFile(srcFile)
	if err != nil {
		t.Errorf("Error returned by fh.DeleteDirFile(srcFile)\n"+
			"srcFile='%v'\n"+
			"Error='%v'\n",
			srcFile, err.Error())
	}
	err = fh.DeleteDirFile(setupDestFile)
	if err != nil {
		t.Errorf("Error returned by fh.DeleteDirFile(setupDestFile)\n"+
			"setupDestFile='%v'\n"+
			"Error='%v'\n",
			setupDestFile, err.Error())
	}
}
// TestFileMgr_MoveFileToNewDir_04 repeats the success path of
// FileMgr.MoveFileToNewDir() but verifies the result through the
// returned FileMgr's public API (DoesFileExist / GetAbsolutePath)
// rather than through unexported fields. Fixture files are staged
// before the move and removed on every exit path.
func TestFileMgr_MoveFileToNewDir_04(t *testing.T) {
	fh := FileHelper{}
	setupSrcFile := fh.AdjustPathSlash("../../logTest/FileMgmnt/TestFile003.txt")
	srcFile := fh.AdjustPathSlash("../../checkfiles/TestFile003.txt")
	destDir := fh.AdjustPathSlash("../../createFilesTest")
	setupDestFile := fh.AdjustPathSlash("../../createFilesTest/TestFile003.txt")
	// Test setup: delete any pre-existing destination file.
	if fh.DoesFileExist(setupDestFile) {
		err := fh.DeleteDirFile(setupDestFile)
		if err != nil {
			t.Errorf("Error on DeleteDirFile() deleting destination file.\n"+
				"Destination File='%v'\n"+
				"Error='%v'\n",
				setupDestFile, err.Error())
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
		if fh.DoesFileExist(setupDestFile) {
			t.Errorf("Error - destination file, STILL EXISTS!\n"+
				"Destination File='%v'\n", setupDestFile)
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
	}
	// Test setup: delete any pre-existing source file.
	if fh.DoesFileExist(srcFile) {
		err := fh.DeleteDirFile(srcFile)
		if err != nil {
			t.Errorf("Error on DeleteDirFile() deleting source file.\n"+
				"Source File='%v'\nError:'%v'\n",
				srcFile, err.Error())
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
		if fh.DoesFileExist(srcFile) {
			t.Errorf("Error - Failed to Delete 'srcFile' STILL EXISTS!\n"+
				"srcFile='%v'\n", srcFile)
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
	}
	// Stage the source fixture file.
	err := fh.CopyFileByIo(setupSrcFile, srcFile)
	if err != nil {
		t.Errorf("Error returned by fh.CopyFileByIo(setupSrcFile, srcFile)\n"+
			"setupSrcFile='%v'\nsrcFile='%v'\nError='%v'\n",
			setupSrcFile, srcFile, err.Error())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	if !fh.DoesFileExist(srcFile) {
		t.Errorf("Test Setup Error: Source File does NOT EXIST!!\n"+
			"srcFile='%v'\n", srcFile)
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	srcFileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(srcFile)
	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromPathFileNameExtStr(srcFile).\n"+
			"srcFile='%v'\nError='%v'\n",
			srcFile, err.Error())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	// Operation under test.
	targetFMgr, err := srcFileMgr.MoveFileToNewDir(destDir)
	if err != nil {
		t.Errorf("Error returned by srcFileMgr.MoveFileToNewDir(destDir)\n"+
			"destDir='%v'\nError='%v'\n", destDir, err.Error())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	// Verify the destination file via the returned FileMgr.
	if !targetFMgr.DoesFileExist() {
		t.Errorf("Error: After Move Operation Destination File DOES NOT EXIST!\n"+
			"Destination File='%v'\n", targetFMgr.GetAbsolutePath())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	// Final cleanup.
	err = fh.DeleteDirFile(srcFile)
	if err != nil {
		t.Errorf("Error returned by fh.DeleteDirFile(srcFile)\n"+
			"srcFile='%v'\n"+
			"Error='%v'\n",
			srcFile, err.Error())
	}
	err = fh.DeleteDirFile(setupDestFile)
	if err != nil {
		t.Errorf("Error returned by fh.DeleteDirFile(setupDestFile)\n"+
			"setupDestFile='%v'\n"+
			"Error='%v'\n",
			setupDestFile, err.Error())
	}
}
// TestFileMgr_MoveFileToNewDir_05 moves a file to a destination
// directory which does not yet exist ("iDoNotExit") and verifies the
// move succeeds and the target file exists afterwards. The entire
// destination directory tree is removed on exit.
func TestFileMgr_MoveFileToNewDir_05(t *testing.T) {
	fh := FileHelper{}
	setupSrcFile := fh.AdjustPathSlash("../../logTest/FileMgmnt/TestFile003.txt")
	srcFile := fh.AdjustPathSlash("../../checkfiles/TestFile003.txt")
	// Deliberately non-existent destination directory.
	destDir := fh.AdjustPathSlash("../../iDoNotExit")
	setupDestFile := fh.AdjustPathSlash("../../iDoNotExit/TestFile003.txt")
	// Test setup: delete any pre-existing destination file.
	if fh.DoesFileExist(setupDestFile) {
		err := fh.DeleteDirFile(setupDestFile)
		if err != nil {
			t.Errorf("Error on DeleteDirFile() deleting destination file.\n"+
				"Destination File='%v'\n"+
				"Error='%v'\n",
				setupDestFile, err.Error())
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
		if fh.DoesFileExist(setupDestFile) {
			t.Errorf("Error - destination file, STILL EXISTS!\n"+
				"Destination File='%v'\n", setupDestFile)
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
	}
	// Test setup: delete any pre-existing source file.
	if fh.DoesFileExist(srcFile) {
		err := fh.DeleteDirFile(srcFile)
		if err != nil {
			t.Errorf("Error on DeleteDirFile() deleting source file.\n"+
				"Source File='%v'\nError:'%v'\n",
				srcFile, err.Error())
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
		if fh.DoesFileExist(srcFile) {
			t.Errorf("Error - Failed to Delete 'srcFile' STILL EXISTS!\n"+
				"srcFile='%v'\n", srcFile)
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
	}
	err := fh.CopyFileByIo(setupSrcFile, srcFile)
	if err != nil {
		t.Errorf("Error returned by fh.CopyFileByIo(setupSrcFile, srcFile)\n"+
			"setupSrcFile='%v'\nsrcFile='%v'\nError='%v'\n",
			setupSrcFile, srcFile, err.Error())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	if !fh.DoesFileExist(srcFile) {
		t.Errorf("Test Setup Error: Source File does NOT EXIST!!\n"+
			"srcFile='%v'\n", srcFile)
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	srcFileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(srcFile)
	if err != nil {
		t.Errorf("Error returned from FileMgr{}."+
			"NewFromPathFileNameExtStr(srcFile).\n"+
			"srcFile='%v'\nError='%v'\n",
			srcFile, err.Error())
		// Fix: remove the staged srcFile before returning; the
		// original left it behind on this error path.
		_ = fh.DeleteDirFile(srcFile)
		return
	}
	// Operation under test: move into a directory which must be created.
	targetFMgr, err := srcFileMgr.MoveFileToNewDir(destDir)
	if err != nil {
		t.Errorf("Error returned by srcFileMgr.MoveFileToNewDir(destDir)\n"+
			"destDir='%v'\nError='%v'\n", destDir, err.Error())
		_ = fh.DeleteDirPathAll(destDir)
		return
	}
	if !targetFMgr.DoesFileExist() {
		t.Errorf("Error: After 'move' operation, Target File DOES NOT EXIST!\n"+
			"destDir='%v'\n", destDir)
	}
	// Final cleanup: remove the created destination directory tree.
	err = fh.DeleteDirPathAll(destDir)
	if err != nil {
		t.Errorf("Error returned by fh.DeleteDirPathAll(destDir)\n"+
			"destDir='%v'\nError='%v'\n", destDir, err.Error())
	}
}
// TestFileMgr_MoveFileToNewDir_06 verifies that MoveFileToNewDir()
// returns an error when the destination directory string consists of
// blank spaces. The staged source file is removed before exit.
func TestFileMgr_MoveFileToNewDir_06(t *testing.T) {
	fh := FileHelper{}
	setupSrcFile := fh.AdjustPathSlash("../../logTest/FileMgmnt/TestFile003.txt")
	srcFile := fh.AdjustPathSlash("../../checkfiles/TestFile003.txt")
	// Deliberately invalid destination: blank-space directory string.
	destDir := fh.AdjustPathSlash(" ")
	// Test setup: delete any pre-existing source file.
	if fh.DoesFileExist(srcFile) {
		err := fh.DeleteDirFile(srcFile)
		if err != nil {
			t.Errorf("Error on DeleteDirFile() deleting source file.\n"+
				"Source File='%v'\nError:'%v'\n",
				srcFile, err.Error())
			_ = fh.DeleteDirFile(srcFile)
			return
		}
		if fh.DoesFileExist(srcFile) {
			t.Errorf("Error - Failed to Delete 'srcFile' STILL EXISTS!\n"+
				"srcFile='%v'\n", srcFile)
			_ = fh.DeleteDirFile(srcFile)
			return
		}
	}
	err := fh.CopyFileByIo(setupSrcFile, srcFile)
	if err != nil {
		t.Errorf("Error returned by fh.CopyFileByIo(setupSrcFile, srcFile)\n"+
			"setupSrcFile='%v'\nsrcFile='%v'\nError='%v'\n",
			setupSrcFile, srcFile, err.Error())
		_ = fh.DeleteDirFile(srcFile)
		return
	}
	if !fh.DoesFileExist(srcFile) {
		t.Errorf("Test Setup Error: Source File does NOT EXIST!!\n"+
			"srcFile='%v'\n", srcFile)
		_ = fh.DeleteDirFile(srcFile)
		return
	}
	srcFileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(srcFile)
	if err != nil {
		t.Errorf("Error returned from FileMgr{}."+
			"NewFromPathFileNameExtStr(srcFile).\n"+
			"srcFile='%v'\nError='%v'\n",
			srcFile, err.Error())
		// Fix: remove the staged srcFile before returning; the
		// original left it behind on this error path.
		_ = fh.DeleteDirFile(srcFile)
		return
	}
	// Operation under test: a blank destination string must error.
	_, err = srcFileMgr.MoveFileToNewDir(destDir)
	if err == nil {
		t.Error("Expected error return from srcFileMgr.MoveFileToNewDir(destDir)\n" +
			"because the 'destDir' string consists of blank spaces.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
	// Final cleanup.
	err = fh.DeleteDirFile(srcFile)
	if err != nil {
		t.Errorf("Error returned by fh.DeleteDirFile(srcFile)\n"+
			"srcFile='%v'\n"+
			"Error='%v'\n",
			srcFile, err.Error())
	}
}
// TestFileMgr_MoveFileToNewDir_07 repeats the success-path move test
// using a different fixture file (level_0_3_test.txt) and verifies
// the file exists at the destination and no longer exists at the
// source. Fixture files are removed on every exit path.
func TestFileMgr_MoveFileToNewDir_07(t *testing.T) {
	fh := FileHelper{}
	setupSrcFile := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_0_3_test.txt")
	srcFile := fh.AdjustPathSlash("../../checkfiles/level_0_3_test.txt")
	destDir := fh.AdjustPathSlash("../../createFilesTest")
	setupDestFile := fh.AdjustPathSlash("../../createFilesTest/level_0_3_test.txt")
	// Test setup: delete any pre-existing destination file.
	if fh.DoesFileExist(setupDestFile) {
		err := fh.DeleteDirFile(setupDestFile)
		if err != nil {
			t.Errorf("Error returned by fh.DeleteDirFile(setupDestFile).\n"+
				"setupDestFile='%v'\nError:'%v'\n",
				setupDestFile, err.Error())
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
		if fh.DoesFileExist(setupDestFile) {
			t.Errorf("Error: Attempted Deletion Failed!!\n"+
				"Destination file STILL EXISTS!\n"+
				"setupDestFile='%v'\n", setupDestFile)
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
	}
	// Test setup: delete any pre-existing source file.
	if fh.DoesFileExist(srcFile) {
		err := fh.DeleteDirFile(srcFile)
		if err != nil {
			// Fix: message previously misspelled "STILL" as "SILL".
			t.Errorf("Error: Attempted Deletion Failed!!\n"+
				"'srcFile' STILL EXISTS!!\n"+
				"srcFile='%v'\n"+
				"Error:'%v'\n",
				srcFile, err.Error())
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
		if fh.DoesFileExist(srcFile) {
			t.Errorf("Error: Attempted Deletion Failed!\n"+
				"'srcFile' STILL EXISTS!\n"+
				"srcFile='%v'\n", srcFile)
			_ = fh.DeleteDirFile(srcFile)
			_ = fh.DeleteDirFile(setupDestFile)
			return
		}
	}
	// Stage the source fixture file.
	err := fh.CopyFileByIo(setupSrcFile, srcFile)
	if err != nil {
		t.Errorf("Error returned by fh.CopyFileByIo(setupSrcFile, srcFile)\n"+
			"setupSrcFile='%v'\nsrcFile='%v'\nError='%v'\n",
			setupSrcFile, srcFile, err.Error())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	if !fh.DoesFileExist(srcFile) {
		t.Errorf("Attempt copy operation failed!!\n"+
			"Source File does NOT EXIST!!\n"+
			"srcFile='%v'\n", srcFile)
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	srcFileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(srcFile)
	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromPathFileNameExtStr(srcFile).\n"+
			"srcFile='%v'\nError='%v'\n",
			srcFile, err.Error())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	// Operation under test.
	newFMgr, err := srcFileMgr.MoveFileToNewDir(destDir)
	if err != nil {
		t.Errorf("Error returned by srcFileMgr.MoveFileToNewDir(destDir).\n"+
			"destDir='%v'\nError='%v'\n",
			destDir, err.Error())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	// Verify the moved file exists at the destination...
	if !fh.DoesFileExist(newFMgr.absolutePathFileName) {
		t.Errorf("Error: Move Operation Failed!\nDestination File DOES NOT EXIST!\n"+
			"Destination File (newFMgr)='%v'", newFMgr.absolutePathFileName)
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	doesExist, err := newFMgr.DoesThisFileExist()
	if err != nil {
		t.Errorf("Non-Path Error returned by newFMgr.DoesThisFileExist().\n"+
			"newFMgr='%v'\nError='%v'\n",
			newFMgr.absolutePathFileName, err.Error())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	if !doesExist {
		t.Errorf("Error: After Move Operation Destination File DOES NOT EXIST!\n"+
			"Destination File (newFMgr)='%v'\n",
			newFMgr.absolutePathFileName)
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	// ...and no longer exists at the source.
	doesExist, err = fh.DoesThisFileExist(srcFile)
	if err != nil {
		t.Errorf("Error returned by fh.DoesThisFileExist(srcFile)\n"+
			"srcFile='%v'\nError='%v'\n", srcFile, err.Error())
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	if doesExist {
		t.Errorf("Error: After move operation, the source file still exists!\n"+
			"srcFile='%v'\n", srcFile)
		_ = fh.DeleteDirFile(srcFile)
		_ = fh.DeleteDirFile(setupDestFile)
		return
	}
	// Final cleanup.
	err = fh.DeleteDirFile(srcFile)
	if err != nil {
		t.Errorf("Error returned by fh.DeleteDirFile(srcFile)\n"+
			"srcFile='%v'\n"+
			"Error='%v'\n",
			srcFile, err.Error())
	}
	err = fh.DeleteDirFile(setupDestFile)
	if err != nil {
		t.Errorf("Error returned by fh.DeleteDirFile(setupDestFile)\n"+
			"setupDestFile='%v'\n"+
			"Error='%v'\n",
			setupDestFile, err.Error())
	}
}
// TestFileMgr_New_01 verifies that FileMgr{}.New() correctly parses
// the file name, extension, and name-plus-extension components from
// an absolute path and sets the associated status flags.
func TestFileMgr_New_01(t *testing.T) {
	fh := FileHelper{}
	// Fix: run the Windows-style relative path through AdjustPathSlash
	// (as TestFileMgr_New_02 does) so the path separator is correct on
	// all operating systems.
	relPath := fh.AdjustPathSlash("..\\logTest\\CmdrX\\CmdrX.log")
	commonDir, err := fh.MakeAbsolutePath(relPath)
	if err != nil {
		t.Errorf("Received Error on fh.MakeAbsolutePath(relPath).\n"+
			"relPath='%v'\nError='%v'\n",
			relPath, err.Error())
		return
	}
	// Expected parse results.
	fileName := "CmdrX"
	fileNameExt := "CmdrX.log"
	extName := ".log"
	fileMgr, err := FileMgr{}.New(commonDir)
	if err != nil {
		t.Errorf("Received Error on FileMgr{}.New(commonDir)\n"+
			"Error='%v'\n",
			err.Error())
		return
	}
	if fileMgr.fileName != fileName {
		t.Errorf("Expected File Name='%v'\n"+
			"Instead, File Name='%v'\n",
			fileName, fileMgr.fileName)
	}
	if fileMgr.fileExt != extName {
		t.Errorf("Expected File Extension='%v'\n"+
			"Instead, File Extension='%v'",
			extName, fileMgr.fileExt)
	}
	if fileMgr.fileNameExt != fileNameExt {
		t.Errorf("Expected File Name + Extension='%v'\n"+
			"Instead, File Name + Extension='%v'\n",
			fileNameExt, fileMgr.fileNameExt)
	}
	// Status flags should all be populated for a full path/file/ext input.
	if !fileMgr.isInitialized {
		t.Error("Expected fileMgr.isInitialized=='true'.\n" +
			"Instead, fileMgr.isInitialized=='false'.\n")
	}
	if !fileMgr.isFileNamePopulated {
		t.Error("Expected fileMgr.isFileNamePopulated=='true'.\n" +
			"Instead, fileMgr.isFileNamePopulated='false'\n")
	}
	if !fileMgr.isFileNameExtPopulated {
		t.Error("Expected fileMgr.isFileNameExtPopulated=='true'\n" +
			"Instead, fileMgr.isFileNameExtPopulated=='false'\n")
	}
	if !fileMgr.isFileExtPopulated {
		t.Error("Expected fileMgr.isFileExtPopulated=='true'.\n" +
			"Instead, fileMgr.isFileExtPopulated='false'.\n")
	}
	if !fileMgr.isAbsolutePathFileNamePopulated {
		t.Error("Expected fileMgr.isAbsolutePathFileNamePopulated=='true'.\n" +
			"Instead, fileMgr.isAbsolutePathFileNamePopulated='false'\n")
	}
}
// TestFileMgr_New_02 verifies that FileMgr{}.New() correctly parses a
// relative (non-absolute) path/file/extension string and sets the
// associated status flags.
func TestFileMgr_New_02(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash("..\\logTest\\CmdrX\\CmdrX.log")
	// Expected parse results.
	fileName := "CmdrX"
	fileNameExt := "CmdrX.log"
	extName := ".log"
	fileMgr, err := FileMgr{}.New(commonDir)
	if err != nil {
		t.Errorf("Error returned by FileMgr{}.New(commonDir)\n"+
			"commonDir='%v'\nError='%v'\n",
			commonDir, err.Error())
		return
	}
	if fileMgr.fileName != fileName {
		t.Errorf("Expected File Name='%v'.\n"+
			"Instead, File Name='%v'\n",
			fileName, fileMgr.fileName)
	}
	if fileMgr.fileExt != extName {
		t.Errorf("Expected File Extension='%v'\n"+
			"Instead, File Extension='%v'\n",
			extName, fileMgr.fileExt)
	}
	if fileMgr.fileNameExt != fileNameExt {
		t.Errorf("Expected File Name + Extension='%v'.\n"+
			"Instead, File Name + Extension='%v'.\n",
			fileNameExt, fileMgr.fileNameExt)
	}
	// Status flags should all be populated for a full path/file/ext input.
	if !fileMgr.isInitialized {
		t.Error("Expected fileMgr.isInitialized=='true'.\n" +
			"Instead, fileMgr.isInitialized=='false'\n")
	}
	if !fileMgr.isFileNamePopulated {
		t.Error("Expected fileMgr.isFileNamePopulated=='true'\n" +
			"Instead, fileMgr.isFileNamePopulated='false'\n")
	}
	if !fileMgr.isFileNameExtPopulated {
		t.Error("Expected fileMgr.isFileNameExtPopulated=='true'\n" +
			"Instead, fileMgr.isFileNameExtPopulated='false'\n")
	}
	if !fileMgr.isFileExtPopulated {
		t.Error("Expected fileMgr.isFileExtPopulated=='true'\n" +
			"Instead, fileMgr.isFileExtPopulated='false'\n")
	}
	if !fileMgr.isAbsolutePathFileNamePopulated {
		t.Error("Expected fileMgr.isAbsolutePathFileNamePopulated=='true'\n" +
			"Instead, fileMgr.isAbsolutePathFileNamePopulated='false'\n")
	}
}
func TestFileMgr_New_03(t *testing.T) {
_, err := FileMgr{}.New("")
if err == nil {
t.Error("Expected error return from FileMgr{}.New(\"\")\n" +
"because the input parameter is an empty string.\n" +
"However, NO ERROR WAS RETURNED!\n")
}
}
func TestFileMgr_New_04(t *testing.T) {
_, err := FileMgr{}.New("!^%&*()")
if err == nil {
t.Error("Expected error return from FileMgr{}.New(\"!^%&*()\")\n" +
"because the input parameter contains invalid characters.\n" +
"However, NO ERROR WAS RETURNED!\n")
}
}
// TestFileMgr_NewFromPathFileNameExtStr_01 verifies that
// FileMgr{}.NewFromPathFileNameExtStr() correctly parses a relative
// path to an existing Go source file and sets the associated status
// flags.
func TestFileMgr_NewFromPathFileNameExtStr_01(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash(".\\xt_dirmgr_01_test.go")
	// Expected parse results.
	fileName := "xt_dirmgr_01_test"
	fileNameExt := "xt_dirmgr_01_test.go"
	extName := ".go"
	fileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(commonDir)
	if err != nil {
		t.Errorf("Error returned by FileMgr{}.NewFromPathFileNameExtStr(commonDir)\n"+
			"commonDir='%v'\nError='%v'\n",
			commonDir, err)
		return
	}
	if fileMgr.fileName != fileName {
		t.Errorf("Expected File Name='%v'.\n"+
			"Instead, File Name='%v'\n",
			fileName, fileMgr.fileName)
	}
	if fileMgr.fileExt != extName {
		t.Errorf("Expected File Extension='%v'\n"+
			"Instead, File Extension='%v'\n", extName, fileMgr.fileExt)
	}
	if fileMgr.fileNameExt != fileNameExt {
		t.Errorf("Expected File Name + Extension='%v'.\n"+
			"Instead, File Name + Extension='%v'\n",
			fileNameExt, fileMgr.fileNameExt)
	}
	// Status flags should all be populated for a full path/file/ext input.
	if !fileMgr.isInitialized {
		t.Error("Expected fileMgr.isInitialized=='true'.\n" +
			"Instead, fileMgr.isInitialized='false'\n")
	}
	if !fileMgr.isFileNamePopulated {
		t.Error("Expected fileMgr.isFileNamePopulated=='true'.\n" +
			"Instead, fileMgr.isFileNamePopulated='false'\n")
	}
	if !fileMgr.isFileNameExtPopulated {
		t.Error("Expected fileMgr.isFileNameExtPopulated=='true'\n" +
			"Instead, fileMgr.isFileNameExtPopulated='false'\n")
	}
	if !fileMgr.isFileExtPopulated {
		t.Error("Expected fileMgr.isFileExtPopulated=='true'\n" +
			"Instead, fileMgr.isFileExtPopulated='false'\n")
	}
	if !fileMgr.isAbsolutePathFileNamePopulated {
		t.Error("Expected fileMgr.isAbsolutePathFileNamePopulated=='true'\n" +
			"Instead, fileMgr.isAbsolutePathFileNamePopulated=='false'\n")
	}
}
// TestFileMgr_NewFromPathFileNameExtStr_02 verifies that
// FileMgr{}.NewFromPathFileNameExtStr() correctly parses a multi-level
// relative path to an existing fixture file and populates both the
// name/extension fields and the path/existence status flags.
func TestFileMgr_NewFromPathFileNameExtStr_02(t *testing.T) {
	path := "../../filesfortest/levelfilesfortest/level_01_dir/level_02_dir/level_2_2_test.txt"
	// Expected parse results.
	eFileNameExt := "level_2_2_test.txt"
	eFileName := "level_2_2_test"
	eFileExt := ".txt"
	fileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(path)
	if err != nil {
		t.Errorf("Error returned from FileMgr{}."+
			"NewFromPathFileNameExtStr(path)\n"+
			"path=='%v'\nError='%v'",
			path, err)
		// Fix: return on constructor failure. The original fell through
		// and ran the remaining checks against a zero-value FileMgr,
		// producing a cascade of misleading secondary failures.
		return
	}
	if eFileNameExt != fileMgr.fileNameExt {
		t.Errorf("Expected extracted fileNameExt=='%v'.\n"+
			"Instead fileNameExt=='%v'\n",
			eFileNameExt, fileMgr.fileNameExt)
	}
	if fileMgr.fileName != eFileName {
		t.Errorf("Expected fileMgr.fileName=='%v'.\n"+
			"Instead, fileMgr.fileName== %v\n",
			eFileName, fileMgr.fileName)
	}
	if fileMgr.fileExt != eFileExt {
		t.Errorf("Expected fileMgr.fileExt=='%v'\n"+
			"Instead, fileMgr.fileExt== %v\n",
			eFileExt, fileMgr.fileExt)
	}
	// Path and existence status flags should reflect the real fixture file.
	if !fileMgr.dMgr.isPathPopulated {
		t.Error("Expected 'fileMgr.isPathPopulated==true'.\n" +
			"Instead, fileMgr.isPathPopulated=='false'\n")
	}
	if !fileMgr.doesAbsolutePathFileNameExist {
		t.Error("Expected 'fileMgr.doesAbsolutePathFileNameExist==true'.\n" +
			"Instead fileMgr.doesAbsolutePathFileNameExist=='false'\n")
	}
	if !fileMgr.isAbsolutePathFileNamePopulated {
		t.Error("Expected fileMgr.isAbsolutePathFileNamePopulated=='true'.\n" +
			"Instead, it is 'false'.\n")
	}
	if !fileMgr.dMgr.doesAbsolutePathExist {
		t.Error("Expected fileMgr.doesAbsolutePathExist=='true'.\n" +
			"Instead, it is 'false'.\n")
	}
}
func TestFileMgr_NewFromPathFileNameExtStr_03(t *testing.T) {
path := "filehelperexamples"
_, err := FileMgr{}.NewFromPathFileNameExtStr(path)
if err == nil {
t.Error("Expected an error from FileMgr{}." +
"NewFromPathFileNameExtStr(path)\n" +
"because path='filehelperexamples'.\n" +
"However, NO ERROR WAS RETURNED!\n")
}
}
// TestFileMgr_NewFromPathFileNameExtStr_04 verifies that
// FileMgr{}.NewFromPathFileNameExtStr() correctly parses a three-level
// relative path to an existing fixture file and populates both the
// name/extension fields and the path/existence status flags.
func TestFileMgr_NewFromPathFileNameExtStr_04(t *testing.T) {
	path := "../../filesfortest/levelfilesfortest/level_01_dir/level_02_dir/" +
		"level_03_dir/level_3_3_test.txt"
	// Expected parse results.
	eFileNameExt := "level_3_3_test.txt"
	eFileName := "level_3_3_test"
	eFileExt := ".txt"
	fileMgr, err := FileMgr{}.NewFromPathFileNameExtStr(path)
	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromPathFileNameExtStr(path)\n"+
			"path=='%v'\nError: %v\n",
			path, err)
		return
	}
	if eFileNameExt != fileMgr.fileNameExt {
		t.Errorf("Expected extracted fileNameExt=='%v'\n"+
			"Instead fileNameExt=='%v'\n",
			eFileNameExt, fileMgr.fileNameExt)
	}
	if eFileName != fileMgr.fileName {
		t.Errorf("Expected fileMgr.fileName== '%v'\n"+
			"Instead fileMgr.fileName== '%v'\n",
			eFileName, fileMgr.fileName)
	}
	if eFileExt != fileMgr.fileExt {
		t.Errorf("Expected fileMgr.fileExt=='%v'\n"+
			"Instead, fileMgr.fileExt== %v\n",
			eFileExt, fileMgr.fileExt)
	}
	// Path and existence status flags should reflect the real fixture file.
	if !fileMgr.dMgr.isPathPopulated {
		t.Errorf("Expected 'fileMgr.dMgr.isPathPopulated==true'\n"+
			"Instead, fileMgr.isPathPopulated==%v\n",
			fileMgr.dMgr.isPathPopulated)
	}
	if !fileMgr.doesAbsolutePathFileNameExist {
		// Fix: report the field actually being tested; the original
		// printed fileMgr.dMgr.isPathPopulated here (copy-paste error).
		t.Errorf("Expected 'fileMgr.doesAbsolutePathFileNameExist==true'\n"+
			"Instead fileMgr.doesAbsolutePathFileNameExist=='%v'\n",
			fileMgr.doesAbsolutePathFileNameExist)
	}
	if !fileMgr.isAbsolutePathFileNamePopulated {
		t.Errorf("Expected fileMgr.isAbsolutePathFileNamePopulated=='true'.\n"+
			"Instead, fileMgr.isAbsolutePathFileNamePopulated=='%v'\n",
			fileMgr.isAbsolutePathFileNamePopulated)
	}
	if !fileMgr.dMgr.doesAbsolutePathExist {
		t.Errorf("Expected fileMgr.doesAbsolutePathExist=='true'\n"+
			"Instead, it is '%v'\n",
			fileMgr.dMgr.doesAbsolutePathExist)
	}
}
// TestFileMgr_NewFromFileInfo_01 verifies that
// FileMgr{}.NewFromFileInfo() builds a fully-populated FileMgr from a
// directory path plus an os.FileInfo describing an existing fixture
// file.
func TestFileMgr_NewFromFileInfo_01(t *testing.T) {
	// Expected parse results.
	expectedFileNameExt := "newerFileForTest_01.txt"
	expectedFileName := "newerFileForTest_01"
	expectedExt := ".txt"
	fh := FileHelper{}
	adjustedPath := fh.AdjustPathSlash("../../filesfortest/newfilesfortest")
	absPath, err := fh.MakeAbsolutePath(adjustedPath)
	if err != nil {
		t.Errorf("Error returned from fh.MakeAbsolutePath("+
			"adjustedPath).\n"+
			"adjustedPath='%v'\nError='%v'\n",
			adjustedPath, err.Error())
		return
	}
	absPathFileNameExt :=
		absPath + string(os.PathSeparator) + expectedFileNameExt
	// Obtain the os.FileInfo for the fixture file.
	info, err := fh.GetFileInfo(absPathFileNameExt)
	if err != nil {
		t.Errorf("Error returned from fh.GetFileInfo(absPathFileNameExt).\n"+
			"absPathFileNameExt='%v'\nError='%v'\n",
			absPathFileNameExt, err.Error())
		return
	}
	fileMgr, err := FileMgr{}.NewFromFileInfo(absPath, info)
	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromFileInfo(absPath, info).\n"+
			"absPath='%v'\ninfo.Name()='%v'\nError='%v'\n",
			absPath, info.Name(), err.Error())
		return
	}
	if fileMgr.fileNameExt != expectedFileNameExt {
		t.Errorf("Expected extracted fileMgr.fileNameExt=='%v'\n"+
			"Instead fileMgr.fileNameExt='%v'\n",
			expectedFileNameExt, fileMgr.fileNameExt)
	}
	if fileMgr.fileName != expectedFileName {
		t.Errorf("Expected fileMgr.fileName=='%v'\n"+
			"Instead fileMgr.fileName=='%v'\n",
			expectedFileName, fileMgr.fileName)
	}
	if fileMgr.fileExt != expectedExt {
		// Fix: report fileMgr.fileExt, the field actually being tested;
		// the original printed fileMgr.fileName (copy-paste error).
		t.Errorf("Expected fileMgr.fileExt=='%v'\n"+
			"Instead fileMgr.fileExt=='%v'\n",
			expectedExt, fileMgr.fileExt)
	}
	// Path/existence status flags should reflect the real fixture file.
	if !fileMgr.dMgr.isPathPopulated {
		t.Errorf("Expected 'fileMgr.isPathPopulated=='true'\n"+
			"Instead fileMgr.isPathPopulated=='%v'",
			fileMgr.dMgr.isPathPopulated)
	}
	if !fileMgr.doesAbsolutePathFileNameExist {
		t.Errorf("Expected 'fileMgr.doesAbsolutePathFileNameExist"+
			"==true'\n"+
			"Instead fileMgr.doesAbsolutePathFileNameExist=='%v'\n",
			fileMgr.doesAbsolutePathFileNameExist)
	}
	if !fileMgr.isAbsolutePathFileNamePopulated {
		t.Error("ERROR: Expected fileMgr." +
			"isAbsolutePathFileNamePopulated=='true'.\n" +
			"Instead, it is 'false'\n")
	}
	if !fileMgr.dMgr.doesAbsolutePathExist {
		t.Error("ERROR: Expected fileMgr.doesAbsolutePathExist==true'.\n" +
			"Instead, it is 'false'\n")
	}
	// The captured os.FileInfo should be retained and initialized.
	if !fileMgr.actualFileInfo.isFInfoInitialized {
		t.Error("ERROR: Expected fileMgr.actualFileInfo.isFInfoInitialized" +
			"='true'.\nInstead, it is 'false'")
	}
	if fileMgr.actualFileInfo.Name() != expectedFileNameExt {
		t.Errorf("ERROR: Expected fileMgr.actualFileInfo.Name()=='%v'.\n"+
			"Instead, fileMgr.actualFileInfo.Name()=='%v'.\n",
			expectedFileNameExt, fileMgr.actualFileInfo.Name())
	}
}
// TestFileMgr_NewFromFileInfo_02 verifies that NewFromFileInfo rejects
// a nil (uninitialized) os.FileInfo with an error.
func TestFileMgr_NewFromFileInfo_02(t *testing.T) {

	helper := FileHelper{}

	adjustedPath := helper.AdjustPathSlash(
		"../../filesfortest/newfilesfortest")

	absPath, err := helper.MakeAbsolutePath(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from fh.MakeAbsolutePath(adjustedPath).\n"+
			"adjustedPath='%v'\nError='%v'\n",
			adjustedPath, err.Error())
		return
	}

	// A declared-but-unassigned os.FileInfo is nil and therefore invalid.
	var nilInfo os.FileInfo

	_, err = FileMgr{}.NewFromFileInfo(absPath, nilInfo)

	if err == nil {
		t.Errorf("Expected an error from FileMgr{}.NewFromFileInfo(absPath, info)\n" +
			"because input parameter 'info' is INVALID!\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileMgr_NewFromFileInfo_03 verifies that NewFromFileInfo accepts a
// valid os.FileInfo paired with a directory path that does NOT exist, and
// that the resulting FileMgr combines both into the absolute path/file name.
func TestFileMgr_NewFromFileInfo_03(t *testing.T) {

	expectedFileNameExt := "newerFileForTest_01.txt"

	fh := FileHelper{}

	adjustedPath := fh.AdjustPathSlash(
		"../../filesfortest/newfilesfortest")

	absPath, err := fh.MakeAbsolutePath(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from fh.MakeAbsolutePath(adjustedPath).\n"+
			"adjustedPath='%v'\nError='%v'\n",
			adjustedPath, err.Error())
		return
	}

	absPathFileNameExt :=
		absPath + string(os.PathSeparator) + expectedFileNameExt

	info, err := fh.GetFileInfo(absPathFileNameExt)

	if err != nil {
		t.Errorf("Error returned from fh.GetFileInfo(absPathFileNameExt).\n"+
			"absPathFileNameExt='%v'\nError='%v'\n",
			absPathFileNameExt, err.Error())
		// Fix: the original fell through here and later called
		// info.Name() on a nil interface, panicking the test instead
		// of failing it cleanly.
		return
	}

	// Redirect the path to a directory that does not exist.
	absPath = "../../iDoNotExist"

	absPath, err = fh.MakeAbsolutePath(absPath)

	if err != nil {
		t.Errorf("Error returned from fh.MakeAbsolutePath(absPath).\n"+
			"absPath='%v'\nError='%v'\n",
			absPath, err.Error())
		return
	}

	expectedFilePath :=
		absPath + string(os.PathSeparator) + expectedFileNameExt

	nFMgr, err := FileMgr{}.NewFromFileInfo(absPath, info)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromFileInfo(absPath, info).\n"+
			"absPath='%v'\ninfo.Name()='%v'\nError='%v'\n",
			absPath, info.Name(), err.Error())
		return
	}

	if expectedFilePath != nFMgr.GetAbsolutePathFileName() {
		t.Errorf("ERROR: Expected File Path='%v'.\n"+
			"Instead, File Path='%v'\n",
			expectedFilePath, nFMgr.GetAbsolutePathFileName())
	}
}
// TestFileMgr_NewFromFileInfo_04 verifies that NewFromFileInfo returns an
// error when the path string is empty, even with a valid os.FileInfo.
func TestFileMgr_NewFromFileInfo_04(t *testing.T) {

	fileNameExt := "newerFileForTest_01.txt"
	helper := FileHelper{}

	adjustedPath := helper.AdjustPathSlash(
		"../../filesfortest/newfilesfortest")

	absPath, err := helper.MakeAbsolutePath(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from fh.MakeAbsolutePath(adjustedPath).\n"+
			"adjustedPath='%v'\nError='%v'\n",
			adjustedPath, err.Error())
		return
	}

	// Obtain a valid os.FileInfo for an existing test file.
	validInfo, err := helper.GetFileInfo(
		absPath + string(os.PathSeparator) + fileNameExt)

	if err != nil {
		t.Errorf("Error returned from fh.GetFileInfo(absPathFileNameExt).\n"+
			"absPathFileNameExt='%v'\nError='%v'\n",
			absPath+string(os.PathSeparator)+fileNameExt, err.Error())
		return
	}

	// An empty path must be rejected regardless of the FileInfo.
	_, err = FileMgr{}.NewFromFileInfo("", validInfo)

	if err == nil {
		t.Error("Expected an error from FileMgr{}.NewFromFileInfo(absPath, info)\n" +
			"because absPath is an empty string.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileMgr_NewFromDirMgrFileNameExt_01 builds a FileMgr from a valid
// DirMgr plus a file name and checks the assembled absolute path string.
func TestFileMgr_NewFromDirMgrFileNameExt_01(t *testing.T) {

	fileNameExt := "newerFileForTest_01.txt"
	fh := FileHelper{}

	adjustedPath := fh.AdjustPathSlash(
		"../../filesfortest/newfilesfortest")

	dirMgr, err := DirMgr{}.New(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr"+
			"(adjustedPath).\nadjustedPath='%v'\nError='%v'\n",
			adjustedPath, err.Error())
		return
	}

	fMgr, err := FileMgr{}.NewFromDirMgrFileNameExt(dirMgr, fileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt("+
			"dMgr, expectedFileNameExt).\n"+
			"dMgr='%v'\nrawFileNameExt='%v'\nError='%v'\n",
			dirMgr.GetAbsolutePath(), fileNameExt, err.Error())
		return
	}

	absPath, err := fh.MakeAbsolutePath(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from fh.MakeAbsolutePath(adjustedPath).\n"+
			"adjustedPath='%v'\nError='%v'\n",
			adjustedPath, err.Error())
		return
	}

	wantAbsPathFileNameExt := absPath + string(os.PathSeparator) + fileNameExt

	if wantAbsPathFileNameExt != fMgr.absolutePathFileName {
		t.Errorf("Expected absolutePathFileName='%v'.\n"+
			"Instead, absolutePathFileName='%v'\n",
			wantAbsPathFileNameExt, fMgr.absolutePathFileName)
	}
}
// TestFileMgr_NewFromDirMgrFileNameExt_02 verifies that a leading "./" on
// the raw file name is normalized away in the resulting absolute path.
func TestFileMgr_NewFromDirMgrFileNameExt_02(t *testing.T) {

	rawFileNameExt := "./newerFileForTest_01.txt"
	expectedFileNameExt := "newerFileForTest_01.txt"

	fh := FileHelper{}

	adjustedPath := fh.AdjustPathSlash(
		"../../filesfortest/newfilesfortest")

	dMgr, err := DirMgr{}.New(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr"+
			"(adjustedPath).\n"+
			"adjustedPath='%v'\nError='%v'\n",
			adjustedPath, err.Error())
		return
	}

	fMgr, err :=
		FileMgr{}.NewFromDirMgrFileNameExt(dMgr, rawFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirMgrFileNameExt"+
			"(dMgr, rawFileNameExt).\n"+
			"dMgr='%v'\nrawFileNameExt='%v'\nError='%v'\n",
			dMgr.GetAbsolutePath(), rawFileNameExt, err.Error())
		// Fix: the original fell through and compared against an
		// invalid fMgr after reporting the error.
		return
	}

	absPath, err := fh.MakeAbsolutePath(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from fh.MakeAbsolutePath(adjustedPath).\n"+
			"adjustedPath='%v'\nError='%v'\n",
			adjustedPath, err.Error())
		// Fix: the original continued and built the expected path from
		// an empty absPath after this failure.
		return
	}

	expectedAbsPathFileNameExt :=
		absPath + string(os.PathSeparator) + expectedFileNameExt

	if expectedAbsPathFileNameExt != fMgr.absolutePathFileName {
		t.Errorf("Expected absolutePathFileName='%v'.\n"+
			"Instead, absolutePathFileName='%v'\n",
			expectedAbsPathFileNameExt, fMgr.absolutePathFileName)
	}
}
// TestFileMgr_NewFromDirMgrFileNameExt_03 verifies that an empty file
// name/extension string is rejected with an error.
func TestFileMgr_NewFromDirMgrFileNameExt_03(t *testing.T) {

	fh := FileHelper{}

	rawPath := "../../filesfortest/newfilesfortest"

	absolutePath, err := fh.MakeAbsolutePath(rawPath)

	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(rawPath).\n"+
			"rawPath='%v'\nError='%v'\n",
			rawPath, err.Error())
		// Fix: the original continued and passed an empty absolutePath
		// to DirMgr{}.New after reporting this error.
		return
	}

	dMgr, err := DirMgr{}.New(absolutePath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.New(absolutePath).\n"+
			"adjustedPath='%v'\nError='%v'\n",
			absolutePath, err.Error())
		return
	}

	_, err = FileMgr{}.NewFromDirMgrFileNameExt(dMgr, "")

	if err == nil {
		t.Error("Expected error return from FileMgr{}." +
			"NewFromDirMgrFileNameExt(dMgr, \"\")\n" +
			"because the input parameter is an empty string.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileMgr_NewFromDirMgrFileNameExt_04 verifies that an invalid
// (uninitialized) DirMgr is rejected with an error.
func TestFileMgr_NewFromDirMgrFileNameExt_04(t *testing.T) {

	fileNameExt := "newerFileForTest_01.txt"
	helper := FileHelper{}

	adjustedPath := helper.AdjustPathSlash(
		"../../filesfortest/newfilesfortest")

	dirMgr, err := DirMgr{}.New(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}.NewFromPathFileNameExtStr"+
			"(adjustedPath).\n"+
			"adjustedPath='%v'\nError='%v'\n",
			adjustedPath, err.Error())
		return
	}

	// Deliberately invalidate the DirMgr before handing it over.
	dirMgr.isInitialized = false

	_, err = FileMgr{}.NewFromDirMgrFileNameExt(dirMgr, fileNameExt)

	if err == nil {
		t.Errorf("Expected error return from FileMgr{}" +
			"NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)\n" +
			"because the dMgr is INVALID.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileMgr_NewFromDirMgrFileNameExt_05 verifies that a file name made
// up entirely of invalid characters is rejected with an error.
func TestFileMgr_NewFromDirMgrFileNameExt_05(t *testing.T) {

	badFileNameExt := "$%!*().#+_"
	helper := FileHelper{}

	adjustedPath := helper.AdjustPathSlash(
		"../../filesfortest/newfilesfortest")

	dirMgr, err := DirMgr{}.New(adjustedPath)

	if err != nil {
		t.Errorf("Error returned from DirMgr{}."+
			"NewFromPathFileNameExtStr(adjustedPath).\n"+
			"adjustedPath='%v'\nError='%v'\n",
			adjustedPath, err.Error())
		return
	}

	_, err = FileMgr{}.NewFromDirMgrFileNameExt(dirMgr, badFileNameExt)

	if err == nil {
		t.Errorf("Expected error return from FileMgr{}" +
			"NewFromDirMgrFileNameExt(dMgr, expectedFileNameExt)\n" +
			"because the expectedFileNameExt contains invalid characters.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileMgr_NewFromDirStrFileNameStr_01 builds a FileMgr from a raw
// directory string plus a file name and verifies the parsed name,
// extension, path fields and file-existence flags.
func TestFileMgr_NewFromDirStrFileNameStr_01(t *testing.T) {

	expectedFileNameExt := "newerFileForTest_01.txt"
	expectedFileName := "newerFileForTest_01"
	expectedExt := ".txt"

	fh := FileHelper{}

	rawPath := "../../filesfortest/newfilesfortest"
	expectedPath := fh.AdjustPathSlash(rawPath)

	expectedAbsPath, err := fh.MakeAbsolutePath(expectedPath)

	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPath).\n"+
			"expectedPath='%v'\nError='%v'\n",
			expectedPath, err.Error())
		return
	}

	fileMgr, err :=
		FileMgr{}.NewFromDirStrFileNameStr(rawPath, expectedFileNameExt)

	if err != nil {
		t.Errorf("Error returned from FileMgr{}.NewFromDirStrFileNameStr("+
			"rawPath, expectedFileNameExt).\n"+
			"rawPath='%v'\nexpectedFileNameExt='%v'\nError='%v'\n",
			rawPath, expectedFileNameExt, err.Error())
		return
	}

	if fileMgr.fileNameExt != expectedFileNameExt {
		t.Errorf("Expected extracted fileMgr.fileNameExt==%v.\n"+
			"Instead fileMgr.fileNameExt='%v'\n",
			expectedFileNameExt, fileMgr.fileNameExt)
	}

	if fileMgr.fileName != expectedFileName {
		t.Errorf("Expected fileMgr.fileName== '%v'\n"+
			"Instead fileMgr.fileName== '%v'\n",
			expectedFileName, fileMgr.fileName)
	}

	if fileMgr.fileExt != expectedExt {
		// Fix: the original printed fileMgr.fileName here, masking the
		// actual (mismatched) extension value in the failure message.
		t.Errorf("Expected fileMgr.fileExt== '%v'\n"+
			"Instead got: fileMgr.fileExt=='%v'\n",
			expectedExt, fileMgr.fileExt)
	}

	if !fileMgr.dMgr.isPathPopulated {
		t.Errorf("Expected 'fileMgr.isPathPopulated==true'.\n"+
			"Instead fileMgr.isPathPopulated=='%v'\n",
			fileMgr.dMgr.isPathPopulated)
	}

	if !fileMgr.doesAbsolutePathFileNameExist {
		// Fix: the original printed fileMgr.dMgr.isPathPopulated here,
		// not the flag the message claims to report.
		t.Errorf("Expected fileMgr.doesAbsolutePathFileNameExist"+
			"=='true'\nInstead fileMgr.doesAbsolutePathFileNameExist=='%v'\n",
			fileMgr.doesAbsolutePathFileNameExist)
	}

	if !fileMgr.isAbsolutePathFileNamePopulated {
		t.Error("Expected fileMgr.isAbsolutePathFileNamePopulated=='true'.\n" +
			"Instead, it is 'false'.\n")
	}

	if !fileMgr.dMgr.doesAbsolutePathExist {
		t.Error("Expected fileMgr.doesAbsolutePathExist=='true'.\n" +
			"Instead, it is 'false'.\n")
	}

	if !fileMgr.actualFileInfo.isFInfoInitialized {
		t.Error("Expected fileMgr.actualFileInfo.isFInfoInitialized='true'.\n" +
			"Error, it is 'false'.\n")
	}

	if fileMgr.actualFileInfo.Name() != expectedFileNameExt {
		t.Errorf("Expected fileMgr.actualFileInfo.Name()=='%v'.\n"+
			"Instead fileMgr.actualFileInfo.Name()=='%v'.\n",
			expectedFileNameExt, fileMgr.actualFileInfo.Name())
	}

	if expectedAbsPath != fileMgr.dMgr.absolutePath {
		t.Errorf("Expected absolutePath='%v'.\n"+
			"Instead, absolutePath='%v'\n",
			expectedAbsPath, fileMgr.dMgr.absolutePath)
	}

	if expectedPath != fileMgr.dMgr.path {
		t.Errorf("Expected path='%v'.\n"+
			"Instead, path='%v'\n",
			expectedPath, fileMgr.dMgr.path)
	}
}
// TestFileMgr_NewFromDirStrFileNameStr_02 verifies that an empty
// directory string is rejected with an error.
func TestFileMgr_NewFromDirStrFileNameStr_02(t *testing.T) {

	fileNameExt := "newerFileForTest_01.txt"

	_, err := FileMgr{}.NewFromDirStrFileNameStr("", fileNameExt)

	if err == nil {
		t.Error("Expected error return from FileMgr{}." +
			"NewFromDirStrFileNameStr(rawPath, expectedFileNameExt)\n" +
			"because rawPath is an empty string.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileMgr_NewFromDirStrFileNameStr_03 verifies that an empty file
// name/extension string is rejected with an error.
func TestFileMgr_NewFromDirStrFileNameStr_03(t *testing.T) {

	rawPath := "../../filesfortest/newfilesfortest"

	_, err := FileMgr{}.NewFromDirStrFileNameStr(rawPath, "")

	if err == nil {
		t.Error("Expected error return from FileMgr{}." +
			"NewFromDirStrFileNameStr(rawPath, expectedFileNameExt)\n" +
			"because expectedFileNameExt is an empty string.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileMgr_NewFromDirStrFileNameStr_04 verifies that a file name made
// up only of blank spaces is rejected with an error.
func TestFileMgr_NewFromDirStrFileNameStr_04(t *testing.T) {

	blankFileNameExt := "   "
	rawPath := "../../filesfortest/newfilesfortest"

	_, err := FileMgr{}.NewFromDirStrFileNameStr(rawPath, blankFileNameExt)

	if err == nil {
		t.Error("Expected error return from FileMgr{}." +
			"NewFromDirStrFileNameStr(rawPath, expectedFileNameExt)\n" +
			"because expectedFileNameExt consists of blank spaces.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileMgr_NewFromDirStrFileNameStr_05 verifies that an empty raw
// path string is rejected with an error.
func TestFileMgr_NewFromDirStrFileNameStr_05(t *testing.T) {

	fileNameExt := "newerFileForTest_01.txt"

	_, err := FileMgr{}.NewFromDirStrFileNameStr("", fileNameExt)

	if err == nil {
		t.Error("Expected error return from FileMgr{}." +
			"NewFromDirStrFileNameStr(rawPath, expectedFileNameExt)\n" +
			"because raw path is an empty string.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileMgr_NewFromDirStrFileNameStr_06 verifies that a raw path made
// up only of blank spaces is rejected with an error.
func TestFileMgr_NewFromDirStrFileNameStr_06(t *testing.T) {

	fileNameExt := "newerFileForTest_01.txt"
	blankPath := "   "

	_, err := FileMgr{}.NewFromDirStrFileNameStr(blankPath, fileNameExt)

	if err == nil {
		t.Error("Expected error return from FileMgr{}." +
			"NewFromDirStrFileNameStr(rawPath, expectedFileNameExt)\n" +
			"because raw path consists of blank spaces.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
|
package recursion
import (
"fmt"
"testing"
)
// Test_canPartitionKSubsets checks that {4,3,2,3,5,2,1} (sum 20) can be
// split into k=4 subsets each summing to 5: {5}, {4,1}, {3,2}, {3,2}.
// The original test only printed the result and could never fail.
func Test_canPartitionKSubsets(t *testing.T) {
	res := canPartitionKSubsets([]int{4, 3, 2, 3, 5, 2, 1}, 4)
	fmt.Println(res)
	if !res {
		t.Errorf("canPartitionKSubsets([4 3 2 3 5 2 1], 4) = false, want true")
	}
}
|
package main
import (
"fmt"
)
// Reference types include: channel, slice, map.
// The built-in new() computes the type's size, allocates zero-valued memory for it, and returns a pointer.
// make is translated by the compiler into concrete functions that allocate memory and initialize the member structure; it returns an object rather than a pointer.
// Type conversions are explicit; implicit conversion is not supported.
// Strings are immutable value types; internally a pointer refers to a UTF-8 byte array.
// Their default value is "".
// Individual bytes are accessed by index, e.g. s[i].
// Taking the address of a byte element, e.g. &s[i], is illegal.
// Because the type is immutable, the underlying byte array cannot be modified.
// The byte array carries no trailing NUL terminator.
// main exercises MaxSubseqSum1 on a sample slice containing both
// positive and negative values and prints the result.
func main() {
	data := []int64{-2, 2, -3, 4, -1, 2, 1, -5, 3}
	fmt.Println(MaxSubseqSum1(data, int64(len(data))))
}
// MaxSubseqSum1 returns the maximum sum over all contiguous subsequences
// of list[0:N], or 0 when N == 0 or every element is negative. This is
// the brute-force O(N^3) formulation: for each pair (i, j) the inner
// loop re-sums list[i..j] from scratch.
//
// Bug fixed: the inner summation loop incremented j instead of k, so k
// never advanced and j grew without bound — an infinite loop producing
// a garbage sum.
func MaxSubseqSum1(list []int64, N int64) int64 {
	var ThisSum, MaxSum int64 = 0, 0
	var i, j, k int64
	for i = 0; i < N; i++ {
		for j = i; j < N; j++ {
			ThisSum = 0
			for k = i; k <= j; k++ { // was "j++": never terminated
				ThisSum += list[k]
			}
			if ThisSum > MaxSum {
				MaxSum = ThisSum
			}
		}
	}
	return MaxSum
}
|
package util
import (
"errors"
"net"
)
// invertTable maps every byte value to its bitwise complement,
// precomputed once at startup so invertIP can invert in O(1) per byte.
var invertTable [256]byte

// init fills invertTable by running invertByte over all 256 values.
func init() {
	for value := 0; value < 256; value++ {
		invertTable[value] = invertByte(byte(value))
	}
}
// compareIPs reports whether left orders strictly before right when both
// convert to valid IPv4 values. Any conversion failure yields false.
func compareIPs(left, right net.IP) bool {
	l, errL := ipToValue(left)
	r, errR := ipToValue(right)
	if errL != nil || errR != nil {
		return false
	}
	return l < r
}
func copyIP(ip net.IP) net.IP {
retval := make(net.IP, len(ip))
copy(retval, ip)
return retval
}
func decrementIP(ip net.IP) {
for index := len(ip) - 1; index >= 0; index-- {
if ip[index] > 0 {
ip[index]--
return
}
ip[index] = 0xff
}
}
func incrementIP(ip net.IP) {
for index := len(ip) - 1; index >= 0; index-- {
if ip[index] < 255 {
ip[index]++
return
}
ip[index] = 0
}
}
// invertByte returns the bitwise complement of input (every bit
// flipped). The original iterated over the eight bits by hand; the
// built-in NOT operator computes the identical result directly.
func invertByte(input byte) byte {
	return ^input
}
// invertIP replaces each byte of ip, in place, with its bitwise
// complement as precomputed in invertTable.
func invertIP(ip net.IP) {
	for i := range ip {
		ip[i] = invertTable[ip[i]]
	}
}
func ipToValue(ip net.IP) (uint32, error) {
ip = ip.To4()
if ip == nil {
return 0, errors.New("not an IPv4 address")
}
return uint32(ip[0])<<24 |
uint32(ip[1])<<16 |
uint32(ip[2])<<8 |
uint32(ip[3]), nil
}
|
package model
import "sort"
// SliceEq reports whether a and b contain the same elements, ignoring
// order. A nil slice and a non-nil empty slice compare unequal,
// preserving the original contract.
//
// Fix: the previous version sorted a and b in place, silently mutating
// the caller's slices as a side effect of an equality check. Sorting is
// now done on private copies.
func SliceEq(a, b []string) bool {
	if (a == nil) != (b == nil) {
		return false
	}
	if len(a) != len(b) {
		return false
	}

	// Sort copies so the inputs are left untouched.
	ac := append([]string(nil), a...)
	bc := append([]string(nil), b...)
	sort.Strings(ac)
	sort.Strings(bc)

	for i := range ac {
		if ac[i] != bc[i] {
			return false
		}
	}
	return true
}
|
// Copyright 2017 Xiaomi, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plugins
import (
"github.com/open-falcon/falcon-plus/modules/agent/g"
"io/ioutil"
"log"
"path/filepath"
"strconv"
"strings"
)
// return: dict{sys/ntp/60_ntp.py : *Plugin}
func ListPlugins(script_path string) map[string]*Plugin {
ret := make(map[string]*Plugin)
if script_path == "" {
return ret
}
abs_path := filepath.Join(g.Config().Plugin.Dir, script_path)
fs, err := ioutil.ReadDir(abs_path)
if err != nil {
log.Println("can not list files under", abs_path)
return ret
}
for _, f := range fs {
if f.IsDir() {
continue
}
filename := f.Name()
arr := strings.Split(filename, "_")
if len(arr) < 2 {
continue
}
// filename should be: $cycle_$xx
var cycle int
cycle, err = strconv.Atoi(arr[0])
if err != nil {
continue
}
fpath := filepath.Join(script_path, filename)
plugin := &Plugin{FilePath: fpath, MTime: f.ModTime().Unix(), Cycle: cycle, Args: ""}
ret[fpath] = plugin
}
return ret
}
|
package weather_domain
// Weather is the top-level weather payload, carrying the queried
// coordinates, the timezone string, and current conditions.
type Weather struct {
	Latitude  float64       `json:"latitude"`
	Longitude float64       `json:"longitude"`
	TimeZone  string        `json:"timezone"`
	Currently CurrentlyInfo `json:"currently"`
}

// CurrentlyInfo describes present conditions at the requested location.
type CurrentlyInfo struct {
	Temperature float64 `json:"temperature"`
	Summary     string  `json:"summary"`
	DewPoint    float64 `json:"dewPoint"`
	Pressure    float64 `json:"pressure"`
	Humidity    float64 `json:"humidity"`
}

// WeatherRequest is the inbound request body: an API key plus the
// coordinates to query.
type WeatherRequest struct {
	ApiKey    string  `json:"api_key"`
	Latitude  float64 `json:"latitude"`
	Longitude float64 `json:"longitude"`
}
|
package main
import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"regexp"
	"runtime"
	"strconv"
	"strings"
	"text/template"
	"time"
)
// Page holds the strings rendered into layout.html (page header and
// section titles).
type Page struct {
	Title     string
	SubTitle  string
	ListTitle string
	FileList  string
	PoweredBy string
}

// Filelist is one row of the uploaded-file table rendered by
// filelist.html.
type Filelist struct {
	No        string
	Filename  string
	Author    string
	Grade     string
	Modtime   string
	Corrected string
}

// Footer feeds footer.html.
type Footer struct {
	PoweredBy string
}

// Refresh carries the status message shown by refresh.html after an
// upload or delete operation.
type Refresh struct {
	Message string
}
func uploadHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("path:%s method: %s", r.URL.Path, r.Method)
if r.Method == "POST" {
r.ParseMultipartForm(32 << 20)
file, _, err := r.FormFile("uploadfile")
user := r.FormValue("username")
grade := r.FormValue("borm")
if err != nil {
panic(err)
return
}
defer file.Close()
t := time.Now()
s := ""
const layout = "2006-01-02-15-04-05"
s = fmt.Sprintf(t.Format(layout))
f, err := os.OpenFile("./files/"+grade+"_thesis_"+user+"_"+s+".pdf",
os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
panic(err)
}
defer f.Close()
io.Copy(f, file)
var refresh Refresh
refresh.Message = "Upload suceeded."
tmpl, err := template.ParseFiles("refresh.html")
if err != nil {
panic(err)
}
err = tmpl.Execute(w, refresh)
if err != nil {
panic(err)
}
}
}
// deleteHandler removes the uploaded PDF named by the "filename" form
// value and renders a refresh page.
//
// Fixes: the form value is passed through filepath.Base before being
// joined with the base directory, so a value such as "../../etc/x"
// can no longer delete files outside ./files (path traversal);
// "suceeded" typo corrected.
func deleteHandler(w http.ResponseWriter, r *http.Request) {
	log.Printf("path:%s method: %s", r.URL.Path, r.Method)
	basedir := "./files/"
	if r.Method == "POST" {
		// filepath.Base reduces the value to its final path element.
		base := filepath.Base(r.FormValue("filename"))
		filename := filepath.Join(basedir, base+".pdf")
		if err := os.Remove(filename); err != nil {
			panic(err)
		}
		var refresh Refresh
		refresh.Message = "Delete succeeded."
		tmpl, err := template.ParseFiles("refresh.html")
		if err != nil {
			panic(err)
		}
		if err = tmpl.Execute(w, refresh); err != nil {
			panic(err)
		}
	}
}
// viewHandler renders the main page: the upload layout, one table row
// per uploaded file, and a footer. It creates ./files on first use.
//
// Fixes: the "_" regexp is compiled once before the loop instead of on
// every iteration, and file names that do not split into at least three
// "_"-separated parts are skipped — indexing s[2] on such names
// previously panicked the handler.
func viewHandler(w http.ResponseWriter, r *http.Request) {
	_, err := os.Stat("./files")
	if err != nil {
		os.Mkdir("./files", os.ModeDir|0777)
	}
	flist, err := ioutil.ReadDir("./files")
	if err != nil {
		panic(err)
	}
	var page Page
	page.Title = "Thesis Uploader"
	page.SubTitle = "Upload"
	page.ListTitle = "File List"
	tmpl, err := template.ParseFiles("layout.html")
	if err != nil {
		panic(err)
	}
	err = tmpl.Execute(w, page)
	if err != nil {
		panic(err)
	}
	tmplfl, err := template.ParseFiles("filelist.html")
	if err != nil {
		panic(err)
	}
	// Compile the separator pattern once, not per file.
	splitter := regexp.MustCompile("_")
	i := 1
	if len(flist) > 0 {
		var fl Filelist
		for _, f := range flist {
			filename := f.Name()
			// Uploaded names follow $grade_thesis_$user_$date.pdf.
			s := splitter.Split(filename, 4)
			if len(s) < 3 {
				// Malformed name: skip instead of panicking on s[2].
				continue
			}
			fl.No = strconv.Itoa(i)
			re := strings.TrimSuffix(filename, ".pdf")
			fl.Filename = re
			fl.Author = s[2]
			fl.Grade = s[0]
			fl.Corrected = "Nothing"
			modtime := f.ModTime()
			const layout = "2006-01-02 15:04:05"
			m := fmt.Sprintf("%s", modtime.Format(layout))
			fl.Modtime = m
			err = tmplfl.Execute(w, fl)
			if err != nil {
				panic(err)
			}
			i++
		}
	}
	var footer Footer
	tmplft, err := template.ParseFiles("footer.html")
	if err != nil {
		panic(err)
	}
	footer.PoweredBy = "This page is powered by " + runtime.Version() + "."
	err = tmplft.Execute(w, footer)
	if err != nil {
		panic(err)
	}
}
// main wires up the HTTP routes and serves on :8080. ListenAndServe
// only returns on failure, so its error is escalated via panic.
func main() {
	http.HandleFunc("/", viewHandler)
	http.HandleFunc("/upload/", uploadHandler)
	http.HandleFunc("/delete/", deleteHandler)
	// Serve uploaded PDFs and static images straight from disk by
	// mapping the URL path (minus the leading slash) to a local path.
	// http.ServeFile rejects request paths containing "..", which
	// limits traversal outside the working directory.
	http.HandleFunc("/files/", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, r.URL.Path[1:])
	})
	http.HandleFunc("/images/", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, r.URL.Path[1:])
	})
	panic(http.ListenAndServe(":8080", nil))
}
|
package authentication
import (
"github.com/go-ldap/ldap/v3"
)
// ProductionLDAPClientFactory is the production implementation of an
// LDAP connection factory. The zero value is ready to use.
type ProductionLDAPClientFactory struct{}

// NewProductionLDAPClientFactory creates a concrete LDAP connection
// factory.
func NewProductionLDAPClientFactory() *ProductionLDAPClientFactory {
	return &ProductionLDAPClientFactory{}
}

// DialURL creates a client from an LDAP URL when successful. The
// concrete connection is returned as the package's LDAPClient type —
// presumably so alternative factories can be substituted in tests;
// confirm against the LDAPClient declaration.
func (f *ProductionLDAPClientFactory) DialURL(addr string, opts ...ldap.DialOpt) (client LDAPClient, err error) {
	return ldap.DialURL(addr, opts...)
}
|
package app
import (
"createorder/controllers/order"
"createorder/logger"
"github.com/gin-gonic/gin"
)
var router = gin.Default()

// Start registers the order and payment routes, then blocks serving
// HTTP on port 5551.
func Start() {
	router.POST("/Order/CreateOrder", order.CreateOrder)
	router.POST("/Order/CompleteOrder/:order_id", order.CompleteOrder)
	router.GET("/Order/GetOrder/:order_id", order.GetOrder)
	router.GET("/Payment/AddPaymentItem", order.AddPaymentItem)
	router.GET("/Payment/UpdatePaymentItem", order.UpdatePaymentItem)
	logger.Info("application to start")
	// router.Run blocks until the server stops; its error (e.g. the
	// port already being in use) was previously discarded, hiding
	// startup failures completely.
	if err := router.Run(":5551"); err != nil {
		logger.Info("server terminated: " + err.Error())
	}
}
|
package main
import (
"database/sql"
"fmt"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"time"
)
// gorm v2: installation and MySQL connection example.
var (
	sqlDB *sql.DB   // shared database/sql connection pool
	gormDB *gorm.DB // gorm handle layered over sqlDB
)
// InitDB opens (once) the shared *sql.DB pool, verifies connectivity
// with Ping, configures pool limits, and initializes the package-level
// gorm handle on top of the existing connection. On any failure it
// prints the error and returns, leaving gormDB nil.
//
// Fix: the sql.Open error was previously discarded with `_`; it is now
// checked like every other step.
func InitDB() {
	// driverName
	driverName := "mysql"
	// DSN components.
	dbUser := "root"
	dbPassword := "root"
	protocol := "tcp"
	dbHost := "127.0.0.1"
	dbPort := "3306"
	dbName := "blog"
	parseTime := true
	loc := "Local"
	charset := "utf8mb4"
	dataSourceName := fmt.Sprintf("%s:%s@%s(%s:%s)/%s?charset=%s&parseTime=%t&loc=%s", dbUser, dbPassword, protocol, dbHost, dbPort, dbName, charset, parseTime, loc)

	// Open the pool exactly once. sql.Open validates its arguments
	// without dialing, but its error must still be checked.
	if sqlDB == nil {
		var err error
		sqlDB, err = sql.Open(driverName, dataSourceName)
		if err != nil {
			fmt.Printf("sql.Open() err:%s\n", err)
			return
		}
	}
	err := sqlDB.Ping()
	if err != nil {
		fmt.Printf("sqlDB.Ping() err:%s\n", err)
		return
	}

	// gorm reuses database/sql's connection pool.
	sqlDB.SetMaxOpenConns(10)           // max open connections in the pool
	sqlDB.SetMaxIdleConns(5)            // max idle connections in the pool
	sqlDB.SetConnMaxLifetime(time.Hour) // max time a connection may be reused

	// Initialize *gorm.DB from the existing connection.
	gormDB, err = gorm.Open(
		mysql.New(
			mysql.Config{
				Conn: sqlDB,
			},
		),
		&gorm.Config{
			SkipDefaultTransaction: true, // disable the default transaction for writes
			DisableAutomaticPing:   true, // disable automatic pinging of the database
		},
	)
	if err != nil {
		fmt.Printf("gorm.Open() err:%s\n", err)
		return
	}
	// gorm also supports advanced mysql-driver configuration and custom drivers.
}
// main initializes the database and prints whether gorm's default
// per-write transaction has been disabled.
func main() {
	defer func() {
		if err := recover(); err != nil {
			fmt.Printf("panic() err:%s\n", err)
			return
		}
	}()

	// Initialize the database.
	InitDB()

	// InitDB returns without assigning gormDB when any step fails;
	// guard explicitly instead of relying on recover to catch the
	// resulting nil-pointer dereference.
	if gormDB == nil {
		fmt.Println("gormDB is not initialized")
		return
	}

	// Confirm the default-transaction setting took effect.
	fmt.Println(gormDB.SkipDefaultTransaction)
}
|
package main
import (
"log"
"os"
"html/template"
)
// user models the data rendered into the greeting template.
type user struct {
	Name, Email, Emoji string
	Colors             []string
}

// We'll want a pointer to a Template object to be able to execute our
// templates later.
var templates *template.Template

// For convenience, load templates in the init().
func init() {
	// templates will now point to a Template glob that has all templates.
	// Let Must function handle our errors for us (it panics on a bad glob).
	templates = template.Must(template.ParseGlob("templates/*.gohtml"))
}
// main renders the greeting template for a sample user to stdout.
func main() {
	b := user{
		Name:   "Balloonicorn",
		Email:  "b@sparkles.com",
		Emoji:  "💖",
		Colors: []string{"pink", "blue", "rainbow"},
	}

	// Render a specific template to stdout.
	// Fix: the result variable was previously named "error", shadowing
	// the built-in error type; renamed to the idiomatic err.
	err := templates.ExecuteTemplate(os.Stdout, "greeting.gohtml", b)
	if err != nil {
		log.Fatalln(err)
	}
}
|
// Copyright 2013 Walter Schulze
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//Command funcs-gen generates some of the code in the funcs package.
package main
import (
"strings"
"github.com/katydid/katydid/gen"
)
// compareStr is the text/template for a generated binary comparison
// function: for each (Type, CName) pair it emits the struct, Eval,
// Compare, Shorthand, String, HasVariable, Hash, registration and the
// public constructor. The template body is generated code, not code of
// this package, so it is left byte-for-byte unchanged.
const compareStr = `
type {{.Type}}{{.CName}} struct {
	V1 {{.CType}}
	V2 {{.CType}}
	hash uint64
	hasVariable bool
}
func (this *{{.Type}}{{.CName}}) Eval() (bool, error) {
	v1, err := this.V1.Eval()
	if err != nil {
		return {{.Error}}, nil
	}
	v2, err := this.V2.Eval()
	if err != nil {
		return {{.Error}}, nil
	}
	{{if .Eval}}{{.Eval}}{{else}}return v1 {{.Operator}} v2, nil{{end}}
}
func (this *{{.Type}}{{.CName}}) Compare(that Comparable) int {
	if this.Hash() != that.Hash() {
		if this.Hash() < that.Hash() {
			return -1
		}
		return 1
	}
	if other, ok := that.(*{{.Type}}{{.CName}}); ok {
		if c := this.V1.Compare(other.V1); c != 0 {
			return c
		}
		if c := this.V2.Compare(other.V2); c != 0 {
			return c
		}
		return 0
	}
	return strings.Compare(this.String(), that.String())
}
func (this *{{.Type}}{{.CName}}) Shorthand() (string, bool) {
	if _, ok1 := this.V1.(aVariable); ok1 {
		if _, ok2 := this.V2.(aConst); ok2 {
			return "{{.Operator}} " + this.V2.String(), true
		}
	}
	if _, ok2 := this.V2.(aVariable); ok2 {
		if _, ok1 := this.V1.(aConst); ok1 {
			return "{{.Operator}} " + this.V1.String(), true
		}
	}
	return "", false
}
func (this *{{.Type}}{{.CName}}) String() string {
	return "{{.Name}}" + "(" + sjoin(this.V1, this.V2) + ")"
}
func (this *{{.Type}}{{.CName}}) HasVariable() bool {
	return this.hasVariable
}
func (this *{{.Type}}{{.CName}}) Hash() uint64 {
	return this.hash
}
func init() {
	Register("{{.Name}}", {{.CType}}{{.CName}})
}
// {{.CType}}{{.CName}} returns a new {{.Comment}} function.
func {{.CType}}{{.CName}}(a, b {{.CType}}) Bool {
	return TrimBool(&{{.Type}}{{.CName}}{
		V1: a,
		V2: b,
		hash: hashWithId({{.Hash}}, a, b),
		hasVariable: a.HasVariable() || b.HasVariable(),
	})
}
`

// compare supplies the fields referenced by compareStr.
type compare struct {
	Name string     // registered function name, e.g. "eq"
	Operator string // Go comparison operator, e.g. "=="
	Type string     // generated struct name prefix
	Eval string     // optional custom Eval body; empty means "v1 op v2"
	CType string    // argument/function type name
	Error string    // boolean returned from Eval on argument error
	Comment string  // doc-comment fragment for the constructor
}

// CName returns the capitalized form of Name used in generated
// identifiers: "ge" and "le" become the acronyms "GE"/"LE", every
// other name gets its first letter capitalized.
func (this *compare) CName() string {
	if this.Name == "ge" || this.Name == "le" {
		return strings.ToUpper(this.Name)
	}
	return gen.CapFirst(this.Name)
}

// Hash derives a stable hash seed from the function name; it is
// spliced into the generated constructor via {{.Hash}}.
func (this *compare) Hash() uint64 {
	return deriveHashStr(this.Name)
}
// newFuncStr is the text/template for the typed New<Type> wrappers on
// *Maker, which assert the dynamically created function to a concrete
// function type.
const newFuncStr = `
//New{{.}} dynamically creates and asserts the returning function is of type {{.}}.
//This function is used by the compose library to compile functions together.
func (f *Maker) New{{.}}(values ...interface{}) ({{.}}, error) {
	v, err := f.New(values...)
	if err != nil {
		return nil, err
	}
	return v.({{.}}), nil
}
`

// constStr is the text/template for a typed constant function: the
// Const<CType> marker interface, its private implementation, the
// <CType>Const constructor, and the Trim<CType> constant-folding
// helper.
const constStr = `
type Const{{.CType}} interface {
	{{.CType}}
}
var typConst{{.CType}} reflect.Type = reflect.TypeOf((*Const{{.CType}})(nil)).Elem()
type const{{.CType}} struct {
	v {{.GoType}}
	hash uint64
}
//{{.CType}}Const returns a new constant function of type {{.CType}}
func {{.CType}}Const(v {{.GoType}}) Const{{.CType}} {
	h := uint64(17)
	h = 31*h + {{.Hash}}
	h = 31*h + deriveHash{{.CType}}(v)
	return &const{{.CType}}{v, h}
}
func (this *const{{.CType}}) IsConst() {}
func (this *const{{.CType}}) Eval() ({{.GoType}}, error) {
	return this.v, nil
}
func (this *const{{.CType}}) HasVariable() bool { return false }
func (this *const{{.CType}}) Hash() uint64 {
	return this.hash
}
func (this *const{{.CType}}) String() string {
	{{if .ListType}}ss := make([]string, len(this.v))
	for i := range this.v {
		ss[i] = fmt.Sprintf("{{.String}}", this.v[i])
	}
	return "[]{{.ListType}}{" + strings.Join(ss, ",") + "}"{{else}}return fmt.Sprintf("{{.String}}", this.v){{end}}
}
// Trim{{.CType}} turns functions into constants, if they can be evaluated at compile time.
func Trim{{.CType}}(f {{.CType}}) {{.CType}} {
	if _, ok := f.(Const); ok {
		return f
	}
	if f.HasVariable() {
		return f
	}
	v, err := f.Eval()
	if err != nil {
		return f
	}
	return {{.CType}}Const(v)
}
`

// conster supplies the fields referenced by constStr.
type conster struct {
	CType string    // function type name, e.g. "Int"
	GoType string   // underlying Go type, e.g. "int64"
	String string   // fmt verb used to render the value
	ListType string // element type name when CType is a list type; empty otherwise
}

// Hash derives a stable hash seed from the function type name.
func (this *conster) Hash() uint64 {
	return deriveHashStr(this.CType)
}
// listStr is the text/template for a list-valued function: the
// generated listOf<FuncType> wraps a slice of element functions and
// evaluates them into a slice of Go values, with Compare ordering
// first by hash, then by length, then element-wise.
const listStr = `
type listOf{{.FuncType}} struct {
	List []{{.FuncType}}
	hash uint64
	hasVariable bool
}
//NewListOf{{.FuncType}} returns a new function that when evaluated returns a list of type {{.FuncType}}
func NewListOf{{.FuncType}}(v []{{.FuncType}}) {{.CType}} {
	h := uint64(17)
	h = 31*h + {{.Hash}}
	for i := 0; i < len(v); i++ {
		h = 31*h + v[i].Hash()
	}
	hasVariable := false
	for _, vv := range v {
		if vv.HasVariable() {
			hasVariable = true
			break
		}
	}
	return Trim{{.CType}}(&listOf{{.FuncType}}{
		List: v,
		hash: h,
		hasVariable: hasVariable,
	})
}
func (this *listOf{{.FuncType}}) Eval() ([]{{.GoType}}, error) {
	res := make([]{{.GoType}}, len(this.List))
	var err error
	for i, e := range this.List {
		res[i], err = e.Eval()
		if err != nil {
			return nil, err
		}
	}
	return res, nil
}
func (this *listOf{{.FuncType}}) Compare(that Comparable) int {
	if this.Hash() != that.Hash() {
		if this.Hash() < that.Hash() {
			return -1
		}
		return 1
	}
	if other, ok := that.(*listOf{{.FuncType}}); ok {
		if len(this.List) != len(other.List) {
			if len(this.List) < len(other.List) {
				return -1
			}
			return 1
		}
		for i := range this.List {
			if c := this.List[i].Compare(other.List[i]); c != 0 {
				return c
			}
		}
		return 0
	}
	return strings.Compare(this.String(), that.String())
}
func (this *listOf{{.FuncType}}) HasVariable() bool {
	return this.hasVariable
}
func (this *listOf{{.FuncType}}) Hash() uint64 {
	return this.hash
}
func (this *listOf{{.FuncType}}) String() string {
	ss := make([]string, len(this.List))
	for i := range this.List {
		ss[i] = this.List[i].String()
	}
	return "[]{{.Type}}{" + strings.Join(ss, ",") + "}"
}
func (this *listOf{{.FuncType}}) IsListOf() {}
`

// list supplies the fields referenced by listStr.
type list struct {
	Type string     // element type name used in String()
	CType string    // list function type name, e.g. "Ints"
	FuncType string // element function type name, e.g. "Int"
	GoType string   // Go element type, e.g. "int64"
}

// Hash derives a stable hash seed from the list function type name.
func (this *list) Hash() uint64 {
	return deriveHashStr(this.CType)
}
// printStr is the text/template for a pass-through debugging function:
// print<Name> evaluates its argument, prints the value (or error) to
// stdout, and returns the value unchanged. HasVariable always reports
// true in the generated code, which prevents constant folding from
// eliminating the print.
const printStr = `
type print{{.Name}} struct {
	E {{.Name}}
	hash uint64
}
func (this *print{{.Name}}) Eval() ({{.GoType}}, error) {
	v, err := this.E.Eval()
	if err != nil {
		fmt.Printf("error: %#v\n", v)
	} else {
		fmt.Printf("value: %#v\n", v)
	}
	return v, err
}
func (this *print{{.Name}}) Compare(that Comparable) int {
	if this.Hash() != that.Hash() {
		if this.Hash() < that.Hash() {
			return -1
		}
		return 1
	}
	if other, ok := that.(*print{{.Name}}); ok {
		if c := this.E.Compare(other.E); c != 0 {
			return c
		}
		return 0
	}
	return strings.Compare(this.String(), that.String())
}
func (this *print{{.Name}}) String() string {
	return "print(" + this.E.String() +")"
}
func (this *print{{.Name}}) Hash() uint64 {
	return this.hash
}
func (this *print{{.Name}}) HasVariable() bool { return true }
func init() {
	Register("print", Print{{.Name}})
}
//Print{{.Name}} returns a function that prints out the value of the argument function and returns its value.
func Print{{.Name}}(e {{.Name}}) {{.Name}} {
	return &print{{.Name}}{
		E: e,
		hash: hashWithId({{.Hash}}, e),
	}
}
`

// printer supplies the fields referenced by printStr.
type printer struct {
	Name string   // function type name, e.g. "Int"
	GoType string // Go type returned by the generated Eval
}

// Hash derives a stable hash seed from the function type name.
func (this *printer) Hash() uint64 {
	return deriveHashStr(this.Name)
}
// lengthStr is the text/template for the length function over a list
// type: the generated len<Type> evaluates its argument and returns the
// element count as an Int. The template is parameterized on a single
// string (the list function type name), referenced as {{.}}.
const lengthStr = `
type len{{.}} struct {
	E {{.}}
	hash uint64
	hasVariable bool
}
func (this *len{{.}}) Eval() (int64, error) {
	e, err := this.E.Eval()
	if err != nil {
		return 0, err
	}
	return int64(len(e)), nil
}
func (this *len{{.}}) Compare(that Comparable) int {
	if this.Hash() != that.Hash() {
		if this.Hash() < that.Hash() {
			return -1
		}
		return 1
	}
	if other, ok := that.(*len{{.}}); ok {
		if c := this.E.Compare(other.E); c != 0 {
			return c
		}
		return 0
	}
	return strings.Compare(this.String(), that.String())
}
func (this *len{{.}}) String() string {
	return "length(" + this.E.String() + ")"
}
func (this *len{{.}}) HasVariable() bool {
	return this.hasVariable
}
func (this *len{{.}}) Hash() uint64 {
	return this.hash
}
func init() {
	Register("length", Len{{.}})
}
//Len{{.}} returns a function that returns the length of a list of type {{.}}
func Len{{.}}(e {{.}}) Int {
	return TrimInt(&len{{.}}{
		E: e,
		hash: Hash("length", e),
		hasVariable: e.HasVariable(),
	})
}
`
// elemStr is the code-generation template for elem(list, n); fields come from
// the elemer struct: ListType, ReturnType, ThrowType, Default and Hash.
const elemStr = `
type elem{{.ListType}} struct {
	List {{.ListType}}
	Index Int
	hash uint64
	hasVariable bool
}
func (this *elem{{.ListType}}) Eval() ({{.ReturnType}}, error) {
	list, err := this.List.Eval()
	if err != nil {
		return {{.Default}}, err
	}
	index64, err := this.Index.Eval()
	if err != nil {
		return {{.Default}}, err
	}
	index := int(index64)
	if len(list) == 0 {
		return {{.Default}}, NewRangeCheckErr(index, len(list))
	}
	if index < 0 {
		index = index % len(list)
	}
	if len(list) <= index {
		return {{.Default}}, NewRangeCheckErr(index, len(list))
	}
	return list[index], nil
}
func (this *elem{{.ListType}}) Compare(that Comparable) int {
	if this.Hash() != that.Hash() {
		if this.Hash() < that.Hash() {
			return -1
		}
		return 1
	}
	if other, ok := that.(*elem{{.ListType}}); ok {
		if c := this.List.Compare(other.List); c != 0 {
			return c
		}
		if c := this.Index.Compare(other.Index); c != 0 {
			return c
		}
		return 0
	}
	return strings.Compare(this.String(), that.String())
}
func (this *elem{{.ListType}}) HasVariable() bool {
	return this.hasVariable
}
func (this *elem{{.ListType}}) String() string {
	return "elem(" + sjoin(this.List, this.Index) + ")"
}
func (this *elem{{.ListType}}) Hash() uint64 {
	return this.hash
}
func init() {
	Register("elem", Elem{{.ListType}})
}
//Elem{{.ListType}} returns a function that returns the n'th element of the list.
func Elem{{.ListType}}(list {{.ListType}}, n Int) {{.ThrowType}} {
	return Trim{{.ThrowType}}(&elem{{.ListType}}{
		List: list,
		Index: n,
		hash: hashWithId({{.Hash}}, n, list),
		hasVariable: n.HasVariable() || list.HasVariable(),
	})
}
`
// elemer is the template data for the elem(...) generator: the list type, its
// element (return) type, the funcs type handed back to callers, and the
// zero/default value returned on error.
type elemer struct {
	ListType   string
	ReturnType string
	ThrowType  string
	Default    string
}
// Hash returns a hash derived from the elemer's ListType
// (presumably consumed as {{.Hash}} by the elem template — confirm in the gen package).
func (this *elemer) Hash() uint64 {
	return deriveHashStr(this.ListType)
}
// rangeStr is the code-generation template for range(list, from, to); fields
// come from the ranger struct (ListType, ReturnType) plus the Hash method.
const rangeStr = `
type range{{.ListType}} struct {
	List {{.ListType}}
	First Int
	Last Int
	hash uint64
	hasVariable bool
}
func (this *range{{.ListType}}) Eval() ({{.ReturnType}}, error) {
	list, err := this.List.Eval()
	if err != nil {
		return nil, err
	}
	first64, err := this.First.Eval()
	if err != nil {
		return nil, err
	}
	first := int(first64)
	if len(list) == 0 {
		return nil, NewRangeCheckErr(first, len(list))
	}
	if first < 0 {
		first = first % len(list)
	}
	if first > len(list) {
		return nil, NewRangeCheckErr(first, len(list))
	}
	last64, err := this.Last.Eval()
	if err != nil {
		return nil, err
	}
	last := int(last64)
	if last < 0 {
		last = last % len(list)
	}
	if last > len(list) {
		return nil, NewRangeCheckErr(last, len(list))
	}
	if first > last {
		return nil, NewRangeErr(first, last)
	}
	return list[first:last], nil
}
func (this *range{{.ListType}}) Compare(that Comparable) int {
	if this.Hash() != that.Hash() {
		if this.Hash() < that.Hash() {
			return -1
		}
		return 1
	}
	if other, ok := that.(*range{{.ListType}}); ok {
		if c := this.List.Compare(other.List); c != 0 {
			return c
		}
		if c := this.First.Compare(other.First); c != 0 {
			return c
		}
		if c := this.Last.Compare(other.Last); c != 0 {
			return c
		}
		return 0
	}
	return strings.Compare(this.String(), that.String())
}
func (this *range{{.ListType}}) HasVariable() bool {
	return this.hasVariable
}
func (this *range{{.ListType}}) String() string {
	return "range(" + sjoin(this.List, this.First, this.Last) +")"
}
func (this *range{{.ListType}}) Hash() uint64 {
	return this.hash
}
func init() {
	Register("range", Range{{.ListType}})
}
//Range{{.ListType}} returns a function that returns a range of elements from a list.
func Range{{.ListType}}(list {{.ListType}}, from, to Int) {{.ListType}} {
	return Trim{{.ListType}}(&range{{.ListType}}{
		List: list,
		First: from,
		Last: to,
		hash: hashWithId({{.Hash}}, from, to, list),
		hasVariable: from.HasVariable() || to.HasVariable() || list.HasVariable(),
	})
}
`
// ranger is the template data for the range(...) generator: the funcs list type
// name and its Go return type.
type ranger struct {
	ListType   string
	ReturnType string
}
// Hash returns a hash derived from the ranger's ListType
// (presumably consumed as {{.Hash}} by the range template — confirm in the gen package).
func (this *ranger) Hash() uint64 {
	return deriveHashStr(this.ListType)
}
// variableStr is the code-generation template for typed variables; fields come
// from the varer struct: Name, Decode, GoType and Default.
const variableStr = `
type var{{.Name}} struct {
	Value parser.Value
	hash uint64
}
var _ Setter = &var{{.Name}}{}
var _ aVariable = &var{{.Name}}{}
type ErrNot{{.Name}}Const struct {}
func (this ErrNot{{.Name}}Const) Error() string {
	return "${{.Decode}} is not a const"
}
func (this *var{{.Name}}) Eval() ({{.GoType}}, error) {
	if this.Value == nil {
		return {{.Default}}, ErrNot{{.Name}}Const{}
	}
	v, err := this.Value.{{.Name}}()
	if err != nil {
		return {{.Default}}, err
	}
	return v, nil
}
func (this *var{{.Name}}) Compare(that Comparable) int {
	if this.Hash() != that.Hash() {
		if this.Hash() < that.Hash() {
			return -1
		}
		return 1
	}
	if _, ok := that.(*var{{.Name}}); ok {
		return 0
	}
	return strings.Compare(this.String(), that.String())
}
func (this *var{{.Name}}) Hash() uint64 {
	return this.hash
}
func (this *var{{.Name}}) HasVariable() bool { return true }
func (this *var{{.Name}}) isVariable() {}
func (this *var{{.Name}}) SetValue(v parser.Value) {
	this.Value = v
}
func (this *var{{.Name}}) String() string {
	return "${{.Decode}}"
}
//{{.Name}}Var returns a variable of type {{.Name}}
func {{.Name}}Var() *var{{.Name}} {
	h := uint64(17)
	h = 31*h + {{.Hash}}
	return &var{{.Name}}{hash: h}
}
`
// varer is the template data for the variable generator: Name is the funcs type
// name, Decode the lowercase name used in the $-string form, GoType the Go type,
// and Default the zero value returned on error.
type varer struct {
	Name    string
	Decode  string
	GoType  string
	Default string
}
// Hash returns a hash derived from the varer's Name
// (presumably consumed as {{.Hash}} by the variable template — confirm in the gen package).
func (this *varer) Hash() uint64 {
	return deriveHashStr(this.Name)
}
// typStr is the code-generation template for type(...), which evaluates its
// argument and reports whether the evaluation succeeded; {{.Name}} comes from typer.
const typStr = `
type typ{{.Name}} struct {
	E {{.Name}}
	hash uint64
	hasVariable bool
}
func (this *typ{{.Name}}) Eval() (bool, error) {
	_, err := this.E.Eval()
	return (err == nil), nil
}
func (this *typ{{.Name}}) Compare(that Comparable) int {
	if this.Hash() != that.Hash() {
		if this.Hash() < that.Hash() {
			return -1
		}
		return 1
	}
	if other, ok := that.(*typ{{.Name}}); ok {
		if c := this.E.Compare(other.E); c != 0 {
			return c
		}
		return 0
	}
	return strings.Compare(this.String(), that.String())
}
func (this *typ{{.Name}}) HasVariable() bool {
	return this.hasVariable
}
func (this *typ{{.Name}}) String() string {
	return "type(" + this.E.String() + ")"
}
func (this *typ{{.Name}}) Hash() uint64 {
	return this.hash
}
func init() {
	Register("type", Type{{.Name}})
}
//Type{{.Name}} returns a function that returns true if the error returned by the argument function is nil.
func Type{{.Name}}(v {{.Name}}) Bool {
	return TrimBool(&typ{{.Name}}{
		E: v,
		hash: hashWithId({{.Hash}}, v),
		hasVariable: v.HasVariable(),
	})
}
`
// typer is the template data for the type(...) generator; Name is the funcs type name.
type typer struct {
	Name string
}
// Hash returns a hash derived from the typer's Name
// (presumably consumed as {{.Hash}} by the type template — confirm in the gen package).
func (this *typer) Hash() uint64 {
	return deriveHashStr(this.Name)
}
// inSetStr is the code-generation template for contains(elem, constList); the
// list must be const so it can be pre-evaluated into a set at construction time.
const inSetStr = `
type inSet{{.Name}} struct {
	Elem {{.Name}}
	List {{.ConstListType}}
	set map[{{.Type}}]struct{}
	hash uint64
	hasVariable bool
}
func (this *inSet{{.Name}}) Eval() (bool, error) {
	v, err := this.Elem.Eval()
	if err != nil {
		return false, err
	}
	_, ok := this.set[v]
	return ok, nil
}
func (this *inSet{{.Name}}) Compare(that Comparable) int {
	if this.Hash() != that.Hash() {
		if this.Hash() < that.Hash() {
			return -1
		}
		return 1
	}
	if other, ok := that.(*inSet{{.Name}}); ok {
		if c := this.Elem.Compare(other.Elem); c != 0 {
			return c
		}
		if c := this.List.Compare(other.List); c != 0 {
			return c
		}
		return 0
	}
	return strings.Compare(this.String(), that.String())
}
func (this *inSet{{.Name}}) String() string {
	return "contains(" + sjoin(this.Elem, this.List) + ")"
}
func (this *inSet{{.Name}}) HasVariable() bool {
	return this.hasVariable
}
func (this *inSet{{.Name}}) Hash() uint64 {
	return this.hash
}
func init() {
	Register("contains", Contains{{.Name}})
}
//Contains{{.Name}} returns a function that checks whether the element is contained in the list.
func Contains{{.Name}}(element {{.Name}}, list {{.ConstListType}}) (Bool, error) {
	if list.HasVariable() {
		return nil, ErrContainsListNotConst{}
	}
	l, err := list.Eval()
	if err != nil {
		return nil, err
	}
	set := make(map[{{.Type}}]struct{})
	for i := range l {
		set[l[i]] = struct{}{}
	}
	return TrimBool(&inSet{{.Name}}{
		Elem: element,
		List: list,
		set: set,
		hash: hashWithId({{.Hash}}, element, list),
		hasVariable: element.HasVariable() || list.HasVariable(),
	}), nil
}
`
// inSeter is the template data for the contains(...) generator: the element's
// funcs type name, the const list type, and the Go element type used as set key.
type inSeter struct {
	Name          string
	ConstListType string
	Type          string
}
// Hash returns a hash derived from the inSeter's Name
// (presumably consumed as {{.Hash}} by the inSet template — confirm in the gen package).
func (this *inSeter) Hash() uint64 {
	return deriveHashStr(this.Name)
}
// main drives code generation for the funcs package: each gen(template, file,
// data, imports...) call presumably renders the template once per data element
// into the named .gen.go file, with the trailing quoted strings becoming that
// file's import list — confirm against the gen package.
func main() {
	gen := gen.NewPackage("funcs")
	// Comparison operators for every comparable funcs type.
	gen(compareStr, "compare.gen.go", []interface{}{
		&compare{"ge", ">=", "double", "", "Double", "false", "greater than or equal"},
		&compare{"ge", ">=", "int", "", "Int", "false", "greater than or equal"},
		&compare{"ge", ">=", "uint", "", "Uint", "false", "greater than or equal"},
		&compare{"ge", ">=", "bytes", "return bytes.Compare(v1, v2) >= 0, nil", "Bytes", "false", "greater than or equal"},
		&compare{"gt", ">", "double", "", "Double", "false", "greater than"},
		&compare{"gt", ">", "int", "", "Int", "false", "greater than"},
		&compare{"gt", ">", "uint", "", "Uint", "false", "greater than"},
		&compare{"gt", ">", "bytes", "return bytes.Compare(v1, v2) > 0, nil", "Bytes", "false", "greater than"},
		&compare{"le", "<=", "double", "", "Double", "false", "less than or equal"},
		&compare{"le", "<=", "int", "", "Int", "false", "less than or equal"},
		&compare{"le", "<=", "uint", "", "Uint", "false", "less than or equal"},
		&compare{"le", "<=", "bytes", "return bytes.Compare(v1, v2) <= 0, nil", "Bytes", "false", "less than or equal"},
		&compare{"lt", "<", "double", "", "Double", "false", "less than"},
		&compare{"lt", "<", "int", "", "Int", "false", "less than"},
		&compare{"lt", "<", "uint", "", "Uint", "false", "less than"},
		&compare{"lt", "<", "bytes", "return bytes.Compare(v1, v2) < 0, nil", "Bytes", "false", "less than"},
		&compare{"eq", "==", "double", "", "Double", "false", "equal"},
		&compare{"eq", "==", "int", "", "Int", "false", "equal"},
		&compare{"eq", "==", "uint", "", "Uint", "false", "equal"},
		&compare{"eq", "==", "bool", "", "Bool", "false", "equal"},
		&compare{"eq", "==", "string", "", "String", "false", "equal"},
		&compare{"eq", "==", "bytes", "return bytes.Equal(v1, v2), nil", "Bytes", "false", "equal"},
		&compare{"ne", "!=", "double", "", "Double", "false", "not equal"},
		&compare{"ne", "!=", "int", "", "Int", "false", "not equal"},
		&compare{"ne", "!=", "uint", "", "Uint", "false", "not equal"},
		&compare{"ne", "!=", "bool", "", "Bool", "false", "not equal"},
		&compare{"ne", "!=", "string", "", "String", "false", "not equal"},
		&compare{"ne", "!=", "bytes", "return !bytes.Equal(v1, v2), nil", "Bytes", "false", "not equal"},
	}, `"bytes"`, `"strings"`)
	// Constructor helpers for each funcs type.
	gen(newFuncStr, "newfunc.gen.go", []interface{}{
		"Double",
		"Int",
		"Uint",
		"Bool",
		"String",
		"Bytes",
		"Doubles",
		"Ints",
		"Uints",
		"Bools",
		"Strings",
		"ListOfBytes",
	})
	gen(constStr, "const.gen.go", []interface{}{
		&conster{"Double", "float64", "double(%f)", ""},
		&conster{"Int", "int64", "int(%d)", ""},
		&conster{"Uint", "uint64", "uint(%d)", ""},
		&conster{"Bool", "bool", "%v", ""},
		&conster{"String", "string", "`%s`", ""},
		&conster{"Bytes", "[]byte", "%#v", ""},
		&conster{"Doubles", "[]float64", "double(%f)", "double"},
		&conster{"Ints", "[]int64", "int(%d)", "int"},
		&conster{"Uints", "[]uint64", "uint(%d)", "uint"},
		&conster{"Bools", "[]bool", "%v", "bool"},
		&conster{"Strings", "[]string", "`%s`", "string"},
		&conster{"ListOfBytes", "[][]byte", "%#v", "[]byte"},
	}, `"fmt"`, `"strings"`, `"reflect"`)
	gen(listStr, "list.gen.go", []interface{}{
		&list{"double", "Doubles", "Double", "float64"},
		&list{"int", "Ints", "Int", "int64"},
		&list{"uint", "Uints", "Uint", "uint64"},
		&list{"bool", "Bools", "Bool", "bool"},
		&list{"string", "Strings", "String", "string"},
		&list{"[]byte", "ListOfBytes", "Bytes", "[]byte"},
	}, `"strings"`)
	gen(printStr, "print.gen.go", []interface{}{
		&printer{"Double", "float64"},
		&printer{"Int", "int64"},
		&printer{"Uint", "uint64"},
		&printer{"Bool", "bool"},
		&printer{"String", "string"},
		&printer{"Bytes", "[]byte"},
		&printer{"Doubles", "[]float64"},
		&printer{"Ints", "[]int64"},
		&printer{"Uints", "[]uint64"},
		&printer{"Bools", "[]bool"},
		&printer{"Strings", "[]string"},
		&printer{"ListOfBytes", "[][]byte"},
	}, `"fmt"`, `"strings"`)
	gen(lengthStr, "length.gen.go", []interface{}{
		"Doubles",
		"Ints",
		"Uints",
		"Bools",
		"Strings",
		"ListOfBytes",
		"String",
		"Bytes",
	}, `"strings"`)
	gen(elemStr, "elem.gen.go", []interface{}{
		&elemer{"Doubles", "float64", "Double", "0"},
		&elemer{"Ints", "int64", "Int", "0"},
		&elemer{"Uints", "uint64", "Uint", "0"},
		&elemer{"Bools", "bool", "Bool", "false"},
		&elemer{"Strings", "string", "String", `""`},
		&elemer{"ListOfBytes", "[]byte", "Bytes", "nil"},
	}, `"strings"`)
	gen(rangeStr, "range.gen.go", []interface{}{
		&ranger{"Doubles", "[]float64"},
		&ranger{"Ints", "[]int64"},
		&ranger{"Uints", "[]uint64"},
		&ranger{"Bools", "[]bool"},
		&ranger{"Strings", "[]string"},
		&ranger{"ListOfBytes", "[][]byte"},
	}, `"strings"`)
	gen(variableStr, "variable.gen.go", []interface{}{
		&varer{"Double", "double", "float64", "0"},
		&varer{"Int", "int", "int64", "0"},
		&varer{"Uint", "uint", "uint64", "0"},
		&varer{"Bool", "bool", "bool", "false"},
		&varer{"String", "string", "string", `""`},
		&varer{"Bytes", "[]byte", "[]byte", "nil"},
	}, `"strings"`, `"github.com/katydid/katydid/parser"`)
	gen(typStr, "type.gen.go", []interface{}{
		&typer{"Double"},
		&typer{"Int"},
		&typer{"Uint"},
		&typer{"Bool"},
		&typer{"String"},
		&typer{"Bytes"},
	}, `"strings"`)
	// contains(...) is only generated for types whose const lists exist.
	gen(inSetStr, "inset.gen.go", []interface{}{
		&inSeter{"Int", "ConstInts", "int64"},
		&inSeter{"Uint", "ConstUints", "uint64"},
		&inSeter{"String", "ConstStrings", "string"},
	}, `"strings"`)
}
|
package model
// UtcTiming describes how to obtain UTC time: a time server (Value) and the
// DASH scheme id that defines how it is queried (SchemeIdUri).
type UtcTiming struct {
	// The server to get the time from
	Value string `json:"value,omitempty"`
	// The scheme id to use. Please refer to the DASH standard.
	SchemeIdUri string `json:"schemeIdUri,omitempty"`
}
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package handle
import (
"context"
"encoding/json"
"fmt"
"math"
"slices"
"strconv"
"strings"
"sync"
"time"
"github.com/ngaut/pools"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/config"
ddlUtil "github.com/pingcap/tidb/ddl/util"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/statistics/handle/cache"
handle_metrics "github.com/pingcap/tidb/statistics/handle/metrics"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/mathutil"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/pingcap/tidb/util/syncutil"
"github.com/tiancaiamao/gp"
"github.com/tikv/client-go/v2/oracle"
atomic2 "go.uber.org/atomic"
"go.uber.org/zap"
)
const (
	// TiDBGlobalStats represents the global-stats for a partitioned table.
	TiDBGlobalStats = "global"
	// MaxPartitionMergeBatchSize indicates the max batch size for a worker to merge partition stats.
	MaxPartitionMergeBatchSize = 256
)
// Handle can update stats info periodically.
type Handle struct {
	// this gpool is used to reuse goroutine in the mergeGlobalStatsTopN.
	gpool *gp.Pool
	// pool supplies sessions used to execute internal (restricted) SQL.
	pool sessionPool
	// initStatsCtx is the ctx only used for initStats
	initStatsCtx sessionctx.Context
	// sysProcTracker is used to track sys process like analyze
	sysProcTracker sessionctx.SysProcTracker
	// autoAnalyzeProcIDGetter is used to generate auto analyze ID.
	autoAnalyzeProcIDGetter func() uint64
	// InitStatsDone signals completion of the initial stats load
	// (NOTE(review): closed elsewhere — confirm the closer).
	InitStatsDone chan struct{}
	// ddlEventCh is a channel to notify a ddl operation has happened.
	// It is sent only by owner or the drop stats executor, and read by stats handle.
	ddlEventCh chan *ddlUtil.Event
	// idxUsageListHead contains all the index usage collectors required by session.
	idxUsageListHead *SessionIndexUsageCollector
	// listHead contains all the stats collector required by session.
	listHead *SessionStatsCollector
	// statsCache caches the table statistics.
	// It can be read by multiple readers at the same time without acquiring lock, but it can be
	// written only after acquiring the lock.
	statsCache *cache.StatsCachePointer
	// globalMap contains all the delta map from collectors when we dump them to KV.
	globalMap struct {
		data tableDeltaMap
		sync.Mutex
	}
	// colMap contains all the column stats usage information from collectors when we dump them to KV.
	colMap struct {
		data colStatsUsageMap
		sync.Mutex
	}
	// tableLocked used to store locked tables
	tableLocked []int64
	// StatsLoad is used to load stats concurrently
	StatsLoad StatsLoad
	// mu guards ctx, the session context used for stats reads/writes.
	mu struct {
		ctx sessionctx.Context
		syncutil.RWMutex
	}
	schemaMu struct {
		// pid2tid is the map from partition ID to table ID.
		pid2tid map[int64]int64
		// schemaVersion is the version of information schema when `pid2tid` is built.
		schemaVersion int64
		sync.RWMutex
	}
	// lease is the stats lease, read/written atomically via Lease/SetLease.
	lease atomic2.Duration
}
// GetTableLockedAndClearForTest for unit test only.
// It returns the current locked-table list and resets it to an empty slice.
func (h *Handle) GetTableLockedAndClearForTest() []int64 {
	locked := h.tableLocked
	h.tableLocked = make([]int64, 0)
	return locked
}
// LoadLockedTables load locked tables from store and replaces the in-memory
// locked-table list with the result.
func (h *Handle) LoadLockedTables() error {
	h.mu.Lock()
	defer h.mu.Unlock()
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	rows, _, err := h.execRestrictedSQL(ctx, "select table_id from mysql.stats_table_locked")
	if err != nil {
		return errors.Trace(err)
	}
	locked := make([]int64, len(rows))
	for i := range rows {
		locked[i] = rows[i].GetInt64(0)
	}
	h.tableLocked = locked
	return nil
}
// AddLockedTables add locked tables id to store.
// tids are the table IDs to lock, pids their partition IDs, and tables the
// matching names (used only for the duplicate-lock warning message).
// It returns a non-empty message when some tables were already locked.
func (h *Handle) AddLockedTables(tids []int64, pids []int64, tables []*ast.TableName) (string, error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	exec := h.mu.ctx.(sqlexec.SQLExecutor)
	_, err := exec.ExecuteInternal(ctx, "begin pessimistic")
	if err != nil {
		return "", err
	}
	// load tables to check duplicate when insert
	rows, _, err := h.execRestrictedSQL(ctx, "select table_id from mysql.stats_table_locked")
	if err != nil {
		return "", err
	}
	dupTables := make([]string, 0)
	tableLocked := make([]int64, 0)
	for _, row := range rows {
		tableLocked = append(tableLocked, row.GetInt64(0))
	}
	strTids := fmt.Sprintf("%v", tids)
	logutil.BgLogger().Info("lock table ", zap.String("category", "stats"), zap.String("tableIDs", strTids))
	for i, tid := range tids {
		_, err = exec.ExecuteInternal(ctx, "insert into mysql.stats_table_locked(table_id) select %? from dual where not exists(select table_id from mysql.stats_table_locked where table_id = %?)", tid, tid)
		if err != nil {
			logutil.BgLogger().Error("error occurred when insert mysql.stats_table_locked ", zap.String("category", "stats"), zap.Error(err))
			return "", err
		}
		// update handle
		if !isTableLocked(tableLocked, tid) {
			tableLocked = append(tableLocked, tid)
		} else {
			dupTables = append(dupTables, tables[i].Schema.L+"."+tables[i].Name.L)
		}
	}
	// insert related partitions while don't warning duplicate partitions
	for _, tid := range pids {
		_, err = exec.ExecuteInternal(ctx, "insert into mysql.stats_table_locked(table_id) select %? from dual where not exists(select table_id from mysql.stats_table_locked where table_id = %?)", tid, tid)
		if err != nil {
			logutil.BgLogger().Error("error occurred when insert mysql.stats_table_locked ", zap.String("category", "stats"), zap.Error(err))
			return "", err
		}
		if !isTableLocked(tableLocked, tid) {
			tableLocked = append(tableLocked, tid)
		}
	}
	err = finishTransaction(ctx, exec, err)
	if err != nil {
		return "", err
	}
	// update handle.tableLocked after transaction success, if txn failed, tableLocked won't be updated
	h.tableLocked = tableLocked
	if len(dupTables) > 0 {
		// strings.Join replaces the original skip-first manual concatenation loop.
		tables := strings.Join(dupTables, ", ")
		var msg string
		if len(tids) > 1 {
			if len(tids) > len(dupTables) {
				msg = "skip locking locked tables: " + tables + ", other tables locked successfully"
			} else {
				msg = "skip locking locked tables: " + tables
			}
		} else {
			msg = "skip locking locked table: " + tables
		}
		return msg, err
	}
	return "", err
}
// getStatsDeltaFromTableLocked get count, modify_count and version for the given table from mysql.stats_table_locked.
func (h *Handle) getStatsDeltaFromTableLocked(ctx context.Context, tableID int64) (count, modifyCount int64, version uint64, err error) {
	rows, _, err := h.execRestrictedSQL(ctx, "select count, modify_count, version from mysql.stats_table_locked where table_id = %?", tableID)
	if err != nil || len(rows) == 0 {
		// No matching row means the table is not locked: report zero deltas (err is nil then).
		return 0, 0, 0, err
	}
	row := rows[0]
	return row.GetInt64(0), row.GetInt64(1), row.GetUint64(2), nil
}
// RemoveLockedTables remove tables from table locked array.
// For each table/partition it folds the deltas accumulated while locked back
// into mysql.stats_meta, then deletes the lock row. It returns a non-empty
// message when some of the requested tables were not locked.
func (h *Handle) RemoveLockedTables(tids []int64, pids []int64, tables []*ast.TableName) (string, error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	exec := h.mu.ctx.(sqlexec.SQLExecutor)
	_, err := exec.ExecuteInternal(ctx, "begin pessimistic")
	if err != nil {
		return "", err
	}
	// load tables to check unlock the unlock table
	rows, _, err := h.execRestrictedSQL(ctx, "select table_id from mysql.stats_table_locked")
	if err != nil {
		return "", err
	}
	nonlockedTables := make([]string, 0)
	tableLocked := make([]int64, 0)
	for _, row := range rows {
		tableLocked = append(tableLocked, row.GetInt64(0))
	}
	strTids := fmt.Sprintf("%v", tids)
	logutil.BgLogger().Info("unlock table ", zap.String("category", "stats"), zap.String("tableIDs", strTids))
	for i, tid := range tids {
		// get stats delta during table locked
		count, modifyCount, version, err := h.getStatsDeltaFromTableLocked(ctx, tid)
		if err != nil {
			logutil.BgLogger().Error("error occurred when getStatsDeltaFromTableLocked", zap.String("category", "stats"), zap.Error(err))
			return "", err
		}
		// update stats_meta with stats delta
		_, err = exec.ExecuteInternal(ctx, "update mysql.stats_meta set version = %?, count = count + %?, modify_count = modify_count + %? where table_id = %?", version, count, modifyCount, tid)
		if err != nil {
			logutil.BgLogger().Error("error occurred when update mysql.stats_meta", zap.String("category", "stats"), zap.Error(err))
			return "", err
		}
		cache.TableRowStatsCache.Invalidate(tid)
		_, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_table_locked where table_id = %?", tid)
		if err != nil {
			logutil.BgLogger().Error("error occurred when delete from mysql.stats_table_locked ", zap.String("category", "stats"), zap.Error(err))
			return "", err
		}
		var exist bool
		exist, tableLocked = removeIfTableLocked(tableLocked, tid)
		if !exist {
			nonlockedTables = append(nonlockedTables, tables[i].Schema.L+"."+tables[i].Name.L)
		}
	}
	// delete related partitions while don't warning delete empty partitions
	for _, tid := range pids {
		// get stats delta during table locked
		count, modifyCount, version, err := h.getStatsDeltaFromTableLocked(ctx, tid)
		if err != nil {
			logutil.BgLogger().Error("error occurred when getStatsDeltaFromTableLocked", zap.String("category", "stats"), zap.Error(err))
			return "", err
		}
		// update stats_meta with stats delta
		_, err = exec.ExecuteInternal(ctx, "update mysql.stats_meta set version = %?, count = count + %?, modify_count = modify_count + %? where table_id = %?", version, count, modifyCount, tid)
		if err != nil {
			logutil.BgLogger().Error("error occurred when update mysql.stats_meta", zap.String("category", "stats"), zap.Error(err))
			return "", err
		}
		cache.TableRowStatsCache.Invalidate(tid)
		_, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_table_locked where table_id = %?", tid)
		if err != nil {
			logutil.BgLogger().Error("error occurred when delete from mysql.stats_table_locked ", zap.String("category", "stats"), zap.Error(err))
			return "", err
		}
		_, tableLocked = removeIfTableLocked(tableLocked, tid)
	}
	err = finishTransaction(ctx, exec, err)
	if err != nil {
		return "", err
	}
	// update handle.tableLocked after transaction success, if txn failed, tableLocked won't be updated
	h.tableLocked = tableLocked
	if len(nonlockedTables) > 0 {
		// strings.Join replaces the original skip-first manual concatenation loop.
		tables := strings.Join(nonlockedTables, ", ")
		var msg string
		if len(tids) > 1 {
			if len(tids) > len(nonlockedTables) {
				msg = "skip unlocking non-locked tables: " + tables + ", other tables unlocked successfully"
			} else {
				msg = "skip unlocking non-locked tables: " + tables
			}
		} else {
			msg = "skip unlocking non-locked table: " + tables
		}
		return msg, err
	}
	return "", err
}
// IsTableLocked checks whether the table is locked, taking Handle.mu for reading.
func (h *Handle) IsTableLocked(tableID int64) bool {
	h.mu.RLock()
	defer h.mu.RUnlock()
	return h.isTableLocked(tableID)
}
// isTableLocked checks whether the table is locked; the caller must already hold Handle.mu.
func (h *Handle) isTableLocked(tableID int64) bool {
	return isTableLocked(h.tableLocked, tableID)
}
// isTableLocked reports whether tableID is present in the tableLocked list.
func isTableLocked(tableLocked []int64, tableID int64) bool {
	// slices.Contains replaces the manual index scan (the file already uses the slices package).
	return slices.Contains(tableLocked, tableID)
}
// lockTableIndexOf returns the index of tableID in the tableLocked list,
// or -1 when the table is not locked.
func lockTableIndexOf(tableLocked []int64, tableID int64) int {
	// slices.Index performs the same linear scan as the original hand-written loop.
	return slices.Index(tableLocked, tableID)
}
// removeIfTableLocked removes tableID from the tableLocked list if present.
// It returns whether the table was found, plus the (possibly shortened) list.
func removeIfTableLocked(tableLocked []int64, tableID int64) (bool, []int64) {
	idx := slices.Index(tableLocked, tableID)
	if idx < 0 {
		return false, tableLocked
	}
	// slices.Delete removes the element in place, like append(a[:i], a[i+1:]...).
	return true, slices.Delete(tableLocked, idx, idx+1)
}
// withRestrictedSQLExecutor borrows a session from the pool, runs fn with its
// restricted SQL executor, and returns the session to the pool afterwards.
func (h *Handle) withRestrictedSQLExecutor(ctx context.Context, fn func(context.Context, sqlexec.RestrictedSQLExecutor) ([]chunk.Row, []*ast.ResultField, error)) ([]chunk.Row, []*ast.ResultField, error) {
	se, err := h.pool.Get()
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	defer h.pool.Put(se)
	return fn(ctx, se.(sqlexec.RestrictedSQLExecutor))
}
// execRestrictedSQL runs sql as internal restricted SQL in the current session,
// tagging ctx with the stats internal-transaction source type.
func (h *Handle) execRestrictedSQL(ctx context.Context, sql string, params ...interface{}) ([]chunk.Row, []*ast.ResultField, error) {
	ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnStats)
	return h.withRestrictedSQLExecutor(ctx, func(ctx context.Context, exec sqlexec.RestrictedSQLExecutor) ([]chunk.Row, []*ast.ResultField, error) {
		return exec.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseCurSession}, sql, params...)
	})
}
// execRestrictedSQLWithStatsVer is like execRestrictedSQL but additionally
// applies the analyze exec option for statsVer, the analyze-snapshot and
// partition-prune-mode options, and registers the statement with the sys
// process tracker under procTrackID.
func (h *Handle) execRestrictedSQLWithStatsVer(ctx context.Context, statsVer int, procTrackID uint64, analyzeSnapshot bool, sql string, params ...interface{}) ([]chunk.Row, []*ast.ResultField, error) {
	ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnStats)
	return h.withRestrictedSQLExecutor(ctx, func(ctx context.Context, exec sqlexec.RestrictedSQLExecutor) ([]chunk.Row, []*ast.ResultField, error) {
		optFuncs := []sqlexec.OptionFuncAlias{
			execOptionForAnalyze[statsVer],
			sqlexec.GetAnalyzeSnapshotOption(analyzeSnapshot),
			sqlexec.GetPartitionPruneModeOption(string(h.CurrentPruneMode())),
			sqlexec.ExecOptionUseCurSession,
			sqlexec.ExecOptionWithSysProcTrack(procTrackID, h.sysProcTracker.Track, h.sysProcTracker.UnTrack),
		}
		return exec.ExecRestrictedSQL(ctx, optFuncs, sql, params...)
	})
}
// execRestrictedSQLWithSnapshot is like execRestrictedSQL but reads with the
// given KV snapshot timestamp.
func (h *Handle) execRestrictedSQLWithSnapshot(ctx context.Context, sql string, snapshot uint64, params ...interface{}) ([]chunk.Row, []*ast.ResultField, error) {
	ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnStats)
	return h.withRestrictedSQLExecutor(ctx, func(ctx context.Context, exec sqlexec.RestrictedSQLExecutor) ([]chunk.Row, []*ast.ResultField, error) {
		optFuncs := []sqlexec.OptionFuncAlias{
			sqlexec.ExecOptionWithSnapshot(snapshot),
			sqlexec.ExecOptionUseCurSession,
		}
		return exec.ExecRestrictedSQL(ctx, optFuncs, sql, params...)
	})
}
// Clear the statsCache, only for test.
// It replaces the cache, drains pending DDL events, resets the session
// variables used by tests, and clears all collector maps.
func (h *Handle) Clear() {
	// TODO: Here h.mu seems to protect all the fields of Handle. Is is reasonable?
	h.mu.Lock()
	// Named newCache so the local does not shadow the imported `cache` package.
	newCache, err := cache.NewStatsCache()
	if err != nil {
		logutil.BgLogger().Warn("create stats cache failed", zap.Error(err))
		h.mu.Unlock()
		return
	}
	h.statsCache.Replace(newCache)
	// Drain any queued DDL events.
	for len(h.ddlEventCh) > 0 {
		<-h.ddlEventCh
	}
	h.mu.ctx.GetSessionVars().InitChunkSize = 1
	h.mu.ctx.GetSessionVars().MaxChunkSize = 1
	h.mu.ctx.GetSessionVars().EnableChunkRPC = false
	h.mu.ctx.GetSessionVars().SetProjectionConcurrency(0)
	h.listHead.ClearForTest()
	h.globalMap.Lock()
	h.globalMap.data = make(tableDeltaMap)
	h.globalMap.Unlock()
	h.colMap.Lock()
	h.colMap.data = make(colStatsUsageMap)
	h.colMap.Unlock()
	h.mu.Unlock()
}
// sessionPool abstracts a pool of reusable session resources used for internal SQL.
type sessionPool interface {
	Get() (pools.Resource, error)
	Put(pools.Resource)
}
// NewHandle creates a Handle for update stats.
// ctx is the session context guarded by Handle.mu; initStatsCtx is reserved for
// initStats; lease is the stats lease; pool supplies sessions for internal SQL.
func NewHandle(ctx, initStatsCtx sessionctx.Context, lease time.Duration, pool sessionPool, tracker sessionctx.SysProcTracker, autoAnalyzeProcIDGetter func() uint64) (*Handle, error) {
	cfg := config.GetGlobalConfig()
	handle := &Handle{
		gpool:                   gp.New(math.MaxInt16, time.Minute),
		ddlEventCh:              make(chan *ddlUtil.Event, 1000),
		listHead:                NewSessionStatsCollector(),
		idxUsageListHead:        &SessionIndexUsageCollector{mapper: make(indexUsageMap)},
		pool:                    pool,
		sysProcTracker:          tracker,
		autoAnalyzeProcIDGetter: autoAnalyzeProcIDGetter,
		InitStatsDone:           make(chan struct{}),
	}
	handle.initStatsCtx = initStatsCtx
	handle.lease.Store(lease)
	handle.mu.ctx = ctx
	statsCache, err := cache.NewStatsCachePointer()
	if err != nil {
		return nil, err
	}
	handle.statsCache = statsCache
	handle.globalMap.data = make(tableDeltaMap)
	handle.colMap.data = make(colStatsUsageMap)
	// Size the concurrent stats-load structures from the server configuration.
	handle.StatsLoad.SubCtxs = make([]sessionctx.Context, cfg.Performance.StatsLoadConcurrency)
	handle.StatsLoad.NeededItemsCh = make(chan *NeededItemTask, cfg.Performance.StatsLoadQueueSize)
	handle.StatsLoad.TimeoutItemsCh = make(chan *NeededItemTask, cfg.Performance.StatsLoadQueueSize)
	handle.StatsLoad.WorkingColMap = map[model.TableItemID][]chan stmtctx.StatsLoadResult{}
	err = handle.RefreshVars()
	if err != nil {
		return nil, err
	}
	return handle, nil
}
// Lease returns the stats lease.
// Safe for concurrent use: the value is held in an atomic Duration.
func (h *Handle) Lease() time.Duration {
	return h.lease.Load()
}
// SetLease sets the stats lease.
// Safe for concurrent use: the value is held in an atomic Duration.
func (h *Handle) SetLease(lease time.Duration) {
	h.lease.Store(lease)
}
// DurationToTS converts duration to timestamp: the duration's millisecond
// count becomes the physical part of the TSO and the logical part is 0.
func DurationToTS(d time.Duration) uint64 {
	// d.Milliseconds() is the idiomatic equivalent of d.Nanoseconds()/int64(time.Millisecond).
	return oracle.ComposeTS(d.Milliseconds(), 0)
}
// UpdateStatsHealthyMetrics updates stats healthy distribution metrics according to stats cache.
// Buckets: [0]=healthy<50, [1]=50..79, [2]=80..99, [3]=100+, [4]=total with a healthy value.
func (h *Handle) UpdateStatsHealthyMetrics() {
	v := h.statsCache.Load()
	if v == nil {
		return
	}
	distribution := make([]int64, 5)
	for _, tbl := range v.Values() {
		healthy, ok := tbl.GetStatsHealthy()
		if !ok {
			continue
		}
		switch {
		case healthy < 50:
			distribution[0]++
		case healthy < 80:
			distribution[1]++
		case healthy < 100:
			distribution[2]++
		default:
			distribution[3]++
		}
		distribution[4]++
	}
	for i, val := range distribution {
		handle_metrics.StatsHealthyGauges[i].Set(float64(val))
	}
}
// Update reads stats meta from store and updates the stats map.
func (h *Handle) Update(is infoschema.InfoSchema, opts ...cache.TableStatsOpt) error {
	oldCache := h.statsCache.Load()
	lastVersion := oldCache.Version()
	// We need this because for two tables, the smaller version may write later than the one with larger version.
	// Consider the case that there are two tables A and B, their version and commit time is (A0, A1) and (B0, B1),
	// and A0 < B0 < B1 < A1. We will first read the stats of B, and update the lastVersion to B0, but we cannot read
	// the table stats of A0 if we read stats that greater than lastVersion which is B0.
	// We can read the stats if the diff between commit time and version is less than three lease.
	offset := DurationToTS(3 * h.Lease())
	if oldCache.Version() >= offset {
		lastVersion = lastVersion - offset
	} else {
		lastVersion = 0
	}
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	rows, _, err := h.execRestrictedSQL(ctx, "SELECT version, table_id, modify_count, count from mysql.stats_meta where version > %? order by version", lastVersion)
	if err != nil {
		return errors.Trace(err)
	}
	// NOTE(review): option is populated from opts but never referenced below;
	// opts are passed again to updateStatsCache — confirm whether this is dead code.
	option := &cache.TableStatsOption{}
	for _, opt := range opts {
		opt(option)
	}
	tables := make([]*statistics.Table, 0, len(rows))
	deletedTableIDs := make([]int64, 0, len(rows))
	for _, row := range rows {
		version := row.GetUint64(0)
		physicalID := row.GetInt64(1)
		modifyCount := row.GetInt64(2)
		count := row.GetInt64(3)
		table, ok := h.getTableByPhysicalID(is, physicalID)
		if !ok {
			logutil.BgLogger().Debug("unknown physical ID in stats meta table, maybe it has been dropped", zap.Int64("ID", physicalID))
			deletedTableIDs = append(deletedTableIDs, physicalID)
			continue
		}
		tableInfo := table.Meta()
		// Skip tables whose cached stats are already at least as new as this row.
		if oldTbl, ok := oldCache.GetFromInternal(physicalID); ok && oldTbl.Version >= version && tableInfo.UpdateTS == oldTbl.TblInfoUpdateTS {
			continue
		}
		tbl, err := h.TableStatsFromStorage(tableInfo, physicalID, false, 0)
		// Error is not nil may mean that there are some ddl changes on this table, we will not update it.
		if err != nil {
			logutil.BgLogger().Error("error occurred when read table stats", zap.String("category", "stats"), zap.String("table", tableInfo.Name.O), zap.Error(err))
			continue
		}
		if tbl == nil {
			deletedTableIDs = append(deletedTableIDs, physicalID)
			continue
		}
		// Refresh the meta fields read from mysql.stats_meta.
		tbl.Version = version
		tbl.RealtimeCount = count
		tbl.ModifyCount = modifyCount
		tbl.Name = getFullTableName(is, tableInfo)
		tbl.TblInfoUpdateTS = tableInfo.UpdateTS
		tables = append(tables, tbl)
	}
	h.updateStatsCache(oldCache, tables, deletedTableIDs, opts...)
	return nil
}
// UpdateSessionVar updates the necessary session variables for the stats reader.
// It reads the global tidb_analyze_version system variable and mirrors it into
// the internal session's AnalyzeVersion, holding h.mu for the duration.
// Returns an error when reading or parsing the variable fails.
func (h *Handle) UpdateSessionVar() error {
	h.mu.Lock()
	defer h.mu.Unlock()
	verInString, err := h.mu.ctx.GetSessionVars().GlobalVarsAccessor.GetGlobalSysVar(variable.TiDBAnalyzeVersion)
	if err != nil {
		return err
	}
	ver, err := strconv.ParseInt(verInString, 10, 64)
	if err != nil {
		return err
	}
	h.mu.ctx.GetSessionVars().AnalyzeVersion = int(ver)
	// Return nil explicitly rather than the stale err variable (which is
	// necessarily nil here) — clearer and immune to future reordering.
	return nil
}
// GlobalStats is used to store the statistics contained in the global-level stats
// which is generated by the merge of partition-level stats.
// It will both store the column stats and index stats.
// In the column statistics, the variable `num` is equal to the number of columns in the partition table.
// In the index statistics, the variable `num` is always equal to one.
type GlobalStats struct {
	// Hg, Cms, TopN and Fms hold one merged sketch per tracked column/index;
	// each slice has length Num.
	Hg []*statistics.Histogram
	Cms []*statistics.CMSketch
	TopN []*statistics.TopN
	Fms []*statistics.FMSketch
	// MissingPartitionStats records human-readable descriptions of partitions
	// (or partition columns/indexes) whose stats were absent and skipped.
	MissingPartitionStats []string
	// Num is the number of columns/indexes covered by the slices above.
	Num int
	// Count is the merged realtime row count; ModifyCount the merged
	// modification count across all partitions.
	Count int64
	ModifyCount int64
}
// MergePartitionStats2GlobalStatsByTableID merge the partition-level stats to global-level stats based on the tableID.
// It resolves the partitioned table for physicalID, delegates the merge to
// mergePartitionStats2GlobalStats, and logs a warning listing any partitions
// whose stats were missing and therefore skipped.
func (h *Handle) MergePartitionStats2GlobalStatsByTableID(sc sessionctx.Context,
	opts map[ast.AnalyzeOptionType]uint64, is infoschema.InfoSchema,
	physicalID int64, isIndex int, histIDs []int64,
	tablePartitionStats map[int64]*statistics.Table) (globalStats *GlobalStats, err error) {
	// Resolve the partitioned table owning this physical ID.
	globalTable, ok := h.getTableByPhysicalID(is, physicalID)
	if !ok {
		return nil, errors.Errorf("unknown physical ID %d in stats meta table, maybe it has been dropped", physicalID)
	}
	globalTableInfo := globalTable.Meta()
	globalStats, err = h.mergePartitionStats2GlobalStats(sc, opts, is, globalTableInfo, isIndex, histIDs, tablePartitionStats)
	if err != nil {
		return globalStats, err
	}
	if len(globalStats.MissingPartitionStats) > 0 {
		// Describe what was being merged for the warning message.
		item := "columns"
		if isIndex != 0 {
			item = "index"
			if len(histIDs) > 0 {
				item += " " + globalTableInfo.FindIndexNameByID(histIDs[0])
			}
		}
		logutil.BgLogger().Warn("missing partition stats when merging global stats", zap.String("table", globalTableInfo.Name.L),
			zap.String("item", item), zap.Strings("missing", globalStats.MissingPartitionStats))
	}
	return globalStats, nil
}
// loadTablePartitionStats loads the stats of one partition (physical ID ==
// partition ID) from storage. A nil table with nil error from storage means
// the partition-level stats are absent, which is reported as
// ErrPartitionStatsMissing.
func (h *Handle) loadTablePartitionStats(tableInfo *model.TableInfo, partitionDef *model.PartitionDefinition) (*statistics.Table, error) {
	stats, err := h.TableStatsFromStorage(tableInfo, partitionDef.ID, true, 0)
	if err != nil {
		return nil, err
	}
	if stats == nil {
		missing := fmt.Sprintf("table `%s` partition `%s`", tableInfo.Name.L, partitionDef.Name.L)
		return nil, types.ErrPartitionStatsMissing.GenWithStackByArgs(missing)
	}
	return stats, nil
}
// mergePartitionStats2GlobalStats merges the partition-level stats into
// global-level stats based on the table info.
//
// For each requested column (isIndex == 0) or index (isIndex == 1) it first
// collects the histogram/CMSketch/TopN/FMSketch from every partition, then
// merges them. Depending on tidb_skip_missing_partition_stats, partitions with
// missing stats are either recorded in globalStats.MissingPartitionStats and
// skipped, or cause an error.
func (h *Handle) mergePartitionStats2GlobalStats(sc sessionctx.Context,
	opts map[ast.AnalyzeOptionType]uint64, is infoschema.InfoSchema, globalTableInfo *model.TableInfo,
	isIndex int, histIDs []int64,
	allPartitionStats map[int64]*statistics.Table) (globalStats *GlobalStats, err error) {
	partitionNum := len(globalTableInfo.Partition.Definitions)
	// initialized the globalStats
	globalStats = new(GlobalStats)
	// An empty histIDs means "merge every mergeable column".
	if len(histIDs) == 0 {
		for _, col := range globalTableInfo.Columns {
			// The virtual generated column stats can not be merged to the global stats.
			if col.IsGenerated() && !col.GeneratedStored {
				continue
			}
			histIDs = append(histIDs, col.ID)
		}
	}
	globalStats.Num = len(histIDs)
	globalStats.Count = 0
	globalStats.Hg = make([]*statistics.Histogram, globalStats.Num)
	globalStats.Cms = make([]*statistics.CMSketch, globalStats.Num)
	globalStats.TopN = make([]*statistics.TopN, globalStats.Num)
	globalStats.Fms = make([]*statistics.FMSketch, globalStats.Num)
	// The first dimension of the slices below is the column/index, the second
	// is the partition. All topN and histograms need to be collected before
	// they can be merged, so we store all of the partition-level stats first
	// and merge them together afterwards.
	allHg := make([][]*statistics.Histogram, globalStats.Num)
	allCms := make([][]*statistics.CMSketch, globalStats.Num)
	allTopN := make([][]*statistics.TopN, globalStats.Num)
	allFms := make([][]*statistics.FMSketch, globalStats.Num)
	for i := 0; i < globalStats.Num; i++ {
		allHg[i] = make([]*statistics.Histogram, 0, partitionNum)
		allCms[i] = make([]*statistics.CMSketch, 0, partitionNum)
		allTopN[i] = make([]*statistics.TopN, 0, partitionNum)
		allFms[i] = make([]*statistics.FMSketch, 0, partitionNum)
	}
	skipMissingPartitionStats := sc.GetSessionVars().SkipMissingPartitionStats
	if sc.GetSessionVars().InRestrictedSQL {
		// For AutoAnalyze and HandleDDLEvent(ActionDropTablePartition), we need to use @@global.tidb_skip_missing_partition_stats
		// BUG FIX: this previously read variable.TiDBAnalyzeSkipColumnTypes — a
		// different system variable — so the global setting was never honored;
		// the comment above and the log message below show the intended variable.
		val, err1 := sc.GetSessionVars().GlobalVarsAccessor.GetGlobalSysVar(variable.TiDBSkipMissingPartitionStats)
		if err1 != nil {
			logutil.BgLogger().Error("loading tidb_skip_missing_partition_stats failed", zap.Error(err1))
			err = err1
			return
		}
		skipMissingPartitionStats = variable.TiDBOptOn(val)
	}
	for _, def := range globalTableInfo.Partition.Definitions {
		partitionID := def.ID
		partitionTable, ok := h.getTableByPhysicalID(is, partitionID)
		if !ok {
			err = errors.Errorf("unknown physical ID %d in stats meta table, maybe it has been dropped", partitionID)
			return
		}
		tableInfo := partitionTable.Meta()
		var partitionStats *statistics.Table
		if allPartitionStats != nil {
			partitionStats, ok = allPartitionStats[partitionID]
		}
		// If pre-load partition stats isn't provided, then we load partition stats directly and set it into allPartitionStats
		if allPartitionStats == nil || partitionStats == nil || !ok {
			var err1 error
			partitionStats, err1 = h.loadTablePartitionStats(tableInfo, &def)
			if err1 != nil {
				// BUG FIX: the missing-stats check must inspect err1 (the error
				// just returned), not the named return err, which is always nil
				// at this point — the old code never took the skip branch.
				if skipMissingPartitionStats && types.ErrPartitionStatsMissing.Equal(err1) {
					globalStats.MissingPartitionStats = append(globalStats.MissingPartitionStats, fmt.Sprintf("partition `%s`", def.Name.L))
					continue
				}
				err = err1
				return
			}
			if allPartitionStats == nil {
				allPartitionStats = make(map[int64]*statistics.Table)
			}
			allPartitionStats[partitionID] = partitionStats
		}
		for i := 0; i < globalStats.Num; i++ {
			hg, cms, topN, fms, analyzed := partitionStats.GetStatsInfo(histIDs[i], isIndex == 1)
			skipPartition := false
			if !analyzed {
				var missingPart string
				if isIndex == 0 {
					missingPart = fmt.Sprintf("partition `%s` column `%s`", def.Name.L, tableInfo.FindColumnNameByID(histIDs[i]))
				} else {
					missingPart = fmt.Sprintf("partition `%s` index `%s`", def.Name.L, tableInfo.FindIndexNameByID(histIDs[i]))
				}
				if !skipMissingPartitionStats {
					err = types.ErrPartitionStatsMissing.GenWithStackByArgs(fmt.Sprintf("table `%s` %s", tableInfo.Name.L, missingPart))
					return
				}
				globalStats.MissingPartitionStats = append(globalStats.MissingPartitionStats, missingPart)
				skipPartition = true
			}
			// partition stats is not empty but column stats(hist, topn) is missing
			if partitionStats.RealtimeCount > 0 && (hg == nil || hg.TotalRowCount() <= 0) && (topN == nil || topN.TotalCount() <= 0) {
				var missingPart string
				if isIndex == 0 {
					missingPart = fmt.Sprintf("partition `%s` column `%s`", def.Name.L, tableInfo.FindColumnNameByID(histIDs[i]))
				} else {
					missingPart = fmt.Sprintf("partition `%s` index `%s`", def.Name.L, tableInfo.FindIndexNameByID(histIDs[i]))
				}
				if !skipMissingPartitionStats {
					err = types.ErrPartitionColumnStatsMissing.GenWithStackByArgs(fmt.Sprintf("table `%s` %s", tableInfo.Name.L, missingPart))
					return
				}
				globalStats.MissingPartitionStats = append(globalStats.MissingPartitionStats, missingPart+" hist and topn")
				skipPartition = true
			}
			if i == 0 {
				// In a partition, we will only update globalStats.Count once
				globalStats.Count += partitionStats.RealtimeCount
				globalStats.ModifyCount += partitionStats.ModifyCount
			}
			if !skipPartition {
				allHg[i] = append(allHg[i], hg)
				allCms[i] = append(allCms[i], cms)
				allTopN[i] = append(allTopN[i], topN)
				allFms[i] = append(allFms[i], fms)
			}
		}
	}
	// After collect all of the statistics from the partition-level stats,
	// we should merge them together.
	for i := 0; i < globalStats.Num; i++ {
		if len(allHg[i]) == 0 {
			// If all partitions have no stats, we skip merging global stats because it may not handle the case `len(allHg[i]) == 0`
			// correctly. It can avoid unexpected behaviors such as nil pointer panic.
			continue
		}
		// Merge CMSketch
		globalStats.Cms[i] = allCms[i][0].Copy()
		for j := 1; j < len(allCms[i]); j++ {
			err = globalStats.Cms[i].MergeCMSketch(allCms[i][j])
			if err != nil {
				return
			}
		}
		// Merge topN. We need to merge TopN before merging the histogram.
		// Because after merging TopN, some numbers will be left.
		// These remaining topN numbers will be used as a separate bucket for later histogram merging.
		var popedTopN []statistics.TopNMeta
		wrapper := statistics.NewStatsWrapper(allHg[i], allTopN[i])
		globalStats.TopN[i], popedTopN, allHg[i], err = mergeGlobalStatsTopN(h.gpool, sc, wrapper, sc.GetSessionVars().StmtCtx.TimeZone, sc.GetSessionVars().AnalyzeVersion, uint32(opts[ast.AnalyzeOptNumTopN]), isIndex == 1)
		if err != nil {
			return
		}
		// Merge histogram
		globalStats.Hg[i], err = statistics.MergePartitionHist2GlobalHist(sc.GetSessionVars().StmtCtx, allHg[i], popedTopN, int64(opts[ast.AnalyzeOptNumBuckets]), isIndex == 1)
		if err != nil {
			return
		}
		// NOTICE: after merging bucket NDVs have the trend to be underestimated, so for safe we don't use them.
		for j := range globalStats.Hg[i].Buckets {
			globalStats.Hg[i].Buckets[j].NDV = 0
		}
		// Update NDV of global-level stats
		globalStats.Fms[i] = allFms[i][0].Copy()
		for j := 1; j < len(allFms[i]); j++ {
			globalStats.Fms[i].MergeFMSketch(allFms[i][j])
		}
		// update the NDV, capped by the merged global row count
		globalStatsNDV := globalStats.Fms[i].NDV()
		if globalStatsNDV > globalStats.Count {
			globalStatsNDV = globalStats.Count
		}
		globalStats.Hg[i].NDV = globalStatsNDV
	}
	return
}
// mergeGlobalStatsTopN merges the per-partition TopN structures into one
// global TopN. It runs serially when the session's merge concurrency is below
// 2 (or for version-1 stats via the same path), otherwise it splits the work
// into batches and merges concurrently.
func mergeGlobalStatsTopN(gp *gp.Pool, sc sessionctx.Context, wrapper *statistics.StatsWrapper,
	timeZone *time.Location, version int, n uint32, isIndex bool) (*statistics.TopN,
	[]statistics.TopNMeta, []*statistics.Histogram, error) {
	concurrency := sc.GetSessionVars().AnalyzePartitionMergeConcurrency
	killer := &sc.GetSessionVars().Killed
	if concurrency < 2 {
		return statistics.MergePartTopN2GlobalTopN(timeZone, version, wrapper.AllTopN, n, wrapper.AllHg, isIndex, killer)
	}
	// Clamp the per-worker batch size to [1, MaxPartitionMergeBatchSize].
	batchSize := len(wrapper.AllTopN) / concurrency
	switch {
	case batchSize < 1:
		batchSize = 1
	case batchSize > MaxPartitionMergeBatchSize:
		batchSize = MaxPartitionMergeBatchSize
	}
	return MergeGlobalStatsTopNByConcurrency(gp, concurrency, batchSize, wrapper, timeZone, version, n, isIndex, killer)
}
// MergeGlobalStatsTopNByConcurrency merge partition topN by concurrency
// To merge global stats topn by concurrency, we will separate the partition topn in concurrency part and deal it with different worker.
// mergeConcurrency is used to control the total concurrency of the running worker, and mergeBatchSize is sued to control
// the partition size for each worker to solve it
func MergeGlobalStatsTopNByConcurrency(gp *gp.Pool, mergeConcurrency, mergeBatchSize int, wrapper *statistics.StatsWrapper,
	timeZone *time.Location, version int, n uint32, isIndex bool, killed *uint32) (*statistics.TopN,
	[]statistics.TopNMeta, []*statistics.Histogram, error) {
	// Never run more workers than there are partitions to merge.
	if len(wrapper.AllTopN) < mergeConcurrency {
		mergeConcurrency = len(wrapper.AllTopN)
	}
	// Split [0, len(AllTopN)) into half-open [start, end) batches of at most
	// mergeBatchSize partitions each.
	tasks := make([]*statistics.TopnStatsMergeTask, 0)
	for start := 0; start < len(wrapper.AllTopN); {
		end := start + mergeBatchSize
		if end > len(wrapper.AllTopN) {
			end = len(wrapper.AllTopN)
		}
		task := statistics.NewTopnStatsMergeTask(start, end)
		tasks = append(tasks, task)
		start = end
	}
	var wg sync.WaitGroup
	taskNum := len(tasks)
	// Channels are buffered for all tasks/responses so neither senders nor
	// workers block after the other side finishes.
	taskCh := make(chan *statistics.TopnStatsMergeTask, taskNum)
	respCh := make(chan *statistics.TopnStatsMergeResponse, taskNum)
	for i := 0; i < mergeConcurrency; i++ {
		worker := statistics.NewTopnStatsMergeWorker(taskCh, respCh, wrapper, killed)
		wg.Add(1)
		gp.Go(func() {
			defer wg.Done()
			worker.Run(timeZone, isIndex, n, version)
		})
	}
	for _, task := range tasks {
		taskCh <- task
	}
	// Closing taskCh lets the workers drain and exit; respCh is closed only
	// after all workers are done so the range below terminates.
	close(taskCh)
	wg.Wait()
	close(respCh)
	resps := make([]*statistics.TopnStatsMergeResponse, 0)
	// handle Error
	hasErr := false
	errMsg := make([]string, 0)
	for resp := range respCh {
		if resp.Err != nil {
			hasErr = true
			errMsg = append(errMsg, resp.Err.Error())
		}
		resps = append(resps, resp)
	}
	if hasErr {
		return nil, nil, nil, errors.New(strings.Join(errMsg, ","))
	}
	// fetch the response from each worker and merge them into global topn stats
	sorted := make([]statistics.TopNMeta, 0, mergeConcurrency)
	leftTopn := make([]statistics.TopNMeta, 0)
	for _, resp := range resps {
		if resp.TopN != nil {
			sorted = append(sorted, resp.TopN.TopN...)
		}
		leftTopn = append(leftTopn, resp.PopedTopn...)
	}
	// Keep the n largest entries globally; everything popped out joins the
	// leftovers that later feed histogram merging.
	globalTopN, popedTopn := statistics.GetMergedTopNFromSortedSlice(sorted, n)
	result := append(leftTopn, popedTopn...)
	statistics.SortTopnMeta(result)
	return globalTopN, result, wrapper.AllHg, nil
}
// getTableByPhysicalID resolves a physical ID (a table ID or a partition ID)
// to its table. The partition-ID -> table-ID map is rebuilt lazily whenever
// the schema version changes, under h.schemaMu.
func (h *Handle) getTableByPhysicalID(is infoschema.InfoSchema, physicalID int64) (table.Table, bool) {
	h.schemaMu.Lock()
	defer h.schemaMu.Unlock()
	if h.schemaMu.schemaVersion != is.SchemaMetaVersion() {
		h.schemaMu.schemaVersion = is.SchemaMetaVersion()
		h.schemaMu.pid2tid = buildPartitionID2TableID(is)
	}
	// A known partition ID resolves to its parent table; otherwise treat the
	// physical ID as a plain table ID.
	if tid, ok := h.schemaMu.pid2tid[physicalID]; ok {
		return is.TableByID(tid)
	}
	return is.TableByID(physicalID)
}
// buildPartitionID2TableID scans every schema and builds a map from each
// partition's physical ID to the ID of its owning table.
func buildPartitionID2TableID(is infoschema.InfoSchema) map[int64]int64 {
	pid2tid := make(map[int64]int64)
	for _, schema := range is.AllSchemas() {
		for _, tbl := range schema.Tables {
			partInfo := tbl.GetPartitionInfo()
			if partInfo == nil {
				continue
			}
			for _, def := range partInfo.Definitions {
				pid2tid[def.ID] = tbl.ID
			}
		}
	}
	return pid2tid
}
// GetMemConsumed returns the mem size of statscache consumed
func (h *Handle) GetMemConsumed() (size int64) {
	cached := h.statsCache.Load()
	return cached.Cost()
}
// GetTableStats retrieves the statistics table from cache, and the cache will be updated by a goroutine.
// It is a convenience wrapper around GetPartitionStats that uses the table's
// own ID as the physical ID.
func (h *Handle) GetTableStats(tblInfo *model.TableInfo, opts ...cache.TableStatsOpt) *statistics.Table {
	return h.GetPartitionStats(tblInfo, tblInfo.ID, opts...)
}
// GetPartitionStats retrieves the partition stats from cache.
// On a nil handle or a cache miss it returns pseudo statistics; on a miss the
// pseudo entry may additionally be remembered in the cache (see below).
func (h *Handle) GetPartitionStats(tblInfo *model.TableInfo, pid int64, opts ...cache.TableStatsOpt) *statistics.Table {
	// A nil handle serves pseudo statistics so callers need no special casing.
	if h == nil {
		pseudo := statistics.PseudoTable(tblInfo)
		pseudo.PhysicalID = pid
		return pseudo
	}
	statsCache := h.statsCache.Load()
	option := &cache.TableStatsOption{}
	for _, opt := range opts {
		opt(option)
	}
	var (
		tbl *statistics.Table
		ok  bool
	)
	if option.ByQuery() {
		tbl, ok = statsCache.GetFromUser(pid)
	} else {
		tbl, ok = statsCache.GetFromInternal(pid)
	}
	if ok {
		return tbl
	}
	// Cache miss: fall back to pseudo stats. For non-partitioned tables, or
	// while the cache is still small (<64 entries), cache the pseudo entry.
	tbl = statistics.PseudoTable(tblInfo)
	tbl.PhysicalID = pid
	if tblInfo.GetPartitionInfo() == nil || h.statsCacheLen() < 64 {
		h.updateStatsCache(statsCache, []*statistics.Table{tbl}, nil)
	}
	return tbl
}
// statsCacheLen returns the number of entries currently held in the stats cache.
func (h *Handle) statsCacheLen() int {
	cached := h.statsCache.Load()
	return cached.Len()
}
// initStatsCache replaces the current stats cache wholesale with newCache.
func (h *Handle) initStatsCache(newCache *cache.StatsCache) {
	h.statsCache.Replace(newCache)
}
// updateStatsCache will update statsCache into non COW mode.
// If it is in the COW mode. it overrides the global statsCache with a new one, it may fail
// if the global statsCache has been modified by others already.
// Callers should add retry loop if necessary.
//
// tables are upserted into the cache and deletedIDs are removed from it;
// the boolean result is always true in the current implementation.
func (h *Handle) updateStatsCache(newCache *cache.StatsCache, tables []*statistics.Table, deletedIDs []int64,
	opts ...cache.TableStatsOpt) (updated bool) {
	h.statsCache.UpdateStatsCache(newCache, tables, deletedIDs, opts...)
	return true
}
// LoadNeededHistograms will load histograms for those needed columns/indices.
// It drains the queue of needed items and loads each one from storage; the
// reader-release error is surfaced only when no earlier error occurred.
func (h *Handle) LoadNeededHistograms() (err error) {
	items := statistics.HistogramNeededItems.AllItems()
	reader, err := h.getGlobalStatsReader(0)
	if err != nil {
		return err
	}
	defer func() {
		if releaseErr := h.releaseGlobalStatsReader(reader); releaseErr != nil && err == nil {
			err = releaseErr
		}
	}()
	loadFMSketch := config.GetGlobalConfig().Performance.EnableLoadFMSketch
	for _, item := range items {
		if item.IsIndex {
			err = h.loadNeededIndexHistograms(reader, item, loadFMSketch)
		} else {
			err = h.loadNeededColumnHistograms(reader, item, loadFMSketch)
		}
		if err != nil {
			return err
		}
	}
	return nil
}
// loadNeededColumnHistograms loads the full stats (histogram, CMSketch, TopN
// and optionally FMSketch) of one needed column from storage and installs them
// into the stats cache. The item is removed from HistogramNeededItems once the
// cache update succeeds, or when loading turns out to be unnecessary.
func (h *Handle) loadNeededColumnHistograms(reader *statistics.StatsReader, col model.TableItemID, loadFMSketch bool) (err error) {
	oldCache := h.statsCache.Load()
	tbl, ok := oldCache.GetFromInternal(col.TableID)
	if !ok {
		// Table is not cached; nothing to attach the column stats to.
		return nil
	}
	c, ok := tbl.Columns[col.ID]
	if !ok || !c.IsLoadNeeded() {
		// Column is gone or already loaded; drop the pending item.
		statistics.HistogramNeededItems.Delete(col)
		return nil
	}
	hg, err := statistics.HistogramFromStorage(reader, col.TableID, c.ID, &c.Info.FieldType, c.Histogram.NDV, 0, c.LastUpdateVersion, c.NullCount, c.TotColSize, c.Correlation)
	if err != nil {
		return errors.Trace(err)
	}
	cms, topN, err := statistics.CMSketchAndTopNFromStorage(reader, col.TableID, 0, col.ID)
	if err != nil {
		return errors.Trace(err)
	}
	var fms *statistics.FMSketch
	if loadFMSketch {
		fms, err = statistics.FMSketchFromStorage(reader, col.TableID, 0, col.ID)
		if err != nil {
			return errors.Trace(err)
		}
	}
	rows, _, err := reader.Read("select stats_ver from mysql.stats_histograms where is_index = 0 and table_id = %? and hist_id = %?", col.TableID, col.ID)
	if err != nil {
		return errors.Trace(err)
	}
	if len(rows) == 0 {
		logutil.BgLogger().Error("fail to get stats version for this histogram", zap.Int64("table_id", col.TableID), zap.Int64("hist_id", col.ID))
		return errors.Trace(fmt.Errorf("fail to get stats version for this histogram, table_id:%v, hist_id:%v", col.TableID, col.ID))
	}
	statsVer := rows[0].GetInt64(0)
	colHist := &statistics.Column{
		PhysicalID: col.TableID,
		Histogram:  *hg,
		Info:       c.Info,
		CMSketch:   cms,
		TopN:       topN,
		FMSketch:   fms,
		IsHandle:   c.IsHandle,
		StatsVer:   statsVer,
	}
	if colHist.StatsAvailable() {
		colHist.StatsLoadedStatus = statistics.NewStatsFullLoadStatus()
	}
	// Reload the latest stats cache, otherwise the `updateStatsCache` may fail with high probability, because functions
	// like `GetPartitionStats` called in `fmSketchFromStorage` would have modified the stats cache already.
	oldCache = h.statsCache.Load()
	tbl, ok = oldCache.GetFromInternal(col.TableID)
	if !ok {
		return nil
	}
	// Copy-on-write: mutate a copy so concurrent readers of the cached table
	// are unaffected.
	tbl = tbl.Copy()
	tbl.Columns[c.ID] = colHist
	if h.updateStatsCache(oldCache, []*statistics.Table{tbl}, nil) {
		statistics.HistogramNeededItems.Delete(col)
	}
	return nil
}
// loadNeededIndexHistograms loads the full stats (histogram, CMSketch, TopN
// and optionally FMSketch) of one needed index from storage and installs them
// into the stats cache. The item is removed from HistogramNeededItems once the
// cache update succeeds, or when the index no longer exists in the cached table.
func (h *Handle) loadNeededIndexHistograms(reader *statistics.StatsReader, idx model.TableItemID, loadFMSketch bool) (err error) {
	oldCache := h.statsCache.Load()
	tbl, ok := oldCache.GetFromInternal(idx.TableID)
	if !ok {
		// Table is not cached; nothing to attach the index stats to.
		return nil
	}
	index, ok := tbl.Indices[idx.ID]
	if !ok {
		// Index is gone; drop the pending item.
		statistics.HistogramNeededItems.Delete(idx)
		return nil
	}
	hg, err := statistics.HistogramFromStorage(reader, idx.TableID, index.ID, types.NewFieldType(mysql.TypeBlob), index.Histogram.NDV, 1, index.LastUpdateVersion, index.NullCount, index.TotColSize, index.Correlation)
	if err != nil {
		return errors.Trace(err)
	}
	cms, topN, err := statistics.CMSketchAndTopNFromStorage(reader, idx.TableID, 1, idx.ID)
	if err != nil {
		return errors.Trace(err)
	}
	var fms *statistics.FMSketch
	if loadFMSketch {
		fms, err = statistics.FMSketchFromStorage(reader, idx.TableID, 1, idx.ID)
		if err != nil {
			return errors.Trace(err)
		}
	}
	rows, _, err := reader.Read("select stats_ver from mysql.stats_histograms where is_index = 1 and table_id = %? and hist_id = %?", idx.TableID, idx.ID)
	if err != nil {
		return errors.Trace(err)
	}
	if len(rows) == 0 {
		logutil.BgLogger().Error("fail to get stats version for this histogram", zap.Int64("table_id", idx.TableID), zap.Int64("hist_id", idx.ID))
		return errors.Trace(fmt.Errorf("fail to get stats version for this histogram, table_id:%v, hist_id:%v", idx.TableID, idx.ID))
	}
	idxHist := &statistics.Index{Histogram: *hg, CMSketch: cms, TopN: topN, FMSketch: fms,
		Info: index.Info, StatsVer: rows[0].GetInt64(0),
		Flag: index.Flag, PhysicalID: idx.TableID,
		StatsLoadedStatus: statistics.NewStatsFullLoadStatus()}
	index.LastAnalyzePos.Copy(&idxHist.LastAnalyzePos)
	// Reload the latest cache snapshot before writing, as it may have moved on
	// while the stats were being read from storage.
	oldCache = h.statsCache.Load()
	tbl, ok = oldCache.GetFromInternal(idx.TableID)
	if !ok {
		return nil
	}
	// Copy-on-write: mutate a copy so concurrent readers of the cached table
	// are unaffected.
	tbl = tbl.Copy()
	tbl.Indices[idx.ID] = idxHist
	if h.updateStatsCache(oldCache, []*statistics.Table{tbl}, nil) {
		statistics.HistogramNeededItems.Delete(idx)
	}
	return nil
}
// LastUpdateVersion gets the last update version.
func (h *Handle) LastUpdateVersion() uint64 {
	cached := h.statsCache.Load()
	return cached.Version()
}
// FlushStats flushes the cached stats update into store.
// Pending DDL events are drained and handled first, then the accumulated
// stats delta is dumped to KV. Failures are logged, not returned.
func (h *Handle) FlushStats() {
	for len(h.ddlEventCh) > 0 {
		event := <-h.ddlEventCh
		if err := h.HandleDDLEvent(event); err != nil {
			logutil.BgLogger().Error("handle ddl event fail", zap.String("category", "stats"), zap.Error(err))
		}
	}
	if err := h.DumpStatsDeltaToKV(DumpAll); err != nil {
		logutil.BgLogger().Error("dump stats delta fail", zap.String("category", "stats"), zap.Error(err))
	}
}
// TableStatsFromStorage loads table stats info from storage.
// The cached table (if present) is passed to the storage layer as a base so
// stats can be loaded incrementally. The reader-release error is surfaced
// only when the main path succeeded.
func (h *Handle) TableStatsFromStorage(tableInfo *model.TableInfo, physicalID int64, loadAll bool, snapshot uint64) (_ *statistics.Table, err error) {
	reader, err := h.getGlobalStatsReader(snapshot)
	if err != nil {
		return nil, err
	}
	defer func() {
		err1 := h.releaseGlobalStatsReader(reader)
		if err == nil && err1 != nil {
			err = err1
		}
	}()
	statsTbl, ok := h.statsCache.Load().GetFromInternal(physicalID)
	if !ok {
		statsTbl = nil
	}
	statsTbl, err = statistics.TableStatsFromStorage(reader, tableInfo, physicalID, loadAll, h.Lease(), statsTbl)
	if err != nil {
		return nil, err
	}
	// The previous `if reader.IsHistory() || statsTbl == nil` branch returned
	// exactly the same values as the fall-through, so the check was redundant
	// and has been removed.
	return statsTbl, nil
}
// StatsMetaCountAndModifyCount reads count and modify_count for the given table from mysql.stats_meta.
// A table without a stats_meta row yields (0, 0, nil).
func (h *Handle) StatsMetaCountAndModifyCount(tableID int64) (count, modifyCount int64, err error) {
	reader, err := h.getGlobalStatsReader(0)
	if err != nil {
		return 0, 0, err
	}
	defer func() {
		if releaseErr := h.releaseGlobalStatsReader(reader); releaseErr != nil && err == nil {
			err = releaseErr
		}
	}()
	rows, _, err := reader.Read("select count, modify_count from mysql.stats_meta where table_id = %?", tableID)
	if err != nil {
		return 0, 0, err
	}
	if len(rows) == 0 {
		return 0, 0, nil
	}
	row := rows[0]
	return int64(row.GetUint64(0)), row.GetInt64(1), nil
}
// saveTopNToStorage writes the given TopN rows into mysql.stats_top_n using
// batched multi-row INSERTs. Batches are capped both by batchInsertSize rows
// and by maxInsertLength bytes of SQL text; a nil topN is a no-op.
func saveTopNToStorage(ctx context.Context, exec sqlexec.SQLExecutor, tableID int64, isIndex int, histID int64, topN *statistics.TopN) error {
	if topN == nil {
		return nil
	}
	for i := 0; i < len(topN.TopN); {
		end := i + batchInsertSize
		if end > len(topN.TopN) {
			end = len(topN.TopN)
		}
		sql := new(strings.Builder)
		sql.WriteString("insert into mysql.stats_top_n (table_id, is_index, hist_id, value, count) values ")
		for j := i; j < end; j++ {
			topn := topN.TopN[j]
			val := sqlexec.MustEscapeSQL("(%?, %?, %?, %?, %?)", tableID, isIndex, histID, topn.Encoded, topn.Count)
			if j > i {
				val = "," + val
			}
			// Shrink the batch when the statement would exceed maxInsertLength;
			// the first tuple of a batch is always written regardless of size.
			if j > i && sql.Len()+len(val) > maxInsertLength {
				end = j
				break
			}
			sql.WriteString(val)
		}
		// Resume the next batch after the last tuple actually written.
		i = end
		if _, err := exec.ExecuteInternal(ctx, sql.String()); err != nil {
			return err
		}
	}
	return nil
}
// saveBucketsToStorage writes the histogram's buckets into mysql.stats_buckets
// using batched multi-row INSERTs (capped by batchInsertSize rows and
// maxInsertLength bytes of SQL). It returns the upper bound of the last bucket
// as lastAnalyzePos; a nil histogram is a no-op.
func saveBucketsToStorage(ctx context.Context, exec sqlexec.SQLExecutor, sc *stmtctx.StatementContext, tableID int64, isIndex int, hg *statistics.Histogram) (lastAnalyzePos []byte, err error) {
	if hg == nil {
		return
	}
	for i := 0; i < len(hg.Buckets); {
		end := i + batchInsertSize
		if end > len(hg.Buckets) {
			end = len(hg.Buckets)
		}
		sql := new(strings.Builder)
		sql.WriteString("insert into mysql.stats_buckets (table_id, is_index, hist_id, bucket_id, count, repeats, lower_bound, upper_bound, ndv) values ")
		for j := i; j < end; j++ {
			bucket := hg.Buckets[j]
			// Buckets store cumulative counts in memory; persist per-bucket counts.
			count := bucket.Count
			if j > 0 {
				count -= hg.Buckets[j-1].Count
			}
			var upperBound types.Datum
			upperBound, err = hg.GetUpper(j).ConvertTo(sc, types.NewFieldType(mysql.TypeBlob))
			if err != nil {
				return
			}
			// The last bucket's upper bound is reported as the analyze position.
			if j == len(hg.Buckets)-1 {
				lastAnalyzePos = upperBound.GetBytes()
			}
			var lowerBound types.Datum
			lowerBound, err = hg.GetLower(j).ConvertTo(sc, types.NewFieldType(mysql.TypeBlob))
			if err != nil {
				return
			}
			val := sqlexec.MustEscapeSQL("(%?, %?, %?, %?, %?, %?, %?, %?, %?)", tableID, isIndex, hg.ID, j, count, bucket.Repeat, lowerBound.GetBytes(), upperBound.GetBytes(), bucket.NDV)
			if j > i {
				val = "," + val
			}
			// Shrink the batch when the statement would exceed maxInsertLength;
			// the first tuple of a batch is always written regardless of size.
			if j > i && sql.Len()+len(val) > maxInsertLength {
				end = j
				break
			}
			sql.WriteString(val)
		}
		// Resume the next batch after the last tuple actually written.
		i = end
		if _, err = exec.ExecuteInternal(ctx, sql.String()); err != nil {
			return
		}
	}
	return
}
// SaveTableStatsToStorage saves the stats of a table to storage.
// It serializes writers via h.mu and delegates to the package-level
// SaveTableStatsToStorage with the handle's internal session context.
func (h *Handle) SaveTableStatsToStorage(results *statistics.AnalyzeResults, analyzeSnapshot bool, source string) (err error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	return SaveTableStatsToStorage(h.mu.ctx, results, analyzeSnapshot, source)
}
// SaveTableStatsToStorage saves the stats of a table to storage.
//
// Inside one pessimistic transaction it (1) upserts mysql.stats_meta (locking
// the row against concurrent analyze), (2) persists histograms, TopN, CM/FM
// sketches and buckets per column/index, and (3) persists extended statistics.
// On success with a new version, the historical stats meta is recorded by the
// first deferred function.
func SaveTableStatsToStorage(sctx sessionctx.Context, results *statistics.AnalyzeResults, analyzeSnapshot bool, source string) (err error) {
	// FM sketches are only dumped for partition-level stats.
	needDumpFMS := results.TableID.IsPartitionTable()
	tableID := results.TableID.GetStatisticsID()
	// statsVer stays 0 unless a stats_meta write succeeded; the deferred
	// recorder below only fires for a real, successful update.
	statsVer := uint64(0)
	defer func() {
		if err == nil && statsVer != 0 {
			if err1 := recordHistoricalStatsMeta(sctx, tableID, statsVer, source); err1 != nil {
				logutil.BgLogger().Error("record historical stats meta failed",
					zap.Int64("table-id", tableID),
					zap.Uint64("version", statsVer),
					zap.String("source", source),
					zap.Error(err1))
			}
		}
	}()
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	exec := sctx.(sqlexec.SQLExecutor)
	_, err = exec.ExecuteInternal(ctx, "begin pessimistic")
	if err != nil {
		return err
	}
	// Commit or roll back depending on the final value of err.
	defer func() {
		err = finishTransaction(ctx, exec, err)
	}()
	txn, err := sctx.Txn(true)
	if err != nil {
		return err
	}
	// The transaction's start TS is used as the stats version everywhere below.
	version := txn.StartTS()
	// 1. Save mysql.stats_meta.
	var rs sqlexec.RecordSet
	// Lock this row to prevent writing of concurrent analyze.
	rs, err = exec.ExecuteInternal(ctx, "select snapshot, count, modify_count from mysql.stats_meta where table_id = %? for update", tableID)
	if err != nil {
		return err
	}
	var rows []chunk.Row
	rows, err = sqlexec.DrainRecordSet(ctx, rs, sctx.GetSessionVars().MaxChunkSize)
	if err != nil {
		return err
	}
	err = rs.Close()
	if err != nil {
		return err
	}
	var curCnt, curModifyCnt int64
	if len(rows) > 0 {
		snapshot := rows[0].GetUint64(0)
		// A newer version analyze result has been written, so skip this writing.
		if snapshot >= results.Snapshot && results.StatsVer == statistics.Version2 {
			return nil
		}
		curCnt = int64(rows[0].GetUint64(1))
		curModifyCnt = rows[0].GetInt64(2)
	}
	if len(rows) == 0 || results.StatsVer != statistics.Version2 {
		// First write for this table, or pre-v2 stats: replace the row outright.
		if _, err = exec.ExecuteInternal(ctx, "replace into mysql.stats_meta (version, table_id, count, snapshot) values (%?, %?, %?, %?)", version, tableID, results.Count, results.Snapshot); err != nil {
			return err
		}
		statsVer = version
	} else {
		// V2 stats with an existing row: update counts incrementally,
		// clamping negatives to zero.
		modifyCnt := curModifyCnt - results.BaseModifyCnt
		if modifyCnt < 0 {
			modifyCnt = 0
		}
		logutil.BgLogger().Info("incrementally update modifyCount", zap.String("category", "stats"),
			zap.Int64("tableID", tableID),
			zap.Int64("curModifyCnt", curModifyCnt),
			zap.Int64("results.BaseModifyCnt", results.BaseModifyCnt),
			zap.Int64("modifyCount", modifyCnt))
		var cnt int64
		if analyzeSnapshot {
			cnt = curCnt + results.Count - results.BaseCount
			if cnt < 0 {
				cnt = 0
			}
			logutil.BgLogger().Info("incrementally update count", zap.String("category", "stats"),
				zap.Int64("tableID", tableID),
				zap.Int64("curCnt", curCnt),
				zap.Int64("results.Count", results.Count),
				zap.Int64("results.BaseCount", results.BaseCount),
				zap.Int64("count", cnt))
		} else {
			cnt = results.Count
			if cnt < 0 {
				cnt = 0
			}
			logutil.BgLogger().Info("directly update count", zap.String("category", "stats"),
				zap.Int64("tableID", tableID),
				zap.Int64("results.Count", results.Count),
				zap.Int64("count", cnt))
		}
		if _, err = exec.ExecuteInternal(ctx, "update mysql.stats_meta set version=%?, modify_count=%?, count=%?, snapshot=%? where table_id=%?", version, modifyCnt, cnt, results.Snapshot, tableID); err != nil {
			return err
		}
		statsVer = version
	}
	cache.TableRowStatsCache.Invalidate(tableID)
	// 2. Save histograms.
	for _, result := range results.Ars {
		for i, hg := range result.Hist {
			// It's normal virtual column, skip it.
			if hg == nil {
				continue
			}
			// V2 stats do not persist a CMSketch alongside the TopN.
			var cms *statistics.CMSketch
			if results.StatsVer != statistics.Version2 {
				cms = result.Cms[i]
			}
			cmSketch, err := statistics.EncodeCMSketchWithoutTopN(cms)
			if err != nil {
				return err
			}
			fmSketch, err := statistics.EncodeFMSketch(result.Fms[i])
			if err != nil {
				return err
			}
			// Delete outdated data
			if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_top_n where table_id = %? and is_index = %? and hist_id = %?", tableID, result.IsIndex, hg.ID); err != nil {
				return err
			}
			if err = saveTopNToStorage(ctx, exec, tableID, result.IsIndex, hg.ID, result.TopNs[i]); err != nil {
				return err
			}
			if _, err := exec.ExecuteInternal(ctx, "delete from mysql.stats_fm_sketch where table_id = %? and is_index = %? and hist_id = %?", tableID, result.IsIndex, hg.ID); err != nil {
				return err
			}
			if fmSketch != nil && needDumpFMS {
				if _, err = exec.ExecuteInternal(ctx, "insert into mysql.stats_fm_sketch (table_id, is_index, hist_id, value) values (%?, %?, %?, %?)", tableID, result.IsIndex, hg.ID, fmSketch); err != nil {
					return err
				}
			}
			if _, err = exec.ExecuteInternal(ctx, "replace into mysql.stats_histograms (table_id, is_index, hist_id, distinct_count, version, null_count, cm_sketch, tot_col_size, stats_ver, flag, correlation) values (%?, %?, %?, %?, %?, %?, %?, %?, %?, %?, %?)",
				tableID, result.IsIndex, hg.ID, hg.NDV, version, hg.NullCount, cmSketch, hg.TotColSize, results.StatsVer, statistics.AnalyzeFlag, hg.Correlation); err != nil {
				return err
			}
			if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_buckets where table_id = %? and is_index = %? and hist_id = %?", tableID, result.IsIndex, hg.ID); err != nil {
				return err
			}
			sc := sctx.GetSessionVars().StmtCtx
			var lastAnalyzePos []byte
			lastAnalyzePos, err = saveBucketsToStorage(ctx, exec, sc, tableID, result.IsIndex, hg)
			if err != nil {
				return err
			}
			if len(lastAnalyzePos) > 0 {
				if _, err = exec.ExecuteInternal(ctx, "update mysql.stats_histograms set last_analyze_pos = %? where table_id = %? and is_index = %? and hist_id = %?", lastAnalyzePos, tableID, result.IsIndex, hg.ID); err != nil {
					return err
				}
			}
			// Record the analyze time for columns (not indexes).
			if result.IsIndex == 0 {
				if _, err = exec.ExecuteInternal(ctx, "insert into mysql.column_stats_usage (table_id, column_id, last_analyzed_at) values(%?, %?, current_timestamp()) on duplicate key update last_analyzed_at = values(last_analyzed_at)", tableID, hg.ID); err != nil {
					return err
				}
			}
		}
	}
	// 3. Save extended statistics.
	extStats := results.ExtStats
	if extStats == nil || len(extStats.Stats) == 0 {
		return nil
	}
	var bytes []byte
	var statsStr string
	for name, item := range extStats.Stats {
		bytes, err = json.Marshal(item.ColIDs)
		if err != nil {
			return err
		}
		strColIDs := string(bytes)
		switch item.Tp {
		case ast.StatsTypeCardinality, ast.StatsTypeCorrelation:
			statsStr = fmt.Sprintf("%f", item.ScalarVals)
		case ast.StatsTypeDependency:
			statsStr = item.StringVals
		}
		if _, err = exec.ExecuteInternal(ctx, "replace into mysql.stats_extended values (%?, %?, %?, %?, %?, %?, %?)", name, item.Tp, tableID, strColIDs, statsStr, version, statistics.ExtendedStatsAnalyzed); err != nil {
			return err
		}
	}
	return
}
// SaveStatsToStorage saves the stats to storage.
// If count is negative, both count and modify count would not be used and not be written to the table. Unless, corresponding
// fields in the stats_meta table will be updated.
// TODO: refactor to reduce the number of parameters
func (h *Handle) SaveStatsToStorage(tableID int64, count, modifyCount int64, isIndex int, hg *statistics.Histogram,
	cms *statistics.CMSketch, topN *statistics.TopN, statsVersion int, isAnalyzed int64, updateAnalyzeTime bool, source string) (err error) {
	// statsVer records the stats_meta version written by this call; the
	// historical stats meta is recorded only after a successful commit.
	statsVer := uint64(0)
	defer func() {
		if err == nil && statsVer != 0 {
			h.recordHistoricalStatsMeta(tableID, statsVer, source)
		}
	}()
	// h.mu guards h.mu.ctx, the session all statements below run on.
	h.mu.Lock()
	defer h.mu.Unlock()
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	exec := h.mu.ctx.(sqlexec.SQLExecutor)
	// All writes below run in one pessimistic transaction; finishTransaction
	// commits when err is nil and rolls back otherwise.
	_, err = exec.ExecuteInternal(ctx, "begin pessimistic")
	if err != nil {
		return errors.Trace(err)
	}
	defer func() {
		err = finishTransaction(ctx, exec, err)
	}()
	txn, err := h.mu.ctx.Txn(true)
	if err != nil {
		return errors.Trace(err)
	}
	// The transaction start TS doubles as the new stats version.
	version := txn.StartTS()
	// If the count is less than 0, then we do not want to update the modify count and count.
	if count >= 0 {
		_, err = exec.ExecuteInternal(ctx, "replace into mysql.stats_meta (version, table_id, count, modify_count) values (%?, %?, %?, %?)", version, tableID, count, modifyCount)
		cache.TableRowStatsCache.Invalidate(tableID)
	} else {
		_, err = exec.ExecuteInternal(ctx, "update mysql.stats_meta set version = %? where table_id = %?", version, tableID)
	}
	if err != nil {
		return err
	}
	statsVer = version
	cmSketch, err := statistics.EncodeCMSketchWithoutTopN(cms)
	if err != nil {
		return err
	}
	// Delete outdated data
	if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_top_n where table_id = %? and is_index = %? and hist_id = %?", tableID, isIndex, hg.ID); err != nil {
		return err
	}
	if err = saveTopNToStorage(ctx, exec, tableID, isIndex, hg.ID, topN); err != nil {
		return err
	}
	if _, err := exec.ExecuteInternal(ctx, "delete from mysql.stats_fm_sketch where table_id = %? and is_index = %? and hist_id = %?", tableID, isIndex, hg.ID); err != nil {
		return err
	}
	// Only stats produced by a real ANALYZE get the analyzed flag.
	flag := 0
	if isAnalyzed == 1 {
		flag = statistics.AnalyzeFlag
	}
	if _, err = exec.ExecuteInternal(ctx, "replace into mysql.stats_histograms (table_id, is_index, hist_id, distinct_count, version, null_count, cm_sketch, tot_col_size, stats_ver, flag, correlation) values (%?, %?, %?, %?, %?, %?, %?, %?, %?, %?, %?)",
		tableID, isIndex, hg.ID, hg.NDV, version, hg.NullCount, cmSketch, hg.TotColSize, statsVersion, flag, hg.Correlation); err != nil {
		return err
	}
	// Buckets are fully rewritten: delete old rows, then save the new ones.
	if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_buckets where table_id = %? and is_index = %? and hist_id = %?", tableID, isIndex, hg.ID); err != nil {
		return err
	}
	sc := h.mu.ctx.GetSessionVars().StmtCtx
	var lastAnalyzePos []byte
	lastAnalyzePos, err = saveBucketsToStorage(ctx, exec, sc, tableID, isIndex, hg)
	if err != nil {
		return err
	}
	if isAnalyzed == 1 && len(lastAnalyzePos) > 0 {
		if _, err = exec.ExecuteInternal(ctx, "update mysql.stats_histograms set last_analyze_pos = %? where table_id = %? and is_index = %? and hist_id = %?", lastAnalyzePos, tableID, isIndex, hg.ID); err != nil {
			return err
		}
	}
	// Record the analyze time for columns (is_index == 0) so predicate-column
	// tracking knows when the column was last analyzed.
	if updateAnalyzeTime && isIndex == 0 {
		if _, err = exec.ExecuteInternal(ctx, "insert into mysql.column_stats_usage (table_id, column_id, last_analyzed_at) values(%?, %?, current_timestamp()) on duplicate key update last_analyzed_at = current_timestamp()", tableID, hg.ID); err != nil {
			return err
		}
	}
	return
}
// SaveMetaToStorage will save stats_meta to storage.
func (h *Handle) SaveMetaToStorage(tableID, count, modifyCount int64, source string) (err error) {
	// Record historical stats meta only after the transaction commits.
	statsVer := uint64(0)
	defer func() {
		if err == nil && statsVer != 0 {
			h.recordHistoricalStatsMeta(tableID, statsVer, source)
		}
	}()
	h.mu.Lock()
	defer h.mu.Unlock()
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	exec := h.mu.ctx.(sqlexec.SQLExecutor)
	// NOTE(review): this uses optimistic "begin" while the sibling
	// Save*ToStorage functions use "begin pessimistic" — confirm whether the
	// difference is intentional.
	_, err = exec.ExecuteInternal(ctx, "begin")
	if err != nil {
		return errors.Trace(err)
	}
	defer func() {
		err = finishTransaction(ctx, exec, err)
	}()
	txn, err := h.mu.ctx.Txn(true)
	if err != nil {
		return errors.Trace(err)
	}
	// The transaction start TS doubles as the new stats version.
	version := txn.StartTS()
	_, err = exec.ExecuteInternal(ctx, "replace into mysql.stats_meta (version, table_id, count, modify_count) values (%?, %?, %?, %?)", version, tableID, count, modifyCount)
	statsVer = version
	// The row count may have changed; drop the cached row count for this table.
	cache.TableRowStatsCache.Invalidate(tableID)
	return err
}
// statsMetaByTableIDFromStorage reads the version, modify_count and count of
// a table from mysql.stats_meta. When snapshot is non-zero the query runs at
// that snapshot TS. A missing row yields all-zero results with a nil error.
func (h *Handle) statsMetaByTableIDFromStorage(tableID int64, snapshot uint64) (version uint64, modifyCount, count int64, err error) {
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	const query = "SELECT version, modify_count, count from mysql.stats_meta where table_id = %? order by version"
	var rows []chunk.Row
	if snapshot == 0 {
		rows, _, err = h.execRestrictedSQL(ctx, query, tableID)
	} else {
		rows, _, err = h.execRestrictedSQLWithSnapshot(ctx, query, snapshot, tableID)
	}
	if err != nil || len(rows) == 0 {
		return
	}
	row := rows[0]
	return row.GetUint64(0), row.GetInt64(1), row.GetInt64(2), nil
}
// getGlobalStatsReader acquires h.mu and returns a stats reader bound to the
// handle's session. On success the mutex is intentionally left LOCKED; the
// caller must release it via releaseGlobalStatsReader. On error (including a
// recovered panic) the mutex is unlocked here before returning.
func (h *Handle) getGlobalStatsReader(snapshot uint64) (reader *statistics.StatsReader, err error) {
	h.mu.Lock()
	defer func() {
		// Convert a panic from GetStatsReader into an error so callers can
		// handle it uniformly.
		if r := recover(); r != nil {
			err = fmt.Errorf("getGlobalStatsReader panic %v", r)
		}
		if err != nil {
			h.mu.Unlock()
		}
	}()
	return statistics.GetStatsReader(snapshot, h.mu.ctx.(sqlexec.RestrictedSQLExecutor))
}
// releaseGlobalStatsReader closes a reader obtained from getGlobalStatsReader
// and releases h.mu, which that function left locked on success.
func (h *Handle) releaseGlobalStatsReader(reader *statistics.StatsReader) error {
	defer h.mu.Unlock()
	return reader.Close()
}
// InsertExtendedStats inserts a record into mysql.stats_extended and update version in mysql.stats_meta.
func (h *Handle) InsertExtendedStats(statsName string, colIDs []int64, tp int, tableID int64, ifNotExists bool) (err error) {
	// Record historical stats meta only after a successful commit.
	statsVer := uint64(0)
	defer func() {
		if err == nil && statsVer != 0 {
			h.recordHistoricalStatsMeta(tableID, statsVer, StatsMetaHistorySourceExtendedStats)
		}
	}()
	// Sort column IDs so the serialized JSON form is canonical and can be
	// compared against existing records below.
	slices.Sort(colIDs)
	bytes, err := json.Marshal(colIDs)
	if err != nil {
		return errors.Trace(err)
	}
	strColIDs := string(bytes)
	h.mu.Lock()
	defer h.mu.Unlock()
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	exec := h.mu.ctx.(sqlexec.SQLExecutor)
	_, err = exec.ExecuteInternal(ctx, "begin pessimistic")
	if err != nil {
		return errors.Trace(err)
	}
	defer func() {
		err = finishTransaction(ctx, exec, err)
	}()
	// No need to use `exec.ExecuteInternal` since we have acquired the lock.
	rows, _, err := h.execRestrictedSQL(ctx, "SELECT name, type, column_ids FROM mysql.stats_extended WHERE table_id = %? and status in (%?, %?)", tableID, statistics.ExtendedStatsInited, statistics.ExtendedStatsAnalyzed)
	if err != nil {
		return errors.Trace(err)
	}
	// Reject duplicates: same name, or same type over the same column set.
	for _, row := range rows {
		currStatsName := row.GetString(0)
		currTp := row.GetInt64(1)
		currStrColIDs := row.GetString(2)
		if currStatsName == statsName {
			if ifNotExists {
				return nil
			}
			return errors.Errorf("extended statistics '%s' for the specified table already exists", statsName)
		}
		if tp == int(currTp) && currStrColIDs == strColIDs {
			return errors.Errorf("extended statistics '%s' with same type on same columns already exists", statsName)
		}
	}
	txn, err := h.mu.ctx.Txn(true)
	if err != nil {
		return errors.Trace(err)
	}
	version := txn.StartTS()
	// Bump version in `mysql.stats_meta` to trigger stats cache refresh.
	if _, err = exec.ExecuteInternal(ctx, "UPDATE mysql.stats_meta SET version = %? WHERE table_id = %?", version, tableID); err != nil {
		return err
	}
	statsVer = version
	// Remove the existing 'deleted' records.
	if _, err = exec.ExecuteInternal(ctx, "DELETE FROM mysql.stats_extended WHERE name = %? and table_id = %?", statsName, tableID); err != nil {
		return err
	}
	// Remove the cache item, which is necessary for cases like a cluster with 3 tidb instances, e.g, a, b and c.
	// If tidb-a executes `alter table drop stats_extended` to mark the record as 'deleted', and before this operation
	// is synchronized to other tidb instances, tidb-b executes `alter table add stats_extended`, which would delete
	// the record from the table, tidb-b should delete the cached item synchronously. While for tidb-c, it has to wait for
	// next `Update()` to remove the cached item then.
	h.removeExtendedStatsItem(tableID, statsName)
	const sql = "INSERT INTO mysql.stats_extended(name, type, table_id, column_ids, version, status) VALUES (%?, %?, %?, %?, %?, %?)"
	if _, err = exec.ExecuteInternal(ctx, sql, statsName, tp, tableID, strColIDs, version, statistics.ExtendedStatsInited); err != nil {
		return err
	}
	return
}
// MarkExtendedStatsDeleted update the status of mysql.stats_extended to be `deleted` and the version of mysql.stats_meta.
func (h *Handle) MarkExtendedStatsDeleted(statsName string, tableID int64, ifExists bool) (err error) {
	// Record historical stats meta only after a successful commit.
	statsVer := uint64(0)
	defer func() {
		if err == nil && statsVer != 0 {
			h.recordHistoricalStatsMeta(tableID, statsVer, StatsMetaHistorySourceExtendedStats)
		}
	}()
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	rows, _, err := h.execRestrictedSQL(ctx, "SELECT name FROM mysql.stats_extended WHERE name = %? and table_id = %? and status in (%?, %?)", statsName, tableID, statistics.ExtendedStatsInited, statistics.ExtendedStatsAnalyzed)
	if err != nil {
		return errors.Trace(err)
	}
	if len(rows) == 0 {
		if ifExists {
			return nil
		}
		return fmt.Errorf("extended statistics '%s' for the specified table does not exist", statsName)
	}
	if len(rows) > 1 {
		logutil.BgLogger().Warn("unexpected duplicate extended stats records found", zap.String("name", statsName), zap.Int64("table_id", tableID))
	}
	h.mu.Lock()
	defer h.mu.Unlock()
	exec := h.mu.ctx.(sqlexec.SQLExecutor)
	_, err = exec.ExecuteInternal(ctx, "begin pessimistic")
	if err != nil {
		return errors.Trace(err)
	}
	defer func() {
		// Drop the cached item only when both the function body and the
		// commit succeeded; otherwise the cache would diverge from storage.
		err1 := finishTransaction(ctx, exec, err)
		if err == nil && err1 == nil {
			h.removeExtendedStatsItem(tableID, statsName)
		}
		err = err1
	}()
	txn, err := h.mu.ctx.Txn(true)
	if err != nil {
		return errors.Trace(err)
	}
	// The transaction start TS doubles as the new stats version.
	version := txn.StartTS()
	if _, err = exec.ExecuteInternal(ctx, "UPDATE mysql.stats_meta SET version = %? WHERE table_id = %?", version, tableID); err != nil {
		return err
	}
	statsVer = version
	if _, err = exec.ExecuteInternal(ctx, "UPDATE mysql.stats_extended SET version = %?, status = %? WHERE name = %? and table_id = %?", version, statistics.ExtendedStatsDeleted, statsName, tableID); err != nil {
		return err
	}
	return nil
}
// updateStatsCacheRetryCnt is the maximum number of compare-and-swap attempts
// when updating the stats cache (see removeExtendedStatsItem and
// ReloadExtendedStatistics).
const updateStatsCacheRetryCnt = 5
// removeExtendedStatsItem drops one extended-stats entry from the cached
// table, retrying the optimistic cache swap up to updateStatsCacheRetryCnt
// times. It is a no-op when the table or its extended stats are not cached.
func (h *Handle) removeExtendedStatsItem(tableID int64, statsName string) {
	for attempt := 1; attempt <= updateStatsCacheRetryCnt; attempt++ {
		snapshot := h.statsCache.Load()
		tbl, ok := snapshot.GetFromInternal(tableID)
		if !ok || tbl.ExtendedStats == nil || len(tbl.ExtendedStats.Stats) == 0 {
			return
		}
		updated := tbl.Copy()
		delete(updated.ExtendedStats.Stats, statsName)
		if h.updateStatsCache(snapshot, []*statistics.Table{updated}, nil) {
			return
		}
		// The swap lost a race with a concurrent cache update; log and retry.
		msg := "remove extended stats cache failed, retrying"
		if attempt == updateStatsCacheRetryCnt {
			msg = "remove extended stats cache failed"
		}
		logutil.BgLogger().Info(msg, zap.String("stats_name", statsName), zap.Int64("table_id", tableID))
	}
}
// ReloadExtendedStatistics drops the cache for extended statistics and reload data from mysql.stats_extended.
//
// The result parameter is named so the deferred release below can propagate
// its error to the caller; with an unnamed result the `err = err1` assignment
// was silently discarded.
func (h *Handle) ReloadExtendedStatistics() (err error) {
	reader, err := h.getGlobalStatsReader(0)
	if err != nil {
		return err
	}
	defer func() {
		// releaseGlobalStatsReader also unlocks h.mu acquired by
		// getGlobalStatsReader; surface its error unless one is already set.
		err1 := h.releaseGlobalStatsReader(reader)
		if err1 != nil && err == nil {
			err = err1
		}
	}()
	// Optimistically rebuild the whole cache; retry on a lost CAS race.
	for retry := updateStatsCacheRetryCnt; retry > 0; retry-- {
		oldCache := h.statsCache.Load()
		tables := make([]*statistics.Table, 0, oldCache.Len())
		for _, tbl := range oldCache.Values() {
			t, err := statistics.ExtendedStatsFromStorage(reader, tbl.Copy(), tbl.PhysicalID, true)
			if err != nil {
				return err
			}
			tables = append(tables, t)
		}
		if h.updateStatsCache(oldCache, tables, nil) {
			return nil
		}
	}
	return fmt.Errorf("update stats cache failed for %d attempts", updateStatsCacheRetryCnt)
}
// BuildExtendedStats build extended stats for column groups if needed based on the column samples.
// It returns nil (with nil error) when the table has no registered extended
// stats or none of them could be computed from the given samples.
func (h *Handle) BuildExtendedStats(tableID int64, cols []*model.ColumnInfo, collectors []*statistics.SampleCollector) (*statistics.ExtendedStatsColl, error) {
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	const sql = "SELECT name, type, column_ids FROM mysql.stats_extended WHERE table_id = %? and status in (%?, %?)"
	rows, _, err := h.execRestrictedSQL(ctx, sql, tableID, statistics.ExtendedStatsAnalyzed, statistics.ExtendedStatsInited)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if len(rows) == 0 {
		return nil, nil
	}
	statsColl := statistics.NewExtendedStatsColl()
	for _, row := range rows {
		item := &statistics.ExtendedStatsItem{Tp: uint8(row.GetInt64(1))}
		colIDs := row.GetString(2)
		if err := json.Unmarshal([]byte(colIDs), &item.ColIDs); err != nil {
			logutil.BgLogger().Error("invalid column_ids in mysql.stats_extended, skip collecting extended stats for this row", zap.String("column_ids", colIDs), zap.Error(err))
			continue
		}
		if filled := h.fillExtendedStatsItemVals(item, cols, collectors); filled != nil {
			statsColl.Stats[row.GetString(0)] = filled
		}
	}
	if len(statsColl.Stats) == 0 {
		return nil, nil
	}
	return statsColl, nil
}
// fillExtendedStatsItemVals computes the value of one extended-stats item
// from column samples. Only correlation stats are computed here; cardinality
// and dependency stats yield nil.
func (h *Handle) fillExtendedStatsItemVals(item *statistics.ExtendedStatsItem, cols []*model.ColumnInfo, collectors []*statistics.SampleCollector) *statistics.ExtendedStatsItem {
	if item.Tp == ast.StatsTypeCorrelation {
		return h.fillExtStatsCorrVals(item, cols, collectors)
	}
	return nil
}
// fillExtStatsCorrVals computes the order correlation between the two columns
// named by item.ColIDs from their sample collectors and stores it in
// item.ScalarVals. It returns nil when the item cannot be computed (columns
// not found, or a sort error).
func (h *Handle) fillExtStatsCorrVals(item *statistics.ExtendedStatsItem, cols []*model.ColumnInfo, collectors []*statistics.SampleCollector) *statistics.ExtendedStatsItem {
	// Map the two column IDs to their offsets in cols/collectors.
	colOffsets := make([]int, 0, 2)
	for _, id := range item.ColIDs {
		for i, col := range cols {
			if col.ID == id {
				colOffsets = append(colOffsets, i)
				break
			}
		}
	}
	// Correlation is only defined over exactly two resolvable columns.
	if len(colOffsets) != 2 {
		return nil
	}
	// samplesX and samplesY are in order of handle, i.e, their SampleItem.Ordinals are in order.
	samplesX := collectors[colOffsets[0]].Samples
	// We would modify Ordinal of samplesY, so we make a deep copy.
	samplesY := statistics.CopySampleItems(collectors[colOffsets[1]].Samples)
	sampleNum := mathutil.Min(len(samplesX), len(samplesY))
	// Degenerate cases: a single sample is perfectly correlated; no samples
	// yield zero correlation.
	if sampleNum == 1 {
		item.ScalarVals = 1
		return item
	}
	if sampleNum <= 0 {
		item.ScalarVals = 0
		return item
	}
	// Take the statement context under the lock, then release before the
	// CPU-heavy sorting below.
	h.mu.Lock()
	sc := h.mu.ctx.GetSessionVars().StmtCtx
	h.mu.Unlock()
	var err error
	samplesX, err = statistics.SortSampleItems(sc, samplesX)
	if err != nil {
		return nil
	}
	// Re-index Y's samples by X's sorted order, then sort Y itself; the final
	// Ordinals encode each element's rank in X relative to its rank in Y.
	samplesYInXOrder := make([]*statistics.SampleItem, 0, sampleNum)
	for i, itemX := range samplesX {
		if itemX.Ordinal >= len(samplesY) {
			continue
		}
		itemY := samplesY[itemX.Ordinal]
		itemY.Ordinal = i
		samplesYInXOrder = append(samplesYInXOrder, itemY)
	}
	samplesYInYOrder, err := statistics.SortSampleItems(sc, samplesYInXOrder)
	if err != nil {
		return nil
	}
	var corrXYSum float64
	for i := 1; i < len(samplesYInYOrder); i++ {
		corrXYSum += float64(i) * float64(samplesYInYOrder[i].Ordinal)
	}
	// X means the ordinal of the item in original sequence, Y means the oridnal of the item in the
	// sorted sequence, we know that X and Y value sets are both:
	// 0, 1, ..., sampleNum-1
	// we can simply compute sum(X) = sum(Y) =
	// (sampleNum-1)*sampleNum / 2
	// and sum(X^2) = sum(Y^2) =
	// (sampleNum-1)*sampleNum*(2*sampleNum-1) / 6
	// We use "Pearson correlation coefficient" to compute the order correlation of columns,
	// the formula is based on https://en.wikipedia.org/wiki/Pearson_correlation_coefficient.
	// Note that (itemsCount*corrX2Sum - corrXSum*corrXSum) would never be zero when sampleNum is larger than 1.
	itemsCount := float64(sampleNum)
	corrXSum := (itemsCount - 1) * itemsCount / 2.0
	corrX2Sum := (itemsCount - 1) * itemsCount * (2*itemsCount - 1) / 6.0
	item.ScalarVals = (itemsCount*corrXYSum - corrXSum*corrXSum) / (itemsCount*corrX2Sum - corrXSum*corrXSum)
	return item
}
// SaveExtendedStatsToStorage writes extended stats of a table into mysql.stats_extended.
func (h *Handle) SaveExtendedStatsToStorage(tableID int64, extStats *statistics.ExtendedStatsColl, isLoad bool) (err error) {
	// Record historical stats meta only after a successful commit.
	statsVer := uint64(0)
	defer func() {
		if err == nil && statsVer != 0 {
			h.recordHistoricalStatsMeta(tableID, statsVer, StatsMetaHistorySourceExtendedStats)
		}
	}()
	// Nothing to persist.
	if extStats == nil || len(extStats.Stats) == 0 {
		return nil
	}
	h.mu.Lock()
	defer h.mu.Unlock()
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	exec := h.mu.ctx.(sqlexec.SQLExecutor)
	_, err = exec.ExecuteInternal(ctx, "begin pessimistic")
	if err != nil {
		return errors.Trace(err)
	}
	defer func() {
		err = finishTransaction(ctx, exec, err)
	}()
	txn, err := h.mu.ctx.Txn(true)
	if err != nil {
		return errors.Trace(err)
	}
	// The transaction start TS doubles as the new stats version.
	version := txn.StartTS()
	for name, item := range extStats.Stats {
		bytes, err := json.Marshal(item.ColIDs)
		if err != nil {
			return errors.Trace(err)
		}
		strColIDs := string(bytes)
		// Serialize the computed value: scalar types as a float string,
		// dependency stats as their stored string form.
		var statsStr string
		switch item.Tp {
		case ast.StatsTypeCardinality, ast.StatsTypeCorrelation:
			statsStr = fmt.Sprintf("%f", item.ScalarVals)
		case ast.StatsTypeDependency:
			statsStr = item.StringVals
		}
		// If isLoad is true, it's INSERT; otherwise, it's UPDATE.
		if _, err := exec.ExecuteInternal(ctx, "replace into mysql.stats_extended values (%?, %?, %?, %?, %?, %?, %?)", name, item.Tp, tableID, strColIDs, statsStr, version, statistics.ExtendedStatsAnalyzed); err != nil {
			return err
		}
	}
	// Only bump stats_meta (and record history) when updating existing stats,
	// not when loading.
	if !isLoad {
		if _, err := exec.ExecuteInternal(ctx, "UPDATE mysql.stats_meta SET version = %? WHERE table_id = %?", version, tableID); err != nil {
			return err
		}
		statsVer = version
	}
	return nil
}
// CurrentPruneMode indicates whether tbl support runtime prune for table and first partition id.
func (h *Handle) CurrentPruneMode() variable.PartitionPruneMode {
	mode := h.mu.ctx.GetSessionVars().PartitionPruneMode.Load()
	return variable.PartitionPruneMode(mode)
}
// RefreshVars uses to pull PartitionPruneMethod vars from kv storage.
func (h *Handle) RefreshVars() error {
	// h.mu guards h.mu.ctx, the session whose variables are refreshed.
	h.mu.Lock()
	defer h.mu.Unlock()
	return h.mu.ctx.RefreshVars(context.Background())
}
// CheckAnalyzeVersion checks whether all the statistics versions of this table's columns and indexes are the same.
// It returns true when no non-pseudo stats are available for any physical ID.
func (h *Handle) CheckAnalyzeVersion(tblInfo *model.TableInfo, physicalIDs []int64, version *int) bool {
	// We simply choose one physical id to get its stats.
	var tbl *statistics.Table
	for _, pid := range physicalIDs {
		if tbl = h.GetPartitionStats(tblInfo, pid); !tbl.Pseudo {
			break
		}
	}
	if tbl == nil || tbl.Pseudo {
		return true
	}
	return statistics.CheckAnalyzeVerOnTable(tbl, version)
}
// colStatsTimeInfo carries per-column timestamps read from
// mysql.column_stats_usage. Either field may be nil when the corresponding
// value is absent.
type colStatsTimeInfo struct {
	LastUsedAt     *types.Time // last time the column was used in a query predicate
	LastAnalyzedAt *types.Time // last time the column was analyzed
}
// getDisableColumnTrackingTime reads the value of tidb_disable_column_tracking_time from mysql.tidb if it exists.
// It returns (nil, nil) when the variable has never been set.
func (h *Handle) getDisableColumnTrackingTime() (*time.Time, error) {
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	rows, fields, err := h.execRestrictedSQL(ctx, "SELECT variable_value FROM %n.%n WHERE variable_name = %?", mysql.SystemDB, mysql.TiDBTable, variable.TiDBDisableColumnTrackingTime)
	switch {
	case err != nil:
		return nil, err
	case len(rows) == 0:
		return nil, nil
	}
	// The string represents the UTC time when tidb_enable_column_tracking is set to 0.
	d := rows[0].GetDatum(0, &fields[0].Column.FieldType)
	value, err := d.ToString()
	if err != nil {
		return nil, err
	}
	parsed, err := time.Parse(types.UTCTimeFormat, value)
	if err != nil {
		return nil, err
	}
	return &parsed, nil
}
// LoadColumnStatsUsage loads column stats usage information from disk.
// Timestamps are converted into loc before being returned.
func (h *Handle) LoadColumnStatsUsage(loc *time.Location) (map[model.TableItemID]colStatsTimeInfo, error) {
	disableTime, err := h.getDisableColumnTrackingTime()
	if err != nil {
		return nil, errors.Trace(err)
	}
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	// Since we use another session from session pool to read mysql.column_stats_usage, which may have different @@time_zone, so we do time zone conversion here.
	rows, _, err := h.execRestrictedSQL(ctx, "SELECT table_id, column_id, CONVERT_TZ(last_used_at, @@TIME_ZONE, '+00:00'), CONVERT_TZ(last_analyzed_at, @@TIME_ZONE, '+00:00') FROM mysql.column_stats_usage")
	if err != nil {
		return nil, errors.Trace(err)
	}
	colStatsMap := make(map[model.TableItemID]colStatsTimeInfo, len(rows))
	for _, row := range rows {
		// Skip rows without a usable table/column identifier.
		if row.IsNull(0) || row.IsNull(1) {
			continue
		}
		tblColID := model.TableItemID{TableID: row.GetInt64(0), ID: row.GetInt64(1), IsIndex: false}
		var statsUsage colStatsTimeInfo
		if !row.IsNull(2) {
			gt, err := row.GetTime(2).GoTime(time.UTC)
			if err != nil {
				return nil, errors.Trace(err)
			}
			// If `last_used_at` is before the time when `set global enable_column_tracking = 0`, we should ignore it because
			// `set global enable_column_tracking = 0` indicates all the predicate columns collected before.
			if disableTime == nil || gt.After(*disableTime) {
				t := types.NewTime(types.FromGoTime(gt.In(loc)), mysql.TypeTimestamp, types.DefaultFsp)
				statsUsage.LastUsedAt = &t
			}
		}
		if !row.IsNull(3) {
			gt, err := row.GetTime(3).GoTime(time.UTC)
			if err != nil {
				return nil, errors.Trace(err)
			}
			t := types.NewTime(types.FromGoTime(gt.In(loc)), mysql.TypeTimestamp, types.DefaultFsp)
			statsUsage.LastAnalyzedAt = &t
		}
		colStatsMap[tblColID] = statsUsage
	}
	return colStatsMap, nil
}
// CollectColumnsInExtendedStats returns IDs of the columns involved in extended stats.
// Rows with malformed column_ids JSON are logged and skipped.
func (h *Handle) CollectColumnsInExtendedStats(tableID int64) ([]int64, error) {
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	const sql = "SELECT name, type, column_ids FROM mysql.stats_extended WHERE table_id = %? and status in (%?, %?)"
	rows, _, err := h.execRestrictedSQL(ctx, sql, tableID, statistics.ExtendedStatsAnalyzed, statistics.ExtendedStatsInited)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if len(rows) == 0 {
		return nil, nil
	}
	// Each extended-stats record references a pair of columns.
	columnIDs := make([]int64, 0, len(rows)*2)
	for _, row := range rows {
		data := row.GetString(2)
		var pair []int64
		if err := json.Unmarshal([]byte(data), &pair); err != nil {
			logutil.BgLogger().Error("invalid column_ids in mysql.stats_extended, skip collecting extended stats for this row", zap.String("column_ids", data), zap.Error(err))
			continue
		}
		columnIDs = append(columnIDs, pair...)
	}
	return columnIDs, nil
}
// GetPredicateColumns returns IDs of predicate columns, which are the columns whose stats are used(needed) when generating query plans.
func (h *Handle) GetPredicateColumns(tableID int64) ([]int64, error) {
	disableTime, err := h.getDisableColumnTrackingTime()
	if err != nil {
		return nil, errors.Trace(err)
	}
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	// Normalize last_used_at to UTC because the pooled session reading the
	// table may have a different @@time_zone.
	rows, _, err := h.execRestrictedSQL(ctx, "SELECT column_id, CONVERT_TZ(last_used_at, @@TIME_ZONE, '+00:00') FROM mysql.column_stats_usage WHERE table_id = %? AND last_used_at IS NOT NULL", tableID)
	if err != nil {
		return nil, errors.Trace(err)
	}
	columnIDs := make([]int64, 0, len(rows))
	for _, row := range rows {
		if row.IsNull(0) || row.IsNull(1) {
			continue
		}
		colID := row.GetInt64(0)
		gt, err := row.GetTime(1).GoTime(time.UTC)
		if err != nil {
			return nil, errors.Trace(err)
		}
		// If `last_used_at` is before the time when `set global enable_column_tracking = 0`, we don't regard the column as predicate column because
		// `set global enable_column_tracking = 0` indicates all the predicate columns collected before.
		if disableTime == nil || gt.After(*disableTime) {
			columnIDs = append(columnIDs, colID)
		}
	}
	return columnIDs, nil
}
// Max column size is 6MB. Refer https://docs.pingcap.com/tidb/dev/tidb-limitations/#limitation-on-a-single-column
// Historical stats JSON is split into blocks of at most this size before
// being written into mysql.stats_history.
const maxColumnSize = 6 << 20
// RecordHistoricalStatsToStorage records the given table's stats data to mysql.stats_history.
// It returns the stats version the snapshot was taken at.
//
// The results are named so the deferred finishTransaction can propagate its
// commit/rollback error; previously the assignment to a local `err` was
// silently discarded because the results were unnamed.
func (h *Handle) RecordHistoricalStatsToStorage(dbName string, tableInfo *model.TableInfo, physicalID int64, isPartition bool) (version uint64, err error) {
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	// Dump the stats as JSON: a single partition, or the whole table.
	var js *JSONTable
	if isPartition {
		js, err = h.tableStatsToJSON(dbName, tableInfo, physicalID, 0)
	} else {
		js, err = h.DumpStatsToJSON(dbName, tableInfo, nil, true)
	}
	if err != nil {
		return 0, errors.Trace(err)
	}
	// Pick the stats version: the table-level version, or the first non-zero
	// partition version for partitioned dumps.
	if len(js.Partitions) == 0 {
		version = js.Version
	} else {
		for _, p := range js.Partitions {
			version = p.Version
			if version != 0 {
				break
			}
		}
	}
	// Split the JSON so each row stays under the single-column size limit.
	blocks, err := JSONTableToBlocks(js, maxColumnSize)
	if err != nil {
		return version, errors.Trace(err)
	}
	h.mu.Lock()
	defer h.mu.Unlock()
	exec := h.mu.ctx.(sqlexec.SQLExecutor)
	_, err = exec.ExecuteInternal(ctx, "begin pessimistic")
	if err != nil {
		return version, errors.Trace(err)
	}
	defer func() {
		err = finishTransaction(ctx, exec, err)
	}()
	ts := time.Now().Format("2006-01-02 15:04:05.999999")
	const sql = "INSERT INTO mysql.stats_history(table_id, stats_data, seq_no, version, create_time) VALUES (%?, %?, %?, %?, %?)"
	for i := 0; i < len(blocks); i++ {
		if _, err := exec.ExecuteInternal(ctx, sql, physicalID, blocks[i], i, version, ts); err != nil {
			return version, errors.Trace(err)
		}
	}
	return version, nil
}
// checkHistoricalStatsEnable reads the TiDBEnableHistoricalStats global
// variable through the given session and reports whether it is switched on.
func checkHistoricalStatsEnable(sctx sessionctx.Context) (enable bool, err error) {
	accessor := sctx.GetSessionVars().GlobalVarsAccessor
	val, err := accessor.GetGlobalSysVar(variable.TiDBEnableHistoricalStats)
	if err != nil {
		return false, errors.Trace(err)
	}
	return variable.TiDBOptOn(val), nil
}
// CheckHistoricalStatsEnable is used to check whether TiDBEnableHistoricalStats is enabled.
func (h *Handle) CheckHistoricalStatsEnable() (enable bool, err error) {
	// h.mu guards h.mu.ctx, the session used to read the global variable.
	h.mu.Lock()
	defer h.mu.Unlock()
	return checkHistoricalStatsEnable(h.mu.ctx)
}
// InsertAnalyzeJob inserts analyze job into mysql.analyze_jobs and gets job ID for further updating job.
// On success job.ID is populated with the auto-generated row ID.
func (h *Handle) InsertAnalyzeJob(job *statistics.AnalyzeJob, instance string, procID uint64) error {
	h.mu.Lock()
	defer h.mu.Unlock()
	exec := h.mu.ctx.(sqlexec.RestrictedSQLExecutor)
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	// job_info is stored in a TEXT column; truncate to its maximum length.
	jobInfo := job.JobInfo
	const textMaxLength = 65535
	if len(jobInfo) > textMaxLength {
		jobInfo = jobInfo[:textMaxLength]
	}
	const insertJob = "INSERT INTO mysql.analyze_jobs (table_schema, table_name, partition_name, job_info, state, instance, process_id) VALUES (%?, %?, %?, %?, %?, %?, %?)"
	_, _, err := exec.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseCurSession}, insertJob, job.DBName, job.TableName, job.PartitionName, jobInfo, statistics.AnalyzePending, instance, procID)
	if err != nil {
		return err
	}
	// LAST_INSERT_ID() must run on the same session as the INSERT above.
	const getJobID = "SELECT LAST_INSERT_ID()"
	rows, _, err := exec.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseCurSession}, getJobID)
	if err != nil {
		return err
	}
	// Defensive: avoid an index-out-of-range panic if no row comes back.
	if len(rows) == 0 {
		return errors.New("insert analyze job: no result from LAST_INSERT_ID()")
	}
	job.ID = new(uint64)
	*job.ID = rows[0].GetUint64(0)
	return nil
}
// DeleteAnalyzeJobs deletes the analyze jobs whose update time is earlier than updateTime.
func (h *Handle) DeleteAnalyzeJobs(updateTime time.Time) error {
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	// Compare in UTC and let the server convert into its own @@time_zone.
	cutoff := updateTime.UTC().Format(types.TimeFormat)
	_, _, err := h.execRestrictedSQL(ctx, "DELETE FROM mysql.analyze_jobs WHERE update_time < CONVERT_TZ(%?, '+00:00', @@TIME_ZONE)", cutoff)
	return err
}
// SetStatsCacheCapacity sets the capacity of the stats cache.
// It is a no-op when the handle or its cache has not been initialized.
func (h *Handle) SetStatsCacheCapacity(c int64) {
	if h == nil {
		return
	}
	// The redundant `sc := v` re-binding was removed; use the loaded value
	// directly.
	sc := h.statsCache.Load()
	if sc == nil {
		return
	}
	sc.SetCapacity(c)
	logutil.BgLogger().Info("update stats cache capacity successfully", zap.Int64("capacity", c))
}
// Close stops the background work: it closes the stats cache and shuts down
// the handle's goroutine pool.
func (h *Handle) Close() {
	h.statsCache.Load().Close()
	h.gpool.Close()
}
|
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collate
import (
"strings"
"github.com/pingcap/tidb/util/stringutil"
)
// binCollator compares strings by their raw bytes, with no trailing-space
// padding (contrast binPaddingCollator below).
type binCollator struct {
}
// Compare implements the Collator interface using plain byte-wise string
// ordering; it returns -1, 0 or +1 like strings.Compare.
func (*binCollator) Compare(a, b string) int {
	switch {
	case a < b:
		return -1
	case a > b:
		return 1
	default:
		return 0
	}
}
// Key implements the Collator interface; for binary collation the sort key is
// the raw bytes of the string.
func (*binCollator) Key(str string) []byte {
	return []byte(str)
}
// KeyWithoutTrimRightSpace implements the Collator interface; identical to
// Key here since binary collation never trims trailing spaces.
func (*binCollator) KeyWithoutTrimRightSpace(str string) []byte {
	return []byte(str)
}
// Pattern implements the Collator interface; returns a fresh wildcard matcher
// with binary semantics.
func (*binCollator) Pattern() WildcardPattern {
	return &binPattern{}
}
// binPaddingCollator compares strings byte-wise after stripping trailing
// spaces (PAD SPACE semantics).
type binPaddingCollator struct {
}
// Compare implements the Collator interface: both operands have their
// trailing spaces stripped before a plain binary comparison.
func (*binPaddingCollator) Compare(a, b string) int {
	trimmedA := truncateTailingSpace(a)
	trimmedB := truncateTailingSpace(b)
	return strings.Compare(trimmedA, trimmedB)
}
// Key implements the Collator interface; the sort key is the string's bytes
// with trailing spaces stripped.
func (*binPaddingCollator) Key(str string) []byte {
	return []byte(truncateTailingSpace(str))
}
// KeyWithoutTrimRightSpace implements the Collator interface; unlike Key it
// keeps trailing spaces intact.
func (*binPaddingCollator) KeyWithoutTrimRightSpace(str string) []byte {
	return []byte(str)
}
// Pattern implements the Collator interface.
// Notice that trailing spaces are significant.
func (*binPaddingCollator) Pattern() WildcardPattern {
	return &binPattern{}
}
// binPattern is the WildcardPattern for binary collations, backed by the
// generic matcher in util/stringutil.
type binPattern struct {
	patChars []rune // compiled pattern characters
	patTypes []byte // per-character match types from CompilePattern
}
// Compile implements WildcardPattern interface.
// It pre-compiles the LIKE pattern with the given escape byte for repeated
// matching via DoMatch.
func (p *binPattern) Compile(patternStr string, escape byte) {
	p.patChars, p.patTypes = stringutil.CompilePattern(patternStr, escape)
}
// DoMatch implements WildcardPattern interface.
// Compile must have been called first to populate the pattern state.
func (p *binPattern) DoMatch(str string) bool {
	return stringutil.DoMatch(str, p.patChars, p.patTypes)
}
|
// Package livereload implements server for LiveReload protocol 7.
//
// Everything in this package is safe for concurrent use by multiple goroutines.
package livereload
import (
"net/http"
"path/filepath"
"github.com/gorilla/websocket"
"github.com/powerman/tr/pkg/broadcast"
)
// defaultServerName is used when ServerConfig.Name is left empty.
const defaultServerName = "Go"
// ServerConfig contains Server configuration.
type ServerConfig struct {
	Name                  string // Server name. Default: "Go".
	ForceReloadNewClients bool   // Force reload when client connects for the first time (loose detection using User-Agent).
}
// Server for LiveReload protocol 7.
type Server struct {
	cfg      ServerConfig
	upgrader *websocket.Upgrader
	reload   *broadcast.Topic // topic used to push reload messages to all connections
	// seen tracks User-Agent strings that have connected before, for the
	// ForceReloadNewClients feature.
	// NOTE(review): seen is read and written from ServeHTTP without a lock,
	// which is a data race under concurrent requests despite the package doc
	// claiming concurrency safety — consider guarding it with a sync.Mutex.
	seen map[string]bool
}
// NewServer creates and returns new Server.
// An empty cfg.Name falls back to defaultServerName.
func NewServer(cfg ServerConfig) *Server {
	if cfg.Name == "" {
		cfg.Name = defaultServerName
	}
	return &Server{
		cfg: cfg,
		upgrader: &websocket.Upgrader{
			// Accept WebSocket handshakes from any origin.
			CheckOrigin: func(*http.Request) bool { return true },
		},
		reload: broadcast.NewTopic(),
		seen:   make(map[string]bool),
	}
}
// ServeHTTP implements WebSocket LiveReload protocol 7.
// It upgrades the HTTP connection, sends the protocol hello, optionally
// forces a reload for first-time clients, then subscribes the client to
// reload broadcasts until it disconnects.
func (srv *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ws, err := srv.upgrader.Upgrade(w, r, nil)
	if err != nil {
		// presumably Upgrade already wrote an HTTP error response — confirm
		// against gorilla/websocket docs.
		return
	}
	c := NewConn(ws)
	c.Send() <- MsgHello(srv.cfg.Name)
	if srv.cfg.ForceReloadNewClients {
		// NOTE(review): srv.seen is mutated here from concurrent handler
		// goroutines without synchronization — likely a data race; confirm.
		if ua := r.Header.Get("User-Agent"); !srv.seen[ua] {
			srv.seen[ua] = true
			c.Send() <- MsgReload("/force-reload.js") // Just a fake name with .js ext.
		}
	}
	srv.reload.Subscribe(c)
	defer srv.reload.Unsubscribe(c)
	// Block until the client connection ends.
	c.Wait()
}
// Reload tells every connected LiveReload client to reload the given path.
// CSS and images are refreshed in place without reloading the whole page.
func (srv *Server) Reload(path string) {
	msg := MsgReload(filepath.ToSlash(path))
	srv.reload.Broadcast(msg)
}
|
package memoryCacheChan
import "testing"
import "net/http"
import "io/ioutil"
var cache *MemoryCacheChan
// GetUrl fetches the given URL and returns the response body bytes.
// It serves as the loader function for the cache; the cache key is the
// URL itself.
func GetUrl(key string) (interface{}, error) {
	resp, err := http.Get(key)
	if err != nil {
		return nil, err
	}
	// Close the body as soon as we leave; deferring only after the error
	// check avoids touching a nil response. The else branch was removed
	// (early return makes it redundant).
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	return body, err
}
// TestNew initializes the shared package-level cache with GetUrl as the
// loader. The following tests rely on this running first (ordered,
// shared global state).
func TestNew(t *testing.T) {
	cache = New(GetUrl)
}

// TestMemoryCacheChan_Get exercises a single cache fetch.
// NOTE(review): "baodu.com" looks like a typo for "baidu.com" — confirm;
// the returned value and error are also ignored, so failures go unnoticed.
func TestMemoryCacheChan_Get(t *testing.T) {
	cache.Get("http://www.baodu.com")
}

// TestMemoryCacheChan_Close shuts down the cache created in TestNew.
func TestMemoryCacheChan_Close(t *testing.T) {
	cache.Close()
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ttlworker
import (
"context"
"sync"
"time"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/logutil"
"go.uber.org/zap"
)
// workerStatus describes the lifecycle state of a TTL worker.
type workerStatus int

const (
	workerStatusCreated  workerStatus = iota // initialized, not yet started
	workerStatusRunning                      // loop goroutine is active
	workerStatusStopping                     // context cancelled, loop winding down
	workerStatusStopped                      // loop exited; message channel closed
)

// worker is the common lifecycle interface implemented by TTL workers.
type worker interface {
	Start()
	Stop()
	Status() workerStatus
	Error() error
	Send() chan<- interface{}
	WaitStopped(ctx context.Context, timeout time.Duration) error
}

// baseWorker supplies the shared lifecycle machinery for workers: a
// cancellable context, an unbuffered message channel, and status/error
// bookkeeping guarded by the embedded mutex.
type baseWorker struct {
	sync.Mutex
	ctx      context.Context
	cancel   func()
	ch       chan interface{}
	loopFunc func() error
	err      error
	status   workerStatus
	wg       util.WaitGroupWrapper
}
// init prepares the worker: a fresh cancellable context, the created
// status, the loop body to run, and an unbuffered message channel.
func (w *baseWorker) init(loop func() error) {
	w.ctx, w.cancel = context.WithCancel(context.Background())
	w.status = workerStatusCreated
	w.loopFunc = loop
	w.ch = make(chan interface{})
}

// Start launches the worker loop in its own goroutine. It is a no-op
// unless the worker is still in the created state, so a stopped worker
// cannot be restarted.
func (w *baseWorker) Start() {
	w.Lock()
	defer w.Unlock()
	if w.status != workerStatusCreated {
		return
	}
	w.wg.Run(w.loop)
	w.status = workerStatusRunning
}

// Stop cancels the worker's context. A never-started worker transitions
// directly to stopped; a running worker becomes stopping and reaches
// stopped when its loop goroutine exits. Other states are left alone.
func (w *baseWorker) Stop() {
	w.Lock()
	defer w.Unlock()
	switch w.status {
	case workerStatusCreated:
		w.cancel()
		w.toStopped(nil)
	case workerStatusRunning:
		w.cancel()
		w.status = workerStatusStopping
	}
}

// Status returns the worker's current lifecycle state.
func (w *baseWorker) Status() workerStatus {
	w.Lock()
	defer w.Unlock()
	return w.status
}

// Error returns the error the loop function exited with, if any.
func (w *baseWorker) Error() error {
	w.Lock()
	defer w.Unlock()
	return w.err
}

// WaitStopped blocks until the worker is fully stopped, the given
// context is done, or timeout elapses. It returns nil only when the
// worker actually reached the stopped state.
func (w *baseWorker) WaitStopped(ctx context.Context, timeout time.Duration) error {
	// consider the situation when the worker has stopped, but the context has also stopped. We should
	// return without error
	if w.Status() == workerStatusStopped {
		return nil
	}
	ctx, cancel := context.WithTimeout(ctx, timeout)
	// The helper goroutine cancels the context once the loop goroutine has
	// exited, which unblocks the <-ctx.Done() below before the timeout.
	go func() {
		w.wg.Wait()
		cancel()
	}()
	<-ctx.Done()
	if w.Status() != workerStatusStopped {
		return ctx.Err()
	}
	return nil
}

// Send returns the send-only side of the worker's message channel.
// The channel is closed by toStopped once the worker stops.
func (w *baseWorker) Send() chan<- interface{} {
	return w.ch
}

// loop runs the worker body and recovers from panics so a panicking
// worker still transitions to stopped and closes its channel.
func (w *baseWorker) loop() {
	var err error
	defer func() {
		if r := recover(); r != nil {
			logutil.BgLogger().Info("ttl worker panic", zap.Any("recover", r), zap.Stack("stack"))
		}
		w.Lock()
		w.toStopped(err)
		w.Unlock()
	}()
	err = w.loopFunc()
}

// toStopped marks the worker stopped, records the loop error, and closes
// the message channel. Callers must hold w's mutex.
func (w *baseWorker) toStopped(err error) {
	w.status = workerStatusStopped
	w.err = err
	close(w.ch)
}
|
package problems1to50
import (
"fmt"
)
// Problem1 : Multiples of 3 and 5
// Description: https://projecteuler.net/problem=1
// Prints the sum of all natural numbers strictly below 1000 that are
// multiples of 3 or 5.
func Problem1() {
	// The previous loop ran with i <= 1000, which wrongly included 1000
	// (a multiple of 5) even though the problem asks for numbers *below*
	// 1000; the strict bound in the helper fixes that.
	fmt.Println(sumMultiples3or5Below(1000))
}

// sumMultiples3or5Below returns the sum of all positive integers
// strictly less than limit that are divisible by 3 or 5.
func sumMultiples3or5Below(limit int) int {
	sum := 0
	for i := 1; i < limit; i++ {
		if i%3 == 0 || i%5 == 0 {
			sum += i
		}
	}
	return sum
}
|
package main
/*
* @lc app=leetcode id=148 lang=golang
*
* [148] Sort List
*/
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// Solution 1: merge sort.
// mergeLists merges two sorted singly-linked lists into one sorted list
// and returns its head. On equal values the node from l1 is taken first,
// so the merge is stable.
func mergeLists(l1, l2 *ListNode) *ListNode {
	var head ListNode
	tail := &head
	for l1 != nil || l2 != nil {
		takeFirst := l2 == nil || (l1 != nil && l1.Val <= l2.Val)
		if takeFirst {
			tail.Next = l1
			l1 = l1.Next
		} else {
			tail.Next = l2
			l2 = l2.Next
		}
		tail = tail.Next
	}
	tail.Next = nil
	return head.Next
}
// Solution 1: merge sort.
// sortList sorts a singly-linked list in O(n log n) using top-down merge
// sort: find the midpoint with slow/fast pointers, sort both halves
// recursively, then merge them.
func sortList(head *ListNode) *ListNode {
	if head == nil || head.Next == nil {
		return head
	}
	// mid ends on the node just before the midpoint.
	mid, runner := head, head
	for runner.Next != nil && runner.Next.Next != nil {
		mid = mid.Next
		runner = runner.Next.Next
	}
	right := mid.Next
	mid.Next = nil // detach the two halves
	return mergeLists(sortList(head), sortList(right))
}
|
package client
import (
"log"
"math/rand"
"time"
jsoniter "github.com/json-iterator/go"
"github.com/valyala/fasthttp"
)
var (
// A high-performance 100% compatible drop-in replacement of "encoding/json"
json = jsoniter.ConfigCompatibleWithStandardLibrary
// extraction of json marshaller to allow for testing
jsonMarshal = json.Marshal
)
// Client sends batches of test messages to the configured address.
type Client interface {
	SendMessages(amount int)
}

// client is the fasthttp-backed implementation of Client.
type client struct {
	client          *fasthttp.Client
	address         string        // target URL for POSTed messages
	maxClientID     int           // upper bound for random client IDs
	retryDuration   time.Duration // wait between retries of a failed send
	measureMessages int           // batch size used for throughput logging
}

// request is the JSON payload sent for each message.
type request struct {
	Text      string `json:"text"`
	ContentID int    `json:"content_id"`
	ClientID  int    `json:"client_id"`
	Timestamp int64  `json:"timestamp"`
}
// New builds a Client that POSTs messages to address. maxClientID bounds
// the random client IDs, retryDuration is the wait after a failed send,
// and measureMessages is the batch size used for timing logs.
func New(address string, maxClientID int, retryDuration time.Duration, measureMessages int) Client {
	return &client{
		client:          &fasthttp.Client{},
		address:         address,
		maxClientID:     maxClientID,
		retryDuration:   retryDuration,
		measureMessages: measureMessages,
	}
}
// SendMessages sends `amount` messages in batches of c.measureMessages,
// logging elapsed time after each batch. A failed send is retried after
// c.retryDuration until it succeeds.
func (c *client) SendMessages(amount int) {
	log.Printf("Sending %d messages to %s", amount, c.address)
	for j := 1; j <= (amount / c.measureMessages); j++ {
		begin := time.Now()
		// Batch j covers content IDs (j-1)*m+1 .. j*m. The previous code
		// used start=j*m and end=start+m inclusive, which skipped the
		// first batch entirely and sent every batch-boundary ID twice.
		start := (j-1)*c.measureMessages + 1
		end := j * c.measureMessages
		for i := start; i <= end; {
			err := c.sendMessage(i)
			if err != nil {
				log.Println("Error when sending request: ", err)
				log.Println("Retrying in: ", c.retryDuration)
				time.Sleep(c.retryDuration)
				continue
			}
			i++
		}
		elapsed := time.Since(begin)
		remaining := amount - (j * c.measureMessages)
		log.Printf("Sent %d messages in %s, %d remaining", c.measureMessages, elapsed, remaining)
	}
}
// sendMessage POSTs a single JSON-encoded message with the given content
// ID to c.address. The fasthttp request object is pooled and released on
// return.
func (c *client) sendMessage(contentID int) error {
	req := fasthttp.AcquireRequest()
	defer fasthttp.ReleaseRequest(req)
	body, err := jsonMarshal(c.makeRequest(contentID))
	if err != nil {
		return err
	}
	req.SetBody(body)
	req.Header.SetMethod("POST")
	req.Header.SetContentType("application/json")
	req.SetRequestURI(c.address)
	// Response is discarded (nil) — only transport errors are reported.
	if err := c.client.Do(req, nil); err != nil {
		return err
	}
	return nil
}
// makeRequest assembles one message payload: fixed text, the given
// content ID, a random client ID, and the current time in milliseconds.
func (c *client) makeRequest(contentID int) request {
	return request{
		Text:      "hello world",
		ContentID: contentID,
		ClientID:  c.getClientID(),
		Timestamp: getMillisecondTimestamp(),
	}
}
// returns a random client ID in the range [1, maxClientID]
func (c *client) getClientID() int {
	id := rand.Intn(c.maxClientID)
	return id + 1
}
// returns the current time as a millisecond-precision Unix timestamp
func getMillisecondTimestamp() int64 {
	nanos := time.Now().UnixNano()
	return nanos / int64(time.Millisecond)
}
|
package utils
import (
"../config"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/firehose"
"github.com/aws/aws-sdk-go/service/kinesis"
)
var (
AwsSession *session.Session
AwsKinesis *kinesis.Kinesis
AwsFirehose *firehose.Firehose
)
// CreateAwsSession initializes the package-level AWS session and the
// Kinesis and Firehose clients from the configured static credentials.
// It panics on any setup error, since the process cannot run without AWS.
func CreateAwsSession() {
	creds := credentials.NewStaticCredentials(config.Config.AwsKey, config.Config.AwsSecret, "")
	if _, err := creds.Get(); err != nil {
		panic(err)
	}
	sess, err := session.NewSession(&aws.Config{
		Region:      aws.String("eu-west-1"),
		Credentials: creds,
	})
	// Check the error before using the session: the previous code built
	// the Kinesis/Firehose clients from a possibly-nil session and only
	// inspected err afterwards.
	if err != nil {
		panic(err)
	}
	AwsSession = sess
	AwsKinesis = kinesis.New(AwsSession)
	AwsFirehose = firehose.New(AwsSession)
}
|
package mhfpacket
import (
"errors"
"github.com/Andoryuuta/Erupe/network"
"github.com/Andoryuuta/Erupe/network/clientctx"
"github.com/Andoryuuta/byteframe"
)
/*
00 58 // Opcode
00 00 00 00
00 00 00 4e
00 04 // Count
00 00 // Skipped(padding?)
00 01 00 00 00 00 00 00
00 02 00 00 5d fa 14 c0
00 03 00 00 5d fa 14 c0
00 06 00 00 5d e7 05 10
00 00 // Count of some buf up to 0x800 bytes following it.
00 10 // Trailer
*/
// ClientRight represents a right that the client has.
type ClientRight struct {
	ID        uint16 // right identifier (see packet dump above)
	Unk0      uint16
	Timestamp uint32 // observed values look like Unix timestamps — TODO confirm
}

// MsgSysUpdateRight represents the MSG_SYS_UPDATE_RIGHT packet.
type MsgSysUpdateRight struct {
	Unk0 uint32
	Unk1 uint32
	//RightCount uint16
	//Unk3 uint16 // Likely struct padding
	Rights  []ClientRight
	UnkSize uint16 // Count of some buf up to 0x800 bytes following it.
}

// Opcode returns the ID associated with this packet type.
func (m *MsgSysUpdateRight) Opcode() network.PacketID {
	return network.MSG_SYS_UPDATE_RIGHT
}
// Parse parses the packet from binary.
// Parsing is not yet implemented for this packet type.
func (m *MsgSysUpdateRight) Parse(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error {
	// Error strings are lowercase per Go convention (they are typically
	// wrapped and appear mid-sentence).
	return errors.New("not implemented")
}
// Build builds a binary packet from the current data.
// Layout matches the packet dump above: two uint32 unknowns, the rights
// count plus padding, one 8-byte entry per right, then the trailing
// buffer size.
func (m *MsgSysUpdateRight) Build(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error {
	bf.WriteUint32(m.Unk0)
	bf.WriteUint32(m.Unk1)
	bf.WriteUint16(uint16(len(m.Rights)))
	bf.WriteUint16(0) // m.Unk3, struct padding.
	for _, v := range m.Rights {
		bf.WriteUint16(v.ID)
		bf.WriteUint16(v.Unk0)
		bf.WriteUint32(v.Timestamp)
	}
	bf.WriteUint16(m.UnkSize)
	return nil
}
|
package problems_test
import (
"encoding/json"
"errors"
"fmt"
"github.com/team-bonitto/bonitto/internal/problems"
"io"
"io/ioutil"
"net/http"
"strings"
"testing"
)
// DB is the in-memory user store shared by the CRUD handlers below.
var DB = make([]problems.P8UserStruct, 0)

// add appends a user record to the in-memory store.
func add(u problems.P8UserStruct) {
	DB = append(DB, u)
}
// find returns the stored user with the given ID, or an error if no
// such user exists.
func find(id string) (problems.P8UserStruct, error) {
	for i := range DB {
		if DB[i].ID == id {
			return DB[i], nil
		}
	}
	return problems.P8UserStruct{}, errors.New("not found")
}
// del removes the user with the given ID using swap-with-last delete
// (element order in DB is not preserved). Returns an error if absent.
func del(id string) error {
	for i := range DB {
		if DB[i].ID != id {
			continue
		}
		last := len(DB) - 1
		DB[i] = DB[last]
		DB = DB[:last]
		return nil
	}
	return errors.New("not found")
}
// update applies the non-empty fields of u to the stored user with the
// same ID and returns the updated record. The record is re-inserted via
// del+add, so its position in DB may change.
func update(u problems.P8UserStruct) (problems.P8UserStruct, error) {
	u2, err := find(u.ID)
	if err != nil {
		// Propagate the lookup failure. The previous code returned a nil
		// error here, making a failed update look like a success with a
		// zero-value user.
		return problems.P8UserStruct{}, err
	}
	if u.Name != "" {
		u2.Name = u.Name
	}
	if u.Email != "" {
		u2.Email = u.Email
	}
	if u.Phone != "" {
		u2.Phone = u.Phone
	}
	if err := del(u.ID); err != nil {
		return problems.P8UserStruct{}, err
	}
	add(u2)
	return u2, nil
}
// Test starts the CRUD HTTP server, runs every scenario provided by the
// problems package against it, and shuts the server down afterwards.
func Test(t *testing.T) {
	server := &http.Server{Addr: ":80"}
	go func() {
		http.HandleFunc("/user", func(w http.ResponseWriter, r *http.Request) {
			switch r.Method {
			case http.MethodGet:
				GetUser(w, r)
			case http.MethodPost:
				PostUser(w, r)
			case http.MethodPut:
				PutUser(w, r)
			case http.MethodDelete:
				DeleteUser(w, r)
			}
		})
		// Serve on the http.Server declared above so server.Close() below
		// actually stops the listener. The previous code called
		// http.ListenAndServe, which started a second, unrelated server
		// that Close() never touched. Handler is nil, so the
		// DefaultServeMux registration above still applies.
		if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			panic(err)
		}
	}()
	<-problems.P8User.WaitForReady()
	for _, scenario := range problems.P8User.TestCases {
		for _, tc := range scenario {
			fmt.Println(tc.GetName())
			res := tc.Run()
			fmt.Println(res.Passed, res.Result)
			if !res.Passed {
				t.FailNow()
			}
		}
	}
	server.Close()
}
// encodeJSON marshals a to JSON, panicking on failure (it is only used
// on values known to be marshalable in these tests).
func encodeJSON(a interface{}) []byte {
	out, err := json.Marshal(a)
	if err != nil {
		panic(err)
	}
	return out
}
// GetUser handles GET /user?id=...: it validates the id query parameter,
// looks the user up, and writes the record as JSON. Any failure yields
// 400 Bad Request.
func GetUser(w http.ResponseWriter, r *http.Request) {
	ids, ok := r.URL.Query()["id"]
	if !ok {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	// Only the first id value is considered.
	id := ids[0]
	if !checkID(id) {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	u, err := find(id)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	io.WriteString(w, string(encodeJSON(u)))
}
// PostUser handles POST /user: it decodes a user from the request body,
// validates every field, stores the record, and echoes it back as JSON.
// Any decoding or validation failure yields 400 Bad Request.
func PostUser(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		fmt.Println(err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	u := problems.P8UserStruct{}
	if err := json.Unmarshal(body, &u); err != nil {
		fmt.Println(err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	// All fields are mandatory on creation.
	if !(checkID(u.ID) &&
		checkPW(u.PW) &&
		checkName(u.Name) &&
		checkEmail(u.Email) &&
		checkPhone(u.Phone)) {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	add(u)
	io.WriteString(w, string(encodeJSON(u)))
}
// PutUser handles PUT /user: it decodes a partial user record, validates
// only the fields that were supplied (empty fields mean "unchanged"),
// applies the update, and echoes the merged record as JSON.
func PutUser(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		fmt.Println(err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	u := problems.P8UserStruct{}
	if err := json.Unmarshal(body, &u); err != nil {
		fmt.Println(err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	if u.Name != "" && !checkName(u.Name) {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	if u.Email != "" && !checkEmail(u.Email) {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	if u.Phone != "" && !checkPhone(u.Phone) {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	u2, err := update(u)
	if err != nil {
		fmt.Println(err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	io.WriteString(w, string(encodeJSON(u2)))
}
// DeleteUser handles DELETE /user: it decodes the user from the request
// body and removes the record with that ID. Failures yield 400 Bad
// Request; success yields 200 OK.
func DeleteUser(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		fmt.Println(err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	u := problems.P8UserStruct{}
	if err := json.Unmarshal(body, &u); err != nil {
		fmt.Println(err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	if err := del(u.ID); err != nil {
		fmt.Println(err)
		w.WriteHeader(http.StatusBadRequest)
		// Return here: the previous code fell through and also wrote a
		// 200 header, triggering a superfluous WriteHeader call.
		return
	}
	w.WriteHeader(http.StatusOK)
}
// checkOnlyAlphaNumeric reports whether s consists solely of lowercase
// ASCII letters, digits, and underscores. The empty string passes (it
// contains no invalid characters).
func checkOnlyAlphaNumeric(s string) bool {
	// The previous implementation enumerated the alphabet by hand and
	// accidentally omitted the letter 'i', rejecting valid identifiers.
	// Range checks cover the full a-z / 0-9 sets and are O(1) per rune.
	for _, r := range s {
		isLower := r >= 'a' && r <= 'z'
		isDigit := r >= '0' && r <= '9'
		if !isLower && !isDigit && r != '_' {
			return false
		}
	}
	return true
}
// checkLength reports whether len(s) lies within [min, max] inclusive.
func checkLength(s string, min, max int) bool {
	n := len(s)
	return n >= min && n <= max
}
// checkID: 5-12 chars, lowercase alphanumerics/underscore only.
func checkID(id string) bool {
	return checkLength(id, 5, 12) && checkOnlyAlphaNumeric(id)
}

// checkPW: at most 50 chars (empty allowed).
func checkPW(pw string) bool {
	return checkLength(pw, 0, 50)
}

// checkName: 2-50 chars, lowercase alphanumerics/underscore only.
func checkName(name string) bool {
	return checkLength(name, 2, 50) && checkOnlyAlphaNumeric(name)
}

// checkEmail: non-empty, at most 50 chars, must contain '@'.
func checkEmail(email string) bool {
	return checkLength(email, 1, 50) && strings.Contains(email, "@")
}

// checkPhone: non-empty, at most 20 chars, must contain '+'.
func checkPhone(phone string) bool {
	return checkLength(phone, 1, 20) && strings.Contains(phone, "+")
}
|
package vgform
import (
"sort"
"strings"
)
// KeyLister provides a list of keys as a string slice.
// Keys are used in the `value` attribute of HTML option tags (with a select).
type KeyLister interface {
	KeyList() []string
}

// KeyListerFunc implements KeyLister as a function.
type KeyListerFunc func() []string

// KeyList implements the KeyLister interface.
func (f KeyListerFunc) KeyList() []string { return f() }

// TextMapper provides mapping from a key to the corresponding text.
// Text is used inside the contents of an HTML option tag (with a select).
// Text values are always HTML escaped.
type TextMapper interface {
	TextMap(key string) string
}

// TextMapperFunc implements TextMapper as a function.
type TextMapperFunc func(key string) string

// TextMap implements the TextMapper interface.
func (f TextMapperFunc) TextMap(key string) string { return f(key) }

// SimpleTitle implements TextMapper by replacing '-' and '_' with a space and calling strings.Title.
// NOTE(review): strings.Title is deprecated in recent Go releases
// (Unicode-incorrect word boundaries); consider golang.org/x/text/cases
// if/when a dependency is acceptable.
var SimpleTitle = TextMapperFunc(func(key string) string {
	return strings.Title(strings.NewReplacer("-", " ", "_", " ").Replace(key))
})

// Options is an interface with KeyList and TextMap.
// It is used to express the options for a select element.
// It intentionally does not support option groups or other
// advanced behavior as that can be accomplished using slots (TO BE IMPLEMENTED).
// Options is provided to make it easy for the common case of
// adapting a slice or map to be used as select options.
type Options interface {
	KeyLister
	TextMapper
}
// MapOptions implements the Options interface on a map[string]string.
// The keys will be returned in alphanumeric sequence (using sort.Strings),
// or you can call SortFunc to assign a custom sort function.
type MapOptions map[string]string

// KeyList implements KeyLister by returning the map keys sorted with sort.Strings().
func (m MapOptions) KeyList() []string {
	s := make([]string, 0, len(m))
	for k := range m {
		s = append(s, k)
	}
	sort.Strings(s)
	return s
}

// TextMap implements TextMapper by returning `m[key]`.
func (m MapOptions) TextMap(key string) string { return m[key] }

// SortFunc returns an Options instance that uses this map for
// keys and text and sorts according to the order specified by this
// function.
func (m MapOptions) SortFunc(sf func(i, j int) bool) Options {
	return customOptions{
		KeyLister: KeyListerFunc(func() []string {
			// build the key list directly, calling m.KeyList would call sort.Strings unnecessarily
			s := make([]string, 0, len(m))
			for k := range m {
				s = append(s, k)
			}
			sort.Slice(s, sf)
			return s
		}),
		TextMapper: m,
	}
}
// SliceOptions implements the Options interface on a []string.
// The slice specifies the sequence and these exact string keys are
// also used as the text. You can also call Title() to use the
// SimpleTitle mapper or use TextFunc to assign a custom text mapper.
type SliceOptions []string

// Title is shorthand for s.TextFunc(SimpleTitle).
func (s SliceOptions) Title() Options {
	return s.TextFunc(SimpleTitle)
}

// KeyList implements KeyLister with a type conversion ([]string(s)).
func (s SliceOptions) KeyList() []string { return []string(s) }

// TextMap implements TextMapper by returning the key as the text.
func (s SliceOptions) TextMap(key string) string { return key }

// TextFunc returns an Options instance that uses this slice
// as the key list and the specified function for text mapping.
func (s SliceOptions) TextFunc(tmf TextMapperFunc) Options {
	return customOptions{
		KeyLister:  s,
		TextMapper: tmf,
	}
}

// customOptions composes an arbitrary KeyLister with an arbitrary
// TextMapper to satisfy Options.
type customOptions struct {
	KeyLister
	TextMapper
}
|
package Problem0301
import (
"fmt"
"sort"
"testing"
"github.com/stretchr/testify/assert"
)
// tcs is the testcase slice: each entry pairs an input string with the
// full set of valid results removeInvalidParentheses may return, in any
// order (both slices are sorted before comparison).
var tcs = []struct {
	s   string
	ans []string
}{
	{"(a)())()", []string{"(a)()()", "(a())()"}},
	{"()())()", []string{"()()()", "(())()"}},
	{"()())())", []string{"(()())", "(())()", "()(())", "()()()"}},
	{"((())))))((()", []string{"((()))()"}},
	{")d))", []string{"d"}},
	{")(", []string{""}},
	// more test cases may be added here
}
// Test_removeInvalidParentheses checks each test case as an unordered
// set: both expected and actual answers are sorted before comparison.
func Test_removeInvalidParentheses(t *testing.T) {
	ast := assert.New(t)
	for _, tc := range tcs {
		fmt.Printf("~~%v~~\n", tc)
		ans := removeInvalidParentheses(tc.s)
		sort.Strings(ans)
		sort.Strings(tc.ans)
		ast.Equal(tc.ans, ans, "输入:%v", tc)
	}
}

// Benchmark_removeInvalidParentheses measures throughput across the full
// test-case set per iteration.
func Benchmark_removeInvalidParentheses(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for _, tc := range tcs {
			removeInvalidParentheses(tc.s)
		}
	}
}
|
package services
import (
"database/sql"
"github.com/tianhphahai2/hello-grpc"
)
// Test_rgpcServiceServer is a gRPC service implementation backed by a
// SQL database handle.
type Test_rgpcServiceServer struct {
	db *sql.DB
}
func (s *Test_rgpcServiceServer) checkAPI(api string) error {
if len(api) > 0 {
if
}
}
//func (HelloServiceImpl) Hello(ctx context.Context, rq *hello.HelloRequest) (*hello.HelloResponse, error) {
// return &hello.HelloResponse{
// Message: "Hello " + rq.Name,
// }, nil
//}
|
package types
import (
"io/fs"
"time"
)
// Filesystem abstracts a mounted, navigable file tree (local disk,
// remote protocol, etc.). Close releases any underlying resources.
type Filesystem interface {
	Root() string        // root path of the tree
	Protocol() string    // scheme/protocol identifier
	ModTime() time.Time  // modification time of the filesystem root
	Chdir(string) (Directory, error)    // open an existing directory
	MkdirAll(string) (Directory, error) // create (recursively) and open a directory
	Close() error
}

// Directory is a directory entry that also carries fs.FileInfo and
// fs.DirEntry metadata and supports navigating into subdirectories.
type Directory interface {
	fs.FileInfo
	fs.DirEntry
	Path() string // full path of this directory
	Chdir(string) (Directory, error)
}
|
package main
import (
"fmt"
"net/http"
// "io"
"bytes"
"strings"
// "log"
"time"
"sync"
"io/ioutil"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
"github.com/gin-gonic/gin"
// "encoding/json"
// "strconv"
)
var (
	// Qiniu credentials and bucket settings. Empty placeholders here —
	// they must be filled in (or injected) before the checker can run.
	accessKey     = ""
	secretKey     = ""
	mac           = qbox.NewMac(accessKey, secretKey)
	bucket_domain = ""
	bucket        = ""
)

// wg tracks outstanding urlCheck goroutines per request.
var wg sync.WaitGroup

// mutex is currently unused — TODO confirm whether it was meant to guard
// shared request state.
var mutex sync.Mutex

// client is the shared HTTP client with a hard 10s timeout.
var client = &http.Client{
	Timeout: time.Second * 10,
}
// urlCheck fetches the private access URL for the given object key and
// reports the key on cinvalid if the fetch fails, returns a non-200
// status, or the body cannot be read. The i parameter is unused but kept
// for signature compatibility with existing callers.
func urlCheck(url string, i int, cinvalid chan string) {
	defer wg.Done()
	deadline := time.Now().Add(time.Second * 3600 * 2).Unix() // valid for 2 hours
	privateAccessURL := storage.MakePrivateURL(mac, bucket_domain, url, deadline)
	resp, err := http.Get(privateAccessURL)
	if err != nil {
		// The previous code deferred resp.Body.Close() and read
		// resp.StatusCode before checking err, dereferencing a nil
		// response on any transport error.
		fmt.Println("X:", err)
		cinvalid <- url
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		fmt.Println("X:", resp.StatusCode)
		cinvalid <- url
		return
	}
	if _, err := ioutil.ReadAll(resp.Body); err != nil {
		fmt.Print("X")
		cinvalid <- url
		return
	}
	fmt.Print("√")
}
// main runs an HTTP service with a single POST /task endpoint: it checks
// every submitted object key concurrently and reports the invalid ones
// back to a configured callback URL.
func main() {
	r := gin.Default()
	r.POST("/task", func(c *gin.Context) {
		cinvalid := make(chan string)
		id := c.Query("id")
		name_keys := strings.Split(c.PostForm("name_keys"), ",")
		fmt.Printf("id: %s; urls: %s;\n", id, name_keys)
		// One checker goroutine per key; each sends failing keys on cinvalid.
		for i, v := range name_keys {
			wg.Add(1)
			go urlCheck(v, i, cinvalid)
		}
		// Close cinvalid once all checkers finish so the collection loop
		// below terminates. The previous version never closed the channel
		// (its !ok branch was unreachable), leaked the collector goroutine,
		// called a stray wg.Done(), and raced on invalidList.
		go func() {
			wg.Wait()
			close(cinvalid)
		}()
		// Collect on the handler goroutine — no shared mutable state.
		// Starting from a nil slice also fixes the previous
		// make([]string, n) + append bug that left n leading "" entries.
		var invalidList []string
		for iv := range cinvalid {
			fmt.Println("invalid value:", iv)
			invalidList = append(invalidList, iv)
		}
		fmt.Println("Finished here!")
		fmt.Println(invalidList)
		responseTo := ""
		data := []byte(`{"urls":"` + strings.Join(invalidList, ",") + `"}`)
		request, _ := http.NewRequest(http.MethodPut, responseTo, bytes.NewBuffer(data))
		request.Header.Set("Content-Type", "application/json")
		_, err := client.Do(request)
		if err != nil {
			fmt.Println(err)
		}
	})
	r.Run()
}
|
package core
// Session belongs to a parent Space.
type Session struct {
	parent Space
}

// Shutdown terminates the session. Currently a no-op placeholder —
// cleanup logic is still to be implemented.
func (s *Session) Shutdown() {
}
package clockface
import (
"math"
"time"
)
// Clock-face geometry constants: a full revolution of each hand is
// mapped to the corresponding number of units (60 seconds, 60 minutes,
// 12 hours), with the half-face values used in the radian formulas.
const (
	halfClockSeconds = 30
	clockSeconds     = halfClockSeconds * 2
	halfClockMinutes = 30
	clockMinutes     = halfClockMinutes * 2
	halfClockHours   = 6
	clockHours       = 12
)

// Point is a 2-D coordinate on the unit circle centred on the clock.
type Point struct {
	X float64
	Y float64
}
// secondHandPoint returns the unit-circle tip of the second hand at tm.
func secondHandPoint(tm time.Time) Point {
	angle := secondInRadians(tm)
	return angleToPoint(angle)
}

// minuteHandPoint returns the unit-circle tip of the minute hand at tm.
func minuteHandPoint(tm time.Time) Point {
	angle := minuteInRadians(tm)
	return angleToPoint(angle)
}

// hourHandPoint returns the unit-circle tip of the hour hand at tm.
func hourHandPoint(tm time.Time) Point {
	angle := hourInRadians(tm)
	return angleToPoint(angle)
}
// secondInRadians converts the seconds of tm to a clockwise angle in
// radians measured from 12 o'clock (pi/30 per second).
// NOTE(review): at second 0 the divisor becomes 30/0 = +Inf, and
// Pi/+Inf = 0 — presumably the intended result, but the formula relies
// on IEEE float behavior; confirm.
func secondInRadians(tm time.Time) float64 {
	return math.Pi / (halfClockSeconds / float64(tm.Second()))
}

// minuteInRadians is the minute angle plus the fractional contribution
// of the current second.
func minuteInRadians(tm time.Time) float64 {
	return (secondInRadians(tm) / clockMinutes) +
		(math.Pi / (halfClockMinutes / float64(tm.Minute())))
}

// hourInRadians is the hour angle (12-hour face) plus the fractional
// contribution of the current minute.
func hourInRadians(tm time.Time) float64 {
	return (minuteInRadians(tm) / clockHours) +
		(math.Pi / (halfClockHours / float64(tm.Hour()%12)))
}

// angleToPoint maps a clockwise-from-12 angle onto the unit circle;
// X uses sin and Y uses cos because the angle is measured from the
// vertical axis, not the horizontal one.
func angleToPoint(angle float64) Point {
	return Point{math.Sin(angle), math.Cos(angle)}
}
|
/* SPDX-License-Identifier: MIT
*
* Copyright (C) 2019 WireGuard LLC. All Rights Reserved.
*/
package router
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"log"
"net"
"sort"
"time"
"unsafe"
ole "github.com/go-ole/go-ole"
winipcfg "github.com/tailscale/winipcfg-go"
"github.com/tailscale/wireguard-go/device"
"github.com/tailscale/wireguard-go/tun"
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/registry"
"tailscale.com/wgengine/winnet"
)
// Windows socket option identifiers for binding a socket to a specific
// interface. Both constants are 31 — presumably matching the Windows SDK
// values for IP_UNICAST_IF and IPV6_UNICAST_IF; confirm against winsock
// headers.
const (
	sockoptIP_UNICAST_IF   = 31
	sockoptIPV6_UNICAST_IF = 31
)
// htonl converts a 32-bit value from host byte order to network
// (big-endian) byte order by serializing big-endian and reinterpreting
// the bytes as a host-order uint32.
func htonl(val uint32) uint32 {
	// Named `b` rather than `bytes` to avoid shadowing the bytes package.
	b := make([]byte, 4)
	binary.BigEndian.PutUint32(b, val)
	return *(*uint32)(unsafe.Pointer(&b[0]))
}
// bindSocketRoute finds the default route (prefix length 0) with the
// lowest metric for the given address family — excluding our own tunnel
// interface — and binds the WireGuard sockets to that interface.
// *lastLuid caches the previous choice so rebinding happens only when
// the default interface actually changes.
func bindSocketRoute(family winipcfg.AddressFamily, device *device.Device, ourLuid uint64, lastLuid *uint64) error {
	routes, err := winipcfg.GetRoutes(family)
	if err != nil {
		return err
	}
	lowestMetric := ^uint32(0)
	index := uint32(0) // Zero is "unspecified", which for IP_UNICAST_IF resets the value, which is what we want.
	luid := uint64(0)  // Hopefully luid zero is unspecified, but hard to find docs saying so.
	for _, route := range routes {
		// Only consider default routes that are not through our tunnel.
		if route.DestinationPrefix.PrefixLength != 0 || route.InterfaceLuid == ourLuid {
			continue
		}
		if route.Metric < lowestMetric {
			lowestMetric = route.Metric
			index = route.InterfaceIndex
			luid = route.InterfaceLuid
		}
	}
	if luid == *lastLuid {
		// Default interface unchanged; nothing to rebind.
		return nil
	}
	*lastLuid = luid
	if false {
		// TODO(apenwarr): doesn't work with magic socket yet.
		if family == winipcfg.AF_INET {
			return device.BindSocketToInterface4(index, false)
		} else if family == winipcfg.AF_INET6 {
			return device.BindSocketToInterface6(index, false)
		}
	} else {
		log.Printf("WARNING: skipping windows socket binding.\n")
	}
	return nil
}
// monitorDefaultRoutes keeps the WireGuard sockets bound to the current
// default-route interface and, when autoMTU is set, keeps the tunnel MTU
// tracking the default interface's MTU (minus 80 bytes of overhead,
// clamped to 576 for v4 and 1280 for v6). It runs once immediately and
// then again on every default-route change; the returned callback must
// be unregistered by the caller when done.
func monitorDefaultRoutes(device *device.Device, autoMTU bool, tun *tun.NativeTun) (*winipcfg.RouteChangeCallback, error) {
	guid := tun.GUID()
	ourLuid, err := winipcfg.InterfaceGuidToLuid(&guid)
	lastLuid4 := uint64(0)
	lastLuid6 := uint64(0)
	lastMtu := uint32(0)
	if err != nil {
		return nil, err
	}
	doIt := func() error {
		err = bindSocketRoute(winipcfg.AF_INET, device, ourLuid, &lastLuid4)
		if err != nil {
			return err
		}
		err = bindSocketRoute(winipcfg.AF_INET6, device, ourLuid, &lastLuid6)
		if err != nil {
			return err
		}
		if !autoMTU {
			return nil
		}
		// Derive the tunnel MTU from the default interface(s).
		mtu := uint32(0)
		if lastLuid4 != 0 {
			iface, err := winipcfg.InterfaceFromLUID(lastLuid4)
			if err != nil {
				return err
			}
			if iface.Mtu > 0 {
				mtu = iface.Mtu
			}
		}
		if lastLuid6 != 0 {
			iface, err := winipcfg.InterfaceFromLUID(lastLuid6)
			if err != nil {
				return err
			}
			// NOTE(review): when there is no v4 default route, mtu is
			// still 0 here and `iface.Mtu < mtu` can never hold, so a
			// v6-only default route never sets the MTU — confirm intent.
			if iface.Mtu > 0 && iface.Mtu < mtu {
				mtu = iface.Mtu
			}
		}
		if mtu > 0 && (lastMtu == 0 || lastMtu != mtu) {
			iface, err := winipcfg.GetIpInterface(ourLuid, winipcfg.AF_INET)
			if err != nil {
				return err
			}
			// Reserve 80 bytes for WireGuard overhead; 576 is the v4 floor.
			iface.NlMtu = mtu - 80
			if iface.NlMtu < 576 {
				iface.NlMtu = 576
			}
			err = iface.Set()
			if err != nil {
				return err
			}
			tun.ForceMTU(int(iface.NlMtu)) //TODO: it sort of breaks the model with v6 mtu and v4 mtu being different. Just set v4 one for now.
			iface, err = winipcfg.GetIpInterface(ourLuid, winipcfg.AF_INET6)
			if err != nil {
				return err
			}
			// 1280 is the IPv6 minimum MTU.
			iface.NlMtu = mtu - 80
			if iface.NlMtu < 1280 {
				iface.NlMtu = 1280
			}
			err = iface.Set()
			if err != nil {
				return err
			}
			lastMtu = mtu
		}
		return nil
	}
	err = doIt()
	if err != nil {
		return nil, err
	}
	cb, err := winipcfg.RegisterRouteChangeCallback(func(notificationType winipcfg.MibNotificationType, route *winipcfg.Route) {
		//fmt.Printf("MonitorDefaultRoutes: changed: %v\n", route.DestinationPrefix)
		// Only re-evaluate when a default route changed.
		if route.DestinationPrefix.PrefixLength == 0 {
			_ = doIt()
		}
	})
	if err != nil {
		return nil, err
	}
	return cb, nil
}
// setDNSDomains writes the per-interface DNS search domain for the
// adapter with GUID g into the registry. Windows only supports a single
// per-interface domain, so only the first entry of dnsDomains is used
// (an empty list clears the value). Errors are logged, not returned —
// DNS domain setup is best-effort.
func setDNSDomains(g windows.GUID, dnsDomains []string) {
	gs := g.String()
	log.Printf("setDNSDomains(%v) guid=%v\n", dnsDomains, gs)
	p := `SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces\` + gs
	key, err := registry.OpenKey(registry.LOCAL_MACHINE, p, registry.READ|registry.SET_VALUE)
	if err != nil {
		log.Printf("setDNSDomains(%v): open: %v\n", p, err)
		return
	}
	defer key.Close()
	// Windows only supports a single per-interface DNS domain.
	dom := ""
	if len(dnsDomains) > 0 {
		dom = dnsDomains[0]
	}
	err = key.SetStringValue("Domain", dom)
	if err != nil {
		log.Printf("setDNSDomains(%v): SetStringValue: %v\n", p, err)
	}
}
// setFirewall locates the network connection whose adapter matches
// ifcGUID and forces its network category to private (1) if it is
// currently public (0), so Windows Firewall applies the private profile.
// It returns (true, nil) if the adapter was found, (false, nil) if not
// yet visible (the caller polls), and a non-nil error on COM failures.
func setFirewall(ifcGUID *windows.GUID) (bool, error) {
	c := ole.Connection{}
	err := c.Initialize()
	if err != nil {
		return false, fmt.Errorf("c.Initialize: %v", err)
	}
	defer c.Uninitialize()
	m, err := winnet.NewNetworkListManager(&c)
	if err != nil {
		return false, fmt.Errorf("winnet.NewNetworkListManager: %v", err)
	}
	defer m.Release()
	cl, err := m.GetNetworkConnections()
	if err != nil {
		return false, fmt.Errorf("m.GetNetworkConnections: %v", err)
	}
	defer cl.Release()
	for _, nco := range cl {
		aid, err := nco.GetAdapterId()
		if err != nil {
			return false, fmt.Errorf("nco.GetAdapterId: %v", err)
		}
		if aid != ifcGUID.String() {
			log.Printf("skipping adapter id: %v\n", aid)
			continue
		}
		log.Printf("found! adapter id: %v\n", aid)
		n, err := nco.GetNetwork()
		if err != nil {
			return false, fmt.Errorf("GetNetwork: %v", err)
		}
		// defer-in-loop is fine here: the function returns on this
		// iteration, so the Release fires promptly.
		defer n.Release()
		cat, err := n.GetCategory()
		if err != nil {
			return false, fmt.Errorf("GetCategory: %v", err)
		}
		if cat == 0 {
			err = n.SetCategory(1)
			if err != nil {
				return false, fmt.Errorf("SetCategory: %v", err)
			}
		} else {
			log.Printf("setFirewall: already category %v\n", cat)
		}
		return true, nil
	}
	return false, nil
}
// configureInterface applies cfg to the wintun interface: firewall
// category (async, polled), DNS search domain, local addresses, routes
// (sorted and deduplicated), DNS servers, and per-family IP interface
// settings (metric/MTU). It accumulates the first non-fatal error in
// errAcc and keeps going, so as much configuration as possible lands.
func configureInterface(cfg *Config, tun *tun.NativeTun) error {
	const mtu = 0
	guid := tun.GUID()
	log.Printf("wintun GUID is %v\n", guid)
	iface, err := winipcfg.InterfaceFromGUID(&guid)
	if err != nil {
		return err
	}
	go func() {
		// It takes a weirdly long time for Windows to notice the
		// new interface has come up. Poll periodically until it
		// does.
		for i := 0; i < 20; i++ {
			found, err := setFirewall(&guid)
			if err != nil {
				log.Printf("setFirewall: %v\n", err)
				// fall through anyway, this isn't fatal.
			}
			if found {
				break
			}
			time.Sleep(1 * time.Second)
		}
	}()
	setDNSDomains(guid, cfg.DNSDomains)
	routes := []winipcfg.RouteData{}
	var firstGateway4 *net.IP
	var firstGateway6 *net.IP
	addresses := make([]*net.IPNet, len(cfg.LocalAddrs))
	// The first local v4/v6 address doubles as the gateway for interface
	// routes of that family.
	for i, addr := range cfg.LocalAddrs {
		ipnet := addr.IPNet()
		addresses[i] = ipnet
		gateway := ipnet.IP
		if addr.IP.Is4() && firstGateway4 == nil {
			firstGateway4 = &gateway
		} else if addr.IP.Is6() && firstGateway6 == nil {
			firstGateway6 = &gateway
		}
	}
	foundDefault4 := false
	foundDefault6 := false
	for _, route := range cfg.Routes {
		if (route.IP.Is4() && firstGateway4 == nil) || (route.IP.Is6() && firstGateway6 == nil) {
			return errors.New("Due to a Windows limitation, one cannot have interface routes without an interface address")
		}
		ipn := route.IPNet()
		var gateway net.IP
		if route.IP.Is4() {
			gateway = *firstGateway4
		} else if route.IP.Is6() {
			gateway = *firstGateway6
		}
		r := winipcfg.RouteData{
			Destination: net.IPNet{
				IP:   ipn.IP.Mask(ipn.Mask),
				Mask: ipn.Mask,
			},
			NextHop: gateway,
			Metric:  0,
		}
		if bytes.Compare(r.Destination.IP, gateway) == 0 {
			// no need to add a route for the interface's
			// own IP. The kernel does that for us.
			// If we try to replace it, we'll fail to
			// add the route unless NextHop is set, but
			// then the interface's IP won't be pingable.
			continue
		}
		// Track whether a default route (/0) exists per family; used for
		// metric handling below.
		if route.IP.Is4() {
			if route.Bits == 0 {
				foundDefault4 = true
			}
			r.NextHop = *firstGateway4
		} else if route.IP.Is6() {
			if route.Bits == 0 {
				foundDefault6 = true
			}
			r.NextHop = *firstGateway6
		}
		routes = append(routes, r)
	}
	err = iface.SyncAddresses(addresses)
	if err != nil {
		return err
	}
	// NOTE(review): this OR-chained comparator does not look like a
	// strict weak ordering (later clauses fire even when an earlier field
	// compares greater), so the sort order — and which duplicate survives
	// below — may be unstable; confirm intended semantics.
	sort.Slice(routes, func(i, j int) bool {
		return (bytes.Compare(routes[i].Destination.IP, routes[j].Destination.IP) == -1 ||
			// Narrower masks first
			bytes.Compare(routes[i].Destination.Mask, routes[j].Destination.Mask) == 1 ||
			// No nexthop before non-empty nexthop
			bytes.Compare(routes[i].NextHop, routes[j].NextHop) == -1 ||
			// Lower metrics first
			routes[i].Metric < routes[j].Metric)
	})
	deduplicatedRoutes := []*winipcfg.RouteData{}
	for i := 0; i < len(routes); i++ {
		// There's only one way to get to a given IP+Mask, so delete
		// all matches after the first.
		if i > 0 &&
			bytes.Equal(routes[i].Destination.IP, routes[i-1].Destination.IP) &&
			bytes.Equal(routes[i].Destination.Mask, routes[i-1].Destination.Mask) {
			continue
		}
		deduplicatedRoutes = append(deduplicatedRoutes, &routes[i])
	}
	log.Printf("routes: %v\n", routes)
	var errAcc error
	err = iface.SyncRoutes(deduplicatedRoutes)
	if err != nil && errAcc == nil {
		log.Printf("setroutes: %v\n", err)
		errAcc = err
	}
	var dnsIPs []net.IP
	for _, ip := range cfg.DNS {
		dnsIPs = append(dnsIPs, ip.IPAddr().IP)
	}
	err = iface.SetDNS(dnsIPs)
	if err != nil && errAcc == nil {
		log.Printf("setdns: %v\n", err)
		errAcc = err
	}
	ipif, err := iface.GetIpInterface(winipcfg.AF_INET)
	if err != nil {
		log.Printf("getipif: %v\n", err)
		return err
	}
	log.Printf("foundDefault4: %v\n", foundDefault4)
	if foundDefault4 {
		// Pin metric 0 so our default route wins.
		ipif.UseAutomaticMetric = false
		ipif.Metric = 0
	}
	if mtu > 0 {
		ipif.NlMtu = uint32(mtu)
		tun.ForceMTU(int(ipif.NlMtu))
	}
	err = ipif.Set()
	if err != nil && errAcc == nil {
		errAcc = err
	}
	ipif, err = iface.GetIpInterface(winipcfg.AF_INET6)
	if err != nil {
		return err
	}
	// NOTE(review): this second err check duplicates the return just
	// above and is unreachable (err is known nil here) — likely leftover.
	if err != nil && errAcc == nil {
		errAcc = err
	}
	if foundDefault6 {
		ipif.UseAutomaticMetric = false
		ipif.Metric = 0
	}
	if mtu > 0 {
		ipif.NlMtu = uint32(mtu)
	}
	// Disable duplicate address detection and router discovery on the
	// tunnel's v6 side.
	ipif.DadTransmits = 0
	ipif.RouterDiscoveryBehavior = winipcfg.RouterDiscoveryDisabled
	err = ipif.Set()
	if err != nil && errAcc == nil {
		errAcc = err
	}
	return errAcc
}
|
package simple
import (
"testing"
)
// TestPrepend verifies that Prepend inserts the new values ahead of the
// existing elements, preserving overall order.
func TestPrepend(t *testing.T) {
	e := []int{2, 4, 6, 8, 10, 12, 14, 16}
	s := []int{10, 12, 14, 16}
	s = Prepend(s, 2, 4, 6, 8)
	if len(s) != len(e) {
		// BUG FIX: report the actual length len(s); the original printed
		// len(e), which is always 8 and hides the real failure.
		t.Error("Expected slice len to be 8, got", len(s))
	}
	for i := range s {
		if s[i] != e[i] {
			t.Error("Not expecting", s[i])
		}
	}
}
// BenchmarkPrepend measures Prepend on a fixed four-element slice.
func BenchmarkPrepend(b *testing.B) {
	s := []int{10, 12, 14, 16}
	for n := 0; n < b.N; n++ {
		// BUG FIX: do not feed the result back into s — the original did,
		// so the input grew by four elements every iteration and the
		// benchmark measured ever-larger slices instead of a constant case.
		_ = Prepend(s, 2, 4, 6, 8)
	}
}
// TestAppend verifies that Append places the new values after the
// existing elements, preserving overall order.
func TestAppend(t *testing.T) {
	e := []int{2, 4, 6, 8, 10, 12, 14, 16}
	s := []int{2, 4, 6, 8}
	s = Append(s, 10, 12, 14, 16)
	if len(s) != len(e) {
		// BUG FIX: report the actual length len(s); the original printed
		// len(e), which is always 8 and hides the real failure.
		t.Error("Expected slice len to be 8, got", len(s))
	}
	for i := range s {
		if s[i] != e[i] {
			t.Error("Not expecting", s[i])
		}
	}
}
// BenchmarkAppend measures Append on a fixed four-element slice.
func BenchmarkAppend(b *testing.B) {
	s := []int{2, 4, 6, 8}
	for n := 0; n < b.N; n++ {
		// BUG FIX: do not feed the result back into s — the original did,
		// so the input grew every iteration and skewed the measurement.
		_ = Append(s, 10, 12, 14, 16)
	}
}
|
package controllers
// Hub fans broadcast payloads out to a set of connected clients and
// retains the most recent payload so it can be replayed to newcomers.
type Hub struct {
	clients    map[*client]bool // set of registered clients
	broadcast  chan []byte      // inbound payloads to fan out
	register   chan *client     // clients joining
	unregister chan *client     // clients leaving
	content    []byte           // last broadcast payload, replayed on register
}
// h is the single process-wide hub shared by all callers of InitHub.
var h = Hub{
	broadcast:  make(chan []byte),
	register:   make(chan *client),
	unregister: make(chan *client),
	clients:    make(map[*client]bool),
}

// InitHub returns the shared hub singleton. Despite its name it performs
// no work — h is fully constructed at package load time.
func InitHub() *Hub {
	return &h
}
// Run dispatches hub events forever: it registers/unregisters clients and
// fans broadcast payloads out to everyone. Intended to run in its own
// goroutine. (Idiom fix: the original had a redundant `break` at the end
// of every select case — Go select cases never fall through.)
func (h *Hub) Run() {
	for {
		select {
		case c := <-h.register:
			h.clients[c] = true
			// Replay the most recent content to the newcomer.
			c.send <- h.content
		case c := <-h.unregister:
			if _, ok := h.clients[c]; ok {
				delete(h.clients, c)
				close(c.send)
			}
		case m := <-h.broadcast:
			// Remember the payload so late joiners receive it on register.
			h.content = m
			h.broadcastMessage()
		}
	}
}
// broadcastMessage sends the current content to every registered client.
// A client whose send channel would block is assumed unreachable: its
// channel is closed and it is dropped from the hub. (Idiom fix: removed
// the redundant `break` inside the select case.)
func (h *Hub) broadcastMessage() {
	for c := range h.clients {
		select {
		case c.send <- h.content:
		// We can't reach the client
		default:
			close(c.send)
			delete(h.clients, c)
		}
	}
}
|
package main
import (
"container/heap"
"fmt"
"sort"
)
// main runs maxScore on a small fixed example and prints the result.
func main() {
	nums1 := []int{1, 3, 3, 2}
	nums2 := []int{2, 1, 3, 4}
	fmt.Println(maxScore(nums1, nums2, 3))
}
// maxScore returns the maximum value of (sum of k chosen nums1 values) *
// (minimum of the corresponding nums2 values). Strategy: sort pairs by
// nums2 descending so each candidate's nums2 value is the running minimum,
// and keep the k largest nums1 values seen so far in a min-heap.
func maxScore(nums1, nums2 []int, k int) int64 {
	type pair [2]int
	a := make([]pair, len(nums1))
	sum := 0
	for i, x := range nums1 {
		a[i] = pair{x, nums2[i]}
	}
	// Sort by the nums2 component, descending.
	sort.Slice(a, func(i, j int) bool { return a[i][1] > a[j][1] })
	h := hp{nums2[:k]} // reuse nums2's memory for the heap backing array
	for i, p := range a[:k] {
		sum += p[0]
		h.IntSlice[i] = p[0]
	}
	// First candidate: the k pairs with the largest nums2 values;
	// a[k-1][1] is their minimum nums2.
	ans := sum * a[k-1][1]
	heap.Init(&h)
	for _, p := range a[k:] {
		// Only a nums1 value larger than the current heap minimum can
		// improve the sum.
		if p[0] > h.IntSlice[0] {
			sum += p[0] - h.replace(p[0])
			ans = max(ans, sum*p[1])
		}
	}
	return int64(ans)
}
type hp struct{ sort.IntSlice }
func (hp) Pop() (_ interface{}) { return }
func (hp) Push(interface{}) {}
func (h hp) replace(v int) int {
top := h.IntSlice[0]
h.IntSlice[0] = v
heap.Fix(&h, 0)
return top
}
// max returns the larger of a and b.
func max(a, b int) int {
	if a >= b {
		return a
	}
	return b
}
|
package middlewares
import (
"context"
"github.com/go-playground/validator/v10"
"github.com/jybbang/go-core-architecture/core"
)
// validationMiddleware validates incoming requests with go-playground
// validator before handing them to the next middleware in the chain.
type validationMiddleware struct {
	core.Middleware
	validate *validator.Validate // shared validator instance, created once
}
// NewValidationMiddleware builds the middleware with a fresh validator
// instance.
func NewValidationMiddleware() *validationMiddleware {
	return &validationMiddleware{
		validate: validator.New(),
	}
}
// Run validates the request struct's validation tags. Any violation is
// collapsed to ErrBadRequest — NOTE(review): the specific field errors
// are discarded here; consider surfacing them to API consumers.
func (m *validationMiddleware) Run(ctx context.Context, request core.Request) core.Result {
	if err := m.validate.Struct(request); err != nil {
		return core.Result{E: core.ErrBadRequest}
	}
	return m.Next()
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"math/rand"
"ms/sun/shared/helper"
"ms/sun/servises/file_service_old"
"net/http"
"time"
)
// cnt counts inserted rows (starts at 1); size accumulates the total
// bytes of image data read so far.
var cnt int = 1
var size int = 0
// main starts the old file service, exposes a /hi endpoint, fires a
// smoke-test GET after one second, and serves on :5151.
func main() {
	Insert_many(0)
	file_service_old.Run()
	http.HandleFunc("/hi", func(writer http.ResponseWriter, r *http.Request) {
		writer.Write([]byte("hi"))
	})
	go func() {
		time.Sleep(time.Second)
		// BUG FIX: check the error and close the response body so the
		// connection is released; the original discarded both.
		resp, err := http.Get("http://localhost:5151/post_file/1518506476136010007_180.jpg")
		if err == nil {
			resp.Body.Close()
		}
	}()
	// BUG FIX: ListenAndServe's error (e.g. port already in use) was
	// silently discarded; report it so startup failures are visible.
	if err := http.ListenAndServe(":5151", nil); err != nil {
		fmt.Println("ListenAndServe:", err)
	}
}
// Insert_many inserts num message/post image pairs, logging the loop
// index and the cumulative payload size in megabytes after each pair.
func Insert_many(num int) {
	for i := 0; i < num; i++ {
		insertMsg()
		insertPost()
		fmt.Println("cnt: ", i, " size:(mb)", size/1000000)
	}
}
// insertPost stores one random sample image as a post-file row; read
// failures are silently skipped.
func insertPost() {
	_, data, err := RandImage()
	if err != nil {
		return
	}
	row := file_service_old.Row{
		Id:        helper.NextRowsSeqId(),
		Data:      data,
		FileType:  1,
		Extension: ".jpg",
		DataThumb: []byte{},
	}
	cnt++
	file_service_old.SavePostFile(row)
}
// insertMsg stores one random sample image as a message-file row; read
// failures are silently skipped.
func insertMsg() {
	_, data, err := RandImage()
	if err != nil {
		return
	}
	row := file_service_old.Row{
		Id:        helper.NextRowsSeqId(),
		Data:      data,
		FileType:  1,
		Extension: ".jpg",
		DataThumb: []byte{},
	}
	cnt++
	file_service_old.SaveMsgFile(row)
}
// RandImage picks a random file from the local samples directory and
// returns its path (with the leading drive prefix stripped) plus its
// contents, accumulating the global size counter.
// BUG FIX: failures are now returned to the caller instead of killing
// the whole process with log.Fatal — the callers already check err.
func RandImage() (fn string, bs []byte, err error) {
	const dir = `C:\Go\_gopath\src\ms\sun\upload\samples`
	imageFiles, err := ioutil.ReadDir(dir)
	if err != nil {
		log.Println("RandImage: ReadDir:", err)
		return "", nil, err
	}
	fn = dir + "/" + imageFiles[rand.Intn(len(imageFiles))].Name()
	bs, err = ioutil.ReadFile(fn)
	if err != nil {
		log.Println("RandImage: ReadFile:", err)
		return "", nil, err
	}
	size += len(bs)
	// fn[2:] drops the leading "C:" — presumably consumers expect a
	// drive-less path; TODO confirm against callers.
	return fn[2:], bs, nil
}
|
package main
// Animal is the target interface of the adapter example: anything that
// can Move.
type Animal interface {
	Move()
}
// Cat is a concrete animal since it implements the method Move.
type Cat struct{}

func (c *Cat) Move() {}
// Somewhere in the code we need the Crocodile type, which is often not
// our own code. Crocodile does not implement the Animal interface, yet
// we still need to use a crocodile as an animal.
type Crocodile struct{}

func (c *Crocodile) Slither() {}
// CrocodileAdapter adapts an embedded Crocodile so that it can be used
// as an Animal.
type CrocodileAdapter struct {
	*Crocodile
}
// NewCrocodile wraps a fresh Crocodile in its Animal adapter.
func NewCrocodile() *CrocodileAdapter {
	return &CrocodileAdapter{Crocodile: &Crocodile{}}
}
// Move satisfies Animal by delegating to the crocodile's native Slither.
// Idiom fix: the receiver was named "this"; Go convention is a short
// name derived from the type, used consistently.
func (ca *CrocodileAdapter) Move() {
	ca.Slither()
}
// main treats a native Cat and an adapted Crocodile uniformly through
// the Animal interface.
func main() {
	zoo := []Animal{new(Cat), NewCrocodile()}
	for _, a := range zoo {
		a.Move()
	}
}
|
package handler
import (
"github.com/gin-gonic/gin"
"log"
"os"
)
// strToBool reports whether s is exactly the string "true". Any other
// value — including "True" or "1" — yields false.
// Idiom fix: a boolean expression is returned directly instead of the
// if/return-true/return-false pattern.
func strToBool(s string) bool {
	return s == "true"
}
// deleteImg removes the image file at imgSrc, logging and returning any
// removal error.
func deleteImg(imgSrc string) error {
	if err := os.Remove(imgSrc); err != nil {
		log.Println("Error: while deleting image. Error is ", err)
		return err
	}
	return nil
}
// getImg serves the static file ./images/<f2>/<img> built from two route
// parameters.
// NOTE(review): both path segments come straight from the URL. Gin route
// params normally cannot contain "/", but if either could ever contain
// ".." this would allow path traversal — confirm and sanitize if needed.
func (h *Handler) getImg(c *gin.Context) {
	imgPath := c.Param("img")
	//f1 := "images"
	f2 := c.Param("f2")
	c.File("./images/" + f2 + "/" + imgPath)
}
|
package repository
import (
"github.com/kiryalovik/gameoflife/cellular/model"
)
// DummyRepo is an in-memory repository stub that remembers only the most
// recently created field.
type DummyRepo struct {
	currentField model.BinaryField // the single stored field
}
// NewDummyRepo returns an empty in-memory repository stub.
func NewDummyRepo() *DummyRepo {
	return &DummyRepo{}
}
// ToDo replace every create with Upsert
// Create stores field as the single current field, replacing any
// previous one. It never fails.
func (r *DummyRepo) Create(field model.BinaryField) error {
	r.currentField = field
	return nil
}
// Delete is a no-op in this stub: the stored field is never removed.
// NOTE(review): value receiver here vs pointer receiver on Create —
// consider making receivers consistent across the type.
func (r DummyRepo) Delete(ID model.Hash) error {
	return nil
}
// Find returns the stored field when its ID matches. On a miss it still
// returns the (stale) stored field alongside ErrNotFound, so callers
// must check the error before using the value.
func (r DummyRepo) Find(ID model.Hash) (model.BinaryField, error) {
	if r.currentField.ID == ID {
		return r.currentField, nil
	}
	return r.currentField, ErrNotFound
}
// UpdateNext is a no-op in this stub; the next-hash link is not tracked.
func (r DummyRepo) UpdateNext(ID model.Hash, next model.Hash) error {
	return nil
}
|
package main
import (
"fmt"
"net/http"
"os"
controller "github.com/danielTiringer/Go-Many-Ways/rest-api/controller"
router "github.com/danielTiringer/Go-Many-Ways/rest-api/http"
repository "github.com/danielTiringer/Go-Many-Ways/rest-api/repository"
service "github.com/danielTiringer/Go-Many-Ways/rest-api/service"
)
// Composition root: repository, service, controller and router are wired
// once at package level.
// NOTE(review): package-level mutable singletons make testing harder;
// explicit wiring inside main would be preferable.
var (
	postRepository repository.PostRepository = repository.NewSQLiteRepository()
	postService    service.PostService       = service.NewPostService(postRepository)
	postController controller.PostController = controller.NewPostController(postService)
	httpRouter     router.Router             = router.NewMuxRouter()
)
// main registers the health-check and post routes, then serves on the
// port taken from the PORT environment variable.
func main() {
	httpRouter.GET("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "Up and running...")
	})
	httpRouter.GET("/posts", postController.GetPosts)
	httpRouter.GET("/posts/{id}", postController.GetPostByID)
	httpRouter.POST("/posts", postController.AddPost)
	// NOTE(review): if PORT is unset this becomes ":" — confirm the
	// router surfaces a useful error in that case.
	httpRouter.SERVE(":" + os.Getenv("PORT"))
}
|
// colly define the flow to file collecting
// and cache policy
package colly
import (
"os"
"log"
"fmt"
"sync"
"context"
"io/ioutil"
"github.com/pkg/errors"
"github.com/go-redis/redis"
"gopkg.in/natefinch/lumberjack.v2"
"github.com/smileboywtu/FileColly/common"
"time"
)
// logger is the package-wide logger, assigned by InitLogger.
var logger *log.Logger

// Collector walks a directory, encodes matching files and ships them to
// a Redis-backed queue.
type Collector struct {
	sync.RWMutex // guards filters and FileCount
	// App Configs
	UserConfigs *AppConfigOption
	// File Walker Instance
	FileWalkerInst *FileWalker
	// File filters
	Rule    Rule
	filters []FilterFuncs
	// Files Deal numbers
	FileCount int64
	ctx       context.Context
	// cancleFunc (sic — misspelling kept because sibling methods
	// reference this field name) cancels ctx on shutdown.
	cancleFunc context.CancelFunc
}
func InitLogger(logFile string) {
fd, err := os.OpenFile(logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
if err != nil {
fmt.Fprintf(os.Stderr, "open log file error")
}
logger = log.New(fd, "collector: ", log.Lshortfile)
logger.SetOutput(&lumberjack.Logger{
Filename: logFile,
MaxSize: 500, // megabytes
MaxBackups: 3,
MaxAge: 28, //days
Compress: true, // disabled by default
})
}
// NewCollector builds a Collector for the configured directory, sharing
// a cancellable context with its file walker. The returned error is
// currently always nil.
func NewCollector(opts *AppConfigOption) (*Collector, error) {
	ctx, cancel := context.WithCancel(context.Background())
	InitLogger(opts.LogFileName)
	rule := Rule{
		FileSizeLimit:   common.HumanSize2Bytes(opts.FileMaxSize),
		ReserveFile:     opts.ReserveFile,
		CollectWaitTime: opts.ReadWaitTime,
		AllowEmpty:      false,
	}
	collector := &Collector{
		UserConfigs:    opts,
		FileWalkerInst: NewDirectoryWorker(opts.CollectDirectory, opts.ReaderMaxWorkers, rule, ctx),
		FileCount:      0,
		Rule:           rule,
		filters:        make([]FilterFuncs, 0, 8),
		ctx:            ctx,
		cancleFunc:     cancel,
	}
	return collector, nil
}
// OnFilter registers an additional filter callback under the write lock.
func (c *Collector) OnFilter(callback FilterFuncs) {
	c.Lock()
	defer c.Unlock()
	c.filters = append(c.filters, callback)
}
// GetFileCount returns the current file counter.
// BUG FIX: the read was unsynchronized while IncreaseFileCount and
// CountClear write under the lock — a data race under -race. Take the
// read lock.
func (c *Collector) GetFileCount() int64 {
	c.RLock()
	defer c.RUnlock()
	return c.FileCount
}
// IncreaseFileCount adds n to the shared file counter under the lock.
func (c *Collector) IncreaseFileCount(n int) {
	c.Lock()
	defer c.Unlock()
	c.FileCount += int64(n)
}
// CountClear resets the shared file counter to zero under the lock.
func (c *Collector) CountClear() {
	c.Lock()
	defer c.Unlock()
	c.FileCount = 0
}
// sendPoll delivers item to result, giving up if the collector's context
// is cancelled first — this prevents encoder goroutines from blocking
// forever on shutdown.
func (c *Collector) sendPoll(result chan<- EncodeResult, item EncodeResult) {
	select {
	case result <- item:
	case <-c.ctx.Done():
		return
	}
}
// encodeFlow reads each incoming file, encodes its content, and emits
// one EncodeResult per file onto result. Filtered-out and unreadable
// files produce an error result instead.
func (c *Collector) encodeFlow(fileItems <-chan FileItem, result chan<- EncodeResult) {
	for item := range fileItems {
		if !c.GetMatch(item.FilePath) {
			c.sendPoll(result, EncodeResult{item.FilePath, "", errors.New("file not match")})
			continue
		}
		data, err := ioutil.ReadFile(item.FilePath)
		if err != nil {
			c.sendPoll(result, EncodeResult{item.FilePath, "", err})
			// BUG FIX: the original fell through here and emitted a
			// second, bogus result (encoding nil data) for the same
			// unreadable file; skip to the next item instead.
			continue
		}
		encoder := &FileContentEncoder{
			FilePath:    item.FileIndex,
			FileContent: make([]byte, len(data)),
		}
		copy(encoder.FileContent, data)
		packBytes, err := encoder.Encode()
		c.sendPoll(result, EncodeResult{item.FilePath, packBytes, err})
	}
}
// Start runs one collection pass: the walker feeds file items to
// ReaderMaxWorkers encoder goroutines, whose results are drained by
// sendFlow. Any walk error is reported once the drain completes.
func (c *Collector) Start() {
	var wg sync.WaitGroup
	buffers := make(chan EncodeResult)
	fileItems, errc := c.FileWalkerInst.Walk()
	wg.Add(c.UserConfigs.ReaderMaxWorkers)
	for i := 0; i < c.UserConfigs.ReaderMaxWorkers; i++ {
		go func() {
			c.encodeFlow(fileItems, buffers)
			wg.Done()
		}()
	}
	// Close buffers once all encoders finish so sendFlow's receivers stop.
	go func() {
		wg.Wait()
		close(buffers)
	}()
	// wait all buffer deal done
	c.sendFlow(buffers)
	if err := <-errc; err != nil {
		fmt.Println(err.Error())
		logger.Println(err.Error())
	}
}
// sendFlow drains encoded results into the destination Redis queue and
// removes each successfully queued file from disk. When the queue is at
// its configured limit, results are dropped and files are left in place
// for a later pass.
func (c *Collector) sendFlow(buffers <-chan EncodeResult) {
	redisOpts := &redis.Options{
		Addr:       fmt.Sprintf("%s:%d", c.UserConfigs.RedisHost, c.UserConfigs.RedisPort),
		DB:         c.UserConfigs.RedisDB,
		Password:   c.UserConfigs.RedisPW,
		MaxRetries: 3,
	}
	backend, errs := NewRedisWriter(
		redisOpts,
		c.UserConfigs.DestinationRedisQueueName,
		c.UserConfigs.DestinationRedisQueueLimit)
	if backend == nil || errs != nil {
		log.Fatal("redis connect error", errs)
	}
	defer backend.Client.Close()
	// Seed the counter from the queue's current size.
	c.CountClear()
	c.IncreaseFileCount(int(backend.GetDestQueueSize()))
	var wg sync.WaitGroup
	wg.Add(c.UserConfigs.SenderMaxWorkers)
	for i := 0; i < c.UserConfigs.SenderMaxWorkers; i++ {
		go func() {
			for r := range buffers {
				// NOTE(review): c.FileCount is read here without the
				// lock while sibling goroutines write it under the lock —
				// looks like a data race; confirm and use GetFileCount.
				if c.FileCount > int64(c.UserConfigs.DestinationRedisQueueLimit) {
					if c.FileCount-int64(c.UserConfigs.DestinationRedisQueueLimit) > 10 {
						// Far past the limit: resync the counter from Redis.
						c.CountClear()
						c.IncreaseFileCount(int(backend.GetDestQueueSize()))
					} else {
						c.IncreaseFileCount(1)
						logger.Println("destination redis queue if full")
						continue
					}
				}
				if r.Err == nil {
					c.IncreaseFileCount(1)
					backend.SendFileContent(r.EncodeContent)
					// Best-effort removal once the content is queued.
					os.Remove(r.Path)
					logger.Println("send file: ", r.Path)
				}
			}
			wg.Done()
		}()
	}
	wg.Wait()
	logger.Printf("current time: %s, send file total: %d", time.Now().Format("2006-01-02T15:04:05"), c.FileCount)
}
// GetMatch reports whether every registered filter accepts filepath.
// With no filters registered, every file matches.
// Fixes: the len(c.filters) > 0 guard was redundant (range over an empty
// slice is a no-op), and the slice was read without synchronization
// while OnFilter appends under the write lock — take the read lock.
func (c *Collector) GetMatch(filepath string) bool {
	c.RLock()
	defer c.RUnlock()
	for _, filterFunc := range c.filters {
		if !filterFunc(filepath, c.Rule) {
			return false
		}
	}
	return true
}
// ShutDown stops the collect daemon by cancelling the shared context,
// which unblocks the walker and any pending sendPoll calls.
func (c *Collector) ShutDown() {
	c.cancleFunc()
}
|
package bybus
import (
"bylib/bylog"
"bylib/byutils"
"encoding/binary"
"errors"
"fmt"
"github.com/tbrandon/mbserver"
"sync"
)
// MBTcpServer wraps an mbserver Modbus/TCP server and dispatches
// holding-register reads/writes to per-address handler callbacks.
type MBTcpServer struct {
	mbserv           *mbserver.Server // underlying modbus server
	holdMux          sync.Mutex       // serializes holding-register access
	readWriteHandler map[uint16]*ModbusReadWriteHandler
}
// RegisterHandler attaches read/write callbacks for the `quality`
// registers starting at addr.
// BUG FIX: the handler map is read by the modbus frame handlers under
// holdMux; the write here was unguarded, so registering a handler after
// ListenTCP was a data race. Take the same lock.
func (s *MBTcpServer) RegisterHandler(addr, quality uint16, readFunc ModbusReadHandleFunc, writeFunc ModbusWriteHandleFunc) {
	s.holdMux.Lock()
	defer s.holdMux.Unlock()
	s.readWriteHandler[addr] = &ModbusReadWriteHandler{
		Addr:      addr,
		Quality:   quality,
		ReadFunc:  readFunc,
		WriteFunc: writeFunc,
	}
}
// WriteHoldingRegisters updates the server-side holding registers
// starting at address, under the holding-register lock.
// BUG FIX: an address/length pair past the end of the register array
// previously panicked with an index error; it now returns an error.
func (s *MBTcpServer) WriteHoldingRegisters(address uint16, values []uint16) error {
	s.holdMux.Lock()
	defer s.holdMux.Unlock()
	reg := int(address)
	if reg+len(values) > len(s.mbserv.HoldingRegisters) {
		return errors.New("WriteHoldingRegisters address out of range")
	}
	for i, r := range values {
		s.mbserv.HoldingRegisters[reg+i] = r
	}
	return nil
}
// ReadInputRegsToBuffer returns nr registers starting at addr+reg as a
// big-endian byte buffer.
// NOTE(review): despite the name it reads the *holding* registers, and
// an out-of-range span will panic — confirm callers validate the range.
func (s *MBTcpServer) ReadInputRegsToBuffer(addr int, reg int, nr int) []byte {
	register := addr + reg
	endRegister := register + nr
	return mbserver.Uint16ToBytes(s.mbserv.HoldingRegisters[register:endRegister])
}
// Close shuts down the underlying modbus server, if one was started.
func (s *MBTcpServer) Close() {
	if s.mbserv == nil {
		return
	}
	s.mbserv.Close()
}
// registerAddressAndValues decodes a write-multiple-registers (0x10)
// frame: the start register from bytes 0-1 and the register values from
// byte 5 onward (after count and byte-count fields).
// NOTE(review): the import path is "bylib/byutils" while this call uses
// package name "byutil" — presumably the package name differs from its
// directory; confirm this compiles as-is.
func registerAddressAndValues(frame mbserver.Framer) (int, []uint16) {
	data := frame.GetData()
	register := int(binary.BigEndian.Uint16(data[0:2]))
	return register, byutil.MbBytesToUint16(data[5:])
}
// registerAddressAndValue decodes a write-single-register (0x06) frame
// into its register address and 16-bit value.
func registerAddressAndValue(frame mbserver.Framer) (int, uint16) {
	payload := frame.GetData()
	addr := int(binary.BigEndian.Uint16(payload[0:2]))
	val := binary.BigEndian.Uint16(payload[2:4])
	return addr, val
}
// registerAddressAndNumber decodes a read-request frame into the start
// register, register count and the exclusive end register. For TCP
// frames the unit (device) id is also extracted into addr.
func registerAddressAndNumber(frame mbserver.Framer) (addr, register int, numRegs int, endRegister int) {
	payload := frame.GetData()
	register = int(binary.BigEndian.Uint16(payload[0:2]))
	numRegs = int(binary.BigEndian.Uint16(payload[2:4]))
	endRegister = register + numRegs
	if tcp, ok := frame.(*mbserver.TCPFrame); ok {
		addr = int(tcp.Device)
	}
	return
}
// readHoldingRegisters refreshes handler-backed registers within
// [address, address+quantity) by invoking their ReadFuncs, then returns
// that span of the holding array. Registers without a read handler keep
// their stored value. Caller must hold holdMux.
func (mb *MBTcpServer) readHoldingRegisters(address, quantity uint16) ([]uint16, error) {
	for reg := address; reg < (address + quantity); {
		if v, ok := mb.readWriteHandler[reg]; ok {
			if v.ReadFunc != nil {
				values, err := v.ReadFunc()
				if err != nil {
					return nil, err
				}
				// NOTE(review): copy is bounded only by the destination
				// slice, so a handler returning more than Quality values
				// would spill past its declared span — confirm handlers
				// honor their Quality.
				copy(mb.mbserv.HoldingRegisters[reg:], values)
				reg += v.Quality
				continue
			}
		}
		reg += 1
	}
	return mb.mbserv.HoldingRegisters[address : address+quantity], nil
}
// writeHoldingRegisters dispatches the written values to any registered
// write handlers; each handler consumes Quality registers starting at
// its address. Registers without a handler are skipped. Caller must
// hold holdMux.
func (mb *MBTcpServer) writeHoldingRegisters(address uint16, values []uint16) error {
	for reg := address; reg < (address + uint16(len(values))); {
		if v, ok := mb.readWriteHandler[reg]; ok {
			if v.WriteFunc != nil {
				off := reg - address
				// BUG FIX: the original only checked off > len(values), so a
				// handler whose Quality extended past the written range made
				// values[off:off+v.Quality] panic. Validate the full span.
				if off+v.Quality > uint16(len(values)) {
					return errors.New("writeHoldingRegisters address invalid")
				}
				// Each handler interprets its own span of registers.
				if err := v.WriteFunc(values[off : off+v.Quality]); err != nil {
					return err
				}
				reg += v.Quality
				continue
			}
		}
		reg += 1
	}
	return nil
}
// handleWriteHolding services function 0x06 (write single register): the
// value is forwarded to any registered write handler, then the stock
// mbserver implementation stores it and builds the reply frame.
func (mb *MBTcpServer) handleWriteHolding(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) {
	mb.holdMux.Lock()
	defer mb.holdMux.Unlock()
	register, value := registerAddressAndValue(frame)
	if err := mb.writeHoldingRegisters(uint16(register), []uint16{value}); err != nil {
		bylog.Error("writeHoldingRegisters err=%v", err)
	}
	return mbserver.WriteHoldingRegister(s, frame)
}
// handleWriteMultiHolding services function 0x10 (write multiple
// registers): the values are forwarded to registered write handlers,
// then the stock mbserver implementation stores them and replies.
func (mb *MBTcpServer) handleWriteMultiHolding(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) {
	mb.holdMux.Lock()
	defer mb.holdMux.Unlock()
	register, values := registerAddressAndValues(frame)
	if err := mb.writeHoldingRegisters(uint16(register), values); err != nil {
		bylog.Error("handleWriteMultiHolding err=%v", err)
	}
	return mbserver.WriteHoldingRegisters(s, frame)
}
// handleReadHolding services function 0x03 (read holding registers):
// handler-backed registers are refreshed into the holding array, then
// the requested span is returned from the array.
func (mb *MBTcpServer) handleReadHolding(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) {
	mb.holdMux.Lock()
	defer func() {
		mb.holdMux.Unlock()
	}()
	_, register, numRegs, endRegister := registerAddressAndNumber(frame)
	// endRegister is recomputed although registerAddressAndNumber already
	// returned it — harmless, merely redundant.
	endRegister = register + numRegs
	if endRegister > 65536 {
		return []byte{}, &mbserver.IllegalDataAddress
	}
	// Run any read handlers covering the span so their results land in
	// the holding array before the unified reply below.
	_, err := mb.readHoldingRegisters(uint16(register), uint16(numRegs))
	if err != nil {
		return nil, &mbserver.IllegalDataValue
	}
	return append([]byte{byte(numRegs * 2)},
		mbserver.Uint16ToBytes(s.HoldingRegisters[register:endRegister])...),
		&mbserver.Success
}
// NewModbusTcpServer starts a Modbus/TCP server listening on port and
// wires the holding-register function handlers (0x03, 0x06, 0x10).
// It returns nil when the listener cannot be started.
func NewModbusTcpServer(port int) *MBTcpServer {
	srv := &MBTcpServer{
		mbserv:           mbserver.NewServer(),
		readWriteHandler: make(map[uint16]*ModbusReadWriteHandler),
	}
	if err := srv.mbserv.ListenTCP(fmt.Sprintf(":%d", port)); err != nil {
		bylog.Error("ListenTCP %d error %v", port, err)
		return nil
	}
	srv.mbserv.RegisterFunctionHandler(3, srv.handleReadHolding)
	srv.mbserv.RegisterFunctionHandler(6, srv.handleWriteHolding)
	srv.mbserv.RegisterFunctionHandler(0x10, srv.handleWriteMultiHolding)
	return srv
}
|
package examples
import (
"fmt"
"time"
)
// worker_ consumes job ids from jobs, simulates one second of work per
// job, and writes the doubled job id to results.
func worker_(id int, jobs <-chan int, results chan<- int) {
	for job := range jobs {
		fmt.Println("worker ", id, "started job ", job)
		time.Sleep(time.Second)
		fmt.Println("worker", id, "finished job", job)
		results <- job * 2
	}
}
// init runs a fixed worker-pool demo: three workers drain five buffered
// jobs, and all results are received (and discarded) before init returns.
// NOTE(review): doing ~2 seconds of blocking goroutine work inside
// init() is an anti-pattern — it runs before main on every import of
// this package; consider moving it to an explicitly-called function.
func init() {
	const numJobs = 5
	jobs := make(chan int, numJobs)
	results := make(chan int, numJobs)
	for w := 1; w <= 3; w++ {
		go worker_(w, jobs, results)
	}
	for i := 1; i <= numJobs; i++ {
		jobs <- i
	}
	close(jobs)
	// Drain all results so the workers' sends never block.
	for a := 1; a <= numJobs; a++ {
		<-results
	}
}
|
package main
import (
"fmt"
"unsafe"
"strconv"
)
// main prints the raw IEEE-754 bit patterns of 0.1, 0.2 and 0.3 in both
// float32 and float64, demonstrating their inexact binary representation.
func main() {
	show32 := func(format string, v float32) {
		bits := *(*uint32)(unsafe.Pointer(&v))
		fmt.Printf(format, strconv.FormatUint(uint64(bits), 2))
	}
	show64 := func(format string, v float64) {
		bits := *(*uint64)(unsafe.Pointer(&v))
		fmt.Printf(format, strconv.FormatUint(bits, 2))
	}
	show32("float32(0.1): %032s\n", 0.1)
	show32("float32(0.2): %032s\n", 0.2)
	show32("float32(0.3): %032s\n", 0.3)
	show64("float64(0.1): %064s\n", 0.1)
	show64("float64(0.2): %064s\n", 0.2)
	show64("float64(0.3): %064s\n", 0.3)
}
|
/*
Tencent is pleased to support the open source community by making Basic Service Configuration Platform available.
Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "as IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"gorm.io/gen"
"bscp.io/pkg/dal/table"
)
// main generates gorm-gen query code into ./pkg/dal/gen for every model
// registered below.
func main() {
	g := gen.NewGenerator(gen.Config{
		OutPath:       "./pkg/dal/gen",
		Mode:          gen.WithDefaultQuery | gen.WithQueryInterface,
		FieldNullable: true,
	})
	// Add models that need generated code here.
	// BUG FIX: table.ReleasedConfigItem{} was registered twice; the
	// duplicate entry has been removed.
	g.ApplyBasic(
		table.IDGenerator{},
		table.Audit{},
		table.App{},
		table.ArchivedApp{},
		table.ConfigItem{},
		table.ReleasedConfigItem{},
		table.Commit{},
		table.Content{},
		table.ResourceLock{},
		table.Event{},
		table.Credential{},
		table.CredentialScope{},
		table.Strategy{},
		table.Group{},
		table.ReleasedGroup{},
		table.GroupAppBind{},
		table.Release{},
		table.Hook{},
		table.HookRevision{},
		table.ReleasedHook{},
		table.TemplateSpace{},
		table.Template{},
		table.TemplateSet{},
		table.TemplateRevision{},
		table.TemplateVariable{},
		table.AppTemplateBinding{},
		table.ReleasedAppTemplate{},
		table.AppTemplateVariable{},
		table.ReleasedAppTemplateVariable{},
	)
	g.Execute()
}
|
// https://leetcode-cn.com/problems/remove-element/
package main
import "fmt"
// main removes every occurrence of val from nums in place by overwriting
// matches with the last live element, then prints the live length and
// the (partially reordered) slice.
func main() {
	nums := []int{2}
	val := 3
	n := len(nums)
	for i := 0; i < n; {
		if nums[i] != val {
			i++
			continue
		}
		// Replace the match with the last live element and shrink the
		// live region; do not advance i, the swapped-in value is unchecked.
		nums[i] = nums[n-1]
		n--
	}
	fmt.Println(n, nums)
}
// removeElement1 demonstrates an alternative removal strategy: i scans
// forward while j walks back from the end looking for a keepable element
// to swap into each removed slot. Prints the count of kept elements and
// the mutated slice.
func removeElement1() {
	// nums := []int{0, 1, 2, 2, 3, 0, 4, 2}
	// val := 2
	// nums := []int{3, 2, 2, 3}
	nums := []int{2}
	val := 3
	j := len(nums)
	for i := 0; i < len(nums); i++ {
		// The two pointers have met: everything in [0, j) is kept.
		if j == i {
			break
		}
		if nums[i] == val {
			// Find the rightmost keepable element and swap it into slot i.
			for j > i {
				if nums[j-1] != val {
					nums[i] = nums[j-1]
					nums[j-1] = val
					break
				}
				j--
			}
		}
	}
	fmt.Println(j, nums)
}
|
package models
// Kitchens describes a kitchen record exposed over the JSON API.
type Kitchens struct {
	Name   string `json:"name"`
	Id     uint   `json:"id"`
	Status bool   `json:"status"`
}

// Admin carries administrator login credentials.
type Admin struct {
	Email  string `json:"email"`
	Passwd string `json:"password"`
}

// User is a registered end user, including the stored password field.
type User struct {
	Id        int    `json:"id"`
	FirstName string `json:"firstName"`
	LastName  string `json:"lastName"`
	Email     string `json:"email"`
	Passwd    string `json:"password"`
}

// SigninUser is the payload exchanged at sign-in.
// NOTE(review): IsManager is a string rather than a bool — presumably a
// serialized flag; confirm against the client contract.
type SigninUser struct {
	Id          uint64 `json:"id"`
	Email       string `json:"email"`
	CurrentPass string `json:"password"`
	IsManager   string `json:"isManager"`
}

// Food is a purchasable menu item. Price is kept as a string here.
type Food struct {
	Id    uint   `json:"id"`
	Title string `json:"title"`
	Price string `json:"price"`
}

// Order links a user to a food item they ordered.
type Order struct {
	User_id uint `json:"user_id"`
	Food_id uint `json:"food_id"`
}

// Address is a delivery destination with a contact reference.
type Address struct {
	Place   string `json:"place"`
	Contact string `json:"contact"`
}
|
package intervalIntersection
import (
"reflect"
"testing"
)
// Test_intervalIntersection exercises intervalIntersection with three
// table-driven cases: general overlapping lists, lists that only touch
// at single points, and fully disjoint lists (which must produce an
// empty — but non-nil — result to satisfy DeepEqual).
func Test_intervalIntersection(t *testing.T) {
	type args struct {
		A [][]int
		B [][]int
	}
	tests := []struct {
		name  string
		args  args
		wantC [][]int
	}{
		// TODO: Add test cases.
		{
			name: "first",
			args: args{
				A: [][]int{
					{0, 2}, {5, 10}, {13, 23}, {24, 25},
				},
				B: [][]int{
					{1, 5}, {8, 12}, {15, 24}, {25, 26},
				},
			},
			wantC: [][]int{
				{1, 2}, {5, 5}, {8, 10}, {15, 23}, {24, 24}, {25, 25},
			},
		},
		{
			name: "second",
			args: args{
				A: [][]int{
					{0, 2}, {4, 6}, {8, 10}, {12, 14},
				},
				B: [][]int{
					{2, 4}, {6, 8}, {10, 12}, {14, 16},
				},
			},
			wantC: [][]int{
				{2, 2}, {4, 4}, {6, 6}, {8, 8}, {10, 10}, {12, 12}, {14, 14},
			},
		},
		{
			name: "third",
			args: args{
				A: [][]int{
					{0, 2}, {5, 10}, {13, 23}, {24, 25},
				},
				B: [][]int{
					{3, 4}, {11, 12},
				},
			},
			wantC: [][]int{},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if gotC := intervalIntersection(tt.args.A, tt.args.B); !reflect.DeepEqual(gotC, tt.wantC) {
				t.Errorf("intervalIntersection() = %v, want %v", gotC, tt.wantC)
			}
		})
	}
}
|
package main
import (
"net/http"
"restapi/controllers"
"github.com/gin-gonic/gin"
validator "github.com/gobeam/custom-validator"
)
// main wires gin routes: extra validation messages are registered first,
// the auth routes run behind the validator error middleware, and the
// remaining user/cookie routes are mounted directly. Serves on gin's
// default port.
func main() {
	router := gin.Default()
	// Register custom validation tags with their error message templates.
	validate := []validator.ExtraValidation{
		{Tag: "number", Message: "Invalid %s Format!"},
		{Tag: "email", Message: "Invalid %s Format!"},
	}
	validator.MakeExtraValidation(validate)
	router.GET("/", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{
			"message": "hello jay",
		})
	})
	// users
	// NOTE(review): router.Use applies globally from this point, so the
	// braces are cosmetic grouping only — all later routes also pass
	// through validator.Errors().
	router.Use(validator.Errors())
	{
		router.POST("/register", controllers.UserRegister)
		router.POST("/register/guide", controllers.GuideRegister)
		router.POST("/login", controllers.UserLogin)
	}
	router.GET("/users", controllers.Users)
	router.GET("/confirm-email/:token", controllers.ConfirmEmail)
	router.POST("/resend-email", controllers.ResendEmail)
	router.GET("/create-cookie", controllers.Cookie)
	router.GET("/delete-cookie", controllers.DeleteCookie)
	router.Run()
}
|
package database
import (
"database/sql"
"fmt"
_ "github.com/lib/pq"
"github.com/super-link-manager/models"
"github.com/super-link-manager/utils"
"os"
)
// ConnectDB opens a postgres handle configured from the POSTGRES_*
// environment variables. Note that sql.Open only validates arguments —
// no connection is attempted until first use (there is no Ping here),
// so a bad host is not detected at this point. Callers own the returned
// handle and must Close it.
func ConnectDB() *sql.DB {
	var host = os.Getenv("POSTGRES_HOST")
	var port = os.Getenv("POSTGRES_PORT")
	var user = os.Getenv("POSTGRES_USERNAME")
	var password = os.Getenv("POSTGRES_PASSWORD")
	var database = os.Getenv("POSTGRES_DB")
	connectionString := fmt.Sprintf("host=%s port=%s user=%s password='%s' dbname=%s sslmode=disable", host, port, user, password, database)
	db, err := sql.Open("postgres", connectionString)
	utils.CheckErr(err)
	return db
}
// Links loads every row from the links table ordered by id ascending.
// BUG FIXES: the result set was never closed, the post-iteration error
// (rows.Err) was never checked, and db.Close was deferred only at the
// end — it is now deferred immediately after opening.
// (Assumes utils.CheckErr aborts/panics on a non-nil error — TODO confirm.)
func Links() []models.Link {
	db := ConnectDB()
	defer db.Close()
	linksQuery, err := db.Query("select * from links order by id asc")
	utils.CheckErr(err)
	defer linksQuery.Close()
	var links []models.Link
	for linksQuery.Next() {
		var (
			id       string
			linkType string
			name     string
			price    int
		)
		err = linksQuery.Scan(
			&id,
			&linkType,
			&name,
			&price,
		)
		utils.CheckErr(err)
		links = append(links, models.Link{
			Id:       id,
			LinkType: linkType,
			Name:     name,
			Price:    price,
		})
	}
	// Surface any error that terminated the iteration early.
	utils.CheckErr(linksQuery.Err())
	return links
}
// CreateLink inserts a new link row and reports whether a row was added.
// BUG FIXES: the prepared statement was never closed, and db.Close is
// now deferred immediately so any CheckErr abort still releases the
// handle.
func CreateLink(id, linkType, name string, price int) bool {
	db := ConnectDB()
	defer db.Close()
	linkInsert, err := db.Prepare("insert into links (id, type, name, price) values ($1, $2, $3, $4)")
	utils.CheckErr(err)
	defer linkInsert.Close()
	result, err := linkInsert.Exec(id, linkType, name, price)
	utils.CheckErr(err)
	rowsAffected, err := result.RowsAffected()
	utils.CheckErr(err)
	return rowsAffected > 0
}
// DeleteLink deletes the link with the given id and reports whether a
// row was removed.
// BUG FIXES: the prepared statement was never closed, and db.Close is
// now deferred immediately after opening.
func DeleteLink(id string) bool {
	db := ConnectDB()
	defer db.Close()
	productDelete, err := db.Prepare("delete from links where id=$1")
	utils.CheckErr(err)
	defer productDelete.Close()
	result, err := productDelete.Exec(id)
	utils.CheckErr(err)
	rowsAffected, err := result.RowsAffected()
	utils.CheckErr(err)
	return rowsAffected > 0
}
// UpdateLink rewrites the type, name and price of the link with the
// given id and reports whether a row was changed.
// BUG FIXES: the prepared statement was never closed, and db.Close is
// now deferred immediately after opening.
func UpdateLink(id, linkType, name string, price int) bool {
	db := ConnectDB()
	defer db.Close()
	updateProduct, err := db.Prepare("update links set type=$2, name=$3, price=$4 where id=$1")
	utils.CheckErr(err)
	defer updateProduct.Close()
	result, err := updateProduct.Exec(id, linkType, name, price)
	utils.CheckErr(err)
	rowsAffected, err := result.RowsAffected()
	utils.CheckErr(err)
	return rowsAffected > 0
}
|
package gauth
import (
	"crypto/subtle"
	"math"
	"strconv"
)
// Returns a boolean indicating if the provided otp string is valid for
// the provided secret at the current time. The secret should be base32
// encoded.
// SECURITY FIX: the comparison is now constant-time, so response timing
// does not leak how many leading digits of the guess were correct.
func ValidateOTP(otp, secret string) bool {
	correctOTP, err := GetOTP(secret)
	if err != nil {
		return false
	}
	return subtle.ConstantTimeCompare([]byte(otp), []byte(correctOTP)) == 1
}
// Returns a string containing the TOTP token associated with
// the provided secret at the current time. The secret should be
// base32 encoded. Thin wrapper over computeTOTP with the current
// timestamp counter.
func GetOTP(secret string) (string, error) {
	return computeTOTP(secret, timestamp())
}
// Generates a cryptographically secure base32 string that is the
// proper length (SECRET_LENGTH) for a secret to be used with Google
// Authenticator. This can be used for generating new secrets.
func GenerateSecret() (string, error) {
	return generateBase32CryptoString(SECRET_LENGTH)
}
// computeTOTP derives the RETURN_DIGITS-digit TOTP value for the given
// base32 secret and time counter, using RFC 6238 dynamic truncation.
func computeTOTP(secret string, time int64) (string, error) {
	key, err := decodeSecret(secret)
	if err != nil {
		return "", err
	}
	digest := computeHMAC(encodeTime(time), key)
	// Dynamic truncation: the low nibble of the final byte selects the
	// 4-byte window; the top bit of the window is masked off.
	off := digest[len(digest)-1] & 0x0F
	code := (int(digest[off]&0x7F) << 24) |
		(int(digest[off+1]) << 16) |
		(int(digest[off+2]) << 8) |
		int(digest[off+3])
	code %= int(math.Pow10(RETURN_DIGITS))
	// Left-pad with zeros up to the fixed digit count.
	out := strconv.Itoa(code)
	for len(out) < RETURN_DIGITS {
		out = "0" + out
	}
	return out, nil
}
|
// Copyright 2016 Martin Hebnes Pedersen (LA5NTA). All rights reserved.
// Use of this source code is governed by the MIT-license that can be
// found in the LICENSE file.
package main
import (
"context"
"fmt"
"os"
"strings"
"github.com/spf13/pflag"
)
// ErrNoCmd is returned by findCommand when args contain no known command.
var ErrNoCmd = fmt.Errorf("no cmd")

// Command describes a single CLI subcommand: its canonical name,
// aliases, help text, and the handler invoked with the remaining args.
type Command struct {
	Str        string
	Aliases    []string
	Desc       string
	HandleFunc func(ctx context.Context, args []string)
	Usage      string
	Options    map[string]string // flag -> description, for usage output
	Example    string
	LongLived  bool
	MayConnect bool
}
// PrintUsage writes the command's summary, usage line, options and
// example to standard error. Option order follows Go map iteration and
// is therefore not deterministic between runs.
func (cmd Command) PrintUsage() {
	w := os.Stderr
	fmt.Fprintf(w, "%s - %s\n", cmd.Str, cmd.Desc)
	fmt.Fprintf(w, "\nUsage:\n  %s %s\n", cmd.Str, strings.TrimSpace(cmd.Usage))
	if len(cmd.Options) > 0 {
		fmt.Fprint(w, "\nOptions:\n")
		for flag, desc := range cmd.Options {
			fmt.Fprintf(w, "   %-17s %s\n", flag, desc)
		}
	}
	if cmd.Example != "" {
		fmt.Fprintf(w, "\nExample:\n  %s\n", strings.TrimSpace(cmd.Example))
	}
	fmt.Fprint(w, "\n")
}
// parseFlags splits the raw argument list into the recognized Command
// and its trailing arguments. Global options found before the command
// name are parsed into the shared option set; a help argument prints
// usage and exits. Exits the process when no command is recognized.
func parseFlags(args []string) (cmd Command, arguments []string) {
	var options []string
	var err error
	cmd, options, arguments, err = findCommand(args)
	if err != nil {
		pflag.Usage()
		os.Exit(1)
	}
	// NOTE(review): Parse's error return is discarded — confirm the
	// option set is configured to exit on error itself.
	optionsSet().Parse(options)
	// Guarantee arguments[0] exists for the help check below.
	if len(arguments) == 0 {
		arguments = append(arguments, "")
	}
	switch arguments[0] {
	case "--help", "-help", "help", "-h":
		cmd.PrintUsage()
		os.Exit(1)
	}
	return
}
// findCommand scans args for the first recognized command name or alias
// and splits the list around it: pre holds the arguments before the
// command (global options, excluding args[0]), post those after it.
// ErrNoCmd is returned when no command is present.
func findCommand(args []string) (cmd Command, pre, post []string, err error) {
	byName := make(map[string]Command, len(commands))
	for _, c := range commands {
		byName[c.Str] = c
		for _, alias := range c.Aliases {
			byName[alias] = c
		}
	}
	for i, arg := range args {
		c, ok := byName[arg]
		if !ok {
			continue
		}
		return c, args[1:i], args[i+1:], nil
	}
	return cmd, pre, post, ErrNoCmd
}
|
package main
import (
"bytes"
"fmt"
"log"
"study-go--mercaridoc/07.エラー処理/エラー処理をまとめる/util"
)
// main decodes a UTF-8 string rune by rune with util.NewRuneScanner,
// printing each rune and failing fatally on any scan error.
func main() {
	bf := bytes.NewBufferString("ハローworld!")
	s := util.NewRuneScanner(bf)
	for s.Scan() {
		// Idiom fix: the original named this variable "rune", shadowing
		// the builtin type of the same name.
		r := s.Rune()
		fmt.Printf("%s", string(r))
	}
	if err := s.Err(); err != nil {
		log.Fatal(err.Error())
	}
}
|
package e2e
import (
"context"
"fmt"
"strings"
"time"
"github.com/blang/semver/v4"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/require"
authorizationv1 "k8s.io/api/authorization/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/retry"
v1 "github.com/operator-framework/api/pkg/operators/v1"
"github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned"
"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install"
"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil"
"github.com/operator-framework/operator-lifecycle-manager/test/e2e/ctx"
)
var _ = Describe("Operator Group", func() {
var (
c operatorclient.ClientInterface
crc versioned.Interface
)
BeforeEach(func() {
c = ctx.Ctx().KubeClient()
crc = ctx.Ctx().OperatorClient()
})
AfterEach(func() {
TearDown(testNamespace)
})
// Exercises the operator-group lifecycle end to end: label-selected
// target namespaces, CSV copy propagation and annotations, RBAC
// aggregation into <group>-admin/-edit/-view cluster roles, failure on
// unsupported InstallModes, and garbage collection of copied CSVs and
// the generated cluster roles.
It("e2e functionality", func() {
	// Create namespace with specific label
	// Create CRD
	// Create CSV in operator namespace
	// Create operator group that watches namespace and uses specific label
	// Verify operator group status contains correct status
	// Verify csv in target namespace exists, has copied status, has annotations
	// Verify deployments have correct namespace annotation
	// (Verify that the operator can operate in the target namespace)
	// Update CSV to support no InstallModes
	// Verify the CSV transitions to FAILED
	// Verify the copied CSV transitions to FAILED
	// Delete CSV
	// Verify copied CVS is deleted
	log := func(s string) {
		GinkgoT().Logf("%s: %s", time.Now().Format("15:04:05.9999"), s)
	}
	csvName := genName("another-csv-") // must be lowercase for DNS-1123 validation
	opGroupNamespace := genName(testNamespace + "-")
	matchingLabel := map[string]string{"inGroup": opGroupNamespace}
	otherNamespaceName := genName(opGroupNamespace + "-")
	bothNamespaceNames := opGroupNamespace + "," + otherNamespaceName
	// Operator namespace carries the label the operator group selects on.
	_, err := c.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:   opGroupNamespace,
			Labels: matchingLabel,
		},
	}, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)
	defer func() {
		err = c.KubernetesInterface().CoreV1().Namespaces().Delete(context.TODO(), opGroupNamespace, metav1.DeleteOptions{})
		require.NoError(GinkgoT(), err)
	}()
	// Second labeled namespace becomes the copy target.
	otherNamespace := corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:   otherNamespaceName,
			Labels: matchingLabel,
		},
	}
	createdOtherNamespace, err := c.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), &otherNamespace, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)
	defer func() {
		err = c.KubernetesInterface().CoreV1().Namespaces().Delete(context.TODO(), otherNamespaceName, metav1.DeleteOptions{})
		require.NoError(GinkgoT(), err)
	}()
	log("Creating CRD")
	mainCRDPlural := genName("opgroup")
	mainCRD := newCRD(mainCRDPlural)
	cleanupCRD, err := createCRD(c, mainCRD)
	require.NoError(GinkgoT(), err)
	defer cleanupCRD()
	log("Creating operator group")
	operatorGroup := v1.OperatorGroup{
		ObjectMeta: metav1.ObjectMeta{
			Name:      genName("e2e-operator-group-"),
			Namespace: opGroupNamespace,
		},
		Spec: v1.OperatorGroupSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: matchingLabel,
			},
		},
	}
	_, err = crc.OperatorsV1().OperatorGroups(opGroupNamespace).Create(context.TODO(), &operatorGroup, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)
	expectedOperatorGroupStatus := v1.OperatorGroupStatus{
		Namespaces: []string{opGroupNamespace, createdOtherNamespace.GetName()},
	}
	log("Waiting on operator group to have correct status")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetched, fetchErr := crc.OperatorsV1().OperatorGroups(opGroupNamespace).Get(context.TODO(), operatorGroup.Name, metav1.GetOptions{})
		if fetchErr != nil {
			return false, fetchErr
		}
		if len(fetched.Status.Namespaces) > 0 {
			require.ElementsMatch(GinkgoT(), expectedOperatorGroupStatus.Namespaces, fetched.Status.Namespaces, "have %#v", fetched.Status.Namespaces)
			return true, nil
		}
		return false, nil
	})
	require.NoError(GinkgoT(), err)
	log("Creating CSV")
	// Generate permissions
	serviceAccountName := genName("nginx-sa")
	permissions := []v1alpha1.StrategyDeploymentPermissions{
		{
			ServiceAccountName: serviceAccountName,
			Rules: []rbacv1.PolicyRule{
				{
					Verbs:     []string{rbacv1.VerbAll},
					APIGroups: []string{mainCRD.Spec.Group},
					Resources: []string{mainCRDPlural},
				},
			},
		},
	}
	// Create a new NamedInstallStrategy
	deploymentName := genName("operator-deployment")
	namedStrategy := newNginxInstallStrategy(deploymentName, permissions, nil)
	aCSV := newCSV(csvName, opGroupNamespace, "", semver.MustParse("0.0.0"), []apiextensions.CustomResourceDefinition{mainCRD}, nil, &namedStrategy)
	createdCSV, err := crc.OperatorsV1alpha1().ClusterServiceVersions(opGroupNamespace).Create(context.TODO(), &aCSV, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)
	// The SA/Role/RoleBinding are owner-labeled to the CSV so OLM
	// adopts them rather than creating duplicates.
	serviceAccount := &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: opGroupNamespace,
			Name:      serviceAccountName,
		},
	}
	ownerutil.AddNonBlockingOwner(serviceAccount, createdCSV)
	err = ownerutil.AddOwnerLabels(serviceAccount, createdCSV)
	require.NoError(GinkgoT(), err)
	role := &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: opGroupNamespace,
			Name:      serviceAccountName + "-role",
		},
		Rules: permissions[0].Rules,
	}
	ownerutil.AddNonBlockingOwner(role, createdCSV)
	err = ownerutil.AddOwnerLabels(role, createdCSV)
	require.NoError(GinkgoT(), err)
	roleBinding := &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: opGroupNamespace,
			Name:      serviceAccountName + "-rb",
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      serviceAccountName,
				Namespace: opGroupNamespace,
			},
		},
		RoleRef: rbacv1.RoleRef{
			Kind: "Role",
			Name: role.GetName(),
		},
	}
	ownerutil.AddNonBlockingOwner(roleBinding, createdCSV)
	err = ownerutil.AddOwnerLabels(roleBinding, createdCSV)
	require.NoError(GinkgoT(), err)
	_, err = c.CreateServiceAccount(serviceAccount)
	require.NoError(GinkgoT(), err)
	_, err = c.CreateRole(role)
	require.NoError(GinkgoT(), err)
	_, err = c.CreateRoleBinding(roleBinding)
	require.NoError(GinkgoT(), err)
	log("wait for CSV to succeed")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetched, err := crc.OperatorsV1alpha1().ClusterServiceVersions(opGroupNamespace).Get(context.TODO(), createdCSV.GetName(), metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		log(fmt.Sprintf("%s (%s): %s", fetched.Status.Phase, fetched.Status.Reason, fetched.Status.Message))
		return csvSucceededChecker(fetched), nil
	})
	require.NoError(GinkgoT(), err)
	log("Waiting for operator namespace csv to have annotations")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetchedCSV, fetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(opGroupNamespace).Get(context.TODO(), csvName, metav1.GetOptions{})
		if fetchErr != nil {
			if apierrors.IsNotFound(fetchErr) {
				return false, nil
			}
			log(fmt.Sprintf("Error (in %v): %v", testNamespace, fetchErr.Error()))
			return false, fetchErr
		}
		if checkOperatorGroupAnnotations(fetchedCSV, &operatorGroup, true, bothNamespaceNames) == nil {
			return true, nil
		}
		return false, nil
	})
	require.NoError(GinkgoT(), err)
	log("Waiting for target namespace csv to have annotations (but not target namespaces)")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetchedCSV, fetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(context.TODO(), csvName, metav1.GetOptions{})
		if fetchErr != nil {
			if apierrors.IsNotFound(fetchErr) {
				return false, nil
			}
			log(fmt.Sprintf("Error (in %v): %v", otherNamespaceName, fetchErr.Error()))
			return false, fetchErr
		}
		if checkOperatorGroupAnnotations(fetchedCSV, &operatorGroup, false, "") == nil {
			return true, nil
		}
		return false, nil
	})
	// FIX: this poll's error was previously dropped (err was overwritten
	// by the next poll without ever being checked).
	require.NoError(GinkgoT(), err)
	log("Checking status on csv in target namespace")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetchedCSV, fetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(context.TODO(), csvName, metav1.GetOptions{})
		if fetchErr != nil {
			if apierrors.IsNotFound(fetchErr) {
				return false, nil
			}
			GinkgoT().Logf("Error (in %v): %v", otherNamespaceName, fetchErr.Error())
			return false, fetchErr
		}
		if fetchedCSV.Status.Reason == v1alpha1.CSVReasonCopied {
			return true, nil
		}
		return false, nil
	})
	require.NoError(GinkgoT(), err)
	log("Waiting on deployment to have correct annotations")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		createdDeployment, err := c.GetDeployment(opGroupNamespace, deploymentName)
		if err != nil {
			if apierrors.IsNotFound(err) {
				return false, nil
			}
			return false, err
		}
		if checkOperatorGroupAnnotations(&createdDeployment.Spec.Template, &operatorGroup, true, bothNamespaceNames) == nil {
			return true, nil
		}
		return false, nil
	})
	require.NoError(GinkgoT(), err)
	// check rbac in target namespace
	informerFactory := informers.NewSharedInformerFactory(c.KubernetesInterface(), 1*time.Second)
	roleInformer := informerFactory.Rbac().V1().Roles()
	roleBindingInformer := informerFactory.Rbac().V1().RoleBindings()
	clusterRoleInformer := informerFactory.Rbac().V1().ClusterRoles()
	clusterRoleBindingInformer := informerFactory.Rbac().V1().ClusterRoleBindings()
	// kick off informers
	stopCh := make(chan struct{})
	// FIX: SharedIndexInformer.Run terminates when stopCh is CLOSED.
	// The previous `stopCh <- struct{}{}` sent a single value, which
	// stops at most one of the four informer goroutines and leaks the
	// rest. Closing broadcasts the stop to all of them.
	defer close(stopCh)
	for _, informer := range []cache.SharedIndexInformer{roleInformer.Informer(), roleBindingInformer.Informer(), clusterRoleInformer.Informer(), clusterRoleBindingInformer.Informer()} {
		informer := informer // FIX: pin the loop variable for the goroutine (pre-Go 1.22 capture semantics)
		go func() {
			defer GinkgoRecover()
			informer.Run(stopCh)
		}()
		synced := func() (bool, error) {
			return informer.HasSynced(), nil
		}
		// wait until the informer has synced to continue
		err := wait.PollUntil(500*time.Millisecond, synced, stopCh)
		require.NoError(GinkgoT(), err)
	}
	ruleChecker := install.NewCSVRuleChecker(roleInformer.Lister(), roleBindingInformer.Lister(), clusterRoleInformer.Lister(), clusterRoleBindingInformer.Lister(), &aCSV)
	log("Waiting for operator to have rbac in target namespace")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		for _, perm := range permissions {
			sa, err := c.GetServiceAccount(opGroupNamespace, perm.ServiceAccountName)
			require.NoError(GinkgoT(), err)
			for _, rule := range perm.Rules {
				satisfied, err := ruleChecker.RuleSatisfied(sa, otherNamespaceName, rule)
				if err != nil {
					GinkgoT().Log(err.Error())
					return false, nil
				}
				if !satisfied {
					return false, nil
				}
			}
		}
		return true, nil
	})
	// FIX: this poll's error was previously dropped (err was overwritten
	// by the ClusterRoles Get below without ever being checked).
	require.NoError(GinkgoT(), err)
	// validate provided API clusterroles for the operatorgroup
	adminRole, err := c.KubernetesInterface().RbacV1().ClusterRoles().Get(context.TODO(), operatorGroup.Name+"-admin", metav1.GetOptions{})
	require.NoError(GinkgoT(), err)
	adminPolicyRules := []rbacv1.PolicyRule{
		{Verbs: []string{"*"}, APIGroups: []string{mainCRD.Spec.Group}, Resources: []string{mainCRDPlural}},
	}
	require.Equal(GinkgoT(), adminPolicyRules, adminRole.Rules)
	editRole, err := c.KubernetesInterface().RbacV1().ClusterRoles().Get(context.TODO(), operatorGroup.Name+"-edit", metav1.GetOptions{})
	require.NoError(GinkgoT(), err)
	editPolicyRules := []rbacv1.PolicyRule{
		{Verbs: []string{"create", "update", "patch", "delete"}, APIGroups: []string{mainCRD.Spec.Group}, Resources: []string{mainCRDPlural}},
	}
	require.Equal(GinkgoT(), editPolicyRules, editRole.Rules)
	viewRole, err := c.KubernetesInterface().RbacV1().ClusterRoles().Get(context.TODO(), operatorGroup.Name+"-view", metav1.GetOptions{})
	require.NoError(GinkgoT(), err)
	viewPolicyRules := []rbacv1.PolicyRule{
		{Verbs: []string{"get"}, APIGroups: []string{"apiextensions.k8s.io"}, Resources: []string{"customresourcedefinitions"}, ResourceNames: []string{mainCRD.Name}},
		{Verbs: []string{"get", "list", "watch"}, APIGroups: []string{mainCRD.Spec.Group}, Resources: []string{mainCRDPlural}},
	}
	require.Equal(GinkgoT(), viewPolicyRules, viewRole.Rules)
	// Unsupport all InstallModes
	log("unsupporting all csv installmodes")
	fetchedCSV, err := crc.OperatorsV1alpha1().ClusterServiceVersions(opGroupNamespace).Get(context.TODO(), csvName, metav1.GetOptions{})
	require.NoError(GinkgoT(), err, "could not fetch csv")
	fetchedCSV.Spec.InstallModes = []v1alpha1.InstallMode{}
	_, err = crc.OperatorsV1alpha1().ClusterServiceVersions(fetchedCSV.GetNamespace()).Update(context.TODO(), fetchedCSV, metav1.UpdateOptions{})
	require.NoError(GinkgoT(), err, "could not update csv installmodes")
	// Ensure CSV fails
	_, err = fetchCSV(crc, csvName, opGroupNamespace, csvFailedChecker)
	require.NoError(GinkgoT(), err, "csv did not transition to failed as expected")
	// ensure deletion cleans up copied CSV
	log("deleting parent csv")
	err = crc.OperatorsV1alpha1().ClusterServiceVersions(opGroupNamespace).Delete(context.TODO(), csvName, metav1.DeleteOptions{})
	require.NoError(GinkgoT(), err)
	log("waiting for orphaned csv to be deleted")
	err = waitForDelete(func() error {
		_, err = crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(context.TODO(), csvName, metav1.GetOptions{})
		return err
	})
	require.NoError(GinkgoT(), err)
	err = crc.OperatorsV1().OperatorGroups(opGroupNamespace).Delete(context.TODO(), operatorGroup.Name, metav1.DeleteOptions{})
	require.NoError(GinkgoT(), err)
	GinkgoT().Log("Waiting for OperatorGroup RBAC to be garbage collected")
	// Each poll returns (true, err) once the Get fails; wait.Poll then
	// surfaces that err, which we require to be NotFound.
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		_, err := c.KubernetesInterface().RbacV1().ClusterRoles().Get(context.TODO(), operatorGroup.Name+"-admin", metav1.GetOptions{})
		if err == nil {
			return false, nil
		}
		return true, err
	})
	require.True(GinkgoT(), apierrors.IsNotFound(err))
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		_, err := c.KubernetesInterface().RbacV1().ClusterRoles().Get(context.TODO(), operatorGroup.Name+"-edit", metav1.GetOptions{})
		if err == nil {
			return false, nil
		}
		return true, err
	})
	require.True(GinkgoT(), apierrors.IsNotFound(err))
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		_, err := c.KubernetesInterface().RbacV1().ClusterRoles().Get(context.TODO(), operatorGroup.Name+"-view", metav1.GetOptions{})
		if err == nil {
			return false, nil
		}
		return true, err
	})
	require.True(GinkgoT(), apierrors.IsNotFound(err))
})
// Verifies that provided-API access (both a CRD-backed API and an
// aggregated APIService) is rolled up into cluster roles that a
// project admin can exercise, checked via SubjectAccessReview.
It("role aggregation", func() {
	// kubectl -n a8v4sw auth can-i create alp999.cluster.com --as system:serviceaccount:a8v4sw:padmin-xqdfz
	// Generate namespaceA
	// Generate operatorGroupA - OwnNamespace
	// Generate csvA in namespaceA with all installmodes supported
	// Create crd so csv succeeds
	// Ensure clusterroles created and aggregated for access provided APIs
	nsA := genName("a")
	GinkgoT().Logf("generating namespaceA: %s", nsA)
	// NOTE(review): this shadows the Describe-level client `c` with a
	// fresh one from newKubeClient() — presumably intentional; confirm.
	c := newKubeClient()
	for _, ns := range []string{nsA} {
		namespace := &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: ns,
			},
		}
		_, err := c.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), namespace, metav1.CreateOptions{})
		require.NoError(GinkgoT(), err)
		// Pass ns as an argument so each deferred delete targets the
		// namespace created on its own iteration.
		defer func(name string) {
			require.NoError(GinkgoT(), c.KubernetesInterface().CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{}))
		}(ns)
	}
	groupAName := genName("a")
	GinkgoT().Logf("Generate operatorGroupA - OwnNamespace: %s", groupAName)
	groupA := newOperatorGroup(nsA, groupAName, nil, nil, []string{nsA}, false)
	_, err := crc.OperatorsV1().OperatorGroups(nsA).Create(context.TODO(), groupA, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)
	defer func() {
		require.NoError(GinkgoT(), crc.OperatorsV1().OperatorGroups(nsA).Delete(context.TODO(), groupA.GetName(), metav1.DeleteOptions{}))
	}()
	crdAName := genName("a")
	strategyName := genName("dep-")
	csvAName := "nginx-a"
	GinkgoT().Logf("Generate csv (%s/%s) with crd %s and with all installmodes supported: %s", nsA, csvAName, crdAName, strategyName)
	crd := newCRD(crdAName)
	namedStrategy := newNginxInstallStrategy(strategyName, nil, nil)
	csvA := newCSV(csvAName, nsA, "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{crd}, nil, &namedStrategy)
	_, err = crc.OperatorsV1alpha1().ClusterServiceVersions(nsA).Create(context.TODO(), &csvA, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)
	defer func() {
		require.NoError(GinkgoT(), crc.OperatorsV1alpha1().ClusterServiceVersions(nsA).Delete(context.TODO(), csvA.GetName(), metav1.DeleteOptions{}))
	}()
	GinkgoT().Logf("Create crd %s so csv %s/%s succeeds", crdAName, nsA, csvAName)
	cleanupCRD, err := createCRD(c, crd)
	require.NoError(GinkgoT(), err)
	defer cleanupCRD()
	_, err = fetchCSV(crc, csvA.GetName(), nsA, csvSucceededChecker)
	require.NoError(GinkgoT(), err)
	// Second CSV backs an aggregated APIService (mock ext server) so we
	// can check aggregation of apiserver-provided APIs too.
	depName := genName("hat-server")
	GinkgoT().Logf("Create csv %s/%s for an apiserver", nsA, depName)
	mockGroup := fmt.Sprintf("hats.%s.redhat.com", genName(""))
	version := "v1alpha1"
	mockGroupVersion := strings.Join([]string{mockGroup, version}, "/")
	mockKinds := []string{"fez", "fedora"}
	mockNames := []string{"fezs", "fedoras"}
	depSpec := newMockExtServerDeployment(depName, []mockGroupVersionKind{{depName, mockGroupVersion, mockKinds, 5443}})
	strategy := v1alpha1.StrategyDetailsDeployment{
		DeploymentSpecs: []v1alpha1.StrategyDeploymentSpec{
			{
				Name: depName,
				Spec: depSpec,
			},
		},
	}
	// One APIServiceDescription per mock kind, all served by depName
	// on port 5443.
	owned := make([]v1alpha1.APIServiceDescription, len(mockKinds))
	for i, kind := range mockKinds {
		owned[i] = v1alpha1.APIServiceDescription{
			Name:           mockNames[i],
			Group:          mockGroup,
			Version:        version,
			Kind:           kind,
			DeploymentName: depName,
			ContainerPort:  int32(5443),
			DisplayName:    kind,
			Description:    fmt.Sprintf("A %s", kind),
		}
	}
	csvB := v1alpha1.ClusterServiceVersion{
		Spec: v1alpha1.ClusterServiceVersionSpec{
			MinKubeVersion: "0.0.0",
			InstallModes: []v1alpha1.InstallMode{
				{
					Type:      v1alpha1.InstallModeTypeOwnNamespace,
					Supported: true,
				},
				{
					Type:      v1alpha1.InstallModeTypeSingleNamespace,
					Supported: true,
				},
				{
					Type:      v1alpha1.InstallModeTypeMultiNamespace,
					Supported: true,
				},
				{
					Type:      v1alpha1.InstallModeTypeAllNamespaces,
					Supported: true,
				},
			},
			InstallStrategy: v1alpha1.NamedInstallStrategy{
				StrategyName: v1alpha1.InstallStrategyNameDeployment,
				StrategySpec: strategy,
			},
			APIServiceDefinitions: v1alpha1.APIServiceDefinitions{
				Owned: owned,
			},
		},
	}
	csvB.SetName(depName)
	GinkgoT().Logf("Create the APIService CSV %s/%s", nsA, depName)
	cleanupCSV, err := createCSV(c, crc, csvB, nsA, false, true)
	require.NoError(GinkgoT(), err)
	defer cleanupCSV()
	GinkgoT().Logf("Fetch the APIService CSV %s/%s", nsA, depName)
	_, err = fetchCSV(crc, csvB.GetName(), nsA, csvSucceededChecker)
	require.NoError(GinkgoT(), err)
	GinkgoT().Logf("Ensure clusterroles created and aggregated for access provided APIs")
	padmin, cleanupPadmin := createProjectAdmin(GinkgoT(), c, nsA)
	defer cleanupPadmin()
	GinkgoT().Logf("Check CRD access aggregated")
	// Poll because role aggregation is asynchronous: keep issuing
	// SubjectAccessReviews until the project admin is allowed.
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		res, err := c.KubernetesInterface().AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), &authorizationv1.SubjectAccessReview{
			Spec: authorizationv1.SubjectAccessReviewSpec{
				User: padmin,
				ResourceAttributes: &authorizationv1.ResourceAttributes{
					Namespace: nsA,
					Group:     crd.Spec.Group,
					Version:   crd.Spec.Versions[0].Name,
					Resource:  crd.Spec.Names.Plural,
					Verb:      "create",
				},
			},
		}, metav1.CreateOptions{})
		if err != nil {
			return false, err
		}
		if res == nil {
			return false, nil
		}
		GinkgoT().Logf("checking padmin for permission: %#v", res)
		return res.Status.Allowed, nil
	})
	require.NoError(GinkgoT(), err)
	GinkgoT().Logf("Check apiserver access aggregated")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		res, err := c.KubernetesInterface().AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), &authorizationv1.SubjectAccessReview{
			Spec: authorizationv1.SubjectAccessReviewSpec{
				User: padmin,
				ResourceAttributes: &authorizationv1.ResourceAttributes{
					Namespace: nsA,
					Group:     mockGroup,
					Version:   version,
					Resource:  mockNames[1],
					Verb:      "create",
				},
			},
		}, metav1.CreateOptions{})
		if err != nil {
			return false, err
		}
		if res == nil {
			return false, nil
		}
		GinkgoT().Logf("checking padmin for permission: %#v", res)
		return res.Status.Allowed, nil
	})
	require.NoError(GinkgoT(), err)
})
// Walks a CSV through every InstallMode / target-namespace combination,
// alternating between UnsupportedOperatorGroup failures and success as
// the supported InstallModes are widened step by step.
It("install mode support", func() {
	// Generate namespaceA
	// Generate namespaceB
	// Create operatorGroupA in namespaceA that selects namespaceA
	// Generate csvA with an unfulfilled required CRD and no supported InstallModes in namespaceA
	// Ensure csvA transitions to Failed with reason "UnsupportedOperatorGroup"
	// Update csvA to have OwnNamespace supported=true
	// Ensure csvA transitions to Succeeded
	// Update operatorGroupA's target namespaces to select namespaceB
	// Ensure csvA transitions to Failed with reason "UnsupportedOperatorGroup"
	// Update csvA to have SingleNamespace supported=true
	// Ensure csvA transitions to Succeeded
	// Update operatorGroupA's target namespaces to select namespaceA and namespaceB
	// Ensure csvA transitions to Failed with reason "UnsupportedOperatorGroup"
	// Update csvA to have MultiNamespace supported=true
	// Ensure csvA transitions to Succeeded
	// Update operatorGroupA to select all namespaces
	// Ensure csvA transitions to Failed with reason "UnsupportedOperatorGroup"
	// Update csvA to have AllNamespaces supported=true
	// Ensure csvA transitions to Succeeded
	// NOTE(review): the original plan comments said "Pending" for the
	// post-update steps, but the code below awaits csvSucceededChecker
	// at each of them; the plan above has been aligned with the code.
	// Generate namespaceA and namespaceB
	nsA := genName("a")
	nsB := genName("b")
	c := newKubeClient()
	for _, ns := range []string{nsA, nsB} {
		namespace := &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: ns,
			},
		}
		_, err := c.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), namespace, metav1.CreateOptions{})
		require.NoError(GinkgoT(), err)
		// ns is passed as an argument so each deferred delete removes
		// the namespace created on its own iteration.
		defer func(name string) {
			require.NoError(GinkgoT(), c.KubernetesInterface().CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{}))
		}(ns)
	}
	// Generate operatorGroupA
	groupA := newOperatorGroup(nsA, genName("a"), nil, nil, []string{nsA}, false)
	_, err := crc.OperatorsV1().OperatorGroups(nsA).Create(context.TODO(), groupA, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)
	defer func() {
		require.NoError(GinkgoT(), crc.OperatorsV1().OperatorGroups(nsA).Delete(context.TODO(), groupA.GetName(), metav1.DeleteOptions{}))
	}()
	// Generate csvA in namespaceA with no supported InstallModes
	crd := newCRD(genName("b"))
	namedStrategy := newNginxInstallStrategy(genName("dep-"), nil, nil)
	// crd is passed as a REQUIRED (not owned) CRD here — it is created
	// later so the CSV can succeed once install modes permit it.
	csv := newCSV("nginx-a", nsA, "", semver.MustParse("0.1.0"), nil, []apiextensions.CustomResourceDefinition{crd}, &namedStrategy)
	csvA := &csv
	csvA.Spec.InstallModes = []v1alpha1.InstallMode{
		{
			Type:      v1alpha1.InstallModeTypeOwnNamespace,
			Supported: false,
		},
		{
			Type:      v1alpha1.InstallModeTypeSingleNamespace,
			Supported: false,
		},
		{
			Type:      v1alpha1.InstallModeTypeMultiNamespace,
			Supported: false,
		},
		{
			Type:      v1alpha1.InstallModeTypeAllNamespaces,
			Supported: false,
		},
	}
	csvA, err = crc.OperatorsV1alpha1().ClusterServiceVersions(nsA).Create(context.TODO(), csvA, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)
	defer func() {
		require.NoError(GinkgoT(), crc.OperatorsV1alpha1().ClusterServiceVersions(nsA).Delete(context.TODO(), csvA.GetName(), metav1.DeleteOptions{}))
	}()
	// Ensure csvA transitions to Failed with reason "UnsupportedOperatorGroup"
	failedWithUnsupportedOperatorGroup := func(csv *v1alpha1.ClusterServiceVersion) bool {
		return csvFailedChecker(csv) && csv.Status.Reason == v1alpha1.CSVReasonUnsupportedOperatorGroup
	}
	csvA, err = fetchCSV(crc, csvA.GetName(), nsA, failedWithUnsupportedOperatorGroup)
	require.NoError(GinkgoT(), err)
	// Update csvA to have OwnNamespace supported=true
	csvA.Spec.InstallModes = []v1alpha1.InstallMode{
		{
			Type:      v1alpha1.InstallModeTypeOwnNamespace,
			Supported: true,
		},
		{
			Type:      v1alpha1.InstallModeTypeSingleNamespace,
			Supported: false,
		},
		{
			Type:      v1alpha1.InstallModeTypeMultiNamespace,
			Supported: false,
		},
		{
			Type:      v1alpha1.InstallModeTypeAllNamespaces,
			Supported: false,
		},
	}
	_, err = crc.OperatorsV1alpha1().ClusterServiceVersions(nsA).Update(context.TODO(), csvA, metav1.UpdateOptions{})
	require.NoError(GinkgoT(), err)
	// Create crd so csv succeeds
	cleanupCRD, err := createCRD(c, crd)
	require.NoError(GinkgoT(), err)
	defer cleanupCRD()
	// Ensure csvA transitions to Succeeded
	csvA, err = fetchCSV(crc, csvA.GetName(), nsA, csvSucceededChecker)
	require.NoError(GinkgoT(), err)
	// Update operatorGroupA's target namespaces to select namespaceB
	groupA, err = crc.OperatorsV1().OperatorGroups(nsA).Get(context.TODO(), groupA.GetName(), metav1.GetOptions{})
	require.NoError(GinkgoT(), err)
	groupA.Spec.TargetNamespaces = []string{nsB}
	_, err = crc.OperatorsV1().OperatorGroups(nsA).Update(context.TODO(), groupA, metav1.UpdateOptions{})
	require.NoError(GinkgoT(), err)
	// Ensure csvA transitions to Failed with reason "UnsupportedOperatorGroup"
	csvA, err = fetchCSV(crc, csvA.GetName(), nsA, failedWithUnsupportedOperatorGroup)
	require.NoError(GinkgoT(), err)
	// Update csvA to have SingleNamespace supported=true
	csvA.Spec.InstallModes = []v1alpha1.InstallMode{
		{
			Type:      v1alpha1.InstallModeTypeOwnNamespace,
			Supported: true,
		},
		{
			Type:      v1alpha1.InstallModeTypeSingleNamespace,
			Supported: true,
		},
		{
			Type:      v1alpha1.InstallModeTypeMultiNamespace,
			Supported: false,
		},
		{
			Type:      v1alpha1.InstallModeTypeAllNamespaces,
			Supported: false,
		},
	}
	_, err = crc.OperatorsV1alpha1().ClusterServiceVersions(nsA).Update(context.TODO(), csvA, metav1.UpdateOptions{})
	require.NoError(GinkgoT(), err)
	// Ensure csvA transitions to Succeeded
	csvA, err = fetchCSV(crc, csvA.GetName(), nsA, csvSucceededChecker)
	require.NoError(GinkgoT(), err)
	// Update operatorGroupA's target namespaces to select namespaceA and namespaceB
	groupA, err = crc.OperatorsV1().OperatorGroups(nsA).Get(context.TODO(), groupA.GetName(), metav1.GetOptions{})
	require.NoError(GinkgoT(), err)
	groupA.Spec.TargetNamespaces = []string{nsA, nsB}
	_, err = crc.OperatorsV1().OperatorGroups(nsA).Update(context.TODO(), groupA, metav1.UpdateOptions{})
	require.NoError(GinkgoT(), err)
	// Ensure csvA transitions to Failed with reason "UnsupportedOperatorGroup"
	csvA, err = fetchCSV(crc, csvA.GetName(), nsA, failedWithUnsupportedOperatorGroup)
	require.NoError(GinkgoT(), err)
	// Update csvA to have MultiNamespace supported=true
	csvA.Spec.InstallModes = []v1alpha1.InstallMode{
		{
			Type:      v1alpha1.InstallModeTypeOwnNamespace,
			Supported: true,
		},
		{
			Type:      v1alpha1.InstallModeTypeSingleNamespace,
			Supported: true,
		},
		{
			Type:      v1alpha1.InstallModeTypeMultiNamespace,
			Supported: true,
		},
		{
			Type:      v1alpha1.InstallModeTypeAllNamespaces,
			Supported: false,
		},
	}
	_, err = crc.OperatorsV1alpha1().ClusterServiceVersions(nsA).Update(context.TODO(), csvA, metav1.UpdateOptions{})
	require.NoError(GinkgoT(), err)
	// Ensure csvA transitions to Succeeded
	csvA, err = fetchCSV(crc, csvA.GetName(), nsA, csvSucceededChecker)
	require.NoError(GinkgoT(), err)
	// Update operatorGroupA's target namespaces to select all namespaces
	groupA, err = crc.OperatorsV1().OperatorGroups(nsA).Get(context.TODO(), groupA.GetName(), metav1.GetOptions{})
	require.NoError(GinkgoT(), err)
	groupA.Spec.TargetNamespaces = []string{}
	_, err = crc.OperatorsV1().OperatorGroups(nsA).Update(context.TODO(), groupA, metav1.UpdateOptions{})
	require.NoError(GinkgoT(), err)
	// Ensure csvA transitions to Failed with reason "UnsupportedOperatorGroup"
	csvA, err = fetchCSV(crc, csvA.GetName(), nsA, failedWithUnsupportedOperatorGroup)
	require.NoError(GinkgoT(), err)
	// Update csvA to have AllNamespaces supported=true
	csvA.Spec.InstallModes = []v1alpha1.InstallMode{
		{
			Type:      v1alpha1.InstallModeTypeOwnNamespace,
			Supported: true,
		},
		{
			Type:      v1alpha1.InstallModeTypeSingleNamespace,
			Supported: true,
		},
		{
			Type:      v1alpha1.InstallModeTypeMultiNamespace,
			Supported: true,
		},
		{
			Type:      v1alpha1.InstallModeTypeAllNamespaces,
			Supported: true,
		},
	}
	_, err = crc.OperatorsV1alpha1().ClusterServiceVersions(nsA).Update(context.TODO(), csvA, metav1.UpdateOptions{})
	require.NoError(GinkgoT(), err)
	// Ensure csvA transitions to Succeeded
	// NOTE(review): the original comment here said "Pending", but the
	// checker below awaits Succeeded — confirm which was intended.
	csvA, err = fetchCSV(crc, csvA.GetName(), nsA, csvSucceededChecker)
	require.NoError(GinkgoT(), err)
})
It("intersection", func() {
// Generate namespaceA
// Generate namespaceB
// Generate namespaceC
// Generate namespaceD
// Generate namespaceE
// Generate operatorGroupD in namespaceD that selects namespace D and E
// Generate csvD in namespaceD
// Wait for csvD to be successful
// Wait for csvD to have a CSV with copied status in namespace E
// Wait for operatorGroupD to have providedAPI annotation with crdD's Kind.version.group
// Generate operatorGroupA in namespaceA that selects AllNamespaces
// Generate csvD in namespaceA
// Wait for csvD to fail with status "InterOperatorGroupOwnerConflict"
// Ensure operatorGroupA's providedAPIs are empty
// Ensure csvD in namespaceD is still successful
// Generate csvA in namespaceA that owns crdA
// Wait for csvA to be successful
// Ensure clusterroles created and aggregated for accessing provided APIs
// Wait for operatorGroupA to have providedAPI annotation with crdA's Kind.version.group in its providedAPIs annotation
// Wait for csvA to have a CSV with copied status in namespace D
// Ensure csvA retains the operatorgroup annotations for operatorgroupA
// Wait for csvA to have a CSV with copied status in namespace C
// Generate operatorGroupB in namespaceB that selects namespace C
// Generate csvB in namespaceB that owns crdA
// Wait for csvB to fail with status "InterOperatorGroupOwnerConflict"
// Delete csvA
// Wait for crdA's Kind.version.group to be removed from operatorGroupA's providedAPIs annotation
// Ensure csvA's deployments are deleted
// Wait for csvB to be successful
// Wait for operatorGroupB to have providedAPI annotation with crdB's Kind.version.group
// Wait for csvB to have a CSV with a copied status in namespace C
// Create a catalog for csvA, csvB, and csvD
pkgA := genName("a-")
pkgB := genName("b-")
pkgD := genName("d-")
pkgAStable := pkgA + "-stable"
pkgBStable := pkgB + "-stable"
pkgDStable := pkgD + "-stable"
stableChannel := "stable"
strategyA := newNginxInstallStrategy(pkgAStable, nil, nil)
strategyB := newNginxInstallStrategy(pkgBStable, nil, nil)
strategyD := newNginxInstallStrategy(pkgDStable, nil, nil)
crdA := newCRD(genName(pkgA))
crdB := newCRD(genName(pkgB))
crdD := newCRD(genName(pkgD))
kvgA := fmt.Sprintf("%s.%s.%s", crdA.Spec.Names.Kind, crdA.Spec.Versions[0].Name, crdA.Spec.Group)
kvgB := fmt.Sprintf("%s.%s.%s", crdB.Spec.Names.Kind, crdB.Spec.Versions[0].Name, crdB.Spec.Group)
kvgD := fmt.Sprintf("%s.%s.%s", crdD.Spec.Names.Kind, crdD.Spec.Versions[0].Name, crdD.Spec.Group)
csvA := newCSV(pkgAStable, testNamespace, "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{crdA}, nil, &strategyA)
csvB := newCSV(pkgBStable, testNamespace, "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{crdA, crdB}, nil, &strategyB)
csvD := newCSV(pkgDStable, testNamespace, "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{crdD}, nil, &strategyD)
// Create namespaces
nsA, nsB, nsC, nsD, nsE := genName("a-"), genName("b-"), genName("c-"), genName("d-"), genName("e-")
for _, ns := range []string{nsA, nsB, nsC, nsD, nsE} {
namespace := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: ns,
},
}
_, err := c.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), namespace, metav1.CreateOptions{})
require.NoError(GinkgoT(), err)
defer func(name string) {
require.NoError(GinkgoT(), c.KubernetesInterface().CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{}))
}(ns)
}
// Create the initial catalogsources
manifests := []registry.PackageManifest{
{
PackageName: pkgA,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: pkgAStable},
},
DefaultChannelName: stableChannel,
},
{
PackageName: pkgB,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: pkgBStable},
},
DefaultChannelName: stableChannel,
},
{
PackageName: pkgD,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: pkgDStable},
},
DefaultChannelName: stableChannel,
},
}
catalog := genName("catalog-")
_, cleanupCatalogSource := createInternalCatalogSource(c, crc, catalog, nsA, manifests, []apiextensions.CustomResourceDefinition{crdA, crdD, crdB}, []v1alpha1.ClusterServiceVersion{csvA, csvB, csvD})
defer cleanupCatalogSource()
_, err := fetchCatalogSourceOnStatus(crc, catalog, nsA, catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
_, cleanupCatalogSource = createInternalCatalogSource(c, crc, catalog, nsB, manifests, []apiextensions.CustomResourceDefinition{crdA, crdD, crdB}, []v1alpha1.ClusterServiceVersion{csvA, csvB, csvD})
defer cleanupCatalogSource()
_, err = fetchCatalogSourceOnStatus(crc, catalog, nsB, catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
_, cleanupCatalogSource = createInternalCatalogSource(c, crc, catalog, nsD, manifests, []apiextensions.CustomResourceDefinition{crdA, crdD, crdB}, []v1alpha1.ClusterServiceVersion{csvA, csvB, csvD})
defer cleanupCatalogSource()
_, err = fetchCatalogSourceOnStatus(crc, catalog, nsD, catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
// Create operatorgroups
groupA := newOperatorGroup(nsA, genName("a-"), nil, nil, nil, false)
groupB := newOperatorGroup(nsB, genName("b-"), nil, nil, []string{nsC}, false)
groupD := newOperatorGroup(nsD, genName("d-"), nil, nil, []string{nsD, nsE}, false)
for _, group := range []*v1.OperatorGroup{groupA, groupB, groupD} {
_, err := crc.OperatorsV1().OperatorGroups(group.GetNamespace()).Create(context.TODO(), group, metav1.CreateOptions{})
require.NoError(GinkgoT(), err)
defer func(namespace, name string) {
require.NoError(GinkgoT(), crc.OperatorsV1().OperatorGroups(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}))
}(group.GetNamespace(), group.GetName())
}
// Create subscription for csvD in namespaceD
subDName := genName("d-")
cleanupSubD := createSubscriptionForCatalog(crc, nsD, subDName, catalog, pkgD, stableChannel, pkgDStable, v1alpha1.ApprovalAutomatic)
defer cleanupSubD()
subD, err := fetchSubscription(crc, nsD, subDName, subscriptionHasInstallPlanChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subD)
// Await csvD's success
_, err = awaitCSV(crc, nsD, csvD.GetName(), csvSucceededChecker)
require.NoError(GinkgoT(), err)
// Await csvD's copy in namespaceE
_, err = awaitCSV(crc, nsE, csvD.GetName(), csvCopiedChecker)
require.NoError(GinkgoT(), err)
// Await annotation on groupD
q := func() (metav1.ObjectMeta, error) {
g, err := crc.OperatorsV1().OperatorGroups(nsD).Get(context.TODO(), groupD.GetName(), metav1.GetOptions{})
return g.ObjectMeta, err
}
require.NoError(GinkgoT(), awaitAnnotations(GinkgoT(), q, map[string]string{v1.OperatorGroupProvidedAPIsAnnotationKey: kvgD}))
// Create subscription for csvD2 in namespaceA
subD2Name := genName("d2-")
cleanupSubD2 := createSubscriptionForCatalog(crc, nsA, subD2Name, catalog, pkgD, stableChannel, pkgDStable, v1alpha1.ApprovalAutomatic)
defer cleanupSubD2()
subD2, err := fetchSubscription(crc, nsA, subD2Name, subscriptionHasInstallPlanChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subD2)
// Await csvD2's failure
csvD2, err := awaitCSV(crc, nsA, csvD.GetName(), csvFailedChecker)
require.NoError(GinkgoT(), err)
require.Equal(GinkgoT(), v1alpha1.CSVReasonInterOperatorGroupOwnerConflict, csvD2.Status.Reason)
// Ensure groupA's annotations are blank
q = func() (metav1.ObjectMeta, error) {
g, err := crc.OperatorsV1().OperatorGroups(nsA).Get(context.TODO(), groupA.GetName(), metav1.GetOptions{})
return g.ObjectMeta, err
}
require.NoError(GinkgoT(), awaitAnnotations(GinkgoT(), q, map[string]string{}))
// Ensure csvD is still successful
_, err = awaitCSV(crc, nsD, csvD.GetName(), csvSucceededChecker)
require.NoError(GinkgoT(), err)
// Create subscription for csvA in namespaceA
subAName := genName("a-")
cleanupSubA := createSubscriptionForCatalog(crc, nsA, subAName, catalog, pkgA, stableChannel, pkgAStable, v1alpha1.ApprovalAutomatic)
defer cleanupSubA()
subA, err := fetchSubscription(crc, nsA, subAName, subscriptionHasInstallPlanChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subA)
// Await csvA's success
_, err = awaitCSV(crc, nsA, csvA.GetName(), csvSucceededChecker)
require.NoError(GinkgoT(), err)
// Ensure clusterroles created and aggregated for access provided APIs
padmin, cleanupPadmin := createProjectAdmin(GinkgoT(), c, nsA)
defer cleanupPadmin()
err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
res, err := c.KubernetesInterface().AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), &authorizationv1.SubjectAccessReview{
Spec: authorizationv1.SubjectAccessReviewSpec{
User: padmin,
ResourceAttributes: &authorizationv1.ResourceAttributes{
Namespace: nsA,
Group: crdA.Spec.Group,
Version: crdA.Spec.Versions[0].Name,
Resource: crdA.Spec.Names.Plural,
Verb: "create",
},
},
}, metav1.CreateOptions{})
if err != nil {
return false, err
}
if res == nil {
return false, nil
}
GinkgoT().Log("checking padmin for permission")
return res.Status.Allowed, nil
})
require.NoError(GinkgoT(), err)
// Await annotation on groupA
q = func() (metav1.ObjectMeta, error) {
g, err := crc.OperatorsV1().OperatorGroups(nsA).Get(context.TODO(), groupA.GetName(), metav1.GetOptions{})
return g.ObjectMeta, err
}
require.NoError(GinkgoT(), awaitAnnotations(GinkgoT(), q, map[string]string{v1.OperatorGroupProvidedAPIsAnnotationKey: kvgA}))
// Wait for csvA to have a CSV with copied status in namespace D
csvAinNsD, err := awaitCSV(crc, nsD, csvA.GetName(), csvCopiedChecker)
require.NoError(GinkgoT(), err)
// trigger a resync of operatorgropuD
fetchedGroupD, err := crc.OperatorsV1().OperatorGroups(nsD).Get(context.TODO(), groupD.GetName(), metav1.GetOptions{})
require.NoError(GinkgoT(), err)
fetchedGroupD.Annotations["bump"] = "update"
_, err = crc.OperatorsV1().OperatorGroups(nsD).Update(context.TODO(), fetchedGroupD, metav1.UpdateOptions{})
require.NoError(GinkgoT(), err)
// Ensure csvA retains the operatorgroup annotations for operatorgroupA
csvAinNsD, err = awaitCSV(crc, nsD, csvA.GetName(), csvCopiedChecker)
require.NoError(GinkgoT(), err)
require.Equal(GinkgoT(), groupA.GetName(), csvAinNsD.Annotations[v1.OperatorGroupAnnotationKey])
require.Equal(GinkgoT(), nsA, csvAinNsD.Annotations[v1.OperatorGroupNamespaceAnnotationKey])
require.Equal(GinkgoT(), nsA, csvAinNsD.Labels[v1alpha1.CopiedLabelKey])
// Await csvA's copy in namespaceC
_, err = awaitCSV(crc, nsC, csvA.GetName(), csvCopiedChecker)
require.NoError(GinkgoT(), err)
// Create subscription for csvB in namespaceB
subBName := genName("b-")
cleanupSubB := createSubscriptionForCatalog(crc, nsB, subBName, catalog, pkgB, stableChannel, pkgBStable, v1alpha1.ApprovalAutomatic)
defer cleanupSubB()
subB, err := fetchSubscription(crc, nsB, subBName, subscriptionHasInstallPlanChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subB)
// Await csvB's failure
fetchedB, err := awaitCSV(crc, nsB, csvB.GetName(), csvFailedChecker)
require.NoError(GinkgoT(), err)
require.Equal(GinkgoT(), v1alpha1.CSVReasonInterOperatorGroupOwnerConflict, fetchedB.Status.Reason)
// Ensure no annotation on groupB
q = func() (metav1.ObjectMeta, error) {
g, err := crc.OperatorsV1().OperatorGroups(nsB).Get(context.TODO(), groupB.GetName(), metav1.GetOptions{})
return g.ObjectMeta, err
}
require.NoError(GinkgoT(), awaitAnnotations(GinkgoT(), q, map[string]string{}))
// Delete csvA
require.NoError(GinkgoT(), crc.OperatorsV1alpha1().ClusterServiceVersions(nsA).Delete(context.TODO(), csvA.GetName(), metav1.DeleteOptions{}))
// Ensure annotations are removed from groupA
q = func() (metav1.ObjectMeta, error) {
g, err := crc.OperatorsV1().OperatorGroups(nsA).Get(context.TODO(), groupA.GetName(), metav1.GetOptions{})
return g.ObjectMeta, err
}
require.NoError(GinkgoT(), awaitAnnotations(GinkgoT(), q, map[string]string{v1.OperatorGroupProvidedAPIsAnnotationKey: ""}))
// Ensure csvA's deployment is deleted
require.NoError(GinkgoT(), waitForDeploymentToDelete(testNamespace, c, pkgAStable))
// Await csvB's success
_, err = awaitCSV(crc, nsB, csvB.GetName(), csvSucceededChecker)
require.NoError(GinkgoT(), err)
// Await csvB's copy in namespace C
_, err = awaitCSV(crc, nsC, csvB.GetName(), csvCopiedChecker)
require.NoError(GinkgoT(), err)
// Ensure annotations exist on group B
q = func() (metav1.ObjectMeta, error) {
g, err := crc.OperatorsV1().OperatorGroups(nsB).Get(context.TODO(), groupB.GetName(), metav1.GetOptions{})
return g.ObjectMeta, err
}
require.NoError(GinkgoT(), awaitAnnotations(GinkgoT(), q, map[string]string{v1.OperatorGroupProvidedAPIsAnnotationKey: strings.Join([]string{kvgA, kvgB}, ",")}))
})
It("static provider", func() {
	// Scenario: a "static" operatorgroup (groupA) pins its providedAPIs
	// annotation up front and must never have it reconciled away, while
	// ordinary groups (groupB, groupC) gain/lose providedAPIs as CSVs
	// succeed or conflict.
	//
	// Generate namespaceA
	// Generate namespaceB
	// Generate namespaceC
	// Generate namespaceD
	// Create static operatorGroupA in namespaceA that targets namespaceD with providedAPIs annotation containing KindA.version.group
	// Create operatorGroupB in namespaceB that targets all namespaces
	// Create operatorGroupC in namespaceC that targets namespaceC
	// Create csvA in namespaceB that provides KindA.version.group
	// Wait for csvA in namespaceB to fail
	// Ensure no providedAPI annotations on operatorGroupB
	// Ensure providedAPI annotations are unchanged on operatorGroupA
	// Create csvA in namespaceC
	// Wait for csvA in namespaceC to succeed
	// Ensure KindA.version.group providedAPI annotation on operatorGroupC
	// Create csvB in namespaceB that provides KindB.version.group
	// Wait for csvB to succeed
	// Wait for csvB to be copied to namespaceA, namespaceC, and namespaceD
	// Wait for KindB.version.group to exist in operatorGroupB's providedAPIs annotation
	// Add namespaceD to operatorGroupC's targetNamespaces
	// Wait for csvA in namespaceC to FAIL with status "InterOperatorGroupOwnerConflict"
	// Wait for KindA.version.group providedAPI annotation to be removed from operatorGroupC's providedAPIs annotation
	// Ensure KindA.version.group providedAPI annotation on operatorGroupA

	// Create a catalog for csvA, csvB
	pkgA := genName("a-")
	pkgB := genName("b-")
	pkgAStable := pkgA + "-stable"
	pkgBStable := pkgB + "-stable"
	stableChannel := "stable"
	strategyA := newNginxInstallStrategy(pkgAStable, nil, nil)
	strategyB := newNginxInstallStrategy(pkgBStable, nil, nil)
	crdA := newCRD(genName(pkgA))
	crdB := newCRD(genName(pkgB))
	// Kind.version.group strings matched against operatorgroup providedAPIs annotations below.
	kvgA := fmt.Sprintf("%s.%s.%s", crdA.Spec.Names.Kind, crdA.Spec.Versions[0].Name, crdA.Spec.Group)
	kvgB := fmt.Sprintf("%s.%s.%s", crdB.Spec.Names.Kind, crdB.Spec.Versions[0].Name, crdB.Spec.Group)
	csvA := newCSV(pkgAStable, testNamespace, "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{crdA}, nil, &strategyA)
	csvB := newCSV(pkgBStable, testNamespace, "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{crdB}, nil, &strategyB)

	// Create namespaces (cleaned up in reverse order via defers)
	nsA, nsB, nsC, nsD := genName("a-"), genName("b-"), genName("c-"), genName("d-")
	for _, ns := range []string{nsA, nsB, nsC, nsD} {
		namespace := &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: ns,
			},
		}
		_, err := c.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), namespace, metav1.CreateOptions{})
		require.NoError(GinkgoT(), err)
		defer func(name string) {
			require.NoError(GinkgoT(), c.KubernetesInterface().CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{}))
		}(ns)
	}

	// Create the initial catalogsources
	manifests := []registry.PackageManifest{
		{
			PackageName: pkgA,
			Channels: []registry.PackageChannel{
				{Name: stableChannel, CurrentCSVName: pkgAStable},
			},
			DefaultChannelName: stableChannel,
		},
		{
			PackageName: pkgB,
			Channels: []registry.PackageChannel{
				{Name: stableChannel, CurrentCSVName: pkgBStable},
			},
			DefaultChannelName: stableChannel,
		},
	}

	// Create catalog in namespaceB and namespaceC
	catalog := genName("catalog-")
	_, cleanupCatalogSource := createInternalCatalogSource(c, crc, catalog, nsB, manifests, []apiextensions.CustomResourceDefinition{crdA, crdB}, []v1alpha1.ClusterServiceVersion{csvA, csvB})
	defer cleanupCatalogSource()
	_, err := fetchCatalogSourceOnStatus(crc, catalog, nsB, catalogSourceRegistryPodSynced)
	require.NoError(GinkgoT(), err)
	_, cleanupCatalogSource = createInternalCatalogSource(c, crc, catalog, nsC, manifests, []apiextensions.CustomResourceDefinition{crdA, crdB}, []v1alpha1.ClusterServiceVersion{csvA, csvB})
	defer cleanupCatalogSource()
	_, err = fetchCatalogSourceOnStatus(crc, catalog, nsC, catalogSourceRegistryPodSynced)
	require.NoError(GinkgoT(), err)

	// Create OperatorGroups
	// groupA is static (last arg true) with providedAPIs pre-annotated with kvgA.
	groupA := newOperatorGroup(nsA, genName("a-"), map[string]string{v1.OperatorGroupProvidedAPIsAnnotationKey: kvgA}, nil, []string{nsD}, true)
	groupB := newOperatorGroup(nsB, genName("b-"), nil, nil, nil, false)
	// FIX: groupC previously used the "d-" name prefix (copy-paste from the
	// previous spec); use "c-" to match the namespace it lives in.
	groupC := newOperatorGroup(nsC, genName("c-"), nil, nil, []string{nsC}, false)
	for _, group := range []*v1.OperatorGroup{groupA, groupB, groupC} {
		_, err := crc.OperatorsV1().OperatorGroups(group.GetNamespace()).Create(context.TODO(), group, metav1.CreateOptions{})
		require.NoError(GinkgoT(), err)
		defer func(namespace, name string) {
			require.NoError(GinkgoT(), crc.OperatorsV1().OperatorGroups(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}))
		}(group.GetNamespace(), group.GetName())
	}

	// Create subscription for csvA in namespaceB
	subAName := genName("a-")
	cleanupSubA := createSubscriptionForCatalog(crc, nsB, subAName, catalog, pkgA, stableChannel, pkgAStable, v1alpha1.ApprovalAutomatic)
	defer cleanupSubA()
	subA, err := fetchSubscription(crc, nsB, subAName, subscriptionHasInstallPlanChecker)
	require.NoError(GinkgoT(), err)
	require.NotNil(GinkgoT(), subA)

	// Await csvA's failure: groupA statically claims kvgA, so csvA in nsB conflicts.
	fetchedCSVA, err := awaitCSV(crc, nsB, csvA.GetName(), csvFailedChecker)
	require.NoError(GinkgoT(), err)
	require.Equal(GinkgoT(), v1alpha1.CSVReasonInterOperatorGroupOwnerConflict, fetchedCSVA.Status.Reason)

	// Ensure operatorGroupB doesn't have providedAPI annotation
	q := func() (metav1.ObjectMeta, error) {
		g, err := crc.OperatorsV1().OperatorGroups(nsB).Get(context.TODO(), groupB.GetName(), metav1.GetOptions{})
		return g.ObjectMeta, err
	}
	require.NoError(GinkgoT(), awaitAnnotations(GinkgoT(), q, map[string]string{}))

	// Ensure operatorGroupA still has KindA.version.group in its providedAPIs annotation
	q = func() (metav1.ObjectMeta, error) {
		g, err := crc.OperatorsV1().OperatorGroups(nsA).Get(context.TODO(), groupA.GetName(), metav1.GetOptions{})
		return g.ObjectMeta, err
	}
	require.NoError(GinkgoT(), awaitAnnotations(GinkgoT(), q, map[string]string{v1.OperatorGroupProvidedAPIsAnnotationKey: kvgA}))

	// Create subscription for csvA in namespaceC
	cleanupSubAC := createSubscriptionForCatalog(crc, nsC, subAName, catalog, pkgA, stableChannel, pkgAStable, v1alpha1.ApprovalAutomatic)
	defer cleanupSubAC()
	subAC, err := fetchSubscription(crc, nsC, subAName, subscriptionHasInstallPlanChecker)
	require.NoError(GinkgoT(), err)
	require.NotNil(GinkgoT(), subAC)

	// Await csvA's success: groupC only targets nsC, no overlap with groupA's target (nsD).
	_, err = awaitCSV(crc, nsC, csvA.GetName(), csvSucceededChecker)
	require.NoError(GinkgoT(), err)

	// Ensure operatorGroupC has KindA.version.group in its providedAPIs annotation
	q = func() (metav1.ObjectMeta, error) {
		g, err := crc.OperatorsV1().OperatorGroups(nsC).Get(context.TODO(), groupC.GetName(), metav1.GetOptions{})
		return g.ObjectMeta, err
	}
	require.NoError(GinkgoT(), awaitAnnotations(GinkgoT(), q, map[string]string{v1.OperatorGroupProvidedAPIsAnnotationKey: kvgA}))

	// Ensure operatorGroupA still has KindA.version.group in its providedAPIs annotation
	q = func() (metav1.ObjectMeta, error) {
		g, err := crc.OperatorsV1().OperatorGroups(nsA).Get(context.TODO(), groupA.GetName(), metav1.GetOptions{})
		return g.ObjectMeta, err
	}
	require.NoError(GinkgoT(), awaitAnnotations(GinkgoT(), q, map[string]string{v1.OperatorGroupProvidedAPIsAnnotationKey: kvgA}))

	// Create subscription for csvB in namespaceB
	subBName := genName("b-")
	cleanupSubB := createSubscriptionForCatalog(crc, nsB, subBName, catalog, pkgB, stableChannel, pkgBStable, v1alpha1.ApprovalAutomatic)
	defer cleanupSubB()
	subB, err := fetchSubscription(crc, nsB, subBName, subscriptionHasInstallPlanChecker)
	require.NoError(GinkgoT(), err)
	require.NotNil(GinkgoT(), subB)

	// Await csvB's success
	_, err = awaitCSV(crc, nsB, csvB.GetName(), csvSucceededChecker)
	require.NoError(GinkgoT(), err)

	// Await copied csvBs: groupB watches all namespaces, so copies land everywhere.
	_, err = awaitCSV(crc, nsA, csvB.GetName(), csvCopiedChecker)
	require.NoError(GinkgoT(), err)
	_, err = awaitCSV(crc, nsC, csvB.GetName(), csvCopiedChecker)
	require.NoError(GinkgoT(), err)
	_, err = awaitCSV(crc, nsD, csvB.GetName(), csvCopiedChecker)
	require.NoError(GinkgoT(), err)

	// Ensure operatorGroupB has KindB.version.group in its providedAPIs annotation
	q = func() (metav1.ObjectMeta, error) {
		g, err := crc.OperatorsV1().OperatorGroups(nsB).Get(context.TODO(), groupB.GetName(), metav1.GetOptions{})
		return g.ObjectMeta, err
	}
	require.NoError(GinkgoT(), awaitAnnotations(GinkgoT(), q, map[string]string{v1.OperatorGroupProvidedAPIsAnnotationKey: kvgB}))

	// Ensure operatorGroupA still has KindA.version.group in its providedAPIs annotation
	q = func() (metav1.ObjectMeta, error) {
		g, err := crc.OperatorsV1().OperatorGroups(nsA).Get(context.TODO(), groupA.GetName(), metav1.GetOptions{})
		return g.ObjectMeta, err
	}
	require.NoError(GinkgoT(), awaitAnnotations(GinkgoT(), q, map[string]string{v1.OperatorGroupProvidedAPIsAnnotationKey: kvgA}))

	// Add namespaceD to operatorGroupC's targetNamespaces, forcing an overlap with static groupA.
	groupC, err = crc.OperatorsV1().OperatorGroups(groupC.GetNamespace()).Get(context.TODO(), groupC.GetName(), metav1.GetOptions{})
	require.NoError(GinkgoT(), err)
	groupC.Spec.TargetNamespaces = []string{nsC, nsD}
	_, err = crc.OperatorsV1().OperatorGroups(groupC.GetNamespace()).Update(context.TODO(), groupC, metav1.UpdateOptions{})
	require.NoError(GinkgoT(), err)

	// Wait for csvA in namespaceC to fail with status "InterOperatorGroupOwnerConflict"
	fetchedCSVA, err = awaitCSV(crc, nsC, csvA.GetName(), csvFailedChecker)
	require.NoError(GinkgoT(), err)
	require.Equal(GinkgoT(), v1alpha1.CSVReasonInterOperatorGroupOwnerConflict, fetchedCSVA.Status.Reason)

	// Wait for crdA's providedAPIs to be removed from operatorGroupC's providedAPIs annotation
	q = func() (metav1.ObjectMeta, error) {
		g, err := crc.OperatorsV1().OperatorGroups(nsC).Get(context.TODO(), groupC.GetName(), metav1.GetOptions{})
		return g.ObjectMeta, err
	}
	require.NoError(GinkgoT(), awaitAnnotations(GinkgoT(), q, map[string]string{v1.OperatorGroupProvidedAPIsAnnotationKey: ""}))

	// Ensure the static group's annotation survived the conflict unchanged.
	q = func() (metav1.ObjectMeta, error) {
		g, err := crc.OperatorsV1().OperatorGroups(nsA).Get(context.TODO(), groupA.GetName(), metav1.GetOptions{})
		return g.ObjectMeta, err
	}
	require.NoError(GinkgoT(), awaitAnnotations(GinkgoT(), q, map[string]string{v1.OperatorGroupProvidedAPIsAnnotationKey: kvgA}))
})
// TODO: Test OperatorGroup resizing collisions
// TODO: Test Subscriptions with dependencies and transitive dependencies in intersecting OperatorGroups
// TODO: Test Subscription upgrade paths with + and - providedAPIs
It("CSV copy watching all namespaces", func() {
	// Verifies that a CSV installed under the default all-namespaces
	// operatorgroup has its roles promoted to clusterroles, is copied into a
	// newly created namespace, and that the copy is garbage-collected when
	// the operatorgroup is contracted to a single namespace.
	csvName := genName("another-csv-") // must be lowercase for DNS-1123 validation
	opGroupNamespace := testNamespace
	matchingLabel := map[string]string{"inGroup": opGroupNamespace}
	otherNamespaceName := genName(opGroupNamespace + "-")

	GinkgoT().Log("Creating CRD")
	mainCRDPlural := genName("opgroup-")
	mainCRD := newCRD(mainCRDPlural)
	cleanupCRD, err := createCRD(c, mainCRD)
	require.NoError(GinkgoT(), err)
	defer cleanupCRD()

	GinkgoT().Logf("Getting default operator group 'global-operators' installed via operatorgroup-default.yaml %v", opGroupNamespace)
	operatorGroup, err := crc.OperatorsV1().OperatorGroups(opGroupNamespace).Get(context.TODO(), "global-operators", metav1.GetOptions{})
	require.NoError(GinkgoT(), err)
	expectedOperatorGroupStatus := v1.OperatorGroupStatus{
		Namespaces: []string{metav1.NamespaceAll},
	}

	GinkgoT().Log("Waiting on operator group to have correct status")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetched, fetchErr := crc.OperatorsV1().OperatorGroups(opGroupNamespace).Get(context.TODO(), operatorGroup.Name, metav1.GetOptions{})
		if fetchErr != nil {
			return false, fetchErr
		}
		if len(fetched.Status.Namespaces) > 0 {
			require.ElementsMatch(GinkgoT(), expectedOperatorGroupStatus.Namespaces, fetched.Status.Namespaces)
			fmt.Println(fetched.Status.Namespaces)
			return true, nil
		}
		return false, nil
	})
	require.NoError(GinkgoT(), err)

	GinkgoT().Log("Creating CSV")
	// Generate permissions granting the operator full access to its own CRD.
	serviceAccountName := genName("nginx-sa")
	permissions := []v1alpha1.StrategyDeploymentPermissions{
		{
			ServiceAccountName: serviceAccountName,
			Rules: []rbacv1.PolicyRule{
				{
					Verbs:     []string{rbacv1.VerbAll},
					APIGroups: []string{mainCRD.Spec.Group},
					Resources: []string{mainCRDPlural},
				},
			},
		},
	}
	serviceAccount := &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: opGroupNamespace,
			Name:      serviceAccountName,
		},
	}
	role := &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: opGroupNamespace,
			Name:      serviceAccountName + "-role",
		},
		Rules: permissions[0].Rules,
	}
	roleBinding := &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: opGroupNamespace,
			Name:      serviceAccountName + "-rb",
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      serviceAccountName,
				Namespace: opGroupNamespace,
			},
		},
		RoleRef: rbacv1.RoleRef{
			Kind: "Role",
			Name: role.GetName(),
		},
	}
	_, err = c.CreateServiceAccount(serviceAccount)
	require.NoError(GinkgoT(), err)
	defer func() {
		c.DeleteServiceAccount(serviceAccount.GetNamespace(), serviceAccount.GetName(), metav1.NewDeleteOptions(0))
	}()
	createdRole, err := c.CreateRole(role)
	require.NoError(GinkgoT(), err)
	defer func() {
		c.DeleteRole(role.GetNamespace(), role.GetName(), metav1.NewDeleteOptions(0))
	}()
	createdRoleBinding, err := c.CreateRoleBinding(roleBinding)
	require.NoError(GinkgoT(), err)
	defer func() {
		c.DeleteRoleBinding(roleBinding.GetNamespace(), roleBinding.GetName(), metav1.NewDeleteOptions(0))
	}()

	// Create a new NamedInstallStrategy
	deploymentName := genName("operator-deployment")
	namedStrategy := newNginxInstallStrategy(deploymentName, permissions, nil)
	aCSV := newCSV(csvName, opGroupNamespace, "", semver.MustParse("0.0.0"), []apiextensions.CustomResourceDefinition{mainCRD}, nil, &namedStrategy)

	// Use the It spec name as label after stripping whitespaces
	aCSV.Labels = map[string]string{"label": K8sSafeCurrentTestDescription()}
	createdCSV, err := crc.OperatorsV1alpha1().ClusterServiceVersions(opGroupNamespace).Create(context.TODO(), &aCSV, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)

	// Label the role and rolebinding as owned by the CSV so OLM adopts them.
	err = ownerutil.AddOwnerLabels(createdRole, createdCSV)
	require.NoError(GinkgoT(), err)
	_, err = c.UpdateRole(createdRole)
	require.NoError(GinkgoT(), err)
	err = ownerutil.AddOwnerLabels(createdRoleBinding, createdCSV)
	require.NoError(GinkgoT(), err)
	_, err = c.UpdateRoleBinding(createdRoleBinding)
	require.NoError(GinkgoT(), err)

	GinkgoT().Log("wait for CSV to succeed")
	_, err = fetchCSV(crc, createdCSV.GetName(), opGroupNamespace, csvSucceededChecker)
	require.NoError(GinkgoT(), err)

	GinkgoT().Log("wait for roles to be promoted to clusterroles")
	var fetchedRole *rbacv1.ClusterRole
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetchedRole, err = c.GetClusterRole(role.GetName())
		if err != nil {
			if apierrors.IsNotFound(err) {
				return false, nil
			}
			return false, err
		}
		return true, nil
	})
	// FIX: check the poll error before dereferencing fetchedRole; previously a
	// poll timeout left fetchedRole nil and the assertion below panicked,
	// masking the real failure.
	require.NoError(GinkgoT(), err)
	require.EqualValues(GinkgoT(), append(role.Rules, rbacv1.PolicyRule{
		Verbs:     []string{"get", "list", "watch"},
		APIGroups: []string{""},
		Resources: []string{"namespaces"},
	}), fetchedRole.Rules)
	var fetchedRoleBinding *rbacv1.ClusterRoleBinding
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetchedRoleBinding, err = c.GetClusterRoleBinding(roleBinding.GetName())
		if err != nil {
			if apierrors.IsNotFound(err) {
				return false, nil
			}
			return false, err
		}
		return true, nil
	})
	// FIX: same nil-dereference hazard as above for fetchedRoleBinding.
	require.NoError(GinkgoT(), err)
	require.EqualValues(GinkgoT(), roleBinding.Subjects, fetchedRoleBinding.Subjects)
	require.EqualValues(GinkgoT(), roleBinding.RoleRef.Name, fetchedRoleBinding.RoleRef.Name)
	require.EqualValues(GinkgoT(), "rbac.authorization.k8s.io", fetchedRoleBinding.RoleRef.APIGroup)
	require.EqualValues(GinkgoT(), "ClusterRole", fetchedRoleBinding.RoleRef.Kind)

	GinkgoT().Log("ensure operator was granted namespace list permission")
	res, err := c.KubernetesInterface().AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), &authorizationv1.SubjectAccessReview{
		Spec: authorizationv1.SubjectAccessReviewSpec{
			User: "system:serviceaccount:" + opGroupNamespace + ":" + serviceAccountName,
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Group:    corev1.GroupName,
				Version:  "v1",
				Resource: "namespaces",
				Verb:     "list",
			},
		},
	}, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)
	require.True(GinkgoT(), res.Status.Allowed, "got %#v", res.Status)

	GinkgoT().Log("Waiting for operator namespace csv to have annotations")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetchedCSV, fetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(opGroupNamespace).Get(context.TODO(), csvName, metav1.GetOptions{})
		if fetchErr != nil {
			if apierrors.IsNotFound(fetchErr) {
				return false, nil
			}
			GinkgoT().Logf("Error (in %v): %v", testNamespace, fetchErr.Error())
			return false, fetchErr
		}
		if checkOperatorGroupAnnotations(fetchedCSV, operatorGroup, true, corev1.NamespaceAll) == nil {
			return true, nil
		}
		return false, nil
	})
	require.NoError(GinkgoT(), err)

	csvList, err := crc.OperatorsV1alpha1().ClusterServiceVersions(corev1.NamespaceAll).List(context.TODO(), metav1.ListOptions{LabelSelector: fmt.Sprintf("label=%s", K8sSafeCurrentTestDescription())})
	require.NoError(GinkgoT(), err)
	GinkgoT().Logf("Found CSV count of %v", len(csvList.Items))

	GinkgoT().Logf("Create other namespace %s", otherNamespaceName)
	otherNamespace := corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:   otherNamespaceName,
			Labels: matchingLabel,
		},
	}
	_, err = c.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), &otherNamespace, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)
	defer func() {
		err = c.KubernetesInterface().CoreV1().Namespaces().Delete(context.TODO(), otherNamespaceName, metav1.DeleteOptions{})
		require.NoError(GinkgoT(), err)
	}()

	GinkgoT().Log("Waiting to ensure copied CSV shows up in other namespace")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetchedCSV, fetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(context.TODO(), csvName, metav1.GetOptions{})
		if fetchErr != nil {
			if apierrors.IsNotFound(fetchErr) {
				return false, nil
			}
			GinkgoT().Logf("Error (in %v): %v", otherNamespaceName, fetchErr.Error())
			return false, fetchErr
		}
		if checkOperatorGroupAnnotations(fetchedCSV, operatorGroup, false, "") == nil {
			return true, nil
		}
		return false, nil
	})
	require.NoError(GinkgoT(), err)

	// verify created CSV is cleaned up after operator group is "contracted"
	GinkgoT().Log("Modifying operator group to no longer watch all namespaces")
	currentOperatorGroup, err := crc.OperatorsV1().OperatorGroups(opGroupNamespace).Get(context.TODO(), operatorGroup.Name, metav1.GetOptions{})
	require.NoError(GinkgoT(), err)
	currentOperatorGroup.Spec.TargetNamespaces = []string{opGroupNamespace}
	_, err = crc.OperatorsV1().OperatorGroups(opGroupNamespace).Update(context.TODO(), currentOperatorGroup, metav1.UpdateOptions{})
	require.NoError(GinkgoT(), err)
	defer func() {
		// Restore the global group so later specs see the default state.
		GinkgoT().Log("Re-modifying operator group to be watching all namespaces")
		currentOperatorGroup, err = crc.OperatorsV1().OperatorGroups(opGroupNamespace).Get(context.TODO(), operatorGroup.Name, metav1.GetOptions{})
		require.NoError(GinkgoT(), err)
		currentOperatorGroup.Spec = v1.OperatorGroupSpec{}
		_, err = crc.OperatorsV1().OperatorGroups(opGroupNamespace).Update(context.TODO(), currentOperatorGroup, metav1.UpdateOptions{})
		require.NoError(GinkgoT(), err)
	}()

	// Copied CSV deletion can lag the group contraction, hence the doubled timeout.
	err = wait.Poll(pollInterval, 2*pollDuration, func() (bool, error) {
		_, fetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(context.TODO(), csvName, metav1.GetOptions{})
		if fetchErr != nil {
			if apierrors.IsNotFound(fetchErr) {
				return true, nil
			}
			GinkgoT().Logf("Error (in %v): %v", opGroupNamespace, fetchErr.Error())
			return false, fetchErr
		}
		return false, nil
	})
	require.NoError(GinkgoT(), err)
})
It("insufficient permissions resolve via RBAC", func() {
	// Verifies that a CSV installed under an operatorgroup with a
	// service-account-scoped installer fails while the service account lacks
	// permissions, then succeeds once an admin-level Role/RoleBinding is
	// granted to that service account.

	// log prefixes each message with a wall-clock timestamp for easier triage.
	log := func(s string) {
		GinkgoT().Logf("%s: %s", time.Now().Format("15:04:05.9999"), s)
	}
	csvName := genName("another-csv-")
	newNamespaceName := genName(testNamespace + "-")
	_, err := c.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: newNamespaceName,
		},
	}, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)
	defer func() {
		err = c.KubernetesInterface().CoreV1().Namespaces().Delete(context.TODO(), newNamespaceName, metav1.DeleteOptions{})
		require.NoError(GinkgoT(), err)
	}()

	log("Creating CRD")
	mainCRDPlural := genName("opgroup")
	mainCRD := newCRD(mainCRDPlural)
	cleanupCRD, err := createCRD(c, mainCRD)
	require.NoError(GinkgoT(), err)
	defer cleanupCRD()

	log("Creating operator group")
	serviceAccountName := genName("nginx-sa")
	// intentionally creating an operator group without a service account already existing
	operatorGroup := v1.OperatorGroup{
		ObjectMeta: metav1.ObjectMeta{
			Name:      genName("e2e-operator-group-"),
			Namespace: newNamespaceName,
		},
		Spec: v1.OperatorGroupSpec{
			ServiceAccountName: serviceAccountName,
			TargetNamespaces:   []string{newNamespaceName},
		},
	}
	_, err = crc.OperatorsV1().OperatorGroups(newNamespaceName).Create(context.TODO(), &operatorGroup, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)

	log("Creating CSV")
	// Create a new NamedInstallStrategy
	deploymentName := genName("operator-deployment")
	namedStrategy := newNginxInstallStrategy(deploymentName, nil, nil)
	aCSV := newCSV(csvName, newNamespaceName, "", semver.MustParse("0.0.0"), []apiextensions.CustomResourceDefinition{mainCRD}, nil, &namedStrategy)
	createdCSV, err := crc.OperatorsV1alpha1().ClusterServiceVersions(newNamespaceName).Create(context.TODO(), &aCSV, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)

	serviceAccount := &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: newNamespaceName,
			Name:      serviceAccountName,
		},
	}
	ownerutil.AddNonBlockingOwner(serviceAccount, createdCSV)
	err = ownerutil.AddOwnerLabels(serviceAccount, createdCSV)
	require.NoError(GinkgoT(), err)
	_, err = c.CreateServiceAccount(serviceAccount)
	require.NoError(GinkgoT(), err)

	// Create token secret for the serviceaccount
	_, cleanupSE := newTokenSecret(c, newNamespaceName, serviceAccount.GetName())
	defer cleanupSE()

	// The service account has no RBAC yet, so the install must fail.
	log("wait for CSV to fail")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetched, err := crc.OperatorsV1alpha1().ClusterServiceVersions(newNamespaceName).Get(context.TODO(), createdCSV.GetName(), metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		log(fmt.Sprintf("%s (%s): %s", fetched.Status.Phase, fetched.Status.Reason, fetched.Status.Message))
		return csvFailedChecker(fetched), nil
	})
	require.NoError(GinkgoT(), err)

	// now add cluster admin permissions to service account
	role := &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: newNamespaceName,
			Name:      serviceAccountName + "-role",
		},
		Rules: []rbacv1.PolicyRule{
			{
				Verbs:     []string{"*"},
				APIGroups: []string{"*"},
				Resources: []string{"*"},
			},
		},
	}
	ownerutil.AddNonBlockingOwner(role, createdCSV)
	err = ownerutil.AddOwnerLabels(role, createdCSV)
	require.NoError(GinkgoT(), err)
	roleBinding := &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: newNamespaceName,
			Name:      serviceAccountName + "-rb",
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      serviceAccountName,
				Namespace: newNamespaceName,
			},
		},
		RoleRef: rbacv1.RoleRef{
			Kind: "Role",
			Name: role.GetName(),
		},
	}
	ownerutil.AddNonBlockingOwner(roleBinding, createdCSV)
	err = ownerutil.AddOwnerLabels(roleBinding, createdCSV)
	require.NoError(GinkgoT(), err)
	_, err = c.CreateRole(role)
	require.NoError(GinkgoT(), err)
	_, err = c.CreateRoleBinding(roleBinding)
	require.NoError(GinkgoT(), err)

	// With permissions granted, OLM should retry and the CSV should install.
	// FIX: corrected "succeeed" typo in the log message.
	log("wait for CSV to succeed")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetched, err := crc.OperatorsV1alpha1().ClusterServiceVersions(newNamespaceName).Get(context.TODO(), createdCSV.GetName(), metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		log(fmt.Sprintf("%s (%s): %s", fetched.Status.Phase, fetched.Status.Reason, fetched.Status.Message))
		return csvSucceededChecker(fetched), nil
	})
	require.NoError(GinkgoT(), err)
})
It("insufficient permissions resolve via service account removal", func() {
	// Timestamped logger so poll progress can be correlated with other events.
	log := func(s string) {
		GinkgoT().Logf("%s: %s", time.Now().Format("15:04:05.9999"), s)
	}
	csvName := genName("another-csv-")

	// Run the whole scenario in a fresh namespace so RBAC state is isolated.
	newNamespaceName := genName(testNamespace + "-")
	_, err := c.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: newNamespaceName,
		},
	}, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)
	defer func() {
		err = c.KubernetesInterface().CoreV1().Namespaces().Delete(context.TODO(), newNamespaceName, metav1.DeleteOptions{})
		require.NoError(GinkgoT(), err)
	}()

	log("Creating CRD")
	mainCRDPlural := genName("opgroup")
	mainCRD := newCRD(mainCRDPlural)
	cleanupCRD, err := createCRD(c, mainCRD)
	require.NoError(GinkgoT(), err)
	defer cleanupCRD()

	log("Creating operator group")
	serviceAccountName := genName("nginx-sa")
	// intentionally creating an operator group without a service account already existing
	operatorGroup := v1.OperatorGroup{
		ObjectMeta: metav1.ObjectMeta{
			Name:      genName("e2e-operator-group-"),
			Namespace: newNamespaceName,
		},
		Spec: v1.OperatorGroupSpec{
			ServiceAccountName: serviceAccountName,
			TargetNamespaces:   []string{newNamespaceName},
		},
	}
	_, err = crc.OperatorsV1().OperatorGroups(newNamespaceName).Create(context.TODO(), &operatorGroup, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)

	log("Creating CSV")
	// Create a new NamedInstallStrategy
	deploymentName := genName("operator-deployment")
	namedStrategy := newNginxInstallStrategy(deploymentName, nil, nil)
	aCSV := newCSV(csvName, newNamespaceName, "", semver.MustParse("0.0.0"), []apiextensions.CustomResourceDefinition{mainCRD}, nil, &namedStrategy)
	createdCSV, err := crc.OperatorsV1alpha1().ClusterServiceVersions(newNamespaceName).Create(context.TODO(), &aCSV, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)

	// Create the service account referenced by the operator group, owned by the CSV,
	// but deliberately give it NO role/rolebinding so installation lacks permissions.
	serviceAccount := &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: newNamespaceName,
			Name:      serviceAccountName,
		},
	}
	ownerutil.AddNonBlockingOwner(serviceAccount, createdCSV)
	err = ownerutil.AddOwnerLabels(serviceAccount, createdCSV)
	require.NoError(GinkgoT(), err)
	_, err = c.CreateServiceAccount(serviceAccount)
	require.NoError(GinkgoT(), err)

	// Create token secret for the serviceaccount
	_, cleanupSE := newTokenSecret(c, newNamespaceName, serviceAccount.GetName())
	defer cleanupSE()

	// The CSV should fail first because the scoped service account has insufficient permissions.
	log("wait for CSV to fail")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetched, err := crc.OperatorsV1alpha1().ClusterServiceVersions(newNamespaceName).Get(context.TODO(), createdCSV.GetName(), metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		log(fmt.Sprintf("%s (%s): %s", fetched.Status.Phase, fetched.Status.Reason, fetched.Status.Message))
		return csvFailedChecker(fetched), nil
	})
	require.NoError(GinkgoT(), err)

	// now remove operator group specified service account
	createdOpGroup, err := crc.OperatorsV1().OperatorGroups(newNamespaceName).Get(context.TODO(), operatorGroup.GetName(), metav1.GetOptions{})
	require.NoError(GinkgoT(), err)
	createdOpGroup.Spec.ServiceAccountName = ""
	_, err = crc.OperatorsV1().OperatorGroups(newNamespaceName).Update(context.TODO(), createdOpGroup, metav1.UpdateOptions{})
	require.NoError(GinkgoT(), err)

	// With the scoped service account removed, OLM should fall back to its own
	// permissions and the CSV should recover.
	log("wait for CSV to succeed")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetched, err := crc.OperatorsV1alpha1().ClusterServiceVersions(newNamespaceName).Get(context.TODO(), createdCSV.GetName(), metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		log(fmt.Sprintf("%s (%s): %s", fetched.Status.Phase, fetched.Status.Reason, fetched.Status.Message))
		return csvSucceededChecker(fetched), nil
	})
	require.NoError(GinkgoT(), err)
})
// Versions of OLM at 0.14.1 and older had a bug that would place the wrong namespace annotation on copied CSVs,
// preventing them from being GCd. This ensures that any leftover CSVs in that state are properly cleared up.
It("cleanup csvs with bad namespace annotation", func() {
	csvName := genName("another-csv-") // must be lowercase for DNS-1123 validation
	opGroupNamespace := testNamespace
	matchingLabel := map[string]string{"inGroup": opGroupNamespace}
	otherNamespaceName := genName(opGroupNamespace + "-")

	GinkgoT().Log("Creating CRD")
	mainCRDPlural := genName("opgroup-")
	mainCRD := newCRD(mainCRDPlural)
	cleanupCRD, err := createCRD(c, mainCRD)
	require.NoError(GinkgoT(), err)
	defer cleanupCRD()

	GinkgoT().Logf("Getting default operator group 'global-operators' installed via operatorgroup-default.yaml %v", opGroupNamespace)
	operatorGroup, err := crc.OperatorsV1().OperatorGroups(opGroupNamespace).Get(context.TODO(), "global-operators", metav1.GetOptions{})
	require.NoError(GinkgoT(), err)

	// A global operator group should target every namespace.
	expectedOperatorGroupStatus := v1.OperatorGroupStatus{
		Namespaces: []string{metav1.NamespaceAll},
	}
	GinkgoT().Log("Waiting on operator group to have correct status")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetched, fetchErr := crc.OperatorsV1().OperatorGroups(opGroupNamespace).Get(context.TODO(), operatorGroup.Name, metav1.GetOptions{})
		if fetchErr != nil {
			return false, fetchErr
		}
		if len(fetched.Status.Namespaces) > 0 {
			require.ElementsMatch(GinkgoT(), expectedOperatorGroupStatus.Namespaces, fetched.Status.Namespaces)
			fmt.Println(fetched.Status.Namespaces)
			return true, nil
		}
		return false, nil
	})
	require.NoError(GinkgoT(), err)

	GinkgoT().Log("Creating CSV")
	// Generate permissions
	serviceAccountName := genName("nginx-sa")
	permissions := []v1alpha1.StrategyDeploymentPermissions{
		{
			ServiceAccountName: serviceAccountName,
			Rules: []rbacv1.PolicyRule{
				{
					Verbs:     []string{rbacv1.VerbAll},
					APIGroups: []string{mainCRD.Spec.Group},
					Resources: []string{mainCRDPlural},
				},
			},
		},
	}
	serviceAccount := &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: opGroupNamespace,
			Name:      serviceAccountName,
		},
	}
	role := &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: opGroupNamespace,
			Name:      serviceAccountName + "-role",
		},
		Rules: permissions[0].Rules,
	}
	roleBinding := &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: opGroupNamespace,
			Name:      serviceAccountName + "-rb",
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      serviceAccountName,
				Namespace: opGroupNamespace,
			},
		},
		RoleRef: rbacv1.RoleRef{
			Kind: "Role",
			Name: role.GetName(),
		},
	}
	// Best-effort cleanup: deletion errors in the defers are intentionally ignored.
	_, err = c.CreateServiceAccount(serviceAccount)
	require.NoError(GinkgoT(), err)
	defer func() {
		c.DeleteServiceAccount(serviceAccount.GetNamespace(), serviceAccount.GetName(), metav1.NewDeleteOptions(0))
	}()
	createdRole, err := c.CreateRole(role)
	require.NoError(GinkgoT(), err)
	defer func() {
		c.DeleteRole(role.GetNamespace(), role.GetName(), metav1.NewDeleteOptions(0))
	}()
	createdRoleBinding, err := c.CreateRoleBinding(roleBinding)
	require.NoError(GinkgoT(), err)
	defer func() {
		c.DeleteRoleBinding(roleBinding.GetNamespace(), roleBinding.GetName(), metav1.NewDeleteOptions(0))
	}()

	// Create a new NamedInstallStrategy
	deploymentName := genName("operator-deployment")
	namedStrategy := newNginxInstallStrategy(deploymentName, permissions, nil)
	aCSV := newCSV(csvName, opGroupNamespace, "", semver.MustParse("0.0.0"), []apiextensions.CustomResourceDefinition{mainCRD}, nil, &namedStrategy)

	// Use the It spec name as label after stripping whitespaces
	aCSV.Labels = map[string]string{"label": K8sSafeCurrentTestDescription()}
	createdCSV, err := crc.OperatorsV1alpha1().ClusterServiceVersions(opGroupNamespace).Create(context.TODO(), &aCSV, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)

	// Tag the pre-created RBAC with the CSV's owner labels so OLM adopts it.
	err = ownerutil.AddOwnerLabels(createdRole, createdCSV)
	require.NoError(GinkgoT(), err)
	_, err = c.UpdateRole(createdRole)
	require.NoError(GinkgoT(), err)
	err = ownerutil.AddOwnerLabels(createdRoleBinding, createdCSV)
	require.NoError(GinkgoT(), err)
	_, err = c.UpdateRoleBinding(createdRoleBinding)
	require.NoError(GinkgoT(), err)

	GinkgoT().Log("wait for CSV to succeed")
	_, err = fetchCSV(crc, createdCSV.GetName(), opGroupNamespace, csvSucceededChecker)
	require.NoError(GinkgoT(), err)

	GinkgoT().Log("wait for roles to be promoted to clusterroles")
	var fetchedRole *rbacv1.ClusterRole
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetchedRole, err = c.GetClusterRole(role.GetName())
		if err != nil {
			if apierrors.IsNotFound(err) {
				return false, nil
			}
			return false, err
		}
		return true, nil
	})
	// BUGFIX: previously the poll error was never checked, so a timeout left
	// fetchedRole nil and the assertion below panicked instead of failing cleanly.
	require.NoError(GinkgoT(), err)
	require.EqualValues(GinkgoT(), append(role.Rules, rbacv1.PolicyRule{
		Verbs:     []string{"get", "list", "watch"},
		APIGroups: []string{""},
		Resources: []string{"namespaces"},
	}), fetchedRole.Rules)
	var fetchedRoleBinding *rbacv1.ClusterRoleBinding
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetchedRoleBinding, err = c.GetClusterRoleBinding(roleBinding.GetName())
		if err != nil {
			if apierrors.IsNotFound(err) {
				return false, nil
			}
			return false, err
		}
		return true, nil
	})
	// BUGFIX: same as above for the promoted cluster role binding.
	require.NoError(GinkgoT(), err)
	require.EqualValues(GinkgoT(), roleBinding.Subjects, fetchedRoleBinding.Subjects)
	require.EqualValues(GinkgoT(), roleBinding.RoleRef.Name, fetchedRoleBinding.RoleRef.Name)
	require.EqualValues(GinkgoT(), "rbac.authorization.k8s.io", fetchedRoleBinding.RoleRef.APIGroup)
	require.EqualValues(GinkgoT(), "ClusterRole", fetchedRoleBinding.RoleRef.Kind)

	GinkgoT().Log("ensure operator was granted namespace list permission")
	res, err := c.KubernetesInterface().AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), &authorizationv1.SubjectAccessReview{
		Spec: authorizationv1.SubjectAccessReviewSpec{
			User: "system:serviceaccount:" + opGroupNamespace + ":" + serviceAccountName,
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Group:    corev1.GroupName,
				Version:  "v1",
				Resource: "namespaces",
				Verb:     "list",
			},
		},
	}, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)
	require.True(GinkgoT(), res.Status.Allowed, "got %#v", res.Status)

	GinkgoT().Log("Waiting for operator namespace csv to have annotations")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetchedCSV, fetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(opGroupNamespace).Get(context.TODO(), csvName, metav1.GetOptions{})
		if fetchErr != nil {
			if apierrors.IsNotFound(fetchErr) {
				return false, nil
			}
			GinkgoT().Logf("Error (in %v): %v", testNamespace, fetchErr.Error())
			return false, fetchErr
		}
		if checkOperatorGroupAnnotations(fetchedCSV, operatorGroup, true, corev1.NamespaceAll) == nil {
			return true, nil
		}
		return false, nil
	})
	require.NoError(GinkgoT(), err)

	csvList, err := crc.OperatorsV1alpha1().ClusterServiceVersions(corev1.NamespaceAll).List(context.TODO(), metav1.ListOptions{LabelSelector: fmt.Sprintf("label=%s", K8sSafeCurrentTestDescription())})
	require.NoError(GinkgoT(), err)
	GinkgoT().Logf("Found CSV count of %v", len(csvList.Items))
	GinkgoT().Logf("Create other namespace %s", otherNamespaceName)
	otherNamespace := corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:   otherNamespaceName,
			Labels: matchingLabel,
		},
	}
	_, err = c.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), &otherNamespace, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)
	defer func() {
		err = c.KubernetesInterface().CoreV1().Namespaces().Delete(context.TODO(), otherNamespaceName, metav1.DeleteOptions{})
		require.NoError(GinkgoT(), err)
	}()

	GinkgoT().Log("Waiting to ensure copied CSV shows up in other namespace")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetchedCSV, fetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(context.TODO(), csvName, metav1.GetOptions{})
		if fetchErr != nil {
			if apierrors.IsNotFound(fetchErr) {
				return false, nil
			}
			GinkgoT().Logf("Error (in %v): %v", otherNamespaceName, fetchErr.Error())
			return false, fetchErr
		}
		if checkOperatorGroupAnnotations(fetchedCSV, operatorGroup, false, "") == nil {
			return true, nil
		}
		return false, nil
	})
	require.NoError(GinkgoT(), err)

	// Reproduce the pre-0.14.2 bug: point the copied CSV's operatorgroup-namespace
	// annotation at its own namespace instead of the operator group's.
	GinkgoT().Log("Copied CSV showed up in other namespace, giving copied CSV a bad OperatorGroup annotation")
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetchedCSV, fetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(context.TODO(), csvName, metav1.GetOptions{})
		if fetchErr != nil {
			return false, fetchErr
		}
		fetchedCSV.Annotations[v1.OperatorGroupNamespaceAnnotationKey] = fetchedCSV.GetNamespace()
		_, updateErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Update(context.TODO(), fetchedCSV, metav1.UpdateOptions{})
		if updateErr != nil {
			GinkgoT().Logf("Error updating copied CSV (in %v): %v", otherNamespaceName, updateErr.Error())
			return false, updateErr
		}
		return true, nil
	})
	require.NoError(GinkgoT(), err)

	GinkgoT().Log("Done updating copied CSV with bad annotation OperatorGroup, waiting for CSV to be gc'd")
	err = wait.Poll(pollInterval, 2*pollDuration, func() (bool, error) {
		csv, fetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(context.TODO(), csvName, metav1.GetOptions{})
		if fetchErr != nil {
			if apierrors.IsNotFound(fetchErr) {
				return true, nil
			}
			GinkgoT().Logf("Error (in %v): %v", opGroupNamespace, fetchErr.Error())
			return false, fetchErr
		}
		// The CSV with the wrong annotation could have been replaced with a new copied CSV by this time
		// If we find a CSV in the namespace, and it contains the correct annotation, it means the CSV
		// with the wrong annotation was GCed
		if csv.Annotations[v1.OperatorGroupNamespaceAnnotationKey] != csv.GetNamespace() {
			return true, nil
		}
		return false, nil
	})
	require.NoError(GinkgoT(), err)
})
It("OperatorGroupLabels", func() {
	// Create the namespaces that will have an OperatorGroup Label applied.
	testNamespaceA := genName("namespace-a-")
	testNamespaceB := genName("namespace-b-")
	testNamespaceC := genName("namespace-c-")
	testNamespaces := []string{
		testNamespaceA, testNamespaceB, testNamespaceC,
	}

	// Create the namespaces
	for _, namespace := range testNamespaces {
		_, err := c.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: namespace,
			},
		}, metav1.CreateOptions{})
		require.NoError(GinkgoT(), err)
	}

	// Cleanup namespaces
	defer func() {
		for _, namespace := range testNamespaces {
			err := c.KubernetesInterface().CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{})
			require.NoError(GinkgoT(), err)
		}
	}()

	// Create an OperatorGroup with no target namespaces (cluster-scoped).
	operatorGroup := &v1.OperatorGroup{
		ObjectMeta: metav1.ObjectMeta{
			Name:      genName("e2e-operator-group-"),
			Namespace: testNamespaceA,
		},
		Spec: v1.OperatorGroupSpec{
			TargetNamespaces: []string{},
		},
	}
	operatorGroup, err := crc.OperatorsV1().OperatorGroups(testNamespaceA).Create(context.TODO(), operatorGroup, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)

	// Cleanup OperatorGroup
	defer func() {
		err := crc.OperatorsV1().OperatorGroups(testNamespaceA).Delete(context.TODO(), operatorGroup.GetName(), metav1.DeleteOptions{})
		require.NoError(GinkgoT(), err)
	}()

	// Create the OperatorGroup Label
	ogLabel, err := getOGLabelKey(operatorGroup)
	require.NoError(GinkgoT(), err)

	// Create list options selecting namespaces carrying this group's label.
	listOptions := metav1.ListOptions{
		LabelSelector: labels.Set(map[string]string{ogLabel: ""}).String(),
	}

	// A cluster OperatorGroup labels no namespaces; only the count matters here,
	// so the returned list is deliberately discarded.
	_, err = pollForNamespaceListCount(c, listOptions, 0)
	require.NoError(GinkgoT(), err)

	// Update the OperatorGroup to include a single namespace
	operatorGroup.Spec.TargetNamespaces = []string{testNamespaceA}
	updateOGSpecFunc := updateOperatorGroupSpecFunc(GinkgoT(), crc, testNamespaceA, operatorGroup.GetName())
	require.NoError(GinkgoT(), retry.RetryOnConflict(retry.DefaultBackoff, updateOGSpecFunc(operatorGroup.Spec)))
	namespaceList, err := pollForNamespaceListCount(c, listOptions, 1)
	require.NoError(GinkgoT(), err)
	require.True(GinkgoT(), checkForOperatorGroupLabels(operatorGroup, namespaceList.Items))

	// Update the OperatorGroup to include two namespaces
	operatorGroup.Spec.TargetNamespaces = []string{testNamespaceA, testNamespaceC}
	require.NoError(GinkgoT(), retry.RetryOnConflict(retry.DefaultBackoff, updateOGSpecFunc(operatorGroup.Spec)))
	namespaceList, err = pollForNamespaceListCount(c, listOptions, 2)
	require.NoError(GinkgoT(), err)
	require.True(GinkgoT(), checkForOperatorGroupLabels(operatorGroup, namespaceList.Items))

	// Update the OperatorGroup to include three namespaces
	operatorGroup.Spec.TargetNamespaces = []string{testNamespaceA, testNamespaceB, testNamespaceC}
	require.NoError(GinkgoT(), retry.RetryOnConflict(retry.DefaultBackoff, updateOGSpecFunc(operatorGroup.Spec)))
	namespaceList, err = pollForNamespaceListCount(c, listOptions, 3)
	require.NoError(GinkgoT(), err)
	require.True(GinkgoT(), checkForOperatorGroupLabels(operatorGroup, namespaceList.Items))

	// Update the OperatorGroup to include two namespaces
	operatorGroup.Spec.TargetNamespaces = []string{testNamespaceA, testNamespaceC}
	require.NoError(GinkgoT(), retry.RetryOnConflict(retry.DefaultBackoff, updateOGSpecFunc(operatorGroup.Spec)))
	namespaceList, err = pollForNamespaceListCount(c, listOptions, 2)
	require.NoError(GinkgoT(), err)
	require.True(GinkgoT(), checkForOperatorGroupLabels(operatorGroup, namespaceList.Items))

	// Make the OperatorGroup a Cluster OperatorGroup: all labels should be removed.
	operatorGroup.Spec.TargetNamespaces = []string{}
	require.NoError(GinkgoT(), retry.RetryOnConflict(retry.DefaultBackoff, updateOGSpecFunc(operatorGroup.Spec)))
	_, err = pollForNamespaceListCount(c, listOptions, 0)
	require.NoError(GinkgoT(), err)
})
It("CleanupDeletedOperatorGroupLabels", func() {
	// Create the namespaces that will have an OperatorGroup Label applied.
	testNamespaceA := genName("namespace-a-")
	testNamespaceB := genName("namespace-b-")
	testNamespaceC := genName("namespace-c-")
	testNamespaces := []string{
		testNamespaceA, testNamespaceB, testNamespaceC,
	}

	// Create the namespaces
	for _, namespace := range testNamespaces {
		_, err := c.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: namespace,
			},
		}, metav1.CreateOptions{})
		require.NoError(GinkgoT(), err)
	}

	// Cleanup namespaces
	defer func() {
		for _, namespace := range testNamespaces {
			err := c.KubernetesInterface().CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{})
			require.NoError(GinkgoT(), err)
		}
	}()

	// Create an OperatorGroup with three target namespaces.
	operatorGroup := &v1.OperatorGroup{
		ObjectMeta: metav1.ObjectMeta{
			Name:      genName("e2e-operator-group-"),
			Namespace: testNamespaceA,
		},
		Spec: v1.OperatorGroupSpec{
			TargetNamespaces: testNamespaces,
		},
	}
	operatorGroup, err := crc.OperatorsV1().OperatorGroups(testNamespaceA).Create(context.TODO(), operatorGroup, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)

	// Create the OperatorGroup Label
	ogLabel, err := getOGLabelKey(operatorGroup)
	require.NoError(GinkgoT(), err)

	// Create list options selecting namespaces carrying this group's label.
	listOptions := metav1.ListOptions{
		LabelSelector: labels.Set(map[string]string{ogLabel: ""}).String(),
	}

	// All three target namespaces should be labeled.
	namespaceList, err := pollForNamespaceListCount(c, listOptions, 3)
	require.NoError(GinkgoT(), err)
	require.True(GinkgoT(), checkForOperatorGroupLabels(operatorGroup, namespaceList.Items))

	// Delete the operatorGroup.
	err = crc.OperatorsV1().OperatorGroups(testNamespaceA).Delete(context.TODO(), operatorGroup.GetName(), metav1.DeleteOptions{})
	require.NoError(GinkgoT(), err)

	// Check that no namespaces have the OperatorGroup label anymore; only the
	// count matters, so the returned list is deliberately discarded.
	_, err = pollForNamespaceListCount(c, listOptions, 0)
	require.NoError(GinkgoT(), err)
})
Context("Given a set of Namespaces", func() {
	var (
		// c/crc shadow the outer clients; recreated fresh per spec in BeforeEach.
		c              operatorclient.ClientInterface
		crc            versioned.Interface
		testNamespaces []string
		testNamespaceA string
	)
	BeforeEach(func() {
		c = newKubeClient()
		crc = newCRClient()
		// Create the namespaces that will have an OperatorGroup Label applied.
		testNamespaceA = genName("namespace-a-")
		testNamespaceB := genName("namespace-b-")
		testNamespaceC := genName("namespace-c-")
		testNamespaces = []string{
			testNamespaceA, testNamespaceB, testNamespaceC,
		}
		// Create the namespaces
		for _, namespace := range testNamespaces {
			_, err := c.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: namespace,
				},
			}, metav1.CreateOptions{})
			Expect(err).ToNot(HaveOccurred())
		}
	})
	AfterEach(func() {
		// Cleanup namespaces
		for _, namespace := range testNamespaces {
			err := c.KubernetesInterface().CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{})
			Expect(err).ToNot(HaveOccurred())
		}
	})
	Context("Associating these Namespaces with a label", func() {
		var (
			matchingLabel map[string]string
		)
		BeforeEach(func() {
			matchingLabel = map[string]string{"foo": "bar"}
			// Updating Namespace with labels
			for _, namespace := range testNamespaces {
				_, err := c.KubernetesInterface().CoreV1().Namespaces().Update(context.TODO(), &corev1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name:   namespace,
						Labels: matchingLabel,
					},
				}, metav1.UpdateOptions{})
				Expect(err).ToNot(HaveOccurred())
			}
		})
		When("an OperatorGroup is created having matching label selector defined", func() {
			var operatorGroup *v1.OperatorGroup
			BeforeEach(func() {
				// Creating operator group that selects target namespaces by label
				// rather than by an explicit targetNamespaces list.
				operatorGroup = &v1.OperatorGroup{
					ObjectMeta: metav1.ObjectMeta{
						Name:      genName("e2e-operator-group-"),
						Namespace: testNamespaceA,
					},
					Spec: v1.OperatorGroupSpec{
						Selector: &metav1.LabelSelector{
							MatchLabels: matchingLabel,
						},
					},
				}
				var err error
				operatorGroup, err = crc.OperatorsV1().OperatorGroups(testNamespaceA).Create(context.TODO(), operatorGroup, metav1.CreateOptions{})
				Expect(err).ToNot(HaveOccurred())
			})
			// issue: https://github.com/operator-framework/operator-lifecycle-manager/issues/2637
			It("[FLAKE] OLM applies labels to Namespaces that are associated with an OperatorGroup", func() {
				ogLabel, err := getOGLabelKey(operatorGroup)
				Expect(err).ToNot(HaveOccurred())
				// Create list options selecting namespaces carrying the group's label.
				listOptions := metav1.ListOptions{
					LabelSelector: labels.Set(map[string]string{ogLabel: ""}).String(),
				}
				// Verify that all the namespaces listed in targetNamespaces field of OperatorGroup have labels applied on them
				namespaceList, err := pollForNamespaceListCount(c, listOptions, 3)
				Expect(err).ToNot(HaveOccurred())
				Expect(checkForOperatorGroupLabels(operatorGroup, namespaceList.Items)).Should(BeTrue())
			})
		})
	})
	When("an OperatorGroup is created having above Namespaces defined under targetNamespaces field", func() {
		var operatorGroup *v1.OperatorGroup
		BeforeEach(func() {
			// Create an OperatorGroup with three target namespaces.
			operatorGroup = &v1.OperatorGroup{
				ObjectMeta: metav1.ObjectMeta{
					Name:      genName("e2e-operator-group-"),
					Namespace: testNamespaceA,
				},
				Spec: v1.OperatorGroupSpec{
					TargetNamespaces: testNamespaces,
				},
			}
			var err error
			operatorGroup, err = crc.OperatorsV1().OperatorGroups(testNamespaceA).Create(context.TODO(), operatorGroup, metav1.CreateOptions{})
			Expect(err).ToNot(HaveOccurred())
		})
		It("OLM applies labels to Namespaces that are associated with an OperatorGroup", func() {
			ogLabel, err := getOGLabelKey(operatorGroup)
			Expect(err).ToNot(HaveOccurred())
			// Create list options selecting namespaces carrying the group's label.
			listOptions := metav1.ListOptions{
				LabelSelector: labels.Set(map[string]string{ogLabel: ""}).String(),
			}
			// Verify that all the namespaces listed in targetNamespaces field of OperatorGroup have labels applied on them
			namespaceList, err := pollForNamespaceListCount(c, listOptions, 3)
			Expect(err).ToNot(HaveOccurred())
			Expect(checkForOperatorGroupLabels(operatorGroup, namespaceList.Items)).Should(BeTrue())
		})
	})
})
})
// checkOperatorGroupAnnotations verifies that obj carries the annotations OLM
// stamps for the given OperatorGroup. When checkTargetNamespaces is true the
// targets annotation must equal targetNamespaces; otherwise it must be absent.
// Returns nil when all expected annotations are present and correct.
func checkOperatorGroupAnnotations(obj metav1.Object, op *v1.OperatorGroup, checkTargetNamespaces bool, targetNamespaces string) error {
	annotations := obj.GetAnnotations()

	if checkTargetNamespaces {
		if value, present := annotations[v1.OperatorGroupTargetsAnnotationKey]; !present || value != targetNamespaces {
			return fmt.Errorf("missing targetNamespaces annotation on %v", obj.GetName())
		}
	} else if _, present := annotations[v1.OperatorGroupTargetsAnnotationKey]; present {
		return fmt.Errorf("targetNamespaces annotation unexpectedly found on %v", obj.GetName())
	}

	if value, present := annotations[v1.OperatorGroupNamespaceAnnotationKey]; !present || value != op.GetNamespace() {
		return fmt.Errorf("missing operatorNamespace on %v", obj.GetName())
	}
	if value, present := annotations[v1.OperatorGroupAnnotationKey]; !present || value != op.GetName() {
		return fmt.Errorf("missing operatorGroup annotation on %v", obj.GetName())
	}
	return nil
}
// newOperatorGroup builds an OperatorGroup in the given namespace with the
// supplied annotations, label selector, explicit target namespaces, and
// static-provided-APIs flag. It only constructs the object; nothing is created
// on the cluster.
func newOperatorGroup(namespace, name string, annotations map[string]string, selector *metav1.LabelSelector, targetNamespaces []string, static bool) *v1.OperatorGroup {
	meta := metav1.ObjectMeta{
		Namespace:   namespace,
		Name:        name,
		Annotations: annotations,
	}
	spec := v1.OperatorGroupSpec{
		TargetNamespaces:   targetNamespaces,
		Selector:           selector,
		StaticProvidedAPIs: static,
	}
	return &v1.OperatorGroup{ObjectMeta: meta, Spec: spec}
}
// createProjectAdmin provisions a service account in the given namespace and
// binds it to the built-in "admin" ClusterRole via a RoleBinding, giving it
// project-admin rights in that namespace only. It returns the account's
// "system:serviceaccount:..." user name and a cleanup func that best-effort
// deletes both objects.
func createProjectAdmin(t GinkgoTInterface, c operatorclient.ClientInterface, namespace string) (string, cleanupFunc) {
	account, err := c.CreateServiceAccount(&corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      genName("padmin-"),
		},
	})
	require.NoError(t, err)

	binding, err := c.CreateRoleBinding(&rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name:      genName("padmin-"),
			Namespace: namespace,
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      account.GetName(),
				Namespace: account.GetNamespace(),
			},
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     "admin",
		},
	})
	require.NoError(t, err)

	userName := "system:serviceaccount:" + namespace + ":" + account.GetName()
	cleanup := func() {
		// Best-effort cleanup; deletion errors are intentionally ignored.
		_ = c.DeleteServiceAccount(account.GetNamespace(), account.GetName(), metav1.NewDeleteOptions(0))
		_ = c.DeleteRoleBinding(binding.GetNamespace(), binding.GetName(), metav1.NewDeleteOptions(0))
	}
	return userName, cleanup
}
// checkForOperatorGroupLabels reports whether every namespace listed in the
// OperatorGroup's targetNamespaces appears in the supplied namespace list
// (i.e. was selected by the group's label).
func checkForOperatorGroupLabels(operatorGroup *v1.OperatorGroup, namespaces []corev1.Namespace) bool {
	for _, target := range operatorGroup.Spec.TargetNamespaces {
		found := false
		for i := range namespaces {
			if namespaces[i].GetName() == target {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}
// updateOperatorGroupSpecFunc returns a factory producing closures suitable for
// retry.RetryOnConflict: each closure re-fetches the named OperatorGroup,
// replaces its spec, and attempts the update, returning any error so the retry
// helper can decide whether to try again.
//
// BUGFIX: the Get error was previously passed to require.NoError inside the
// closure, which aborted the whole spec on a transient API error instead of
// letting RetryOnConflict handle it; the error is now returned to the caller.
// The t parameter is retained for interface compatibility.
func updateOperatorGroupSpecFunc(t GinkgoTInterface, crc versioned.Interface, namespace, operatorGroupName string) func(v1.OperatorGroupSpec) func() error {
	return func(operatorGroupSpec v1.OperatorGroupSpec) func() error {
		return func() error {
			fetchedOG, err := crc.OperatorsV1().OperatorGroups(namespace).Get(context.TODO(), operatorGroupName, metav1.GetOptions{})
			if err != nil {
				return err
			}
			fetchedOG.Spec = operatorGroupSpec
			_, err = crc.OperatorsV1().OperatorGroups(namespace).Update(context.TODO(), fetchedOG, metav1.UpdateOptions{})
			return err
		}
	}
}
// pollForNamespaceListCount polls (via Gomega's default Eventually timeout)
// until listing namespaces with listOptions yields exactly expectedLength
// items, then returns the matching list. On timeout the enclosing Ginkgo spec
// fails via the Should assertion; list/err hold the last observed values.
func pollForNamespaceListCount(c operatorclient.ClientInterface, listOptions metav1.ListOptions, expectedLength int) (list *corev1.NamespaceList, err error) {
	Eventually(func() (bool, error) {
		list, err = c.KubernetesInterface().CoreV1().Namespaces().List(context.TODO(), listOptions)
		if err != nil {
			return false, err
		}
		// Return the comparison directly instead of an if/return ladder.
		return len(list.Items) == expectedLength, nil
	}).Should(BeTrue())
	return
}
// containsNamespace reports whether a namespace with the given name is present
// in the slice.
func containsNamespace(namespaces []corev1.Namespace, namespaceName string) bool {
	for _, ns := range namespaces {
		if ns.GetName() == namespaceName {
			return true
		}
	}
	return false
}
// getOGLabelKey returns the namespace label key OLM derives from an
// OperatorGroup's UID ("olm.operatorgroup.uid/<uid>"), or an error if the
// group has not been persisted yet (empty UID).
func getOGLabelKey(og *v1.OperatorGroup) (string, error) {
	ogUID := string(og.GetUID())
	if ogUID == "" {
		return "", fmt.Errorf("OperatorGroup UID is empty string")
	}
	// Reuse the validated ogUID instead of calling og.GetUID() a second time.
	return fmt.Sprintf("olm.operatorgroup.uid/%s", ogUID), nil
}
|
package main
import (
"context"
"fmt"
"log"
"github.com/go-playground/mold/v4"
)
// main demonstrates registering a custom mold transformer tagged "set" and
// applying it to a struct field.
func main() {
	transformer := mold.New()
	transformer.Register("set", transformMyData)

	// Test carries the mold tag that triggers the "set" transformer.
	type Test struct {
		StringField string `mold:"set"`
	}

	data := Test{StringField: "string"}
	if err := transformer.Struct(context.Background(), &data); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("tt: %#v\n", data)
}
// transformMyData is a mold transformer that wraps string fields in
// "prefix ... suffix". Non-string fields are rejected with an error.
func transformMyData(_ context.Context, fl mold.FieldLevel) error {
	field := fl.Field()
	switch field.Interface().(type) {
	case string:
		field.SetString("prefix " + field.String() + " suffix")
		return nil
	default:
		// BUGFIX: the previous message ("nope") gave no hint about the cause;
		// report the offending type instead.
		return fmt.Errorf("unsupported field type %T; expected string", field.Interface())
	}
}
|
package resource
// File holds information about a file.
type File struct {
	// ID uniquely identifies the file.
	ID ID `json:"id"`
	// Version identifies the revision of the file.
	Version Version `json:"version"`
	// FileData is embedded, flattening its fields into File.
	// NOTE(review): FileData is defined elsewhere in the package; confirm its
	// JSON behavior (embedded fields inline unless tagged otherwise).
	FileData
}
|
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package lhttp implements HTTP client helper code (JSON, automatic retries,
// authentication, etc).
//
// 'l' stands for luci.
package lhttp
|
/*
Mexican Wave Simulator
The wave (known as a Mexican wave in the English-speaking world outside North America) is an example of metachronal rhythm achieved in a packed stadium when successive groups of spectators briefly stand, yell, and raise their arms.
Create a function that takes a string and turns it into a Mexican Wave.
Notes
All test cases will be lowercase strings.
Ignore spaces (they are considered empty seats).
An empty string should return an empty array.
*/
package main
import (
"fmt"
"unicode"
)
// main exercises wave against a handful of representative inputs, including
// empty strings, whitespace-only strings, and leading/trailing spaces.
func main() {
	inputs := []string{
		"edabit",
		"just do it",
		"dogs cats pigs",
		" ",
		"",
		"G",
		" blue",
		"green ",
	}
	for _, in := range inputs {
		fmt.Printf("%q\n", wave(in))
	}
}
// wave turns s into a "Mexican wave": one output string per non-space rune,
// with that rune uppercased. Spaces are skipped (empty seats) and an empty
// or all-space input yields an empty result.
//
// BUGFIX/PERF: the original rebuilt the whole string rune-by-rune with +=
// inside a nested loop (quadratic work and allocations). We instead mutate a
// single rune slice in place, stringify it, and restore the rune — one pass
// per seat with identical output.
func wave(s string) []string {
	var w []string
	runes := []rune(s)
	for i, r := range runes {
		if unicode.IsSpace(r) {
			continue
		}
		runes[i] = unicode.ToUpper(r)
		w = append(w, string(runes))
		runes[i] = r
	}
	return w
}
|
package self
import (
"fmt"
"io/ioutil"
"net"
"time"
_ "github.com/lucas-clemente/quic-clients" // download clients
quic "gx/ipfs/QmU44KWVkSHno7sNDTeUcL4FBgxgoidkFuTUyTXWJPXXFJ/quic-go"
"gx/ipfs/QmU44KWVkSHno7sNDTeUcL4FBgxgoidkFuTUyTXWJPXXFJ/quic-go/integrationtests/tools/proxy"
"gx/ipfs/QmU44KWVkSHno7sNDTeUcL4FBgxgoidkFuTUyTXWJPXXFJ/quic-go/integrationtests/tools/testserver"
"gx/ipfs/QmU44KWVkSHno7sNDTeUcL4FBgxgoidkFuTUyTXWJPXXFJ/quic-go/internal/protocol"
"gx/ipfs/QmU44KWVkSHno7sNDTeUcL4FBgxgoidkFuTUyTXWJPXXFJ/quic-go/internal/testdata"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Integration test: for each supported QUIC version and a range of simulated
// round-trip times, start a server, interpose a delaying proxy, and verify a
// client downloads the server's payload intact.
var _ = Describe("non-zero RTT", func() {
	for _, v := range append(protocol.SupportedVersions, protocol.VersionTLS) {
		version := v // capture loop variable for the closures below
		Context(fmt.Sprintf("with QUIC version %s", version), func() {
			roundTrips := [...]time.Duration{
				10 * time.Millisecond,
				50 * time.Millisecond,
				100 * time.Millisecond,
				200 * time.Millisecond,
			}
			for _, r := range roundTrips {
				rtt := r // capture loop variable
				It(fmt.Sprintf("downloads a message with %s RTT", rtt), func() {
					// Server side: listen on an ephemeral port restricted to this version.
					ln, err := quic.ListenAddr(
						"localhost:0",
						testdata.GetTLSConfig(),
						&quic.Config{
							Versions: []protocol.VersionNumber{version},
						},
					)
					Expect(err).ToNot(HaveOccurred())
					done := make(chan struct{})
					go func() {
						defer GinkgoRecover()
						// Accept one session and push the test payload on a fresh stream.
						sess, err := ln.Accept()
						Expect(err).ToNot(HaveOccurred())
						str, err := sess.OpenStream()
						Expect(err).ToNot(HaveOccurred())
						_, err = str.Write(testserver.PRData)
						Expect(err).ToNot(HaveOccurred())
						str.Close()
						close(done)
					}()
					serverPort := ln.Addr().(*net.UDPAddr).Port
					// Proxy delays every packet by rtt/2 in each direction,
					// simulating the configured round-trip time.
					proxy, err := quicproxy.NewQuicProxy("localhost:0", version, &quicproxy.Opts{
						RemoteAddr: fmt.Sprintf("localhost:%d", serverPort),
						DelayPacket: func(d quicproxy.Direction, p uint64) time.Duration {
							return rtt / 2
						},
					})
					Expect(err).ToNot(HaveOccurred())
					defer proxy.Close()
					// Client side: dial through the proxy and read the full stream.
					sess, err := quic.DialAddr(
						fmt.Sprintf("quic.clemente.io:%d", proxy.LocalPort()),
						nil,
						&quic.Config{Versions: []protocol.VersionNumber{version}},
					)
					Expect(err).ToNot(HaveOccurred())
					str, err := sess.AcceptStream()
					Expect(err).ToNot(HaveOccurred())
					data, err := ioutil.ReadAll(str)
					Expect(err).ToNot(HaveOccurred())
					Expect(data).To(Equal(testserver.PRData))
					sess.Close()
					// Ensure the server goroutine finished cleanly.
					Eventually(done).Should(BeClosed())
				})
			}
		})
	}
})
|
/*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mysqlindexer
import (
"fmt"
"io"
"os"
"strconv"
"camli/blobref"
"camli/blobserver"
"camli/jsonconfig"
)
// Indexer indexes blobs into a MySQL database. It implements the blobserver
// storage interfaces (note: this file uses the pre-Go1 os.Error dialect).
type Indexer struct {
	*blobserver.SimpleBlobHubPartitionMap

	KeyFetcher blobref.StreamingFetcher // for verifying claims

	// Used for fetching blobs to find the complete sha1 of schema
	// blobs.
	BlobSource blobserver.Storage

	// db is the underlying MySQL connection wrapper.
	db *MySQLWrapper
}
// newFromConfig constructs a MySQL-backed Indexer from JSON configuration.
// Required keys: "blobSource", "user", "database"; optional: "host" (default
// "localhost") and "password" (default ""). It verifies connectivity and that
// the database schema matches requiredSchemaVersion before returning.
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, os.Error) {
	blobPrefix := config.RequiredString("blobSource")
	db := &MySQLWrapper{
		Host:     config.OptionalString("host", "localhost"),
		User:     config.RequiredString("user"),
		Password: config.OptionalString("password", ""),
		Database: config.RequiredString("database"),
	}
	indexer := &Indexer{
		SimpleBlobHubPartitionMap: &blobserver.SimpleBlobHubPartitionMap{},
		db:                        db,
	}
	// Validate must run after all Required*/Optional* reads so it can report
	// any unknown or missing keys in one pass.
	if err := config.Validate(); err != nil {
		return nil, err
	}
	sto, err := ld.GetStorage(blobPrefix)
	if err != nil {
		return nil, err
	}
	indexer.BlobSource = sto
	// Good enough, for now:
	indexer.KeyFetcher = indexer.BlobSource

	if ok, err := indexer.IsAlive(); !ok {
		return nil, fmt.Errorf("Failed to connect to MySQL: %v", err)
	}
	version, err := indexer.SchemaVersion()
	if err != nil {
		return nil, fmt.Errorf("error getting schema version (need to init database?): %v", err)
	}
	if version != requiredSchemaVersion {
		if os.Getenv("CAMLI_ADVERTISED_PASSWORD") != "" {
			// Good signal that we're using the dev-server script, so help out
			// the user with a more useful tip:
			return nil, fmt.Errorf("database schema version is %d; expect %d (run \"./dev-server --wipe\" to wipe both your blobs and re-populate the database schema)", version, requiredSchemaVersion)
		}
		return nil, fmt.Errorf("database schema version is %d; expect %d (need to re-init/upgrade database?)",
			version, requiredSchemaVersion)
	}
	return indexer, nil
}
// init registers the "mysqlindexer" storage type with the blobserver
// loader so it can be instantiated from server configuration.
func init() {
	blobserver.RegisterStorageConstructor("mysqlindexer", blobserver.StorageConstructor(newFromConfig))
}
// IsAlive reports whether the MySQL connection answers a ping; on
// failure, err carries the ping error.
func (mi *Indexer) IsAlive() (ok bool, err os.Error) {
	err = mi.db.Ping()
	return err == nil, err
}
// SchemaVersion returns the schema version recorded in the meta table,
// or 0 with no error when no version row exists yet.
func (mi *Indexer) SchemaVersion() (version int, err os.Error) {
	rs, err := mi.db.Query("SELECT value FROM meta WHERE metakey='version'")
	if err != nil {
		return 0, err
	}
	defer rs.Close()
	if !rs.Next() {
		// No version row recorded.
		return 0, nil
	}
	var raw string
	if err = rs.Scan(&raw); err != nil {
		return 0, err
	}
	return strconv.Atoi(raw)
}
// Fetch is unsupported: the indexer stores metadata, not blob contents.
func (mi *Indexer) Fetch(blob *blobref.BlobRef) (blobref.ReadSeekCloser, int64, os.Error) {
	unsupported := os.NewError("Fetch isn't supported by the MySQL indexer")
	return nil, 0, unsupported
}
// FetchStreaming is unsupported: the indexer stores metadata, not blob
// contents.
// FIX: the error previously said "Fetch isn't supported", copy-pasted
// from Fetch above; it now names the actual method.
func (mi *Indexer) FetchStreaming(blob *blobref.BlobRef) (io.ReadCloser, int64, os.Error) {
	return nil, 0, os.NewError("FetchStreaming isn't supported by the MySQL indexer")
}
// RemoveBlobs is unsupported by the MySQL indexer.
func (mi *Indexer) RemoveBlobs(blobs []*blobref.BlobRef) os.Error {
	unsupported := os.NewError("RemoveBlobs isn't supported by the MySQL indexer")
	return unsupported
}
|
package main
import (
"bytes"
"fmt"
"io"
"net/http"
"testing"
"github.com/slack-go/slack"
"github.com/slack-go/slack/slackevents"
"github.com/slack-go/slack/socketmode"
"go.uber.org/zap"
)
// dummySocketmodeClient is a no-op socketmode client used as a test double.
type dummySocketmodeClient struct{}

// Ack does nothing.
func (d dummySocketmodeClient) Ack(_ socketmode.Request, _ ...interface{}) {}

// Run does nothing and reports success.
func (d dummySocketmodeClient) Run() error { return nil }
// dummySlackClient is a test double for the Slack client: it records the
// text of the last posted message and can be configured to fail.
type dummySlackClient struct {
	err  bool   // when true, PostMessage returns an error
	body string // text of the most recently posted message
}

// PostMessage captures the message text assembled from opts, optionally
// returning an error, without ever talking to Slack.
func (d *dummySlackClient) PostMessage(_ string, opts ...slack.MsgOption) (_, _ string, err error) {
	if d.err {
		err = fmt.Errorf("an error")
	}
	_, values, _ := slack.UnsafeApplyMsgOptions("", "", "", opts...)
	d.body = values.Get("text")
	return
}
type dummyHTTPClient struct {
status int
err bool
}
func (d dummyHTTPClient) Do(_ *http.Request) (resp *http.Response, err error) {
if d.err {
err = fmt.Errorf("an error")
}
resp = new(http.Response)
resp.StatusCode = d.status
resp.Body = io.NopCloser(bytes.NewBufferString(`{"message":{"result":{"translatedText":"foo"}}}}`))
return
}
// TestNew verifies that constructing a Bot with empty configuration
// values does not panic.
func TestNew(t *testing.T) {
	defer func() {
		if r := recover(); r != nil {
			t.Fatal(r)
		}
	}()
	New("", "", "", "")
}
// TestBot_Process feeds a single synthetic socketmode message event to
// Bot.Process and verifies the text the bot posts back to Slack.
func TestBot_Process(t *testing.T) {
	for _, test := range []struct {
		name        string
		message     string
		slackClient slackClient
		httpclient  httpClient
		expect      string
	}{
		// "foo" is dummyHTTPClient's canned translation result; the
		// error case expects no message to have been posted at all.
		{"happy path, english message", "Good morning!", &dummySlackClient{}, dummyHTTPClient{}, "foo"},
		{"happy path, korean message", "오.. 신기합니다.", &dummySlackClient{}, dummyHTTPClient{}, "foo"},
		{"papago errors", "message", &dummySlackClient{}, dummyHTTPClient{err: true}, ""},
	} {
		t.Run(test.name, func(t *testing.T) {
			l, _ := zap.NewDevelopment()
			b := Bot{
				s:         dummySocketmodeClient{},
				slack:     test.slackClient,
				client:    test.httpclient,
				eventChan: make(chan socketmode.Event),
				logger:    l.Sugar(),
			}
			// Deliver exactly one event, then close the channel so
			// Process's receive loop terminates and Process returns.
			go func() {
				b.eventChan <- socketmode.Event{
					Type: socketmode.EventTypeEventsAPI,
					Data: slackevents.EventsAPIEvent{
						Type: slackevents.CallbackEvent,
						InnerEvent: slackevents.EventsAPIInnerEvent{
							Data: &slackevents.MessageEvent{
								Text:            test.message,
								TimeStamp:       "0",
								ThreadTimeStamp: "0",
							},
						},
					},
					// NOTE(review): presumably Process Acks this request,
					// so it must be non-nil — confirm against Bot.Process.
					Request: &socketmode.Request{},
				}
				close(b.eventChan)
			}()
			b.Process()
			body := b.slack.(*dummySlackClient).body
			if body != test.expect {
				t.Errorf("expected %q, received %q", test.expect, body)
			}
		})
	}
}
|
package sources
import (
"path/filepath"
dcapi "github.com/mudler/docker-companion/api"
"github.com/lxc/distrobuilder/shared"
)
// DockerHTTP represents the Docker HTTP downloader. It holds no state;
// all inputs come from the definition passed to Run.
type DockerHTTP struct{}
// NewDockerHTTP returns a new DockerHTTP downloader instance.
func NewDockerHTTP() *DockerHTTP {
	return new(DockerHTTP)
}
// Run downloads the docker image named by the definition's source URL
// and unpacks it into rootfsDir.
func (d *DockerHTTP) Run(definition shared.Definition, rootfsDir string) error {
	rootfs, err := filepath.Abs(rootfsDir)
	if err != nil {
		return err
	}
	// NOTE: For now we use only docker official server but we can
	// add a new parameter on DefinitionSource struct.
	return dcapi.DownloadAndUnpackImage(definition.Source.URL, rootfs, nil)
}
|
package podstatus
import (
"encoding/json"
"github.com/square/p2/pkg/launch"
"github.com/square/p2/pkg/store/consul/podstore"
"github.com/square/p2/pkg/store/consul/statusstore"
"github.com/square/p2/pkg/types"
"github.com/square/p2/pkg/util"
"github.com/hashicorp/consul/api"
context "golang.org/x/net/context"
)
// ConsulStore reads and writes PodStatus records in consul, scoped to a
// single namespace within the generic status store.
type ConsulStore struct {
	// statusStore is the underlying generic status store.
	statusStore statusstore.Store

	// The consul implementation statusstore.Store formats keys like
	// /status/<resource-type>/<resource-id>/<namespace>. The namespace
	// portion is useful if multiple subsystems need to record their
	// own view of a resource.
	namespace statusstore.Namespace
}
// TODO: this pod store is coupled with the PodStatus struct, which represents
// the book keeping that the preparer does about a pod. In other words it only
// makes sense if the namespace is consul.PreparerPodStatusNamespace. We should
// probably take namespace out of all these APIs and use that constant instead.

// NewConsul returns a ConsulStore backed by statusStore and scoped to
// the given namespace.
func NewConsul(statusStore statusstore.Store, namespace statusstore.Namespace) ConsulStore {
	return ConsulStore{statusStore: statusStore, namespace: namespace}
}
// Get fetches and decodes the PodStatus for the pod identified by key.
func (c ConsulStore) Get(key types.PodUniqueKey) (PodStatus, *api.QueryMeta, error) {
	if key == "" {
		return PodStatus{}, nil, util.Errorf("Cannot retrieve status for a pod with an empty uuid")
	}
	raw, meta, err := c.statusStore.GetStatus(statusstore.POD, statusstore.ResourceID(key), c.namespace)
	if err != nil {
		return PodStatus{}, meta, err
	}
	decoded, err := statusToPodStatus(raw)
	if err != nil {
		return PodStatus{}, meta, err
	}
	return decoded, meta, nil
}
// WaitForStatus blocks on a consul watch until the pod's status changes
// past waitIndex, then returns the decoded status.
func (c ConsulStore) WaitForStatus(key types.PodUniqueKey, waitIndex uint64) (PodStatus, *api.QueryMeta, error) {
	if key == "" {
		return PodStatus{}, nil, util.Errorf("Cannot retrieve status for a pod with an empty uuid")
	}
	raw, meta, err := c.statusStore.WatchStatus(statusstore.POD, statusstore.ResourceID(key), c.namespace, waitIndex)
	if err != nil {
		return PodStatus{}, meta, err
	}
	decoded, err := statusToPodStatus(raw)
	if err != nil {
		return PodStatus{}, meta, err
	}
	return decoded, meta, nil
}
// GetStatusFromIndex resolves a pod index to its status by delegating to Get.
func (c ConsulStore) GetStatusFromIndex(index podstore.PodIndex) (PodStatus, *api.QueryMeta, error) {
	key := index.PodKey
	return c.Get(key)
}
// Set unconditionally writes status for the given pod key.
func (c ConsulStore) Set(key types.PodUniqueKey, status PodStatus) error {
	if key == "" {
		return util.Errorf("Could not set status for pod with empty uuid")
	}
	raw, err := podStatusToStatus(status)
	if err != nil {
		return err
	}
	return c.statusStore.SetStatus(statusstore.POD, statusstore.ResourceID(key), c.namespace, raw)
}
// CAS writes status for key only if the stored entry's modify index
// still equals modifyIndex (compare-and-set).
func (c ConsulStore) CAS(ctx context.Context, key types.PodUniqueKey, status PodStatus, modifyIndex uint64) error {
	if key == "" {
		return util.Errorf("Could not set status for pod with empty uuid")
	}
	raw, err := podStatusToStatus(status)
	if err != nil {
		return err
	}
	return c.statusStore.CASStatus(ctx, statusstore.POD, statusstore.ResourceID(key), c.namespace, raw, modifyIndex)
}
// Convenience function for only mutating a part of the status structure.
// First, the status is retrieved and the consul ModifyIndex is read. The
// status is then passed to a mutator function, and then the new status is
// written back to consul using a CAS operation, guaranteeing that nothing else
// about the status changed.
func (c ConsulStore) MutateStatus(ctx context.Context, key types.PodUniqueKey, mutator func(PodStatus) (PodStatus, error)) error {
var lastIndex uint64
status, queryMeta, err := c.Get(key)
switch {
case statusstore.IsNoStatus(err):
// We just want to make sure the key doesn't exist when we set it, so
// use an index of 0
lastIndex = 0
case err != nil:
return err
default:
lastIndex = queryMeta.LastIndex
}
newStatus, err := mutator(status)
if err != nil {
return err
}
return c.CAS(ctx, key, newStatus, lastIndex)
}
// A helper method for updating the LastExit field of one of the processes in a
// pod. Searches through p.ProcessStatuses for a process matching the
// launchable ID and launchableScriptName, and mutates its LastExit if found.
// If not found, a new process is added.
func (c ConsulStore) SetLastExit(ctx context.Context, podUniqueKey types.PodUniqueKey, launchableID launch.LaunchableID, entryPoint string, exitStatus ExitStatus) error {
mutator := func(p PodStatus) (PodStatus, error) {
for _, processStatus := range p.ProcessStatuses {
if processStatus.LaunchableID == launchableID && processStatus.EntryPoint == entryPoint {
processStatus.LastExit = &exitStatus
return p, nil
}
}
p.ProcessStatuses = append(p.ProcessStatuses, ProcessStatus{
LaunchableID: launchableID,
EntryPoint: entryPoint,
LastExit: &exitStatus,
})
return p, nil
}
return c.MutateStatus(ctx, podUniqueKey, mutator)
}
// List returns every pod status entry recorded under this store's
// namespace, keyed by pod unique key.
func (c ConsulStore) List() (map[types.PodUniqueKey]PodStatus, error) {
	allStatus, err := c.statusStore.GetAllStatusForResourceType(statusstore.POD)
	if err != nil {
		return nil, util.Errorf("could not fetch all status for %s resource type: %s", statusstore.POD, err)
	}
	out := make(map[types.PodUniqueKey]PodStatus)
	for id, byNamespace := range allStatus {
		raw, ok := byNamespace[c.namespace]
		if !ok {
			// Entry belongs to a different namespace; skip it.
			continue
		}
		podUniqueKey, err := types.ToPodUniqueKey(id.String())
		if err != nil {
			return nil, util.Errorf("got status record with ID %s that could not be converted to pod unique key: %s", id, err)
		}
		var podStatus PodStatus
		if err := json.Unmarshal(raw.Bytes(), &podStatus); err != nil {
			return nil, util.Errorf("could not unmarshal status for %s as JSON (raw status=%q): %s", podUniqueKey, string(raw.Bytes()), err)
		}
		out[podUniqueKey] = podStatus
	}
	return out, nil
}
// Delete removes the status entry for the given pod key from consul.
func (c ConsulStore) Delete(podUniqueKey types.PodUniqueKey) error {
	if podUniqueKey == "" {
		return util.Errorf("pod unique key cannot be empty")
	}
	id := statusstore.ResourceID(podUniqueKey.String())
	return c.statusStore.DeleteStatus(statusstore.POD, id, c.namespace)
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package external
import (
"context"
"io"
"testing"
"time"
"github.com/pingcap/tidb/br/pkg/storage"
"github.com/stretchr/testify/require"
"golang.org/x/exp/rand"
)
// TestAddKeyValueMaintainRangeProperty checks that the KeyValueStore
// emits a range property entry once propKeysDist keys (or propSizeDist
// bytes) have accumulated, and that Close flushes any pending property.
// NOTE(review): the "+ 16" offset terms presumably account for per-pair
// length headers written by the store — confirm against the KV encoding.
func TestAddKeyValueMaintainRangeProperty(t *testing.T) {
	ctx := context.Background()
	memStore := storage.NewMemStorage()
	writer, err := memStore.Create(ctx, "/test", nil)
	require.NoError(t, err)
	rc := &rangePropertiesCollector{
		propSizeDist: 100,
		propKeysDist: 2,
	}
	rc.reset()
	// Snapshot the freshly-reset collector so later comparisons prove
	// nothing has been recorded yet.
	initRC := *rc
	kvStore, err := NewKeyValueStore(ctx, writer, rc, 1, 1)
	require.NoError(t, err)
	require.Equal(t, &initRC, rc)
	encoded := rc.encode()
	require.Len(t, encoded, 0)
	k1, v1 := []byte("key1"), []byte("value1")
	err = kvStore.AddKeyValue(k1, v1)
	require.NoError(t, err)
	// when not accumulated enough data, no range property will be added.
	require.Equal(t, &initRC, rc)
	// propKeysDist = 2, so after adding 2 keys, a new range property will be added.
	k2, v2 := []byte("key2"), []byte("value2")
	err = kvStore.AddKeyValue(k2, v2)
	require.NoError(t, err)
	require.Len(t, rc.props, 1)
	expected := &rangeProperty{
		key:    k1,
		offset: 0,
		size:   uint64(len(k1) + len(v1) + len(k2) + len(v2)),
		keys:   2,
	}
	require.Equal(t, expected, rc.props[0])
	encoded = rc.encode()
	require.Greater(t, len(encoded), 0)
	// when not accumulated enough data, no range property will be added.
	k3, v3 := []byte("key3"), []byte("value3")
	err = kvStore.AddKeyValue(k3, v3)
	require.NoError(t, err)
	require.Len(t, rc.props, 1)
	err = writer.Close(ctx)
	require.NoError(t, err)
	// Close flushes the pending property for the third pair.
	kvStore.Close()
	expected = &rangeProperty{
		key:    k3,
		offset: uint64(len(k1) + len(v1) + 16 + len(k2) + len(v2) + 16),
		size:   uint64(len(k3) + len(v3)),
		keys:   1,
	}
	require.Len(t, rc.props, 2)
	require.Equal(t, expected, rc.props[1])
	// Second scenario: propSizeDist = 1 means every pair exceeds the
	// size threshold, so each AddKeyValue emits a property immediately.
	writer, err = memStore.Create(ctx, "/test2", nil)
	require.NoError(t, err)
	rc = &rangePropertiesCollector{
		propSizeDist: 1,
		propKeysDist: 100,
	}
	rc.reset()
	kvStore, err = NewKeyValueStore(ctx, writer, rc, 2, 2)
	require.NoError(t, err)
	err = kvStore.AddKeyValue(k1, v1)
	require.NoError(t, err)
	require.Len(t, rc.props, 1)
	expected = &rangeProperty{
		key:    k1,
		offset: 0,
		size:   uint64(len(k1) + len(v1)),
		keys:   1,
	}
	require.Equal(t, expected, rc.props[0])
	err = kvStore.AddKeyValue(k2, v2)
	require.NoError(t, err)
	require.Len(t, rc.props, 2)
	expected = &rangeProperty{
		key:    k2,
		offset: uint64(len(k1) + len(v1) + 16),
		size:   uint64(len(k2) + len(v2)),
		keys:   1,
	}
	require.Equal(t, expected, rc.props[1])
	kvStore.Close()
	// Length of properties should not change after close.
	require.Len(t, rc.props, 2)
	err = writer.Close(ctx)
	require.NoError(t, err)
}
// TestKVReadWrite writes a random (seeded and logged, hence reproducible)
// set of key/value pairs through KeyValueStore, then reads them back with
// newKVReader and checks they round-trip in order, ending with io.EOF.
func TestKVReadWrite(t *testing.T) {
	// Log the seed so a failing run can be reproduced.
	seed := time.Now().Unix()
	rand.Seed(uint64(seed))
	t.Logf("seed: %d", seed)
	ctx := context.Background()
	memStore := storage.NewMemStorage()
	writer, err := memStore.Create(ctx, "/test", nil)
	require.NoError(t, err)
	rc := &rangePropertiesCollector{
		propSizeDist: 100,
		propKeysDist: 2,
	}
	rc.reset()
	kvStore, err := NewKeyValueStore(ctx, writer, rc, 1, 1)
	require.NoError(t, err)
	// Between 10 and 19 pairs, each key/value 1-10 random bytes.
	kvCnt := rand.Intn(10) + 10
	keys := make([][]byte, kvCnt)
	values := make([][]byte, kvCnt)
	for i := 0; i < kvCnt; i++ {
		randLen := rand.Intn(10) + 1
		keys[i] = make([]byte, randLen)
		rand.Read(keys[i])
		randLen = rand.Intn(10) + 1
		values[i] = make([]byte, randLen)
		rand.Read(values[i])
		err = kvStore.AddKeyValue(keys[i], values[i])
		require.NoError(t, err)
	}
	err = writer.Close(ctx)
	require.NoError(t, err)
	// Use a random read-buffer size to exercise buffer refill paths.
	bufSize := rand.Intn(100) + 1
	kvReader, err := newKVReader(ctx, "/test", memStore, 0, bufSize)
	require.NoError(t, err)
	for i := 0; i < kvCnt; i++ {
		key, value, err := kvReader.nextKV()
		require.NoError(t, err)
		require.Equal(t, keys[i], key)
		require.Equal(t, values[i], value)
	}
	// After the final pair the reader reports EOF.
	_, _, err = kvReader.nextKV()
	require.Equal(t, io.EOF, err)
}
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package behaviors
// Area is a conformance area composed of a list of test suites
type Area struct {
	// Area is the name of the conformance area.
	Area string `json:"area,omitempty"`
	// Suites lists the test suites belonging to this area.
	Suites []Suite `json:"suites,omitempty"`
}
// Suite is a conformance test suite composed of a list of behaviors
type Suite struct {
	// Suite is the name of the suite.
	Suite string `json:"suite,omitempty"`
	// Description is a human-readable summary of the suite.
	Description string `json:"description,omitempty"`
	// Behaviors lists the behaviors covered by this suite.
	Behaviors []Behavior `json:"behaviors,omitempty"`
}
// Behavior describes the set of properties for a conformance behavior
type Behavior struct {
	// ID uniquely identifies the behavior.
	ID string `json:"id,omitempty"`
	// APIObject names the API object the behavior concerns.
	APIObject string `json:"apiObject,omitempty"`
	// APIField names the field of APIObject the behavior concerns.
	APIField string `json:"apiField,omitempty"`
	// APIType is presumably the type of that field — confirm with producers.
	APIType string `json:"apiType,omitempty"`
	// Description is a human-readable summary of the behavior.
	Description string `json:"description,omitempty"`
}
// ConformanceData describes the structure of the conformance.yaml file
type ConformanceData struct {
	// A URL to the line of code in the kube src repo for the test. Omitted from the YAML to avoid exposing line number.
	URL string `yaml:"-"`
	// Extracted from the "Testname:" comment before the test
	TestName string
	// CodeName is taken from the actual ginkgo descriptions, e.g. `[sig-apps] Foo should bar [Conformance]`
	CodeName string
	// Extracted from the "Description:" comment before the test
	Description string
	// Version when this test is added or modified ex: v1.12, v1.13
	Release string
	// File is the filename where the test is defined. We intentionally don't save the line here to avoid meaningless changes.
	File string
	// Behaviors is the list of conformance behaviors tested by a particular e2e test
	Behaviors []string `yaml:"behaviors,omitempty"`
}
|
package fs
import (
"bytes"
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"testing"
)
// TestOSFilesystemImplementsFS asserts at compile time that OS satisfies
// the Filesystem interface.
func TestOSFilesystemImplementsFS(t *testing.T) {
	var fs Filesystem = OS{}
	_ = fs
}
// TestOSFindFilesSuccess checks that FindFiles returns exactly the files
// matching the pattern inside the run subdirectories.
func TestOSFindFilesSuccess(t *testing.T) {
	base := tempdir(t)
	defer os.RemoveAll(base)
	logdir := filepath.Join(base, "logs")
	for _, d := range []string{"", "emptydir", "run1", "run2", "nonrun"} {
		mkdirHard(t, filepath.Join(logdir, d))
	}
	for _, parts := range [][]string{
		{"run1", "tfevents.1"},
		{"run2", "tfevents.1"},
		{"run2", "tfevents.2"},
		{"run2", "randomfile1"},
		{"nonrun", "randomfile2"},
	} {
		touchHard(t, filepath.Join(logdir, parts[0], parts[1]))
	}
	got, err := OS{}.FindFiles(logdir, "*tfevents*")
	want := []string{
		filepath.Join(logdir, "run1", "tfevents.1"),
		filepath.Join(logdir, "run2", "tfevents.1"),
		filepath.Join(logdir, "run2", "tfevents.2"),
	}
	if err != nil || !reflect.DeepEqual(got, want) {
		t.Errorf(`FindFiles(logdir, "*tfevents*"): got %v, %v; want %v, %v`, got, err, want, nil)
	}
}
// TestOSFindFilesBadPattern checks that a malformed glob pattern surfaces
// filepath.ErrBadPattern and yields no files.
func TestOSFindFilesBadPattern(t *testing.T) {
	base := tempdir(t)
	defer os.RemoveAll(base)
	logdir := filepath.Join(base, "logs")
	mkdirHard(t, logdir)
	// We need to actually process a file for the error condition to be
	// hit; filepath doesn't offer a way to precompile the pattern.
	touchHard(t, filepath.Join(logdir, "somefile"))
	badPat := "[" // ]
	got, err := OS{}.FindFiles(logdir, badPat)
	if len(got) != 0 || !errors.Is(err, filepath.ErrBadPattern) {
		t.Errorf(`FindFiles(logdir, %q): got %v, %v; want nil, ErrBadPattern`, badPat, got, err)
	}
}
// TestOSFindFilesOSError checks that FindFiles on a nonexistent directory
// reports ENOENT and yields no files.
func TestOSFindFilesOSError(t *testing.T) {
	base := tempdir(t)
	defer os.RemoveAll(base)
	logdir := filepath.Join(base, "logs")
	mkdirHard(t, logdir)
	missing := filepath.Join(logdir, "enoent")
	pattern := "*tfevents*"
	got, err := OS{}.FindFiles(missing, pattern)
	if len(got) != 0 || !os.IsNotExist(err) {
		t.Errorf("FindFiles(%q, %q): got %v, %v; want nil, ENOENT", missing, pattern, got, err)
	}
}
// TestOSListFilesSuccess checks that ListFiles returns only the regular
// files directly inside the directory, not those in subdirectories.
func TestOSListFilesSuccess(t *testing.T) {
	base := tempdir(t)
	defer os.RemoveAll(base)
	logdir := filepath.Join(base, "logs")
	mkdirHard(t, logdir)
	mkdirHard(t, filepath.Join(logdir, "subdir"))
	for _, name := range []string{"file1", "file2"} {
		touchHard(t, filepath.Join(logdir, name))
	}
	touchHard(t, filepath.Join(logdir, "subdir", "file3"))
	got, err := OS{}.ListFiles(logdir)
	want := []string{
		filepath.Join(logdir, "file1"),
		filepath.Join(logdir, "file2"),
	}
	if err != nil || !reflect.DeepEqual(got, want) {
		t.Errorf("ListFiles(%q): got %v, %v; want %v, %v", logdir, got, err, want, nil)
	}
}
// TestOSListFilesOSError checks that ListFiles on a nonexistent directory
// reports ENOENT and yields no files.
func TestOSListFilesOSError(t *testing.T) {
	base := tempdir(t)
	defer os.RemoveAll(base)
	logdir := filepath.Join(base, "logs")
	mkdirHard(t, logdir)
	touchHard(t, filepath.Join(logdir, "file1"))
	missing := filepath.Join(base, "enoent")
	got, err := OS{}.ListFiles(missing)
	if len(got) != 0 || !os.IsNotExist(err) {
		t.Errorf("ListFiles(%q): got %v, %v; want nil, ENOENT", missing, got, err)
	}
}
// TestOSOpenSuccess exercises OS.Open: reading the whole file, seeking
// back into it, and reading the remainder after the seek.
func TestOSOpenSuccess(t *testing.T) {
	dir := tempdir(t)
	defer os.RemoveAll(dir)
	logdir := filepath.Join(dir, "logs")
	mkdirHard(t, logdir)
	path := filepath.Join(logdir, "myfile")
	{
		f, err := os.Create(path)
		if err != nil {
			t.Fatalf("os.Create(%q): %v", path, err)
		}
		if _, err := f.Write([]byte("hello")); err != nil {
			t.Fatalf("f.Write(...): %v", err)
		}
		if err := f.Close(); err != nil {
			t.Fatalf("f.Close(): %v", err)
		}
	}
	f, err := OS{}.Open(path)
	if err != nil {
		t.Fatalf("OS{}.Open(%q): %v", path, err)
	}
	var buf bytes.Buffer
	if n, err := buf.ReadFrom(f); err != nil {
		t.Fatalf("first buf.ReadFrom(f): %v, %v", n, err)
	}
	if n, err := f.Seek(4, io.SeekStart); err != nil {
		t.Fatalf("f.Seek(4, io.SeekStart): %v, %v", n, err)
	}
	if got, want := buf.String(), "hello"; got != want {
		t.Errorf("first buf.String(): got %q, want %q", got, want)
	}
	if n, err := buf.ReadFrom(f); err != nil {
		t.Fatalf("second buf.ReadFrom(f): %v, %v", n, err)
	}
	if got, want := buf.String(), "helloo"; got != want {
		// BUG FIX: this message previously said "first buf.String()",
		// copy-pasted from the check above; it reports the second read.
		t.Errorf("second buf.String(): got %q, want %q", got, want)
	}
	if err := f.Close(); err != nil {
		t.Errorf("f.Close(): got %v, want nil", err)
	}
}
// TestOSOpenOSError checks that opening a nonexistent path reports ENOENT.
func TestOSOpenOSError(t *testing.T) {
	base := tempdir(t)
	defer os.RemoveAll(base)
	missing := filepath.Join(base, "enoent")
	f, err := OS{}.Open(missing)
	if !os.IsNotExist(err) {
		t.Errorf("OS{}.Open(%q): got %v, %v; want nil, ENOENT", missing, f, err)
	}
}
// tempdir creates a fresh temporary directory, failing the test on error.
func tempdir(t *testing.T) string {
	dir, err := ioutil.TempDir("", "fs_os_test")
	if err != nil {
		t.Fatalf("TempDir: %v", err)
	}
	return dir
}
// mkdirHard creates directory name with mode 0700, failing the test on error.
func mkdirHard(t *testing.T, name string) {
	const mode = os.FileMode(0700)
	if err := os.Mkdir(name, mode); err != nil {
		t.Fatalf("os.Mkdir(%q, %O) = %v", name, mode, err)
	}
}
// touchHard creates an empty file at name, failing the test on error.
func touchHard(t *testing.T, name string) {
	f, err := os.Create(name)
	if err != nil {
		t.Fatalf("os.Create(%q) = %v, %v", name, f, err)
	}
	if cerr := f.Close(); cerr != nil {
		t.Fatalf("f<%q>.Close() = %v", name, cerr)
	}
}
|
package main
import (
"fmt"
"log"
"time"
)
// threeSecond sleeps for three seconds, then reports completion.
func threeSecond() {
	time.Sleep(3 * time.Second)
	fmt.Println("3 seconds passed")
}
// oneSecond sleeps for one second, then reports completion.
func oneSecond() {
	time.Sleep(time.Second)
	fmt.Println("1 second passed")
}
// twoSecond sleeps for two seconds, then reports completion.
func twoSecond() {
	time.Sleep(2 * time.Second)
	fmt.Println("2 second passed")
}
// section1 demonstrates that a goroutine runs concurrently with the
// caller: threeSecond runs in the background while oneSecond and
// twoSecond run sequentially (1s + 2s) on the main goroutine.
// NOTE(review): the main path also takes ~3s, the same as the goroutine's
// sleep, so whether "3 seconds passed" prints before this returns is
// timing-dependent.
func section1() {
	// https://qiita.com/TakaakiFuruse/items/241578174fd2f00aaa8a
	fmt.Println(time.Now())
	go threeSecond()
	oneSecond()
	twoSecond()
	fmt.Println(time.Now())
}
// wait1 sleeps one second, logs, then reports completion on ch.
func wait1(ch chan string) {
	time.Sleep(time.Second)
	log.Print("waited 1 sec")
	ch <- "wait1 finished"
}
// wait2 sleeps two seconds, logs, then reports completion on ch.
func wait2(ch chan string) {
	time.Sleep(2 * time.Second)
	log.Print("waited 2 sec")
	ch <- "wait2 finished"
}
// wait5 sleeps five seconds, logs, then reports completion on ch.
func wait5(ch chan string) {
	time.Sleep(5 * time.Second)
	log.Print("waited 5 sec")
	ch <- "wait5 finished"
}
// section2 starts three workers on a shared channel and blocks until all
// three results have been received.
func section2() {
	// https://qiita.com/TakaakiFuruse/items/241578174fd2f00aaa8a
	results := make(chan string)
	log.Print("started")
	go wait1(results)
	go wait2(results)
	go wait5(results)
	first, second, third := <-results, <-results, <-results
	log.Print("finished")
	fmt.Println(first)
	fmt.Println(second)
	// These three lines appear together, only after every worker is done.
	fmt.Println(third)
}
// section3 shows the main goroutine blocking on a channel receive until
// a worker goroutine sends its result.
func section3() {
	// https://qiita.com/kkohtaka/items/c42bfc75bede7cd8dc50
	result := make(chan int)
	go func(out chan int) {
		fmt.Println("go func print")
		time.Sleep(2 * time.Second)
		out <- 100
	}(result)
	fmt.Println("normal print", <-result)
}
// section4 streams 42 values from a producer goroutine and drains them
// until the channel is closed.
func section4() {
	// https://qiita.com/kkohtaka/items/c42bfc75bede7cd8dc50
	stream := make(chan int)
	// buffered channel variants:
	//stream := make(chan int, 5)
	//stream := make(chan int, 10)
	go func(out chan int) {
		for i := 0; i < 42; i++ {
			log.Println("another goroutine is sending:", i)
			out <- i
		}
		close(out)
	}(stream)
	// Ranging over a channel receives until it is closed — equivalent to
	// the manual `v, ok := <-ch; if !ok { break }` loop.
	for v := range stream {
		log.Println("the main goroutine receives:", v)
	}
}
// section5 limits goroutine concurrency with a buffered channel used as
// a counting semaphore.
func section5() {
	// https://qiita.com/kkohtaka/items/c42bfc75bede7cd8dc50
	// Sends block while the buffer is full, so at most three goroutines
	// hold a slot at any time.
	limiter := make(chan struct{}, 3) // concurrency: 3
	for i := 0; i < 42; i++ {
		limiter <- struct{}{}
		go func(i int, limiter chan struct{}) {
			log.Println("goroutine: ", i)
			<-limiter // receive to release the slot
		}(i, limiter)
	}
	log.Println("the main goroutine")
}
// section6 demonstrates a request/response pattern using a channel of
// channels: each request sends a private reply channel over A, and the
// server goroutine answers on it. B and C are the two ends of the same
// reply channel.
func section6() {
	// https://qiita.com/kkohtaka/items/c42bfc75bede7cd8dc50
	A := make(chan chan int)
	go func(A chan chan int) {
		for {
			// FIX: the original wrapped this receive in a single-case
			// select, which is exactly equivalent to a plain receive
			// (staticcheck S1000) — simplified.
			B := <-A
			log.Println("another goroutine receives a request")
			B <- 100 // send the response
		}
	}(A)
	request := func() int {
		C := make(chan int)
		A <- C     // send the reply channel
		return <-C // receive the response
	}
	time.Sleep(3 * time.Second)
	log.Println("the main goroutine receives a response:", request())
}
// main runs one demo section; the others are kept commented out so they
// can be enabled individually.
func main() {
	//section1()
	//section2()
	//section3()
	//section4()
	//section5()
	section6()
}
|
package uaaclient
import (
"fmt"
"net/http"
)
// Session pairs a session identifier with the cookie that carries it.
type Session struct {
	// ID is the session identifier.
	ID string
	// Cookie is the HTTP cookie carrying the session ID.
	Cookie http.Cookie
}
// SetSessionCookie generates a new session ID, writes it to the response
// in an HttpOnly JSESSIONID cookie, and returns the ID.
func (o *UaaClient) SetSessionCookie(w http.ResponseWriter, r *http.Request) string {
	sessionID := fmt.Sprintf("%s.%s", simpleUUID(), o.config.ClientID)
	c := http.Cookie{
		Name:     "JSESSIONID",
		Value:    sessionID,
		Path:     o.config.RedirectURL,
		Domain:   r.Host,
		HttpOnly: true,
	}
	http.SetCookie(w, &c)
	return sessionID
}
// GetSessionID returns session ID from cookie
func GetSessionID(r *http.Request) (string, error) {
sessionCookie, err := r.Cookie("JSESSIONID")
if err != nil {
return "", err
}
return sessionCookie.Value, nil
}
|
package util
import (
"context"
"database/sql"
"fmt"
"strings"
"github.com/elgris/sqrl"
"github.com/alewgbl/fdwctl/internal/database"
"github.com/alewgbl/fdwctl/internal/logger"
"github.com/alewgbl/fdwctl/internal/model"
)
// GetServers returns the foreign servers defined in the connected
// database, including the host, port, and dbname option of each.
func GetServers(ctx context.Context, dbConnection *sql.DB) ([]model.ForeignServer, error) {
	log := logger.Log(ctx).
		WithField("function", "GetServers")
	query, _, err := sqrl.
		Select(
			"fs.foreign_server_name",
			"fs.foreign_data_wrapper_name",
			"fs.authorization_identifier",
			"fsoh.option_value AS hostname",
			"fsop.option_value::int AS port",
			"fsod.option_value AS dbname",
		).From("information_schema.foreign_servers fs").
		Join("information_schema.foreign_server_options fsoh ON fsoh.foreign_server_name = fs.foreign_server_name AND fsoh.option_name = 'host'").
		Join("information_schema.foreign_server_options fsop ON fsop.foreign_server_name = fs.foreign_server_name AND fsop.option_name = 'port'").
		Join("information_schema.foreign_server_options fsod ON fsod.foreign_server_name = fs.foreign_server_name AND fsod.option_name = 'dbname'").
		ToSql()
	if err != nil {
		log.Errorf("error creating query: %s", err)
		return nil, err
	}
	log.Tracef("query: %s", query)
	// FIX: use the caller's context for the query so it is cancellable;
	// the original accepted ctx but only used it for logging.
	rows, err := dbConnection.QueryContext(ctx, query)
	if err != nil {
		log.Errorf("error querying for servers: %s", err)
		return nil, err
	}
	defer database.CloseRows(ctx, rows)
	servers := make([]model.ForeignServer, 0)
	for rows.Next() {
		var server model.ForeignServer
		err = rows.Scan(&server.Name, &server.Wrapper, &server.Owner, &server.Host, &server.Port, &server.DB)
		if err != nil {
			// Best-effort: skip unscannable rows and return the rest.
			log.Errorf("error scanning result row: %s", err)
			continue
		}
		servers = append(servers, server)
	}
	// Evaluate rows.Err() once instead of three times.
	if err := rows.Err(); err != nil {
		log.Errorf("error iterating result rows: %s", err)
		return nil, err
	}
	return servers, nil
}
// FindForeignServer returns a pointer to a copy of the server named
// serverName, or nil when no such server exists in foreignServers.
func FindForeignServer(foreignServers []model.ForeignServer, serverName string) *model.ForeignServer {
	for i := range foreignServers {
		if foreignServers[i].Name == serverName {
			// Return a copy, as the original did, so the caller cannot
			// mutate the input slice through the pointer.
			match := foreignServers[i]
			return &match
		}
	}
	return nil
}
// DropServer drops the named foreign server, optionally cascading to
// dependent objects.
// NOTE(review): servername is interpolated directly into the SQL text;
// callers must pass trusted identifiers.
func DropServer(ctx context.Context, dbConnection *sql.DB, servername string, cascade bool) error {
	log := logger.Log(ctx).
		WithField("function", "DropServer")
	if servername == "" {
		return logger.ErrorfAsError(log, "server name is required")
	}
	query := fmt.Sprintf("DROP SERVER %s", servername)
	if cascade {
		query += " CASCADE"
	}
	log.Tracef("query: %s", query)
	if _, err := dbConnection.Exec(query); err != nil {
		log.Errorf("error dropping server: %s", err)
		return err
	}
	return nil
}
// CreateServer registers a new postgres_fdw foreign server using the
// name, host, port, and dbname carried by server.
// NOTE(review): values are interpolated directly into the SQL text;
// callers must pass trusted input.
func CreateServer(ctx context.Context, dbConnection *sql.DB, server model.ForeignServer) error {
	log := logger.Log(ctx).
		WithField("function", "CreateServer")
	query := fmt.Sprintf(
		"CREATE SERVER %s FOREIGN DATA WRAPPER postgres_fdw OPTIONS (host '%s', port '%d', dbname '%s')",
		server.Name,
		server.Host,
		server.Port,
		server.DB,
	)
	log.Tracef("query: %s", query)
	if _, err := dbConnection.Exec(query); err != nil {
		log.Errorf("error creating server: %s", err)
		return err
	}
	return nil
}
// UpdateServer alters the host, port, and/or dbname options of an
// existing foreign server; zero-valued fields are left unchanged.
//
// FIX: when every field was zero-valued, the original built the query
// "ALTER SERVER <name> OPTIONS (  )", which is invalid SQL and would
// always fail; an empty update is now a no-op.
// NOTE(review): values are interpolated directly into the SQL text;
// callers must pass trusted input.
func UpdateServer(ctx context.Context, dbConnection *sql.DB, server model.ForeignServer) error {
	log := logger.Log(ctx).
		WithField("function", "UpdateServer")
	// Edit server hostname, port, and dbname
	opts := make([]string, 0, 3)
	if server.Host != "" {
		opts = append(opts, fmt.Sprintf("SET host '%s'", server.Host))
	}
	if server.Port > 0 {
		opts = append(opts, fmt.Sprintf("SET port '%d'", server.Port))
	}
	if server.DB != "" {
		opts = append(opts, fmt.Sprintf("SET dbname '%s'", server.DB))
	}
	if len(opts) == 0 {
		// Nothing to change.
		return nil
	}
	query := fmt.Sprintf("ALTER SERVER %s OPTIONS ( %s )", server.Name, strings.Join(opts, ","))
	log.Tracef("query: %s", query)
	_, err := dbConnection.Exec(query)
	if err != nil {
		log.Errorf("error updating server: %s", err)
		return err
	}
	return nil
}
// UpdateServerName renames an existing foreign server object.
// NOTE(review): both names are interpolated directly into the SQL text;
// callers must pass trusted identifiers.
func UpdateServerName(ctx context.Context, dbConnection *sql.DB, server model.ForeignServer, newServerName string) error {
	log := logger.Log(ctx).
		WithField("function", "UpdateServerName")
	query := fmt.Sprintf("ALTER SERVER %s RENAME TO %s", server.Name, newServerName)
	log.Tracef("query: %s", query)
	if _, err := dbConnection.Exec(query); err != nil {
		log.Errorf("error renaming server object: %s", err)
		return err
	}
	return nil
}
// DiffForeignServers computes the sets of foreign servers to remove,
// add, and modify so that dbServers ends up matching dStateServers.
func DiffForeignServers(dStateServers []model.ForeignServer, dbServers []model.ForeignServer) (fsRemove []model.ForeignServer, fsAdd []model.ForeignServer, fsModify []model.ForeignServer) {
	fsRemove = make([]model.ForeignServer, 0)
	fsAdd = make([]model.ForeignServer, 0)
	fsModify = make([]model.ForeignServer, 0)
	// Servers present in the database but absent from the desired state
	// are removed.
	for _, dbServer := range dbServers {
		if FindForeignServer(dStateServers, dbServer.Name) == nil {
			fsRemove = append(fsRemove, dbServer)
		}
	}
	// Desired servers are added when missing from the database, and
	// modified otherwise.
	for _, desired := range dStateServers {
		if FindForeignServer(dbServers, desired.Name) == nil {
			fsAdd = append(fsAdd, desired)
		} else {
			fsModify = append(fsModify, desired)
		}
	}
	return fsRemove, fsAdd, fsModify
}
|
package commands
import (
"fmt"
"sync"
"strconv"
"encoding/json"
"github.com/JFrogDev/artifactory-cli-go/utils"
)
// Download searches Artifactory with an AQL query derived from
// downloadPattern and downloads the matching artifacts (unless DryRun).
// Returns the AQL query used for the download.
func Download(downloadPattern string, flags *utils.Flags) string {
	if flags.ArtDetails.SshKeyPath != "" {
		utils.SshAuthentication(flags.ArtDetails)
	}
	searchUrl := flags.ArtDetails.Url + "api/search/aql"
	data := utils.BuildAqlSearchQuery(downloadPattern, flags.Recursive, flags.Props)
	fmt.Println("Searching Artifactory using AQL query: " + data)
	if flags.DryRun {
		// Dry run: report the query without contacting the server.
		return data
	}
	resp, body := utils.SendPost(searchUrl, []byte(data), *flags.ArtDetails)
	fmt.Println("Artifactory response:", resp.Status)
	if resp.StatusCode == 200 {
		items := parseAqlSearchResponse(body)
		downloadFiles(items, flags)
		fmt.Println("Downloaded " + strconv.Itoa(len(items)) + " artifacts from Artifactory.")
	}
	return data
}
// downloadFiles downloads all result items using flags.Threads worker
// goroutines; worker k handles items k, k+T, k+2T, …
func downloadFiles(resultItems []AqlSearchResultItem, flags *utils.Flags) {
	size := len(resultItems)
	var wg sync.WaitGroup
	for i := 0; i < flags.Threads; i++ {
		wg.Add(1)
		go func(threadId int) {
			// FIX: defer Done so the WaitGroup is released even if a
			// download panics (the original called Done at the end).
			defer wg.Done()
			// FIX: the prefix depends only on the thread, so compute it
			// once instead of once per item (hoisted loop invariant).
			logMsgPrefix := utils.GetLogMsgPrefix(threadId, flags.DryRun)
			for j := threadId; j < size; j += flags.Threads {
				downloadPath := buildDownloadUrl(flags.ArtDetails.Url, resultItems[j])
				fmt.Println(logMsgPrefix + " Downloading " + downloadPath)
				if !flags.DryRun {
					downloadFile(downloadPath, resultItems[j].Path, resultItems[j].Name, logMsgPrefix, flags)
				}
			}
		}(i)
	}
	wg.Wait()
}
// downloadFile fetches a single artifact into localPath/localFileName, unless an
// identical copy (by checksum) already exists locally. Large artifacts that the
// server supports range requests for are fetched concurrently in parts.
func downloadFile(downloadPath, localPath, localFileName, logMsgPrefix string, flags *utils.Flags) {
	details := utils.GetFileDetailsFromArtifactory(downloadPath, *flags.ArtDetails)
	localFilePath := localPath + "/" + localFileName
	if !shouldDownloadFile(localFilePath, details, flags.ArtDetails.User, flags.ArtDetails.Password) {
		fmt.Println(logMsgPrefix + " File already exists locally.")
		return
	}
	// A single-request download is used when splitting is disabled, misconfigured,
	// the file is below the split threshold, or the server rejects range requests.
	singleRequest := flags.SplitCount == 0 ||
		flags.MinSplitSize < 0 ||
		flags.MinSplitSize*1000 > details.Size ||
		!details.AcceptRanges
	if singleRequest {
		resp := utils.DownloadFile(downloadPath, localPath, localFileName, flags.Flat, *flags.ArtDetails)
		fmt.Println(logMsgPrefix+" Artifactory response:", resp.Status)
		return
	}
	utils.DownloadFileConcurrently(
		downloadPath, localPath, localFileName, logMsgPrefix, details.Size, flags)
}
// buildDownloadUrl assembles the full download URL for one search result.
// A Path of "." marks an artifact sitting at the repository root, in which
// case the path segment is omitted.
func buildDownloadUrl(baseUrl string, resultItem AqlSearchResultItem) string {
	url := baseUrl + resultItem.Repo + "/"
	if resultItem.Path != "." {
		url += resultItem.Path + "/"
	}
	return url + resultItem.Name
}
// shouldDownloadFile reports whether the artifact must be fetched: true when no
// local copy exists or when the local MD5/SHA1 checksums differ from the remote ones.
func shouldDownloadFile(localFilePath string, artifactoryFileDetails *utils.FileDetails, user string, password string) bool {
	if !utils.IsFileExists(localFilePath) {
		return true
	}
	localFileDetails := utils.GetFileDetails(localFilePath)
	identical := localFileDetails.Md5 == artifactoryFileDetails.Md5 &&
		localFileDetails.Sha1 == artifactoryFileDetails.Sha1
	return !identical
}
// parseAqlSearchResponse decodes an AQL search response body and returns the
// result items. Decoding failures are routed through utils.CheckError.
func parseAqlSearchResponse(resp []byte) []AqlSearchResultItem {
	result := AqlSearchResult{}
	utils.CheckError(json.Unmarshal(resp, &result))
	return result.Results
}
// AqlSearchResult mirrors the top-level JSON structure of an Artifactory AQL search response.
type AqlSearchResult struct {
Results []AqlSearchResultItem
}
// AqlSearchResultItem identifies a single artifact returned by an AQL search.
type AqlSearchResultItem struct {
Repo string // repository holding the artifact
Path string // directory path inside the repository; "." for the repository root
Name string // file name of the artifact
}
|
package timer
import (
"time"
msg "../messageTypes"
)
// SendWithDelayInt asynchronously delivers message on ch once delay has elapsed.
// It returns immediately; the delayed send runs on a background goroutine.
func SendWithDelayInt(delay time.Duration, ch chan<- int, message int) {
	go func() {
		time.Sleep(delay)
		ch <- message
	}()
}
func sendWithDelayIntFunction(delay time.Duration, ch chan<- int, message int) {
<-time.After(delay)
ch <- message
}
// SendWithDelayHallOrder asynchronously delivers message on ch once delay has
// elapsed. It returns immediately; the delayed send runs on a background goroutine.
func SendWithDelayHallOrder(delay time.Duration, ch chan<- msg.HallOrder, message msg.HallOrder) {
	go func() {
		time.Sleep(delay)
		ch <- message
	}()
}
// sendWithDelayHallOrderFunction blocks for delay and then sends message on ch.
func sendWithDelayHallOrderFunction(delay time.Duration, ch chan<- msg.HallOrder, message msg.HallOrder) {
	time.Sleep(delay)
	ch <- message
}
|
/*
Copyright 2020 The Qmgo Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package field
import (
"time"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// DefaultFieldHook defines the interface to change default fields by hook
type DefaultFieldHook interface {
DefaultUpdateAt() // refresh the update timestamp
DefaultCreateAt() // set the creation timestamp if still unset
DefaultId()       // set the document id if still unset
}
// DefaultField defines the default fields to handle when operation happens
// import the DefaultField in document struct to make it working
type DefaultField struct {
	Id       primitive.ObjectID `bson:"_id"`
	CreateAt time.Time          `bson:"createAt"`
	UpdateAt time.Time          `bson:"updateAt"`
}

// DefaultUpdateAt stamps UpdateAt with the current local time on every call.
func (df *DefaultField) DefaultUpdateAt() {
	df.UpdateAt = time.Now().Local()
}

// DefaultCreateAt stamps CreateAt with the current local time, but only the
// first time (i.e. while CreateAt still holds its zero value).
func (df *DefaultField) DefaultCreateAt() {
	if !df.CreateAt.IsZero() {
		return
	}
	df.CreateAt = time.Now().Local()
}

// DefaultId assigns a fresh ObjectID when Id still holds its zero value.
func (df *DefaultField) DefaultId() {
	if !df.Id.IsZero() {
		return
	}
	df.Id = primitive.NewObjectID()
}
|
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v20191025
import (
"encoding/json"
tchttp "github.com/tencentyun/tcecloud-sdk-go/tcecloud/common/http"
)
// BindVpcDnsDomainRequest is the request payload for binding VPCs to a private DNS domain.
type BindVpcDnsDomainRequest struct {
*tchttp.BaseRequest
// Domain ID
DomainId *uint64 `json:"DomainId,omitempty" name:"DomainId"`
// VPC information
VpcInfos []*VpcInfos `json:"VpcInfos,omitempty" name:"VpcInfos" list`
}
// ToJsonString serializes the request as JSON.
func (r *BindVpcDnsDomainRequest) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the request from a JSON string.
func (r *BindVpcDnsDomainRequest) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// BindVpcDnsDomainResponse is the response payload of BindVpcDnsDomain.
type BindVpcDnsDomainResponse struct {
*tchttp.BaseResponse
Response *struct {
// Unique request ID, returned with every request; provide it when reporting an issue with this request.
RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
} `json:"Response"`
}
// ToJsonString serializes the response as JSON.
func (r *BindVpcDnsDomainResponse) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the response from a JSON string.
func (r *BindVpcDnsDomainResponse) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// CreateVpcDnsDomainRemarkRequest is the request payload for attaching a remark to a domain.
type CreateVpcDnsDomainRemarkRequest struct {
*tchttp.BaseRequest
// Domain ID
DomainId *uint64 `json:"DomainId,omitempty" name:"DomainId"`
// Remark
Remark *string `json:"Remark,omitempty" name:"Remark"`
}
// ToJsonString serializes the request as JSON.
func (r *CreateVpcDnsDomainRemarkRequest) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the request from a JSON string.
func (r *CreateVpcDnsDomainRemarkRequest) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// CreateVpcDnsDomainRemarkResponse is the response payload of CreateVpcDnsDomainRemark.
type CreateVpcDnsDomainRemarkResponse struct {
*tchttp.BaseResponse
Response *struct {
// Unique request ID, returned with every request; provide it when reporting an issue with this request.
RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
} `json:"Response"`
}
// ToJsonString serializes the response as JSON.
func (r *CreateVpcDnsDomainRemarkResponse) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the response from a JSON string.
func (r *CreateVpcDnsDomainRemarkResponse) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// CreateVpcDnsDomainRequest is the request payload for creating a private DNS domain.
type CreateVpcDnsDomainRequest struct {
*tchttp.BaseRequest
// Domain name
Domain *string `json:"Domain,omitempty" name:"Domain"`
// Tag list
Tags []*Tag `json:"Tags,omitempty" name:"Tags" list`
}
// ToJsonString serializes the request as JSON.
func (r *CreateVpcDnsDomainRequest) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the request from a JSON string.
func (r *CreateVpcDnsDomainRequest) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// CreateVpcDnsDomainResponse is the response payload of CreateVpcDnsDomain.
type CreateVpcDnsDomainResponse struct {
*tchttp.BaseResponse
Response *struct {
// Unique request ID, returned with every request; provide it when reporting an issue with this request.
RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
} `json:"Response"`
}
// ToJsonString serializes the response as JSON.
func (r *CreateVpcDnsDomainResponse) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the response from a JSON string.
func (r *CreateVpcDnsDomainResponse) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// CreateVpcDnsRecordRequest is the request payload for creating a DNS record in a domain.
type CreateVpcDnsRecordRequest struct {
*tchttp.BaseRequest
// Domain ID
DomainId *uint64 `json:"DomainId,omitempty" name:"DomainId"`
// Subdomain
SubDomain *string `json:"SubDomain,omitempty" name:"SubDomain"`
// Record type
RecordType *string `json:"RecordType,omitempty" name:"RecordType"`
// Record value
Value *string `json:"Value,omitempty" name:"Value"`
// Weight
Weight *string `json:"Weight,omitempty" name:"Weight"`
// MX priority
Mx *uint64 `json:"Mx,omitempty" name:"Mx"`
}
// ToJsonString serializes the request as JSON.
func (r *CreateVpcDnsRecordRequest) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the request from a JSON string.
func (r *CreateVpcDnsRecordRequest) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// CreateVpcDnsRecordResponse is the response payload of CreateVpcDnsRecord.
type CreateVpcDnsRecordResponse struct {
*tchttp.BaseResponse
Response *struct {
// Unique request ID, returned with every request; provide it when reporting an issue with this request.
RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
} `json:"Response"`
}
// ToJsonString serializes the response as JSON.
func (r *CreateVpcDnsRecordResponse) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the response from a JSON string.
func (r *CreateVpcDnsRecordResponse) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// DeleteVpcDnsDomainRequest is the request payload for deleting one or more domains.
type DeleteVpcDnsDomainRequest struct {
*tchttp.BaseRequest
// Domain IDs, comma separated
DomainIds *string `json:"DomainIds,omitempty" name:"DomainIds"`
}
// ToJsonString serializes the request as JSON.
func (r *DeleteVpcDnsDomainRequest) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the request from a JSON string.
func (r *DeleteVpcDnsDomainRequest) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// DeleteVpcDnsDomainResponse is the response payload of DeleteVpcDnsDomain.
type DeleteVpcDnsDomainResponse struct {
*tchttp.BaseResponse
Response *struct {
// Unique request ID, returned with every request; provide it when reporting an issue with this request.
RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
} `json:"Response"`
}
// ToJsonString serializes the response as JSON.
func (r *DeleteVpcDnsDomainResponse) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the response from a JSON string.
func (r *DeleteVpcDnsDomainResponse) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// DeleteVpcDnsRecordRequest is the request payload for deleting records from a domain.
type DeleteVpcDnsRecordRequest struct {
*tchttp.BaseRequest
// Domain ID
DomainId *uint64 `json:"DomainId,omitempty" name:"DomainId"`
// Record IDs, comma separated
RecordIds *string `json:"RecordIds,omitempty" name:"RecordIds"`
}
// ToJsonString serializes the request as JSON.
func (r *DeleteVpcDnsRecordRequest) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the request from a JSON string.
func (r *DeleteVpcDnsRecordRequest) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// DeleteVpcDnsRecordResponse is the response payload of DeleteVpcDnsRecord.
type DeleteVpcDnsRecordResponse struct {
*tchttp.BaseResponse
Response *struct {
// Unique request ID, returned with every request; provide it when reporting an issue with this request.
RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
} `json:"Response"`
}
// ToJsonString serializes the response as JSON.
func (r *DeleteVpcDnsRecordResponse) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the response from a JSON string.
func (r *DeleteVpcDnsRecordResponse) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// DescribeVpcDnsDomainListRequest is the paginated request for listing domains.
type DescribeVpcDnsDomainListRequest struct {
*tchttp.BaseRequest
// Length (number of entries to return)
Limit *uint64 `json:"Limit,omitempty" name:"Limit"`
// Offset
Offset *uint64 `json:"Offset,omitempty" name:"Offset"`
// Filters
Filters []*DomainListFilters `json:"Filters,omitempty" name:"Filters" list`
}
// ToJsonString serializes the request as JSON.
func (r *DescribeVpcDnsDomainListRequest) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the request from a JSON string.
func (r *DescribeVpcDnsDomainListRequest) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// DescribeVpcDnsDomainListResponse is the response payload of DescribeVpcDnsDomainList.
type DescribeVpcDnsDomainListResponse struct {
*tchttp.BaseResponse
Response *struct {
// Unique request ID, returned with every request; provide it when reporting an issue with this request.
RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
} `json:"Response"`
}
// ToJsonString serializes the response as JSON.
func (r *DescribeVpcDnsDomainListResponse) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the response from a JSON string.
func (r *DescribeVpcDnsDomainListResponse) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// DescribeVpcDnsRecordListRequest is the paginated request for listing records of a domain.
type DescribeVpcDnsRecordListRequest struct {
*tchttp.BaseRequest
// Domain ID
DomainId *uint64 `json:"DomainId,omitempty" name:"DomainId"`
// Length (number of entries to return)
Limit *uint64 `json:"Limit,omitempty" name:"Limit"`
// Offset
Offset *uint64 `json:"Offset,omitempty" name:"Offset"`
// Filters
Filters []*RecordListFilters `json:"Filters,omitempty" name:"Filters" list`
}
// ToJsonString serializes the request as JSON.
func (r *DescribeVpcDnsRecordListRequest) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the request from a JSON string.
func (r *DescribeVpcDnsRecordListRequest) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// DescribeVpcDnsRecordListResponse is the response payload of DescribeVpcDnsRecordList.
type DescribeVpcDnsRecordListResponse struct {
*tchttp.BaseResponse
Response *struct {
// Unique request ID, returned with every request; provide it when reporting an issue with this request.
RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
} `json:"Response"`
}
// ToJsonString serializes the response as JSON.
func (r *DescribeVpcDnsRecordListResponse) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the response from a JSON string.
func (r *DescribeVpcDnsRecordListResponse) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// DomainListFilters is one name/values filter applied when listing domains.
type DomainListFilters struct {
// Filter type
Name *string `json:"Name,omitempty" name:"Name"`
// Filter values
Values []*string `json:"Values,omitempty" name:"Values" list`
}
// ModifyVpcDnsRecordRequest is the request payload for modifying an existing record.
type ModifyVpcDnsRecordRequest struct {
*tchttp.BaseRequest
// Domain ID
DomainId *uint64 `json:"DomainId,omitempty" name:"DomainId"`
// Record ID
RecordId *uint64 `json:"RecordId,omitempty" name:"RecordId"`
// Subdomain
SubDomain *string `json:"SubDomain,omitempty" name:"SubDomain"`
// Record type
RecordType *string `json:"RecordType,omitempty" name:"RecordType"`
// Record value
Value *string `json:"Value,omitempty" name:"Value"`
// Weight
Weight *string `json:"Weight,omitempty" name:"Weight"`
// MX priority
Mx *uint64 `json:"Mx,omitempty" name:"Mx"`
}
// ToJsonString serializes the request as JSON.
func (r *ModifyVpcDnsRecordRequest) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the request from a JSON string.
func (r *ModifyVpcDnsRecordRequest) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// ModifyVpcDnsRecordResponse is the response payload of ModifyVpcDnsRecord.
type ModifyVpcDnsRecordResponse struct {
*tchttp.BaseResponse
Response *struct {
// Unique request ID, returned with every request; provide it when reporting an issue with this request.
RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
} `json:"Response"`
}
// ToJsonString serializes the response as JSON.
func (r *ModifyVpcDnsRecordResponse) ToJsonString() string {
    b, _ := json.Marshal(r)
    return string(b)
}
// FromJsonString populates the response from a JSON string.
func (r *ModifyVpcDnsRecordResponse) FromJsonString(s string) error {
    return json.Unmarshal([]byte(s), &r)
}
// RecordListFilters is one name/values filter applied when listing records.
type RecordListFilters struct {
// Filter type
Name *string `json:"Name,omitempty" name:"Name"`
// Filter values
Values []*string `json:"Values,omitempty" name:"Values" list`
}
// Tag is a key/value label attached to a domain.
type Tag struct {
// Tag key
Key *string `json:"Key,omitempty" name:"Key"`
// Tag value
Value *string `json:"Value,omitempty" name:"Value"`
}
// VpcInfos identifies a VPC to bind to a private DNS domain.
type VpcInfos struct {
// VpcId
VpcId *string `json:"VpcId,omitempty" name:"VpcId"`
// RegionId
RegionId *string `json:"RegionId,omitempty" name:"RegionId"`
// UnVpcId
UnVpcId *string `json:"UnVpcId,omitempty" name:"UnVpcId"`
}
|
package ufile
import (
"context"
"fmt"
model "github.com/cloudreve/Cloudreve/v3/models"
"github.com/cloudreve/Cloudreve/v3/pkg/cache"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx"
"github.com/cloudreve/Cloudreve/v3/pkg/request"
ufsdk "github.com/ufilesdk-dev/ufile-gosdk"
"testing"
)
// TestDriver_List exercises Driver.List against a live UFile bucket.
// Credentials are intentionally blank — fill in the ufsdk.Config fields to run
// it manually; failures are only printed, the test itself never fails.
func TestDriver_List(t *testing.T) {
config := ufsdk.Config{
PublicKey: "",
PrivateKey: "",
BucketName: "",
FileHost: "",
BucketHost: "",
VerifyUploadMD5: false,
}
uf, err := ufsdk.NewFileRequest(&config, nil)
if err != nil {
fmt.Println(err)
return
}
handler := Driver{
Client: uf,
HTTPClient: request.HTTPClient{},
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// List the "cloud" prefix non-recursively and dump the result.
list, err := handler.List(ctx, "cloud", false)
if err != nil {
fmt.Println(err)
}
fmt.Println(list)
}
// TestDriver_Get exercises Driver.Get on a private-policy bucket against a live
// UFile account. Credentials are intentionally blank — fill them in to run it
// manually; failures are only printed, the test itself never fails.
func TestDriver_Get(t *testing.T) {
config := ufsdk.Config{
PublicKey: "",
PrivateKey: "",
BucketName: "",
FileHost: "",
BucketHost: "",
VerifyUploadMD5: false,
}
uf, err := ufsdk.NewFileRequest(&config, nil)
if err != nil {
fmt.Println(err)
return
}
handler := Driver{
Policy: &model.Policy{
IsPrivate: true,
},
Client: uf,
HTTPClient: request.HTTPClient{},
}
// Get reads the file model from the context; set up a minimal one plus the
// preview timeout setting it consults.
ctx := context.WithValue(context.Background(), fsctx.FileModelCtx, model.File{Size: 3, Name: "abc.txt"})
cache.Set("setting_preview_timeout", "3600", 0)
resp, err := handler.Get(ctx, "cloud123/")
if err != nil {
fmt.Println(err)
}
fmt.Println("resp",resp)
}
// TestDriver_Token exercises upload-token generation against a live UFile
// account. Credentials are intentionally blank — fill them in to run it
// manually; failures are only printed, the test itself never fails.
func TestDriver_Token(t *testing.T) {
config := ufsdk.Config{
PublicKey: "",
PrivateKey: "",
BucketName: "",
FileHost: "",
BucketHost: "",
VerifyUploadMD5: false,
}
uf, err := ufsdk.NewFileRequest(&config, nil)
if err != nil {
fmt.Println(err)
return
}
handler := Driver{
Policy: &model.Policy{
IsPrivate: true,
},
Client: uf,
HTTPClient: request.HTTPClient{},
}
// Token builds callback URLs from the site URL setting and the save path in
// the context.
cache.Set("setting_siteURL", "http://localhost:5212", 0)
ctx := context.WithValue(context.Background(), fsctx.SavePathCtx, "test.txt")
resp, err := handler.Token(ctx, 120, "")
if err != nil {
fmt.Println(err)
}
fmt.Println(resp)
}
// TestDriver_CreateBucket exercises bucket creation against a live UFile
// account (note: NewBucketRequest, not NewFileRequest). Credentials are
// intentionally blank — fill them in to run it manually; failures are only
// printed, the test itself never fails.
func TestDriver_CreateBucket(t *testing.T) {
config := ufsdk.Config{
PublicKey: "",
PrivateKey: "",
BucketName: "",
FileHost: "",
BucketHost: "",
VerifyUploadMD5: false,
}
uf, err := ufsdk.NewBucketRequest(&config, nil)
if err != nil {
fmt.Println(err)
return
}
handler := Driver{
Client: uf,
HTTPClient: request.HTTPClient{},
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
err = handler.CreateBucket(ctx, "yx2020", "cn-gd", "private", "")
if err != nil {
fmt.Println(err)
}
fmt.Println("create over")
}
|
package energy_resources
import (
"net/http"
httplib "gitlab.com/semestr-6/projekt-grupowy/backend/go-libs/http-lib"
)
// GetRoutes returns the HTTP routes served by the energy_resources module.
// Every functional route is registered together with a matching OPTIONS route
// (handled by httplib.RespondStatus200) so CORS preflight requests succeed.
func GetRoutes() (routes httplib.Routes) {
	routes = httplib.Routes{}
	// addWithPreflight appends the given route plus its OPTIONS twin, removing
	// the route/OPTIONS duplication the previous version spelled out by hand.
	addWithPreflight := func(r httplib.Route) {
		routes = append(routes,
			r,
			httplib.Route{
				HttpMethod:  http.MethodOptions,
				Route:       r.Route,
				HandlerFunc: httplib.RespondStatus200,
			},
		)
	}
	addWithPreflight(httplib.Route{
		HttpMethod:  http.MethodPost,
		Route:       "/add-energy-resource-attribute",
		HandlerFunc: addEnergyResourceAttribute,
	})
	addWithPreflight(httplib.Route{
		HttpMethod:  http.MethodPost,
		Route:       "/add-gus-resource-attribute",
		HandlerFunc: addGUSResourceAttribute,
	})
	addWithPreflight(httplib.Route{
		HttpMethod:  http.MethodGet,
		Route:       "/get-all-energy-resource-attributes",
		HandlerFunc: getAllEnergyResourceAttributes,
	})
	addWithPreflight(httplib.Route{
		HttpMethod:  http.MethodGet,
		Route:       "/get-all-energy-resources",
		HandlerFunc: getAllEnergyResources,
	})
	addWithPreflight(httplib.Route{
		HttpMethod:  http.MethodGet,
		Route:       "/get-all-gus-resources",
		HandlerFunc: getAllGUSResources,
	})
	addWithPreflight(httplib.Route{
		HttpMethod:  http.MethodGet,
		Route:       "/get-gus-resources-id",
		HandlerFunc: getGUSResourcesID,
	})
	addWithPreflight(httplib.Route{
		HttpMethod:  http.MethodGet,
		Route:       "/get-energy-resource-attribute-by-id",
		HandlerFunc: getEnergyResourceAttributeById,
	})
	addWithPreflight(httplib.Route{
		HttpMethod:  http.MethodGet,
		Route:       "/get-gus-resource-by-id",
		HandlerFunc: getGUSResourceById,
	})
	addWithPreflight(httplib.Route{
		HttpMethod:  http.MethodPut,
		Route:       "/edit-energy-resource-attribute",
		HandlerFunc: editEnergyResourceAttribute,
	})
	addWithPreflight(httplib.Route{
		HttpMethod:  http.MethodPut,
		Route:       "/edit-gus-resource",
		HandlerFunc: editGUSResource,
	})
	return routes
}
|
package medkit
import (
"fmt"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// showConfigCmd represents the "config" subcommand; its Run delegates to
// showConfig, which prints the resolved configuration values.
var showConfigCmd = &cobra.Command{
Use: "config",
Short: "display the MEDKIT configuration",
Long: `
Display the current MEDKIT configuration. This command takes into account
the config file, environment variables, and command line flags.`,
Run: func(cmd *cobra.Command, args []string) {
showConfig()
},
}
// init is intentionally empty; showConfigCmd is presumably registered with the
// root command elsewhere — TODO confirm, otherwise the command is unreachable.
func init() {
}
// showConfig displays the program's configuration settings.
func showConfig() {
	fmt.Println("MEDKIT configuration settings:")
	fmt.Println()
	// Print each known configuration key with its resolved value (or a
	// placeholder when nothing is set).
	for _, key := range []string{HomeDirectory, Bundles, DotFilesDirectory, BackupExtension} {
		value := viper.GetString(key)
		if value == "" {
			fmt.Printf("    %s: (no value set)\n", key)
			continue
		}
		fmt.Printf("    %s: %s\n", key, value)
	}
}
|
/*
You are given an array people where people[i] is the weight of the ith person, and an infinite number of boats where each boat can carry a maximum weight of limit. Each boat carries at most two people at the same time, provided the sum of the weight of those people is at most limit.
Return the minimum number of boats to carry every given person.
Example 1:
Input: people = [1,2], limit = 3
Output: 1
Explanation: 1 boat (1, 2)
Example 2:
Input: people = [3,2,2,1], limit = 3
Output: 3
Explanation: 3 boats (1, 2), (2) and (3)
Example 3:
Input: people = [3,5,3,4], limit = 5
Output: 4
Explanation: 4 boats (3), (3), (4), (5)
Constraints:
1 <= people.length <= 5 * 10^4
1 <= people[i] <= limit <= 3 * 10^4
*/
package main
import "sort"
// main runs the example cases from the problem statement as sanity checks.
func main() {
	cases := []struct {
		people []int
		limit  int
		want   int
	}{
		{[]int{1, 2}, 3, 1},
		{[]int{3, 2, 2, 1}, 3, 3},
		{[]int{3, 5, 3, 4}, 5, 4},
	}
	for _, c := range cases {
		assert(boats(c.people, c.limit) == c.want)
	}
}
// assert panics when the given condition does not hold.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// boats returns the minimum number of boats needed to carry everyone, where a
// boat holds at most two people whose combined weight does not exceed limit.
// Greedy two-pointer: after sorting, pair the heaviest remaining person with
// the lightest one whenever they fit together.
func boats(people []int, limit int) int {
	sort.Ints(people)
	count := 0
	lo, hi := 0, len(people)-1
	for lo <= hi {
		// Weights are bounded by 3*10^4, so this sum cannot overflow.
		if people[lo]+people[hi] <= limit {
			lo++
		}
		hi--
		count++
	}
	return count
}
|
package main
import (
"fmt"
"os"
)
var cwd string
// init caches the process working directory in cwd at startup.
// NOTE(review): on failure the error is only written to stderr and cwd stays
// empty; any user of cwd must tolerate an empty value.
func init() {
var err error
cwd, err = os.Getwd()
if err != nil {
fmt.Fprintf(os.Stderr, "os.Getwd failed: %v", err)
}
//fmt.Printf("Current work directory: %s\n", cwd)
}
/*
func main() {
x := "hello!"
fmt.Printf("hello addr %v\n", &x)
for _, x := range x {
fmt.Printf("out addr %v\n", &x)
if x != '!' {
fmt.Printf("inner1 addr %v\n", &x)
x := x + 'A' - 'a'
fmt.Printf("inner2 addr %v\n", &x)
//fmt.Printf("%c", x)
}
}
}*/
// main demonstrates variable shadowing and address identity: each `x := ...`
// declares a brand-new variable, and the printed addresses show which
// declaration the identifier `x` refers to in each scope.
func main() {
x := "hello!"
for i := 0; i < len(x); i++ {
x := x[i] // shadows the outer string with the current byte
fmt.Printf("out addr %v\n", &x)
if x != '!' {
fmt.Printf("inner1 addr %v\n", &x)
x := x + 'A' - 'a' // yet another shadow; the byte above is untouched
fmt.Printf("inner2 addr %v\n", &x)
}
}
}
|
package admin
import "firstProject/app/dto"
// AdminContract describes the admin account operations an implementation must
// provide; both take the admin data as an AdminDto and return an error on failure.
type AdminContract interface {
Register(dto dto.AdminDto) error
Login(dto dto.AdminDto) error
}
|
package handlers
import (
"encoding/json"
"net/http"
"strconv"
"strings"
"github.com/rest_service_task/impl/errors"
"github.com/rest_service_task/impl/structs"
)
// A errorResponse is the default error message that is generated.
//
// swagger:response errorResponse
type GenericError struct {
// in: body
Body struct {
Code int32 `json:"code"`
// NOTE(review): `error` values have no standard JSON encoding — this likely
// marshals as {} unless the concrete type implements json.Marshaler; confirm
// the intended wire format (a plain string field may be meant here).
Message error `json:"message"`
} `json:"body"`
}
// A GetQueryFlags contains the query flags for things that list films.
// swagger:parameters listFilms
type GetQueryFlags struct {
// Film genre
Genre string `json:"g"`
// Release year of film
ReleaseYear int `json:"ry"`
// Page number
Page int `json:"p"`
// Page size
Size int `json:"sz"`
}
// swagger:route GET /get film listFilms
// get films
// Responses:
//  default: errorResponse
//  200:
//
// Get lists films filtered by optional genre (g) and release year (ry), paged
// by mandatory positive page (p) and size (sz) query parameters.
func (hs *Handlers) Get(w http.ResponseWriter, r *http.Request) {
	var genrePtr *string
	if genre := r.URL.Query().Get("g"); genre != "" {
		genre = strings.ToLower(genre)
		genrePtr = &genre
	}
	// The three integer parameters share one parse-and-reject path.
	releaseYearPtr, ok := hs.intQueryParam(w, r, "ry")
	if !ok {
		return
	}
	pagePtr, ok := hs.intQueryParam(w, r, "p")
	if !ok {
		return
	}
	sizePtr, ok := hs.intQueryParam(w, r, "sz")
	if !ok {
		return
	}
	var page, size int
	if pagePtr != nil {
		page = *pagePtr
	}
	if sizePtr != nil {
		size = *sizePtr
	}
	// Pagination is mandatory and must be positive. This is a client error, so
	// answer 400 — the previous 500 status contradicted the BAD_REQUEST body.
	if page <= 0 || size <= 0 {
		if fatal := errors.WriteHttpErrorMessage(w, http.StatusBadRequest, errors.NewError(errors.BAD_REQUEST_ERROR)); fatal != nil {
			hs.logger.Println(fatal.Error())
		}
		return
	}
	films, err := hs.database.GetFilms(size, page, genrePtr, releaseYearPtr)
	if err != nil {
		hs.logger.Println(err.Error())
		if fatal := errors.WriteHttpErrorMessage(w, http.StatusInternalServerError, errors.NewError(errors.INTERNAL_ERROR)); fatal != nil {
			hs.logger.Println(fatal.Error())
		}
		return
	}
	w.Header().Set("Content-Type", "application/json;charset=UTF-8")
	w.WriteHeader(http.StatusOK)
	response := structs.FilmResponce{}
	if films != nil {
		response.Count = len(*films)
		response.Films = *films
	}
	if err = json.NewEncoder(w).Encode(response); err != nil {
		hs.logger.Println(err.Error())
	}
}

// intQueryParam reads the named query parameter as an integer.
// It returns (nil, true) when the parameter is absent, (&value, true) on
// success, and (nil, false) after writing a 400 response when the value is not
// an integer. (The previous inline copies logged the parse error twice instead
// of logging the response-write failure.)
func (hs *Handlers) intQueryParam(w http.ResponseWriter, r *http.Request, name string) (*int, bool) {
	raw := r.URL.Query().Get(name)
	if raw == "" {
		return nil, true
	}
	v, err := strconv.Atoi(raw)
	if err != nil {
		hs.logger.Println(err.Error())
		if fatal := errors.WriteHttpErrorMessage(w, http.StatusBadRequest, errors.NewError(errors.BAD_REQUEST_ERROR)); fatal != nil {
			hs.logger.Println(fatal.Error())
		}
		return nil, false
	}
	return &v, true
}
|
//go:build go1.21
// +build go1.21
package main
import (
"github.com/go-playground/log/v8"
stdlog "log"
"log/slog"
)
// main shows the logger acting as a sink for both the standard library `log`
// package and `slog`.
func main() {
// This example demonstrates how to redirect the std log and slog to this logger by using it as
// an slog.Handler.
log.RedirectGoStdLog(true)
log.WithFields(log.G("grouped", log.F("key", "value"))).Debug("test")
// After RedirectGoStdLog(true), std log output should flow through this
// logger — see the log.RedirectGoStdLog documentation.
stdlog.Println("test stdlog")
slog.Info("test slog", slog.Group("group", "key", "value"))
}
|
package main
import (
"context"
"log"
"time"
)
// main starts the consumer in the background, produces messages, then sleeps
// briefly so the consumer has a chance to drain before the process exits.
func main() {
go consume()
if err := produce(context.Background()); err != nil {
log.Fatalf("Error producing messages to kafka: %v", err)
}
// NOTE(review): a fixed sleep races with the consumer; a done channel or
// WaitGroup would make shutdown deterministic — TODO consider.
time.Sleep(3*time.Second)
}
|
package store
import (
"github.com/david-sorm/montesquieu/article"
"github.com/david-sorm/montesquieu/users"
"html/template"
)
// import "github.com/lib/pq"
// StoreConfig contains data passed to a Store implementation
type StoreConfig struct {
// connection parameters for the backing database
Host string
Database string
Username string
Password string
Port string
// how many articles a single index page displays
ArticlesPerIndexPage uint64
}
// StoreInfo should contain info about the store implementation, so Montesquieu can
// properly register it
type StoreInfo struct {
// should be a json-friendly and short name
Name string
// doesn't have to be json-friendly
Developer string
}
/*
Store is an interface meant to be implemented by a package which should do the
actual work of managing and keeping the data
*/
type Store interface {
// TODO better function argument design (it's still pretty bad)
// TODO better internal (caused by the app malfunctioning etc.) and external (user-caused) error handling
// Info() has to return general info about the Store implementation itself
Info() StoreInfo
/*
Store should be prepared for work upon returning nil from this function
Non-nil response means an error has occurred; error will be shown in console
If the first argument is nil, it means the store shouldn't monitor changes
If a function is passed, it should be called every time a change is detected
The second parameter is a config that contains relevant parsed data from config
file
*/
Init(f func(), cfg StoreConfig) error
// The remaining functionality is split by entity into embedded interfaces.
ArticleStore
UserStore
AuthorStore
AdminStore
}
/*
CachingStore should mostly have the same functionality as Store,
only with the difference of Use(Store) and different internal logic
(returning data from its own cache instead of doing queries every time there's an
article request, etc.)
*/
type CachingStore interface {
Store
/*
We use this method to pass the Store which should be used by the CachingStore.
CachingStore should call Init() on the Store before it starts initialising itself.
Any errors that happened during the Init() of the Store should be returned
through CachingStore's Init()
*/
Use(Store)
}
// ArticleStore groups all article-related persistence operations.
type ArticleStore interface {
// Articles
/*
Should return a slice of articles sorted from latest.
'from' means how many articles from latest should be cut off from the start
(0 = don't cut off anything).
'to' means how many articles minus latest should be cut off to the end.
Example: LoadArticlesSortedByLatest(2,7) should load 5 articles, starting
with the 3rd most recent article and ending with the 7th
*/
LoadArticlesSortedByLatest(from uint64, to uint64) []article.Article
/*
Should return the article by the unique ID, obviously the ID in Article will
be ignored, so it can be set to nil.
If an article with the ID can't be found, the second return parameter should
return false, else if an article was found, return true
*/
GetArticleByID(id uint64) (article.Article, bool)
/*
Should return the total number of articles, used for determining how many
index pages we have
*/
GetArticleNumber() uint64
// When called, the Store should make a new article in its database and save it.
AddArticle(title string, authorId uint64, timestamp uint64, content template.HTML)
// Store should look up the article by its ID and make corresponding changes
EditArticle(article.Article)
// The article should be looked up by its ID and deleted
RemoveArticle(id uint64)
}
// UserStore groups all user-related persistence operations.
type UserStore interface {
// Users
// Lists Users, sorts by ID
ListUsers(from uint64, to uint64) []users.User
// Gets user ID from login name
// Returns whether a matching user was found using bool
// True = Found, False = Not
GetUserID(login string) (uint64, bool)
// Searches for a user by ID
GetUser(id uint64) users.User
// Makes a new user
AddUser(displayName string, login string, password string)
// Edits a user according to his ID
EditUser(users.User)
// Removes a user according to his ID
RemoveUser(id uint64)
}
// AuthorStore groups all author-related persistence operations.
type AuthorStore interface {
// Authors
// Lists Authors, sorts by ID
ListAuthors(from uint64, to uint64) []users.Author
// Returns nil if the User is not an Author
GetAuthor(userId uint64) users.Author
// Adds an Author
AddAuthor(userId uint64, authorName string)
// Links a user to an Author
// If User is nil, any link of an Author to a User should be deleted
LinkAuthor(authorId uint64, userId uint64)
// Removes an author
RemoveAuthor(authorId uint64)
}
// AdminStore groups all admin-related persistence operations.
type AdminStore interface {
// Admins
// Searches whether user is an admin according to whether his ID exists
IsAdmin(userId uint64) bool
// Lists Admins, sorts by ID
// Since admins are just users with elevated privileges, just return the user's
// info
ListAdmins(from uint64, to uint64) []users.User
// Promotes a User to be an Admin
PromoteToAdmin(userId uint64)
// Demotes an Admin to a User only
DemoteFromAdmin(userID uint64)
}
|
/**
* Copyright 2019 Innodev LLC. All rights reserved.
* Use of this source code is governed by a BSD-style
* license that can be found in the LICENSE file.
*/
package errors
import (
"fmt"
"runtime"
"github.com/jinzhu/copier"
)
// _copy deep-copies an Err (or *Err) so later mutation of the copy cannot
// affect the original. A nil *Err (or any other type) yields nil.
func _copy(e interface{}) *Err {
switch err := e.(type) {
case *Err:
if err == nil {
return nil
}
// Dereference and fall into the value case below.
return _copy(*err)
case Err:
out := new(Err)
// NOTE(review): copier.Copy receives **Err here (out is already a
// pointer); confirm jinzhu/copier handles this double indirection as
// intended.
copier.Copy(&out, err)
if err.Meta != nil {
// Rebuild the metadata map so keys can be added to the copy independently.
out.Meta = map[string]interface{}{}
for k, v := range err.Meta {
out.Meta[k] = v
}
}
if err.Stack != nil {
// Copy each stack frame into a fresh slice.
out.Stack = make([]Frame, len(err.Stack))
for i := range out.Stack {
copier.Copy(&out.Stack[i], err.Stack[i])
}
}
return out
}
return nil
}
// _new normalizes an arbitrary value into an Error: existing *Err values are
// deep-copied, other errors and plain values are stringified into a fresh Err.
// depth selects how many stack frames to skip; -1 disables stack capture.
// Returns nil for nil input.
func _new(e interface{}, depth int) Error {
	if e == nil {
		return nil
	}
	var msg string
	switch val := e.(type) {
	case *Err:
		// Guard the typed-nil case: the previous `_copy(*val)` dereferenced
		// val unconditionally and panicked on a nil *Err.
		if val == nil {
			return nil
		}
		return _copy(val)
	case error:
		msg = val.Error()
	default:
		msg = fmt.Sprintf("%v", e)
	}
	out := &Err{
		Meta:    map[string]interface{}{},
		Message: msg,
	}
	// depth == -1 means the caller does not want a stack trace.
	if depth != -1 {
		out.Stack = getStack(depth)
	}
	return out
}
// _wrap returns a new Error carrying message with err recorded as its cause.
// When err is itself an *Err, its stack and metadata are reused so the trace
// keeps pointing at the original failure site. Returns nil when err is nil.
func _wrap(err error, message string) Error {
	if err == nil {
		return nil
	}
	out := &Err{
		Message: message,
	}
	if e, ok := err.(*Err); ok {
		// Guard against a typed-nil *Err: the previous code dereferenced the
		// nil copy (cpy.Stack) and panicked.
		if cpy := _copy(e); cpy != nil {
			out.Err = cpy
			out.Stack = cpy.Stack
			out.Meta = cpy.Meta
			return out
		}
	}
	out.Stack = getStack(2)
	return out
}
// getStack captures the current call stack, skipping `skip` caller frames in
// addition to the two frames used by getStack/runtime.Callers themselves.
// runtime-internal entry frames are filtered out. Returns nil when nothing
// could be captured.
func getStack(skip int) []Frame {
	pcs := make([]uintptr, StackBufferSize)
	length := runtime.Callers(2+skip, pcs)
	if length == 0 {
		return nil
	}
	frames := runtime.CallersFrames(pcs[:length])
	out := make([]Frame, 0, length)
	for {
		frame, more := frames.Next()
		// Use frame.Function rather than frame.Func.Name(): frame.Func is nil
		// for inlined and non-Go frames, which made the old code panic.
		fn := frame.Function
		if fn != "runtime.main" && fn != "runtime.goexit" {
			out = append(out, Frame{
				Function: fn,
				File:     frame.File,
				Line:     frame.Line,
			})
		}
		// Check `more` only after processing: Next returns the final valid
		// frame with more == false, and the old loop silently dropped it.
		if !more {
			break
		}
	}
	return out
}
|
package main
import (
"github.com/gin-gonic/gin"
"net/http"
)
// IndexGet redirects the caller to the sparrho.com home page.
func IndexGet(c *gin.Context) {
	const home = "http://www.sparrho.com/"
	c.Redirect(http.StatusSeeOther, home)
}
// SuccessJSON responds with a JSON object naming the HTTP method used.
func SuccessJSON(c *gin.Context) {
	payload := gin.H{"method": c.Request.Method}
	c.JSON(http.StatusOK, payload)
}
// RecordAnalytics handles the recording of an analytics event.
// Currently a stub: it acknowledges the event with 202 Accepted and an
// empty body without persisting anything.
func RecordAnalytics(c *gin.Context) {
	c.String(http.StatusAccepted, "")
}
|
package vending_machine
import "spark_networks_assessment/pkg/repositories/products"
// Service models a simple vending machine: insert coins, select a
// product, and query the current balance.
type Service interface {
	// Charge adds the given coins to the user's balance; reports success.
	Charge(coins []int) bool
	// Select attempts to buy the product; true when the purchase succeeds.
	Select(product *products.Product) bool
	// Balance returns the total value of the coins inserted so far.
	Balance() int
}

// service is the default Service implementation backed by a product
// repository.
type service struct {
	userCoins   []int               // coins inserted so far
	productRepo products.Repository // product stock and purchasing
}
// New builds a vending-machine Service that sells products from
// productRepo, starting with an empty coin balance.
func New(productRepo products.Repository) Service {
	s := &service{
		userCoins:   []int{},
		productRepo: productRepo,
	}
	return s
}
// Balance returns the sum of all coins the user has inserted.
func (s *service) Balance() int {
	total := 0
	for _, coin := range s.userCoins {
		total += coin
	}
	return total
}
// Charge adds the inserted coins to the user's running balance.
// It always reports success; the coin values are not yet validated.
func (s *service) Charge(coins []int) bool {
	//TODO validate coins
	s.userCoins = append(s.userCoins, coins...)
	return true
}
// Select attempts to purchase the product. It fails when the product is
// out of stock or the inserted balance is below the product's price.
func (s *service) Select(product *products.Product) bool {
	// Out of stock?
	if s.productRepo.Check(product) == 0 {
		return false
	}
	// Not enough coins inserted?
	if s.Balance() < product.Value {
		return false
	}
	return s.productRepo.Purchase(product)
}
|
package main
import (
"flag"
"log"
"os"
"path/filepath"
)
// startDir is the root of the tree to walk; settable with the -d flag.
var startDir string

// main parses flags and walks the tree rooted at startDir, logging every
// entry visited.
func main() {
	flag.StringVar(&startDir, "d", ".", "starting directory")
	flag.Parse()
	log.Printf("Walking tree based at %q\n", startDir)
	// Surface walk failures instead of silently dropping the error.
	if err := filepath.Walk(startDir, walker); err != nil {
		log.Fatalf("walk failed: %v", err)
	}
}
func walker(path string, info os.FileInfo, inErr error) (outErr error) {
log.Printf("Current path:%s isdir:%t\n", path, info.IsDir())
return nil
}
|
package main
import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"time"

	"github.com/gin-gonic/gin"
)
// callMicro2 forwards the request to the micro2 service, propagating the
// Zipkin/B3 tracing headers so the distributed trace stays connected, and
// returns the downstream response body wrapped in a short message.
// On failure it logs and returns an error message instead of killing the
// process.
func callMicro2(ctx *gin.Context) string {
	// Headers used for distributed tracing; they must be copied from the
	// inbound request onto the outbound one to keep the trace intact.
	tracingHeaders := []string{
		"x-request-id",
		"x-b3-traceid",
		"x-b3-spanid",
		"x-b3-sampled",
		"x-b3-parentspanid",
		"x-b3-flags",
		"x-ot-span-context",
	}
	headersToSend := make(map[string]string)
	for _, key := range tracingHeaders {
		if val := ctx.Request.Header.Get(key); val != "" {
			headersToSend[key] = val
		}
	}
	// Always set a timeout so a hung downstream cannot pin this handler
	// forever.
	client := &http.Client{Timeout: 10 * time.Second}
	req, err := http.NewRequest("GET", "http://micro2:8081/call", nil)
	if err != nil {
		// Never log.Fatal inside a request handler: that kills the whole
		// server. Report the failure in the response instead.
		log.Printf("building request: %v", err)
		return " Micro1 error: " + err.Error()
	}
	for key, value := range headersToSend {
		req.Header.Add(key, value)
	}
	response, err := client.Do(req)
	if err != nil {
		log.Printf("calling micro2: %v", err)
		return " Micro1 error: " + err.Error()
	}
	defer response.Body.Close()
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		log.Printf("reading micro2 response: %v", err)
		return " Micro1 error: " + err.Error()
	}
	fmt.Println(string(body))
	return " Micro1 llamando a " + string(body)
}
// setupRouter builds the gin engine exposing /call (proxy to micro2) and
// /hello (static greeting).
func setupRouter() *gin.Engine {
	engine := gin.Default()
	// Tracing middleware is currently disabled:
	//tracer := opentracing.GlobalTracer()
	//r.Use(ginhttp.Middleware(tracer))
	engine.GET("/call", func(ctx *gin.Context) {
		ctx.String(200, callMicro2(ctx))
	})
	engine.GET("/hello", func(ctx *gin.Context) {
		ctx.JSON(200, gin.H{"message": "Hello World"})
	})
	return engine
}
// main starts the HTTP server on port 8080 and exits on a listen failure.
func main() {
	r := setupRouter()
	if err := r.Run(":8080"); err != nil {
		// Run only returns on error; previously the error was dropped.
		log.Fatal(err)
	}
}
|
package site
import (
"fmt"
"net/http"
)
func Abort(status int, w http.ResponseWriter, r *http.Request) {
switch status {
case 404:
w.WriteHeader(http.StatusNotFound)
fmt.Fprint(w, "404 page error")
case 301:
//
case 302:
//
}
}
|
package model
import "time"
type Precipitation struct {
Id int64 `json:"id"`
Value float32 `json:"value"`
Timestamp time.Time `json:"timestamp"`
}
type Precipitations []Precipitation
func (this Precipitation) GetId() (int64) {
return this.Id
}
func (this Precipitation) GetValue() (float32) {
return this.Value
}
func (this Precipitation) GetTimestamp() (time.Time) {
return this.Timestamp
}
|
package game_map
import (
"github.com/faiface/pixel/pixelgl"
)
// BlockUntilEvent is an event that blocks the queue until its predicate
// reports true.
type BlockUntilEvent struct {
	UntilFunc func() bool // returns true once the event may finish
}

// BlockUntilEventCreate wraps untilFunc in a BlockUntilEvent.
func BlockUntilEventCreate(untilFunc func() bool) *BlockUntilEvent {
	return &BlockUntilEvent{UntilFunc: untilFunc}
}

// Update is a no-op; the event only waits on its predicate.
func (b BlockUntilEvent) Update(dt float64) {
}

// IsBlocking reports true while the predicate is still false.
func (b BlockUntilEvent) IsBlocking() bool {
	return !b.UntilFunc()
}

// IsFinished reports true once the event no longer blocks.
func (b BlockUntilEvent) IsFinished() bool {
	return !b.IsBlocking()
}
// Render is a no-op; a BlockUntilEvent draws nothing.
func (b BlockUntilEvent) Render(win *pixelgl.Window) {
}
|
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"os"
"github.com/andschneider/goqtt"
"github.com/andschneider/goqtt/packets"
influxdb2 "github.com/influxdata/influxdb-client-go"
)
// config contains the necessary information to create clients for both
// Influx and goqtt. It also holds an InfluxDB client, which after a
// successful connection can be used to write data.
type config struct {
	// address of influxdb
	influxDB string
	// influxdb bucket name
	influxBucket string
	// influxdb client, populated in main after a successful connection
	influxClient influxdb2.Client
	// address of MQTT broker
	mqttServer string
	// MQTT topic to subscribe to
	mqttTopic string
}
// loadConfig loads in the required configuration from environment variables.
// If a variable isn't set the program will exit with an exit code of 1.
func loadConfig() *config {
	mustEnv := func(name string) string {
		value := os.Getenv(name)
		if value == "" {
			log.Printf("must set %s env variable\n", name)
			os.Exit(1)
		}
		return value
	}
	return &config{
		influxDB:     mustEnv("INFLUX_HOST"),
		influxBucket: mustEnv("INFLUX_BUCKET"),
		mqttServer:   mustEnv("MQTT_HOST"),
		mqttTopic:    mustEnv("MQTT_TOPIC"),
	}
}
// reading is a struct representing the expected sensor reading data in
// the MQTT message payload, which is expected to be JSON.
type reading struct {
	Moisture    int     `json:"moisture"`
	Temperature float32 `json:"temperature"`
	Sid         string  `json:"sid"` // sensor id, used as an Influx tag
}
// writeData writes the data to influx using the blocking API.
// Might switch to non-blocking later.
func (c *config) writeData(line string) {
	log.Printf("writing line: %s", line)
	// NOTE(review): the empty string is presumably the default
	// organization for this client — confirm against the influxdb2 docs.
	writeApi := c.influxClient.WriteAPIBlocking("", c.influxBucket)
	err := writeApi.WriteRecord(context.Background(), line)
	if err != nil {
		// Best effort: log and drop the point rather than crashing.
		log.Printf("write error: %s\n", err.Error())
	}
}
// handleMessage unmarshalls the MQTT message and saves the moisture and
// temperature readings to influx as line-protocol records.
func (c *config) handleMessage(m *packets.PublishPacket) {
	log.Printf("received message: '%s' from topic: '%s'\n", string(m.Message), m.Topic)
	// Example payload: {"moisture": 588, "temperature": 26.39, "sid": "sensor1"}
	r := reading{}
	if err := json.Unmarshal(m.Message, &r); err != nil {
		log.Printf("could not unmarshal json data: %v", err)
		// Bail out: writing the zero-value reading would store bogus
		// 0-moisture/0-temperature points tagged with an empty sensor id.
		return
	}
	// save data to influx
	moist := fmt.Sprintf("moisture,unit=capacitance,sensor=%s avg=%d", r.Sid, r.Moisture)
	c.writeData(moist)
	temp := fmt.Sprintf("temperature,unit=celsius,sensor=%s avg=%f", r.Sid, r.Temperature)
	c.writeData(temp)
}
// main connects to the MQTT broker and InfluxDB, subscribes to the
// configured topic, and forwards every received message to Influx.
func main() {
	cfg := loadConfig()
	// connect to MQTT broker
	log.Println("connecting to MQTT")
	mqttClient := goqtt.NewClient(cfg.mqttServer, goqtt.Topic(cfg.mqttTopic))
	err := mqttClient.Connect()
	if err != nil {
		log.Fatal(err)
	}
	defer mqttClient.Disconnect()
	// setup influx connection
	log.Println("connecting to Influx")
	cfg.influxClient = influxdb2.NewClient(cfg.influxDB, "")
	defer cfg.influxClient.Close()
	// Subscribe to MQTT topic
	err = mqttClient.Subscribe()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("waiting for messages...")
	// Blocking receive loop: runs until the process is killed.
	for {
		m, err := mqttClient.ReadLoop()
		if err != nil {
			// NOTE(review): a persistent read error (e.g. a dropped
			// connection) makes this loop spin and log continuously —
			// consider backing off or reconnecting.
			log.Printf("error: read loop: %v\n", err)
		}
		if m != nil {
			cfg.handleMessage(m)
		}
	}
}
|
package controlplane
import (
"context"
"fmt"
"time"
"github.com/cenkalti/backoff/v4"
"github.com/google/uuid"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"github.com/pomerium/pomerium/internal/log"
"github.com/pomerium/pomerium/pkg/grpc"
databrokerpb "github.com/pomerium/pomerium/pkg/grpc/databroker"
"github.com/pomerium/pomerium/pkg/protoutil"
)
// maxEvents caps how many event records the databroker retains per type.
const maxEvents = 50

// outboundGRPCConnection caches the outbound gRPC client connection so it
// is established once and then reused across calls.
var outboundGRPCConnection = new(grpc.CachedOutboundGRPClientConn)
// storeEvent persists a configuration event to the databroker. On first
// use for a given event type it caps the databroker's record capacity for
// that type at maxEvents so old events are evicted.
func (srv *Server) storeEvent(ctx context.Context, evt proto.Message) error {
	data := protoutil.NewAny(evt)
	client, err := srv.getDataBrokerClient(ctx)
	if err != nil {
		return err
	}
	// Set the per-type capacity only once per process.
	if !srv.haveSetCapacity[data.GetTypeUrl()] {
		_, err = client.SetOptions(ctx, &databrokerpb.SetOptionsRequest{
			Type: data.GetTypeUrl(),
			Options: &databrokerpb.Options{
				Capacity: proto.Uint64(maxEvents),
			},
		})
		if err != nil {
			return err
		}
		srv.haveSetCapacity[data.GetTypeUrl()] = true
	}
	// Reuse the event's own id when it exposes one so repeated stores of
	// the same event overwrite rather than duplicate the record.
	var id string
	if withID, ok := evt.(interface{ GetId() string }); ok {
		id = withID.GetId()
	} else {
		id = uuid.NewString()
	}
	_, err = client.Put(ctx, &databrokerpb.PutRequest{
		Records: []*databrokerpb.Record{{
			Type: data.GetTypeUrl(),
			Id:   id,
			Data: data,
		}},
	})
	return err
}
// getDataBrokerClient dials (or reuses) the cached outbound gRPC
// connection and returns a databroker client bound to it.
func (srv *Server) getDataBrokerClient(ctx context.Context) (databrokerpb.DataBrokerServiceClient, error) {
	cfg := srv.currentConfig.Load()
	sharedKey, err := cfg.Options.GetSharedKey()
	if err != nil {
		return nil, err
	}
	// NOTE(review): the connection is fetched with context.Background(),
	// presumably so the cached connection outlives the calling request —
	// confirm this is intentional.
	cc, err := outboundGRPCConnection.Get(context.Background(), &grpc.OutboundOptions{
		OutboundPort:   cfg.OutboundPort,
		InstallationID: cfg.Options.InstallationID,
		ServiceName:    cfg.Options.Services,
		SignedJWTKey:   sharedKey,
	})
	if err != nil {
		return nil, fmt.Errorf("controlplane: error creating databroker connection: %w", err)
	}
	// Best effort: give the connection up to 10s to become ready; the
	// readiness error is deliberately ignored and the client returned anyway.
	_ = grpc.WaitForReady(ctx, cc, time.Second*10)
	client := databrokerpb.NewDataBrokerServiceClient(cc)
	return client, nil
}
// withGRPCBackoff runs f. If an unavailable or resource exhausted error occurs, the request will be retried.
// All other errors return immediately.
func withGRPCBackoff(ctx context.Context, f func() error) {
	// MaxElapsedTime = 0 means retry forever (until ctx is cancelled).
	bo := backoff.NewExponentialBackOff()
	bo.MaxElapsedTime = 0
	for {
		err := f()
		switch {
		case err == nil:
			return
		case status.Code(err) == codes.Unavailable,
			status.Code(err) == codes.ResourceExhausted,
			status.Code(err) == codes.DeadlineExceeded:
			log.Error(ctx).Err(err).Msg("controlplane: error storing configuration event, retrying")
			// retry
		default:
			log.Error(ctx).Err(err).Msg("controlplane: error storing configuration event")
			return
		}
		// Sleep for the next backoff interval, or stop early if the
		// context is cancelled first.
		select {
		case <-ctx.Done():
			return
		case <-time.After(bo.NextBackOff()):
		}
	}
}
|
package routes
import "github.com/tedsuo/rata"
// Route names used to look up handlers and generate paths via rata.
const (
	Ping            = "PING"
	Env             = "ENV"
	InstanceIndex   = "INDEX"
	StartedAt       = "STARTED_AT"
	ListExperiments = "LIST_EXPERIMENTS"
	Experiments     = "EXPERIMENTS"
	Hello           = "HELLO"
	Exit            = "EXIT"
	MakeTmpFile     = "MAKE_TMP_FILE"
	DeleteTmpFile   = "DELETE_TMP_FILE"
)

// Routes maps each named route to its HTTP method and path pattern.
var Routes = rata.Routes{
	{Path: "/", Method: "GET", Name: Hello},
	{Path: "/ping", Method: "GET", Name: Ping},
	{Path: "/env", Method: "GET", Name: Env},
	{Path: "/started-at", Method: "GET", Name: StartedAt},
	{Path: "/index", Method: "GET", Name: InstanceIndex},
	{Path: "/file/:filename", Method: "POST", Name: MakeTmpFile},
	{Path: "/file/:filename", Method: "DELETE", Name: DeleteTmpFile},
	{Path: "/exit/:code", Method: "POST", Name: Exit},
	{Path: "/experiments", Method: "GET", Name: ListExperiments},
	{Path: "/experiments/:experiment", Method: "GET", Name: Experiments},
}
|
package auth
import (
"errors"
"fmt"
"net/http"
"os"
"strings"
"time"
"github.com/majid-cj/go-docker-mongo/util"
"github.com/dgrijalva/jwt-go"
)
// TokenInterface describes the JWT operations: creating an access/refresh
// token pair and extracting metadata from an incoming request's token.
type TokenInterface interface {
	CreateJWTToken(string, string) (*TokenDetail, error)
	ExtractJWTTokenMetadata(*http.Request) (*AccessDetail, error)
}

// Token is the default TokenInterface implementation.
type Token struct{}

// Compile-time assertion that *Token satisfies TokenInterface.
var _ TokenInterface = &Token{}

// NewToken returns a ready-to-use Token.
func NewToken() *Token {
	return &Token{}
}
// CreateJWTToken builds an access token (24h expiry) and a refresh token
// (7-day expiry) for the given user id and type, HMAC-signed with the
// ACCESS_SECRET and REFRESH_SECRET environment variables respectively.
func (token *Token) CreateJWTToken(userid, usertype string) (*TokenDetail, error) {
	tokenDetail := &TokenDetail{}
	tokenDetail.AccessTokenExpire = time.Now().Add(time.Hour * 24).Unix()
	tokenDetail.TokenUUID = util.UUID()
	tokenDetail.RefreshTokenExpire = time.Now().Add(time.Hour * 24 * 7).Unix()
	// Refresh UUID embeds the access UUID and user id.
	tokenDetail.RefreshUUID = fmt.Sprintf("%s++%s", tokenDetail.TokenUUID, userid)
	var err error
	accessTokenClaim := jwt.MapClaims{}
	accessTokenClaim["authorization"] = true
	accessTokenClaim["access_uuid"] = tokenDetail.TokenUUID
	accessTokenClaim["user_id"] = userid
	accessTokenClaim["user_type"] = usertype
	accessTokenClaim["exp"] = tokenDetail.AccessTokenExpire
	accessToken := jwt.NewWithClaims(jwt.SigningMethodHS256, accessTokenClaim)
	tokenDetail.AccessToken, err = accessToken.SignedString([]byte(os.Getenv("ACCESS_SECRET")))
	if err != nil {
		// NOTE(review): the underlying signing error is discarded; callers
		// only ever see the generic "general_error".
		return nil, errors.New("general_error")
	}
	refreshTokenClaim := jwt.MapClaims{}
	refreshTokenClaim["refresh_uuid"] = tokenDetail.RefreshUUID
	refreshTokenClaim["user_id"] = userid
	refreshTokenClaim["user_type"] = usertype
	refreshTokenClaim["exp"] = tokenDetail.RefreshTokenExpire
	refreshToken := jwt.NewWithClaims(jwt.SigningMethodHS256, refreshTokenClaim)
	tokenDetail.RefreshToken, err = refreshToken.SignedString([]byte(os.Getenv("REFRESH_SECRET")))
	if err != nil {
		return nil, errors.New("general_error")
	}
	return tokenDetail, nil
}
// ExtractJWTTokenMetadata verifies the request's JWT and pulls the access
// UUID and user id out of its claims.
func (token *Token) ExtractJWTTokenMetadata(request *http.Request) (*AccessDetail, error) {
	parsed, err := VerifyToken(request)
	if err != nil {
		return nil, errors.New("general_error")
	}
	claims, ok := parsed.Claims.(jwt.MapClaims)
	if !ok || !parsed.Valid {
		return nil, errors.New("general_error")
	}
	accessUUID, ok := claims["access_uuid"].(string)
	if !ok {
		return nil, errors.New("general_error")
	}
	userID, ok := claims["user_id"].(string)
	if !ok {
		return nil, errors.New("general_error")
	}
	return &AccessDetail{
		TokenUUID: accessUUID,
		UserID:    userID,
	}, nil
}
// ExtractMemberType verifies the request's JWT and returns its "user_type"
// claim.
func ExtractMemberType(request *http.Request) (string, error) {
	_token, err := VerifyToken(request)
	if err != nil {
		return "", err
	}
	claims, ok := _token.Claims.(jwt.MapClaims)
	if !ok || !_token.Valid {
		// BUG fix: this path previously returned a nil error together with
		// an empty member type; report an explicit error instead.
		return "", errors.New("error_parsing_data")
	}
	memberType, ok := claims["user_type"].(string)
	if !ok {
		// error getting token claims (member type)
		return "", errors.New("error_parsing_data")
	}
	return memberType, nil
}
// TokenValid returns nil when the request carries a well-formed, valid
// JWT, and an error otherwise.
func TokenValid(request *http.Request) error {
	token, err := VerifyToken(request)
	if err != nil {
		return err
	}
	// The old check asserted token.Claims to jwt.Claims, which always
	// succeeds (Claims is declared as that interface), so the combined
	// `!ok && !token.Valid` condition could never fire on the assertion.
	// Check validity directly instead.
	if !token.Valid {
		// invalid token
		return errors.New("general_error")
	}
	return nil
}
// VerifyToken parses the bearer token from the request and validates its
// signature against the ACCESS_SECRET environment variable. Only HMAC
// signing methods are accepted.
func VerifyToken(request *http.Request) (*jwt.Token, error) {
	_token := ExtractToken(request)
	token, err := jwt.Parse(_token, func(token *jwt.Token) (interface{}, error) {
		// Reject tokens signed with anything but HMAC (e.g. "none" or RSA).
		if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method %v", token.Header["alg"])
		}
		return []byte(os.Getenv("ACCESS_SECRET")), nil
	})
	if err != nil {
		// error parsing token
		return nil, errors.New("general_error")
	}
	return token, nil
}
// ExtractToken ...
func ExtractToken(request *http.Request) string {
bearer := request.Header.Get("Authorization")
token := strings.Split(bearer, " ")
if token[0] != "Bearer" {
return ""
}
if len(token) == 2 {
return token[1]
}
return ""
}
|
package apis
import "encoding/json"
// Api is the base type every API handler builds on.
type Api struct{}

// StatusCode serializes a status/message/data triple into a JSON string.
func (a Api) StatusCode(status int, message string, data interface{}) string {
	payload := map[string]interface{}{
		"status":  status,
		"message": message,
		"data":    data,
	}
	// Marshal of a map[string]interface{} cannot fail for these value
	// types, so the error is deliberately ignored (as before).
	encoded, _ := json.Marshal(payload)
	return string(encoded)
}
|
package main
import (
"delay"
"fmt"
"math/rand"
"rtos"
"display/eve"
"display/eve/ft80"
"stm32/evedci"
"stm32/hal/dma"
"stm32/hal/exti"
"stm32/hal/gpio"
"stm32/hal/irq"
"stm32/hal/spi"
"stm32/hal/system"
"stm32/hal/system/timer/systick"
)
// dci is the display-controller interface: an SPI link (with DMA) to the
// FT800 EVE chip, shared with the interrupt handlers below.
var dci *evedci.SPI

// init configures the system clock, GPIO pins, the SPI1 peripheral with
// its two DMA streams, and the EXTI interrupt line used by the EVE
// display controller, then builds the dci driver.
func init() {
	system.Setup168(8)
	systick.Setup(2e6)
	// GPIO
	gpio.A.EnableClock(true)
	csn := gpio.A.Pin(4)
	spiport, sck, miso, mosi := gpio.A, gpio.Pin5, gpio.Pin6, gpio.Pin7
	gpio.C.EnableClock(true)
	irqn := gpio.C.Pin(4)
	pdn := gpio.C.Pin(5)
	// EVE SPI
	spiport.Setup(sck|mosi, &gpio.Config{Mode: gpio.Alt, Speed: gpio.High})
	spiport.Setup(miso, &gpio.Config{Mode: gpio.AltIn})
	spiport.SetAltFunc(sck|miso|mosi, gpio.SPI1)
	d := dma.DMA2
	d.EnableClock(true)
	spidrv := spi.NewDriver(spi.SPI1, d.Channel(2, 3), d.Channel(3, 3))
	spidrv.P.EnableClock(true)
	rtos.IRQ(irq.SPI1).Enable()
	rtos.IRQ(irq.DMA2_Stream2).Enable()
	rtos.IRQ(irq.DMA2_Stream3).Enable()
	// EVE control lines
	cfg := gpio.Config{Mode: gpio.Out, Speed: gpio.High}
	pdn.Setup(&cfg)
	csn.Setup(&cfg)
	irqn.Setup(&gpio.Config{Mode: gpio.In})
	// Falling-edge interrupt from the EVE IRQ pin.
	irqline := exti.Lines(irqn.Mask())
	irqline.Connect(irqn.Port())
	irqline.EnableFallTrig()
	irqline.EnableIRQ()
	rtos.IRQ(irq.EXTI9_5).Enable()
	dci = evedci.NewSPI(spidrv, csn, pdn)
}
// curFreq estimates the FT800 clock frequency in Hz by reading REG_CLOCK
// twice roughly 8 ms apart and dividing the tick delta by the elapsed
// time.
func curFreq(lcd *eve.Driver) uint32 {
	clk1 := lcd.ReadUint32(ft80.REG_CLOCK)
	t1 := rtos.Nanosec()
	delay.Millisec(8)
	clk2 := lcd.ReadUint32(ft80.REG_CLOCK)
	t2 := rtos.Nanosec()
	return uint32(int64(clk2-clk1) * 1e9 / (t2 - t1))
}
// printFreq prints the measured FT800 clock frequency.
func printFreq(lcd *eve.Driver) {
	fmt.Printf("FT800 clock: %d Hz\n", curFreq(lcd))
}
// main brings the FT800 EVE display controller up over SPI, configures a
// WQVGA (480x272) panel, then draws a sequence of demo display lists
// (two points, a bitmap, widgets on top of 1000 bitmaps) and finally
// swaps display lists forever.
func main() {
	delay.Millisec(200)
	spibus := dci.SPI().P.Bus()
	baudrate := dci.SPI().P.Baudrate(dci.SPI().P.Conf())
	fmt.Printf(
		"\nSPI on %s (%d MHz).\nSPI speed: %d bps.\n",
		spibus, spibus.Clock()/1e6, baudrate,
	)
	// Wakeup from POWERDOWN to STANDBY.
	dci.SetPDN(0)
	delay.Millisec(20)
	dci.SetPDN(1)
	delay.Millisec(20) // Wait 20 ms for internal oscilator and PLL.
	lcd := eve.NewDriver(dci, 128)
	fmt.Print("Init:")
	// Wakeup from STANDBY to ACTIVE.
	lcd.HostCmd(ft80.ACTIVE, 0)
	/*
		// Simple triming algorithm if internal oscilator is used.
		for trim := uint32(0); trim <= 31; trim++ {
			lcd.W(ft80.REG_TRIM).W32(trim)
			if f := curFreq(lcd); f > 47040000 {
				lcd.W(ft80.REG_FREQUENCY).W32(f)
				break
			}
		}
	*/
	// Select external 12 MHz oscilator as clock source.
	lcd.HostCmd(ft80.CLKEXT, 0)
	if lcd.ReadByte(ft80.REG_ID) != 0x7c {
		fmt.Printf("Not EVE controller.\n")
		return
	}
	if lcd.ReadUint32(ft80.ROM_CHIPID) != 0x10008 {
		fmt.Printf("Not FT800 controller.\n")
		return
	}
	check(lcd.Err(false))
	printFreq(lcd)
	fmt.Print("Configure WQVGA (480x272) display:")
	// Backlight off while reconfiguring video timing.
	lcd.WriteByte(ft80.REG_PWM_DUTY, 0)
	const pclkDiv = 5 // Pixel Clock divider: pclk = mainClk / pclkDiv.
	// Refresh rate: pclk/(hcycle*vcycle) = 48 MHz/5/(548*292) = 59.99 Hz.
	lcd.W(ft80.REG_CSPREAD).Write32(
		1, // REG_CSPREAD (color signals spread, reduces EM noise)
		1, // REG_PCLK_POL (define active edge of PCLK)
		0, // REG_PCLK (temporary disable PCLK)
	)
	lcd.W(ft80.REG_HCYCLE).Write32(
		548, // REG_HCYCLE (total number of clocks per line)
		43,  // REG_HOFFSET (start of active line)
		480, // REG_HSIZE (active width of LCD display)
		0,   // REG_HSYNC0 (start of horizontal sync pulse)
		41,  // REG_HSYNC1 (end of horizontal sync pulse)
		292, // REG_VCYCLE (total number of lines per screen)
		12,  // REG_VOFFSET (start of active screen)
		272, // REG_VSIZE (active height of LCD display)
		0,   // REG_VSYNC0 (start of vertical sync pulse)
		10,  // REG_VSYNC1 (end of vertical sync pulse)
	)
	check(lcd.Err(false))
	fmt.Print("Write initial display list and enable display:")
	dl := lcd.DL(ft80.RAM_DL)
	dl.ClearColorRGB(0)
	dl.Clear(eve.CST)
	dl.Display()
	// Alternative, method:
	//
	// lcd.W(ft80.RAM_DL).Write32(
	// 	eve.CLEAR_COLOR_RGB,
	// 	eve.CLEAR|eve.CST,
	// 	eve.DISPLAY,
	// )
	lcd.WriteByte(ft80.REG_DLSWAP, eve.DLSWAP_FRAME)
	gpio := lcd.ReadByte(ft80.REG_GPIO)
	lcd.WriteByte(ft80.REG_GPIO, gpio|0x80)
	lcd.WriteByte(ft80.REG_PCLK, pclkDiv) // Enable PCLK.
	check(lcd.Err(false))
	printFreq(lcd)
	delay.Millisec(20) // Wait for new main clock.
	printFreq(lcd)
	// FT800CB-HY50B display is unstable with fast SPI and VCC <= 3.3V. If you
	// have problems please comment the line bellow or better desolder U1 and U2
	// (74LCX125 buffers) and short the U1:2-3,5-6,11-2, U2:2-3,5-6 pins.
	dci.SPI().P.SetConf(dci.SPI().P.Conf()&^spi.BR256 | dci.SPI().P.BR(30e6))
	fmt.Printf("SPI set to %d Hz\n", dci.SPI().P.Baudrate(dci.SPI().P.Conf()))
	lcd.WriteByte(ft80.REG_PWM_DUTY, 64)
	fmt.Print("Draw two points:")
	dl = lcd.DL(ft80.RAM_DL)
	dl.Clear(eve.CST)
	dl.Begin(eve.POINTS)
	dl.ColorRGB(eve.MakeRGB(161, 244, 97))
	dl.PointSize(100 * 16)
	dl.Vertex2f(200*16, 100*16)
	dl.ColorRGB(0xFF00FF)
	dl.PointSize(50 * 16)
	dl.Vertex2f(300*16, 200*16)
	dl.Display()
	lcd.WriteByte(ft80.REG_DLSWAP, eve.DLSWAP_FRAME)
	check(lcd.Err(false))
	delay.Millisec(1000)
	fmt.Print("Load bitmap:")
	lcd.W(ft80.RAM_G).Write(LenaFace[:])
	check(lcd.Err(false))
	fmt.Print("Draw widgets on top of 1000 bitmaps:")
	var rnd rand.XorShift64
	rnd.Seed(1)
	addr := ft80.RAM_DL
	dl = lcd.DL(addr)
	dl.BitmapHandle(1)
	dl.BitmapSource(ft80.RAM_G)
	dl.BitmapLayout(eve.RGB565, 80, 40)
	dl.BitmapSize(0, 40, 40)
	dl.Clear(eve.CST)
	dl.Begin(eve.BITMAPS)
	dl.ColorA(255)
	dl.BitmapHandle(1)
	// Scatter 1000 bitmap instances at pseudo-random screen positions.
	for i := 0; i < 1000; i++ {
		v := rnd.Uint64()
		vl := uint32(v)
		vh := uint32(v >> 32)
		dl.Vertex2f(int((vl%480-20)*16), int((vh%272-20)*16))
	}
	addr += dl.Close()
	lcd.WriteInt(ft80.REG_CMD_DL, addr)
	// Append widget commands to the coprocessor command buffer.
	n := lcd.ReadInt(ft80.REG_CMD_WRITE)
	ge := lcd.GE(ft80.RAM_CMD + n)
	ge.Button(170, 110, 140, 40, 23, 0, "Push me!")
	ge.Clock(440, 40, 30, 0, 21, 22, 42, 00)
	ge.Gauge(440, 232, 30, 0, 5, 5, 33, 100)
	ge.Keys(30, 242, 120, 20, 18, 0, "ABCDE")
	ge.Progress(180, 248, 100, 10, 0, 75, 100)
	ge.Scrollbar(10, 10, 100, 10, 0, 50, 25, 100)
	ge.Slider(10, 30, 100, 10, 0, 25, 100)
	ge.Dial(40, 80, 30, 0, 3000)
	ge.Toggle(25, 130, 30, 18, 0, true, "yes")
	ge.Display()
	ge.Swap()
	n += ge.Close()
	lcd.WriteInt(ft80.REG_CMD_WRITE, n)
	check(lcd.Err(false))
	for {
		delay.Millisec(1000)
		fmt.Print("Swap DL:")
		lcd.WriteByte(ft80.REG_DLSWAP, eve.DLSWAP_FRAME)
		check(lcd.Err(false))
	}
}
// check prints " OK" when err is nil; otherwise it prints the error and
// halts by spinning forever (embedded-style fatal stop).
func check(err error) {
	if err != nil {
		fmt.Printf(" %v\n", err)
		for {
		}
	}
	fmt.Printf(" OK\n")
}
// lcdSPIISR services the SPI1 interrupt for the EVE SPI driver.
func lcdSPIISR() {
	dci.SPI().ISR()
}

// lcdRxDMAISR services the DMA stream that receives from the display.
func lcdRxDMAISR() {
	dci.SPI().DMAISR(dci.SPI().RxDMA)
}

// lcdTxDMAISR services the DMA stream that transmits to the display.
func lcdTxDMAISR() {
	dci.SPI().DMAISR(dci.SPI().TxDMA)
}

// exti9_5ISR handles the falling-edge interrupt from the EVE IRQ pin.
func exti9_5ISR() {
	exti.Pending().ClearPending()
	dci.ISR()
}
// ISRs is the interrupt vector table wiring the SPI, DMA and EXTI
// interrupts to the handlers above; the emgo/c pragmas place it in the
// .ISRs linker section.
//emgo:const
//c:__attribute__((section(".ISRs")))
var ISRs = [...]func(){
	irq.SPI1:         lcdSPIISR,
	irq.DMA2_Stream2: lcdRxDMAISR,
	irq.DMA2_Stream3: lcdTxDMAISR,
	irq.EXTI9_5:      exti9_5ISR,
}
|
package pie
// Keys returns the keys in the map. All of the items will be unique.
//
// Due to Go's randomization of iterating maps the order is not deterministic.
func Keys[K comparable, V any](m map[K]V) []K {
// Avoid allocation
l := len(m)
if l == 0 {
return nil
}
i := 0
keys := make([]K, len(m))
for key := range m {
keys[i] = key
i++
}
return keys
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.