file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
gobuild.go | // Copyright 2009-2010 by Maurice Gilden. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
gobuild - build tool to automate building go programs/libraries
*/
package main
// import "fmt"
import (
"os"
"runtime"
"exec"
"flag"
path "path/filepath"
"strings"
"container/vector"
"./godata"
"./logger"
)
// ========== command line parameters ==========
var flagLibrary *bool = flag.Bool("lib", false, "build all packages as librarys")
var flagBuildAll *bool = flag.Bool("a", false, "build all executables")
var flagTesting *bool = flag.Bool("t", false, "(not yet implemented) Build all tests")
var flagSingleMainFile *bool = flag.Bool("single-main", false, "one main file per executable")
var flagIncludeInvisible *bool = flag.Bool("include-hidden", false, "Include hidden directories")
var flagOutputFileName *string = flag.String("o", "", "output file")
var flagQuietMode *bool = flag.Bool("q", false, "only print warnings/errors")
var flagQuieterMode *bool = flag.Bool("qq", false, "only print errors")
var flagVerboseMode *bool = flag.Bool("v", false, "print debug messages")
var flagIncludePaths *string = flag.String("I", "", "additional include paths")
var flagClean *bool = flag.Bool("clean", false, "delete all temporary files")
var flagRunExec *bool = flag.Bool("run", false, "run the created executable(s)")
var flagMatch *string = flag.String("match", "", "regular expression to select tests to run")
var flagBenchmarks *string = flag.String("benchmarks", "", "regular expression to select benchmarks to run")
var flagIgnore *string = flag.String("ignore", "", "ignore these files")
var flagKeepAFiles *bool = flag.Bool("keep-a-files", false, "don't automatically delete .a archive files")
// ========== global (package) variables ==========
var compilerBin string
var linkerBin string
var gopackBin string = "gopack"
var compileErrors bool = false
var linkErrors bool = false
var rootPath string
var rootPathPerm uint32
var objExt string
var outputDirPrefix string
var goPackages *godata.GoPackageContainer
// ========== goFileVisitor ==========
// this visitor looks for files with the extension .go
type goFileVisitor struct {
rootpath string
realpath string
symname string
}
// implementation of the Visitor interface for the file walker
func (v *goFileVisitor) VisitDir(dirpath string, d *os.FileInfo) bool {
if strings.LastIndex(dirpath, "/") < len(dirpath)-1 {
if dirpath[strings.LastIndex(dirpath, "/")+1] == '.' {
return *flagIncludeInvisible
}
}
return true
}
// implementation of the Visitor interface for the file walker
func (v *goFileVisitor) VisitFile(filepath string, d *os.FileInfo) {
// parse hidden directories?
if (filepath[strings.LastIndex(filepath, "/")+1] == '.') && (!*flagIncludeInvisible) {
return
}
// check if this is a symlink
if dir, err := os.Stat(filepath); err == nil {
if dir.FollowedSymlink && dir.IsDirectory() {
readFiles(filepath)
}
} else {
logger.Warn("%s\n", err)
}
// run .y files through goyacc first to create .go files
if strings.HasSuffix(filepath, ".y") {
filepath = goyacc(filepath)
}
if strings.HasSuffix(filepath, ".go") {
// include *_test.go files?
if strings.HasSuffix(filepath, "_test.go") && (!*flagTesting) {
return
}
cwd, _ := os.Getwd()
// fmt.Println(path.Join(cwd, *flagIgnore), filepath)
if filepath == path.Join(cwd, *flagIgnore) {
return
}
var gf godata.GoFile
if v.realpath != v.rootpath {
gf = godata.GoFile{v.symname + filepath[strings.LastIndex(filepath, "/"):],
nil, false, false, strings.HasSuffix(filepath, "_test.go"), nil, nil,
}
} else {
gf = godata.GoFile{filepath[len(v.realpath)+1 : len(filepath)], nil,
false, false, strings.HasSuffix(filepath, "_test.go"), nil, nil,
}
}
if gf.IsTestFile {
gf.TestFunctions = new(vector.Vector)
gf.BenchmarkFunctions = new(vector.Vector)
}
logger.Debug("Parsing file: %s\n", filepath)
gf.ParseFile(goPackages)
}
}
// ========== (local) functions ==========
// unused right now, may be used later for a target directory for .[568] files
func getObjDir() string {
return ""
// this doesn't work for 'import "./blub"' style imports
/*
if *flagTesting {
return "_test/";
}
return "_obj/";*/
}
/*
Returns an argv array in a single string with spaces dividing the entries.
*/
func getCommandline(argv []string) string {
var str string
for _, s := range argv {
str += s + " "
}
return str[0 : len(str)-1]
}
/*
readFiles reads all files with the .go extension and creates their AST.
It also creates a list of local imports (everything starting with ./)
and searches the main package files for the main function.
*/
func readFiles(rootpath string) {
var realpath, symname string
// path walker error channel
errorChannel := make(chan os.Error, 64)
// check if this is a symlink
if dir, err := os.Stat(rootpath); err == nil {
if dir.FollowedSymlink {
realpath, _ = os.Readlink(rootpath)
if realpath[0] != '/' {
realpath = rootpath[0:strings.LastIndex(rootpath, "/")+1] + realpath
}
symname = rootpath[len(rootPath)+1:]
} else {
realpath = rootpath
}
} else {
logger.Warn("%s\n", err)
}
// visitor for the path walker
visitor := &goFileVisitor{rootpath, realpath, symname}
path.Walk(visitor.realpath, visitor, errorChannel)
select {
case err := <-errorChannel:
logger.Error("Error while traversing directories: %s\n", err)
default:
}
}
/*
Creates a main package and _testmain.go file for building a test application.
*/
func createTestPackage() *godata.GoPackage {
var testFileSource string
var testArrays string
var testCalls string
var benchCalls string
var testGoFile *godata.GoFile
var testPack *godata.GoPackage
var testFile *os.File
var err os.Error
var pack *godata.GoPackage
testGoFile = new(godata.GoFile)
testPack = godata.NewGoPackage("main")
testGoFile.Filename = "_testmain.go"
testGoFile.Pack = testPack
testGoFile.HasMain = true
testGoFile.IsTestFile = true
testPack.OutputFile = "_testmain"
testPack.Files.Push(testGoFile)
// search for packages with _test.go files
for _, packName := range goPackages.GetPackageNames() {
pack, _ = goPackages.Get(packName)
if pack.HasTestFiles() {
testPack.Depends.Push(pack)
}
}
if testPack.Depends.Len() == 0 {
logger.Error("No _test.go files found.\n")
os.Exit(1)
}
// imports
testFileSource =
"package main\n" +
"\nimport \"testing\"\n" +
"import __regexp__ \"regexp\"\n" +
"import \"fmt\"\n"
// will create an array per package with all the Test* and Benchmark* functions
// tests/benchmarks will be done for each package seperatly so that running
// the _testmain program will result in multiple PASS (or fail) outputs.
for _, ipack := range *testPack.Depends {
var tmpStr string
var fnCount int = 0
pack := (ipack.(*godata.GoPackage))
// localPackVarName: contains the test functions, package name
// with '/' replaced by '_'
var localPackVarName string = strings.Map(func(rune int) int {
if rune == '/' {
return '_'
}
return rune
},pack.Name)
// localPackName: package name without path/parent directories
var localPackName string
if strings.LastIndex(pack.Name, "/") >= 0 {
localPackName = pack.Name[strings.LastIndex(pack.Name, "/")+1:]
} else {
localPackName = pack.Name
}
testFileSource += "import \"" + pack.Name + "\"\n"
tmpStr = "var test_" + localPackVarName + " = []testing.InternalTest {\n"
for _, igf := range *pack.Files {
logger.Debug("Test* from %s: \n", (igf.(*godata.GoFile)).Filename)
if (igf.(*godata.GoFile)).IsTestFile {
for _, istr := range *(igf.(*godata.GoFile)).TestFunctions {
tmpStr += "\ttesting.InternalTest{ \"" +
pack.Name + "." + istr.(string) +
"\", " +
localPackName + "." + istr.(string) +
" },\n"
fnCount++
}
}
}
tmpStr += "}\n\n"
if fnCount > 0 {
testCalls +=
"\tfmt.Println(\"Testing " + pack.Name + ":\");\n" +
"\ttesting.Main(__regexp__.MatchString, test_" + localPackVarName + ");\n"
testArrays += tmpStr
}
fnCount = 0
tmpStr = "var bench_" + localPackVarName + " = []testing.Benchmark {\n"
for _, igf := range *pack.Files {
if (igf.(*godata.GoFile)).IsTestFile {
for _, istr := range *(igf.(*godata.GoFile)).BenchmarkFunctions {
tmpStr += "\ttesting.Benchmark{ \"" +
pack.Name + "." + istr.(string) +
"\", " +
localPackName + "." + istr.(string) +
" },\n"
fnCount++
}
}
}
tmpStr += "}\n\n"
if fnCount > 0 {
benchCalls +=
"\tfmt.Println(\"Benchmarking " + pack.Name + ":\");\n" +
"\ttesting.RunBenchmarks(bench_" + localPackVarName + ");\n"
testArrays += tmpStr
}
}
testFileSource += "\n" + testArrays
// func main()
testFileSource +=
"\nfunc main() {\n" +
testCalls +
benchCalls +
"}\n"
testFile, err = os.Create(testGoFile.Filename)
if err != nil {
logger.Error("Could not create %s: %s\n", testGoFile.Filename, err)
os.Exit(1)
}
testFile.WriteString(testFileSource)
testFile.Close()
return testPack
}
/*
The compile method will run the compiler for every package it has found,
starting with the main package.
Returns true if compiled successfully.
*/
func compile(pack *godata.GoPackage) bool {
var argc int
var argv []string
var argvFilled int
var objDir = "" //outputDirPrefix + getObjDir();
// check for recursive dependencies
if pack.InProgress {
logger.Error("Found a recurisve dependency in %s. This is not supported in Go.\n", pack.Name)
pack.HasErrors = true
pack.InProgress = false
return false
}
pack.InProgress = true
// first compile all dependencies
for _, idep := range *pack.Depends {
dep := idep.(*godata.GoPackage)
if dep.HasErrors {
pack.HasErrors = true
pack.InProgress = false
return false
}
if !dep.Compiled &&
(dep.Type == godata.LOCAL_PACKAGE ||
dep.Type == godata.UNKNOWN_PACKAGE && dep.Files.Len() > 0) {
if !compile(dep) {
pack.HasErrors = true
pack.InProgress = false
return false
}
}
}
// cgo files (the ones which import "C") can't be compiled
// at the moment. They need to be compiled by hand into .a files.
if pack.HasCGOFiles() {
if pack.HasExistingAFile() {
pack.Compiled = true
pack.InProgress = false
return true
} else {
logger.Error("Can't compile cgo files. Please manually compile them.\n")
os.Exit(1)
}
}
// check if this package has any files (if not -> error)
if pack.Files.Len() == 0 && pack.Type == godata.LOCAL_PACKAGE {
logger.Error("No files found for package %s.\n", pack.Name)
os.Exit(1)
}
// if the outputDirPrefix points to something, subdirectories
// need to be created if they don't already exist
outputFile := objDir + pack.OutputFile
if strings.Index(outputFile, "/") != -1 {
path := outputFile[0:strings.LastIndex(outputFile, "/")]
dir, err := os.Stat(path)
if err != nil {
err = os.MkdirAll(path, rootPathPerm)
if err != nil {
logger.Error("Could not create output path %s: %s\n", path, err)
os.Exit(1)
}
} else if !dir.IsDirectory() {
logger.Error("File found in %s instead of a directory.\n", path)
os.Exit(1)
}
}
// before compiling, remove any .a file
// this is done because the compiler/linker looks for .a files
// before it looks for .[568] files
if !*flagKeepAFiles {
if err := os.Remove(outputFile + ".a"); err == nil {
logger.Debug("Removed file %s.a.\n", outputFile)
}
}
// construct compiler command line arguments
if pack.Name != "main" {
logger.Info("Compiling %s...\n", pack.Name)
} else {
logger.Info("Compiling %s (%s)...\n", pack.Name, pack.OutputFile)
}
argc = pack.Files.Len() + 3
if *flagIncludePaths != "" {
argc += 2 * (strings.Count(*flagIncludePaths, ",") + 1)
}
if pack.NeedsLocalSearchPath() || objDir != "" {
argc += 2
}
if pack.Name == "main" {
argc += 2
}
argv = make([]string, argc*2)
argv[argvFilled] = compilerBin
argvFilled++
argv[argvFilled] = "-o"
argvFilled++
argv[argvFilled] = outputFile + objExt
argvFilled++
if *flagIncludePaths != "" {
for _, includePath := range strings.Split(*flagIncludePaths, ",", -1) {
argv[argvFilled] = "-I"
argvFilled++
argv[argvFilled] = includePath
argvFilled++
}
}
// for _, arg := range argv {
// logger.Info(arg)
// logger.Info(" ")
// }
// logger.Info("\n")
if pack.NeedsLocalSearchPath() || objDir != "" {
argv[argvFilled] = "-I"
argvFilled++
if objDir != "" {
argv[argvFilled] = objDir
} else {
argv[argvFilled] = "."
}
argvFilled++
}
if pack.Name == "main" {
argv[argvFilled] = "-I"
argvFilled++
argv[argvFilled] = "."
argvFilled++
}
for i := 0; i < pack.Files.Len(); i++ {
gf := pack.Files.At(i).(*godata.GoFile)
argv[argvFilled] = gf.Filename
argvFilled++
}
logger.Info(" %s\n", getCommandline(argv[0:argvFilled]))
cmd, err := exec.Run(compilerBin, argv[0:argvFilled], os.Environ(), rootPath,
exec.DevNull, exec.PassThrough, exec.PassThrough)
if err != nil {
logger.Error("%s\n", err)
os.Exit(1)
}
waitmsg, err := cmd.Wait(0)
if err != nil {
logger.Error("Compiler execution error (%s), aborting compilation.\n", err)
os.Exit(1)
}
if waitmsg.ExitStatus() != 0 {
pack.HasErrors = true
pack.InProgress = false
return false
}
// it should now be compiled
pack.Compiled = true
pack.InProgress = false
return true
}
/*
Calls the linker for the main file, which should be called "main.(5|6|8)".
*/
func link(pack *godata.GoPackage) bool {
var argc int
var argv []string
var argvFilled int
var objDir string = "" //outputDirPrefix + getObjDir();
// build the command line for the linker
argc = 4
if *flagIncludePaths != "" {
argc += 2
}
if pack.NeedsLocalSearchPath() {
argc += 2
}
if pack.Name == "main" {
argc += 2
}
argv = make([]string, argc*3)
argv[argvFilled] = linkerBin
argvFilled++
argv[argvFilled] = "-o"
argvFilled++
argv[argvFilled] = outputDirPrefix + pack.OutputFile
argvFilled++
if *flagIncludePaths != "" {
for _, v := range strings.Split(*flagIncludePaths, ",", -1) {
argv[argvFilled] = "-L"
argvFilled++
argv[argvFilled] = v
argvFilled++
}
}
// if pack.NeedsLocalSearchPath() {
// argv[argvFilled] = "-L"
// argvFilled++
// if objDir != "" {
// argv[argvFilled] = objDir
// } else {
// argv[argvFilled] = "."
// }
// argvFilled++
// }
if pack.Name == "main" {
argv[argvFilled] = "-L"
argvFilled++
argv[argvFilled] = "."
argvFilled++
}
argv[argvFilled] = objDir + pack.OutputFile + objExt
argvFilled++
logger.Info("Linking %s...\n", argv[2])
logger.Info(" %s\n\n", getCommandline(argv))
cmd, err := exec.Run(linkerBin, argv[0:argvFilled], os.Environ(), rootPath,
exec.DevNull, exec.PassThrough, exec.PassThrough)
if err != nil {
logger.Error("%s\n", err)
os.Exit(1)
}
waitmsg, err := cmd.Wait(0)
if err != nil {
logger.Error("Linker execution error (%s), aborting compilation.\n", err)
os.Exit(1)
}
if waitmsg.ExitStatus() != 0 {
logger.Error("Linker returned with errors, aborting.\n")
return false
}
return true
}
/*
Executes goyacc for a single .y file. The new .go files is prefixed with
an underscore and returned as a string for further use.
*/
func goyacc(filepath string) string {
// construct output file path
var outFilepath string
l_idx := strings.LastIndex(filepath, "/")
if l_idx >= 0 {
outFilepath = filepath[0:l_idx+1] +
"_" + filepath[l_idx+1:len(filepath)-1] + "go"
} else {
outFilepath = "_" + filepath[0:len(filepath)-1] + "go"
}
goyaccPath, err := exec.LookPath("goyacc")
if err != nil {
logger.Error("%s\n", err)
os.Exit(1)
}
logger.Info("Parsing goyacc file %s.\n", filepath)
argv := []string{goyaccPath, "-o", outFilepath, filepath}
logger.Debug("%s\n", argv)
cmd, err := exec.Run(argv[0], argv, os.Environ(), rootPath,
exec.PassThrough, exec.PassThrough, exec.PassThrough)
if err != nil {
logger.Error("%s\n", err)
os.Exit(1)
}
waitmsg, err := cmd.Wait(0)
if err != nil {
logger.Error("Executing goyacc failed: %s.\n", err)
os.Exit(1)
}
if waitmsg.ExitStatus() != 0 {
os.Exit(waitmsg.ExitStatus())
}
return outFilepath
}
/*
Executes something. Used for the -run command line option.
*/
func runExec(argv []string) {
logger.Info("Executing %s:\n", argv[0])
logger.Debug("%s\n", getCommandline(argv))
cmd, err := exec.Run(argv[0], argv, os.Environ(), rootPath,
exec.PassThrough, exec.PassThrough, exec.PassThrough)
if err != nil {
logger.Error("%s\n", err)
os.Exit(1)
}
waitmsg, err := cmd.Wait(0)
if err != nil {
logger.Error("Executing %s failed: %s.\n", argv[0], err)
os.Exit(1)
}
if waitmsg.ExitStatus() != 0 {
os.Exit(waitmsg.ExitStatus())
}
}
/*
Creates a .a file for a single GoPackage
*/
func packLib(pack *godata.GoPackage) {
var objDir string = "" //outputDirPrefix + getObjDir();
// ignore packages that need to be build manually (like cgo packages)
if pack.HasCGOFiles() {
logger.Debug("Skipped %s.a because it can't be build with gobuild.\n", pack.Name)
return
}
logger.Info("Creating %s.a...\n", pack.Name)
argv := []string{
gopackBin,
"crg", // create new go archive
outputDirPrefix + pack.Name + ".a",
objDir + pack.Name + objExt,
}
logger.Debug("%s\n", getCommandline(argv))
cmd, err := exec.Run(gopackBin, argv, os.Environ(), rootPath,
exec.DevNull, exec.PassThrough, exec.PassThrough)
if err != nil {
logger.Error("%s\n", err)
os.Exit(1)
}
waitmsg, err := cmd.Wait(0)
if err != nil {
logger.Error("gopack execution error (%s), aborting.\n", err)
os.Exit(1)
}
if waitmsg.ExitStatus() != 0 {
logger.Error("gopack returned with errors, aborting.\n")
os.Exit(waitmsg.ExitStatus())
}
os.Remove(objDir + pack.Name + objExt)
}
/*
Build an executable from the given sources.
*/
func buildExecutable() {
var executables []string
var execFilled int
// check if there's a main package:
if goPackages.GetMainCount() == 0 {
logger.Error("No main package found.\n")
os.Exit(1)
}
// multiple main, no command file from command line and no -a -> error
if (goPackages.GetMainCount() > 1) && (flag.NArg() == 0) && !*flagBuildAll {
logger.Error("Multiple files found with main function.\n")
logger.ErrorContinue("Please specify one or more as command line parameter or\n")
logger.ErrorContinue("run gobuild with -a. Available main files are:\n")
for _, fn := range goPackages.GetMainFilenames() {
logger.ErrorContinue("\t %s\n", fn)
}
os.Exit(1)
}
// compile all needed packages
if flag.NArg() > 0 {
if *flagRunExec {
executables = make([]string, flag.NArg())
}
for _, fn := range flag.Args() {
mainPack, exists := goPackages.GetMain(fn, !*flagSingleMainFile)
if !exists {
logger.Error("File %s not found.\n", fn)
return // or os.Exit?
}
if compile(mainPack) {
// link everything together
if link(mainPack) {
if *flagRunExec {
executables[execFilled] = outputDirPrefix + mainPack.OutputFile
execFilled++
}
} else {
linkErrors = true
}
} else {
logger.Error("Can't link executable because of compile errors.\n")
compileErrors = true
}
}
} else {
if *flagRunExec {
executables = make([]string, goPackages.GetMainCount())
}
for _, mainPack := range goPackages.GetMainPackages(!*flagSingleMainFile) {
if compile(mainPack) {
if link(mainPack) {
if *flagRunExec {
executables[execFilled] = outputDirPrefix + mainPack.OutputFile
execFilled++
}
} else {
linkErrors = true
}
} else {
logger.Error("Can't link executable because of compile errors.\n")
compileErrors = true
}
}
}
if *flagRunExec && !linkErrors && !compileErrors {
for i := 0; i < execFilled; i++ {
runExec([]string{executables[i]})
}
}
}
/*
Build library files (.a) for all packages or the ones given though
command line parameters.
*/
func buildLibrary() {
var packNames []string
var pack *godata.GoPackage
var exists bool
if goPackages.GetPackageCount() == 0 {
logger.Warn("No packages found to build.\n")
return
}
// check for there is at least one package that can be compiled
var hasNoCompilablePacks bool = true
for _, packName := range goPackages.GetPackageNames() {
pack, _ := goPackages.Get(packName)
if pack.Name == "main" {
continue
}
if pack.Files.Len() > 0 && !pack.HasCGOFiles() {
hasNoCompilablePacks = false
break
}
}
if hasNoCompilablePacks {
logger.Warn("No packages found that could be compiled by gobuild.\n")
compileErrors = true
return
}
// check for command line parameters
if flag.NArg() > 0 {
packNames = flag.Args()
} else {
packNames = goPackages.GetPackageNames()
}
// loop over all packages, compile them and build a .a file
for _, name := range packNames {
if name == "main" {
continue // don't make this into a library
}
pack, exists = goPackages.Get(name)
if !exists {
logger.Error("Package %s doesn't exist.\n", name)
continue // or exit?
}
// don't compile remote packages or packages without files
if pack.Type == godata.REMOTE_PACKAGE || pack.Files.Len() == 0 {
continue
}
// these packages come from invalid/unhandled imports
if pack.Files.Len() == 0 {
logger.Debug("Skipping package %s, no files to compile.\n", pack.Name)
continue
}
if !pack.Compiled && !pack.HasErrors {
compileErrors = !compile(pack) || compileErrors
}
if pack.HasErrors {
logger.Error("Can't create library because of compile errors.\n")
compileErrors = true
} else {
packLib(pack)
}
}
} | case -benchmarks/-match/-v are also passed on.
*/
func buildTestExecutable() {
// this will create a file called "_testmain.go"
testPack := createTestPackage()
if compile(testPack) {
linkErrors = !link(testPack) || linkErrors
} else {
logger.Error("Can't link executable because of compile errors.\n")
compileErrors = true
}
// delete temporary _testmain.go file
// os.Remove("_testmain.go")
if compileErrors || linkErrors {
return
}
if *flagRunExec {
var argvFilled int
var argc int = 1
if *flagMatch != "" {
argc += 2
}
if *flagBenchmarks != "" {
argc += 2
}
if *flagVerboseMode {
argc++
}
argv := make([]string, argc)
argv[argvFilled] = outputDirPrefix + testPack.OutputFile
argvFilled++
if *flagMatch != "" {
argv[argvFilled] = "-match"
argvFilled++
argv[argvFilled] = *flagMatch
argvFilled++
}
if *flagBenchmarks != "" {
argv[argvFilled] = "-benchmarks"
argvFilled++
argv[argvFilled] = *flagBenchmarks
argvFilled++
}
if *flagVerboseMode {
argv[argvFilled] = "-v"
argvFilled++
}
runExec(argv)
}
}
/*
This function does exactly the same as "make clean".
*/
func clean() {
bashBin, err := exec.LookPath("bash")
if err != nil {
logger.Error("Need bash to clean.\n")
os.Exit(127)
}
argv := []string{bashBin, "-c", "commandhere"}
if *flagVerboseMode {
argv[2] = "rm -rfv *.[568]"
} else {
argv[2] = "rm -rf *.[568]"
}
logger.Info("Running: %v\n", argv[2:])
cmd, err := exec.Run(bashBin, argv, os.Environ(), rootPath,
exec.DevNull, exec.PassThrough, exec.PassThrough)
if err != nil {
logger.Error("%s\n", err)
os.Exit(1)
}
waitmsg, err := cmd.Wait(0)
if err != nil {
logger.Error("Couldn't delete files: %s\n", err)
os.Exit(1)
}
if waitmsg.ExitStatus() != 0 {
logger.Error("rm returned with errors.\n")
os.Exit(waitmsg.ExitStatus())
}
}
// Returns the bigger number.
func max(a, b int) int {
if a > b {
return a
}
return b
}
/*
Entry point. Used for setting some variables and parsing the command line.
*/
func main() {
var err os.Error
var rootPathDir *os.FileInfo
// parse command line arguments
flag.Parse()
if *flagQuieterMode {
logger.SetVerbosityLevel(logger.ERROR)
} else if *flagQuietMode {
logger.SetVerbosityLevel(logger.WARN)
} else if *flagVerboseMode {
logger.SetVerbosityLevel(logger.DEBUG)
}
if *flagClean {
clean()
os.Exit(0)
}
// get the compiler/linker executable
var goarch string
goarch = os.Getenv("GOARCH")
if goarch == "" {
goarch = runtime.GOARCH
}
switch goarch {
case "amd64":
compilerBin = "6g"
linkerBin = "6l"
objExt = ".6"
case "386":
compilerBin = "8g"
linkerBin = "8l"
objExt = ".8"
case "arm":
compilerBin = "5g"
linkerBin = "5l"
objExt = ".5"
default:
logger.Error("Unsupported architecture: " + goarch + "\n")
os.Exit(1)
}
// get the complete path to the compiler/linker
compilerBin, err = exec.LookPath(compilerBin)
if err != nil {
logger.Error("Could not find compiler %s: %s\n", compilerBin, err)
os.Exit(127)
}
linkerBin, err = exec.LookPath(linkerBin)
if err != nil {
logger.Error("Could not find linker %s: %s\n", linkerBin, err)
os.Exit(127)
}
gopackBin, err = exec.LookPath(gopackBin)
if err != nil {
logger.Error("Could not find gopack executable (%s): %s\n", gopackBin, err)
os.Exit(127)
}
// get the root path from where the application was called
// and its permissions (used for subdirectories)
if rootPath, err = os.Getwd(); err != nil {
logger.Error("Could not get the root path: %s\n", err)
os.Exit(1)
}
if rootPathDir, err = os.Stat(rootPath); err != nil {
logger.Error("Could not read the root path: %s\n", err)
os.Exit(1)
}
rootPathPerm = rootPathDir.Permission()
// create the package container
goPackages = godata.NewGoPackageContainer()
// check if -o with path
if *flagOutputFileName != "" {
dir, err := os.Stat(*flagOutputFileName)
if err != nil {
// doesn't exist? try to make it if it's a path
if (*flagOutputFileName)[len(*flagOutputFileName)-1] == '/' {
err = os.MkdirAll(*flagOutputFileName, rootPathPerm)
if err == nil {
outputDirPrefix = *flagOutputFileName
}
} else {
godata.DefaultOutputFileName = *flagOutputFileName
}
} else if dir.IsDirectory() {
if (*flagOutputFileName)[len(*flagOutputFileName)-1] == '/' {
outputDirPrefix = *flagOutputFileName
} else {
outputDirPrefix = *flagOutputFileName + "/"
}
} else {
godata.DefaultOutputFileName = *flagOutputFileName
}
// make path to output file
if outputDirPrefix == "" && strings.Index(*flagOutputFileName, "/") != -1 {
err = os.MkdirAll((*flagOutputFileName)[0:strings.LastIndex(*flagOutputFileName, "/")], rootPathPerm)
if err != nil {
logger.Error("Could not create %s: %s\n",
(*flagOutputFileName)[0:strings.LastIndex(*flagOutputFileName, "/")],
err)
}
}
}
// read all go files in the current path + subdirectories and parse them
logger.Info("Parsing go file(s)...\n")
readFiles(rootPath)
if *flagTesting {
buildTestExecutable()
} else if *flagLibrary {
buildLibrary()
} else {
buildExecutable()
}
// make sure exit status is != 0 if there were compiler/linker errors
if compileErrors || linkErrors {
os.Exit(1)
}
} |
/*
Creates a new file called _testmain.go and compiles/links it to _testmain.
If the -run command line option is given it will also run the tests. In this | random_line_split |
gobuild.go | // Copyright 2009-2010 by Maurice Gilden. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
gobuild - build tool to automate building go programs/libraries
*/
package main
// import "fmt"
import (
"os"
"runtime"
"exec"
"flag"
path "path/filepath"
"strings"
"container/vector"
"./godata"
"./logger"
)
// ========== command line parameters ==========
var flagLibrary *bool = flag.Bool("lib", false, "build all packages as librarys")
var flagBuildAll *bool = flag.Bool("a", false, "build all executables")
var flagTesting *bool = flag.Bool("t", false, "(not yet implemented) Build all tests")
var flagSingleMainFile *bool = flag.Bool("single-main", false, "one main file per executable")
var flagIncludeInvisible *bool = flag.Bool("include-hidden", false, "Include hidden directories")
var flagOutputFileName *string = flag.String("o", "", "output file")
var flagQuietMode *bool = flag.Bool("q", false, "only print warnings/errors")
var flagQuieterMode *bool = flag.Bool("qq", false, "only print errors")
var flagVerboseMode *bool = flag.Bool("v", false, "print debug messages")
var flagIncludePaths *string = flag.String("I", "", "additional include paths")
var flagClean *bool = flag.Bool("clean", false, "delete all temporary files")
var flagRunExec *bool = flag.Bool("run", false, "run the created executable(s)")
var flagMatch *string = flag.String("match", "", "regular expression to select tests to run")
var flagBenchmarks *string = flag.String("benchmarks", "", "regular expression to select benchmarks to run")
var flagIgnore *string = flag.String("ignore", "", "ignore these files")
var flagKeepAFiles *bool = flag.Bool("keep-a-files", false, "don't automatically delete .a archive files")
// ========== global (package) variables ==========
var compilerBin string
var linkerBin string
var gopackBin string = "gopack"
var compileErrors bool = false
var linkErrors bool = false
var rootPath string
var rootPathPerm uint32
var objExt string
var outputDirPrefix string
var goPackages *godata.GoPackageContainer
// ========== goFileVisitor ==========
// this visitor looks for files with the extension .go
type goFileVisitor struct {
rootpath string
realpath string
symname string
}
// implementation of the Visitor interface for the file walker
func (v *goFileVisitor) VisitDir(dirpath string, d *os.FileInfo) bool {
if strings.LastIndex(dirpath, "/") < len(dirpath)-1 {
if dirpath[strings.LastIndex(dirpath, "/")+1] == '.' {
return *flagIncludeInvisible
}
}
return true
}
// implementation of the Visitor interface for the file walker
func (v *goFileVisitor) VisitFile(filepath string, d *os.FileInfo) {
// parse hidden directories?
if (filepath[strings.LastIndex(filepath, "/")+1] == '.') && (!*flagIncludeInvisible) {
return
}
// check if this is a symlink
if dir, err := os.Stat(filepath); err == nil {
if dir.FollowedSymlink && dir.IsDirectory() {
readFiles(filepath)
}
} else {
logger.Warn("%s\n", err)
}
// run .y files through goyacc first to create .go files
if strings.HasSuffix(filepath, ".y") {
filepath = goyacc(filepath)
}
if strings.HasSuffix(filepath, ".go") {
// include *_test.go files?
if strings.HasSuffix(filepath, "_test.go") && (!*flagTesting) {
return
}
cwd, _ := os.Getwd()
// fmt.Println(path.Join(cwd, *flagIgnore), filepath)
if filepath == path.Join(cwd, *flagIgnore) {
return
}
var gf godata.GoFile
if v.realpath != v.rootpath {
gf = godata.GoFile{v.symname + filepath[strings.LastIndex(filepath, "/"):],
nil, false, false, strings.HasSuffix(filepath, "_test.go"), nil, nil,
}
} else {
gf = godata.GoFile{filepath[len(v.realpath)+1 : len(filepath)], nil,
false, false, strings.HasSuffix(filepath, "_test.go"), nil, nil,
}
}
if gf.IsTestFile {
gf.TestFunctions = new(vector.Vector)
gf.BenchmarkFunctions = new(vector.Vector)
}
logger.Debug("Parsing file: %s\n", filepath)
gf.ParseFile(goPackages)
}
}
// ========== (local) functions ==========
// unused right now, may be used later for a target directory for .[568] files
func getObjDir() string {
return ""
// this doesn't work for 'import "./blub"' style imports
/*
if *flagTesting {
return "_test/";
}
return "_obj/";*/
}
/*
Returns an argv array in a single string with spaces dividing the entries.
*/
func getCommandline(argv []string) string {
var str string
for _, s := range argv {
str += s + " "
}
return str[0 : len(str)-1]
}
/*
readFiles reads all files with the .go extension and creates their AST.
It also creates a list of local imports (everything starting with ./)
and searches the main package files for the main function.
*/
func readFiles(rootpath string) {
var realpath, symname string
// path walker error channel
errorChannel := make(chan os.Error, 64)
// check if this is a symlink
if dir, err := os.Stat(rootpath); err == nil {
if dir.FollowedSymlink {
realpath, _ = os.Readlink(rootpath)
if realpath[0] != '/' {
realpath = rootpath[0:strings.LastIndex(rootpath, "/")+1] + realpath
}
symname = rootpath[len(rootPath)+1:]
} else {
realpath = rootpath
}
} else {
logger.Warn("%s\n", err)
}
// visitor for the path walker
visitor := &goFileVisitor{rootpath, realpath, symname}
path.Walk(visitor.realpath, visitor, errorChannel)
select {
case err := <-errorChannel:
logger.Error("Error while traversing directories: %s\n", err)
default:
}
}
/*
Creates a main package and _testmain.go file for building a test application.
*/
func createTestPackage() *godata.GoPackage {
var testFileSource string
var testArrays string
var testCalls string
var benchCalls string
var testGoFile *godata.GoFile
var testPack *godata.GoPackage
var testFile *os.File
var err os.Error
var pack *godata.GoPackage
testGoFile = new(godata.GoFile)
testPack = godata.NewGoPackage("main")
testGoFile.Filename = "_testmain.go"
testGoFile.Pack = testPack
testGoFile.HasMain = true
testGoFile.IsTestFile = true
testPack.OutputFile = "_testmain"
testPack.Files.Push(testGoFile)
// search for packages with _test.go files
for _, packName := range goPackages.GetPackageNames() {
pack, _ = goPackages.Get(packName)
if pack.HasTestFiles() {
testPack.Depends.Push(pack)
}
}
if testPack.Depends.Len() == 0 {
logger.Error("No _test.go files found.\n")
os.Exit(1)
}
// imports
testFileSource =
"package main\n" +
"\nimport \"testing\"\n" +
"import __regexp__ \"regexp\"\n" +
"import \"fmt\"\n"
// will create an array per package with all the Test* and Benchmark* functions
// tests/benchmarks will be done for each package seperatly so that running
// the _testmain program will result in multiple PASS (or fail) outputs.
for _, ipack := range *testPack.Depends {
var tmpStr string
var fnCount int = 0
pack := (ipack.(*godata.GoPackage))
// localPackVarName: contains the test functions, package name
// with '/' replaced by '_'
var localPackVarName string = strings.Map(func(rune int) int {
if rune == '/' {
return '_'
}
return rune
},pack.Name)
// localPackName: package name without path/parent directories
var localPackName string
if strings.LastIndex(pack.Name, "/") >= 0 {
localPackName = pack.Name[strings.LastIndex(pack.Name, "/")+1:]
} else {
localPackName = pack.Name
}
testFileSource += "import \"" + pack.Name + "\"\n"
tmpStr = "var test_" + localPackVarName + " = []testing.InternalTest {\n"
for _, igf := range *pack.Files {
logger.Debug("Test* from %s: \n", (igf.(*godata.GoFile)).Filename)
if (igf.(*godata.GoFile)).IsTestFile {
for _, istr := range *(igf.(*godata.GoFile)).TestFunctions {
tmpStr += "\ttesting.InternalTest{ \"" +
pack.Name + "." + istr.(string) +
"\", " +
localPackName + "." + istr.(string) +
" },\n"
fnCount++
}
}
}
tmpStr += "}\n\n"
if fnCount > 0 {
testCalls +=
"\tfmt.Println(\"Testing " + pack.Name + ":\");\n" +
"\ttesting.Main(__regexp__.MatchString, test_" + localPackVarName + ");\n"
testArrays += tmpStr
}
fnCount = 0
tmpStr = "var bench_" + localPackVarName + " = []testing.Benchmark {\n"
for _, igf := range *pack.Files {
if (igf.(*godata.GoFile)).IsTestFile {
for _, istr := range *(igf.(*godata.GoFile)).BenchmarkFunctions {
tmpStr += "\ttesting.Benchmark{ \"" +
pack.Name + "." + istr.(string) +
"\", " +
localPackName + "." + istr.(string) +
" },\n"
fnCount++
}
}
}
tmpStr += "}\n\n"
if fnCount > 0 {
benchCalls +=
"\tfmt.Println(\"Benchmarking " + pack.Name + ":\");\n" +
"\ttesting.RunBenchmarks(bench_" + localPackVarName + ");\n"
testArrays += tmpStr
}
}
testFileSource += "\n" + testArrays
// func main()
testFileSource +=
"\nfunc main() {\n" +
testCalls +
benchCalls +
"}\n"
testFile, err = os.Create(testGoFile.Filename)
if err != nil {
logger.Error("Could not create %s: %s\n", testGoFile.Filename, err)
os.Exit(1)
}
testFile.WriteString(testFileSource)
testFile.Close()
return testPack
}
/*
The compile method will run the compiler for every package it has found,
starting with the main package.
Returns true if compiled successfully.
*/
func compile(pack *godata.GoPackage) bool {
var argc int
var argv []string
var argvFilled int
var objDir = "" //outputDirPrefix + getObjDir();
// check for recursive dependencies
if pack.InProgress {
logger.Error("Found a recurisve dependency in %s. This is not supported in Go.\n", pack.Name)
pack.HasErrors = true
pack.InProgress = false
return false
}
pack.InProgress = true
// first compile all dependencies
for _, idep := range *pack.Depends {
dep := idep.(*godata.GoPackage)
if dep.HasErrors {
pack.HasErrors = true
pack.InProgress = false
return false
}
if !dep.Compiled &&
(dep.Type == godata.LOCAL_PACKAGE ||
dep.Type == godata.UNKNOWN_PACKAGE && dep.Files.Len() > 0) {
if !compile(dep) {
pack.HasErrors = true
pack.InProgress = false
return false
}
}
}
// cgo files (the ones which import "C") can't be compiled
// at the moment. They need to be compiled by hand into .a files.
if pack.HasCGOFiles() {
if pack.HasExistingAFile() {
pack.Compiled = true
pack.InProgress = false
return true
} else {
logger.Error("Can't compile cgo files. Please manually compile them.\n")
os.Exit(1)
}
}
// check if this package has any files (if not -> error)
if pack.Files.Len() == 0 && pack.Type == godata.LOCAL_PACKAGE {
logger.Error("No files found for package %s.\n", pack.Name)
os.Exit(1)
}
// if the outputDirPrefix points to something, subdirectories
// need to be created if they don't already exist
outputFile := objDir + pack.OutputFile
if strings.Index(outputFile, "/") != -1 {
path := outputFile[0:strings.LastIndex(outputFile, "/")]
dir, err := os.Stat(path)
if err != nil {
err = os.MkdirAll(path, rootPathPerm)
if err != nil {
logger.Error("Could not create output path %s: %s\n", path, err)
os.Exit(1)
}
} else if !dir.IsDirectory() {
logger.Error("File found in %s instead of a directory.\n", path)
os.Exit(1)
}
}
// before compiling, remove any .a file
// this is done because the compiler/linker looks for .a files
// before it looks for .[568] files
if !*flagKeepAFiles {
if err := os.Remove(outputFile + ".a"); err == nil {
logger.Debug("Removed file %s.a.\n", outputFile)
}
}
// construct compiler command line arguments
if pack.Name != "main" {
logger.Info("Compiling %s...\n", pack.Name)
} else {
logger.Info("Compiling %s (%s)...\n", pack.Name, pack.OutputFile)
}
argc = pack.Files.Len() + 3
if *flagIncludePaths != "" {
argc += 2 * (strings.Count(*flagIncludePaths, ",") + 1)
}
if pack.NeedsLocalSearchPath() || objDir != "" {
argc += 2
}
if pack.Name == "main" {
argc += 2
}
argv = make([]string, argc*2)
argv[argvFilled] = compilerBin
argvFilled++
argv[argvFilled] = "-o"
argvFilled++
argv[argvFilled] = outputFile + objExt
argvFilled++
if *flagIncludePaths != "" {
for _, includePath := range strings.Split(*flagIncludePaths, ",", -1) {
argv[argvFilled] = "-I"
argvFilled++
argv[argvFilled] = includePath
argvFilled++
}
}
// for _, arg := range argv {
// logger.Info(arg)
// logger.Info(" ")
// }
// logger.Info("\n")
if pack.NeedsLocalSearchPath() || objDir != "" {
argv[argvFilled] = "-I"
argvFilled++
if objDir != "" {
argv[argvFilled] = objDir
} else {
argv[argvFilled] = "."
}
argvFilled++
}
if pack.Name == "main" {
argv[argvFilled] = "-I"
argvFilled++
argv[argvFilled] = "."
argvFilled++
}
for i := 0; i < pack.Files.Len(); i++ {
gf := pack.Files.At(i).(*godata.GoFile)
argv[argvFilled] = gf.Filename
argvFilled++
}
logger.Info(" %s\n", getCommandline(argv[0:argvFilled]))
cmd, err := exec.Run(compilerBin, argv[0:argvFilled], os.Environ(), rootPath,
exec.DevNull, exec.PassThrough, exec.PassThrough)
if err != nil {
logger.Error("%s\n", err)
os.Exit(1)
}
waitmsg, err := cmd.Wait(0)
if err != nil {
logger.Error("Compiler execution error (%s), aborting compilation.\n", err)
os.Exit(1)
}
if waitmsg.ExitStatus() != 0 {
pack.HasErrors = true
pack.InProgress = false
return false
}
// it should now be compiled
pack.Compiled = true
pack.InProgress = false
return true
}
/*
Calls the linker for the main file, which should be called "main.(5|6|8)".
*/
func link(pack *godata.GoPackage) bool {
var argc int
var argv []string
var argvFilled int
var objDir string = "" //outputDirPrefix + getObjDir();
// build the command line for the linker
argc = 4
if *flagIncludePaths != "" {
argc += 2
}
if pack.NeedsLocalSearchPath() {
argc += 2
}
if pack.Name == "main" {
argc += 2
}
argv = make([]string, argc*3)
argv[argvFilled] = linkerBin
argvFilled++
argv[argvFilled] = "-o"
argvFilled++
argv[argvFilled] = outputDirPrefix + pack.OutputFile
argvFilled++
if *flagIncludePaths != "" {
for _, v := range strings.Split(*flagIncludePaths, ",", -1) {
argv[argvFilled] = "-L"
argvFilled++
argv[argvFilled] = v
argvFilled++
}
}
// if pack.NeedsLocalSearchPath() {
// argv[argvFilled] = "-L"
// argvFilled++
// if objDir != "" {
// argv[argvFilled] = objDir
// } else {
// argv[argvFilled] = "."
// }
// argvFilled++
// }
if pack.Name == "main" {
argv[argvFilled] = "-L"
argvFilled++
argv[argvFilled] = "."
argvFilled++
}
argv[argvFilled] = objDir + pack.OutputFile + objExt
argvFilled++
logger.Info("Linking %s...\n", argv[2])
logger.Info(" %s\n\n", getCommandline(argv))
cmd, err := exec.Run(linkerBin, argv[0:argvFilled], os.Environ(), rootPath,
exec.DevNull, exec.PassThrough, exec.PassThrough)
if err != nil {
logger.Error("%s\n", err)
os.Exit(1)
}
waitmsg, err := cmd.Wait(0)
if err != nil {
logger.Error("Linker execution error (%s), aborting compilation.\n", err)
os.Exit(1)
}
if waitmsg.ExitStatus() != 0 {
logger.Error("Linker returned with errors, aborting.\n")
return false
}
return true
}
/*
Executes goyacc for a single .y file. The new .go files is prefixed with
an underscore and returned as a string for further use.
*/
func goyacc(filepath string) string {
// construct output file path
var outFilepath string
l_idx := strings.LastIndex(filepath, "/")
if l_idx >= 0 {
outFilepath = filepath[0:l_idx+1] +
"_" + filepath[l_idx+1:len(filepath)-1] + "go"
} else {
outFilepath = "_" + filepath[0:len(filepath)-1] + "go"
}
goyaccPath, err := exec.LookPath("goyacc")
if err != nil {
logger.Error("%s\n", err)
os.Exit(1)
}
logger.Info("Parsing goyacc file %s.\n", filepath)
argv := []string{goyaccPath, "-o", outFilepath, filepath}
logger.Debug("%s\n", argv)
cmd, err := exec.Run(argv[0], argv, os.Environ(), rootPath,
exec.PassThrough, exec.PassThrough, exec.PassThrough)
if err != nil {
logger.Error("%s\n", err)
os.Exit(1)
}
waitmsg, err := cmd.Wait(0)
if err != nil {
logger.Error("Executing goyacc failed: %s.\n", err)
os.Exit(1)
}
if waitmsg.ExitStatus() != 0 {
os.Exit(waitmsg.ExitStatus())
}
return outFilepath
}
/*
Executes something. Used for the -run command line option.
*/
func | (argv []string) {
logger.Info("Executing %s:\n", argv[0])
logger.Debug("%s\n", getCommandline(argv))
cmd, err := exec.Run(argv[0], argv, os.Environ(), rootPath,
exec.PassThrough, exec.PassThrough, exec.PassThrough)
if err != nil {
logger.Error("%s\n", err)
os.Exit(1)
}
waitmsg, err := cmd.Wait(0)
if err != nil {
logger.Error("Executing %s failed: %s.\n", argv[0], err)
os.Exit(1)
}
if waitmsg.ExitStatus() != 0 {
os.Exit(waitmsg.ExitStatus())
}
}
/*
Creates a .a file for a single GoPackage
*/
func packLib(pack *godata.GoPackage) {
var objDir string = "" //outputDirPrefix + getObjDir();
// ignore packages that need to be build manually (like cgo packages)
if pack.HasCGOFiles() {
logger.Debug("Skipped %s.a because it can't be build with gobuild.\n", pack.Name)
return
}
logger.Info("Creating %s.a...\n", pack.Name)
argv := []string{
gopackBin,
"crg", // create new go archive
outputDirPrefix + pack.Name + ".a",
objDir + pack.Name + objExt,
}
logger.Debug("%s\n", getCommandline(argv))
cmd, err := exec.Run(gopackBin, argv, os.Environ(), rootPath,
exec.DevNull, exec.PassThrough, exec.PassThrough)
if err != nil {
logger.Error("%s\n", err)
os.Exit(1)
}
waitmsg, err := cmd.Wait(0)
if err != nil {
logger.Error("gopack execution error (%s), aborting.\n", err)
os.Exit(1)
}
if waitmsg.ExitStatus() != 0 {
logger.Error("gopack returned with errors, aborting.\n")
os.Exit(waitmsg.ExitStatus())
}
os.Remove(objDir + pack.Name + objExt)
}
/*
Build an executable from the given sources.
*/
func buildExecutable() {
var executables []string
var execFilled int
// check if there's a main package:
if goPackages.GetMainCount() == 0 {
logger.Error("No main package found.\n")
os.Exit(1)
}
// multiple main, no command file from command line and no -a -> error
if (goPackages.GetMainCount() > 1) && (flag.NArg() == 0) && !*flagBuildAll {
logger.Error("Multiple files found with main function.\n")
logger.ErrorContinue("Please specify one or more as command line parameter or\n")
logger.ErrorContinue("run gobuild with -a. Available main files are:\n")
for _, fn := range goPackages.GetMainFilenames() {
logger.ErrorContinue("\t %s\n", fn)
}
os.Exit(1)
}
// compile all needed packages
if flag.NArg() > 0 {
if *flagRunExec {
executables = make([]string, flag.NArg())
}
for _, fn := range flag.Args() {
mainPack, exists := goPackages.GetMain(fn, !*flagSingleMainFile)
if !exists {
logger.Error("File %s not found.\n", fn)
return // or os.Exit?
}
if compile(mainPack) {
// link everything together
if link(mainPack) {
if *flagRunExec {
executables[execFilled] = outputDirPrefix + mainPack.OutputFile
execFilled++
}
} else {
linkErrors = true
}
} else {
logger.Error("Can't link executable because of compile errors.\n")
compileErrors = true
}
}
} else {
if *flagRunExec {
executables = make([]string, goPackages.GetMainCount())
}
for _, mainPack := range goPackages.GetMainPackages(!*flagSingleMainFile) {
if compile(mainPack) {
if link(mainPack) {
if *flagRunExec {
executables[execFilled] = outputDirPrefix + mainPack.OutputFile
execFilled++
}
} else {
linkErrors = true
}
} else {
logger.Error("Can't link executable because of compile errors.\n")
compileErrors = true
}
}
}
if *flagRunExec && !linkErrors && !compileErrors {
for i := 0; i < execFilled; i++ {
runExec([]string{executables[i]})
}
}
}
/*
Build library files (.a) for all packages or the ones given though
command line parameters.
*/
func buildLibrary() {
var packNames []string
var pack *godata.GoPackage
var exists bool
if goPackages.GetPackageCount() == 0 {
logger.Warn("No packages found to build.\n")
return
}
// check for there is at least one package that can be compiled
var hasNoCompilablePacks bool = true
for _, packName := range goPackages.GetPackageNames() {
pack, _ := goPackages.Get(packName)
if pack.Name == "main" {
continue
}
if pack.Files.Len() > 0 && !pack.HasCGOFiles() {
hasNoCompilablePacks = false
break
}
}
if hasNoCompilablePacks {
logger.Warn("No packages found that could be compiled by gobuild.\n")
compileErrors = true
return
}
// check for command line parameters
if flag.NArg() > 0 {
packNames = flag.Args()
} else {
packNames = goPackages.GetPackageNames()
}
// loop over all packages, compile them and build a .a file
for _, name := range packNames {
if name == "main" {
continue // don't make this into a library
}
pack, exists = goPackages.Get(name)
if !exists {
logger.Error("Package %s doesn't exist.\n", name)
continue // or exit?
}
// don't compile remote packages or packages without files
if pack.Type == godata.REMOTE_PACKAGE || pack.Files.Len() == 0 {
continue
}
// these packages come from invalid/unhandled imports
if pack.Files.Len() == 0 {
logger.Debug("Skipping package %s, no files to compile.\n", pack.Name)
continue
}
if !pack.Compiled && !pack.HasErrors {
compileErrors = !compile(pack) || compileErrors
}
if pack.HasErrors {
logger.Error("Can't create library because of compile errors.\n")
compileErrors = true
} else {
packLib(pack)
}
}
}
/*
Creates a new file called _testmain.go and compiles/links it to _testmain.
If the -run command line option is given it will also run the tests. In this
case -benchmarks/-match/-v are also passed on.
*/
func buildTestExecutable() {
// this will create a file called "_testmain.go"
testPack := createTestPackage()
if compile(testPack) {
linkErrors = !link(testPack) || linkErrors
} else {
logger.Error("Can't link executable because of compile errors.\n")
compileErrors = true
}
// delete temporary _testmain.go file
// os.Remove("_testmain.go")
if compileErrors || linkErrors {
return
}
if *flagRunExec {
var argvFilled int
var argc int = 1
if *flagMatch != "" {
argc += 2
}
if *flagBenchmarks != "" {
argc += 2
}
if *flagVerboseMode {
argc++
}
argv := make([]string, argc)
argv[argvFilled] = outputDirPrefix + testPack.OutputFile
argvFilled++
if *flagMatch != "" {
argv[argvFilled] = "-match"
argvFilled++
argv[argvFilled] = *flagMatch
argvFilled++
}
if *flagBenchmarks != "" {
argv[argvFilled] = "-benchmarks"
argvFilled++
argv[argvFilled] = *flagBenchmarks
argvFilled++
}
if *flagVerboseMode {
argv[argvFilled] = "-v"
argvFilled++
}
runExec(argv)
}
}
/*
This function does exactly the same as "make clean".
*/
func clean() {
bashBin, err := exec.LookPath("bash")
if err != nil {
logger.Error("Need bash to clean.\n")
os.Exit(127)
}
argv := []string{bashBin, "-c", "commandhere"}
if *flagVerboseMode {
argv[2] = "rm -rfv *.[568]"
} else {
argv[2] = "rm -rf *.[568]"
}
logger.Info("Running: %v\n", argv[2:])
cmd, err := exec.Run(bashBin, argv, os.Environ(), rootPath,
exec.DevNull, exec.PassThrough, exec.PassThrough)
if err != nil {
logger.Error("%s\n", err)
os.Exit(1)
}
waitmsg, err := cmd.Wait(0)
if err != nil {
logger.Error("Couldn't delete files: %s\n", err)
os.Exit(1)
}
if waitmsg.ExitStatus() != 0 {
logger.Error("rm returned with errors.\n")
os.Exit(waitmsg.ExitStatus())
}
}
// Returns the bigger number.
func max(a, b int) int {
if a > b {
return a
}
return b
}
/*
Entry point. Used for setting some variables and parsing the command line.
*/
func main() {
var err os.Error
var rootPathDir *os.FileInfo
// parse command line arguments
flag.Parse()
if *flagQuieterMode {
logger.SetVerbosityLevel(logger.ERROR)
} else if *flagQuietMode {
logger.SetVerbosityLevel(logger.WARN)
} else if *flagVerboseMode {
logger.SetVerbosityLevel(logger.DEBUG)
}
if *flagClean {
clean()
os.Exit(0)
}
// get the compiler/linker executable
var goarch string
goarch = os.Getenv("GOARCH")
if goarch == "" {
goarch = runtime.GOARCH
}
switch goarch {
case "amd64":
compilerBin = "6g"
linkerBin = "6l"
objExt = ".6"
case "386":
compilerBin = "8g"
linkerBin = "8l"
objExt = ".8"
case "arm":
compilerBin = "5g"
linkerBin = "5l"
objExt = ".5"
default:
logger.Error("Unsupported architecture: " + goarch + "\n")
os.Exit(1)
}
// get the complete path to the compiler/linker
compilerBin, err = exec.LookPath(compilerBin)
if err != nil {
logger.Error("Could not find compiler %s: %s\n", compilerBin, err)
os.Exit(127)
}
linkerBin, err = exec.LookPath(linkerBin)
if err != nil {
logger.Error("Could not find linker %s: %s\n", linkerBin, err)
os.Exit(127)
}
gopackBin, err = exec.LookPath(gopackBin)
if err != nil {
logger.Error("Could not find gopack executable (%s): %s\n", gopackBin, err)
os.Exit(127)
}
// get the root path from where the application was called
// and its permissions (used for subdirectories)
if rootPath, err = os.Getwd(); err != nil {
logger.Error("Could not get the root path: %s\n", err)
os.Exit(1)
}
if rootPathDir, err = os.Stat(rootPath); err != nil {
logger.Error("Could not read the root path: %s\n", err)
os.Exit(1)
}
rootPathPerm = rootPathDir.Permission()
// create the package container
goPackages = godata.NewGoPackageContainer()
// check if -o with path
if *flagOutputFileName != "" {
dir, err := os.Stat(*flagOutputFileName)
if err != nil {
// doesn't exist? try to make it if it's a path
if (*flagOutputFileName)[len(*flagOutputFileName)-1] == '/' {
err = os.MkdirAll(*flagOutputFileName, rootPathPerm)
if err == nil {
outputDirPrefix = *flagOutputFileName
}
} else {
godata.DefaultOutputFileName = *flagOutputFileName
}
} else if dir.IsDirectory() {
if (*flagOutputFileName)[len(*flagOutputFileName)-1] == '/' {
outputDirPrefix = *flagOutputFileName
} else {
outputDirPrefix = *flagOutputFileName + "/"
}
} else {
godata.DefaultOutputFileName = *flagOutputFileName
}
// make path to output file
if outputDirPrefix == "" && strings.Index(*flagOutputFileName, "/") != -1 {
err = os.MkdirAll((*flagOutputFileName)[0:strings.LastIndex(*flagOutputFileName, "/")], rootPathPerm)
if err != nil {
logger.Error("Could not create %s: %s\n",
(*flagOutputFileName)[0:strings.LastIndex(*flagOutputFileName, "/")],
err)
}
}
}
// read all go files in the current path + subdirectories and parse them
logger.Info("Parsing go file(s)...\n")
readFiles(rootPath)
if *flagTesting {
buildTestExecutable()
} else if *flagLibrary {
buildLibrary()
} else {
buildExecutable()
}
// make sure exit status is != 0 if there were compiler/linker errors
if compileErrors || linkErrors {
os.Exit(1)
}
}
| runExec | identifier_name |
sa_collection.py | from __future__ import print_function
import mysql.connector
import requests
import time
import json
from http.cookies import SimpleCookie
from bs4 import BeautifulSoup
##################################
# #
# CONSTANTS #
# #
##################################
# After you set up your mySQL database, alter the information in this
# file.
db_config_file = "../config/db_config.json"
# Log into SA, then copy paste your cookie into this file.
raw_cookie_file = "../config/raw_cookie.txt"
user_agent = {'User-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2)' +
' AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}
add_article = ("INSERT INTO articles"
"(articleID, ticker_symbol, published_date, author_name, title, text, num_likes, includes_symbols)"
"VALUES (%(articleID)s, %(ticker_symbol)s, %(published_date)s, %(author_name)s, %(title)s, %(text)s,"
" %(num_likes)s, %(includes_symbols)s)")
add_comment = ("INSERT INTO comments"
"(articleID, commentID, userID, comment_date, content, parentID, discussionID)"
"VALUES (%(articleID)s, %(commentID)s, %(userID)s, %(comment_date)s, %(content)s, %(parentID)s,"
"%(discussionID)s)")
##################################
# #
# DATA CLASSES #
# #
##################################
class Article:
def __init__(self, _id, a_cookie, a_user_agent):
"""
Initializes all fields with default values then parses the
information from the url.
"""
self._id = _id
self.ticker = ''
self.pub_date = '0001-01-01'
self.author = ''
self.title = ''
self.text = ''
self.includes = ''
self.comments = []
self.valid = True
self._parse_article(a_cookie, a_user_agent)
def _parse_article(self, a_cookie, a_ua):
"""
Parses article info from the given url.
"""
url = "https://seekingalpha.com/article/%s" % self._id
r = safe_request(url, {})
r_login = safe_request(url, a_cookie)
soup_log = BeautifulSoup(r_login.text, 'html.parser')
# Stops process if article invalid
primary_about = soup_log.find_all("a", href=True, sasource="article_primary_about")
if len(primary_about) != 1:
# Excludes non-single-ticker articles
print("Invalid Article")
self.valid = False
return
else:
self.ticker = primary_about[0].text.split()[-1][1:-1]
# Gets all includes and author
about = soup_log.find_all("a", href=True)
for a in about:
if 'sasource' in a.attrs:
if a.attrs['sasource'] == "article_about":
self.includes += a.text + ","
elif a.attrs['sasource'] == "auth_header_name":
self.author += a.text + ","
self.includes = self.includes[:-1]
self.author = self.author[:-1]
self.title = soup_log.find_all('h1')[0].text
self.pub_date = soup_log.find_all('time', itemprop="datePublished")[0]['content'][:10]
# Get Full Article Text
name_box = BeautifulSoup(r.text, 'html.parser').find_all('p')
print(name_box)
try:
disc_idx = list(filter(lambda i: 'id' in name_box[i].attrs and name_box[i]['id'] == 'a-disclosure',
range(len(name_box))))[0]
except IndexError:
disc_idx = len(name_box)
self.text = ''.join(map(lambda x: x.text + "\n", name_box[:disc_idx]))
def json(self):
"""
Returns json representation of an article (for writing
to the database).
"""
if self.valid:
return {
'articleID': self._id,
'ticker_symbol': self.ticker,
'published_date': self.pub_date,
'author_name': self.author,
'title': self.title,
'text': self.text,
'num_likes': 0,
'includes_symbols': self.includes
}
return {}
class Comment:
def __init__(self, article_id, comment):
self.articleID = article_id
self.commentID = comment['id']
self.userID = comment['user_id']
self.date = comment['created_on'][:10]
self.text = comment['content']
self.parentID = comment['parent_id']
self.discussionID = comment['discussion_id']
self.children_ids = comment['children']
def get_children(self):
"""
Recursively returns an array of all the children of the comment.
"""
children = []
for i in self.children_ids:
child = Comment(self.articleID, self.children_ids[i])
children.append(child)
children.extend(child.get_children())
return children
def json(self):
return {
'articleID': self.articleID, | 'comment_date': self.date,
'content': self.text.encode('ascii', errors='ignore').decode(),
'parentID': self.parentID,
'discussionID': self.discussionID
}
##################################
# #
# FILE FUNCTIONS #
# #
##################################
def read_json_file(filename):
"""
Reads a json formatted file.
"""
with open(filename) as f:
try:
data = json.loads(f.read())
except:
data = {}
return data
def write_json_file(json_data, filename):
"""
Writes a json to a file.
"""
try:
str_data = json.dumps(json_data)
with open(filename, "w") as f:
f.write(str_data)
return True
except MemoryError:
return False
def browser_cookie(rawcookie):
cookie = SimpleCookie()
cookie.load(rawcookie)
# reference: https://stackoverflow.com/questions/32281041/converting-cookie-string-into-python-dict
# Even though SimpleCookie is dictionary-like, it internally uses a Morsel object
# which is incompatible with requests. Manually construct a dictionary instead.
cookies = {}
for key, morsel in cookie.items():
cookies[key] = morsel.value
return cookies
def default_cookie():
"""
Gets cookie from the raw cookie file.
"""
with open(raw_cookie_file) as f:
rc = "".join(f.readlines())
return browser_cookie(rc)
def default_db_config():
"""
Gets default database configuration.
"""
return read_json_file(db_config_file)
def safe_request(url, cookie):
"""
Continues trying to make a request until a certain amount of
tries have failed.
"""
count = 0
r = ""
# Adjust this number if a certain amount of failed attempts
# is acceptable
while count < 1:
try:
r = requests.get(url, cookies=cookie, headers=user_agent)
if r.status_code != 200:
print(r.status_code, "blocked")
count += 1
else:
break
except requests.exceptions.ConnectionError:
print("timeout", url)
time.sleep(1)
return r
def get_comment_jsons(article_id, cookie):
"""
Returns all comments for the given article as array of
jsons.
"""
url = "https://seekingalpha.com/account/ajax_get_comments?id=%s&type=Article&commentType=" % article_id
r = safe_request(url, cookie)
comments = []
if r.status_code != 404:
res = json.loads(r.text)
for comment in res['comments'].values():
c = Comment(article_id, comment)
comments.append(c.json())
comments.extend(map(lambda x: x.json(), c.get_children()))
return comments
def try_add_comment(com_jsons, cursor, article_id):
"""
Given array of comment jsons, adds comments to database.
"""
if not com_jsons:
print("\t No comments found for " + article_id)
for c in com_jsons:
try:
cursor.execute(add_comment, c)
except mysql.connector.DatabaseError as err:
if not err.errno == 1062:
print("Wrong Comment Format: " + c["id"])
def try_add_article(art_json, cursor):
"""
Given an article json, tries to write that article to database.
"""
try:
cursor.execute(add_article, art_json)
except mysql.connector.errors.IntegrityError:
print("Duplicate Article")
def try_add_db(art_json, com_jsons, cursor, article_id):
try_add_article(art_json, cursor)
try_add_comment(com_jsons, cursor, article_id)
def gather_mysql_data(article_fn, start=0, stop=None, comments_only=False):
"""
Given a file with Seeking Alpha article ids separated by commas, iterates
through the article ids in the article and records the article and comment
data in the mysql database.
"""
config = default_db_config()
cookie = default_cookie()
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
with open(article_fn) as f:
articles = f.read().split(",")
i, total = start+1, float(len(articles))
for a in articles[start: stop]:
if comments_only:
com_jsons = get_comment_jsons(a, cookie)
try_add_comment(com_jsons, cursor, a)
else:
art_json = Article(a, cookie, user_agent).json()
if art_json:
com_jsons = get_comment_jsons(a, cookie)
try_add_db(art_json, com_jsons, cursor, a)
cnx.commit()
print("%0.4f" % (i/total*100), "%\t Article idx:", i-1)
i += 1
cursor.close()
cnx.close()
if __name__ == '__main__':
# Collection has not been updated in a long time so there are some
# aspects of the pipeline that do not seem to work anymore. While
# writing to the database seems fine, getting the full article text seems
# to be not working again.
a = Article("239509", default_cookie(), user_agent)
print(a.json())
# Do NOT run collection of articles before that bug has been fixed because
# you will overwrite your database with the truncated text version of these
# articles. | 'commentID': self.commentID,
'userID': self.userID, | random_line_split |
sa_collection.py | from __future__ import print_function
import mysql.connector
import requests
import time
import json
from http.cookies import SimpleCookie
from bs4 import BeautifulSoup
##################################
# #
# CONSTANTS #
# #
##################################
# After you set up your mySQL database, alter the information in this
# file.
db_config_file = "../config/db_config.json"
# Log into SA, then copy paste your cookie into this file.
raw_cookie_file = "../config/raw_cookie.txt"
user_agent = {'User-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2)' +
' AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}
add_article = ("INSERT INTO articles"
"(articleID, ticker_symbol, published_date, author_name, title, text, num_likes, includes_symbols)"
"VALUES (%(articleID)s, %(ticker_symbol)s, %(published_date)s, %(author_name)s, %(title)s, %(text)s,"
" %(num_likes)s, %(includes_symbols)s)")
add_comment = ("INSERT INTO comments"
"(articleID, commentID, userID, comment_date, content, parentID, discussionID)"
"VALUES (%(articleID)s, %(commentID)s, %(userID)s, %(comment_date)s, %(content)s, %(parentID)s,"
"%(discussionID)s)")
##################################
# #
# DATA CLASSES #
# #
##################################
class Article:
def __init__(self, _id, a_cookie, a_user_agent):
"""
Initializes all fields with default values then parses the
information from the url.
"""
self._id = _id
self.ticker = ''
self.pub_date = '0001-01-01'
self.author = ''
self.title = ''
self.text = ''
self.includes = ''
self.comments = []
self.valid = True
self._parse_article(a_cookie, a_user_agent)
def _parse_article(self, a_cookie, a_ua):
"""
Parses article info from the given url.
"""
url = "https://seekingalpha.com/article/%s" % self._id
r = safe_request(url, {})
r_login = safe_request(url, a_cookie)
soup_log = BeautifulSoup(r_login.text, 'html.parser')
# Stops process if article invalid
primary_about = soup_log.find_all("a", href=True, sasource="article_primary_about")
if len(primary_about) != 1:
# Excludes non-single-ticker articles
print("Invalid Article")
self.valid = False
return
else:
self.ticker = primary_about[0].text.split()[-1][1:-1]
# Gets all includes and author
about = soup_log.find_all("a", href=True)
for a in about:
if 'sasource' in a.attrs:
if a.attrs['sasource'] == "article_about":
self.includes += a.text + ","
elif a.attrs['sasource'] == "auth_header_name":
self.author += a.text + ","
self.includes = self.includes[:-1]
self.author = self.author[:-1]
self.title = soup_log.find_all('h1')[0].text
self.pub_date = soup_log.find_all('time', itemprop="datePublished")[0]['content'][:10]
# Get Full Article Text
name_box = BeautifulSoup(r.text, 'html.parser').find_all('p')
print(name_box)
try:
disc_idx = list(filter(lambda i: 'id' in name_box[i].attrs and name_box[i]['id'] == 'a-disclosure',
range(len(name_box))))[0]
except IndexError:
disc_idx = len(name_box)
self.text = ''.join(map(lambda x: x.text + "\n", name_box[:disc_idx]))
def | (self):
"""
Returns json representation of an article (for writing
to the database).
"""
if self.valid:
return {
'articleID': self._id,
'ticker_symbol': self.ticker,
'published_date': self.pub_date,
'author_name': self.author,
'title': self.title,
'text': self.text,
'num_likes': 0,
'includes_symbols': self.includes
}
return {}
class Comment:
def __init__(self, article_id, comment):
self.articleID = article_id
self.commentID = comment['id']
self.userID = comment['user_id']
self.date = comment['created_on'][:10]
self.text = comment['content']
self.parentID = comment['parent_id']
self.discussionID = comment['discussion_id']
self.children_ids = comment['children']
def get_children(self):
"""
Recursively returns an array of all the children of the comment.
"""
children = []
for i in self.children_ids:
child = Comment(self.articleID, self.children_ids[i])
children.append(child)
children.extend(child.get_children())
return children
def json(self):
return {
'articleID': self.articleID,
'commentID': self.commentID,
'userID': self.userID,
'comment_date': self.date,
'content': self.text.encode('ascii', errors='ignore').decode(),
'parentID': self.parentID,
'discussionID': self.discussionID
}
##################################
# #
# FILE FUNCTIONS #
# #
##################################
def read_json_file(filename):
"""
Reads a json formatted file.
"""
with open(filename) as f:
try:
data = json.loads(f.read())
except:
data = {}
return data
def write_json_file(json_data, filename):
"""
Writes a json to a file.
"""
try:
str_data = json.dumps(json_data)
with open(filename, "w") as f:
f.write(str_data)
return True
except MemoryError:
return False
def browser_cookie(rawcookie):
cookie = SimpleCookie()
cookie.load(rawcookie)
# reference: https://stackoverflow.com/questions/32281041/converting-cookie-string-into-python-dict
# Even though SimpleCookie is dictionary-like, it internally uses a Morsel object
# which is incompatible with requests. Manually construct a dictionary instead.
cookies = {}
for key, morsel in cookie.items():
cookies[key] = morsel.value
return cookies
def default_cookie():
"""
Gets cookie from the raw cookie file.
"""
with open(raw_cookie_file) as f:
rc = "".join(f.readlines())
return browser_cookie(rc)
def default_db_config():
"""
Gets default database configuration.
"""
return read_json_file(db_config_file)
def safe_request(url, cookie):
"""
Continues trying to make a request until a certain amount of
tries have failed.
"""
count = 0
r = ""
# Adjust this number if a certain amount of failed attempts
# is acceptable
while count < 1:
try:
r = requests.get(url, cookies=cookie, headers=user_agent)
if r.status_code != 200:
print(r.status_code, "blocked")
count += 1
else:
break
except requests.exceptions.ConnectionError:
print("timeout", url)
time.sleep(1)
return r
def get_comment_jsons(article_id, cookie):
"""
Returns all comments for the given article as array of
jsons.
"""
url = "https://seekingalpha.com/account/ajax_get_comments?id=%s&type=Article&commentType=" % article_id
r = safe_request(url, cookie)
comments = []
if r.status_code != 404:
res = json.loads(r.text)
for comment in res['comments'].values():
c = Comment(article_id, comment)
comments.append(c.json())
comments.extend(map(lambda x: x.json(), c.get_children()))
return comments
def try_add_comment(com_jsons, cursor, article_id):
"""
Given array of comment jsons, adds comments to database.
"""
if not com_jsons:
print("\t No comments found for " + article_id)
for c in com_jsons:
try:
cursor.execute(add_comment, c)
except mysql.connector.DatabaseError as err:
if not err.errno == 1062:
print("Wrong Comment Format: " + c["id"])
def try_add_article(art_json, cursor):
"""
Given an article json, tries to write that article to database.
"""
try:
cursor.execute(add_article, art_json)
except mysql.connector.errors.IntegrityError:
print("Duplicate Article")
def try_add_db(art_json, com_jsons, cursor, article_id):
try_add_article(art_json, cursor)
try_add_comment(com_jsons, cursor, article_id)
def gather_mysql_data(article_fn, start=0, stop=None, comments_only=False):
"""
Given a file with Seeking Alpha article ids separated by commas, iterates
through the article ids in the article and records the article and comment
data in the mysql database.
"""
config = default_db_config()
cookie = default_cookie()
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
with open(article_fn) as f:
articles = f.read().split(",")
i, total = start+1, float(len(articles))
for a in articles[start: stop]:
if comments_only:
com_jsons = get_comment_jsons(a, cookie)
try_add_comment(com_jsons, cursor, a)
else:
art_json = Article(a, cookie, user_agent).json()
if art_json:
com_jsons = get_comment_jsons(a, cookie)
try_add_db(art_json, com_jsons, cursor, a)
cnx.commit()
print("%0.4f" % (i/total*100), "%\t Article idx:", i-1)
i += 1
cursor.close()
cnx.close()
if __name__ == '__main__':
# Collection has not been updated in a long time so there are some
# aspects of the pipeline that do not seem to work anymore. While
# writing to the database seems fine, getting the full article text seems
# to be not working again.
a = Article("239509", default_cookie(), user_agent)
print(a.json())
# Do NOT run collection of articles before that bug has been fixed because
# you will overwrite your database with the truncated text version of these
# articles.
| json | identifier_name |
sa_collection.py | from __future__ import print_function
import mysql.connector
import requests
import time
import json
from http.cookies import SimpleCookie
from bs4 import BeautifulSoup
##################################
# #
# CONSTANTS #
# #
##################################
# After you set up your mySQL database, alter the information in this
# file.
db_config_file = "../config/db_config.json"
# Log into SA, then copy paste your cookie into this file.
raw_cookie_file = "../config/raw_cookie.txt"
user_agent = {'User-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2)' +
' AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}
add_article = ("INSERT INTO articles"
"(articleID, ticker_symbol, published_date, author_name, title, text, num_likes, includes_symbols)"
"VALUES (%(articleID)s, %(ticker_symbol)s, %(published_date)s, %(author_name)s, %(title)s, %(text)s,"
" %(num_likes)s, %(includes_symbols)s)")
add_comment = ("INSERT INTO comments"
"(articleID, commentID, userID, comment_date, content, parentID, discussionID)"
"VALUES (%(articleID)s, %(commentID)s, %(userID)s, %(comment_date)s, %(content)s, %(parentID)s,"
"%(discussionID)s)")
##################################
# #
# DATA CLASSES #
# #
##################################
class Article:
def __init__(self, _id, a_cookie, a_user_agent):
"""
Initializes all fields with default values then parses the
information from the url.
"""
self._id = _id
self.ticker = ''
self.pub_date = '0001-01-01'
self.author = ''
self.title = ''
self.text = ''
self.includes = ''
self.comments = []
self.valid = True
self._parse_article(a_cookie, a_user_agent)
def _parse_article(self, a_cookie, a_ua):
"""
Parses article info from the given url.
"""
url = "https://seekingalpha.com/article/%s" % self._id
r = safe_request(url, {})
r_login = safe_request(url, a_cookie)
soup_log = BeautifulSoup(r_login.text, 'html.parser')
# Stops process if article invalid
primary_about = soup_log.find_all("a", href=True, sasource="article_primary_about")
if len(primary_about) != 1:
# Excludes non-single-ticker articles
print("Invalid Article")
self.valid = False
return
else:
self.ticker = primary_about[0].text.split()[-1][1:-1]
# Gets all includes and author
about = soup_log.find_all("a", href=True)
for a in about:
if 'sasource' in a.attrs:
if a.attrs['sasource'] == "article_about":
self.includes += a.text + ","
elif a.attrs['sasource'] == "auth_header_name":
self.author += a.text + ","
self.includes = self.includes[:-1]
self.author = self.author[:-1]
self.title = soup_log.find_all('h1')[0].text
self.pub_date = soup_log.find_all('time', itemprop="datePublished")[0]['content'][:10]
# Get Full Article Text
name_box = BeautifulSoup(r.text, 'html.parser').find_all('p')
print(name_box)
try:
disc_idx = list(filter(lambda i: 'id' in name_box[i].attrs and name_box[i]['id'] == 'a-disclosure',
range(len(name_box))))[0]
except IndexError:
disc_idx = len(name_box)
self.text = ''.join(map(lambda x: x.text + "\n", name_box[:disc_idx]))
def json(self):
"""
Returns json representation of an article (for writing
to the database).
"""
if self.valid:
return {
'articleID': self._id,
'ticker_symbol': self.ticker,
'published_date': self.pub_date,
'author_name': self.author,
'title': self.title,
'text': self.text,
'num_likes': 0,
'includes_symbols': self.includes
}
return {}
class Comment:
def __init__(self, article_id, comment):
self.articleID = article_id
self.commentID = comment['id']
self.userID = comment['user_id']
self.date = comment['created_on'][:10]
self.text = comment['content']
self.parentID = comment['parent_id']
self.discussionID = comment['discussion_id']
self.children_ids = comment['children']
def get_children(self):
"""
Recursively returns an array of all the children of the comment.
"""
children = []
for i in self.children_ids:
child = Comment(self.articleID, self.children_ids[i])
children.append(child)
children.extend(child.get_children())
return children
def json(self):
return {
'articleID': self.articleID,
'commentID': self.commentID,
'userID': self.userID,
'comment_date': self.date,
'content': self.text.encode('ascii', errors='ignore').decode(),
'parentID': self.parentID,
'discussionID': self.discussionID
}
##################################
# #
# FILE FUNCTIONS #
# #
##################################
def read_json_file(filename):
"""
Reads a json formatted file.
"""
with open(filename) as f:
try:
data = json.loads(f.read())
except:
data = {}
return data
def write_json_file(json_data, filename):
"""
Writes a json to a file.
"""
try:
str_data = json.dumps(json_data)
with open(filename, "w") as f:
f.write(str_data)
return True
except MemoryError:
return False
def browser_cookie(rawcookie):
cookie = SimpleCookie()
cookie.load(rawcookie)
# reference: https://stackoverflow.com/questions/32281041/converting-cookie-string-into-python-dict
# Even though SimpleCookie is dictionary-like, it internally uses a Morsel object
# which is incompatible with requests. Manually construct a dictionary instead.
cookies = {}
for key, morsel in cookie.items():
cookies[key] = morsel.value
return cookies
def default_cookie():
"""
Gets cookie from the raw cookie file.
"""
with open(raw_cookie_file) as f:
rc = "".join(f.readlines())
return browser_cookie(rc)
def default_db_config():
"""
Gets default database configuration.
"""
return read_json_file(db_config_file)
def safe_request(url, cookie):
"""
Continues trying to make a request until a certain amount of
tries have failed.
"""
count = 0
r = ""
# Adjust this number if a certain amount of failed attempts
# is acceptable
while count < 1:
try:
r = requests.get(url, cookies=cookie, headers=user_agent)
if r.status_code != 200:
print(r.status_code, "blocked")
count += 1
else:
break
except requests.exceptions.ConnectionError:
print("timeout", url)
time.sleep(1)
return r
def get_comment_jsons(article_id, cookie):
"""
Returns all comments for the given article as array of
jsons.
"""
url = "https://seekingalpha.com/account/ajax_get_comments?id=%s&type=Article&commentType=" % article_id
r = safe_request(url, cookie)
comments = []
if r.status_code != 404:
res = json.loads(r.text)
for comment in res['comments'].values():
c = Comment(article_id, comment)
comments.append(c.json())
comments.extend(map(lambda x: x.json(), c.get_children()))
return comments
def try_add_comment(com_jsons, cursor, article_id):
|
def try_add_article(art_json, cursor):
"""
Given an article json, tries to write that article to database.
"""
try:
cursor.execute(add_article, art_json)
except mysql.connector.errors.IntegrityError:
print("Duplicate Article")
def try_add_db(art_json, com_jsons, cursor, article_id):
try_add_article(art_json, cursor)
try_add_comment(com_jsons, cursor, article_id)
def gather_mysql_data(article_fn, start=0, stop=None, comments_only=False):
"""
Given a file with Seeking Alpha article ids separated by commas, iterates
through the article ids in the article and records the article and comment
data in the mysql database.
"""
config = default_db_config()
cookie = default_cookie()
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
with open(article_fn) as f:
articles = f.read().split(",")
i, total = start+1, float(len(articles))
for a in articles[start: stop]:
if comments_only:
com_jsons = get_comment_jsons(a, cookie)
try_add_comment(com_jsons, cursor, a)
else:
art_json = Article(a, cookie, user_agent).json()
if art_json:
com_jsons = get_comment_jsons(a, cookie)
try_add_db(art_json, com_jsons, cursor, a)
cnx.commit()
print("%0.4f" % (i/total*100), "%\t Article idx:", i-1)
i += 1
cursor.close()
cnx.close()
if __name__ == '__main__':
# Collection has not been updated in a long time so there are some
# aspects of the pipeline that do not seem to work anymore. While
# writing to the database seems fine, getting the full article text seems
# to be not working again.
a = Article("239509", default_cookie(), user_agent)
print(a.json())
# Do NOT run collection of articles before that bug has been fixed because
# you will overwrite your database with the truncated text version of these
# articles.
| """
Given array of comment jsons, adds comments to database.
"""
if not com_jsons:
print("\t No comments found for " + article_id)
for c in com_jsons:
try:
cursor.execute(add_comment, c)
except mysql.connector.DatabaseError as err:
if not err.errno == 1062:
print("Wrong Comment Format: " + c["id"]) | identifier_body |
sa_collection.py | from __future__ import print_function
import mysql.connector
import requests
import time
import json
from http.cookies import SimpleCookie
from bs4 import BeautifulSoup
##################################
# #
# CONSTANTS #
# #
##################################
# After you set up your mySQL database, alter the information in this
# file.
db_config_file = "../config/db_config.json"
# Log into SA, then copy paste your cookie into this file.
raw_cookie_file = "../config/raw_cookie.txt"
user_agent = {'User-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2)' +
' AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}
add_article = ("INSERT INTO articles"
"(articleID, ticker_symbol, published_date, author_name, title, text, num_likes, includes_symbols)"
"VALUES (%(articleID)s, %(ticker_symbol)s, %(published_date)s, %(author_name)s, %(title)s, %(text)s,"
" %(num_likes)s, %(includes_symbols)s)")
add_comment = ("INSERT INTO comments"
"(articleID, commentID, userID, comment_date, content, parentID, discussionID)"
"VALUES (%(articleID)s, %(commentID)s, %(userID)s, %(comment_date)s, %(content)s, %(parentID)s,"
"%(discussionID)s)")
##################################
# #
# DATA CLASSES #
# #
##################################
class Article:
def __init__(self, _id, a_cookie, a_user_agent):
"""
Initializes all fields with default values then parses the
information from the url.
"""
self._id = _id
self.ticker = ''
self.pub_date = '0001-01-01'
self.author = ''
self.title = ''
self.text = ''
self.includes = ''
self.comments = []
self.valid = True
self._parse_article(a_cookie, a_user_agent)
def _parse_article(self, a_cookie, a_ua):
"""
Parses article info from the given url.
"""
url = "https://seekingalpha.com/article/%s" % self._id
r = safe_request(url, {})
r_login = safe_request(url, a_cookie)
soup_log = BeautifulSoup(r_login.text, 'html.parser')
# Stops process if article invalid
primary_about = soup_log.find_all("a", href=True, sasource="article_primary_about")
if len(primary_about) != 1:
# Excludes non-single-ticker articles
print("Invalid Article")
self.valid = False
return
else:
self.ticker = primary_about[0].text.split()[-1][1:-1]
# Gets all includes and author
about = soup_log.find_all("a", href=True)
for a in about:
if 'sasource' in a.attrs:
if a.attrs['sasource'] == "article_about":
self.includes += a.text + ","
elif a.attrs['sasource'] == "auth_header_name":
self.author += a.text + ","
self.includes = self.includes[:-1]
self.author = self.author[:-1]
self.title = soup_log.find_all('h1')[0].text
self.pub_date = soup_log.find_all('time', itemprop="datePublished")[0]['content'][:10]
# Get Full Article Text
name_box = BeautifulSoup(r.text, 'html.parser').find_all('p')
print(name_box)
try:
disc_idx = list(filter(lambda i: 'id' in name_box[i].attrs and name_box[i]['id'] == 'a-disclosure',
range(len(name_box))))[0]
except IndexError:
disc_idx = len(name_box)
self.text = ''.join(map(lambda x: x.text + "\n", name_box[:disc_idx]))
def json(self):
"""
Returns json representation of an article (for writing
to the database).
"""
if self.valid:
return {
'articleID': self._id,
'ticker_symbol': self.ticker,
'published_date': self.pub_date,
'author_name': self.author,
'title': self.title,
'text': self.text,
'num_likes': 0,
'includes_symbols': self.includes
}
return {}
class Comment:
def __init__(self, article_id, comment):
self.articleID = article_id
self.commentID = comment['id']
self.userID = comment['user_id']
self.date = comment['created_on'][:10]
self.text = comment['content']
self.parentID = comment['parent_id']
self.discussionID = comment['discussion_id']
self.children_ids = comment['children']
def get_children(self):
"""
Recursively returns an array of all the children of the comment.
"""
children = []
for i in self.children_ids:
child = Comment(self.articleID, self.children_ids[i])
children.append(child)
children.extend(child.get_children())
return children
def json(self):
return {
'articleID': self.articleID,
'commentID': self.commentID,
'userID': self.userID,
'comment_date': self.date,
'content': self.text.encode('ascii', errors='ignore').decode(),
'parentID': self.parentID,
'discussionID': self.discussionID
}
##################################
# #
# FILE FUNCTIONS #
# #
##################################
def read_json_file(filename):
"""
Reads a json formatted file.
"""
with open(filename) as f:
try:
data = json.loads(f.read())
except:
data = {}
return data
def write_json_file(json_data, filename):
"""
Writes a json to a file.
"""
try:
str_data = json.dumps(json_data)
with open(filename, "w") as f:
f.write(str_data)
return True
except MemoryError:
return False
def browser_cookie(rawcookie):
cookie = SimpleCookie()
cookie.load(rawcookie)
# reference: https://stackoverflow.com/questions/32281041/converting-cookie-string-into-python-dict
# Even though SimpleCookie is dictionary-like, it internally uses a Morsel object
# which is incompatible with requests. Manually construct a dictionary instead.
cookies = {}
for key, morsel in cookie.items():
cookies[key] = morsel.value
return cookies
def default_cookie():
"""
Gets cookie from the raw cookie file.
"""
with open(raw_cookie_file) as f:
rc = "".join(f.readlines())
return browser_cookie(rc)
def default_db_config():
"""
Gets default database configuration.
"""
return read_json_file(db_config_file)
def safe_request(url, cookie):
"""
Continues trying to make a request until a certain amount of
tries have failed.
"""
count = 0
r = ""
# Adjust this number if a certain amount of failed attempts
# is acceptable
while count < 1:
|
return r
def get_comment_jsons(article_id, cookie):
"""
Returns all comments for the given article as array of
jsons.
"""
url = "https://seekingalpha.com/account/ajax_get_comments?id=%s&type=Article&commentType=" % article_id
r = safe_request(url, cookie)
comments = []
if r.status_code != 404:
res = json.loads(r.text)
for comment in res['comments'].values():
c = Comment(article_id, comment)
comments.append(c.json())
comments.extend(map(lambda x: x.json(), c.get_children()))
return comments
def try_add_comment(com_jsons, cursor, article_id):
"""
Given array of comment jsons, adds comments to database.
"""
if not com_jsons:
print("\t No comments found for " + article_id)
for c in com_jsons:
try:
cursor.execute(add_comment, c)
except mysql.connector.DatabaseError as err:
if not err.errno == 1062:
print("Wrong Comment Format: " + c["id"])
def try_add_article(art_json, cursor):
"""
Given an article json, tries to write that article to database.
"""
try:
cursor.execute(add_article, art_json)
except mysql.connector.errors.IntegrityError:
print("Duplicate Article")
def try_add_db(art_json, com_jsons, cursor, article_id):
try_add_article(art_json, cursor)
try_add_comment(com_jsons, cursor, article_id)
def gather_mysql_data(article_fn, start=0, stop=None, comments_only=False):
"""
Given a file with Seeking Alpha article ids separated by commas, iterates
through the article ids in the article and records the article and comment
data in the mysql database.
"""
config = default_db_config()
cookie = default_cookie()
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
with open(article_fn) as f:
articles = f.read().split(",")
i, total = start+1, float(len(articles))
for a in articles[start: stop]:
if comments_only:
com_jsons = get_comment_jsons(a, cookie)
try_add_comment(com_jsons, cursor, a)
else:
art_json = Article(a, cookie, user_agent).json()
if art_json:
com_jsons = get_comment_jsons(a, cookie)
try_add_db(art_json, com_jsons, cursor, a)
cnx.commit()
print("%0.4f" % (i/total*100), "%\t Article idx:", i-1)
i += 1
cursor.close()
cnx.close()
if __name__ == '__main__':
# Collection has not been updated in a long time so there are some
# aspects of the pipeline that do not seem to work anymore. While
# writing to the database seems fine, getting the full article text seems
# to be not working again.
a = Article("239509", default_cookie(), user_agent)
print(a.json())
# Do NOT run collection of articles before that bug has been fixed because
# you will overwrite your database with the truncated text version of these
# articles.
| try:
r = requests.get(url, cookies=cookie, headers=user_agent)
if r.status_code != 200:
print(r.status_code, "blocked")
count += 1
else:
break
except requests.exceptions.ConnectionError:
print("timeout", url)
time.sleep(1) | conditional_block |
set.js | "use strict";
module.exports = {
AbzanCharm: require("./AbzanCharm"),
AbzanFalconer: require("./AbzanFalconer"),
AcademyElite: require("./AcademyElite"),
AeonChronicler: require("./AeonChronicler"),
AkiriLineSlinger: require("./AkiriLineSlinger"),
AkroanHorse: require("./AkroanHorse"),
AleshaWhoSmilesatDeath: require("./AleshaWhoSmilesatDeath"),
AncientExcavation: require("./AncientExcavation"),
AnkleShanker: require("./AnkleShanker"),
ArcaneDenial: require("./ArcaneDenial"),
ArcaneSanctum: require("./ArcaneSanctum"),
ArmoryAutomaton: require("./ArmoryAutomaton"),
ArmyoftheDamned: require("./ArmyoftheDamned"),
ArtifactMutation: require("./ArtifactMutation"),
AshBarrens: require("./AshBarrens"),
AssaultSuit: require("./AssaultSuit"),
AstralCornucopia: require("./AstralCornucopia"),
AtraxaPraetorsVoice: require("./AtraxaPraetorsVoice"),
AuraMutation: require("./AuraMutation"),
AzoriusChancery: require("./AzoriusChancery"),
BalefulStrix: require("./BalefulStrix"),
BaneoftheLiving: require("./BaneoftheLiving"),
BeaconofUnrest: require("./BeaconofUnrest"),
BeastmasterAscension: require("./BeastmasterAscension"),
BeastWithin: require("./BeastWithin"),
BenefactorsDraught: require("./BenefactorsDraught"),
BituminousBlast: require("./BituminousBlast"),
BlasphemousAct: require("./BlasphemousAct"),
BlazingArchon: require("./BlazingArchon"),
BlindObedience: require("./BlindObedience"),
BlinkmothUrn: require("./BlinkmothUrn"),
BloodbraidElf: require("./BloodbraidElf"),
BloodTyrant: require("./BloodTyrant"),
Bonehoard: require("./Bonehoard"),
Boompile: require("./Boompile"),
BorosCharm: require("./BorosCharm"),
BorosGarrison: require("./BorosGarrison"),
BravetheSands: require("./BravetheSands"),
BreathofFury: require("./BreathofFury"),
BredfortheHunt: require("./BredfortheHunt"),
BreyaEtheriumShaper: require("./BreyaEtheriumShaper"),
BruseTarlBoorishHerder: require("./BruseTarlBoorishHerder"),
BrutalHordechief: require("./BrutalHordechief"),
Burgeoning: require("./Burgeoning"),
BuriedRuin: require("./BuriedRuin"),
CatharsCrusade: require("./CatharsCrusade"),
CauldronofSouls: require("./CauldronofSouls"),
CavesofKoilos: require("./CavesofKoilos"),
ChainofVapor: require("./ChainofVapor"),
ChampionofLambholt: require("./ChampionofLambholt"),
ChaosWarp: require("./ChaosWarp"),
ChargingCinderhorn: require("./ChargingCinderhorn"),
ChasmSkulker: require("./ChasmSkulker"),
ChiefEngineer: require("./ChiefEngineer"),
ChromaticLantern: require("./ChromaticLantern"),
CitadelSiege: require("./CitadelSiege"),
ClanDefiance: require("./ClanDefiance"),
CoastalBreach: require("./CoastalBreach"),
CoilingOracle: require("./CoilingOracle"),
CollectiveVoyage: require("./CollectiveVoyage"),
CommandersSphere: require("./CommandersSphere"),
CommandTower: require("./CommandTower"),
ConquerorsFlail: require("./ConquerorsFlail"),
ConsumingAberration: require("./ConsumingAberration"),
CorpsejackMenace: require("./CorpsejackMenace"),
CracklingDoom: require("./CracklingDoom"),
CranialPlating: require("./CranialPlating"),
CruelEntertainment: require("./CruelEntertainment"),
CrumblingNecropolis: require("./CrumblingNecropolis"),
CrystallineCrawler: require("./CrystallineCrawler"),
Cultivate: require("./Cultivate"),
CurseofVengeance: require("./CurseofVengeance"),
CurtainsCall: require("./CurtainsCall"),
CustodiSoulbinders: require("./CustodiSoulbinders"),
DarettiScrapSavant: require("./DarettiScrapSavant"),
DarksteelCitadel: require("./DarksteelCitadel"),
DarksteelIngot: require("./DarksteelIngot"),
DarkwaterCatacombs: require("./DarkwaterCatacombs"),
DauntlessEscort: require("./DauntlessEscort"),
Decimate: require("./Decimate"),
DeepglowSkate: require("./DeepglowSkate"),
DenProtector: require("./DenProtector"),
DevastationTide: require("./DevastationTide"),
DimirAqueduct: require("./DimirAqueduct"),
DisdainfulStroke: require("./DisdainfulStroke"),
DismalBackwater: require("./DismalBackwater"),
DispellersCapsule: require("./DispellersCapsule"),
DivergentTransformations: require("./DivergentTransformations"),
DragonMage: require("./DragonMage"),
DragonskullSummit: require("./DragonskullSummit"),
DreadshipReef: require("./DreadshipReef"),
DuelistsHeritage: require("./DuelistsHeritage"),
Duneblast: require("./Duneblast"),
EdricSpymasterofTrest: require("./EdricSpymasterofTrest"),
EliteScaleguard: require("./EliteScaleguard"),
EmpyrialPlate: require("./EmpyrialPlate"),
EnduringScalelord: require("./EnduringScalelord"),
EntrapmentManeuver: require("./EntrapmentManeuver"),
EtchedOracle: require("./EtchedOracle"),
EtheriumHornSorcerer: require("./EtheriumHornSorcerer"),
EtheriumSculptor: require("./EtheriumSculptor"),
EtherswornAdjudicator: require("./EtherswornAdjudicator"),
Evacuation: require("./Evacuation"),
EverflowingChalice: require("./EverflowingChalice"),
EverlastingTorment: require("./EverlastingTorment"),
EvolutionaryEscalation: require("./EvolutionaryEscalation"),
EvolvingWilds: require("./EvolvingWilds"),
ExecutionersCapsule: require("./ExecutionersCapsule"),
ExoticOrchard: require("./ExoticOrchard"),
FaerieArtisans: require("./FaerieArtisans"),
Farseek: require("./Farseek"),
FarWanderings: require("./FarWanderings"),
FathomMage: require("./FathomMage"),
FellwarStone: require("./FellwarStone"),
Festercreep: require("./Festercreep"),
FiligreeAngel: require("./FiligreeAngel"),
ForbiddenOrchard: require("./ForbiddenOrchard"),
Forest: require("./Forest"),
ForgottenAncient: require("./ForgottenAncient"),
FrenziedFugue: require("./FrenziedFugue"),
FrontierBivouac: require("./FrontierBivouac"),
Gamekeeper: require("./Gamekeeper"),
GhastlyConscription: require("./GhastlyConscription"),
GhaveGuruofSpores: require("./GhaveGuruofSpores"),
GhostlyPrison: require("./GhostlyPrison"),
GlintEyeNephilim: require("./GlintEyeNephilim"),
GoblinSpymaster: require("./GoblinSpymaster"),
GodoBanditWarlord: require("./GodoBanditWarlord"),
GolgariRotFarm: require("./GolgariRotFarm"),
GolgariSignet: require("./GolgariSignet"),
GrabtheReins: require("./GrabtheReins"),
GrandColiseum: require("./GrandColiseum"),
GraveUpheaval: require("./GraveUpheaval"),
GripofPhyresis: require("./GripofPhyresis"),
GruulSignet: require("./GruulSignet"),
GruulTurf: require("./GruulTurf"),
Guiltfeeder: require("./Guiltfeeder"),
GwafaHazidProfiteer: require("./GwafaHazidProfiteer"),
HannaShipsNavigator: require("./HannaShipsNavigator"),
HardenedScales: require("./HardenedScales"),
HellkiteIgniter: require("./HellkiteIgniter"),
HellkiteTyrant: require("./HellkiteTyrant"),
HomewardPath: require("./HomewardPath"),
HoofprintsoftheStag: require("./HoofprintsoftheStag"),
HorizonChimera: require("./HorizonChimera"),
HowlingMine: require("./HowlingMine"),
HumbleDefector: require("./HumbleDefector"),
HushwingGryff: require("./HushwingGryff"),
IchorWellspring: require("./IchorWellspring"),
IkraShidiqitheUsurper: require("./IkraShidiqitheUsurper"),
InGarruksWake: require("./InGarruksWake"),
InspiringCall: require("./InspiringCall"),
IroasGodofVictory: require("./IroasGodofVictory"),
IshaiOjutaiDragonspeaker: require("./IshaiOjutaiDragonspeaker"),
Island: require("./Island"),
IzzetBoilerworks: require("./IzzetBoilerworks"),
JorKadeenthePrevailer: require("./JorKadeenthePrevailer"),
JungleHollow: require("./JungleHollow"),
JungleShrine: require("./JungleShrine"),
JuniperOrderRanger: require("./JuniperOrderRanger"),
KalonianHydra: require("./KalonianHydra"),
KarplusanForest: require("./KarplusanForest"),
KazuulTyrantoftheCliffs: require("./KazuulTyrantoftheCliffs"),
KeeningStone: require("./KeeningStone"),
KodamasReach: require("./KodamasReach"),
KorozdaGuildmage: require("./KorozdaGuildmage"),
KraumLudevicsOpus: require("./KraumLudevicsOpus"),
KrosanVerge: require("./KrosanVerge"),
KydeleChosenofKruphix: require("./KydeleChosenofKruphix"),
KynaiosandTiroofMeletis: require("./KynaiosandTiroofMeletis"),
Languish: require("./Languish"),
Lavalanche: require("./Lavalanche"),
LightningGreaves: require("./LightningGreaves"),
LoxodonWarhammer: require("./LoxodonWarhammer"),
LudevicNecroAlchemist: require("./LudevicNecroAlchemist"),
LurkingPredators: require("./LurkingPredators"),
MagusoftheWill: require("./MagusoftheWill"),
ManagorgerHydra: require("./ManagorgerHydra"),
ManifoldInsights: require("./ManifoldInsights"),
MasterBiomancer: require("./MasterBiomancer"),
MasterofEtherium: require("./MasterofEtherium"),
MentoroftheMeek: require("./MentoroftheMeek"),
MercilessEviction: require("./MercilessEviction"),
MigratoryRoute: require("./MigratoryRoute"),
MindsAglow: require("./MindsAglow"),
MirrorEntity: require("./MirrorEntity"),
Mirrorweave: require("./Mirrorweave"),
Mortify: require("./Mortify"),
MosswortBridge: require("./MosswortBridge"),
Mountain: require("./Mountain"),
MurmuringBosk: require("./MurmuringBosk"),
Mycoloth: require("./Mycoloth"),
MycosynthWellspring: require("./MycosynthWellspring"),
MyrBattlesphere: require("./MyrBattlesphere"),
MyriadLandscape: require("./MyriadLandscape"),
MyrRetriever: require("./MyrRetriever"),
MysticMonastery: require("./MysticMonastery"),
NathoftheGiltLeaf: require("./NathoftheGiltLeaf"),
NayaCharm: require("./NayaCharm"),
Necrogenesis: require("./Necrogenesis"),
Necroplasm: require("./Necroplasm"),
NevinyrralsDisk: require("./NevinyrralsDisk"),
NomadOutpost: require("./NomadOutpost"),
OathofDruids: require("./OathofDruids"),
Oblation: require("./Oblation"),
OpalPalace: require("./OpalPalace"),
OpentheVaults: require("./OpentheVaults"),
OpulentPalace: require("./OpulentPalace"),
OrderChaos: require("./OrderChaos"),
OrzhovAdvokist: require("./OrzhovAdvokist"),
OrzhovBasilica: require("./OrzhovBasilica"),
OrzhovSignet: require("./OrzhovSignet"),
PartingThoughts: require("./PartingThoughts"),
PastinFlames: require("./PastinFlames"),
PhyrexianRebirth: require("./PhyrexianRebirth"),
Plains: require("./Plains"),
PrimevalProtector: require("./PrimevalProtector"),
PrismaticGeoscope: require("./PrismaticGeoscope"),
ProgenitorMimic: require("./ProgenitorMimic"),
Propaganda: require("./Propaganda"),
PsychosisCrawler: require("./PsychosisCrawler"),
Putrefy: require("./Putrefy"),
QuirionExplorer: require("./QuirionExplorer"),
RakdosCarnarium: require("./RakdosCarnarium"),
RakdosCharm: require("./RakdosCharm"),
RakdosSignet: require("./RakdosSignet"),
RampantGrowth: require("./RampantGrowth"),
RavosSoultender: require("./RavosSoultender"),
ReadtheRunes: require("./ReadtheRunes"),
RealmSeekers: require("./RealmSeekers"),
ReforgetheSoul: require("./ReforgetheSoul"),
ReinsofPower: require("./ReinsofPower"),
ReliquaryTower: require("./ReliquaryTower"),
Reveillark: require("./Reveillark"),
ReversetheSands: require("./ReversetheSands"),
ReyhanLastoftheAbzan: require("./ReyhanLastoftheAbzan"),
RitesofFlourishing: require("./RitesofFlourishing"),
RootboundCrag: require("./RootboundCrag"),
Rubblehulk: require("./Rubblehulk"),
RuggedHighlands: require("./RuggedHighlands"),
RunehornHellkite: require("./RunehornHellkite"),
RuptureSpire: require("./RuptureSpire"),
SakuraTribeElder: require("./SakuraTribeElder"),
SanctumGargoyle: require("./SanctumGargoyle"),
SandsteppeCitadel: require("./SandsteppeCitadel"),
Sangromancer: require("./Sangromancer"),
SaskiatheUnyielding: require("./SaskiatheUnyielding"),
SatyrWayfinder: require("./SatyrWayfinder"),
SavageLands: require("./SavageLands"),
ScavengingOoze: require("./ScavengingOoze"),
SeasideCitadel: require("./SeasideCitadel"),
SeatoftheSynod: require("./SeatoftheSynod"),
SeedsofRenewal: require("./SeedsofRenewal"),
SelesnyaGuildmage: require("./SelesnyaGuildmage"),
SelesnyaSanctuary: require("./SelesnyaSanctuary"),
SelflessSquire: require("./SelflessSquire"),
SelvalaExplorerReturned: require("./SelvalaExplorerReturned"),
ShadowbloodRidge: require("./ShadowbloodRidge"),
ShamanicRevelation: require("./ShamanicRevelation"),
SharuumtheHegemon: require("./SharuumtheHegemon"),
ShimmerMyr: require("./ShimmerMyr"),
SidarKondoofJamuraa: require("./SidarKondoofJamuraa"),
SilasRennSeekerAdept: require("./SilasRennSeekerAdept"),
SimicGrowthChamber: require("./SimicGrowthChamber"),
SimicSignet: require("./SimicSignet"),
Skullclamp: require("./Skullclamp"),
SlobadGoblinTinkerer: require("./SlobadGoblinTinkerer"),
SolemnSimulacrum: require("./SolemnSimulacrum"),
SolidarityofHeroes: require("./SolidarityofHeroes"),
SolRing: require("./SolRing"),
SoulofNewPhyrexia: require("./SoulofNewPhyrexia"),
SpellheartChimera: require("./SpellheartChimera"),
Spelltwine: require("./Spelltwine"),
SphereofSafety: require("./SphereofSafety"),
SphinxSummoner: require("./SphinxSummoner"),
SpinerockKnoll: require("./SpinerockKnoll"),
SpittingImage: require("./SpittingImage"),
StalkingVengeance: require("./StalkingVengeance"),
StonehoofChieftain: require("./StonehoofChieftain"),
SublimeExhalation: require("./SublimeExhalation"),
Sunforger: require("./Sunforger"),
SungrassPrairie: require("./SungrassPrairie"),
SunpetalGrove: require("./SunpetalGrove"),
Swamp: require("./Swamp"),
SwanSong: require("./SwanSong"),
SwiftfootBoots: require("./SwiftfootBoots"),
SwiftwaterCliffs: require("./SwiftwaterCliffs"),
SwordstoPlowshares: require("./SwordstoPlowshares"),
SydriGalvanicGenius: require("./SydriGalvanicGenius"),
SylvanReclamation: require("./SylvanReclamation"),
SylvokExplorer: require("./SylvokExplorer"),
TanatheBloodsower: require("./TanatheBloodsower"),
TaureanMauler: require("./TaureanMauler"),
TempleBell: require("./TempleBell"),
TempleoftheFalseGod: require("./TempleoftheFalseGod"),
TemptwithDiscovery: require("./TemptwithDiscovery"),
Terminate: require("./Terminate"),
TerramorphicExpanse: require("./TerramorphicExpanse"),
TezzeretsGambit: require("./TezzeretsGambit"),
TheloniteHermit: require("./TheloniteHermit"),
ThopterFoundry: require("./ThopterFoundry"),
ThornwoodFalls: require("./ThornwoodFalls"),
ThrasiosTritonHero: require("./ThrasiosTritonHero"),
Thrummingbird: require("./Thrummingbird"),
ThunderfootBaloth: require("./ThunderfootBaloth"),
TradingPost: require("./TradingPost"),
TransguildPromenade: require("./TransguildPromenade"),
TrashforTreasure: require("./TrashforTreasure"),
TreacherousTerrain: require("./TreacherousTerrain"),
TreasureCruise: require("./TreasureCruise"),
TrialError: require("./TrialError"),
TrinketMage: require("./TrinketMage"),
TuskguardCaptain: require("./TuskguardCaptain"),
TymnatheWeaver: require("./TymnatheWeaver"),
UndergroundRiver: require("./UndergroundRiver"),
UtterEnd: require("./UtterEnd"),
VedalkenEngineer: require("./VedalkenEngineer"),
VensersJournal: require("./VensersJournal"),
VeteranExplorer: require("./VeteranExplorer"),
VialSmashertheFierce: require("./VialSmashertheFierce"),
VolcanicVision: require("./VolcanicVision"),
VoreloftheHullClade: require("./VoreloftheHullClade"),
VulturousZombie: require("./VulturousZombie"),
WallofBlossoms: require("./WallofBlossoms"),
WasteNot: require("./WasteNot"),
WaveofReckoning: require("./WaveofReckoning"),
WheelofFate: require("./WheelofFate"),
WhimsoftheFates: require("./WhimsoftheFates"),
Whipflare: require("./Whipflare"),
WhisperingMadness: require("./WhisperingMadness"),
WhispersilkCloak: require("./WhispersilkCloak"),
WightofPrecinctSix: require("./WightofPrecinctSix"),
WildBeastmaster: require("./WildBeastmaster"),
WildernessElemental: require("./WildernessElemental"),
WindbornMuse: require("./WindbornMuse"),
WindbriskHeights: require("./WindbriskHeights"),
Windfall: require("./Windfall"),
WormHarvest: require("./WormHarvest"),
YidrisMaelstromWielder: require("./YidrisMaelstromWielder"),
ZedruutheGreathearted: require("./ZedruutheGreathearted"),
ZhurTaaDruid: require("./ZhurTaaDruid")
};
if (window) {if (!window.mtgSets) | window.mtgSets.setC16 = module.exports;} | { window.mtgSets = {}; } | conditional_block |
set.js | "use strict";
module.exports = {
AbzanCharm: require("./AbzanCharm"),
AbzanFalconer: require("./AbzanFalconer"),
AcademyElite: require("./AcademyElite"),
AeonChronicler: require("./AeonChronicler"),
AkiriLineSlinger: require("./AkiriLineSlinger"),
AkroanHorse: require("./AkroanHorse"),
AleshaWhoSmilesatDeath: require("./AleshaWhoSmilesatDeath"),
AncientExcavation: require("./AncientExcavation"),
AnkleShanker: require("./AnkleShanker"),
ArcaneDenial: require("./ArcaneDenial"),
ArcaneSanctum: require("./ArcaneSanctum"),
ArmoryAutomaton: require("./ArmoryAutomaton"),
ArmyoftheDamned: require("./ArmyoftheDamned"),
ArtifactMutation: require("./ArtifactMutation"),
AshBarrens: require("./AshBarrens"),
AssaultSuit: require("./AssaultSuit"),
AstralCornucopia: require("./AstralCornucopia"),
AtraxaPraetorsVoice: require("./AtraxaPraetorsVoice"),
AuraMutation: require("./AuraMutation"),
AzoriusChancery: require("./AzoriusChancery"),
BalefulStrix: require("./BalefulStrix"),
BaneoftheLiving: require("./BaneoftheLiving"),
BeaconofUnrest: require("./BeaconofUnrest"),
BeastmasterAscension: require("./BeastmasterAscension"),
BeastWithin: require("./BeastWithin"),
BenefactorsDraught: require("./BenefactorsDraught"),
BituminousBlast: require("./BituminousBlast"),
BlasphemousAct: require("./BlasphemousAct"),
BlazingArchon: require("./BlazingArchon"),
BlindObedience: require("./BlindObedience"),
BlinkmothUrn: require("./BlinkmothUrn"),
BloodbraidElf: require("./BloodbraidElf"),
BloodTyrant: require("./BloodTyrant"),
Bonehoard: require("./Bonehoard"),
Boompile: require("./Boompile"),
BorosCharm: require("./BorosCharm"),
BorosGarrison: require("./BorosGarrison"),
BravetheSands: require("./BravetheSands"),
BreathofFury: require("./BreathofFury"),
BredfortheHunt: require("./BredfortheHunt"),
BreyaEtheriumShaper: require("./BreyaEtheriumShaper"),
BruseTarlBoorishHerder: require("./BruseTarlBoorishHerder"),
BrutalHordechief: require("./BrutalHordechief"),
Burgeoning: require("./Burgeoning"),
BuriedRuin: require("./BuriedRuin"),
CatharsCrusade: require("./CatharsCrusade"),
CauldronofSouls: require("./CauldronofSouls"),
CavesofKoilos: require("./CavesofKoilos"),
ChainofVapor: require("./ChainofVapor"),
ChampionofLambholt: require("./ChampionofLambholt"),
ChaosWarp: require("./ChaosWarp"),
ChargingCinderhorn: require("./ChargingCinderhorn"),
ChasmSkulker: require("./ChasmSkulker"),
ChiefEngineer: require("./ChiefEngineer"),
ChromaticLantern: require("./ChromaticLantern"),
CitadelSiege: require("./CitadelSiege"),
ClanDefiance: require("./ClanDefiance"),
CoastalBreach: require("./CoastalBreach"),
CoilingOracle: require("./CoilingOracle"),
CollectiveVoyage: require("./CollectiveVoyage"),
CommandersSphere: require("./CommandersSphere"),
CommandTower: require("./CommandTower"),
ConquerorsFlail: require("./ConquerorsFlail"),
ConsumingAberration: require("./ConsumingAberration"),
CorpsejackMenace: require("./CorpsejackMenace"),
CracklingDoom: require("./CracklingDoom"),
CranialPlating: require("./CranialPlating"),
CruelEntertainment: require("./CruelEntertainment"),
CrumblingNecropolis: require("./CrumblingNecropolis"),
CrystallineCrawler: require("./CrystallineCrawler"),
Cultivate: require("./Cultivate"),
CurseofVengeance: require("./CurseofVengeance"),
CurtainsCall: require("./CurtainsCall"),
CustodiSoulbinders: require("./CustodiSoulbinders"),
DarettiScrapSavant: require("./DarettiScrapSavant"),
DarksteelCitadel: require("./DarksteelCitadel"),
DarksteelIngot: require("./DarksteelIngot"),
DarkwaterCatacombs: require("./DarkwaterCatacombs"),
DauntlessEscort: require("./DauntlessEscort"),
Decimate: require("./Decimate"),
DeepglowSkate: require("./DeepglowSkate"),
DenProtector: require("./DenProtector"),
DevastationTide: require("./DevastationTide"),
DimirAqueduct: require("./DimirAqueduct"),
DisdainfulStroke: require("./DisdainfulStroke"),
DismalBackwater: require("./DismalBackwater"),
DispellersCapsule: require("./DispellersCapsule"),
DivergentTransformations: require("./DivergentTransformations"),
DragonMage: require("./DragonMage"),
DragonskullSummit: require("./DragonskullSummit"),
DreadshipReef: require("./DreadshipReef"),
DuelistsHeritage: require("./DuelistsHeritage"),
Duneblast: require("./Duneblast"),
EdricSpymasterofTrest: require("./EdricSpymasterofTrest"),
EliteScaleguard: require("./EliteScaleguard"),
EmpyrialPlate: require("./EmpyrialPlate"),
EnduringScalelord: require("./EnduringScalelord"),
EntrapmentManeuver: require("./EntrapmentManeuver"),
EtchedOracle: require("./EtchedOracle"),
EtheriumHornSorcerer: require("./EtheriumHornSorcerer"),
EtheriumSculptor: require("./EtheriumSculptor"),
EtherswornAdjudicator: require("./EtherswornAdjudicator"),
Evacuation: require("./Evacuation"),
EverflowingChalice: require("./EverflowingChalice"),
EverlastingTorment: require("./EverlastingTorment"),
EvolutionaryEscalation: require("./EvolutionaryEscalation"),
EvolvingWilds: require("./EvolvingWilds"),
ExecutionersCapsule: require("./ExecutionersCapsule"),
ExoticOrchard: require("./ExoticOrchard"),
FaerieArtisans: require("./FaerieArtisans"),
Farseek: require("./Farseek"),
FarWanderings: require("./FarWanderings"),
FathomMage: require("./FathomMage"),
FellwarStone: require("./FellwarStone"),
Festercreep: require("./Festercreep"),
FiligreeAngel: require("./FiligreeAngel"),
ForbiddenOrchard: require("./ForbiddenOrchard"),
Forest: require("./Forest"),
ForgottenAncient: require("./ForgottenAncient"),
FrenziedFugue: require("./FrenziedFugue"),
FrontierBivouac: require("./FrontierBivouac"),
Gamekeeper: require("./Gamekeeper"),
GhastlyConscription: require("./GhastlyConscription"),
GhaveGuruofSpores: require("./GhaveGuruofSpores"),
GhostlyPrison: require("./GhostlyPrison"),
GlintEyeNephilim: require("./GlintEyeNephilim"),
GoblinSpymaster: require("./GoblinSpymaster"),
GodoBanditWarlord: require("./GodoBanditWarlord"),
GolgariRotFarm: require("./GolgariRotFarm"),
GolgariSignet: require("./GolgariSignet"),
GrabtheReins: require("./GrabtheReins"),
GrandColiseum: require("./GrandColiseum"),
GraveUpheaval: require("./GraveUpheaval"),
GripofPhyresis: require("./GripofPhyresis"),
GruulSignet: require("./GruulSignet"),
GruulTurf: require("./GruulTurf"),
Guiltfeeder: require("./Guiltfeeder"),
GwafaHazidProfiteer: require("./GwafaHazidProfiteer"),
HannaShipsNavigator: require("./HannaShipsNavigator"),
HardenedScales: require("./HardenedScales"),
HellkiteIgniter: require("./HellkiteIgniter"),
HellkiteTyrant: require("./HellkiteTyrant"),
HomewardPath: require("./HomewardPath"),
HoofprintsoftheStag: require("./HoofprintsoftheStag"),
HorizonChimera: require("./HorizonChimera"),
HowlingMine: require("./HowlingMine"),
HumbleDefector: require("./HumbleDefector"),
HushwingGryff: require("./HushwingGryff"),
IchorWellspring: require("./IchorWellspring"),
IkraShidiqitheUsurper: require("./IkraShidiqitheUsurper"),
InGarruksWake: require("./InGarruksWake"),
InspiringCall: require("./InspiringCall"),
IroasGodofVictory: require("./IroasGodofVictory"),
IshaiOjutaiDragonspeaker: require("./IshaiOjutaiDragonspeaker"),
Island: require("./Island"),
IzzetBoilerworks: require("./IzzetBoilerworks"),
JorKadeenthePrevailer: require("./JorKadeenthePrevailer"),
JungleHollow: require("./JungleHollow"),
JungleShrine: require("./JungleShrine"),
JuniperOrderRanger: require("./JuniperOrderRanger"),
KalonianHydra: require("./KalonianHydra"),
KarplusanForest: require("./KarplusanForest"),
KazuulTyrantoftheCliffs: require("./KazuulTyrantoftheCliffs"),
KeeningStone: require("./KeeningStone"),
KodamasReach: require("./KodamasReach"),
KorozdaGuildmage: require("./KorozdaGuildmage"),
KraumLudevicsOpus: require("./KraumLudevicsOpus"),
KrosanVerge: require("./KrosanVerge"),
KydeleChosenofKruphix: require("./KydeleChosenofKruphix"),
KynaiosandTiroofMeletis: require("./KynaiosandTiroofMeletis"),
Languish: require("./Languish"),
Lavalanche: require("./Lavalanche"),
LightningGreaves: require("./LightningGreaves"),
LoxodonWarhammer: require("./LoxodonWarhammer"),
LudevicNecroAlchemist: require("./LudevicNecroAlchemist"),
LurkingPredators: require("./LurkingPredators"),
MagusoftheWill: require("./MagusoftheWill"),
ManagorgerHydra: require("./ManagorgerHydra"),
ManifoldInsights: require("./ManifoldInsights"),
MasterBiomancer: require("./MasterBiomancer"),
MasterofEtherium: require("./MasterofEtherium"),
MentoroftheMeek: require("./MentoroftheMeek"),
MercilessEviction: require("./MercilessEviction"),
MigratoryRoute: require("./MigratoryRoute"),
MindsAglow: require("./MindsAglow"),
MirrorEntity: require("./MirrorEntity"),
Mirrorweave: require("./Mirrorweave"),
Mortify: require("./Mortify"),
MosswortBridge: require("./MosswortBridge"),
Mountain: require("./Mountain"),
MurmuringBosk: require("./MurmuringBosk"),
Mycoloth: require("./Mycoloth"),
MycosynthWellspring: require("./MycosynthWellspring"),
MyrBattlesphere: require("./MyrBattlesphere"),
MyriadLandscape: require("./MyriadLandscape"),
MyrRetriever: require("./MyrRetriever"),
MysticMonastery: require("./MysticMonastery"),
NathoftheGiltLeaf: require("./NathoftheGiltLeaf"),
NayaCharm: require("./NayaCharm"),
Necrogenesis: require("./Necrogenesis"),
Necroplasm: require("./Necroplasm"),
NevinyrralsDisk: require("./NevinyrralsDisk"),
NomadOutpost: require("./NomadOutpost"),
OathofDruids: require("./OathofDruids"),
Oblation: require("./Oblation"),
OpalPalace: require("./OpalPalace"),
OpentheVaults: require("./OpentheVaults"),
OpulentPalace: require("./OpulentPalace"),
OrderChaos: require("./OrderChaos"),
OrzhovAdvokist: require("./OrzhovAdvokist"),
OrzhovBasilica: require("./OrzhovBasilica"),
OrzhovSignet: require("./OrzhovSignet"),
PartingThoughts: require("./PartingThoughts"),
PastinFlames: require("./PastinFlames"),
PhyrexianRebirth: require("./PhyrexianRebirth"),
Plains: require("./Plains"),
PrimevalProtector: require("./PrimevalProtector"),
PrismaticGeoscope: require("./PrismaticGeoscope"),
ProgenitorMimic: require("./ProgenitorMimic"),
Propaganda: require("./Propaganda"),
PsychosisCrawler: require("./PsychosisCrawler"),
Putrefy: require("./Putrefy"),
QuirionExplorer: require("./QuirionExplorer"),
RakdosCarnarium: require("./RakdosCarnarium"),
RakdosCharm: require("./RakdosCharm"),
RakdosSignet: require("./RakdosSignet"),
RampantGrowth: require("./RampantGrowth"),
RavosSoultender: require("./RavosSoultender"),
ReadtheRunes: require("./ReadtheRunes"),
RealmSeekers: require("./RealmSeekers"),
ReforgetheSoul: require("./ReforgetheSoul"),
ReinsofPower: require("./ReinsofPower"),
ReliquaryTower: require("./ReliquaryTower"),
Reveillark: require("./Reveillark"),
ReversetheSands: require("./ReversetheSands"),
ReyhanLastoftheAbzan: require("./ReyhanLastoftheAbzan"),
RitesofFlourishing: require("./RitesofFlourishing"),
RootboundCrag: require("./RootboundCrag"),
Rubblehulk: require("./Rubblehulk"),
RuggedHighlands: require("./RuggedHighlands"),
RunehornHellkite: require("./RunehornHellkite"),
RuptureSpire: require("./RuptureSpire"),
SakuraTribeElder: require("./SakuraTribeElder"),
SanctumGargoyle: require("./SanctumGargoyle"), | SaskiatheUnyielding: require("./SaskiatheUnyielding"),
SatyrWayfinder: require("./SatyrWayfinder"),
SavageLands: require("./SavageLands"),
ScavengingOoze: require("./ScavengingOoze"),
SeasideCitadel: require("./SeasideCitadel"),
SeatoftheSynod: require("./SeatoftheSynod"),
SeedsofRenewal: require("./SeedsofRenewal"),
SelesnyaGuildmage: require("./SelesnyaGuildmage"),
SelesnyaSanctuary: require("./SelesnyaSanctuary"),
SelflessSquire: require("./SelflessSquire"),
SelvalaExplorerReturned: require("./SelvalaExplorerReturned"),
ShadowbloodRidge: require("./ShadowbloodRidge"),
ShamanicRevelation: require("./ShamanicRevelation"),
SharuumtheHegemon: require("./SharuumtheHegemon"),
ShimmerMyr: require("./ShimmerMyr"),
SidarKondoofJamuraa: require("./SidarKondoofJamuraa"),
SilasRennSeekerAdept: require("./SilasRennSeekerAdept"),
SimicGrowthChamber: require("./SimicGrowthChamber"),
SimicSignet: require("./SimicSignet"),
Skullclamp: require("./Skullclamp"),
SlobadGoblinTinkerer: require("./SlobadGoblinTinkerer"),
SolemnSimulacrum: require("./SolemnSimulacrum"),
SolidarityofHeroes: require("./SolidarityofHeroes"),
SolRing: require("./SolRing"),
SoulofNewPhyrexia: require("./SoulofNewPhyrexia"),
SpellheartChimera: require("./SpellheartChimera"),
Spelltwine: require("./Spelltwine"),
SphereofSafety: require("./SphereofSafety"),
SphinxSummoner: require("./SphinxSummoner"),
SpinerockKnoll: require("./SpinerockKnoll"),
SpittingImage: require("./SpittingImage"),
StalkingVengeance: require("./StalkingVengeance"),
StonehoofChieftain: require("./StonehoofChieftain"),
SublimeExhalation: require("./SublimeExhalation"),
Sunforger: require("./Sunforger"),
SungrassPrairie: require("./SungrassPrairie"),
SunpetalGrove: require("./SunpetalGrove"),
Swamp: require("./Swamp"),
SwanSong: require("./SwanSong"),
SwiftfootBoots: require("./SwiftfootBoots"),
SwiftwaterCliffs: require("./SwiftwaterCliffs"),
SwordstoPlowshares: require("./SwordstoPlowshares"),
SydriGalvanicGenius: require("./SydriGalvanicGenius"),
SylvanReclamation: require("./SylvanReclamation"),
SylvokExplorer: require("./SylvokExplorer"),
TanatheBloodsower: require("./TanatheBloodsower"),
TaureanMauler: require("./TaureanMauler"),
TempleBell: require("./TempleBell"),
TempleoftheFalseGod: require("./TempleoftheFalseGod"),
TemptwithDiscovery: require("./TemptwithDiscovery"),
Terminate: require("./Terminate"),
TerramorphicExpanse: require("./TerramorphicExpanse"),
TezzeretsGambit: require("./TezzeretsGambit"),
TheloniteHermit: require("./TheloniteHermit"),
ThopterFoundry: require("./ThopterFoundry"),
ThornwoodFalls: require("./ThornwoodFalls"),
ThrasiosTritonHero: require("./ThrasiosTritonHero"),
Thrummingbird: require("./Thrummingbird"),
ThunderfootBaloth: require("./ThunderfootBaloth"),
TradingPost: require("./TradingPost"),
TransguildPromenade: require("./TransguildPromenade"),
TrashforTreasure: require("./TrashforTreasure"),
TreacherousTerrain: require("./TreacherousTerrain"),
TreasureCruise: require("./TreasureCruise"),
TrialError: require("./TrialError"),
TrinketMage: require("./TrinketMage"),
TuskguardCaptain: require("./TuskguardCaptain"),
TymnatheWeaver: require("./TymnatheWeaver"),
UndergroundRiver: require("./UndergroundRiver"),
UtterEnd: require("./UtterEnd"),
VedalkenEngineer: require("./VedalkenEngineer"),
VensersJournal: require("./VensersJournal"),
VeteranExplorer: require("./VeteranExplorer"),
VialSmashertheFierce: require("./VialSmashertheFierce"),
VolcanicVision: require("./VolcanicVision"),
VoreloftheHullClade: require("./VoreloftheHullClade"),
VulturousZombie: require("./VulturousZombie"),
WallofBlossoms: require("./WallofBlossoms"),
WasteNot: require("./WasteNot"),
WaveofReckoning: require("./WaveofReckoning"),
WheelofFate: require("./WheelofFate"),
WhimsoftheFates: require("./WhimsoftheFates"),
Whipflare: require("./Whipflare"),
WhisperingMadness: require("./WhisperingMadness"),
WhispersilkCloak: require("./WhispersilkCloak"),
WightofPrecinctSix: require("./WightofPrecinctSix"),
WildBeastmaster: require("./WildBeastmaster"),
WildernessElemental: require("./WildernessElemental"),
WindbornMuse: require("./WindbornMuse"),
WindbriskHeights: require("./WindbriskHeights"),
Windfall: require("./Windfall"),
WormHarvest: require("./WormHarvest"),
YidrisMaelstromWielder: require("./YidrisMaelstromWielder"),
ZedruutheGreathearted: require("./ZedruutheGreathearted"),
ZhurTaaDruid: require("./ZhurTaaDruid")
};
if (window) {if (!window.mtgSets) { window.mtgSets = {}; } window.mtgSets.setC16 = module.exports;} | SandsteppeCitadel: require("./SandsteppeCitadel"),
Sangromancer: require("./Sangromancer"), | random_line_split |
utils.rs | use crate::config::{config, CheckLook, CritterRates, MovingRates, SenseRates};
use tnf_common::{
dll::param_getters,
engine_types::{critter::Critter, map::Map},
primitives::{Hex, MaybeInvalid},
utils::map::{
get_distance_hex,
server::{get_hex_in_path, get_hex_in_path_wall},
HexExt,
},
};
#[no_mangle]
pub extern "C" fn get_hex_coord_wall(
map: Option<&MaybeInvalid<Map>>,
hex_x: u16,
hex_y: u16,
end_x: &mut u16,
end_y: &mut u16,
angle: f32,
dist: u32,
) {
if let Some(map) = map.and_then(MaybeInvalid::validate) {
let end_hex = get_hex_in_path_wall(
map,
Hex { x: hex_x, y: hex_y },
Hex {
x: *end_x,
y: *end_y,
},
angle,
dist,
);
*end_x = end_hex.x;
*end_y = end_hex.y;
}
}
#[no_mangle]
pub extern "C" fn get_hex_coord(
map: Option<&MaybeInvalid<Map>>,
hex_x: u16,
hex_y: u16,
end_x: &mut u16,
end_y: &mut u16,
angle: f32,
dist: u32,
) {
if let Some(map) = map.and_then(MaybeInvalid::validate) {
let end_hex = get_hex_in_path(
map,
Hex { x: hex_x, y: hex_y },
Hex {
x: *end_x,
y: *end_y,
},
angle,
dist,
);
*end_x = end_hex.x;
*end_y = end_hex.y;
}
}
/*
#[no_mangle]
pub extern "C" fn test_hex_flags(
map: Option<&MaybeInvalid<Map>>,
hex_x: u16,
hex_y: u16,
raked: bool,
passed: bool,
) {
if let Some(map) = map.and_then(MaybeInvalid::validate) {
let hex = Hex { x: hex_x, y: hex_y };
let flags = map.get_hex_flags_with_proto(hex);
let mut wrong = false;
if raked != map.is_hex_raked(hex) {
wrong = true;
print!("Raked - should be {}, but {}; ", raked, !raked);
}
if passed != map.is_hex_passed(hex) {
wrong = true;
print!("Passed - should be {}, but {}; ", passed, !passed);
}
if wrong {
println!("Hex: {:?}, flags: {:016b}", hex, flags);
}
}
}
*/
macro_rules! validate {
($this:expr, $default:expr) => {
match $this.and_then(MaybeInvalid::validate) {
Some(this) => this,
None => return $default,
}
};
}
/*
#[no_mangle]
pub extern "C" fn is_gM(Critter& player) | {
if( !player.IsPlayer() ) return false;
if( !isLoadedGMs )
LoadGMs( player, 0, 0, 0 );
if( player.StatBase[ ST_ACCESS_LEVEL ] < ACCESS_MODER && ( player.GetAccess() >= ACCESS_MODER || isPocketGM( player.Id ) ) )
player.StatBase[ ST_ACCESS_LEVEL ] = ACCESS_MODER;
return player.StatBase[ ST_ACCESS_LEVEL ] >= ACCESS_MODER && ( checkVision ? player.ParamBase[ QST_VISION ] > 0 : true );
}*/
#[no_mangle]
pub extern "C" fn check_look(
map: Option<&MaybeInvalid<Map>>,
cr: Option<&MaybeInvalid<Critter>>,
opponent: Option<&MaybeInvalid<Critter>>,
) -> bool {
// Consider remove this
let map = validate!(map, false);
let cr = validate!(cr, false);
let opponent = validate!(opponent, false);
let config = &config().check_look;
let smart = check_look_smart(config, map, cr, opponent);
/*let old = check_look_old(config, map, cr, opponent);
if old != smart {
println!("old != smart: {:?} != {:?}", old, smart);
}*/
/*let mut config_default = CheckLook::default();
config_default.npc_fast.enable = config.npc_fast.enable;
let smart_default = check_look_smart(&config_default, map, cr, opponent);
if smart != smart_default {
println!("smart != smart_default: {:?} != {:?}", smart, smart_default);
}*/
smart
}
fn check_look_smart(config: &CheckLook, map: &Map, cr: &Critter, opponent: &Critter) -> bool {
if map.proto_id() == config.map_utility_start
&& opponent.is_player()
&& cr.is_player()
&& !cr.have_gm_vision()
{
return false;
}
let cr_hex = cr.hex();
let opp_hex = opponent.hex();
let dist = cr_hex.get_distance(opp_hex);
use tnf_common::{defines::CritterParam, defines_fo4rp::param::Param};
let cr_vision = cr.uparam(Param::QST_VISION);
let opp_invis = opponent.uparam(Param::QST_INVIS);
if cr_vision >= dist && opp_invis <= dist {
return true;
}
if opp_invis != 0 && (opp_invis - 1) < dist {
// && ( !( cr.IsPlayer() ) || cr.IsPlayer() && !isGM( cr ) ) )
return false;
}
if opp_invis > dist || cr_vision >= dist {
return true;
}
let cr_perception = param_getters::getParam_Perception(cr, 0) as u32;
assert!(cr_perception >= 1 && cr_perception <= 10);
fn basic_dist(rates: &CritterRates, perception: u32) -> u32 {
rates.basic_bonus + perception * rates.basic_perception_rate
}
let self_is_npc = cr.is_npc();
if self_is_npc {
if cr.is_dead() {
return false;
}
let npc_fast = &config.npc_fast;
if npc_fast.enable && cr.ProtoId >= npc_fast.fast_from && cr.ProtoId <= npc_fast.fast_to {
return basic_dist(&config.senses[npc_fast.sense_index].npc, cr_perception) >= dist;
}
}
let start_dir = cr_hex.get_direction(opp_hex);
let mut look_dir = i8::abs(start_dir as i8 - cr.Dir as i8); //Направление
if look_dir > 3 {
look_dir = 6 - look_dir
}
assert!(look_dir >= 0 && look_dir <= 3);
fn moving_rate(cr: &Critter, rates: &MovingRates) -> f32 {
if cr.IsRuning {
rates.running
//} else if cr.is_walking() {
// rates.walking
} else {
rates.still
}
}
fn sense_mul(rates: &SenseRates, cr: &Critter, opponent: &Critter, look_dir: i8) -> f32 {
rates.dir_rate[look_dir as usize]
* moving_rate(cr, &rates.self_moving)
* moving_rate(opponent, &rates.target_moving)
}
let senses: Vec<(f32, f32)> = config
.senses
.iter()
.map(|sense| {
let critter_rates = if self_is_npc {
&sense.npc
} else {
&sense.player
};
let basic_dist = basic_dist(critter_rates, cr_perception);
let sense_mul = sense_mul(sense, cr, opponent, look_dir);
let wall_mul = sense.wall_rate[cr_perception as usize - 1];
let clear_dist = basic_dist as f32 * sense_mul;
//dbg!(clear_dist, wall_mul);
(clear_dist, wall_mul)
})
.collect();
let max_dist = senses
.iter()
.map(|(dist, _wall_mul)| *dist as u32)
.max()
.expect("At least one sense");
//dbg!(dist, max_dist);
if dist > max_dist {
return false;
}
let end_hex = get_hex_in_path(map, cr_hex, opp_hex, 0.0, dist);
if dist > cr_hex.get_distance(end_hex) {
for (basic_dist, wall_mull) in senses {
//dbg!(basic_dist * wall_mull, dist);
if (basic_dist * wall_mull) as u32 >= dist {
return true;
}
}
false
} else {
true
}
}
fn _check_look_old(config: &CheckLook, map: &Map, cr: &Critter, opponent: &Critter) -> bool {
if map.proto_id() == config.map_utility_start
&& opponent.is_player()
&& cr.is_player()
&& !cr.have_gm_vision()
{
return false;
}
let cr_hex = cr.hex();
let opp_hex = opponent.hex();
let dist = cr_hex.get_distance(opp_hex);
use tnf_common::{defines::CritterParam, defines_fo4rp::param::Param};
let cr_vision = cr.uparam(Param::QST_VISION);
let cr_perception = param_getters::getParam_Perception(cr, 0) as u32;
//cr.uparam(Param::ST_PERCEPTION);
let opp_invis = opponent.uparam(Param::QST_INVIS);
if cr_vision >= dist && opp_invis <= dist {
return true;
}
if opp_invis != 0 && (opp_invis - 1) < dist {
// && ( !( cr.IsPlayer() ) || cr.IsPlayer() && !isGM( cr ) ) )
return false;
}
if opp_invis > dist || cr_vision >= dist {
return true;
}
if cr.is_npc() {
// упрощенный расчет для нпц, учитывает только дистанцию
if cr.is_dead() {
return false;
}
let cfg_npc = &config.npc_fast;
if cfg_npc.enable && cr.ProtoId >= cfg_npc.fast_from && cr.ProtoId <= cfg_npc.fast_to {
return (10 + cr_perception * 5) >= dist;
}
}
let max_view = 10 + cr_perception * 5;
let mut max_hear = 5 + cr_perception * 2;
if cr.is_npc() {
max_hear += 20;
}
let mut is_view = true;
let mut is_hear = true;
let start_dir = cr_hex.get_direction(opp_hex);
let mut look_dir = i8::abs(start_dir as i8 - cr.Dir as i8); //Направление
if look_dir > 3 {
look_dir = 6 - look_dir
}
let (view_mul, mut hear_mul) = match look_dir {
0 => (1.0, 0.8),
1 => (0.8, 1.0),
2 => (0.5, 0.8),
3 => (0.4, 0.8),
_ => unreachable!(),
};
if opponent.IsRuning {
hear_mul *= 3.0;
}
if cr.IsRuning {
hear_mul *= 0.8;
}
let max_view = (max_view as f32 * view_mul) as u32;
let tmp_max_hear = (max_hear as f32 * hear_mul) as u32;
//dbg!(dist, max_view, tmp_max_hear);
// new optimization: return early if distance larger than max_view and max_hear
if dist > max_view && dist > tmp_max_hear {
return false;
}
let end_hex = get_hex_in_path(map, cr_hex, opp_hex, 0.0, dist);
if dist > cr_hex.get_distance(end_hex) {
is_view = false;
hear_mul *= match cr_perception {
1..=4 => 0.1,
5..=8 => 0.3,
9..=10 => 0.4,
_ => 1.0,
};
}
if dist > max_view {
is_view = false;
}
let max_hear = (max_hear as f32 * hear_mul) as u32;
//dbg!(max_hear);
if dist > max_hear {
is_hear = false;
}
return is_view || is_hear;
} | random_line_split | |
utils.rs | use crate::config::{config, CheckLook, CritterRates, MovingRates, SenseRates};
use tnf_common::{
dll::param_getters,
engine_types::{critter::Critter, map::Map},
primitives::{Hex, MaybeInvalid},
utils::map::{
get_distance_hex,
server::{get_hex_in_path, get_hex_in_path_wall},
HexExt,
},
};
#[no_mangle]
pub extern "C" fn get_hex_coord_wall(
map: Option<&MaybeInvalid<Map>>,
hex_x: u16,
hex_y: u16,
end_x: &mut u16,
end_y: &mut u16,
angle: f32,
dist: u32,
) {
if let Some(map) = map.and_then(MaybeInvalid::validate) {
let end_hex = get_hex_in_path_wall(
map,
Hex { x: hex_x, y: hex_y },
Hex {
x: *end_x,
y: *end_y,
},
angle,
dist,
);
*end_x = end_hex.x;
*end_y = end_hex.y;
}
}
#[no_mangle]
pub extern "C" fn get_hex_coord(
map: Option<&MaybeInvalid<Map>>,
hex_x: u16,
hex_y: u16,
end_x: &mut u16,
end_y: &mut u16,
angle: f32,
dist: u32,
) {
if let Some(map) = map.and_then(MaybeInvalid::validate) {
let end_hex = get_hex_in_path(
map,
Hex { x: hex_x, y: hex_y },
Hex {
x: *end_x,
y: *end_y,
},
angle,
dist,
);
*end_x = end_hex.x;
*end_y = end_hex.y;
}
}
/*
#[no_mangle]
pub extern "C" fn test_hex_flags(
map: Option<&MaybeInvalid<Map>>,
hex_x: u16,
hex_y: u16,
raked: bool,
passed: bool,
) {
if let Some(map) = map.and_then(MaybeInvalid::validate) {
let hex = Hex { x: hex_x, y: hex_y };
let flags = map.get_hex_flags_with_proto(hex);
let mut wrong = false;
if raked != map.is_hex_raked(hex) {
wrong = true;
print!("Raked - should be {}, but {}; ", raked, !raked);
}
if passed != map.is_hex_passed(hex) {
wrong = true;
print!("Passed - should be {}, but {}; ", passed, !passed);
}
if wrong {
println!("Hex: {:?}, flags: {:016b}", hex, flags);
}
}
}
*/
macro_rules! validate {
($this:expr, $default:expr) => {
match $this.and_then(MaybeInvalid::validate) {
Some(this) => this,
None => return $default,
}
};
}
/*
#[no_mangle]
pub extern "C" fn is_gM(Critter& player)
{
if( !player.IsPlayer() ) return false;
if( !isLoadedGMs )
LoadGMs( player, 0, 0, 0 );
if( player.StatBase[ ST_ACCESS_LEVEL ] < ACCESS_MODER && ( player.GetAccess() >= ACCESS_MODER || isPocketGM( player.Id ) ) )
player.StatBase[ ST_ACCESS_LEVEL ] = ACCESS_MODER;
return player.StatBase[ ST_ACCESS_LEVEL ] >= ACCESS_MODER && ( checkVision ? player.ParamBase[ QST_VISION ] > 0 : true );
}*/
#[no_mangle]
pub extern "C" fn check_look(
map: Option<&MaybeInvalid<Map>>,
cr: Option<&MaybeInvalid<Critter>>,
opponent: Option<&MaybeInvalid<Critter>>,
) -> bool {
// Consider remove this
let map = validate!(map, false);
let cr = validate!(cr, false);
let opponent = validate!(opponent, false);
let config = &config().check_look;
let smart = check_look_smart(config, map, cr, opponent);
/*let old = check_look_old(config, map, cr, opponent);
if old != smart {
println!("old != smart: {:?} != {:?}", old, smart);
}*/
/*let mut config_default = CheckLook::default();
config_default.npc_fast.enable = config.npc_fast.enable;
let smart_default = check_look_smart(&config_default, map, cr, opponent);
if smart != smart_default {
println!("smart != smart_default: {:?} != {:?}", smart, smart_default);
}*/
smart
}
fn check_look_smart(config: &CheckLook, map: &Map, cr: &Critter, opponent: &Critter) -> bool {
if map.proto_id() == config.map_utility_start
&& opponent.is_player()
&& cr.is_player()
&& !cr.have_gm_vision()
{
return false;
}
let cr_hex = cr.hex();
let opp_hex = opponent.hex();
let dist = cr_hex.get_distance(opp_hex);
use tnf_common::{defines::CritterParam, defines_fo4rp::param::Param};
let cr_vision = cr.uparam(Param::QST_VISION);
let opp_invis = opponent.uparam(Param::QST_INVIS);
if cr_vision >= dist && opp_invis <= dist {
return true;
}
if opp_invis != 0 && (opp_invis - 1) < dist {
// && ( !( cr.IsPlayer() ) || cr.IsPlayer() && !isGM( cr ) ) )
return false;
}
if opp_invis > dist || cr_vision >= dist {
return true;
}
let cr_perception = param_getters::getParam_Perception(cr, 0) as u32;
assert!(cr_perception >= 1 && cr_perception <= 10);
fn basic_dist(rates: &CritterRates, perception: u32) -> u32 {
rates.basic_bonus + perception * rates.basic_perception_rate
}
let self_is_npc = cr.is_npc();
if self_is_npc {
if cr.is_dead() {
return false;
}
let npc_fast = &config.npc_fast;
if npc_fast.enable && cr.ProtoId >= npc_fast.fast_from && cr.ProtoId <= npc_fast.fast_to {
return basic_dist(&config.senses[npc_fast.sense_index].npc, cr_perception) >= dist;
}
}
let start_dir = cr_hex.get_direction(opp_hex);
let mut look_dir = i8::abs(start_dir as i8 - cr.Dir as i8); //Направление
if look_dir > 3 {
look_dir = 6 - look_dir
}
assert!(look_dir >= 0 && look_dir <= 3);
fn moving_rate | er, rates: &MovingRates) -> f32 {
if cr.IsRuning {
rates.running
//} else if cr.is_walking() {
// rates.walking
} else {
rates.still
}
}
fn sense_mul(rates: &SenseRates, cr: &Critter, opponent: &Critter, look_dir: i8) -> f32 {
rates.dir_rate[look_dir as usize]
* moving_rate(cr, &rates.self_moving)
* moving_rate(opponent, &rates.target_moving)
}
let senses: Vec<(f32, f32)> = config
.senses
.iter()
.map(|sense| {
let critter_rates = if self_is_npc {
&sense.npc
} else {
&sense.player
};
let basic_dist = basic_dist(critter_rates, cr_perception);
let sense_mul = sense_mul(sense, cr, opponent, look_dir);
let wall_mul = sense.wall_rate[cr_perception as usize - 1];
let clear_dist = basic_dist as f32 * sense_mul;
//dbg!(clear_dist, wall_mul);
(clear_dist, wall_mul)
})
.collect();
let max_dist = senses
.iter()
.map(|(dist, _wall_mul)| *dist as u32)
.max()
.expect("At least one sense");
//dbg!(dist, max_dist);
if dist > max_dist {
return false;
}
let end_hex = get_hex_in_path(map, cr_hex, opp_hex, 0.0, dist);
if dist > cr_hex.get_distance(end_hex) {
for (basic_dist, wall_mull) in senses {
//dbg!(basic_dist * wall_mull, dist);
if (basic_dist * wall_mull) as u32 >= dist {
return true;
}
}
false
} else {
true
}
}
fn _check_look_old(config: &CheckLook, map: &Map, cr: &Critter, opponent: &Critter) -> bool {
if map.proto_id() == config.map_utility_start
&& opponent.is_player()
&& cr.is_player()
&& !cr.have_gm_vision()
{
return false;
}
let cr_hex = cr.hex();
let opp_hex = opponent.hex();
let dist = cr_hex.get_distance(opp_hex);
use tnf_common::{defines::CritterParam, defines_fo4rp::param::Param};
let cr_vision = cr.uparam(Param::QST_VISION);
let cr_perception = param_getters::getParam_Perception(cr, 0) as u32;
//cr.uparam(Param::ST_PERCEPTION);
let opp_invis = opponent.uparam(Param::QST_INVIS);
if cr_vision >= dist && opp_invis <= dist {
return true;
}
if opp_invis != 0 && (opp_invis - 1) < dist {
// && ( !( cr.IsPlayer() ) || cr.IsPlayer() && !isGM( cr ) ) )
return false;
}
if opp_invis > dist || cr_vision >= dist {
return true;
}
if cr.is_npc() {
// упрощенный расчет для нпц, учитывает только дистанцию
if cr.is_dead() {
return false;
}
let cfg_npc = &config.npc_fast;
if cfg_npc.enable && cr.ProtoId >= cfg_npc.fast_from && cr.ProtoId <= cfg_npc.fast_to {
return (10 + cr_perception * 5) >= dist;
}
}
let max_view = 10 + cr_perception * 5;
let mut max_hear = 5 + cr_perception * 2;
if cr.is_npc() {
max_hear += 20;
}
let mut is_view = true;
let mut is_hear = true;
let start_dir = cr_hex.get_direction(opp_hex);
let mut look_dir = i8::abs(start_dir as i8 - cr.Dir as i8); //Направление
if look_dir > 3 {
look_dir = 6 - look_dir
}
let (view_mul, mut hear_mul) = match look_dir {
0 => (1.0, 0.8),
1 => (0.8, 1.0),
2 => (0.5, 0.8),
3 => (0.4, 0.8),
_ => unreachable!(),
};
if opponent.IsRuning {
hear_mul *= 3.0;
}
if cr.IsRuning {
hear_mul *= 0.8;
}
let max_view = (max_view as f32 * view_mul) as u32;
let tmp_max_hear = (max_hear as f32 * hear_mul) as u32;
//dbg!(dist, max_view, tmp_max_hear);
// new optimization: return early if distance larger than max_view and max_hear
if dist > max_view && dist > tmp_max_hear {
return false;
}
let end_hex = get_hex_in_path(map, cr_hex, opp_hex, 0.0, dist);
if dist > cr_hex.get_distance(end_hex) {
is_view = false;
hear_mul *= match cr_perception {
1..=4 => 0.1,
5..=8 => 0.3,
9..=10 => 0.4,
_ => 1.0,
};
}
if dist > max_view {
is_view = false;
}
let max_hear = (max_hear as f32 * hear_mul) as u32;
//dbg!(max_hear);
if dist > max_hear {
is_hear = false;
}
return is_view || is_hear;
}
| (cr: &Critt | identifier_name |
utils.rs | use crate::config::{config, CheckLook, CritterRates, MovingRates, SenseRates};
use tnf_common::{
dll::param_getters,
engine_types::{critter::Critter, map::Map},
primitives::{Hex, MaybeInvalid},
utils::map::{
get_distance_hex,
server::{get_hex_in_path, get_hex_in_path_wall},
HexExt,
},
};
#[no_mangle]
pub extern "C" fn get_hex_coord_wall(
map: Option<&MaybeInvalid<Map>>,
hex_x: u16,
hex_y: u16,
end_x: &mut u16,
end_y: &mut u16,
angle: f32,
dist: u32,
) {
if let Some(map) = map.and_then(MaybeInvalid::validate) {
let end_hex = get_hex_in_path_wall(
map,
Hex { x: hex_x, y: hex_y },
Hex {
x: *end_x,
y: *end_y,
},
angle,
dist,
);
*end_x = end_hex.x;
*end_y = end_hex.y;
}
}
#[no_mangle]
pub extern "C" fn get_hex_coord(
map: Option<&MaybeInvalid<Map>>,
hex_x: u16,
hex_y: u16,
end_x: &mut u16,
end_y: &mut u16,
angle: f32,
dist: u32,
) {
if let Some(map) = map.and_then(MaybeInvalid::validate) {
let end_hex = get_hex_in_path(
map,
Hex { x: hex_x, y: hex_y },
Hex {
x: *end_x,
y: *end_y,
},
angle,
dist,
);
*end_x = end_hex.x;
*end_y = end_hex.y;
}
}
/*
#[no_mangle]
pub extern "C" fn test_hex_flags(
map: Option<&MaybeInvalid<Map>>,
hex_x: u16,
hex_y: u16,
raked: bool,
passed: bool,
) {
if let Some(map) = map.and_then(MaybeInvalid::validate) {
let hex = Hex { x: hex_x, y: hex_y };
let flags = map.get_hex_flags_with_proto(hex);
let mut wrong = false;
if raked != map.is_hex_raked(hex) {
wrong = true;
print!("Raked - should be {}, but {}; ", raked, !raked);
}
if passed != map.is_hex_passed(hex) {
wrong = true;
print!("Passed - should be {}, but {}; ", passed, !passed);
}
if wrong {
println!("Hex: {:?}, flags: {:016b}", hex, flags);
}
}
}
*/
macro_rules! validate {
($this:expr, $default:expr) => {
match $this.and_then(MaybeInvalid::validate) {
Some(this) => this,
None => return $default,
}
};
}
/*
#[no_mangle]
pub extern "C" fn is_gM(Critter& player)
{
if( !player.IsPlayer() ) return false;
if( !isLoadedGMs )
LoadGMs( player, 0, 0, 0 );
if( player.StatBase[ ST_ACCESS_LEVEL ] < ACCESS_MODER && ( player.GetAccess() >= ACCESS_MODER || isPocketGM( player.Id ) ) )
player.StatBase[ ST_ACCESS_LEVEL ] = ACCESS_MODER;
return player.StatBase[ ST_ACCESS_LEVEL ] >= ACCESS_MODER && ( checkVision ? player.ParamBase[ QST_VISION ] > 0 : true );
}*/
#[no_mangle]
pub extern "C" fn check_look(
map: Option<&MaybeInvalid<Map>>,
cr: Option<&MaybeInvalid<Critter>>,
opponent: Option<&MaybeInvalid<Critter>>,
) -> bool {
// Consider remove this
let map = validate!(map, false);
let cr = validate!(cr, false);
let opponent = validate!(opponent, false);
let config = &config().check_look;
let smart = check_look_smart(config, map, cr, opponent);
/*let old = check_look_old(config, map, cr, opponent);
if old != smart {
println!("old != smart: {:?} != {:?}", old, smart);
}*/
/*let mut config_default = CheckLook::default();
config_default.npc_fast.enable = config.npc_fast.enable;
let smart_default = check_look_smart(&config_default, map, cr, opponent);
if smart != smart_default {
println!("smart != smart_default: {:?} != {:?}", smart, smart_default);
}*/
smart
}
fn check_look_smart(config: &CheckLook, map: &Map, cr: &Critter, opponent: &Critter) -> bool {
if map.proto_id() == config.map_utility_start
&& opponent.is_player()
&& cr.is_player()
&& !cr.have_gm_vision()
{
return false;
}
let cr_hex = cr.hex();
let opp_hex = opponent.hex();
let dist = cr_hex.get_distance(opp_hex);
use tnf_common::{defines::CritterParam, defines_fo4rp::param::Param};
let cr_vision = cr.uparam(Param::QST_VISION);
let opp_invis = opponent.uparam(Param::QST_INVIS);
if cr_vision >= dist && opp_invis <= dist {
return true;
}
if opp_invis != 0 && (opp_invis - 1) < dist {
// && ( !( cr.IsPlayer() ) || cr.IsPlayer() && !isGM( cr ) ) )
return false;
}
if opp_invis > dist || cr_vision >= dist {
return true;
}
let cr_perception = param_getters::getParam_Perception(cr, 0) as u32;
assert!(cr_perception >= 1 && cr_perception <= 10);
fn basic_dist(rates: &CritterRates, perception: u32) -> u32 {
rates.basic_bonus + perception * rates.basic_perception_rate
}
let self_is_npc = cr.is_npc();
if self_is_npc {
if cr.is_dead() {
return false;
}
let npc_fast = &config.npc_fast;
if npc_fast.enable && cr.ProtoId >= npc_fast.fast_from && cr.ProtoId <= npc_fast.fast_to {
return basic_dist(&config.senses[npc_fast.sense_index].npc, cr_perception) >= dist;
}
}
let start_dir = cr_hex.get_direction(opp_hex);
let mut look_dir = i8::abs(start_dir as i8 - cr.Dir as i8); //Направление
if look_dir > 3 {
look_dir = 6 - look_dir
}
assert!(look_dir >= 0 && look_dir <= 3);
fn moving_rate(cr: &Critter, rates: &MovingRates) -> f32 {
if cr.IsRuning {
rates.running
//} else if cr.is_walking() {
// rates.walking
} else {
rates.still
}
}
fn sense_mul(rates: &SenseRates, cr: &Critter, opponent: &Critter, look_dir: i8) -> f32 {
r | enses: Vec<(f32, f32)> = config
.senses
.iter()
.map(|sense| {
let critter_rates = if self_is_npc {
&sense.npc
} else {
&sense.player
};
let basic_dist = basic_dist(critter_rates, cr_perception);
let sense_mul = sense_mul(sense, cr, opponent, look_dir);
let wall_mul = sense.wall_rate[cr_perception as usize - 1];
let clear_dist = basic_dist as f32 * sense_mul;
//dbg!(clear_dist, wall_mul);
(clear_dist, wall_mul)
})
.collect();
let max_dist = senses
.iter()
.map(|(dist, _wall_mul)| *dist as u32)
.max()
.expect("At least one sense");
//dbg!(dist, max_dist);
if dist > max_dist {
return false;
}
let end_hex = get_hex_in_path(map, cr_hex, opp_hex, 0.0, dist);
if dist > cr_hex.get_distance(end_hex) {
for (basic_dist, wall_mull) in senses {
//dbg!(basic_dist * wall_mull, dist);
if (basic_dist * wall_mull) as u32 >= dist {
return true;
}
}
false
} else {
true
}
}
fn _check_look_old(config: &CheckLook, map: &Map, cr: &Critter, opponent: &Critter) -> bool {
if map.proto_id() == config.map_utility_start
&& opponent.is_player()
&& cr.is_player()
&& !cr.have_gm_vision()
{
return false;
}
let cr_hex = cr.hex();
let opp_hex = opponent.hex();
let dist = cr_hex.get_distance(opp_hex);
use tnf_common::{defines::CritterParam, defines_fo4rp::param::Param};
let cr_vision = cr.uparam(Param::QST_VISION);
let cr_perception = param_getters::getParam_Perception(cr, 0) as u32;
//cr.uparam(Param::ST_PERCEPTION);
let opp_invis = opponent.uparam(Param::QST_INVIS);
if cr_vision >= dist && opp_invis <= dist {
return true;
}
if opp_invis != 0 && (opp_invis - 1) < dist {
// && ( !( cr.IsPlayer() ) || cr.IsPlayer() && !isGM( cr ) ) )
return false;
}
if opp_invis > dist || cr_vision >= dist {
return true;
}
if cr.is_npc() {
// упрощенный расчет для нпц, учитывает только дистанцию
if cr.is_dead() {
return false;
}
let cfg_npc = &config.npc_fast;
if cfg_npc.enable && cr.ProtoId >= cfg_npc.fast_from && cr.ProtoId <= cfg_npc.fast_to {
return (10 + cr_perception * 5) >= dist;
}
}
let max_view = 10 + cr_perception * 5;
let mut max_hear = 5 + cr_perception * 2;
if cr.is_npc() {
max_hear += 20;
}
let mut is_view = true;
let mut is_hear = true;
let start_dir = cr_hex.get_direction(opp_hex);
let mut look_dir = i8::abs(start_dir as i8 - cr.Dir as i8); //Направление
if look_dir > 3 {
look_dir = 6 - look_dir
}
let (view_mul, mut hear_mul) = match look_dir {
0 => (1.0, 0.8),
1 => (0.8, 1.0),
2 => (0.5, 0.8),
3 => (0.4, 0.8),
_ => unreachable!(),
};
if opponent.IsRuning {
hear_mul *= 3.0;
}
if cr.IsRuning {
hear_mul *= 0.8;
}
let max_view = (max_view as f32 * view_mul) as u32;
let tmp_max_hear = (max_hear as f32 * hear_mul) as u32;
//dbg!(dist, max_view, tmp_max_hear);
// new optimization: return early if distance larger than max_view and max_hear
if dist > max_view && dist > tmp_max_hear {
return false;
}
let end_hex = get_hex_in_path(map, cr_hex, opp_hex, 0.0, dist);
if dist > cr_hex.get_distance(end_hex) {
is_view = false;
hear_mul *= match cr_perception {
1..=4 => 0.1,
5..=8 => 0.3,
9..=10 => 0.4,
_ => 1.0,
};
}
if dist > max_view {
is_view = false;
}
let max_hear = (max_hear as f32 * hear_mul) as u32;
//dbg!(max_hear);
if dist > max_hear {
is_hear = false;
}
return is_view || is_hear;
}
| ates.dir_rate[look_dir as usize]
* moving_rate(cr, &rates.self_moving)
* moving_rate(opponent, &rates.target_moving)
}
let s | identifier_body |
utils.rs | use crate::config::{config, CheckLook, CritterRates, MovingRates, SenseRates};
use tnf_common::{
dll::param_getters,
engine_types::{critter::Critter, map::Map},
primitives::{Hex, MaybeInvalid},
utils::map::{
get_distance_hex,
server::{get_hex_in_path, get_hex_in_path_wall},
HexExt,
},
};
#[no_mangle]
pub extern "C" fn get_hex_coord_wall(
map: Option<&MaybeInvalid<Map>>,
hex_x: u16,
hex_y: u16,
end_x: &mut u16,
end_y: &mut u16,
angle: f32,
dist: u32,
) {
if let Some(map) = map.and_then(MaybeInvalid::validate) {
let end_hex = get_hex_in_path_wall(
map,
Hex { x: hex_x, y: hex_y },
Hex {
x: *end_x,
y: *end_y,
},
angle,
dist,
);
*end_x = end_hex.x;
*end_y = end_hex.y;
}
}
#[no_mangle]
pub extern "C" fn get_hex_coord(
map: Option<&MaybeInvalid<Map>>,
hex_x: u16,
hex_y: u16,
end_x: &mut u16,
end_y: &mut u16,
angle: f32,
dist: u32,
) {
if let Some(map) = map.and_then(MaybeInvalid::validate) {
let end_hex = get_hex_in_path(
map,
Hex { x: hex_x, y: hex_y },
Hex {
x: *end_x,
y: *end_y,
},
angle,
dist,
);
*end_x = end_hex.x;
*end_y = end_hex.y;
}
}
/*
#[no_mangle]
pub extern "C" fn test_hex_flags(
map: Option<&MaybeInvalid<Map>>,
hex_x: u16,
hex_y: u16,
raked: bool,
passed: bool,
) {
if let Some(map) = map.and_then(MaybeInvalid::validate) {
let hex = Hex { x: hex_x, y: hex_y };
let flags = map.get_hex_flags_with_proto(hex);
let mut wrong = false;
if raked != map.is_hex_raked(hex) {
wrong = true;
print!("Raked - should be {}, but {}; ", raked, !raked);
}
if passed != map.is_hex_passed(hex) {
wrong = true;
print!("Passed - should be {}, but {}; ", passed, !passed);
}
if wrong {
println!("Hex: {:?}, flags: {:016b}", hex, flags);
}
}
}
*/
macro_rules! validate {
($this:expr, $default:expr) => {
match $this.and_then(MaybeInvalid::validate) {
Some(this) => this,
None => return $default,
}
};
}
/*
#[no_mangle]
pub extern "C" fn is_gM(Critter& player)
{
if( !player.IsPlayer() ) return false;
if( !isLoadedGMs )
LoadGMs( player, 0, 0, 0 );
if( player.StatBase[ ST_ACCESS_LEVEL ] < ACCESS_MODER && ( player.GetAccess() >= ACCESS_MODER || isPocketGM( player.Id ) ) )
player.StatBase[ ST_ACCESS_LEVEL ] = ACCESS_MODER;
return player.StatBase[ ST_ACCESS_LEVEL ] >= ACCESS_MODER && ( checkVision ? player.ParamBase[ QST_VISION ] > 0 : true );
}*/
#[no_mangle]
pub extern "C" fn check_look(
map: Option<&MaybeInvalid<Map>>,
cr: Option<&MaybeInvalid<Critter>>,
opponent: Option<&MaybeInvalid<Critter>>,
) -> bool {
// Consider remove this
let map = validate!(map, false);
let cr = validate!(cr, false);
let opponent = validate!(opponent, false);
let config = &config().check_look;
let smart = check_look_smart(config, map, cr, opponent);
/*let old = check_look_old(config, map, cr, opponent);
if old != smart {
println!("old != smart: {:?} != {:?}", old, smart);
}*/
/*let mut config_default = CheckLook::default();
config_default.npc_fast.enable = config.npc_fast.enable;
let smart_default = check_look_smart(&config_default, map, cr, opponent);
if smart != smart_default {
println!("smart != smart_default: {:?} != {:?}", smart, smart_default);
}*/
smart
}
fn check_look_smart(config: &CheckLook, map: &Map, cr: &Critter, opponent: &Critter) -> bool {
if map.proto_id() == config.map_utility_start
&& opponent.is_player()
&& cr.is_player()
&& !cr.have_gm_vision()
{
return false;
}
let cr_hex = cr.hex();
let opp_hex = opponent.hex();
let dist = cr_hex.get_distance(opp_hex);
use tnf_common::{defines::CritterParam, defines_fo4rp::param::Param};
let cr_vision = cr.uparam(Param::QST_VISION);
let opp_invis = opponent.uparam(Param::QST_INVIS);
if cr_vision >= dist && opp_invis <= dist {
return true;
}
if opp_invis != 0 && (opp_invis - 1) < dist {
// && ( !( cr.IsPlayer() ) || cr.IsPlayer() && !isGM( cr ) ) )
return false;
}
if opp_invis > dist || cr_vision >= dist {
return true;
}
let cr_perception = param_getters::getParam_Perception(cr, 0) as u32;
assert!(cr_perception >= 1 && cr_perception <= 10);
fn basic_dist(rates: &CritterRates, perception: u32) -> u32 {
rates.basic_bonus + perception * rates.basic_perception_rate
}
let self_is_npc = cr.is_npc();
if self_is_npc {
if cr.is_dead() {
return false;
}
let npc_fast = &config.npc_fast;
if npc_fast.enable && cr.ProtoId >= npc_fast.fast_from && cr.ProtoId <= npc_fast.fast_to {
return basic_dist(&config.senses[npc_fast.sense_index].npc, cr_perception) >= dist;
}
}
let start_dir = cr_hex.get_direction(opp_hex);
let mut look_dir = i8::abs(start_dir as i8 - cr.Dir as i8); //Направление
if look_dir > 3 {
look_dir = 6 - look_dir
}
assert!(look_dir >= 0 && look_dir <= 3);
fn moving_rate(cr: &Critter, rates: &MovingRates) -> f32 {
if cr.IsRuning {
rates.running
//} else if cr.is_walking() {
// rates.walking
} else {
rates.still
}
}
fn sense_mul(rates: &SenseRates, cr: &Critter, opponent: &Critter, look_dir: i8) -> f32 {
rates.dir_rate[look_dir as usize]
* moving_rate(cr, &rates.self_moving)
* moving_rate(opponent, &rates.target_moving)
}
let senses: Vec<(f32, f32)> = config
.senses
.iter()
.map(|sense| {
let critter_rates = if self_is_npc {
&sense.npc
} else {
&sense.player
};
let basic_dist = basic_dist(critter_rates, cr_perception);
let sense_mul = sense_mul(sense, cr, opponent, look_dir);
let wall_mul = sense.wall_rate[cr_perception as usize - 1];
let clear_dist = basic_dist as f32 * sense_mul;
//dbg!(clear_dist, wall_mul);
(clear_dist, wall_mul)
})
.collect();
let max_dist = senses
.iter()
.map(|(dist, _wall_mul)| *dist as u32)
.max()
.expect("At least one sense");
//dbg!(dist, max_dist);
if dist > max_dist {
return false;
}
let end_hex = get_hex_in_path(map, cr_hex, opp_hex, 0.0, dist);
if dist > cr_hex.get_distance(end_hex) {
for (basic_dist, wall_mull) in senses {
//dbg!(basic_dist * wall_mull, dist);
if (basic_dist * wall_mull) as u32 >= dist {
return true;
}
}
false
} else {
true
}
}
fn _check_look_old(config: &CheckLook, map: &Map, cr: &Critter, opponent: &Critter) -> bool {
if map.proto_id() == config.map_utility_start
&& opponent.is_player()
&& cr.is_player()
&& !cr.have_gm_vision()
{
return false;
}
let cr_hex = cr.hex();
let opp_hex = opponent.hex();
let dist = cr_hex.get_distance(opp_hex);
use tnf_common::{defines::CritterParam, defines_fo4rp::param::Param};
let cr_vision = cr.uparam(Param::QST_VISION);
let cr_perception = param_getters::getParam_Perception(cr, 0) as u32;
//cr.uparam(Param::ST_PERCEPTION);
let opp_invis = opponent.uparam(Param::QST_INVIS);
if cr_vision >= dist && opp_invis <= dist {
return true;
}
if opp_invis != 0 && (opp_invis - 1) < dist {
// && ( !( cr.IsPlayer() ) || cr.IsPlayer() && !isGM( cr ) ) )
return false;
}
if opp_invis > dist || cr_vision >= dist {
return true;
}
if cr.is_npc() {
// упрощенный расчет для нпц, учитывает только дистанцию
if cr.is_dead() {
return false;
}
let cfg_npc = &config.npc_fast;
if cfg_npc.enable && cr.ProtoId >= cfg_npc.fast_from && cr.ProtoId <= cfg_npc.fast_to {
return (10 + cr_perception * 5) >= dist;
}
}
let max_view = 10 + cr_perception * 5;
let mut max_hear = 5 + cr_perception * 2;
if cr.is_npc() {
max_hear += 20;
}
let mut is_view = true;
let mut is_hear = true;
let start_dir = cr_hex.get_direction(opp_hex);
let mut look_dir = i8::abs(start_dir as i8 - cr.Dir as i8); //Направление
if look_dir > 3 {
look_dir = 6 - look_dir
}
let (view_mul, mut hear_mul) = match look_dir {
0 => (1.0, 0.8),
1 => (0.8, 1.0),
2 => (0.5, 0.8),
3 => (0.4, 0.8),
_ => unreachable!(),
};
if opponent.IsRuning {
hear_mul *= 3.0;
}
if cr.IsRuning {
hear_mul *= 0.8;
}
let max_view = (max_view as f32 * view_mul) as u32;
let tmp_max_hear = (max_hear as f32 * hear_mul) as u32;
//dbg!(dist, max_view, tmp_max_hear);
// new optimization: return early if distance larger than max_view and max_hear
if dist > max_view && dist > tmp_max_hear {
return false;
}
let end_hex = get_hex_in_path(map | ;
if dist > cr_hex.get_distance(end_hex) {
is_view = false;
hear_mul *= match cr_perception {
1..=4 => 0.1,
5..=8 => 0.3,
9..=10 => 0.4,
_ => 1.0,
};
}
if dist > max_view {
is_view = false;
}
let max_hear = (max_hear as f32 * hear_mul) as u32;
//dbg!(max_hear);
if dist > max_hear {
is_hear = false;
}
return is_view || is_hear;
}
| , cr_hex, opp_hex, 0.0, dist) | conditional_block |
token_flow.go | package cautils
import (
"crypto"
"encoding/base64"
"encoding/json"
"fmt"
"net/url"
"strings"
"time"
"github.com/pkg/errors"
"github.com/smallstep/certificates/authority/provisioner"
"github.com/smallstep/cli/crypto/pki"
"github.com/smallstep/cli/errs"
"github.com/smallstep/cli/exec"
"github.com/smallstep/cli/jose"
"github.com/smallstep/cli/ui"
"github.com/smallstep/cli/utils"
"github.com/urfave/cli"
)
type provisionersSelect struct {
Name string
Provisioner provisioner.Interface
}
// Token signing types
const (
SignType = iota
RevokeType
SSHUserSignType
SSHHostSignType
)
// parseAudience creates the ca audience url from the ca-url
func parseAudience(ctx *cli.Context, tokType int) (string, error) {
caURL := ctx.String("ca-url")
if len(caURL) == 0 {
return "", errs.RequiredFlag(ctx, "ca-url") | audience, err := url.Parse(caURL)
if err != nil {
return "", errs.InvalidFlagValue(ctx, "ca-url", caURL, "")
}
switch strings.ToLower(audience.Scheme) {
case "https", "":
var path string
switch tokType {
// default
case SignType, SSHUserSignType, SSHHostSignType:
path = "/1.0/sign"
// revocation token
case RevokeType:
path = "/1.0/revoke"
default:
return "", errors.Errorf("unexpected token type: %d", tokType)
}
audience.Scheme = "https"
audience = audience.ResolveReference(&url.URL{Path: path})
return audience.String(), nil
default:
return "", errs.InvalidFlagValue(ctx, "ca-url", caURL, "")
}
}
// ErrACMEToken is the error type returned when the user attempts a Token Flow
// while using an ACME provisioner.
type ErrACMEToken struct {
Name string
}
// Error implements the error interface.
func (e *ErrACMEToken) Error() string {
return "step ACME provisioners do not support token auth flows"
}
// NewTokenFlow implements the common flow used to generate a token
func NewTokenFlow(ctx *cli.Context, typ int, subject string, sans []string, caURL, root string, notBefore, notAfter time.Time, certNotBefore, certNotAfter provisioner.TimeDuration) (string, error) {
// Get audience from ca-url
audience, err := parseAudience(ctx, typ)
if err != nil {
return "", err
}
provisioners, err := pki.GetProvisioners(caURL, root)
if err != nil {
return "", err
}
p, err := provisionerPrompt(ctx, provisioners)
if err != nil {
return "", err
}
switch p := p.(type) {
case *provisioner.OIDC: // Run step oauth
args := []string{"oauth", "--oidc", "--bare",
"--provider", p.ConfigurationEndpoint,
"--client-id", p.ClientID, "--client-secret", p.ClientSecret}
if ctx.Bool("console") {
args = append(args, "--console")
}
if p.ListenAddress != "" {
args = append(args, "--listen", p.ListenAddress)
}
out, err := exec.Step(args...)
if err != nil {
return "", err
}
return strings.TrimSpace(string(out)), nil
case *provisioner.GCP: // Do the identity request to get the token
sharedContext.DisableCustomSANs = p.DisableCustomSANs
return p.GetIdentityToken(subject, caURL)
case *provisioner.AWS: // Do the identity request to get the token
sharedContext.DisableCustomSANs = p.DisableCustomSANs
return p.GetIdentityToken(subject, caURL)
case *provisioner.Azure: // Do the identity request to get the token
sharedContext.DisableCustomSANs = p.DisableCustomSANs
return p.GetIdentityToken(subject, caURL)
case *provisioner.ACME: // Return an error with the provisioner ID
return "", &ErrACMEToken{p.GetName()}
}
// JWK provisioner
prov, ok := p.(*provisioner.JWK)
if !ok {
return "", errors.Errorf("unknown provisioner type %T", p)
}
kid := prov.Key.KeyID
issuer := prov.Name
var opts []jose.Option
if passwordFile := ctx.String("password-file"); len(passwordFile) != 0 {
opts = append(opts, jose.WithPasswordFile(passwordFile))
}
var jwk *jose.JSONWebKey
if keyFile := ctx.String("key"); len(keyFile) == 0 {
// Get private key from CA
encrypted, err := pki.GetProvisionerKey(caURL, root, kid)
if err != nil {
return "", err
}
// Add template with check mark
opts = append(opts, jose.WithUIOptions(
ui.WithPromptTemplates(ui.PromptTemplates()),
))
decrypted, err := jose.Decrypt("Please enter the password to decrypt the provisioner key", []byte(encrypted), opts...)
if err != nil {
return "", err
}
jwk = new(jose.JSONWebKey)
if err := json.Unmarshal(decrypted, jwk); err != nil {
return "", errors.Wrap(err, "error unmarshalling provisioning key")
}
} else {
// Get private key from given key file
jwk, err = jose.ParseKey(keyFile, opts...)
if err != nil {
return "", err
}
}
// Generate token
tokenGen := NewTokenGenerator(kid, issuer, audience, root, notBefore, notAfter, jwk)
switch typ {
case SignType:
return tokenGen.SignToken(subject, sans)
case RevokeType:
return tokenGen.RevokeToken(subject)
case SSHUserSignType:
return tokenGen.SignSSHToken(subject, provisioner.SSHUserCert, sans, certNotBefore, certNotAfter)
case SSHHostSignType:
return tokenGen.SignSSHToken(subject, provisioner.SSHHostCert, sans, certNotBefore, certNotAfter)
default:
return tokenGen.Token(subject)
}
}
// OfflineTokenFlow generates a provisioning token using either
// 1. static configuration from ca.json (created with `step ca init`)
// 2. input from command line flags
// These two options are mutually exclusive and priority is given to ca.json.
func OfflineTokenFlow(ctx *cli.Context, typ int, subject string, sans []string, notBefore, notAfter time.Time, certNotBefore, certNotAfter provisioner.TimeDuration) (string, error) {
caConfig := ctx.String("ca-config")
if caConfig == "" {
return "", errs.InvalidFlagValue(ctx, "ca-config", "", "")
}
// Using the offline CA
if utils.FileExists(caConfig) {
offlineCA, err := NewOfflineCA(caConfig)
if err != nil {
return "", err
}
return offlineCA.GenerateToken(ctx, typ, subject, sans, notBefore, notAfter, certNotBefore, certNotAfter)
}
kid := ctx.String("kid")
issuer := ctx.String("issuer")
keyFile := ctx.String("key")
passwordFile := ctx.String("password-file")
// Require issuer and keyFile if ca.json does not exists.
// kid can be passed or created using jwk.Thumbprint.
switch {
case len(issuer) == 0:
return "", errs.RequiredWithFlag(ctx, "offline", "issuer")
case len(keyFile) == 0:
return "", errs.RequiredWithFlag(ctx, "offline", "key")
}
// Get audience from ca-url
audience, err := parseAudience(ctx, typ)
if err != nil {
return "", err
}
// Get root from argument or default location
root := ctx.String("root")
if len(root) == 0 {
root = pki.GetRootCAPath()
if utils.FileExists(root) {
return "", errs.RequiredFlag(ctx, "root")
}
}
// Parse key
var opts []jose.Option
if len(passwordFile) != 0 {
opts = append(opts, jose.WithPasswordFile(passwordFile))
}
jwk, err := jose.ParseKey(keyFile, opts...)
if err != nil {
return "", err
}
// Get the kid if it's not passed as an argument
if len(kid) == 0 {
hash, err := jwk.Thumbprint(crypto.SHA256)
if err != nil {
return "", errors.Wrap(err, "error generating JWK thumbprint")
}
kid = base64.RawURLEncoding.EncodeToString(hash)
}
// Generate token
tokenGen := NewTokenGenerator(kid, issuer, audience, root, notBefore, notAfter, jwk)
switch typ {
case SignType:
return tokenGen.SignToken(subject, sans)
case RevokeType:
return tokenGen.RevokeToken(subject)
case SSHUserSignType:
return tokenGen.SignSSHToken(subject, provisioner.SSHUserCert, sans, certNotBefore, certNotAfter)
case SSHHostSignType:
return tokenGen.SignSSHToken(subject, provisioner.SSHHostCert, sans, certNotBefore, certNotAfter)
default:
return tokenGen.Token(subject)
}
}
func provisionerPrompt(ctx *cli.Context, provisioners provisioner.List) (provisioner.Interface, error) {
// Filter by type
provisioners = provisionerFilter(provisioners, func(p provisioner.Interface) bool {
switch p.GetType() {
case provisioner.TypeJWK, provisioner.TypeOIDC, provisioner.TypeACME:
return true
case provisioner.TypeGCP, provisioner.TypeAWS, provisioner.TypeAzure:
return true
default:
return false
}
})
if len(provisioners) == 0 {
return nil, errors.New("cannot create a new token: the CA does not have any provisioner configured")
}
// Filter by kid
if kid := ctx.String("kid"); len(kid) != 0 {
provisioners = provisionerFilter(provisioners, func(p provisioner.Interface) bool {
switch p := p.(type) {
case *provisioner.JWK:
return p.Key.KeyID == kid
case *provisioner.OIDC:
return p.ClientID == kid
default:
return false
}
})
if len(provisioners) == 0 {
return nil, errs.InvalidFlagValue(ctx, "kid", kid, "")
}
}
// Filter by issuer (provisioner name)
if issuer := ctx.String("issuer"); len(issuer) != 0 {
provisioners = provisionerFilter(provisioners, func(p provisioner.Interface) bool {
return p.GetName() == issuer
})
if len(provisioners) == 0 {
return nil, errs.InvalidFlagValue(ctx, "issuer", issuer, "")
}
}
// Select provisioner
var items []*provisionersSelect
for _, prov := range provisioners {
switch p := prov.(type) {
case *provisioner.JWK:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s) [kid: %s]", p.Name, p.GetType(), p.Key.KeyID),
Provisioner: p,
})
case *provisioner.OIDC:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s) [client: %s]", p.Name, p.GetType(), p.ClientID),
Provisioner: p,
})
case *provisioner.GCP:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s)", p.Name, p.GetType()),
Provisioner: p,
})
case *provisioner.AWS:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s)", p.Name, p.GetType()),
Provisioner: p,
})
case *provisioner.Azure:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s) [tenant: %s]", p.Name, p.GetType(), p.TenantID),
Provisioner: p,
})
case *provisioner.ACME:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s)", p.Name, p.GetType()),
Provisioner: p,
})
default:
continue
}
}
if len(items) == 1 {
if err := ui.PrintSelected("Provisioner", items[0].Name); err != nil {
return nil, err
}
return items[0].Provisioner, nil
}
i, _, err := ui.Select("What provisioner key do you want to use?", items, ui.WithSelectTemplates(ui.NamedSelectTemplates("Provisioner")))
if err != nil {
return nil, err
}
return items[i].Provisioner, nil
}
// provisionerFilter returns a slice of provisioners that pass the given filter.
func provisionerFilter(provisioners provisioner.List, f func(provisioner.Interface) bool) provisioner.List {
var result provisioner.List
for _, p := range provisioners {
if f(p) {
result = append(result, p)
}
}
return result
} | }
| random_line_split |
token_flow.go | package cautils
import (
"crypto"
"encoding/base64"
"encoding/json"
"fmt"
"net/url"
"strings"
"time"
"github.com/pkg/errors"
"github.com/smallstep/certificates/authority/provisioner"
"github.com/smallstep/cli/crypto/pki"
"github.com/smallstep/cli/errs"
"github.com/smallstep/cli/exec"
"github.com/smallstep/cli/jose"
"github.com/smallstep/cli/ui"
"github.com/smallstep/cli/utils"
"github.com/urfave/cli"
)
type provisionersSelect struct {
Name string
Provisioner provisioner.Interface
}
// Token signing types
const (
SignType = iota
RevokeType
SSHUserSignType
SSHHostSignType
)
// parseAudience creates the ca audience url from the ca-url
func parseAudience(ctx *cli.Context, tokType int) (string, error) {
caURL := ctx.String("ca-url")
if len(caURL) == 0 {
return "", errs.RequiredFlag(ctx, "ca-url")
}
audience, err := url.Parse(caURL)
if err != nil {
return "", errs.InvalidFlagValue(ctx, "ca-url", caURL, "")
}
switch strings.ToLower(audience.Scheme) {
case "https", "":
var path string
switch tokType {
// default
case SignType, SSHUserSignType, SSHHostSignType:
path = "/1.0/sign"
// revocation token
case RevokeType:
path = "/1.0/revoke"
default:
return "", errors.Errorf("unexpected token type: %d", tokType)
}
audience.Scheme = "https"
audience = audience.ResolveReference(&url.URL{Path: path})
return audience.String(), nil
default:
return "", errs.InvalidFlagValue(ctx, "ca-url", caURL, "")
}
}
// ErrACMEToken is the error type returned when the user attempts a Token Flow
// while using an ACME provisioner.
type ErrACMEToken struct {
Name string
}
// Error implements the error interface.
func (e *ErrACMEToken) Error() string {
return "step ACME provisioners do not support token auth flows"
}
// NewTokenFlow implements the common flow used to generate a token
func NewTokenFlow(ctx *cli.Context, typ int, subject string, sans []string, caURL, root string, notBefore, notAfter time.Time, certNotBefore, certNotAfter provisioner.TimeDuration) (string, error) {
// Get audience from ca-url
audience, err := parseAudience(ctx, typ)
if err != nil {
return "", err
}
provisioners, err := pki.GetProvisioners(caURL, root)
if err != nil {
return "", err
}
p, err := provisionerPrompt(ctx, provisioners)
if err != nil {
return "", err
}
switch p := p.(type) {
case *provisioner.OIDC: // Run step oauth
args := []string{"oauth", "--oidc", "--bare",
"--provider", p.ConfigurationEndpoint,
"--client-id", p.ClientID, "--client-secret", p.ClientSecret}
if ctx.Bool("console") {
args = append(args, "--console")
}
if p.ListenAddress != "" {
args = append(args, "--listen", p.ListenAddress)
}
out, err := exec.Step(args...)
if err != nil {
return "", err
}
return strings.TrimSpace(string(out)), nil
case *provisioner.GCP: // Do the identity request to get the token
sharedContext.DisableCustomSANs = p.DisableCustomSANs
return p.GetIdentityToken(subject, caURL)
case *provisioner.AWS: // Do the identity request to get the token
sharedContext.DisableCustomSANs = p.DisableCustomSANs
return p.GetIdentityToken(subject, caURL)
case *provisioner.Azure: // Do the identity request to get the token
sharedContext.DisableCustomSANs = p.DisableCustomSANs
return p.GetIdentityToken(subject, caURL)
case *provisioner.ACME: // Return an error with the provisioner ID
return "", &ErrACMEToken{p.GetName()}
}
// JWK provisioner
prov, ok := p.(*provisioner.JWK)
if !ok {
return "", errors.Errorf("unknown provisioner type %T", p)
}
kid := prov.Key.KeyID
issuer := prov.Name
var opts []jose.Option
if passwordFile := ctx.String("password-file"); len(passwordFile) != 0 {
opts = append(opts, jose.WithPasswordFile(passwordFile))
}
var jwk *jose.JSONWebKey
if keyFile := ctx.String("key"); len(keyFile) == 0 {
// Get private key from CA
encrypted, err := pki.GetProvisionerKey(caURL, root, kid)
if err != nil {
return "", err
}
// Add template with check mark
opts = append(opts, jose.WithUIOptions(
ui.WithPromptTemplates(ui.PromptTemplates()),
))
decrypted, err := jose.Decrypt("Please enter the password to decrypt the provisioner key", []byte(encrypted), opts...)
if err != nil {
return "", err
}
jwk = new(jose.JSONWebKey)
if err := json.Unmarshal(decrypted, jwk); err != nil {
return "", errors.Wrap(err, "error unmarshalling provisioning key")
}
} else {
// Get private key from given key file
jwk, err = jose.ParseKey(keyFile, opts...)
if err != nil {
return "", err
}
}
// Generate token
tokenGen := NewTokenGenerator(kid, issuer, audience, root, notBefore, notAfter, jwk)
switch typ {
case SignType:
return tokenGen.SignToken(subject, sans)
case RevokeType:
return tokenGen.RevokeToken(subject)
case SSHUserSignType:
return tokenGen.SignSSHToken(subject, provisioner.SSHUserCert, sans, certNotBefore, certNotAfter)
case SSHHostSignType:
return tokenGen.SignSSHToken(subject, provisioner.SSHHostCert, sans, certNotBefore, certNotAfter)
default:
return tokenGen.Token(subject)
}
}
// OfflineTokenFlow generates a provisioning token using either
// 1. static configuration from ca.json (created with `step ca init`)
// 2. input from command line flags
// These two options are mutually exclusive and priority is given to ca.json.
func OfflineTokenFlow(ctx *cli.Context, typ int, subject string, sans []string, notBefore, notAfter time.Time, certNotBefore, certNotAfter provisioner.TimeDuration) (string, error) {
caConfig := ctx.String("ca-config")
if caConfig == "" {
return "", errs.InvalidFlagValue(ctx, "ca-config", "", "")
}
// Using the offline CA
if utils.FileExists(caConfig) {
offlineCA, err := NewOfflineCA(caConfig)
if err != nil {
return "", err
}
return offlineCA.GenerateToken(ctx, typ, subject, sans, notBefore, notAfter, certNotBefore, certNotAfter)
}
kid := ctx.String("kid")
issuer := ctx.String("issuer")
keyFile := ctx.String("key")
passwordFile := ctx.String("password-file")
// Require issuer and keyFile if ca.json does not exists.
// kid can be passed or created using jwk.Thumbprint.
switch {
case len(issuer) == 0:
return "", errs.RequiredWithFlag(ctx, "offline", "issuer")
case len(keyFile) == 0:
return "", errs.RequiredWithFlag(ctx, "offline", "key")
}
// Get audience from ca-url
audience, err := parseAudience(ctx, typ)
if err != nil {
return "", err
}
// Get root from argument or default location
root := ctx.String("root")
if len(root) == 0 {
root = pki.GetRootCAPath()
if utils.FileExists(root) {
return "", errs.RequiredFlag(ctx, "root")
}
}
// Parse key
var opts []jose.Option
if len(passwordFile) != 0 {
opts = append(opts, jose.WithPasswordFile(passwordFile))
}
jwk, err := jose.ParseKey(keyFile, opts...)
if err != nil {
return "", err
}
// Get the kid if it's not passed as an argument
if len(kid) == 0 {
hash, err := jwk.Thumbprint(crypto.SHA256)
if err != nil {
return "", errors.Wrap(err, "error generating JWK thumbprint")
}
kid = base64.RawURLEncoding.EncodeToString(hash)
}
// Generate token
tokenGen := NewTokenGenerator(kid, issuer, audience, root, notBefore, notAfter, jwk)
switch typ {
case SignType:
return tokenGen.SignToken(subject, sans)
case RevokeType:
return tokenGen.RevokeToken(subject)
case SSHUserSignType:
return tokenGen.SignSSHToken(subject, provisioner.SSHUserCert, sans, certNotBefore, certNotAfter)
case SSHHostSignType:
return tokenGen.SignSSHToken(subject, provisioner.SSHHostCert, sans, certNotBefore, certNotAfter)
default:
return tokenGen.Token(subject)
}
}
func provisionerPrompt(ctx *cli.Context, provisioners provisioner.List) (provisioner.Interface, error) {
// Filter by type
provisioners = provisionerFilter(provisioners, func(p provisioner.Interface) bool {
switch p.GetType() {
case provisioner.TypeJWK, provisioner.TypeOIDC, provisioner.TypeACME:
return true
case provisioner.TypeGCP, provisioner.TypeAWS, provisioner.TypeAzure:
return true
default:
return false
}
})
if len(provisioners) == 0 {
return nil, errors.New("cannot create a new token: the CA does not have any provisioner configured")
}
// Filter by kid
if kid := ctx.String("kid"); len(kid) != 0 {
provisioners = provisionerFilter(provisioners, func(p provisioner.Interface) bool {
switch p := p.(type) {
case *provisioner.JWK:
return p.Key.KeyID == kid
case *provisioner.OIDC:
return p.ClientID == kid
default:
return false
}
})
if len(provisioners) == 0 {
return nil, errs.InvalidFlagValue(ctx, "kid", kid, "")
}
}
// Filter by issuer (provisioner name)
if issuer := ctx.String("issuer"); len(issuer) != 0 {
provisioners = provisionerFilter(provisioners, func(p provisioner.Interface) bool {
return p.GetName() == issuer
})
if len(provisioners) == 0 {
return nil, errs.InvalidFlagValue(ctx, "issuer", issuer, "")
}
}
// Select provisioner
var items []*provisionersSelect
for _, prov := range provisioners {
switch p := prov.(type) {
case *provisioner.JWK:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s) [kid: %s]", p.Name, p.GetType(), p.Key.KeyID),
Provisioner: p,
})
case *provisioner.OIDC:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s) [client: %s]", p.Name, p.GetType(), p.ClientID),
Provisioner: p,
})
case *provisioner.GCP:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s)", p.Name, p.GetType()),
Provisioner: p,
})
case *provisioner.AWS:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s)", p.Name, p.GetType()),
Provisioner: p,
})
case *provisioner.Azure:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s) [tenant: %s]", p.Name, p.GetType(), p.TenantID),
Provisioner: p,
})
case *provisioner.ACME:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s)", p.Name, p.GetType()),
Provisioner: p,
})
default:
continue
}
}
if len(items) == 1 {
if err := ui.PrintSelected("Provisioner", items[0].Name); err != nil {
return nil, err
}
return items[0].Provisioner, nil
}
i, _, err := ui.Select("What provisioner key do you want to use?", items, ui.WithSelectTemplates(ui.NamedSelectTemplates("Provisioner")))
if err != nil {
return nil, err
}
return items[i].Provisioner, nil
}
// provisionerFilter returns a slice of provisioners that pass the given filter.
func | (provisioners provisioner.List, f func(provisioner.Interface) bool) provisioner.List {
var result provisioner.List
for _, p := range provisioners {
if f(p) {
result = append(result, p)
}
}
return result
}
| provisionerFilter | identifier_name |
token_flow.go | package cautils
import (
"crypto"
"encoding/base64"
"encoding/json"
"fmt"
"net/url"
"strings"
"time"
"github.com/pkg/errors"
"github.com/smallstep/certificates/authority/provisioner"
"github.com/smallstep/cli/crypto/pki"
"github.com/smallstep/cli/errs"
"github.com/smallstep/cli/exec"
"github.com/smallstep/cli/jose"
"github.com/smallstep/cli/ui"
"github.com/smallstep/cli/utils"
"github.com/urfave/cli"
)
type provisionersSelect struct {
Name string
Provisioner provisioner.Interface
}
// Token signing types
const (
SignType = iota
RevokeType
SSHUserSignType
SSHHostSignType
)
// parseAudience creates the ca audience url from the ca-url
func parseAudience(ctx *cli.Context, tokType int) (string, error) {
caURL := ctx.String("ca-url")
if len(caURL) == 0 {
return "", errs.RequiredFlag(ctx, "ca-url")
}
audience, err := url.Parse(caURL)
if err != nil {
return "", errs.InvalidFlagValue(ctx, "ca-url", caURL, "")
}
switch strings.ToLower(audience.Scheme) {
case "https", "":
var path string
switch tokType {
// default
case SignType, SSHUserSignType, SSHHostSignType:
path = "/1.0/sign"
// revocation token
case RevokeType:
path = "/1.0/revoke"
default:
return "", errors.Errorf("unexpected token type: %d", tokType)
}
audience.Scheme = "https"
audience = audience.ResolveReference(&url.URL{Path: path})
return audience.String(), nil
default:
return "", errs.InvalidFlagValue(ctx, "ca-url", caURL, "")
}
}
// ErrACMEToken is the error type returned when the user attempts a Token Flow
// while using an ACME provisioner.
type ErrACMEToken struct {
Name string
}
// Error implements the error interface.
func (e *ErrACMEToken) Error() string {
return "step ACME provisioners do not support token auth flows"
}
// NewTokenFlow implements the common flow used to generate a token
func NewTokenFlow(ctx *cli.Context, typ int, subject string, sans []string, caURL, root string, notBefore, notAfter time.Time, certNotBefore, certNotAfter provisioner.TimeDuration) (string, error) {
// Get audience from ca-url
audience, err := parseAudience(ctx, typ)
if err != nil {
return "", err
}
provisioners, err := pki.GetProvisioners(caURL, root)
if err != nil {
return "", err
}
p, err := provisionerPrompt(ctx, provisioners)
if err != nil {
return "", err
}
switch p := p.(type) {
case *provisioner.OIDC: // Run step oauth
args := []string{"oauth", "--oidc", "--bare",
"--provider", p.ConfigurationEndpoint,
"--client-id", p.ClientID, "--client-secret", p.ClientSecret}
if ctx.Bool("console") {
args = append(args, "--console")
}
if p.ListenAddress != "" |
out, err := exec.Step(args...)
if err != nil {
return "", err
}
return strings.TrimSpace(string(out)), nil
case *provisioner.GCP: // Do the identity request to get the token
sharedContext.DisableCustomSANs = p.DisableCustomSANs
return p.GetIdentityToken(subject, caURL)
case *provisioner.AWS: // Do the identity request to get the token
sharedContext.DisableCustomSANs = p.DisableCustomSANs
return p.GetIdentityToken(subject, caURL)
case *provisioner.Azure: // Do the identity request to get the token
sharedContext.DisableCustomSANs = p.DisableCustomSANs
return p.GetIdentityToken(subject, caURL)
case *provisioner.ACME: // Return an error with the provisioner ID
return "", &ErrACMEToken{p.GetName()}
}
// JWK provisioner
prov, ok := p.(*provisioner.JWK)
if !ok {
return "", errors.Errorf("unknown provisioner type %T", p)
}
kid := prov.Key.KeyID
issuer := prov.Name
var opts []jose.Option
if passwordFile := ctx.String("password-file"); len(passwordFile) != 0 {
opts = append(opts, jose.WithPasswordFile(passwordFile))
}
var jwk *jose.JSONWebKey
if keyFile := ctx.String("key"); len(keyFile) == 0 {
// Get private key from CA
encrypted, err := pki.GetProvisionerKey(caURL, root, kid)
if err != nil {
return "", err
}
// Add template with check mark
opts = append(opts, jose.WithUIOptions(
ui.WithPromptTemplates(ui.PromptTemplates()),
))
decrypted, err := jose.Decrypt("Please enter the password to decrypt the provisioner key", []byte(encrypted), opts...)
if err != nil {
return "", err
}
jwk = new(jose.JSONWebKey)
if err := json.Unmarshal(decrypted, jwk); err != nil {
return "", errors.Wrap(err, "error unmarshalling provisioning key")
}
} else {
// Get private key from given key file
jwk, err = jose.ParseKey(keyFile, opts...)
if err != nil {
return "", err
}
}
// Generate token
tokenGen := NewTokenGenerator(kid, issuer, audience, root, notBefore, notAfter, jwk)
switch typ {
case SignType:
return tokenGen.SignToken(subject, sans)
case RevokeType:
return tokenGen.RevokeToken(subject)
case SSHUserSignType:
return tokenGen.SignSSHToken(subject, provisioner.SSHUserCert, sans, certNotBefore, certNotAfter)
case SSHHostSignType:
return tokenGen.SignSSHToken(subject, provisioner.SSHHostCert, sans, certNotBefore, certNotAfter)
default:
return tokenGen.Token(subject)
}
}
// OfflineTokenFlow generates a provisioning token using either
// 1. static configuration from ca.json (created with `step ca init`)
// 2. input from command line flags
// These two options are mutually exclusive and priority is given to ca.json.
func OfflineTokenFlow(ctx *cli.Context, typ int, subject string, sans []string, notBefore, notAfter time.Time, certNotBefore, certNotAfter provisioner.TimeDuration) (string, error) {
caConfig := ctx.String("ca-config")
if caConfig == "" {
return "", errs.InvalidFlagValue(ctx, "ca-config", "", "")
}
// Using the offline CA
if utils.FileExists(caConfig) {
offlineCA, err := NewOfflineCA(caConfig)
if err != nil {
return "", err
}
return offlineCA.GenerateToken(ctx, typ, subject, sans, notBefore, notAfter, certNotBefore, certNotAfter)
}
kid := ctx.String("kid")
issuer := ctx.String("issuer")
keyFile := ctx.String("key")
passwordFile := ctx.String("password-file")
// Require issuer and keyFile if ca.json does not exists.
// kid can be passed or created using jwk.Thumbprint.
switch {
case len(issuer) == 0:
return "", errs.RequiredWithFlag(ctx, "offline", "issuer")
case len(keyFile) == 0:
return "", errs.RequiredWithFlag(ctx, "offline", "key")
}
// Get audience from ca-url
audience, err := parseAudience(ctx, typ)
if err != nil {
return "", err
}
// Get root from argument or default location
root := ctx.String("root")
if len(root) == 0 {
root = pki.GetRootCAPath()
if utils.FileExists(root) {
return "", errs.RequiredFlag(ctx, "root")
}
}
// Parse key
var opts []jose.Option
if len(passwordFile) != 0 {
opts = append(opts, jose.WithPasswordFile(passwordFile))
}
jwk, err := jose.ParseKey(keyFile, opts...)
if err != nil {
return "", err
}
// Get the kid if it's not passed as an argument
if len(kid) == 0 {
hash, err := jwk.Thumbprint(crypto.SHA256)
if err != nil {
return "", errors.Wrap(err, "error generating JWK thumbprint")
}
kid = base64.RawURLEncoding.EncodeToString(hash)
}
// Generate token
tokenGen := NewTokenGenerator(kid, issuer, audience, root, notBefore, notAfter, jwk)
switch typ {
case SignType:
return tokenGen.SignToken(subject, sans)
case RevokeType:
return tokenGen.RevokeToken(subject)
case SSHUserSignType:
return tokenGen.SignSSHToken(subject, provisioner.SSHUserCert, sans, certNotBefore, certNotAfter)
case SSHHostSignType:
return tokenGen.SignSSHToken(subject, provisioner.SSHHostCert, sans, certNotBefore, certNotAfter)
default:
return tokenGen.Token(subject)
}
}
func provisionerPrompt(ctx *cli.Context, provisioners provisioner.List) (provisioner.Interface, error) {
// Filter by type
provisioners = provisionerFilter(provisioners, func(p provisioner.Interface) bool {
switch p.GetType() {
case provisioner.TypeJWK, provisioner.TypeOIDC, provisioner.TypeACME:
return true
case provisioner.TypeGCP, provisioner.TypeAWS, provisioner.TypeAzure:
return true
default:
return false
}
})
if len(provisioners) == 0 {
return nil, errors.New("cannot create a new token: the CA does not have any provisioner configured")
}
// Filter by kid
if kid := ctx.String("kid"); len(kid) != 0 {
provisioners = provisionerFilter(provisioners, func(p provisioner.Interface) bool {
switch p := p.(type) {
case *provisioner.JWK:
return p.Key.KeyID == kid
case *provisioner.OIDC:
return p.ClientID == kid
default:
return false
}
})
if len(provisioners) == 0 {
return nil, errs.InvalidFlagValue(ctx, "kid", kid, "")
}
}
// Filter by issuer (provisioner name)
if issuer := ctx.String("issuer"); len(issuer) != 0 {
provisioners = provisionerFilter(provisioners, func(p provisioner.Interface) bool {
return p.GetName() == issuer
})
if len(provisioners) == 0 {
return nil, errs.InvalidFlagValue(ctx, "issuer", issuer, "")
}
}
// Select provisioner
var items []*provisionersSelect
for _, prov := range provisioners {
switch p := prov.(type) {
case *provisioner.JWK:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s) [kid: %s]", p.Name, p.GetType(), p.Key.KeyID),
Provisioner: p,
})
case *provisioner.OIDC:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s) [client: %s]", p.Name, p.GetType(), p.ClientID),
Provisioner: p,
})
case *provisioner.GCP:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s)", p.Name, p.GetType()),
Provisioner: p,
})
case *provisioner.AWS:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s)", p.Name, p.GetType()),
Provisioner: p,
})
case *provisioner.Azure:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s) [tenant: %s]", p.Name, p.GetType(), p.TenantID),
Provisioner: p,
})
case *provisioner.ACME:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s)", p.Name, p.GetType()),
Provisioner: p,
})
default:
continue
}
}
if len(items) == 1 {
if err := ui.PrintSelected("Provisioner", items[0].Name); err != nil {
return nil, err
}
return items[0].Provisioner, nil
}
i, _, err := ui.Select("What provisioner key do you want to use?", items, ui.WithSelectTemplates(ui.NamedSelectTemplates("Provisioner")))
if err != nil {
return nil, err
}
return items[i].Provisioner, nil
}
// provisionerFilter returns a slice of provisioners that pass the given filter.
func provisionerFilter(provisioners provisioner.List, f func(provisioner.Interface) bool) provisioner.List {
var result provisioner.List
for _, p := range provisioners {
if f(p) {
result = append(result, p)
}
}
return result
}
| {
args = append(args, "--listen", p.ListenAddress)
} | conditional_block |
token_flow.go | package cautils
import (
"crypto"
"encoding/base64"
"encoding/json"
"fmt"
"net/url"
"strings"
"time"
"github.com/pkg/errors"
"github.com/smallstep/certificates/authority/provisioner"
"github.com/smallstep/cli/crypto/pki"
"github.com/smallstep/cli/errs"
"github.com/smallstep/cli/exec"
"github.com/smallstep/cli/jose"
"github.com/smallstep/cli/ui"
"github.com/smallstep/cli/utils"
"github.com/urfave/cli"
)
type provisionersSelect struct {
Name string
Provisioner provisioner.Interface
}
// Token signing types
const (
SignType = iota
RevokeType
SSHUserSignType
SSHHostSignType
)
// parseAudience creates the ca audience url from the ca-url
func parseAudience(ctx *cli.Context, tokType int) (string, error) {
caURL := ctx.String("ca-url")
if len(caURL) == 0 {
return "", errs.RequiredFlag(ctx, "ca-url")
}
audience, err := url.Parse(caURL)
if err != nil {
return "", errs.InvalidFlagValue(ctx, "ca-url", caURL, "")
}
switch strings.ToLower(audience.Scheme) {
case "https", "":
var path string
switch tokType {
// default
case SignType, SSHUserSignType, SSHHostSignType:
path = "/1.0/sign"
// revocation token
case RevokeType:
path = "/1.0/revoke"
default:
return "", errors.Errorf("unexpected token type: %d", tokType)
}
audience.Scheme = "https"
audience = audience.ResolveReference(&url.URL{Path: path})
return audience.String(), nil
default:
return "", errs.InvalidFlagValue(ctx, "ca-url", caURL, "")
}
}
// ErrACMEToken is the error type returned when the user attempts a Token Flow
// while using an ACME provisioner.
type ErrACMEToken struct {
Name string
}
// Error implements the error interface.
func (e *ErrACMEToken) Error() string {
return "step ACME provisioners do not support token auth flows"
}
// NewTokenFlow implements the common flow used to generate a token
func NewTokenFlow(ctx *cli.Context, typ int, subject string, sans []string, caURL, root string, notBefore, notAfter time.Time, certNotBefore, certNotAfter provisioner.TimeDuration) (string, error) {
// Get audience from ca-url
audience, err := parseAudience(ctx, typ)
if err != nil {
return "", err
}
provisioners, err := pki.GetProvisioners(caURL, root)
if err != nil {
return "", err
}
p, err := provisionerPrompt(ctx, provisioners)
if err != nil {
return "", err
}
switch p := p.(type) {
case *provisioner.OIDC: // Run step oauth
args := []string{"oauth", "--oidc", "--bare",
"--provider", p.ConfigurationEndpoint,
"--client-id", p.ClientID, "--client-secret", p.ClientSecret}
if ctx.Bool("console") {
args = append(args, "--console")
}
if p.ListenAddress != "" {
args = append(args, "--listen", p.ListenAddress)
}
out, err := exec.Step(args...)
if err != nil {
return "", err
}
return strings.TrimSpace(string(out)), nil
case *provisioner.GCP: // Do the identity request to get the token
sharedContext.DisableCustomSANs = p.DisableCustomSANs
return p.GetIdentityToken(subject, caURL)
case *provisioner.AWS: // Do the identity request to get the token
sharedContext.DisableCustomSANs = p.DisableCustomSANs
return p.GetIdentityToken(subject, caURL)
case *provisioner.Azure: // Do the identity request to get the token
sharedContext.DisableCustomSANs = p.DisableCustomSANs
return p.GetIdentityToken(subject, caURL)
case *provisioner.ACME: // Return an error with the provisioner ID
return "", &ErrACMEToken{p.GetName()}
}
// JWK provisioner
prov, ok := p.(*provisioner.JWK)
if !ok {
return "", errors.Errorf("unknown provisioner type %T", p)
}
kid := prov.Key.KeyID
issuer := prov.Name
var opts []jose.Option
if passwordFile := ctx.String("password-file"); len(passwordFile) != 0 {
opts = append(opts, jose.WithPasswordFile(passwordFile))
}
var jwk *jose.JSONWebKey
if keyFile := ctx.String("key"); len(keyFile) == 0 {
// Get private key from CA
encrypted, err := pki.GetProvisionerKey(caURL, root, kid)
if err != nil {
return "", err
}
// Add template with check mark
opts = append(opts, jose.WithUIOptions(
ui.WithPromptTemplates(ui.PromptTemplates()),
))
decrypted, err := jose.Decrypt("Please enter the password to decrypt the provisioner key", []byte(encrypted), opts...)
if err != nil {
return "", err
}
jwk = new(jose.JSONWebKey)
if err := json.Unmarshal(decrypted, jwk); err != nil {
return "", errors.Wrap(err, "error unmarshalling provisioning key")
}
} else {
// Get private key from given key file
jwk, err = jose.ParseKey(keyFile, opts...)
if err != nil {
return "", err
}
}
// Generate token
tokenGen := NewTokenGenerator(kid, issuer, audience, root, notBefore, notAfter, jwk)
switch typ {
case SignType:
return tokenGen.SignToken(subject, sans)
case RevokeType:
return tokenGen.RevokeToken(subject)
case SSHUserSignType:
return tokenGen.SignSSHToken(subject, provisioner.SSHUserCert, sans, certNotBefore, certNotAfter)
case SSHHostSignType:
return tokenGen.SignSSHToken(subject, provisioner.SSHHostCert, sans, certNotBefore, certNotAfter)
default:
return tokenGen.Token(subject)
}
}
// OfflineTokenFlow generates a provisioning token using either
// 1. static configuration from ca.json (created with `step ca init`)
// 2. input from command line flags
// These two options are mutually exclusive and priority is given to ca.json.
func OfflineTokenFlow(ctx *cli.Context, typ int, subject string, sans []string, notBefore, notAfter time.Time, certNotBefore, certNotAfter provisioner.TimeDuration) (string, error) {
caConfig := ctx.String("ca-config")
if caConfig == "" {
return "", errs.InvalidFlagValue(ctx, "ca-config", "", "")
}
// Using the offline CA
if utils.FileExists(caConfig) {
offlineCA, err := NewOfflineCA(caConfig)
if err != nil {
return "", err
}
return offlineCA.GenerateToken(ctx, typ, subject, sans, notBefore, notAfter, certNotBefore, certNotAfter)
}
kid := ctx.String("kid")
issuer := ctx.String("issuer")
keyFile := ctx.String("key")
passwordFile := ctx.String("password-file")
// Require issuer and keyFile if ca.json does not exists.
// kid can be passed or created using jwk.Thumbprint.
switch {
case len(issuer) == 0:
return "", errs.RequiredWithFlag(ctx, "offline", "issuer")
case len(keyFile) == 0:
return "", errs.RequiredWithFlag(ctx, "offline", "key")
}
// Get audience from ca-url
audience, err := parseAudience(ctx, typ)
if err != nil {
return "", err
}
// Get root from argument or default location
root := ctx.String("root")
if len(root) == 0 {
root = pki.GetRootCAPath()
if utils.FileExists(root) {
return "", errs.RequiredFlag(ctx, "root")
}
}
// Parse key
var opts []jose.Option
if len(passwordFile) != 0 {
opts = append(opts, jose.WithPasswordFile(passwordFile))
}
jwk, err := jose.ParseKey(keyFile, opts...)
if err != nil {
return "", err
}
// Get the kid if it's not passed as an argument
if len(kid) == 0 {
hash, err := jwk.Thumbprint(crypto.SHA256)
if err != nil {
return "", errors.Wrap(err, "error generating JWK thumbprint")
}
kid = base64.RawURLEncoding.EncodeToString(hash)
}
// Generate token
tokenGen := NewTokenGenerator(kid, issuer, audience, root, notBefore, notAfter, jwk)
switch typ {
case SignType:
return tokenGen.SignToken(subject, sans)
case RevokeType:
return tokenGen.RevokeToken(subject)
case SSHUserSignType:
return tokenGen.SignSSHToken(subject, provisioner.SSHUserCert, sans, certNotBefore, certNotAfter)
case SSHHostSignType:
return tokenGen.SignSSHToken(subject, provisioner.SSHHostCert, sans, certNotBefore, certNotAfter)
default:
return tokenGen.Token(subject)
}
}
func provisionerPrompt(ctx *cli.Context, provisioners provisioner.List) (provisioner.Interface, error) |
// provisionerFilter returns a slice of provisioners that pass the given filter.
func provisionerFilter(provisioners provisioner.List, f func(provisioner.Interface) bool) provisioner.List {
var result provisioner.List
for _, p := range provisioners {
if f(p) {
result = append(result, p)
}
}
return result
}
| {
// Filter by type
provisioners = provisionerFilter(provisioners, func(p provisioner.Interface) bool {
switch p.GetType() {
case provisioner.TypeJWK, provisioner.TypeOIDC, provisioner.TypeACME:
return true
case provisioner.TypeGCP, provisioner.TypeAWS, provisioner.TypeAzure:
return true
default:
return false
}
})
if len(provisioners) == 0 {
return nil, errors.New("cannot create a new token: the CA does not have any provisioner configured")
}
// Filter by kid
if kid := ctx.String("kid"); len(kid) != 0 {
provisioners = provisionerFilter(provisioners, func(p provisioner.Interface) bool {
switch p := p.(type) {
case *provisioner.JWK:
return p.Key.KeyID == kid
case *provisioner.OIDC:
return p.ClientID == kid
default:
return false
}
})
if len(provisioners) == 0 {
return nil, errs.InvalidFlagValue(ctx, "kid", kid, "")
}
}
// Filter by issuer (provisioner name)
if issuer := ctx.String("issuer"); len(issuer) != 0 {
provisioners = provisionerFilter(provisioners, func(p provisioner.Interface) bool {
return p.GetName() == issuer
})
if len(provisioners) == 0 {
return nil, errs.InvalidFlagValue(ctx, "issuer", issuer, "")
}
}
// Select provisioner
var items []*provisionersSelect
for _, prov := range provisioners {
switch p := prov.(type) {
case *provisioner.JWK:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s) [kid: %s]", p.Name, p.GetType(), p.Key.KeyID),
Provisioner: p,
})
case *provisioner.OIDC:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s) [client: %s]", p.Name, p.GetType(), p.ClientID),
Provisioner: p,
})
case *provisioner.GCP:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s)", p.Name, p.GetType()),
Provisioner: p,
})
case *provisioner.AWS:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s)", p.Name, p.GetType()),
Provisioner: p,
})
case *provisioner.Azure:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s) [tenant: %s]", p.Name, p.GetType(), p.TenantID),
Provisioner: p,
})
case *provisioner.ACME:
items = append(items, &provisionersSelect{
Name: fmt.Sprintf("%s (%s)", p.Name, p.GetType()),
Provisioner: p,
})
default:
continue
}
}
if len(items) == 1 {
if err := ui.PrintSelected("Provisioner", items[0].Name); err != nil {
return nil, err
}
return items[0].Provisioner, nil
}
i, _, err := ui.Select("What provisioner key do you want to use?", items, ui.WithSelectTemplates(ui.NamedSelectTemplates("Provisioner")))
if err != nil {
return nil, err
}
return items[i].Provisioner, nil
} | identifier_body |
recorder.go | // Copyright 2015 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Matt Tracy (matt.r.tracy@gmail.com)
package status
import (
"encoding/json"
"fmt"
"strconv"
"sync"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/storage"
"github.com/cockroachdb/cockroach/storage/engine"
"github.com/cockroachdb/cockroach/ts"
"github.com/cockroachdb/cockroach/util"
"github.com/cockroachdb/cockroach/util/hlc"
"github.com/cockroachdb/cockroach/util/log"
"github.com/cockroachdb/cockroach/util/metric"
)
const (
// storeTimeSeriesPrefix is the common prefix for time series keys which
// record store-specific data.
storeTimeSeriesPrefix = "cr.store.%s"
// nodeTimeSeriesPrefix is the common prefix for time series keys which
// record node-specific data.
nodeTimeSeriesPrefix = "cr.node.%s"
// runtimeStatTimeSeriesFmt is the current format for time series keys which
// record runtime system stats on a node.
runtimeStatTimeSeriesNameFmt = "cr.node.sys.%s"
)
type quantile struct {
suffix string
quantile float64
}
var recordHistogramQuantiles = []quantile{
{"-max", 100},
{"-p99.999", 99.999},
{"-p99.99", 99.99},
{"-p99.9", 99.9},
{"-p99", 99},
{"-p90", 90},
{"-p75", 75},
{"-p50", 50},
}
// storeMetrics is the minimum interface of the storage.Store object needed by
// MetricsRecorder to provide status summaries. This is used instead of Store
// directly in order to simplify testing.
type storeMetrics interface {
StoreID() roachpb.StoreID
Descriptor() (*roachpb.StoreDescriptor, error)
MVCCStats() engine.MVCCStats
Registry() *metric.Registry
}
// MetricsRecorder is used to periodically record the information in a number of
// metric registries.
//
// Two types of registries are maintained: "node-level" registries, provided by
// node-level systems, and "store-level" registries which are provided by each
// store hosted by the node. There are slight differences in the way these are
// recorded, and they are thus kept separate.
type MetricsRecorder struct {
// nodeRegistry contains, as subregistries, the multiple component-specific
// registries which are recorded as "node level" metrics.
nodeRegistry *metric.Registry
// Fields below are locked by this mutex.
mu struct {
sync.Mutex
// storeRegistries contains a registry for each store on the node. These
// are not stored as subregistries, but rather are treated as wholly
// independent.
storeRegistries map[roachpb.StoreID]*metric.Registry
nodeID roachpb.NodeID
clock *hlc.Clock
// Counts to help optimize slice allocation.
lastDataCount int
lastSummaryCount int
// TODO(mrtracy): These are stored to support the current structure of
// status summaries. These should be removed as part of #4465.
startedAt int64
desc roachpb.NodeDescriptor
stores map[roachpb.StoreID]storeMetrics
}
}
// NewMetricsRecorder initializes a new MetricsRecorder object that uses the
// given clock.
func NewMetricsRecorder(clock *hlc.Clock) *MetricsRecorder {
mr := &MetricsRecorder{
nodeRegistry: metric.NewRegistry(),
}
mr.mu.storeRegistries = make(map[roachpb.StoreID]*metric.Registry)
mr.mu.stores = make(map[roachpb.StoreID]storeMetrics)
mr.mu.clock = clock
return mr
}
// AddNodeRegistry adds a node-level registry to this recorder. Each node-level
// registry has a 'prefix format' which is used to add a prefix to the name of
// all metrics in that registry while recording (see the metric.Registry object
// for more information on prefix format strings).
func (mr *MetricsRecorder) AddNodeRegistry(prefixFmt string, registry *metric.Registry) {
mr.nodeRegistry.MustAdd(prefixFmt, registry)
}
// AddStore adds the Registry from the provided store as a store-level registry
// in this recoder. A reference to the store is kept for the purpose of
// gathering some additional information which is present in store status
// summaries.
// Stores should only be added to the registry after they have been started.
// TODO(mrtracy): Store references should not be necessary after #4465.
func (mr *MetricsRecorder) AddStore(store storeMetrics) {
mr.mu.Lock()
defer mr.mu.Unlock()
storeID := store.StoreID()
mr.mu.storeRegistries[storeID] = store.Registry()
mr.mu.stores[storeID] = store
}
// NodeStarted should be called on the recorder once the associated node has
// received its Node ID; this indicates that it is appropriate to begin
// recording statistics for this node.
func (mr *MetricsRecorder) NodeStarted(desc roachpb.NodeDescriptor, startedAt int64) {
mr.mu.Lock()
defer mr.mu.Unlock()
mr.mu.desc = desc
mr.mu.nodeID = desc.NodeID
mr.mu.startedAt = startedAt
}
// MarshalJSON returns an appropriate JSON representation of the current values
// of the metrics being tracked by this recorder.
func (mr *MetricsRecorder) MarshalJSON() ([]byte, error) {
mr.mu.Lock()
defer mr.mu.Unlock()
if mr.mu.nodeID == 0 {
// We haven't yet processed initialization information; return an empty
// JSON object.
if log.V(1) {
log.Warning("MetricsRecorder.MarshalJSON() called before NodeID allocation")
}
return []byte("{}"), nil
}
topLevel := map[string]interface{}{
fmt.Sprintf("node.%d", mr.mu.nodeID): mr.nodeRegistry,
}
// Add collection of stores to top level. JSON requires that keys be strings,
// so we must convert the store ID to a string.
storeLevel := make(map[string]interface{})
for id, reg := range mr.mu.storeRegistries {
storeLevel[strconv.Itoa(int(id))] = reg
}
topLevel["stores"] = storeLevel
return json.Marshal(topLevel)
}
// GetTimeSeriesData serializes registered metrics for consumption by
// CockroachDB's time series system.
func (mr *MetricsRecorder) GetTimeSeriesData() []ts.TimeSeriesData {
mr.mu.Lock()
defer mr.mu.Unlock()
if mr.mu.desc.NodeID == 0 {
// We haven't yet processed initialization information; do nothing.
if log.V(1) {
log.Warning("MetricsRecorder.GetTimeSeriesData() called before NodeID allocation")
}
return nil
}
data := make([]ts.TimeSeriesData, 0, mr.mu.lastDataCount)
// Record time series from node-level registries.
now := mr.mu.clock.PhysicalNow()
recorder := registryRecorder{
registry: mr.nodeRegistry,
format: nodeTimeSeriesPrefix,
source: strconv.FormatInt(int64(mr.mu.nodeID), 10),
timestampNanos: now,
}
recorder.record(&data)
// Record time series from store-level registries.
for storeID, r := range mr.mu.storeRegistries {
storeRecorder := registryRecorder{
registry: r,
format: storeTimeSeriesPrefix,
source: strconv.FormatInt(int64(storeID), 10),
timestampNanos: now,
}
storeRecorder.record(&data)
}
mr.mu.lastDataCount = len(data)
return data
}
// GetStatusSummaries returns a status summary messages for the node, along with
// a status summary for every individual store within the node.
// TODO(mrtracy): The status summaries deserve a near-term, significant
// overhaul. Their primary usage is as an indicator of the most recent metrics
// of a node or store - they are essentially a "vertical" query of several
// time series for a single node or store, returning only the most recent value
// of each series. The structure should be modified to reflect that: there is no
// reason for them to have a strict schema of fields. (Github Issue #4465)
func (mr *MetricsRecorder) GetStatusSummaries() (*NodeStatus, []storage.StoreStatus) {
mr.mu.Lock()
defer mr.mu.Unlock()
if mr.mu.nodeID == 0 {
// We haven't yet processed initialization information; do nothing.
if log.V(1) {
log.Warning("MetricsRecorder.GetStatusSummaries called before NodeID allocation.")
}
return nil, nil
}
now := mr.mu.clock.PhysicalNow()
// Generate an node status with no store data.
nodeStat := &NodeStatus{
Desc: mr.mu.desc,
UpdatedAt: now,
StartedAt: mr.mu.startedAt,
StoreIDs: make([]roachpb.StoreID, 0, mr.mu.lastSummaryCount),
}
storeStats := make([]storage.StoreStatus, 0, mr.mu.lastSummaryCount)
// Generate status summaries for stores, while accumulating data into the
// NodeStatus.
for storeID, r := range mr.mu.storeRegistries {
nodeStat.StoreIDs = append(nodeStat.StoreIDs, storeID)
// Gather MVCCStats from the store directly.
stats := mr.mu.stores[storeID].MVCCStats()
// Gather updates from a few specific gauges.
// TODO(mrtracy): This is the worst hack present in supporting the
// current status summary format. It will be removed as part of #4465.
rangeCounter := r.GetCounter("ranges")
if rangeCounter == nil {
log.Errorf("Could not record status summaries: Store %d did not have 'ranges' counter in registry.", storeID)
return nil, nil
}
gaugeNames := []string{"ranges.leader", "ranges.replicated", "ranges.available"}
gauges := make(map[string]*metric.Gauge)
for _, name := range gaugeNames |
// Gather descriptor from store.
descriptor, err := mr.mu.stores[storeID].Descriptor()
if err != nil {
log.Errorf("Could not record status summaries: Store %d could not return descriptor, error: %s", storeID, err)
}
status := storage.StoreStatus{
Desc: *descriptor,
NodeID: mr.mu.nodeID,
UpdatedAt: now,
StartedAt: mr.mu.startedAt,
Stats: stats,
RangeCount: int32(rangeCounter.Count()),
LeaderRangeCount: int32(gauges[gaugeNames[0]].Value()),
ReplicatedRangeCount: int32(gauges[gaugeNames[1]].Value()),
AvailableRangeCount: int32(gauges[gaugeNames[2]].Value()),
}
nodeStat.Stats.Add(stats)
nodeStat.RangeCount += status.RangeCount
nodeStat.LeaderRangeCount += status.LeaderRangeCount
nodeStat.ReplicatedRangeCount += status.ReplicatedRangeCount
nodeStat.AvailableRangeCount += status.AvailableRangeCount
storeStats = append(storeStats, status)
}
return nodeStat, storeStats
}
// registryRecorder is a helper class for recording time series datapoints
// from a metrics Registry.
type registryRecorder struct {
registry *metric.Registry
format string
source string
timestampNanos int64
}
func (rr registryRecorder) record(dest *[]ts.TimeSeriesData) {
rr.registry.Each(func(name string, m interface{}) {
data := ts.TimeSeriesData{
Name: fmt.Sprintf(rr.format, name),
Source: rr.source,
Datapoints: []*ts.TimeSeriesDatapoint{
{
TimestampNanos: rr.timestampNanos,
},
},
}
// The method for extracting data differs based on the type of metric.
// TODO(tschottdorf): should make this based on interfaces.
switch mtr := m.(type) {
case float64:
data.Datapoints[0].Value = mtr
case *metric.Rates:
data.Datapoints[0].Value = float64(mtr.Count())
case *metric.Counter:
data.Datapoints[0].Value = float64(mtr.Count())
case *metric.Gauge:
data.Datapoints[0].Value = float64(mtr.Value())
case *metric.Histogram:
h := mtr.Current()
for _, pt := range recordHistogramQuantiles {
d := *util.CloneProto(&data).(*ts.TimeSeriesData)
d.Name += pt.suffix
d.Datapoints[0].Value = float64(h.ValueAtQuantile(pt.quantile))
*dest = append(*dest, d)
}
return
default:
log.Warningf("cannot serialize for time series: %T", mtr)
return
}
*dest = append(*dest, data)
})
}
| {
gauge := r.GetGauge(name)
if gauge == nil {
log.Errorf("Could not record status summaries: Store %d did not have '%s' gauge in registry.", storeID, name)
return nil, nil
}
gauges[name] = gauge
} | conditional_block |
recorder.go | // Copyright 2015 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Matt Tracy (matt.r.tracy@gmail.com)
package status
import (
"encoding/json"
"fmt"
"strconv"
"sync"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/storage"
"github.com/cockroachdb/cockroach/storage/engine"
"github.com/cockroachdb/cockroach/ts"
"github.com/cockroachdb/cockroach/util"
"github.com/cockroachdb/cockroach/util/hlc"
"github.com/cockroachdb/cockroach/util/log"
"github.com/cockroachdb/cockroach/util/metric"
)
const (
// storeTimeSeriesPrefix is the common prefix for time series keys which
// record store-specific data.
storeTimeSeriesPrefix = "cr.store.%s"
// nodeTimeSeriesPrefix is the common prefix for time series keys which
// record node-specific data.
nodeTimeSeriesPrefix = "cr.node.%s"
// runtimeStatTimeSeriesFmt is the current format for time series keys which
// record runtime system stats on a node.
runtimeStatTimeSeriesNameFmt = "cr.node.sys.%s"
)
type quantile struct {
suffix string
quantile float64
}
var recordHistogramQuantiles = []quantile{
{"-max", 100},
{"-p99.999", 99.999},
{"-p99.99", 99.99},
{"-p99.9", 99.9},
{"-p99", 99},
{"-p90", 90},
{"-p75", 75},
{"-p50", 50},
}
// storeMetrics is the minimum interface of the storage.Store object needed by
// MetricsRecorder to provide status summaries. This is used instead of Store
// directly in order to simplify testing.
type storeMetrics interface {
StoreID() roachpb.StoreID
Descriptor() (*roachpb.StoreDescriptor, error)
MVCCStats() engine.MVCCStats
Registry() *metric.Registry
}
// MetricsRecorder is used to periodically record the information in a number of
// metric registries.
//
// Two types of registries are maintained: "node-level" registries, provided by
// node-level systems, and "store-level" registries which are provided by each
// store hosted by the node. There are slight differences in the way these are
// recorded, and they are thus kept separate.
type MetricsRecorder struct {
// nodeRegistry contains, as subregistries, the multiple component-specific
// registries which are recorded as "node level" metrics.
nodeRegistry *metric.Registry
// Fields below are locked by this mutex.
mu struct {
sync.Mutex
// storeRegistries contains a registry for each store on the node. These
// are not stored as subregistries, but rather are treated as wholly
// independent.
storeRegistries map[roachpb.StoreID]*metric.Registry
nodeID roachpb.NodeID
clock *hlc.Clock
// Counts to help optimize slice allocation.
lastDataCount int
lastSummaryCount int
// TODO(mrtracy): These are stored to support the current structure of
// status summaries. These should be removed as part of #4465.
startedAt int64
desc roachpb.NodeDescriptor
stores map[roachpb.StoreID]storeMetrics
}
}
// NewMetricsRecorder initializes a new MetricsRecorder object that uses the
// given clock.
func NewMetricsRecorder(clock *hlc.Clock) *MetricsRecorder {
mr := &MetricsRecorder{
nodeRegistry: metric.NewRegistry(),
}
mr.mu.storeRegistries = make(map[roachpb.StoreID]*metric.Registry)
mr.mu.stores = make(map[roachpb.StoreID]storeMetrics)
mr.mu.clock = clock
return mr
}
// AddNodeRegistry adds a node-level registry to this recorder. Each node-level
// registry has a 'prefix format' which is used to add a prefix to the name of
// all metrics in that registry while recording (see the metric.Registry object
// for more information on prefix format strings).
func (mr *MetricsRecorder) AddNodeRegistry(prefixFmt string, registry *metric.Registry) {
mr.nodeRegistry.MustAdd(prefixFmt, registry)
}
// AddStore adds the Registry from the provided store as a store-level registry
// in this recoder. A reference to the store is kept for the purpose of
// gathering some additional information which is present in store status
// summaries.
// Stores should only be added to the registry after they have been started.
// TODO(mrtracy): Store references should not be necessary after #4465.
func (mr *MetricsRecorder) AddStore(store storeMetrics) {
mr.mu.Lock()
defer mr.mu.Unlock()
storeID := store.StoreID()
mr.mu.storeRegistries[storeID] = store.Registry()
mr.mu.stores[storeID] = store
}
// NodeStarted should be called on the recorder once the associated node has
// received its Node ID; this indicates that it is appropriate to begin
// recording statistics for this node.
func (mr *MetricsRecorder) NodeStarted(desc roachpb.NodeDescriptor, startedAt int64) {
mr.mu.Lock()
defer mr.mu.Unlock()
mr.mu.desc = desc
mr.mu.nodeID = desc.NodeID
mr.mu.startedAt = startedAt
}
// MarshalJSON returns an appropriate JSON representation of the current values
// of the metrics being tracked by this recorder.
func (mr *MetricsRecorder) MarshalJSON() ([]byte, error) {
mr.mu.Lock()
defer mr.mu.Unlock()
if mr.mu.nodeID == 0 {
// We haven't yet processed initialization information; return an empty
// JSON object.
if log.V(1) {
log.Warning("MetricsRecorder.MarshalJSON() called before NodeID allocation")
}
return []byte("{}"), nil
}
topLevel := map[string]interface{}{
fmt.Sprintf("node.%d", mr.mu.nodeID): mr.nodeRegistry,
}
// Add collection of stores to top level. JSON requires that keys be strings,
// so we must convert the store ID to a string.
storeLevel := make(map[string]interface{})
for id, reg := range mr.mu.storeRegistries {
storeLevel[strconv.Itoa(int(id))] = reg
}
topLevel["stores"] = storeLevel
return json.Marshal(topLevel)
}
// GetTimeSeriesData serializes registered metrics for consumption by
// CockroachDB's time series system.
func (mr *MetricsRecorder) GetTimeSeriesData() []ts.TimeSeriesData |
// GetStatusSummaries returns a status summary messages for the node, along with
// a status summary for every individual store within the node.
// TODO(mrtracy): The status summaries deserve a near-term, significant
// overhaul. Their primary usage is as an indicator of the most recent metrics
// of a node or store - they are essentially a "vertical" query of several
// time series for a single node or store, returning only the most recent value
// of each series. The structure should be modified to reflect that: there is no
// reason for them to have a strict schema of fields. (Github Issue #4465)
func (mr *MetricsRecorder) GetStatusSummaries() (*NodeStatus, []storage.StoreStatus) {
mr.mu.Lock()
defer mr.mu.Unlock()
if mr.mu.nodeID == 0 {
// We haven't yet processed initialization information; do nothing.
if log.V(1) {
log.Warning("MetricsRecorder.GetStatusSummaries called before NodeID allocation.")
}
return nil, nil
}
now := mr.mu.clock.PhysicalNow()
// Generate an node status with no store data.
nodeStat := &NodeStatus{
Desc: mr.mu.desc,
UpdatedAt: now,
StartedAt: mr.mu.startedAt,
StoreIDs: make([]roachpb.StoreID, 0, mr.mu.lastSummaryCount),
}
storeStats := make([]storage.StoreStatus, 0, mr.mu.lastSummaryCount)
// Generate status summaries for stores, while accumulating data into the
// NodeStatus.
for storeID, r := range mr.mu.storeRegistries {
nodeStat.StoreIDs = append(nodeStat.StoreIDs, storeID)
// Gather MVCCStats from the store directly.
stats := mr.mu.stores[storeID].MVCCStats()
// Gather updates from a few specific gauges.
// TODO(mrtracy): This is the worst hack present in supporting the
// current status summary format. It will be removed as part of #4465.
rangeCounter := r.GetCounter("ranges")
if rangeCounter == nil {
log.Errorf("Could not record status summaries: Store %d did not have 'ranges' counter in registry.", storeID)
return nil, nil
}
gaugeNames := []string{"ranges.leader", "ranges.replicated", "ranges.available"}
gauges := make(map[string]*metric.Gauge)
for _, name := range gaugeNames {
gauge := r.GetGauge(name)
if gauge == nil {
log.Errorf("Could not record status summaries: Store %d did not have '%s' gauge in registry.", storeID, name)
return nil, nil
}
gauges[name] = gauge
}
// Gather descriptor from store.
descriptor, err := mr.mu.stores[storeID].Descriptor()
if err != nil {
log.Errorf("Could not record status summaries: Store %d could not return descriptor, error: %s", storeID, err)
}
status := storage.StoreStatus{
Desc: *descriptor,
NodeID: mr.mu.nodeID,
UpdatedAt: now,
StartedAt: mr.mu.startedAt,
Stats: stats,
RangeCount: int32(rangeCounter.Count()),
LeaderRangeCount: int32(gauges[gaugeNames[0]].Value()),
ReplicatedRangeCount: int32(gauges[gaugeNames[1]].Value()),
AvailableRangeCount: int32(gauges[gaugeNames[2]].Value()),
}
nodeStat.Stats.Add(stats)
nodeStat.RangeCount += status.RangeCount
nodeStat.LeaderRangeCount += status.LeaderRangeCount
nodeStat.ReplicatedRangeCount += status.ReplicatedRangeCount
nodeStat.AvailableRangeCount += status.AvailableRangeCount
storeStats = append(storeStats, status)
}
return nodeStat, storeStats
}
// registryRecorder is a helper class for recording time series datapoints
// from a metrics Registry.
type registryRecorder struct {
registry *metric.Registry
format string
source string
timestampNanos int64
}
func (rr registryRecorder) record(dest *[]ts.TimeSeriesData) {
rr.registry.Each(func(name string, m interface{}) {
data := ts.TimeSeriesData{
Name: fmt.Sprintf(rr.format, name),
Source: rr.source,
Datapoints: []*ts.TimeSeriesDatapoint{
{
TimestampNanos: rr.timestampNanos,
},
},
}
// The method for extracting data differs based on the type of metric.
// TODO(tschottdorf): should make this based on interfaces.
switch mtr := m.(type) {
case float64:
data.Datapoints[0].Value = mtr
case *metric.Rates:
data.Datapoints[0].Value = float64(mtr.Count())
case *metric.Counter:
data.Datapoints[0].Value = float64(mtr.Count())
case *metric.Gauge:
data.Datapoints[0].Value = float64(mtr.Value())
case *metric.Histogram:
h := mtr.Current()
for _, pt := range recordHistogramQuantiles {
d := *util.CloneProto(&data).(*ts.TimeSeriesData)
d.Name += pt.suffix
d.Datapoints[0].Value = float64(h.ValueAtQuantile(pt.quantile))
*dest = append(*dest, d)
}
return
default:
log.Warningf("cannot serialize for time series: %T", mtr)
return
}
*dest = append(*dest, data)
})
}
| {
mr.mu.Lock()
defer mr.mu.Unlock()
if mr.mu.desc.NodeID == 0 {
// We haven't yet processed initialization information; do nothing.
if log.V(1) {
log.Warning("MetricsRecorder.GetTimeSeriesData() called before NodeID allocation")
}
return nil
}
data := make([]ts.TimeSeriesData, 0, mr.mu.lastDataCount)
// Record time series from node-level registries.
now := mr.mu.clock.PhysicalNow()
recorder := registryRecorder{
registry: mr.nodeRegistry,
format: nodeTimeSeriesPrefix,
source: strconv.FormatInt(int64(mr.mu.nodeID), 10),
timestampNanos: now,
}
recorder.record(&data)
// Record time series from store-level registries.
for storeID, r := range mr.mu.storeRegistries {
storeRecorder := registryRecorder{
registry: r,
format: storeTimeSeriesPrefix,
source: strconv.FormatInt(int64(storeID), 10),
timestampNanos: now,
}
storeRecorder.record(&data)
}
mr.mu.lastDataCount = len(data)
return data
} | identifier_body |
recorder.go | // Copyright 2015 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Matt Tracy (matt.r.tracy@gmail.com)
package status
import (
"encoding/json"
"fmt"
"strconv"
"sync"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/storage"
"github.com/cockroachdb/cockroach/storage/engine"
"github.com/cockroachdb/cockroach/ts"
"github.com/cockroachdb/cockroach/util"
"github.com/cockroachdb/cockroach/util/hlc"
"github.com/cockroachdb/cockroach/util/log"
"github.com/cockroachdb/cockroach/util/metric"
)
const (
// storeTimeSeriesPrefix is the common prefix for time series keys which
// record store-specific data.
storeTimeSeriesPrefix = "cr.store.%s"
// nodeTimeSeriesPrefix is the common prefix for time series keys which
// record node-specific data.
nodeTimeSeriesPrefix = "cr.node.%s"
// runtimeStatTimeSeriesFmt is the current format for time series keys which
// record runtime system stats on a node.
runtimeStatTimeSeriesNameFmt = "cr.node.sys.%s"
)
type quantile struct {
suffix string
quantile float64
}
var recordHistogramQuantiles = []quantile{
{"-max", 100},
{"-p99.999", 99.999},
{"-p99.99", 99.99},
{"-p99.9", 99.9},
{"-p99", 99},
{"-p90", 90},
{"-p75", 75},
{"-p50", 50},
}
// storeMetrics is the minimum interface of the storage.Store object needed by
// MetricsRecorder to provide status summaries. This is used instead of Store
// directly in order to simplify testing.
type storeMetrics interface {
StoreID() roachpb.StoreID
Descriptor() (*roachpb.StoreDescriptor, error)
MVCCStats() engine.MVCCStats
Registry() *metric.Registry
}
// MetricsRecorder is used to periodically record the information in a number of
// metric registries.
//
// Two types of registries are maintained: "node-level" registries, provided by
// node-level systems, and "store-level" registries which are provided by each
// store hosted by the node. There are slight differences in the way these are
// recorded, and they are thus kept separate.
type MetricsRecorder struct {
// nodeRegistry contains, as subregistries, the multiple component-specific
// registries which are recorded as "node level" metrics.
nodeRegistry *metric.Registry
// Fields below are locked by this mutex.
mu struct {
sync.Mutex
// storeRegistries contains a registry for each store on the node. These
// are not stored as subregistries, but rather are treated as wholly
// independent.
storeRegistries map[roachpb.StoreID]*metric.Registry
nodeID roachpb.NodeID
clock *hlc.Clock
// Counts to help optimize slice allocation.
lastDataCount int
lastSummaryCount int
// TODO(mrtracy): These are stored to support the current structure of
// status summaries. These should be removed as part of #4465.
startedAt int64
desc roachpb.NodeDescriptor
stores map[roachpb.StoreID]storeMetrics
}
}
// NewMetricsRecorder initializes a new MetricsRecorder object that uses the
// given clock.
func NewMetricsRecorder(clock *hlc.Clock) *MetricsRecorder {
mr := &MetricsRecorder{
nodeRegistry: metric.NewRegistry(),
}
mr.mu.storeRegistries = make(map[roachpb.StoreID]*metric.Registry)
mr.mu.stores = make(map[roachpb.StoreID]storeMetrics)
mr.mu.clock = clock
return mr
}
// AddNodeRegistry adds a node-level registry to this recorder. Each node-level
// registry has a 'prefix format' which is used to add a prefix to the name of
// all metrics in that registry while recording (see the metric.Registry object
// for more information on prefix format strings).
func (mr *MetricsRecorder) AddNodeRegistry(prefixFmt string, registry *metric.Registry) {
mr.nodeRegistry.MustAdd(prefixFmt, registry)
}
// AddStore adds the Registry from the provided store as a store-level registry
// in this recoder. A reference to the store is kept for the purpose of
// gathering some additional information which is present in store status
// summaries.
// Stores should only be added to the registry after they have been started.
// TODO(mrtracy): Store references should not be necessary after #4465.
func (mr *MetricsRecorder) AddStore(store storeMetrics) {
mr.mu.Lock()
defer mr.mu.Unlock()
storeID := store.StoreID()
mr.mu.storeRegistries[storeID] = store.Registry()
mr.mu.stores[storeID] = store
}
// NodeStarted should be called on the recorder once the associated node has
// received its Node ID; this indicates that it is appropriate to begin
// recording statistics for this node.
func (mr *MetricsRecorder) NodeStarted(desc roachpb.NodeDescriptor, startedAt int64) {
mr.mu.Lock()
defer mr.mu.Unlock()
mr.mu.desc = desc
mr.mu.nodeID = desc.NodeID
mr.mu.startedAt = startedAt
}
// MarshalJSON returns an appropriate JSON representation of the current values
// of the metrics being tracked by this recorder.
func (mr *MetricsRecorder) MarshalJSON() ([]byte, error) {
mr.mu.Lock()
defer mr.mu.Unlock()
if mr.mu.nodeID == 0 {
// We haven't yet processed initialization information; return an empty
// JSON object.
if log.V(1) {
log.Warning("MetricsRecorder.MarshalJSON() called before NodeID allocation")
}
return []byte("{}"), nil
}
topLevel := map[string]interface{}{
fmt.Sprintf("node.%d", mr.mu.nodeID): mr.nodeRegistry,
}
// Add collection of stores to top level. JSON requires that keys be strings,
// so we must convert the store ID to a string.
storeLevel := make(map[string]interface{}) | storeLevel[strconv.Itoa(int(id))] = reg
}
topLevel["stores"] = storeLevel
return json.Marshal(topLevel)
}
// GetTimeSeriesData serializes registered metrics for consumption by
// CockroachDB's time series system.
func (mr *MetricsRecorder) GetTimeSeriesData() []ts.TimeSeriesData {
mr.mu.Lock()
defer mr.mu.Unlock()
if mr.mu.desc.NodeID == 0 {
// We haven't yet processed initialization information; do nothing.
if log.V(1) {
log.Warning("MetricsRecorder.GetTimeSeriesData() called before NodeID allocation")
}
return nil
}
data := make([]ts.TimeSeriesData, 0, mr.mu.lastDataCount)
// Record time series from node-level registries.
now := mr.mu.clock.PhysicalNow()
recorder := registryRecorder{
registry: mr.nodeRegistry,
format: nodeTimeSeriesPrefix,
source: strconv.FormatInt(int64(mr.mu.nodeID), 10),
timestampNanos: now,
}
recorder.record(&data)
// Record time series from store-level registries.
for storeID, r := range mr.mu.storeRegistries {
storeRecorder := registryRecorder{
registry: r,
format: storeTimeSeriesPrefix,
source: strconv.FormatInt(int64(storeID), 10),
timestampNanos: now,
}
storeRecorder.record(&data)
}
mr.mu.lastDataCount = len(data)
return data
}
// GetStatusSummaries returns a status summary messages for the node, along with
// a status summary for every individual store within the node.
// TODO(mrtracy): The status summaries deserve a near-term, significant
// overhaul. Their primary usage is as an indicator of the most recent metrics
// of a node or store - they are essentially a "vertical" query of several
// time series for a single node or store, returning only the most recent value
// of each series. The structure should be modified to reflect that: there is no
// reason for them to have a strict schema of fields. (Github Issue #4465)
func (mr *MetricsRecorder) GetStatusSummaries() (*NodeStatus, []storage.StoreStatus) {
mr.mu.Lock()
defer mr.mu.Unlock()
if mr.mu.nodeID == 0 {
// We haven't yet processed initialization information; do nothing.
if log.V(1) {
log.Warning("MetricsRecorder.GetStatusSummaries called before NodeID allocation.")
}
return nil, nil
}
now := mr.mu.clock.PhysicalNow()
// Generate an node status with no store data.
nodeStat := &NodeStatus{
Desc: mr.mu.desc,
UpdatedAt: now,
StartedAt: mr.mu.startedAt,
StoreIDs: make([]roachpb.StoreID, 0, mr.mu.lastSummaryCount),
}
storeStats := make([]storage.StoreStatus, 0, mr.mu.lastSummaryCount)
// Generate status summaries for stores, while accumulating data into the
// NodeStatus.
for storeID, r := range mr.mu.storeRegistries {
nodeStat.StoreIDs = append(nodeStat.StoreIDs, storeID)
// Gather MVCCStats from the store directly.
stats := mr.mu.stores[storeID].MVCCStats()
// Gather updates from a few specific gauges.
// TODO(mrtracy): This is the worst hack present in supporting the
// current status summary format. It will be removed as part of #4465.
rangeCounter := r.GetCounter("ranges")
if rangeCounter == nil {
log.Errorf("Could not record status summaries: Store %d did not have 'ranges' counter in registry.", storeID)
return nil, nil
}
gaugeNames := []string{"ranges.leader", "ranges.replicated", "ranges.available"}
gauges := make(map[string]*metric.Gauge)
for _, name := range gaugeNames {
gauge := r.GetGauge(name)
if gauge == nil {
log.Errorf("Could not record status summaries: Store %d did not have '%s' gauge in registry.", storeID, name)
return nil, nil
}
gauges[name] = gauge
}
// Gather descriptor from store.
descriptor, err := mr.mu.stores[storeID].Descriptor()
if err != nil {
log.Errorf("Could not record status summaries: Store %d could not return descriptor, error: %s", storeID, err)
}
status := storage.StoreStatus{
Desc: *descriptor,
NodeID: mr.mu.nodeID,
UpdatedAt: now,
StartedAt: mr.mu.startedAt,
Stats: stats,
RangeCount: int32(rangeCounter.Count()),
LeaderRangeCount: int32(gauges[gaugeNames[0]].Value()),
ReplicatedRangeCount: int32(gauges[gaugeNames[1]].Value()),
AvailableRangeCount: int32(gauges[gaugeNames[2]].Value()),
}
nodeStat.Stats.Add(stats)
nodeStat.RangeCount += status.RangeCount
nodeStat.LeaderRangeCount += status.LeaderRangeCount
nodeStat.ReplicatedRangeCount += status.ReplicatedRangeCount
nodeStat.AvailableRangeCount += status.AvailableRangeCount
storeStats = append(storeStats, status)
}
return nodeStat, storeStats
}
// registryRecorder is a helper class for recording time series datapoints
// from a metrics Registry.
type registryRecorder struct {
registry *metric.Registry
format string
source string
timestampNanos int64
}
func (rr registryRecorder) record(dest *[]ts.TimeSeriesData) {
rr.registry.Each(func(name string, m interface{}) {
data := ts.TimeSeriesData{
Name: fmt.Sprintf(rr.format, name),
Source: rr.source,
Datapoints: []*ts.TimeSeriesDatapoint{
{
TimestampNanos: rr.timestampNanos,
},
},
}
// The method for extracting data differs based on the type of metric.
// TODO(tschottdorf): should make this based on interfaces.
switch mtr := m.(type) {
case float64:
data.Datapoints[0].Value = mtr
case *metric.Rates:
data.Datapoints[0].Value = float64(mtr.Count())
case *metric.Counter:
data.Datapoints[0].Value = float64(mtr.Count())
case *metric.Gauge:
data.Datapoints[0].Value = float64(mtr.Value())
case *metric.Histogram:
h := mtr.Current()
for _, pt := range recordHistogramQuantiles {
d := *util.CloneProto(&data).(*ts.TimeSeriesData)
d.Name += pt.suffix
d.Datapoints[0].Value = float64(h.ValueAtQuantile(pt.quantile))
*dest = append(*dest, d)
}
return
default:
log.Warningf("cannot serialize for time series: %T", mtr)
return
}
*dest = append(*dest, data)
})
} | for id, reg := range mr.mu.storeRegistries { | random_line_split |
recorder.go | // Copyright 2015 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Matt Tracy (matt.r.tracy@gmail.com)
package status
import (
"encoding/json"
"fmt"
"strconv"
"sync"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/storage"
"github.com/cockroachdb/cockroach/storage/engine"
"github.com/cockroachdb/cockroach/ts"
"github.com/cockroachdb/cockroach/util"
"github.com/cockroachdb/cockroach/util/hlc"
"github.com/cockroachdb/cockroach/util/log"
"github.com/cockroachdb/cockroach/util/metric"
)
const (
// storeTimeSeriesPrefix is the common prefix for time series keys which
// record store-specific data.
storeTimeSeriesPrefix = "cr.store.%s"
// nodeTimeSeriesPrefix is the common prefix for time series keys which
// record node-specific data.
nodeTimeSeriesPrefix = "cr.node.%s"
// runtimeStatTimeSeriesFmt is the current format for time series keys which
// record runtime system stats on a node.
runtimeStatTimeSeriesNameFmt = "cr.node.sys.%s"
)
type quantile struct {
suffix string
quantile float64
}
var recordHistogramQuantiles = []quantile{
{"-max", 100},
{"-p99.999", 99.999},
{"-p99.99", 99.99},
{"-p99.9", 99.9},
{"-p99", 99},
{"-p90", 90},
{"-p75", 75},
{"-p50", 50},
}
// storeMetrics is the minimum interface of the storage.Store object needed by
// MetricsRecorder to provide status summaries. This is used instead of Store
// directly in order to simplify testing.
type storeMetrics interface {
StoreID() roachpb.StoreID
Descriptor() (*roachpb.StoreDescriptor, error)
MVCCStats() engine.MVCCStats
Registry() *metric.Registry
}
// MetricsRecorder is used to periodically record the information in a number of
// metric registries.
//
// Two types of registries are maintained: "node-level" registries, provided by
// node-level systems, and "store-level" registries which are provided by each
// store hosted by the node. There are slight differences in the way these are
// recorded, and they are thus kept separate.
type MetricsRecorder struct {
// nodeRegistry contains, as subregistries, the multiple component-specific
// registries which are recorded as "node level" metrics.
nodeRegistry *metric.Registry
// Fields below are locked by this mutex.
mu struct {
sync.Mutex
// storeRegistries contains a registry for each store on the node. These
// are not stored as subregistries, but rather are treated as wholly
// independent.
storeRegistries map[roachpb.StoreID]*metric.Registry
nodeID roachpb.NodeID
clock *hlc.Clock
// Counts to help optimize slice allocation.
lastDataCount int
lastSummaryCount int
// TODO(mrtracy): These are stored to support the current structure of
// status summaries. These should be removed as part of #4465.
startedAt int64
desc roachpb.NodeDescriptor
stores map[roachpb.StoreID]storeMetrics
}
}
// NewMetricsRecorder initializes a new MetricsRecorder object that uses the
// given clock.
func NewMetricsRecorder(clock *hlc.Clock) *MetricsRecorder {
mr := &MetricsRecorder{
nodeRegistry: metric.NewRegistry(),
}
mr.mu.storeRegistries = make(map[roachpb.StoreID]*metric.Registry)
mr.mu.stores = make(map[roachpb.StoreID]storeMetrics)
mr.mu.clock = clock
return mr
}
// AddNodeRegistry adds a node-level registry to this recorder. Each node-level
// registry has a 'prefix format' which is used to add a prefix to the name of
// all metrics in that registry while recording (see the metric.Registry object
// for more information on prefix format strings).
func (mr *MetricsRecorder) | (prefixFmt string, registry *metric.Registry) {
mr.nodeRegistry.MustAdd(prefixFmt, registry)
}
// AddStore adds the Registry from the provided store as a store-level registry
// in this recoder. A reference to the store is kept for the purpose of
// gathering some additional information which is present in store status
// summaries.
// Stores should only be added to the registry after they have been started.
// TODO(mrtracy): Store references should not be necessary after #4465.
func (mr *MetricsRecorder) AddStore(store storeMetrics) {
mr.mu.Lock()
defer mr.mu.Unlock()
storeID := store.StoreID()
mr.mu.storeRegistries[storeID] = store.Registry()
mr.mu.stores[storeID] = store
}
// NodeStarted should be called on the recorder once the associated node has
// received its Node ID; this indicates that it is appropriate to begin
// recording statistics for this node.
func (mr *MetricsRecorder) NodeStarted(desc roachpb.NodeDescriptor, startedAt int64) {
mr.mu.Lock()
defer mr.mu.Unlock()
mr.mu.desc = desc
mr.mu.nodeID = desc.NodeID
mr.mu.startedAt = startedAt
}
// MarshalJSON returns an appropriate JSON representation of the current values
// of the metrics being tracked by this recorder.
func (mr *MetricsRecorder) MarshalJSON() ([]byte, error) {
mr.mu.Lock()
defer mr.mu.Unlock()
if mr.mu.nodeID == 0 {
// We haven't yet processed initialization information; return an empty
// JSON object.
if log.V(1) {
log.Warning("MetricsRecorder.MarshalJSON() called before NodeID allocation")
}
return []byte("{}"), nil
}
topLevel := map[string]interface{}{
fmt.Sprintf("node.%d", mr.mu.nodeID): mr.nodeRegistry,
}
// Add collection of stores to top level. JSON requires that keys be strings,
// so we must convert the store ID to a string.
storeLevel := make(map[string]interface{})
for id, reg := range mr.mu.storeRegistries {
storeLevel[strconv.Itoa(int(id))] = reg
}
topLevel["stores"] = storeLevel
return json.Marshal(topLevel)
}
// GetTimeSeriesData serializes registered metrics for consumption by
// CockroachDB's time series system.
func (mr *MetricsRecorder) GetTimeSeriesData() []ts.TimeSeriesData {
mr.mu.Lock()
defer mr.mu.Unlock()
if mr.mu.desc.NodeID == 0 {
// We haven't yet processed initialization information; do nothing.
if log.V(1) {
log.Warning("MetricsRecorder.GetTimeSeriesData() called before NodeID allocation")
}
return nil
}
data := make([]ts.TimeSeriesData, 0, mr.mu.lastDataCount)
// Record time series from node-level registries.
now := mr.mu.clock.PhysicalNow()
recorder := registryRecorder{
registry: mr.nodeRegistry,
format: nodeTimeSeriesPrefix,
source: strconv.FormatInt(int64(mr.mu.nodeID), 10),
timestampNanos: now,
}
recorder.record(&data)
// Record time series from store-level registries.
for storeID, r := range mr.mu.storeRegistries {
storeRecorder := registryRecorder{
registry: r,
format: storeTimeSeriesPrefix,
source: strconv.FormatInt(int64(storeID), 10),
timestampNanos: now,
}
storeRecorder.record(&data)
}
mr.mu.lastDataCount = len(data)
return data
}
// GetStatusSummaries returns a status summary messages for the node, along with
// a status summary for every individual store within the node.
// TODO(mrtracy): The status summaries deserve a near-term, significant
// overhaul. Their primary usage is as an indicator of the most recent metrics
// of a node or store - they are essentially a "vertical" query of several
// time series for a single node or store, returning only the most recent value
// of each series. The structure should be modified to reflect that: there is no
// reason for them to have a strict schema of fields. (Github Issue #4465)
func (mr *MetricsRecorder) GetStatusSummaries() (*NodeStatus, []storage.StoreStatus) {
mr.mu.Lock()
defer mr.mu.Unlock()
if mr.mu.nodeID == 0 {
// We haven't yet processed initialization information; do nothing.
if log.V(1) {
log.Warning("MetricsRecorder.GetStatusSummaries called before NodeID allocation.")
}
return nil, nil
}
now := mr.mu.clock.PhysicalNow()
// Generate an node status with no store data.
nodeStat := &NodeStatus{
Desc: mr.mu.desc,
UpdatedAt: now,
StartedAt: mr.mu.startedAt,
StoreIDs: make([]roachpb.StoreID, 0, mr.mu.lastSummaryCount),
}
storeStats := make([]storage.StoreStatus, 0, mr.mu.lastSummaryCount)
// Generate status summaries for stores, while accumulating data into the
// NodeStatus.
for storeID, r := range mr.mu.storeRegistries {
nodeStat.StoreIDs = append(nodeStat.StoreIDs, storeID)
// Gather MVCCStats from the store directly.
stats := mr.mu.stores[storeID].MVCCStats()
// Gather updates from a few specific gauges.
// TODO(mrtracy): This is the worst hack present in supporting the
// current status summary format. It will be removed as part of #4465.
rangeCounter := r.GetCounter("ranges")
if rangeCounter == nil {
log.Errorf("Could not record status summaries: Store %d did not have 'ranges' counter in registry.", storeID)
return nil, nil
}
gaugeNames := []string{"ranges.leader", "ranges.replicated", "ranges.available"}
gauges := make(map[string]*metric.Gauge)
for _, name := range gaugeNames {
gauge := r.GetGauge(name)
if gauge == nil {
log.Errorf("Could not record status summaries: Store %d did not have '%s' gauge in registry.", storeID, name)
return nil, nil
}
gauges[name] = gauge
}
// Gather descriptor from store.
descriptor, err := mr.mu.stores[storeID].Descriptor()
if err != nil {
log.Errorf("Could not record status summaries: Store %d could not return descriptor, error: %s", storeID, err)
}
status := storage.StoreStatus{
Desc: *descriptor,
NodeID: mr.mu.nodeID,
UpdatedAt: now,
StartedAt: mr.mu.startedAt,
Stats: stats,
RangeCount: int32(rangeCounter.Count()),
LeaderRangeCount: int32(gauges[gaugeNames[0]].Value()),
ReplicatedRangeCount: int32(gauges[gaugeNames[1]].Value()),
AvailableRangeCount: int32(gauges[gaugeNames[2]].Value()),
}
nodeStat.Stats.Add(stats)
nodeStat.RangeCount += status.RangeCount
nodeStat.LeaderRangeCount += status.LeaderRangeCount
nodeStat.ReplicatedRangeCount += status.ReplicatedRangeCount
nodeStat.AvailableRangeCount += status.AvailableRangeCount
storeStats = append(storeStats, status)
}
return nodeStat, storeStats
}
// registryRecorder is a helper class for recording time series datapoints
// from a metrics Registry.
type registryRecorder struct {
registry *metric.Registry
format string
source string
timestampNanos int64
}
func (rr registryRecorder) record(dest *[]ts.TimeSeriesData) {
rr.registry.Each(func(name string, m interface{}) {
data := ts.TimeSeriesData{
Name: fmt.Sprintf(rr.format, name),
Source: rr.source,
Datapoints: []*ts.TimeSeriesDatapoint{
{
TimestampNanos: rr.timestampNanos,
},
},
}
// The method for extracting data differs based on the type of metric.
// TODO(tschottdorf): should make this based on interfaces.
switch mtr := m.(type) {
case float64:
data.Datapoints[0].Value = mtr
case *metric.Rates:
data.Datapoints[0].Value = float64(mtr.Count())
case *metric.Counter:
data.Datapoints[0].Value = float64(mtr.Count())
case *metric.Gauge:
data.Datapoints[0].Value = float64(mtr.Value())
case *metric.Histogram:
h := mtr.Current()
for _, pt := range recordHistogramQuantiles {
d := *util.CloneProto(&data).(*ts.TimeSeriesData)
d.Name += pt.suffix
d.Datapoints[0].Value = float64(h.ValueAtQuantile(pt.quantile))
*dest = append(*dest, d)
}
return
default:
log.Warningf("cannot serialize for time series: %T", mtr)
return
}
*dest = append(*dest, data)
})
}
| AddNodeRegistry | identifier_name |
sdkcallback.go | package main
import (
"crypto/md5"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
proto "code.google.com/p/goprotobuf/proto"
martini "github.com/codegangsta/martini"
nats "github.com/nats-io/nats"
batteryapi "guanghuan.com/xiaoyao/battery_maintenance_server/bussiness"
xyencoder "guanghuan.com/xiaoyao/common/encoding"
xylog "guanghuan.com/xiaoyao/common/log"
battery "guanghuan.com/xiaoyao/superbman_server/battery_run_net"
crypto "guanghuan.com/xiaoyao/superbman_server/crypto"
"guanghuan.com/xiaoyao/superbman_server/error"
)
func HttpSDKCallBack(w http.ResponseWriter, r *http.Request, params martini.Params) (status int, resp string) {
var (
uri string = r.RequestURI
respData []byte
err error
)
xylog.DebugNoId("req url :%v", r.RequestURI)
status = http.StatusOK
// uri, token = ProcessUri(uri)
// xylog.DebugNoId("uri=%s, token=%s, user agent=%s", uri, token, r.UserAgent())
// respData, err = ProcessHttpMsg(token, r)
respData, err = ProcessCallBackMsg(uri, r)
if err != xyerror.ErrOK {
xylog.ErrorNoId("[%s] failed: %s", uri, err.Error())
status = http.StatusInternalServerError //处理失败,返回服务端错误
} else {
resp = getCallBackResp(respData)
xylog.DebugNoId("response.content : %s", resp)
}
return
}
func ProcessCallBackMsg(uri string, r *http.Request) (resp []byte, err error) {
var (
req proto.Message
route *HttpPostToNatsRoute
subj string
reply *nats.Msg
data []byte
)
req, err = constructCallBackMsg(r)
if err != xyerror.ErrOK {
xylog.ErrorNoId("ConstructPbMsg failed : %v", err)
return
}
xylog.DebugNoId("PbMsg : %v", req)
//进行编码
data, err = xyencoder.PbEncode(req)
if err != xyerror.ErrOK {
xylog.ErrorNoId("xyencoder.PbEncode failed : %v", err)
return
}
//进行加密
data, err = crypto.Encrypt(data)
if err != xyerror.ErrOK {
xylog.ErrorNoId("crypto.Encrypt failed : %v", err)
return
}
route = DefHttpPostTable.GetRoutePath(uri)
xylog.DebugNoId("route:%v,item:%v", route, uri)
if route == nil {
err = errors.New("No route for item :" + uri)
return
}
subj = route.GetNatsSubject()
if subj == "" {
err = errors.New("No subject for uri:" + uri)
return
}
xylog.DebugNoId("forward request to %s", subj)
reply, err = nats_service.Request(subj, data, time.Duration(DefConfig.NatsTimeout)*time.Second)
if err != nil {
xylog.ErrorNoId("<%s> Error: %s", subj, err.Error())
return
} else {
if reply != nil {
resp = reply.Data
} else {
err = errors.New("no reply data")
}
}
return
}
const (
CallParameter_UUid = "uuid" // sdk唯一id
CallParameter_OrderId = "order_id" // sdk 订单号
CallParameter_AppOrderId = "app_order_id" // 游戏订单号
CallParameter_EXT = "app_callback_ext" //扩展参数,保存商品id
CallParameter_UserUID = "app_player_id" // 游戏uid
CallParameter_Amount = "pay_amount" // 充值数量
CallParameter_PayTime = "pay_time"
CallParameter_Sandbox = "sandbox" // 是否测试
CallParameter_Sign = "sign" // 签名校验值
CallParameter_ZoneId = "app_zone_id"
CallParameter_Time = "time"
CallParameter_UserID = "app_user_id" // 账号id
)
func constructCallBackMsg(r *http.Request) (message proto.Message, err error) {
var (
goodsId uint64
sandbox, payAmount int
parameterSlice ParameterSlice = make([]ParameterStruct, 0)
)
r.ParseForm()
parameterSlice.Add(CallParameter_Sign, r.PostFormValue(CallParameter_Sign))
parameterSlice.Add(CallParameter_ZoneId, r.PostFormValue(CallParameter_ZoneId))
parameterSlice.Add(CallParameter_Time, r.PostFormValue(CallParameter_Time))
parameterSlice.Add(CallParameter_UserID, r.PostFormValue(CallParameter_UserID))
parameterSlice.Add(CallParameter_UUid, r.PostFormValue(CallParameter_UUid))
parameterSlice.Add(CallParameter_AppOrderId, r.PostFormValue(CallParameter_AppOrderId))
req := &battery.SDKAddOrderRequest{}
v := r.PostFormValue(CallParameter_UserUID)
if v == "" {
xylog.ErrorNoId("get uid from parameter failed")
return nil, xyerror.ErrBadInputData
}
req.Uid = proto.String(v)
parameterSlice.Add(CallParameter_UserUID, v)
v = r.PostFormValue(CallParameter_OrderId)
if v == "" {
xylog.ErrorNoId("get orderid from parameter fail")
return nil, xyerror.ErrBadInputData
}
req.OrderId = proto.String(v)
parameterSlice.Add(CallParameter_OrderId, v)
v = r.PostFormValue(CallParameter_EXT)
if v == "" {
xylog.ErrorNoId("get goodsid from parameter fail")
return nil, xyerror.ErrBadInputData
}
parameterSlice.Add(CallParameter_EXT, v)
goodsId, err = strconv.ParseUint(v, 10, 64)
if err != xyerror.ErrOK {
xylog.WarningNoId("strconv.ParseUint for uid failed : %v", err)
err = xyerror.ErrOK
} else {
req.GoodsId = proto.Uint64(goodsId)
}
v = r.PostFormValue(CallParameter_Sandbox)
if v == "" {
xylog.ErrorNoId("get sandbox from parameterfail")
return nil, xyerror.ErrBadInputData
}
parameterSlice.Add(CallParameter_Sandbox, v)
sandbox, err = strconv.Atoi(v)
if err != xyerror.ErrOK {
xylog.WarningNoId("strconv.Atoi for uid failed : %v", err)
err = xyerror.ErrOK
} else {
req.Sandbox = proto.Int32(int32(sandbox))
}
v = r.PostFormValue(CallParameter_Amount)
if v == "" {
xylog.DebugNoId("get paytime from parameter fail")
} else {
parameterSlice.Add(CallParameter_Amount, v)
payAmount, err = strconv.Atoi(v)
if err != xyerror.ErrOK {
xylog.WarningNoId("strconv.Atoi for uid failed : %v", err)
err = xyerror.ErrOK
} else {
req.PayAmount = proto.Int32(int32(payAmount))
}
}
v = r.PostFormValue(CallParameter_PayTime)
if v == "" {
xylog.WarningNoId("get paytime from parameter fail")
} else {
parameterSlice.Add(CallParameter_PayTime, v)
req.PayTime = proto.String(v)
}
// 数据验证
var key string
key = batteryapi.DefConfigCache.Configs().AppSecretkey
if !SDKVerification(parameterSlice, key) {
xylog.ErrorNoId("verify error,invalid sig")
err = xyerror.ErrBadInputData
return
}
message = req
return
}
func getCallBackResp(respData []byte) (resp string) {
respData, err := crypto.Decrypt(respData)
if err != nil {
resp = "decrypt false"
return
}
respone := &battery.SDKAddOrderResponse{}
err = proto.Unmarshal(respData, respone)
if err != nil {
resp = "unmarshal false"
return
}
if respone.Error.GetCode() != battery.ErrorCode_NoError {
resp = "add order fail"
return
}
resp = "ok"
return
}
// 被坑,get请求使用
// func GetCallBackMsg(parameterSlice ParameterSlice) (message proto.Message, err error) {
// var (
// goodsId uint64
// sandbox, payAmount int
// )
// req := &battery.SDKAddOrderRequest{}
// v, result := parameterSlice.Get(CallParameter_UserUID)
// if !result {
// xylog.ErrorNoId("get uid from parameter failed")
// return nil, xyerror.ErrBadInputData
// }
// req.Uid = proto.String(v)
// v, result = parameterSlice.Get(CallParameter_OrderId)
// if !result {
// xylog.ErrorNoId("get orderid from parameter fail")
// return nil, xyerror.ErrBadInputData
// }
// req.OrderId = proto.String(v)
// v, result = parameterSlice.Get(CallParameter_EXT)
// if !result {
// xylog.ErrorNoId("get goodsid from parameter fail")
// return nil, xyerror.ErrBadInputData
// }
// goodsId, err = strconv.ParseUint(v, 10, 64)
// if err != xyerror.ErrOK {
// xylog.WarningNoId("strconv.ParseUint for uid failed : %v", err)
// err = xyerror.ErrOK
// } else {
// req.GoodsId = proto.Uint64(goodsId)
// }
// v, result = parameterSlice.Get(CallParameter_Sandbox)
// if !result {
// xylog.ErrorNoId("get sandbox from parameterfail")
// return nil, xyerror.ErrBadInputData
// }
// sandbox, err = strconv.Atoi(v)
// if err != xyerror.ErrOK {
// xylog.WarningNoId("strconv.Atoi for uid failed : %v", err)
// err = xyerror.ErrOK
// } else {
// req.Sandbox = proto.Int32(int32(sandbox))
// }
// v, result = parameterSlice.Get(CallParameter_Amount)
// if !result {
// xylog.DebugNoId("get paytime from parameter fail")
// } else {
// payAmount, err = strconv.Atoi(v)
// if err != xyerror.ErrOK {
// xylog.WarningNoId("strconv.Atoi for uid failed : %v", err)
// err = xyerror.ErrOK
// } else {
// req.PayAmount = proto.Int32(int32(payAmount))
// }
// }
// v, result = parameterSlice.Get(CallParameter_PayTime)
// if !result {
// xylog.WarningNoId("get paytime from parameter fail")
// } else {
// req.PayTime = proto.String(v)
// }
// message = req
// return
// }
// SDK数据验证
func SDKVerification(parameterslice ParameterSlice, key string) bool {
var (
uriStr string
verifyStr string
)
keys := make([]string, 0, len(parameterslice))
for k := range pa | rameterslice {
if parameterslice[k].key != "sign" {
keys = append(keys, parameterslice[k].key)
}
}
sort.Strings(keys) // 参数升序排序
for index, arg := range keys {
v, result := parameterslice.Get(arg)
if result {
uriStr = fmt.Sprintf("%s%s=%s", uriStr, arg, v)
if index != len(keys)-1 {
uriStr = fmt.Sprintf("%s&", uriStr)
}
}
}
xylog.InfoNoId("uriStr:%s", uriStr)
// 参数升序url编码
verifyStr = url.QueryEscape(uriStr)
verifyStr = fmt.Sprintf("%s&%s", verifyStr, key)
// MD5加密
h := md5.New()
io.WriteString(h, verifyStr)
verifyStr = fmt.Sprintf("%x", h.Sum(nil))
verifyStr = strings.ToLower(verifyStr)
xylog.InfoNoId("verifyStr:%s", verifyStr)
sig, result := parameterslice.Get("sign")
if (!result) || (sig != verifyStr) {
return false
}
xylog.InfoNoId("verify succeed")
return true
}
| identifier_body | |
sdkcallback.go | package main
import (
"crypto/md5"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
proto "code.google.com/p/goprotobuf/proto"
martini "github.com/codegangsta/martini"
nats "github.com/nats-io/nats"
batteryapi "guanghuan.com/xiaoyao/battery_maintenance_server/bussiness"
xyencoder "guanghuan.com/xiaoyao/common/encoding"
xylog "guanghuan.com/xiaoyao/common/log"
battery "guanghuan.com/xiaoyao/superbman_server/battery_run_net"
crypto "guanghuan.com/xiaoyao/superbman_server/crypto"
"guanghuan.com/xiaoyao/superbman_server/error"
)
func HttpSDKCallBack(w http.ResponseWriter, r *http.Request, params martini.Params) (status int, resp string) {
var (
uri string = r.RequestURI
respData []byte
err error
)
xylog.DebugNoId("req url :%v", r.RequestURI)
status = http.StatusOK
// uri, token = ProcessUri(uri)
// xylog.DebugNoId("uri=%s, token=%s, user agent=%s", uri, token, r.UserAgent())
// respData, err = ProcessHttpMsg(token, r)
respData, err = ProcessCallBackMsg(uri, r)
if err != xyerror.ErrOK {
xylog.ErrorNoId("[%s] failed: %s", uri, err.Error())
status = http.StatusInternalServerError //处理失败,返回服务端错误
} else {
resp = getCallBackResp(respData)
xylog.DebugNoId("response.content : %s", resp)
}
return
}
func ProcessCallBackMsg(uri string, r *http.Request) (resp []byte, err error) {
var (
req proto.Message
route *HttpPostToNatsRoute
subj string
reply *nats.Msg
data []byte
)
req, err = constructCallBackMsg(r)
if err != xyerror.ErrOK {
xylog.ErrorNoId("ConstructPbMsg failed : %v", err)
return
}
xylog.DebugNoId("PbMsg : %v", req)
//进行编码
data, err = xyencoder.PbEncode(req)
if err != xyerror.ErrOK {
xylog.ErrorNoId("xyencoder.PbEncode failed : %v", err)
return
}
//进行加密
data, err = crypto.Encrypt(data)
if err != xyerror.ErrOK {
xylog.ErrorNoId("crypto.Encrypt failed : %v", err)
return
}
route = DefHttpPostTable.GetRoutePath(uri)
xylog.DebugNoId("route:%v,item:%v", route, uri)
if route == nil {
err = errors.New("No route for item :" + uri)
return
}
subj = route.GetNatsSubject()
if subj == "" {
err = errors.New("No subject for uri:" + uri)
return
}
xylog.DebugNoId("forward request to %s", subj)
reply, err = nats_service.Request(subj, data, time.Duration(DefConfig.NatsTimeout)*time.Second)
if err != nil {
xylog.ErrorNoId("<%s> Error: %s", subj, err.Error())
return
} else {
if reply != nil {
resp = reply.Data
} else {
err = errors.New("no reply data")
}
}
return
}
const (
CallParameter_UUid = "uuid" // sdk唯一id
CallParameter_OrderId = "order_id" // sdk 订单号
CallParameter_AppOrderId = "app_order_id" // 游戏订单号
CallParameter_EXT = "app_callback_ext" //扩展参数,保存商品id
CallParameter_UserUID = "app_player_id" // 游戏uid
CallParameter_Amount = "pay_amount" // 充值数量
CallParameter_PayTime = "pay_time"
CallParameter_Sandbox = "sandbox" // 是否测试
CallParameter_Sign = "sign" // 签名校验值
CallParameter_ZoneId = "app_zone_id"
CallParameter_Time = "time"
CallParameter_UserID = "app_user_id" // 账号id
)
func constructCallBackMsg(r *http.Request) (message proto.Message, err error) {
var (
goodsId uint64
sandbox, payAmount int
parameterSlice ParameterSlice = make([]ParameterStruct, 0)
)
r.ParseForm()
parameterSlice.Add(CallParameter_Sign, r.PostFormValue(CallParameter_Sign))
parameterSlice.Add(CallParameter_ZoneId, r.PostFormValue(CallParameter_ZoneId))
parameterSlice.Add(CallParameter_Time, r.PostFormValue(CallParameter_Time))
parameterSlice.Add(CallParameter_UserID, r.PostFormValue(CallParameter_UserID))
parameterSlice.Add(CallParameter_UUid, r.PostFormValue(CallParameter_UUid))
parameterSlice.Add(CallParameter_AppOrderId, r.PostFormValue(CallParameter_AppOrderId))
req := &battery.SDKAddOrderRequest{}
v := r.PostFormValue(CallParameter_UserUID)
if v == "" {
xylog.ErrorNoId("get uid from parameter failed")
return nil, xyerror.ErrBadInputData
}
req.Uid = proto.String(v)
parameterSlice.Add(CallParameter_UserUID, v)
v = r.PostFormValue(CallParameter_OrderId)
if v == "" {
xylog.ErrorNoId("get orderid from parameter fail")
return nil, xyerror.ErrBadInputData
}
req.OrderId = proto.String(v)
parameterSlice.Add(CallParameter_OrderId, v)
v = r.PostFormValue(CallParameter_EXT)
if v == "" {
xylog.ErrorNoId("get goodsid from parameter fail")
return nil, xyerror.ErrBadInputData
} | error.ErrOK {
xylog.WarningNoId("strconv.ParseUint for uid failed : %v", err)
err = xyerror.ErrOK
} else {
req.GoodsId = proto.Uint64(goodsId)
}
v = r.PostFormValue(CallParameter_Sandbox)
if v == "" {
xylog.ErrorNoId("get sandbox from parameterfail")
return nil, xyerror.ErrBadInputData
}
parameterSlice.Add(CallParameter_Sandbox, v)
sandbox, err = strconv.Atoi(v)
if err != xyerror.ErrOK {
xylog.WarningNoId("strconv.Atoi for uid failed : %v", err)
err = xyerror.ErrOK
} else {
req.Sandbox = proto.Int32(int32(sandbox))
}
v = r.PostFormValue(CallParameter_Amount)
if v == "" {
xylog.DebugNoId("get paytime from parameter fail")
} else {
parameterSlice.Add(CallParameter_Amount, v)
payAmount, err = strconv.Atoi(v)
if err != xyerror.ErrOK {
xylog.WarningNoId("strconv.Atoi for uid failed : %v", err)
err = xyerror.ErrOK
} else {
req.PayAmount = proto.Int32(int32(payAmount))
}
}
v = r.PostFormValue(CallParameter_PayTime)
if v == "" {
xylog.WarningNoId("get paytime from parameter fail")
} else {
parameterSlice.Add(CallParameter_PayTime, v)
req.PayTime = proto.String(v)
}
// 数据验证
var key string
key = batteryapi.DefConfigCache.Configs().AppSecretkey
if !SDKVerification(parameterSlice, key) {
xylog.ErrorNoId("verify error,invalid sig")
err = xyerror.ErrBadInputData
return
}
message = req
return
}
func getCallBackResp(respData []byte) (resp string) {
respData, err := crypto.Decrypt(respData)
if err != nil {
resp = "decrypt false"
return
}
respone := &battery.SDKAddOrderResponse{}
err = proto.Unmarshal(respData, respone)
if err != nil {
resp = "unmarshal false"
return
}
if respone.Error.GetCode() != battery.ErrorCode_NoError {
resp = "add order fail"
return
}
resp = "ok"
return
}
// 被坑,get请求使用
// func GetCallBackMsg(parameterSlice ParameterSlice) (message proto.Message, err error) {
// var (
// goodsId uint64
// sandbox, payAmount int
// )
// req := &battery.SDKAddOrderRequest{}
// v, result := parameterSlice.Get(CallParameter_UserUID)
// if !result {
// xylog.ErrorNoId("get uid from parameter failed")
// return nil, xyerror.ErrBadInputData
// }
// req.Uid = proto.String(v)
// v, result = parameterSlice.Get(CallParameter_OrderId)
// if !result {
// xylog.ErrorNoId("get orderid from parameter fail")
// return nil, xyerror.ErrBadInputData
// }
// req.OrderId = proto.String(v)
// v, result = parameterSlice.Get(CallParameter_EXT)
// if !result {
// xylog.ErrorNoId("get goodsid from parameter fail")
// return nil, xyerror.ErrBadInputData
// }
// goodsId, err = strconv.ParseUint(v, 10, 64)
// if err != xyerror.ErrOK {
// xylog.WarningNoId("strconv.ParseUint for uid failed : %v", err)
// err = xyerror.ErrOK
// } else {
// req.GoodsId = proto.Uint64(goodsId)
// }
// v, result = parameterSlice.Get(CallParameter_Sandbox)
// if !result {
// xylog.ErrorNoId("get sandbox from parameterfail")
// return nil, xyerror.ErrBadInputData
// }
// sandbox, err = strconv.Atoi(v)
// if err != xyerror.ErrOK {
// xylog.WarningNoId("strconv.Atoi for uid failed : %v", err)
// err = xyerror.ErrOK
// } else {
// req.Sandbox = proto.Int32(int32(sandbox))
// }
// v, result = parameterSlice.Get(CallParameter_Amount)
// if !result {
// xylog.DebugNoId("get paytime from parameter fail")
// } else {
// payAmount, err = strconv.Atoi(v)
// if err != xyerror.ErrOK {
// xylog.WarningNoId("strconv.Atoi for uid failed : %v", err)
// err = xyerror.ErrOK
// } else {
// req.PayAmount = proto.Int32(int32(payAmount))
// }
// }
// v, result = parameterSlice.Get(CallParameter_PayTime)
// if !result {
// xylog.WarningNoId("get paytime from parameter fail")
// } else {
// req.PayTime = proto.String(v)
// }
// message = req
// return
// }
// SDK数据验证
func SDKVerification(parameterslice ParameterSlice, key string) bool {
var (
uriStr string
verifyStr string
)
keys := make([]string, 0, len(parameterslice))
for k := range parameterslice {
if parameterslice[k].key != "sign" {
keys = append(keys, parameterslice[k].key)
}
}
sort.Strings(keys) // 参数升序排序
for index, arg := range keys {
v, result := parameterslice.Get(arg)
if result {
uriStr = fmt.Sprintf("%s%s=%s", uriStr, arg, v)
if index != len(keys)-1 {
uriStr = fmt.Sprintf("%s&", uriStr)
}
}
}
xylog.InfoNoId("uriStr:%s", uriStr)
// 参数升序url编码
verifyStr = url.QueryEscape(uriStr)
verifyStr = fmt.Sprintf("%s&%s", verifyStr, key)
// MD5加密
h := md5.New()
io.WriteString(h, verifyStr)
verifyStr = fmt.Sprintf("%x", h.Sum(nil))
verifyStr = strings.ToLower(verifyStr)
xylog.InfoNoId("verifyStr:%s", verifyStr)
sig, result := parameterslice.Get("sign")
if (!result) || (sig != verifyStr) {
return false
}
xylog.InfoNoId("verify succeed")
return true
}
|
parameterSlice.Add(CallParameter_EXT, v)
goodsId, err = strconv.ParseUint(v, 10, 64)
if err != xy | conditional_block |
sdkcallback.go | package main
import (
"crypto/md5"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
proto "code.google.com/p/goprotobuf/proto"
martini "github.com/codegangsta/martini"
nats "github.com/nats-io/nats"
batteryapi "guanghuan.com/xiaoyao/battery_maintenance_server/bussiness"
xyencoder "guanghuan.com/xiaoyao/common/encoding"
xylog "guanghuan.com/xiaoyao/common/log"
battery "guanghuan.com/xiaoyao/superbman_server/battery_run_net"
crypto "guanghuan.com/xiaoyao/superbman_server/crypto"
"guanghuan.com/xiaoyao/superbman_server/error"
)
func HttpSDKCallBack(w http.ResponseWriter, r *http.Request, params martini.Params) (status int, resp string) {
var (
uri string = r.RequestURI
respData []byte
err error
)
xylog.DebugNoId("req url :%v", r.RequestURI)
status = http.StatusOK
// uri, token = ProcessUri(uri)
// xylog.DebugNoId("uri=%s, token=%s, user agent=%s", uri, token, r.UserAgent())
// respData, err = ProcessHttpMsg(token, r)
respData, err = ProcessCallBackMsg(uri, r)
if err != xyerror.ErrOK {
xylog.ErrorNoId("[%s] failed: %s", uri, err.Error())
status = http.StatusInternalServerError //处理失败,返回服务端错误
} else {
resp = getCallBackResp(respData)
xylog.DebugNoId("response.content : %s", resp)
}
return
}
func ProcessCallBackMsg(uri string, r *http.Request) (resp []byte, err error) {
var (
req proto.Message
route *HttpPostToNatsRoute
subj string
reply *nats.Msg
data []byte
)
req, err = constructCallBackMsg(r)
if err != xyerror.ErrOK {
xylog.ErrorNoId("ConstructPbMsg failed : %v", err)
return
}
xylog.DebugNoId("PbMsg : %v", req)
//进行编码
data, err = xyencoder.PbEncode(req)
if err != xyerror.ErrOK {
xylog.ErrorNoId("xyencoder.PbEncode failed : %v", err)
return
}
//进行加密
data, err = crypto.Encrypt(data)
if err != xyerror.ErrOK {
xylog.ErrorNoId("crypto.Encrypt failed : %v", err)
return
}
route = DefHttpPostTable.GetRoutePath(uri)
xylog.DebugNoId("route:%v,item:%v", route, uri)
if route == nil {
err = errors.New("No route for item :" + uri)
return
}
subj = route.GetNatsSubject()
if subj == "" {
err = errors.New("No subject for uri:" + uri)
return
}
xylog.DebugNoId("forward request to %s", subj)
reply, err = nats_service.Request(subj, data, time.Duration(DefConfig.NatsTimeout)*time.Second)
if err != nil {
xylog.ErrorNoId("<%s> Error: %s", subj, err.Error())
return
} else {
if reply != nil {
resp = reply.Data
} else {
err = errors.New("no reply data")
}
}
return
}
const (
CallParameter_UUid = "uuid" // sdk唯一id
CallParameter_OrderId = "order_id" // sdk 订单号
CallParameter_AppOrderId = "app_order_id" // 游戏订单号
CallParameter_EXT = "app_callback_ext" //扩展参数,保存商品id
CallParameter_UserUID = "app_player_id" // 游戏uid
CallParameter_Amount = "pay_amount" // 充值数量
CallParameter_PayTime = "pay_time"
CallParameter_Sandbox = "sandbox" // 是否测试
CallParameter_Sign = "sign" // 签名校验值
CallParameter_ZoneId = "app_zone_id"
CallParameter_Time = "time"
CallParameter_UserID = "app_user_id" // 账号id
)
func constructCallBackMsg(r *http.Request) (message proto.Message, err error) {
var (
goodsId uint64
sandbox, payAmount int
parameterSlice ParameterSlice = make([]ParameterStruct, 0)
)
r.ParseForm()
parameterSlice.Add(CallParameter_Sign, r.PostFormValue(CallParameter_Sign))
parameterSlice.Add(CallParameter_ZoneId, r.PostFormValue(CallParameter_ZoneId))
parameterSlice.Add(CallParameter_Time, r.PostFormValue(CallParameter_Time))
parameterSlice.Add(CallParameter_UserID, r.PostFormValue(CallParameter_UserID))
parameterSlice.Add(CallParameter_UUid, r.PostFormValue(CallParameter_UUid))
parameterSlice.Add(CallParameter_AppOrderId, r.PostFormValue(CallParameter_AppOrderId))
req := &battery.SDKAddOrderRequest{}
v := r.PostFormValue(CallParameter_UserUID)
if v == "" {
xylog.ErrorNoId("get uid from parameter failed")
return nil, xyerror.ErrBadInputData
}
req.Uid = proto.String(v)
parameterSlice.Add(CallParameter_UserUID, v)
v = r.PostFormValue(CallParameter_OrderId)
if v == "" {
xylog.ErrorNoId("get orderid from parameter fail")
return nil, xyerror.ErrBadInputData
}
req.OrderId = proto.String(v)
parameterSlice.Add(CallParameter_OrderId, v)
v = r.PostFormValue(CallParameter_EXT)
if v == "" {
xylog.ErrorNoId("get goodsid from parameter fail")
return nil, xyerror.ErrBadInputData
}
parameterSlice.Add(CallParameter_EXT, v)
goodsId, err = strconv.ParseUint(v, 10, 64)
if err != xyerror.ErrOK {
xylog.WarningNoId("strconv.ParseUint for uid failed : %v", err)
err = xyerror.ErrOK
} else {
req.GoodsId = proto.Uint64(goodsId)
}
v = r.PostFormValue(CallParameter_Sandbox)
if v == "" {
xylog.ErrorNoId("get sandbox from parameterfail")
return nil, xyerror.ErrBadInputData
}
parameterSlice.Add(CallParameter_Sandbox, v)
sandbox, err = strconv.Atoi(v)
if err != xyerror.ErrOK {
xylog.WarningNoId("strconv.Atoi for uid failed : %v", err)
err = xyerror.ErrOK
} else {
req.Sandbox = proto.Int32(int32(sandbox))
}
v = r.PostFormValue(CallParameter_Amount)
if v == "" {
xylog.DebugNoId("get paytime from parameter fail")
} else {
parameterSlice.Add(CallParameter_Amount, v)
payAmount, err = strconv.Atoi(v)
if err != xyerror.ErrOK {
xylog.WarningNoId("strconv.Atoi for uid failed : %v", err)
err = xyerror.ErrOK
} else {
req.PayAmount = proto.Int32(int32(payAmount))
}
}
v = r.PostFormValue(CallParameter_PayTime)
if v == "" {
xylog.WarningNoId("get paytime from parameter fail")
} else {
parameterSlice.Add(CallParameter_PayTime, v)
req.PayTime = proto.String(v)
}
// 数据验证
var key string
key = batteryapi.DefConfigCache.Configs().AppSecretkey
if !SDKVerification(parameterSlice, key) {
xylog.ErrorNoId("verify error,invalid sig")
err = xyerror.ErrBadInputData
return
}
message = req
return
}
func getCallBackResp(respData []byte) (resp string) {
respData, err := crypto.Decrypt(respData)
if err != nil {
resp = "decrypt false"
return | }
respone := &battery.SDKAddOrderResponse{}
err = proto.Unmarshal(respData, respone)
if err != nil {
resp = "unmarshal false"
return
}
if respone.Error.GetCode() != battery.ErrorCode_NoError {
resp = "add order fail"
return
}
resp = "ok"
return
}
// 被坑,get请求使用
// func GetCallBackMsg(parameterSlice ParameterSlice) (message proto.Message, err error) {
// var (
// goodsId uint64
// sandbox, payAmount int
// )
// req := &battery.SDKAddOrderRequest{}
// v, result := parameterSlice.Get(CallParameter_UserUID)
// if !result {
// xylog.ErrorNoId("get uid from parameter failed")
// return nil, xyerror.ErrBadInputData
// }
// req.Uid = proto.String(v)
// v, result = parameterSlice.Get(CallParameter_OrderId)
// if !result {
// xylog.ErrorNoId("get orderid from parameter fail")
// return nil, xyerror.ErrBadInputData
// }
// req.OrderId = proto.String(v)
// v, result = parameterSlice.Get(CallParameter_EXT)
// if !result {
// xylog.ErrorNoId("get goodsid from parameter fail")
// return nil, xyerror.ErrBadInputData
// }
// goodsId, err = strconv.ParseUint(v, 10, 64)
// if err != xyerror.ErrOK {
// xylog.WarningNoId("strconv.ParseUint for uid failed : %v", err)
// err = xyerror.ErrOK
// } else {
// req.GoodsId = proto.Uint64(goodsId)
// }
// v, result = parameterSlice.Get(CallParameter_Sandbox)
// if !result {
// xylog.ErrorNoId("get sandbox from parameterfail")
// return nil, xyerror.ErrBadInputData
// }
// sandbox, err = strconv.Atoi(v)
// if err != xyerror.ErrOK {
// xylog.WarningNoId("strconv.Atoi for uid failed : %v", err)
// err = xyerror.ErrOK
// } else {
// req.Sandbox = proto.Int32(int32(sandbox))
// }
// v, result = parameterSlice.Get(CallParameter_Amount)
// if !result {
// xylog.DebugNoId("get paytime from parameter fail")
// } else {
// payAmount, err = strconv.Atoi(v)
// if err != xyerror.ErrOK {
// xylog.WarningNoId("strconv.Atoi for uid failed : %v", err)
// err = xyerror.ErrOK
// } else {
// req.PayAmount = proto.Int32(int32(payAmount))
// }
// }
// v, result = parameterSlice.Get(CallParameter_PayTime)
// if !result {
// xylog.WarningNoId("get paytime from parameter fail")
// } else {
// req.PayTime = proto.String(v)
// }
// message = req
// return
// }
// SDK数据验证
func SDKVerification(parameterslice ParameterSlice, key string) bool {
var (
uriStr string
verifyStr string
)
keys := make([]string, 0, len(parameterslice))
for k := range parameterslice {
if parameterslice[k].key != "sign" {
keys = append(keys, parameterslice[k].key)
}
}
sort.Strings(keys) // 参数升序排序
for index, arg := range keys {
v, result := parameterslice.Get(arg)
if result {
uriStr = fmt.Sprintf("%s%s=%s", uriStr, arg, v)
if index != len(keys)-1 {
uriStr = fmt.Sprintf("%s&", uriStr)
}
}
}
xylog.InfoNoId("uriStr:%s", uriStr)
// 参数升序url编码
verifyStr = url.QueryEscape(uriStr)
verifyStr = fmt.Sprintf("%s&%s", verifyStr, key)
// MD5加密
h := md5.New()
io.WriteString(h, verifyStr)
verifyStr = fmt.Sprintf("%x", h.Sum(nil))
verifyStr = strings.ToLower(verifyStr)
xylog.InfoNoId("verifyStr:%s", verifyStr)
sig, result := parameterslice.Get("sign")
if (!result) || (sig != verifyStr) {
return false
}
xylog.InfoNoId("verify succeed")
return true
} | random_line_split | |
sdkcallback.go | package main
import (
"crypto/md5"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
proto "code.google.com/p/goprotobuf/proto"
martini "github.com/codegangsta/martini"
nats "github.com/nats-io/nats"
batteryapi "guanghuan.com/xiaoyao/battery_maintenance_server/bussiness"
xyencoder "guanghuan.com/xiaoyao/common/encoding"
xylog "guanghuan.com/xiaoyao/common/log"
battery "guanghuan.com/xiaoyao/superbman_server/battery_run_net"
crypto "guanghuan.com/xiaoyao/superbman_server/crypto"
"guanghuan.com/xiaoyao/superbman_server/error"
)
func HttpSDKCallBack(w http.ResponseWriter, r *http.Request, params martini.Params) (status int, resp string) {
var (
uri string = r.RequestURI
respData []byte
err error
)
xylog.DebugNoId("req url :%v", r.RequestURI)
status = http.StatusOK
// uri, token = ProcessUri(uri)
// xylog.DebugNoId("uri=%s, token=%s, user agent=%s", uri, token, r.UserAgent())
// respData, err = ProcessHttpMsg(token, r)
respData, err = ProcessCallBackMsg(uri, r)
if err != xyerror.ErrOK {
xylog.ErrorNoId("[%s] failed: %s", uri, err.Error())
status = http.StatusInternalServerError //处理失败,返回服务端错误
} else {
resp = getCallBackResp(respData)
xylog.DebugNoId("response.content : %s", resp)
}
return
}
func ProcessCallBackMsg(uri string, r *http.Request) (resp []byte, err error) {
var (
req proto.Message
route *HttpPostToNatsRoute
subj string
reply *nats.Msg
data []byte
)
req, err = constructCallBackMsg(r)
if err != xyerror.ErrOK {
xylog.ErrorNoId("ConstructPbMsg failed : %v", err)
return
}
xylog.DebugNoId("PbMsg : %v", req)
//进行编码
data, err = xyencoder.PbEncode(req)
if err != xyerror.ErrOK {
xylog.ErrorNoId("xyencoder.PbEncode failed : %v", err)
return
}
//进行加密
data, err = crypto.Encrypt(data)
if err != xyerror.ErrOK {
xylog.ErrorNoId("crypto.Encrypt failed : %v", err)
return
}
route = DefHttpPostTable.GetRoutePath(uri)
xylog.DebugNoId("route:%v,item:%v", route, uri)
if route == nil {
err = errors.New("No route for item :" + uri)
return
}
subj = route.GetNatsSubject()
if subj == "" {
err = errors.New("No subject for uri:" + uri)
return
}
xylog.DebugNoId("forward request to %s", subj)
reply, err = nats_service.Request(subj, data, time.Duration(DefConfig.NatsTimeout)*time.Second)
if err != nil {
xylog.ErrorNoId("<%s> Error: %s", subj, err.Error())
return
} else {
if reply != nil {
resp = reply.Data
} else {
err = errors.New("no reply data")
}
}
return
}
const (
CallParameter_UUid = "uuid" // sdk唯一id
CallParameter_OrderId = "order_id" // sdk 订单号
CallParameter_AppOrderId = "app_order_id" // 游戏订单号
CallParameter_EXT = "app_callback_ext" //扩展参数,保存商品id
CallParameter_UserUID = "app_player_id" // 游戏uid
CallParameter_Amount = "pay_amount" // 充值数量
CallParameter_PayTime = "pay_time"
CallParameter_Sandbox = "sandbox" // 是否测试
CallParameter_Sign = "sign" // 签名校验值
CallParameter_ZoneId = "app_zone_id"
CallParameter_Time = "time"
CallParameter_UserID = "app_user_id" // 账号id
)
func constructCallBackMsg(r *http.Request) (message proto.Message, err error) {
var (
goodsId uint64
sandbox, payAmount int
parameterSlice ParameterSlice = make([]ParameterStruct, 0)
)
r.ParseForm()
parameterSlice.Add(CallParameter_Sign, r.PostFormValue(CallParameter_Sign))
parameterSlice.Add(CallParameter_ZoneId, r.PostFormValue(CallParameter_ZoneId))
parameterSlice.Add(CallParameter_Time, r.PostFormValue(CallParameter_Time))
parameterSlice.Add(CallParameter_UserID, r.PostFormValue(CallParameter_UserID))
parameterSlice.Add(CallParameter_UUid, r.PostFormValue(CallParameter_UUid))
parameterSlice.Add(CallParameter_AppOrderId, r.PostFormValue(CallParameter_AppOrderId))
req := &battery.SDKAddOrderRequest{}
v := r.PostFormValue(CallParameter_UserUID)
if v == "" {
xylog.ErrorNoId("get uid from parameter failed")
return nil, xyerror.ErrBadInputData
}
req.Uid = proto.String(v)
parameterSlice.Add(CallParameter_UserUID, v)
v = r.PostFormValue(CallParameter_OrderId)
if v == "" {
xylog.ErrorNoId("get orderid from parameter fail")
return nil, xyerror.ErrBadInputData
}
req.OrderId = proto.String(v)
parameterSlice.Add(CallParameter_OrderId, v)
v = r.PostFormValue(CallParameter_EXT)
if v == "" {
xylog.ErrorNoId("get goodsid from parameter fail")
return nil, xyerror.ErrBadInputData
}
parameterSlice.Add(CallParameter_EXT, v)
goodsId, err = strconv.ParseUint(v, 10, 64)
if err != xyerror.ErrOK {
xylog.WarningNoId("strconv.ParseUint for uid failed : %v", err)
err = xyerror.ErrOK
} else {
req.GoodsId = proto.Uint64(goodsId)
}
v = r.PostFormValue(CallParameter_Sandbox)
if v == "" {
xylog.ErrorNoId("get sandbox from parameterfail")
return nil, xyerror.ErrBadInputData
}
parameterSlice.Add(CallParameter_Sandbox, v)
sandbox, err = strconv.Atoi(v)
if err != xyerror.ErrOK {
xylog.WarningNoId("strconv.Atoi for uid failed : %v", err)
err = xyerror.ErrOK
} else {
req.Sandbox = proto.Int32(int32(sandbox))
}
v = r.PostFormValue(CallParameter_Amount)
if v == "" {
xylog.DebugNoId("get paytime from parameter fail")
} else {
parameterSlice.Add(CallParameter_Amount, v)
payAmount, err = strconv.Atoi(v)
if err != xyerror.ErrOK {
xylog.WarningNoId("strconv.Atoi for uid failed : %v", err)
err = xyerror.ErrOK
} else {
req.PayAmount = proto.Int32(int32(payAmount))
}
}
v = r.PostFormValue(CallParameter_PayTime)
if v == "" {
xylog.WarningNoId("get paytime from parameter fail")
} else {
parameterSlice.Add(CallParameter_PayTime, v)
req.PayTime = proto.String(v)
}
// 数据验证
var key string
key = batteryapi.DefConfigCache.Configs().AppSecretkey
if !SDKVerification(parameterSlice, key) {
xylog.ErrorNoId("verify error,invalid sig")
err = xyerror.ErrBadInputData
return
}
message = req
return
}
func getCallBackResp(respData []byte) (resp string) {
respData, err := crypto.Decrypt(respData)
if err != nil {
resp = "decrypt false"
return
}
respone := &battery.SDKAddOrderResponse{}
err = proto.Unmarshal(respData, respone)
if err != nil {
resp = "unmarshal false"
return
}
if respone.Error.GetCode() != battery.ErrorCode_NoError {
resp = "add order fail"
return
}
resp = "ok"
return
}
// 被坑,get请求使用
// func GetCallBackMsg(parameterSlice ParameterSlice) (message proto.Message, err error) {
// var (
// goodsId uint64
// sandbox, payAmount int
// )
// req := &battery.SDKAddOrderRequest{}
// v, result := parameterSlice.Get(CallParameter_UserUID)
// if !result {
// xylog.ErrorNoId("get uid from parameter failed")
// return nil, xyerror.ErrBadInputData
// }
// req.Uid = proto.String(v)
// v, result = parameterSlice.Get(CallParameter_OrderId)
// if !result {
// xylog.ErrorNoId("get orderid from parameter fail")
// return nil, xyerror.ErrBadInputData
// }
// req.OrderId = proto.String(v)
// v, result = parameterSlice.Get(CallParameter_EXT)
// if !result {
// xylog.ErrorNoId("get goodsid from parameter fail")
// return nil, xyerror.ErrBadInputData
// }
// goodsId, err = strconv.ParseUint(v, 10, 64)
// if err != xyerror.ErrOK {
// xylog.WarningNoId("strconv.ParseUint for uid failed : %v", err)
// err = xyerror.ErrOK
// } else {
// req.GoodsId = proto.Uint64(goodsId)
// }
// v, result = parameterSlice.Get(CallParameter_Sandbox)
// if !result {
// xylog.ErrorNoId("get sandbox from parameterfail")
// return nil, xyerror.ErrBadInputData
// }
// sandbox, err = strconv.Atoi(v)
// if err != xyerror.ErrOK {
// xylog.WarningNoId("strconv.Atoi for uid failed : %v", err)
// err = xyerror.ErrOK
// } else {
// req.Sandbox = proto.Int32(int32(sandbox))
// }
// v, result = parameterSlice.Get(CallParameter_Amount)
// if !result {
// xylog.DebugNoId("get paytime from parameter fail")
// } else {
// payAmount, err = strconv.Atoi(v)
// if err != xyerror.ErrOK {
// xylog.WarningNoId("strconv.Atoi for uid failed : %v", err)
// err = xyerror.ErrOK
// } else {
// req.PayAmount = proto.Int32(int32(payAmount))
// }
// }
// v, result = parameterSlice.Get(CallParameter_PayTime)
// if !result {
// xylog.WarningNoId("get paytime from parameter fail")
// } else {
// req.PayTime = proto.String(v)
// }
// message = req
// return
// }
// SDK数据验证
func SDKVerification(parameterslice ParameterSlice, key string) bool {
var (
uriStr string
verifyStr string
)
keys | ng, 0, len(parameterslice))
for k := range parameterslice {
if parameterslice[k].key != "sign" {
keys = append(keys, parameterslice[k].key)
}
}
sort.Strings(keys) // 参数升序排序
for index, arg := range keys {
v, result := parameterslice.Get(arg)
if result {
uriStr = fmt.Sprintf("%s%s=%s", uriStr, arg, v)
if index != len(keys)-1 {
uriStr = fmt.Sprintf("%s&", uriStr)
}
}
}
xylog.InfoNoId("uriStr:%s", uriStr)
// 参数升序url编码
verifyStr = url.QueryEscape(uriStr)
verifyStr = fmt.Sprintf("%s&%s", verifyStr, key)
// MD5加密
h := md5.New()
io.WriteString(h, verifyStr)
verifyStr = fmt.Sprintf("%x", h.Sum(nil))
verifyStr = strings.ToLower(verifyStr)
xylog.InfoNoId("verifyStr:%s", verifyStr)
sig, result := parameterslice.Get("sign")
if (!result) || (sig != verifyStr) {
return false
}
xylog.InfoNoId("verify succeed")
return true
}
| := make([]stri | identifier_name |
vjAnnotListTableView.js | /*
* ::718604!
*
* Copyright(C) November 20, 2014 U.S. Food and Drug Administration
* Authors: Dr. Vahan Simonyan (1), Dr. Raja Mazumder (2), et al
* Affiliation: Food and Drug Administration (1), George Washington University (2)
*
* All rights Reserved.
*
* The MIT License (MIT)
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
var at = new vjAnnotListTableControl({
formObject: document.forms["demo-form"],
annotObjList:["3099581","3099578"], // optional
callbackSubmit: testSubmit, // optional
submitButton: {name:"koko", hidden:true}, // optional
annotationButton:{name:"aaaa", hidden: true} // optionals
});
*/
function vjAnnotListTableControl(viewer){
//
this.clone=function (viewbase)
{
if(!viewbase)viewbase=this;
var i=0;
// copy all the features from base objejct
for ( i in viewbase ) {
this[i] = viewbase[i];
}
};
this.clone(viewer);
var main_objCls = "obj-AnnotListTableControl"+Math.random();
vjObj.register(main_objCls,this);
// Default Parameters
this.defaultAnnotUrl = "http://?cmd=ionAnnotTypes&fromComputation=0&recordTypes=relation"; // &ionObjs
this.viewersArr =[];
this.objAdded ={};
this.rowsCnt = 0;
//
this.maxTxtLen= this.maxTxtLen ? this.maxTxtLen : 25;
this.bgColors= this.bgColors ? this.bgColors : [ '#f2f2f2', '#ffffff' ];
this.defaultEmptyText = this.defaultEmptyText ? this.defaultEmptyText : 'no information to show';
//this.selectCallback= this.selectCallback ? this.selectCallback : 0;
//this.callbackRendered= this.callbackRendered ? this.callbackRendered : 0;
this.callbackSubmit= this.callbackSubmit ? this.callbackSubmit : 0;
if (!this.vjDS) this.vjDS = vjDS;
if (!this.vjDV) this.vjDV = vjDV;
if (!this.formObject) this.formObject = document.forms[this.formName];
if (!this.className) this.className = "ANNOTLISTCONTROL_table";
// BUTTON VISIBILITY
if (!this.submitButton) this.submitButton = {};
if (!this.submitButton['name']) this.submitButton.name= "Submit";
if (!this.submitButton['hidden']) this.submitButton.hidden= false;
if (!this.annotationButton) this.annotationButton= {};
if (!this.annotationButton['name']) this.annotationButton.name = "Annotation Files";
if (!this.annotationButton['hidden']) this.annotationButton.hidden = false;
if (!this.searchTypeButton) this.searchTypeButton= {};
if (!this.searchTypeButton['hidden']) this.searchTypeButton.hidden= true;
if (!this.referenceObjList) this.referenceObjList = [];
if (!this.annotObjList) this.annotObjList = [];
var dsname = "dsAnnotList_table";
//this.annotTypeDS = this.vjDS.add("infrastructure: Folders Help", dsname, this.defaultAnnotUrl,0,"seqID,start,end,type,id\n");
this.annotTypeDS = this.vjDS.add("Loading annotation ", dsname, "static://",0,"seqID,start,end,type,id\n");
this.checkTypeDS = this.vjDS.add("Loading results", dsname+"-checked", "static://");
// function
var _mainControl_ = this;
function checkDirection (dicPath, curPath) {
var myArr = Object.keys(dicPath);
for (var ia=0; ia<myArr.length; ++ia) {
if (curPath.indexOf(myArr[ia])!=-1) {
return dicPath[myArr[ia]];
}
}
return 1;
}
function accumulateRows (viewer, node, ir,ic) {
var objAdded = _mainControl_.objAdded;
var range = "", seqID ="", type = "", id= "",strand = "+", checked = true;
if (viewer.myName && viewer.myName=="manualPanel")
{
seqID = viewer.tree.findByName("seqID").value;
var curStart = viewer.tree.findByName("start").value;
curStart = isNaN(curStart) ? (-2) : curStart;
var curEnd = viewer.tree.findByName("end").value;
curEnd = isNaN(curEnd) ? (-2) : curEnd;
if (curStart==-2 || curEnd==-2) {
range= "not_valid";
}
else {
range = curStart + "-" + curEnd;
}
viewer.render();
}
else {
range = node.start + "-" + node.end;
seqID = node.seqID; type = node.type; id = node.id;
strand = checkDirection(viewer.dicPaths, node.path);
checked = node.checked;
}
if ( range != "not_valid")
{
if (!checked && objAdded[seqID])
{ // remove
if (objAdded[seqID][range]) { // exist => remove from the array
delete objAdded[seqID][range];
_mainControl_.rowsCnt-=1;
}
}
else
{
if (!objAdded[seqID]) {
objAdded[seqID] ={};
}
// not exit =>push to the dict
objAdded[seqID][range]= {"defLine":type + ":" + id, "strand": strand};
_mainControl_.rowsCnt+=1;
}
_mainControl_.updateInfoForSubmitPanel();
if (_mainControl_.checkCallback)
{
funcLink(_mainControl_.checkCallback, _mainControl_, viewer);
}
}
if (viewer.myName && viewer.myName=="manualPanel")
{
var url = "static://" + _mainControl_.constructPreviewTableUrl(_mainControl_.objAdded);
_mainControl_.checkTypeDS.reload(url,true);
}
}
function returnRowsChecked (submitPanel,treeNode,objNode) {
_mainControl_.retValue = _mainControl_.constructPreviewTableUrl(_mainControl_.objAdded,true);
if (_mainControl_.callbackSubmit){
funcLink(_mainControl_.callbackSubmit, _mainControl_, objNode);
}
// Reset the preview button and the datasource
_mainControl_.anotTable.checkedCnt=0; _mainControl_.anotTable.checkedNodes=[];
_mainControl_.anotTable.hidden = false;
_mainControl_.annotTypeDS.reload(0,true);
_mainControl_.checkedTable.hidden = true;
_mainControl_.checkedTable.render();
_mainControl_.manualPanel.hidden = true;
_mainControl_.manualPanel.render();
submitPanel.tree.findByName("showChecked").value = "Preview Checked Elements";
submitPanel.tree.findByName("removeSelected").hidden = true;
submitPanel.refresh();
// clear every thing after submit
onClearAll(submitPanel);
_mainControl_.retValue0 = _mainControl_.retValue;
_mainControl_.retValue = "" ;
}
function onClearAll (submitPanel,treeNode,objNode) {
_mainControl_.rowsCnt =0;
_mainControl_.objAdded ={};
// clear panel
var infoNode = _mainControl_.anotSubmitPanel.tree.findByName("info");
infoNode.value = 'Select range(s) to add';
_mainControl_.anotSubmitPanel.refresh();
// clear check rows
_mainControl_.checkTypeDS.reload("static://",true);
var checkedNodes = _mainControl_.anotTable.checkedNodes;
for (var i=0; i<checkedNodes.length; ++i) {
var curNode = checkedNodes[i];
_mainControl_.anotTable.mimicCheckmarkSelected(curNode.irow,false);
_mainControl_.anotTable.onCheckmarkSelected(_mainControl_.anotTable.container,0,curNode.irow);
}
if (_mainControl_.clearCallback) {
funcLink(_mainControl_.clearCallback, _mainControl_, submitPanel);
}
}
_mainControl_.constructPreviewTableUrl = function (obj, isOutput) { // obj = {seqID: {start1-end1: type1-id1, start2-end2: type2-id2}}
var t = "seqID,ranges\n"; var len = t.length;
if (isOutput){
t ="index,seqID,start,end,direction,defLine\n"; len=t.length; // 0: forward, 1: reverse, 2: complement, 3:reverse complement
}
var objKeyArr = Object.keys(obj);
var iCnt=0;
for (var i=0; i< objKeyArr.length; ++i) { // looping through seqID
var curObj = obj[objKeyArr[i]];
for (var range in curObj) {
if (isOutput) {
if (curObj[range]["strand"]>0) { // strand = "+"
t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 0 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;
/*t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 1 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;*/
}
else { // strand = "-"
t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 1 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;
/*t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 2 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;
t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 3 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;*/
}
} else {
t += "" + objKeyArr[i] + "," + range + "\n";
}
}
}
return (t.length > len) ? t: "";
}
function previewCheckedElement(submitPanel,treeNode,objNode) {
var showCheckedNode = submitPanel.tree.findByName("showChecked");
var removeNode = submitPanel.tree.findByName("removeSelected");
if (showCheckedNode.value.indexOf("Back")==-1) { // when select preview
_mainControl_.anotTable.hidden=true;
_mainControl_.anotTable.render();
var url = "static://" + _mainControl_.constructPreviewTableUrl(_mainControl_.objAdded);
_mainControl_.checkedTable.hidden=false;
_mainControl_.checkTypeDS.reload(url,true);
showCheckedNode.value = "Back";
removeNode.hidden = false;
submitPanel.refresh();
}
else { // when select back
_mainControl_.anotTable.checkedCnt=0; _mainControl_.anotTable.checkedNodes=[];
_mainControl_.anotTable.hidden = false;
_mainControl_.annotTypeDS.reload(0,true);
_mainControl_.checkedTable.hidden = true;
_mainControl_.checkedTable.render();
showCheckedNode.value = "Preview Checked Elements";
removeNode.hidden = true;
submitPanel.refresh();
}
}
function removeSelectedElement(submitPanel,treeNode,objNode) {
console.log("removing selected element");
var checkedTbl = _mainControl_.checkedTable;
var objAdded = _mainControl_.objAdded;
for (var i=0; i<checkedTbl.selectedCnt; ++i) {
var curNode = checkedTbl.selectedNodes[i];
if (objAdded[curNode.seqID]) {
var curObj = objAdded[curNode.seqID];
if (curObj[curNode.ranges]) {
delete curObj[curNode.ranges];
--_mainControl_.rowsCnt;
}
}
if (objAdded[curNode.seqID] && !Object.keys(objAdded[curNode.seqID]).length) {
delete objAdded[curNode.seqID];
}
}
var url = "static://" + _mainControl_.constructPreviewTableUrl(_mainControl_.objAdded);
_mainControl_.checkTypeDS.reload(url,true);
checkedTbl.selectedCnt=0; checkedTbl.selectedNodes=[];
_mainControl_.updateInfoForSubmitPanel();
if (_mainControl_.removeCallback) {
funcLink(_mainControl_.removeCallback, _mainControl_, objNode);
}
}
_mainControl_.reload = function () {
if (this.annotObjList.length){
var ionList = this.annotObjList.join(",");
var refList = this.referenceObjList.join(";");
this.annotTypeDS.url = urlExchangeParameter(this.defaultAnnotUrl, "ionObjs", ionList);
this.annotTypeDS.url = urlExchangeParameter(this.annotTypeDS.url, "refGenomeList", refList);
this.annotTypeDS.reload(0,true);
}
}
_mainControl_.updateInfoForSubmitPanel = function () {
var infoNode = this.anotSubmitPanel.tree.findByName("info");
if (this.rowsCnt<=0){
this.rowsCnt =0;
infoNode.value = 'Select range(s) to add';
}
else {
infoNode.value = ''+ this.rowsCnt +' range(s) added';
}
this.anotSubmitPanel.refresh();
}
// contruction Panel
function openManualPanel (panel, treeNode, objNode){
//var aa=0;
var mPanel = _mainControl_.manualPanel;
if (!treeNode.value)
{
treeNode.value = 1;
mPanel.hidden = false;
}
else
{
treeNode.value =0;
mPanel.hidden = true;
}
mPanel.render();
}
_mainControl_.constructPanel = function () {
var anotPanel = new vjBasicPanelView({
data:["dsVoid",this.annotTypeDS.name],
rows:[
{name:'refresh', title: 'Refresh' ,order:-1, icon:'refresh' , description: 'refresh the content of the control to retrieve up to date information' , url: "javascript:vjDS['$(dataname)'].reload(null,true);"},
{name:'pager', icon:'page' , title:'per page',order:2, description: 'page up/down or show selected number of objects in the control' , type:'pager', counters: [10,20,50,100,1000,'all']},
{ name: 'search', align: 'right', type: ' search', prefix:"Search Id: ",order:10, isSubmitable: true, title: 'Search', description: 'search id',order:'1', url: "?cmd=objFile&ids=$(ids)" },
{ name: 'searchType', title:"search type", prefix:"Search Type: ", align: 'right', type: ' text',isSubmitable: true, description: 'search type',order:'1',path:"/search/searchType", hidden: this.searchTypeButton.hidden},
{ name : 'ionObjs', type:"text", align: 'left' , order : 1, prefix: this.annotationButton.name, isSubmitable: true, hidden: this.annotationButton.hidden},
{ name : 'manualInput', align: 'right' , order : -1, icon:"arrow_sort_down_highlighted.gif",title: "Insert Ranges",showTitle:true, description:"Manual Input",url: openManualPanel, iconSize:18, value:0}
],
parentObjCls: main_objCls,
formObject:this.formObject
});
var manualPanel = new vjPanelView( {
data:["dsVoid"],
rows: [
{ name: 'seqID', align: 'left', type: 'text', prefix:"Sequence Id: ",order:1, title: 'Sequence Id', description: 'Sequence Identifier',order:'1', size: '8' },
{ name: 'start', align: 'left', type: 'text', prefix:"Start Position: ",order:2, title: 'Start Position', description: 'Start Position',order:'2',size: '8' },
{ name: 'end', align: 'left', type: 'text', prefix:"End Position: ",order:3, title: 'End Position', description: 'End Position',order:'3',size: '8' },
{ name : 'add', title:'Add', icon:"plus.gif", iconSize:"18" ,showTitle:true ,align: 'left' , order : 4, url: accumulateRows }
],
parentObjCls: main_objCls,
hidden:true,
myName: "manualPanel",
formObject:this.formObject
});
this.anotPanel = anotPanel;
this.manualPanel = manualPanel;
this.viewersArr.push(this.anotPanel);
}
// contruction Table
_mainControl_.constructTable = function () {
// annotation table
var anotTable = new vjTableView({
data: this.annotTypeDS.name
,formObject:this.formObject
,parentObjCls: main_objCls
,bgColors: this.bgColors
,defaultEmptyText: this.defaultEmptyText
,maxTxtLen: this.maxTxtLen
,treeColumn: "start"
,checkable:true
//,selectCallback: this.selectCallback
, appendCols : [{header:{name:"path",title:'Annotation', type:"treenode",order:1,maxTxtLen:32},cell:""}]
,cols : [{ name: 'seqID', hidden:true }]
,treeColumn: "path"
,precompute: "node.name=node.seqID+'['+node.start + ':'+node.end+']';node.path='/'+node.name; \
if(this.viewer.dicPaths[node.path]){if (node.type.trim()=='strand'){this.viewer.dicPaths[node.path]= (node.id.trim()=='+') ? 1 : -1; };node.path+='/'+node.type+':'+node.id.replace(/\\//g,'.');} \
else {this.viewer.dicPaths[node.path]=1;if (node.type.trim()=='strand'){this.viewer.dicPaths[node.path]= (node.id.trim()=='+') ? 1 : -1; };} \
"
,postcompute:"if(node.treenode && node.treenode.depth>=2){node.styleNoCheckmark=true;node.name='';node.start='';node.end='';}"
,dicPaths: {}
,checkCallback: accumulateRows
,myName:"mainTable"
});
// checked elements table
var checkElementTable = new vjTableView({
data: this.checkTypeDS.name
,formObject:this.formObject
,parentObjCls: main_objCls
,bgColors: this.bgColors
,defaultEmptyText: this.defaultEmptyText
,maxTxtLen: this.maxTxtLen
,hidden:true
,myName:"checkTable"
});
this.anotTable = anotTable;
this.checkedTable = checkElementTable;
this.viewersArr.push(this.anotTable, this.checkedTable);
}
// construct the submit panel at the bottom
_mainControl_.constructSubmitPanel = function () {
var rows=[{ name : 'info', type : 'text', title : 'Select range(s) to add', value : 'Select range(s) to add', align:'right', readonly:true, size:40, prefix:'Selected range(s): ', order : 1},
{ name : 'submit', type : 'button', value: this.submitButton['name'], align: 'right' , order : 2, url: returnRowsChecked, hidden: this.submitButton['hidden']},
{ name : 'showChecked', type : 'button', value:'Preview Checked Elements', align: 'left' , order : 1, url: previewCheckedElement},
{ name : 'removeSelected', type : 'button', value:'remove selected Elements', align: 'left' , hidden: true ,order : 2, url: removeSelectedElement},
{ name : 'clear', type : 'button', value:'Clear', align: 'right' , order : 3, url: onClearAll }
];
var anotPanel = new vjPanelView({
data:["dsVoid"],
rows: rows,
formObject: this.formObject,
parentObjCls: main_objCls,
myName: "submitPanel",
isok: true
} );
this.anotSubmitPanel = anotPanel; |
_mainControl_.constructViewers = function() {
this.constructPanel();
this.constructTable();
this.constructSubmitPanel();
this.reload();
}
// Construct All Viewers
this.constructViewers();
return [this.anotPanel, this.manualPanel,this.anotTable, this.checkedTable ,this.anotSubmitPanel];
}
//# sourceURL = getBaseUrl() + "/js/vjAnnotListTableView.js" | this.viewersArr.push(this.anotSubmitPanel);
} | random_line_split |
vjAnnotListTableView.js | /*
* ::718604!
*
* Copyright(C) November 20, 2014 U.S. Food and Drug Administration
* Authors: Dr. Vahan Simonyan (1), Dr. Raja Mazumder (2), et al
* Affiliation: Food and Drug Administration (1), George Washington University (2)
*
* All rights Reserved.
*
* The MIT License (MIT)
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
var at = new vjAnnotListTableControl({
formObject: document.forms["demo-form"],
annotObjList:["3099581","3099578"], // optional
callbackSubmit: testSubmit, // optional
submitButton: {name:"koko", hidden:true}, // optional
annotationButton:{name:"aaaa", hidden: true} // optionals
});
*/
function vjAnnotListTableControl(viewer){
//
this.clone=function (viewbase)
{
if(!viewbase)viewbase=this;
var i=0;
// copy all the features from base objejct
for ( i in viewbase ) {
this[i] = viewbase[i];
}
};
this.clone(viewer);
var main_objCls = "obj-AnnotListTableControl"+Math.random();
vjObj.register(main_objCls,this);
// Default Parameters
this.defaultAnnotUrl = "http://?cmd=ionAnnotTypes&fromComputation=0&recordTypes=relation"; // &ionObjs
this.viewersArr =[];
this.objAdded ={};
this.rowsCnt = 0;
//
this.maxTxtLen= this.maxTxtLen ? this.maxTxtLen : 25;
this.bgColors= this.bgColors ? this.bgColors : [ '#f2f2f2', '#ffffff' ];
this.defaultEmptyText = this.defaultEmptyText ? this.defaultEmptyText : 'no information to show';
//this.selectCallback= this.selectCallback ? this.selectCallback : 0;
//this.callbackRendered= this.callbackRendered ? this.callbackRendered : 0;
this.callbackSubmit= this.callbackSubmit ? this.callbackSubmit : 0;
if (!this.vjDS) this.vjDS = vjDS;
if (!this.vjDV) this.vjDV = vjDV;
if (!this.formObject) this.formObject = document.forms[this.formName];
if (!this.className) this.className = "ANNOTLISTCONTROL_table";
// BUTTON VISIBILITY
if (!this.submitButton) this.submitButton = {};
if (!this.submitButton['name']) this.submitButton.name= "Submit";
if (!this.submitButton['hidden']) this.submitButton.hidden= false;
if (!this.annotationButton) this.annotationButton= {};
if (!this.annotationButton['name']) this.annotationButton.name = "Annotation Files";
if (!this.annotationButton['hidden']) this.annotationButton.hidden = false;
if (!this.searchTypeButton) this.searchTypeButton= {};
if (!this.searchTypeButton['hidden']) this.searchTypeButton.hidden= true;
if (!this.referenceObjList) this.referenceObjList = [];
if (!this.annotObjList) this.annotObjList = [];
var dsname = "dsAnnotList_table";
//this.annotTypeDS = this.vjDS.add("infrastructure: Folders Help", dsname, this.defaultAnnotUrl,0,"seqID,start,end,type,id\n");
this.annotTypeDS = this.vjDS.add("Loading annotation ", dsname, "static://",0,"seqID,start,end,type,id\n");
this.checkTypeDS = this.vjDS.add("Loading results", dsname+"-checked", "static://");
// function
var _mainControl_ = this;
function checkDirection (dicPath, curPath) {
var myArr = Object.keys(dicPath);
for (var ia=0; ia<myArr.length; ++ia) {
if (curPath.indexOf(myArr[ia])!=-1) |
}
return 1;
}
function accumulateRows (viewer, node, ir,ic) {
var objAdded = _mainControl_.objAdded;
var range = "", seqID ="", type = "", id= "",strand = "+", checked = true;
if (viewer.myName && viewer.myName=="manualPanel")
{
seqID = viewer.tree.findByName("seqID").value;
var curStart = viewer.tree.findByName("start").value;
curStart = isNaN(curStart) ? (-2) : curStart;
var curEnd = viewer.tree.findByName("end").value;
curEnd = isNaN(curEnd) ? (-2) : curEnd;
if (curStart==-2 || curEnd==-2) {
range= "not_valid";
}
else {
range = curStart + "-" + curEnd;
}
viewer.render();
}
else {
range = node.start + "-" + node.end;
seqID = node.seqID; type = node.type; id = node.id;
strand = checkDirection(viewer.dicPaths, node.path);
checked = node.checked;
}
if ( range != "not_valid")
{
if (!checked && objAdded[seqID])
{ // remove
if (objAdded[seqID][range]) { // exist => remove from the array
delete objAdded[seqID][range];
_mainControl_.rowsCnt-=1;
}
}
else
{
if (!objAdded[seqID]) {
objAdded[seqID] ={};
}
// not exit =>push to the dict
objAdded[seqID][range]= {"defLine":type + ":" + id, "strand": strand};
_mainControl_.rowsCnt+=1;
}
_mainControl_.updateInfoForSubmitPanel();
if (_mainControl_.checkCallback)
{
funcLink(_mainControl_.checkCallback, _mainControl_, viewer);
}
}
if (viewer.myName && viewer.myName=="manualPanel")
{
var url = "static://" + _mainControl_.constructPreviewTableUrl(_mainControl_.objAdded);
_mainControl_.checkTypeDS.reload(url,true);
}
}
function returnRowsChecked (submitPanel,treeNode,objNode) {
_mainControl_.retValue = _mainControl_.constructPreviewTableUrl(_mainControl_.objAdded,true);
if (_mainControl_.callbackSubmit){
funcLink(_mainControl_.callbackSubmit, _mainControl_, objNode);
}
// Reset the preview button and the datasource
_mainControl_.anotTable.checkedCnt=0; _mainControl_.anotTable.checkedNodes=[];
_mainControl_.anotTable.hidden = false;
_mainControl_.annotTypeDS.reload(0,true);
_mainControl_.checkedTable.hidden = true;
_mainControl_.checkedTable.render();
_mainControl_.manualPanel.hidden = true;
_mainControl_.manualPanel.render();
submitPanel.tree.findByName("showChecked").value = "Preview Checked Elements";
submitPanel.tree.findByName("removeSelected").hidden = true;
submitPanel.refresh();
// clear every thing after submit
onClearAll(submitPanel);
_mainControl_.retValue0 = _mainControl_.retValue;
_mainControl_.retValue = "" ;
}
function onClearAll (submitPanel,treeNode,objNode) {
_mainControl_.rowsCnt =0;
_mainControl_.objAdded ={};
// clear panel
var infoNode = _mainControl_.anotSubmitPanel.tree.findByName("info");
infoNode.value = 'Select range(s) to add';
_mainControl_.anotSubmitPanel.refresh();
// clear check rows
_mainControl_.checkTypeDS.reload("static://",true);
var checkedNodes = _mainControl_.anotTable.checkedNodes;
for (var i=0; i<checkedNodes.length; ++i) {
var curNode = checkedNodes[i];
_mainControl_.anotTable.mimicCheckmarkSelected(curNode.irow,false);
_mainControl_.anotTable.onCheckmarkSelected(_mainControl_.anotTable.container,0,curNode.irow);
}
if (_mainControl_.clearCallback) {
funcLink(_mainControl_.clearCallback, _mainControl_, submitPanel);
}
}
_mainControl_.constructPreviewTableUrl = function (obj, isOutput) { // obj = {seqID: {start1-end1: type1-id1, start2-end2: type2-id2}}
var t = "seqID,ranges\n"; var len = t.length;
if (isOutput){
t ="index,seqID,start,end,direction,defLine\n"; len=t.length; // 0: forward, 1: reverse, 2: complement, 3:reverse complement
}
var objKeyArr = Object.keys(obj);
var iCnt=0;
for (var i=0; i< objKeyArr.length; ++i) { // looping through seqID
var curObj = obj[objKeyArr[i]];
for (var range in curObj) {
if (isOutput) {
if (curObj[range]["strand"]>0) { // strand = "+"
t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 0 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;
/*t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 1 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;*/
}
else { // strand = "-"
t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 1 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;
/*t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 2 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;
t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 3 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;*/
}
} else {
t += "" + objKeyArr[i] + "," + range + "\n";
}
}
}
return (t.length > len) ? t: "";
}
function previewCheckedElement(submitPanel,treeNode,objNode) {
var showCheckedNode = submitPanel.tree.findByName("showChecked");
var removeNode = submitPanel.tree.findByName("removeSelected");
if (showCheckedNode.value.indexOf("Back")==-1) { // when select preview
_mainControl_.anotTable.hidden=true;
_mainControl_.anotTable.render();
var url = "static://" + _mainControl_.constructPreviewTableUrl(_mainControl_.objAdded);
_mainControl_.checkedTable.hidden=false;
_mainControl_.checkTypeDS.reload(url,true);
showCheckedNode.value = "Back";
removeNode.hidden = false;
submitPanel.refresh();
}
else { // when select back
_mainControl_.anotTable.checkedCnt=0; _mainControl_.anotTable.checkedNodes=[];
_mainControl_.anotTable.hidden = false;
_mainControl_.annotTypeDS.reload(0,true);
_mainControl_.checkedTable.hidden = true;
_mainControl_.checkedTable.render();
showCheckedNode.value = "Preview Checked Elements";
removeNode.hidden = true;
submitPanel.refresh();
}
}
function removeSelectedElement(submitPanel,treeNode,objNode) {
console.log("removing selected element");
var checkedTbl = _mainControl_.checkedTable;
var objAdded = _mainControl_.objAdded;
for (var i=0; i<checkedTbl.selectedCnt; ++i) {
var curNode = checkedTbl.selectedNodes[i];
if (objAdded[curNode.seqID]) {
var curObj = objAdded[curNode.seqID];
if (curObj[curNode.ranges]) {
delete curObj[curNode.ranges];
--_mainControl_.rowsCnt;
}
}
if (objAdded[curNode.seqID] && !Object.keys(objAdded[curNode.seqID]).length) {
delete objAdded[curNode.seqID];
}
}
var url = "static://" + _mainControl_.constructPreviewTableUrl(_mainControl_.objAdded);
_mainControl_.checkTypeDS.reload(url,true);
checkedTbl.selectedCnt=0; checkedTbl.selectedNodes=[];
_mainControl_.updateInfoForSubmitPanel();
if (_mainControl_.removeCallback) {
funcLink(_mainControl_.removeCallback, _mainControl_, objNode);
}
}
_mainControl_.reload = function () {
if (this.annotObjList.length){
var ionList = this.annotObjList.join(",");
var refList = this.referenceObjList.join(";");
this.annotTypeDS.url = urlExchangeParameter(this.defaultAnnotUrl, "ionObjs", ionList);
this.annotTypeDS.url = urlExchangeParameter(this.annotTypeDS.url, "refGenomeList", refList);
this.annotTypeDS.reload(0,true);
}
}
_mainControl_.updateInfoForSubmitPanel = function () {
var infoNode = this.anotSubmitPanel.tree.findByName("info");
if (this.rowsCnt<=0){
this.rowsCnt =0;
infoNode.value = 'Select range(s) to add';
}
else {
infoNode.value = ''+ this.rowsCnt +' range(s) added';
}
this.anotSubmitPanel.refresh();
}
// contruction Panel
function openManualPanel (panel, treeNode, objNode){
//var aa=0;
var mPanel = _mainControl_.manualPanel;
if (!treeNode.value)
{
treeNode.value = 1;
mPanel.hidden = false;
}
else
{
treeNode.value =0;
mPanel.hidden = true;
}
mPanel.render();
}
_mainControl_.constructPanel = function () {
var anotPanel = new vjBasicPanelView({
data:["dsVoid",this.annotTypeDS.name],
rows:[
{name:'refresh', title: 'Refresh' ,order:-1, icon:'refresh' , description: 'refresh the content of the control to retrieve up to date information' , url: "javascript:vjDS['$(dataname)'].reload(null,true);"},
{name:'pager', icon:'page' , title:'per page',order:2, description: 'page up/down or show selected number of objects in the control' , type:'pager', counters: [10,20,50,100,1000,'all']},
{ name: 'search', align: 'right', type: ' search', prefix:"Search Id: ",order:10, isSubmitable: true, title: 'Search', description: 'search id',order:'1', url: "?cmd=objFile&ids=$(ids)" },
{ name: 'searchType', title:"search type", prefix:"Search Type: ", align: 'right', type: ' text',isSubmitable: true, description: 'search type',order:'1',path:"/search/searchType", hidden: this.searchTypeButton.hidden},
{ name : 'ionObjs', type:"text", align: 'left' , order : 1, prefix: this.annotationButton.name, isSubmitable: true, hidden: this.annotationButton.hidden},
{ name : 'manualInput', align: 'right' , order : -1, icon:"arrow_sort_down_highlighted.gif",title: "Insert Ranges",showTitle:true, description:"Manual Input",url: openManualPanel, iconSize:18, value:0}
],
parentObjCls: main_objCls,
formObject:this.formObject
});
var manualPanel = new vjPanelView( {
data:["dsVoid"],
rows: [
{ name: 'seqID', align: 'left', type: 'text', prefix:"Sequence Id: ",order:1, title: 'Sequence Id', description: 'Sequence Identifier',order:'1', size: '8' },
{ name: 'start', align: 'left', type: 'text', prefix:"Start Position: ",order:2, title: 'Start Position', description: 'Start Position',order:'2',size: '8' },
{ name: 'end', align: 'left', type: 'text', prefix:"End Position: ",order:3, title: 'End Position', description: 'End Position',order:'3',size: '8' },
{ name : 'add', title:'Add', icon:"plus.gif", iconSize:"18" ,showTitle:true ,align: 'left' , order : 4, url: accumulateRows }
],
parentObjCls: main_objCls,
hidden:true,
myName: "manualPanel",
formObject:this.formObject
});
this.anotPanel = anotPanel;
this.manualPanel = manualPanel;
this.viewersArr.push(this.anotPanel);
}
// contruction Table
_mainControl_.constructTable = function () {
// annotation table
var anotTable = new vjTableView({
data: this.annotTypeDS.name
,formObject:this.formObject
,parentObjCls: main_objCls
,bgColors: this.bgColors
,defaultEmptyText: this.defaultEmptyText
,maxTxtLen: this.maxTxtLen
,treeColumn: "start"
,checkable:true
//,selectCallback: this.selectCallback
, appendCols : [{header:{name:"path",title:'Annotation', type:"treenode",order:1,maxTxtLen:32},cell:""}]
,cols : [{ name: 'seqID', hidden:true }]
,treeColumn: "path"
,precompute: "node.name=node.seqID+'['+node.start + ':'+node.end+']';node.path='/'+node.name; \
if(this.viewer.dicPaths[node.path]){if (node.type.trim()=='strand'){this.viewer.dicPaths[node.path]= (node.id.trim()=='+') ? 1 : -1; };node.path+='/'+node.type+':'+node.id.replace(/\\//g,'.');} \
else {this.viewer.dicPaths[node.path]=1;if (node.type.trim()=='strand'){this.viewer.dicPaths[node.path]= (node.id.trim()=='+') ? 1 : -1; };} \
"
,postcompute:"if(node.treenode && node.treenode.depth>=2){node.styleNoCheckmark=true;node.name='';node.start='';node.end='';}"
,dicPaths: {}
,checkCallback: accumulateRows
,myName:"mainTable"
});
// checked elements table
var checkElementTable = new vjTableView({
data: this.checkTypeDS.name
,formObject:this.formObject
,parentObjCls: main_objCls
,bgColors: this.bgColors
,defaultEmptyText: this.defaultEmptyText
,maxTxtLen: this.maxTxtLen
,hidden:true
,myName:"checkTable"
});
this.anotTable = anotTable;
this.checkedTable = checkElementTable;
this.viewersArr.push(this.anotTable, this.checkedTable);
}
// construct the submit panel at the bottom
_mainControl_.constructSubmitPanel = function () {
var rows=[{ name : 'info', type : 'text', title : 'Select range(s) to add', value : 'Select range(s) to add', align:'right', readonly:true, size:40, prefix:'Selected range(s): ', order : 1},
{ name : 'submit', type : 'button', value: this.submitButton['name'], align: 'right' , order : 2, url: returnRowsChecked, hidden: this.submitButton['hidden']},
{ name : 'showChecked', type : 'button', value:'Preview Checked Elements', align: 'left' , order : 1, url: previewCheckedElement},
{ name : 'removeSelected', type : 'button', value:'remove selected Elements', align: 'left' , hidden: true ,order : 2, url: removeSelectedElement},
{ name : 'clear', type : 'button', value:'Clear', align: 'right' , order : 3, url: onClearAll }
];
var anotPanel = new vjPanelView({
data:["dsVoid"],
rows: rows,
formObject: this.formObject,
parentObjCls: main_objCls,
myName: "submitPanel",
isok: true
} );
this.anotSubmitPanel = anotPanel;
this.viewersArr.push(this.anotSubmitPanel);
}
_mainControl_.constructViewers = function() {
this.constructPanel();
this.constructTable();
this.constructSubmitPanel();
this.reload();
}
// Construct All Viewers
this.constructViewers();
return [this.anotPanel, this.manualPanel,this.anotTable, this.checkedTable ,this.anotSubmitPanel];
}
//# sourceURL = getBaseUrl() + "/js/vjAnnotListTableView.js" | {
return dicPath[myArr[ia]];
} | conditional_block |
vjAnnotListTableView.js | /*
* ::718604!
*
* Copyright(C) November 20, 2014 U.S. Food and Drug Administration
* Authors: Dr. Vahan Simonyan (1), Dr. Raja Mazumder (2), et al
* Affiliation: Food and Drug Administration (1), George Washington University (2)
*
* All rights Reserved.
*
* The MIT License (MIT)
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
var at = new vjAnnotListTableControl({
formObject: document.forms["demo-form"],
annotObjList:["3099581","3099578"], // optional
callbackSubmit: testSubmit, // optional
submitButton: {name:"koko", hidden:true}, // optional
annotationButton:{name:"aaaa", hidden: true} // optionals
});
*/
function vjAnnotListTableControl(viewer){
//
this.clone=function (viewbase)
{
if(!viewbase)viewbase=this;
var i=0;
// copy all the features from base objejct
for ( i in viewbase ) {
this[i] = viewbase[i];
}
};
this.clone(viewer);
var main_objCls = "obj-AnnotListTableControl"+Math.random();
vjObj.register(main_objCls,this);
// Default Parameters
this.defaultAnnotUrl = "http://?cmd=ionAnnotTypes&fromComputation=0&recordTypes=relation"; // &ionObjs
this.viewersArr =[];
this.objAdded ={};
this.rowsCnt = 0;
//
this.maxTxtLen= this.maxTxtLen ? this.maxTxtLen : 25;
this.bgColors= this.bgColors ? this.bgColors : [ '#f2f2f2', '#ffffff' ];
this.defaultEmptyText = this.defaultEmptyText ? this.defaultEmptyText : 'no information to show';
//this.selectCallback= this.selectCallback ? this.selectCallback : 0;
//this.callbackRendered= this.callbackRendered ? this.callbackRendered : 0;
this.callbackSubmit= this.callbackSubmit ? this.callbackSubmit : 0;
if (!this.vjDS) this.vjDS = vjDS;
if (!this.vjDV) this.vjDV = vjDV;
if (!this.formObject) this.formObject = document.forms[this.formName];
if (!this.className) this.className = "ANNOTLISTCONTROL_table";
// BUTTON VISIBILITY
if (!this.submitButton) this.submitButton = {};
if (!this.submitButton['name']) this.submitButton.name= "Submit";
if (!this.submitButton['hidden']) this.submitButton.hidden= false;
if (!this.annotationButton) this.annotationButton= {};
if (!this.annotationButton['name']) this.annotationButton.name = "Annotation Files";
if (!this.annotationButton['hidden']) this.annotationButton.hidden = false;
if (!this.searchTypeButton) this.searchTypeButton= {};
if (!this.searchTypeButton['hidden']) this.searchTypeButton.hidden= true;
if (!this.referenceObjList) this.referenceObjList = [];
if (!this.annotObjList) this.annotObjList = [];
var dsname = "dsAnnotList_table";
//this.annotTypeDS = this.vjDS.add("infrastructure: Folders Help", dsname, this.defaultAnnotUrl,0,"seqID,start,end,type,id\n");
this.annotTypeDS = this.vjDS.add("Loading annotation ", dsname, "static://",0,"seqID,start,end,type,id\n");
this.checkTypeDS = this.vjDS.add("Loading results", dsname+"-checked", "static://");
// function
var _mainControl_ = this;
function checkDirection (dicPath, curPath) {
var myArr = Object.keys(dicPath);
for (var ia=0; ia<myArr.length; ++ia) {
if (curPath.indexOf(myArr[ia])!=-1) {
return dicPath[myArr[ia]];
}
}
return 1;
}
function accumulateRows (viewer, node, ir,ic) {
var objAdded = _mainControl_.objAdded;
var range = "", seqID ="", type = "", id= "",strand = "+", checked = true;
if (viewer.myName && viewer.myName=="manualPanel")
{
seqID = viewer.tree.findByName("seqID").value;
var curStart = viewer.tree.findByName("start").value;
curStart = isNaN(curStart) ? (-2) : curStart;
var curEnd = viewer.tree.findByName("end").value;
curEnd = isNaN(curEnd) ? (-2) : curEnd;
if (curStart==-2 || curEnd==-2) {
range= "not_valid";
}
else {
range = curStart + "-" + curEnd;
}
viewer.render();
}
else {
range = node.start + "-" + node.end;
seqID = node.seqID; type = node.type; id = node.id;
strand = checkDirection(viewer.dicPaths, node.path);
checked = node.checked;
}
if ( range != "not_valid")
{
if (!checked && objAdded[seqID])
{ // remove
if (objAdded[seqID][range]) { // exist => remove from the array
delete objAdded[seqID][range];
_mainControl_.rowsCnt-=1;
}
}
else
{
if (!objAdded[seqID]) {
objAdded[seqID] ={};
}
// not exit =>push to the dict
objAdded[seqID][range]= {"defLine":type + ":" + id, "strand": strand};
_mainControl_.rowsCnt+=1;
}
_mainControl_.updateInfoForSubmitPanel();
if (_mainControl_.checkCallback)
{
funcLink(_mainControl_.checkCallback, _mainControl_, viewer);
}
}
if (viewer.myName && viewer.myName=="manualPanel")
{
var url = "static://" + _mainControl_.constructPreviewTableUrl(_mainControl_.objAdded);
_mainControl_.checkTypeDS.reload(url,true);
}
}
function returnRowsChecked (submitPanel,treeNode,objNode) {
_mainControl_.retValue = _mainControl_.constructPreviewTableUrl(_mainControl_.objAdded,true);
if (_mainControl_.callbackSubmit){
funcLink(_mainControl_.callbackSubmit, _mainControl_, objNode);
}
// Reset the preview button and the datasource
_mainControl_.anotTable.checkedCnt=0; _mainControl_.anotTable.checkedNodes=[];
_mainControl_.anotTable.hidden = false;
_mainControl_.annotTypeDS.reload(0,true);
_mainControl_.checkedTable.hidden = true;
_mainControl_.checkedTable.render();
_mainControl_.manualPanel.hidden = true;
_mainControl_.manualPanel.render();
submitPanel.tree.findByName("showChecked").value = "Preview Checked Elements";
submitPanel.tree.findByName("removeSelected").hidden = true;
submitPanel.refresh();
// clear every thing after submit
onClearAll(submitPanel);
_mainControl_.retValue0 = _mainControl_.retValue;
_mainControl_.retValue = "" ;
}
function onClearAll (submitPanel,treeNode,objNode) {
_mainControl_.rowsCnt =0;
_mainControl_.objAdded ={};
// clear panel
var infoNode = _mainControl_.anotSubmitPanel.tree.findByName("info");
infoNode.value = 'Select range(s) to add';
_mainControl_.anotSubmitPanel.refresh();
// clear check rows
_mainControl_.checkTypeDS.reload("static://",true);
var checkedNodes = _mainControl_.anotTable.checkedNodes;
for (var i=0; i<checkedNodes.length; ++i) {
var curNode = checkedNodes[i];
_mainControl_.anotTable.mimicCheckmarkSelected(curNode.irow,false);
_mainControl_.anotTable.onCheckmarkSelected(_mainControl_.anotTable.container,0,curNode.irow);
}
if (_mainControl_.clearCallback) {
funcLink(_mainControl_.clearCallback, _mainControl_, submitPanel);
}
}
_mainControl_.constructPreviewTableUrl = function (obj, isOutput) { // obj = {seqID: {start1-end1: type1-id1, start2-end2: type2-id2}}
var t = "seqID,ranges\n"; var len = t.length;
if (isOutput){
t ="index,seqID,start,end,direction,defLine\n"; len=t.length; // 0: forward, 1: reverse, 2: complement, 3:reverse complement
}
var objKeyArr = Object.keys(obj);
var iCnt=0;
for (var i=0; i< objKeyArr.length; ++i) { // looping through seqID
var curObj = obj[objKeyArr[i]];
for (var range in curObj) {
if (isOutput) {
if (curObj[range]["strand"]>0) { // strand = "+"
t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 0 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;
/*t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 1 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;*/
}
else { // strand = "-"
t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 1 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;
/*t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 2 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;
t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 3 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;*/
}
} else {
t += "" + objKeyArr[i] + "," + range + "\n";
}
}
}
return (t.length > len) ? t: "";
}
function previewCheckedElement(submitPanel,treeNode,objNode) {
var showCheckedNode = submitPanel.tree.findByName("showChecked");
var removeNode = submitPanel.tree.findByName("removeSelected");
if (showCheckedNode.value.indexOf("Back")==-1) { // when select preview
_mainControl_.anotTable.hidden=true;
_mainControl_.anotTable.render();
var url = "static://" + _mainControl_.constructPreviewTableUrl(_mainControl_.objAdded);
_mainControl_.checkedTable.hidden=false;
_mainControl_.checkTypeDS.reload(url,true);
showCheckedNode.value = "Back";
removeNode.hidden = false;
submitPanel.refresh();
}
else { // when select back
_mainControl_.anotTable.checkedCnt=0; _mainControl_.anotTable.checkedNodes=[];
_mainControl_.anotTable.hidden = false;
_mainControl_.annotTypeDS.reload(0,true);
_mainControl_.checkedTable.hidden = true;
_mainControl_.checkedTable.render();
showCheckedNode.value = "Preview Checked Elements";
removeNode.hidden = true;
submitPanel.refresh();
}
}
function removeSelectedElement(submitPanel,treeNode,objNode) |
_mainControl_.reload = function () {
if (this.annotObjList.length){
var ionList = this.annotObjList.join(",");
var refList = this.referenceObjList.join(";");
this.annotTypeDS.url = urlExchangeParameter(this.defaultAnnotUrl, "ionObjs", ionList);
this.annotTypeDS.url = urlExchangeParameter(this.annotTypeDS.url, "refGenomeList", refList);
this.annotTypeDS.reload(0,true);
}
}
_mainControl_.updateInfoForSubmitPanel = function () {
var infoNode = this.anotSubmitPanel.tree.findByName("info");
if (this.rowsCnt<=0){
this.rowsCnt =0;
infoNode.value = 'Select range(s) to add';
}
else {
infoNode.value = ''+ this.rowsCnt +' range(s) added';
}
this.anotSubmitPanel.refresh();
}
// contruction Panel
function openManualPanel (panel, treeNode, objNode){
//var aa=0;
var mPanel = _mainControl_.manualPanel;
if (!treeNode.value)
{
treeNode.value = 1;
mPanel.hidden = false;
}
else
{
treeNode.value =0;
mPanel.hidden = true;
}
mPanel.render();
}
_mainControl_.constructPanel = function () {
var anotPanel = new vjBasicPanelView({
data:["dsVoid",this.annotTypeDS.name],
rows:[
{name:'refresh', title: 'Refresh' ,order:-1, icon:'refresh' , description: 'refresh the content of the control to retrieve up to date information' , url: "javascript:vjDS['$(dataname)'].reload(null,true);"},
{name:'pager', icon:'page' , title:'per page',order:2, description: 'page up/down or show selected number of objects in the control' , type:'pager', counters: [10,20,50,100,1000,'all']},
{ name: 'search', align: 'right', type: ' search', prefix:"Search Id: ",order:10, isSubmitable: true, title: 'Search', description: 'search id',order:'1', url: "?cmd=objFile&ids=$(ids)" },
{ name: 'searchType', title:"search type", prefix:"Search Type: ", align: 'right', type: ' text',isSubmitable: true, description: 'search type',order:'1',path:"/search/searchType", hidden: this.searchTypeButton.hidden},
{ name : 'ionObjs', type:"text", align: 'left' , order : 1, prefix: this.annotationButton.name, isSubmitable: true, hidden: this.annotationButton.hidden},
{ name : 'manualInput', align: 'right' , order : -1, icon:"arrow_sort_down_highlighted.gif",title: "Insert Ranges",showTitle:true, description:"Manual Input",url: openManualPanel, iconSize:18, value:0}
],
parentObjCls: main_objCls,
formObject:this.formObject
});
var manualPanel = new vjPanelView( {
data:["dsVoid"],
rows: [
{ name: 'seqID', align: 'left', type: 'text', prefix:"Sequence Id: ",order:1, title: 'Sequence Id', description: 'Sequence Identifier',order:'1', size: '8' },
{ name: 'start', align: 'left', type: 'text', prefix:"Start Position: ",order:2, title: 'Start Position', description: 'Start Position',order:'2',size: '8' },
{ name: 'end', align: 'left', type: 'text', prefix:"End Position: ",order:3, title: 'End Position', description: 'End Position',order:'3',size: '8' },
{ name : 'add', title:'Add', icon:"plus.gif", iconSize:"18" ,showTitle:true ,align: 'left' , order : 4, url: accumulateRows }
],
parentObjCls: main_objCls,
hidden:true,
myName: "manualPanel",
formObject:this.formObject
});
this.anotPanel = anotPanel;
this.manualPanel = manualPanel;
this.viewersArr.push(this.anotPanel);
}
// contruction Table
_mainControl_.constructTable = function () {
// annotation table
var anotTable = new vjTableView({
data: this.annotTypeDS.name
,formObject:this.formObject
,parentObjCls: main_objCls
,bgColors: this.bgColors
,defaultEmptyText: this.defaultEmptyText
,maxTxtLen: this.maxTxtLen
,treeColumn: "start"
,checkable:true
//,selectCallback: this.selectCallback
, appendCols : [{header:{name:"path",title:'Annotation', type:"treenode",order:1,maxTxtLen:32},cell:""}]
,cols : [{ name: 'seqID', hidden:true }]
,treeColumn: "path"
,precompute: "node.name=node.seqID+'['+node.start + ':'+node.end+']';node.path='/'+node.name; \
if(this.viewer.dicPaths[node.path]){if (node.type.trim()=='strand'){this.viewer.dicPaths[node.path]= (node.id.trim()=='+') ? 1 : -1; };node.path+='/'+node.type+':'+node.id.replace(/\\//g,'.');} \
else {this.viewer.dicPaths[node.path]=1;if (node.type.trim()=='strand'){this.viewer.dicPaths[node.path]= (node.id.trim()=='+') ? 1 : -1; };} \
"
,postcompute:"if(node.treenode && node.treenode.depth>=2){node.styleNoCheckmark=true;node.name='';node.start='';node.end='';}"
,dicPaths: {}
,checkCallback: accumulateRows
,myName:"mainTable"
});
// checked elements table
var checkElementTable = new vjTableView({
data: this.checkTypeDS.name
,formObject:this.formObject
,parentObjCls: main_objCls
,bgColors: this.bgColors
,defaultEmptyText: this.defaultEmptyText
,maxTxtLen: this.maxTxtLen
,hidden:true
,myName:"checkTable"
});
this.anotTable = anotTable;
this.checkedTable = checkElementTable;
this.viewersArr.push(this.anotTable, this.checkedTable);
}
// construct the submit panel at the bottom
_mainControl_.constructSubmitPanel = function () {
var rows=[{ name : 'info', type : 'text', title : 'Select range(s) to add', value : 'Select range(s) to add', align:'right', readonly:true, size:40, prefix:'Selected range(s): ', order : 1},
{ name : 'submit', type : 'button', value: this.submitButton['name'], align: 'right' , order : 2, url: returnRowsChecked, hidden: this.submitButton['hidden']},
{ name : 'showChecked', type : 'button', value:'Preview Checked Elements', align: 'left' , order : 1, url: previewCheckedElement},
{ name : 'removeSelected', type : 'button', value:'remove selected Elements', align: 'left' , hidden: true ,order : 2, url: removeSelectedElement},
{ name : 'clear', type : 'button', value:'Clear', align: 'right' , order : 3, url: onClearAll }
];
var anotPanel = new vjPanelView({
data:["dsVoid"],
rows: rows,
formObject: this.formObject,
parentObjCls: main_objCls,
myName: "submitPanel",
isok: true
} );
this.anotSubmitPanel = anotPanel;
this.viewersArr.push(this.anotSubmitPanel);
}
_mainControl_.constructViewers = function() {
this.constructPanel();
this.constructTable();
this.constructSubmitPanel();
this.reload();
}
// Construct All Viewers
this.constructViewers();
return [this.anotPanel, this.manualPanel,this.anotTable, this.checkedTable ,this.anotSubmitPanel];
}
//# sourceURL = getBaseUrl() + "/js/vjAnnotListTableView.js" | {
console.log("removing selected element");
var checkedTbl = _mainControl_.checkedTable;
var objAdded = _mainControl_.objAdded;
for (var i=0; i<checkedTbl.selectedCnt; ++i) {
var curNode = checkedTbl.selectedNodes[i];
if (objAdded[curNode.seqID]) {
var curObj = objAdded[curNode.seqID];
if (curObj[curNode.ranges]) {
delete curObj[curNode.ranges];
--_mainControl_.rowsCnt;
}
}
if (objAdded[curNode.seqID] && !Object.keys(objAdded[curNode.seqID]).length) {
delete objAdded[curNode.seqID];
}
}
var url = "static://" + _mainControl_.constructPreviewTableUrl(_mainControl_.objAdded);
_mainControl_.checkTypeDS.reload(url,true);
checkedTbl.selectedCnt=0; checkedTbl.selectedNodes=[];
_mainControl_.updateInfoForSubmitPanel();
if (_mainControl_.removeCallback) {
funcLink(_mainControl_.removeCallback, _mainControl_, objNode);
}
} | identifier_body |
vjAnnotListTableView.js | /*
* ::718604!
*
* Copyright(C) November 20, 2014 U.S. Food and Drug Administration
* Authors: Dr. Vahan Simonyan (1), Dr. Raja Mazumder (2), et al
* Affiliation: Food and Drug Administration (1), George Washington University (2)
*
* All rights Reserved.
*
* The MIT License (MIT)
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
var at = new vjAnnotListTableControl({
formObject: document.forms["demo-form"],
annotObjList:["3099581","3099578"], // optional
callbackSubmit: testSubmit, // optional
submitButton: {name:"koko", hidden:true}, // optional
annotationButton:{name:"aaaa", hidden: true} // optionals
});
*/
function vjAnnotListTableControl(viewer){
//
this.clone=function (viewbase)
{
if(!viewbase)viewbase=this;
var i=0;
// copy all the features from base objejct
for ( i in viewbase ) {
this[i] = viewbase[i];
}
};
this.clone(viewer);
var main_objCls = "obj-AnnotListTableControl"+Math.random();
vjObj.register(main_objCls,this);
// Default Parameters
this.defaultAnnotUrl = "http://?cmd=ionAnnotTypes&fromComputation=0&recordTypes=relation"; // &ionObjs
this.viewersArr =[];
this.objAdded ={};
this.rowsCnt = 0;
//
this.maxTxtLen= this.maxTxtLen ? this.maxTxtLen : 25;
this.bgColors= this.bgColors ? this.bgColors : [ '#f2f2f2', '#ffffff' ];
this.defaultEmptyText = this.defaultEmptyText ? this.defaultEmptyText : 'no information to show';
//this.selectCallback= this.selectCallback ? this.selectCallback : 0;
//this.callbackRendered= this.callbackRendered ? this.callbackRendered : 0;
this.callbackSubmit= this.callbackSubmit ? this.callbackSubmit : 0;
if (!this.vjDS) this.vjDS = vjDS;
if (!this.vjDV) this.vjDV = vjDV;
if (!this.formObject) this.formObject = document.forms[this.formName];
if (!this.className) this.className = "ANNOTLISTCONTROL_table";
// BUTTON VISIBILITY
if (!this.submitButton) this.submitButton = {};
if (!this.submitButton['name']) this.submitButton.name= "Submit";
if (!this.submitButton['hidden']) this.submitButton.hidden= false;
if (!this.annotationButton) this.annotationButton= {};
if (!this.annotationButton['name']) this.annotationButton.name = "Annotation Files";
if (!this.annotationButton['hidden']) this.annotationButton.hidden = false;
if (!this.searchTypeButton) this.searchTypeButton= {};
if (!this.searchTypeButton['hidden']) this.searchTypeButton.hidden= true;
if (!this.referenceObjList) this.referenceObjList = [];
if (!this.annotObjList) this.annotObjList = [];
var dsname = "dsAnnotList_table";
//this.annotTypeDS = this.vjDS.add("infrastructure: Folders Help", dsname, this.defaultAnnotUrl,0,"seqID,start,end,type,id\n");
this.annotTypeDS = this.vjDS.add("Loading annotation ", dsname, "static://",0,"seqID,start,end,type,id\n");
this.checkTypeDS = this.vjDS.add("Loading results", dsname+"-checked", "static://");
// function
var _mainControl_ = this;
function | (dicPath, curPath) {
var myArr = Object.keys(dicPath);
for (var ia=0; ia<myArr.length; ++ia) {
if (curPath.indexOf(myArr[ia])!=-1) {
return dicPath[myArr[ia]];
}
}
return 1;
}
function accumulateRows (viewer, node, ir,ic) {
var objAdded = _mainControl_.objAdded;
var range = "", seqID ="", type = "", id= "",strand = "+", checked = true;
if (viewer.myName && viewer.myName=="manualPanel")
{
seqID = viewer.tree.findByName("seqID").value;
var curStart = viewer.tree.findByName("start").value;
curStart = isNaN(curStart) ? (-2) : curStart;
var curEnd = viewer.tree.findByName("end").value;
curEnd = isNaN(curEnd) ? (-2) : curEnd;
if (curStart==-2 || curEnd==-2) {
range= "not_valid";
}
else {
range = curStart + "-" + curEnd;
}
viewer.render();
}
else {
range = node.start + "-" + node.end;
seqID = node.seqID; type = node.type; id = node.id;
strand = checkDirection(viewer.dicPaths, node.path);
checked = node.checked;
}
if ( range != "not_valid")
{
if (!checked && objAdded[seqID])
{ // remove
if (objAdded[seqID][range]) { // exist => remove from the array
delete objAdded[seqID][range];
_mainControl_.rowsCnt-=1;
}
}
else
{
if (!objAdded[seqID]) {
objAdded[seqID] ={};
}
// not exit =>push to the dict
objAdded[seqID][range]= {"defLine":type + ":" + id, "strand": strand};
_mainControl_.rowsCnt+=1;
}
_mainControl_.updateInfoForSubmitPanel();
if (_mainControl_.checkCallback)
{
funcLink(_mainControl_.checkCallback, _mainControl_, viewer);
}
}
if (viewer.myName && viewer.myName=="manualPanel")
{
var url = "static://" + _mainControl_.constructPreviewTableUrl(_mainControl_.objAdded);
_mainControl_.checkTypeDS.reload(url,true);
}
}
function returnRowsChecked (submitPanel,treeNode,objNode) {
_mainControl_.retValue = _mainControl_.constructPreviewTableUrl(_mainControl_.objAdded,true);
if (_mainControl_.callbackSubmit){
funcLink(_mainControl_.callbackSubmit, _mainControl_, objNode);
}
// Reset the preview button and the datasource
_mainControl_.anotTable.checkedCnt=0; _mainControl_.anotTable.checkedNodes=[];
_mainControl_.anotTable.hidden = false;
_mainControl_.annotTypeDS.reload(0,true);
_mainControl_.checkedTable.hidden = true;
_mainControl_.checkedTable.render();
_mainControl_.manualPanel.hidden = true;
_mainControl_.manualPanel.render();
submitPanel.tree.findByName("showChecked").value = "Preview Checked Elements";
submitPanel.tree.findByName("removeSelected").hidden = true;
submitPanel.refresh();
// clear every thing after submit
onClearAll(submitPanel);
_mainControl_.retValue0 = _mainControl_.retValue;
_mainControl_.retValue = "" ;
}
function onClearAll (submitPanel,treeNode,objNode) {
_mainControl_.rowsCnt =0;
_mainControl_.objAdded ={};
// clear panel
var infoNode = _mainControl_.anotSubmitPanel.tree.findByName("info");
infoNode.value = 'Select range(s) to add';
_mainControl_.anotSubmitPanel.refresh();
// clear check rows
_mainControl_.checkTypeDS.reload("static://",true);
var checkedNodes = _mainControl_.anotTable.checkedNodes;
for (var i=0; i<checkedNodes.length; ++i) {
var curNode = checkedNodes[i];
_mainControl_.anotTable.mimicCheckmarkSelected(curNode.irow,false);
_mainControl_.anotTable.onCheckmarkSelected(_mainControl_.anotTable.container,0,curNode.irow);
}
if (_mainControl_.clearCallback) {
funcLink(_mainControl_.clearCallback, _mainControl_, submitPanel);
}
}
_mainControl_.constructPreviewTableUrl = function (obj, isOutput) { // obj = {seqID: {start1-end1: type1-id1, start2-end2: type2-id2}}
var t = "seqID,ranges\n"; var len = t.length;
if (isOutput){
t ="index,seqID,start,end,direction,defLine\n"; len=t.length; // 0: forward, 1: reverse, 2: complement, 3:reverse complement
}
var objKeyArr = Object.keys(obj);
var iCnt=0;
for (var i=0; i< objKeyArr.length; ++i) { // looping through seqID
var curObj = obj[objKeyArr[i]];
for (var range in curObj) {
if (isOutput) {
if (curObj[range]["strand"]>0) { // strand = "+"
t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 0 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;
/*t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 1 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;*/
}
else { // strand = "-"
t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 1 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;
/*t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 2 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;
t += "" + iCnt + "," + objKeyArr[i] + "," + range.split("-")[0] + "," + range.split("-")[1] + "," + ( 3 ) + "," + curObj[range]["defLine"] +"\n"; ++iCnt;*/
}
} else {
t += "" + objKeyArr[i] + "," + range + "\n";
}
}
}
return (t.length > len) ? t: "";
}
function previewCheckedElement(submitPanel,treeNode,objNode) {
var showCheckedNode = submitPanel.tree.findByName("showChecked");
var removeNode = submitPanel.tree.findByName("removeSelected");
if (showCheckedNode.value.indexOf("Back")==-1) { // when select preview
_mainControl_.anotTable.hidden=true;
_mainControl_.anotTable.render();
var url = "static://" + _mainControl_.constructPreviewTableUrl(_mainControl_.objAdded);
_mainControl_.checkedTable.hidden=false;
_mainControl_.checkTypeDS.reload(url,true);
showCheckedNode.value = "Back";
removeNode.hidden = false;
submitPanel.refresh();
}
else { // when select back
_mainControl_.anotTable.checkedCnt=0; _mainControl_.anotTable.checkedNodes=[];
_mainControl_.anotTable.hidden = false;
_mainControl_.annotTypeDS.reload(0,true);
_mainControl_.checkedTable.hidden = true;
_mainControl_.checkedTable.render();
showCheckedNode.value = "Preview Checked Elements";
removeNode.hidden = true;
submitPanel.refresh();
}
}
function removeSelectedElement(submitPanel,treeNode,objNode) {
console.log("removing selected element");
var checkedTbl = _mainControl_.checkedTable;
var objAdded = _mainControl_.objAdded;
for (var i=0; i<checkedTbl.selectedCnt; ++i) {
var curNode = checkedTbl.selectedNodes[i];
if (objAdded[curNode.seqID]) {
var curObj = objAdded[curNode.seqID];
if (curObj[curNode.ranges]) {
delete curObj[curNode.ranges];
--_mainControl_.rowsCnt;
}
}
if (objAdded[curNode.seqID] && !Object.keys(objAdded[curNode.seqID]).length) {
delete objAdded[curNode.seqID];
}
}
var url = "static://" + _mainControl_.constructPreviewTableUrl(_mainControl_.objAdded);
_mainControl_.checkTypeDS.reload(url,true);
checkedTbl.selectedCnt=0; checkedTbl.selectedNodes=[];
_mainControl_.updateInfoForSubmitPanel();
if (_mainControl_.removeCallback) {
funcLink(_mainControl_.removeCallback, _mainControl_, objNode);
}
}
_mainControl_.reload = function () {
if (this.annotObjList.length){
var ionList = this.annotObjList.join(",");
var refList = this.referenceObjList.join(";");
this.annotTypeDS.url = urlExchangeParameter(this.defaultAnnotUrl, "ionObjs", ionList);
this.annotTypeDS.url = urlExchangeParameter(this.annotTypeDS.url, "refGenomeList", refList);
this.annotTypeDS.reload(0,true);
}
}
_mainControl_.updateInfoForSubmitPanel = function () {
var infoNode = this.anotSubmitPanel.tree.findByName("info");
if (this.rowsCnt<=0){
this.rowsCnt =0;
infoNode.value = 'Select range(s) to add';
}
else {
infoNode.value = ''+ this.rowsCnt +' range(s) added';
}
this.anotSubmitPanel.refresh();
}
// contruction Panel
function openManualPanel (panel, treeNode, objNode){
//var aa=0;
var mPanel = _mainControl_.manualPanel;
if (!treeNode.value)
{
treeNode.value = 1;
mPanel.hidden = false;
}
else
{
treeNode.value =0;
mPanel.hidden = true;
}
mPanel.render();
}
_mainControl_.constructPanel = function () {
var anotPanel = new vjBasicPanelView({
data:["dsVoid",this.annotTypeDS.name],
rows:[
{name:'refresh', title: 'Refresh' ,order:-1, icon:'refresh' , description: 'refresh the content of the control to retrieve up to date information' , url: "javascript:vjDS['$(dataname)'].reload(null,true);"},
{name:'pager', icon:'page' , title:'per page',order:2, description: 'page up/down or show selected number of objects in the control' , type:'pager', counters: [10,20,50,100,1000,'all']},
{ name: 'search', align: 'right', type: ' search', prefix:"Search Id: ",order:10, isSubmitable: true, title: 'Search', description: 'search id',order:'1', url: "?cmd=objFile&ids=$(ids)" },
{ name: 'searchType', title:"search type", prefix:"Search Type: ", align: 'right', type: ' text',isSubmitable: true, description: 'search type',order:'1',path:"/search/searchType", hidden: this.searchTypeButton.hidden},
{ name : 'ionObjs', type:"text", align: 'left' , order : 1, prefix: this.annotationButton.name, isSubmitable: true, hidden: this.annotationButton.hidden},
{ name : 'manualInput', align: 'right' , order : -1, icon:"arrow_sort_down_highlighted.gif",title: "Insert Ranges",showTitle:true, description:"Manual Input",url: openManualPanel, iconSize:18, value:0}
],
parentObjCls: main_objCls,
formObject:this.formObject
});
var manualPanel = new vjPanelView( {
data:["dsVoid"],
rows: [
{ name: 'seqID', align: 'left', type: 'text', prefix:"Sequence Id: ",order:1, title: 'Sequence Id', description: 'Sequence Identifier',order:'1', size: '8' },
{ name: 'start', align: 'left', type: 'text', prefix:"Start Position: ",order:2, title: 'Start Position', description: 'Start Position',order:'2',size: '8' },
{ name: 'end', align: 'left', type: 'text', prefix:"End Position: ",order:3, title: 'End Position', description: 'End Position',order:'3',size: '8' },
{ name : 'add', title:'Add', icon:"plus.gif", iconSize:"18" ,showTitle:true ,align: 'left' , order : 4, url: accumulateRows }
],
parentObjCls: main_objCls,
hidden:true,
myName: "manualPanel",
formObject:this.formObject
});
this.anotPanel = anotPanel;
this.manualPanel = manualPanel;
this.viewersArr.push(this.anotPanel);
}
// contruction Table
_mainControl_.constructTable = function () {
// annotation table
var anotTable = new vjTableView({
data: this.annotTypeDS.name
,formObject:this.formObject
,parentObjCls: main_objCls
,bgColors: this.bgColors
,defaultEmptyText: this.defaultEmptyText
,maxTxtLen: this.maxTxtLen
,treeColumn: "start"
,checkable:true
//,selectCallback: this.selectCallback
, appendCols : [{header:{name:"path",title:'Annotation', type:"treenode",order:1,maxTxtLen:32},cell:""}]
,cols : [{ name: 'seqID', hidden:true }]
,treeColumn: "path"
,precompute: "node.name=node.seqID+'['+node.start + ':'+node.end+']';node.path='/'+node.name; \
if(this.viewer.dicPaths[node.path]){if (node.type.trim()=='strand'){this.viewer.dicPaths[node.path]= (node.id.trim()=='+') ? 1 : -1; };node.path+='/'+node.type+':'+node.id.replace(/\\//g,'.');} \
else {this.viewer.dicPaths[node.path]=1;if (node.type.trim()=='strand'){this.viewer.dicPaths[node.path]= (node.id.trim()=='+') ? 1 : -1; };} \
"
,postcompute:"if(node.treenode && node.treenode.depth>=2){node.styleNoCheckmark=true;node.name='';node.start='';node.end='';}"
,dicPaths: {}
,checkCallback: accumulateRows
,myName:"mainTable"
});
// checked elements table
var checkElementTable = new vjTableView({
data: this.checkTypeDS.name
,formObject:this.formObject
,parentObjCls: main_objCls
,bgColors: this.bgColors
,defaultEmptyText: this.defaultEmptyText
,maxTxtLen: this.maxTxtLen
,hidden:true
,myName:"checkTable"
});
this.anotTable = anotTable;
this.checkedTable = checkElementTable;
this.viewersArr.push(this.anotTable, this.checkedTable);
}
// construct the submit panel at the bottom
_mainControl_.constructSubmitPanel = function () {
var rows=[{ name : 'info', type : 'text', title : 'Select range(s) to add', value : 'Select range(s) to add', align:'right', readonly:true, size:40, prefix:'Selected range(s): ', order : 1},
{ name : 'submit', type : 'button', value: this.submitButton['name'], align: 'right' , order : 2, url: returnRowsChecked, hidden: this.submitButton['hidden']},
{ name : 'showChecked', type : 'button', value:'Preview Checked Elements', align: 'left' , order : 1, url: previewCheckedElement},
{ name : 'removeSelected', type : 'button', value:'remove selected Elements', align: 'left' , hidden: true ,order : 2, url: removeSelectedElement},
{ name : 'clear', type : 'button', value:'Clear', align: 'right' , order : 3, url: onClearAll }
];
var anotPanel = new vjPanelView({
data:["dsVoid"],
rows: rows,
formObject: this.formObject,
parentObjCls: main_objCls,
myName: "submitPanel",
isok: true
} );
this.anotSubmitPanel = anotPanel;
this.viewersArr.push(this.anotSubmitPanel);
}
_mainControl_.constructViewers = function() {
this.constructPanel();
this.constructTable();
this.constructSubmitPanel();
this.reload();
}
// Construct All Viewers
this.constructViewers();
return [this.anotPanel, this.manualPanel,this.anotTable, this.checkedTable ,this.anotSubmitPanel];
}
//# sourceURL = getBaseUrl() + "/js/vjAnnotListTableView.js" | checkDirection | identifier_name |
fifo.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! This test demonstrates the DataFusion FIFO capabilities.
//!
#[cfg(not(target_os = "windows"))]
#[cfg(test)]
mod unix_test {
use arrow::array::Array;
use arrow::csv::ReaderBuilder;
use arrow::datatypes::{DataType, Field, Schema};
use datafusion::test_util::register_unbounded_file_with_ordering;
use datafusion::{
prelude::{CsvReadOptions, SessionConfig, SessionContext},
test_util::{aggr_test_schema, arrow_test_data},
};
use datafusion_common::{DataFusionError, Result};
use futures::StreamExt;
use itertools::enumerate;
use nix::sys::stat;
use nix::unistd;
use rstest::*;
use std::fs::{File, OpenOptions};
use std::io::Write;
use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use std::thread::JoinHandle;
use std::time::{Duration, Instant};
use tempfile::TempDir;
// ! For the sake of the test, do not alter the numbers. !
// Session batch size
const TEST_BATCH_SIZE: usize = 20;
// Number of lines written to FIFO
const TEST_DATA_SIZE: usize = 20_000;
// Number of lines what can be joined. Each joinable key produced 20 lines with
// aggregate_test_100 dataset. We will use these joinable keys for understanding
// incremental execution.
const TEST_JOIN_RATIO: f64 = 0.01;
fn create_fifo_file(tmp_dir: &TempDir, file_name: &str) -> Result<PathBuf> {
let file_path = tmp_dir.path().join(file_name);
// Simulate an infinite environment via a FIFO file
if let Err(e) = unistd::mkfifo(&file_path, stat::Mode::S_IRWXU) {
Err(DataFusionError::Execution(e.to_string()))
} else {
Ok(file_path)
}
}
fn write_to_fifo(
mut file: &File,
line: &str,
ref_time: Instant,
broken_pipe_timeout: Duration,
) -> Result<()> {
// We need to handle broken pipe error until the reader is ready. This
// is why we use a timeout to limit the wait duration for the reader.
// If the error is different than broken pipe, we fail immediately.
while let Err(e) = file.write_all(line.as_bytes()) {
if e.raw_os_error().unwrap() == 32 {
let interval = Instant::now().duration_since(ref_time);
if interval < broken_pipe_timeout {
thread::sleep(Duration::from_millis(100));
continue;
}
}
return Err(DataFusionError::Execution(e.to_string()));
}
Ok(())
}
// This test provides a relatively realistic end-to-end scenario where
// we swap join sides to accommodate a FIFO source.
#[rstest]
#[timeout(std::time::Duration::from_secs(30))]
#[tokio::test(flavor = "multi_thread", worker_threads = 8)]
async fn | (
#[values(true, false)] unbounded_file: bool,
) -> Result<()> {
// Create session context
let config = SessionConfig::new()
.with_batch_size(TEST_BATCH_SIZE)
.with_collect_statistics(false)
.with_target_partitions(1);
let ctx = SessionContext::with_config(config);
// To make unbounded deterministic
let waiting = Arc::new(AtomicBool::new(unbounded_file));
// Create a new temporary FIFO file
let tmp_dir = TempDir::new()?;
let fifo_path =
create_fifo_file(&tmp_dir, &format!("fifo_{unbounded_file:?}.csv"))?;
// Execution can calculated at least one RecordBatch after the number of
// "joinable_lines_length" lines are read.
let joinable_lines_length =
(TEST_DATA_SIZE as f64 * TEST_JOIN_RATIO).round() as usize;
// The row including "a" is joinable with aggregate_test_100.c1
let joinable_iterator = (0..joinable_lines_length).map(|_| "a".to_string());
let second_joinable_iterator =
(0..joinable_lines_length).map(|_| "a".to_string());
// The row including "zzz" is not joinable with aggregate_test_100.c1
let non_joinable_iterator =
(0..(TEST_DATA_SIZE - joinable_lines_length)).map(|_| "zzz".to_string());
let lines = joinable_iterator
.chain(non_joinable_iterator)
.chain(second_joinable_iterator)
.zip(0..TEST_DATA_SIZE)
.map(|(a1, a2)| format!("{a1},{a2}\n"))
.collect::<Vec<_>>();
// Create writing threads for the left and right FIFO files
let task = create_writing_thread(
fifo_path.clone(),
"a1,a2\n".to_owned(),
lines,
waiting.clone(),
joinable_lines_length,
);
// Data Schema
let schema = Arc::new(Schema::new(vec![
Field::new("a1", DataType::Utf8, false),
Field::new("a2", DataType::UInt32, false),
]));
// Create a file with bounded or unbounded flag.
ctx.register_csv(
"left",
fifo_path.as_os_str().to_str().unwrap(),
CsvReadOptions::new()
.schema(schema.as_ref())
.mark_infinite(unbounded_file),
)
.await?;
// Register right table
let schema = aggr_test_schema();
let test_data = arrow_test_data();
ctx.register_csv(
"right",
&format!("{test_data}/csv/aggregate_test_100.csv"),
CsvReadOptions::new().schema(schema.as_ref()),
)
.await?;
// Execute the query
let df = ctx.sql("SELECT t1.a2, t2.c1, t2.c4, t2.c5 FROM left as t1 JOIN right as t2 ON t1.a1 = t2.c1").await?;
let mut stream = df.execute_stream().await?;
while (stream.next().await).is_some() {
waiting.store(false, Ordering::SeqCst);
}
task.join().unwrap();
Ok(())
}
#[derive(Debug, PartialEq)]
enum JoinOperation {
LeftUnmatched,
RightUnmatched,
Equal,
}
fn create_writing_thread(
file_path: PathBuf,
header: String,
lines: Vec<String>,
waiting_lock: Arc<AtomicBool>,
wait_until: usize,
) -> JoinHandle<()> {
// Timeout for a long period of BrokenPipe error
let broken_pipe_timeout = Duration::from_secs(10);
// Spawn a new thread to write to the FIFO file
thread::spawn(move || {
let file = OpenOptions::new().write(true).open(file_path).unwrap();
// Reference time to use when deciding to fail the test
let execution_start = Instant::now();
write_to_fifo(&file, &header, execution_start, broken_pipe_timeout).unwrap();
for (cnt, line) in enumerate(lines) {
while waiting_lock.load(Ordering::SeqCst) && cnt > wait_until {
thread::sleep(Duration::from_millis(50));
}
write_to_fifo(&file, &line, execution_start, broken_pipe_timeout)
.unwrap();
}
drop(file);
})
}
// This test provides a relatively realistic end-to-end scenario where
// we change the join into a [SymmetricHashJoin] to accommodate two
// unbounded (FIFO) sources.
#[rstest]
#[timeout(std::time::Duration::from_secs(30))]
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn unbounded_file_with_symmetric_join() -> Result<()> {
// Create session context
let config = SessionConfig::new()
.with_batch_size(TEST_BATCH_SIZE)
.set_bool("datafusion.execution.coalesce_batches", false)
.with_target_partitions(1);
let ctx = SessionContext::with_config(config);
// Tasks
let mut tasks: Vec<JoinHandle<()>> = vec![];
// Join filter
let a1_iter = 0..TEST_DATA_SIZE;
// Join key
let a2_iter = (0..TEST_DATA_SIZE).map(|x| x % 10);
let lines = a1_iter
.zip(a2_iter)
.map(|(a1, a2)| format!("{a1},{a2}\n"))
.collect::<Vec<_>>();
// Create a new temporary FIFO file
let tmp_dir = TempDir::new()?;
// Create a FIFO file for the left input source.
let left_fifo = create_fifo_file(&tmp_dir, "left.csv")?;
// Create a FIFO file for the right input source.
let right_fifo = create_fifo_file(&tmp_dir, "right.csv")?;
// Create a mutex for tracking if the right input source is waiting for data.
let waiting = Arc::new(AtomicBool::new(true));
// Create writing threads for the left and right FIFO files
tasks.push(create_writing_thread(
left_fifo.clone(),
"a1,a2\n".to_owned(),
lines.clone(),
waiting.clone(),
TEST_BATCH_SIZE,
));
tasks.push(create_writing_thread(
right_fifo.clone(),
"a1,a2\n".to_owned(),
lines.clone(),
waiting.clone(),
TEST_BATCH_SIZE,
));
// Create schema
let schema = Arc::new(Schema::new(vec![
Field::new("a1", DataType::UInt32, false),
Field::new("a2", DataType::UInt32, false),
]));
// Specify the ordering:
let file_sort_order = vec![[datafusion_expr::col("a1")]
.into_iter()
.map(|e| {
let ascending = true;
let nulls_first = false;
e.sort(ascending, nulls_first)
})
.collect::<Vec<_>>()];
// Set unbounded sorted files read configuration
register_unbounded_file_with_ordering(
&ctx,
schema.clone(),
&left_fifo,
"left",
file_sort_order.clone(),
true,
)
.await?;
register_unbounded_file_with_ordering(
&ctx,
schema,
&right_fifo,
"right",
file_sort_order,
true,
)
.await?;
// Execute the query, with no matching rows. (since key is modulus 10)
let df = ctx
.sql(
"SELECT
t1.a1,
t1.a2,
t2.a1,
t2.a2
FROM
left as t1 FULL
JOIN right as t2 ON t1.a2 = t2.a2
AND t1.a1 > t2.a1 + 4
AND t1.a1 < t2.a1 + 9",
)
.await?;
let mut stream = df.execute_stream().await?;
let mut operations = vec![];
// Partial.
while let Some(Ok(batch)) = stream.next().await {
waiting.store(false, Ordering::SeqCst);
let left_unmatched = batch.column(2).null_count();
let right_unmatched = batch.column(0).null_count();
let op = if left_unmatched == 0 && right_unmatched == 0 {
JoinOperation::Equal
} else if right_unmatched > left_unmatched {
JoinOperation::RightUnmatched
} else {
JoinOperation::LeftUnmatched
};
operations.push(op);
}
tasks.into_iter().for_each(|jh| jh.join().unwrap());
// The SymmetricHashJoin executor produces FULL join results at every
// pruning, which happens before it reaches the end of input and more
// than once. In this test, we feed partially joinable data to both
// sides in order to ensure that left or right unmatched results are
// generated more than once during the test.
assert!(
operations
.iter()
.filter(|&n| JoinOperation::RightUnmatched.eq(n))
.count()
> 1
&& operations
.iter()
.filter(|&n| JoinOperation::LeftUnmatched.eq(n))
.count()
> 1
);
Ok(())
}
/// It tests the INSERT INTO functionality.
#[tokio::test]
async fn test_sql_insert_into_fifo() -> Result<()> {
// To make unbounded deterministic
let waiting = Arc::new(AtomicBool::new(true));
let waiting_thread = waiting.clone();
// create local execution context
let config = SessionConfig::new().with_batch_size(TEST_BATCH_SIZE);
let ctx = SessionContext::with_config(config);
// Create a new temporary FIFO file
let tmp_dir = TempDir::new()?;
let source_fifo_path = create_fifo_file(&tmp_dir, "source.csv")?;
// Prevent move
let (source_fifo_path_thread, source_display_fifo_path) =
(source_fifo_path.clone(), source_fifo_path.display());
// Tasks
let mut tasks: Vec<JoinHandle<()>> = vec![];
// TEST_BATCH_SIZE + 1 rows will be provided. However, after processing precisely
// TEST_BATCH_SIZE rows, the program will pause and wait for a batch to be read in another
// thread. This approach ensures that the pipeline remains unbroken.
tasks.push(create_writing_thread(
source_fifo_path_thread,
"a1,a2\n".to_owned(),
(0..TEST_DATA_SIZE)
.map(|_| "a,1\n".to_string())
.collect::<Vec<_>>(),
waiting,
TEST_BATCH_SIZE,
));
// Create a new temporary FIFO file
let sink_fifo_path = create_fifo_file(&tmp_dir, "sink.csv")?;
// Prevent move
let (sink_fifo_path_thread, sink_display_fifo_path) =
(sink_fifo_path.clone(), sink_fifo_path.display());
// Spawn a new thread to read sink EXTERNAL TABLE.
tasks.push(thread::spawn(move || {
let file = File::open(sink_fifo_path_thread).unwrap();
let schema = Arc::new(Schema::new(vec![
Field::new("a1", DataType::Utf8, false),
Field::new("a2", DataType::UInt32, false),
]));
let mut reader = ReaderBuilder::new(schema)
.has_header(true)
.with_batch_size(TEST_BATCH_SIZE)
.build(file)
.map_err(|e| DataFusionError::Internal(e.to_string()))
.unwrap();
while let Some(Ok(_)) = reader.next() {
waiting_thread.store(false, Ordering::SeqCst);
}
}));
// register second csv file with the SQL (create an empty file if not found)
ctx.sql(&format!(
"CREATE EXTERNAL TABLE source_table (
a1 VARCHAR NOT NULL,
a2 INT NOT NULL
)
STORED AS CSV
WITH HEADER ROW
OPTIONS ('UNBOUNDED' 'TRUE')
LOCATION '{source_display_fifo_path}'"
))
.await?;
// register csv file with the SQL
ctx.sql(&format!(
"CREATE EXTERNAL TABLE sink_table (
a1 VARCHAR NOT NULL,
a2 INT NOT NULL
)
STORED AS CSV
WITH HEADER ROW
OPTIONS ('UNBOUNDED' 'TRUE')
LOCATION '{sink_display_fifo_path}'"
))
.await?;
let df = ctx
.sql(
"INSERT INTO sink_table
SELECT a1, a2 FROM source_table",
)
.await?;
df.collect().await?;
tasks.into_iter().for_each(|jh| jh.join().unwrap());
Ok(())
}
}
| unbounded_file_with_swapped_join | identifier_name |
fifo.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! This test demonstrates the DataFusion FIFO capabilities.
//!
#[cfg(not(target_os = "windows"))]
#[cfg(test)]
mod unix_test {
use arrow::array::Array;
use arrow::csv::ReaderBuilder;
use arrow::datatypes::{DataType, Field, Schema};
use datafusion::test_util::register_unbounded_file_with_ordering;
use datafusion::{
prelude::{CsvReadOptions, SessionConfig, SessionContext},
test_util::{aggr_test_schema, arrow_test_data},
};
use datafusion_common::{DataFusionError, Result};
use futures::StreamExt;
use itertools::enumerate;
use nix::sys::stat;
use nix::unistd;
use rstest::*;
use std::fs::{File, OpenOptions};
use std::io::Write;
use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use std::thread::JoinHandle;
use std::time::{Duration, Instant};
use tempfile::TempDir;
// ! For the sake of the test, do not alter the numbers. !
// Session batch size
const TEST_BATCH_SIZE: usize = 20;
// Number of lines written to FIFO
const TEST_DATA_SIZE: usize = 20_000;
// Number of lines what can be joined. Each joinable key produced 20 lines with
// aggregate_test_100 dataset. We will use these joinable keys for understanding
// incremental execution.
const TEST_JOIN_RATIO: f64 = 0.01;
/// Creates a FIFO (named pipe) called `file_name` inside `tmp_dir` and
/// returns its full path, or a `DataFusionError::Execution` on failure.
fn create_fifo_file(tmp_dir: &TempDir, file_name: &str) -> Result<PathBuf> {
    let fifo_path = tmp_dir.path().join(file_name);
    // A FIFO lets the test simulate an unbounded/streaming source.
    match unistd::mkfifo(&fifo_path, stat::Mode::S_IRWXU) {
        Ok(()) => Ok(fifo_path),
        Err(e) => Err(DataFusionError::Execution(e.to_string())),
    }
}
/// Writes `line` to `file`, retrying on `BrokenPipe` until the reader has
/// opened the FIFO or `broken_pipe_timeout` (measured from `ref_time`)
/// elapses. Any other I/O error, or a timeout, is surfaced as a
/// `DataFusionError::Execution`.
fn write_to_fifo(
    mut file: &File,
    line: &str,
    ref_time: Instant,
    broken_pipe_timeout: Duration,
) -> Result<()> {
    // We need to handle broken pipe error until the reader is ready. This
    // is why we use a timeout to limit the wait duration for the reader.
    // If the error is different than broken pipe, we fail immediately.
    while let Err(e) = file.write_all(line.as_bytes()) {
        // Check the error kind rather than `e.raw_os_error().unwrap()`:
        // `raw_os_error` returns an Option and the unwrap would panic on
        // synthesized errors without an OS code. (EPIPE == BrokenPipe.)
        if e.kind() == std::io::ErrorKind::BrokenPipe {
            let interval = Instant::now().duration_since(ref_time);
            if interval < broken_pipe_timeout {
                thread::sleep(Duration::from_millis(100));
                continue;
            }
        }
        return Err(DataFusionError::Execution(e.to_string()));
    }
    Ok(())
}
// This test provides a relatively realistic end-to-end scenario where
// we swap join sides to accommodate a FIFO source.
/// Joins a FIFO-backed CSV table (bounded or unbounded, per the rstest
/// parameter) against the bounded `aggregate_test_100` dataset, draining
/// the result stream and releasing the writer thread once output flows.
#[rstest]
#[timeout(std::time::Duration::from_secs(30))]
#[tokio::test(flavor = "multi_thread", worker_threads = 8)]
async fn unbounded_file_with_swapped_join(
#[values(true, false)] unbounded_file: bool,
) -> Result<()> {
// Create session context
let config = SessionConfig::new()
.with_batch_size(TEST_BATCH_SIZE)
.with_collect_statistics(false)
.with_target_partitions(1);
let ctx = SessionContext::with_config(config);
// To make unbounded deterministic: the writer pauses while this flag is set.
let waiting = Arc::new(AtomicBool::new(unbounded_file));
// Create a new temporary FIFO file
let tmp_dir = TempDir::new()?;
let fifo_path =
create_fifo_file(&tmp_dir, &format!("fifo_{unbounded_file:?}.csv"))?;
// Execution can calculate at least one RecordBatch after the number of
// "joinable_lines_length" lines are read.
let joinable_lines_length =
(TEST_DATA_SIZE as f64 * TEST_JOIN_RATIO).round() as usize;
// The row including "a" is joinable with aggregate_test_100.c1
let joinable_iterator = (0..joinable_lines_length).map(|_| "a".to_string());
let second_joinable_iterator =
(0..joinable_lines_length).map(|_| "a".to_string());
// The row including "zzz" is not joinable with aggregate_test_100.c1
let non_joinable_iterator =
(0..(TEST_DATA_SIZE - joinable_lines_length)).map(|_| "zzz".to_string());
let lines = joinable_iterator
.chain(non_joinable_iterator)
.chain(second_joinable_iterator)
.zip(0..TEST_DATA_SIZE)
.map(|(a1, a2)| format!("{a1},{a2}\n"))
.collect::<Vec<_>>();
// Create the writing thread feeding the FIFO file
let task = create_writing_thread(
fifo_path.clone(),
"a1,a2\n".to_owned(),
lines,
waiting.clone(),
joinable_lines_length,
);
// Data Schema
let schema = Arc::new(Schema::new(vec![
Field::new("a1", DataType::Utf8, false),
Field::new("a2", DataType::UInt32, false),
]));
// Create a file with bounded or unbounded flag.
ctx.register_csv(
"left",
fifo_path.as_os_str().to_str().unwrap(),
CsvReadOptions::new()
.schema(schema.as_ref())
.mark_infinite(unbounded_file),
)
.await?;
// Register right table
let schema = aggr_test_schema();
let test_data = arrow_test_data();
ctx.register_csv(
"right",
&format!("{test_data}/csv/aggregate_test_100.csv"),
CsvReadOptions::new().schema(schema.as_ref()),
)
.await?;
// Execute the query
let df = ctx.sql("SELECT t1.a2, t2.c1, t2.c4, t2.c5 FROM left as t1 JOIN right as t2 ON t1.a1 = t2.c1").await?;
let mut stream = df.execute_stream().await?;
// Once results start flowing, release the writer thread.
while (stream.next().await).is_some() {
waiting.store(false, Ordering::SeqCst);
}
task.join().unwrap();
Ok(())
}
/// Classification of a FULL join output batch by which side contributed
/// unmatched (NULL-padded) rows, inferred from per-column null counts.
#[derive(Debug, PartialEq)]
enum JoinOperation {
/// Batch contains left-side rows with no right-side match.
LeftUnmatched,
/// Batch contains right-side rows with no left-side match.
RightUnmatched,
/// Every row in the batch is fully matched.
Equal,
}
/// Spawns a writer thread that opens `file_path` for writing, emits
/// `header` followed by every entry of `lines`, and pauses after
/// `wait_until` lines whenever `waiting_lock` is set so the reader can
/// catch up.
fn create_writing_thread(
    file_path: PathBuf,
    header: String,
    lines: Vec<String>,
    waiting_lock: Arc<AtomicBool>,
    wait_until: usize,
) -> JoinHandle<()> {
    // Give up after a sustained period of BrokenPipe errors.
    let broken_pipe_timeout = Duration::from_secs(10);
    thread::spawn(move || {
        let file = OpenOptions::new().write(true).open(file_path).unwrap();
        // Reference instant used when deciding to fail the test.
        let start = Instant::now();
        write_to_fifo(&file, &header, start, broken_pipe_timeout).unwrap();
        for (idx, row) in lines.iter().enumerate() {
            while idx > wait_until && waiting_lock.load(Ordering::SeqCst) {
                thread::sleep(Duration::from_millis(50));
            }
            write_to_fifo(&file, row, start, broken_pipe_timeout).unwrap();
        }
        drop(file);
    })
}
// This test provides a relatively realistic end-to-end scenario where
// we change the join into a [SymmetricHashJoin] to accommodate two
// unbounded (FIFO) sources.
/// Runs a FULL join between two unbounded FIFO-backed tables and asserts
/// that both left-unmatched and right-unmatched batches are produced more
/// than once while the stream is still running.
#[rstest]
#[timeout(std::time::Duration::from_secs(30))]
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn unbounded_file_with_symmetric_join() -> Result<()> {
    // Create session context
    let config = SessionConfig::new()
        .with_batch_size(TEST_BATCH_SIZE)
        .set_bool("datafusion.execution.coalesce_batches", false)
        .with_target_partitions(1);
    let ctx = SessionContext::with_config(config);
    // Writer tasks feeding the two FIFO files.
    let mut tasks: Vec<JoinHandle<()>> = vec![];
    // Join filter column
    let a1_iter = 0..TEST_DATA_SIZE;
    // Join key column (modulus 10)
    let a2_iter = (0..TEST_DATA_SIZE).map(|x| x % 10);
    let lines = a1_iter
        .zip(a2_iter)
        .map(|(a1, a2)| format!("{a1},{a2}\n"))
        .collect::<Vec<_>>();
    // Create a new temporary directory for the FIFO files
    let tmp_dir = TempDir::new()?;
    // Create a FIFO file for the left input source.
    let left_fifo = create_fifo_file(&tmp_dir, "left.csv")?;
    // Create a FIFO file for the right input source.
    let right_fifo = create_fifo_file(&tmp_dir, "right.csv")?;
    // Atomic flag tracking whether the sources are still waiting for data.
    let waiting = Arc::new(AtomicBool::new(true));
    // Create writing threads for the left and right FIFO files
    tasks.push(create_writing_thread(
        left_fifo.clone(),
        "a1,a2\n".to_owned(),
        lines.clone(),
        waiting.clone(),
        TEST_BATCH_SIZE,
    ));
    tasks.push(create_writing_thread(
        right_fifo.clone(),
        "a1,a2\n".to_owned(),
        lines.clone(),
        waiting.clone(),
        TEST_BATCH_SIZE,
    ));
    // Create schema
    let schema = Arc::new(Schema::new(vec![
        Field::new("a1", DataType::UInt32, false),
        Field::new("a2", DataType::UInt32, false),
    ]));
    // Specify the ordering:
    let file_sort_order = vec![[datafusion_expr::col("a1")]
        .into_iter()
        .map(|e| {
            let ascending = true;
            let nulls_first = false;
            e.sort(ascending, nulls_first)
        })
        .collect::<Vec<_>>()];
    // Set unbounded sorted files read configuration
    register_unbounded_file_with_ordering(
        &ctx,
        schema.clone(),
        &left_fifo,
        "left",
        file_sort_order.clone(),
        true,
    )
    .await?;
    register_unbounded_file_with_ordering(
        &ctx,
        schema,
        &right_fifo,
        "right",
        file_sort_order,
        true,
    )
    .await?;
    // Execute the query, with no matching rows. (since key is modulus 10)
    let df = ctx
        .sql(
            "SELECT
t1.a1,
t1.a2,
t2.a1,
t2.a2
FROM
left as t1 FULL
JOIN right as t2 ON t1.a2 = t2.a2
AND t1.a1 > t2.a1 + 4
AND t1.a1 < t2.a1 + 9",
        )
        .await?;
    let mut stream = df.execute_stream().await?;
    let mut operations = vec![];
    // Drain partial results; once output flows, release the writers.
    while let Some(Ok(batch)) = stream.next().await {
        waiting.store(false, Ordering::SeqCst);
        let left_unmatched = batch.column(2).null_count();
        let right_unmatched = batch.column(0).null_count();
        // Classify the batch by which side carries NULL-padded rows.
        // (This conditional was garbled in the source; restored here.)
        let op = if left_unmatched == 0 && right_unmatched == 0 {
            JoinOperation::Equal
        } else if right_unmatched > left_unmatched {
            JoinOperation::RightUnmatched
        } else {
            JoinOperation::LeftUnmatched
        };
        operations.push(op);
    }
    tasks.into_iter().for_each(|jh| jh.join().unwrap());
    // The SymmetricHashJoin executor produces FULL join results at every
    // pruning, which happens before it reaches the end of input and more
    // than once. In this test, we feed partially joinable data to both
    // sides in order to ensure that left or right unmatched results are
    // generated more than once during the test.
    assert!(
        operations
            .iter()
            .filter(|&n| JoinOperation::RightUnmatched.eq(n))
            .count()
            > 1
            && operations
                .iter()
                .filter(|&n| JoinOperation::LeftUnmatched.eq(n))
                .count()
                > 1
    );
    Ok(())
}
/// It tests the INSERT INTO functionality.
///
/// Writes rows into a source FIFO, streams them through
/// `INSERT INTO sink_table SELECT ... FROM source_table`, and drains the
/// sink FIFO from a reader thread so the pipeline never blocks.
#[tokio::test]
async fn test_sql_insert_into_fifo() -> Result<()> {
// To make unbounded deterministic: writer pauses while this flag is set.
let waiting = Arc::new(AtomicBool::new(true));
let waiting_thread = waiting.clone();
// create local execution context
let config = SessionConfig::new().with_batch_size(TEST_BATCH_SIZE);
let ctx = SessionContext::with_config(config);
// Create a new temporary FIFO file
let tmp_dir = TempDir::new()?;
let source_fifo_path = create_fifo_file(&tmp_dir, "source.csv")?;
// Prevent move
let (source_fifo_path_thread, source_display_fifo_path) =
(source_fifo_path.clone(), source_fifo_path.display());
// Tasks
let mut tasks: Vec<JoinHandle<()>> = vec![];
// TEST_BATCH_SIZE + 1 rows will be provided. However, after processing precisely
// TEST_BATCH_SIZE rows, the program will pause and wait for a batch to be read in another
// thread. This approach ensures that the pipeline remains unbroken.
tasks.push(create_writing_thread(
source_fifo_path_thread,
"a1,a2\n".to_owned(),
(0..TEST_DATA_SIZE)
.map(|_| "a,1\n".to_string())
.collect::<Vec<_>>(),
waiting,
TEST_BATCH_SIZE,
));
// Create a new temporary FIFO file
let sink_fifo_path = create_fifo_file(&tmp_dir, "sink.csv")?;
// Prevent move
let (sink_fifo_path_thread, sink_display_fifo_path) =
(sink_fifo_path.clone(), sink_fifo_path.display());
// Spawn a new thread to read sink EXTERNAL TABLE.
tasks.push(thread::spawn(move || {
let file = File::open(sink_fifo_path_thread).unwrap();
let schema = Arc::new(Schema::new(vec![
Field::new("a1", DataType::Utf8, false),
Field::new("a2", DataType::UInt32, false),
]));
let mut reader = ReaderBuilder::new(schema)
.has_header(true)
.with_batch_size(TEST_BATCH_SIZE)
.build(file)
.map_err(|e| DataFusionError::Internal(e.to_string()))
.unwrap();
// Each batch successfully read from the sink unblocks the writer.
while let Some(Ok(_)) = reader.next() {
waiting_thread.store(false, Ordering::SeqCst);
}
}));
// register second csv file with the SQL (create an empty file if not found)
ctx.sql(&format!(
"CREATE EXTERNAL TABLE source_table (
a1 VARCHAR NOT NULL,
a2 INT NOT NULL
)
STORED AS CSV
WITH HEADER ROW
OPTIONS ('UNBOUNDED' 'TRUE')
LOCATION '{source_display_fifo_path}'"
))
.await?;
// register csv file with the SQL
ctx.sql(&format!(
"CREATE EXTERNAL TABLE sink_table (
a1 VARCHAR NOT NULL,
a2 INT NOT NULL
)
STORED AS CSV
WITH HEADER ROW
OPTIONS ('UNBOUNDED' 'TRUE')
LOCATION '{sink_display_fifo_path}'"
))
.await?;
let df = ctx
.sql(
"INSERT INTO sink_table
SELECT a1, a2 FROM source_table",
)
.await?;
// Drive the INSERT to completion, then wait for both worker threads.
df.collect().await?;
tasks.into_iter().for_each(|jh| jh.join().unwrap());
Ok(())
}
}
| {
JoinOperation::RightUnmatched
} | conditional_block |
fifo.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! This test demonstrates the DataFusion FIFO capabilities.
//!
#[cfg(not(target_os = "windows"))]
#[cfg(test)]
mod unix_test {
use arrow::array::Array;
use arrow::csv::ReaderBuilder;
use arrow::datatypes::{DataType, Field, Schema};
use datafusion::test_util::register_unbounded_file_with_ordering;
use datafusion::{
prelude::{CsvReadOptions, SessionConfig, SessionContext},
test_util::{aggr_test_schema, arrow_test_data},
};
use datafusion_common::{DataFusionError, Result};
use futures::StreamExt;
use itertools::enumerate;
use nix::sys::stat;
use nix::unistd;
use rstest::*;
use std::fs::{File, OpenOptions};
use std::io::Write;
use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use std::thread::JoinHandle;
use std::time::{Duration, Instant};
use tempfile::TempDir;
// ! For the sake of the test, do not alter the numbers. !
// Session batch size
const TEST_BATCH_SIZE: usize = 20;
// Number of lines written to FIFO
const TEST_DATA_SIZE: usize = 20_000;
// Number of lines what can be joined. Each joinable key produced 20 lines with
// aggregate_test_100 dataset. We will use these joinable keys for understanding
// incremental execution.
const TEST_JOIN_RATIO: f64 = 0.01;
/// Creates a FIFO (named pipe) called `file_name` inside `tmp_dir` and
/// returns its full path, or a `DataFusionError::Execution` on failure.
fn create_fifo_file(tmp_dir: &TempDir, file_name: &str) -> Result<PathBuf> {
    let fifo_path = tmp_dir.path().join(file_name);
    // A FIFO lets the test simulate an unbounded/streaming source.
    match unistd::mkfifo(&fifo_path, stat::Mode::S_IRWXU) {
        Ok(()) => Ok(fifo_path),
        Err(e) => Err(DataFusionError::Execution(e.to_string())),
    }
}
fn write_to_fifo(
mut file: &File,
line: &str,
ref_time: Instant,
broken_pipe_timeout: Duration,
) -> Result<()> {
// We need to handle broken pipe error until the reader is ready. This
// is why we use a timeout to limit the wait duration for the reader.
// If the error is different than broken pipe, we fail immediately.
while let Err(e) = file.write_all(line.as_bytes()) {
if e.raw_os_error().unwrap() == 32 {
let interval = Instant::now().duration_since(ref_time);
if interval < broken_pipe_timeout {
thread::sleep(Duration::from_millis(100));
continue; | }
// This test provides a relatively realistic end-to-end scenario where
// we swap join sides to accommodate a FIFO source.
/// Joins a FIFO-backed CSV table (bounded or unbounded, per the rstest
/// parameter) against the bounded `aggregate_test_100` dataset, draining
/// the result stream and releasing the writer thread once output flows.
#[rstest]
#[timeout(std::time::Duration::from_secs(30))]
#[tokio::test(flavor = "multi_thread", worker_threads = 8)]
async fn unbounded_file_with_swapped_join(
#[values(true, false)] unbounded_file: bool,
) -> Result<()> {
// Create session context
let config = SessionConfig::new()
.with_batch_size(TEST_BATCH_SIZE)
.with_collect_statistics(false)
.with_target_partitions(1);
let ctx = SessionContext::with_config(config);
// To make unbounded deterministic: the writer pauses while this flag is set.
let waiting = Arc::new(AtomicBool::new(unbounded_file));
// Create a new temporary FIFO file
let tmp_dir = TempDir::new()?;
let fifo_path =
create_fifo_file(&tmp_dir, &format!("fifo_{unbounded_file:?}.csv"))?;
// Execution can calculate at least one RecordBatch after the number of
// "joinable_lines_length" lines are read.
let joinable_lines_length =
(TEST_DATA_SIZE as f64 * TEST_JOIN_RATIO).round() as usize;
// The row including "a" is joinable with aggregate_test_100.c1
let joinable_iterator = (0..joinable_lines_length).map(|_| "a".to_string());
let second_joinable_iterator =
(0..joinable_lines_length).map(|_| "a".to_string());
// The row including "zzz" is not joinable with aggregate_test_100.c1
let non_joinable_iterator =
(0..(TEST_DATA_SIZE - joinable_lines_length)).map(|_| "zzz".to_string());
let lines = joinable_iterator
.chain(non_joinable_iterator)
.chain(second_joinable_iterator)
.zip(0..TEST_DATA_SIZE)
.map(|(a1, a2)| format!("{a1},{a2}\n"))
.collect::<Vec<_>>();
// Create the writing thread feeding the FIFO file
let task = create_writing_thread(
fifo_path.clone(),
"a1,a2\n".to_owned(),
lines,
waiting.clone(),
joinable_lines_length,
);
// Data Schema
let schema = Arc::new(Schema::new(vec![
Field::new("a1", DataType::Utf8, false),
Field::new("a2", DataType::UInt32, false),
]));
// Create a file with bounded or unbounded flag.
ctx.register_csv(
"left",
fifo_path.as_os_str().to_str().unwrap(),
CsvReadOptions::new()
.schema(schema.as_ref())
.mark_infinite(unbounded_file),
)
.await?;
// Register right table
let schema = aggr_test_schema();
let test_data = arrow_test_data();
ctx.register_csv(
"right",
&format!("{test_data}/csv/aggregate_test_100.csv"),
CsvReadOptions::new().schema(schema.as_ref()),
)
.await?;
// Execute the query
let df = ctx.sql("SELECT t1.a2, t2.c1, t2.c4, t2.c5 FROM left as t1 JOIN right as t2 ON t1.a1 = t2.c1").await?;
let mut stream = df.execute_stream().await?;
// Once results start flowing, release the writer thread.
while (stream.next().await).is_some() {
waiting.store(false, Ordering::SeqCst);
}
task.join().unwrap();
Ok(())
}
/// Classification of a FULL join output batch by which side contributed
/// unmatched (NULL-padded) rows, inferred from per-column null counts.
#[derive(Debug, PartialEq)]
enum JoinOperation {
/// Batch contains left-side rows with no right-side match.
LeftUnmatched,
/// Batch contains right-side rows with no left-side match.
RightUnmatched,
/// Every row in the batch is fully matched.
Equal,
}
/// Spawns a writer thread that opens `file_path` for writing, emits
/// `header` followed by every entry of `lines`, and pauses after
/// `wait_until` lines whenever `waiting_lock` is set so the reader can
/// catch up.
fn create_writing_thread(
    file_path: PathBuf,
    header: String,
    lines: Vec<String>,
    waiting_lock: Arc<AtomicBool>,
    wait_until: usize,
) -> JoinHandle<()> {
    // Give up after a sustained period of BrokenPipe errors.
    let broken_pipe_timeout = Duration::from_secs(10);
    thread::spawn(move || {
        let file = OpenOptions::new().write(true).open(file_path).unwrap();
        // Reference instant used when deciding to fail the test.
        let start = Instant::now();
        write_to_fifo(&file, &header, start, broken_pipe_timeout).unwrap();
        for (idx, row) in lines.iter().enumerate() {
            while idx > wait_until && waiting_lock.load(Ordering::SeqCst) {
                thread::sleep(Duration::from_millis(50));
            }
            write_to_fifo(&file, row, start, broken_pipe_timeout).unwrap();
        }
        drop(file);
    })
}
// This test provides a relatively realistic end-to-end scenario where
// we change the join into a [SymmetricHashJoin] to accommodate two
// unbounded (FIFO) sources.
/// Runs a FULL join between two unbounded FIFO-backed tables and asserts
/// that both left-unmatched and right-unmatched batches are produced more
/// than once while the stream is still running.
#[rstest]
#[timeout(std::time::Duration::from_secs(30))]
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn unbounded_file_with_symmetric_join() -> Result<()> {
// Create session context
let config = SessionConfig::new()
.with_batch_size(TEST_BATCH_SIZE)
.set_bool("datafusion.execution.coalesce_batches", false)
.with_target_partitions(1);
let ctx = SessionContext::with_config(config);
// Tasks
let mut tasks: Vec<JoinHandle<()>> = vec![];
// Join filter
let a1_iter = 0..TEST_DATA_SIZE;
// Join key
let a2_iter = (0..TEST_DATA_SIZE).map(|x| x % 10);
let lines = a1_iter
.zip(a2_iter)
.map(|(a1, a2)| format!("{a1},{a2}\n"))
.collect::<Vec<_>>();
// Create a new temporary FIFO file
let tmp_dir = TempDir::new()?;
// Create a FIFO file for the left input source.
let left_fifo = create_fifo_file(&tmp_dir, "left.csv")?;
// Create a FIFO file for the right input source.
let right_fifo = create_fifo_file(&tmp_dir, "right.csv")?;
// Create an atomic flag for tracking if the input sources are waiting for data.
let waiting = Arc::new(AtomicBool::new(true));
// Create writing threads for the left and right FIFO files
tasks.push(create_writing_thread(
left_fifo.clone(),
"a1,a2\n".to_owned(),
lines.clone(),
waiting.clone(),
TEST_BATCH_SIZE,
));
tasks.push(create_writing_thread(
right_fifo.clone(),
"a1,a2\n".to_owned(),
lines.clone(),
waiting.clone(),
TEST_BATCH_SIZE,
));
// Create schema
let schema = Arc::new(Schema::new(vec![
Field::new("a1", DataType::UInt32, false),
Field::new("a2", DataType::UInt32, false),
]));
// Specify the ordering:
let file_sort_order = vec![[datafusion_expr::col("a1")]
.into_iter()
.map(|e| {
let ascending = true;
let nulls_first = false;
e.sort(ascending, nulls_first)
})
.collect::<Vec<_>>()];
// Set unbounded sorted files read configuration
register_unbounded_file_with_ordering(
&ctx,
schema.clone(),
&left_fifo,
"left",
file_sort_order.clone(),
true,
)
.await?;
register_unbounded_file_with_ordering(
&ctx,
schema,
&right_fifo,
"right",
file_sort_order,
true,
)
.await?;
// Execute the query, with no matching rows. (since key is modulus 10)
let df = ctx
.sql(
"SELECT
t1.a1,
t1.a2,
t2.a1,
t2.a2
FROM
left as t1 FULL
JOIN right as t2 ON t1.a2 = t2.a2
AND t1.a1 > t2.a1 + 4
AND t1.a1 < t2.a1 + 9",
)
.await?;
let mut stream = df.execute_stream().await?;
let mut operations = vec![];
// Partial. Drain batches; once output flows, release the writer threads.
while let Some(Ok(batch)) = stream.next().await {
waiting.store(false, Ordering::SeqCst);
let left_unmatched = batch.column(2).null_count();
let right_unmatched = batch.column(0).null_count();
// Classify the batch by which side carries NULL-padded rows.
let op = if left_unmatched == 0 && right_unmatched == 0 {
JoinOperation::Equal
} else if right_unmatched > left_unmatched {
JoinOperation::RightUnmatched
} else {
JoinOperation::LeftUnmatched
};
operations.push(op);
}
tasks.into_iter().for_each(|jh| jh.join().unwrap());
// The SymmetricHashJoin executor produces FULL join results at every
// pruning, which happens before it reaches the end of input and more
// than once. In this test, we feed partially joinable data to both
// sides in order to ensure that left or right unmatched results are
// generated more than once during the test.
assert!(
operations
.iter()
.filter(|&n| JoinOperation::RightUnmatched.eq(n))
.count()
> 1
&& operations
.iter()
.filter(|&n| JoinOperation::LeftUnmatched.eq(n))
.count()
> 1
);
Ok(())
}
/// It tests the INSERT INTO functionality.
///
/// Writes rows into a source FIFO, streams them through
/// `INSERT INTO sink_table SELECT ... FROM source_table`, and drains the
/// sink FIFO from a reader thread so the pipeline never blocks.
#[tokio::test]
async fn test_sql_insert_into_fifo() -> Result<()> {
// To make unbounded deterministic: writer pauses while this flag is set.
let waiting = Arc::new(AtomicBool::new(true));
let waiting_thread = waiting.clone();
// create local execution context
let config = SessionConfig::new().with_batch_size(TEST_BATCH_SIZE);
let ctx = SessionContext::with_config(config);
// Create a new temporary FIFO file
let tmp_dir = TempDir::new()?;
let source_fifo_path = create_fifo_file(&tmp_dir, "source.csv")?;
// Prevent move
let (source_fifo_path_thread, source_display_fifo_path) =
(source_fifo_path.clone(), source_fifo_path.display());
// Tasks
let mut tasks: Vec<JoinHandle<()>> = vec![];
// TEST_BATCH_SIZE + 1 rows will be provided. However, after processing precisely
// TEST_BATCH_SIZE rows, the program will pause and wait for a batch to be read in another
// thread. This approach ensures that the pipeline remains unbroken.
tasks.push(create_writing_thread(
source_fifo_path_thread,
"a1,a2\n".to_owned(),
(0..TEST_DATA_SIZE)
.map(|_| "a,1\n".to_string())
.collect::<Vec<_>>(),
waiting,
TEST_BATCH_SIZE,
));
// Create a new temporary FIFO file
let sink_fifo_path = create_fifo_file(&tmp_dir, "sink.csv")?;
// Prevent move
let (sink_fifo_path_thread, sink_display_fifo_path) =
(sink_fifo_path.clone(), sink_fifo_path.display());
// Spawn a new thread to read sink EXTERNAL TABLE.
tasks.push(thread::spawn(move || {
let file = File::open(sink_fifo_path_thread).unwrap();
let schema = Arc::new(Schema::new(vec![
Field::new("a1", DataType::Utf8, false),
Field::new("a2", DataType::UInt32, false),
]));
let mut reader = ReaderBuilder::new(schema)
.has_header(true)
.with_batch_size(TEST_BATCH_SIZE)
.build(file)
.map_err(|e| DataFusionError::Internal(e.to_string()))
.unwrap();
// Each batch successfully read from the sink unblocks the writer.
while let Some(Ok(_)) = reader.next() {
waiting_thread.store(false, Ordering::SeqCst);
}
}));
// register second csv file with the SQL (create an empty file if not found)
ctx.sql(&format!(
"CREATE EXTERNAL TABLE source_table (
a1 VARCHAR NOT NULL,
a2 INT NOT NULL
)
STORED AS CSV
WITH HEADER ROW
OPTIONS ('UNBOUNDED' 'TRUE')
LOCATION '{source_display_fifo_path}'"
))
.await?;
// register csv file with the SQL
ctx.sql(&format!(
"CREATE EXTERNAL TABLE sink_table (
a1 VARCHAR NOT NULL,
a2 INT NOT NULL
)
STORED AS CSV
WITH HEADER ROW
OPTIONS ('UNBOUNDED' 'TRUE')
LOCATION '{sink_display_fifo_path}'"
))
.await?;
let df = ctx
.sql(
"INSERT INTO sink_table
SELECT a1, a2 FROM source_table",
)
.await?;
// Drive the INSERT to completion, then wait for both worker threads.
df.collect().await?;
tasks.into_iter().for_each(|jh| jh.join().unwrap());
Ok(())
}
} | }
}
return Err(DataFusionError::Execution(e.to_string()));
}
Ok(()) | random_line_split |
gpt.go | package gpt
import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"math"
	"strconv"

	"github.com/qeedquan/disktools/endian"
	"github.com/qeedquan/disktools/mbr"
)
// Option holds optional parameters for Open.
type Option struct {
// Sectsz is the logical sector size in bytes; Open defaults it to 512.
Sectsz int
}
// GUID is a 16-byte globally unique identifier as stored in a GPT.
type GUID [16]byte
// Header is the on-disk GPT header, decoded little-endian.
// Field semantics follow the UEFI GPT specification; only Sig, Ent and
// Entsz are consulted by this package's decoder.
type Header struct {
// Sig must be "EFI PART" (validated by readHeader).
Sig [8]byte
Rev uint32
Hdrsz uint32
Hdrcrc uint32
_ uint32
Current uint64
Backup uint64
First uint64
Last uint64
GUID GUID
// Table is the starting LBA of the partition entry array (per GPT spec;
// not consulted by this decoder, which reads entries at sector 2).
Table uint64
// Ent is the number of partition entries in the array.
Ent uint32
// Entsz is the on-disk size in bytes of a single partition entry.
Entsz uint32
Tabcrc uint32
}
// Entry is a single on-disk GPT partition entry.
type Entry struct {
Part GUID
Uniq GUID
First uint64
Last uint64
Attr uint64
// Name is the partition name; per GPT spec this is UTF-16LE, stored
// here as raw bytes.
Name [72]byte
}
// Table is the decoded result of Open: the protective MBR, the GPT
// header, and the partition entry array.
type Table struct {
MBR *mbr.Record
Header Header
Sectsz int
Entries []Entry
}
var (
// ErrHeader is returned when the protective MBR or the GPT header
// signature is not valid.
ErrHeader = errors.New("gpt: invalid header")
)
// Open parses a GUID partition table from r. If o is nil, a default
// sector size of 512 bytes is assumed.
func Open(r io.ReaderAt, o *Option) (*Table, error) {
	sectsz := 512
	if o != nil {
		sectsz = o.Sectsz
	}
	d := decoder{
		r:     r,
		Table: Table{Sectsz: sectsz},
	}
	if err := d.decode(); err != nil {
		return nil, err
	}
	return &d.Table, nil
}
// decoder decodes a GPT from r, accumulating results in the embedded Table.
type decoder struct {
Table
r io.ReaderAt
}
// decode reads the protective MBR, the GPT header at sector 1, and the
// partition entry array at sector 2, populating the embedded Table.
func (d *decoder) decode() (err error) {
	if d.MBR, err = mbr.Open(d.r); err != nil {
		return err
	}
	// A protective MBR carries a single partition of type 0xEE.
	if d.MBR.Part[0].Type != 0xee {
		return ErrHeader
	}
	if d.Header, err = d.readHeader(int64(d.Sectsz)); err != nil {
		return err
	}
	if d.Entries, err = d.readEntry(int64(d.Sectsz * 2)); err != nil {
		return err
	}
	return nil
}
// readHeader decodes the little-endian GPT header located at byte offset
// off and validates its "EFI PART" signature.
func (d *decoder) readHeader(off int64) (Header, error) {
	var h Header
	r := io.NewSectionReader(d.r, off, math.MaxUint32)
	if err := binary.Read(r, binary.LittleEndian, &h); err != nil {
		return h, err
	}
	if !bytes.Equal(h.Sig[:], []byte("EFI PART")) {
		return h, ErrHeader
	}
	return h, nil
}
// readEntry decodes the partition entry array starting at byte offset
// off. The header supplies the entry count (Ent) and the on-disk size of
// each entry (Entsz).
func (d *decoder) readEntry(off int64) ([]Entry, error) {
	var entries []Entry
	h := &d.Header
	// Each entry occupies Entsz bytes on disk, not Ent bytes: sizing the
	// scratch buffer with the entry size (and advancing off below) is
	// what lets successive iterations see successive entries.
	buf := make([]byte, h.Entsz)
	for i := uint32(0); i < h.Ent; i++ {
		_, err := d.r.ReadAt(buf, off)
		if err != nil {
			return nil, err
		}
		var entry Entry
		rd := bytes.NewReader(buf)
		// Propagate decode failures instead of silently appending a
		// partially-filled entry.
		err = binary.Read(rd, binary.LittleEndian, &entry)
		if err != nil {
			return nil, err
		}
		entries = append(entries, entry)
		// Step to the next entry in the on-disk array.
		off += int64(h.Entsz)
	}
	return entries, nil
}
// ParseGUID parses a GUID in canonical textual form
// "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" (hex digits, case-insensitive)
// into its 16-byte big-endian representation.
// (The original body was missing from the source; reconstructed so that
// the strings in the Parts table round-trip through GUID.String.)
func ParseGUID(guid string) ([16]byte, error) {
	var p [16]byte
	if len(guid) != 36 || guid[8] != '-' || guid[13] != '-' ||
		guid[18] != '-' || guid[23] != '-' {
		return p, fmt.Errorf("gpt: invalid guid %q", guid)
	}
	// Strip the dashes, leaving exactly 32 hex digits.
	hs := guid[:8] + guid[9:13] + guid[14:18] + guid[19:23] + guid[24:]
	for i := 0; i < 16; i++ {
		v, err := strconv.ParseUint(hs[2*i:2*i+2], 16, 8)
		if err != nil {
			return p, fmt.Errorf("gpt: invalid guid %q", guid)
		}
		p[i] = byte(v)
	}
	return p, nil
}
// MustParseGUID is like ParseGUID but panics on malformed input.
// It is intended for initializing package-level tables of constant GUIDs.
func MustParseGUID(guid string) GUID {
p, err := ParseGUID(guid)
if err != nil {
panic(err)
}
return p
}
// String returns the canonical textual form of the GUID,
// "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX": zero-padded upper-case hex
// groups of 4, 2, 2, 2 and 6 bytes, read big-endian.
//
// The previous implementation read 32 bits at offsets 4 and 6 for the
// 16-bit middle groups (overlapping bytes 6-7) and used unpadded %X, so
// its output neither matched the canonical form nor round-tripped with
// ParseGUID.
func (p GUID) String() string {
	return fmt.Sprintf("%08X-%04X-%04X-%04X-%012X",
		binary.BigEndian.Uint32(p[0:]),
		binary.BigEndian.Uint16(p[4:]),
		binary.BigEndian.Uint16(p[6:]),
		binary.BigEndian.Uint16(p[8:]),
		uint64(binary.BigEndian.Uint16(p[10:]))<<32|uint64(binary.BigEndian.Uint32(p[12:])),
	)
}
var Parts = []struct {
Name string
Desc string
GUID GUID
}{
{"unused", "Unused entry", MustParseGUID("00000000-0000-0000-0000-000000000000")},
{"mbr", "MBR", MustParseGUID("024DEE41-33E7-11D3-9D69-0008C781F39F")},
{"efi", "EFI System", MustParseGUID("C12A7328-F81F-11D2-BA4B-00A0C93EC93B")},
{"bios", "BIOS Boot", MustParseGUID("21686148-6449-6E6F-744E-656564454649")},
{"iffs", "Intel Fast Flash", MustParseGUID("D3BFE2DE-3DAF-11DF-BA40-E3A556D89593")},
{"sony", "Sony boot", MustParseGUID("F4019732-066E-4E12-8273-346C5641494F")},
{"lenovo", "Lenovo boot", MustParseGUID("BFBFAFE7-A34F-448A-9A5B-6213EB736C22")},
{"msr", "Microsoft Reserved", MustParseGUID("E3C9E316-0B5C-4DB8-817D-F92DF00215AE")},
{"dos", "Microsoft Basic data", MustParseGUID("EBD0A0A2-B9E5-4433-87C0-68B6B72699C7")},
{"ldmm", "Microsoft Logical Disk Manager metadata", MustParseGUID("5808C8AA-7E8F-42E0-85D2-E1E90434CFB3")},
{"ldmd", "Microsoft Logical Disk Manager data", MustParseGUID("AF9B60A0-1431-4F62-BC68-3311714A69AD")},
{"recovery", "Windows Recovery Environment", MustParseGUID("DE94BBA4-06D1-4D40-A16A-BFD50179D6AC")},
{"gpfs", "IBM General Parallel File System", MustParseGUID("37AFFC90-EF7D-4E96-91C3-2D7AE055B174")},
{"storagespaces", "Storage Spaces", MustParseGUID("E75CAF8F-F680-4CEE-AFA3-B001E56EFC2D")},
{"hpuxdata", "HP-UX Data", MustParseGUID("75894C1E-3AEB-11D3-B7C1-7B03A0000000")},
{"hpuxserv", "HP-UX Service", MustParseGUID("E2A1E728-32E3-11D6-A682-7B03A0000000")},
{"linuxdata", "Linux Data", MustParseGUID("0FC63DAF-8483-4772-8E79-3D69D8477DE4")},
{"linuxraid", "Linux RAID", MustParseGUID("A19D880F-05FC-4D3B-A006-743F0F84911E")},
{"linuxrootx86", "Linux Root (x86)", MustParseGUID("44479540-F297-41B2-9AF7-D131D5F0458A")},
{"linuxrootx86_64", "Linux Root (x86-64)", MustParseGUID("4F68BCE3-E8CD-4DB1-96E7-FBCAF984B709")},
{"linuxrootarm", "Linux Root (ARM)", MustParseGUID("69DAD710-2CE4-4E3C-B16C-21A1D49ABED3")},
{"linuxrootaarch64", "Linux Root (ARM)", MustParseGUID("B921B045-1DF0-41C3-AF44-4C6F280D3FAE")},
{"linuxswap", "Linux Swap", MustParseGUID("0657FD6D-A4AB-43C4-84E5-0933C84B4F4F")},
{"linuxlvm", "Linux Logical Volume Manager", MustParseGUID("E6D6D379-F507-44C2-A23C-238F2A3DF928")},
{"linuxhome", "Linux /home", MustParseGUID("933AC7E1-2EB4-4F13-B844-0E14E2AEF915")},
{"linuxsrv", "Linux /srv", MustParseGUID("3B8F8425-20E0-4F3B-907F-1A25A76F98E8")},
{"linuxcrypt", "Linux Plain dm-crypt", MustParseGUID("7FFEC5C9-2D00-49B7-8941-3EA10A5586B7")},
{"luks", "LUKS", MustParseGUID("CA7D7CCB-63ED-4C53-861C-1742536059CC")},
{"linuxreserved", "Linux Reserved", MustParseGUID("8DA63339-0007-60C0-C436-083AC8230908")},
{"fbsdboot", "FreeBSD Boot", MustParseGUID("83BD6B9D-7F41-11DC-BE0B-001560B84F0F")},
{"fbsddata", "FreeBSD Data", MustParseGUID("516E7CB4-6ECF-11D6-8FF8-00022D09712B")},
{"fbsdswap", "FreeBSD Swap", MustParseGUID("516E7CB5-6ECF-11D6-8FF8-00022D09712B")},
{"fbsdufs", "FreeBSD Unix File System", MustParseGUID("516E7CB6-6ECF-11D6-8FF8-00022D09712B")},
{"fbsdvvm", "FreeBSD Vinum volume manager", MustParseGUID("516E7CB8-6ECF-11D6-8FF8-00022D09712B")},
{"fbsdzfs", "FreeBSD ZFS", MustParseGUID("516E7CBA-6ECF-11D6-8FF8-00022D09712B")},
{"applehfs", "Apple HFS+", MustParseGUID("48465300-0000-11AA-AA11-00306543ECAC")},
{"appleufs", "Apple UFS", MustParseGUID("55465300-0000-11AA-AA11-00306543ECAC")},
{"applezfs", "Apple ZFS", MustParseGUID("6A898CC3-1DD2-11B2-99A6-080020736631")},
{"appleraid", "Apple RAID", MustParseGUID("52414944-0000-11AA-AA11-00306543ECAC")},
{"appleraidoff", "Apple RAID, offline", MustParseGUID("52414944-5F4F-11AA-AA11-00306543ECAC")},
{"appleboot", "Apple Boot", MustParseGUID("426F6F74-0000-11AA-AA11-00306543ECAC")},
{"applelabel", "Apple Label", MustParseGUID("4C616265-6C00-11AA-AA11-00306543ECAC")},
{"appletv", "Apple TV Recovery", MustParseGUID("5265636F-7665-11AA-AA11-00306543ECAC")},
{"applecs", "Apple Core Storage", MustParseGUID("53746F72-6167-11AA-AA11-00306543ECAC")},
{"applesrs", "Apple SoftRAID Status", MustParseGUID("B6FA30DA-92D2-4A9A-96F1-871EC6486200")},
{"applesrscr", "Apple SoftRAID Scratch", MustParseGUID("2E313465-19B9-463F-8126-8A7993773801")},
{"applesrv", "Apple SoftRAID Volume", MustParseGUID("FA709C7E-65B1-4593-BFD5-E71D61DE9B02")},
{"applesrc", "Apple SoftRAID Cache", MustParseGUID("BBBA6DF5-F46F-4A89-8F59-8765B2727503")},
{"solarisboot", "Solaris Boot", MustParseGUID("6A82CB45-1DD2-11B2-99A6-080020736631")},
{"solarisroot", "Solaris Root", MustParseGUID("6A85CF4D-1DD2-11B2-99A6-080020736631")},
{"solarisswap", "Solaris Swap", MustParseGUID("6A87C46F-1DD2-11B2-99A6-080020736631")},
{"solarisbakup", "Solaris Backup", MustParseGUID("6A8B642B-1DD2-11B2-99A6-080020736631")},
{"solarisusr", "Solaris /usr", MustParseGUID("6A898CC3-1DD2-11B2-99A6-080020736631")},
{"solarisvar", "Solaris /var", MustParseGUID("6A8EF2E9-1DD2-11B2-99A6-080020736631")},
{"solarishome", "Solaris /home", MustParseGUID("6A90BA39-1DD2-11B2-99A6-080020736631")},
{"solarisalt", "Solaris Alternate sector", MustParseGUID("6A9283A5-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A945A3B-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A9630D1-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A980767-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A96237F-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A8D2AC7-1DD2-11B2-99A6-080020736631")},
{"nbsdswap", "NetBSD Swap", MustParseGUID("49F48D32-B10E-11DC-B99B-0019D1879648")},
{"nbsdffs", "NetBSD FFS", MustParseGUID("49F48D5A-B10E-11DC-B99B-0019D1879648")},
{"nbsdlfs", "NetBSD LFS", MustParseGUID("49F48D82-B10E-11DC-B99B-0019D1879648")},
{"nbsdraid", "NetBSD RAID", MustParseGUID("49F48DAA-B10E-11DC-B99B-0019D1879648")},
{"nbsdcat", "NetBSD Concatenated", MustParseGUID("2DB519C4-B10F-11DC-B99B-0019D1879648")},
{"nbsdcrypt", "NetBSD Encrypted", MustParseGUID("2DB519EC-B10F-11DC-B99B-0019D1879648")},
{"chromeoskern", "ChromeOS kernel", MustParseGUID("FE3A2A5D-4F32-41A7-B725-ACCC3285A309")},
{"chromeosroot", "ChromeOS rootfs", MustParseGUID("3CB8E202-3B7E-47DD-8A3C-7FF2A13CFCEC")},
{"chromeos", "ChromeOS future use", MustParseGUID("2E0A753D-9E48-43B0-8337-B15192CB1B5E")},
{"haikubfs", "Haiku BFS", MustParseGUID("42465331-3BA3-10F1-802A-4861696B7521")},
{"midbsdboot", "MidnightBSD Boot", MustParseGUID("85D5E45E-237C-11E1-B4B3-E89A8F7FC3A7")},
{"midbsddata", "MidnightBSD Data", MustParseGUID("85D5E45A-237C-11E1-B4B3-E89A8F7FC3A7")},
{"midbsdswap", "MidnightBSD Swap", MustParseGUID("85D5E45B-237C-11E1-B4B3-E89A8F7FC3A7")},
{"midbsdufs", "MidnightBSD Unix File System", MustParseGUID("0394EF8B-237E-11E1-B4B3-E89A8F7FC3A7")},
{"midbsdvvm", "MidnightBSD Vinum volume manager", MustParseGUID("85D5E45C-237C-11E1-B4B3-E89A8F7FC3A7")},
{"midbsdzfs", "MidnightBSD ZFS", MustParseGUID("85D5E45D-237C-11E1-B4B3-E89A8F7FC3A7")},
{"cephjournal", "Ceph Journal", MustParseGUID("45B0969E-9B03-4F30-B4C6-B4B80CEFF106")},
{"cephcrypt", "Ceph dm-crypt Encrypted Journal", MustParseGUID("45B0969E-9B03-4F30-B4C6-5EC00CEFF106")},
{"cephosd", "Ceph OSD", MustParseGUID("4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D")},
{"cephdsk", "Ceph disk in creation", MustParseGUID("89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE")},
{"cephcryptosd", "Ceph dm-crypt OSD", MustParseGUID("89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE")},
{"openbsd", "OpenBSD Data", MustParseGUID("824CC7A0-36A8-11E3-890A-952519AD3F61")},
{"qnx6", "QNX6 Power-safe file system", MustParseGUID("CEF5A9AD-73BC-4601-89F3-CDEEEEE321A1")},
{"plan9", "Plan 9", MustParseGUID("C91818F9-8025-47AF-89D2-F030D7000C2C")},
{"vmwarecore", "vmkcore (coredump partition)", MustParseGUID("9D275380-40AD-11DB-BF97-000C2911D1B8")},
{"vmwarevmfs", "VMFS filesystem partition", MustParseGUID("AA31E02A-400F-11DB-9590-000C2911D1B8")},
{"vmwarersrv", "VMware Reserved", MustParseGUID("9198EFFC-31C0-11DB-8F78-000C2911D1B8")},
{"androidiabootldr", "Android-IA bootloader", MustParseGUID("2568845D-2332-4675-BC39-8FA5A4748D15")},
{"androidiabootldr2", "Android-IA bootloader 2", MustParseGUID("114EAFFE-1552-4022-B26E-9B053604CF84")},
{"androidiaboot", "Android-IA boot", MustParseGUID("49A4D17F-93A3-45C1-A0DE-F50B2EBE2599")},
{"androidiarecovery", "Android-IA recovery", MustParseGUID("4177C722-9E92-4AAB-8644-43502BFD5506")},
{"androidiamisc", "Android-IA misc", MustParseGUID("EF32A33B-A409-486C-9141-9FFB711F6266")},
{"androidiametadata", "Android-IA metadata", MustParseGUID("20AC26BE-20B7-11E3-84C5-6CFDB94711E9")},
{"androidiasystem", "Android-IA system", MustParseGUID("38F428E6-D326-425D-9140-6E0EA133647C")},
{"androidiacache", "Android-IA cache", MustParseGUID("A893EF21-E428-470A-9E55-0668FD91A2D9")},
{"androidiadata", "Android-IA data", MustParseGUID("DC76DDA9-5AC1-491C-AF42-A82591580C0D")},
{"androidiapersistent", "Android-IA persistent", MustParseGUID("EBC597D0-2053-4B15-8B64-E0AAC75F4DB1")},
{"androidiafactory", "Android-IA factory", MustParseGUID("8F68CC74-C5E5-48DA-BE91-A0C8C15E9C80")},
{"androidiafastboot", "Android-IA fastboot", MustParseGUID("767941D0-2085-11E3-AD3B-6CFDB94711E9")},
{"androidiaoem", "Android-IA OEM", MustParseGUID("AC6D7924-EB71-4DF8-B48D-E267B27148FF")},
{"onieboot", "Onie Boot", MustParseGUID("7412F7D5-A156-4B13-81DC-867174929325")},
{"oniecfg", "Onie Config", MustParseGUID("D4E6E2CD-4469-46F3-B5CB-1BFF57AFC149")},
{"ppcboot", "Prep boot", MustParseGUID("9E1A2D38-C612-4316-AA26-8B49521E5A8B")},
{"fdesktopboot", "Extended Boot Partition", MustParseGUID("BC13C2FF-59E6-4262-A352-B275FD6F7172")},
}
| {
var (
a uint32
b, c, d uint16
e uint64
p [16]byte
)
n, err := fmt.Sscanf(guid, "%x-%x-%x-%x-%x", &a, &b, &c, &d, &e)
if err != nil {
return p, err
}
if n != 5 {
return p, errors.New("invalid GUID format")
}
endian.Put32le(p[0:], a)
endian.Put16le(p[4:], b)
endian.Put16le(p[6:], c)
endian.Put16le(p[8:], d)
endian.Put48le(p[10:], e)
return p, nil
} | identifier_body |
gpt.go | package gpt
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"github.com/qeedquan/disktools/endian"
"github.com/qeedquan/disktools/mbr"
)
type Option struct {
Sectsz int
}
type GUID [16]byte
type Header struct {
Sig [8]byte
Rev uint32
Hdrsz uint32
Hdrcrc uint32
_ uint32
Current uint64
Backup uint64
First uint64
Last uint64
GUID GUID
Table uint64
Ent uint32
Entsz uint32
Tabcrc uint32
}
type Entry struct {
Part GUID
Uniq GUID
First uint64
Last uint64
Attr uint64
Name [72]byte
}
type Table struct {
MBR *mbr.Record
Header Header
Sectsz int
Entries []Entry
}
var (
ErrHeader = errors.New("gpt: invalid header")
)
func Open(r io.ReaderAt, o *Option) (*Table, error) {
if o == nil {
o = &Option{Sectsz: 512}
}
d := decoder{
r: r,
Table: Table{Sectsz: o.Sectsz},
}
err := d.decode()
if err != nil {
return nil, err
}
return &d.Table, nil
}
type decoder struct {
Table
r io.ReaderAt
}
func (d *decoder) decode() error {
var err error
d.MBR, err = mbr.Open(d.r)
if err != nil {
return err |
if d.MBR.Part[0].Type != 0xee {
return ErrHeader
}
d.Header, err = d.readHeader(int64(d.Sectsz))
if err != nil {
return err
}
d.Entries, err = d.readEntry(int64(d.Sectsz * 2))
if err != nil {
return err
}
return nil
}
func (d *decoder) readHeader(off int64) (Header, error) {
var h Header
sr := io.NewSectionReader(d.r, off, math.MaxUint32)
err := binary.Read(sr, binary.LittleEndian, &h)
if err != nil {
return h, err
}
if string(h.Sig[:]) != "EFI PART" {
return h, ErrHeader
}
return h, nil
}
func (d *decoder) readEntry(off int64) ([]Entry, error) {
var entries []Entry
h := &d.Header
buf := make([]byte, h.Ent)
for i := uint32(0); i < h.Ent; i++ {
_, err := d.r.ReadAt(buf, off)
if err != nil {
return nil, err
}
var entry Entry
rd := bytes.NewReader(buf)
err = binary.Read(rd, binary.LittleEndian, &entry)
entries = append(entries, entry)
}
return entries, nil
}
func ParseGUID(guid string) ([16]byte, error) {
var (
a uint32
b, c, d uint16
e uint64
p [16]byte
)
n, err := fmt.Sscanf(guid, "%x-%x-%x-%x-%x", &a, &b, &c, &d, &e)
if err != nil {
return p, err
}
if n != 5 {
return p, errors.New("invalid GUID format")
}
endian.Put32le(p[0:], a)
endian.Put16le(p[4:], b)
endian.Put16le(p[6:], c)
endian.Put16le(p[8:], d)
endian.Put48le(p[10:], e)
return p, nil
}
func MustParseGUID(guid string) GUID {
p, err := ParseGUID(guid)
if err != nil {
panic(err)
}
return p
}
func (p GUID) String() string {
return fmt.Sprintf("%X-%X-%X-%X-%X",
endian.Read32be(p[0:]),
endian.Read32be(p[4:]),
endian.Read32be(p[6:]),
endian.Read32be(p[8:]),
endian.Read48be(p[10:]),
)
}
var Parts = []struct {
Name string
Desc string
GUID GUID
}{
{"unused", "Unused entry", MustParseGUID("00000000-0000-0000-0000-000000000000")},
{"mbr", "MBR", MustParseGUID("024DEE41-33E7-11D3-9D69-0008C781F39F")},
{"efi", "EFI System", MustParseGUID("C12A7328-F81F-11D2-BA4B-00A0C93EC93B")},
{"bios", "BIOS Boot", MustParseGUID("21686148-6449-6E6F-744E-656564454649")},
{"iffs", "Intel Fast Flash", MustParseGUID("D3BFE2DE-3DAF-11DF-BA40-E3A556D89593")},
{"sony", "Sony boot", MustParseGUID("F4019732-066E-4E12-8273-346C5641494F")},
{"lenovo", "Lenovo boot", MustParseGUID("BFBFAFE7-A34F-448A-9A5B-6213EB736C22")},
{"msr", "Microsoft Reserved", MustParseGUID("E3C9E316-0B5C-4DB8-817D-F92DF00215AE")},
{"dos", "Microsoft Basic data", MustParseGUID("EBD0A0A2-B9E5-4433-87C0-68B6B72699C7")},
{"ldmm", "Microsoft Logical Disk Manager metadata", MustParseGUID("5808C8AA-7E8F-42E0-85D2-E1E90434CFB3")},
{"ldmd", "Microsoft Logical Disk Manager data", MustParseGUID("AF9B60A0-1431-4F62-BC68-3311714A69AD")},
{"recovery", "Windows Recovery Environment", MustParseGUID("DE94BBA4-06D1-4D40-A16A-BFD50179D6AC")},
{"gpfs", "IBM General Parallel File System", MustParseGUID("37AFFC90-EF7D-4E96-91C3-2D7AE055B174")},
{"storagespaces", "Storage Spaces", MustParseGUID("E75CAF8F-F680-4CEE-AFA3-B001E56EFC2D")},
{"hpuxdata", "HP-UX Data", MustParseGUID("75894C1E-3AEB-11D3-B7C1-7B03A0000000")},
{"hpuxserv", "HP-UX Service", MustParseGUID("E2A1E728-32E3-11D6-A682-7B03A0000000")},
{"linuxdata", "Linux Data", MustParseGUID("0FC63DAF-8483-4772-8E79-3D69D8477DE4")},
{"linuxraid", "Linux RAID", MustParseGUID("A19D880F-05FC-4D3B-A006-743F0F84911E")},
{"linuxrootx86", "Linux Root (x86)", MustParseGUID("44479540-F297-41B2-9AF7-D131D5F0458A")},
{"linuxrootx86_64", "Linux Root (x86-64)", MustParseGUID("4F68BCE3-E8CD-4DB1-96E7-FBCAF984B709")},
{"linuxrootarm", "Linux Root (ARM)", MustParseGUID("69DAD710-2CE4-4E3C-B16C-21A1D49ABED3")},
{"linuxrootaarch64", "Linux Root (ARM)", MustParseGUID("B921B045-1DF0-41C3-AF44-4C6F280D3FAE")},
{"linuxswap", "Linux Swap", MustParseGUID("0657FD6D-A4AB-43C4-84E5-0933C84B4F4F")},
{"linuxlvm", "Linux Logical Volume Manager", MustParseGUID("E6D6D379-F507-44C2-A23C-238F2A3DF928")},
{"linuxhome", "Linux /home", MustParseGUID("933AC7E1-2EB4-4F13-B844-0E14E2AEF915")},
{"linuxsrv", "Linux /srv", MustParseGUID("3B8F8425-20E0-4F3B-907F-1A25A76F98E8")},
{"linuxcrypt", "Linux Plain dm-crypt", MustParseGUID("7FFEC5C9-2D00-49B7-8941-3EA10A5586B7")},
{"luks", "LUKS", MustParseGUID("CA7D7CCB-63ED-4C53-861C-1742536059CC")},
{"linuxreserved", "Linux Reserved", MustParseGUID("8DA63339-0007-60C0-C436-083AC8230908")},
{"fbsdboot", "FreeBSD Boot", MustParseGUID("83BD6B9D-7F41-11DC-BE0B-001560B84F0F")},
{"fbsddata", "FreeBSD Data", MustParseGUID("516E7CB4-6ECF-11D6-8FF8-00022D09712B")},
{"fbsdswap", "FreeBSD Swap", MustParseGUID("516E7CB5-6ECF-11D6-8FF8-00022D09712B")},
{"fbsdufs", "FreeBSD Unix File System", MustParseGUID("516E7CB6-6ECF-11D6-8FF8-00022D09712B")},
{"fbsdvvm", "FreeBSD Vinum volume manager", MustParseGUID("516E7CB8-6ECF-11D6-8FF8-00022D09712B")},
{"fbsdzfs", "FreeBSD ZFS", MustParseGUID("516E7CBA-6ECF-11D6-8FF8-00022D09712B")},
{"applehfs", "Apple HFS+", MustParseGUID("48465300-0000-11AA-AA11-00306543ECAC")},
{"appleufs", "Apple UFS", MustParseGUID("55465300-0000-11AA-AA11-00306543ECAC")},
{"applezfs", "Apple ZFS", MustParseGUID("6A898CC3-1DD2-11B2-99A6-080020736631")},
{"appleraid", "Apple RAID", MustParseGUID("52414944-0000-11AA-AA11-00306543ECAC")},
{"appleraidoff", "Apple RAID, offline", MustParseGUID("52414944-5F4F-11AA-AA11-00306543ECAC")},
{"appleboot", "Apple Boot", MustParseGUID("426F6F74-0000-11AA-AA11-00306543ECAC")},
{"applelabel", "Apple Label", MustParseGUID("4C616265-6C00-11AA-AA11-00306543ECAC")},
{"appletv", "Apple TV Recovery", MustParseGUID("5265636F-7665-11AA-AA11-00306543ECAC")},
{"applecs", "Apple Core Storage", MustParseGUID("53746F72-6167-11AA-AA11-00306543ECAC")},
{"applesrs", "Apple SoftRAID Status", MustParseGUID("B6FA30DA-92D2-4A9A-96F1-871EC6486200")},
{"applesrscr", "Apple SoftRAID Scratch", MustParseGUID("2E313465-19B9-463F-8126-8A7993773801")},
{"applesrv", "Apple SoftRAID Volume", MustParseGUID("FA709C7E-65B1-4593-BFD5-E71D61DE9B02")},
{"applesrc", "Apple SoftRAID Cache", MustParseGUID("BBBA6DF5-F46F-4A89-8F59-8765B2727503")},
{"solarisboot", "Solaris Boot", MustParseGUID("6A82CB45-1DD2-11B2-99A6-080020736631")},
{"solarisroot", "Solaris Root", MustParseGUID("6A85CF4D-1DD2-11B2-99A6-080020736631")},
{"solarisswap", "Solaris Swap", MustParseGUID("6A87C46F-1DD2-11B2-99A6-080020736631")},
{"solarisbakup", "Solaris Backup", MustParseGUID("6A8B642B-1DD2-11B2-99A6-080020736631")},
{"solarisusr", "Solaris /usr", MustParseGUID("6A898CC3-1DD2-11B2-99A6-080020736631")},
{"solarisvar", "Solaris /var", MustParseGUID("6A8EF2E9-1DD2-11B2-99A6-080020736631")},
{"solarishome", "Solaris /home", MustParseGUID("6A90BA39-1DD2-11B2-99A6-080020736631")},
{"solarisalt", "Solaris Alternate sector", MustParseGUID("6A9283A5-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A945A3B-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A9630D1-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A980767-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A96237F-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A8D2AC7-1DD2-11B2-99A6-080020736631")},
{"nbsdswap", "NetBSD Swap", MustParseGUID("49F48D32-B10E-11DC-B99B-0019D1879648")},
{"nbsdffs", "NetBSD FFS", MustParseGUID("49F48D5A-B10E-11DC-B99B-0019D1879648")},
{"nbsdlfs", "NetBSD LFS", MustParseGUID("49F48D82-B10E-11DC-B99B-0019D1879648")},
{"nbsdraid", "NetBSD RAID", MustParseGUID("49F48DAA-B10E-11DC-B99B-0019D1879648")},
{"nbsdcat", "NetBSD Concatenated", MustParseGUID("2DB519C4-B10F-11DC-B99B-0019D1879648")},
{"nbsdcrypt", "NetBSD Encrypted", MustParseGUID("2DB519EC-B10F-11DC-B99B-0019D1879648")},
{"chromeoskern", "ChromeOS kernel", MustParseGUID("FE3A2A5D-4F32-41A7-B725-ACCC3285A309")},
{"chromeosroot", "ChromeOS rootfs", MustParseGUID("3CB8E202-3B7E-47DD-8A3C-7FF2A13CFCEC")},
{"chromeos", "ChromeOS future use", MustParseGUID("2E0A753D-9E48-43B0-8337-B15192CB1B5E")},
{"haikubfs", "Haiku BFS", MustParseGUID("42465331-3BA3-10F1-802A-4861696B7521")},
{"midbsdboot", "MidnightBSD Boot", MustParseGUID("85D5E45E-237C-11E1-B4B3-E89A8F7FC3A7")},
{"midbsddata", "MidnightBSD Data", MustParseGUID("85D5E45A-237C-11E1-B4B3-E89A8F7FC3A7")},
{"midbsdswap", "MidnightBSD Swap", MustParseGUID("85D5E45B-237C-11E1-B4B3-E89A8F7FC3A7")},
{"midbsdufs", "MidnightBSD Unix File System", MustParseGUID("0394EF8B-237E-11E1-B4B3-E89A8F7FC3A7")},
{"midbsdvvm", "MidnightBSD Vinum volume manager", MustParseGUID("85D5E45C-237C-11E1-B4B3-E89A8F7FC3A7")},
{"midbsdzfs", "MidnightBSD ZFS", MustParseGUID("85D5E45D-237C-11E1-B4B3-E89A8F7FC3A7")},
{"cephjournal", "Ceph Journal", MustParseGUID("45B0969E-9B03-4F30-B4C6-B4B80CEFF106")},
{"cephcrypt", "Ceph dm-crypt Encrypted Journal", MustParseGUID("45B0969E-9B03-4F30-B4C6-5EC00CEFF106")},
{"cephosd", "Ceph OSD", MustParseGUID("4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D")},
{"cephdsk", "Ceph disk in creation", MustParseGUID("89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE")},
{"cephcryptosd", "Ceph dm-crypt OSD", MustParseGUID("89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE")},
{"openbsd", "OpenBSD Data", MustParseGUID("824CC7A0-36A8-11E3-890A-952519AD3F61")},
{"qnx6", "QNX6 Power-safe file system", MustParseGUID("CEF5A9AD-73BC-4601-89F3-CDEEEEE321A1")},
{"plan9", "Plan 9", MustParseGUID("C91818F9-8025-47AF-89D2-F030D7000C2C")},
{"vmwarecore", "vmkcore (coredump partition)", MustParseGUID("9D275380-40AD-11DB-BF97-000C2911D1B8")},
{"vmwarevmfs", "VMFS filesystem partition", MustParseGUID("AA31E02A-400F-11DB-9590-000C2911D1B8")},
{"vmwarersrv", "VMware Reserved", MustParseGUID("9198EFFC-31C0-11DB-8F78-000C2911D1B8")},
{"androidiabootldr", "Android-IA bootloader", MustParseGUID("2568845D-2332-4675-BC39-8FA5A4748D15")},
{"androidiabootldr2", "Android-IA bootloader 2", MustParseGUID("114EAFFE-1552-4022-B26E-9B053604CF84")},
{"androidiaboot", "Android-IA boot", MustParseGUID("49A4D17F-93A3-45C1-A0DE-F50B2EBE2599")},
{"androidiarecovery", "Android-IA recovery", MustParseGUID("4177C722-9E92-4AAB-8644-43502BFD5506")},
{"androidiamisc", "Android-IA misc", MustParseGUID("EF32A33B-A409-486C-9141-9FFB711F6266")},
{"androidiametadata", "Android-IA metadata", MustParseGUID("20AC26BE-20B7-11E3-84C5-6CFDB94711E9")},
{"androidiasystem", "Android-IA system", MustParseGUID("38F428E6-D326-425D-9140-6E0EA133647C")},
{"androidiacache", "Android-IA cache", MustParseGUID("A893EF21-E428-470A-9E55-0668FD91A2D9")},
{"androidiadata", "Android-IA data", MustParseGUID("DC76DDA9-5AC1-491C-AF42-A82591580C0D")},
{"androidiapersistent", "Android-IA persistent", MustParseGUID("EBC597D0-2053-4B15-8B64-E0AAC75F4DB1")},
{"androidiafactory", "Android-IA factory", MustParseGUID("8F68CC74-C5E5-48DA-BE91-A0C8C15E9C80")},
{"androidiafastboot", "Android-IA fastboot", MustParseGUID("767941D0-2085-11E3-AD3B-6CFDB94711E9")},
{"androidiaoem", "Android-IA OEM", MustParseGUID("AC6D7924-EB71-4DF8-B48D-E267B27148FF")},
{"onieboot", "Onie Boot", MustParseGUID("7412F7D5-A156-4B13-81DC-867174929325")},
{"oniecfg", "Onie Config", MustParseGUID("D4E6E2CD-4469-46F3-B5CB-1BFF57AFC149")},
{"ppcboot", "Prep boot", MustParseGUID("9E1A2D38-C612-4316-AA26-8B49521E5A8B")},
{"fdesktopboot", "Extended Boot Partition", MustParseGUID("BC13C2FF-59E6-4262-A352-B275FD6F7172")},
} | } | random_line_split |
gpt.go | package gpt
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"github.com/qeedquan/disktools/endian"
"github.com/qeedquan/disktools/mbr"
)
type Option struct {
Sectsz int
}
type GUID [16]byte
type Header struct {
Sig [8]byte
Rev uint32
Hdrsz uint32
Hdrcrc uint32
_ uint32
Current uint64
Backup uint64
First uint64
Last uint64
GUID GUID
Table uint64
Ent uint32
Entsz uint32
Tabcrc uint32
}
type Entry struct {
Part GUID
Uniq GUID
First uint64
Last uint64
Attr uint64
Name [72]byte
}
type Table struct {
MBR *mbr.Record
Header Header
Sectsz int
Entries []Entry
}
var (
ErrHeader = errors.New("gpt: invalid header")
)
func Open(r io.ReaderAt, o *Option) (*Table, error) {
if o == nil {
o = &Option{Sectsz: 512}
}
d := decoder{
r: r,
Table: Table{Sectsz: o.Sectsz},
}
err := d.decode()
if err != nil {
return nil, err
}
return &d.Table, nil
}
type decoder struct {
Table
r io.ReaderAt
}
func (d *decoder) | () error {
var err error
d.MBR, err = mbr.Open(d.r)
if err != nil {
return err
}
if d.MBR.Part[0].Type != 0xee {
return ErrHeader
}
d.Header, err = d.readHeader(int64(d.Sectsz))
if err != nil {
return err
}
d.Entries, err = d.readEntry(int64(d.Sectsz * 2))
if err != nil {
return err
}
return nil
}
func (d *decoder) readHeader(off int64) (Header, error) {
var h Header
sr := io.NewSectionReader(d.r, off, math.MaxUint32)
err := binary.Read(sr, binary.LittleEndian, &h)
if err != nil {
return h, err
}
if string(h.Sig[:]) != "EFI PART" {
return h, ErrHeader
}
return h, nil
}
func (d *decoder) readEntry(off int64) ([]Entry, error) {
var entries []Entry
h := &d.Header
buf := make([]byte, h.Ent)
for i := uint32(0); i < h.Ent; i++ {
_, err := d.r.ReadAt(buf, off)
if err != nil {
return nil, err
}
var entry Entry
rd := bytes.NewReader(buf)
err = binary.Read(rd, binary.LittleEndian, &entry)
entries = append(entries, entry)
}
return entries, nil
}
func ParseGUID(guid string) ([16]byte, error) {
var (
a uint32
b, c, d uint16
e uint64
p [16]byte
)
n, err := fmt.Sscanf(guid, "%x-%x-%x-%x-%x", &a, &b, &c, &d, &e)
if err != nil {
return p, err
}
if n != 5 {
return p, errors.New("invalid GUID format")
}
endian.Put32le(p[0:], a)
endian.Put16le(p[4:], b)
endian.Put16le(p[6:], c)
endian.Put16le(p[8:], d)
endian.Put48le(p[10:], e)
return p, nil
}
func MustParseGUID(guid string) GUID {
p, err := ParseGUID(guid)
if err != nil {
panic(err)
}
return p
}
func (p GUID) String() string {
return fmt.Sprintf("%X-%X-%X-%X-%X",
endian.Read32be(p[0:]),
endian.Read32be(p[4:]),
endian.Read32be(p[6:]),
endian.Read32be(p[8:]),
endian.Read48be(p[10:]),
)
}
var Parts = []struct {
Name string
Desc string
GUID GUID
}{
{"unused", "Unused entry", MustParseGUID("00000000-0000-0000-0000-000000000000")},
{"mbr", "MBR", MustParseGUID("024DEE41-33E7-11D3-9D69-0008C781F39F")},
{"efi", "EFI System", MustParseGUID("C12A7328-F81F-11D2-BA4B-00A0C93EC93B")},
{"bios", "BIOS Boot", MustParseGUID("21686148-6449-6E6F-744E-656564454649")},
{"iffs", "Intel Fast Flash", MustParseGUID("D3BFE2DE-3DAF-11DF-BA40-E3A556D89593")},
{"sony", "Sony boot", MustParseGUID("F4019732-066E-4E12-8273-346C5641494F")},
{"lenovo", "Lenovo boot", MustParseGUID("BFBFAFE7-A34F-448A-9A5B-6213EB736C22")},
{"msr", "Microsoft Reserved", MustParseGUID("E3C9E316-0B5C-4DB8-817D-F92DF00215AE")},
{"dos", "Microsoft Basic data", MustParseGUID("EBD0A0A2-B9E5-4433-87C0-68B6B72699C7")},
{"ldmm", "Microsoft Logical Disk Manager metadata", MustParseGUID("5808C8AA-7E8F-42E0-85D2-E1E90434CFB3")},
{"ldmd", "Microsoft Logical Disk Manager data", MustParseGUID("AF9B60A0-1431-4F62-BC68-3311714A69AD")},
{"recovery", "Windows Recovery Environment", MustParseGUID("DE94BBA4-06D1-4D40-A16A-BFD50179D6AC")},
{"gpfs", "IBM General Parallel File System", MustParseGUID("37AFFC90-EF7D-4E96-91C3-2D7AE055B174")},
{"storagespaces", "Storage Spaces", MustParseGUID("E75CAF8F-F680-4CEE-AFA3-B001E56EFC2D")},
{"hpuxdata", "HP-UX Data", MustParseGUID("75894C1E-3AEB-11D3-B7C1-7B03A0000000")},
{"hpuxserv", "HP-UX Service", MustParseGUID("E2A1E728-32E3-11D6-A682-7B03A0000000")},
{"linuxdata", "Linux Data", MustParseGUID("0FC63DAF-8483-4772-8E79-3D69D8477DE4")},
{"linuxraid", "Linux RAID", MustParseGUID("A19D880F-05FC-4D3B-A006-743F0F84911E")},
{"linuxrootx86", "Linux Root (x86)", MustParseGUID("44479540-F297-41B2-9AF7-D131D5F0458A")},
{"linuxrootx86_64", "Linux Root (x86-64)", MustParseGUID("4F68BCE3-E8CD-4DB1-96E7-FBCAF984B709")},
{"linuxrootarm", "Linux Root (ARM)", MustParseGUID("69DAD710-2CE4-4E3C-B16C-21A1D49ABED3")},
{"linuxrootaarch64", "Linux Root (ARM)", MustParseGUID("B921B045-1DF0-41C3-AF44-4C6F280D3FAE")},
{"linuxswap", "Linux Swap", MustParseGUID("0657FD6D-A4AB-43C4-84E5-0933C84B4F4F")},
{"linuxlvm", "Linux Logical Volume Manager", MustParseGUID("E6D6D379-F507-44C2-A23C-238F2A3DF928")},
{"linuxhome", "Linux /home", MustParseGUID("933AC7E1-2EB4-4F13-B844-0E14E2AEF915")},
{"linuxsrv", "Linux /srv", MustParseGUID("3B8F8425-20E0-4F3B-907F-1A25A76F98E8")},
{"linuxcrypt", "Linux Plain dm-crypt", MustParseGUID("7FFEC5C9-2D00-49B7-8941-3EA10A5586B7")},
{"luks", "LUKS", MustParseGUID("CA7D7CCB-63ED-4C53-861C-1742536059CC")},
{"linuxreserved", "Linux Reserved", MustParseGUID("8DA63339-0007-60C0-C436-083AC8230908")},
{"fbsdboot", "FreeBSD Boot", MustParseGUID("83BD6B9D-7F41-11DC-BE0B-001560B84F0F")},
{"fbsddata", "FreeBSD Data", MustParseGUID("516E7CB4-6ECF-11D6-8FF8-00022D09712B")},
{"fbsdswap", "FreeBSD Swap", MustParseGUID("516E7CB5-6ECF-11D6-8FF8-00022D09712B")},
{"fbsdufs", "FreeBSD Unix File System", MustParseGUID("516E7CB6-6ECF-11D6-8FF8-00022D09712B")},
{"fbsdvvm", "FreeBSD Vinum volume manager", MustParseGUID("516E7CB8-6ECF-11D6-8FF8-00022D09712B")},
{"fbsdzfs", "FreeBSD ZFS", MustParseGUID("516E7CBA-6ECF-11D6-8FF8-00022D09712B")},
{"applehfs", "Apple HFS+", MustParseGUID("48465300-0000-11AA-AA11-00306543ECAC")},
{"appleufs", "Apple UFS", MustParseGUID("55465300-0000-11AA-AA11-00306543ECAC")},
{"applezfs", "Apple ZFS", MustParseGUID("6A898CC3-1DD2-11B2-99A6-080020736631")},
{"appleraid", "Apple RAID", MustParseGUID("52414944-0000-11AA-AA11-00306543ECAC")},
{"appleraidoff", "Apple RAID, offline", MustParseGUID("52414944-5F4F-11AA-AA11-00306543ECAC")},
{"appleboot", "Apple Boot", MustParseGUID("426F6F74-0000-11AA-AA11-00306543ECAC")},
{"applelabel", "Apple Label", MustParseGUID("4C616265-6C00-11AA-AA11-00306543ECAC")},
{"appletv", "Apple TV Recovery", MustParseGUID("5265636F-7665-11AA-AA11-00306543ECAC")},
{"applecs", "Apple Core Storage", MustParseGUID("53746F72-6167-11AA-AA11-00306543ECAC")},
{"applesrs", "Apple SoftRAID Status", MustParseGUID("B6FA30DA-92D2-4A9A-96F1-871EC6486200")},
{"applesrscr", "Apple SoftRAID Scratch", MustParseGUID("2E313465-19B9-463F-8126-8A7993773801")},
{"applesrv", "Apple SoftRAID Volume", MustParseGUID("FA709C7E-65B1-4593-BFD5-E71D61DE9B02")},
{"applesrc", "Apple SoftRAID Cache", MustParseGUID("BBBA6DF5-F46F-4A89-8F59-8765B2727503")},
{"solarisboot", "Solaris Boot", MustParseGUID("6A82CB45-1DD2-11B2-99A6-080020736631")},
{"solarisroot", "Solaris Root", MustParseGUID("6A85CF4D-1DD2-11B2-99A6-080020736631")},
{"solarisswap", "Solaris Swap", MustParseGUID("6A87C46F-1DD2-11B2-99A6-080020736631")},
{"solarisbakup", "Solaris Backup", MustParseGUID("6A8B642B-1DD2-11B2-99A6-080020736631")},
{"solarisusr", "Solaris /usr", MustParseGUID("6A898CC3-1DD2-11B2-99A6-080020736631")},
{"solarisvar", "Solaris /var", MustParseGUID("6A8EF2E9-1DD2-11B2-99A6-080020736631")},
{"solarishome", "Solaris /home", MustParseGUID("6A90BA39-1DD2-11B2-99A6-080020736631")},
{"solarisalt", "Solaris Alternate sector", MustParseGUID("6A9283A5-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A945A3B-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A9630D1-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A980767-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A96237F-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A8D2AC7-1DD2-11B2-99A6-080020736631")},
{"nbsdswap", "NetBSD Swap", MustParseGUID("49F48D32-B10E-11DC-B99B-0019D1879648")},
{"nbsdffs", "NetBSD FFS", MustParseGUID("49F48D5A-B10E-11DC-B99B-0019D1879648")},
{"nbsdlfs", "NetBSD LFS", MustParseGUID("49F48D82-B10E-11DC-B99B-0019D1879648")},
{"nbsdraid", "NetBSD RAID", MustParseGUID("49F48DAA-B10E-11DC-B99B-0019D1879648")},
{"nbsdcat", "NetBSD Concatenated", MustParseGUID("2DB519C4-B10F-11DC-B99B-0019D1879648")},
{"nbsdcrypt", "NetBSD Encrypted", MustParseGUID("2DB519EC-B10F-11DC-B99B-0019D1879648")},
{"chromeoskern", "ChromeOS kernel", MustParseGUID("FE3A2A5D-4F32-41A7-B725-ACCC3285A309")},
{"chromeosroot", "ChromeOS rootfs", MustParseGUID("3CB8E202-3B7E-47DD-8A3C-7FF2A13CFCEC")},
{"chromeos", "ChromeOS future use", MustParseGUID("2E0A753D-9E48-43B0-8337-B15192CB1B5E")},
{"haikubfs", "Haiku BFS", MustParseGUID("42465331-3BA3-10F1-802A-4861696B7521")},
{"midbsdboot", "MidnightBSD Boot", MustParseGUID("85D5E45E-237C-11E1-B4B3-E89A8F7FC3A7")},
{"midbsddata", "MidnightBSD Data", MustParseGUID("85D5E45A-237C-11E1-B4B3-E89A8F7FC3A7")},
{"midbsdswap", "MidnightBSD Swap", MustParseGUID("85D5E45B-237C-11E1-B4B3-E89A8F7FC3A7")},
{"midbsdufs", "MidnightBSD Unix File System", MustParseGUID("0394EF8B-237E-11E1-B4B3-E89A8F7FC3A7")},
{"midbsdvvm", "MidnightBSD Vinum volume manager", MustParseGUID("85D5E45C-237C-11E1-B4B3-E89A8F7FC3A7")},
{"midbsdzfs", "MidnightBSD ZFS", MustParseGUID("85D5E45D-237C-11E1-B4B3-E89A8F7FC3A7")},
{"cephjournal", "Ceph Journal", MustParseGUID("45B0969E-9B03-4F30-B4C6-B4B80CEFF106")},
{"cephcrypt", "Ceph dm-crypt Encrypted Journal", MustParseGUID("45B0969E-9B03-4F30-B4C6-5EC00CEFF106")},
{"cephosd", "Ceph OSD", MustParseGUID("4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D")},
{"cephdsk", "Ceph disk in creation", MustParseGUID("89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE")},
{"cephcryptosd", "Ceph dm-crypt OSD", MustParseGUID("89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE")},
{"openbsd", "OpenBSD Data", MustParseGUID("824CC7A0-36A8-11E3-890A-952519AD3F61")},
{"qnx6", "QNX6 Power-safe file system", MustParseGUID("CEF5A9AD-73BC-4601-89F3-CDEEEEE321A1")},
{"plan9", "Plan 9", MustParseGUID("C91818F9-8025-47AF-89D2-F030D7000C2C")},
{"vmwarecore", "vmkcore (coredump partition)", MustParseGUID("9D275380-40AD-11DB-BF97-000C2911D1B8")},
{"vmwarevmfs", "VMFS filesystem partition", MustParseGUID("AA31E02A-400F-11DB-9590-000C2911D1B8")},
{"vmwarersrv", "VMware Reserved", MustParseGUID("9198EFFC-31C0-11DB-8F78-000C2911D1B8")},
{"androidiabootldr", "Android-IA bootloader", MustParseGUID("2568845D-2332-4675-BC39-8FA5A4748D15")},
{"androidiabootldr2", "Android-IA bootloader 2", MustParseGUID("114EAFFE-1552-4022-B26E-9B053604CF84")},
{"androidiaboot", "Android-IA boot", MustParseGUID("49A4D17F-93A3-45C1-A0DE-F50B2EBE2599")},
{"androidiarecovery", "Android-IA recovery", MustParseGUID("4177C722-9E92-4AAB-8644-43502BFD5506")},
{"androidiamisc", "Android-IA misc", MustParseGUID("EF32A33B-A409-486C-9141-9FFB711F6266")},
{"androidiametadata", "Android-IA metadata", MustParseGUID("20AC26BE-20B7-11E3-84C5-6CFDB94711E9")},
{"androidiasystem", "Android-IA system", MustParseGUID("38F428E6-D326-425D-9140-6E0EA133647C")},
{"androidiacache", "Android-IA cache", MustParseGUID("A893EF21-E428-470A-9E55-0668FD91A2D9")},
{"androidiadata", "Android-IA data", MustParseGUID("DC76DDA9-5AC1-491C-AF42-A82591580C0D")},
{"androidiapersistent", "Android-IA persistent", MustParseGUID("EBC597D0-2053-4B15-8B64-E0AAC75F4DB1")},
{"androidiafactory", "Android-IA factory", MustParseGUID("8F68CC74-C5E5-48DA-BE91-A0C8C15E9C80")},
{"androidiafastboot", "Android-IA fastboot", MustParseGUID("767941D0-2085-11E3-AD3B-6CFDB94711E9")},
{"androidiaoem", "Android-IA OEM", MustParseGUID("AC6D7924-EB71-4DF8-B48D-E267B27148FF")},
{"onieboot", "Onie Boot", MustParseGUID("7412F7D5-A156-4B13-81DC-867174929325")},
{"oniecfg", "Onie Config", MustParseGUID("D4E6E2CD-4469-46F3-B5CB-1BFF57AFC149")},
{"ppcboot", "Prep boot", MustParseGUID("9E1A2D38-C612-4316-AA26-8B49521E5A8B")},
{"fdesktopboot", "Extended Boot Partition", MustParseGUID("BC13C2FF-59E6-4262-A352-B275FD6F7172")},
}
| decode | identifier_name |
gpt.go | package gpt
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"github.com/qeedquan/disktools/endian"
"github.com/qeedquan/disktools/mbr"
)
type Option struct {
Sectsz int
}
type GUID [16]byte
type Header struct {
Sig [8]byte
Rev uint32
Hdrsz uint32
Hdrcrc uint32
_ uint32
Current uint64
Backup uint64
First uint64
Last uint64
GUID GUID
Table uint64
Ent uint32
Entsz uint32
Tabcrc uint32
}
type Entry struct {
Part GUID
Uniq GUID
First uint64
Last uint64
Attr uint64
Name [72]byte
}
type Table struct {
MBR *mbr.Record
Header Header
Sectsz int
Entries []Entry
}
var (
ErrHeader = errors.New("gpt: invalid header")
)
func Open(r io.ReaderAt, o *Option) (*Table, error) {
if o == nil |
d := decoder{
r: r,
Table: Table{Sectsz: o.Sectsz},
}
err := d.decode()
if err != nil {
return nil, err
}
return &d.Table, nil
}
type decoder struct {
Table
r io.ReaderAt
}
func (d *decoder) decode() error {
var err error
d.MBR, err = mbr.Open(d.r)
if err != nil {
return err
}
if d.MBR.Part[0].Type != 0xee {
return ErrHeader
}
d.Header, err = d.readHeader(int64(d.Sectsz))
if err != nil {
return err
}
d.Entries, err = d.readEntry(int64(d.Sectsz * 2))
if err != nil {
return err
}
return nil
}
func (d *decoder) readHeader(off int64) (Header, error) {
var h Header
sr := io.NewSectionReader(d.r, off, math.MaxUint32)
err := binary.Read(sr, binary.LittleEndian, &h)
if err != nil {
return h, err
}
if string(h.Sig[:]) != "EFI PART" {
return h, ErrHeader
}
return h, nil
}
func (d *decoder) readEntry(off int64) ([]Entry, error) {
var entries []Entry
h := &d.Header
buf := make([]byte, h.Ent)
for i := uint32(0); i < h.Ent; i++ {
_, err := d.r.ReadAt(buf, off)
if err != nil {
return nil, err
}
var entry Entry
rd := bytes.NewReader(buf)
err = binary.Read(rd, binary.LittleEndian, &entry)
entries = append(entries, entry)
}
return entries, nil
}
func ParseGUID(guid string) ([16]byte, error) {
var (
a uint32
b, c, d uint16
e uint64
p [16]byte
)
n, err := fmt.Sscanf(guid, "%x-%x-%x-%x-%x", &a, &b, &c, &d, &e)
if err != nil {
return p, err
}
if n != 5 {
return p, errors.New("invalid GUID format")
}
endian.Put32le(p[0:], a)
endian.Put16le(p[4:], b)
endian.Put16le(p[6:], c)
endian.Put16le(p[8:], d)
endian.Put48le(p[10:], e)
return p, nil
}
func MustParseGUID(guid string) GUID {
p, err := ParseGUID(guid)
if err != nil {
panic(err)
}
return p
}
func (p GUID) String() string {
return fmt.Sprintf("%X-%X-%X-%X-%X",
endian.Read32be(p[0:]),
endian.Read32be(p[4:]),
endian.Read32be(p[6:]),
endian.Read32be(p[8:]),
endian.Read48be(p[10:]),
)
}
var Parts = []struct {
Name string
Desc string
GUID GUID
}{
{"unused", "Unused entry", MustParseGUID("00000000-0000-0000-0000-000000000000")},
{"mbr", "MBR", MustParseGUID("024DEE41-33E7-11D3-9D69-0008C781F39F")},
{"efi", "EFI System", MustParseGUID("C12A7328-F81F-11D2-BA4B-00A0C93EC93B")},
{"bios", "BIOS Boot", MustParseGUID("21686148-6449-6E6F-744E-656564454649")},
{"iffs", "Intel Fast Flash", MustParseGUID("D3BFE2DE-3DAF-11DF-BA40-E3A556D89593")},
{"sony", "Sony boot", MustParseGUID("F4019732-066E-4E12-8273-346C5641494F")},
{"lenovo", "Lenovo boot", MustParseGUID("BFBFAFE7-A34F-448A-9A5B-6213EB736C22")},
{"msr", "Microsoft Reserved", MustParseGUID("E3C9E316-0B5C-4DB8-817D-F92DF00215AE")},
{"dos", "Microsoft Basic data", MustParseGUID("EBD0A0A2-B9E5-4433-87C0-68B6B72699C7")},
{"ldmm", "Microsoft Logical Disk Manager metadata", MustParseGUID("5808C8AA-7E8F-42E0-85D2-E1E90434CFB3")},
{"ldmd", "Microsoft Logical Disk Manager data", MustParseGUID("AF9B60A0-1431-4F62-BC68-3311714A69AD")},
{"recovery", "Windows Recovery Environment", MustParseGUID("DE94BBA4-06D1-4D40-A16A-BFD50179D6AC")},
{"gpfs", "IBM General Parallel File System", MustParseGUID("37AFFC90-EF7D-4E96-91C3-2D7AE055B174")},
{"storagespaces", "Storage Spaces", MustParseGUID("E75CAF8F-F680-4CEE-AFA3-B001E56EFC2D")},
{"hpuxdata", "HP-UX Data", MustParseGUID("75894C1E-3AEB-11D3-B7C1-7B03A0000000")},
{"hpuxserv", "HP-UX Service", MustParseGUID("E2A1E728-32E3-11D6-A682-7B03A0000000")},
{"linuxdata", "Linux Data", MustParseGUID("0FC63DAF-8483-4772-8E79-3D69D8477DE4")},
{"linuxraid", "Linux RAID", MustParseGUID("A19D880F-05FC-4D3B-A006-743F0F84911E")},
{"linuxrootx86", "Linux Root (x86)", MustParseGUID("44479540-F297-41B2-9AF7-D131D5F0458A")},
{"linuxrootx86_64", "Linux Root (x86-64)", MustParseGUID("4F68BCE3-E8CD-4DB1-96E7-FBCAF984B709")},
{"linuxrootarm", "Linux Root (ARM)", MustParseGUID("69DAD710-2CE4-4E3C-B16C-21A1D49ABED3")},
{"linuxrootaarch64", "Linux Root (ARM)", MustParseGUID("B921B045-1DF0-41C3-AF44-4C6F280D3FAE")},
{"linuxswap", "Linux Swap", MustParseGUID("0657FD6D-A4AB-43C4-84E5-0933C84B4F4F")},
{"linuxlvm", "Linux Logical Volume Manager", MustParseGUID("E6D6D379-F507-44C2-A23C-238F2A3DF928")},
{"linuxhome", "Linux /home", MustParseGUID("933AC7E1-2EB4-4F13-B844-0E14E2AEF915")},
{"linuxsrv", "Linux /srv", MustParseGUID("3B8F8425-20E0-4F3B-907F-1A25A76F98E8")},
{"linuxcrypt", "Linux Plain dm-crypt", MustParseGUID("7FFEC5C9-2D00-49B7-8941-3EA10A5586B7")},
{"luks", "LUKS", MustParseGUID("CA7D7CCB-63ED-4C53-861C-1742536059CC")},
{"linuxreserved", "Linux Reserved", MustParseGUID("8DA63339-0007-60C0-C436-083AC8230908")},
{"fbsdboot", "FreeBSD Boot", MustParseGUID("83BD6B9D-7F41-11DC-BE0B-001560B84F0F")},
{"fbsddata", "FreeBSD Data", MustParseGUID("516E7CB4-6ECF-11D6-8FF8-00022D09712B")},
{"fbsdswap", "FreeBSD Swap", MustParseGUID("516E7CB5-6ECF-11D6-8FF8-00022D09712B")},
{"fbsdufs", "FreeBSD Unix File System", MustParseGUID("516E7CB6-6ECF-11D6-8FF8-00022D09712B")},
{"fbsdvvm", "FreeBSD Vinum volume manager", MustParseGUID("516E7CB8-6ECF-11D6-8FF8-00022D09712B")},
{"fbsdzfs", "FreeBSD ZFS", MustParseGUID("516E7CBA-6ECF-11D6-8FF8-00022D09712B")},
{"applehfs", "Apple HFS+", MustParseGUID("48465300-0000-11AA-AA11-00306543ECAC")},
{"appleufs", "Apple UFS", MustParseGUID("55465300-0000-11AA-AA11-00306543ECAC")},
{"applezfs", "Apple ZFS", MustParseGUID("6A898CC3-1DD2-11B2-99A6-080020736631")},
{"appleraid", "Apple RAID", MustParseGUID("52414944-0000-11AA-AA11-00306543ECAC")},
{"appleraidoff", "Apple RAID, offline", MustParseGUID("52414944-5F4F-11AA-AA11-00306543ECAC")},
{"appleboot", "Apple Boot", MustParseGUID("426F6F74-0000-11AA-AA11-00306543ECAC")},
{"applelabel", "Apple Label", MustParseGUID("4C616265-6C00-11AA-AA11-00306543ECAC")},
{"appletv", "Apple TV Recovery", MustParseGUID("5265636F-7665-11AA-AA11-00306543ECAC")},
{"applecs", "Apple Core Storage", MustParseGUID("53746F72-6167-11AA-AA11-00306543ECAC")},
{"applesrs", "Apple SoftRAID Status", MustParseGUID("B6FA30DA-92D2-4A9A-96F1-871EC6486200")},
{"applesrscr", "Apple SoftRAID Scratch", MustParseGUID("2E313465-19B9-463F-8126-8A7993773801")},
{"applesrv", "Apple SoftRAID Volume", MustParseGUID("FA709C7E-65B1-4593-BFD5-E71D61DE9B02")},
{"applesrc", "Apple SoftRAID Cache", MustParseGUID("BBBA6DF5-F46F-4A89-8F59-8765B2727503")},
{"solarisboot", "Solaris Boot", MustParseGUID("6A82CB45-1DD2-11B2-99A6-080020736631")},
{"solarisroot", "Solaris Root", MustParseGUID("6A85CF4D-1DD2-11B2-99A6-080020736631")},
{"solarisswap", "Solaris Swap", MustParseGUID("6A87C46F-1DD2-11B2-99A6-080020736631")},
{"solarisbakup", "Solaris Backup", MustParseGUID("6A8B642B-1DD2-11B2-99A6-080020736631")},
{"solarisusr", "Solaris /usr", MustParseGUID("6A898CC3-1DD2-11B2-99A6-080020736631")},
{"solarisvar", "Solaris /var", MustParseGUID("6A8EF2E9-1DD2-11B2-99A6-080020736631")},
{"solarishome", "Solaris /home", MustParseGUID("6A90BA39-1DD2-11B2-99A6-080020736631")},
{"solarisalt", "Solaris Alternate sector", MustParseGUID("6A9283A5-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A945A3B-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A9630D1-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A980767-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A96237F-1DD2-11B2-99A6-080020736631")},
{"solaris", "Solaris Reserved", MustParseGUID("6A8D2AC7-1DD2-11B2-99A6-080020736631")},
{"nbsdswap", "NetBSD Swap", MustParseGUID("49F48D32-B10E-11DC-B99B-0019D1879648")},
{"nbsdffs", "NetBSD FFS", MustParseGUID("49F48D5A-B10E-11DC-B99B-0019D1879648")},
{"nbsdlfs", "NetBSD LFS", MustParseGUID("49F48D82-B10E-11DC-B99B-0019D1879648")},
{"nbsdraid", "NetBSD RAID", MustParseGUID("49F48DAA-B10E-11DC-B99B-0019D1879648")},
{"nbsdcat", "NetBSD Concatenated", MustParseGUID("2DB519C4-B10F-11DC-B99B-0019D1879648")},
{"nbsdcrypt", "NetBSD Encrypted", MustParseGUID("2DB519EC-B10F-11DC-B99B-0019D1879648")},
{"chromeoskern", "ChromeOS kernel", MustParseGUID("FE3A2A5D-4F32-41A7-B725-ACCC3285A309")},
{"chromeosroot", "ChromeOS rootfs", MustParseGUID("3CB8E202-3B7E-47DD-8A3C-7FF2A13CFCEC")},
{"chromeos", "ChromeOS future use", MustParseGUID("2E0A753D-9E48-43B0-8337-B15192CB1B5E")},
{"haikubfs", "Haiku BFS", MustParseGUID("42465331-3BA3-10F1-802A-4861696B7521")},
{"midbsdboot", "MidnightBSD Boot", MustParseGUID("85D5E45E-237C-11E1-B4B3-E89A8F7FC3A7")},
{"midbsddata", "MidnightBSD Data", MustParseGUID("85D5E45A-237C-11E1-B4B3-E89A8F7FC3A7")},
{"midbsdswap", "MidnightBSD Swap", MustParseGUID("85D5E45B-237C-11E1-B4B3-E89A8F7FC3A7")},
{"midbsdufs", "MidnightBSD Unix File System", MustParseGUID("0394EF8B-237E-11E1-B4B3-E89A8F7FC3A7")},
{"midbsdvvm", "MidnightBSD Vinum volume manager", MustParseGUID("85D5E45C-237C-11E1-B4B3-E89A8F7FC3A7")},
{"midbsdzfs", "MidnightBSD ZFS", MustParseGUID("85D5E45D-237C-11E1-B4B3-E89A8F7FC3A7")},
{"cephjournal", "Ceph Journal", MustParseGUID("45B0969E-9B03-4F30-B4C6-B4B80CEFF106")},
{"cephcrypt", "Ceph dm-crypt Encrypted Journal", MustParseGUID("45B0969E-9B03-4F30-B4C6-5EC00CEFF106")},
{"cephosd", "Ceph OSD", MustParseGUID("4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D")},
{"cephdsk", "Ceph disk in creation", MustParseGUID("89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE")},
{"cephcryptosd", "Ceph dm-crypt OSD", MustParseGUID("89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE")},
{"openbsd", "OpenBSD Data", MustParseGUID("824CC7A0-36A8-11E3-890A-952519AD3F61")},
{"qnx6", "QNX6 Power-safe file system", MustParseGUID("CEF5A9AD-73BC-4601-89F3-CDEEEEE321A1")},
{"plan9", "Plan 9", MustParseGUID("C91818F9-8025-47AF-89D2-F030D7000C2C")},
{"vmwarecore", "vmkcore (coredump partition)", MustParseGUID("9D275380-40AD-11DB-BF97-000C2911D1B8")},
{"vmwarevmfs", "VMFS filesystem partition", MustParseGUID("AA31E02A-400F-11DB-9590-000C2911D1B8")},
{"vmwarersrv", "VMware Reserved", MustParseGUID("9198EFFC-31C0-11DB-8F78-000C2911D1B8")},
{"androidiabootldr", "Android-IA bootloader", MustParseGUID("2568845D-2332-4675-BC39-8FA5A4748D15")},
{"androidiabootldr2", "Android-IA bootloader 2", MustParseGUID("114EAFFE-1552-4022-B26E-9B053604CF84")},
{"androidiaboot", "Android-IA boot", MustParseGUID("49A4D17F-93A3-45C1-A0DE-F50B2EBE2599")},
{"androidiarecovery", "Android-IA recovery", MustParseGUID("4177C722-9E92-4AAB-8644-43502BFD5506")},
{"androidiamisc", "Android-IA misc", MustParseGUID("EF32A33B-A409-486C-9141-9FFB711F6266")},
{"androidiametadata", "Android-IA metadata", MustParseGUID("20AC26BE-20B7-11E3-84C5-6CFDB94711E9")},
{"androidiasystem", "Android-IA system", MustParseGUID("38F428E6-D326-425D-9140-6E0EA133647C")},
{"androidiacache", "Android-IA cache", MustParseGUID("A893EF21-E428-470A-9E55-0668FD91A2D9")},
{"androidiadata", "Android-IA data", MustParseGUID("DC76DDA9-5AC1-491C-AF42-A82591580C0D")},
{"androidiapersistent", "Android-IA persistent", MustParseGUID("EBC597D0-2053-4B15-8B64-E0AAC75F4DB1")},
{"androidiafactory", "Android-IA factory", MustParseGUID("8F68CC74-C5E5-48DA-BE91-A0C8C15E9C80")},
{"androidiafastboot", "Android-IA fastboot", MustParseGUID("767941D0-2085-11E3-AD3B-6CFDB94711E9")},
{"androidiaoem", "Android-IA OEM", MustParseGUID("AC6D7924-EB71-4DF8-B48D-E267B27148FF")},
{"onieboot", "Onie Boot", MustParseGUID("7412F7D5-A156-4B13-81DC-867174929325")},
{"oniecfg", "Onie Config", MustParseGUID("D4E6E2CD-4469-46F3-B5CB-1BFF57AFC149")},
{"ppcboot", "Prep boot", MustParseGUID("9E1A2D38-C612-4316-AA26-8B49521E5A8B")},
{"fdesktopboot", "Extended Boot Partition", MustParseGUID("BC13C2FF-59E6-4262-A352-B275FD6F7172")},
}
| {
o = &Option{Sectsz: 512}
} | conditional_block |
serving.py | # coding:utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import json
import multiprocessing
import time
import signal
import paddlehub as hub
from paddlehub.commands import register
from paddlehub.serving import app_compat as app
from paddlehub.env import CONF_HOME
from paddlehub.serving.http_server import run_all, StandaloneApplication
from paddlehub.utils import log
from paddlehub.utils.utils import is_port_occupied
from paddlehub.server.server import CacheUpdater
def number_of_workers():
'''
Get suitable quantity of workers based on empirical formula.
'''
return (multiprocessing.cpu_count() * 2) + 1
def pid_is_exist(pid: int):
'''
Try to kill process by PID.
Args:
pid(int): PID of process to be killed.
Returns:
True if PID will be killed.
Examples:
.. code-block:: python
pid_is_exist(pid=8866)
'''
try:
os.kill(pid, 0)
except:
return False
else:
return True
@register(name='hub.serving', description='Start Module Serving or Bert Service for online predicting.')
class ServingCommand:
name = "serving"
module_list = []
def dump_pid_file(self):
'''
Write PID info to file.
'''
pid = os.getpid()
filepath = os.path.join(CONF_HOME, "serving_" + str(self.args.port) + ".json")
if os.path.exists(filepath):
os.remove(filepath)
with open(filepath, "w") as fp:
info = {"pid": pid, "module": self.args.modules, "start_time": time.time()}
json.dump(info, fp)
@staticmethod
def load_pid_file(filepath: str, port: int = None):
'''
Read PID info from file.
'''
if port is None:
port = os.path.basename(filepath).split(".")[0].split("_")[1]
if not os.path.exists(filepath):
log.logger.error(
"PaddleHub Serving config file is not exists, please confirm the port [%s] you specified is correct." %
port)
return False
with open(filepath, "r") as fp:
info = json.load(fp)
return info
def stop_serving(self, port: int):
'''
Stop PaddleHub-Serving by port.
'''
filepath = os.path.join(CONF_HOME, "serving_" + str(port) + ".json")
info = self.load_pid_file(filepath, port)
if info is False:
return
pid = info["pid"]
module = info["module"]
start_time = info["start_time"]
CacheUpdater("hub_serving_stop", module=module, addition={"period_time": time.time() - start_time}).start()
if os.path.exists(filepath):
os.remove(filepath)
if not pid_is_exist(pid):
log.logger.info("PaddleHub Serving has been stopped.")
return
log.logger.info("PaddleHub Serving will stop.")
if platform.system() == "Windows":
os.kill(pid, signal.SIGTERM)
else:
try:
os.killpg(pid, signal.SIGTERM)
except ProcessLookupError:
os.kill(pid, signal.SIGTERM)
@staticmethod
def start_bert_serving(args):
'''
Start bert serving server.
'''
if platform.system() != "Linux":
log.logger.error("Error. Bert Service only support linux.")
return False
if is_port_occupied("127.0.0.1", args.port) is True:
log.logger.error("Port %s is occupied, please change it." % args.port)
return False
from paddle_gpu_serving.run import BertServer
bs = BertServer(with_gpu=args.use_gpu)
bs.with_model(model_name=args.modules[0])
CacheUpdater("hub_bert_service", module=args.modules[0], version="0.0.0").start()
bs.run(gpu_index=args.gpu, port=int(args.port))
def preinstall_modules(self):
'''
Install module by PaddleHub and get info of this module.
'''
for key, value in self.modules_info.items():
init_args = value["init_args"]
CacheUpdater("hub_serving_start", module=key, version=init_args.get("version", "0.0.0")).start()
if "directory" not in init_args:
init_args.update({"name": key})
m = hub.Module(**init_args)
method_name = m.serving_func_name
if method_name is None:
raise RuntimeError("{} cannot be use for " "predicting".format(key))
exit(1)
serving_method = getattr(m, method_name)
category = str(m.type).split("/")[0].upper()
self.modules_info[key].update({
"method_name": method_name,
"version": m.version,
"category": category,
"module": m,
"name": m.name,
"serving_method": serving_method
})
def start_app_with_args(self):
'''
Start one PaddleHub-Serving instance by arguments with gunicorn.
'''
module = self.modules_info
if module is not None:
port = self.args.port
if is_port_occupied("127.0.0.1", port) is True:
log.logger.error("Port %s is occupied, please change it." % port)
return False
self.preinstall_modules()
options = {"bind": "0.0.0.0:%s" % port, "workers": self.args.workers}
self.dump_pid_file()
StandaloneApplication(app.create_app(init_flag=False, configs=self.modules_info), options).run()
else:
log.logger.error("Lack of necessary parameters!")
def start_zmq_serving_with_args(self):
'''
Start one PaddleHub-Serving instance by arguments with zmq.
'''
if self.modules_info is not None:
for module, info in self.modules_info.items():
CacheUpdater("hub_serving_start", module=module, version=info['init_args']['version']).start()
front_port = self.args.port
if is_port_occupied("127.0.0.1", front_port) is True:
log.logger.error("Port %s is occupied, please change it." % front_port)
return False
back_port = int(front_port) + 1
for index in range(100):
if not is_port_occupied("127.0.0.1", back_port):
break
else:
back_port = int(back_port) + 1
else:
raise RuntimeError(
"Port from %s to %s is occupied, please use another port" % (int(front_port) + 1, back_port))
self.dump_pid_file()
run_all(self.modules_info, self.args.gpu, front_port, back_port)
else:
log.logger.error("Lack of necessary parameters!")
def start_single_app_with_args(self):
'''
Start one PaddleHub-Serving instance by arguments with flask.
'''
module = self.modules_info
if module is not None:
port = self.args.port
if is_port_occupied("127.0.0.1", port) is True:
log.logger.error("Port %s is occupied, please change it." % port)
return False
self.preinstall_modules()
self.dump_pid_file()
app.run(configs=self.modules_info, port=port)
else:
log.logger.error("Lack of necessary parameters!")
def start_serving(self):
'''
Start PaddleHub-Serving with flask and gunicorn
'''
if self.args.use_gpu:
|
else:
if self.args.use_multiprocess:
if platform.system() == "Windows":
log.logger.warning(
"Warning: Windows cannot use multiprocess working mode, PaddleHub Serving will switch to single process mode"
)
self.start_single_app_with_args()
else:
self.start_app_with_args()
else:
self.start_single_app_with_args()
@staticmethod
def show_help():
str = "serving <option>\n"
str += "\tManage PaddleHub Serving.\n"
str += "sub command:\n"
str += "1. start\n"
str += "\tStart PaddleHub Serving.\n"
str += "2. stop\n"
str += "\tStop PaddleHub Serving.\n"
str += "3. start bert_service\n"
str += "\tStart Bert Service.\n"
str += "\n"
str += "[start] option:\n"
str += "--modules/-m [module1==version, module2==version...]\n"
str += "\tPre-install modules via the parameter list.\n"
str += "--port/-p XXXX\n"
str += "\tUse port XXXX for serving.\n"
str += "--use_multiprocess\n"
str += "\tChoose multoprocess mode, cannot be use on Windows.\n"
str += "--modules_info\n"
str += "\tSet module config in PaddleHub Serving."
str += "--config/-c file_path\n"
str += "\tUse configs in file to start PaddleHub Serving. "
str += "Other parameters will be ignored if you specify the parameter.\n"
str += "\n"
str += "[stop] option:\n"
str += "--port/-p XXXX\n"
str += "\tStop PaddleHub Serving on port XXXX safely.\n"
str += "\n"
str += "[start bert_service] option:\n"
str += "--modules/-m\n"
str += "\tPre-install modules via the parameter.\n"
str += "--port/-p XXXX\n"
str += "\tUse port XXXX for serving.\n"
str += "--use_gpu\n"
str += "\tUse gpu for predicting if specifies the parameter.\n"
str += "--gpu\n"
str += "\tSpecify the GPU devices to use.\n"
print(str)
def parse_args(self):
if self.args.config is not None:
if os.path.exists(self.args.config):
with open(self.args.config, "r") as fp:
# self.args.config = json.load(fp)
self.args_config = json.load(fp)
self.args.use_gpu = self.args_config.get('use_gpu', False)
self.args.use_multiprocess = self.args_config.get('use_multiprocess', False)
self.modules_info = self.args_config["modules_info"]
self.args.port = self.args_config.get('port', 8866)
if self.args.use_gpu:
self.args.gpu = self.args_config.get('gpu', '0')
else:
self.args.gpu = self.args_config.get('gpu', None)
self.args.use_gpu = self.args_config.get('use_gpu', False)
if self.args.use_multiprocess:
self.args.workers = self.args_config.get('workers', number_of_workers())
else:
self.args.workers = self.args_config.get('workers', None)
else:
raise RuntimeError("{} not exists.".format(self.args.config))
exit(1)
else:
self.modules_info = {}
for item in self.args.modules:
version = None
if "==" in item:
module = item.split("==")[0]
version = item.split("==")[1]
else:
module = item
self.modules_info.update({module: {"init_args": {"version": version}, "predict_args": {}}})
if self.args.gpu:
self.args.gpu = self.args.gpu.split(',')
return self.modules_info
def execute(self, argv):
self.show_in_help = True
self.description = "Start Module Serving or Bert Service for online predicting."
self.parser = argparse.ArgumentParser(
description=self.__class__.__doc__, prog='hub serving', usage='%(prog)s', add_help=True)
self.parser.add_argument("command")
self.parser.add_argument("sub_command")
self.parser.add_argument("bert_service", nargs="?")
self.sub_parse = self.parser.add_mutually_exclusive_group(required=False)
self.parser.add_argument("--use_gpu", action="store_true", default=False)
self.parser.add_argument("--use_multiprocess", action="store_true", default=False)
self.parser.add_argument("--modules", "-m", nargs="+")
self.parser.add_argument("--config", "-c", nargs="?")
self.parser.add_argument("--port", "-p", nargs="?", default=8866)
self.parser.add_argument("--gpu", "-i", nargs="?", default='0')
self.parser.add_argument("--use_singleprocess", action="store_true", default=False)
self.parser.add_argument("--modules_info", "-mi", default={}, type=json.loads)
self.parser.add_argument("--workers", "-w", nargs="?", default=number_of_workers())
try:
self.args = self.parser.parse_args()
except:
ServingCommand.show_help()
return False
if self.args.sub_command == "start":
if self.args.bert_service == "bert_service":
ServingCommand.start_bert_serving(self.args)
else:
self.parse_args()
self.start_serving()
elif self.args.sub_command == "stop":
if self.args.bert_service == "bert_service":
log.logger.warning("Please stop Bert Service by kill process by yourself")
elif self.args.bert_service is None:
self.stop_serving(port=self.args.port)
else:
ServingCommand.show_help()
| if self.args.use_multiprocess:
log.logger.warning('`use_multiprocess` will be ignored if specify `use_gpu`.')
self.start_zmq_serving_with_args() | conditional_block |
serving.py | # coding:utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import json
import multiprocessing
import time
import signal
import paddlehub as hub
from paddlehub.commands import register
from paddlehub.serving import app_compat as app
from paddlehub.env import CONF_HOME
from paddlehub.serving.http_server import run_all, StandaloneApplication
from paddlehub.utils import log
from paddlehub.utils.utils import is_port_occupied
from paddlehub.server.server import CacheUpdater
def number_of_workers():
'''
Get suitable quantity of workers based on empirical formula.
'''
return (multiprocessing.cpu_count() * 2) + 1
def pid_is_exist(pid: int):
'''
Try to kill process by PID.
Args:
pid(int): PID of process to be killed.
Returns:
True if PID will be killed.
Examples:
.. code-block:: python
pid_is_exist(pid=8866)
'''
try:
os.kill(pid, 0)
except:
return False
else:
return True
@register(name='hub.serving', description='Start Module Serving or Bert Service for online predicting.')
class ServingCommand:
name = "serving"
module_list = []
def dump_pid_file(self):
'''
Write PID info to file.
'''
pid = os.getpid()
filepath = os.path.join(CONF_HOME, "serving_" + str(self.args.port) + ".json")
if os.path.exists(filepath):
os.remove(filepath)
with open(filepath, "w") as fp:
info = {"pid": pid, "module": self.args.modules, "start_time": time.time()}
json.dump(info, fp)
@staticmethod
def load_pid_file(filepath: str, port: int = None):
'''
Read PID info from file.
'''
if port is None:
port = os.path.basename(filepath).split(".")[0].split("_")[1]
if not os.path.exists(filepath):
log.logger.error(
"PaddleHub Serving config file is not exists, please confirm the port [%s] you specified is correct." %
port)
return False
with open(filepath, "r") as fp:
info = json.load(fp)
return info
def stop_serving(self, port: int):
'''
Stop PaddleHub-Serving by port.
'''
filepath = os.path.join(CONF_HOME, "serving_" + str(port) + ".json")
info = self.load_pid_file(filepath, port)
if info is False:
return
pid = info["pid"]
module = info["module"]
start_time = info["start_time"]
CacheUpdater("hub_serving_stop", module=module, addition={"period_time": time.time() - start_time}).start() |
if not pid_is_exist(pid):
log.logger.info("PaddleHub Serving has been stopped.")
return
log.logger.info("PaddleHub Serving will stop.")
if platform.system() == "Windows":
os.kill(pid, signal.SIGTERM)
else:
try:
os.killpg(pid, signal.SIGTERM)
except ProcessLookupError:
os.kill(pid, signal.SIGTERM)
@staticmethod
def start_bert_serving(args):
'''
Start bert serving server.
'''
if platform.system() != "Linux":
log.logger.error("Error. Bert Service only support linux.")
return False
if is_port_occupied("127.0.0.1", args.port) is True:
log.logger.error("Port %s is occupied, please change it." % args.port)
return False
from paddle_gpu_serving.run import BertServer
bs = BertServer(with_gpu=args.use_gpu)
bs.with_model(model_name=args.modules[0])
CacheUpdater("hub_bert_service", module=args.modules[0], version="0.0.0").start()
bs.run(gpu_index=args.gpu, port=int(args.port))
def preinstall_modules(self):
'''
Install module by PaddleHub and get info of this module.
'''
for key, value in self.modules_info.items():
init_args = value["init_args"]
CacheUpdater("hub_serving_start", module=key, version=init_args.get("version", "0.0.0")).start()
if "directory" not in init_args:
init_args.update({"name": key})
m = hub.Module(**init_args)
method_name = m.serving_func_name
if method_name is None:
raise RuntimeError("{} cannot be use for " "predicting".format(key))
exit(1)
serving_method = getattr(m, method_name)
category = str(m.type).split("/")[0].upper()
self.modules_info[key].update({
"method_name": method_name,
"version": m.version,
"category": category,
"module": m,
"name": m.name,
"serving_method": serving_method
})
def start_app_with_args(self):
'''
Start one PaddleHub-Serving instance by arguments with gunicorn.
'''
module = self.modules_info
if module is not None:
port = self.args.port
if is_port_occupied("127.0.0.1", port) is True:
log.logger.error("Port %s is occupied, please change it." % port)
return False
self.preinstall_modules()
options = {"bind": "0.0.0.0:%s" % port, "workers": self.args.workers}
self.dump_pid_file()
StandaloneApplication(app.create_app(init_flag=False, configs=self.modules_info), options).run()
else:
log.logger.error("Lack of necessary parameters!")
def start_zmq_serving_with_args(self):
'''
Start one PaddleHub-Serving instance by arguments with zmq.
'''
if self.modules_info is not None:
for module, info in self.modules_info.items():
CacheUpdater("hub_serving_start", module=module, version=info['init_args']['version']).start()
front_port = self.args.port
if is_port_occupied("127.0.0.1", front_port) is True:
log.logger.error("Port %s is occupied, please change it." % front_port)
return False
back_port = int(front_port) + 1
for index in range(100):
if not is_port_occupied("127.0.0.1", back_port):
break
else:
back_port = int(back_port) + 1
else:
raise RuntimeError(
"Port from %s to %s is occupied, please use another port" % (int(front_port) + 1, back_port))
self.dump_pid_file()
run_all(self.modules_info, self.args.gpu, front_port, back_port)
else:
log.logger.error("Lack of necessary parameters!")
def start_single_app_with_args(self):
'''
Start one PaddleHub-Serving instance by arguments with flask.
'''
module = self.modules_info
if module is not None:
port = self.args.port
if is_port_occupied("127.0.0.1", port) is True:
log.logger.error("Port %s is occupied, please change it." % port)
return False
self.preinstall_modules()
self.dump_pid_file()
app.run(configs=self.modules_info, port=port)
else:
log.logger.error("Lack of necessary parameters!")
def start_serving(self):
'''
Start PaddleHub-Serving with flask and gunicorn
'''
if self.args.use_gpu:
if self.args.use_multiprocess:
log.logger.warning('`use_multiprocess` will be ignored if specify `use_gpu`.')
self.start_zmq_serving_with_args()
else:
if self.args.use_multiprocess:
if platform.system() == "Windows":
log.logger.warning(
"Warning: Windows cannot use multiprocess working mode, PaddleHub Serving will switch to single process mode"
)
self.start_single_app_with_args()
else:
self.start_app_with_args()
else:
self.start_single_app_with_args()
@staticmethod
def show_help():
str = "serving <option>\n"
str += "\tManage PaddleHub Serving.\n"
str += "sub command:\n"
str += "1. start\n"
str += "\tStart PaddleHub Serving.\n"
str += "2. stop\n"
str += "\tStop PaddleHub Serving.\n"
str += "3. start bert_service\n"
str += "\tStart Bert Service.\n"
str += "\n"
str += "[start] option:\n"
str += "--modules/-m [module1==version, module2==version...]\n"
str += "\tPre-install modules via the parameter list.\n"
str += "--port/-p XXXX\n"
str += "\tUse port XXXX for serving.\n"
str += "--use_multiprocess\n"
str += "\tChoose multoprocess mode, cannot be use on Windows.\n"
str += "--modules_info\n"
str += "\tSet module config in PaddleHub Serving."
str += "--config/-c file_path\n"
str += "\tUse configs in file to start PaddleHub Serving. "
str += "Other parameters will be ignored if you specify the parameter.\n"
str += "\n"
str += "[stop] option:\n"
str += "--port/-p XXXX\n"
str += "\tStop PaddleHub Serving on port XXXX safely.\n"
str += "\n"
str += "[start bert_service] option:\n"
str += "--modules/-m\n"
str += "\tPre-install modules via the parameter.\n"
str += "--port/-p XXXX\n"
str += "\tUse port XXXX for serving.\n"
str += "--use_gpu\n"
str += "\tUse gpu for predicting if specifies the parameter.\n"
str += "--gpu\n"
str += "\tSpecify the GPU devices to use.\n"
print(str)
def parse_args(self):
if self.args.config is not None:
if os.path.exists(self.args.config):
with open(self.args.config, "r") as fp:
# self.args.config = json.load(fp)
self.args_config = json.load(fp)
self.args.use_gpu = self.args_config.get('use_gpu', False)
self.args.use_multiprocess = self.args_config.get('use_multiprocess', False)
self.modules_info = self.args_config["modules_info"]
self.args.port = self.args_config.get('port', 8866)
if self.args.use_gpu:
self.args.gpu = self.args_config.get('gpu', '0')
else:
self.args.gpu = self.args_config.get('gpu', None)
self.args.use_gpu = self.args_config.get('use_gpu', False)
if self.args.use_multiprocess:
self.args.workers = self.args_config.get('workers', number_of_workers())
else:
self.args.workers = self.args_config.get('workers', None)
else:
raise RuntimeError("{} not exists.".format(self.args.config))
exit(1)
else:
self.modules_info = {}
for item in self.args.modules:
version = None
if "==" in item:
module = item.split("==")[0]
version = item.split("==")[1]
else:
module = item
self.modules_info.update({module: {"init_args": {"version": version}, "predict_args": {}}})
if self.args.gpu:
self.args.gpu = self.args.gpu.split(',')
return self.modules_info
def execute(self, argv):
self.show_in_help = True
self.description = "Start Module Serving or Bert Service for online predicting."
self.parser = argparse.ArgumentParser(
description=self.__class__.__doc__, prog='hub serving', usage='%(prog)s', add_help=True)
self.parser.add_argument("command")
self.parser.add_argument("sub_command")
self.parser.add_argument("bert_service", nargs="?")
self.sub_parse = self.parser.add_mutually_exclusive_group(required=False)
self.parser.add_argument("--use_gpu", action="store_true", default=False)
self.parser.add_argument("--use_multiprocess", action="store_true", default=False)
self.parser.add_argument("--modules", "-m", nargs="+")
self.parser.add_argument("--config", "-c", nargs="?")
self.parser.add_argument("--port", "-p", nargs="?", default=8866)
self.parser.add_argument("--gpu", "-i", nargs="?", default='0')
self.parser.add_argument("--use_singleprocess", action="store_true", default=False)
self.parser.add_argument("--modules_info", "-mi", default={}, type=json.loads)
self.parser.add_argument("--workers", "-w", nargs="?", default=number_of_workers())
try:
self.args = self.parser.parse_args()
except:
ServingCommand.show_help()
return False
if self.args.sub_command == "start":
if self.args.bert_service == "bert_service":
ServingCommand.start_bert_serving(self.args)
else:
self.parse_args()
self.start_serving()
elif self.args.sub_command == "stop":
if self.args.bert_service == "bert_service":
log.logger.warning("Please stop Bert Service by kill process by yourself")
elif self.args.bert_service is None:
self.stop_serving(port=self.args.port)
else:
ServingCommand.show_help() | if os.path.exists(filepath):
os.remove(filepath) | random_line_split |
serving.py | # coding:utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import json
import multiprocessing
import time
import signal
import paddlehub as hub
from paddlehub.commands import register
from paddlehub.serving import app_compat as app
from paddlehub.env import CONF_HOME
from paddlehub.serving.http_server import run_all, StandaloneApplication
from paddlehub.utils import log
from paddlehub.utils.utils import is_port_occupied
from paddlehub.server.server import CacheUpdater
def number_of_workers():
'''
Get suitable quantity of workers based on empirical formula.
'''
return (multiprocessing.cpu_count() * 2) + 1
def pid_is_exist(pid: int):
'''
Try to kill process by PID.
Args:
pid(int): PID of process to be killed.
Returns:
True if PID will be killed.
Examples:
.. code-block:: python
pid_is_exist(pid=8866)
'''
try:
os.kill(pid, 0)
except:
return False
else:
return True
@register(name='hub.serving', description='Start Module Serving or Bert Service for online predicting.')
class ServingCommand:
name = "serving"
module_list = []
def dump_pid_file(self):
'''
Write PID info to file.
'''
pid = os.getpid()
filepath = os.path.join(CONF_HOME, "serving_" + str(self.args.port) + ".json")
if os.path.exists(filepath):
os.remove(filepath)
with open(filepath, "w") as fp:
info = {"pid": pid, "module": self.args.modules, "start_time": time.time()}
json.dump(info, fp)
@staticmethod
def load_pid_file(filepath: str, port: int = None):
'''
Read PID info from file.
'''
if port is None:
port = os.path.basename(filepath).split(".")[0].split("_")[1]
if not os.path.exists(filepath):
log.logger.error(
"PaddleHub Serving config file is not exists, please confirm the port [%s] you specified is correct." %
port)
return False
with open(filepath, "r") as fp:
info = json.load(fp)
return info
def stop_serving(self, port: int):
'''
Stop PaddleHub-Serving by port.
'''
filepath = os.path.join(CONF_HOME, "serving_" + str(port) + ".json")
info = self.load_pid_file(filepath, port)
if info is False:
return
pid = info["pid"]
module = info["module"]
start_time = info["start_time"]
CacheUpdater("hub_serving_stop", module=module, addition={"period_time": time.time() - start_time}).start()
if os.path.exists(filepath):
os.remove(filepath)
if not pid_is_exist(pid):
log.logger.info("PaddleHub Serving has been stopped.")
return
log.logger.info("PaddleHub Serving will stop.")
if platform.system() == "Windows":
os.kill(pid, signal.SIGTERM)
else:
try:
os.killpg(pid, signal.SIGTERM)
except ProcessLookupError:
os.kill(pid, signal.SIGTERM)
@staticmethod
def start_bert_serving(args):
'''
Start bert serving server.
'''
if platform.system() != "Linux":
log.logger.error("Error. Bert Service only support linux.")
return False
if is_port_occupied("127.0.0.1", args.port) is True:
log.logger.error("Port %s is occupied, please change it." % args.port)
return False
from paddle_gpu_serving.run import BertServer
bs = BertServer(with_gpu=args.use_gpu)
bs.with_model(model_name=args.modules[0])
CacheUpdater("hub_bert_service", module=args.modules[0], version="0.0.0").start()
bs.run(gpu_index=args.gpu, port=int(args.port))
def preinstall_modules(self):
'''
Install module by PaddleHub and get info of this module.
'''
for key, value in self.modules_info.items():
init_args = value["init_args"]
CacheUpdater("hub_serving_start", module=key, version=init_args.get("version", "0.0.0")).start()
if "directory" not in init_args:
init_args.update({"name": key})
m = hub.Module(**init_args)
method_name = m.serving_func_name
if method_name is None:
raise RuntimeError("{} cannot be use for " "predicting".format(key))
exit(1)
serving_method = getattr(m, method_name)
category = str(m.type).split("/")[0].upper()
self.modules_info[key].update({
"method_name": method_name,
"version": m.version,
"category": category,
"module": m,
"name": m.name,
"serving_method": serving_method
})
def start_app_with_args(self):
'''
Start one PaddleHub-Serving instance by arguments with gunicorn.
'''
module = self.modules_info
if module is not None:
port = self.args.port
if is_port_occupied("127.0.0.1", port) is True:
log.logger.error("Port %s is occupied, please change it." % port)
return False
self.preinstall_modules()
options = {"bind": "0.0.0.0:%s" % port, "workers": self.args.workers}
self.dump_pid_file()
StandaloneApplication(app.create_app(init_flag=False, configs=self.modules_info), options).run()
else:
log.logger.error("Lack of necessary parameters!")
def start_zmq_serving_with_args(self):
'''
Start one PaddleHub-Serving instance by arguments with zmq.
'''
if self.modules_info is not None:
for module, info in self.modules_info.items():
CacheUpdater("hub_serving_start", module=module, version=info['init_args']['version']).start()
front_port = self.args.port
if is_port_occupied("127.0.0.1", front_port) is True:
log.logger.error("Port %s is occupied, please change it." % front_port)
return False
back_port = int(front_port) + 1
for index in range(100):
if not is_port_occupied("127.0.0.1", back_port):
break
else:
back_port = int(back_port) + 1
else:
raise RuntimeError(
"Port from %s to %s is occupied, please use another port" % (int(front_port) + 1, back_port))
self.dump_pid_file()
run_all(self.modules_info, self.args.gpu, front_port, back_port)
else:
log.logger.error("Lack of necessary parameters!")
def start_single_app_with_args(self):
'''
Start one PaddleHub-Serving instance by arguments with flask.
'''
module = self.modules_info
if module is not None:
port = self.args.port
if is_port_occupied("127.0.0.1", port) is True:
log.logger.error("Port %s is occupied, please change it." % port)
return False
self.preinstall_modules()
self.dump_pid_file()
app.run(configs=self.modules_info, port=port)
else:
log.logger.error("Lack of necessary parameters!")
def start_serving(self):
|
@staticmethod
def show_help():
str = "serving <option>\n"
str += "\tManage PaddleHub Serving.\n"
str += "sub command:\n"
str += "1. start\n"
str += "\tStart PaddleHub Serving.\n"
str += "2. stop\n"
str += "\tStop PaddleHub Serving.\n"
str += "3. start bert_service\n"
str += "\tStart Bert Service.\n"
str += "\n"
str += "[start] option:\n"
str += "--modules/-m [module1==version, module2==version...]\n"
str += "\tPre-install modules via the parameter list.\n"
str += "--port/-p XXXX\n"
str += "\tUse port XXXX for serving.\n"
str += "--use_multiprocess\n"
str += "\tChoose multoprocess mode, cannot be use on Windows.\n"
str += "--modules_info\n"
str += "\tSet module config in PaddleHub Serving."
str += "--config/-c file_path\n"
str += "\tUse configs in file to start PaddleHub Serving. "
str += "Other parameters will be ignored if you specify the parameter.\n"
str += "\n"
str += "[stop] option:\n"
str += "--port/-p XXXX\n"
str += "\tStop PaddleHub Serving on port XXXX safely.\n"
str += "\n"
str += "[start bert_service] option:\n"
str += "--modules/-m\n"
str += "\tPre-install modules via the parameter.\n"
str += "--port/-p XXXX\n"
str += "\tUse port XXXX for serving.\n"
str += "--use_gpu\n"
str += "\tUse gpu for predicting if specifies the parameter.\n"
str += "--gpu\n"
str += "\tSpecify the GPU devices to use.\n"
print(str)
def parse_args(self):
if self.args.config is not None:
if os.path.exists(self.args.config):
with open(self.args.config, "r") as fp:
# self.args.config = json.load(fp)
self.args_config = json.load(fp)
self.args.use_gpu = self.args_config.get('use_gpu', False)
self.args.use_multiprocess = self.args_config.get('use_multiprocess', False)
self.modules_info = self.args_config["modules_info"]
self.args.port = self.args_config.get('port', 8866)
if self.args.use_gpu:
self.args.gpu = self.args_config.get('gpu', '0')
else:
self.args.gpu = self.args_config.get('gpu', None)
self.args.use_gpu = self.args_config.get('use_gpu', False)
if self.args.use_multiprocess:
self.args.workers = self.args_config.get('workers', number_of_workers())
else:
self.args.workers = self.args_config.get('workers', None)
else:
raise RuntimeError("{} not exists.".format(self.args.config))
exit(1)
else:
self.modules_info = {}
for item in self.args.modules:
version = None
if "==" in item:
module = item.split("==")[0]
version = item.split("==")[1]
else:
module = item
self.modules_info.update({module: {"init_args": {"version": version}, "predict_args": {}}})
if self.args.gpu:
self.args.gpu = self.args.gpu.split(',')
return self.modules_info
def execute(self, argv):
self.show_in_help = True
self.description = "Start Module Serving or Bert Service for online predicting."
self.parser = argparse.ArgumentParser(
description=self.__class__.__doc__, prog='hub serving', usage='%(prog)s', add_help=True)
self.parser.add_argument("command")
self.parser.add_argument("sub_command")
self.parser.add_argument("bert_service", nargs="?")
self.sub_parse = self.parser.add_mutually_exclusive_group(required=False)
self.parser.add_argument("--use_gpu", action="store_true", default=False)
self.parser.add_argument("--use_multiprocess", action="store_true", default=False)
self.parser.add_argument("--modules", "-m", nargs="+")
self.parser.add_argument("--config", "-c", nargs="?")
self.parser.add_argument("--port", "-p", nargs="?", default=8866)
self.parser.add_argument("--gpu", "-i", nargs="?", default='0')
self.parser.add_argument("--use_singleprocess", action="store_true", default=False)
self.parser.add_argument("--modules_info", "-mi", default={}, type=json.loads)
self.parser.add_argument("--workers", "-w", nargs="?", default=number_of_workers())
try:
self.args = self.parser.parse_args()
except:
ServingCommand.show_help()
return False
if self.args.sub_command == "start":
if self.args.bert_service == "bert_service":
ServingCommand.start_bert_serving(self.args)
else:
self.parse_args()
self.start_serving()
elif self.args.sub_command == "stop":
if self.args.bert_service == "bert_service":
log.logger.warning("Please stop Bert Service by kill process by yourself")
elif self.args.bert_service is None:
self.stop_serving(port=self.args.port)
else:
ServingCommand.show_help()
| '''
Start PaddleHub-Serving with flask and gunicorn
'''
if self.args.use_gpu:
if self.args.use_multiprocess:
log.logger.warning('`use_multiprocess` will be ignored if specify `use_gpu`.')
self.start_zmq_serving_with_args()
else:
if self.args.use_multiprocess:
if platform.system() == "Windows":
log.logger.warning(
"Warning: Windows cannot use multiprocess working mode, PaddleHub Serving will switch to single process mode"
)
self.start_single_app_with_args()
else:
self.start_app_with_args()
else:
self.start_single_app_with_args() | identifier_body |
serving.py | # coding:utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import json
import multiprocessing
import time
import signal
import paddlehub as hub
from paddlehub.commands import register
from paddlehub.serving import app_compat as app
from paddlehub.env import CONF_HOME
from paddlehub.serving.http_server import run_all, StandaloneApplication
from paddlehub.utils import log
from paddlehub.utils.utils import is_port_occupied
from paddlehub.server.server import CacheUpdater
def number_of_workers():
'''
Get suitable quantity of workers based on empirical formula.
'''
return (multiprocessing.cpu_count() * 2) + 1
def pid_is_exist(pid: int):
'''
Try to kill process by PID.
Args:
pid(int): PID of process to be killed.
Returns:
True if PID will be killed.
Examples:
.. code-block:: python
pid_is_exist(pid=8866)
'''
try:
os.kill(pid, 0)
except:
return False
else:
return True
@register(name='hub.serving', description='Start Module Serving or Bert Service for online predicting.')
class ServingCommand:
name = "serving"
module_list = []
def dump_pid_file(self):
'''
Write PID info to file.
'''
pid = os.getpid()
filepath = os.path.join(CONF_HOME, "serving_" + str(self.args.port) + ".json")
if os.path.exists(filepath):
os.remove(filepath)
with open(filepath, "w") as fp:
info = {"pid": pid, "module": self.args.modules, "start_time": time.time()}
json.dump(info, fp)
@staticmethod
def load_pid_file(filepath: str, port: int = None):
'''
Read PID info from file.
'''
if port is None:
port = os.path.basename(filepath).split(".")[0].split("_")[1]
if not os.path.exists(filepath):
log.logger.error(
"PaddleHub Serving config file is not exists, please confirm the port [%s] you specified is correct." %
port)
return False
with open(filepath, "r") as fp:
info = json.load(fp)
return info
def stop_serving(self, port: int):
'''
Stop PaddleHub-Serving by port.
'''
filepath = os.path.join(CONF_HOME, "serving_" + str(port) + ".json")
info = self.load_pid_file(filepath, port)
if info is False:
return
pid = info["pid"]
module = info["module"]
start_time = info["start_time"]
CacheUpdater("hub_serving_stop", module=module, addition={"period_time": time.time() - start_time}).start()
if os.path.exists(filepath):
os.remove(filepath)
if not pid_is_exist(pid):
log.logger.info("PaddleHub Serving has been stopped.")
return
log.logger.info("PaddleHub Serving will stop.")
if platform.system() == "Windows":
os.kill(pid, signal.SIGTERM)
else:
try:
os.killpg(pid, signal.SIGTERM)
except ProcessLookupError:
os.kill(pid, signal.SIGTERM)
@staticmethod
def | (args):
'''
Start bert serving server.
'''
if platform.system() != "Linux":
log.logger.error("Error. Bert Service only support linux.")
return False
if is_port_occupied("127.0.0.1", args.port) is True:
log.logger.error("Port %s is occupied, please change it." % args.port)
return False
from paddle_gpu_serving.run import BertServer
bs = BertServer(with_gpu=args.use_gpu)
bs.with_model(model_name=args.modules[0])
CacheUpdater("hub_bert_service", module=args.modules[0], version="0.0.0").start()
bs.run(gpu_index=args.gpu, port=int(args.port))
def preinstall_modules(self):
'''
Install module by PaddleHub and get info of this module.
'''
for key, value in self.modules_info.items():
init_args = value["init_args"]
CacheUpdater("hub_serving_start", module=key, version=init_args.get("version", "0.0.0")).start()
if "directory" not in init_args:
init_args.update({"name": key})
m = hub.Module(**init_args)
method_name = m.serving_func_name
if method_name is None:
raise RuntimeError("{} cannot be use for " "predicting".format(key))
exit(1)
serving_method = getattr(m, method_name)
category = str(m.type).split("/")[0].upper()
self.modules_info[key].update({
"method_name": method_name,
"version": m.version,
"category": category,
"module": m,
"name": m.name,
"serving_method": serving_method
})
def start_app_with_args(self):
'''
Start one PaddleHub-Serving instance by arguments with gunicorn.
'''
module = self.modules_info
if module is not None:
port = self.args.port
if is_port_occupied("127.0.0.1", port) is True:
log.logger.error("Port %s is occupied, please change it." % port)
return False
self.preinstall_modules()
options = {"bind": "0.0.0.0:%s" % port, "workers": self.args.workers}
self.dump_pid_file()
StandaloneApplication(app.create_app(init_flag=False, configs=self.modules_info), options).run()
else:
log.logger.error("Lack of necessary parameters!")
def start_zmq_serving_with_args(self):
'''
Start one PaddleHub-Serving instance by arguments with zmq.
'''
if self.modules_info is not None:
for module, info in self.modules_info.items():
CacheUpdater("hub_serving_start", module=module, version=info['init_args']['version']).start()
front_port = self.args.port
if is_port_occupied("127.0.0.1", front_port) is True:
log.logger.error("Port %s is occupied, please change it." % front_port)
return False
back_port = int(front_port) + 1
for index in range(100):
if not is_port_occupied("127.0.0.1", back_port):
break
else:
back_port = int(back_port) + 1
else:
raise RuntimeError(
"Port from %s to %s is occupied, please use another port" % (int(front_port) + 1, back_port))
self.dump_pid_file()
run_all(self.modules_info, self.args.gpu, front_port, back_port)
else:
log.logger.error("Lack of necessary parameters!")
def start_single_app_with_args(self):
'''
Start one PaddleHub-Serving instance by arguments with flask.
'''
module = self.modules_info
if module is not None:
port = self.args.port
if is_port_occupied("127.0.0.1", port) is True:
log.logger.error("Port %s is occupied, please change it." % port)
return False
self.preinstall_modules()
self.dump_pid_file()
app.run(configs=self.modules_info, port=port)
else:
log.logger.error("Lack of necessary parameters!")
def start_serving(self):
'''
Start PaddleHub-Serving with flask and gunicorn
'''
if self.args.use_gpu:
if self.args.use_multiprocess:
log.logger.warning('`use_multiprocess` will be ignored if specify `use_gpu`.')
self.start_zmq_serving_with_args()
else:
if self.args.use_multiprocess:
if platform.system() == "Windows":
log.logger.warning(
"Warning: Windows cannot use multiprocess working mode, PaddleHub Serving will switch to single process mode"
)
self.start_single_app_with_args()
else:
self.start_app_with_args()
else:
self.start_single_app_with_args()
@staticmethod
def show_help():
str = "serving <option>\n"
str += "\tManage PaddleHub Serving.\n"
str += "sub command:\n"
str += "1. start\n"
str += "\tStart PaddleHub Serving.\n"
str += "2. stop\n"
str += "\tStop PaddleHub Serving.\n"
str += "3. start bert_service\n"
str += "\tStart Bert Service.\n"
str += "\n"
str += "[start] option:\n"
str += "--modules/-m [module1==version, module2==version...]\n"
str += "\tPre-install modules via the parameter list.\n"
str += "--port/-p XXXX\n"
str += "\tUse port XXXX for serving.\n"
str += "--use_multiprocess\n"
str += "\tChoose multoprocess mode, cannot be use on Windows.\n"
str += "--modules_info\n"
str += "\tSet module config in PaddleHub Serving."
str += "--config/-c file_path\n"
str += "\tUse configs in file to start PaddleHub Serving. "
str += "Other parameters will be ignored if you specify the parameter.\n"
str += "\n"
str += "[stop] option:\n"
str += "--port/-p XXXX\n"
str += "\tStop PaddleHub Serving on port XXXX safely.\n"
str += "\n"
str += "[start bert_service] option:\n"
str += "--modules/-m\n"
str += "\tPre-install modules via the parameter.\n"
str += "--port/-p XXXX\n"
str += "\tUse port XXXX for serving.\n"
str += "--use_gpu\n"
str += "\tUse gpu for predicting if specifies the parameter.\n"
str += "--gpu\n"
str += "\tSpecify the GPU devices to use.\n"
print(str)
def parse_args(self):
if self.args.config is not None:
if os.path.exists(self.args.config):
with open(self.args.config, "r") as fp:
# self.args.config = json.load(fp)
self.args_config = json.load(fp)
self.args.use_gpu = self.args_config.get('use_gpu', False)
self.args.use_multiprocess = self.args_config.get('use_multiprocess', False)
self.modules_info = self.args_config["modules_info"]
self.args.port = self.args_config.get('port', 8866)
if self.args.use_gpu:
self.args.gpu = self.args_config.get('gpu', '0')
else:
self.args.gpu = self.args_config.get('gpu', None)
self.args.use_gpu = self.args_config.get('use_gpu', False)
if self.args.use_multiprocess:
self.args.workers = self.args_config.get('workers', number_of_workers())
else:
self.args.workers = self.args_config.get('workers', None)
else:
raise RuntimeError("{} not exists.".format(self.args.config))
exit(1)
else:
self.modules_info = {}
for item in self.args.modules:
version = None
if "==" in item:
module = item.split("==")[0]
version = item.split("==")[1]
else:
module = item
self.modules_info.update({module: {"init_args": {"version": version}, "predict_args": {}}})
if self.args.gpu:
self.args.gpu = self.args.gpu.split(',')
return self.modules_info
def execute(self, argv):
self.show_in_help = True
self.description = "Start Module Serving or Bert Service for online predicting."
self.parser = argparse.ArgumentParser(
description=self.__class__.__doc__, prog='hub serving', usage='%(prog)s', add_help=True)
self.parser.add_argument("command")
self.parser.add_argument("sub_command")
self.parser.add_argument("bert_service", nargs="?")
self.sub_parse = self.parser.add_mutually_exclusive_group(required=False)
self.parser.add_argument("--use_gpu", action="store_true", default=False)
self.parser.add_argument("--use_multiprocess", action="store_true", default=False)
self.parser.add_argument("--modules", "-m", nargs="+")
self.parser.add_argument("--config", "-c", nargs="?")
self.parser.add_argument("--port", "-p", nargs="?", default=8866)
self.parser.add_argument("--gpu", "-i", nargs="?", default='0')
self.parser.add_argument("--use_singleprocess", action="store_true", default=False)
self.parser.add_argument("--modules_info", "-mi", default={}, type=json.loads)
self.parser.add_argument("--workers", "-w", nargs="?", default=number_of_workers())
try:
self.args = self.parser.parse_args()
except:
ServingCommand.show_help()
return False
if self.args.sub_command == "start":
if self.args.bert_service == "bert_service":
ServingCommand.start_bert_serving(self.args)
else:
self.parse_args()
self.start_serving()
elif self.args.sub_command == "stop":
if self.args.bert_service == "bert_service":
log.logger.warning("Please stop Bert Service by kill process by yourself")
elif self.args.bert_service is None:
self.stop_serving(port=self.args.port)
else:
ServingCommand.show_help()
| start_bert_serving | identifier_name |
main.rs | use clap::{App, AppSettings, Arg, SubCommand};
use default_boxed::DefaultBoxed;
#[derive(DefaultBoxed)]
struct Outer<'a, 'b> {
inner: HeapApp<'a, 'b>,
}
struct HeapApp<'a, 'b> {
app: App<'a, 'b>,
}
impl<'a, 'b> Default for HeapApp<'a, 'b> {
fn | () -> Self {
let mut app = App::new("servicemanagement1")
.setting(clap::AppSettings::ColoredHelp)
.author("Sebastian Thiel <byronimo@gmail.com>")
.version("0.1.0-20200619")
.about("Google Service Management allows service producers to publish their services on Google Cloud Platform so that they can be discovered and used by service consumers.")
.after_help("All documentation details can be found at <TODO figure out URL>")
.arg(Arg::with_name("scope")
.long("scope")
.help("Specify the authentication method should be executed in. Each scope requires the user to grant this application permission to use it. If unset, it defaults to the shortest scope url for a particular method.")
.multiple(true)
.takes_value(true))
.arg(Arg::with_name("folder")
.long("config-dir")
.help("A directory into which we will store our persistent data. Defaults to a user-writable directory that we will create during the first invocation." )
.multiple(false)
.takes_value(true))
.arg(Arg::with_name("debug")
.long("debug")
.help("Provide more output to aid with debugging")
.multiple(false)
.takes_value(false));
let mut operations0 = SubCommand::with_name("operations")
.setting(AppSettings::ColoredHelp)
.about("methods: get and list");
{
let mcmd = SubCommand::with_name("get").about("Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.");
operations0 = operations0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list")
.about("Lists service operations that match the specified filter in the request.");
operations0 = operations0.subcommand(mcmd);
}
let mut services0 = SubCommand::with_name("services")
.setting(AppSettings::ColoredHelp)
.about("methods: create, delete, disable, enable, generate_config_report, get, get_config, get_iam_policy, list, set_iam_policy, test_iam_permissions and undelete");
{
let mcmd = SubCommand::with_name("create").about("Creates a new managed service.\n\nA managed service is immutable, and is subject to mandatory 30-day\ndata retention. You cannot move a service or recreate it within 30 days\nafter deletion.\n\nOne producer project can own no more than 500 services. For security and\nreliability purposes, a production service should be hosted in a\ndedicated producer project.\n\nOperation<response: ManagedService>");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("delete").about("Deletes a managed service. This method will change the service to the\n`Soft-Delete` state for 30 days. Within this period, service producers may\ncall UndeleteService to restore the service.\nAfter 30 days, the service will be permanently deleted.\n\nOperation<response: google.protobuf.Empty>");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("disable").about("Disables a service for a project, so it can no longer be\nbe used for the project. It prevents accidental usage that may cause\nunexpected billing charges or security leaks.\n\nOperation<response: DisableServiceResponse>");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("enable").about("Enables a service for a project, so it can be used\nfor the project. See\n[Cloud Auth Guide](https://cloud.google.com/docs/authentication) for\nmore information.\n\nOperation<response: EnableServiceResponse>");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("generate_config_report").about("Generates and returns a report (errors, warnings and changes from\nexisting configurations) associated with\nGenerateConfigReportRequest.new_value\n\nIf GenerateConfigReportRequest.old_value is specified,\nGenerateConfigReportRequest will contain a single ChangeReport based on the\ncomparison between GenerateConfigReportRequest.new_value and\nGenerateConfigReportRequest.old_value.\nIf GenerateConfigReportRequest.old_value is not specified, this method\nwill compare GenerateConfigReportRequest.new_value with the last pushed\nservice configuration.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get").about(
"Gets a managed service. Authentication is required unless the service is\npublic.",
);
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get_config")
.about("Gets a service configuration (version) for a managed service.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get_iam_policy").about("Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("Lists managed services.\n\nReturns all public services. For authenticated users, also returns all\nservices the calling user has \"servicemanagement.services.get\" permission\nfor.\n\n**BETA:** If the caller specifies the `consumer_id`, it returns only the\nservices enabled on the consumer. The `consumer_id` must have the format\nof \"project:{PROJECT-ID}\".");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("set_iam_policy").about("Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("test_iam_permissions").about("Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("undelete").about("Revives a previously deleted managed service. The method restores the\nservice using the configuration at the time the service was deleted.\nThe target service must exist and must have been deleted within the\nlast 30 days.\n\nOperation<response: UndeleteServiceResponse>");
services0 = services0.subcommand(mcmd);
}
let mut configs1 = SubCommand::with_name("configs")
.setting(AppSettings::ColoredHelp)
.about("methods: create, get, list and submit");
{
let mcmd = SubCommand::with_name("create").about("Creates a new service configuration (version) for a managed service.\nThis method only stores the service configuration. To roll out the service\nconfiguration to backend systems please call\nCreateServiceRollout.\n\nOnly the 100 most recent service configurations and ones referenced by\nexisting rollouts are kept for each service. The rest will be deleted\neventually.");
configs1 = configs1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get")
.about("Gets a service configuration (version) for a managed service.");
configs1 = configs1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("Lists the history of the service configuration for a managed service,\nfrom the newest to the oldest.");
configs1 = configs1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("submit").about("Creates a new service configuration (version) for a managed service based\non\nuser-supplied configuration source files (for example: OpenAPI\nSpecification). This method stores the source configurations as well as the\ngenerated service configuration. To rollout the service configuration to\nother services,\nplease call CreateServiceRollout.\n\nOnly the 100 most recent configuration sources and ones referenced by\nexisting service configurtions are kept for each service. The rest will be\ndeleted eventually.\n\nOperation<response: SubmitConfigSourceResponse>");
configs1 = configs1.subcommand(mcmd);
}
let mut consumers1 = SubCommand::with_name("consumers")
.setting(AppSettings::ColoredHelp)
.about("methods: get_iam_policy, set_iam_policy and test_iam_permissions");
{
let mcmd = SubCommand::with_name("get_iam_policy").about("Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.");
consumers1 = consumers1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("set_iam_policy").about("Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.");
consumers1 = consumers1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("test_iam_permissions").about("Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.");
consumers1 = consumers1.subcommand(mcmd);
}
let mut rollouts1 = SubCommand::with_name("rollouts")
.setting(AppSettings::ColoredHelp)
.about("methods: create, get and list");
{
let mcmd = SubCommand::with_name("create").about("Creates a new service configuration rollout. Based on rollout, the\nGoogle Service Management will roll out the service configurations to\ndifferent backend services. For example, the logging configuration will be\npushed to Google Cloud Logging.\n\nPlease note that any previous pending and running Rollouts and associated\nOperations will be automatically cancelled so that the latest Rollout will\nnot be blocked by previous Rollouts.\n\nOnly the 100 most recent (in any state) and the last 10 successful (if not\nalready part of the set of 100 most recent) rollouts are kept for each\nservice. The rest will be deleted eventually.\n\nOperation<response: Rollout>");
rollouts1 = rollouts1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get").about("Gets a service configuration rollout.");
rollouts1 = rollouts1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("Lists the history of the service configuration rollouts for a managed\nservice, from the newest to the oldest.");
rollouts1 = rollouts1.subcommand(mcmd);
}
services0 = services0.subcommand(rollouts1);
services0 = services0.subcommand(consumers1);
services0 = services0.subcommand(configs1);
app = app.subcommand(services0);
app = app.subcommand(operations0);
Self { app }
}
}
use google_servicemanagement1 as api;
fn main() {
// TODO: set homedir afterwards, once the address is unmovable, or use Pin for the very first time
// to allow a self-referential structure :D!
let _home_dir = dirs::config_dir()
.expect("configuration directory can be obtained")
.join("google-service-cli");
let outer = Outer::default_boxed();
let app = outer.inner.app;
let _matches = app.get_matches();
}
| default | identifier_name |
main.rs | use clap::{App, AppSettings, Arg, SubCommand};
use default_boxed::DefaultBoxed;
#[derive(DefaultBoxed)]
struct Outer<'a, 'b> {
inner: HeapApp<'a, 'b>,
}
struct HeapApp<'a, 'b> {
app: App<'a, 'b>,
}
impl<'a, 'b> Default for HeapApp<'a, 'b> {
fn default() -> Self {
let mut app = App::new("servicemanagement1")
.setting(clap::AppSettings::ColoredHelp)
.author("Sebastian Thiel <byronimo@gmail.com>")
.version("0.1.0-20200619")
.about("Google Service Management allows service producers to publish their services on Google Cloud Platform so that they can be discovered and used by service consumers.")
.after_help("All documentation details can be found at <TODO figure out URL>")
.arg(Arg::with_name("scope")
.long("scope")
.help("Specify the authentication method should be executed in. Each scope requires the user to grant this application permission to use it. If unset, it defaults to the shortest scope url for a particular method.")
.multiple(true)
.takes_value(true))
.arg(Arg::with_name("folder")
.long("config-dir")
.help("A directory into which we will store our persistent data. Defaults to a user-writable directory that we will create during the first invocation." )
.multiple(false)
.takes_value(true))
.arg(Arg::with_name("debug")
.long("debug")
.help("Provide more output to aid with debugging")
.multiple(false)
.takes_value(false));
let mut operations0 = SubCommand::with_name("operations")
.setting(AppSettings::ColoredHelp)
.about("methods: get and list");
{
let mcmd = SubCommand::with_name("get").about("Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.");
operations0 = operations0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list")
.about("Lists service operations that match the specified filter in the request.");
operations0 = operations0.subcommand(mcmd);
}
let mut services0 = SubCommand::with_name("services")
.setting(AppSettings::ColoredHelp)
.about("methods: create, delete, disable, enable, generate_config_report, get, get_config, get_iam_policy, list, set_iam_policy, test_iam_permissions and undelete");
{
let mcmd = SubCommand::with_name("create").about("Creates a new managed service.\n\nA managed service is immutable, and is subject to mandatory 30-day\ndata retention. You cannot move a service or recreate it within 30 days\nafter deletion.\n\nOne producer project can own no more than 500 services. For security and\nreliability purposes, a production service should be hosted in a\ndedicated producer project.\n\nOperation<response: ManagedService>");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("delete").about("Deletes a managed service. This method will change the service to the\n`Soft-Delete` state for 30 days. Within this period, service producers may\ncall UndeleteService to restore the service.\nAfter 30 days, the service will be permanently deleted.\n\nOperation<response: google.protobuf.Empty>");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("disable").about("Disables a service for a project, so it can no longer be\nbe used for the project. It prevents accidental usage that may cause\nunexpected billing charges or security leaks.\n\nOperation<response: DisableServiceResponse>");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("enable").about("Enables a service for a project, so it can be used\nfor the project. See\n[Cloud Auth Guide](https://cloud.google.com/docs/authentication) for\nmore information.\n\nOperation<response: EnableServiceResponse>");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("generate_config_report").about("Generates and returns a report (errors, warnings and changes from\nexisting configurations) associated with\nGenerateConfigReportRequest.new_value\n\nIf GenerateConfigReportRequest.old_value is specified,\nGenerateConfigReportRequest will contain a single ChangeReport based on the\ncomparison between GenerateConfigReportRequest.new_value and\nGenerateConfigReportRequest.old_value.\nIf GenerateConfigReportRequest.old_value is not specified, this method\nwill compare GenerateConfigReportRequest.new_value with the last pushed\nservice configuration.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get").about(
"Gets a managed service. Authentication is required unless the service is\npublic.",
);
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get_config")
.about("Gets a service configuration (version) for a managed service.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get_iam_policy").about("Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("Lists managed services.\n\nReturns all public services. For authenticated users, also returns all\nservices the calling user has \"servicemanagement.services.get\" permission\nfor.\n\n**BETA:** If the caller specifies the `consumer_id`, it returns only the\nservices enabled on the consumer. The `consumer_id` must have the format\nof \"project:{PROJECT-ID}\".");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("set_iam_policy").about("Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("test_iam_permissions").about("Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("undelete").about("Revives a previously deleted managed service. The method restores the\nservice using the configuration at the time the service was deleted.\nThe target service must exist and must have been deleted within the\nlast 30 days.\n\nOperation<response: UndeleteServiceResponse>");
services0 = services0.subcommand(mcmd);
}
let mut configs1 = SubCommand::with_name("configs")
.setting(AppSettings::ColoredHelp)
.about("methods: create, get, list and submit");
{
let mcmd = SubCommand::with_name("create").about("Creates a new service configuration (version) for a managed service.\nThis method only stores the service configuration. To roll out the service\nconfiguration to backend systems please call\nCreateServiceRollout.\n\nOnly the 100 most recent service configurations and ones referenced by\nexisting rollouts are kept for each service. The rest will be deleted\neventually.");
configs1 = configs1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get")
.about("Gets a service configuration (version) for a managed service.");
configs1 = configs1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("Lists the history of the service configuration for a managed service,\nfrom the newest to the oldest.");
configs1 = configs1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("submit").about("Creates a new service configuration (version) for a managed service based\non\nuser-supplied configuration source files (for example: OpenAPI\nSpecification). This method stores the source configurations as well as the\ngenerated service configuration. To rollout the service configuration to\nother services,\nplease call CreateServiceRollout.\n\nOnly the 100 most recent configuration sources and ones referenced by\nexisting service configurtions are kept for each service. The rest will be\ndeleted eventually.\n\nOperation<response: SubmitConfigSourceResponse>");
configs1 = configs1.subcommand(mcmd);
}
let mut consumers1 = SubCommand::with_name("consumers")
.setting(AppSettings::ColoredHelp)
.about("methods: get_iam_policy, set_iam_policy and test_iam_permissions");
{
let mcmd = SubCommand::with_name("get_iam_policy").about("Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.");
consumers1 = consumers1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("set_iam_policy").about("Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.");
consumers1 = consumers1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("test_iam_permissions").about("Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.");
consumers1 = consumers1.subcommand(mcmd);
}
let mut rollouts1 = SubCommand::with_name("rollouts")
.setting(AppSettings::ColoredHelp)
.about("methods: create, get and list");
{
let mcmd = SubCommand::with_name("create").about("Creates a new service configuration rollout. Based on rollout, the\nGoogle Service Management will roll out the service configurations to\ndifferent backend services. For example, the logging configuration will be\npushed to Google Cloud Logging.\n\nPlease note that any previous pending and running Rollouts and associated\nOperations will be automatically cancelled so that the latest Rollout will\nnot be blocked by previous Rollouts.\n\nOnly the 100 most recent (in any state) and the last 10 successful (if not\nalready part of the set of 100 most recent) rollouts are kept for each\nservice. The rest will be deleted eventually.\n\nOperation<response: Rollout>");
rollouts1 = rollouts1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get").about("Gets a service configuration rollout.");
rollouts1 = rollouts1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("Lists the history of the service configuration rollouts for a managed\nservice, from the newest to the oldest.");
rollouts1 = rollouts1.subcommand(mcmd);
}
services0 = services0.subcommand(rollouts1);
services0 = services0.subcommand(consumers1);
services0 = services0.subcommand(configs1);
app = app.subcommand(services0);
app = app.subcommand(operations0);
Self { app }
}
} | use google_servicemanagement1 as api;
fn main() {
// TODO: set homedir afterwards, once the address is unmovable, or use Pin for the very first time
// to allow a self-referential structure :D!
let _home_dir = dirs::config_dir()
.expect("configuration directory can be obtained")
.join("google-service-cli");
let outer = Outer::default_boxed();
let app = outer.inner.app;
let _matches = app.get_matches();
} | random_line_split | |
main.rs | use clap::{App, AppSettings, Arg, SubCommand};
use default_boxed::DefaultBoxed;
#[derive(DefaultBoxed)]
struct Outer<'a, 'b> {
inner: HeapApp<'a, 'b>,
}
struct HeapApp<'a, 'b> {
app: App<'a, 'b>,
}
impl<'a, 'b> Default for HeapApp<'a, 'b> {
fn default() -> Self {
let mut app = App::new("servicemanagement1")
.setting(clap::AppSettings::ColoredHelp)
.author("Sebastian Thiel <byronimo@gmail.com>")
.version("0.1.0-20200619")
.about("Google Service Management allows service producers to publish their services on Google Cloud Platform so that they can be discovered and used by service consumers.")
.after_help("All documentation details can be found at <TODO figure out URL>")
.arg(Arg::with_name("scope")
.long("scope")
.help("Specify the authentication method should be executed in. Each scope requires the user to grant this application permission to use it. If unset, it defaults to the shortest scope url for a particular method.")
.multiple(true)
.takes_value(true))
.arg(Arg::with_name("folder")
.long("config-dir")
.help("A directory into which we will store our persistent data. Defaults to a user-writable directory that we will create during the first invocation." )
.multiple(false)
.takes_value(true))
.arg(Arg::with_name("debug")
.long("debug")
.help("Provide more output to aid with debugging")
.multiple(false)
.takes_value(false));
let mut operations0 = SubCommand::with_name("operations")
.setting(AppSettings::ColoredHelp)
.about("methods: get and list");
{
let mcmd = SubCommand::with_name("get").about("Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.");
operations0 = operations0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list")
.about("Lists service operations that match the specified filter in the request.");
operations0 = operations0.subcommand(mcmd);
}
let mut services0 = SubCommand::with_name("services")
.setting(AppSettings::ColoredHelp)
.about("methods: create, delete, disable, enable, generate_config_report, get, get_config, get_iam_policy, list, set_iam_policy, test_iam_permissions and undelete");
{
let mcmd = SubCommand::with_name("create").about("Creates a new managed service.\n\nA managed service is immutable, and is subject to mandatory 30-day\ndata retention. You cannot move a service or recreate it within 30 days\nafter deletion.\n\nOne producer project can own no more than 500 services. For security and\nreliability purposes, a production service should be hosted in a\ndedicated producer project.\n\nOperation<response: ManagedService>");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("delete").about("Deletes a managed service. This method will change the service to the\n`Soft-Delete` state for 30 days. Within this period, service producers may\ncall UndeleteService to restore the service.\nAfter 30 days, the service will be permanently deleted.\n\nOperation<response: google.protobuf.Empty>");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("disable").about("Disables a service for a project, so it can no longer be\nbe used for the project. It prevents accidental usage that may cause\nunexpected billing charges or security leaks.\n\nOperation<response: DisableServiceResponse>");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("enable").about("Enables a service for a project, so it can be used\nfor the project. See\n[Cloud Auth Guide](https://cloud.google.com/docs/authentication) for\nmore information.\n\nOperation<response: EnableServiceResponse>");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("generate_config_report").about("Generates and returns a report (errors, warnings and changes from\nexisting configurations) associated with\nGenerateConfigReportRequest.new_value\n\nIf GenerateConfigReportRequest.old_value is specified,\nGenerateConfigReportRequest will contain a single ChangeReport based on the\ncomparison between GenerateConfigReportRequest.new_value and\nGenerateConfigReportRequest.old_value.\nIf GenerateConfigReportRequest.old_value is not specified, this method\nwill compare GenerateConfigReportRequest.new_value with the last pushed\nservice configuration.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get").about(
"Gets a managed service. Authentication is required unless the service is\npublic.",
);
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get_config")
.about("Gets a service configuration (version) for a managed service.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get_iam_policy").about("Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("Lists managed services.\n\nReturns all public services. For authenticated users, also returns all\nservices the calling user has \"servicemanagement.services.get\" permission\nfor.\n\n**BETA:** If the caller specifies the `consumer_id`, it returns only the\nservices enabled on the consumer. The `consumer_id` must have the format\nof \"project:{PROJECT-ID}\".");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("set_iam_policy").about("Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("test_iam_permissions").about("Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.");
services0 = services0.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("undelete").about("Revives a previously deleted managed service. The method restores the\nservice using the configuration at the time the service was deleted.\nThe target service must exist and must have been deleted within the\nlast 30 days.\n\nOperation<response: UndeleteServiceResponse>");
services0 = services0.subcommand(mcmd);
}
let mut configs1 = SubCommand::with_name("configs")
.setting(AppSettings::ColoredHelp)
.about("methods: create, get, list and submit");
{
let mcmd = SubCommand::with_name("create").about("Creates a new service configuration (version) for a managed service.\nThis method only stores the service configuration. To roll out the service\nconfiguration to backend systems please call\nCreateServiceRollout.\n\nOnly the 100 most recent service configurations and ones referenced by\nexisting rollouts are kept for each service. The rest will be deleted\neventually.");
configs1 = configs1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get")
.about("Gets a service configuration (version) for a managed service.");
configs1 = configs1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("Lists the history of the service configuration for a managed service,\nfrom the newest to the oldest.");
configs1 = configs1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("submit").about("Creates a new service configuration (version) for a managed service based\non\nuser-supplied configuration source files (for example: OpenAPI\nSpecification). This method stores the source configurations as well as the\ngenerated service configuration. To rollout the service configuration to\nother services,\nplease call CreateServiceRollout.\n\nOnly the 100 most recent configuration sources and ones referenced by\nexisting service configurtions are kept for each service. The rest will be\ndeleted eventually.\n\nOperation<response: SubmitConfigSourceResponse>");
configs1 = configs1.subcommand(mcmd);
}
let mut consumers1 = SubCommand::with_name("consumers")
.setting(AppSettings::ColoredHelp)
.about("methods: get_iam_policy, set_iam_policy and test_iam_permissions");
{
let mcmd = SubCommand::with_name("get_iam_policy").about("Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.");
consumers1 = consumers1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("set_iam_policy").about("Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.");
consumers1 = consumers1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("test_iam_permissions").about("Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.");
consumers1 = consumers1.subcommand(mcmd);
}
let mut rollouts1 = SubCommand::with_name("rollouts")
.setting(AppSettings::ColoredHelp)
.about("methods: create, get and list");
{
let mcmd = SubCommand::with_name("create").about("Creates a new service configuration rollout. Based on rollout, the\nGoogle Service Management will roll out the service configurations to\ndifferent backend services. For example, the logging configuration will be\npushed to Google Cloud Logging.\n\nPlease note that any previous pending and running Rollouts and associated\nOperations will be automatically cancelled so that the latest Rollout will\nnot be blocked by previous Rollouts.\n\nOnly the 100 most recent (in any state) and the last 10 successful (if not\nalready part of the set of 100 most recent) rollouts are kept for each\nservice. The rest will be deleted eventually.\n\nOperation<response: Rollout>");
rollouts1 = rollouts1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("get").about("Gets a service configuration rollout.");
rollouts1 = rollouts1.subcommand(mcmd);
}
{
let mcmd = SubCommand::with_name("list").about("Lists the history of the service configuration rollouts for a managed\nservice, from the newest to the oldest.");
rollouts1 = rollouts1.subcommand(mcmd);
}
services0 = services0.subcommand(rollouts1);
services0 = services0.subcommand(consumers1);
services0 = services0.subcommand(configs1);
app = app.subcommand(services0);
app = app.subcommand(operations0);
Self { app }
}
}
use google_servicemanagement1 as api;
fn main() | {
// TODO: set homedir afterwards, once the address is unmovable, or use Pin for the very first time
// to allow a self-referential structure :D!
let _home_dir = dirs::config_dir()
.expect("configuration directory can be obtained")
.join("google-service-cli");
let outer = Outer::default_boxed();
let app = outer.inner.app;
let _matches = app.get_matches();
} | identifier_body | |
cert.go | // http://golang.org/src/pkg/crypto/tls/generate_cert.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package shared
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/base64"
"encoding/json"
"encoding/pem"
"fmt"
"math/big"
"net"
"net/http"
"os"
"os/user"
"path/filepath"
"time"
"github.com/lxc/incus/shared/api"
)
// KeyPairAndCA returns a CertInfo object with a reference to the key pair and
// (optionally) CA certificate located in the given directory and having the
// given name prefix
//
// The naming conversion for the various files is:
//
// <prefix>.crt -> public key
// <prefix>.key -> private key
// <prefix>.ca -> CA certificate
//
// If no public/private key files are found, a new key pair will be generated
// and saved on disk.
//
// If a CA certificate is found, it will be returned as well as second return
// value (otherwise it will be nil).
func KeyPairAndCA(dir, prefix string, kind CertKind, addHosts bool) (*CertInfo, error) {
certFilename := filepath.Join(dir, prefix+".crt")
keyFilename := filepath.Join(dir, prefix+".key")
// Ensure that the certificate exists, or create a new one if it does
// not.
err := FindOrGenCert(certFilename, keyFilename, kind == CertClient, addHosts)
if err != nil {
return nil, err
}
// Load the certificate.
keypair, err := tls.LoadX509KeyPair(certFilename, keyFilename)
if err != nil {
return nil, err
}
// If available, load the CA data as well.
caFilename := filepath.Join(dir, prefix+".ca")
var ca *x509.Certificate
if PathExists(caFilename) {
ca, err = ReadCert(caFilename)
if err != nil {
return nil, err
}
}
crlFilename := filepath.Join(dir, "ca.crl")
var crl *pkix.CertificateList
if PathExists(crlFilename) {
data, err := os.ReadFile(crlFilename)
if err != nil {
return nil, err
}
crl, err = x509.ParseCRL(data)
if err != nil {
return nil, err
}
}
info := &CertInfo{
keypair: keypair,
ca: ca,
crl: crl,
}
return info, nil
}
// KeyPairFromRaw returns a CertInfo from the raw certificate and key.
func KeyPairFromRaw(certificate []byte, key []byte) (*CertInfo, error) {
keypair, err := tls.X509KeyPair(certificate, key)
if err != nil {
return nil, err
}
return &CertInfo{
keypair: keypair,
}, nil
}
// CertInfo captures TLS certificate information about a certain public/private
// keypair and an optional CA certificate and CRL.
//
// Given LXD's support for PKI setups, these two bits of information are
// normally used and passed around together, so this structure helps with that
// (see doc/security.md for more details).
type CertInfo struct {
keypair tls.Certificate
ca *x509.Certificate
crl *pkix.CertificateList
}
// KeyPair returns the public/private key pair.
func (c *CertInfo) KeyPair() tls.Certificate {
return c.keypair
}
// CA returns the CA certificate.
func (c *CertInfo) CA() *x509.Certificate {
return c.ca
}
// PublicKey is a convenience to encode the underlying public key to ASCII.
func (c *CertInfo) PublicKey() []byte {
data := c.KeyPair().Certificate[0]
return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: data})
}
// PublicKeyX509 is a convenience to return the underlying public key as an *x509.Certificate.
func (c *CertInfo) PublicKeyX509() (*x509.Certificate, error) {
return x509.ParseCertificate(c.KeyPair().Certificate[0])
}
// PrivateKey is a convenience to encode the underlying private key.
func (c *CertInfo) PrivateKey() []byte {
ecKey, ok := c.KeyPair().PrivateKey.(*ecdsa.PrivateKey)
if ok {
data, err := x509.MarshalECPrivateKey(ecKey)
if err != nil {
return nil
}
return pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: data})
}
rsaKey, ok := c.KeyPair().PrivateKey.(*rsa.PrivateKey)
if ok {
data := x509.MarshalPKCS1PrivateKey(rsaKey)
return pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: data})
}
return nil
}
// Fingerprint returns the fingerprint of the public key.
func (c *CertInfo) Fingerprint() string {
fingerprint, err := CertFingerprintStr(string(c.PublicKey()))
// Parsing should never fail, since we generated the cert ourselves,
// but let's check the error for good measure.
if err != nil {
panic("invalid public key material")
}
return fingerprint
}
// CRL returns the certificate revocation list.
func (c *CertInfo) CRL() *pkix.CertificateList {
return c.crl
}
// CertKind defines the kind of certificate to generate from scratch in
// KeyPairAndCA when it's not there.
//
// The two possible kinds are client and server, and they differ in the
// ext-key-usage bitmaps. See GenerateMemCert for more details.
type CertKind int
// Possible kinds of certificates.
const (
CertClient CertKind = iota
CertServer
)
// TestingKeyPair returns CertInfo object initialized with a test keypair. It's
// meant to be used only by tests.
func TestingKeyPair() *CertInfo {
keypair, err := tls.X509KeyPair(testCertPEMBlock, testKeyPEMBlock)
if err != nil {
panic(fmt.Sprintf("invalid X509 keypair material: %v", err))
}
cert := &CertInfo{
keypair: keypair,
}
return cert
}
// TestingAltKeyPair returns CertInfo object initialized with a test keypair
// which differs from the one returned by TestCertInfo. It's meant to be used
// only by tests.
func TestingAltKeyPair() *CertInfo {
keypair, err := tls.X509KeyPair(testAltCertPEMBlock, testAltKeyPEMBlock)
if err != nil {
panic(fmt.Sprintf("invalid X509 keypair material: %v", err))
}
cert := &CertInfo{
keypair: keypair,
}
return cert
}
/*
* Generate a list of names for which the certificate will be valid.
* This will include the hostname and ip address.
*/
func mynames() ([]string, error) {
h, err := os.Hostname()
if err != nil {
return nil, err
}
ret := []string{h, "127.0.0.1/8", "::1/128"}
return ret, nil
}
// FindOrGenCert generates a keypair if needed.
// The type argument is false for server, true for client.
func FindOrGenCert(certf string, keyf string, certtype bool, addHosts bool) error {
if PathExists(certf) && PathExists(keyf) {
return nil
}
/* If neither stat succeeded, then this is our first run and we
* need to generate cert and privkey */
err := GenCert(certf, keyf, certtype, addHosts)
if err != nil {
return err
}
return nil
}
// GenCert will create and populate a certificate file and a key file.
func GenCert(certf string, keyf string, certtype bool, addHosts bool) error {
/* Create the basenames if needed */
dir := filepath.Dir(certf)
err := os.MkdirAll(dir, 0750)
if err != nil {
return err
}
dir = filepath.Dir(keyf)
err = os.MkdirAll(dir, 0750)
if err != nil {
return err
}
certBytes, keyBytes, err := GenerateMemCert(certtype, addHosts)
if err != nil {
return err
}
certOut, err := os.Create(certf)
if err != nil {
return fmt.Errorf("Failed to open %s for writing: %w", certf, err)
}
_, err = certOut.Write(certBytes)
if err != nil {
return fmt.Errorf("Failed to write cert file: %w", err)
}
err = certOut.Close()
if err != nil {
return fmt.Errorf("Failed to close cert file: %w", err)
}
keyOut, err := os.OpenFile(keyf, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return fmt.Errorf("Failed to open %s for writing: %w", keyf, err)
}
_, err = keyOut.Write(keyBytes)
if err != nil {
return fmt.Errorf("Failed to write key file: %w", err)
}
err = keyOut.Close()
if err != nil {
return fmt.Errorf("Failed to close key file: %w", err)
}
return nil
}
// GenerateMemCert creates client or server certificate and key pair,
// returning them as byte arrays in memory.
func GenerateMemCert(client bool, addHosts bool) ([]byte, []byte, error) {
privk, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
if err != nil {
return nil, nil, fmt.Errorf("Failed to generate key: %w", err)
}
validFrom := time.Now()
validTo := validFrom.Add(10 * 365 * 24 * time.Hour)
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, nil, fmt.Errorf("Failed to generate serial number: %w", err)
}
userEntry, err := user.Current()
var username string
if err == nil {
username = userEntry.Username
if username == "" {
username = "UNKNOWN"
}
} else {
username = "UNKNOWN"
}
hostname, err := os.Hostname()
if err != nil {
hostname = "UNKNOWN"
}
template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
Organization: []string{"LXD"},
CommonName: fmt.Sprintf("%s@%s", username, hostname),
},
NotBefore: validFrom,
NotAfter: validTo,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
BasicConstraintsValid: true,
}
if client {
template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}
} else {
template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}
}
if addHosts {
hosts, err := mynames()
if err != nil {
return nil, nil, fmt.Errorf("Failed to get my hostname: %w", err)
}
for _, h := range hosts {
ip, _, err := net.ParseCIDR(h)
if err == nil {
if !ip.IsLinkLocalUnicast() && !ip.IsLinkLocalMulticast() {
template.IPAddresses = append(template.IPAddresses, ip)
}
} else {
template.DNSNames = append(template.DNSNames, h)
}
}
} else if !client {
template.DNSNames = []string{"unspecified"}
}
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privk.PublicKey, privk)
if err != nil {
return nil, nil, fmt.Errorf("Failed to create certificate: %w", err)
}
data, err := x509.MarshalECPrivateKey(privk)
if err != nil {
return nil, nil, err
}
cert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
key := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: data})
return cert, key, nil
}
func ReadCert(fpath string) (*x509.Certificate, error) {
cf, err := os.ReadFile(fpath)
if err != nil {
return nil, err
}
certBlock, _ := pem.Decode(cf)
if certBlock == nil {
return nil, fmt.Errorf("Invalid certificate file")
}
return x509.ParseCertificate(certBlock.Bytes)
}
func CertFingerprint(cert *x509.Certificate) string {
return fmt.Sprintf("%x", sha256.Sum256(cert.Raw))
}
func CertFingerprintStr(c string) (string, error) {
pemCertificate, _ := pem.Decode([]byte(c))
if pemCertificate == nil {
return "", fmt.Errorf("invalid certificate")
}
cert, err := x509.ParseCertificate(pemCertificate.Bytes)
if err != nil {
return "", err
}
return CertFingerprint(cert), nil
}
// GetRemoteCertificate connects to the given HTTPS address with certificate
// verification disabled and returns the peer's leaf TLS certificate.
//
// The useragent string, when non-empty, is sent as the User-Agent header.
func GetRemoteCertificate(address string, useragent string) (*x509.Certificate, error) {
	// Setup a permissive TLS config
	tlsConfig, err := GetTLSConfig("", "", "", nil)
	if err != nil {
		return nil, err
	}

	tlsConfig.InsecureSkipVerify = true
	tr := &http.Transport{
		TLSClientConfig:       tlsConfig,
		DialContext:           RFC3493Dialer,
		Proxy:                 ProxyFromEnvironment,
		ExpectContinueTimeout: time.Second * 30,
		ResponseHeaderTimeout: time.Second * 3600,
		TLSHandshakeTimeout:   time.Second * 5,
	}

	// Connect
	req, err := http.NewRequest("GET", address, nil)
	if err != nil {
		return nil, err
	}

	if useragent != "" {
		req.Header.Set("User-Agent", useragent)
	}

	client := &http.Client{Transport: tr}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}

	// Close the body so the connection can be reused; the previous version
	// leaked it. resp.TLS stays readable after Close.
	defer resp.Body.Close()

	// Retrieve the certificate
	if resp.TLS == nil || len(resp.TLS.PeerCertificates) == 0 {
		return nil, fmt.Errorf("Unable to read remote TLS certificate")
	}

	return resp.TLS.PeerCertificates[0], nil
}
// CertificateTokenDecode decodes a base64 and JSON encoded certificate add token.
func | (input string) (*api.CertificateAddToken, error) {
joinTokenJSON, err := base64.StdEncoding.DecodeString(input)
if err != nil {
return nil, err
}
var j api.CertificateAddToken
err = json.Unmarshal(joinTokenJSON, &j)
if err != nil {
return nil, err
}
if j.ClientName == "" {
return nil, fmt.Errorf("No client name in certificate add token")
}
if len(j.Addresses) < 1 {
return nil, fmt.Errorf("No server addresses in certificate add token")
}
if j.Secret == "" {
return nil, fmt.Errorf("No secret in certificate add token")
}
if j.Fingerprint == "" {
return nil, fmt.Errorf("No certificate fingerprint in certificate add token")
}
return &j, nil
}
// GenerateTrustCertificate converts the specified serverCert and serverName into an api.Certificate suitable for
// use as a trusted cluster server certificate.
func GenerateTrustCertificate(cert *CertInfo, name string) (*api.Certificate, error) {
block, _ := pem.Decode(cert.PublicKey())
if block == nil {
return nil, fmt.Errorf("Failed to decode certificate")
}
fingerprint, err := CertFingerprintStr(string(cert.PublicKey()))
if err != nil {
return nil, fmt.Errorf("Failed to calculate fingerprint: %w", err)
}
certificate := base64.StdEncoding.EncodeToString(block.Bytes)
apiCert := api.Certificate{
CertificatePut: api.CertificatePut{
Certificate: certificate,
Name: name,
Type: api.CertificateTypeServer, // Server type for intra-member communication.
},
Fingerprint: fingerprint,
}
return &apiCert, nil
}
var testCertPEMBlock = []byte(`
-----BEGIN CERTIFICATE-----
MIIBzjCCAVSgAwIBAgIUJAEAVl1oOU+OQxj5aUrRdJDwuWEwCgYIKoZIzj0EAwMw
EzERMA8GA1UEAwwIYWx0LnRlc3QwHhcNMjIwNDEzMDQyMjA0WhcNMzIwNDEwMDQy
MjA0WjATMREwDwYDVQQDDAhhbHQudGVzdDB2MBAGByqGSM49AgEGBSuBBAAiA2IA
BGAmiHj98SXz0ZW1AxheW+zkFyPz5ZrZoZDY7NezGQpoH4KZ1x08X1jw67wv+M0c
W+yd2BThOcvItBO+HokJ03lgL6cgDojcmEEfZntgmGHjG7USqh48TrQtmt/uSJsD
4qNpMGcwHQYDVR0OBBYEFPOsHk3ewn4abmyzLgOXs3Bg8Dq9MB8GA1UdIwQYMBaA
FPOsHk3ewn4abmyzLgOXs3Bg8Dq9MA8GA1UdEwEB/wQFMAMBAf8wFAYDVR0RBA0w
C4IJbG9jYWxob3N0MAoGCCqGSM49BAMDA2gAMGUCMCKR+gWwN9VWXct8tDxCvlA6
+JP7iQPnLetiSLpyN4HEVQYP+EQhDJIJIy6+CwlUCQIxANQXfaTTrcVuhAb9dwVI
9bcu4cRGLEtbbNuOW/y+q7mXG0LtE/frDv/QrNpKhnnOzA==
-----END CERTIFICATE-----
`)
var testKeyPEMBlock = []byte(`
-----BEGIN PRIVATE KEY-----
MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDBzlLjHjIxc5XHm95zB
p8cnUtHQcmdBy2Ekv+bbiaS/8M8Twp7Jvi47SruAY5gESK2hZANiAARgJoh4/fEl
89GVtQMYXlvs5Bcj8+Wa2aGQ2OzXsxkKaB+CmdcdPF9Y8Ou8L/jNHFvsndgU4TnL
yLQTvh6JCdN5YC+nIA6I3JhBH2Z7YJhh4xu1EqoePE60LZrf7kibA+I=
-----END PRIVATE KEY-----
`)
var testAltCertPEMBlock = []byte(`
-----BEGIN CERTIFICATE-----
MIIBzjCCAVSgAwIBAgIUK41+7aTdYLu3x3vGoDOqat10TmQwCgYIKoZIzj0EAwMw
EzERMA8GA1UEAwwIYWx0LnRlc3QwHhcNMjIwNDEzMDQyMzM0WhcNMzIwNDEwMDQy
MzM0WjATMREwDwYDVQQDDAhhbHQudGVzdDB2MBAGByqGSM49AgEGBSuBBAAiA2IA
BAHv2a3obPHcQVDQouW/A/M/l2xHUFINWvCIhA5gWCtj9RLWKD6veBR133qSr9w0
/DT96ZoTw7kJu/BQQFlRafmfMRTZcvXHLoPMoihBEkDqTGl2qwEQea/0MPi3thwJ
wqNpMGcwHQYDVR0OBBYEFKoF8yXx9lgBTQvZL2M8YqV4c4c5MB8GA1UdIwQYMBaA
FKoF8yXx9lgBTQvZL2M8YqV4c4c5MA8GA1UdEwEB/wQFMAMBAf8wFAYDVR0RBA0w
C4IJbG9jYWxob3N0MAoGCCqGSM49BAMDA2gAMGUCMQCcpYeYWmIL7QdUCGGRT8gt
YhQSciGzXlyncToAJ+A91dXGbGYvqfIti7R00sR+8cwCMAxglHP7iFzWrzn1M/Z9
H5bVDjnWZvsgEblThausOYxWxzxD+5dT5rItoVZOJhfPLw==
-----END CERTIFICATE-----
`)
var testAltKeyPEMBlock = []byte(`
-----BEGIN PRIVATE KEY-----
MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDC3/Fv+SmNLfBy2AuUD
O3zHq1GMLvVfk3JkDIqqbKPJeEa2rS44bemExc8v85wVYTmhZANiAAQB79mt6Gzx
3EFQ0KLlvwPzP5dsR1BSDVrwiIQOYFgrY/US1ig+r3gUdd96kq/cNPw0/emaE8O5
CbvwUEBZUWn5nzEU2XL1xy6DzKIoQRJA6kxpdqsBEHmv9DD4t7YcCcI=
-----END PRIVATE KEY-----
`)
| CertificateTokenDecode | identifier_name |
cert.go | // http://golang.org/src/pkg/crypto/tls/generate_cert.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package shared
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/base64"
"encoding/json"
"encoding/pem"
"fmt"
"math/big"
"net"
"net/http"
"os"
"os/user"
"path/filepath"
"time"
"github.com/lxc/incus/shared/api"
)
// KeyPairAndCA returns a CertInfo object with a reference to the key pair and
// (optionally) CA certificate located in the given directory and having the
// given name prefix
//
// The naming conversion for the various files is:
//
// <prefix>.crt -> public key
// <prefix>.key -> private key
// <prefix>.ca -> CA certificate
//
// If no public/private key files are found, a new key pair will be generated
// and saved on disk.
//
// If a CA certificate is found, it will be returned as well as second return
// value (otherwise it will be nil).
func KeyPairAndCA(dir, prefix string, kind CertKind, addHosts bool) (*CertInfo, error) {
	certFilename := filepath.Join(dir, prefix+".crt")
	keyFilename := filepath.Join(dir, prefix+".key")

	// Ensure that the certificate exists, or create a new one if it does
	// not. A client cert is generated when kind == CertClient, a server
	// cert otherwise.
	err := FindOrGenCert(certFilename, keyFilename, kind == CertClient, addHosts)
	if err != nil {
		return nil, err
	}

	// Load the certificate.
	keypair, err := tls.LoadX509KeyPair(certFilename, keyFilename)
	if err != nil {
		return nil, err
	}

	// If available, load the CA data as well. A missing CA file is not an
	// error; ca simply stays nil.
	caFilename := filepath.Join(dir, prefix+".ca")
	var ca *x509.Certificate
	if PathExists(caFilename) {
		ca, err = ReadCert(caFilename)
		if err != nil {
			return nil, err
		}
	}

	// The CRL file name is fixed ("ca.crl"); unlike the other files it is
	// not derived from prefix.
	crlFilename := filepath.Join(dir, "ca.crl")
	var crl *pkix.CertificateList
	if PathExists(crlFilename) {
		data, err := os.ReadFile(crlFilename)
		if err != nil {
			return nil, err
		}

		// NOTE(review): x509.ParseCRL and pkix.CertificateList are
		// deprecated in recent Go releases in favour of
		// x509.ParseRevocationList — confirm the toolchain version
		// before migrating.
		crl, err = x509.ParseCRL(data)
		if err != nil {
			return nil, err
		}
	}

	info := &CertInfo{
		keypair: keypair,
		ca:      ca,
		crl:     crl,
	}

	return info, nil
}
// KeyPairFromRaw returns a CertInfo from the raw certificate and key.
func KeyPairFromRaw(certificate []byte, key []byte) (*CertInfo, error) {
	kp, err := tls.X509KeyPair(certificate, key)
	if err != nil {
		return nil, err
	}

	return &CertInfo{keypair: kp}, nil
}
// CertInfo captures TLS certificate information about a certain public/private
// keypair and an optional CA certificate and CRL.
//
// Given LXD's support for PKI setups, these two bits of information are
// normally used and passed around together, so this structure helps with that
// (see doc/security.md for more details).
type CertInfo struct {
	// keypair is the parsed public/private key pair; always populated.
	keypair tls.Certificate
	// ca is the CA certificate; nil when no <prefix>.ca file was found.
	ca *x509.Certificate
	// crl is the certificate revocation list; nil when no ca.crl file was found.
	crl *pkix.CertificateList
}
// KeyPair returns the public/private key pair (a copy of the underlying
// tls.Certificate value).
func (c *CertInfo) KeyPair() tls.Certificate {
	return c.keypair
}
// CA returns the CA certificate, or nil when none was loaded.
func (c *CertInfo) CA() *x509.Certificate {
	return c.ca
}
// PublicKey is a convenience to encode the underlying public key to ASCII.
func (c *CertInfo) PublicKey() []byte {
	// The first element of the chain is the leaf certificate in DER form.
	return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: c.KeyPair().Certificate[0]})
}
// PublicKeyX509 is a convenience to return the underlying public key as an *x509.Certificate.
// It parses the DER bytes of the leaf certificate in the key pair.
func (c *CertInfo) PublicKeyX509() (*x509.Certificate, error) {
	return x509.ParseCertificate(c.KeyPair().Certificate[0])
}
// PrivateKey is a convenience to encode the underlying private key.
// It returns nil for key types other than ECDSA and RSA, or when encoding fails.
func (c *CertInfo) PrivateKey() []byte {
	switch key := c.KeyPair().PrivateKey.(type) {
	case *ecdsa.PrivateKey:
		der, err := x509.MarshalECPrivateKey(key)
		if err != nil {
			return nil
		}

		return pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: der})
	case *rsa.PrivateKey:
		return pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
	default:
		return nil
	}
}
// Fingerprint returns the SHA-256 fingerprint of the public key as a hex
// string. It panics if the stored certificate cannot be parsed.
func (c *CertInfo) Fingerprint() string {
	fingerprint, err := CertFingerprintStr(string(c.PublicKey()))
	// Parsing should never fail, since we generated the cert ourselves,
	// but let's check the error for good measure.
	if err != nil {
		panic("invalid public key material")
	}

	return fingerprint
}
// CRL returns the certificate revocation list, or nil when none was loaded.
func (c *CertInfo) CRL() *pkix.CertificateList {
	return c.crl
}
// CertKind defines the kind of certificate to generate from scratch in
// KeyPairAndCA when it's not there.
//
// The two possible kinds are client and server, and they differ in the
// ext-key-usage bitmaps. See GenerateMemCert for more details.
type CertKind int

// Possible kinds of certificates.
const (
	// CertClient requests a certificate with the client-auth extended key usage.
	CertClient CertKind = iota
	// CertServer requests a certificate with the server-auth extended key usage.
	CertServer
)
// TestingKeyPair returns CertInfo object initialized with a test keypair. It's
// meant to be used only by tests.
func TestingKeyPair() *CertInfo {
	kp, err := tls.X509KeyPair(testCertPEMBlock, testKeyPEMBlock)
	if err != nil {
		// The PEM blocks are compiled in, so failure here is a programmer error.
		panic(fmt.Sprintf("invalid X509 keypair material: %v", err))
	}

	return &CertInfo{keypair: kp}
}
// TestingAltKeyPair returns CertInfo object initialized with a test keypair
// which differs from the one returned by TestCertInfo. It's meant to be used
// only by tests.
func TestingAltKeyPair() *CertInfo {
	kp, err := tls.X509KeyPair(testAltCertPEMBlock, testAltKeyPEMBlock)
	if err != nil {
		// The PEM blocks are compiled in, so failure here is a programmer error.
		panic(fmt.Sprintf("invalid X509 keypair material: %v", err))
	}

	return &CertInfo{keypair: kp}
}
/*
 * Generate a list of names for which the certificate will be valid.
 * This will include the hostname and ip address.
 */
func mynames() ([]string, error) {
	hostname, err := os.Hostname()
	if err != nil {
		return nil, err
	}

	// Hostname first, then the IPv4 and IPv6 loopback ranges in CIDR form.
	return []string{hostname, "127.0.0.1/8", "::1/128"}, nil
}
// FindOrGenCert generates a keypair if needed.
// The type argument is false for server, true for client.
func FindOrGenCert(certf string, keyf string, certtype bool, addHosts bool) error {
	// Nothing to do when both halves of the pair already exist on disk.
	if PathExists(certf) && PathExists(keyf) {
		return nil
	}

	/* If neither stat succeeded, then this is our first run and we
	 * need to generate cert and privkey */
	return GenCert(certf, keyf, certtype, addHosts)
}
// GenCert will create and populate a certificate file and a key file.
//
// Parent directories for both paths are created (mode 0750) when missing. The
// certificate is written to certf and the private key to keyf (mode 0600).
// The certtype argument is false for server, true for client.
func GenCert(certf string, keyf string, certtype bool, addHosts bool) error {
	/* Create the basenames if needed */
	err := os.MkdirAll(filepath.Dir(certf), 0750)
	if err != nil {
		return err
	}

	err = os.MkdirAll(filepath.Dir(keyf), 0750)
	if err != nil {
		return err
	}

	certBytes, keyBytes, err := GenerateMemCert(certtype, addHosts)
	if err != nil {
		return err
	}

	// The certificate is public material; default permissions are fine.
	err = writeCertOrKey(certf, certBytes, 0666, "cert")
	if err != nil {
		return err
	}

	// The private key must only be accessible to the owner.
	return writeCertOrKey(keyf, keyBytes, 0600, "key")
}

// writeCertOrKey writes data to fpath with the given mode, making sure the
// file descriptor is closed on every path (the previous version returned
// without closing when a write failed, leaking the descriptor). The label
// ("cert" or "key") is interpolated into the error messages, which are kept
// identical to the original ones.
func writeCertOrKey(fpath string, data []byte, mode os.FileMode, label string) error {
	f, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode)
	if err != nil {
		return fmt.Errorf("Failed to open %s for writing: %w", fpath, err)
	}

	_, err = f.Write(data)
	if err != nil {
		_ = f.Close() // Best effort: the write error is the one worth reporting.
		return fmt.Errorf("Failed to write %s file: %w", label, err)
	}

	err = f.Close()
	if err != nil {
		return fmt.Errorf("Failed to close %s file: %w", label, err)
	}

	return nil
}
// GenerateMemCert creates client or server certificate and key pair,
// returning them as byte arrays in memory.
//
// The key is a fresh ECDSA P-384 key. The certificate is self-signed, valid
// from now for roughly ten years, with a random 128-bit serial number and a
// "<user>@<host>" common name. When client is true the certificate carries
// the client-auth extended key usage, otherwise server-auth. When addHosts is
// true the names from mynames() are added as SANs.
func GenerateMemCert(client bool, addHosts bool) ([]byte, []byte, error) {
	privk, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to generate key: %w", err)
	}

	// Validity window: now until ten 365-day years from now.
	validFrom := time.Now()
	validTo := validFrom.Add(10 * 365 * 24 * time.Hour)

	// Random serial number in [0, 2^128).
	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to generate serial number: %w", err)
	}

	// Best-effort user/host lookup for the common name; fall back to
	// "UNKNOWN" rather than failing.
	userEntry, err := user.Current()
	var username string
	if err == nil {
		username = userEntry.Username
		if username == "" {
			username = "UNKNOWN"
		}
	} else {
		username = "UNKNOWN"
	}

	hostname, err := os.Hostname()
	if err != nil {
		hostname = "UNKNOWN"
	}

	template := x509.Certificate{
		SerialNumber: serialNumber,
		Subject: pkix.Name{
			Organization: []string{"LXD"},
			CommonName:   fmt.Sprintf("%s@%s", username, hostname),
		},
		NotBefore:             validFrom,
		NotAfter:              validTo,
		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		BasicConstraintsValid: true,
	}

	if client {
		template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}
	} else {
		template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}
	}

	if addHosts {
		hosts, err := mynames()
		if err != nil {
			return nil, nil, fmt.Errorf("Failed to get my hostname: %w", err)
		}

		// Entries that parse as CIDR become IP SANs (link-local addresses
		// excluded); everything else is treated as a DNS name.
		for _, h := range hosts {
			ip, _, err := net.ParseCIDR(h)
			if err == nil {
				if !ip.IsLinkLocalUnicast() && !ip.IsLinkLocalMulticast() {
					template.IPAddresses = append(template.IPAddresses, ip)
				}
			} else {
				template.DNSNames = append(template.DNSNames, h)
			}
		}
	} else if !client {
		// Server certificates without host SANs get a placeholder DNS name.
		template.DNSNames = []string{"unspecified"}
	}

	// Self-signed: the template acts as both subject and issuer.
	derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privk.PublicKey, privk)
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to create certificate: %w", err)
	}

	data, err := x509.MarshalECPrivateKey(privk)
	if err != nil {
		return nil, nil, err
	}

	cert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
	key := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: data})

	return cert, key, nil
}
func ReadCert(fpath string) (*x509.Certificate, error) {
cf, err := os.ReadFile(fpath)
if err != nil {
return nil, err
}
certBlock, _ := pem.Decode(cf)
if certBlock == nil {
return nil, fmt.Errorf("Invalid certificate file")
}
return x509.ParseCertificate(certBlock.Bytes)
}
func CertFingerprint(cert *x509.Certificate) string |
// CertFingerprintStr decodes a PEM-encoded certificate string and returns its
// fingerprint.
func CertFingerprintStr(c string) (string, error) {
	block, _ := pem.Decode([]byte(c))
	if block == nil {
		return "", fmt.Errorf("invalid certificate")
	}

	parsed, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return "", err
	}

	return CertFingerprint(parsed), nil
}
// GetRemoteCertificate connects to the given HTTPS address with certificate
// verification disabled and returns the peer's leaf TLS certificate.
//
// The useragent string, when non-empty, is sent as the User-Agent header.
func GetRemoteCertificate(address string, useragent string) (*x509.Certificate, error) {
	// Setup a permissive TLS config
	tlsConfig, err := GetTLSConfig("", "", "", nil)
	if err != nil {
		return nil, err
	}

	tlsConfig.InsecureSkipVerify = true
	tr := &http.Transport{
		TLSClientConfig:       tlsConfig,
		DialContext:           RFC3493Dialer,
		Proxy:                 ProxyFromEnvironment,
		ExpectContinueTimeout: time.Second * 30,
		ResponseHeaderTimeout: time.Second * 3600,
		TLSHandshakeTimeout:   time.Second * 5,
	}

	// Connect
	req, err := http.NewRequest("GET", address, nil)
	if err != nil {
		return nil, err
	}

	if useragent != "" {
		req.Header.Set("User-Agent", useragent)
	}

	client := &http.Client{Transport: tr}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}

	// Close the body so the connection can be reused; the previous version
	// leaked it. resp.TLS stays readable after Close.
	defer resp.Body.Close()

	// Retrieve the certificate
	if resp.TLS == nil || len(resp.TLS.PeerCertificates) == 0 {
		return nil, fmt.Errorf("Unable to read remote TLS certificate")
	}

	return resp.TLS.PeerCertificates[0], nil
}
// CertificateTokenDecode decodes a base64 and JSON encoded certificate add token.
func CertificateTokenDecode(input string) (*api.CertificateAddToken, error) {
	raw, err := base64.StdEncoding.DecodeString(input)
	if err != nil {
		return nil, err
	}

	token := api.CertificateAddToken{}
	err = json.Unmarshal(raw, &token)
	if err != nil {
		return nil, err
	}

	// Reject tokens missing any mandatory field.
	switch {
	case token.ClientName == "":
		return nil, fmt.Errorf("No client name in certificate add token")
	case len(token.Addresses) < 1:
		return nil, fmt.Errorf("No server addresses in certificate add token")
	case token.Secret == "":
		return nil, fmt.Errorf("No secret in certificate add token")
	case token.Fingerprint == "":
		return nil, fmt.Errorf("No certificate fingerprint in certificate add token")
	}

	return &token, nil
}
// GenerateTrustCertificate converts the specified serverCert and serverName into an api.Certificate suitable for
// use as a trusted cluster server certificate.
func GenerateTrustCertificate(cert *CertInfo, name string) (*api.Certificate, error) {
	pemBytes := cert.PublicKey()

	block, _ := pem.Decode(pemBytes)
	if block == nil {
		return nil, fmt.Errorf("Failed to decode certificate")
	}

	fingerprint, err := CertFingerprintStr(string(pemBytes))
	if err != nil {
		return nil, fmt.Errorf("Failed to calculate fingerprint: %w", err)
	}

	// The API carries the bare DER bytes, base64-encoded (no PEM armor).
	return &api.Certificate{
		CertificatePut: api.CertificatePut{
			Certificate: base64.StdEncoding.EncodeToString(block.Bytes),
			Name:        name,
			Type:        api.CertificateTypeServer, // Server type for intra-member communication.
		},
		Fingerprint: fingerprint,
	}, nil
}
var testCertPEMBlock = []byte(`
-----BEGIN CERTIFICATE-----
MIIBzjCCAVSgAwIBAgIUJAEAVl1oOU+OQxj5aUrRdJDwuWEwCgYIKoZIzj0EAwMw
EzERMA8GA1UEAwwIYWx0LnRlc3QwHhcNMjIwNDEzMDQyMjA0WhcNMzIwNDEwMDQy
MjA0WjATMREwDwYDVQQDDAhhbHQudGVzdDB2MBAGByqGSM49AgEGBSuBBAAiA2IA
BGAmiHj98SXz0ZW1AxheW+zkFyPz5ZrZoZDY7NezGQpoH4KZ1x08X1jw67wv+M0c
W+yd2BThOcvItBO+HokJ03lgL6cgDojcmEEfZntgmGHjG7USqh48TrQtmt/uSJsD
4qNpMGcwHQYDVR0OBBYEFPOsHk3ewn4abmyzLgOXs3Bg8Dq9MB8GA1UdIwQYMBaA
FPOsHk3ewn4abmyzLgOXs3Bg8Dq9MA8GA1UdEwEB/wQFMAMBAf8wFAYDVR0RBA0w
C4IJbG9jYWxob3N0MAoGCCqGSM49BAMDA2gAMGUCMCKR+gWwN9VWXct8tDxCvlA6
+JP7iQPnLetiSLpyN4HEVQYP+EQhDJIJIy6+CwlUCQIxANQXfaTTrcVuhAb9dwVI
9bcu4cRGLEtbbNuOW/y+q7mXG0LtE/frDv/QrNpKhnnOzA==
-----END CERTIFICATE-----
`)
var testKeyPEMBlock = []byte(`
-----BEGIN PRIVATE KEY-----
MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDBzlLjHjIxc5XHm95zB
p8cnUtHQcmdBy2Ekv+bbiaS/8M8Twp7Jvi47SruAY5gESK2hZANiAARgJoh4/fEl
89GVtQMYXlvs5Bcj8+Wa2aGQ2OzXsxkKaB+CmdcdPF9Y8Ou8L/jNHFvsndgU4TnL
yLQTvh6JCdN5YC+nIA6I3JhBH2Z7YJhh4xu1EqoePE60LZrf7kibA+I=
-----END PRIVATE KEY-----
`)
var testAltCertPEMBlock = []byte(`
-----BEGIN CERTIFICATE-----
MIIBzjCCAVSgAwIBAgIUK41+7aTdYLu3x3vGoDOqat10TmQwCgYIKoZIzj0EAwMw
EzERMA8GA1UEAwwIYWx0LnRlc3QwHhcNMjIwNDEzMDQyMzM0WhcNMzIwNDEwMDQy
MzM0WjATMREwDwYDVQQDDAhhbHQudGVzdDB2MBAGByqGSM49AgEGBSuBBAAiA2IA
BAHv2a3obPHcQVDQouW/A/M/l2xHUFINWvCIhA5gWCtj9RLWKD6veBR133qSr9w0
/DT96ZoTw7kJu/BQQFlRafmfMRTZcvXHLoPMoihBEkDqTGl2qwEQea/0MPi3thwJ
wqNpMGcwHQYDVR0OBBYEFKoF8yXx9lgBTQvZL2M8YqV4c4c5MB8GA1UdIwQYMBaA
FKoF8yXx9lgBTQvZL2M8YqV4c4c5MA8GA1UdEwEB/wQFMAMBAf8wFAYDVR0RBA0w
C4IJbG9jYWxob3N0MAoGCCqGSM49BAMDA2gAMGUCMQCcpYeYWmIL7QdUCGGRT8gt
YhQSciGzXlyncToAJ+A91dXGbGYvqfIti7R00sR+8cwCMAxglHP7iFzWrzn1M/Z9
H5bVDjnWZvsgEblThausOYxWxzxD+5dT5rItoVZOJhfPLw==
-----END CERTIFICATE-----
`)
var testAltKeyPEMBlock = []byte(`
-----BEGIN PRIVATE KEY-----
MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDC3/Fv+SmNLfBy2AuUD
O3zHq1GMLvVfk3JkDIqqbKPJeEa2rS44bemExc8v85wVYTmhZANiAAQB79mt6Gzx
3EFQ0KLlvwPzP5dsR1BSDVrwiIQOYFgrY/US1ig+r3gUdd96kq/cNPw0/emaE8O5
CbvwUEBZUWn5nzEU2XL1xy6DzKIoQRJA6kxpdqsBEHmv9DD4t7YcCcI=
-----END PRIVATE KEY-----
`)
| {
return fmt.Sprintf("%x", sha256.Sum256(cert.Raw))
} | identifier_body |
cert.go | // http://golang.org/src/pkg/crypto/tls/generate_cert.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package shared
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/base64"
"encoding/json"
"encoding/pem"
"fmt"
"math/big"
"net"
"net/http"
"os"
"os/user"
"path/filepath"
"time"
"github.com/lxc/incus/shared/api"
)
// KeyPairAndCA returns a CertInfo object with a reference to the key pair and
// (optionally) CA certificate located in the given directory and having the
// given name prefix
//
// The naming conversion for the various files is:
//
// <prefix>.crt -> public key
// <prefix>.key -> private key
// <prefix>.ca -> CA certificate
//
// If no public/private key files are found, a new key pair will be generated
// and saved on disk.
//
// If a CA certificate is found, it will be returned as well as second return
// value (otherwise it will be nil).
func KeyPairAndCA(dir, prefix string, kind CertKind, addHosts bool) (*CertInfo, error) {
certFilename := filepath.Join(dir, prefix+".crt")
keyFilename := filepath.Join(dir, prefix+".key")
// Ensure that the certificate exists, or create a new one if it does
// not.
err := FindOrGenCert(certFilename, keyFilename, kind == CertClient, addHosts)
if err != nil {
return nil, err
}
// Load the certificate.
keypair, err := tls.LoadX509KeyPair(certFilename, keyFilename)
if err != nil {
return nil, err
}
// If available, load the CA data as well.
caFilename := filepath.Join(dir, prefix+".ca")
var ca *x509.Certificate
if PathExists(caFilename) {
ca, err = ReadCert(caFilename)
if err != nil {
return nil, err
}
}
crlFilename := filepath.Join(dir, "ca.crl")
var crl *pkix.CertificateList
if PathExists(crlFilename) {
data, err := os.ReadFile(crlFilename)
if err != nil {
return nil, err
}
crl, err = x509.ParseCRL(data)
if err != nil {
return nil, err
}
}
info := &CertInfo{
keypair: keypair,
ca: ca,
crl: crl,
}
return info, nil
}
// KeyPairFromRaw returns a CertInfo from the raw certificate and key.
func KeyPairFromRaw(certificate []byte, key []byte) (*CertInfo, error) {
keypair, err := tls.X509KeyPair(certificate, key)
if err != nil {
return nil, err
}
return &CertInfo{
keypair: keypair,
}, nil
}
// CertInfo captures TLS certificate information about a certain public/private
// keypair and an optional CA certificate and CRL.
//
// Given LXD's support for PKI setups, these two bits of information are
// normally used and passed around together, so this structure helps with that
// (see doc/security.md for more details).
type CertInfo struct {
keypair tls.Certificate
ca *x509.Certificate
crl *pkix.CertificateList
}
// KeyPair returns the public/private key pair.
func (c *CertInfo) KeyPair() tls.Certificate {
return c.keypair
}
// CA returns the CA certificate.
func (c *CertInfo) CA() *x509.Certificate {
return c.ca
}
// PublicKey is a convenience to encode the underlying public key to ASCII.
func (c *CertInfo) PublicKey() []byte {
data := c.KeyPair().Certificate[0]
return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: data})
}
// PublicKeyX509 is a convenience to return the underlying public key as an *x509.Certificate.
func (c *CertInfo) PublicKeyX509() (*x509.Certificate, error) {
return x509.ParseCertificate(c.KeyPair().Certificate[0])
}
// PrivateKey is a convenience to encode the underlying private key.
func (c *CertInfo) PrivateKey() []byte {
ecKey, ok := c.KeyPair().PrivateKey.(*ecdsa.PrivateKey)
if ok {
data, err := x509.MarshalECPrivateKey(ecKey)
if err != nil {
return nil
}
return pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: data})
}
rsaKey, ok := c.KeyPair().PrivateKey.(*rsa.PrivateKey)
if ok {
data := x509.MarshalPKCS1PrivateKey(rsaKey)
return pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: data})
}
return nil
}
// Fingerprint returns the fingerprint of the public key.
func (c *CertInfo) Fingerprint() string {
fingerprint, err := CertFingerprintStr(string(c.PublicKey()))
// Parsing should never fail, since we generated the cert ourselves,
// but let's check the error for good measure.
if err != nil {
panic("invalid public key material")
}
return fingerprint
}
// CRL returns the certificate revocation list.
func (c *CertInfo) CRL() *pkix.CertificateList {
return c.crl
}
// CertKind defines the kind of certificate to generate from scratch in
// KeyPairAndCA when it's not there.
//
// The two possible kinds are client and server, and they differ in the
// ext-key-usage bitmaps. See GenerateMemCert for more details.
type CertKind int
// Possible kinds of certificates.
const (
CertClient CertKind = iota
CertServer
)
// TestingKeyPair returns CertInfo object initialized with a test keypair. It's
// meant to be used only by tests.
func TestingKeyPair() *CertInfo {
keypair, err := tls.X509KeyPair(testCertPEMBlock, testKeyPEMBlock)
if err != nil {
panic(fmt.Sprintf("invalid X509 keypair material: %v", err))
}
cert := &CertInfo{
keypair: keypair,
}
return cert
}
// TestingAltKeyPair returns CertInfo object initialized with a test keypair
// which differs from the one returned by TestCertInfo. It's meant to be used
// only by tests.
func TestingAltKeyPair() *CertInfo {
keypair, err := tls.X509KeyPair(testAltCertPEMBlock, testAltKeyPEMBlock)
if err != nil {
panic(fmt.Sprintf("invalid X509 keypair material: %v", err))
}
cert := &CertInfo{
keypair: keypair,
}
return cert
}
/*
* Generate a list of names for which the certificate will be valid.
* This will include the hostname and ip address.
*/
func mynames() ([]string, error) {
h, err := os.Hostname()
if err != nil {
return nil, err
}
ret := []string{h, "127.0.0.1/8", "::1/128"}
return ret, nil
}
// FindOrGenCert generates a keypair if needed.
// The type argument is false for server, true for client.
func FindOrGenCert(certf string, keyf string, certtype bool, addHosts bool) error {
if PathExists(certf) && PathExists(keyf) {
return nil
}
/* If neither stat succeeded, then this is our first run and we
* need to generate cert and privkey */
err := GenCert(certf, keyf, certtype, addHosts)
if err != nil {
return err
}
return nil
}
// GenCert will create and populate a certificate file and a key file.
func GenCert(certf string, keyf string, certtype bool, addHosts bool) error {
/* Create the basenames if needed */
dir := filepath.Dir(certf)
err := os.MkdirAll(dir, 0750)
if err != nil {
return err
}
dir = filepath.Dir(keyf)
err = os.MkdirAll(dir, 0750)
if err != nil {
return err
}
certBytes, keyBytes, err := GenerateMemCert(certtype, addHosts)
if err != nil {
return err
}
certOut, err := os.Create(certf)
if err != nil {
return fmt.Errorf("Failed to open %s for writing: %w", certf, err)
}
_, err = certOut.Write(certBytes)
if err != nil {
return fmt.Errorf("Failed to write cert file: %w", err)
}
err = certOut.Close()
if err != nil {
return fmt.Errorf("Failed to close cert file: %w", err)
}
keyOut, err := os.OpenFile(keyf, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return fmt.Errorf("Failed to open %s for writing: %w", keyf, err)
}
_, err = keyOut.Write(keyBytes)
if err != nil {
return fmt.Errorf("Failed to write key file: %w", err)
}
err = keyOut.Close()
if err != nil {
return fmt.Errorf("Failed to close key file: %w", err)
}
return nil
}
// GenerateMemCert creates client or server certificate and key pair,
// returning them as byte arrays in memory.
func GenerateMemCert(client bool, addHosts bool) ([]byte, []byte, error) {
privk, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
if err != nil {
return nil, nil, fmt.Errorf("Failed to generate key: %w", err)
}
validFrom := time.Now()
validTo := validFrom.Add(10 * 365 * 24 * time.Hour)
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, nil, fmt.Errorf("Failed to generate serial number: %w", err)
}
userEntry, err := user.Current()
var username string
if err == nil {
username = userEntry.Username
if username == "" {
username = "UNKNOWN"
} | if err != nil {
hostname = "UNKNOWN"
}
template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
Organization: []string{"LXD"},
CommonName: fmt.Sprintf("%s@%s", username, hostname),
},
NotBefore: validFrom,
NotAfter: validTo,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
BasicConstraintsValid: true,
}
if client {
template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}
} else {
template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}
}
if addHosts {
hosts, err := mynames()
if err != nil {
return nil, nil, fmt.Errorf("Failed to get my hostname: %w", err)
}
for _, h := range hosts {
ip, _, err := net.ParseCIDR(h)
if err == nil {
if !ip.IsLinkLocalUnicast() && !ip.IsLinkLocalMulticast() {
template.IPAddresses = append(template.IPAddresses, ip)
}
} else {
template.DNSNames = append(template.DNSNames, h)
}
}
} else if !client {
template.DNSNames = []string{"unspecified"}
}
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privk.PublicKey, privk)
if err != nil {
return nil, nil, fmt.Errorf("Failed to create certificate: %w", err)
}
data, err := x509.MarshalECPrivateKey(privk)
if err != nil {
return nil, nil, err
}
cert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
key := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: data})
return cert, key, nil
}
func ReadCert(fpath string) (*x509.Certificate, error) {
cf, err := os.ReadFile(fpath)
if err != nil {
return nil, err
}
certBlock, _ := pem.Decode(cf)
if certBlock == nil {
return nil, fmt.Errorf("Invalid certificate file")
}
return x509.ParseCertificate(certBlock.Bytes)
}
func CertFingerprint(cert *x509.Certificate) string {
return fmt.Sprintf("%x", sha256.Sum256(cert.Raw))
}
func CertFingerprintStr(c string) (string, error) {
pemCertificate, _ := pem.Decode([]byte(c))
if pemCertificate == nil {
return "", fmt.Errorf("invalid certificate")
}
cert, err := x509.ParseCertificate(pemCertificate.Bytes)
if err != nil {
return "", err
}
return CertFingerprint(cert), nil
}
// GetRemoteCertificate connects to the given HTTPS address with certificate
// verification disabled and returns the peer's leaf TLS certificate.
//
// The useragent string, when non-empty, is sent as the User-Agent header.
func GetRemoteCertificate(address string, useragent string) (*x509.Certificate, error) {
	// Setup a permissive TLS config
	tlsConfig, err := GetTLSConfig("", "", "", nil)
	if err != nil {
		return nil, err
	}

	tlsConfig.InsecureSkipVerify = true
	tr := &http.Transport{
		TLSClientConfig:       tlsConfig,
		DialContext:           RFC3493Dialer,
		Proxy:                 ProxyFromEnvironment,
		ExpectContinueTimeout: time.Second * 30,
		ResponseHeaderTimeout: time.Second * 3600,
		TLSHandshakeTimeout:   time.Second * 5,
	}

	// Connect
	req, err := http.NewRequest("GET", address, nil)
	if err != nil {
		return nil, err
	}

	if useragent != "" {
		req.Header.Set("User-Agent", useragent)
	}

	client := &http.Client{Transport: tr}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}

	// Close the body so the connection can be reused; the previous version
	// leaked it. resp.TLS stays readable after Close.
	defer resp.Body.Close()

	// Retrieve the certificate
	if resp.TLS == nil || len(resp.TLS.PeerCertificates) == 0 {
		return nil, fmt.Errorf("Unable to read remote TLS certificate")
	}

	return resp.TLS.PeerCertificates[0], nil
}
// CertificateTokenDecode decodes a base64 and JSON encoded certificate add token.
func CertificateTokenDecode(input string) (*api.CertificateAddToken, error) {
joinTokenJSON, err := base64.StdEncoding.DecodeString(input)
if err != nil {
return nil, err
}
var j api.CertificateAddToken
err = json.Unmarshal(joinTokenJSON, &j)
if err != nil {
return nil, err
}
if j.ClientName == "" {
return nil, fmt.Errorf("No client name in certificate add token")
}
if len(j.Addresses) < 1 {
return nil, fmt.Errorf("No server addresses in certificate add token")
}
if j.Secret == "" {
return nil, fmt.Errorf("No secret in certificate add token")
}
if j.Fingerprint == "" {
return nil, fmt.Errorf("No certificate fingerprint in certificate add token")
}
return &j, nil
}
// GenerateTrustCertificate converts the specified serverCert and serverName into an api.Certificate suitable for
// use as a trusted cluster server certificate.
func GenerateTrustCertificate(cert *CertInfo, name string) (*api.Certificate, error) {
block, _ := pem.Decode(cert.PublicKey())
if block == nil {
return nil, fmt.Errorf("Failed to decode certificate")
}
fingerprint, err := CertFingerprintStr(string(cert.PublicKey()))
if err != nil {
return nil, fmt.Errorf("Failed to calculate fingerprint: %w", err)
}
certificate := base64.StdEncoding.EncodeToString(block.Bytes)
apiCert := api.Certificate{
CertificatePut: api.CertificatePut{
Certificate: certificate,
Name: name,
Type: api.CertificateTypeServer, // Server type for intra-member communication.
},
Fingerprint: fingerprint,
}
return &apiCert, nil
}
var testCertPEMBlock = []byte(`
-----BEGIN CERTIFICATE-----
MIIBzjCCAVSgAwIBAgIUJAEAVl1oOU+OQxj5aUrRdJDwuWEwCgYIKoZIzj0EAwMw
EzERMA8GA1UEAwwIYWx0LnRlc3QwHhcNMjIwNDEzMDQyMjA0WhcNMzIwNDEwMDQy
MjA0WjATMREwDwYDVQQDDAhhbHQudGVzdDB2MBAGByqGSM49AgEGBSuBBAAiA2IA
BGAmiHj98SXz0ZW1AxheW+zkFyPz5ZrZoZDY7NezGQpoH4KZ1x08X1jw67wv+M0c
W+yd2BThOcvItBO+HokJ03lgL6cgDojcmEEfZntgmGHjG7USqh48TrQtmt/uSJsD
4qNpMGcwHQYDVR0OBBYEFPOsHk3ewn4abmyzLgOXs3Bg8Dq9MB8GA1UdIwQYMBaA
FPOsHk3ewn4abmyzLgOXs3Bg8Dq9MA8GA1UdEwEB/wQFMAMBAf8wFAYDVR0RBA0w
C4IJbG9jYWxob3N0MAoGCCqGSM49BAMDA2gAMGUCMCKR+gWwN9VWXct8tDxCvlA6
+JP7iQPnLetiSLpyN4HEVQYP+EQhDJIJIy6+CwlUCQIxANQXfaTTrcVuhAb9dwVI
9bcu4cRGLEtbbNuOW/y+q7mXG0LtE/frDv/QrNpKhnnOzA==
-----END CERTIFICATE-----
`)
var testKeyPEMBlock = []byte(`
-----BEGIN PRIVATE KEY-----
MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDBzlLjHjIxc5XHm95zB
p8cnUtHQcmdBy2Ekv+bbiaS/8M8Twp7Jvi47SruAY5gESK2hZANiAARgJoh4/fEl
89GVtQMYXlvs5Bcj8+Wa2aGQ2OzXsxkKaB+CmdcdPF9Y8Ou8L/jNHFvsndgU4TnL
yLQTvh6JCdN5YC+nIA6I3JhBH2Z7YJhh4xu1EqoePE60LZrf7kibA+I=
-----END PRIVATE KEY-----
`)
var testAltCertPEMBlock = []byte(`
-----BEGIN CERTIFICATE-----
MIIBzjCCAVSgAwIBAgIUK41+7aTdYLu3x3vGoDOqat10TmQwCgYIKoZIzj0EAwMw
EzERMA8GA1UEAwwIYWx0LnRlc3QwHhcNMjIwNDEzMDQyMzM0WhcNMzIwNDEwMDQy
MzM0WjATMREwDwYDVQQDDAhhbHQudGVzdDB2MBAGByqGSM49AgEGBSuBBAAiA2IA
BAHv2a3obPHcQVDQouW/A/M/l2xHUFINWvCIhA5gWCtj9RLWKD6veBR133qSr9w0
/DT96ZoTw7kJu/BQQFlRafmfMRTZcvXHLoPMoihBEkDqTGl2qwEQea/0MPi3thwJ
wqNpMGcwHQYDVR0OBBYEFKoF8yXx9lgBTQvZL2M8YqV4c4c5MB8GA1UdIwQYMBaA
FKoF8yXx9lgBTQvZL2M8YqV4c4c5MA8GA1UdEwEB/wQFMAMBAf8wFAYDVR0RBA0w
C4IJbG9jYWxob3N0MAoGCCqGSM49BAMDA2gAMGUCMQCcpYeYWmIL7QdUCGGRT8gt
YhQSciGzXlyncToAJ+A91dXGbGYvqfIti7R00sR+8cwCMAxglHP7iFzWrzn1M/Z9
H5bVDjnWZvsgEblThausOYxWxzxD+5dT5rItoVZOJhfPLw==
-----END CERTIFICATE-----
`)
var testAltKeyPEMBlock = []byte(`
-----BEGIN PRIVATE KEY-----
MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDC3/Fv+SmNLfBy2AuUD
O3zHq1GMLvVfk3JkDIqqbKPJeEa2rS44bemExc8v85wVYTmhZANiAAQB79mt6Gzx
3EFQ0KLlvwPzP5dsR1BSDVrwiIQOYFgrY/US1ig+r3gUdd96kq/cNPw0/emaE8O5
CbvwUEBZUWn5nzEU2XL1xy6DzKIoQRJA6kxpdqsBEHmv9DD4t7YcCcI=
-----END PRIVATE KEY-----
`) | } else {
username = "UNKNOWN"
}
hostname, err := os.Hostname() | random_line_split |
cert.go | // http://golang.org/src/pkg/crypto/tls/generate_cert.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package shared
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/base64"
"encoding/json"
"encoding/pem"
"fmt"
"math/big"
"net"
"net/http"
"os"
"os/user"
"path/filepath"
"time"
"github.com/lxc/incus/shared/api"
)
// KeyPairAndCA returns a CertInfo object with a reference to the key pair and
// (optionally) CA certificate located in the given directory and having the
// given name prefix.
//
// The naming convention for the various files is:
//
//	<prefix>.crt -> public key
//	<prefix>.key -> private key
//	<prefix>.ca -> CA certificate
//
// If no public/private key files are found, a new key pair will be generated
// and saved on disk.
//
// If a CA certificate is found, it will be returned as well as second return
// value (otherwise it will be nil).
//
// A certificate revocation list is additionally loaded from the fixed file
// name "ca.crl" in the same directory (note: NOT <prefix>.crl), when present.
func KeyPairAndCA(dir, prefix string, kind CertKind, addHosts bool) (*CertInfo, error) {
	certFilename := filepath.Join(dir, prefix+".crt")
	keyFilename := filepath.Join(dir, prefix+".key")
	// Ensure that the certificate exists, or create a new one if it does
	// not. A client kind skips the server SAN handling; addHosts controls
	// whether local hostname/addresses are added as SANs (see GenerateMemCert).
	err := FindOrGenCert(certFilename, keyFilename, kind == CertClient, addHosts)
	if err != nil {
		return nil, err
	}
	// Load the certificate.
	keypair, err := tls.LoadX509KeyPair(certFilename, keyFilename)
	if err != nil {
		return nil, err
	}
	// If available, load the CA data as well.
	caFilename := filepath.Join(dir, prefix+".ca")
	var ca *x509.Certificate
	if PathExists(caFilename) {
		ca, err = ReadCert(caFilename)
		if err != nil {
			return nil, err
		}
	}
	crlFilename := filepath.Join(dir, "ca.crl")
	var crl *pkix.CertificateList
	if PathExists(crlFilename) {
		data, err := os.ReadFile(crlFilename)
		if err != nil {
			return nil, err
		}
		// NOTE(review): x509.ParseCRL and pkix.CertificateList are deprecated
		// since Go 1.19; consider migrating to x509.ParseRevocationList /
		// *x509.RevocationList (interface change, so not done here).
		crl, err = x509.ParseCRL(data)
		if err != nil {
			return nil, err
		}
	}
	info := &CertInfo{
		keypair: keypair,
		ca:      ca,
		crl:     crl,
	}
	return info, nil
}
// KeyPairFromRaw returns a CertInfo wrapping the given PEM encoded
// certificate and private key. No CA or CRL is attached.
func KeyPairFromRaw(certificate []byte, key []byte) (*CertInfo, error) {
	pair, err := tls.X509KeyPair(certificate, key)
	if err != nil {
		return nil, err
	}

	return &CertInfo{keypair: pair}, nil
}
// CertInfo captures TLS certificate information about a certain public/private
// keypair and an optional CA certificate and CRL.
//
// In PKI setups these bits of information are normally used and passed around
// together, so this structure helps with that (the original comment refers to
// "LXD" and doc/security.md, but this package imports incus — presumably this
// file was forked from LXD; verify which docs apply).
type CertInfo struct {
	keypair tls.Certificate     // the public/private key pair itself
	ca      *x509.Certificate   // optional CA certificate (nil if absent)
	crl     *pkix.CertificateList // optional revocation list (nil if absent)
}
// KeyPair returns the public/private key pair held by this CertInfo.
func (c *CertInfo) KeyPair() tls.Certificate {
	return c.keypair
}

// CA returns the CA certificate, or nil when none was loaded.
func (c *CertInfo) CA() *x509.Certificate {
	return c.ca
}
// PublicKey is a convenience to encode the underlying public key to ASCII.
// It PEM encodes the leaf (first) certificate of the keypair.
func (c *CertInfo) PublicKey() []byte {
	leaf := pem.Block{Type: "CERTIFICATE", Bytes: c.KeyPair().Certificate[0]}
	return pem.EncodeToMemory(&leaf)
}
// PublicKeyX509 is a convenience to return the underlying public key as an
// *x509.Certificate (parses the leaf certificate of the keypair).
func (c *CertInfo) PublicKeyX509() (*x509.Certificate, error) {
	return x509.ParseCertificate(c.KeyPair().Certificate[0])
}
// PrivateKey is a convenience to encode the underlying private key as PEM.
// EC and RSA keys are supported; any other key type (or an EC marshalling
// failure) yields nil, matching the original behavior.
func (c *CertInfo) PrivateKey() []byte {
	switch key := c.KeyPair().PrivateKey.(type) {
	case *ecdsa.PrivateKey:
		der, err := x509.MarshalECPrivateKey(key)
		if err != nil {
			return nil
		}

		return pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: der})
	case *rsa.PrivateKey:
		der := x509.MarshalPKCS1PrivateKey(key)
		return pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: der})
	default:
		return nil
	}
}
// Fingerprint returns the SHA-256 fingerprint of the public key (hex encoded).
//
// It panics if the stored certificate cannot be re-parsed; see the inline
// comment for the rationale.
func (c *CertInfo) Fingerprint() string {
	fingerprint, err := CertFingerprintStr(string(c.PublicKey()))
	// Parsing should never fail, since we generated the cert ourselves,
	// but let's check the error for good measure.
	// NOTE(review): the original error is discarded here; including err in
	// the panic message would make failures easier to diagnose.
	if err != nil {
		panic("invalid public key material")
	}
	return fingerprint
}

// CRL returns the certificate revocation list, or nil when none was loaded.
func (c *CertInfo) CRL() *pkix.CertificateList {
	return c.crl
}
// CertKind defines the kind of certificate to generate from scratch in
// KeyPairAndCA when it's not there.
//
// The two possible kinds are client and server, and they differ in the
// ext-key-usage bitmaps. See GenerateMemCert for more details.
type CertKind int

// Possible kinds of certificates (CertClient = 0, CertServer = 1 via iota).
const (
	CertClient CertKind = iota
	CertServer
)
// TestingKeyPair returns a CertInfo initialized with a fixed test keypair.
// It's meant to be used only by tests and panics on invalid material, since
// the embedded PEM blocks are compile-time constants.
func TestingKeyPair() *CertInfo {
	kp, err := tls.X509KeyPair(testCertPEMBlock, testKeyPEMBlock)
	if err != nil {
		panic(fmt.Sprintf("invalid X509 keypair material: %v", err))
	}

	return &CertInfo{keypair: kp}
}
// TestingAltKeyPair returns a CertInfo initialized with a second fixed test
// keypair, distinct from the one returned by TestingKeyPair. It's meant to be
// used only by tests and panics on invalid material.
func TestingAltKeyPair() *CertInfo {
	kp, err := tls.X509KeyPair(testAltCertPEMBlock, testAltKeyPEMBlock)
	if err != nil {
		panic(fmt.Sprintf("invalid X509 keypair material: %v", err))
	}

	return &CertInfo{keypair: kp}
}
// mynames generates the list of names for which a certificate will be valid:
// the local hostname followed by the loopback addresses in CIDR notation
// (consumers parse each entry with net.ParseCIDR to tell IPs from DNS names).
func mynames() ([]string, error) {
	hostname, err := os.Hostname()
	if err != nil {
		return nil, err
	}

	return []string{hostname, "127.0.0.1/8", "::1/128"}, nil
}
// FindOrGenCert generates a keypair if needed.
// The certtype argument is false for server, true for client.
// When both files already exist this is a no-op.
func FindOrGenCert(certf string, keyf string, certtype bool, addHosts bool) error {
	if PathExists(certf) && PathExists(keyf) {
		return nil
	}

	// Neither stat succeeded, so this is our first run: generate the
	// certificate and private key now.
	return GenCert(certf, keyf, certtype, addHosts)
}
// GenCert will create and populate a certificate file and a key file.
func GenCert(certf string, keyf string, certtype bool, addHosts bool) error {
/* Create the basenames if needed */
dir := filepath.Dir(certf)
err := os.MkdirAll(dir, 0750)
if err != nil |
dir = filepath.Dir(keyf)
err = os.MkdirAll(dir, 0750)
if err != nil {
return err
}
certBytes, keyBytes, err := GenerateMemCert(certtype, addHosts)
if err != nil {
return err
}
certOut, err := os.Create(certf)
if err != nil {
return fmt.Errorf("Failed to open %s for writing: %w", certf, err)
}
_, err = certOut.Write(certBytes)
if err != nil {
return fmt.Errorf("Failed to write cert file: %w", err)
}
err = certOut.Close()
if err != nil {
return fmt.Errorf("Failed to close cert file: %w", err)
}
keyOut, err := os.OpenFile(keyf, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return fmt.Errorf("Failed to open %s for writing: %w", keyf, err)
}
_, err = keyOut.Write(keyBytes)
if err != nil {
return fmt.Errorf("Failed to write key file: %w", err)
}
err = keyOut.Close()
if err != nil {
return fmt.Errorf("Failed to close key file: %w", err)
}
return nil
}
// GenerateMemCert creates client or server certificate and key pair,
// returning them as byte arrays in memory (PEM encoded certificate and
// EC private key, in that order).
//
// The certificate is a self-signed ECDSA P-384 certificate valid for ten
// years, with a random 128-bit serial number and a CommonName of
// "<user>@<hostname>" (either part falls back to "UNKNOWN" when lookup
// fails). client selects the client-auth vs server-auth ext-key-usage, and
// addHosts adds the local hostname and loopback addresses as SANs.
func GenerateMemCert(client bool, addHosts bool) ([]byte, []byte, error) {
	privk, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to generate key: %w", err)
	}
	// Ten-year validity window starting now.
	validFrom := time.Now()
	validTo := validFrom.Add(10 * 365 * 24 * time.Hour)
	// Random serial in [0, 2^128).
	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to generate serial number: %w", err)
	}
	userEntry, err := user.Current()
	var username string
	if err == nil {
		username = userEntry.Username
		if username == "" {
			username = "UNKNOWN"
		}
	} else {
		username = "UNKNOWN"
	}
	hostname, err := os.Hostname()
	if err != nil {
		hostname = "UNKNOWN"
	}
	template := x509.Certificate{
		SerialNumber: serialNumber,
		Subject: pkix.Name{
			// NOTE(review): Organization still says "LXD" although the
			// package imports incus — presumably a fork leftover; confirm.
			Organization: []string{"LXD"},
			CommonName:   fmt.Sprintf("%s@%s", username, hostname),
		},
		NotBefore: validFrom,
		NotAfter:  validTo,
		// NOTE(review): KeyEncipherment has no effect for ECDSA keys
		// (it applies to RSA key transport); kept as-is.
		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		BasicConstraintsValid: true,
	}
	if client {
		template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}
	} else {
		template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}
	}
	if addHosts {
		hosts, err := mynames()
		if err != nil {
			return nil, nil, fmt.Errorf("Failed to get my hostname: %w", err)
		}
		// Entries that parse as CIDR become IP SANs (link-local addresses
		// are skipped); anything else is treated as a DNS name.
		for _, h := range hosts {
			ip, _, err := net.ParseCIDR(h)
			if err == nil {
				if !ip.IsLinkLocalUnicast() && !ip.IsLinkLocalMulticast() {
					template.IPAddresses = append(template.IPAddresses, ip)
				}
			} else {
				template.DNSNames = append(template.DNSNames, h)
			}
		}
	} else if !client {
		// Server certificates must carry at least one SAN; use a placeholder.
		template.DNSNames = []string{"unspecified"}
	}
	// Self-signed: the template is both subject and issuer.
	derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privk.PublicKey, privk)
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to create certificate: %w", err)
	}
	data, err := x509.MarshalECPrivateKey(privk)
	if err != nil {
		return nil, nil, err
	}
	cert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
	key := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: data})
	return cert, key, nil
}
// ReadCert reads a PEM encoded certificate from the given file path and
// parses it into an *x509.Certificate. Only the first PEM block is used.
func ReadCert(fpath string) (*x509.Certificate, error) {
	contents, err := os.ReadFile(fpath)
	if err != nil {
		return nil, err
	}

	block, _ := pem.Decode(contents)
	if block == nil {
		return nil, fmt.Errorf("Invalid certificate file")
	}

	return x509.ParseCertificate(block.Bytes)
}
// CertFingerprint returns the hex encoded SHA-256 digest of the raw (DER)
// certificate bytes.
func CertFingerprint(cert *x509.Certificate) string {
	digest := sha256.Sum256(cert.Raw)
	return fmt.Sprintf("%x", digest)
}
// CertFingerprintStr decodes a PEM encoded certificate string and returns
// its SHA-256 fingerprint via CertFingerprint.
func CertFingerprintStr(c string) (string, error) {
	block, _ := pem.Decode([]byte(c))
	if block == nil {
		return "", fmt.Errorf("invalid certificate")
	}

	parsed, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return "", err
	}

	return CertFingerprint(parsed), nil
}
// GetRemoteCertificate connects to the given HTTPS address and returns the
// leaf certificate presented by the remote server.
//
// Verification is disabled (InsecureSkipVerify) on purpose: this function
// exists to retrieve a certificate that is not yet trusted, e.g. so the
// caller can confirm its fingerprint before trusting it.
func GetRemoteCertificate(address string, useragent string) (*x509.Certificate, error) {
	// Setup a permissive TLS config
	tlsConfig, err := GetTLSConfig("", "", "", nil)
	if err != nil {
		return nil, err
	}

	tlsConfig.InsecureSkipVerify = true
	tr := &http.Transport{
		TLSClientConfig:       tlsConfig,
		DialContext:           RFC3493Dialer,
		Proxy:                 ProxyFromEnvironment,
		ExpectContinueTimeout: time.Second * 30,
		ResponseHeaderTimeout: time.Second * 3600,
		TLSHandshakeTimeout:   time.Second * 5,
	}

	// Connect
	req, err := http.NewRequest("GET", address, nil)
	if err != nil {
		return nil, err
	}

	if useragent != "" {
		req.Header.Set("User-Agent", useragent)
	}

	client := &http.Client{Transport: tr}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}

	// The body content is unused, but it must be closed to avoid leaking
	// the underlying connection (the original never closed it).
	defer func() { _ = resp.Body.Close() }()

	// Retrieve the certificate
	if resp.TLS == nil || len(resp.TLS.PeerCertificates) == 0 {
		return nil, fmt.Errorf("Unable to read remote TLS certificate")
	}

	return resp.TLS.PeerCertificates[0], nil
}
// CertificateTokenDecode decodes a base64 and JSON encoded certificate add token.
// All four fields (client name, addresses, secret, fingerprint) must be
// non-empty for the token to be considered valid.
func CertificateTokenDecode(input string) (*api.CertificateAddToken, error) {
	raw, err := base64.StdEncoding.DecodeString(input)
	if err != nil {
		return nil, err
	}

	var token api.CertificateAddToken
	if err := json.Unmarshal(raw, &token); err != nil {
		return nil, err
	}

	// Validate the mandatory fields, preserving the original error precedence.
	switch {
	case token.ClientName == "":
		return nil, fmt.Errorf("No client name in certificate add token")
	case len(token.Addresses) < 1:
		return nil, fmt.Errorf("No server addresses in certificate add token")
	case token.Secret == "":
		return nil, fmt.Errorf("No secret in certificate add token")
	case token.Fingerprint == "":
		return nil, fmt.Errorf("No certificate fingerprint in certificate add token")
	}

	return &token, nil
}
// GenerateTrustCertificate converts the specified serverCert and serverName into an api.Certificate suitable for
// use as a trusted cluster server certificate.
//
// The returned Certificate carries the base64 encoded DER bytes, the given
// name, the server certificate type, and the SHA-256 fingerprint of the PEM
// encoded public key.
func GenerateTrustCertificate(cert *CertInfo, name string) (*api.Certificate, error) {
	block, _ := pem.Decode(cert.PublicKey())
	if block == nil {
		return nil, fmt.Errorf("Failed to decode certificate")
	}
	fingerprint, err := CertFingerprintStr(string(cert.PublicKey()))
	if err != nil {
		return nil, fmt.Errorf("Failed to calculate fingerprint: %w", err)
	}
	// The API carries the raw DER as base64 (no PEM armor).
	certificate := base64.StdEncoding.EncodeToString(block.Bytes)
	apiCert := api.Certificate{
		CertificatePut: api.CertificatePut{
			Certificate: certificate,
			Name:        name,
			Type:        api.CertificateTypeServer, // Server type for intra-member communication.
		},
		Fingerprint: fingerprint,
	}
	return &apiCert, nil
}
// testCertPEMBlock is a self-signed test certificate (CN "alt.test",
// SAN "localhost") used by TestingKeyPair. Test use only — never deploy.
var testCertPEMBlock = []byte(`
-----BEGIN CERTIFICATE-----
MIIBzjCCAVSgAwIBAgIUJAEAVl1oOU+OQxj5aUrRdJDwuWEwCgYIKoZIzj0EAwMw
EzERMA8GA1UEAwwIYWx0LnRlc3QwHhcNMjIwNDEzMDQyMjA0WhcNMzIwNDEwMDQy
MjA0WjATMREwDwYDVQQDDAhhbHQudGVzdDB2MBAGByqGSM49AgEGBSuBBAAiA2IA
BGAmiHj98SXz0ZW1AxheW+zkFyPz5ZrZoZDY7NezGQpoH4KZ1x08X1jw67wv+M0c
W+yd2BThOcvItBO+HokJ03lgL6cgDojcmEEfZntgmGHjG7USqh48TrQtmt/uSJsD
4qNpMGcwHQYDVR0OBBYEFPOsHk3ewn4abmyzLgOXs3Bg8Dq9MB8GA1UdIwQYMBaA
FPOsHk3ewn4abmyzLgOXs3Bg8Dq9MA8GA1UdEwEB/wQFMAMBAf8wFAYDVR0RBA0w
C4IJbG9jYWxob3N0MAoGCCqGSM49BAMDA2gAMGUCMCKR+gWwN9VWXct8tDxCvlA6
+JP7iQPnLetiSLpyN4HEVQYP+EQhDJIJIy6+CwlUCQIxANQXfaTTrcVuhAb9dwVI
9bcu4cRGLEtbbNuOW/y+q7mXG0LtE/frDv/QrNpKhnnOzA==
-----END CERTIFICATE-----
`)

// testKeyPEMBlock is the private key matching testCertPEMBlock.
// Deliberately public; test use only.
var testKeyPEMBlock = []byte(`
-----BEGIN PRIVATE KEY-----
MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDBzlLjHjIxc5XHm95zB
p8cnUtHQcmdBy2Ekv+bbiaS/8M8Twp7Jvi47SruAY5gESK2hZANiAARgJoh4/fEl
89GVtQMYXlvs5Bcj8+Wa2aGQ2OzXsxkKaB+CmdcdPF9Y8Ou8L/jNHFvsndgU4TnL
yLQTvh6JCdN5YC+nIA6I3JhBH2Z7YJhh4xu1EqoePE60LZrf7kibA+I=
-----END PRIVATE KEY-----
`)

// testAltCertPEMBlock is a second, distinct test certificate used by
// TestingAltKeyPair. Test use only.
var testAltCertPEMBlock = []byte(`
-----BEGIN CERTIFICATE-----
MIIBzjCCAVSgAwIBAgIUK41+7aTdYLu3x3vGoDOqat10TmQwCgYIKoZIzj0EAwMw
EzERMA8GA1UEAwwIYWx0LnRlc3QwHhcNMjIwNDEzMDQyMzM0WhcNMzIwNDEwMDQy
MzM0WjATMREwDwYDVQQDDAhhbHQudGVzdDB2MBAGByqGSM49AgEGBSuBBAAiA2IA
BAHv2a3obPHcQVDQouW/A/M/l2xHUFINWvCIhA5gWCtj9RLWKD6veBR133qSr9w0
/DT96ZoTw7kJu/BQQFlRafmfMRTZcvXHLoPMoihBEkDqTGl2qwEQea/0MPi3thwJ
wqNpMGcwHQYDVR0OBBYEFKoF8yXx9lgBTQvZL2M8YqV4c4c5MB8GA1UdIwQYMBaA
FKoF8yXx9lgBTQvZL2M8YqV4c4c5MA8GA1UdEwEB/wQFMAMBAf8wFAYDVR0RBA0w
C4IJbG9jYWxob3N0MAoGCCqGSM49BAMDA2gAMGUCMQCcpYeYWmIL7QdUCGGRT8gt
YhQSciGzXlyncToAJ+A91dXGbGYvqfIti7R00sR+8cwCMAxglHP7iFzWrzn1M/Z9
H5bVDjnWZvsgEblThausOYxWxzxD+5dT5rItoVZOJhfPLw==
-----END CERTIFICATE-----
`)

// testAltKeyPEMBlock is the private key matching testAltCertPEMBlock.
// Deliberately public; test use only.
var testAltKeyPEMBlock = []byte(`
-----BEGIN PRIVATE KEY-----
MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDC3/Fv+SmNLfBy2AuUD
O3zHq1GMLvVfk3JkDIqqbKPJeEa2rS44bemExc8v85wVYTmhZANiAAQB79mt6Gzx
3EFQ0KLlvwPzP5dsR1BSDVrwiIQOYFgrY/US1ig+r3gUdd96kq/cNPw0/emaE8O5
CbvwUEBZUWn5nzEU2XL1xy6DzKIoQRJA6kxpdqsBEHmv9DD4t7YcCcI=
-----END PRIVATE KEY-----
`)
| {
return err
} | conditional_block |
ycsb.rs | /* Copyright (c) 2018 University of Utah
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
extern crate db;
extern crate rand;
extern crate splinter;
extern crate time;
extern crate zipf;
mod setup;
use std::cell::RefCell;
use std::fmt::Display;
use std::mem;
use std::mem::transmute;
use std::sync::Arc;
use db::config;
use db::cycles;
use db::e2d2::allocators::*;
use db::e2d2::interface::*;
use db::e2d2::scheduler::*;
use db::log::*;
use db::rpc::*;
use db::wireformat::*;
use rand::distributions::Sample;
use rand::{Rng, SeedableRng, XorShiftRng};
use zipf::ZipfDistribution;
use splinter::*;
// YCSB A, B, and C benchmark.
// The benchmark is created and parameterized with `new()`. Many threads
// share the same benchmark instance. Each thread can call `abc()` which
// runs the benchmark until another thread calls `stop()`. Each thread
// then returns their runtime and the number of gets and puts they have done.
// This benchmark doesn't care about how get/put are implemented; it takes
// function pointers to get/put on `new()` and just calls those as it runs.
//
// The tests below give an example of how to use it and how to aggregate the results.
pub struct Ycsb {
    // Percent (0-100) of sampled operations that are put()s; the rest are get()s.
    put_pct: usize,
    // Randomness source used both for the op-type coin flip and to drive the
    // Zipfian samplers below.
    rng: Box<dyn Rng>,
    // Zipfian distribution over the key space.
    key_rng: Box<ZipfDistribution>,
    // Zipfian distribution over tenant ids.
    tenant_rng: Box<ZipfDistribution>,
    // Reusable key buffer; only the first four bytes are rewritten per op,
    // the rest stay zero.
    key_buf: Vec<u8>,
    // Reusable all-zero value buffer for put() operations.
    value_buf: Vec<u8>,
}
impl Ycsb {
    // Create a new benchmark instance.
    //
    // # Arguments
    //  - key_len: Length of the keys to generate per get/put. Most bytes will be zero, since
    //    the benchmark populates them from a random 32-bit value.
    //  - value_len: Length of the values to store per put. Always all zero bytes.
    //  - n_keys: Number of keys from which random keys are drawn.
    //  - put_pct: Number between 0 and 100 indicating percent of ops that are sets.
    //  - skew: Zipfian skew parameter. 0.99 is YCSB default.
    //  - n_tenants: The number of tenants from which the tenant id is chosen.
    //  - tenant_skew: The skew in the Zipfian distribution from which tenant id's are drawn.
    //
    // # Return
    // A new instance of YCSB that threads can call `abc()` on to run.
    fn new(
        key_len: usize,
        value_len: usize,
        n_keys: usize,
        put_pct: usize,
        skew: f64,
        n_tenants: u32,
        tenant_skew: f64,
    ) -> Ycsb {
        let seed: [u32; 4] = rand::random::<[u32; 4]>();

        // Pre-allocate the (mostly zero) key and value buffers once; abc()
        // reuses them for every operation.
        let mut key_buf: Vec<u8> = Vec::with_capacity(key_len);
        key_buf.resize(key_len, 0);
        let mut value_buf: Vec<u8> = Vec::with_capacity(value_len);
        value_buf.resize(value_len, 0);

        Ycsb {
            put_pct: put_pct,
            rng: Box::new(XorShiftRng::from_seed(seed)),
            key_rng: Box::new(
                ZipfDistribution::new(n_keys, skew).expect("Couldn't create key RNG."),
            ),
            tenant_rng: Box::new(
                ZipfDistribution::new(n_tenants as usize, tenant_skew)
                    .expect("Couldn't create tenant RNG."),
            ),
            key_buf: key_buf,
            value_buf: value_buf,
        }
    }

    // Run one YCSB A, B, or C operation (depending on `new()` parameters).
    //
    // # Arguments
    //  - get: A function that fetches the data stored under a bytestring key of `self.key_len` bytes.
    //  - set: A function that stores the data stored under a bytestring key of `self.key_len` bytes
    //         with a bytestring value of `self.value_len` bytes.
    //
    // # Return
    // Whatever the invoked closure (`get` or `put`) returns. (The original
    // comment claimed a three-tuple; the generic return `R` comes straight
    // from the closures.)
    pub fn abc<G, P, R>(&mut self, mut get: G, mut put: P) -> R
    where
        G: FnMut(u32, &[u8]) -> R,
        P: FnMut(u32, &[u8], &[u8]) -> R,
    {
        // Coin flip: put_pct percent of ops are puts.
        let is_get = (self.rng.gen::<u32>() % 100) >= self.put_pct as u32;

        // Sample a tenant.
        let t = self.tenant_rng.sample(&mut self.rng) as u32;

        // Sample a key, and convert into a little endian byte array.
        let k = self.key_rng.sample(&mut self.rng) as u32;
        let k: [u8; 4] = unsafe { transmute(k.to_le()) };
        self.key_buf[0..mem::size_of::<u32>()].copy_from_slice(&k);

        if is_get {
            get(t, self.key_buf.as_slice())
        } else {
            put(t, self.key_buf.as_slice(), self.value_buf.as_slice())
        }
    }
}
/// Sends out YCSB based RPC requests to a Sandstorm server.
struct YcsbSend {
    // The actual YCSB workload. Required to generate keys and values for get() and put() requests.
    workload: RefCell<Ycsb>,

    // Network stack required to actually send RPC requests out the network.
    sender: dispatch::Sender,

    // Total number of requests to be sent out.
    requests: u64,

    // Number of requests that have been sent out so far.
    sent: u64,

    // The inverse of the rate at which requests are to be generated. Basically, the time interval
    // between two request generations in cycles.
    rate_inv: u64,

    // The time stamp at which the workload started generating requests in cycles.
    start: u64,

    // The time stamp at which the next request must be issued in cycles.
    next: u64,

    // If true, RPC requests corresponding to native get() and put() operations are sent out. If
    // false, invoke() based RPC requests are sent out.
    native: bool,

    // Payload for an invoke() based get operation. Required in order to avoid making intermediate
    // copies of the extension name, table id, and key.
    payload_get: RefCell<Vec<u8>>,

    // Payload for an invoke() based put operation. Required in order to avoid making intermediate
    // copies of the extension name, table id, key length, key, and value.
    payload_put: RefCell<Vec<u8>>,
}
// Implementation of methods on YcsbSend.
impl YcsbSend {
    /// Constructs a YcsbSend.
    ///
    /// # Arguments
    ///
    /// * `config`: Client configuration with YCSB related (key and value length etc.) as well as
    ///             Network related (Server and Client MAC address etc.) parameters.
    /// * `port`:   Network port over which requests will be sent out.
    /// * `reqs`:   The number of requests to be issued to the server.
    /// * `dst_ports`: The total number of UDP ports the server is listening on.
    ///
    /// # Return
    ///
    /// A YCSB request generator. Both invoke() payload templates are
    /// pre-built here so that execute() only has to patch in the 4 key bytes.
    fn new(
        config: &config::ClientConfig,
        port: CacheAligned<PortQueue>,
        reqs: u64,
        dst_ports: u16,
    ) -> YcsbSend {
        // The payload on an invoke() based get request consists of the extensions name ("get"),
        // the table id to perform the lookup on, and the key to lookup.
        // Layout: "get" (3 bytes) | table id 1, little endian (8 bytes) | key.
        let payload_len = "get".as_bytes().len() + mem::size_of::<u64>() + config.key_len;
        let mut payload_get = Vec::with_capacity(payload_len);
        payload_get.extend_from_slice("get".as_bytes());
        payload_get.extend_from_slice(&unsafe { transmute::<u64, [u8; 8]>(1u64.to_le()) });
        payload_get.resize(payload_len, 0);

        // The payload on an invoke() based put request consists of the extensions name ("put"),
        // the table id to perform the lookup on, the length of the key to lookup, the key, and the
        // value to be inserted into the database.
        // Layout: "put" (3) | table id (8) | key length, little endian (2) | key | value.
        let payload_len = "put".as_bytes().len()
            + mem::size_of::<u64>()
            + mem::size_of::<u16>()
            + config.key_len
            + config.value_len;
        let mut payload_put = Vec::with_capacity(payload_len);
        payload_put.extend_from_slice("put".as_bytes());
        payload_put.extend_from_slice(&unsafe { transmute::<u64, [u8; 8]>(1u64.to_le()) });
        payload_put.extend_from_slice(&unsafe {
            transmute::<u16, [u8; 2]>((config.key_len as u16).to_le())
        });
        payload_put.resize(payload_len, 0);

        YcsbSend {
            workload: RefCell::new(Ycsb::new(
                config.key_len,
                config.value_len,
                config.n_keys,
                config.put_pct,
                config.skew,
                config.num_tenants,
                config.tenant_skew,
            )),
            sender: dispatch::Sender::new(config, port, dst_ports),
            requests: reqs,
            sent: 0,
            // Inter-request gap in cycles for the configured request rate.
            rate_inv: cycles::cycles_per_second() / config.req_rate as u64,
            start: cycles::rdtsc(),
            next: 0,
            native: !config.use_invoke,
            payload_get: RefCell::new(payload_get),
            payload_put: RefCell::new(payload_put),
        }
    }
}
// The Executable trait allowing YcsbSend to be scheduled by Netbricks.
impl Executable for YcsbSend {
    // Called internally by Netbricks. Sends at most one request per call,
    // paced open-loop against the configured request rate.
    fn execute(&mut self) {
        // Return if there are no more requests to generate.
        if self.requests <= self.sent {
            return;
        }

        // Get the current time stamp so that we can determine if it is time to issue the next RPC.
        let curr = cycles::rdtsc();

        // If it is either time to send out a request, or if a request has never been sent out,
        // then, do so.
        if curr >= self.next || self.next == 0 {
            if self.native == true {
                // Configured to issue native RPCs, issue a regular get()/put() operation.
                // Table id is hard-wired to 1, matching the invoke() payloads.
                self.workload.borrow_mut().abc(
                    |tenant, key| self.sender.send_get(tenant, 1, key, curr),
                    |tenant, key, val| self.sender.send_put(tenant, 1, key, val, curr),
                );
            } else {
                // Configured to issue invoke() RPCs.
                let mut p_get = self.payload_get.borrow_mut();
                let mut p_put = self.payload_put.borrow_mut();

                // XXX Heavily dependent on how `Ycsb` creates a key. Only the first four
                // bytes of the key matter, the rest are zero. The value is always zero.
                self.workload.borrow_mut().abc(
                    |tenant, key| {
                        // First 11 bytes on the payload were already pre-populated with the
                        // extension name (3 bytes), and the table id (8 bytes). Just write in the
                        // first 4 bytes of the key.
                        p_get[11..15].copy_from_slice(&key[0..4]);
                        self.sender.send_invoke(tenant, 3, &p_get, curr)
                    },
                    |tenant, key, _val| {
                        // First 13 bytes on the payload were already pre-populated with the
                        // extension name (3 bytes), the table id (8 bytes), and the key length (2
                        // bytes). Just write in the first 4 bytes of the key. The value is anyway
                        // always zero.
                        p_put[13..17].copy_from_slice(&key[0..4]);
                        self.sender.send_invoke(tenant, 3, &p_put, curr)
                    },
                );
            }

            // Update the time stamp at which the next request should be generated, assuming that
            // the first request was sent out at self.start. Anchoring on start
            // (rather than curr) keeps the long-run rate on target.
            self.sent += 1;
            self.next = self.start + self.sent * self.rate_inv;
        }
    }

    // No scheduling dependencies on other tasks.
    fn dependencies(&mut self) -> Vec<usize> {
        vec![]
    }
}
/// Receives responses to YCSB requests sent out by YcsbSend.
struct YcsbRecv<T>
where
    T: PacketTx + PacketRx + Display + Clone + 'static,
{
    // The network stack required to receives RPC response packets from a network port.
    receiver: dispatch::Receiver<T>,

    // The number of response packets to wait for before printing out statistics.
    responses: u64,

    // Time stamp in cycles at which measurement started. Required to calculate observed
    // throughput of the Sandstorm server.
    start: u64,

    // The total number of responses received so far.
    recvd: u64,

    // Vector of sampled request latencies (in cycles). Required to calculate
    // distributions once all responses have been received.
    latencies: Vec<u64>,

    // If true, this receiver will make latency measurements.
    master: bool,

    // If true, then responses will be considered to correspond to native gets and puts.
    native: bool,

    // Time stamp in cycles at which measurement stopped (set once all
    // expected responses have arrived).
    stop: u64,
}
// Implementation of methods on YcsbRecv.
impl<T> YcsbRecv<T>
where
    T: PacketTx + PacketRx + Display + Clone + 'static,
{
    /// Constructs a YcsbRecv.
    ///
    /// # Arguments
    ///
    /// * `port` :  Network port on which responses will be polled for.
    /// * `resps`:  The number of responses to wait for before calculating statistics.
    /// * `master`: Boolean indicating if the receiver should make latency measurements.
    /// * `native`: If true, responses will be considered to correspond to native gets and puts.
    ///
    /// # Return
    ///
    /// A YCSB response receiver that measures the median latency and throughput of a Sandstorm
    /// server. Statistics are printed when the receiver is dropped.
    fn new(port: T, resps: u64, master: bool, native: bool) -> YcsbRecv<T> {
        YcsbRecv {
            receiver: dispatch::Receiver::new(port),
            responses: resps,
            start: cycles::rdtsc(),
            recvd: 0,
            // Reserve space for every possible sample up front so pushes in
            // the receive path never reallocate.
            latencies: Vec::with_capacity(resps as usize),
            master: master,
            native: native,
            stop: 0,
        }
    }
}
// Implementation of the `Drop` trait on YcsbRecv.
impl<T> Drop for YcsbRecv<T>
where
    T: PacketTx + PacketRx + Display + Clone + 'static,
{
    /// Prints the measured throughput and, on the master thread, the median
    /// and 99th percentile latency of the recorded samples.
    fn drop(&mut self) {
        // Calculate & print the throughput for all client threads.
        println!(
            "YCSB Throughput {}",
            self.recvd as f64 / cycles::to_seconds(self.stop - self.start)
        );

        // Calculate & print median & tail latency only on the master thread.
        // Skip the statistics entirely if nothing was sampled; indexing into
        // an empty vector here would panic inside drop.
        if self.master && !self.latencies.is_empty() {
            self.latencies.sort();

            let n = self.latencies.len();
            let t = self.latencies[(n * 99) / 100];
            // Median: with an even number of samples, average the two middle
            // elements (indices n/2 - 1 and n/2). The previous code averaged
            // indices n/2 and n/2 + 1, which is off by one and could panic
            // when n/2 + 1 == n (e.g. n == 2).
            let m = if n % 2 == 0 {
                (self.latencies[n / 2 - 1] + self.latencies[n / 2]) / 2
            } else {
                self.latencies[n / 2]
            };

            println!(
                ">>> {} {}",
                cycles::to_seconds(m) * 1e9,
                cycles::to_seconds(t) * 1e9
            );
        }
    }
}
// Executable trait allowing YcsbRecv to be scheduled by Netbricks.
impl<T> Executable for YcsbRecv<T>
where
    T: PacketTx + PacketRx + Display + Clone + 'static,
{
    // Called internally by Netbricks. Drains any pending response packets,
    // counting all of them and sampling latencies on the master thread.
    fn execute(&mut self) {
        // Don't do anything after all responses have been received.
        if self.responses <= self.recvd {
            return;
        }

        // Try to receive packets from the network port.
        // If there are packets, sample the latency of the server.
        if let Some(mut packets) = self.receiver.recv_res() {
            while let Some(packet) = packets.pop() {
                self.recvd += 1;

                // Measure latency on the master client after the first 2 million requests
                // (warm-up period; earlier responses are counted but not sampled).
                // The start timestamp is present on the RPC response header.
                if self.recvd > 2 * 1000 * 1000 && self.master {
                    let curr = cycles::rdtsc();
                    match self.native {
                        // The response corresponds to an invoke() RPC.
                        false => {
                            let p = packet.parse_header::<InvokeResponse>();
                            self.latencies
                                .push(curr - p.get_header().common_header.stamp);
                            p.free_packet();
                        }

                        // The response corresponds to a get() or put() RPC.
                        // The opcode on the response identifies the RPC type.
                        true => match parse_rpc_opcode(&packet) {
                            OpCode::SandstormGetRpc => {
                                let p = packet.parse_header::<GetResponse>();
                                self.latencies
                                    .push(curr - p.get_header().common_header.stamp);
                                p.free_packet();
                            }

                            OpCode::SandstormPutRpc => {
                                let p = packet.parse_header::<PutResponse>();
                                self.latencies
                                    .push(curr - p.get_header().common_header.stamp);
                                p.free_packet();
                            }

                            // Unknown opcode: drop the packet without sampling.
                            _ => packet.free_packet(),
                        },
                    }
                } else {
                    packet.free_packet();
                }
            }
        }

        // The moment all response packets have been received, set the value of the
        // stop timestamp so that throughput can be estimated later.
        if self.responses <= self.recvd {
            self.stop = cycles::rdtsc();
        }
    }

    // No scheduling dependencies on other tasks.
    fn dependencies(&mut self) -> Vec<usize> {
        vec![]
    }
}
/// Sets up YcsbSend by adding it to a Netbricks scheduler.
///
/// # Arguments
///
/// * `config`: Network related configuration such as the MAC and IP address.
/// * `ports`: Network port on which packets will be sent.
/// * `scheduler`: Netbricks scheduler to which YcsbSend will be added.
fn setup_send<S>(
config: &config::ClientConfig,
ports: Vec<CacheAligned<PortQueue>>,
scheduler: &mut S,
_core: i32,
) where
S: Scheduler + Sized,
{
if ports.len() != 1 {
error!("Client should be configured with exactly 1 port!");
std::process::exit(1);
}
// Add the sender to a netbricks pipeline.
match scheduler.add_task(YcsbSend::new(
config,
ports[0].clone(),
config.num_reqs as u64,
config.server_udp_ports as u16,
)) {
Ok(_) => {
info!(
"Successfully added YcsbSend with tx queue {}.",
ports[0].txq()
);
}
Err(ref err) => {
error!("Error while adding to Netbricks pipeline {}", err);
std::process::exit(1);
}
}
}
/// Sets up YcsbRecv by adding it to a Netbricks scheduler.
///
/// # Arguments
///
/// * `ports`: Network port on which packets will be sent.
/// * `scheduler`: Netbricks scheduler to which YcsbRecv will be added.
/// * `master`: If true, the added YcsbRecv will make latency measurements.
/// * `native`: If true, the added YcsbRecv will assume that responses correspond to gets
/// and puts.
fn setup_recv<S>(
ports: Vec<CacheAligned<PortQueue>>,
scheduler: &mut S,
_core: i32,
master: bool,
native: bool,
) where
S: Scheduler + Sized,
{
if ports.len() != 1 |
// Add the receiver to a netbricks pipeline.
match scheduler.add_task(YcsbRecv::new(
ports[0].clone(),
34 * 1000 * 1000 as u64,
master,
native,
)) {
Ok(_) => {
info!(
"Successfully added YcsbRecv with rx queue {}.",
ports[0].rxq()
);
}
Err(ref err) => {
error!("Error while adding to Netbricks pipeline {}", err);
std::process::exit(1);
}
}
}
fn main() {
db::env_logger::init().expect("ERROR: failed to initialize logger!");
let config = config::ClientConfig::load();
info!("Starting up Sandstorm client with config {:?}", config);
// Based on the supplied client configuration, compute the amount of time it will take to send
// out `num_reqs` requests at a rate of `req_rate` requests per second.
let exec = config.num_reqs / config.req_rate;
// Setup Netbricks.
let mut net_context = setup::config_and_init_netbricks(&config);
// Setup the client pipeline.
net_context.start_schedulers();
// The core id's which will run the sender and receiver threads.
// XXX The following two arrays heavily depend on the set of cores
// configured in setup.rs
let senders = [0, 2, 4, 6];
let receive = [1, 3, 5, 7];
assert!((senders.len() == 4) && (receive.len() == 4));
// Setup 4 senders, and 4 receivers.
for i in 0..4 {
// First, retrieve a tx-rx queue pair from Netbricks
let port = net_context
.rx_queues
.get(&senders[i])
.expect("Failed to retrieve network port!")
.clone();
let mut master = false;
if i == 0 {
master = true;
}
let native = !config.use_invoke;
// Setup the receive side.
net_context
.add_pipeline_to_core(
receive[i],
Arc::new(
move |_ports, sched: &mut StandaloneScheduler, core: i32, _sibling| {
setup_recv(port.clone(), sched, core, master, native)
},
),
)
.expect("Failed to initialize receive side.");
// Setup the send side.
net_context
.add_pipeline_to_core(
senders[i],
Arc::new(
move |ports, sched: &mut StandaloneScheduler, core: i32, _sibling| {
setup_send(&config::ClientConfig::load(), ports, sched, core)
},
),
)
.expect("Failed to initialize send side.");
}
// Allow the system to bootup fully.
std::thread::sleep(std::time::Duration::from_secs(1));
// Run the client.
net_context.execute();
// Sleep for an amount of time approximately equal to the estimated execution time, and then
// shutdown the client.
std::thread::sleep(std::time::Duration::from_secs(exec as u64 + 11));
// Stop the client.
net_context.stop();
}
#[cfg(test)]
mod test {
use std;
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::{Duration, Instant};
#[test]
fn ycsb_abc_basic() {
let n_threads = 1;
let mut threads = Vec::with_capacity(n_threads);
let done = Arc::new(AtomicBool::new(false));
for _ in 0..n_threads {
let done = done.clone();
threads.push(thread::spawn(move || {
let mut b = super::Ycsb::new(10, 100, 1000000, 5, 0.99, 1024, 0.1);
let mut n_gets = 0u64;
let mut n_puts = 0u64;
let start = Instant::now();
while !done.load(Ordering::Relaxed) {
b.abc(|_t, _key| n_gets += 1, |_t, _key, _value| n_puts += 1);
}
(start.elapsed(), n_gets, n_puts)
}));
}
thread::sleep(Duration::from_secs(2));
done.store(true, Ordering::Relaxed);
// Iterate across all threads. Return a tupule whose first member consists
// of the highest execution time across all threads, and whose second member
// is the sum of the number of iterations run on each benchmark thread.
// Dividing the second member by the first, will yeild the throughput.
let (duration, n_gets, n_puts) = threads
.into_iter()
.map(|t| t.join().expect("ERROR: Thread join failed."))
.fold(
(Duration::new(0, 0), 0, 0),
|(ldur, lgets, lputs), (rdur, rgets, rputs)| {
(std::cmp::max(ldur, rdur), lgets + rgets, lputs + rputs)
},
);
let secs = duration.as_secs() as f64 + (duration.subsec_nanos() as f64 / 1e9);
println!(
"{} threads: {:.0} gets/s {:.0} puts/s {:.0} ops/s",
n_threads,
n_gets as f64 / secs,
n_puts as f64 / secs,
(n_gets + n_puts) as f64 / secs
);
}
// Convert a key to u32 assuming little endian.
fn convert_key(key: &[u8]) -> u32 {
assert_eq!(4, key.len());
let k: u32 = 0
| key[0] as u32
| (key[1] as u32) << 8
| (key[2] as u32) << 16
| (key[3] as u32) << 24;
k
}
#[test]
fn ycsb_abc_histogram() {
let hist = Arc::new(Mutex::new(HashMap::new()));
let n_keys = 20;
let n_threads = 1;
let mut threads = Vec::with_capacity(n_threads);
let done = Arc::new(AtomicBool::new(false));
for _ in 0..n_threads {
let hist = hist.clone();
let done = done.clone();
threads.push(thread::spawn(move || {
let mut b = super::Ycsb::new(4, 100, n_keys, 5, 0.99, 1024, 0.1);
let mut n_gets = 0u64;
let mut n_puts = 0u64;
let start = Instant::now();
while !done.load(Ordering::Relaxed) {
b.abc(
|_t, key| {
// get
let k = convert_key(key);
let mut ht = hist.lock().unwrap();
ht.entry(k).or_insert((0, 0)).0 += 1;
n_gets += 1
},
|_t, key, _value| {
// put
let k = convert_key(key);
let mut ht = hist.lock().unwrap();
ht.entry(k).or_insert((0, 0)).1 += 1;
n_puts += 1
},
);
}
(start.elapsed(), n_gets, n_puts)
}));
}
thread::sleep(Duration::from_secs(2));
done.store(true, Ordering::Relaxed);
// Iterate across all threads. Return a tupule whose first member consists
// of the highest execution time across all threads, and whose second member
// is the sum of the number of iterations run on each benchmark thread.
// Dividing the second member by the first, will yeild the throughput.
let (duration, n_gets, n_puts) = threads
.into_iter()
.map(|t| t.join().expect("ERROR: Thread join failed."))
.fold(
(Duration::new(0, 0), 0, 0),
|(ldur, lgets, lputs), (rdur, rgets, rputs)| {
(std::cmp::max(ldur, rdur), lgets + rgets, lputs + rputs)
},
);
let secs = duration.as_secs() as f64 + (duration.subsec_nanos() as f64 / 1e9);
println!(
"{} threads: {:.0} gets/s {:.0} puts/s {:.0} ops/s",
n_threads,
n_gets as f64 / secs,
n_puts as f64 / secs,
(n_gets + n_puts) as f64 / secs
);
let ht = hist.lock().unwrap();
let mut kvs: Vec<_> = ht.iter().collect();
kvs.sort();
let v: Vec<_> = kvs
.iter()
.map(|&(k, v)| println!("Key {:?}: {:?} gets/puts", k, v))
.collect();
println!("Unique key count: {}", v.len());
assert_eq!(n_keys, v.len());
let total: i64 = kvs.iter().map(|&(_, &(g, s))| (g + s) as i64).sum();
let mut sum = 0;
for &(k, v) in kvs.iter() {
let &(g, s) = v;
sum += g + s;
let percentile = sum as f64 / total as f64;
println!("Key {:?}: {:?} percentile", k, percentile);
}
// For 20 keys median key should be near 4th key, so this checks out.
}
}
| {
error!("Client should be configured with exactly 1 port!");
std::process::exit(1);
} | conditional_block |
ycsb.rs | /* Copyright (c) 2018 University of Utah
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
extern crate db;
extern crate rand;
extern crate splinter;
extern crate time;
extern crate zipf;
mod setup;
use std::cell::RefCell;
use std::fmt::Display;
use std::mem;
use std::mem::transmute;
use std::sync::Arc;
use db::config;
use db::cycles;
use db::e2d2::allocators::*;
use db::e2d2::interface::*;
use db::e2d2::scheduler::*;
use db::log::*;
use db::rpc::*;
use db::wireformat::*;
use rand::distributions::Sample;
use rand::{Rng, SeedableRng, XorShiftRng};
use zipf::ZipfDistribution;
use splinter::*;
// YCSB A, B, and C benchmark.
// The benchmark is created and parameterized with `new()`. Many threads
// share the same benchmark instance. Each thread can call `abc()` which
// runs the benchmark until another thread calls `stop()`. Each thread
// then returns their runtime and the number of gets and puts they have done.
// This benchmark doesn't care about how get/put are implemented; it takes
// function pointers to get/put on `new()` and just calls those as it runs.
//
// The tests below give an example of how to use it and how to aggregate the results.
pub struct Ycsb {
put_pct: usize,
rng: Box<dyn Rng>,
key_rng: Box<ZipfDistribution>,
tenant_rng: Box<ZipfDistribution>,
key_buf: Vec<u8>,
value_buf: Vec<u8>,
}
impl Ycsb {
// Create a new benchmark instance.
//
// # Arguments
// - key_len: Length of the keys to generate per get/put. Most bytes will be zero, since
// the benchmark poplates them from a random 32-bit value.
// - value_len: Length of the values to store per put. Always all zero bytes.
// - n_keys: Number of keys from which random keys are drawn.
// - put_pct: Number between 0 and 100 indicating percent of ops that are sets.
// - skew: Zipfian skew parameter. 0.99 is YCSB default.
// - n_tenants: The number of tenants from which the tenant id is chosen.
// - tenant_skew: The skew in the Zipfian distribution from which tenant id's are drawn.
// # Return
// A new instance of YCSB that threads can call `abc()` on to run.
fn new(
key_len: usize,
value_len: usize,
n_keys: usize,
put_pct: usize,
skew: f64,
n_tenants: u32,
tenant_skew: f64,
) -> Ycsb {
let seed: [u32; 4] = rand::random::<[u32; 4]>();
let mut key_buf: Vec<u8> = Vec::with_capacity(key_len);
key_buf.resize(key_len, 0);
let mut value_buf: Vec<u8> = Vec::with_capacity(value_len);
value_buf.resize(value_len, 0);
Ycsb {
put_pct: put_pct,
rng: Box::new(XorShiftRng::from_seed(seed)),
key_rng: Box::new(
ZipfDistribution::new(n_keys, skew).expect("Couldn't create key RNG."),
),
tenant_rng: Box::new(
ZipfDistribution::new(n_tenants as usize, tenant_skew)
.expect("Couldn't create tenant RNG."),
),
key_buf: key_buf,
value_buf: value_buf,
}
}
// Run YCSB A, B, or C (depending on `new()` parameters).
// The calling thread will not return until `done()` is called on this `Ycsb` instance.
//
// # Arguments
// - get: A function that fetches the data stored under a bytestring key of `self.key_len` bytes.
// - set: A function that stores the data stored under a bytestring key of `self.key_len` bytes
// with a bytestring value of `self.value_len` bytes.
// # Return
// A three tuple consisting of the duration that this thread ran the benchmark, the
// number of gets it performed, and the number of puts it performed.
pub fn abc<G, P, R>(&mut self, mut get: G, mut put: P) -> R
where
G: FnMut(u32, &[u8]) -> R,
P: FnMut(u32, &[u8], &[u8]) -> R,
{
let is_get = (self.rng.gen::<u32>() % 100) >= self.put_pct as u32;
// Sample a tenant.
let t = self.tenant_rng.sample(&mut self.rng) as u32;
// Sample a key, and convert into a little endian byte array.
let k = self.key_rng.sample(&mut self.rng) as u32;
let k: [u8; 4] = unsafe { transmute(k.to_le()) };
self.key_buf[0..mem::size_of::<u32>()].copy_from_slice(&k);
if is_get {
get(t, self.key_buf.as_slice())
} else {
put(t, self.key_buf.as_slice(), self.value_buf.as_slice())
}
}
}
/// Sends out YCSB based RPC requests to a Sandstorm server.
struct YcsbSend {
// The actual YCSB workload. Required to generate keys and values for get() and put() requests.
workload: RefCell<Ycsb>,
// Network stack required to actually send RPC requests out the network.
sender: dispatch::Sender,
// Total number of requests to be sent out.
requests: u64,
// Number of requests that have been sent out so far.
sent: u64,
// The inverse of the rate at which requests are to be generated. Basically, the time interval
// between two request generations in cycles.
rate_inv: u64,
// The time stamp at which the workload started generating requests in cycles.
start: u64,
// The time stamp at which the next request must be issued in cycles.
next: u64,
// If true, RPC requests corresponding to native get() and put() operations are sent out. If
// false, invoke() based RPC requests are sent out.
native: bool,
// Payload for an invoke() based get operation. Required in order to avoid making intermediate
// copies of the extension name, table id, and key.
payload_get: RefCell<Vec<u8>>,
// Payload for an invoke() based put operation. Required in order to avoid making intermediate
// copies of the extension name, table id, key length, key, and value.
payload_put: RefCell<Vec<u8>>,
}
// Implementation of methods on YcsbSend.
impl YcsbSend {
/// Constructs a YcsbSend.
///
/// # Arguments
///
/// * `config`: Client configuration with YCSB related (key and value length etc.) as well as
/// Network related (Server and Client MAC address etc.) parameters.
/// * `port`: Network port over which requests will be sent out.
/// * `reqs`: The number of requests to be issued to the server.
/// * `dst_ports`: The total number of UDP ports the server is listening on.
///
/// # Return
///
/// A YCSB request generator.
fn new(
config: &config::ClientConfig,
port: CacheAligned<PortQueue>,
reqs: u64,
dst_ports: u16,
) -> YcsbSend {
// The payload on an invoke() based get request consists of the extensions name ("get"),
// the table id to perform the lookup on, and the key to lookup.
let payload_len = "get".as_bytes().len() + mem::size_of::<u64>() + config.key_len;
let mut payload_get = Vec::with_capacity(payload_len);
payload_get.extend_from_slice("get".as_bytes());
payload_get.extend_from_slice(&unsafe { transmute::<u64, [u8; 8]>(1u64.to_le()) });
payload_get.resize(payload_len, 0);
// The payload on an invoke() based put request consists of the extensions name ("put"),
// the table id to perform the lookup on, the length of the key to lookup, the key, and the
// value to be inserted into the database.
let payload_len = "put".as_bytes().len()
+ mem::size_of::<u64>()
+ mem::size_of::<u16>()
+ config.key_len
+ config.value_len;
let mut payload_put = Vec::with_capacity(payload_len);
payload_put.extend_from_slice("put".as_bytes());
payload_put.extend_from_slice(&unsafe { transmute::<u64, [u8; 8]>(1u64.to_le()) });
payload_put.extend_from_slice(&unsafe {
transmute::<u16, [u8; 2]>((config.key_len as u16).to_le())
});
payload_put.resize(payload_len, 0);
YcsbSend {
workload: RefCell::new(Ycsb::new(
config.key_len,
config.value_len,
config.n_keys,
config.put_pct,
config.skew,
config.num_tenants,
config.tenant_skew,
)),
sender: dispatch::Sender::new(config, port, dst_ports),
requests: reqs,
sent: 0,
rate_inv: cycles::cycles_per_second() / config.req_rate as u64,
start: cycles::rdtsc(),
next: 0,
native: !config.use_invoke,
payload_get: RefCell::new(payload_get),
payload_put: RefCell::new(payload_put),
}
}
}
// The Executable trait allowing YcsbSend to be scheduled by Netbricks.
impl Executable for YcsbSend {
// Called internally by Netbricks.
fn execute(&mut self) |
fn dependencies(&mut self) -> Vec<usize> {
vec![]
}
}
/// Receives responses to YCSB requests sent out by YcsbSend.
struct YcsbRecv<T>
where
T: PacketTx + PacketRx + Display + Clone + 'static,
{
// The network stack required to receives RPC response packets from a network port.
receiver: dispatch::Receiver<T>,
// The number of response packets to wait for before printing out statistics.
responses: u64,
// Time stamp in cycles at which measurement started. Required to calculate observed
// throughput of the Sandstorm server.
start: u64,
// The total number of responses received so far.
recvd: u64,
// Vector of sampled request latencies. Required to calculate distributions once all responses
// have been received.
latencies: Vec<u64>,
// If true, this receiver will make latency measurements.
master: bool,
// If true, then responses will be considered to correspond to native gets and puts.
native: bool,
// Time stamp in cycles at which measurement stopped.
stop: u64,
}
// Implementation of methods on YcsbRecv.
impl<T> YcsbRecv<T>
where
T: PacketTx + PacketRx + Display + Clone + 'static,
{
/// Constructs a YcsbRecv.
///
/// # Arguments
///
/// * `port` : Network port on which responses will be polled for.
/// * `resps`: The number of responses to wait for before calculating statistics.
/// * `master`: Boolean indicating if the receiver should make latency measurements.
/// * `native`: If true, responses will be considered to correspond to native gets and puts.
///
/// # Return
///
/// A YCSB response receiver that measures the median latency and throughput of a Sandstorm
/// server.
fn new(port: T, resps: u64, master: bool, native: bool) -> YcsbRecv<T> {
YcsbRecv {
receiver: dispatch::Receiver::new(port),
responses: resps,
start: cycles::rdtsc(),
recvd: 0,
latencies: Vec::with_capacity(resps as usize),
master: master,
native: native,
stop: 0,
}
}
}
// Implementation of the `Drop` trait on YcsbRecv.
impl<T> Drop for YcsbRecv<T>
where
T: PacketTx + PacketRx + Display + Clone + 'static,
{
fn drop(&mut self) {
// Calculate & print the throughput for all client threads.
println!(
"YCSB Throughput {}",
self.recvd as f64 / cycles::to_seconds(self.stop - self.start)
);
// Calculate & print median & tail latency only on the master thread.
if self.master {
self.latencies.sort();
let m;
let t = self.latencies[(self.latencies.len() * 99) / 100];
match self.latencies.len() % 2 {
0 => {
let n = self.latencies.len();
m = (self.latencies[n / 2] + self.latencies[(n / 2) + 1]) / 2;
}
_ => m = self.latencies[self.latencies.len() / 2],
}
println!(
">>> {} {}",
cycles::to_seconds(m) * 1e9,
cycles::to_seconds(t) * 1e9
);
}
}
}
// Executable trait allowing YcsbRecv to be scheduled by Netbricks.
impl<T> Executable for YcsbRecv<T>
where
T: PacketTx + PacketRx + Display + Clone + 'static,
{
// Called internally by Netbricks.
fn execute(&mut self) {
// Don't do anything after all responses have been received.
if self.responses <= self.recvd {
return;
}
// Try to receive packets from the network port.
// If there are packets, sample the latency of the server.
if let Some(mut packets) = self.receiver.recv_res() {
while let Some(packet) = packets.pop() {
self.recvd += 1;
// Measure latency on the master client after the first 2 million requests.
// The start timestamp is present on the RPC response header.
if self.recvd > 2 * 1000 * 1000 && self.master {
let curr = cycles::rdtsc();
match self.native {
// The response corresponds to an invoke() RPC.
false => {
let p = packet.parse_header::<InvokeResponse>();
self.latencies
.push(curr - p.get_header().common_header.stamp);
p.free_packet();
}
// The response corresponds to a get() or put() RPC.
// The opcode on the response identifies the RPC type.
true => match parse_rpc_opcode(&packet) {
OpCode::SandstormGetRpc => {
let p = packet.parse_header::<GetResponse>();
self.latencies
.push(curr - p.get_header().common_header.stamp);
p.free_packet();
}
OpCode::SandstormPutRpc => {
let p = packet.parse_header::<PutResponse>();
self.latencies
.push(curr - p.get_header().common_header.stamp);
p.free_packet();
}
_ => packet.free_packet(),
},
}
} else {
packet.free_packet();
}
}
}
// The moment all response packets have been received, set the value of the
// stop timestamp so that throughput can be estimated later.
if self.responses <= self.recvd {
self.stop = cycles::rdtsc();
}
}
fn dependencies(&mut self) -> Vec<usize> {
vec![]
}
}
/// Sets up YcsbSend by adding it to a Netbricks scheduler.
///
/// # Arguments
///
/// * `config`: Network related configuration such as the MAC and IP address.
/// * `ports`: Network port on which packets will be sent.
/// * `scheduler`: Netbricks scheduler to which YcsbSend will be added.
fn setup_send<S>(
config: &config::ClientConfig,
ports: Vec<CacheAligned<PortQueue>>,
scheduler: &mut S,
_core: i32,
) where
S: Scheduler + Sized,
{
if ports.len() != 1 {
error!("Client should be configured with exactly 1 port!");
std::process::exit(1);
}
// Add the sender to a netbricks pipeline.
match scheduler.add_task(YcsbSend::new(
config,
ports[0].clone(),
config.num_reqs as u64,
config.server_udp_ports as u16,
)) {
Ok(_) => {
info!(
"Successfully added YcsbSend with tx queue {}.",
ports[0].txq()
);
}
Err(ref err) => {
error!("Error while adding to Netbricks pipeline {}", err);
std::process::exit(1);
}
}
}
/// Sets up YcsbRecv by adding it to a Netbricks scheduler.
///
/// # Arguments
///
/// * `ports`: Network port on which packets will be sent.
/// * `scheduler`: Netbricks scheduler to which YcsbRecv will be added.
/// * `master`: If true, the added YcsbRecv will make latency measurements.
/// * `native`: If true, the added YcsbRecv will assume that responses correspond to gets
/// and puts.
fn setup_recv<S>(
ports: Vec<CacheAligned<PortQueue>>,
scheduler: &mut S,
_core: i32,
master: bool,
native: bool,
) where
S: Scheduler + Sized,
{
if ports.len() != 1 {
error!("Client should be configured with exactly 1 port!");
std::process::exit(1);
}
// Add the receiver to a netbricks pipeline.
match scheduler.add_task(YcsbRecv::new(
ports[0].clone(),
34 * 1000 * 1000 as u64,
master,
native,
)) {
Ok(_) => {
info!(
"Successfully added YcsbRecv with rx queue {}.",
ports[0].rxq()
);
}
Err(ref err) => {
error!("Error while adding to Netbricks pipeline {}", err);
std::process::exit(1);
}
}
}
fn main() {
db::env_logger::init().expect("ERROR: failed to initialize logger!");
let config = config::ClientConfig::load();
info!("Starting up Sandstorm client with config {:?}", config);
// Based on the supplied client configuration, compute the amount of time it will take to send
// out `num_reqs` requests at a rate of `req_rate` requests per second.
let exec = config.num_reqs / config.req_rate;
// Setup Netbricks.
let mut net_context = setup::config_and_init_netbricks(&config);
// Setup the client pipeline.
net_context.start_schedulers();
// The core id's which will run the sender and receiver threads.
// XXX The following two arrays heavily depend on the set of cores
// configured in setup.rs
let senders = [0, 2, 4, 6];
let receive = [1, 3, 5, 7];
assert!((senders.len() == 4) && (receive.len() == 4));
// Setup 4 senders, and 4 receivers.
for i in 0..4 {
// First, retrieve a tx-rx queue pair from Netbricks
let port = net_context
.rx_queues
.get(&senders[i])
.expect("Failed to retrieve network port!")
.clone();
let mut master = false;
if i == 0 {
master = true;
}
let native = !config.use_invoke;
// Setup the receive side.
net_context
.add_pipeline_to_core(
receive[i],
Arc::new(
move |_ports, sched: &mut StandaloneScheduler, core: i32, _sibling| {
setup_recv(port.clone(), sched, core, master, native)
},
),
)
.expect("Failed to initialize receive side.");
// Setup the send side.
net_context
.add_pipeline_to_core(
senders[i],
Arc::new(
move |ports, sched: &mut StandaloneScheduler, core: i32, _sibling| {
setup_send(&config::ClientConfig::load(), ports, sched, core)
},
),
)
.expect("Failed to initialize send side.");
}
// Allow the system to bootup fully.
std::thread::sleep(std::time::Duration::from_secs(1));
// Run the client.
net_context.execute();
// Sleep for an amount of time approximately equal to the estimated execution time, and then
// shutdown the client.
std::thread::sleep(std::time::Duration::from_secs(exec as u64 + 11));
// Stop the client.
net_context.stop();
}
#[cfg(test)]
mod test {
use std;
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::{Duration, Instant};
#[test]
fn ycsb_abc_basic() {
let n_threads = 1;
let mut threads = Vec::with_capacity(n_threads);
let done = Arc::new(AtomicBool::new(false));
for _ in 0..n_threads {
let done = done.clone();
threads.push(thread::spawn(move || {
let mut b = super::Ycsb::new(10, 100, 1000000, 5, 0.99, 1024, 0.1);
let mut n_gets = 0u64;
let mut n_puts = 0u64;
let start = Instant::now();
while !done.load(Ordering::Relaxed) {
b.abc(|_t, _key| n_gets += 1, |_t, _key, _value| n_puts += 1);
}
(start.elapsed(), n_gets, n_puts)
}));
}
thread::sleep(Duration::from_secs(2));
done.store(true, Ordering::Relaxed);
// Iterate across all threads. Return a tupule whose first member consists
// of the highest execution time across all threads, and whose second member
// is the sum of the number of iterations run on each benchmark thread.
// Dividing the second member by the first, will yeild the throughput.
let (duration, n_gets, n_puts) = threads
.into_iter()
.map(|t| t.join().expect("ERROR: Thread join failed."))
.fold(
(Duration::new(0, 0), 0, 0),
|(ldur, lgets, lputs), (rdur, rgets, rputs)| {
(std::cmp::max(ldur, rdur), lgets + rgets, lputs + rputs)
},
);
let secs = duration.as_secs() as f64 + (duration.subsec_nanos() as f64 / 1e9);
println!(
"{} threads: {:.0} gets/s {:.0} puts/s {:.0} ops/s",
n_threads,
n_gets as f64 / secs,
n_puts as f64 / secs,
(n_gets + n_puts) as f64 / secs
);
}
// Convert a key to u32 assuming little endian.
fn convert_key(key: &[u8]) -> u32 {
assert_eq!(4, key.len());
let k: u32 = 0
| key[0] as u32
| (key[1] as u32) << 8
| (key[2] as u32) << 16
| (key[3] as u32) << 24;
k
}
#[test]
fn ycsb_abc_histogram() {
let hist = Arc::new(Mutex::new(HashMap::new()));
let n_keys = 20;
let n_threads = 1;
let mut threads = Vec::with_capacity(n_threads);
let done = Arc::new(AtomicBool::new(false));
for _ in 0..n_threads {
let hist = hist.clone();
let done = done.clone();
threads.push(thread::spawn(move || {
let mut b = super::Ycsb::new(4, 100, n_keys, 5, 0.99, 1024, 0.1);
let mut n_gets = 0u64;
let mut n_puts = 0u64;
let start = Instant::now();
while !done.load(Ordering::Relaxed) {
b.abc(
|_t, key| {
// get
let k = convert_key(key);
let mut ht = hist.lock().unwrap();
ht.entry(k).or_insert((0, 0)).0 += 1;
n_gets += 1
},
|_t, key, _value| {
// put
let k = convert_key(key);
let mut ht = hist.lock().unwrap();
ht.entry(k).or_insert((0, 0)).1 += 1;
n_puts += 1
},
);
}
(start.elapsed(), n_gets, n_puts)
}));
}
thread::sleep(Duration::from_secs(2));
done.store(true, Ordering::Relaxed);
// Iterate across all threads. Return a tupule whose first member consists
// of the highest execution time across all threads, and whose second member
// is the sum of the number of iterations run on each benchmark thread.
// Dividing the second member by the first, will yeild the throughput.
let (duration, n_gets, n_puts) = threads
.into_iter()
.map(|t| t.join().expect("ERROR: Thread join failed."))
.fold(
(Duration::new(0, 0), 0, 0),
|(ldur, lgets, lputs), (rdur, rgets, rputs)| {
(std::cmp::max(ldur, rdur), lgets + rgets, lputs + rputs)
},
);
let secs = duration.as_secs() as f64 + (duration.subsec_nanos() as f64 / 1e9);
println!(
"{} threads: {:.0} gets/s {:.0} puts/s {:.0} ops/s",
n_threads,
n_gets as f64 / secs,
n_puts as f64 / secs,
(n_gets + n_puts) as f64 / secs
);
let ht = hist.lock().unwrap();
let mut kvs: Vec<_> = ht.iter().collect();
kvs.sort();
let v: Vec<_> = kvs
.iter()
.map(|&(k, v)| println!("Key {:?}: {:?} gets/puts", k, v))
.collect();
println!("Unique key count: {}", v.len());
assert_eq!(n_keys, v.len());
let total: i64 = kvs.iter().map(|&(_, &(g, s))| (g + s) as i64).sum();
let mut sum = 0;
for &(k, v) in kvs.iter() {
let &(g, s) = v;
sum += g + s;
let percentile = sum as f64 / total as f64;
println!("Key {:?}: {:?} percentile", k, percentile);
}
// For 20 keys median key should be near 4th key, so this checks out.
}
}
| {
// Return if there are no more requests to generate.
if self.requests <= self.sent {
return;
}
// Get the current time stamp so that we can determine if it is time to issue the next RPC.
let curr = cycles::rdtsc();
// If it is either time to send out a request, or if a request has never been sent out,
// then, do so.
if curr >= self.next || self.next == 0 {
if self.native == true {
// Configured to issue native RPCs, issue a regular get()/put() operation.
self.workload.borrow_mut().abc(
|tenant, key| self.sender.send_get(tenant, 1, key, curr),
|tenant, key, val| self.sender.send_put(tenant, 1, key, val, curr),
);
} else {
// Configured to issue invoke() RPCs.
let mut p_get = self.payload_get.borrow_mut();
let mut p_put = self.payload_put.borrow_mut();
// XXX Heavily dependent on how `Ycsb` creates a key. Only the first four
// bytes of the key matter, the rest are zero. The value is always zero.
self.workload.borrow_mut().abc(
|tenant, key| {
// First 11 bytes on the payload were already pre-populated with the
// extension name (3 bytes), and the table id (8 bytes). Just write in the
// first 4 bytes of the key.
p_get[11..15].copy_from_slice(&key[0..4]);
self.sender.send_invoke(tenant, 3, &p_get, curr)
},
|tenant, key, _val| {
// First 13 bytes on the payload were already pre-populated with the
// extension name (3 bytes), the table id (8 bytes), and the key length (2
// bytes). Just write in the first 4 bytes of the key. The value is anyway
// always zero.
p_put[13..17].copy_from_slice(&key[0..4]);
self.sender.send_invoke(tenant, 3, &p_put, curr)
},
);
}
// Update the time stamp at which the next request should be generated, assuming that
// the first request was sent out at self.start.
self.sent += 1;
self.next = self.start + self.sent * self.rate_inv;
}
} | identifier_body |
ycsb.rs | /* Copyright (c) 2018 University of Utah
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
extern crate db;
extern crate rand;
extern crate splinter;
extern crate time;
extern crate zipf;
mod setup;
use std::cell::RefCell;
use std::fmt::Display;
use std::mem;
use std::mem::transmute;
use std::sync::Arc;
use db::config;
use db::cycles;
use db::e2d2::allocators::*;
use db::e2d2::interface::*;
use db::e2d2::scheduler::*;
use db::log::*;
use db::rpc::*;
use db::wireformat::*;
use rand::distributions::Sample;
use rand::{Rng, SeedableRng, XorShiftRng};
use zipf::ZipfDistribution;
use splinter::*;
// YCSB A, B, and C benchmark.
// The benchmark is created and parameterized with `new()`. Many threads
// share the same benchmark instance. Each thread can call `abc()` which
// runs the benchmark until another thread calls `stop()`. Each thread
// then returns their runtime and the number of gets and puts they have done.
// This benchmark doesn't care about how get/put are implemented; it takes
// function pointers to get/put on `new()` and just calls those as it runs.
//
// The tests below give an example of how to use it and how to aggregate the results.
pub struct Ycsb {
put_pct: usize,
rng: Box<dyn Rng>,
key_rng: Box<ZipfDistribution>,
tenant_rng: Box<ZipfDistribution>,
key_buf: Vec<u8>,
value_buf: Vec<u8>,
}
impl Ycsb {
// Create a new benchmark instance.
//
// # Arguments
// - key_len: Length of the keys to generate per get/put. Most bytes will be zero, since
// the benchmark poplates them from a random 32-bit value.
// - value_len: Length of the values to store per put. Always all zero bytes.
// - n_keys: Number of keys from which random keys are drawn.
// - put_pct: Number between 0 and 100 indicating percent of ops that are sets.
// - skew: Zipfian skew parameter. 0.99 is YCSB default.
// - n_tenants: The number of tenants from which the tenant id is chosen.
// - tenant_skew: The skew in the Zipfian distribution from which tenant id's are drawn.
// # Return
// A new instance of YCSB that threads can call `abc()` on to run.
fn new(
key_len: usize,
value_len: usize,
n_keys: usize,
put_pct: usize,
skew: f64,
n_tenants: u32,
tenant_skew: f64,
) -> Ycsb {
let seed: [u32; 4] = rand::random::<[u32; 4]>();
let mut key_buf: Vec<u8> = Vec::with_capacity(key_len);
key_buf.resize(key_len, 0);
let mut value_buf: Vec<u8> = Vec::with_capacity(value_len);
value_buf.resize(value_len, 0);
Ycsb {
put_pct: put_pct,
rng: Box::new(XorShiftRng::from_seed(seed)),
key_rng: Box::new(
ZipfDistribution::new(n_keys, skew).expect("Couldn't create key RNG."),
),
tenant_rng: Box::new(
ZipfDistribution::new(n_tenants as usize, tenant_skew)
.expect("Couldn't create tenant RNG."),
),
key_buf: key_buf,
value_buf: value_buf,
}
}
// Run YCSB A, B, or C (depending on `new()` parameters).
// The calling thread will not return until `done()` is called on this `Ycsb` instance.
//
// # Arguments
// - get: A function that fetches the data stored under a bytestring key of `self.key_len` bytes.
// - set: A function that stores the data stored under a bytestring key of `self.key_len` bytes
// with a bytestring value of `self.value_len` bytes.
// # Return
// A three tuple consisting of the duration that this thread ran the benchmark, the
// number of gets it performed, and the number of puts it performed.
pub fn abc<G, P, R>(&mut self, mut get: G, mut put: P) -> R
where
G: FnMut(u32, &[u8]) -> R,
P: FnMut(u32, &[u8], &[u8]) -> R,
{
let is_get = (self.rng.gen::<u32>() % 100) >= self.put_pct as u32;
// Sample a tenant.
let t = self.tenant_rng.sample(&mut self.rng) as u32;
// Sample a key, and convert into a little endian byte array.
let k = self.key_rng.sample(&mut self.rng) as u32;
let k: [u8; 4] = unsafe { transmute(k.to_le()) };
self.key_buf[0..mem::size_of::<u32>()].copy_from_slice(&k);
if is_get {
get(t, self.key_buf.as_slice())
} else {
put(t, self.key_buf.as_slice(), self.value_buf.as_slice())
}
}
}
/// Sends out YCSB based RPC requests to a Sandstorm server.
struct YcsbSend {
// The actual YCSB workload. Required to generate keys and values for get() and put() requests.
workload: RefCell<Ycsb>,
// Network stack required to actually send RPC requests out the network.
sender: dispatch::Sender,
// Total number of requests to be sent out.
requests: u64,
// Number of requests that have been sent out so far.
sent: u64,
// The inverse of the rate at which requests are to be generated. Basically, the time interval
// between two request generations in cycles.
rate_inv: u64,
// The time stamp at which the workload started generating requests in cycles.
start: u64,
// The time stamp at which the next request must be issued in cycles.
next: u64,
// If true, RPC requests corresponding to native get() and put() operations are sent out. If
// false, invoke() based RPC requests are sent out.
native: bool,
// Payload for an invoke() based get operation. Required in order to avoid making intermediate
// copies of the extension name, table id, and key.
payload_get: RefCell<Vec<u8>>,
// Payload for an invoke() based put operation. Required in order to avoid making intermediate
// copies of the extension name, table id, key length, key, and value.
payload_put: RefCell<Vec<u8>>,
}
// Implementation of methods on YcsbSend.
impl YcsbSend {
/// Constructs a YcsbSend.
///
/// # Arguments
///
/// * `config`: Client configuration with YCSB related (key and value length etc.) as well as
/// Network related (Server and Client MAC address etc.) parameters.
/// * `port`: Network port over which requests will be sent out.
/// * `reqs`: The number of requests to be issued to the server.
/// * `dst_ports`: The total number of UDP ports the server is listening on.
///
/// # Return
///
/// A YCSB request generator.
fn new(
config: &config::ClientConfig,
port: CacheAligned<PortQueue>,
reqs: u64,
dst_ports: u16,
) -> YcsbSend {
// The payload on an invoke() based get request consists of the extensions name ("get"),
// the table id to perform the lookup on, and the key to lookup.
let payload_len = "get".as_bytes().len() + mem::size_of::<u64>() + config.key_len;
let mut payload_get = Vec::with_capacity(payload_len);
payload_get.extend_from_slice("get".as_bytes());
payload_get.extend_from_slice(&unsafe { transmute::<u64, [u8; 8]>(1u64.to_le()) });
payload_get.resize(payload_len, 0);
// The payload on an invoke() based put request consists of the extensions name ("put"),
// the table id to perform the lookup on, the length of the key to lookup, the key, and the
// value to be inserted into the database.
let payload_len = "put".as_bytes().len()
+ mem::size_of::<u64>()
+ mem::size_of::<u16>()
+ config.key_len
+ config.value_len;
let mut payload_put = Vec::with_capacity(payload_len);
payload_put.extend_from_slice("put".as_bytes());
payload_put.extend_from_slice(&unsafe { transmute::<u64, [u8; 8]>(1u64.to_le()) });
payload_put.extend_from_slice(&unsafe {
transmute::<u16, [u8; 2]>((config.key_len as u16).to_le())
});
payload_put.resize(payload_len, 0);
YcsbSend {
workload: RefCell::new(Ycsb::new(
config.key_len,
config.value_len,
config.n_keys,
config.put_pct,
config.skew,
config.num_tenants,
config.tenant_skew,
)),
sender: dispatch::Sender::new(config, port, dst_ports),
requests: reqs,
sent: 0,
rate_inv: cycles::cycles_per_second() / config.req_rate as u64,
start: cycles::rdtsc(),
next: 0,
native: !config.use_invoke,
payload_get: RefCell::new(payload_get),
payload_put: RefCell::new(payload_put),
}
}
}
// The Executable trait allowing YcsbSend to be scheduled by Netbricks.
impl Executable for YcsbSend {
// Called internally by Netbricks.
fn execute(&mut self) {
// Return if there are no more requests to generate.
if self.requests <= self.sent {
return;
}
// Get the current time stamp so that we can determine if it is time to issue the next RPC.
let curr = cycles::rdtsc();
// If it is either time to send out a request, or if a request has never been sent out,
// then, do so.
if curr >= self.next || self.next == 0 {
if self.native == true {
// Configured to issue native RPCs, issue a regular get()/put() operation.
self.workload.borrow_mut().abc(
|tenant, key| self.sender.send_get(tenant, 1, key, curr),
|tenant, key, val| self.sender.send_put(tenant, 1, key, val, curr),
);
} else {
// Configured to issue invoke() RPCs.
let mut p_get = self.payload_get.borrow_mut();
let mut p_put = self.payload_put.borrow_mut();
// XXX Heavily dependent on how `Ycsb` creates a key. Only the first four
// bytes of the key matter, the rest are zero. The value is always zero.
self.workload.borrow_mut().abc(
|tenant, key| {
// First 11 bytes on the payload were already pre-populated with the
// extension name (3 bytes), and the table id (8 bytes). Just write in the
// first 4 bytes of the key.
p_get[11..15].copy_from_slice(&key[0..4]);
self.sender.send_invoke(tenant, 3, &p_get, curr)
},
|tenant, key, _val| {
// First 13 bytes on the payload were already pre-populated with the
// extension name (3 bytes), the table id (8 bytes), and the key length (2
// bytes). Just write in the first 4 bytes of the key. The value is anyway
// always zero.
p_put[13..17].copy_from_slice(&key[0..4]);
self.sender.send_invoke(tenant, 3, &p_put, curr)
},
);
}
// Update the time stamp at which the next request should be generated, assuming that
// the first request was sent out at self.start.
self.sent += 1;
self.next = self.start + self.sent * self.rate_inv;
}
}
fn dependencies(&mut self) -> Vec<usize> {
vec![]
}
}
/// Receives responses to YCSB requests sent out by YcsbSend.
struct YcsbRecv<T>
where
T: PacketTx + PacketRx + Display + Clone + 'static,
{
// The network stack required to receives RPC response packets from a network port.
receiver: dispatch::Receiver<T>,
// The number of response packets to wait for before printing out statistics.
responses: u64,
// Time stamp in cycles at which measurement started. Required to calculate observed
// throughput of the Sandstorm server.
start: u64,
// The total number of responses received so far.
recvd: u64,
// Vector of sampled request latencies. Required to calculate distributions once all responses
// have been received.
latencies: Vec<u64>,
// If true, this receiver will make latency measurements.
master: bool,
// If true, then responses will be considered to correspond to native gets and puts.
native: bool,
// Time stamp in cycles at which measurement stopped.
stop: u64,
}
// Implementation of methods on YcsbRecv.
impl<T> YcsbRecv<T>
where
T: PacketTx + PacketRx + Display + Clone + 'static,
{
/// Constructs a YcsbRecv.
///
/// # Arguments
///
/// * `port` : Network port on which responses will be polled for.
/// * `resps`: The number of responses to wait for before calculating statistics.
/// * `master`: Boolean indicating if the receiver should make latency measurements.
/// * `native`: If true, responses will be considered to correspond to native gets and puts.
///
/// # Return
///
/// A YCSB response receiver that measures the median latency and throughput of a Sandstorm
/// server.
fn new(port: T, resps: u64, master: bool, native: bool) -> YcsbRecv<T> {
YcsbRecv {
receiver: dispatch::Receiver::new(port),
responses: resps,
start: cycles::rdtsc(),
recvd: 0,
latencies: Vec::with_capacity(resps as usize),
master: master,
native: native,
stop: 0,
}
}
}
// Implementation of the `Drop` trait on YcsbRecv.
impl<T> Drop for YcsbRecv<T>
where
T: PacketTx + PacketRx + Display + Clone + 'static,
{
fn drop(&mut self) {
// Calculate & print the throughput for all client threads.
println!(
"YCSB Throughput {}",
self.recvd as f64 / cycles::to_seconds(self.stop - self.start)
);
// Calculate & print median & tail latency only on the master thread.
if self.master {
self.latencies.sort();
let m;
let t = self.latencies[(self.latencies.len() * 99) / 100];
match self.latencies.len() % 2 {
0 => {
let n = self.latencies.len();
m = (self.latencies[n / 2] + self.latencies[(n / 2) + 1]) / 2;
}
_ => m = self.latencies[self.latencies.len() / 2],
}
println!(
">>> {} {}",
cycles::to_seconds(m) * 1e9,
cycles::to_seconds(t) * 1e9
);
}
}
}
// Executable trait allowing YcsbRecv to be scheduled by Netbricks.
impl<T> Executable for YcsbRecv<T>
where
T: PacketTx + PacketRx + Display + Clone + 'static,
{
// Called internally by Netbricks.
fn execute(&mut self) {
// Don't do anything after all responses have been received.
if self.responses <= self.recvd {
return;
}
// Try to receive packets from the network port.
// If there are packets, sample the latency of the server.
if let Some(mut packets) = self.receiver.recv_res() {
while let Some(packet) = packets.pop() {
self.recvd += 1;
// Measure latency on the master client after the first 2 million requests.
// The start timestamp is present on the RPC response header.
if self.recvd > 2 * 1000 * 1000 && self.master {
let curr = cycles::rdtsc();
match self.native {
// The response corresponds to an invoke() RPC.
false => {
let p = packet.parse_header::<InvokeResponse>();
self.latencies
.push(curr - p.get_header().common_header.stamp);
p.free_packet();
}
// The response corresponds to a get() or put() RPC.
// The opcode on the response identifies the RPC type.
true => match parse_rpc_opcode(&packet) {
OpCode::SandstormGetRpc => {
let p = packet.parse_header::<GetResponse>();
self.latencies
.push(curr - p.get_header().common_header.stamp);
p.free_packet();
}
OpCode::SandstormPutRpc => {
let p = packet.parse_header::<PutResponse>();
self.latencies
.push(curr - p.get_header().common_header.stamp);
p.free_packet();
}
_ => packet.free_packet(),
},
}
} else {
packet.free_packet();
}
}
}
// The moment all response packets have been received, set the value of the
// stop timestamp so that throughput can be estimated later.
if self.responses <= self.recvd {
self.stop = cycles::rdtsc();
}
}
fn dependencies(&mut self) -> Vec<usize> {
vec![]
}
}
/// Sets up YcsbSend by adding it to a Netbricks scheduler.
///
/// # Arguments
///
/// * `config`: Network related configuration such as the MAC and IP address.
/// * `ports`: Network port on which packets will be sent.
/// * `scheduler`: Netbricks scheduler to which YcsbSend will be added.
fn setup_send<S>(
config: &config::ClientConfig,
ports: Vec<CacheAligned<PortQueue>>,
scheduler: &mut S,
_core: i32,
) where
S: Scheduler + Sized,
{
if ports.len() != 1 {
error!("Client should be configured with exactly 1 port!");
std::process::exit(1);
}
// Add the sender to a netbricks pipeline.
match scheduler.add_task(YcsbSend::new(
config,
ports[0].clone(),
config.num_reqs as u64,
config.server_udp_ports as u16,
)) {
Ok(_) => {
info!(
"Successfully added YcsbSend with tx queue {}.",
ports[0].txq()
);
}
Err(ref err) => {
error!("Error while adding to Netbricks pipeline {}", err);
std::process::exit(1);
}
}
}
/// Sets up YcsbRecv by adding it to a Netbricks scheduler.
///
/// # Arguments
///
/// * `ports`: Network port on which packets will be sent.
/// * `scheduler`: Netbricks scheduler to which YcsbRecv will be added.
/// * `master`: If true, the added YcsbRecv will make latency measurements.
/// * `native`: If true, the added YcsbRecv will assume that responses correspond to gets
/// and puts.
fn setup_recv<S>(
ports: Vec<CacheAligned<PortQueue>>,
scheduler: &mut S,
_core: i32,
master: bool,
native: bool,
) where
S: Scheduler + Sized,
{
if ports.len() != 1 {
error!("Client should be configured with exactly 1 port!");
std::process::exit(1);
}
// Add the receiver to a netbricks pipeline.
match scheduler.add_task(YcsbRecv::new(
ports[0].clone(),
34 * 1000 * 1000 as u64,
master,
native,
)) {
Ok(_) => {
info!(
"Successfully added YcsbRecv with rx queue {}.",
ports[0].rxq()
);
}
Err(ref err) => {
error!("Error while adding to Netbricks pipeline {}", err);
std::process::exit(1);
}
}
}
fn main() {
db::env_logger::init().expect("ERROR: failed to initialize logger!");
let config = config::ClientConfig::load();
info!("Starting up Sandstorm client with config {:?}", config);
// Based on the supplied client configuration, compute the amount of time it will take to send
// out `num_reqs` requests at a rate of `req_rate` requests per second.
let exec = config.num_reqs / config.req_rate; | // Setup Netbricks.
let mut net_context = setup::config_and_init_netbricks(&config);
// Setup the client pipeline.
net_context.start_schedulers();
// The core id's which will run the sender and receiver threads.
// XXX The following two arrays heavily depend on the set of cores
// configured in setup.rs
let senders = [0, 2, 4, 6];
let receive = [1, 3, 5, 7];
assert!((senders.len() == 4) && (receive.len() == 4));
// Setup 4 senders, and 4 receivers.
for i in 0..4 {
// First, retrieve a tx-rx queue pair from Netbricks
let port = net_context
.rx_queues
.get(&senders[i])
.expect("Failed to retrieve network port!")
.clone();
let mut master = false;
if i == 0 {
master = true;
}
let native = !config.use_invoke;
// Setup the receive side.
net_context
.add_pipeline_to_core(
receive[i],
Arc::new(
move |_ports, sched: &mut StandaloneScheduler, core: i32, _sibling| {
setup_recv(port.clone(), sched, core, master, native)
},
),
)
.expect("Failed to initialize receive side.");
// Setup the send side.
net_context
.add_pipeline_to_core(
senders[i],
Arc::new(
move |ports, sched: &mut StandaloneScheduler, core: i32, _sibling| {
setup_send(&config::ClientConfig::load(), ports, sched, core)
},
),
)
.expect("Failed to initialize send side.");
}
// Allow the system to bootup fully.
std::thread::sleep(std::time::Duration::from_secs(1));
// Run the client.
net_context.execute();
// Sleep for an amount of time approximately equal to the estimated execution time, and then
// shutdown the client.
std::thread::sleep(std::time::Duration::from_secs(exec as u64 + 11));
// Stop the client.
net_context.stop();
}
#[cfg(test)]
mod test {
use std;
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::{Duration, Instant};
#[test]
fn ycsb_abc_basic() {
let n_threads = 1;
let mut threads = Vec::with_capacity(n_threads);
let done = Arc::new(AtomicBool::new(false));
for _ in 0..n_threads {
let done = done.clone();
threads.push(thread::spawn(move || {
let mut b = super::Ycsb::new(10, 100, 1000000, 5, 0.99, 1024, 0.1);
let mut n_gets = 0u64;
let mut n_puts = 0u64;
let start = Instant::now();
while !done.load(Ordering::Relaxed) {
b.abc(|_t, _key| n_gets += 1, |_t, _key, _value| n_puts += 1);
}
(start.elapsed(), n_gets, n_puts)
}));
}
thread::sleep(Duration::from_secs(2));
done.store(true, Ordering::Relaxed);
// Iterate across all threads. Return a tupule whose first member consists
// of the highest execution time across all threads, and whose second member
// is the sum of the number of iterations run on each benchmark thread.
// Dividing the second member by the first, will yeild the throughput.
let (duration, n_gets, n_puts) = threads
.into_iter()
.map(|t| t.join().expect("ERROR: Thread join failed."))
.fold(
(Duration::new(0, 0), 0, 0),
|(ldur, lgets, lputs), (rdur, rgets, rputs)| {
(std::cmp::max(ldur, rdur), lgets + rgets, lputs + rputs)
},
);
let secs = duration.as_secs() as f64 + (duration.subsec_nanos() as f64 / 1e9);
println!(
"{} threads: {:.0} gets/s {:.0} puts/s {:.0} ops/s",
n_threads,
n_gets as f64 / secs,
n_puts as f64 / secs,
(n_gets + n_puts) as f64 / secs
);
}
// Convert a key to u32 assuming little endian.
fn convert_key(key: &[u8]) -> u32 {
assert_eq!(4, key.len());
let k: u32 = 0
| key[0] as u32
| (key[1] as u32) << 8
| (key[2] as u32) << 16
| (key[3] as u32) << 24;
k
}
#[test]
fn ycsb_abc_histogram() {
let hist = Arc::new(Mutex::new(HashMap::new()));
let n_keys = 20;
let n_threads = 1;
let mut threads = Vec::with_capacity(n_threads);
let done = Arc::new(AtomicBool::new(false));
for _ in 0..n_threads {
let hist = hist.clone();
let done = done.clone();
threads.push(thread::spawn(move || {
let mut b = super::Ycsb::new(4, 100, n_keys, 5, 0.99, 1024, 0.1);
let mut n_gets = 0u64;
let mut n_puts = 0u64;
let start = Instant::now();
while !done.load(Ordering::Relaxed) {
b.abc(
|_t, key| {
// get
let k = convert_key(key);
let mut ht = hist.lock().unwrap();
ht.entry(k).or_insert((0, 0)).0 += 1;
n_gets += 1
},
|_t, key, _value| {
// put
let k = convert_key(key);
let mut ht = hist.lock().unwrap();
ht.entry(k).or_insert((0, 0)).1 += 1;
n_puts += 1
},
);
}
(start.elapsed(), n_gets, n_puts)
}));
}
thread::sleep(Duration::from_secs(2));
done.store(true, Ordering::Relaxed);
// Iterate across all threads. Return a tupule whose first member consists
// of the highest execution time across all threads, and whose second member
// is the sum of the number of iterations run on each benchmark thread.
// Dividing the second member by the first, will yeild the throughput.
let (duration, n_gets, n_puts) = threads
.into_iter()
.map(|t| t.join().expect("ERROR: Thread join failed."))
.fold(
(Duration::new(0, 0), 0, 0),
|(ldur, lgets, lputs), (rdur, rgets, rputs)| {
(std::cmp::max(ldur, rdur), lgets + rgets, lputs + rputs)
},
);
let secs = duration.as_secs() as f64 + (duration.subsec_nanos() as f64 / 1e9);
println!(
"{} threads: {:.0} gets/s {:.0} puts/s {:.0} ops/s",
n_threads,
n_gets as f64 / secs,
n_puts as f64 / secs,
(n_gets + n_puts) as f64 / secs
);
let ht = hist.lock().unwrap();
let mut kvs: Vec<_> = ht.iter().collect();
kvs.sort();
let v: Vec<_> = kvs
.iter()
.map(|&(k, v)| println!("Key {:?}: {:?} gets/puts", k, v))
.collect();
println!("Unique key count: {}", v.len());
assert_eq!(n_keys, v.len());
let total: i64 = kvs.iter().map(|&(_, &(g, s))| (g + s) as i64).sum();
let mut sum = 0;
for &(k, v) in kvs.iter() {
let &(g, s) = v;
sum += g + s;
let percentile = sum as f64 / total as f64;
println!("Key {:?}: {:?} percentile", k, percentile);
}
// For 20 keys median key should be near 4th key, so this checks out.
}
} | random_line_split | |
ycsb.rs | /* Copyright (c) 2018 University of Utah
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
extern crate db;
extern crate rand;
extern crate splinter;
extern crate time;
extern crate zipf;
mod setup;
use std::cell::RefCell;
use std::fmt::Display;
use std::mem;
use std::mem::transmute;
use std::sync::Arc;
use db::config;
use db::cycles;
use db::e2d2::allocators::*;
use db::e2d2::interface::*;
use db::e2d2::scheduler::*;
use db::log::*;
use db::rpc::*;
use db::wireformat::*;
use rand::distributions::Sample;
use rand::{Rng, SeedableRng, XorShiftRng};
use zipf::ZipfDistribution;
use splinter::*;
// YCSB A, B, and C benchmark.
// The benchmark is created and parameterized with `new()`. Many threads
// share the same benchmark instance. Each thread can call `abc()` which
// runs the benchmark until another thread calls `stop()`. Each thread
// then returns their runtime and the number of gets and puts they have done.
// This benchmark doesn't care about how get/put are implemented; it takes
// function pointers to get/put on `new()` and just calls those as it runs.
//
// The tests below give an example of how to use it and how to aggregate the results.
pub struct Ycsb {
put_pct: usize,
rng: Box<dyn Rng>,
key_rng: Box<ZipfDistribution>,
tenant_rng: Box<ZipfDistribution>,
key_buf: Vec<u8>,
value_buf: Vec<u8>,
}
impl Ycsb {
// Create a new benchmark instance.
//
// # Arguments
// - key_len: Length of the keys to generate per get/put. Most bytes will be zero, since
// the benchmark poplates them from a random 32-bit value.
// - value_len: Length of the values to store per put. Always all zero bytes.
// - n_keys: Number of keys from which random keys are drawn.
// - put_pct: Number between 0 and 100 indicating percent of ops that are sets.
// - skew: Zipfian skew parameter. 0.99 is YCSB default.
// - n_tenants: The number of tenants from which the tenant id is chosen.
// - tenant_skew: The skew in the Zipfian distribution from which tenant id's are drawn.
// # Return
// A new instance of YCSB that threads can call `abc()` on to run.
fn new(
key_len: usize,
value_len: usize,
n_keys: usize,
put_pct: usize,
skew: f64,
n_tenants: u32,
tenant_skew: f64,
) -> Ycsb {
let seed: [u32; 4] = rand::random::<[u32; 4]>();
let mut key_buf: Vec<u8> = Vec::with_capacity(key_len);
key_buf.resize(key_len, 0);
let mut value_buf: Vec<u8> = Vec::with_capacity(value_len);
value_buf.resize(value_len, 0);
Ycsb {
put_pct: put_pct,
rng: Box::new(XorShiftRng::from_seed(seed)),
key_rng: Box::new(
ZipfDistribution::new(n_keys, skew).expect("Couldn't create key RNG."),
),
tenant_rng: Box::new(
ZipfDistribution::new(n_tenants as usize, tenant_skew)
.expect("Couldn't create tenant RNG."),
),
key_buf: key_buf,
value_buf: value_buf,
}
}
// Run YCSB A, B, or C (depending on `new()` parameters).
// The calling thread will not return until `done()` is called on this `Ycsb` instance.
//
// # Arguments
// - get: A function that fetches the data stored under a bytestring key of `self.key_len` bytes.
// - set: A function that stores the data stored under a bytestring key of `self.key_len` bytes
// with a bytestring value of `self.value_len` bytes.
// # Return
// A three tuple consisting of the duration that this thread ran the benchmark, the
// number of gets it performed, and the number of puts it performed.
pub fn abc<G, P, R>(&mut self, mut get: G, mut put: P) -> R
where
G: FnMut(u32, &[u8]) -> R,
P: FnMut(u32, &[u8], &[u8]) -> R,
{
let is_get = (self.rng.gen::<u32>() % 100) >= self.put_pct as u32;
// Sample a tenant.
let t = self.tenant_rng.sample(&mut self.rng) as u32;
// Sample a key, and convert into a little endian byte array.
let k = self.key_rng.sample(&mut self.rng) as u32;
let k: [u8; 4] = unsafe { transmute(k.to_le()) };
self.key_buf[0..mem::size_of::<u32>()].copy_from_slice(&k);
if is_get {
get(t, self.key_buf.as_slice())
} else {
put(t, self.key_buf.as_slice(), self.value_buf.as_slice())
}
}
}
/// Sends out YCSB based RPC requests to a Sandstorm server.
struct YcsbSend {
// The actual YCSB workload. Required to generate keys and values for get() and put() requests.
workload: RefCell<Ycsb>,
// Network stack required to actually send RPC requests out the network.
sender: dispatch::Sender,
// Total number of requests to be sent out.
requests: u64,
// Number of requests that have been sent out so far.
sent: u64,
// The inverse of the rate at which requests are to be generated. Basically, the time interval
// between two request generations in cycles.
rate_inv: u64,
// The time stamp at which the workload started generating requests in cycles.
start: u64,
// The time stamp at which the next request must be issued in cycles.
next: u64,
// If true, RPC requests corresponding to native get() and put() operations are sent out. If
// false, invoke() based RPC requests are sent out.
native: bool,
// Payload for an invoke() based get operation. Required in order to avoid making intermediate
// copies of the extension name, table id, and key.
payload_get: RefCell<Vec<u8>>,
// Payload for an invoke() based put operation. Required in order to avoid making intermediate
// copies of the extension name, table id, key length, key, and value.
payload_put: RefCell<Vec<u8>>,
}
// Implementation of methods on YcsbSend.
impl YcsbSend {
/// Constructs a YcsbSend.
///
/// # Arguments
///
/// * `config`: Client configuration with YCSB related (key and value length etc.) as well as
/// Network related (Server and Client MAC address etc.) parameters.
/// * `port`: Network port over which requests will be sent out.
/// * `reqs`: The number of requests to be issued to the server.
/// * `dst_ports`: The total number of UDP ports the server is listening on.
///
/// # Return
///
/// A YCSB request generator.
fn new(
config: &config::ClientConfig,
port: CacheAligned<PortQueue>,
reqs: u64,
dst_ports: u16,
) -> YcsbSend {
// The payload on an invoke() based get request consists of the extensions name ("get"),
// the table id to perform the lookup on, and the key to lookup.
let payload_len = "get".as_bytes().len() + mem::size_of::<u64>() + config.key_len;
let mut payload_get = Vec::with_capacity(payload_len);
payload_get.extend_from_slice("get".as_bytes());
payload_get.extend_from_slice(&unsafe { transmute::<u64, [u8; 8]>(1u64.to_le()) });
payload_get.resize(payload_len, 0);
// The payload on an invoke() based put request consists of the extensions name ("put"),
// the table id to perform the lookup on, the length of the key to lookup, the key, and the
// value to be inserted into the database.
let payload_len = "put".as_bytes().len()
+ mem::size_of::<u64>()
+ mem::size_of::<u16>()
+ config.key_len
+ config.value_len;
let mut payload_put = Vec::with_capacity(payload_len);
payload_put.extend_from_slice("put".as_bytes());
payload_put.extend_from_slice(&unsafe { transmute::<u64, [u8; 8]>(1u64.to_le()) });
payload_put.extend_from_slice(&unsafe {
transmute::<u16, [u8; 2]>((config.key_len as u16).to_le())
});
payload_put.resize(payload_len, 0);
YcsbSend {
workload: RefCell::new(Ycsb::new(
config.key_len,
config.value_len,
config.n_keys,
config.put_pct,
config.skew,
config.num_tenants,
config.tenant_skew,
)),
sender: dispatch::Sender::new(config, port, dst_ports),
requests: reqs,
sent: 0,
rate_inv: cycles::cycles_per_second() / config.req_rate as u64,
start: cycles::rdtsc(),
next: 0,
native: !config.use_invoke,
payload_get: RefCell::new(payload_get),
payload_put: RefCell::new(payload_put),
}
}
}
// The Executable trait allowing YcsbSend to be scheduled by Netbricks.
impl Executable for YcsbSend {
    // Called internally by Netbricks.
    //
    // Generates at most one request per invocation, paced so that requests leave
    // `rate_inv` cycles apart, anchored at `self.start`.
    fn execute(&mut self) {
        // Return if there are no more requests to generate.
        if self.requests <= self.sent {
            return;
        }
        // Get the current time stamp so that we can determine if it is time to issue the next RPC.
        let curr = cycles::rdtsc();
        // If it is either time to send out a request, or if a request has never been sent out,
        // then, do so.
        if curr >= self.next || self.next == 0 {
            if self.native == true {
                // Configured to issue native RPCs, issue a regular get()/put() operation.
                // The workload decides per-call whether this is a get or a put.
                self.workload.borrow_mut().abc(
                    |tenant, key| self.sender.send_get(tenant, 1, key, curr),
                    |tenant, key, val| self.sender.send_put(tenant, 1, key, val, curr),
                );
            } else {
                // Configured to issue invoke() RPCs.
                // Borrow the pre-built payload buffers once, outside the closures, so the
                // RefCell borrows do not conflict with the workload's own borrow_mut().
                let mut p_get = self.payload_get.borrow_mut();
                let mut p_put = self.payload_put.borrow_mut();
                // XXX Heavily dependent on how `Ycsb` creates a key. Only the first four
                // bytes of the key matter, the rest are zero. The value is always zero.
                self.workload.borrow_mut().abc(
                    |tenant, key| {
                        // First 11 bytes on the payload were already pre-populated with the
                        // extension name (3 bytes), and the table id (8 bytes). Just write in the
                        // first 4 bytes of the key.
                        p_get[11..15].copy_from_slice(&key[0..4]);
                        self.sender.send_invoke(tenant, 3, &p_get, curr)
                    },
                    |tenant, key, _val| {
                        // First 13 bytes on the payload were already pre-populated with the
                        // extension name (3 bytes), the table id (8 bytes), and the key length (2
                        // bytes). Just write in the first 4 bytes of the key. The value is anyway
                        // always zero.
                        p_put[13..17].copy_from_slice(&key[0..4]);
                        self.sender.send_invoke(tenant, 3, &p_put, curr)
                    },
                );
            }
            // Update the time stamp at which the next request should be generated, assuming that
            // the first request was sent out at self.start.
            self.sent += 1;
            self.next = self.start + self.sent * self.rate_inv;
        }
    }
    // This task does not depend on any other scheduled task.
    fn dependencies(&mut self) -> Vec<usize> {
        vec![]
    }
}
/// Receives responses to YCSB requests sent out by YcsbSend.
struct YcsbRecv<T>
where
    T: PacketTx + PacketRx + Display + Clone + 'static,
{
    // The network stack required to receives RPC response packets from a network port.
    receiver: dispatch::Receiver<T>,
    // The number of response packets to wait for before printing out statistics.
    responses: u64,
    // Time stamp in cycles at which measurement started. Required to calculate observed
    // throughput of the Sandstorm server.
    start: u64,
    // The total number of responses received so far.
    recvd: u64,
    // Vector of sampled request latencies. Required to calculate distributions once all responses
    // have been received.
    latencies: Vec<u64>,
    // If true, this receiver will make latency measurements.
    master: bool,
    // If true, then responses will be considered to correspond to native gets and puts.
    // If false, responses are assumed to be invoke() responses instead.
    native: bool,
    // Time stamp in cycles at which measurement stopped. Zero until all responses arrive.
    stop: u64,
}
// Implementation of methods on YcsbRecv.
impl<T> YcsbRecv<T>
where
T: PacketTx + PacketRx + Display + Clone + 'static,
{
/// Constructs a YcsbRecv.
///
/// # Arguments
///
/// * `port` : Network port on which responses will be polled for.
/// * `resps`: The number of responses to wait for before calculating statistics.
/// * `master`: Boolean indicating if the receiver should make latency measurements.
/// * `native`: If true, responses will be considered to correspond to native gets and puts.
///
/// # Return
///
/// A YCSB response receiver that measures the median latency and throughput of a Sandstorm
/// server.
fn | (port: T, resps: u64, master: bool, native: bool) -> YcsbRecv<T> {
YcsbRecv {
receiver: dispatch::Receiver::new(port),
responses: resps,
start: cycles::rdtsc(),
recvd: 0,
latencies: Vec::with_capacity(resps as usize),
master: master,
native: native,
stop: 0,
}
}
}
// Implementation of the `Drop` trait on YcsbRecv.
//
// On drop, prints the observed throughput and, on the master thread, the median and
// 99th percentile of the sampled latencies.
impl<T> Drop for YcsbRecv<T>
where
    T: PacketTx + PacketRx + Display + Clone + 'static,
{
    fn drop(&mut self) {
        // Calculate & print the throughput for all client threads. Use a saturating
        // subtraction so that dropping before every response arrived (stop still 0)
        // does not underflow the u64 difference and panic inside drop.
        println!(
            "YCSB Throughput {}",
            self.recvd as f64 / cycles::to_seconds(self.stop.saturating_sub(self.start))
        );
        // Calculate & print median & tail latency only on the master thread.
        // Skip the statistics entirely if nothing was sampled; indexing an empty
        // vector here would panic while unwinding.
        if self.master && !self.latencies.is_empty() {
            self.latencies.sort();
            let n = self.latencies.len();
            // 99th percentile (tail) latency.
            let t = self.latencies[(n * 99) / 100];
            // Median: for an even number of samples, average the two middle
            // elements (indices n/2 - 1 and n/2).
            let m = match n % 2 {
                0 => (self.latencies[(n / 2) - 1] + self.latencies[n / 2]) / 2,
                _ => self.latencies[n / 2],
            };
            println!(
                ">>> {} {}",
                cycles::to_seconds(m) * 1e9,
                cycles::to_seconds(t) * 1e9
            );
        }
    }
}
// Executable trait allowing YcsbRecv to be scheduled by Netbricks.
impl<T> Executable for YcsbRecv<T>
where
    T: PacketTx + PacketRx + Display + Clone + 'static,
{
    // Called internally by Netbricks.
    fn execute(&mut self) {
        // Don't do anything after all responses have been received.
        if self.responses <= self.recvd {
            return;
        }
        // Try to receive packets from the network port.
        // If there are packets, sample the latency of the server.
        if let Some(mut packets) = self.receiver.recv_res() {
            while let Some(packet) = packets.pop() {
                self.recvd += 1;
                // Measure latency on the master client after the first 2 million requests.
                // The start timestamp is present on the RPC response header.
                if self.recvd > 2 * 1000 * 1000 && self.master {
                    let curr = cycles::rdtsc();
                    // NOTE(review): `curr - stamp` assumes the response's stamp was taken on
                    // the same TSC domain and is never ahead of `curr` — confirm.
                    match self.native {
                        // The response corresponds to an invoke() RPC.
                        false => {
                            let p = packet.parse_header::<InvokeResponse>();
                            self.latencies
                                .push(curr - p.get_header().common_header.stamp);
                            p.free_packet();
                        }
                        // The response corresponds to a get() or put() RPC.
                        // The opcode on the response identifies the RPC type.
                        true => match parse_rpc_opcode(&packet) {
                            OpCode::SandstormGetRpc => {
                                let p = packet.parse_header::<GetResponse>();
                                self.latencies
                                    .push(curr - p.get_header().common_header.stamp);
                                p.free_packet();
                            }
                            OpCode::SandstormPutRpc => {
                                let p = packet.parse_header::<PutResponse>();
                                self.latencies
                                    .push(curr - p.get_header().common_header.stamp);
                                p.free_packet();
                            }
                            // Unknown opcode: drop the packet without sampling.
                            _ => packet.free_packet(),
                        },
                    }
                } else {
                    // Warm-up phase or non-master thread: count the response but do not sample.
                    packet.free_packet();
                }
            }
        }
        // The moment all response packets have been received, set the value of the
        // stop timestamp so that throughput can be estimated later.
        if self.responses <= self.recvd {
            self.stop = cycles::rdtsc();
        }
    }
    // This task does not depend on any other scheduled task.
    fn dependencies(&mut self) -> Vec<usize> {
        vec![]
    }
}
/// Sets up YcsbSend by adding it to a Netbricks scheduler.
///
/// # Arguments
///
/// * `config`: Network related configuration such as the MAC and IP address.
/// * `ports`: Network port on which packets will be sent. Must contain exactly one port.
/// * `scheduler`: Netbricks scheduler to which YcsbSend will be added.
/// * `_core`: Core id, currently unused.
///
/// Exits the process on misconfiguration or if the task cannot be scheduled.
fn setup_send<S>(
    config: &config::ClientConfig,
    ports: Vec<CacheAligned<PortQueue>>,
    scheduler: &mut S,
    _core: i32,
) where
    S: Scheduler + Sized,
{
    if ports.len() != 1 {
        error!("Client should be configured with exactly 1 port!");
        std::process::exit(1);
    }
    // Add the sender to a netbricks pipeline.
    match scheduler.add_task(YcsbSend::new(
        config,
        ports[0].clone(),
        config.num_reqs as u64,
        config.server_udp_ports as u16,
    )) {
        Ok(_) => {
            info!(
                "Successfully added YcsbSend with tx queue {}.",
                ports[0].txq()
            );
        }
        Err(ref err) => {
            error!("Error while adding to Netbricks pipeline {}", err);
            std::process::exit(1);
        }
    }
}
/// Sets up YcsbRecv by adding it to a Netbricks scheduler.
///
/// # Arguments
///
/// * `ports`: Network port on which packets will be sent. Must contain exactly one port.
/// * `scheduler`: Netbricks scheduler to which YcsbRecv will be added.
/// * `master`: If true, the added YcsbRecv will make latency measurements.
/// * `native`: If true, the added YcsbRecv will assume that responses correspond to gets
///             and puts.
fn setup_recv<S>(
    ports: Vec<CacheAligned<PortQueue>>,
    scheduler: &mut S,
    _core: i32,
    master: bool,
    native: bool,
) where
    S: Scheduler + Sized,
{
    if ports.len() != 1 {
        error!("Client should be configured with exactly 1 port!");
        std::process::exit(1);
    }
    // Add the receiver to a netbricks pipeline.
    // NOTE(review): the expected response count is hardcoded to 34 million here and is
    // not derived from config.num_reqs — confirm the two are kept in sync. The `as u64`
    // binds only to the final literal, but the product is still 34_000_000.
    match scheduler.add_task(YcsbRecv::new(
        ports[0].clone(),
        34 * 1000 * 1000 as u64,
        master,
        native,
    )) {
        Ok(_) => {
            info!(
                "Successfully added YcsbRecv with rx queue {}.",
                ports[0].rxq()
            );
        }
        Err(ref err) => {
            error!("Error while adding to Netbricks pipeline {}", err);
            std::process::exit(1);
        }
    }
}
// Entry point: configures Netbricks, pins 4 sender and 4 receiver tasks to dedicated
// cores, runs the benchmark for its estimated duration, then shuts down.
fn main() {
    db::env_logger::init().expect("ERROR: failed to initialize logger!");
    let config = config::ClientConfig::load();
    info!("Starting up Sandstorm client with config {:?}", config);
    // Based on the supplied client configuration, compute the amount of time it will take to send
    // out `num_reqs` requests at a rate of `req_rate` requests per second.
    let exec = config.num_reqs / config.req_rate;
    // Setup Netbricks.
    let mut net_context = setup::config_and_init_netbricks(&config);
    // Setup the client pipeline.
    net_context.start_schedulers();
    // The core id's which will run the sender and receiver threads.
    // XXX The following two arrays heavily depend on the set of cores
    // configured in setup.rs
    let senders = [0, 2, 4, 6];
    let receive = [1, 3, 5, 7];
    assert!((senders.len() == 4) && (receive.len() == 4));
    // Setup 4 senders, and 4 receivers.
    for i in 0..4 {
        // First, retrieve a tx-rx queue pair from Netbricks
        let port = net_context
            .rx_queues
            .get(&senders[i])
            .expect("Failed to retrieve network port!")
            .clone();
        // Only the first receiver samples latencies.
        let mut master = false;
        if i == 0 {
            master = true;
        }
        let native = !config.use_invoke;
        // Setup the receive side.
        net_context
            .add_pipeline_to_core(
                receive[i],
                Arc::new(
                    move |_ports, sched: &mut StandaloneScheduler, core: i32, _sibling| {
                        setup_recv(port.clone(), sched, core, master, native)
                    },
                ),
            )
            .expect("Failed to initialize receive side.");
        // Setup the send side.
        // NOTE(review): the config is re-loaded inside the closure because `config`
        // cannot be moved into all four closures — confirm the file cannot change
        // between loads.
        net_context
            .add_pipeline_to_core(
                senders[i],
                Arc::new(
                    move |ports, sched: &mut StandaloneScheduler, core: i32, _sibling| {
                        setup_send(&config::ClientConfig::load(), ports, sched, core)
                    },
                ),
            )
            .expect("Failed to initialize send side.");
    }
    // Allow the system to bootup fully.
    std::thread::sleep(std::time::Duration::from_secs(1));
    // Run the client.
    net_context.execute();
    // Sleep for an amount of time approximately equal to the estimated execution time, and then
    // shutdown the client.
    std::thread::sleep(std::time::Duration::from_secs(exec as u64 + 11));
    // Stop the client.
    net_context.stop();
}
#[cfg(test)]
mod test {
use std;
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::{Duration, Instant};
#[test]
fn ycsb_abc_basic() {
let n_threads = 1;
let mut threads = Vec::with_capacity(n_threads);
let done = Arc::new(AtomicBool::new(false));
for _ in 0..n_threads {
let done = done.clone();
threads.push(thread::spawn(move || {
let mut b = super::Ycsb::new(10, 100, 1000000, 5, 0.99, 1024, 0.1);
let mut n_gets = 0u64;
let mut n_puts = 0u64;
let start = Instant::now();
while !done.load(Ordering::Relaxed) {
b.abc(|_t, _key| n_gets += 1, |_t, _key, _value| n_puts += 1);
}
(start.elapsed(), n_gets, n_puts)
}));
}
thread::sleep(Duration::from_secs(2));
done.store(true, Ordering::Relaxed);
// Iterate across all threads. Return a tupule whose first member consists
// of the highest execution time across all threads, and whose second member
// is the sum of the number of iterations run on each benchmark thread.
// Dividing the second member by the first, will yeild the throughput.
let (duration, n_gets, n_puts) = threads
.into_iter()
.map(|t| t.join().expect("ERROR: Thread join failed."))
.fold(
(Duration::new(0, 0), 0, 0),
|(ldur, lgets, lputs), (rdur, rgets, rputs)| {
(std::cmp::max(ldur, rdur), lgets + rgets, lputs + rputs)
},
);
let secs = duration.as_secs() as f64 + (duration.subsec_nanos() as f64 / 1e9);
println!(
"{} threads: {:.0} gets/s {:.0} puts/s {:.0} ops/s",
n_threads,
n_gets as f64 / secs,
n_puts as f64 / secs,
(n_gets + n_puts) as f64 / secs
);
}
// Convert a key to u32 assuming little endian.
fn convert_key(key: &[u8]) -> u32 {
assert_eq!(4, key.len());
let k: u32 = 0
| key[0] as u32
| (key[1] as u32) << 8
| (key[2] as u32) << 16
| (key[3] as u32) << 24;
k
}
#[test]
fn ycsb_abc_histogram() {
let hist = Arc::new(Mutex::new(HashMap::new()));
let n_keys = 20;
let n_threads = 1;
let mut threads = Vec::with_capacity(n_threads);
let done = Arc::new(AtomicBool::new(false));
for _ in 0..n_threads {
let hist = hist.clone();
let done = done.clone();
threads.push(thread::spawn(move || {
let mut b = super::Ycsb::new(4, 100, n_keys, 5, 0.99, 1024, 0.1);
let mut n_gets = 0u64;
let mut n_puts = 0u64;
let start = Instant::now();
while !done.load(Ordering::Relaxed) {
b.abc(
|_t, key| {
// get
let k = convert_key(key);
let mut ht = hist.lock().unwrap();
ht.entry(k).or_insert((0, 0)).0 += 1;
n_gets += 1
},
|_t, key, _value| {
// put
let k = convert_key(key);
let mut ht = hist.lock().unwrap();
ht.entry(k).or_insert((0, 0)).1 += 1;
n_puts += 1
},
);
}
(start.elapsed(), n_gets, n_puts)
}));
}
thread::sleep(Duration::from_secs(2));
done.store(true, Ordering::Relaxed);
// Iterate across all threads. Return a tupule whose first member consists
// of the highest execution time across all threads, and whose second member
// is the sum of the number of iterations run on each benchmark thread.
// Dividing the second member by the first, will yeild the throughput.
let (duration, n_gets, n_puts) = threads
.into_iter()
.map(|t| t.join().expect("ERROR: Thread join failed."))
.fold(
(Duration::new(0, 0), 0, 0),
|(ldur, lgets, lputs), (rdur, rgets, rputs)| {
(std::cmp::max(ldur, rdur), lgets + rgets, lputs + rputs)
},
);
let secs = duration.as_secs() as f64 + (duration.subsec_nanos() as f64 / 1e9);
println!(
"{} threads: {:.0} gets/s {:.0} puts/s {:.0} ops/s",
n_threads,
n_gets as f64 / secs,
n_puts as f64 / secs,
(n_gets + n_puts) as f64 / secs
);
let ht = hist.lock().unwrap();
let mut kvs: Vec<_> = ht.iter().collect();
kvs.sort();
let v: Vec<_> = kvs
.iter()
.map(|&(k, v)| println!("Key {:?}: {:?} gets/puts", k, v))
.collect();
println!("Unique key count: {}", v.len());
assert_eq!(n_keys, v.len());
let total: i64 = kvs.iter().map(|&(_, &(g, s))| (g + s) as i64).sum();
let mut sum = 0;
for &(k, v) in kvs.iter() {
let &(g, s) = v;
sum += g + s;
let percentile = sum as f64 / total as f64;
println!("Key {:?}: {:?} percentile", k, percentile);
}
// For 20 keys median key should be near 4th key, so this checks out.
}
}
| new | identifier_name |
workspace.rs | use std::mem;
use std::fmt;
use std::io;
use std::path::{Path, PathBuf};
use std::ffi::OsStr;
use std::collections::{hash_map, HashMap};
use std::cell::RefCell;
use std::rc::Rc;
use std::sync::Arc;
use futures::{future, stream, Future, Stream, BoxFuture};
use futures_cpupool::CpuPool;
use url::Url;
use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use walkdir::WalkDir;
use kailua_env::{Unit, Pos, Span, Spanned, Source, SourceFile, SourceSlice};
use kailua_diag::{self, Stop, Report, Locale, Localize, Localized};
use kailua_syntax::{Lexer, Nest, NestedToken, Parser, Chunk};
use kailua_check;
use kailua_check::options::FsSource;
use kailua_check::env::{Context, Output};
use kailua_workspace::{self, WorkspaceOptions};
use fmtutils::Ellipsis;
use diags::{self, ReportTree};
use futureutils::{CancelError, CancelToken, CancelFuture};
use message as m;
use protocol;
/// A lightweight error type for workspace operations, carrying a static description.
#[derive(Clone, Debug)]
pub struct WorkspaceError(pub &'static str);
/// Convenience alias for results produced by workspace operations.
pub type WorkspaceResult<T> = Result<T, WorkspaceError>;
// Converts a `file://` URI into a local file system path.
//
// On Windows, additionally handles URIs with a non-empty authority (UNC paths like
// `file://host/share`), which `Url::to_file_path` rejects.
fn uri_to_path(uri: &str) -> WorkspaceResult<PathBuf> {
    let url = Url::parse(uri).map_err(|_| WorkspaceError("invalid URI"))?;
    if url.scheme() != "file" {
        return Err(WorkspaceError("non-file URI"));
    }
    if let Ok(path) = url.to_file_path() {
        return Ok(path);
    }
    #[cfg(windows)]
    {
        use std::ffi::OsString;
        use std::path::Component;
        use url::Host;
        // Url::to_file_path only handles no host or localhost, which is different from vscode-uri
        // we first try localhost then retry by temporarily setting the authority part on windows
        let host = match url.host() {
            Some(Host::Domain(name)) => name.to_string(),
            Some(Host::Ipv4(addr)) => addr.to_string(),
            Some(Host::Ipv6(addr)) => {
                // an "official" hack for UNC
                // https://msdn.microsoft.com/en-us/library/aa385353.aspx
                let s = &addr.segments();
                format!("{:x}-{:x}-{:x}-{:x}-{:x}-{:x}-{:x}-{:x}.ipv6-literal.net",
                        s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7])
            },
            None => return Err(WorkspaceError("non-file URI")),
        };
        // convert file://host/path to file:///z:/path (z: is required for rust-url)
        let url = Url::parse(&format!("file:///z:{}", url.path())).unwrap();
        if let Ok(path) = url.to_file_path() {
            // now path starts with z:\foo\bar, so replace z: by \\host to complete it
            let mut components = path.components();
            let _prefix = components.next();
            assert!(match _prefix { Some(Component::Prefix(..)) => true, _ => false });
            let mut pathstr = OsString::from("\\\\");
            pathstr.push(&host);
            pathstr.push(components.as_path());
            return Ok(PathBuf::from(pathstr));
        }
    }
    Err(WorkspaceError("non-file URI"))
}
// Translates an LSP (line, character) position into a source `Pos` for `file`.
//
// The character offset counts code units of the file's native encoding (bytes for
// U8 sources, UTF-16 units for U16 sources); returns a dummy Pos when the position
// is out of range.
fn position_to_pos(file: &SourceFile, pos: &protocol::Position) -> Pos {
    if let Some(mut span) = file.line_spans().nth(pos.line as usize) {
        let begin = span.begin().to_usize();
        let end = span.end().to_usize();
        let mut k = pos.character as usize;
        match file.data() {
            SourceSlice::U8(s) => {
                // locate k-th non-continuation byte in s where k is the 0-based column index.
                //
                // this code seems to be overly complicated. this is necessary because
                // we need to detect the end of the line, and a plain .nth(k) cannot determine
                // if the line has k exact scalar values or k is just out of bound.
                let iter = span.zip(s[begin..end].iter());
                for (p, _) in iter.filter(|&(_, &b)| b & 0b1100_0000 != 0b1000_0000) {
                    if k == 0 { return p; }
                    k -= 1;
                }
                // k characters remained after the last scalar value: either the line end
                // itself (k == 0) or truly out of bounds.
                if k == 0 { return span.end(); }
                Pos::dummy()
            },
            SourceSlice::U16(_) => {
                // same here, but the logic is much simpler
                if span.len() == k {
                    span.end()
                } else if let Some(p) = span.nth(k) {
                    p
                } else {
                    Pos::dummy()
                }
            },
        }
    } else {
        // The requested line does not exist in this file.
        Pos::dummy()
    }
}
// Lexes the given span of `source` into a nested token stream, reporting lexer
// diagnostics to `report`. The returned vector always ends with an EOF token.
fn collect_tokens(source: &Source, span: Span, report: &Report) -> Vec<NestedToken> {
    let mut iter = source.iter_from_span(span).unwrap();
    let tokens = {
        let mut lexer = Lexer::new(&mut iter, report);
        let nest = Nest::new(&mut lexer);
        nest.collect::<Vec<_>>()
    };
    assert!(!tokens.is_empty()); // should include EOF
    tokens
}
// Parses a pre-lexed token stream into a chunk, reporting parse errors to `report`.
fn parse_to_chunk(tokens: Vec<NestedToken>, report: &Report) -> kailua_diag::Result<Chunk> {
    let mut iter = tokens.into_iter();
    Parser::new(&mut iter, report).into_chunk()
}
/// A client-managed ("open") text document, mirroring the latest synchronized text.
#[derive(Clone, Debug)]
pub struct OpenDocument {
    // The document URI as supplied by the client.
    uri: String,
    // The language identifier reported by the client.
    lang_id: String,
    // The last version number received; change notifications must strictly increase it.
    last_version: u64,
    // The full text as of `last_version`.
    last_text: String,
}
impl OpenDocument {
    // Creates an OpenDocument from an LSP `didOpen` text document item.
    fn new(item: protocol::TextDocumentItem) -> OpenDocument {
        OpenDocument {
            uri: item.uri,
            lang_id: item.languageId,
            last_version: item.version,
            last_text: item.text,
        }
    }
}
// clonable, externally visible future type at work
pub type IoFuture<T> =
    future::Shared<BoxFuture<T, CancelError<io::Error>>>;
// like IoFuture, but the result carries diagnostics and errors are reported as diagnostics
pub type ReportFuture<T> =
    future::Shared<BoxFuture<(T, ReportTree), CancelError<ReportTree>>>;
// Mutable state of a single workspace file; always accessed behind an RwLock.
struct WorkspaceFileInner {
    workspace: Arc<RwLock<WorkspaceShared>>,
    pool: Arc<CpuPool>,
    cancel_token: CancelToken,
    source: Arc<RwLock<Source>>,
    message_locale: Locale,
    path: PathBuf,
    // Source unit assigned on first read; dummy until then.
    unit: Unit,
    // if Some, the file is managed by the client and the text is synchronized
    document: Option<OpenDocument>,
    // each part is calculated on demand; in either case diagnostics are produced
    span: Option<IoFuture<Span>>,
    tokens: Option<ReportFuture<Arc<Vec<NestedToken>>>>,
    chunk: Option<ReportFuture<Arc<Chunk>>>,
    // The most recent successfully parsed chunk, kept even after cancellation.
    last_chunk: Option<Arc<Chunk>>,
}
type Inner = Arc<RwLock<WorkspaceFileInner>>;
type InnerWrite<'a> = RwLockWriteGuard<'a, WorkspaceFileInner>;
/// A cheaply clonable handle to a single file tracked by the workspace.
#[derive(Clone)]
pub struct WorkspaceFile {
    inner: Inner,
}
// Debug formatting that elides bulky shared handles and futures.
impl fmt::Debug for WorkspaceFile {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let inner = self.inner.read();
        f.debug_struct("WorkspaceFile")
            .field("workspace", &Ellipsis) // avoid excess output
            .field("pool", &Ellipsis)
            .field("cancel_token", &inner.cancel_token)
            .field("source", &Ellipsis)
            .field("message_locale", &inner.message_locale)
            .field("path", &inner.path)
            .field("unit", &inner.unit)
            .field("document", &inner.document)
            .field("span", &inner.span.as_ref().map(|_| Ellipsis))
            .field("tokens", &inner.tokens.as_ref().map(|_| Ellipsis))
            .field("chunk", &inner.chunk.as_ref().map(|_| Ellipsis))
            .field("last_chunk", &inner.last_chunk.as_ref().map(|_| Ellipsis))
            .finish()
    }
}
impl WorkspaceFile {
    // Creates a file entry with all lazily-computed parts unset.
    fn new(shared: &Arc<RwLock<WorkspaceShared>>, pool: &Arc<CpuPool>,
           source: &Arc<RwLock<Source>>, message_locale: Locale, path: PathBuf) -> WorkspaceFile {
        WorkspaceFile {
            inner: Arc::new(RwLock::new(WorkspaceFileInner {
                workspace: shared.clone(),
                pool: pool.clone(),
                cancel_token: CancelToken::new(),
                source: source.clone(),
                message_locale: message_locale,
                path: path,
                unit: Unit::dummy(),
                document: None,
                span: None,
                tokens: None,
                chunk: None,
                last_chunk: None,
            })),
        }
    }
    // Cancels all in-flight futures for this file and invalidates the cached
    // span/tokens/chunk so they get recomputed on demand.
    fn cancel(&self) {
        let mut inner = self.inner.write();
        inner.cancel_token.cancel();
        inner.cancel_token = CancelToken::new();
        inner.span = None;
        inner.tokens = None;
        inner.chunk = None;
        // also signal the workspace to cancel jobs
        inner.workspace.write().cancel();
    }
    #[allow(dead_code)]
    pub fn path(&self) -> PathBuf {
        self.inner.read().path.clone()
    }
    // Replaces the synchronized document via `f`, cancelling cached work first.
    fn update_document<F, E>(&self, f: F) -> Result<(), E>
        where F: FnOnce(Option<OpenDocument>) -> Result<Option<OpenDocument>, E>
    {
        self.cancel();
        let mut inner = self.inner.write();
        inner.document = f(inner.document.take())?;
        Ok(())
    }
    // Lazily spawns a future that (re)reads the file into the shared Source and
    // yields its span. Uses the synchronized document text when available,
    // otherwise reads from disk.
    fn ensure_span_with_inner(spare_inner: Inner, inner: &mut InnerWrite) -> IoFuture<Span> {
        if inner.span.is_none() {
            let fut = future::lazy(move || -> Result<Span, CancelError<io::Error>> {
                let mut inner = spare_inner.write();
                inner.cancel_token.keep_going()?;
                let file = if let Some(ref doc) = inner.document {
                    SourceFile::from_u8(inner.path.display().to_string(),
                                        doc.last_text.as_bytes().to_owned())
                } else {
                    SourceFile::from_file(&inner.path)?
                };
                // First read allocates a fresh unit; later reads replace it in place.
                let span = if inner.unit.is_dummy() {
                    let span = inner.source.write().add(file);
                    inner.unit = span.unit();
                    span
                } else {
                    inner.source.write().replace(inner.unit, file).unwrap()
                };
                Ok(span)
            });
            inner.span = Some(inner.pool.spawn(fut).boxed().shared());
        }
        inner.span.as_ref().unwrap().clone()
    }
    pub fn ensure_span(&self) -> IoFuture<Span> {
        let cloned = self.inner.clone();
        Self::ensure_span_with_inner(cloned, &mut self.inner.write())
    }
    // Lazily spawns a future that lexes the file's span into tokens, translating an
    // I/O failure into diagnostics attached to the file and the config path.
    fn ensure_tokens_with_inner(spare_inner: Inner,
                                inner: &mut InnerWrite) -> ReportFuture<Arc<Vec<NestedToken>>> {
        if inner.tokens.is_none() {
            let span_fut = Self::ensure_span_with_inner(spare_inner.clone(), inner);
            // important: the task has to be spawned outside of the future.
            // this is because, otherwise for the thread pool of n workers
            // the future chain of n+1 or more tasks will block as the i-th task
            // will spawn the (i+1)-th task without removing itself from the pool queue!
            // chaining the already-spawned future will ensure that
            // the task body will be only spawned after the last future has been finished.
            let fut = span_fut.then(move |span_ret| {
                let inner = spare_inner.read();
                match span_ret {
                    Ok(span) => {
                        inner.cancel_token.keep_going()?;
                        let source = inner.source.read();
                        let path = source.file(span.unit()).map(|f| f.path());
                        let diags = ReportTree::new(inner.message_locale, path);
                        let report = diags.report(|span| diags::translate_span(span, &source));
                        let tokens = collect_tokens(&source, *span, &report);
                        Ok((Arc::new(tokens), diags))
                    },
                    Err(e) => {
                        Err(e.as_ref().map(|e| {
                            // translate an I/O error into a report
                            let dummy_diag = |msg: &Localize| {
                                protocol::Diagnostic {
                                    range: protocol::Range {
                                        start: protocol::Position { line: 0, character: 0 },
                                        end: protocol::Position { line: 0, character: 0 },
                                    },
                                    severity: Some(protocol::DiagnosticSeverity::Error),
                                    code: None,
                                    source: None,
                                    message: Localized::new(msg, inner.message_locale).to_string(),
                                }
                            };
                            let path = inner.path.display().to_string();
                            let config_path = inner.workspace.read().base.config_path_or_default();
                            let config_path = config_path.display().to_string();
                            let diags = ReportTree::new(inner.message_locale, Some(&path));
                            diags.add_diag(path, dummy_diag(&m::CannotOpenStartPath { error: e }));
                            diags.add_diag(config_path, dummy_diag(&m::RestartRequired {}));
                            diags
                        }))
                    },
                }
            });
            inner.tokens = Some(inner.pool.spawn(fut).boxed().shared());
        }
        inner.tokens.as_ref().unwrap().clone()
    }
    pub fn ensure_tokens(&self) -> ReportFuture<Arc<Vec<NestedToken>>> {
        let cloned = self.inner.clone();
        Self::ensure_tokens_with_inner(cloned, &mut self.inner.write())
    }
    // Lazily spawns a future that parses the token stream into a chunk, caching the
    // last successful parse in `last_chunk`.
    fn ensure_chunk_with_inner(spare_inner: Inner,
                               inner: &mut InnerWrite) -> ReportFuture<Arc<Chunk>> {
        if inner.chunk.is_none() {
            let tokens_fut = Self::ensure_tokens_with_inner(spare_inner.clone(), inner);
            let fut = tokens_fut.map_err(|e| (*e).clone()).and_then(move |tokens_ret| {
                let tokens = (*tokens_ret.0).clone();
                let parent_diags = tokens_ret.1.clone();
                let mut inner = spare_inner.write();
                inner.cancel_token.keep_going()?;
                let diags = ReportTree::new(inner.message_locale, None);
                diags.add_parent(parent_diags);
                // in this future source access is only needed for reporting
                let chunk = {
                    let report = diags.report(|span| {
                        diags::translate_span(span, &inner.source.read())
                    });
                    parse_to_chunk(tokens, &report)
                };
                match chunk {
                    Ok(chunk) => {
                        let chunk = Arc::new(chunk);
                        inner.last_chunk = Some(chunk.clone());
                        Ok((chunk, diags))
                    },
                    Err(_) => Err(From::from(diags)),
                }
            });
            inner.chunk = Some(inner.pool.spawn(fut).boxed().shared());
        }
        inner.chunk.as_ref().unwrap().clone()
    }
    pub fn ensure_chunk(&self) -> ReportFuture<Arc<Chunk>> {
        let cloned = self.inner.clone();
        Self::ensure_chunk_with_inner(cloned, &mut self.inner.write())
    }
    // Returns the most recently parsed chunk, if any (survives cancellation).
    pub fn last_chunk(&self) -> Option<Arc<Chunk>> {
        self.inner.read().last_chunk.clone()
    }
    // Asynchronously converts an LSP position into a `Pos` within this file's span.
    // Yields a dummy Pos when the file is unknown or the position is out of range.
    pub fn translate_position(&self, pos: &protocol::Position) -> BoxFuture<Pos, CancelError<()>> {
        let pos = pos.clone();
        let source = self.inner.read().source.clone();
        self.ensure_span().then(move |res| {
            match res {
                Ok(span) => {
                    let source = source.read();
                    if let Some(file) = source.file(span.unit()) {
                        Ok(position_to_pos(file, &pos))
                    } else {
                        Ok(Pos::dummy())
                    }
                },
                Err(e) => Err(e.as_ref().map(|_| ()))
            }
        }).boxed()
    }
pub fn | (&mut self, version: u64,
event: protocol::TextDocumentContentChangeEvent) -> WorkspaceResult<()> {
// TODO, there are several ambiguities with offsets?
if event.range.is_some() || event.rangeLength.is_some() {
return Err(WorkspaceError("incremental edits not yet supported"));
}
self.update_document(move |doc| {
if let Some(mut doc) = doc {
if doc.last_version >= version {
return Err(WorkspaceError("non-increasing version"));
}
doc.last_version = version;
doc.last_text = event.text;
Ok(Some(doc))
} else {
Err(WorkspaceError("change notification with non-existent or non-open file"))
}
})
}
}
// The workspace's configuration state: a raw Config before `read_config` succeeds,
// a fully-resolved Workspace afterwards.
#[derive(Clone, Debug)]
enum WorkspaceBase {
    Config(kailua_workspace::Config),
    Workspace(kailua_workspace::Workspace),
}
impl WorkspaceBase {
    // The configuration file path, if one has been determined.
    fn config_path(&self) -> Option<&Path> {
        match *self {
            WorkspaceBase::Config(ref config) => config.config_path(),
            WorkspaceBase::Workspace(ref ws) => ws.config_path(),
        }
    }
    // The configuration file path, falling back to the conventional default location.
    fn config_path_or_default(&self) -> PathBuf {
        if let Some(config_path) = self.config_path() {
            config_path.to_owned()
        } else {
            // we allow both `kailua.json` or `.vscode/kailua.json`,
            // for now we will issue an error at the latter
            self.base_dir().join(".vscode").join("kailua.json")
        }
    }
    fn base_dir(&self) -> &Path {
        match *self {
            WorkspaceBase::Config(ref config) => config.base_dir(),
            WorkspaceBase::Workspace(ref ws) => ws.base_dir(),
        }
    }
}
// a portion of Workspace that should be shared across WorkspaceFile.
// this should not be modified in the normal cases (otherwise it can be easily deadlocked),
// with an exception of cascading cancellation.
struct WorkspaceShared {
    cancel_token: CancelToken, // used for stopping ongoing checks
    base: WorkspaceBase,
    // One slot per start path: in-flight check future, and the last completed output.
    check_outputs: Vec<Option<ReportFuture<Arc<Output>>>>,
    last_check_outputs: Vec<Option<Arc<Output>>>,
}
type Shared = Arc<RwLock<WorkspaceShared>>;
type SharedWrite<'a> = RwLockWriteGuard<'a, WorkspaceShared>;
impl fmt::Debug for WorkspaceShared {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Helper that prints a list of Options with the payloads elided.
        struct DummyOptionList<'a, T: 'a>(&'a [Option<T>]);
        impl<'a, T: 'a> fmt::Debug for DummyOptionList<'a, T> {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                f.debug_list().entries(self.0.iter().map(|e| e.as_ref().map(|_| Ellipsis))).finish()
            }
        }
        f.debug_struct("WorkspaceShared")
            .field("base", &self.base)
            .field("cancel_token", &self.cancel_token)
            .field("check_outputs", &DummyOptionList(&self.check_outputs))
            .field("last_check_outputs", &DummyOptionList(&self.last_check_outputs))
            .finish()
    }
}
impl WorkspaceShared {
    // Cancels all ongoing checks and clears their pending futures;
    // `last_check_outputs` is deliberately kept.
    fn cancel(&mut self) {
        self.cancel_token.cancel();
        self.cancel_token = CancelToken::new();
        for output in &mut self.check_outputs {
            *output = None;
        }
    }
}
// State backing WorkspaceFsSource during a single check run.
struct WorkspaceFsSourceInner {
    cancel_token: CancelToken, // will be used independently of WorkspaceShared
    files: Arc<RwLock<HashMap<PathBuf, WorkspaceFile>>>,
    source: Arc<RwLock<Source>>,
    temp_units: Vec<Unit>, // will be gone after checking
    temp_files: HashMap<PathBuf, Chunk>,
    message_locale: Locale,
    // Collects diagnostics from every chunk read during the check.
    root_report: ReportTree,
}
// The checker's file-system source; cheap to clone, single-threaded (Rc/RefCell).
#[derive(Clone)]
struct WorkspaceFsSource {
    inner: Rc<RefCell<WorkspaceFsSourceInner>>,
}
// Supplies chunks to the type checker, preferring (in order) client-synchronized
// workspace files, chunks already read this run, and finally the file system.
impl FsSource for WorkspaceFsSource {
    fn chunk_from_path(&self, path: Spanned<&Path>,
                       _report: &Report) -> Result<Option<Chunk>, Option<Stop>> {
        let mut fssource = self.inner.borrow_mut();
        fssource.cancel_token.keep_going::<()>().map_err(|_| Stop)?;
        // try to use the client-maintained text as a source code
        let files = fssource.files.clone();
        let files = files.read();
        if let Some(file) = files.get(path.base) {
            let (chunk, diags) = match file.ensure_chunk().wait() {
                Ok(res) => {
                    let (ref chunk, ref diags) = *res;
                    (Some((**chunk).clone()), diags.clone())
                },
                Err(res) => match *res {
                    CancelError::Canceled => return Err(Some(Stop)),
                    CancelError::Error(ref diags) => (None, diags.clone())
                },
            };
            // this can be called multiple times, which ReportTree handles correctly
            fssource.root_report.add_parent(diags);
            return Ok(chunk);
        }
        drop(files); // avoid prolonged lock
        // try to use the already-read temporary chunk
        if let Some(chunk) = fssource.temp_files.get(path.base) {
            return Ok(Some(chunk.clone()));
        }
        // try to read the file (and finally raise an error if it can't be read)
        let sourcefile = match SourceFile::from_file(path.base) {
            Ok(f) => f,
            Err(ref e) if e.kind() == io::ErrorKind::NotFound => return Ok(None),
            Err(_) => return Err(None),
        };
        let span = fssource.source.write().add(sourcefile);
        fssource.temp_units.push(span.unit());
        let diags = ReportTree::new(fssource.message_locale, path.to_str());
        fssource.root_report.add_parent(diags.clone());
        let chunk = {
            let source = fssource.source.read();
            let report = diags.report(|span| diags::translate_span(span, &source));
            let tokens = collect_tokens(&source, span, &report);
            parse_to_chunk(tokens, &report)
        };
        match chunk {
            Ok(chunk) => {
                // Cache the parsed chunk for the remainder of this check run.
                fssource.temp_files.insert(path.base.to_owned(), chunk.clone());
                Ok(Some(chunk))
            },
            Err(Stop) => Err(Some(Stop)), // we have already reported parsing errors
        }
    }
}
/// The top-level language-server workspace: tracks files, the shared source map,
/// and shared checker state.
pub struct Workspace {
    message_locale: Locale,
    pool: Arc<CpuPool>,
    files: Arc<RwLock<HashMap<PathBuf, WorkspaceFile>>>,
    // conceptually this belongs to shared, but it is frequently updated by futures
    // unlike all other fields in shared, so getting this out avoids deadlock
    source: Arc<RwLock<Source>>,
    shared: Arc<RwLock<WorkspaceShared>>,
}
impl fmt::Debug for Workspace {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Workspace")
            .field("message_locale", &self.message_locale)
            .field("pool", &Ellipsis)
            .field("files", &self.files)
            .field("source", &Ellipsis)
            .field("shared", &self.shared)
            .finish()
    }
}
impl Workspace {
    // Creates an empty workspace rooted at `base_dir`; the config is not read yet.
    pub fn new(base_dir: PathBuf, pool: Arc<CpuPool>, default_locale: Locale) -> Workspace {
        Workspace {
            message_locale: default_locale,
            pool: pool,
            files: Arc::new(RwLock::new(HashMap::new())),
            source: Arc::new(RwLock::new(Source::new())),
            shared: Arc::new(RwLock::new(WorkspaceShared {
                cancel_token: CancelToken::new(),
                base: WorkspaceBase::Config(kailua_workspace::Config::from_base_dir(base_dir)),
                check_outputs: Vec::new(),
                last_check_outputs: Vec::new(),
            })),
        }
    }
    pub fn pool(&self) -> &Arc<CpuPool> {
        &self.pool
    }
    pub fn source<'a>(&'a self) -> RwLockReadGuard<'a, Source> {
        self.source.read()
    }
    // True once `read_config` has successfully resolved the workspace.
    pub fn has_read_config(&self) -> bool {
        if let WorkspaceBase::Workspace(_) = self.shared.read().base { true } else { false }
    }
    #[allow(dead_code)]
    pub fn config_path(&self) -> Option<PathBuf> {
        self.shared.read().base.config_path().map(|p| p.to_owned())
    }
    pub fn config_path_or_default(&self) -> PathBuf {
        self.shared.read().base.config_path_or_default()
    }
    // Attempts to resolve the configuration into a full workspace.
    // Returns false when a config was found but could not be resolved;
    // a second call after success is a no-op returning true.
    pub fn read_config(&mut self) -> bool {
        let mut shared = self.shared.write();
        let ws = if let WorkspaceBase::Config(ref mut config) = shared.base {
            config.use_default_config_paths();
            if let Some(ws) = kailua_workspace::Workspace::new(config, self.message_locale) {
                Some(ws)
            } else {
                return false;
            }
        } else {
            None
        };
        if let Some(ws) = ws {
            // One check-output slot per configured start path.
            let noutputs = ws.start_paths().len();
            shared.base = WorkspaceBase::Workspace(ws);
            shared.check_outputs.resize(noutputs, None);
            shared.last_check_outputs.resize(noutputs, None);
        }
        true
    }
    // Walks the base directory and pre-parses every .lua/.kailua file found.
    pub fn populate_watchlist(&mut self) {
        let walker = WalkDir::new(self.shared.read().base.base_dir());
        for e in walker.follow_links(true) {
            // we don't care about I/O errors and (in Unix) symlink loops
            let e = if let Ok(e) = e { e } else { continue };
            let ext = e.path().extension();
            if ext == Some(OsStr::new("lua")) || ext == Some(OsStr::new("kailua")) {
                // TODO probably this should be of the lower priority
                let _ = self.ensure_file(e.path()).ensure_chunk();
            }
        }
    }
    pub fn localize<'a, T: Localize + ?Sized + 'a>(&self, msg: &'a T) -> Localized<'a, T> {
        Localized::new(msg, self.message_locale)
    }
    pub fn files<'a>(&'a self) -> RwLockReadGuard<'a, HashMap<PathBuf, WorkspaceFile>> {
        self.files.read()
    }
    // Looks up a tracked file by its URI; None for unknown files or bad URIs.
    pub fn file<'a>(&'a self, uri: &str) -> Option<WorkspaceFile> {
        match uri_to_path(uri) {
            Ok(path) => self.files.read().get(&path).cloned(),
            Err(_) => None,
        }
    }
    fn make_file(&self, path: PathBuf) -> WorkspaceFile {
        WorkspaceFile::new(&self.shared, &self.pool, &self.source, self.message_locale, path)
    }
    // Cancels the file's work and removes it from the source map. Returns true when
    // the file had an open document and a registered source unit.
    fn destroy_file(&self, file: WorkspaceFile) -> bool {
        file.cancel();
        let file = file.inner.read();
        let sourcefile = self.source.write().remove(file.unit);
        file.document.is_some() && sourcefile.is_some()
    }
    // Handles the LSP didOpen notification: registers the file (if needed) and
    // attaches the client-synchronized document to it.
    pub fn open_file(&self, item: protocol::TextDocumentItem) -> WorkspaceResult<()> {
        let path = uri_to_path(&item.uri)?;
        let mut files = self.files.write();
        let file = files.entry(path.clone()).or_insert_with(|| self.make_file(path));
        file.update_document(|doc| {
            if doc.is_some() {
                Err(WorkspaceError("open notification with duplicate file"))
            } else {
                Ok(Some(OpenDocument::new(item)))
            }
        })
    }
    // Returns the tracked file for `path`, registering it first when unknown.
    fn ensure_file(&self, path: &Path) -> WorkspaceFile {
        let mut files = self.files.write();
        files.entry(path.to_owned()).or_insert_with(|| self.make_file(path.to_owned())).clone()
    }
pub fn close_file(&self, uri: &str) -> WorkspaceResult<()> {
let path = uri_to_path(uri)?;
// closing file breaks the synchronization so the file should be re-read from fs
let mut files = self.files.write();
let ok = if let hash_map::Entry::Occupied(mut e) = files.entry(path.clone()) {
// replace the previous WorkspaceFile by a fresh WorkspaceFile
let file = mem::replace(e.get_mut(), self.make_file(path));
self.destroy_file(file)
} else {
false
};
if ok {
Ok(())
} else {
Err(WorkspaceError("close notification with non-existent or non-open file"))
}
}
pub fn on_file_created(&self, uri: &str) -> Option<WorkspaceFile> {
if let Ok(path) = uri_to_path(uri) {
let file = self.ensure_file(&path);
let _ = file.ensure_chunk();
Some(file)
} else {
None
}
}
pub fn on_file_changed(&self, uri: &str) -> Option<WorkspaceFile> {
if let Ok(path) = uri_to_path(uri) {
let file = self.ensure_file(&path);
file.cancel();
let _ = file.ensure_chunk();
Some(file)
} else {
None
}
}
pub fn on_file_deleted(&self, uri: &str) {
if let Ok(path) = uri_to_path(uri) {
let mut files = self.files.write();
if let Some(file) = files.remove(&path) {
self.destroy_file(file);
}
}
}
#[allow(dead_code)]
pub fn cancel(&self) {
self.shared.write().cancel();
}
pub fn cancel_future(&self) -> CancelFuture {
self.shared.read().cancel_token.future()
}
fn build_future_for_check_output(
&self, index: usize, start_path: &Path, spare_shared: Shared, shared: &mut SharedWrite
) -> ReportFuture<Arc<Output>> {
let start_chunk_fut = self.ensure_file(start_path).ensure_chunk();
let start_path = start_path.to_owned();
let files = self.files.clone();
let source = self.source.clone();
let cancel_token = shared.cancel_token.clone();
let message_locale = self.message_locale;
let fut = start_chunk_fut.map_err(|e| (*e).clone()).and_then(move |chunk_ret| {
cancel_token.keep_going()?;
let start_chunk = (*chunk_ret.0).clone();
let diags = ReportTree::new(message_locale, None);
diags.add_parent(chunk_ret.1.clone());
// the actual checking process.
//
// this will routinely lock the shared, so we avoid locking it from the caller
// by cloning required values prematurely.
let fssource = WorkspaceFsSource {
inner: Rc::new(RefCell::new(WorkspaceFsSourceInner {
cancel_token: cancel_token.clone(),
files: files,
source: source.clone(),
temp_units: Vec::new(),
temp_files: HashMap::new(),
message_locale: message_locale,
root_report: diags.clone(),
})),
};
let (opts, preload) = match spare_shared.read().base {
WorkspaceBase::Config(_) => {
// it should not be the case, but if we ever get to this point,
// we cannot proceed at all because there's no start path.
// we should have been alerted though.
return Err(From::from(diags));
},
WorkspaceBase::Workspace(ref ws) => {
let opts = WorkspaceOptions::new(fssource.clone(), &start_path, ws);
(Rc::new(RefCell::new(opts)), ws.preload().clone())
},
};
let (ok, output) = {
// the translation should NOT lock the source (read or write) indefinitely.
// we also want to drop the proxy report as fast as possible.
let mut context = Context::new(diags.report(|span| {
diags::translate_span(span, &source.read())
}));
let ok = kailua_check::check_from_chunk_with_preloading(&mut context, start_chunk,
opts, &preload).is_ok();
(ok, context.into_output())
};
// fssource should be owned only by this function; the following should not fail
let fssource = Rc::try_unwrap(fssource.inner).ok().expect("no single owner");
let fssource = fssource.into_inner();
// remove all temporarily added chunks from the source
// XXX ideally this should be cached as much as possible though
let mut source = source.write();
for unit in fssource.temp_units {
let sourcefile = source.remove(unit);
assert!(sourcefile.is_some());
}
// FsSource may have failed from the cancel request, so we should catch it here
cancel_token.keep_going()?;
if ok {
let output = Arc::new(output);
spare_shared.write().last_check_outputs[index] = Some(output.clone());
Ok((output, diags))
} else {
Err(From::from(diags))
}
});
self.pool.spawn(fut).boxed().shared()
}
pub fn ensure_check_outputs(&self) -> WorkspaceResult<Vec<ReportFuture<Arc<Output>>>> {
let spare_shared = self.shared.clone();
let mut shared = self.shared.write();
let start_paths = match shared.base {
WorkspaceBase::Config(_) => {
return Err(WorkspaceError("cannot start checking without a start file specified"));
},
WorkspaceBase::Workspace(ref ws) => ws.start_paths().to_owned(),
};
assert_eq!(shared.check_outputs.len(), start_paths.len());
for (i, path) in start_paths.iter().enumerate() {
if shared.check_outputs[i].is_none() {
let fut = self.build_future_for_check_output(i, path, spare_shared.clone(),
&mut shared);
shared.check_outputs[i] = Some(fut);
}
}
Ok(shared.check_outputs.iter().map(|fut| fut.as_ref().unwrap().clone()).collect())
}
// this is similar to `ensure_check_outputs`, but produces a single future
// that returns an array of `Output`s and a single combined diagnostics.
// (it is intended that they are not associated to each other, so they are separate)
pub fn ensure_combined_check_outputs(&self)
-> WorkspaceResult<BoxFuture<(Vec<Arc<Output>>, ReportTree), CancelError<()>>>
{
let output_futs = self.ensure_check_outputs()?;
// checking can result in the fatal error (Err) only when cancellation is requested.
// so we only need to return Ok when every checking results in Ok,
// avoiding the difficulty to combine incomplete reports when one of them fails.
let output_stream = stream::iter(output_futs.into_iter().map(Ok)).and_then(|fut| fut);
let outputs_fut = output_stream.collect();
let message_locale = self.message_locale;
Ok(outputs_fut.map_err(|_| CancelError::Error(())).map(move |ret| {
let mut outputs = Vec::new();
let diags = ReportTree::new(message_locale, None);
for e in ret.into_iter() {
outputs.push(e.0.clone());
diags.add_parent(e.1.clone());
}
(outputs, diags)
}).boxed())
}
#[allow(dead_code)]
pub fn last_check_outputs(&self) -> Vec<Option<Arc<Output>>> {
self.shared.read().last_check_outputs.clone()
}
pub fn last_valid_check_outputs(&self) -> Vec<Arc<Output>> {
self.shared.read().last_check_outputs.iter().filter_map(|e| e.clone()).collect()
}
}
| apply_change | identifier_name |
workspace.rs | use std::mem;
use std::fmt;
use std::io;
use std::path::{Path, PathBuf};
use std::ffi::OsStr;
use std::collections::{hash_map, HashMap};
use std::cell::RefCell;
use std::rc::Rc;
use std::sync::Arc;
use futures::{future, stream, Future, Stream, BoxFuture};
use futures_cpupool::CpuPool;
use url::Url;
use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use walkdir::WalkDir;
use kailua_env::{Unit, Pos, Span, Spanned, Source, SourceFile, SourceSlice};
use kailua_diag::{self, Stop, Report, Locale, Localize, Localized};
use kailua_syntax::{Lexer, Nest, NestedToken, Parser, Chunk};
use kailua_check;
use kailua_check::options::FsSource;
use kailua_check::env::{Context, Output};
use kailua_workspace::{self, WorkspaceOptions};
use fmtutils::Ellipsis;
use diags::{self, ReportTree};
use futureutils::{CancelError, CancelToken, CancelFuture};
use message as m;
use protocol;
/// An error for invalid workspace operations, carrying a static description
/// that is relayed to the client as-is.
#[derive(Clone, Debug)]
pub struct WorkspaceError(pub &'static str);

/// Convenience alias for results of workspace operations.
pub type WorkspaceResult<T> = Result<T, WorkspaceError>;
/// Converts a `file://` URI into a local filesystem path.
///
/// Fails with a `WorkspaceError` for unparsable URIs, non-`file` schemes,
/// or paths unrepresentable on the current platform. On Windows there is a
/// fallback for URIs with an authority (host) component, which
/// `Url::to_file_path` alone does not accept; the host becomes a UNC prefix.
fn uri_to_path(uri: &str) -> WorkspaceResult<PathBuf> {
    let url = Url::parse(uri).map_err(|_| WorkspaceError("invalid URI"))?;

    if url.scheme() != "file" {
        return Err(WorkspaceError("non-file URI"));
    }

    // the common case: no host (or localhost), which rust-url handles directly
    if let Ok(path) = url.to_file_path() {
        return Ok(path);
    }

    #[cfg(windows)]
    {
        use std::ffi::OsString;
        use std::path::Component;
        use url::Host;

        // Url::to_file_path only handles no host or localhost, which is different from vscode-uri
        // we first try localhost then retry by temporarily setting the authority part on windows
        let host = match url.host() {
            Some(Host::Domain(name)) => name.to_string(),
            Some(Host::Ipv4(addr)) => addr.to_string(),
            Some(Host::Ipv6(addr)) => {
                // an "official" hack for UNC
                // https://msdn.microsoft.com/en-us/library/aa385353.aspx
                let s = &addr.segments();
                format!("{:x}-{:x}-{:x}-{:x}-{:x}-{:x}-{:x}-{:x}.ipv6-literal.net",
                        s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7])
            },
            None => return Err(WorkspaceError("non-file URI")),
        };

        // convert file://host/path to file:///z:/path (z: is required for rust-url)
        let url = Url::parse(&format!("file:///z:{}", url.path())).unwrap();
        if let Ok(path) = url.to_file_path() {
            // now path starts with z:\foo\bar, so replace z: by \\host to complete it
            let mut components = path.components();
            let _prefix = components.next();
            assert!(match _prefix { Some(Component::Prefix(..)) => true, _ => false });
            let mut pathstr = OsString::from("\\\\");
            pathstr.push(&host);
            pathstr.push(components.as_path());
            return Ok(PathBuf::from(pathstr));
        }
    }

    Err(WorkspaceError("non-file URI"))
}
/// Converts a protocol position (0-based line and character) in `file` into
/// an internal `Pos`. Returns a dummy `Pos` when the line or the column is
/// out of range for the file.
///
/// NOTE(review): the character offset is counted per decoded character
/// (non-continuation byte for U8 sources, code unit for U16 sources) —
/// presumably matching the client's column convention; confirm against the
/// protocol layer.
fn position_to_pos(file: &SourceFile, pos: &protocol::Position) -> Pos {
    if let Some(mut span) = file.line_spans().nth(pos.line as usize) {
        let begin = span.begin().to_usize();
        let end = span.end().to_usize();
        let mut k = pos.character as usize;
        match file.data() {
            SourceSlice::U8(s) => {
                // locate k-th non-continuation byte in s where k is the 0-based column index.
                //
                // this code seems to be overly complicated. this is necessary because
                // we need to detect the end of the line, and a plain .nth(k) cannot determine
                // if the line has k exact scalar values or k is just out of bound.
                let iter = span.zip(s[begin..end].iter());
                for (p, _) in iter.filter(|&(_, &b)| b & 0b1100_0000 != 0b1000_0000) {
                    if k == 0 { return p; }
                    k -= 1;
                }
                // exactly at the end of the line
                if k == 0 { return span.end(); }
                Pos::dummy()
            },
            SourceSlice::U16(_) => {
                // same here, but the logic is much simpler
                if span.len() == k {
                    span.end()
                } else if let Some(p) = span.nth(k) {
                    p
                } else {
                    Pos::dummy()
                }
            },
        }
    } else {
        Pos::dummy()
    }
}
/// Lexes the source text covered by `span` and groups the tokens by nesting.
/// The result always contains at least the trailing EOF token.
fn collect_tokens(source: &Source, span: Span, report: &Report) -> Vec<NestedToken> {
    let mut iter = source.iter_from_span(span).unwrap();
    let mut lexer = Lexer::new(&mut iter, report);
    let tokens: Vec<NestedToken> = Nest::new(&mut lexer).collect();
    assert!(!tokens.is_empty()); // should include EOF
    tokens
}
/// Parses a nested token stream into a chunk, routing diagnostics to `report`.
fn parse_to_chunk(tokens: Vec<NestedToken>, report: &Report) -> kailua_diag::Result<Chunk> {
    let mut iter = tokens.into_iter();
    Parser::new(&mut iter, report).into_chunk()
}
/// The client-synchronized view of an open text document.
#[derive(Clone, Debug)]
pub struct OpenDocument {
    uri: String,          // the document URI as sent by the client
    lang_id: String,      // the client-reported language identifier
    last_version: u64,    // version of the last applied change; must strictly increase
    last_text: String,    // full text after the last applied change
}
impl OpenDocument {
    /// Builds the internal record from a `didOpen` protocol payload.
    fn new(item: protocol::TextDocumentItem) -> OpenDocument {
        let protocol::TextDocumentItem { uri, languageId, version, text, .. } = item;
        OpenDocument {
            uri: uri,
            lang_id: languageId,
            last_version: version,
            last_text: text,
        }
    }
}
// clonable, externally visible future types at work.
// `Shared` allows the same in-flight computation to be awaited by many consumers.

/// A shared future yielding a value or an (possibly canceled) I/O error.
pub type IoFuture<T> =
    future::Shared<BoxFuture<T, CancelError<io::Error>>>;

/// A shared future yielding a value plus its diagnostics, or the diagnostics alone on failure.
pub type ReportFuture<T> =
    future::Shared<BoxFuture<(T, ReportTree), CancelError<ReportTree>>>;
/// Mutable state of a single tracked file, shared behind a lock.
struct WorkspaceFileInner {
    workspace: Arc<RwLock<WorkspaceShared>>, // back-reference for cascading cancellation
    pool: Arc<CpuPool>,                      // pool used to spawn this file's futures

    cancel_token: CancelToken,               // cancels this file's pending futures

    source: Arc<RwLock<Source>>,             // global source registry
    message_locale: Locale,

    path: PathBuf,
    unit: Unit,                              // dummy until the file is first added to `source`

    // if Some, the file is managed by the client and the text is synchronized
    document: Option<OpenDocument>,

    // each parts are calculated on demand; in either case diagnostics are produced
    span: Option<IoFuture<Span>>,
    tokens: Option<ReportFuture<Arc<Vec<NestedToken>>>>,
    chunk: Option<ReportFuture<Arc<Chunk>>>,

    last_chunk: Option<Arc<Chunk>>,          // survives cancellation, unlike `chunk`
}
// shorthand for the shared per-file state and its write guard
type Inner = Arc<RwLock<WorkspaceFileInner>>;
type InnerWrite<'a> = RwLockWriteGuard<'a, WorkspaceFileInner>;

/// A cheaply clonable handle to one tracked file; clones share the same inner state.
#[derive(Clone)]
pub struct WorkspaceFile {
    inner: Inner,
}
impl fmt::Debug for WorkspaceFile {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // take a read lock once, then emit fields one by one;
        // bulky fields and pending futures are shown as `...`
        let inner = self.inner.read();
        let mut s = f.debug_struct("WorkspaceFile");
        s.field("workspace", &Ellipsis); // avoid excess output
        s.field("pool", &Ellipsis);
        s.field("cancel_token", &inner.cancel_token);
        s.field("source", &Ellipsis);
        s.field("message_locale", &inner.message_locale);
        s.field("path", &inner.path);
        s.field("unit", &inner.unit);
        s.field("document", &inner.document);
        s.field("span", &inner.span.as_ref().map(|_| Ellipsis));
        s.field("tokens", &inner.tokens.as_ref().map(|_| Ellipsis));
        s.field("chunk", &inner.chunk.as_ref().map(|_| Ellipsis));
        s.field("last_chunk", &inner.last_chunk.as_ref().map(|_| Ellipsis));
        s.finish()
    }
}
impl WorkspaceFile {
    /// Creates an empty file entry; nothing is read or parsed until one of the
    /// `ensure_*` methods is called.
    fn new(shared: &Arc<RwLock<WorkspaceShared>>, pool: &Arc<CpuPool>,
           source: &Arc<RwLock<Source>>, message_locale: Locale, path: PathBuf) -> WorkspaceFile {
        WorkspaceFile {
            inner: Arc::new(RwLock::new(WorkspaceFileInner {
                workspace: shared.clone(),
                pool: pool.clone(),
                cancel_token: CancelToken::new(),
                source: source.clone(),
                message_locale: message_locale,
                path: path,
                unit: Unit::dummy(),
                document: None,
                span: None,
                tokens: None,
                chunk: None,
                last_chunk: None,
            })),
        }
    }

    /// Cancels this file's pending futures and clears the cached span/tokens/chunk
    /// so the next `ensure_*` call recomputes them. `last_chunk` is kept.
    fn cancel(&self) {
        let mut inner = self.inner.write();
        inner.cancel_token.cancel();
        inner.cancel_token = CancelToken::new();
        inner.span = None;
        inner.tokens = None;
        inner.chunk = None;

        // also signal the workspace to cancel jobs
        inner.workspace.write().cancel();
    }

    /// Returns the file's path.
    #[allow(dead_code)]
    pub fn path(&self) -> PathBuf {
        self.inner.read().path.clone()
    }

    /// Replaces the client-synchronized document through `f`, cancelling any
    /// pending work first since the text is about to change.
    fn update_document<F, E>(&self, f: F) -> Result<(), E>
        where F: FnOnce(Option<OpenDocument>) -> Result<Option<OpenDocument>, E>
    {
        self.cancel();
        let mut inner = self.inner.write();
        inner.document = f(inner.document.take())?;
        Ok(())
    }

    /// Returns (creating if needed) the future that loads the file's text into
    /// the global source and yields its span. The client-synchronized document,
    /// when present, takes precedence over the file system.
    fn ensure_span_with_inner(spare_inner: Inner, inner: &mut InnerWrite) -> IoFuture<Span> {
        if inner.span.is_none() {
            let fut = future::lazy(move || -> Result<Span, CancelError<io::Error>> {
                let mut inner = spare_inner.write();
                inner.cancel_token.keep_going()?;

                let file = if let Some(ref doc) = inner.document {
                    SourceFile::from_u8(inner.path.display().to_string(),
                                        doc.last_text.as_bytes().to_owned())
                } else {
                    SourceFile::from_file(&inner.path)?
                };

                // first load allocates a unit; later loads replace the previous text
                let span = if inner.unit.is_dummy() {
                    let span = inner.source.write().add(file);
                    inner.unit = span.unit();
                    span
                } else {
                    inner.source.write().replace(inner.unit, file).unwrap()
                };

                Ok(span)
            });

            inner.span = Some(inner.pool.spawn(fut).boxed().shared());
        }

        inner.span.as_ref().unwrap().clone()
    }

    /// Public entry point for `ensure_span_with_inner`.
    pub fn ensure_span(&self) -> IoFuture<Span> {
        let cloned = self.inner.clone();
        Self::ensure_span_with_inner(cloned, &mut self.inner.write())
    }

    /// Returns (creating if needed) the future that lexes the file into nested
    /// tokens, chained after the span future. I/O errors are converted into
    /// diagnostics attached to the file and the configuration file.
    fn ensure_tokens_with_inner(spare_inner: Inner,
                                inner: &mut InnerWrite) -> ReportFuture<Arc<Vec<NestedToken>>> {
        if inner.tokens.is_none() {
            let span_fut = Self::ensure_span_with_inner(spare_inner.clone(), inner);

            // important: the task has to be spawned outside of the future.
            // this is because, otherwise for the thread pool of n workers
            // the future chain of n+1 or more tasks will block as the i-th task
            // will spawn the (i+1)-th task without removing itself from the pool queue!
            // chaining the already-spawned future will ensure that
            // the task body will be only spawned after the last future has been finished.
            let fut = span_fut.then(move |span_ret| {
                let inner = spare_inner.read();

                match span_ret {
                    Ok(span) => {
                        inner.cancel_token.keep_going()?;

                        let source = inner.source.read();
                        let path = source.file(span.unit()).map(|f| f.path());
                        let diags = ReportTree::new(inner.message_locale, path);
                        let report = diags.report(|span| diags::translate_span(span, &source));
                        let tokens = collect_tokens(&source, *span, &report);

                        Ok((Arc::new(tokens), diags))
                    },

                    Err(e) => {
                        Err(e.as_ref().map(|e| {
                            // translate an I/O error into a report
                            let dummy_diag = |msg: &Localize| {
                                protocol::Diagnostic {
                                    range: protocol::Range {
                                        start: protocol::Position { line: 0, character: 0 },
                                        end: protocol::Position { line: 0, character: 0 },
                                    },
                                    severity: Some(protocol::DiagnosticSeverity::Error),
                                    code: None,
                                    source: None,
                                    message: Localized::new(msg, inner.message_locale).to_string(),
                                }
                            };

                            let path = inner.path.display().to_string();
                            let config_path = inner.workspace.read().base.config_path_or_default();
                            let config_path = config_path.display().to_string();

                            let diags = ReportTree::new(inner.message_locale, Some(&path));
                            diags.add_diag(path, dummy_diag(&m::CannotOpenStartPath { error: e }));
                            diags.add_diag(config_path, dummy_diag(&m::RestartRequired {}));
                            diags
                        }))
                    },
                }
            });

            inner.tokens = Some(inner.pool.spawn(fut).boxed().shared());
        }

        inner.tokens.as_ref().unwrap().clone()
    }

    /// Public entry point for `ensure_tokens_with_inner`.
    pub fn ensure_tokens(&self) -> ReportFuture<Arc<Vec<NestedToken>>> {
        let cloned = self.inner.clone();
        Self::ensure_tokens_with_inner(cloned, &mut self.inner.write())
    }

    /// Returns (creating if needed) the future that parses the tokens into a
    /// chunk, chained after the tokens future. On success the chunk is also
    /// stored into `last_chunk`.
    fn ensure_chunk_with_inner(spare_inner: Inner,
                               inner: &mut InnerWrite) -> ReportFuture<Arc<Chunk>> {
        if inner.chunk.is_none() {
            let tokens_fut = Self::ensure_tokens_with_inner(spare_inner.clone(), inner);

            // see ensure_tokens_with_inner for why the task must be spawned here
            let fut = tokens_fut.map_err(|e| (*e).clone()).and_then(move |tokens_ret| {
                let tokens = (*tokens_ret.0).clone();
                let parent_diags = tokens_ret.1.clone();

                let mut inner = spare_inner.write();
                inner.cancel_token.keep_going()?;

                let diags = ReportTree::new(inner.message_locale, None);
                diags.add_parent(parent_diags);

                // in this future source access is only needed for reporting
                let chunk = {
                    let report = diags.report(|span| {
                        diags::translate_span(span, &inner.source.read())
                    });
                    parse_to_chunk(tokens, &report)
                };
                match chunk {
                    Ok(chunk) => {
                        let chunk = Arc::new(chunk);
                        inner.last_chunk = Some(chunk.clone());
                        Ok((chunk, diags))
                    },
                    Err(_) => Err(From::from(diags)),
                }
            });

            inner.chunk = Some(inner.pool.spawn(fut).boxed().shared());
        }

        inner.chunk.as_ref().unwrap().clone()
    }

    /// Public entry point for `ensure_chunk_with_inner`.
    pub fn ensure_chunk(&self) -> ReportFuture<Arc<Chunk>> {
        let cloned = self.inner.clone();
        Self::ensure_chunk_with_inner(cloned, &mut self.inner.write())
    }

    /// The last successfully parsed chunk, if any (kept even after cancellation).
    pub fn last_chunk(&self) -> Option<Arc<Chunk>> {
        self.inner.read().last_chunk.clone()
    }

    /// Asynchronously converts a protocol position into an internal `Pos`,
    /// yielding a dummy `Pos` when the file's unit is unknown or out of range.
    pub fn translate_position(&self, pos: &protocol::Position) -> BoxFuture<Pos, CancelError<()>> {
        let pos = pos.clone();
        let source = self.inner.read().source.clone();
        self.ensure_span().then(move |res| {
            match res {
                Ok(span) => {
                    let source = source.read();
                    if let Some(file) = source.file(span.unit()) {
                        Ok(position_to_pos(file, &pos))
                    } else {
                        Ok(Pos::dummy())
                    }
                },
                Err(e) => Err(e.as_ref().map(|_| ()))
            }
        }).boxed()
    }

    /// Applies a `didChange` event to the synchronized document.
    /// Only full-text updates are supported; versions must strictly increase.
    pub fn apply_change(&mut self, version: u64,
                        event: protocol::TextDocumentContentChangeEvent) -> WorkspaceResult<()> {
        // TODO, there are several ambiguities with offsets?
        if event.range.is_some() || event.rangeLength.is_some() {
            return Err(WorkspaceError("incremental edits not yet supported"));
        }

        self.update_document(move |doc| {
            if let Some(mut doc) = doc {
                if doc.last_version >= version {
                    return Err(WorkspaceError("non-increasing version"));
                }
                doc.last_version = version;
                doc.last_text = event.text;
                Ok(Some(doc))
            } else {
                Err(WorkspaceError("change notification with non-existent or non-open file"))
            }
        })
    }
}
/// The workspace's configuration state: either a raw `Config` (before
/// `read_config` succeeds) or a fully loaded `Workspace`.
#[derive(Clone, Debug)]
enum WorkspaceBase {
    Config(kailua_workspace::Config),
    Workspace(kailua_workspace::Workspace),
}
impl WorkspaceBase {
    /// The path to the configuration file, when one has been determined.
    fn config_path(&self) -> Option<&Path> {
        match *self {
            WorkspaceBase::Config(ref cfg) => cfg.config_path(),
            WorkspaceBase::Workspace(ref workspace) => workspace.config_path(),
        }
    }

    /// Like `config_path`, but falls back to `<base>/.vscode/kailua.json`.
    fn config_path_or_default(&self) -> PathBuf {
        match self.config_path() {
            Some(path) => path.to_owned(),
            // we allow both `kailua.json` or `.vscode/kailua.json`,
            // for now we will issue an error at the latter
            None => self.base_dir().join(".vscode").join("kailua.json"),
        }
    }

    /// The workspace root directory.
    fn base_dir(&self) -> &Path {
        match *self {
            WorkspaceBase::Config(ref cfg) => cfg.base_dir(),
            WorkspaceBase::Workspace(ref workspace) => workspace.base_dir(),
        }
    }
}
// a portion of Workspace that should be shared across WorkspaceFile.
// this should not be modified in the normal cases (otherwise it can be easily deadlocked),
// with an exception of cascading cancellation.
struct WorkspaceShared {
    cancel_token: CancelToken, // used for stopping ongoing checks

    base: WorkspaceBase,

    // one slot per configured start path; cleared on cancellation
    check_outputs: Vec<Option<ReportFuture<Arc<Output>>>>,
    // last successful outputs, retained across cancellations
    last_check_outputs: Vec<Option<Arc<Output>>>,
}

// shorthand for the shared workspace state and its write guard
type Shared = Arc<RwLock<WorkspaceShared>>;
type SharedWrite<'a> = RwLockWriteGuard<'a, WorkspaceShared>;
impl fmt::Debug for WorkspaceShared {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // renders each Some(future/output) as `...` since they are not Debug-friendly
        struct DummyOptionList<'a, T: 'a>(&'a [Option<T>]);
        impl<'a, T: 'a> fmt::Debug for DummyOptionList<'a, T> {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                f.debug_list().entries(self.0.iter().map(|e| e.as_ref().map(|_| Ellipsis))).finish()
            }
        }

        f.debug_struct("WorkspaceShared")
         .field("base", &self.base)
         .field("cancel_token", &self.cancel_token)
         .field("check_outputs", &DummyOptionList(&self.check_outputs))
         .field("last_check_outputs", &DummyOptionList(&self.last_check_outputs))
         .finish()
    }
}
impl WorkspaceShared {
    /// Cancels all ongoing checking jobs, installs a fresh token so later jobs
    /// can proceed, and drops the pending check futures. The last successful
    /// outputs are intentionally left intact.
    fn cancel(&mut self) {
        self.cancel_token.cancel();
        self.cancel_token = CancelToken::new();

        for slot in self.check_outputs.iter_mut() {
            *slot = None;
        }
    }
}
/// State behind the checker's file-system source during one checking run.
struct WorkspaceFsSourceInner {
    cancel_token: CancelToken, // will be used independently of WorkspaceShared

    files: Arc<RwLock<HashMap<PathBuf, WorkspaceFile>>>, // tracked files take precedence

    source: Arc<RwLock<Source>>,
    temp_units: Vec<Unit>, // will be gone after checking
    temp_files: HashMap<PathBuf, Chunk>, // cache for files read outside the watchlist

    message_locale: Locale,
    root_report: ReportTree, // all per-file diagnostics are attached here
}
/// A clonable handle over `WorkspaceFsSourceInner`; single-threaded (`Rc`)
/// because it lives entirely inside one checking future.
#[derive(Clone)]
struct WorkspaceFsSource {
    inner: Rc<RefCell<WorkspaceFsSourceInner>>,
}
impl FsSource for WorkspaceFsSource {
    /// Supplies a parsed chunk for `path` to the checker.
    ///
    /// Resolution order: client-tracked files first, then the per-run temporary
    /// cache, then the file system. Returns `Ok(None)` when the file does not
    /// exist, `Err(Some(Stop))` on cancellation or parse failure (already
    /// reported), and `Err(None)` on other I/O errors.
    fn chunk_from_path(&self, path: Spanned<&Path>,
                       _report: &Report) -> Result<Option<Chunk>, Option<Stop>> {
        let mut fssource = self.inner.borrow_mut();

        fssource.cancel_token.keep_going::<()>().map_err(|_| Stop)?;

        // try to use the client-maintained text as a source code
        let files = fssource.files.clone();
        let files = files.read();
        if let Some(file) = files.get(path.base) {
            // block on the file's own chunk future; it may have been cached already
            let (chunk, diags) = match file.ensure_chunk().wait() {
                Ok(res) => {
                    let (ref chunk, ref diags) = *res;
                    (Some((**chunk).clone()), diags.clone())
                },
                Err(res) => match *res {
                    CancelError::Canceled => return Err(Some(Stop)),
                    CancelError::Error(ref diags) => (None, diags.clone())
                },
            };
            // this can be called multiple times, which ReportTree handles correctly
            fssource.root_report.add_parent(diags);
            return Ok(chunk);
        }
        drop(files); // avoid prolonged lock

        // try to use the already-read temporary chunk
        if let Some(chunk) = fssource.temp_files.get(path.base) {
            return Ok(Some(chunk.clone()));
        }

        // try to read the file (and finally raise an error if it can't be read)
        let sourcefile = match SourceFile::from_file(path.base) {
            Ok(f) => f,
            Err(ref e) if e.kind() == io::ErrorKind::NotFound => return Ok(None),
            Err(_) => return Err(None),
        };
        let span = fssource.source.write().add(sourcefile);
        fssource.temp_units.push(span.unit()); // remembered so the run can clean up

        let diags = ReportTree::new(fssource.message_locale, path.to_str());
        fssource.root_report.add_parent(diags.clone());

        let chunk = {
            let source = fssource.source.read();
            let report = diags.report(|span| diags::translate_span(span, &source));
            let tokens = collect_tokens(&source, span, &report);
            parse_to_chunk(tokens, &report)
        };
        match chunk {
            Ok(chunk) => {
                fssource.temp_files.insert(path.base.to_owned(), chunk.clone());
                Ok(Some(chunk))
            },
            Err(Stop) => Err(Some(Stop)), // we have already reported parsing errors
        }
    }
}
/// The top-level language-server workspace: tracked files, the shared source
/// registry, and the shared checking state.
pub struct Workspace {
    message_locale: Locale,
    pool: Arc<CpuPool>, // thread pool for background parsing/checking futures
    files: Arc<RwLock<HashMap<PathBuf, WorkspaceFile>>>,

    // conceptually this belongs to shared, but it is frequently updated by futures
    // unlike all other fields in shared, so getting this out avoids deadlock
    source: Arc<RwLock<Source>>,

    shared: Arc<RwLock<WorkspaceShared>>,
}
| impl fmt::Debug for Workspace {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Workspace")
.field("message_locale", &self.message_locale)
.field("pool", &Ellipsis)
.field("files", &self.files)
.field("source", &Ellipsis)
.field("shared", &self.shared)
.finish()
}
}
impl Workspace {
pub fn new(base_dir: PathBuf, pool: Arc<CpuPool>, default_locale: Locale) -> Workspace {
Workspace {
message_locale: default_locale,
pool: pool,
files: Arc::new(RwLock::new(HashMap::new())),
source: Arc::new(RwLock::new(Source::new())),
shared: Arc::new(RwLock::new(WorkspaceShared {
cancel_token: CancelToken::new(),
base: WorkspaceBase::Config(kailua_workspace::Config::from_base_dir(base_dir)),
check_outputs: Vec::new(),
last_check_outputs: Vec::new(),
})),
}
}
pub fn pool(&self) -> &Arc<CpuPool> {
&self.pool
}
pub fn source<'a>(&'a self) -> RwLockReadGuard<'a, Source> {
self.source.read()
}
pub fn has_read_config(&self) -> bool {
if let WorkspaceBase::Workspace(_) = self.shared.read().base { true } else { false }
}
#[allow(dead_code)]
pub fn config_path(&self) -> Option<PathBuf> {
self.shared.read().base.config_path().map(|p| p.to_owned())
}
pub fn config_path_or_default(&self) -> PathBuf {
self.shared.read().base.config_path_or_default()
}
pub fn read_config(&mut self) -> bool {
let mut shared = self.shared.write();
let ws = if let WorkspaceBase::Config(ref mut config) = shared.base {
config.use_default_config_paths();
if let Some(ws) = kailua_workspace::Workspace::new(config, self.message_locale) {
Some(ws)
} else {
return false;
}
} else {
None
};
if let Some(ws) = ws {
let noutputs = ws.start_paths().len();
shared.base = WorkspaceBase::Workspace(ws);
shared.check_outputs.resize(noutputs, None);
shared.last_check_outputs.resize(noutputs, None);
}
true
}
pub fn populate_watchlist(&mut self) {
let walker = WalkDir::new(self.shared.read().base.base_dir());
for e in walker.follow_links(true) {
// we don't care about I/O errors and (in Unix) symlink loops
let e = if let Ok(e) = e { e } else { continue };
let ext = e.path().extension();
if ext == Some(OsStr::new("lua")) || ext == Some(OsStr::new("kailua")) {
// TODO probably this should be of the lower priority
let _ = self.ensure_file(e.path()).ensure_chunk();
}
}
}
pub fn localize<'a, T: Localize + ?Sized + 'a>(&self, msg: &'a T) -> Localized<'a, T> {
Localized::new(msg, self.message_locale)
}
pub fn files<'a>(&'a self) -> RwLockReadGuard<'a, HashMap<PathBuf, WorkspaceFile>> {
self.files.read()
}
pub fn file<'a>(&'a self, uri: &str) -> Option<WorkspaceFile> {
match uri_to_path(uri) {
Ok(path) => self.files.read().get(&path).cloned(),
Err(_) => None,
}
}
fn make_file(&self, path: PathBuf) -> WorkspaceFile {
WorkspaceFile::new(&self.shared, &self.pool, &self.source, self.message_locale, path)
}
fn destroy_file(&self, file: WorkspaceFile) -> bool {
file.cancel();
let file = file.inner.read();
let sourcefile = self.source.write().remove(file.unit);
file.document.is_some() && sourcefile.is_some()
}
pub fn open_file(&self, item: protocol::TextDocumentItem) -> WorkspaceResult<()> {
let path = uri_to_path(&item.uri)?;
let mut files = self.files.write();
let file = files.entry(path.clone()).or_insert_with(|| self.make_file(path));
file.update_document(|doc| {
if doc.is_some() {
Err(WorkspaceError("open notification with duplicate file"))
} else {
Ok(Some(OpenDocument::new(item)))
}
})
}
fn ensure_file(&self, path: &Path) -> WorkspaceFile {
let mut files = self.files.write();
files.entry(path.to_owned()).or_insert_with(|| self.make_file(path.to_owned())).clone()
}
pub fn close_file(&self, uri: &str) -> WorkspaceResult<()> {
let path = uri_to_path(uri)?;
// closing file breaks the synchronization so the file should be re-read from fs
let mut files = self.files.write();
let ok = if let hash_map::Entry::Occupied(mut e) = files.entry(path.clone()) {
// replace the previous WorkspaceFile by a fresh WorkspaceFile
let file = mem::replace(e.get_mut(), self.make_file(path));
self.destroy_file(file)
} else {
false
};
if ok {
Ok(())
} else {
Err(WorkspaceError("close notification with non-existent or non-open file"))
}
}
pub fn on_file_created(&self, uri: &str) -> Option<WorkspaceFile> {
if let Ok(path) = uri_to_path(uri) {
let file = self.ensure_file(&path);
let _ = file.ensure_chunk();
Some(file)
} else {
None
}
}
pub fn on_file_changed(&self, uri: &str) -> Option<WorkspaceFile> {
if let Ok(path) = uri_to_path(uri) {
let file = self.ensure_file(&path);
file.cancel();
let _ = file.ensure_chunk();
Some(file)
} else {
None
}
}
pub fn on_file_deleted(&self, uri: &str) {
if let Ok(path) = uri_to_path(uri) {
let mut files = self.files.write();
if let Some(file) = files.remove(&path) {
self.destroy_file(file);
}
}
}
#[allow(dead_code)]
pub fn cancel(&self) {
self.shared.write().cancel();
}
pub fn cancel_future(&self) -> CancelFuture {
self.shared.read().cancel_token.future()
}
fn build_future_for_check_output(
&self, index: usize, start_path: &Path, spare_shared: Shared, shared: &mut SharedWrite
) -> ReportFuture<Arc<Output>> {
let start_chunk_fut = self.ensure_file(start_path).ensure_chunk();
let start_path = start_path.to_owned();
let files = self.files.clone();
let source = self.source.clone();
let cancel_token = shared.cancel_token.clone();
let message_locale = self.message_locale;
let fut = start_chunk_fut.map_err(|e| (*e).clone()).and_then(move |chunk_ret| {
cancel_token.keep_going()?;
let start_chunk = (*chunk_ret.0).clone();
let diags = ReportTree::new(message_locale, None);
diags.add_parent(chunk_ret.1.clone());
// the actual checking process.
//
// this will routinely lock the shared, so we avoid locking it from the caller
// by cloning required values prematurely.
let fssource = WorkspaceFsSource {
inner: Rc::new(RefCell::new(WorkspaceFsSourceInner {
cancel_token: cancel_token.clone(),
files: files,
source: source.clone(),
temp_units: Vec::new(),
temp_files: HashMap::new(),
message_locale: message_locale,
root_report: diags.clone(),
})),
};
let (opts, preload) = match spare_shared.read().base {
WorkspaceBase::Config(_) => {
// it should not be the case, but if we ever get to this point,
// we cannot proceed at all because there's no start path.
// we should have been alerted though.
return Err(From::from(diags));
},
WorkspaceBase::Workspace(ref ws) => {
let opts = WorkspaceOptions::new(fssource.clone(), &start_path, ws);
(Rc::new(RefCell::new(opts)), ws.preload().clone())
},
};
let (ok, output) = {
// the translation should NOT lock the source (read or write) indefinitely.
// we also want to drop the proxy report as fast as possible.
let mut context = Context::new(diags.report(|span| {
diags::translate_span(span, &source.read())
}));
let ok = kailua_check::check_from_chunk_with_preloading(&mut context, start_chunk,
opts, &preload).is_ok();
(ok, context.into_output())
};
// fssource should be owned only by this function; the following should not fail
let fssource = Rc::try_unwrap(fssource.inner).ok().expect("no single owner");
let fssource = fssource.into_inner();
// remove all temporarily added chunks from the source
// XXX ideally this should be cached as much as possible though
let mut source = source.write();
for unit in fssource.temp_units {
let sourcefile = source.remove(unit);
assert!(sourcefile.is_some());
}
// FsSource may have failed from the cancel request, so we should catch it here
cancel_token.keep_going()?;
if ok {
let output = Arc::new(output);
spare_shared.write().last_check_outputs[index] = Some(output.clone());
Ok((output, diags))
} else {
Err(From::from(diags))
}
});
self.pool.spawn(fut).boxed().shared()
}
pub fn ensure_check_outputs(&self) -> WorkspaceResult<Vec<ReportFuture<Arc<Output>>>> {
let spare_shared = self.shared.clone();
let mut shared = self.shared.write();
let start_paths = match shared.base {
WorkspaceBase::Config(_) => {
return Err(WorkspaceError("cannot start checking without a start file specified"));
},
WorkspaceBase::Workspace(ref ws) => ws.start_paths().to_owned(),
};
assert_eq!(shared.check_outputs.len(), start_paths.len());
for (i, path) in start_paths.iter().enumerate() {
if shared.check_outputs[i].is_none() {
let fut = self.build_future_for_check_output(i, path, spare_shared.clone(),
&mut shared);
shared.check_outputs[i] = Some(fut);
}
}
Ok(shared.check_outputs.iter().map(|fut| fut.as_ref().unwrap().clone()).collect())
}
// this is similar to `ensure_check_outputs`, but produces a single future
// that returns an array of `Output`s and a single combined diagnostics.
// (it is intended that they are not associated to each other, so they are separate)
pub fn ensure_combined_check_outputs(&self)
-> WorkspaceResult<BoxFuture<(Vec<Arc<Output>>, ReportTree), CancelError<()>>>
{
let output_futs = self.ensure_check_outputs()?;
// checking can result in the fatal error (Err) only when cancellation is requested.
// so we only need to return Ok when every checking results in Ok,
// avoiding the difficulty to combine incomplete reports when one of them fails.
let output_stream = stream::iter(output_futs.into_iter().map(Ok)).and_then(|fut| fut);
let outputs_fut = output_stream.collect();
let message_locale = self.message_locale;
Ok(outputs_fut.map_err(|_| CancelError::Error(())).map(move |ret| {
let mut outputs = Vec::new();
let diags = ReportTree::new(message_locale, None);
for e in ret.into_iter() {
outputs.push(e.0.clone());
diags.add_parent(e.1.clone());
}
(outputs, diags)
}).boxed())
}
#[allow(dead_code)]
pub fn last_check_outputs(&self) -> Vec<Option<Arc<Output>>> {
self.shared.read().last_check_outputs.clone()
}
pub fn last_valid_check_outputs(&self) -> Vec<Arc<Output>> {
self.shared.read().last_check_outputs.iter().filter_map(|e| e.clone()).collect()
}
} | random_line_split | |
workspace.rs | use std::mem;
use std::fmt;
use std::io;
use std::path::{Path, PathBuf};
use std::ffi::OsStr;
use std::collections::{hash_map, HashMap};
use std::cell::RefCell;
use std::rc::Rc;
use std::sync::Arc;
use futures::{future, stream, Future, Stream, BoxFuture};
use futures_cpupool::CpuPool;
use url::Url;
use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use walkdir::WalkDir;
use kailua_env::{Unit, Pos, Span, Spanned, Source, SourceFile, SourceSlice};
use kailua_diag::{self, Stop, Report, Locale, Localize, Localized};
use kailua_syntax::{Lexer, Nest, NestedToken, Parser, Chunk};
use kailua_check;
use kailua_check::options::FsSource;
use kailua_check::env::{Context, Output};
use kailua_workspace::{self, WorkspaceOptions};
use fmtutils::Ellipsis;
use diags::{self, ReportTree};
use futureutils::{CancelError, CancelToken, CancelFuture};
use message as m;
use protocol;
#[derive(Clone, Debug)]
pub struct WorkspaceError(pub &'static str);
pub type WorkspaceResult<T> = Result<T, WorkspaceError>;
fn uri_to_path(uri: &str) -> WorkspaceResult<PathBuf> {
let url = Url::parse(uri).map_err(|_| WorkspaceError("invalid URI"))?;
if url.scheme() != "file" {
return Err(WorkspaceError("non-file URI"));
}
if let Ok(path) = url.to_file_path() {
return Ok(path);
}
#[cfg(windows)]
{
use std::ffi::OsString;
use std::path::Component;
use url::Host;
// Url::to_file_path only handles no host or localhost, which is different from vscode-uri
// we first try localhost then retry by temporarily setting the authority part on windows
let host = match url.host() {
Some(Host::Domain(name)) => name.to_string(),
Some(Host::Ipv4(addr)) => addr.to_string(),
Some(Host::Ipv6(addr)) => {
// an "official" hack for UNC
// https://msdn.microsoft.com/en-us/library/aa385353.aspx
let s = &addr.segments();
format!("{:x}-{:x}-{:x}-{:x}-{:x}-{:x}-{:x}-{:x}.ipv6-literal.net",
s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7])
},
None => return Err(WorkspaceError("non-file URI")),
};
// convert file://host/path to file:///z:/path (z: is required for rust-url)
let url = Url::parse(&format!("file:///z:{}", url.path())).unwrap();
if let Ok(path) = url.to_file_path() {
// now path starts with z:\foo\bar, so replace z: by \\host to complete it
let mut components = path.components();
let _prefix = components.next();
assert!(match _prefix { Some(Component::Prefix(..)) => true, _ => false });
let mut pathstr = OsString::from("\\\\");
pathstr.push(&host);
pathstr.push(components.as_path());
return Ok(PathBuf::from(pathstr));
}
}
Err(WorkspaceError("non-file URI"))
}
fn position_to_pos(file: &SourceFile, pos: &protocol::Position) -> Pos {
if let Some(mut span) = file.line_spans().nth(pos.line as usize) {
let begin = span.begin().to_usize();
let end = span.end().to_usize();
let mut k = pos.character as usize;
match file.data() {
SourceSlice::U8(s) => {
// locate k-th non-continuation byte in s where k is the 0-based column index.
//
// this code seems to be overly complicated. this is necessary because
// we need to detect the end of the line, and a plain .nth(k) cannot determine
// if the line has k exact scalar values or k is just out of bound.
let iter = span.zip(s[begin..end].iter());
for (p, _) in iter.filter(|&(_, &b)| b & 0b1100_0000 != 0b1000_0000) {
if k == 0 { return p; }
k -= 1;
}
if k == 0 { return span.end(); }
Pos::dummy()
},
SourceSlice::U16(_) => {
// same here, but the logic is much simpler
if span.len() == k {
span.end()
} else if let Some(p) = span.nth(k) {
p
} else {
Pos::dummy()
}
},
}
} else {
Pos::dummy()
}
}
fn collect_tokens(source: &Source, span: Span, report: &Report) -> Vec<NestedToken> {
let mut iter = source.iter_from_span(span).unwrap();
let tokens = {
let mut lexer = Lexer::new(&mut iter, report);
let nest = Nest::new(&mut lexer);
nest.collect::<Vec<_>>()
};
assert!(!tokens.is_empty()); // should include EOF
tokens
}
fn parse_to_chunk(tokens: Vec<NestedToken>, report: &Report) -> kailua_diag::Result<Chunk> {
let mut tokens = tokens.into_iter();
let chunk = Parser::new(&mut tokens, report).into_chunk();
chunk
}
#[derive(Clone, Debug)]
pub struct OpenDocument {
uri: String,
lang_id: String,
last_version: u64,
last_text: String,
}
impl OpenDocument {
fn new(item: protocol::TextDocumentItem) -> OpenDocument {
OpenDocument {
uri: item.uri,
lang_id: item.languageId,
last_version: item.version,
last_text: item.text,
}
}
}
// clonable, externally visible future type at work
pub type IoFuture<T> =
future::Shared<BoxFuture<T, CancelError<io::Error>>>;
pub type ReportFuture<T> =
future::Shared<BoxFuture<(T, ReportTree), CancelError<ReportTree>>>;
struct WorkspaceFileInner {
workspace: Arc<RwLock<WorkspaceShared>>,
pool: Arc<CpuPool>,
cancel_token: CancelToken,
source: Arc<RwLock<Source>>,
message_locale: Locale,
path: PathBuf,
unit: Unit,
// if Some, the file is managed by the client and the text is synchronized
document: Option<OpenDocument>,
// each parts are calculated on demand; in either case diagnostics are produced
span: Option<IoFuture<Span>>,
tokens: Option<ReportFuture<Arc<Vec<NestedToken>>>>,
chunk: Option<ReportFuture<Arc<Chunk>>>,
last_chunk: Option<Arc<Chunk>>,
}
type Inner = Arc<RwLock<WorkspaceFileInner>>;
type InnerWrite<'a> = RwLockWriteGuard<'a, WorkspaceFileInner>;
#[derive(Clone)]
pub struct WorkspaceFile {
inner: Inner,
}
impl fmt::Debug for WorkspaceFile {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let inner = self.inner.read();
f.debug_struct("WorkspaceFile")
.field("workspace", &Ellipsis) // avoid excess output
.field("pool", &Ellipsis)
.field("cancel_token", &inner.cancel_token)
.field("source", &Ellipsis)
.field("message_locale", &inner.message_locale)
.field("path", &inner.path)
.field("unit", &inner.unit)
.field("document", &inner.document)
.field("span", &inner.span.as_ref().map(|_| Ellipsis))
.field("tokens", &inner.tokens.as_ref().map(|_| Ellipsis))
.field("chunk", &inner.chunk.as_ref().map(|_| Ellipsis))
.field("last_chunk", &inner.last_chunk.as_ref().map(|_| Ellipsis))
.finish()
}
}
impl WorkspaceFile {
fn new(shared: &Arc<RwLock<WorkspaceShared>>, pool: &Arc<CpuPool>,
source: &Arc<RwLock<Source>>, message_locale: Locale, path: PathBuf) -> WorkspaceFile {
WorkspaceFile {
inner: Arc::new(RwLock::new(WorkspaceFileInner {
workspace: shared.clone(),
pool: pool.clone(),
cancel_token: CancelToken::new(),
source: source.clone(),
message_locale: message_locale,
path: path,
unit: Unit::dummy(),
document: None,
span: None,
tokens: None,
chunk: None,
last_chunk: None,
})),
}
}
fn cancel(&self) {
let mut inner = self.inner.write();
inner.cancel_token.cancel();
inner.cancel_token = CancelToken::new();
inner.span = None;
inner.tokens = None;
inner.chunk = None;
// also signal the workspace to cancel jobs
inner.workspace.write().cancel();
}
#[allow(dead_code)]
pub fn path(&self) -> PathBuf {
self.inner.read().path.clone()
}
fn update_document<F, E>(&self, f: F) -> Result<(), E>
where F: FnOnce(Option<OpenDocument>) -> Result<Option<OpenDocument>, E>
{
self.cancel();
let mut inner = self.inner.write();
inner.document = f(inner.document.take())?;
Ok(())
}
fn ensure_span_with_inner(spare_inner: Inner, inner: &mut InnerWrite) -> IoFuture<Span> {
if inner.span.is_none() {
let fut = future::lazy(move || -> Result<Span, CancelError<io::Error>> {
let mut inner = spare_inner.write();
inner.cancel_token.keep_going()?;
let file = if let Some(ref doc) = inner.document {
SourceFile::from_u8(inner.path.display().to_string(),
doc.last_text.as_bytes().to_owned())
} else {
SourceFile::from_file(&inner.path)?
};
let span = if inner.unit.is_dummy() {
let span = inner.source.write().add(file);
inner.unit = span.unit();
span
} else {
inner.source.write().replace(inner.unit, file).unwrap()
};
Ok(span)
});
inner.span = Some(inner.pool.spawn(fut).boxed().shared());
}
inner.span.as_ref().unwrap().clone()
}
pub fn ensure_span(&self) -> IoFuture<Span> {
let cloned = self.inner.clone();
Self::ensure_span_with_inner(cloned, &mut self.inner.write())
}
fn ensure_tokens_with_inner(spare_inner: Inner,
inner: &mut InnerWrite) -> ReportFuture<Arc<Vec<NestedToken>>> {
if inner.tokens.is_none() {
let span_fut = Self::ensure_span_with_inner(spare_inner.clone(), inner);
// important: the task has to be spawned outside of the future.
// this is because, otherwise for the thread pool of n workers
// the future chain of n+1 or more tasks will block as the i-th task
// will spawn the (i+1)-th task without removing itself from the pool queue!
// chaining the already-spawned future will ensure that
// the task body will be only spawned after the last future has been finished.
let fut = span_fut.then(move |span_ret| {
let inner = spare_inner.read();
match span_ret {
Ok(span) => {
inner.cancel_token.keep_going()?;
let source = inner.source.read();
let path = source.file(span.unit()).map(|f| f.path());
let diags = ReportTree::new(inner.message_locale, path);
let report = diags.report(|span| diags::translate_span(span, &source));
let tokens = collect_tokens(&source, *span, &report);
Ok((Arc::new(tokens), diags))
},
Err(e) => {
Err(e.as_ref().map(|e| {
// translate an I/O error into a report
let dummy_diag = |msg: &Localize| {
protocol::Diagnostic {
range: protocol::Range {
start: protocol::Position { line: 0, character: 0 },
end: protocol::Position { line: 0, character: 0 },
},
severity: Some(protocol::DiagnosticSeverity::Error),
code: None,
source: None,
message: Localized::new(msg, inner.message_locale).to_string(),
}
};
let path = inner.path.display().to_string();
let config_path = inner.workspace.read().base.config_path_or_default();
let config_path = config_path.display().to_string();
let diags = ReportTree::new(inner.message_locale, Some(&path));
diags.add_diag(path, dummy_diag(&m::CannotOpenStartPath { error: e }));
diags.add_diag(config_path, dummy_diag(&m::RestartRequired {}));
diags
}))
},
}
});
inner.tokens = Some(inner.pool.spawn(fut).boxed().shared());
}
inner.tokens.as_ref().unwrap().clone()
}
pub fn ensure_tokens(&self) -> ReportFuture<Arc<Vec<NestedToken>>> {
let cloned = self.inner.clone();
Self::ensure_tokens_with_inner(cloned, &mut self.inner.write())
}
fn ensure_chunk_with_inner(spare_inner: Inner,
inner: &mut InnerWrite) -> ReportFuture<Arc<Chunk>> {
if inner.chunk.is_none() {
let tokens_fut = Self::ensure_tokens_with_inner(spare_inner.clone(), inner);
let fut = tokens_fut.map_err(|e| (*e).clone()).and_then(move |tokens_ret| {
let tokens = (*tokens_ret.0).clone();
let parent_diags = tokens_ret.1.clone();
let mut inner = spare_inner.write();
inner.cancel_token.keep_going()?;
let diags = ReportTree::new(inner.message_locale, None);
diags.add_parent(parent_diags);
// in this future source access is only needed for reporting
let chunk = {
let report = diags.report(|span| {
diags::translate_span(span, &inner.source.read())
});
parse_to_chunk(tokens, &report)
};
match chunk {
Ok(chunk) => {
let chunk = Arc::new(chunk);
inner.last_chunk = Some(chunk.clone());
Ok((chunk, diags))
},
Err(_) => Err(From::from(diags)),
}
});
inner.chunk = Some(inner.pool.spawn(fut).boxed().shared());
}
inner.chunk.as_ref().unwrap().clone()
}
pub fn ensure_chunk(&self) -> ReportFuture<Arc<Chunk>> {
let cloned = self.inner.clone();
Self::ensure_chunk_with_inner(cloned, &mut self.inner.write())
}
pub fn last_chunk(&self) -> Option<Arc<Chunk>> {
self.inner.read().last_chunk.clone()
}
pub fn translate_position(&self, pos: &protocol::Position) -> BoxFuture<Pos, CancelError<()>> {
let pos = pos.clone();
let source = self.inner.read().source.clone();
self.ensure_span().then(move |res| {
match res {
Ok(span) => {
let source = source.read();
if let Some(file) = source.file(span.unit()) {
Ok(position_to_pos(file, &pos))
} else {
Ok(Pos::dummy())
}
},
Err(e) => Err(e.as_ref().map(|_| ()))
}
}).boxed()
}
pub fn apply_change(&mut self, version: u64,
event: protocol::TextDocumentContentChangeEvent) -> WorkspaceResult<()> {
// TODO, there are several ambiguities with offsets?
if event.range.is_some() || event.rangeLength.is_some() {
return Err(WorkspaceError("incremental edits not yet supported"));
}
self.update_document(move |doc| {
if let Some(mut doc) = doc {
if doc.last_version >= version {
return Err(WorkspaceError("non-increasing version"));
}
doc.last_version = version;
doc.last_text = event.text;
Ok(Some(doc))
} else {
Err(WorkspaceError("change notification with non-existent or non-open file"))
}
})
}
}
#[derive(Clone, Debug)]
enum WorkspaceBase {
Config(kailua_workspace::Config),
Workspace(kailua_workspace::Workspace),
}
impl WorkspaceBase {
fn config_path(&self) -> Option<&Path> {
match *self {
WorkspaceBase::Config(ref config) => config.config_path(),
WorkspaceBase::Workspace(ref ws) => ws.config_path(),
}
}
fn config_path_or_default(&self) -> PathBuf {
if let Some(config_path) = self.config_path() {
config_path.to_owned()
} else {
// we allow both `kailua.json` or `.vscode/kailua.json`,
// for now we will issue an error at the latter
self.base_dir().join(".vscode").join("kailua.json")
}
}
fn base_dir(&self) -> &Path {
match *self {
WorkspaceBase::Config(ref config) => config.base_dir(),
WorkspaceBase::Workspace(ref ws) => ws.base_dir(),
}
}
}
// a portion of Workspace that should be shared across WorkspaceFile.
// this should not be modified in the normal cases (otherwise it can be easily deadlocked),
// with an exception of cascading cancellation.
struct WorkspaceShared {
cancel_token: CancelToken, // used for stopping ongoing checks
base: WorkspaceBase,
check_outputs: Vec<Option<ReportFuture<Arc<Output>>>>,
last_check_outputs: Vec<Option<Arc<Output>>>,
}
type Shared = Arc<RwLock<WorkspaceShared>>;
type SharedWrite<'a> = RwLockWriteGuard<'a, WorkspaceShared>;
impl fmt::Debug for WorkspaceShared {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
struct DummyOptionList<'a, T: 'a>(&'a [Option<T>]);
impl<'a, T: 'a> fmt::Debug for DummyOptionList<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list().entries(self.0.iter().map(|e| e.as_ref().map(|_| Ellipsis))).finish()
}
}
f.debug_struct("WorkspaceShared")
.field("base", &self.base)
.field("cancel_token", &self.cancel_token)
.field("check_outputs", &DummyOptionList(&self.check_outputs))
.field("last_check_outputs", &DummyOptionList(&self.last_check_outputs))
.finish()
}
}
impl WorkspaceShared {
fn cancel(&mut self) {
self.cancel_token.cancel();
self.cancel_token = CancelToken::new();
for output in &mut self.check_outputs {
*output = None;
}
}
}
struct WorkspaceFsSourceInner {
cancel_token: CancelToken, // will be used independently of WorkspaceShared
files: Arc<RwLock<HashMap<PathBuf, WorkspaceFile>>>,
source: Arc<RwLock<Source>>,
temp_units: Vec<Unit>, // will be gone after checking
temp_files: HashMap<PathBuf, Chunk>,
message_locale: Locale,
root_report: ReportTree,
}
#[derive(Clone)]
struct WorkspaceFsSource {
inner: Rc<RefCell<WorkspaceFsSourceInner>>,
}
impl FsSource for WorkspaceFsSource {
fn chunk_from_path(&self, path: Spanned<&Path>,
_report: &Report) -> Result<Option<Chunk>, Option<Stop>> {
let mut fssource = self.inner.borrow_mut();
fssource.cancel_token.keep_going::<()>().map_err(|_| Stop)?;
// try to use the client-maintained text as a source code
let files = fssource.files.clone();
let files = files.read();
if let Some(file) = files.get(path.base) {
let (chunk, diags) = match file.ensure_chunk().wait() {
Ok(res) => {
let (ref chunk, ref diags) = *res;
(Some((**chunk).clone()), diags.clone())
},
Err(res) => match *res {
CancelError::Canceled => return Err(Some(Stop)),
CancelError::Error(ref diags) => (None, diags.clone())
},
};
// this can be called multiple times, which ReportTree handles correctly
fssource.root_report.add_parent(diags);
return Ok(chunk);
}
drop(files); // avoid prolonged lock
// try to use the already-read temporary chunk
if let Some(chunk) = fssource.temp_files.get(path.base) {
return Ok(Some(chunk.clone()));
}
// try to read the file (and finally raise an error if it can't be read)
let sourcefile = match SourceFile::from_file(path.base) {
Ok(f) => f,
Err(ref e) if e.kind() == io::ErrorKind::NotFound => return Ok(None),
Err(_) => return Err(None),
};
let span = fssource.source.write().add(sourcefile);
fssource.temp_units.push(span.unit());
let diags = ReportTree::new(fssource.message_locale, path.to_str());
fssource.root_report.add_parent(diags.clone());
let chunk = {
let source = fssource.source.read();
let report = diags.report(|span| diags::translate_span(span, &source));
let tokens = collect_tokens(&source, span, &report);
parse_to_chunk(tokens, &report)
};
match chunk {
Ok(chunk) => {
fssource.temp_files.insert(path.base.to_owned(), chunk.clone());
Ok(Some(chunk))
},
Err(Stop) => Err(Some(Stop)), // we have already reported parsing errors
}
}
}
pub struct Workspace {
message_locale: Locale,
pool: Arc<CpuPool>,
files: Arc<RwLock<HashMap<PathBuf, WorkspaceFile>>>,
// conceptually this belongs to shared, but it is frequently updated by futures
// unlike all other fields in shared, so getting this out avoids deadlock
source: Arc<RwLock<Source>>,
shared: Arc<RwLock<WorkspaceShared>>,
}
impl fmt::Debug for Workspace {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Workspace")
.field("message_locale", &self.message_locale)
.field("pool", &Ellipsis)
.field("files", &self.files)
.field("source", &Ellipsis)
.field("shared", &self.shared)
.finish()
}
}
impl Workspace {
pub fn new(base_dir: PathBuf, pool: Arc<CpuPool>, default_locale: Locale) -> Workspace {
Workspace {
message_locale: default_locale,
pool: pool,
files: Arc::new(RwLock::new(HashMap::new())),
source: Arc::new(RwLock::new(Source::new())),
shared: Arc::new(RwLock::new(WorkspaceShared {
cancel_token: CancelToken::new(),
base: WorkspaceBase::Config(kailua_workspace::Config::from_base_dir(base_dir)),
check_outputs: Vec::new(),
last_check_outputs: Vec::new(),
})),
}
}
pub fn pool(&self) -> &Arc<CpuPool> {
&self.pool
}
pub fn source<'a>(&'a self) -> RwLockReadGuard<'a, Source> {
self.source.read()
}
pub fn has_read_config(&self) -> bool {
if let WorkspaceBase::Workspace(_) = self.shared.read().base { true } else { false }
}
#[allow(dead_code)]
pub fn config_path(&self) -> Option<PathBuf> {
self.shared.read().base.config_path().map(|p| p.to_owned())
}
pub fn config_path_or_default(&self) -> PathBuf {
self.shared.read().base.config_path_or_default()
}
pub fn read_config(&mut self) -> bool {
let mut shared = self.shared.write();
let ws = if let WorkspaceBase::Config(ref mut config) = shared.base {
config.use_default_config_paths();
if let Some(ws) = kailua_workspace::Workspace::new(config, self.message_locale) {
Some(ws)
} else {
return false;
}
} else {
None
};
if let Some(ws) = ws {
let noutputs = ws.start_paths().len();
shared.base = WorkspaceBase::Workspace(ws);
shared.check_outputs.resize(noutputs, None);
shared.last_check_outputs.resize(noutputs, None);
}
true
}
pub fn populate_watchlist(&mut self) {
let walker = WalkDir::new(self.shared.read().base.base_dir());
for e in walker.follow_links(true) {
// we don't care about I/O errors and (in Unix) symlink loops
let e = if let Ok(e) = e { e } else { continue };
let ext = e.path().extension();
if ext == Some(OsStr::new("lua")) || ext == Some(OsStr::new("kailua")) {
// TODO probably this should be of the lower priority
let _ = self.ensure_file(e.path()).ensure_chunk();
}
}
}
pub fn localize<'a, T: Localize + ?Sized + 'a>(&self, msg: &'a T) -> Localized<'a, T> {
Localized::new(msg, self.message_locale)
}
pub fn files<'a>(&'a self) -> RwLockReadGuard<'a, HashMap<PathBuf, WorkspaceFile>> {
self.files.read()
}
pub fn file<'a>(&'a self, uri: &str) -> Option<WorkspaceFile> {
match uri_to_path(uri) {
Ok(path) => self.files.read().get(&path).cloned(),
Err(_) => None,
}
}
fn make_file(&self, path: PathBuf) -> WorkspaceFile {
WorkspaceFile::new(&self.shared, &self.pool, &self.source, self.message_locale, path)
}
fn destroy_file(&self, file: WorkspaceFile) -> bool {
file.cancel();
let file = file.inner.read();
let sourcefile = self.source.write().remove(file.unit);
file.document.is_some() && sourcefile.is_some()
}
pub fn open_file(&self, item: protocol::TextDocumentItem) -> WorkspaceResult<()> {
let path = uri_to_path(&item.uri)?;
let mut files = self.files.write();
let file = files.entry(path.clone()).or_insert_with(|| self.make_file(path));
file.update_document(|doc| {
if doc.is_some() {
Err(WorkspaceError("open notification with duplicate file"))
} else {
Ok(Some(OpenDocument::new(item)))
}
})
}
fn ensure_file(&self, path: &Path) -> WorkspaceFile {
let mut files = self.files.write();
files.entry(path.to_owned()).or_insert_with(|| self.make_file(path.to_owned())).clone()
}
pub fn close_file(&self, uri: &str) -> WorkspaceResult<()> {
let path = uri_to_path(uri)?;
// closing file breaks the synchronization so the file should be re-read from fs
let mut files = self.files.write();
let ok = if let hash_map::Entry::Occupied(mut e) = files.entry(path.clone()) {
// replace the previous WorkspaceFile by a fresh WorkspaceFile
let file = mem::replace(e.get_mut(), self.make_file(path));
self.destroy_file(file)
} else {
false
};
if ok {
Ok(())
} else {
Err(WorkspaceError("close notification with non-existent or non-open file"))
}
}
pub fn on_file_created(&self, uri: &str) -> Option<WorkspaceFile> {
if let Ok(path) = uri_to_path(uri) {
let file = self.ensure_file(&path);
let _ = file.ensure_chunk();
Some(file)
} else {
None
}
}
pub fn on_file_changed(&self, uri: &str) -> Option<WorkspaceFile> |
pub fn on_file_deleted(&self, uri: &str) {
if let Ok(path) = uri_to_path(uri) {
let mut files = self.files.write();
if let Some(file) = files.remove(&path) {
self.destroy_file(file);
}
}
}
#[allow(dead_code)]
pub fn cancel(&self) {
self.shared.write().cancel();
}
pub fn cancel_future(&self) -> CancelFuture {
self.shared.read().cancel_token.future()
}
fn build_future_for_check_output(
&self, index: usize, start_path: &Path, spare_shared: Shared, shared: &mut SharedWrite
) -> ReportFuture<Arc<Output>> {
let start_chunk_fut = self.ensure_file(start_path).ensure_chunk();
let start_path = start_path.to_owned();
let files = self.files.clone();
let source = self.source.clone();
let cancel_token = shared.cancel_token.clone();
let message_locale = self.message_locale;
let fut = start_chunk_fut.map_err(|e| (*e).clone()).and_then(move |chunk_ret| {
cancel_token.keep_going()?;
let start_chunk = (*chunk_ret.0).clone();
let diags = ReportTree::new(message_locale, None);
diags.add_parent(chunk_ret.1.clone());
// the actual checking process.
//
// this will routinely lock the shared, so we avoid locking it from the caller
// by cloning required values prematurely.
let fssource = WorkspaceFsSource {
inner: Rc::new(RefCell::new(WorkspaceFsSourceInner {
cancel_token: cancel_token.clone(),
files: files,
source: source.clone(),
temp_units: Vec::new(),
temp_files: HashMap::new(),
message_locale: message_locale,
root_report: diags.clone(),
})),
};
let (opts, preload) = match spare_shared.read().base {
WorkspaceBase::Config(_) => {
// it should not be the case, but if we ever get to this point,
// we cannot proceed at all because there's no start path.
// we should have been alerted though.
return Err(From::from(diags));
},
WorkspaceBase::Workspace(ref ws) => {
let opts = WorkspaceOptions::new(fssource.clone(), &start_path, ws);
(Rc::new(RefCell::new(opts)), ws.preload().clone())
},
};
let (ok, output) = {
// the translation should NOT lock the source (read or write) indefinitely.
// we also want to drop the proxy report as fast as possible.
let mut context = Context::new(diags.report(|span| {
diags::translate_span(span, &source.read())
}));
let ok = kailua_check::check_from_chunk_with_preloading(&mut context, start_chunk,
opts, &preload).is_ok();
(ok, context.into_output())
};
// fssource should be owned only by this function; the following should not fail
let fssource = Rc::try_unwrap(fssource.inner).ok().expect("no single owner");
let fssource = fssource.into_inner();
// remove all temporarily added chunks from the source
// XXX ideally this should be cached as much as possible though
let mut source = source.write();
for unit in fssource.temp_units {
let sourcefile = source.remove(unit);
assert!(sourcefile.is_some());
}
// FsSource may have failed from the cancel request, so we should catch it here
cancel_token.keep_going()?;
if ok {
let output = Arc::new(output);
spare_shared.write().last_check_outputs[index] = Some(output.clone());
Ok((output, diags))
} else {
Err(From::from(diags))
}
});
self.pool.spawn(fut).boxed().shared()
}
pub fn ensure_check_outputs(&self) -> WorkspaceResult<Vec<ReportFuture<Arc<Output>>>> {
let spare_shared = self.shared.clone();
let mut shared = self.shared.write();
let start_paths = match shared.base {
WorkspaceBase::Config(_) => {
return Err(WorkspaceError("cannot start checking without a start file specified"));
},
WorkspaceBase::Workspace(ref ws) => ws.start_paths().to_owned(),
};
assert_eq!(shared.check_outputs.len(), start_paths.len());
for (i, path) in start_paths.iter().enumerate() {
if shared.check_outputs[i].is_none() {
let fut = self.build_future_for_check_output(i, path, spare_shared.clone(),
&mut shared);
shared.check_outputs[i] = Some(fut);
}
}
Ok(shared.check_outputs.iter().map(|fut| fut.as_ref().unwrap().clone()).collect())
}
// this is similar to `ensure_check_outputs`, but produces a single future
// that returns an array of `Output`s and a single combined diagnostics.
// (it is intended that they are not associated to each other, so they are separate)
pub fn ensure_combined_check_outputs(&self)
-> WorkspaceResult<BoxFuture<(Vec<Arc<Output>>, ReportTree), CancelError<()>>>
{
let output_futs = self.ensure_check_outputs()?;
// checking can result in the fatal error (Err) only when cancellation is requested.
// so we only need to return Ok when every checking results in Ok,
// avoiding the difficulty to combine incomplete reports when one of them fails.
let output_stream = stream::iter(output_futs.into_iter().map(Ok)).and_then(|fut| fut);
let outputs_fut = output_stream.collect();
let message_locale = self.message_locale;
Ok(outputs_fut.map_err(|_| CancelError::Error(())).map(move |ret| {
let mut outputs = Vec::new();
let diags = ReportTree::new(message_locale, None);
for e in ret.into_iter() {
outputs.push(e.0.clone());
diags.add_parent(e.1.clone());
}
(outputs, diags)
}).boxed())
}
#[allow(dead_code)]
pub fn last_check_outputs(&self) -> Vec<Option<Arc<Output>>> {
self.shared.read().last_check_outputs.clone()
}
pub fn last_valid_check_outputs(&self) -> Vec<Arc<Output>> {
self.shared.read().last_check_outputs.iter().filter_map(|e| e.clone()).collect()
}
}
| {
if let Ok(path) = uri_to_path(uri) {
let file = self.ensure_file(&path);
file.cancel();
let _ = file.ensure_chunk();
Some(file)
} else {
None
}
} | identifier_body |
train.py | ''' Sentence VAE '''
import os
import sys
import json
import time
import math
import random
import argparse
import ipdb as pdb
import logging as log
import torch
import torch.optim as optim
from torch.nn.utils import clip_grad_norm
import numpy as np
from multiprocessing import cpu_count
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from collections import OrderedDict, defaultdict
from ptb import PTB
from utils import to_var, idx2word, experiment_name
from model import SentenceVAE, SentenceAE
SCR_PREFIX = '/misc/vlgscratch4/BowmanGroup/awang/'
EPS = 1e-3
def kl_anneal_function(anneal_function, step, k, x0):
if anneal_function == 'logistic':
return float(1/(1+np.exp(-k*(step-x0))))
elif anneal_function == 'linear':
return min(1, step/x0)
def loss_fn(NLL, logp, target, length, mean, logv, anneal_function, step, k, x0):
# cut-off unnecessary padding from target, and flatten
target = target[:, :torch.max(length).data[0]].contiguous().view(-1)
logp = logp.view(-1, logp.size(2))
# Negative Log Likelihood
nll_loss = NLL(logp, target)
# KL Divergence
kl_loss = -0.5 * torch.sum(1 + logv - mean.pow(2) - logv.exp())
kl_weight = kl_anneal_function(anneal_function, step, k, x0)
return nll_loss, kl_loss, kl_weight
def main(arguments):
parser = argparse.ArgumentParser()
parser.add_argument('--seed', help='random seed', type=int, default=19)
parser.add_argument('-gpu', '--gpu_id', help='GPU ID', type=int, default=0)
parser.add_argument('--run_dir', help='prefix to save ckpts to', type=str,
default=SCR_PREFIX + 'ckpts/svae/test/')
parser.add_argument('--log_file', help='file to log to', type=str, default='')
parser.add_argument('--data_dir', type=str, default='data')
parser.add_argument('--create_data', action='store_true')
parser.add_argument('--max_sequence_length', type=int, default=40)
parser.add_argument('--min_occ', type=int, default=1)
parser.add_argument('--max_vocab_size', type=int, default=30000)
parser.add_argument('--test', action='store_true')
parser.add_argument('-ep', '--epochs', type=int, default=10)
parser.add_argument('-bs', '--batch_size', type=int, default=128)
parser.add_argument('-o', '--optimizer', type=str, choices=['sgd', 'adam'], default='adam')
parser.add_argument('-lr', '--learning_rate', type=float, default=0.001)
parser.add_argument('--lr_decay_factor', type=float, default=0.5)
parser.add_argument('-p', '--patience', type=int, default=5)
parser.add_argument('--sched_patience', type=int, default=0)
parser.add_argument('-mg', '--max_grad_norm', type=float, default=5.)
parser.add_argument('-m', '--model', type=str, choices=['vae', 'ae'], default='vae')
parser.add_argument('-eb', '--embedding_size', type=int, default=300)
parser.add_argument('-rnn', '--rnn_type', type=str, choices=['rnn', 'lstm', 'gru'],
default='gru')
parser.add_argument('-hs', '--hidden_size', type=int, default=512)
parser.add_argument('-nl', '--num_layers', type=int, default=1)
parser.add_argument('-bi', '--bidirectional', action='store_true')
parser.add_argument('-ls', '--latent_size', type=int, default=16)
parser.add_argument('-wd', '--word_dropout', type=float, default=0.5)
parser.add_argument('-d', '--denoise', action='store_true')
parser.add_argument('-pd', '--prob_drop', type=float, default=0.1)
parser.add_argument('-ps', '--prob_swap', type=float, default=0.1)
parser.add_argument('-af', '--anneal_function', type=str, choices=['logistic', 'linear'],
default='logistic')
parser.add_argument('-k', '--k', type=float, default=0.0025)
parser.add_argument('-x0', '--x0', type=int, default=2500)
parser.add_argument('-v', '--print_every', type=int, default=50)
parser.add_argument('-tb', '--tensorboard_logging', action='store_true')
args = parser.parse_args(arguments)
log.basicConfig(format="%(asctime)s: %(message)s", level=log.INFO, datefmt='%m/%d %I:%M:%S %p')
if args.log_file:
log.getLogger().addHandler(log.FileHandler(args.log_file))
log.info(args)
ts = time.strftime('%Y-%b-%d-%H:%M:%S', time.gmtime())
seed = random.randint(1, 10000) if args.seed < 0 else args.seed
random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.set_device(args.gpu_id)
torch.cuda.manual_seed_all(seed)
splits = ['train', 'valid'] + (['test'] if args.test else [])
datasets = OrderedDict()
for split in splits:
datasets[split] = PTB(
data_dir=args.data_dir,
split=split,
create_data=args.create_data,
max_sequence_length=args.max_sequence_length,
min_occ=args.min_occ)
if args.model == 'vae':
model = SentenceVAE(args, datasets['train'].get_w2i(),
embedding_size=args.embedding_size,
rnn_type=args.rnn_type,
hidden_size=args.hidden_size,
word_dropout=args.word_dropout,
latent_size=args.latent_size,
num_layers=args.num_layers,
bidirectional=args.bidirectional)
elif args.model == 'ae':
model = SentenceAE(args, datasets['train'].get_w2i(),
embedding_size=args.embedding_size,
rnn_type=args.rnn_type,
hidden_size=args.hidden_size,
word_dropout=args.word_dropout,
latent_size=args.latent_size,
num_layers=args.num_layers,
bidirectional=args.bidirectional)
if args.denoise:
log.info("DENOISING!")
if torch.cuda.is_available():
model = model.cuda()
log.info(model)
if args.tensorboard_logging:
writer = SummaryWriter(os.path.join(args.run_dir, experiment_name(args, ts)))
writer.add_text("model", str(model))
writer.add_text("args", str(args))
writer.add_text("ts", ts)
save_model_path = args.run_dir
if not os.path.exists(save_model_path):
os.makedirs(save_model_path)
NLL = torch.nn.NLLLoss(size_average=False, ignore_index=datasets['train'].pad_idx)
params = model.parameters()
if args.optimizer == 'sgd':
optimizer = optim.SGD(params, lr=args.learning_rate)
elif args.optimizer == 'adam':
optimizer = optim.Adam(params, lr=args.learning_rate)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
factor=args.lr_decay_factor,
patience=args.sched_patience,
verbose=True)
tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor
batch_size = args.batch_size
step, stop_training = 0, 0
global_tracker = {'best_epoch': -1, 'best_score': -1, 'history': []}
for epoch in range(args.epochs):
if stop_training:
break
for split in splits:
tracker = defaultdict(tensor)
exs = [ex for ex in datasets[split].data.values()]
random.shuffle(exs)
n_batches = math.ceil(len(exs) / batch_size)
# Enable/Disable Dropout
if split == 'train':
log.info("***** Epoch %02d *****", epoch)
log.info("Training...")
model.train()
else:
log.info("Validating...")
model.eval()
#for iteration, batch in enumerate(data_loader):
for iteration in range(n_batches):
raw_batch = exs[iteration*batch_size:(iteration+1)*batch_size]
batch = model.prepare_batch([e['input'] for e in raw_batch])
batch['src_length'] = model.tensor(batch['src_length']).long()
batch['trg_length'] = model.tensor(batch['trg_length']).long()
b_size = batch['input'].size(0)
for k, v in batch.items():
if torch.is_tensor(v):
batch[k] = to_var(v)
# Forward pass
logp, mean, logv, z = model(batch['input'], batch['target'],
batch['src_length'], batch['trg_length'])
# loss calculation
nll_loss, kl_loss, kl_weight = model.loss_fn(logp, batch['target'],
batch['trg_length'], mean, logv,
args.anneal_function, step,
args.k, args.x0)
loss = (nll_loss + kl_weight * kl_loss) / b_size
nll_loss /= b_size
kl_loss /= b_size
if loss.data[0] != loss.data[0]: # nan detection
log.info("***** UH OH NAN DETECTED *****")
pdb.set_trace()
# backward + optimization
if split == 'train':
optimizer.zero_grad()
loss.backward()
if args.max_grad_norm:
grad_norm = clip_grad_norm(model.parameters(), args.max_grad_norm)
optimizer.step()
step += 1
# bookkeeping
tracker['ELBO'] = torch.cat((tracker['ELBO'], loss.data))
loss = loss.data[0]
if args.model == 'vae':
tracker['NLL'] = torch.cat((tracker['NLL'], nll_loss.data))
tracker['KL'] = torch.cat((tracker['NLL'], kl_loss.data))
nll_loss = nll_loss.data[0]
kl_loss = kl_loss.data[0]
else:
tracker['NLL'] = torch.cat((tracker['NLL'], model.tensor([0])))
tracker['KL'] = torch.cat((tracker['KL'], model.tensor([0])))
if args.tensorboard_logging:
writer.add_scalar("%s/ELBO"%split.upper(), loss, epoch*n_batches + iteration)
writer.add_scalar("%s/NLL Loss"%split.upper(), nll_loss, epoch*n_batches + iteration)
writer.add_scalar("%s/KL Loss"%split.upper(), kl_loss, epoch*n_batches + iteration)
writer.add_scalar("%s/KL Weight"%split.upper(), kl_weight, epoch*n_batches + iteration)
if iteration % args.print_every == 0 or iteration + 1 == n_batches:
log.info(" Batch %04d/%i\tLoss %9.4f\tNLL-Loss %9.4f\tKL-Loss %9.4f\tKL-Weight %6.3f",
iteration, n_batches-1, loss, nll_loss, kl_loss, kl_weight)
if split == 'valid': # store the dev target sentences
if 'target_sents' not in tracker:
tracker['target_sents'] = list()
tracker['target_sents'] += idx2word(batch['target'].data, \
i2w=datasets['train'].get_i2w(), pad_idx=datasets['train'].pad_idx)
if args.model == 'vae':
tracker['z'] = torch.cat((tracker['z'], z.data), dim=0)
log.info(" Mean ELBO %9.4f, NLL: %9.4f", torch.mean(tracker['ELBO']), torch.mean(tracker['NLL']))
if args.tensorboard_logging:
writer.add_scalar("%s-Epoch/ELBO" % split.upper(), torch.mean(tracker['ELBO']), epoch)
# save a dump of all sentences and the encoded latent space
if split == 'valid':
loss = torch.mean(tracker['ELBO'])
dump = {'target_sents':tracker['target_sents'], 'z':tracker['z'].tolist()}
if not os.path.exists(os.path.join('dumps', ts)):
|
with open(os.path.join('dumps/'+ ts +'/valid_E%i.json'%epoch), 'w') as dump_file:
json.dump(dump, dump_file)
if loss < global_tracker['best_score'] or global_tracker['best_score'] < 0:
log.info(" Best model found")
global_tracker['best_epoch'] = epoch
global_tracker['best_score'] = loss
checkpoint_path = os.path.join(save_model_path, "best.mdl")
torch.save(model.state_dict(), checkpoint_path)
if kl_weight >= 1 - EPS:
if len(global_tracker['history']) >= args.patience and \
loss >= min(global_tracker['history'][-args.patience:]):
log.info("Ran out of patience!")
stop_training = 1
global_tracker['history'].append(loss)
scheduler.step(loss, epoch)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| os.makedirs('dumps/' + ts) | conditional_block |
train.py | ''' Sentence VAE '''
import os
import sys
import json
import time
import math
import random
import argparse
import ipdb as pdb
import logging as log
import torch
import torch.optim as optim
from torch.nn.utils import clip_grad_norm
import numpy as np
from multiprocessing import cpu_count
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from collections import OrderedDict, defaultdict
from ptb import PTB
from utils import to_var, idx2word, experiment_name
from model import SentenceVAE, SentenceAE
SCR_PREFIX = '/misc/vlgscratch4/BowmanGroup/awang/'
EPS = 1e-3
def kl_anneal_function(anneal_function, step, k, x0):
|
def loss_fn(NLL, logp, target, length, mean, logv, anneal_function, step, k, x0):
# cut-off unnecessary padding from target, and flatten
target = target[:, :torch.max(length).data[0]].contiguous().view(-1)
logp = logp.view(-1, logp.size(2))
# Negative Log Likelihood
nll_loss = NLL(logp, target)
# KL Divergence
kl_loss = -0.5 * torch.sum(1 + logv - mean.pow(2) - logv.exp())
kl_weight = kl_anneal_function(anneal_function, step, k, x0)
return nll_loss, kl_loss, kl_weight
def main(arguments):
parser = argparse.ArgumentParser()
parser.add_argument('--seed', help='random seed', type=int, default=19)
parser.add_argument('-gpu', '--gpu_id', help='GPU ID', type=int, default=0)
parser.add_argument('--run_dir', help='prefix to save ckpts to', type=str,
default=SCR_PREFIX + 'ckpts/svae/test/')
parser.add_argument('--log_file', help='file to log to', type=str, default='')
parser.add_argument('--data_dir', type=str, default='data')
parser.add_argument('--create_data', action='store_true')
parser.add_argument('--max_sequence_length', type=int, default=40)
parser.add_argument('--min_occ', type=int, default=1)
parser.add_argument('--max_vocab_size', type=int, default=30000)
parser.add_argument('--test', action='store_true')
parser.add_argument('-ep', '--epochs', type=int, default=10)
parser.add_argument('-bs', '--batch_size', type=int, default=128)
parser.add_argument('-o', '--optimizer', type=str, choices=['sgd', 'adam'], default='adam')
parser.add_argument('-lr', '--learning_rate', type=float, default=0.001)
parser.add_argument('--lr_decay_factor', type=float, default=0.5)
parser.add_argument('-p', '--patience', type=int, default=5)
parser.add_argument('--sched_patience', type=int, default=0)
parser.add_argument('-mg', '--max_grad_norm', type=float, default=5.)
parser.add_argument('-m', '--model', type=str, choices=['vae', 'ae'], default='vae')
parser.add_argument('-eb', '--embedding_size', type=int, default=300)
parser.add_argument('-rnn', '--rnn_type', type=str, choices=['rnn', 'lstm', 'gru'],
default='gru')
parser.add_argument('-hs', '--hidden_size', type=int, default=512)
parser.add_argument('-nl', '--num_layers', type=int, default=1)
parser.add_argument('-bi', '--bidirectional', action='store_true')
parser.add_argument('-ls', '--latent_size', type=int, default=16)
parser.add_argument('-wd', '--word_dropout', type=float, default=0.5)
parser.add_argument('-d', '--denoise', action='store_true')
parser.add_argument('-pd', '--prob_drop', type=float, default=0.1)
parser.add_argument('-ps', '--prob_swap', type=float, default=0.1)
parser.add_argument('-af', '--anneal_function', type=str, choices=['logistic', 'linear'],
default='logistic')
parser.add_argument('-k', '--k', type=float, default=0.0025)
parser.add_argument('-x0', '--x0', type=int, default=2500)
parser.add_argument('-v', '--print_every', type=int, default=50)
parser.add_argument('-tb', '--tensorboard_logging', action='store_true')
args = parser.parse_args(arguments)
log.basicConfig(format="%(asctime)s: %(message)s", level=log.INFO, datefmt='%m/%d %I:%M:%S %p')
if args.log_file:
log.getLogger().addHandler(log.FileHandler(args.log_file))
log.info(args)
ts = time.strftime('%Y-%b-%d-%H:%M:%S', time.gmtime())
seed = random.randint(1, 10000) if args.seed < 0 else args.seed
random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.set_device(args.gpu_id)
torch.cuda.manual_seed_all(seed)
splits = ['train', 'valid'] + (['test'] if args.test else [])
datasets = OrderedDict()
for split in splits:
datasets[split] = PTB(
data_dir=args.data_dir,
split=split,
create_data=args.create_data,
max_sequence_length=args.max_sequence_length,
min_occ=args.min_occ)
if args.model == 'vae':
model = SentenceVAE(args, datasets['train'].get_w2i(),
embedding_size=args.embedding_size,
rnn_type=args.rnn_type,
hidden_size=args.hidden_size,
word_dropout=args.word_dropout,
latent_size=args.latent_size,
num_layers=args.num_layers,
bidirectional=args.bidirectional)
elif args.model == 'ae':
model = SentenceAE(args, datasets['train'].get_w2i(),
embedding_size=args.embedding_size,
rnn_type=args.rnn_type,
hidden_size=args.hidden_size,
word_dropout=args.word_dropout,
latent_size=args.latent_size,
num_layers=args.num_layers,
bidirectional=args.bidirectional)
if args.denoise:
log.info("DENOISING!")
if torch.cuda.is_available():
model = model.cuda()
log.info(model)
if args.tensorboard_logging:
writer = SummaryWriter(os.path.join(args.run_dir, experiment_name(args, ts)))
writer.add_text("model", str(model))
writer.add_text("args", str(args))
writer.add_text("ts", ts)
save_model_path = args.run_dir
if not os.path.exists(save_model_path):
os.makedirs(save_model_path)
NLL = torch.nn.NLLLoss(size_average=False, ignore_index=datasets['train'].pad_idx)
params = model.parameters()
if args.optimizer == 'sgd':
optimizer = optim.SGD(params, lr=args.learning_rate)
elif args.optimizer == 'adam':
optimizer = optim.Adam(params, lr=args.learning_rate)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
factor=args.lr_decay_factor,
patience=args.sched_patience,
verbose=True)
tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor
batch_size = args.batch_size
step, stop_training = 0, 0
global_tracker = {'best_epoch': -1, 'best_score': -1, 'history': []}
for epoch in range(args.epochs):
if stop_training:
break
for split in splits:
tracker = defaultdict(tensor)
exs = [ex for ex in datasets[split].data.values()]
random.shuffle(exs)
n_batches = math.ceil(len(exs) / batch_size)
# Enable/Disable Dropout
if split == 'train':
log.info("***** Epoch %02d *****", epoch)
log.info("Training...")
model.train()
else:
log.info("Validating...")
model.eval()
#for iteration, batch in enumerate(data_loader):
for iteration in range(n_batches):
raw_batch = exs[iteration*batch_size:(iteration+1)*batch_size]
batch = model.prepare_batch([e['input'] for e in raw_batch])
batch['src_length'] = model.tensor(batch['src_length']).long()
batch['trg_length'] = model.tensor(batch['trg_length']).long()
b_size = batch['input'].size(0)
for k, v in batch.items():
if torch.is_tensor(v):
batch[k] = to_var(v)
# Forward pass
logp, mean, logv, z = model(batch['input'], batch['target'],
batch['src_length'], batch['trg_length'])
# loss calculation
nll_loss, kl_loss, kl_weight = model.loss_fn(logp, batch['target'],
batch['trg_length'], mean, logv,
args.anneal_function, step,
args.k, args.x0)
loss = (nll_loss + kl_weight * kl_loss) / b_size
nll_loss /= b_size
kl_loss /= b_size
if loss.data[0] != loss.data[0]: # nan detection
log.info("***** UH OH NAN DETECTED *****")
pdb.set_trace()
# backward + optimization
if split == 'train':
optimizer.zero_grad()
loss.backward()
if args.max_grad_norm:
grad_norm = clip_grad_norm(model.parameters(), args.max_grad_norm)
optimizer.step()
step += 1
# bookkeeping
tracker['ELBO'] = torch.cat((tracker['ELBO'], loss.data))
loss = loss.data[0]
if args.model == 'vae':
tracker['NLL'] = torch.cat((tracker['NLL'], nll_loss.data))
tracker['KL'] = torch.cat((tracker['NLL'], kl_loss.data))
nll_loss = nll_loss.data[0]
kl_loss = kl_loss.data[0]
else:
tracker['NLL'] = torch.cat((tracker['NLL'], model.tensor([0])))
tracker['KL'] = torch.cat((tracker['KL'], model.tensor([0])))
if args.tensorboard_logging:
writer.add_scalar("%s/ELBO"%split.upper(), loss, epoch*n_batches + iteration)
writer.add_scalar("%s/NLL Loss"%split.upper(), nll_loss, epoch*n_batches + iteration)
writer.add_scalar("%s/KL Loss"%split.upper(), kl_loss, epoch*n_batches + iteration)
writer.add_scalar("%s/KL Weight"%split.upper(), kl_weight, epoch*n_batches + iteration)
if iteration % args.print_every == 0 or iteration + 1 == n_batches:
log.info(" Batch %04d/%i\tLoss %9.4f\tNLL-Loss %9.4f\tKL-Loss %9.4f\tKL-Weight %6.3f",
iteration, n_batches-1, loss, nll_loss, kl_loss, kl_weight)
if split == 'valid': # store the dev target sentences
if 'target_sents' not in tracker:
tracker['target_sents'] = list()
tracker['target_sents'] += idx2word(batch['target'].data, \
i2w=datasets['train'].get_i2w(), pad_idx=datasets['train'].pad_idx)
if args.model == 'vae':
tracker['z'] = torch.cat((tracker['z'], z.data), dim=0)
log.info(" Mean ELBO %9.4f, NLL: %9.4f", torch.mean(tracker['ELBO']), torch.mean(tracker['NLL']))
if args.tensorboard_logging:
writer.add_scalar("%s-Epoch/ELBO" % split.upper(), torch.mean(tracker['ELBO']), epoch)
# save a dump of all sentences and the encoded latent space
if split == 'valid':
loss = torch.mean(tracker['ELBO'])
dump = {'target_sents':tracker['target_sents'], 'z':tracker['z'].tolist()}
if not os.path.exists(os.path.join('dumps', ts)):
os.makedirs('dumps/' + ts)
with open(os.path.join('dumps/'+ ts +'/valid_E%i.json'%epoch), 'w') as dump_file:
json.dump(dump, dump_file)
if loss < global_tracker['best_score'] or global_tracker['best_score'] < 0:
log.info(" Best model found")
global_tracker['best_epoch'] = epoch
global_tracker['best_score'] = loss
checkpoint_path = os.path.join(save_model_path, "best.mdl")
torch.save(model.state_dict(), checkpoint_path)
if kl_weight >= 1 - EPS:
if len(global_tracker['history']) >= args.patience and \
loss >= min(global_tracker['history'][-args.patience:]):
log.info("Ran out of patience!")
stop_training = 1
global_tracker['history'].append(loss)
scheduler.step(loss, epoch)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| if anneal_function == 'logistic':
return float(1/(1+np.exp(-k*(step-x0))))
elif anneal_function == 'linear':
return min(1, step/x0) | identifier_body |
train.py | ''' Sentence VAE '''
import os
import sys
import json
import time
import math
import random
import argparse
import ipdb as pdb
import logging as log
import torch
import torch.optim as optim
from torch.nn.utils import clip_grad_norm
import numpy as np
from multiprocessing import cpu_count
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from collections import OrderedDict, defaultdict
from ptb import PTB
from utils import to_var, idx2word, experiment_name
from model import SentenceVAE, SentenceAE
SCR_PREFIX = '/misc/vlgscratch4/BowmanGroup/awang/'
EPS = 1e-3
def kl_anneal_function(anneal_function, step, k, x0):
if anneal_function == 'logistic':
return float(1/(1+np.exp(-k*(step-x0))))
elif anneal_function == 'linear':
return min(1, step/x0)
def loss_fn(NLL, logp, target, length, mean, logv, anneal_function, step, k, x0):
# cut-off unnecessary padding from target, and flatten
target = target[:, :torch.max(length).data[0]].contiguous().view(-1)
logp = logp.view(-1, logp.size(2))
# Negative Log Likelihood
nll_loss = NLL(logp, target)
# KL Divergence
kl_loss = -0.5 * torch.sum(1 + logv - mean.pow(2) - logv.exp())
kl_weight = kl_anneal_function(anneal_function, step, k, x0)
return nll_loss, kl_loss, kl_weight
| parser = argparse.ArgumentParser()
parser.add_argument('--seed', help='random seed', type=int, default=19)
parser.add_argument('-gpu', '--gpu_id', help='GPU ID', type=int, default=0)
parser.add_argument('--run_dir', help='prefix to save ckpts to', type=str,
default=SCR_PREFIX + 'ckpts/svae/test/')
parser.add_argument('--log_file', help='file to log to', type=str, default='')
parser.add_argument('--data_dir', type=str, default='data')
parser.add_argument('--create_data', action='store_true')
parser.add_argument('--max_sequence_length', type=int, default=40)
parser.add_argument('--min_occ', type=int, default=1)
parser.add_argument('--max_vocab_size', type=int, default=30000)
parser.add_argument('--test', action='store_true')
parser.add_argument('-ep', '--epochs', type=int, default=10)
parser.add_argument('-bs', '--batch_size', type=int, default=128)
parser.add_argument('-o', '--optimizer', type=str, choices=['sgd', 'adam'], default='adam')
parser.add_argument('-lr', '--learning_rate', type=float, default=0.001)
parser.add_argument('--lr_decay_factor', type=float, default=0.5)
parser.add_argument('-p', '--patience', type=int, default=5)
parser.add_argument('--sched_patience', type=int, default=0)
parser.add_argument('-mg', '--max_grad_norm', type=float, default=5.)
parser.add_argument('-m', '--model', type=str, choices=['vae', 'ae'], default='vae')
parser.add_argument('-eb', '--embedding_size', type=int, default=300)
parser.add_argument('-rnn', '--rnn_type', type=str, choices=['rnn', 'lstm', 'gru'],
default='gru')
parser.add_argument('-hs', '--hidden_size', type=int, default=512)
parser.add_argument('-nl', '--num_layers', type=int, default=1)
parser.add_argument('-bi', '--bidirectional', action='store_true')
parser.add_argument('-ls', '--latent_size', type=int, default=16)
parser.add_argument('-wd', '--word_dropout', type=float, default=0.5)
parser.add_argument('-d', '--denoise', action='store_true')
parser.add_argument('-pd', '--prob_drop', type=float, default=0.1)
parser.add_argument('-ps', '--prob_swap', type=float, default=0.1)
parser.add_argument('-af', '--anneal_function', type=str, choices=['logistic', 'linear'],
default='logistic')
parser.add_argument('-k', '--k', type=float, default=0.0025)
parser.add_argument('-x0', '--x0', type=int, default=2500)
parser.add_argument('-v', '--print_every', type=int, default=50)
parser.add_argument('-tb', '--tensorboard_logging', action='store_true')
args = parser.parse_args(arguments)
log.basicConfig(format="%(asctime)s: %(message)s", level=log.INFO, datefmt='%m/%d %I:%M:%S %p')
if args.log_file:
log.getLogger().addHandler(log.FileHandler(args.log_file))
log.info(args)
ts = time.strftime('%Y-%b-%d-%H:%M:%S', time.gmtime())
seed = random.randint(1, 10000) if args.seed < 0 else args.seed
random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.set_device(args.gpu_id)
torch.cuda.manual_seed_all(seed)
splits = ['train', 'valid'] + (['test'] if args.test else [])
datasets = OrderedDict()
for split in splits:
datasets[split] = PTB(
data_dir=args.data_dir,
split=split,
create_data=args.create_data,
max_sequence_length=args.max_sequence_length,
min_occ=args.min_occ)
if args.model == 'vae':
model = SentenceVAE(args, datasets['train'].get_w2i(),
embedding_size=args.embedding_size,
rnn_type=args.rnn_type,
hidden_size=args.hidden_size,
word_dropout=args.word_dropout,
latent_size=args.latent_size,
num_layers=args.num_layers,
bidirectional=args.bidirectional)
elif args.model == 'ae':
model = SentenceAE(args, datasets['train'].get_w2i(),
embedding_size=args.embedding_size,
rnn_type=args.rnn_type,
hidden_size=args.hidden_size,
word_dropout=args.word_dropout,
latent_size=args.latent_size,
num_layers=args.num_layers,
bidirectional=args.bidirectional)
if args.denoise:
log.info("DENOISING!")
if torch.cuda.is_available():
model = model.cuda()
log.info(model)
if args.tensorboard_logging:
writer = SummaryWriter(os.path.join(args.run_dir, experiment_name(args, ts)))
writer.add_text("model", str(model))
writer.add_text("args", str(args))
writer.add_text("ts", ts)
save_model_path = args.run_dir
if not os.path.exists(save_model_path):
os.makedirs(save_model_path)
NLL = torch.nn.NLLLoss(size_average=False, ignore_index=datasets['train'].pad_idx)
params = model.parameters()
if args.optimizer == 'sgd':
optimizer = optim.SGD(params, lr=args.learning_rate)
elif args.optimizer == 'adam':
optimizer = optim.Adam(params, lr=args.learning_rate)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
factor=args.lr_decay_factor,
patience=args.sched_patience,
verbose=True)
tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor
batch_size = args.batch_size
step, stop_training = 0, 0
global_tracker = {'best_epoch': -1, 'best_score': -1, 'history': []}
for epoch in range(args.epochs):
if stop_training:
break
for split in splits:
tracker = defaultdict(tensor)
exs = [ex for ex in datasets[split].data.values()]
random.shuffle(exs)
n_batches = math.ceil(len(exs) / batch_size)
# Enable/Disable Dropout
if split == 'train':
log.info("***** Epoch %02d *****", epoch)
log.info("Training...")
model.train()
else:
log.info("Validating...")
model.eval()
#for iteration, batch in enumerate(data_loader):
for iteration in range(n_batches):
raw_batch = exs[iteration*batch_size:(iteration+1)*batch_size]
batch = model.prepare_batch([e['input'] for e in raw_batch])
batch['src_length'] = model.tensor(batch['src_length']).long()
batch['trg_length'] = model.tensor(batch['trg_length']).long()
b_size = batch['input'].size(0)
for k, v in batch.items():
if torch.is_tensor(v):
batch[k] = to_var(v)
# Forward pass
logp, mean, logv, z = model(batch['input'], batch['target'],
batch['src_length'], batch['trg_length'])
# loss calculation
nll_loss, kl_loss, kl_weight = model.loss_fn(logp, batch['target'],
batch['trg_length'], mean, logv,
args.anneal_function, step,
args.k, args.x0)
loss = (nll_loss + kl_weight * kl_loss) / b_size
nll_loss /= b_size
kl_loss /= b_size
if loss.data[0] != loss.data[0]: # nan detection
log.info("***** UH OH NAN DETECTED *****")
pdb.set_trace()
# backward + optimization
if split == 'train':
optimizer.zero_grad()
loss.backward()
if args.max_grad_norm:
grad_norm = clip_grad_norm(model.parameters(), args.max_grad_norm)
optimizer.step()
step += 1
# bookkeeping
tracker['ELBO'] = torch.cat((tracker['ELBO'], loss.data))
loss = loss.data[0]
if args.model == 'vae':
tracker['NLL'] = torch.cat((tracker['NLL'], nll_loss.data))
tracker['KL'] = torch.cat((tracker['NLL'], kl_loss.data))
nll_loss = nll_loss.data[0]
kl_loss = kl_loss.data[0]
else:
tracker['NLL'] = torch.cat((tracker['NLL'], model.tensor([0])))
tracker['KL'] = torch.cat((tracker['KL'], model.tensor([0])))
if args.tensorboard_logging:
writer.add_scalar("%s/ELBO"%split.upper(), loss, epoch*n_batches + iteration)
writer.add_scalar("%s/NLL Loss"%split.upper(), nll_loss, epoch*n_batches + iteration)
writer.add_scalar("%s/KL Loss"%split.upper(), kl_loss, epoch*n_batches + iteration)
writer.add_scalar("%s/KL Weight"%split.upper(), kl_weight, epoch*n_batches + iteration)
if iteration % args.print_every == 0 or iteration + 1 == n_batches:
log.info(" Batch %04d/%i\tLoss %9.4f\tNLL-Loss %9.4f\tKL-Loss %9.4f\tKL-Weight %6.3f",
iteration, n_batches-1, loss, nll_loss, kl_loss, kl_weight)
if split == 'valid': # store the dev target sentences
if 'target_sents' not in tracker:
tracker['target_sents'] = list()
tracker['target_sents'] += idx2word(batch['target'].data, \
i2w=datasets['train'].get_i2w(), pad_idx=datasets['train'].pad_idx)
if args.model == 'vae':
tracker['z'] = torch.cat((tracker['z'], z.data), dim=0)
log.info(" Mean ELBO %9.4f, NLL: %9.4f", torch.mean(tracker['ELBO']), torch.mean(tracker['NLL']))
if args.tensorboard_logging:
writer.add_scalar("%s-Epoch/ELBO" % split.upper(), torch.mean(tracker['ELBO']), epoch)
# save a dump of all sentences and the encoded latent space
if split == 'valid':
loss = torch.mean(tracker['ELBO'])
dump = {'target_sents':tracker['target_sents'], 'z':tracker['z'].tolist()}
if not os.path.exists(os.path.join('dumps', ts)):
os.makedirs('dumps/' + ts)
with open(os.path.join('dumps/'+ ts +'/valid_E%i.json'%epoch), 'w') as dump_file:
json.dump(dump, dump_file)
if loss < global_tracker['best_score'] or global_tracker['best_score'] < 0:
log.info(" Best model found")
global_tracker['best_epoch'] = epoch
global_tracker['best_score'] = loss
checkpoint_path = os.path.join(save_model_path, "best.mdl")
torch.save(model.state_dict(), checkpoint_path)
if kl_weight >= 1 - EPS:
if len(global_tracker['history']) >= args.patience and \
loss >= min(global_tracker['history'][-args.patience:]):
log.info("Ran out of patience!")
stop_training = 1
global_tracker['history'].append(loss)
scheduler.step(loss, epoch)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:])) | def main(arguments): | random_line_split |
train.py | ''' Sentence VAE '''
import os
import sys
import json
import time
import math
import random
import argparse
import ipdb as pdb
import logging as log
import torch
import torch.optim as optim
from torch.nn.utils import clip_grad_norm
import numpy as np
from multiprocessing import cpu_count
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from collections import OrderedDict, defaultdict
from ptb import PTB
from utils import to_var, idx2word, experiment_name
from model import SentenceVAE, SentenceAE
SCR_PREFIX = '/misc/vlgscratch4/BowmanGroup/awang/'
EPS = 1e-3
def kl_anneal_function(anneal_function, step, k, x0):
if anneal_function == 'logistic':
return float(1/(1+np.exp(-k*(step-x0))))
elif anneal_function == 'linear':
return min(1, step/x0)
def | (NLL, logp, target, length, mean, logv, anneal_function, step, k, x0):
# cut-off unnecessary padding from target, and flatten
target = target[:, :torch.max(length).data[0]].contiguous().view(-1)
logp = logp.view(-1, logp.size(2))
# Negative Log Likelihood
nll_loss = NLL(logp, target)
# KL Divergence
kl_loss = -0.5 * torch.sum(1 + logv - mean.pow(2) - logv.exp())
kl_weight = kl_anneal_function(anneal_function, step, k, x0)
return nll_loss, kl_loss, kl_weight
def main(arguments):
parser = argparse.ArgumentParser()
parser.add_argument('--seed', help='random seed', type=int, default=19)
parser.add_argument('-gpu', '--gpu_id', help='GPU ID', type=int, default=0)
parser.add_argument('--run_dir', help='prefix to save ckpts to', type=str,
default=SCR_PREFIX + 'ckpts/svae/test/')
parser.add_argument('--log_file', help='file to log to', type=str, default='')
parser.add_argument('--data_dir', type=str, default='data')
parser.add_argument('--create_data', action='store_true')
parser.add_argument('--max_sequence_length', type=int, default=40)
parser.add_argument('--min_occ', type=int, default=1)
parser.add_argument('--max_vocab_size', type=int, default=30000)
parser.add_argument('--test', action='store_true')
parser.add_argument('-ep', '--epochs', type=int, default=10)
parser.add_argument('-bs', '--batch_size', type=int, default=128)
parser.add_argument('-o', '--optimizer', type=str, choices=['sgd', 'adam'], default='adam')
parser.add_argument('-lr', '--learning_rate', type=float, default=0.001)
parser.add_argument('--lr_decay_factor', type=float, default=0.5)
parser.add_argument('-p', '--patience', type=int, default=5)
parser.add_argument('--sched_patience', type=int, default=0)
parser.add_argument('-mg', '--max_grad_norm', type=float, default=5.)
parser.add_argument('-m', '--model', type=str, choices=['vae', 'ae'], default='vae')
parser.add_argument('-eb', '--embedding_size', type=int, default=300)
parser.add_argument('-rnn', '--rnn_type', type=str, choices=['rnn', 'lstm', 'gru'],
default='gru')
parser.add_argument('-hs', '--hidden_size', type=int, default=512)
parser.add_argument('-nl', '--num_layers', type=int, default=1)
parser.add_argument('-bi', '--bidirectional', action='store_true')
parser.add_argument('-ls', '--latent_size', type=int, default=16)
parser.add_argument('-wd', '--word_dropout', type=float, default=0.5)
parser.add_argument('-d', '--denoise', action='store_true')
parser.add_argument('-pd', '--prob_drop', type=float, default=0.1)
parser.add_argument('-ps', '--prob_swap', type=float, default=0.1)
parser.add_argument('-af', '--anneal_function', type=str, choices=['logistic', 'linear'],
default='logistic')
parser.add_argument('-k', '--k', type=float, default=0.0025)
parser.add_argument('-x0', '--x0', type=int, default=2500)
parser.add_argument('-v', '--print_every', type=int, default=50)
parser.add_argument('-tb', '--tensorboard_logging', action='store_true')
args = parser.parse_args(arguments)
log.basicConfig(format="%(asctime)s: %(message)s", level=log.INFO, datefmt='%m/%d %I:%M:%S %p')
if args.log_file:
log.getLogger().addHandler(log.FileHandler(args.log_file))
log.info(args)
ts = time.strftime('%Y-%b-%d-%H:%M:%S', time.gmtime())
seed = random.randint(1, 10000) if args.seed < 0 else args.seed
random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.set_device(args.gpu_id)
torch.cuda.manual_seed_all(seed)
splits = ['train', 'valid'] + (['test'] if args.test else [])
datasets = OrderedDict()
for split in splits:
datasets[split] = PTB(
data_dir=args.data_dir,
split=split,
create_data=args.create_data,
max_sequence_length=args.max_sequence_length,
min_occ=args.min_occ)
if args.model == 'vae':
model = SentenceVAE(args, datasets['train'].get_w2i(),
embedding_size=args.embedding_size,
rnn_type=args.rnn_type,
hidden_size=args.hidden_size,
word_dropout=args.word_dropout,
latent_size=args.latent_size,
num_layers=args.num_layers,
bidirectional=args.bidirectional)
elif args.model == 'ae':
model = SentenceAE(args, datasets['train'].get_w2i(),
embedding_size=args.embedding_size,
rnn_type=args.rnn_type,
hidden_size=args.hidden_size,
word_dropout=args.word_dropout,
latent_size=args.latent_size,
num_layers=args.num_layers,
bidirectional=args.bidirectional)
if args.denoise:
log.info("DENOISING!")
if torch.cuda.is_available():
model = model.cuda()
log.info(model)
if args.tensorboard_logging:
writer = SummaryWriter(os.path.join(args.run_dir, experiment_name(args, ts)))
writer.add_text("model", str(model))
writer.add_text("args", str(args))
writer.add_text("ts", ts)
save_model_path = args.run_dir
if not os.path.exists(save_model_path):
os.makedirs(save_model_path)
NLL = torch.nn.NLLLoss(size_average=False, ignore_index=datasets['train'].pad_idx)
params = model.parameters()
if args.optimizer == 'sgd':
optimizer = optim.SGD(params, lr=args.learning_rate)
elif args.optimizer == 'adam':
optimizer = optim.Adam(params, lr=args.learning_rate)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
factor=args.lr_decay_factor,
patience=args.sched_patience,
verbose=True)
tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor
batch_size = args.batch_size
step, stop_training = 0, 0
global_tracker = {'best_epoch': -1, 'best_score': -1, 'history': []}
for epoch in range(args.epochs):
if stop_training:
break
for split in splits:
tracker = defaultdict(tensor)
exs = [ex for ex in datasets[split].data.values()]
random.shuffle(exs)
n_batches = math.ceil(len(exs) / batch_size)
# Enable/Disable Dropout
if split == 'train':
log.info("***** Epoch %02d *****", epoch)
log.info("Training...")
model.train()
else:
log.info("Validating...")
model.eval()
#for iteration, batch in enumerate(data_loader):
for iteration in range(n_batches):
raw_batch = exs[iteration*batch_size:(iteration+1)*batch_size]
batch = model.prepare_batch([e['input'] for e in raw_batch])
batch['src_length'] = model.tensor(batch['src_length']).long()
batch['trg_length'] = model.tensor(batch['trg_length']).long()
b_size = batch['input'].size(0)
for k, v in batch.items():
if torch.is_tensor(v):
batch[k] = to_var(v)
# Forward pass
logp, mean, logv, z = model(batch['input'], batch['target'],
batch['src_length'], batch['trg_length'])
# loss calculation
nll_loss, kl_loss, kl_weight = model.loss_fn(logp, batch['target'],
batch['trg_length'], mean, logv,
args.anneal_function, step,
args.k, args.x0)
loss = (nll_loss + kl_weight * kl_loss) / b_size
nll_loss /= b_size
kl_loss /= b_size
if loss.data[0] != loss.data[0]: # nan detection
log.info("***** UH OH NAN DETECTED *****")
pdb.set_trace()
# backward + optimization
if split == 'train':
optimizer.zero_grad()
loss.backward()
if args.max_grad_norm:
grad_norm = clip_grad_norm(model.parameters(), args.max_grad_norm)
optimizer.step()
step += 1
# bookkeeping
tracker['ELBO'] = torch.cat((tracker['ELBO'], loss.data))
loss = loss.data[0]
if args.model == 'vae':
tracker['NLL'] = torch.cat((tracker['NLL'], nll_loss.data))
tracker['KL'] = torch.cat((tracker['NLL'], kl_loss.data))
nll_loss = nll_loss.data[0]
kl_loss = kl_loss.data[0]
else:
tracker['NLL'] = torch.cat((tracker['NLL'], model.tensor([0])))
tracker['KL'] = torch.cat((tracker['KL'], model.tensor([0])))
if args.tensorboard_logging:
writer.add_scalar("%s/ELBO"%split.upper(), loss, epoch*n_batches + iteration)
writer.add_scalar("%s/NLL Loss"%split.upper(), nll_loss, epoch*n_batches + iteration)
writer.add_scalar("%s/KL Loss"%split.upper(), kl_loss, epoch*n_batches + iteration)
writer.add_scalar("%s/KL Weight"%split.upper(), kl_weight, epoch*n_batches + iteration)
if iteration % args.print_every == 0 or iteration + 1 == n_batches:
log.info(" Batch %04d/%i\tLoss %9.4f\tNLL-Loss %9.4f\tKL-Loss %9.4f\tKL-Weight %6.3f",
iteration, n_batches-1, loss, nll_loss, kl_loss, kl_weight)
if split == 'valid': # store the dev target sentences
if 'target_sents' not in tracker:
tracker['target_sents'] = list()
tracker['target_sents'] += idx2word(batch['target'].data, \
i2w=datasets['train'].get_i2w(), pad_idx=datasets['train'].pad_idx)
if args.model == 'vae':
tracker['z'] = torch.cat((tracker['z'], z.data), dim=0)
log.info(" Mean ELBO %9.4f, NLL: %9.4f", torch.mean(tracker['ELBO']), torch.mean(tracker['NLL']))
if args.tensorboard_logging:
writer.add_scalar("%s-Epoch/ELBO" % split.upper(), torch.mean(tracker['ELBO']), epoch)
# save a dump of all sentences and the encoded latent space
if split == 'valid':
loss = torch.mean(tracker['ELBO'])
dump = {'target_sents':tracker['target_sents'], 'z':tracker['z'].tolist()}
if not os.path.exists(os.path.join('dumps', ts)):
os.makedirs('dumps/' + ts)
with open(os.path.join('dumps/'+ ts +'/valid_E%i.json'%epoch), 'w') as dump_file:
json.dump(dump, dump_file)
if loss < global_tracker['best_score'] or global_tracker['best_score'] < 0:
log.info(" Best model found")
global_tracker['best_epoch'] = epoch
global_tracker['best_score'] = loss
checkpoint_path = os.path.join(save_model_path, "best.mdl")
torch.save(model.state_dict(), checkpoint_path)
if kl_weight >= 1 - EPS:
if len(global_tracker['history']) >= args.patience and \
loss >= min(global_tracker['history'][-args.patience:]):
log.info("Ran out of patience!")
stop_training = 1
global_tracker['history'].append(loss)
scheduler.step(loss, epoch)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| loss_fn | identifier_name |
menu.rs | //! Menu abstrction module
use std::collections::HashMap;
use std::rc::Rc;
use std::cell::RefCell;
use dbusmenu::ComCanonicalDbusmenu;
use dbus::arg;
use dbus;
#[derive(Default)]
pub struct Menu {
/// - `revision: i32`: The revision number of the layout.
/// For matching with layoutUpdated signals.
revision: Rc<RefCell<i32>>,
/// The window ID that the menu was created on
pub window_id: Option<u32>,
/// The actual Menu structure, indexed by their action name / identifier
pub menu: HashMap<&'static str, SubMenu>,
/// The current language.
/// **NOTE** : The default is "en", so make sure to have at least one
/// entry in the menu items labels that is indexed by "en"
pub cur_language: &'static str,
}
/// Top-level submenu. Not to be confused with MenuData::SubMenuItem
pub struct SubMenu {
/// The label of the menu
pub label: HashMap<String, String>,
/// The menu items, indexed by their action name
pub menu: HashMap<String, MenuItem>,
}
impl Menu {
/// Creates a new window, but doesn't add it to any window yet
/// Starts a new thread for maintaining the rendering loop
pub fn new() -> Self {
Self {
revision: Rc::new(RefCell::new(0)),
window_id: None,
menu: HashMap::new(),
cur_language: "en",
}
}
/// Adds the menu to the window - takes XID of window as parameter
pub fn add_to_window(&mut self, window_id: u32) {
self.window_id = Some(window_id);
// todo: notify app menu registrar here
println!("registered window!");
}
/// Removes the menu
pub fn remove_from_window(&mut self) {
self.window_id = None;
// appmenu unregister window
// should also be called on drop
println!("unregistered window!");
}
/// Removes an item from the menu list.
/// Does not error out, but rather returns if the removal was successful
pub fn remove_item<S: Into<String>>(item: S) -> bool {
let item_id = item.into();
println!("remove_item: {:?}", item_id);
false
}
/// Adds an item to the menu list.
/// Does not error out, but rather returns if the add was successful
pub fn add_item<S: Into<String>>(item: S) -> bool {
let item_id = item.into();
println!("add item: {:?}", item_id);
false
}
/// Actually constructs the window so that it shows the menu now
/// Sends the menu over DBus
pub fn show() {
}
}
pub enum MenuItem {
/// Text menu item, regular. Gets called if clicked
TextMenuItem(MenuData<Box<Fn() -> ()>>),
/// Checkbox menu item,
CheckboxMenuItem(MenuData<Box<Fn(bool) -> ()>>),
/// Radio menu item, consisting of multiple menu items.
/// Callback gets a string of the currently selected value
RadioMenuItem(Vec<MenuData<Box<Fn(String) -> ()>>>),
/// Seperator
Seperator(),
/// Submenu
SubMenuItem(String, Box<SubMenu>),
}
#[derive(Debug)]
pub struct MenuData<F> {
/// The action to execute, depends on the type of menu item
pub action: F,
/// Optional image as PNG bytes
pub image: Option<Vec<u8>>,
/// The label(s) of the menu item, indexed by language identifier
///
/// For example:
///
/// de - Datei öffnen
/// en - Open file
pub label: HashMap<String, String>,
/// Should the menu entry be activated on hovering
pub activate_on_hover: bool,
/// Optional shortcuts in the format of a string
/// `[["Control", "S"]]`
/// `[["Control", "Q"], ["Alt", "X"]]`
/// This is only a visual cue (todo: really?)
pub shortcut: Option<Vec<ShortcutData>>,
}
#[derive(Debug, Clone)]
pub enum ShortcutData {
/// The "Control" in CTRL + S
ControlChar(CtrlChar),
/// The "S" in CTRL + S
Char(String),
}
/// The four controls registered by dbus
#[derive(Debug, Copy, Clone)]
pub enum CtrlChar {
Ctrl,
Alt,
Shift,
Super,
}
/*
0 => [
"type" => "standard" | "seperator",
"label" => "Hello",
"enabled" => true,
"visible" => true,
"icon-name" => "hello.png",
"icon-data" => Vec<u8>,
"shortcut" => [["Control", "S"]],
"toggle-type" => "checkmark" | "radio", "",
"toggle-state" => MenuItemToggleState,
"children-display" => "" | "submenu",
],
defaults:
type = "standard",
label = "",
enabled = "",
visible = "",
icon-name = "",
icon-data = None,
shortcut = None,
toggle-type = "",
toggle-state = -1
children-display = "",
*/
#[derive(Debug)]
pub enum MenuItemToggleState {
On,
Off,
Invalid,
}
impl Into<i32> for MenuItemToggleState {
fn into(self) -> i32 {
match self {
MenuItemToggleState::On => 1,
MenuItemToggleState::Off => 0,
MenuItemToggleState::Invalid => -1,
}
}
}
/// Implement the ComCanonicalMenu so we can push it to the server
impl ComCanonicalDbusmenu for Menu {
type Err = dbus::tree::MethodErr;
/// - `parent_id`: The ID of the parent node for the layout. For grabbing the layout from the root node use zero.
/// - `recursion_depth`: The amount of levels of recursion to use. This affects the content of the second variant array.
/// - -1: deliver all the items under the @a parentId.
/// - 0: no recursion, the array will be empty.
/// - n: array will contains items up to 'n' level depth.
/// - `property_names`: The list of item properties we are interested in. If there are no entries in the list all of the properties will be sent.
///
/// ### Outputs
///
/// - `revision: i32`: The revision number of the layout. For matching with layoutUpdated signals.
/// - `layout: HashMap`: The layout, as a recursive structure.
///
fn get_layout(&self, parent_id: i32, recursion_depth: i32, property_names: Vec<&str>)
-> Result<(u32, (i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>, Vec<arg::Variant<Box<arg::RefArg>>>)), Self::Err> {
// I have no idea if this will actually work in any way possible
// (u, (ia{sv}av))
// Nautilus: 0, 2, []
// Answer: 14
/*
try!(m.as_result());
let mut i = m.iter_init();
let revision: u32 = try!(i.read());
let layout: (i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>, Vec<arg::Variant<Box<arg::RefArg>>>) = try!(i.read());
Ok((revision, layout))
*/
use dbus::Message;
use dbus::Member;
println!("getlayout called!");
let mut m = Message::new_method_call("com.canonical.dbusmenu", "com/canonical/dbusmenu", "com.canonical.dbusmenu", Member::new("com.canonical.dbusmenu".as_bytes()).unwrap()).unwrap();
try!(m.as_result());
let mut i = m.iter_init();
let mut map = HashMap::<String, arg::Variant<Box<arg::RefArg>>>::new();
map.insert("data-hello".into(), arg::Variant::new_refarg(&mut i).unwrap());
*self.revision.borrow_mut() += 1;
Ok((1, (*self.revision.borrow(), map, Vec::new())))
}
fn get_group_properties(&self, ids: Vec<i32>, property_names: Vec<&str>)
-> Result<Vec<(i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>)>, Self::Err> {
// I AM NOT SURE IF THS WORKS!
println!("get_group_properties called: {:?}, {:?}", ids, property_names);
/*
method call time=1510750424.121891
sender=:1.318
-> destination=org.freedesktop.DBus
serial=1 path=/org/freedesktop/DBus;
interface=org.freedesktop.DBus;
member=Hello
*/
// warning: other method is also called "hello"
// If Nautilus is called with [0], returns [(0, {'children-display': 'submenu'})]
let mut properties_hashmap = HashMap::<String, arg::Variant<Box<arg::RefArg>>>::new();
properties_hashmap.insert("label".into(), arg::Variant(Box::new("Hello".to_string())));
Ok(vec![(0, properties_hashmap)])
}
fn get_property(&self, id: i32, name: &str) -> Result<arg::Variant<Box<arg::RefArg>>, Self::Err> {
println!("get property called!");
// Nautilus get_propery(0, 'children-display') -> 'submenu'
Ok(arg::Variant(Box::new("everything is OK".to_string())))
}
fn event(&self, id: i32, event_id: &str, data: arg::Variant<Box<arg::RefArg>>, timestamp: u32) -> Result<(), Self::Err> {
println!("event called!");
if event_id == "clicked" {
println!("received clicked event for menu item {:?}", id);
} else if event_id == "hovered" {
println!("received hovered event for menu item {:?}", id);
}
Ok(())
}
fn about_to_show(&self, id: i32) -> Result<bool, Self::Err> {
// ??? "Whether this AboutToShow event should result in the menu being updated."
// not sure what this means
println!("about_to_show called, id: {:?}", id);
Ok(true)
}
fn get_version(&self) -> Result<u32, Self::Err> { |
fn get_status(&self) -> Result<String, Self::Err> {
println!("get_status called!");
// Menus will always be in "normal" state, may change later on
Ok("normal".into())
}
}
#[derive(Default, Clone)]
pub struct MData;
impl<'a> dbus::tree::DataType for MData {
type Tree = ();
type ObjectPath = Menu; // Every objectpath in the tree now owns a menu object.
type Property = ();
type Interface = ();
type Method = ();
type Signal = ();
}
/// Since parts of the menu are not printable, implement Debug trait manually
/// Needed because of a bug in rust: https://github.com/rust-lang/rust/issues/31518
impl ::std::fmt::Debug for Menu {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "Menu {{ /* non-printable fields omitted */ }}")
}
}
|
// ????
println!("about_to_show called!");
Ok(3)
}
| identifier_body |
menu.rs | //! Menu abstrction module
use std::collections::HashMap;
use std::rc::Rc;
use std::cell::RefCell;
use dbusmenu::ComCanonicalDbusmenu;
use dbus::arg;
use dbus;
#[derive(Default)]
pub struct Menu {
/// - `revision: i32`: The revision number of the layout.
/// For matching with layoutUpdated signals.
revision: Rc<RefCell<i32>>,
/// The window ID that the menu was created on
pub window_id: Option<u32>,
/// The actual Menu structure, indexed by their action name / identifier
pub menu: HashMap<&'static str, SubMenu>,
/// The current language.
/// **NOTE** : The default is "en", so make sure to have at least one
/// entry in the menu items labels that is indexed by "en"
pub cur_language: &'static str,
}
/// Top-level submenu. Not to be confused with MenuData::SubMenuItem
pub struct SubMenu {
/// The label of the menu
pub label: HashMap<String, String>,
/// The menu items, indexed by their action name
pub menu: HashMap<String, MenuItem>,
}
impl Menu {
/// Creates a new window, but doesn't add it to any window yet
/// Starts a new thread for maintaining the rendering loop
pub fn new() -> Self {
Self {
revision: Rc::new(RefCell::new(0)),
window_id: None,
menu: HashMap::new(),
cur_language: "en",
}
}
/// Adds the menu to the window - takes XID of window as parameter
pub fn add_to_window(&mut self, window_id: u32) {
self.window_id = Some(window_id);
// todo: notify app menu registrar here
println!("registered window!");
}
/// Removes the menu
pub fn remove_from_window(&mut self) {
self.window_id = None;
// appmenu unregister window
// should also be called on drop
println!("unregistered window!");
}
/// Removes an item from the menu list.
/// Does not error out, but rather returns if the removal was successful
pub fn remove_item<S: Into<String>>(item: S) -> bool {
let item_id = item.into();
println!("remove_item: {:?}", item_id);
false
}
/// Adds an item to the menu list.
/// Does not error out, but rather returns if the add was successful
pub fn add_item<S: Into<String>>(item: S) -> bool {
let item_id = item.into();
println!("add item: {:?}", item_id);
false
}
/// Actually constructs the window so that it shows the menu now
/// Sends the menu over DBus
pub fn show() {
}
}
pub enum MenuItem {
/// Text menu item, regular. Gets called if clicked
TextMenuItem(MenuData<Box<Fn() -> ()>>),
/// Checkbox menu item,
CheckboxMenuItem(MenuData<Box<Fn(bool) -> ()>>),
/// Radio menu item, consisting of multiple menu items.
/// Callback gets a string of the currently selected value
RadioMenuItem(Vec<MenuData<Box<Fn(String) -> ()>>>),
/// Seperator
Seperator(),
/// Submenu
SubMenuItem(String, Box<SubMenu>),
}
#[derive(Debug)]
pub struct MenuData<F> {
/// The action to execute, depends on the type of menu item
pub action: F,
/// Optional image as PNG bytes
pub image: Option<Vec<u8>>,
/// The label(s) of the menu item, indexed by language identifier
///
/// For example:
///
/// de - Datei öffnen
/// en - Open file
pub label: HashMap<String, String>,
/// Should the menu entry be activated on hovering
pub activate_on_hover: bool,
/// Optional shortcuts in the format of a string
/// `[["Control", "S"]]`
/// `[["Control", "Q"], ["Alt", "X"]]`
/// This is only a visual cue (todo: really?)
pub shortcut: Option<Vec<ShortcutData>>,
}
#[derive(Debug, Clone)]
pub enum ShortcutData {
/// The "Control" in CTRL + S
ControlChar(CtrlChar),
/// The "S" in CTRL + S
Char(String),
}
/// The four controls registered by dbus
#[derive(Debug, Copy, Clone)]
pub enum CtrlChar {
Ctrl,
Alt,
Shift,
Super,
}
/*
0 => [
"type" => "standard" | "seperator",
"label" => "Hello",
"enabled" => true,
"visible" => true,
"icon-name" => "hello.png",
"icon-data" => Vec<u8>,
"shortcut" => [["Control", "S"]],
"toggle-type" => "checkmark" | "radio", "",
"toggle-state" => MenuItemToggleState,
"children-display" => "" | "submenu",
],
defaults:
type = "standard",
label = "",
enabled = "",
visible = "",
icon-name = "",
icon-data = None,
shortcut = None,
toggle-type = "",
toggle-state = -1
children-display = "",
*/
#[derive(Debug)]
pub enum MenuItemToggleState {
On,
Off,
Invalid,
}
impl Into<i32> for MenuItemToggleState {
fn into(self) -> i32 {
match self {
MenuItemToggleState::On => 1,
MenuItemToggleState::Off => 0,
MenuItemToggleState::Invalid => -1,
}
}
}
/// Implement the ComCanonicalMenu so we can push it to the server
impl ComCanonicalDbusmenu for Menu {
type Err = dbus::tree::MethodErr;
/// - `parent_id`: The ID of the parent node for the layout. For grabbing the layout from the root node use zero.
/// - `recursion_depth`: The amount of levels of recursion to use. This affects the content of the second variant array.
/// - -1: deliver all the items under the @a parentId.
/// - 0: no recursion, the array will be empty.
/// - n: array will contains items up to 'n' level depth.
/// - `property_names`: The list of item properties we are interested in. If there are no entries in the list all of the properties will be sent.
///
/// ### Outputs
///
/// - `revision: i32`: The revision number of the layout. For matching with layoutUpdated signals.
/// - `layout: HashMap`: The layout, as a recursive structure.
///
fn get_layout(&self, parent_id: i32, recursion_depth: i32, property_names: Vec<&str>)
-> Result<(u32, (i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>, Vec<arg::Variant<Box<arg::RefArg>>>)), Self::Err> {
// I have no idea if this will actually work in any way possible
// (u, (ia{sv}av))
// Nautilus: 0, 2, []
// Answer: 14
/*
try!(m.as_result());
let mut i = m.iter_init();
let revision: u32 = try!(i.read());
let layout: (i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>, Vec<arg::Variant<Box<arg::RefArg>>>) = try!(i.read());
Ok((revision, layout))
*/
use dbus::Message;
use dbus::Member;
println!("getlayout called!");
let mut m = Message::new_method_call("com.canonical.dbusmenu", "com/canonical/dbusmenu", "com.canonical.dbusmenu", Member::new("com.canonical.dbusmenu".as_bytes()).unwrap()).unwrap();
try!(m.as_result());
let mut i = m.iter_init();
let mut map = HashMap::<String, arg::Variant<Box<arg::RefArg>>>::new();
map.insert("data-hello".into(), arg::Variant::new_refarg(&mut i).unwrap());
*self.revision.borrow_mut() += 1;
Ok((1, (*self.revision.borrow(), map, Vec::new())))
}
fn get_group_properties(&self, ids: Vec<i32>, property_names: Vec<&str>)
-> Result<Vec<(i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>)>, Self::Err> {
// I AM NOT SURE IF THS WORKS!
println!("get_group_properties called: {:?}, {:?}", ids, property_names);
/*
method call time=1510750424.121891
sender=:1.318
-> destination=org.freedesktop.DBus
serial=1 path=/org/freedesktop/DBus;
interface=org.freedesktop.DBus;
member=Hello
*/
// warning: other method is also called "hello"
// If Nautilus is called with [0], returns [(0, {'children-display': 'submenu'})]
let mut properties_hashmap = HashMap::<String, arg::Variant<Box<arg::RefArg>>>::new();
properties_hashmap.insert("label".into(), arg::Variant(Box::new("Hello".to_string())));
Ok(vec![(0, properties_hashmap)])
}
fn get_property(&self, id: i32, name: &str) -> Result<arg::Variant<Box<arg::RefArg>>, Self::Err> {
println!("get property called!");
// Nautilus get_propery(0, 'children-display') -> 'submenu'
Ok(arg::Variant(Box::new("everything is OK".to_string())))
}
fn event(&self, id: i32, event_id: &str, data: arg::Variant<Box<arg::RefArg>>, timestamp: u32) -> Result<(), Self::Err> {
println!("event called!");
if event_id == "clicked" {
println!("received clicked event for menu item {:?}", id);
} else if event_id == "hovered" {
println!("received hovered event for menu item {:?}", id);
}
Ok(())
}
fn about_to_show(&self, id: i32) -> Result<bool, Self::Err> {
// ??? "Whether this AboutToShow event should result in the menu being updated."
// not sure what this means
println!("about_to_show called, id: {:?}", id);
Ok(true)
}
fn g | &self) -> Result<u32, Self::Err> {
// ????
println!("about_to_show called!");
Ok(3)
}
fn get_status(&self) -> Result<String, Self::Err> {
println!("get_status called!");
// Menus will always be in "normal" state, may change later on
Ok("normal".into())
}
}
#[derive(Default, Clone)]
pub struct MData;
impl<'a> dbus::tree::DataType for MData {
type Tree = ();
type ObjectPath = Menu; // Every objectpath in the tree now owns a menu object.
type Property = ();
type Interface = ();
type Method = ();
type Signal = ();
}
/// Since parts of the menu are not printable, implement Debug trait manually
/// Needed because of a bug in rust: https://github.com/rust-lang/rust/issues/31518
impl ::std::fmt::Debug for Menu {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "Menu {{ /* non-printable fields omitted */ }}")
}
}
| et_version( | identifier_name |
menu.rs | //! Menu abstrction module
use std::collections::HashMap;
use std::rc::Rc;
use std::cell::RefCell;
use dbusmenu::ComCanonicalDbusmenu;
use dbus::arg;
use dbus;
#[derive(Default)]
pub struct Menu {
/// - `revision: i32`: The revision number of the layout.
/// For matching with layoutUpdated signals.
revision: Rc<RefCell<i32>>,
/// The window ID that the menu was created on
pub window_id: Option<u32>,
/// The actual Menu structure, indexed by their action name / identifier
pub menu: HashMap<&'static str, SubMenu>,
/// The current language.
/// **NOTE** : The default is "en", so make sure to have at least one
/// entry in the menu items labels that is indexed by "en"
pub cur_language: &'static str,
}
/// Top-level submenu. Not to be confused with MenuData::SubMenuItem
pub struct SubMenu {
/// The label of the menu
pub label: HashMap<String, String>,
/// The menu items, indexed by their action name
pub menu: HashMap<String, MenuItem>,
}
impl Menu {
/// Creates a new window, but doesn't add it to any window yet
/// Starts a new thread for maintaining the rendering loop
pub fn new() -> Self {
Self {
revision: Rc::new(RefCell::new(0)),
window_id: None,
menu: HashMap::new(),
cur_language: "en",
}
}
/// Adds the menu to the window - takes XID of window as parameter
pub fn add_to_window(&mut self, window_id: u32) {
self.window_id = Some(window_id);
// todo: notify app menu registrar here
println!("registered window!");
}
/// Removes the menu
pub fn remove_from_window(&mut self) {
self.window_id = None;
// appmenu unregister window
// should also be called on drop
println!("unregistered window!");
}
/// Removes an item from the menu list.
/// Does not error out, but rather returns if the removal was successful
pub fn remove_item<S: Into<String>>(item: S) -> bool {
let item_id = item.into();
println!("remove_item: {:?}", item_id);
false
}
/// Adds an item to the menu list.
/// Does not error out, but rather returns if the add was successful
pub fn add_item<S: Into<String>>(item: S) -> bool {
let item_id = item.into();
println!("add item: {:?}", item_id);
false
}
/// Actually constructs the window so that it shows the menu now
/// Sends the menu over DBus
pub fn show() {
}
}
pub enum MenuItem {
/// Text menu item, regular. Gets called if clicked
TextMenuItem(MenuData<Box<Fn() -> ()>>),
/// Checkbox menu item,
CheckboxMenuItem(MenuData<Box<Fn(bool) -> ()>>),
/// Radio menu item, consisting of multiple menu items.
/// Callback gets a string of the currently selected value
RadioMenuItem(Vec<MenuData<Box<Fn(String) -> ()>>>),
/// Seperator
Seperator(),
/// Submenu
SubMenuItem(String, Box<SubMenu>),
}
#[derive(Debug)]
pub struct MenuData<F> {
/// The action to execute, depends on the type of menu item
pub action: F,
/// Optional image as PNG bytes
pub image: Option<Vec<u8>>,
/// The label(s) of the menu item, indexed by language identifier
///
/// For example:
///
/// de - Datei öffnen
/// en - Open file
pub label: HashMap<String, String>,
/// Should the menu entry be activated on hovering
pub activate_on_hover: bool,
/// Optional shortcuts in the format of a string
/// `[["Control", "S"]]`
/// `[["Control", "Q"], ["Alt", "X"]]`
/// This is only a visual cue (todo: really?)
pub shortcut: Option<Vec<ShortcutData>>,
}
#[derive(Debug, Clone)]
pub enum ShortcutData {
/// The "Control" in CTRL + S
ControlChar(CtrlChar),
/// The "S" in CTRL + S
Char(String),
}
/// The four controls registered by dbus
#[derive(Debug, Copy, Clone)]
pub enum CtrlChar {
Ctrl,
Alt,
Shift,
Super,
}
/*
0 => [
"type" => "standard" | "seperator",
"label" => "Hello",
"enabled" => true,
"visible" => true,
"icon-name" => "hello.png",
"icon-data" => Vec<u8>,
"shortcut" => [["Control", "S"]],
"toggle-type" => "checkmark" | "radio", "",
"toggle-state" => MenuItemToggleState,
"children-display" => "" | "submenu",
],
defaults:
type = "standard",
label = "",
enabled = "",
visible = "",
icon-name = "",
icon-data = None,
shortcut = None,
toggle-type = "",
toggle-state = -1
children-display = "",
*/
#[derive(Debug)]
pub enum MenuItemToggleState {
On,
Off,
Invalid,
}
impl Into<i32> for MenuItemToggleState {
fn into(self) -> i32 {
match self {
MenuItemToggleState::On => 1,
MenuItemToggleState::Off => 0,
MenuItemToggleState::Invalid => -1,
}
}
}
/// Implement the ComCanonicalMenu so we can push it to the server
impl ComCanonicalDbusmenu for Menu {
type Err = dbus::tree::MethodErr;
/// - `parent_id`: The ID of the parent node for the layout. For grabbing the layout from the root node use zero.
/// - `recursion_depth`: The amount of levels of recursion to use. This affects the content of the second variant array.
/// - -1: deliver all the items under the @a parentId.
/// - 0: no recursion, the array will be empty.
/// - n: array will contains items up to 'n' level depth.
/// - `property_names`: The list of item properties we are interested in. If there are no entries in the list all of the properties will be sent.
///
/// ### Outputs
///
/// - `revision: i32`: The revision number of the layout. For matching with layoutUpdated signals.
/// - `layout: HashMap`: The layout, as a recursive structure.
///
fn get_layout(&self, parent_id: i32, recursion_depth: i32, property_names: Vec<&str>)
-> Result<(u32, (i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>, Vec<arg::Variant<Box<arg::RefArg>>>)), Self::Err> {
// I have no idea if this will actually work in any way possible
// (u, (ia{sv}av))
// Nautilus: 0, 2, []
// Answer: 14
/*
try!(m.as_result());
let mut i = m.iter_init();
let revision: u32 = try!(i.read());
let layout: (i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>, Vec<arg::Variant<Box<arg::RefArg>>>) = try!(i.read());
Ok((revision, layout))
*/
use dbus::Message;
use dbus::Member;
println!("getlayout called!");
let mut m = Message::new_method_call("com.canonical.dbusmenu", "com/canonical/dbusmenu", "com.canonical.dbusmenu", Member::new("com.canonical.dbusmenu".as_bytes()).unwrap()).unwrap();
try!(m.as_result());
let mut i = m.iter_init();
let mut map = HashMap::<String, arg::Variant<Box<arg::RefArg>>>::new();
map.insert("data-hello".into(), arg::Variant::new_refarg(&mut i).unwrap());
*self.revision.borrow_mut() += 1;
Ok((1, (*self.revision.borrow(), map, Vec::new())))
}
fn get_group_properties(&self, ids: Vec<i32>, property_names: Vec<&str>)
-> Result<Vec<(i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>)>, Self::Err> {
// I AM NOT SURE IF THS WORKS!
println!("get_group_properties called: {:?}, {:?}", ids, property_names);
/*
method call time=1510750424.121891
sender=:1.318
-> destination=org.freedesktop.DBus
serial=1 path=/org/freedesktop/DBus;
interface=org.freedesktop.DBus;
member=Hello
*/
// warning: other method is also called "hello"
// If Nautilus is called with [0], returns [(0, {'children-display': 'submenu'})]
let mut properties_hashmap = HashMap::<String, arg::Variant<Box<arg::RefArg>>>::new();
properties_hashmap.insert("label".into(), arg::Variant(Box::new("Hello".to_string())));
Ok(vec![(0, properties_hashmap)])
}
fn get_property(&self, id: i32, name: &str) -> Result<arg::Variant<Box<arg::RefArg>>, Self::Err> {
println!("get property called!");
// Nautilus get_propery(0, 'children-display') -> 'submenu'
Ok(arg::Variant(Box::new("everything is OK".to_string())))
}
fn event(&self, id: i32, event_id: &str, data: arg::Variant<Box<arg::RefArg>>, timestamp: u32) -> Result<(), Self::Err> {
println!("event called!");
if event_id == "clicked" {
println!("received clicked event for menu item {:?}", id);
} else if event_id == "hovered" {
println!("received hovered event for menu item {:?}", id);
}
Ok(())
}
fn about_to_show(&self, id: i32) -> Result<bool, Self::Err> {
// ??? "Whether this AboutToShow event should result in the menu being updated."
// not sure what this means
println!("about_to_show called, id: {:?}", id);
Ok(true)
}
fn get_version(&self) -> Result<u32, Self::Err> {
// ????
println!("about_to_show called!");
Ok(3)
}
fn get_status(&self) -> Result<String, Self::Err> {
println!("get_status called!");
// Menus will always be in "normal" state, may change later on
Ok("normal".into())
} | }
#[derive(Default, Clone)]
pub struct MData;
impl<'a> dbus::tree::DataType for MData {
type Tree = ();
type ObjectPath = Menu; // Every objectpath in the tree now owns a menu object.
type Property = ();
type Interface = ();
type Method = ();
type Signal = ();
}
/// Since parts of the menu are not printable, implement Debug trait manually
/// Needed because of a bug in rust: https://github.com/rust-lang/rust/issues/31518
impl ::std::fmt::Debug for Menu {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "Menu {{ /* non-printable fields omitted */ }}")
}
} | random_line_split | |
poll_evented.rs | use crate::io::driver::{Direction, Handle, ReadyEvent};
use crate::io::registration::Registration;
use crate::io::{AsyncRead, AsyncWrite, ReadBuf};
use mio::event::Evented;
use std::fmt;
use std::io::{self, Read, Write};
use std::marker::Unpin;
use std::pin::Pin;
use std::task::{Context, Poll};
cfg_io_driver! {
/// Associates an I/O resource that implements the [`std::io::Read`] and/or
/// [`std::io::Write`] traits with the reactor that drives it.
///
/// `PollEvented` uses [`Registration`] internally to take a type that
/// implements [`mio::Evented`] as well as [`std::io::Read`] and or
/// [`std::io::Write`] and associate it with a reactor that will drive it.
///
/// Once the [`mio::Evented`] type is wrapped by `PollEvented`, it can be
/// used from within the future's execution model. As such, the
/// `PollEvented` type provides [`AsyncRead`] and [`AsyncWrite`]
/// implementations using the underlying I/O resource as well as readiness
/// events provided by the reactor.
///
/// **Note**: While `PollEvented` is `Sync` (if the underlying I/O type is
/// `Sync`), the caller must ensure that there are at most two tasks that
/// use a `PollEvented` instance concurrently. One for reading and one for
/// writing. While violating this requirement is "safe" from a Rust memory
/// model point of view, it will result in unexpected behavior in the form
/// of lost notifications and tasks hanging.
///
/// ## Readiness events
///
/// Besides just providing [`AsyncRead`] and [`AsyncWrite`] implementations,
/// this type also supports access to the underlying readiness event stream.
/// While similar in function to what [`Registration`] provides, the
/// semantics are a bit different.
///
/// Two functions are provided to access the readiness events:
/// [`poll_read_ready`] and [`poll_write_ready`]. These functions return the
/// current readiness state of the `PollEvented` instance. If
/// [`poll_read_ready`] indicates read readiness, immediately calling
/// [`poll_read_ready`] again will also indicate read readiness.
///
/// When the operation is attempted and is unable to succeed due to the I/O
/// resource not being ready, the caller must call [`clear_read_ready`] or
/// [`clear_write_ready`]. This clears the readiness state until a new
/// readiness event is received.
///
/// This allows the caller to implement additional functions. For example,
/// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and
/// [`clear_read_ready`].
///
/// ## Platform-specific events
///
/// `PollEvented` also allows receiving platform-specific `mio::Ready` events.
/// These events are included as part of the read readiness event stream. The
/// write readiness event stream is only for `Ready::writable()` events.
///
/// [`std::io::Read`]: trait@std::io::Read
/// [`std::io::Write`]: trait@std::io::Write
/// [`AsyncRead`]: trait@AsyncRead
/// [`AsyncWrite`]: trait@AsyncWrite
/// [`mio::Evented`]: trait@mio::Evented
/// [`Registration`]: struct@Registration
/// [`TcpListener`]: struct@crate::net::TcpListener
/// [`clear_read_ready`]: method@Self::clear_read_ready
/// [`clear_write_ready`]: method@Self::clear_write_ready
/// [`poll_read_ready`]: method@Self::poll_read_ready
/// [`poll_write_ready`]: method@Self::poll_write_ready
pub(crate) struct PollEvented<E: Evented> {
io: Option<E>,
registration: Registration,
}
}
// ===== impl PollEvented =====
impl<E> PollEvented<E>
where
E: Evented,
{
/// Creates a new `PollEvented` associated with the default reactor.
///
/// # Panics
///
/// This function panics if thread-local runtime is not set.
///
/// The runtime is usually set implicitly when this function is called
/// from a future driven by a tokio runtime, otherwise runtime can be set
/// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function.
#[cfg_attr(feature = "signal", allow(unused))]
pub(crate) fn new(io: E) -> io::Result<Self> {
PollEvented::new_with_ready(io, mio::Ready::all())
}
/// Creates a new `PollEvented` associated with the default reactor, for specific `mio::Ready`
/// state. `new_with_ready` should be used over `new` when you need control over the readiness
/// state, such as when a file descriptor only allows reads. This does not add `hup` or `error`
/// so if you are interested in those states, you will need to add them to the readiness state
/// passed to this function.
///
/// An example to listen to read only
///
/// ```rust
/// ##[cfg(unix)]
/// mio::Ready::from_usize(
/// mio::Ready::readable().as_usize()
/// | mio::unix::UnixReady::error().as_usize()
/// | mio::unix::UnixReady::hup().as_usize()
/// );
/// ```
///
/// # Panics
///
/// This function panics if thread-local runtime is not set.
///
/// The runtime is usually set implicitly when this function is called
/// from a future driven by a tokio runtime, otherwise runtime can be set
/// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function.
#[cfg_attr(feature = "signal", allow(unused))]
pub(crate) fn new_with_ready(io: E, ready: mio::Ready) -> io::Result<Self> {
Self::new_with_ready_and_handle(io, ready, Handle::current())
}
pub(crate) fn new_with_ready_and_handle(
io: E,
ready: mio::Ready,
handle: Handle,
) -> io::Result<Self> {
let registration = Registration::new_with_ready_and_handle(&io, ready, handle)?;
Ok(Self {
io: Some(io),
registration,
})
}
/// Returns a shared reference to the underlying I/O object this readiness
/// stream is wrapping.
#[cfg(any(
feature = "process",
feature = "tcp",
feature = "udp",
feature = "uds",
feature = "signal"
))]
pub(crate) fn get_ref(&self) -> &E {
self.io.as_ref().unwrap()
}
/// Returns a mutable reference to the underlying I/O object this readiness
/// stream is wrapping.
pub(crate) fn get_mut(&mut self) -> &mut E {
self.io.as_mut().unwrap()
}
/// Consumes self, returning the inner I/O object
///
/// This function will deregister the I/O resource from the reactor before
/// returning. If the deregistration operation fails, an error is returned.
///
/// Note that deregistering does not guarantee that the I/O resource can be
/// registered with a different reactor. Some I/O resource types can only be
/// associated with a single reactor instance for their lifetime.
#[cfg(any(feature = "tcp", feature = "udp", feature = "uds"))]
pub(crate) fn into_inner(mut self) -> io::Result<E> {
let io = self.io.take().unwrap();
self.registration.deregister(&io)?;
Ok(io)
}
pub(crate) fn clear_readiness(&self, event: ReadyEvent) {
self.registration.clear_readiness(event);
}
/// Checks the I/O resource's read readiness state.
///
/// The mask argument allows specifying what readiness to notify on. This
/// can be any value, including platform specific readiness, **except**
/// `writable`. HUP is always implicitly included on platforms that support
/// it.
///
/// If the resource is not ready for a read then `Poll::Pending` is returned
/// and the current task is notified once a new event is received.
///
/// The I/O resource will remain in a read-ready state until readiness is
/// cleared by calling [`clear_read_ready`].
///
/// [`clear_read_ready`]: method@Self::clear_read_ready
///
/// # Panics
///
/// This function panics if:
///
/// * `ready` includes writable.
/// * called from outside of a task context.
///
/// # Warning
///
/// This method may not be called concurrently. It takes `&self` to allow
/// calling it concurrently with `poll_write_ready`.
pub(crate) fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> {
self.registration.poll_readiness(cx, Direction::Read)
}
/// Checks the I/O resource's write readiness state.
///
/// This always checks for writable readiness and also checks for HUP
/// readiness on platforms that support it.
///
/// If the resource is not ready for a write then `Poll::Pending` is
/// returned and the current task is notified once a new event is received.
///
/// The I/O resource will remain in a write-ready state until readiness is
/// cleared by calling [`clear_write_ready`].
///
/// [`clear_write_ready`]: method@Self::clear_write_ready
///
/// # Panics
///
/// This function panics if:
///
/// * `ready` contains bits besides `writable` and `hup`.
/// * called from outside of a task context.
///
/// # Warning
///
/// This method may not be called concurrently. It takes `&self` to allow
/// calling it concurrently with `poll_read_ready`.
pub(crate) fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> {
self.registration.poll_readiness(cx, Direction::Write)
}
}
cfg_io_readiness! {
impl<E> PollEvented<E>
where
E: Evented,
{
pub(crate) async fn readiness(&self, interest: mio::Ready) -> io::Result<ReadyEvent> {
self.registration.readiness(interest).await
}
pub(crate) async fn async_io<F, R>(&self, interest: mio::Ready, mut op: F) -> io::Result<R>
where
F: FnMut(&E) -> io::Result<R>,
{
loop {
let event = self.readiness(interest).await?;
match op(self.get_ref()) {
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
self.clear_readiness(event);
}
x => return x,
}
}
}
}
}
// ===== Read / Write impls =====
impl<E> AsyncRead for PollEvented<E>
where
E: Evented + Read + Unpin,
{
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
loop {
let ev = ready!(self.poll_read_ready(cx))?;
// We can't assume the `Read` won't look at the read buffer,
// so we have to force initialization here.
let r = (*self).get_mut().read(buf.initialize_unfilled());
if is_wouldblock(&r) {
self.clear_readiness(ev);
continue;
}
return Poll::Ready(r.map(|n| {
buf.add_filled(n);
}));
}
}
}
impl<E> AsyncWrite for PollEvented<E>
where
E: Evented + Write + Unpin,
{
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
loop {
let ev = ready!(self.poll_write_ready(cx))?;
let r = (*self).get_mut().write(buf);
if is_wouldblock(&r) {
self.clear_readiness(ev);
continue;
}
return Poll::Ready(r);
}
}
fn | (mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
loop {
let ev = ready!(self.poll_write_ready(cx))?;
let r = (*self).get_mut().flush();
if is_wouldblock(&r) {
self.clear_readiness(ev);
continue;
}
return Poll::Ready(r);
}
}
fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Poll::Ready(Ok(()))
}
}
fn is_wouldblock<T>(r: &io::Result<T>) -> bool {
match *r {
Ok(_) => false,
Err(ref e) => e.kind() == io::ErrorKind::WouldBlock,
}
}
impl<E: Evented + fmt::Debug> fmt::Debug for PollEvented<E> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PollEvented").field("io", &self.io).finish()
}
}
impl<E: Evented> Drop for PollEvented<E> {
fn drop(&mut self) {
if let Some(io) = self.io.take() {
// Ignore errors
let _ = self.registration.deregister(&io);
}
}
}
| poll_flush | identifier_name |
poll_evented.rs | use crate::io::driver::{Direction, Handle, ReadyEvent};
use crate::io::registration::Registration;
use crate::io::{AsyncRead, AsyncWrite, ReadBuf};
use mio::event::Evented;
use std::fmt;
use std::io::{self, Read, Write};
use std::marker::Unpin;
use std::pin::Pin;
use std::task::{Context, Poll};
cfg_io_driver! {
/// Associates an I/O resource that implements the [`std::io::Read`] and/or
/// [`std::io::Write`] traits with the reactor that drives it.
///
/// `PollEvented` uses [`Registration`] internally to take a type that
/// implements [`mio::Evented`] as well as [`std::io::Read`] and or
/// [`std::io::Write`] and associate it with a reactor that will drive it.
///
/// Once the [`mio::Evented`] type is wrapped by `PollEvented`, it can be
/// used from within the future's execution model. As such, the
/// `PollEvented` type provides [`AsyncRead`] and [`AsyncWrite`]
/// implementations using the underlying I/O resource as well as readiness
/// events provided by the reactor.
///
/// **Note**: While `PollEvented` is `Sync` (if the underlying I/O type is
/// `Sync`), the caller must ensure that there are at most two tasks that
/// use a `PollEvented` instance concurrently. One for reading and one for
/// writing. While violating this requirement is "safe" from a Rust memory
/// model point of view, it will result in unexpected behavior in the form
/// of lost notifications and tasks hanging.
///
/// ## Readiness events
///
/// Besides just providing [`AsyncRead`] and [`AsyncWrite`] implementations,
/// this type also supports access to the underlying readiness event stream.
/// While similar in function to what [`Registration`] provides, the
/// semantics are a bit different.
///
/// Two functions are provided to access the readiness events:
/// [`poll_read_ready`] and [`poll_write_ready`]. These functions return the
/// current readiness state of the `PollEvented` instance. If
/// [`poll_read_ready`] indicates read readiness, immediately calling
/// [`poll_read_ready`] again will also indicate read readiness.
///
/// When the operation is attempted and is unable to succeed due to the I/O
/// resource not being ready, the caller must call [`clear_read_ready`] or
/// [`clear_write_ready`]. This clears the readiness state until a new
/// readiness event is received.
///
/// This allows the caller to implement additional functions. For example,
/// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and
/// [`clear_read_ready`].
///
/// ## Platform-specific events
///
/// `PollEvented` also allows receiving platform-specific `mio::Ready` events.
/// These events are included as part of the read readiness event stream. The
/// write readiness event stream is only for `Ready::writable()` events.
///
/// [`std::io::Read`]: trait@std::io::Read
/// [`std::io::Write`]: trait@std::io::Write
/// [`AsyncRead`]: trait@AsyncRead
/// [`AsyncWrite`]: trait@AsyncWrite
/// [`mio::Evented`]: trait@mio::Evented
/// [`Registration`]: struct@Registration
/// [`TcpListener`]: struct@crate::net::TcpListener
/// [`clear_read_ready`]: method@Self::clear_read_ready
/// [`clear_write_ready`]: method@Self::clear_write_ready
/// [`poll_read_ready`]: method@Self::poll_read_ready
/// [`poll_write_ready`]: method@Self::poll_write_ready
pub(crate) struct PollEvented<E: Evented> {
io: Option<E>,
registration: Registration,
}
}
// ===== impl PollEvented =====
impl<E> PollEvented<E>
where
E: Evented,
{
/// Creates a new `PollEvented` associated with the default reactor.
///
/// # Panics
///
/// This function panics if thread-local runtime is not set.
///
/// The runtime is usually set implicitly when this function is called
/// from a future driven by a tokio runtime, otherwise runtime can be set
/// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function.
#[cfg_attr(feature = "signal", allow(unused))]
pub(crate) fn new(io: E) -> io::Result<Self> {
PollEvented::new_with_ready(io, mio::Ready::all())
}
/// Creates a new `PollEvented` associated with the default reactor, for specific `mio::Ready`
/// state. `new_with_ready` should be used over `new` when you need control over the readiness
/// state, such as when a file descriptor only allows reads. This does not add `hup` or `error`
/// so if you are interested in those states, you will need to add them to the readiness state
/// passed to this function.
///
/// An example to listen to read only
///
/// ```rust
/// ##[cfg(unix)]
/// mio::Ready::from_usize(
/// mio::Ready::readable().as_usize()
/// | mio::unix::UnixReady::error().as_usize()
/// | mio::unix::UnixReady::hup().as_usize()
/// );
/// ```
///
/// # Panics
///
/// This function panics if thread-local runtime is not set.
///
/// The runtime is usually set implicitly when this function is called
/// from a future driven by a tokio runtime, otherwise runtime can be set
/// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function.
#[cfg_attr(feature = "signal", allow(unused))]
pub(crate) fn new_with_ready(io: E, ready: mio::Ready) -> io::Result<Self> {
Self::new_with_ready_and_handle(io, ready, Handle::current())
}
pub(crate) fn new_with_ready_and_handle(
io: E,
ready: mio::Ready,
handle: Handle,
) -> io::Result<Self> |
/// Returns a shared reference to the underlying I/O object this readiness
/// stream is wrapping.
#[cfg(any(
feature = "process",
feature = "tcp",
feature = "udp",
feature = "uds",
feature = "signal"
))]
pub(crate) fn get_ref(&self) -> &E {
self.io.as_ref().unwrap()
}
/// Returns a mutable reference to the underlying I/O object this readiness
/// stream is wrapping.
pub(crate) fn get_mut(&mut self) -> &mut E {
self.io.as_mut().unwrap()
}
/// Consumes self, returning the inner I/O object
///
/// This function will deregister the I/O resource from the reactor before
/// returning. If the deregistration operation fails, an error is returned.
///
/// Note that deregistering does not guarantee that the I/O resource can be
/// registered with a different reactor. Some I/O resource types can only be
/// associated with a single reactor instance for their lifetime.
#[cfg(any(feature = "tcp", feature = "udp", feature = "uds"))]
pub(crate) fn into_inner(mut self) -> io::Result<E> {
let io = self.io.take().unwrap();
self.registration.deregister(&io)?;
Ok(io)
}
pub(crate) fn clear_readiness(&self, event: ReadyEvent) {
self.registration.clear_readiness(event);
}
/// Checks the I/O resource's read readiness state.
///
/// The mask argument allows specifying what readiness to notify on. This
/// can be any value, including platform specific readiness, **except**
/// `writable`. HUP is always implicitly included on platforms that support
/// it.
///
/// If the resource is not ready for a read then `Poll::Pending` is returned
/// and the current task is notified once a new event is received.
///
/// The I/O resource will remain in a read-ready state until readiness is
/// cleared by calling [`clear_read_ready`].
///
/// [`clear_read_ready`]: method@Self::clear_read_ready
///
/// # Panics
///
/// This function panics if:
///
/// * `ready` includes writable.
/// * called from outside of a task context.
///
/// # Warning
///
/// This method may not be called concurrently. It takes `&self` to allow
/// calling it concurrently with `poll_write_ready`.
pub(crate) fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> {
self.registration.poll_readiness(cx, Direction::Read)
}
/// Checks the I/O resource's write readiness state.
///
/// This always checks for writable readiness and also checks for HUP
/// readiness on platforms that support it.
///
/// If the resource is not ready for a write then `Poll::Pending` is
/// returned and the current task is notified once a new event is received.
///
/// The I/O resource will remain in a write-ready state until readiness is
/// cleared by calling [`clear_write_ready`].
///
/// [`clear_write_ready`]: method@Self::clear_write_ready
///
/// # Panics
///
/// This function panics if:
///
/// * `ready` contains bits besides `writable` and `hup`.
/// * called from outside of a task context.
///
/// # Warning
///
/// This method may not be called concurrently. It takes `&self` to allow
/// calling it concurrently with `poll_read_ready`.
pub(crate) fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> {
self.registration.poll_readiness(cx, Direction::Write)
}
}
cfg_io_readiness! {
impl<E> PollEvented<E>
where
E: Evented,
{
pub(crate) async fn readiness(&self, interest: mio::Ready) -> io::Result<ReadyEvent> {
self.registration.readiness(interest).await
}
pub(crate) async fn async_io<F, R>(&self, interest: mio::Ready, mut op: F) -> io::Result<R>
where
F: FnMut(&E) -> io::Result<R>,
{
loop {
let event = self.readiness(interest).await?;
match op(self.get_ref()) {
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
self.clear_readiness(event);
}
x => return x,
}
}
}
}
}
// ===== Read / Write impls =====
impl<E> AsyncRead for PollEvented<E>
where
E: Evented + Read + Unpin,
{
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
loop {
let ev = ready!(self.poll_read_ready(cx))?;
// We can't assume the `Read` won't look at the read buffer,
// so we have to force initialization here.
let r = (*self).get_mut().read(buf.initialize_unfilled());
if is_wouldblock(&r) {
self.clear_readiness(ev);
continue;
}
return Poll::Ready(r.map(|n| {
buf.add_filled(n);
}));
}
}
}
impl<E> AsyncWrite for PollEvented<E>
where
E: Evented + Write + Unpin,
{
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
loop {
let ev = ready!(self.poll_write_ready(cx))?;
let r = (*self).get_mut().write(buf);
if is_wouldblock(&r) {
self.clear_readiness(ev);
continue;
}
return Poll::Ready(r);
}
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
loop {
let ev = ready!(self.poll_write_ready(cx))?;
let r = (*self).get_mut().flush();
if is_wouldblock(&r) {
self.clear_readiness(ev);
continue;
}
return Poll::Ready(r);
}
}
fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Poll::Ready(Ok(()))
}
}
fn is_wouldblock<T>(r: &io::Result<T>) -> bool {
match *r {
Ok(_) => false,
Err(ref e) => e.kind() == io::ErrorKind::WouldBlock,
}
}
impl<E: Evented + fmt::Debug> fmt::Debug for PollEvented<E> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PollEvented").field("io", &self.io).finish()
}
}
impl<E: Evented> Drop for PollEvented<E> {
fn drop(&mut self) {
if let Some(io) = self.io.take() {
// Ignore errors
let _ = self.registration.deregister(&io);
}
}
}
| {
let registration = Registration::new_with_ready_and_handle(&io, ready, handle)?;
Ok(Self {
io: Some(io),
registration,
})
} | identifier_body |
poll_evented.rs | use crate::io::driver::{Direction, Handle, ReadyEvent};
use crate::io::registration::Registration;
use crate::io::{AsyncRead, AsyncWrite, ReadBuf};
use mio::event::Evented;
use std::fmt;
use std::io::{self, Read, Write};
use std::marker::Unpin;
use std::pin::Pin;
use std::task::{Context, Poll};
cfg_io_driver! {
/// Associates an I/O resource that implements the [`std::io::Read`] and/or
/// [`std::io::Write`] traits with the reactor that drives it.
///
/// `PollEvented` uses [`Registration`] internally to take a type that
/// implements [`mio::Evented`] as well as [`std::io::Read`] and or
/// [`std::io::Write`] and associate it with a reactor that will drive it.
///
/// Once the [`mio::Evented`] type is wrapped by `PollEvented`, it can be
/// used from within the future's execution model. As such, the
/// `PollEvented` type provides [`AsyncRead`] and [`AsyncWrite`]
/// implementations using the underlying I/O resource as well as readiness
/// events provided by the reactor.
///
/// **Note**: While `PollEvented` is `Sync` (if the underlying I/O type is
/// `Sync`), the caller must ensure that there are at most two tasks that
/// use a `PollEvented` instance concurrently. One for reading and one for
/// writing. While violating this requirement is "safe" from a Rust memory
/// model point of view, it will result in unexpected behavior in the form
/// of lost notifications and tasks hanging.
///
/// ## Readiness events
///
/// Besides just providing [`AsyncRead`] and [`AsyncWrite`] implementations,
/// this type also supports access to the underlying readiness event stream.
/// While similar in function to what [`Registration`] provides, the
/// semantics are a bit different.
///
/// Two functions are provided to access the readiness events:
/// [`poll_read_ready`] and [`poll_write_ready`]. These functions return the
/// current readiness state of the `PollEvented` instance. If
/// [`poll_read_ready`] indicates read readiness, immediately calling
/// [`poll_read_ready`] again will also indicate read readiness.
///
/// When the operation is attempted and is unable to succeed due to the I/O
/// resource not being ready, the caller must call [`clear_read_ready`] or
/// [`clear_write_ready`]. This clears the readiness state until a new
/// readiness event is received.
///
/// This allows the caller to implement additional functions. For example,
/// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and
/// [`clear_read_ready`].
///
/// ## Platform-specific events
///
/// `PollEvented` also allows receiving platform-specific `mio::Ready` events.
/// These events are included as part of the read readiness event stream. The
/// write readiness event stream is only for `Ready::writable()` events.
///
/// [`std::io::Read`]: trait@std::io::Read
/// [`std::io::Write`]: trait@std::io::Write
/// [`AsyncRead`]: trait@AsyncRead
/// [`AsyncWrite`]: trait@AsyncWrite
/// [`mio::Evented`]: trait@mio::Evented
/// [`Registration`]: struct@Registration
/// [`TcpListener`]: struct@crate::net::TcpListener
/// [`clear_read_ready`]: method@Self::clear_read_ready
/// [`clear_write_ready`]: method@Self::clear_write_ready
/// [`poll_read_ready`]: method@Self::poll_read_ready
/// [`poll_write_ready`]: method@Self::poll_write_ready
pub(crate) struct PollEvented<E: Evented> {
io: Option<E>,
registration: Registration,
}
}
// ===== impl PollEvented =====
impl<E> PollEvented<E>
where
E: Evented,
{
/// Creates a new `PollEvented` associated with the default reactor.
///
/// # Panics
///
/// This function panics if thread-local runtime is not set.
///
/// The runtime is usually set implicitly when this function is called
/// from a future driven by a tokio runtime, otherwise runtime can be set
/// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function.
#[cfg_attr(feature = "signal", allow(unused))]
pub(crate) fn new(io: E) -> io::Result<Self> {
PollEvented::new_with_ready(io, mio::Ready::all())
}
/// Creates a new `PollEvented` associated with the default reactor, for specific `mio::Ready`
/// state. `new_with_ready` should be used over `new` when you need control over the readiness
/// state, such as when a file descriptor only allows reads. This does not add `hup` or `error`
/// so if you are interested in those states, you will need to add them to the readiness state
/// passed to this function.
///
/// An example to listen to read only
///
/// ```rust
/// ##[cfg(unix)]
/// mio::Ready::from_usize(
/// mio::Ready::readable().as_usize()
/// | mio::unix::UnixReady::error().as_usize()
/// | mio::unix::UnixReady::hup().as_usize()
/// );
/// ```
///
/// # Panics
///
/// This function panics if thread-local runtime is not set.
///
/// The runtime is usually set implicitly when this function is called
/// from a future driven by a tokio runtime, otherwise runtime can be set
/// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function.
#[cfg_attr(feature = "signal", allow(unused))]
pub(crate) fn new_with_ready(io: E, ready: mio::Ready) -> io::Result<Self> {
Self::new_with_ready_and_handle(io, ready, Handle::current())
}
pub(crate) fn new_with_ready_and_handle(
io: E,
ready: mio::Ready,
handle: Handle,
) -> io::Result<Self> {
let registration = Registration::new_with_ready_and_handle(&io, ready, handle)?;
Ok(Self {
io: Some(io),
registration,
})
}
/// Returns a shared reference to the underlying I/O object this readiness
/// stream is wrapping.
#[cfg(any(
feature = "process",
feature = "tcp",
feature = "udp",
feature = "uds",
feature = "signal"
))]
pub(crate) fn get_ref(&self) -> &E {
self.io.as_ref().unwrap()
}
/// Returns a mutable reference to the underlying I/O object this readiness
/// stream is wrapping.
pub(crate) fn get_mut(&mut self) -> &mut E {
self.io.as_mut().unwrap()
}
/// Consumes self, returning the inner I/O object
///
/// This function will deregister the I/O resource from the reactor before
/// returning. If the deregistration operation fails, an error is returned.
///
/// Note that deregistering does not guarantee that the I/O resource can be
/// registered with a different reactor. Some I/O resource types can only be
/// associated with a single reactor instance for their lifetime.
#[cfg(any(feature = "tcp", feature = "udp", feature = "uds"))]
pub(crate) fn into_inner(mut self) -> io::Result<E> {
let io = self.io.take().unwrap();
self.registration.deregister(&io)?;
Ok(io)
}
pub(crate) fn clear_readiness(&self, event: ReadyEvent) {
self.registration.clear_readiness(event);
}
/// Checks the I/O resource's read readiness state.
///
/// The mask argument allows specifying what readiness to notify on. This
/// can be any value, including platform specific readiness, **except**
/// `writable`. HUP is always implicitly included on platforms that support
/// it.
///
/// If the resource is not ready for a read then `Poll::Pending` is returned
/// and the current task is notified once a new event is received.
///
/// The I/O resource will remain in a read-ready state until readiness is
/// cleared by calling [`clear_read_ready`].
///
/// [`clear_read_ready`]: method@Self::clear_read_ready
///
/// # Panics
///
/// This function panics if:
///
/// * `ready` includes writable.
/// * called from outside of a task context.
///
/// # Warning
///
/// This method may not be called concurrently. It takes `&self` to allow
/// calling it concurrently with `poll_write_ready`.
pub(crate) fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> {
self.registration.poll_readiness(cx, Direction::Read)
}
/// Checks the I/O resource's write readiness state.
///
/// This always checks for writable readiness and also checks for HUP
/// readiness on platforms that support it.
///
/// If the resource is not ready for a write then `Poll::Pending` is
/// returned and the current task is notified once a new event is received.
///
/// The I/O resource will remain in a write-ready state until readiness is
/// cleared by calling [`clear_write_ready`].
///
/// [`clear_write_ready`]: method@Self::clear_write_ready
///
/// # Panics
///
/// This function panics if:
///
/// * `ready` contains bits besides `writable` and `hup`.
/// * called from outside of a task context.
///
/// # Warning
///
/// This method may not be called concurrently. It takes `&self` to allow
/// calling it concurrently with `poll_read_ready`.
pub(crate) fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> {
self.registration.poll_readiness(cx, Direction::Write)
}
}
cfg_io_readiness! {
impl<E> PollEvented<E>
where
E: Evented,
{
pub(crate) async fn readiness(&self, interest: mio::Ready) -> io::Result<ReadyEvent> {
self.registration.readiness(interest).await
}
pub(crate) async fn async_io<F, R>(&self, interest: mio::Ready, mut op: F) -> io::Result<R>
where
F: FnMut(&E) -> io::Result<R>,
{
loop {
let event = self.readiness(interest).await?;
match op(self.get_ref()) {
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
self.clear_readiness(event);
}
x => return x,
}
}
}
}
}
// ===== Read / Write impls =====
impl<E> AsyncRead for PollEvented<E>
where
E: Evented + Read + Unpin,
{
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
loop {
let ev = ready!(self.poll_read_ready(cx))?;
// We can't assume the `Read` won't look at the read buffer,
// so we have to force initialization here.
let r = (*self).get_mut().read(buf.initialize_unfilled());
if is_wouldblock(&r) |
return Poll::Ready(r.map(|n| {
buf.add_filled(n);
}));
}
}
}
impl<E> AsyncWrite for PollEvented<E>
where
E: Evented + Write + Unpin,
{
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
loop {
let ev = ready!(self.poll_write_ready(cx))?;
let r = (*self).get_mut().write(buf);
if is_wouldblock(&r) {
self.clear_readiness(ev);
continue;
}
return Poll::Ready(r);
}
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
loop {
let ev = ready!(self.poll_write_ready(cx))?;
let r = (*self).get_mut().flush();
if is_wouldblock(&r) {
self.clear_readiness(ev);
continue;
}
return Poll::Ready(r);
}
}
fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Poll::Ready(Ok(()))
}
}
fn is_wouldblock<T>(r: &io::Result<T>) -> bool {
match *r {
Ok(_) => false,
Err(ref e) => e.kind() == io::ErrorKind::WouldBlock,
}
}
impl<E: Evented + fmt::Debug> fmt::Debug for PollEvented<E> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PollEvented").field("io", &self.io).finish()
}
}
impl<E: Evented> Drop for PollEvented<E> {
fn drop(&mut self) {
if let Some(io) = self.io.take() {
// Ignore errors
let _ = self.registration.deregister(&io);
}
}
}
| {
self.clear_readiness(ev);
continue;
} | conditional_block |
poll_evented.rs | use crate::io::driver::{Direction, Handle, ReadyEvent};
use crate::io::registration::Registration;
use crate::io::{AsyncRead, AsyncWrite, ReadBuf};
use mio::event::Evented;
use std::fmt;
use std::io::{self, Read, Write};
use std::marker::Unpin;
use std::pin::Pin;
use std::task::{Context, Poll};
cfg_io_driver! {
/// Associates an I/O resource that implements the [`std::io::Read`] and/or
/// [`std::io::Write`] traits with the reactor that drives it.
///
/// `PollEvented` uses [`Registration`] internally to take a type that
/// implements [`mio::Evented`] as well as [`std::io::Read`] and or
/// [`std::io::Write`] and associate it with a reactor that will drive it.
///
/// Once the [`mio::Evented`] type is wrapped by `PollEvented`, it can be
/// used from within the future's execution model. As such, the
/// `PollEvented` type provides [`AsyncRead`] and [`AsyncWrite`]
/// implementations using the underlying I/O resource as well as readiness
/// events provided by the reactor.
///
/// **Note**: While `PollEvented` is `Sync` (if the underlying I/O type is
/// `Sync`), the caller must ensure that there are at most two tasks that
/// use a `PollEvented` instance concurrently. One for reading and one for
/// writing. While violating this requirement is "safe" from a Rust memory
/// model point of view, it will result in unexpected behavior in the form
/// of lost notifications and tasks hanging.
///
/// ## Readiness events
///
/// Besides just providing [`AsyncRead`] and [`AsyncWrite`] implementations,
/// this type also supports access to the underlying readiness event stream.
/// While similar in function to what [`Registration`] provides, the
/// semantics are a bit different.
///
/// Two functions are provided to access the readiness events:
/// [`poll_read_ready`] and [`poll_write_ready`]. These functions return the
/// current readiness state of the `PollEvented` instance. If
/// [`poll_read_ready`] indicates read readiness, immediately calling
/// [`poll_read_ready`] again will also indicate read readiness.
///
/// When the operation is attempted and is unable to succeed due to the I/O
/// resource not being ready, the caller must call [`clear_read_ready`] or
/// [`clear_write_ready`]. This clears the readiness state until a new
/// readiness event is received.
///
/// This allows the caller to implement additional functions. For example,
/// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and
/// [`clear_read_ready`].
///
/// ## Platform-specific events
///
/// `PollEvented` also allows receiving platform-specific `mio::Ready` events.
/// These events are included as part of the read readiness event stream. The
/// write readiness event stream is only for `Ready::writable()` events.
///
/// [`std::io::Read`]: trait@std::io::Read
/// [`std::io::Write`]: trait@std::io::Write
/// [`AsyncRead`]: trait@AsyncRead
/// [`AsyncWrite`]: trait@AsyncWrite
/// [`mio::Evented`]: trait@mio::Evented
/// [`Registration`]: struct@Registration
/// [`TcpListener`]: struct@crate::net::TcpListener
/// [`clear_read_ready`]: method@Self::clear_read_ready
/// [`clear_write_ready`]: method@Self::clear_write_ready
/// [`poll_read_ready`]: method@Self::poll_read_ready
/// [`poll_write_ready`]: method@Self::poll_write_ready
pub(crate) struct PollEvented<E: Evented> {
io: Option<E>,
registration: Registration,
}
}
// ===== impl PollEvented =====
impl<E> PollEvented<E>
where
E: Evented,
{
/// Creates a new `PollEvented` associated with the default reactor.
///
/// # Panics
///
/// This function panics if thread-local runtime is not set.
///
/// The runtime is usually set implicitly when this function is called
/// from a future driven by a tokio runtime, otherwise runtime can be set
/// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function.
#[cfg_attr(feature = "signal", allow(unused))]
pub(crate) fn new(io: E) -> io::Result<Self> {
PollEvented::new_with_ready(io, mio::Ready::all())
}
/// Creates a new `PollEvented` associated with the default reactor, for specific `mio::Ready`
/// state. `new_with_ready` should be used over `new` when you need control over the readiness
/// state, such as when a file descriptor only allows reads. This does not add `hup` or `error`
/// so if you are interested in those states, you will need to add them to the readiness state
/// passed to this function.
///
/// An example to listen to read only
///
/// ```rust
/// ##[cfg(unix)]
/// mio::Ready::from_usize(
/// mio::Ready::readable().as_usize()
/// | mio::unix::UnixReady::error().as_usize()
/// | mio::unix::UnixReady::hup().as_usize()
/// );
/// ```
///
/// # Panics
///
/// This function panics if thread-local runtime is not set.
///
/// The runtime is usually set implicitly when this function is called
/// from a future driven by a tokio runtime, otherwise runtime can be set
/// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function.
#[cfg_attr(feature = "signal", allow(unused))]
pub(crate) fn new_with_ready(io: E, ready: mio::Ready) -> io::Result<Self> {
Self::new_with_ready_and_handle(io, ready, Handle::current())
}
pub(crate) fn new_with_ready_and_handle(
io: E,
ready: mio::Ready,
handle: Handle,
) -> io::Result<Self> {
let registration = Registration::new_with_ready_and_handle(&io, ready, handle)?;
Ok(Self {
io: Some(io),
registration,
})
}
/// Returns a shared reference to the underlying I/O object this readiness
/// stream is wrapping.
#[cfg(any(
feature = "process",
feature = "tcp",
feature = "udp",
feature = "uds",
feature = "signal"
))]
pub(crate) fn get_ref(&self) -> &E {
self.io.as_ref().unwrap()
}
/// Returns a mutable reference to the underlying I/O object this readiness
/// stream is wrapping.
pub(crate) fn get_mut(&mut self) -> &mut E {
self.io.as_mut().unwrap()
}
/// Consumes self, returning the inner I/O object
///
/// This function will deregister the I/O resource from the reactor before
/// returning. If the deregistration operation fails, an error is returned.
///
/// Note that deregistering does not guarantee that the I/O resource can be
/// registered with a different reactor. Some I/O resource types can only be
/// associated with a single reactor instance for their lifetime.
#[cfg(any(feature = "tcp", feature = "udp", feature = "uds"))]
pub(crate) fn into_inner(mut self) -> io::Result<E> {
let io = self.io.take().unwrap();
self.registration.deregister(&io)?;
Ok(io)
}
pub(crate) fn clear_readiness(&self, event: ReadyEvent) {
self.registration.clear_readiness(event);
}
/// Checks the I/O resource's read readiness state.
///
/// The mask argument allows specifying what readiness to notify on. This
/// can be any value, including platform specific readiness, **except**
/// `writable`. HUP is always implicitly included on platforms that support
/// it.
///
/// If the resource is not ready for a read then `Poll::Pending` is returned
/// and the current task is notified once a new event is received.
///
/// The I/O resource will remain in a read-ready state until readiness is
/// cleared by calling [`clear_read_ready`].
///
/// [`clear_read_ready`]: method@Self::clear_read_ready
///
/// # Panics
/// | ///
/// # Warning
///
/// This method may not be called concurrently. It takes `&self` to allow
/// calling it concurrently with `poll_write_ready`.
pub(crate) fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> {
self.registration.poll_readiness(cx, Direction::Read)
}
/// Checks the I/O resource's write readiness state.
///
/// This always checks for writable readiness and also checks for HUP
/// readiness on platforms that support it.
///
/// If the resource is not ready for a write then `Poll::Pending` is
/// returned and the current task is notified once a new event is received.
///
/// The I/O resource will remain in a write-ready state until readiness is
/// cleared by calling [`clear_write_ready`].
///
/// [`clear_write_ready`]: method@Self::clear_write_ready
///
/// # Panics
///
/// This function panics if:
///
/// * `ready` contains bits besides `writable` and `hup`.
/// * called from outside of a task context.
///
/// # Warning
///
/// This method may not be called concurrently. It takes `&self` to allow
/// calling it concurrently with `poll_read_ready`.
pub(crate) fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> {
self.registration.poll_readiness(cx, Direction::Write)
}
}
cfg_io_readiness! {
impl<E> PollEvented<E>
where
E: Evented,
{
pub(crate) async fn readiness(&self, interest: mio::Ready) -> io::Result<ReadyEvent> {
self.registration.readiness(interest).await
}
pub(crate) async fn async_io<F, R>(&self, interest: mio::Ready, mut op: F) -> io::Result<R>
where
F: FnMut(&E) -> io::Result<R>,
{
loop {
let event = self.readiness(interest).await?;
match op(self.get_ref()) {
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
self.clear_readiness(event);
}
x => return x,
}
}
}
}
}
// ===== Read / Write impls =====
impl<E> AsyncRead for PollEvented<E>
where
E: Evented + Read + Unpin,
{
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
loop {
let ev = ready!(self.poll_read_ready(cx))?;
// We can't assume the `Read` won't look at the read buffer,
// so we have to force initialization here.
let r = (*self).get_mut().read(buf.initialize_unfilled());
if is_wouldblock(&r) {
self.clear_readiness(ev);
continue;
}
return Poll::Ready(r.map(|n| {
buf.add_filled(n);
}));
}
}
}
impl<E> AsyncWrite for PollEvented<E>
where
E: Evented + Write + Unpin,
{
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
loop {
let ev = ready!(self.poll_write_ready(cx))?;
let r = (*self).get_mut().write(buf);
if is_wouldblock(&r) {
self.clear_readiness(ev);
continue;
}
return Poll::Ready(r);
}
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
loop {
let ev = ready!(self.poll_write_ready(cx))?;
let r = (*self).get_mut().flush();
if is_wouldblock(&r) {
self.clear_readiness(ev);
continue;
}
return Poll::Ready(r);
}
}
fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Poll::Ready(Ok(()))
}
}
fn is_wouldblock<T>(r: &io::Result<T>) -> bool {
match *r {
Ok(_) => false,
Err(ref e) => e.kind() == io::ErrorKind::WouldBlock,
}
}
impl<E: Evented + fmt::Debug> fmt::Debug for PollEvented<E> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PollEvented").field("io", &self.io).finish()
}
}
impl<E: Evented> Drop for PollEvented<E> {
fn drop(&mut self) {
if let Some(io) = self.io.take() {
// Ignore errors
let _ = self.registration.deregister(&io);
}
}
} | /// This function panics if:
///
/// * `ready` includes writable.
/// * called from outside of a task context. | random_line_split |
projects.ts | export default [
{
year: 2018,
name: {
ru: 'Мобильное приложение Hello TV',
en: 'Hello TV mobile app',
},
description: {
ru: 'Мобильное приложение для iOS и Android. Реализован личный кабинет учеников, которые могут видеть свои достижения и доступные награды. Зайти в него можно по логину <code>brunya@cat.ru</code> и паролю <code>123456</code>.',
en: 'Mobile application for iOS and Android. There is a personal account of students who can see their achievements and available awards. You can sign in via login <code>brunya@cat.ru</code> and password <code>123456</code>.',
},
img: 'hellotv.jpg',
links: [
{
url: 'https://itunes.apple.com/gb/app/hello-tv/id1438232244',
name: {
ru: 'Приложение на iOS',
en: 'iOS app',
},
icon: 'apple',
},
{
url: 'https://play.google.com/store/apps/details?id=io.ionic.polyakovintvhello',
name: {
ru: 'Приложение на Android',
en: 'Android app',
},
icon: 'android',
},
{
url: 'http://tvhello.ru/',
name: {
ru: 'Сайт заказчика',
en: 'Customer\'s website',
},
icon: 'globe',
},
],
feedback: true,
best: true,
forBanner: true,
},
{
year: 2018,
name: {
ru: 'EdLabs',
en: 'EdLabs',
},
description: {
ru: 'Простой сайт, приглашающий принять участие в новой образовательной платформе. Реализована система регистрация новых пользователей.',
en: 'A simple website inviting the visiton to participate in a new educational platform. There is a system for registering new users implemented.',
},
img: 'edlabs.jpg',
links: [
{
url: 'http://edlabs.ru/',
name: {
ru: 'Сайт проекта',
en: 'Project\'s website',
},
icon: 'globe',
},
],
feedback: true,
best: true,
forBanner: true,
},
{
year: 2018,
name: {
ru: 'Моё портфолио',
en: 'My portfolio',
},
description: {
ru: 'Возникла идея создать CV в формате посадочной страницы для лучшей демонстации своих возможностей в качестве веб-разработчика. Добавлены превьюшки и ссылки на основные и второстепенные проекты и средства коммуникации со мной. Ключевые навыки представлены в виде интерактивной инфографики.',
en: 'There was an idea to create a CV in the format of the landing page for the best demonstration of my opportunities as a web-developer. I\'ve added preview images and links to basic and secondary projects and communication links. Key skills are presented as an interactive infographics.',
},
img: 'portfolio.jpg',
links: [
{
url: 'http://polyakovin.ru',
name: {
ru: 'Текущий сайт',
en: 'Current website',
},
icon: 'globe'
},
{
url: 'https://github.com/polyakovin/portfolio',
name: {
ru: 'Рабочий репозиторий проекта',
en: 'Project\'s working repository',
},
icon: 'github-alt'
},
{
url: 'https://vk.com/ip.painter?w=wall-94792100_14',
name: {ru: 'Пост о проекте и других CV', en: 'Publication about this project and other CVs'},
icon: 'vk'
}
],
best: true,
forBanner: true
},
{
year: 2018,
name: {ru: 'Сайт фотографа Алёны Дёминой', en: 'Alena Demina photographer website'},
description: {ru: 'Сайт фотографа, отображающий лучшие его работы и содержащий тематические ссылки на соцсети с расширенным набором фотографий из портфолио.', en: 'The site of the photographer, displaying her best works and containing thematic links to social networks with an expanded set of photos from the portfolio.'},
img: 'demina.jpg',
feedback: true,
best: true,
forBanner: true
},
{
year: 2018,
name: {ru: 'Мобильное приложение Emotion Miner', en: 'Emotion Miner mobile app'},
description: {ru: 'Техническая демонстрация мобильного приложения для платформы Emotion Miner. Реализованы ключевые элементы управления приложением, заточенные под мобильные устройства. Проект подготовлен для загрузки на iOS, Android и Windows Phone.', en: 'Technical demonstration of the mobile application for the Emotion Miner platform. Implemented key application control elements designed for mobile devices. The project is prepared for download on iOS, Android and Windows Phone.'},
img: 'emotionminer_m.jpg',
links: [
{
url: 'https://emotion-miner-ionic-test.herokuapp.com/',
name: {ru: 'Тестовый сайт проекта', en: 'Project\'s test website'},
icon: 'cog'
}
],
feedback: true
},
{
year: 2017,
name: {ru: 'Emotion Miner', en: 'Emotion Miner'},
description: {ru: 'Платформа для сбора датасетов эмоций людей на фрагментах видео из YouTube. На платформу может зайти любой человек, пройти небольшой тест и обучение по разметке эмоций, а после этого начать зарабатывать и выводить деньги за разметку. Внедрена поддержка веб-камеры, записывающей эмоциональное состояние человека, размечающего видео. На платформе зарегистрировано и работает более 50.000 человек.', en: 'The platform for collecting datasets of people\'s emotions at video fragments from YouTube. Any person can sign up to the platform, pass a short test and a training on emotions annotating. And after that he can start working and withdraw money for markup. Implemented support for webcam, recording the emotional state of a person marking a video. There are over 50,000 people registered and working at the platform.'},
img: 'emotionminer.jpg',
links: [
{
url: 'https://emotionminer.com',
name: {ru: 'Сайт проекта', en: 'Project\'s website'},
icon: 'globe'
},
{
url: 'http://neurodatalab.com',
name: {ru: 'Сайт заказчика', en: 'Customer\'s website'},
icon: 'globe'
},
{
url: 'https://youtu.be/dTLLGIVRFj8',
name: {ru: 'Обзор на платформу от филиппинского видеоблоггера', en: 'Overview of the platform from the Filipino videoblogger'},
icon: 'youtube'
}
],
feedback: true,
best: true,
forBanner: true
},
{
year: 2017,
name: {ru: 'OYWO', en: 'OYWO'},
description: {ru: 'Оптимальная мотивационная система контроля своего времени и занятости. Разработан список дел и календарь, работающие в связке друг с другом. Есть возможность входить в систему через соцсети.', en: 'Optimal motivational system for controlling personal and working time. There was developed a todo-list working together in conjunction with a calendar. There is an opportunity to sign in to the system through social networks.'},
img: 'oywo.jpg',
links: [
{
url: 'https://oywo.herokuapp.com/',
name: {ru: 'Сайт проекта', en: 'Project\'s website'},
icon: 'globe'
},
{
url: 'https://vk.com/useoywo',
name: {ru: 'Поддержки проекта в группе ВКонтакте', en: 'Project\'s support VK group'},
icon: 'vk'
}
],
feedback: true,
best: true,
forBanner: true
},
{
year: 2017,
name: {ru: 'Мой блог', en: 'My blog'},
description: {ru: 'Блог-платформу на движке Jekyll, позволяющие писать статьи на языке Markdown. Внедрил возможность вставлять программный код с подсветкой, а также LaTeX-формулы.', en: 'A blog-platform on the Jekyll engine, which allows writing articles in the Markdown language. Implemented the ability to insert highlighted code, as well as LaTeX-equations.'},
img: 'blog.jpg',
links: [
{
url: 'https://polyakovin.github.io',
name: {ru: 'Сайт проекта', en: 'Project\'s website'},
icon: 'globe'
},
{
url: 'https://github.com/polyakovin/polyakovin.github.io',
name: {ru: 'Рабочий репозиторий проекта', en: 'Project\'s working repository'},
icon: 'github-alt'
}
],
ohNo: true
},
{
year: 2017,
name: {ru: 'Конструктор документов для заказа', en: 'The document builder for the order'},
description: {ru: 'Упрощено делопроизводство в компании «Бастион», изготавливающей стальные двери. Программа принимает на вход все необоходимые параметры заказа и выдаёт документ, пригодный для печати.', en: 'Оffice work in the producing steel doors company “Bastion” has been simplified. The program accepts all required parameters of the order for input and produces a document suitable for printing.'},
img: 'bastion.jpg',
links: [
{
url: 'https://polyakovin.github.io/bastionRequest',
name: {ru: 'Архивная копия проекта', en: 'Project\'s archive copy'},
icon: 'archive'
},
{
url: 'https://github.com/polyakovin/bastionRequest',
name: {ru: 'Рабочий репозиторий проекта', en: 'Project\'s working repository'},
icon: 'github-alt'
}
],
feedback: true,
best: true
},
{
year: 2016,
name: {ru: 'Интерактивные уроки для детей', en: 'Interactive lessons for children'},
description: {ru: 'В рамках участия в образовательном проекте UCHi.RU создал несколько обучающих игр, полезных функций и компонентов общего назначений. Разработан и внедрён в систему разработки сборщик документации по программным компонентам компании.', en: 'While taking part in the educational project UCHi.RU, there were developed several training games, some useful functions and general purpose components. There also was developed and implemented the documentation collector for the software components of the company in the development system.'},
img: 'uchi.jpg',
video: 'https://vk.com/video_ext.php?oid=-94792100&id=456239017&hash=3371d6e8fcab148c',
links: [
{
url: 'https://uchi.ru',
name: {ru: 'Образовательный проект UCHi.RU', en: 'Educational project UCHi.RU'},
icon: 'globe'
},
{
url: 'https://vk.com/ip.painter?w=wall-94792100_15',
name: {ru: 'Демонстрация работы функции Drug\'n\'Drop', en: 'Drag&drop function demonstration'},
icon: 'vk'
}
],
feedback: true,
best: true,
forBanner: true
},
{
year: 2016,
name: {ru: 'Веб-сайт компании «Ваш Выбор!»', en: '“Your Choice!” company website'},
description: {ru: 'Ребрендинг веб-сайта фирмы в соответствии с новым фирменным стилем. Количество элементов на сайте сокращено до необходимого минимума. Выделены ключевые достоинства компании и продукции перед конкурентами.', en: 'Rebranding the company\'s website in accordance with the new corporate style guide. The number of elements on the site has been reduced to the required minimum. There are the key advantages of the company and products in compare to competitors highlighted.'},
img: 'yourChoice2.jpg',
links: [
{
url: 'https://polyakovin.github.io/yourChoice2',
name: {ru: 'Архивная копия проекта', en: 'Project\'s archive copy'},
icon: 'archive'
}, | name: {ru: 'Рабочий репозиторий проекта', en: 'Project\'s working repository'},
icon: 'github'
}
],
feedback: true,
best: true,
forBanner: true
},
{
year: 2015,
name: {ru: 'Мой первый личный сайт', en: 'My first personal site'},
description: {ru: 'Представлена фотография автора, ссылки на средства коммуникации, а также перечслены основные проекты.', en: 'There is a picture of the author, links to the means of communication, and the main projects shown.'},
img: 'portfolio_old.jpg',
links: [
{
url: 'https://polyakovin.github.io/portfolio_old/',
name: {ru: 'Архивная копия проекта', en: 'Project\'s archive copy'},
icon: 'archive'
},
{
url: 'https://github.com/polyakovin/portfolio_old',
name: {ru: 'Рабочий репозиторий проекта', en: 'Project\'s working repository'},
icon: 'github-alt'
}
],
ohNo: true
},
{
year: 2015,
name: {ru: 'Веб-сайт экзамена по ТФКП', en: 'TFCV exam web-site'},
description: {ru: 'Автоматизирован процесс отбора достойных из желающих попасть на досрочную сдачу экзамена. Внедрён административный блок для управления содержанием сайта.', en: 'The process of selecting the best students willing to attend an early exam has been automated. Implemented an administrator\'s cabinet for managing the content of the site.'},
img: 'tfcv.jpg',
links: [
{
url: 'https://polyakovin.github.io/tfkp',
name: {ru: 'Архивная копия проекта', en: 'Project\'s archive copy'},
icon: 'archive'
},
{
url: 'https://github.com/polyakovin/tfkp',
name: {ru: 'Рабочий репозиторий проекта', en: 'Project\'s working repository'},
icon: 'github-alt'
}
],
feedback: true
},
{
year: 2013,
name: {ru: 'Электронная комната проекта «SMIDA»', en: '“SMIDA” project e-room'},
description: {ru: 'Сервис для совместной работы участников научного проекта Университетского Центра Свальбарда. В рамках проекта реализована система пользователей, файлообменник, библиотека, поддержка двух языков, система полезных ссылок по проекту.', en: 'Service for the joint work of the participants of the University Center of Svalbard scientific project. There are a users system, file sharing, library, support for two languages, a system of useful links for the project have been implemented.'},
img: 'smida.jpg',
links: [
{
url: 'http://smida.mipt.ru',
name: {ru: 'Сайт проекта', en: 'Project\'s website'},
icon: 'globe'
},
{
url: 'https://github.com/polyakovin/smida',
name: {ru: 'Рабочий репозиторий проекта', en: 'Project\'s working repository'},
icon: 'github-alt'
}
],
feedback: true,
best: true,
forBanner: true
},
{
year: 2012,
name: {ru: 'Веб-сайт компании «Классика тренинга»', en: '“Training Classics” company website'},
description: {ru: 'Сайт-визитка с администивным блоком, позволяющим изменять основную информацию на сайте, а также добавлять новости и даты мероприятий фирмы.', en: 'A simple website with an administrator\'s cabinet that allows to change the basic information on the site, as well as adding news and dates for company events.'},
img: 'trainingСlassics.jpg',
links: [
{
url: 'https://polyakovin.github.io/trainingClassics',
name: {ru: 'Архивная копия проекта', en: 'Project\'s archive copy'},
icon: 'archive'
},
{
url: 'https://github.com/polyakovin/trainingClassics',
name: {ru: 'Рабочий репозиторий проекта', en: 'Project\'s working repository'},
icon: 'github'
}
]
},
{
year: 2011,
name: {ru: 'Веб-сайт компании «Ваш Выбор!»', en: '“Your Choice!” company web-site'},
description: {ru: 'Сайта предназначался главным образом в качестве платформы для размещения конструктора металлической двери. Помимо основного продукта на сайте реализован каталог дверей в виде буклета, форма заказа двери, контактная информация, а также администраторский блок для наполнения сайта содержимым.', en: 'The site was intended primarily as a platform for placing the metal door builder. In addition to the main product there is a catalog of doors in the form of a booklet, a door order form, contact information, and an administrator\'s block for filling the site with content on the site.'},
img: 'yourChoice.jpg',
links: [
{
url: 'https://polyakovin.github.io/yourChoice',
name: {ru: 'Архивная копия проекта', en: 'Project\'s archive copy'},
icon: 'archive'
},
{
url: 'https://polyakovin.github.io/yourChoice/constructor.html',
name: {ru: 'Конструктор стальных дверей', en: 'Steel door builder'},
icon: 'archive'
},
{
url: 'https://github.com/polyakovin/yourChoice',
name: {ru: 'Рабочий репозиторий проекта', en: 'Project\'s working repository'},
icon: 'github-alt'
}
],
feedback: true
}
] | {
url: 'https://github.com/polyakovin/yourChoice2', | random_line_split |
api.go | // ------------------------------------------------------------
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// ------------------------------------------------------------
package grpc
import (
"context"
"errors"
"fmt"
"time"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/pubsub"
"github.com/dapr/components-contrib/state"
"github.com/dapr/dapr/pkg/actors"
components_v1alpha1 "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
"github.com/dapr/dapr/pkg/channel"
"github.com/dapr/dapr/pkg/components"
"github.com/dapr/dapr/pkg/messaging"
dapr_pb "github.com/dapr/dapr/pkg/proto/dapr"
daprinternal_pb "github.com/dapr/dapr/pkg/proto/daprinternal"
"github.com/golang/protobuf/ptypes/any"
durpb "github.com/golang/protobuf/ptypes/duration"
"github.com/golang/protobuf/ptypes/empty"
"github.com/google/uuid"
jsoniter "github.com/json-iterator/go"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
// Range of a durpb.Duration in seconds, as specified in
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
daprSeparator = "||"
)
// API is the gRPC interface for the Dapr gRPC API. It implements both the internal and external proto definitions.
type API interface {
CallActor(ctx context.Context, in *daprinternal_pb.CallActorEnvelope) (*daprinternal_pb.InvokeResponse, error)
CallLocal(ctx context.Context, in *daprinternal_pb.LocalCallEnvelope) (*daprinternal_pb.InvokeResponse, error)
UpdateComponent(ctx context.Context, in *daprinternal_pb.Component) (*empty.Empty, error)
PublishEvent(ctx context.Context, in *dapr_pb.PublishEventEnvelope) (*empty.Empty, error)
InvokeService(ctx context.Context, in *dapr_pb.InvokeServiceEnvelope) (*dapr_pb.InvokeServiceResponseEnvelope, error)
InvokeBinding(ctx context.Context, in *dapr_pb.InvokeBindingEnvelope) (*empty.Empty, error)
GetState(ctx context.Context, in *dapr_pb.GetStateEnvelope) (*dapr_pb.GetStateResponseEnvelope, error)
SaveState(ctx context.Context, in *dapr_pb.SaveStateEnvelope) (*empty.Empty, error)
DeleteState(ctx context.Context, in *dapr_pb.DeleteStateEnvelope) (*empty.Empty, error)
}
type api struct {
actor actors.Actors
directMessaging messaging.DirectMessaging
componentsHandler components.ComponentHandler
appChannel channel.AppChannel
stateStores map[string]state.Store
pubSub pubsub.PubSub
id string
sendToOutputBindingFn func(name string, req *bindings.WriteRequest) error
}
// NewAPI returns a new gRPC API
func NewAPI(daprID string, appChannel channel.AppChannel, stateStores map[string]state.Store, pubSub pubsub.PubSub, directMessaging messaging.DirectMessaging, actor actors.Actors, sendToOutputBindingFn func(name string, req *bindings.WriteRequest) error, componentHandler components.ComponentHandler) API {
return &api{
directMessaging: directMessaging,
componentsHandler: componentHandler,
actor: actor,
id: daprID,
appChannel: appChannel,
pubSub: pubSub,
stateStores: stateStores,
sendToOutputBindingFn: sendToOutputBindingFn,
}
}
// CallLocal is used for internal dapr to dapr calls. It is invoked by another Dapr instance with a request to the local app.
func (a *api) CallLocal(ctx context.Context, in *daprinternal_pb.LocalCallEnvelope) (*daprinternal_pb.InvokeResponse, error) {
if a.appChannel == nil {
return nil, errors.New("app channel is not initialized")
}
req := channel.InvokeRequest{
Payload: in.Data.Value,
Method: in.Method,
Metadata: in.Metadata,
}
resp, err := a.appChannel.InvokeMethod(&req)
if err != nil {
return nil, err
}
return &daprinternal_pb.InvokeResponse{
Data: &any.Any{Value: resp.Data},
Metadata: resp.Metadata,
}, nil
}
// CallActor invokes a virtual actor
func (a *api) CallActor(ctx context.Context, in *daprinternal_pb.CallActorEnvelope) (*daprinternal_pb.InvokeResponse, error) {
req := actors.CallRequest{
ActorType: in.ActorType,
ActorID: in.ActorID,
Data: in.Data.Value,
Method: in.Method,
Metadata: in.Metadata,
}
resp, err := a.actor.Call(&req)
if err != nil {
return nil, err
}
return &daprinternal_pb.InvokeResponse{
Data: &any.Any{Value: resp.Data},
Metadata: map[string]string{},
}, nil
}
// UpdateComponent is fired by the Dapr control plane when a component state changes
func (a *api) UpdateComponent(ctx context.Context, in *daprinternal_pb.Component) (*empty.Empty, error) {
c := components_v1alpha1.Component{
ObjectMeta: meta_v1.ObjectMeta{
Name: in.Metadata.Name,
},
Auth: components_v1alpha1.Auth{
SecretStore: in.Auth.SecretStore,
},
}
for _, m := range in.Spec.Metadata {
c.Spec.Metadata = append(c.Spec.Metadata, components_v1alpha1.MetadataItem{
Name: m.Name,
Value: m.Value,
SecretKeyRef: components_v1alpha1.SecretKeyRef{
Key: m.SecretKeyRef.Key,
Name: m.SecretKeyRef.Name,
},
})
}
a.componentsHandler.OnComponentUpdated(c)
return &empty.Empty{}, nil
}
func (a *api) PublishEvent(ctx context.Context, in *dapr_pb.PublishEventEnvelope) (*empty.Empty, error) {
if a.pubSub == nil {
return &empty.Empty{}, errors.New("ERR_PUBSUB_NOT_FOUND")
}
topic := in.Topic
body := []byte{}
if in.Data != nil {
body = in.Data.Value
}
envelope := pubsub.NewCloudEventsEnvelope(uuid.New().String(), a.id, pubsub.DefaultCloudEventType, body)
b, err := jsoniter.ConfigFastest.Marshal(envelope)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_PUBSUB_CLOUD_EVENTS_SER: %s", err)
}
req := pubsub.PublishRequest{
Topic: topic,
Data: b,
}
err = a.pubSub.Publish(&req)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_PUBSUB_PUBLISH_MESSAGE: %s", err)
}
return &empty.Empty{}, nil
}
func (a *api) InvokeService(ctx context.Context, in *dapr_pb.InvokeServiceEnvelope) (*dapr_pb.InvokeServiceResponseEnvelope, error) {
req := messaging.DirectMessageRequest{
Data: in.Data.Value,
Method: in.Method,
Metadata: in.Metadata,
Target: in.Id,
}
resp, err := a.directMessaging.Invoke(&req)
if err != nil {
return nil, err
}
return &dapr_pb.InvokeServiceResponseEnvelope{
Data: &any.Any{Value: resp.Data},
Metadata: resp.Metadata,
}, nil
}
func (a *api) InvokeBinding(ctx context.Context, in *dapr_pb.InvokeBindingEnvelope) (*empty.Empty, error) {
req := &bindings.WriteRequest{
Metadata: in.Metadata,
}
if in.Data != nil {
req.Data = in.Data.Value
}
err := a.sendToOutputBindingFn(in.Name, req)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_INVOKE_OUTPUT_BINDING: %s", err)
}
return &empty.Empty{}, nil
}
func (a *api) GetState(ctx context.Context, in *dapr_pb.GetStateEnvelope) (*dapr_pb.GetStateResponseEnvelope, error) {
if a.stateStores == nil || len(a.stateStores) == 0 {
return nil, errors.New("ERR_STATE_STORE_NOT_CONFIGURED")
}
storeName := in.StoreName
if a.stateStores[storeName] == nil {
return nil, errors.New("ERR_STATE_STORE_NOT_FOUND")
}
req := state.GetRequest{
Key: a.getModifiedStateKey(in.Key),
Options: state.GetStateOption{
Consistency: in.Consistency,
},
}
getResponse, err := a.stateStores[storeName].Get(&req)
if err != nil {
return nil, fmt.Errorf("ERR_STATE_GET: %s", err)
}
response := &dapr_pb.GetStateResponseEnvelope{}
if getResponse != nil {
response.Etag = getResponse.ETag
response.Data = &any.Any{Value: getResponse.Data}
}
return response, nil
}
func (a *api) SaveState(ctx context.Context, in *dapr_pb.SaveStateEnvelope) (*empty.Empty, error) {
if a.stateStores == nil || len(a.stateStores) == 0 {
return &empty.Empty{}, errors.New("ERR_STATE_STORE_NOT_CONFIGURED")
}
storeName := in.StoreName
if a.stateStores[storeName] == nil {
return &empty.Empty{}, errors.New("ERR_STATE_STORE_NOT_FOUND")
}
reqs := []state.SetRequest{}
for _, s := range in.Requests {
req := state.SetRequest{
Key: a.getModifiedStateKey(s.Key),
Metadata: s.Metadata,
Value: s.Value.Value,
}
if s.Options != nil {
req.Options = state.SetStateOption{
Consistency: s.Options.Consistency,
Concurrency: s.Options.Concurrency,
}
if s.Options.RetryPolicy != nil {
req.Options.RetryPolicy = state.RetryPolicy{
Threshold: int(s.Options.RetryPolicy.Threshold),
Pattern: s.Options.RetryPolicy.Pattern,
}
if s.Options.RetryPolicy.Interval != nil {
dur, err := duration(s.Options.RetryPolicy.Interval)
if err == nil {
req.Options.RetryPolicy.Interval = dur
}
}
}
}
reqs = append(reqs, req)
}
err := a.stateStores[storeName].BulkSet(reqs)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_STATE_SAVE: %s", err)
}
return &empty.Empty{}, nil
}
func (a *api) DeleteState(ctx context.Context, in *dapr_pb.DeleteStateEnvelope) (*empty.Empty, error) {
if a.stateStores == nil || len(a.stateStores) == 0 {
return &empty.Empty{}, errors.New("ERR_STATE_STORE_NOT_CONFIGURED")
}
storeName := in.StoreName
if a.stateStores[storeName] == nil {
return &empty.Empty{}, errors.New("ERR_STATE_STORE_NOT_FOUND")
}
req := state.DeleteRequest{
Key: a.getModifiedStateKey(in.Key),
ETag: in.Etag,
}
if in.Options != nil {
req.Options = state.DeleteStateOption{
Concurrency: in.Options.Concurrency,
Consistency: in.Options.Consistency,
}
if in.Options.RetryPolicy != nil {
retryPolicy := state.RetryPolicy{
Threshold: int(in.Options.RetryPolicy.Threshold),
Pattern: in.Options.RetryPolicy.Pattern,
}
if in.Options.RetryPolicy.Interval != nil |
req.Options.RetryPolicy = retryPolicy
}
}
err := a.stateStores[storeName].Delete(&req)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_STATE_DELETE: failed deleting state with key %s: %s", in.Key, err)
}
return &empty.Empty{}, nil
}
func (a *api) getModifiedStateKey(key string) string {
if a.id != "" {
return fmt.Sprintf("%s%s%s", a.id, daprSeparator, key)
}
return key
}
func duration(p *durpb.Duration) (time.Duration, error) {
if err := validateDuration(p); err != nil {
return 0, err
}
d := time.Duration(p.Seconds) * time.Second
if int64(d/time.Second) != p.Seconds {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
if p.Nanos != 0 {
d += time.Duration(p.Nanos) * time.Nanosecond
if (d < 0) != (p.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
}
return d, nil
}
func validateDuration(d *durpb.Duration) error {
if d == nil {
return errors.New("duration: nil Duration")
}
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
return fmt.Errorf("duration: %v: seconds out of range", d)
}
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
return fmt.Errorf("duration: %v: nanos out of range", d)
}
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
}
return nil
}
| {
dur, err := duration(in.Options.RetryPolicy.Interval)
if err == nil {
retryPolicy.Interval = dur
}
} | conditional_block |
api.go | // ------------------------------------------------------------
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// ------------------------------------------------------------
package grpc
import (
"context"
"errors"
"fmt"
"time"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/pubsub"
"github.com/dapr/components-contrib/state"
"github.com/dapr/dapr/pkg/actors"
components_v1alpha1 "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
"github.com/dapr/dapr/pkg/channel"
"github.com/dapr/dapr/pkg/components"
"github.com/dapr/dapr/pkg/messaging"
dapr_pb "github.com/dapr/dapr/pkg/proto/dapr"
daprinternal_pb "github.com/dapr/dapr/pkg/proto/daprinternal"
"github.com/golang/protobuf/ptypes/any"
durpb "github.com/golang/protobuf/ptypes/duration"
"github.com/golang/protobuf/ptypes/empty"
"github.com/google/uuid"
jsoniter "github.com/json-iterator/go"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
// Range of a durpb.Duration in seconds, as specified in
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
daprSeparator = "||"
)
// API is the gRPC interface for the Dapr gRPC API. It implements both the internal and external proto definitions.
type API interface {
CallActor(ctx context.Context, in *daprinternal_pb.CallActorEnvelope) (*daprinternal_pb.InvokeResponse, error)
CallLocal(ctx context.Context, in *daprinternal_pb.LocalCallEnvelope) (*daprinternal_pb.InvokeResponse, error)
UpdateComponent(ctx context.Context, in *daprinternal_pb.Component) (*empty.Empty, error)
PublishEvent(ctx context.Context, in *dapr_pb.PublishEventEnvelope) (*empty.Empty, error)
InvokeService(ctx context.Context, in *dapr_pb.InvokeServiceEnvelope) (*dapr_pb.InvokeServiceResponseEnvelope, error)
InvokeBinding(ctx context.Context, in *dapr_pb.InvokeBindingEnvelope) (*empty.Empty, error)
GetState(ctx context.Context, in *dapr_pb.GetStateEnvelope) (*dapr_pb.GetStateResponseEnvelope, error)
SaveState(ctx context.Context, in *dapr_pb.SaveStateEnvelope) (*empty.Empty, error)
DeleteState(ctx context.Context, in *dapr_pb.DeleteStateEnvelope) (*empty.Empty, error)
}
type api struct {
actor actors.Actors
directMessaging messaging.DirectMessaging
componentsHandler components.ComponentHandler
appChannel channel.AppChannel
stateStores map[string]state.Store
pubSub pubsub.PubSub
id string
sendToOutputBindingFn func(name string, req *bindings.WriteRequest) error
}
// NewAPI returns a new gRPC API
func NewAPI(daprID string, appChannel channel.AppChannel, stateStores map[string]state.Store, pubSub pubsub.PubSub, directMessaging messaging.DirectMessaging, actor actors.Actors, sendToOutputBindingFn func(name string, req *bindings.WriteRequest) error, componentHandler components.ComponentHandler) API {
return &api{
directMessaging: directMessaging,
componentsHandler: componentHandler,
actor: actor,
id: daprID,
appChannel: appChannel,
pubSub: pubSub,
stateStores: stateStores,
sendToOutputBindingFn: sendToOutputBindingFn,
}
}
// CallLocal is used for internal dapr to dapr calls. It is invoked by another Dapr instance with a request to the local app.
func (a *api) CallLocal(ctx context.Context, in *daprinternal_pb.LocalCallEnvelope) (*daprinternal_pb.InvokeResponse, error) {
if a.appChannel == nil {
return nil, errors.New("app channel is not initialized")
}
req := channel.InvokeRequest{
Payload: in.Data.Value,
Method: in.Method,
Metadata: in.Metadata,
}
resp, err := a.appChannel.InvokeMethod(&req)
if err != nil {
return nil, err
}
return &daprinternal_pb.InvokeResponse{
Data: &any.Any{Value: resp.Data},
Metadata: resp.Metadata,
}, nil
}
// CallActor invokes a virtual actor
func (a *api) CallActor(ctx context.Context, in *daprinternal_pb.CallActorEnvelope) (*daprinternal_pb.InvokeResponse, error) {
req := actors.CallRequest{
ActorType: in.ActorType,
ActorID: in.ActorID,
Data: in.Data.Value,
Method: in.Method,
Metadata: in.Metadata,
}
resp, err := a.actor.Call(&req)
if err != nil {
return nil, err
}
return &daprinternal_pb.InvokeResponse{
Data: &any.Any{Value: resp.Data},
Metadata: map[string]string{},
}, nil
}
// UpdateComponent is fired by the Dapr control plane when a component state changes
func (a *api) UpdateComponent(ctx context.Context, in *daprinternal_pb.Component) (*empty.Empty, error) {
c := components_v1alpha1.Component{
ObjectMeta: meta_v1.ObjectMeta{
Name: in.Metadata.Name,
},
Auth: components_v1alpha1.Auth{
SecretStore: in.Auth.SecretStore,
},
}
for _, m := range in.Spec.Metadata {
c.Spec.Metadata = append(c.Spec.Metadata, components_v1alpha1.MetadataItem{
Name: m.Name,
Value: m.Value,
SecretKeyRef: components_v1alpha1.SecretKeyRef{
Key: m.SecretKeyRef.Key,
Name: m.SecretKeyRef.Name,
},
})
}
a.componentsHandler.OnComponentUpdated(c)
return &empty.Empty{}, nil
}
func (a *api) PublishEvent(ctx context.Context, in *dapr_pb.PublishEventEnvelope) (*empty.Empty, error) {
if a.pubSub == nil {
return &empty.Empty{}, errors.New("ERR_PUBSUB_NOT_FOUND")
}
topic := in.Topic
body := []byte{}
if in.Data != nil {
body = in.Data.Value
}
envelope := pubsub.NewCloudEventsEnvelope(uuid.New().String(), a.id, pubsub.DefaultCloudEventType, body)
b, err := jsoniter.ConfigFastest.Marshal(envelope)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_PUBSUB_CLOUD_EVENTS_SER: %s", err)
}
req := pubsub.PublishRequest{
Topic: topic,
Data: b,
}
err = a.pubSub.Publish(&req)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_PUBSUB_PUBLISH_MESSAGE: %s", err)
}
return &empty.Empty{}, nil
}
func (a *api) InvokeService(ctx context.Context, in *dapr_pb.InvokeServiceEnvelope) (*dapr_pb.InvokeServiceResponseEnvelope, error) {
req := messaging.DirectMessageRequest{
Data: in.Data.Value,
Method: in.Method,
Metadata: in.Metadata,
Target: in.Id,
}
resp, err := a.directMessaging.Invoke(&req)
if err != nil {
return nil, err
}
return &dapr_pb.InvokeServiceResponseEnvelope{
Data: &any.Any{Value: resp.Data},
Metadata: resp.Metadata,
}, nil
}
func (a *api) InvokeBinding(ctx context.Context, in *dapr_pb.InvokeBindingEnvelope) (*empty.Empty, error) {
req := &bindings.WriteRequest{
Metadata: in.Metadata,
}
if in.Data != nil {
req.Data = in.Data.Value
}
err := a.sendToOutputBindingFn(in.Name, req)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_INVOKE_OUTPUT_BINDING: %s", err)
}
return &empty.Empty{}, nil
}
func (a *api) GetState(ctx context.Context, in *dapr_pb.GetStateEnvelope) (*dapr_pb.GetStateResponseEnvelope, error) {
if a.stateStores == nil || len(a.stateStores) == 0 {
return nil, errors.New("ERR_STATE_STORE_NOT_CONFIGURED")
}
storeName := in.StoreName
if a.stateStores[storeName] == nil {
return nil, errors.New("ERR_STATE_STORE_NOT_FOUND")
}
req := state.GetRequest{
Key: a.getModifiedStateKey(in.Key),
Options: state.GetStateOption{
Consistency: in.Consistency,
},
}
getResponse, err := a.stateStores[storeName].Get(&req)
if err != nil {
return nil, fmt.Errorf("ERR_STATE_GET: %s", err)
}
response := &dapr_pb.GetStateResponseEnvelope{}
if getResponse != nil {
response.Etag = getResponse.ETag
response.Data = &any.Any{Value: getResponse.Data}
}
return response, nil
}
func (a *api) SaveState(ctx context.Context, in *dapr_pb.SaveStateEnvelope) (*empty.Empty, error) {
if a.stateStores == nil || len(a.stateStores) == 0 {
return &empty.Empty{}, errors.New("ERR_STATE_STORE_NOT_CONFIGURED")
}
storeName := in.StoreName
if a.stateStores[storeName] == nil {
return &empty.Empty{}, errors.New("ERR_STATE_STORE_NOT_FOUND")
}
reqs := []state.SetRequest{}
for _, s := range in.Requests {
req := state.SetRequest{
Key: a.getModifiedStateKey(s.Key),
Metadata: s.Metadata,
Value: s.Value.Value,
}
if s.Options != nil {
req.Options = state.SetStateOption{
Consistency: s.Options.Consistency,
Concurrency: s.Options.Concurrency,
}
if s.Options.RetryPolicy != nil {
req.Options.RetryPolicy = state.RetryPolicy{
Threshold: int(s.Options.RetryPolicy.Threshold),
Pattern: s.Options.RetryPolicy.Pattern,
}
if s.Options.RetryPolicy.Interval != nil {
dur, err := duration(s.Options.RetryPolicy.Interval)
if err == nil {
req.Options.RetryPolicy.Interval = dur
}
}
}
}
reqs = append(reqs, req)
}
err := a.stateStores[storeName].BulkSet(reqs)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_STATE_SAVE: %s", err)
}
return &empty.Empty{}, nil
}
func (a *api) DeleteState(ctx context.Context, in *dapr_pb.DeleteStateEnvelope) (*empty.Empty, error) {
if a.stateStores == nil || len(a.stateStores) == 0 {
return &empty.Empty{}, errors.New("ERR_STATE_STORE_NOT_CONFIGURED")
}
storeName := in.StoreName
if a.stateStores[storeName] == nil {
return &empty.Empty{}, errors.New("ERR_STATE_STORE_NOT_FOUND")
}
req := state.DeleteRequest{
Key: a.getModifiedStateKey(in.Key),
ETag: in.Etag,
}
if in.Options != nil {
req.Options = state.DeleteStateOption{
Concurrency: in.Options.Concurrency,
Consistency: in.Options.Consistency,
}
if in.Options.RetryPolicy != nil {
retryPolicy := state.RetryPolicy{
Threshold: int(in.Options.RetryPolicy.Threshold),
Pattern: in.Options.RetryPolicy.Pattern,
}
if in.Options.RetryPolicy.Interval != nil {
dur, err := duration(in.Options.RetryPolicy.Interval)
if err == nil {
retryPolicy.Interval = dur
}
}
req.Options.RetryPolicy = retryPolicy
}
}
err := a.stateStores[storeName].Delete(&req)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_STATE_DELETE: failed deleting state with key %s: %s", in.Key, err)
}
return &empty.Empty{}, nil
}
func (a *api) | (key string) string {
if a.id != "" {
return fmt.Sprintf("%s%s%s", a.id, daprSeparator, key)
}
return key
}
func duration(p *durpb.Duration) (time.Duration, error) {
if err := validateDuration(p); err != nil {
return 0, err
}
d := time.Duration(p.Seconds) * time.Second
if int64(d/time.Second) != p.Seconds {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
if p.Nanos != 0 {
d += time.Duration(p.Nanos) * time.Nanosecond
if (d < 0) != (p.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
}
return d, nil
}
func validateDuration(d *durpb.Duration) error {
if d == nil {
return errors.New("duration: nil Duration")
}
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
return fmt.Errorf("duration: %v: seconds out of range", d)
}
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
return fmt.Errorf("duration: %v: nanos out of range", d)
}
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
}
return nil
}
| getModifiedStateKey | identifier_name |
api.go | // ------------------------------------------------------------
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// ------------------------------------------------------------
package grpc
import (
"context"
"errors"
"fmt"
"time"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/pubsub"
"github.com/dapr/components-contrib/state"
"github.com/dapr/dapr/pkg/actors"
components_v1alpha1 "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
"github.com/dapr/dapr/pkg/channel"
"github.com/dapr/dapr/pkg/components"
"github.com/dapr/dapr/pkg/messaging"
dapr_pb "github.com/dapr/dapr/pkg/proto/dapr"
daprinternal_pb "github.com/dapr/dapr/pkg/proto/daprinternal"
"github.com/golang/protobuf/ptypes/any"
durpb "github.com/golang/protobuf/ptypes/duration"
"github.com/golang/protobuf/ptypes/empty"
"github.com/google/uuid"
jsoniter "github.com/json-iterator/go"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
// Range of a durpb.Duration in seconds, as specified in
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
daprSeparator = "||"
)
// API is the gRPC interface for the Dapr gRPC API. It implements both the internal and external proto definitions.
type API interface {
CallActor(ctx context.Context, in *daprinternal_pb.CallActorEnvelope) (*daprinternal_pb.InvokeResponse, error)
CallLocal(ctx context.Context, in *daprinternal_pb.LocalCallEnvelope) (*daprinternal_pb.InvokeResponse, error)
UpdateComponent(ctx context.Context, in *daprinternal_pb.Component) (*empty.Empty, error)
PublishEvent(ctx context.Context, in *dapr_pb.PublishEventEnvelope) (*empty.Empty, error)
InvokeService(ctx context.Context, in *dapr_pb.InvokeServiceEnvelope) (*dapr_pb.InvokeServiceResponseEnvelope, error)
InvokeBinding(ctx context.Context, in *dapr_pb.InvokeBindingEnvelope) (*empty.Empty, error)
GetState(ctx context.Context, in *dapr_pb.GetStateEnvelope) (*dapr_pb.GetStateResponseEnvelope, error)
SaveState(ctx context.Context, in *dapr_pb.SaveStateEnvelope) (*empty.Empty, error)
DeleteState(ctx context.Context, in *dapr_pb.DeleteStateEnvelope) (*empty.Empty, error)
}
type api struct {
actor actors.Actors
directMessaging messaging.DirectMessaging
componentsHandler components.ComponentHandler
appChannel channel.AppChannel
stateStores map[string]state.Store
pubSub pubsub.PubSub
id string
sendToOutputBindingFn func(name string, req *bindings.WriteRequest) error
}
// NewAPI returns a new gRPC API
func NewAPI(daprID string, appChannel channel.AppChannel, stateStores map[string]state.Store, pubSub pubsub.PubSub, directMessaging messaging.DirectMessaging, actor actors.Actors, sendToOutputBindingFn func(name string, req *bindings.WriteRequest) error, componentHandler components.ComponentHandler) API {
return &api{
directMessaging: directMessaging,
componentsHandler: componentHandler,
actor: actor,
id: daprID,
appChannel: appChannel,
pubSub: pubSub,
stateStores: stateStores,
sendToOutputBindingFn: sendToOutputBindingFn,
}
}
// CallLocal is used for internal dapr to dapr calls. It is invoked by another Dapr instance with a request to the local app.
func (a *api) CallLocal(ctx context.Context, in *daprinternal_pb.LocalCallEnvelope) (*daprinternal_pb.InvokeResponse, error) {
if a.appChannel == nil {
return nil, errors.New("app channel is not initialized")
}
req := channel.InvokeRequest{
Payload: in.Data.Value,
Method: in.Method,
Metadata: in.Metadata,
}
resp, err := a.appChannel.InvokeMethod(&req)
if err != nil {
return nil, err
}
return &daprinternal_pb.InvokeResponse{
Data: &any.Any{Value: resp.Data},
Metadata: resp.Metadata,
}, nil
}
// CallActor invokes a virtual actor
func (a *api) CallActor(ctx context.Context, in *daprinternal_pb.CallActorEnvelope) (*daprinternal_pb.InvokeResponse, error) {
req := actors.CallRequest{
ActorType: in.ActorType,
ActorID: in.ActorID,
Data: in.Data.Value,
Method: in.Method,
Metadata: in.Metadata,
}
resp, err := a.actor.Call(&req)
if err != nil {
return nil, err
}
return &daprinternal_pb.InvokeResponse{
Data: &any.Any{Value: resp.Data},
Metadata: map[string]string{},
}, nil
}
// UpdateComponent is fired by the Dapr control plane when a component state changes
func (a *api) UpdateComponent(ctx context.Context, in *daprinternal_pb.Component) (*empty.Empty, error) {
c := components_v1alpha1.Component{
ObjectMeta: meta_v1.ObjectMeta{
Name: in.Metadata.Name,
},
Auth: components_v1alpha1.Auth{
SecretStore: in.Auth.SecretStore,
},
}
for _, m := range in.Spec.Metadata {
c.Spec.Metadata = append(c.Spec.Metadata, components_v1alpha1.MetadataItem{
Name: m.Name,
Value: m.Value,
SecretKeyRef: components_v1alpha1.SecretKeyRef{
Key: m.SecretKeyRef.Key,
Name: m.SecretKeyRef.Name,
},
})
}
a.componentsHandler.OnComponentUpdated(c)
return &empty.Empty{}, nil
}
func (a *api) PublishEvent(ctx context.Context, in *dapr_pb.PublishEventEnvelope) (*empty.Empty, error) {
if a.pubSub == nil {
return &empty.Empty{}, errors.New("ERR_PUBSUB_NOT_FOUND")
}
topic := in.Topic
body := []byte{}
if in.Data != nil {
body = in.Data.Value
}
envelope := pubsub.NewCloudEventsEnvelope(uuid.New().String(), a.id, pubsub.DefaultCloudEventType, body)
b, err := jsoniter.ConfigFastest.Marshal(envelope)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_PUBSUB_CLOUD_EVENTS_SER: %s", err)
}
req := pubsub.PublishRequest{
Topic: topic,
Data: b,
}
err = a.pubSub.Publish(&req)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_PUBSUB_PUBLISH_MESSAGE: %s", err)
}
return &empty.Empty{}, nil
}
func (a *api) InvokeService(ctx context.Context, in *dapr_pb.InvokeServiceEnvelope) (*dapr_pb.InvokeServiceResponseEnvelope, error) {
req := messaging.DirectMessageRequest{
Data: in.Data.Value,
Method: in.Method,
Metadata: in.Metadata,
Target: in.Id,
}
resp, err := a.directMessaging.Invoke(&req)
if err != nil {
return nil, err
}
return &dapr_pb.InvokeServiceResponseEnvelope{
Data: &any.Any{Value: resp.Data},
Metadata: resp.Metadata,
}, nil
}
func (a *api) InvokeBinding(ctx context.Context, in *dapr_pb.InvokeBindingEnvelope) (*empty.Empty, error) {
req := &bindings.WriteRequest{
Metadata: in.Metadata,
}
if in.Data != nil {
req.Data = in.Data.Value
}
err := a.sendToOutputBindingFn(in.Name, req)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_INVOKE_OUTPUT_BINDING: %s", err)
}
return &empty.Empty{}, nil
}
func (a *api) GetState(ctx context.Context, in *dapr_pb.GetStateEnvelope) (*dapr_pb.GetStateResponseEnvelope, error) {
if a.stateStores == nil || len(a.stateStores) == 0 {
return nil, errors.New("ERR_STATE_STORE_NOT_CONFIGURED")
}
storeName := in.StoreName
if a.stateStores[storeName] == nil {
return nil, errors.New("ERR_STATE_STORE_NOT_FOUND")
}
req := state.GetRequest{
Key: a.getModifiedStateKey(in.Key),
Options: state.GetStateOption{
Consistency: in.Consistency,
},
}
getResponse, err := a.stateStores[storeName].Get(&req)
if err != nil {
return nil, fmt.Errorf("ERR_STATE_GET: %s", err)
}
response := &dapr_pb.GetStateResponseEnvelope{}
if getResponse != nil {
response.Etag = getResponse.ETag
response.Data = &any.Any{Value: getResponse.Data}
}
return response, nil
}
func (a *api) SaveState(ctx context.Context, in *dapr_pb.SaveStateEnvelope) (*empty.Empty, error) {
if a.stateStores == nil || len(a.stateStores) == 0 {
return &empty.Empty{}, errors.New("ERR_STATE_STORE_NOT_CONFIGURED")
}
storeName := in.StoreName
if a.stateStores[storeName] == nil {
return &empty.Empty{}, errors.New("ERR_STATE_STORE_NOT_FOUND")
} | for _, s := range in.Requests {
req := state.SetRequest{
Key: a.getModifiedStateKey(s.Key),
Metadata: s.Metadata,
Value: s.Value.Value,
}
if s.Options != nil {
req.Options = state.SetStateOption{
Consistency: s.Options.Consistency,
Concurrency: s.Options.Concurrency,
}
if s.Options.RetryPolicy != nil {
req.Options.RetryPolicy = state.RetryPolicy{
Threshold: int(s.Options.RetryPolicy.Threshold),
Pattern: s.Options.RetryPolicy.Pattern,
}
if s.Options.RetryPolicy.Interval != nil {
dur, err := duration(s.Options.RetryPolicy.Interval)
if err == nil {
req.Options.RetryPolicy.Interval = dur
}
}
}
}
reqs = append(reqs, req)
}
err := a.stateStores[storeName].BulkSet(reqs)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_STATE_SAVE: %s", err)
}
return &empty.Empty{}, nil
}
func (a *api) DeleteState(ctx context.Context, in *dapr_pb.DeleteStateEnvelope) (*empty.Empty, error) {
if a.stateStores == nil || len(a.stateStores) == 0 {
return &empty.Empty{}, errors.New("ERR_STATE_STORE_NOT_CONFIGURED")
}
storeName := in.StoreName
if a.stateStores[storeName] == nil {
return &empty.Empty{}, errors.New("ERR_STATE_STORE_NOT_FOUND")
}
req := state.DeleteRequest{
Key: a.getModifiedStateKey(in.Key),
ETag: in.Etag,
}
if in.Options != nil {
req.Options = state.DeleteStateOption{
Concurrency: in.Options.Concurrency,
Consistency: in.Options.Consistency,
}
if in.Options.RetryPolicy != nil {
retryPolicy := state.RetryPolicy{
Threshold: int(in.Options.RetryPolicy.Threshold),
Pattern: in.Options.RetryPolicy.Pattern,
}
if in.Options.RetryPolicy.Interval != nil {
dur, err := duration(in.Options.RetryPolicy.Interval)
if err == nil {
retryPolicy.Interval = dur
}
}
req.Options.RetryPolicy = retryPolicy
}
}
err := a.stateStores[storeName].Delete(&req)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_STATE_DELETE: failed deleting state with key %s: %s", in.Key, err)
}
return &empty.Empty{}, nil
}
func (a *api) getModifiedStateKey(key string) string {
if a.id != "" {
return fmt.Sprintf("%s%s%s", a.id, daprSeparator, key)
}
return key
}
func duration(p *durpb.Duration) (time.Duration, error) {
if err := validateDuration(p); err != nil {
return 0, err
}
d := time.Duration(p.Seconds) * time.Second
if int64(d/time.Second) != p.Seconds {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
if p.Nanos != 0 {
d += time.Duration(p.Nanos) * time.Nanosecond
if (d < 0) != (p.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
}
return d, nil
}
func validateDuration(d *durpb.Duration) error {
if d == nil {
return errors.New("duration: nil Duration")
}
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
return fmt.Errorf("duration: %v: seconds out of range", d)
}
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
return fmt.Errorf("duration: %v: nanos out of range", d)
}
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
}
return nil
} |
reqs := []state.SetRequest{} | random_line_split |
api.go | // ------------------------------------------------------------
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// ------------------------------------------------------------
package grpc
import (
"context"
"errors"
"fmt"
"time"
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/pubsub"
"github.com/dapr/components-contrib/state"
"github.com/dapr/dapr/pkg/actors"
components_v1alpha1 "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
"github.com/dapr/dapr/pkg/channel"
"github.com/dapr/dapr/pkg/components"
"github.com/dapr/dapr/pkg/messaging"
dapr_pb "github.com/dapr/dapr/pkg/proto/dapr"
daprinternal_pb "github.com/dapr/dapr/pkg/proto/daprinternal"
"github.com/golang/protobuf/ptypes/any"
durpb "github.com/golang/protobuf/ptypes/duration"
"github.com/golang/protobuf/ptypes/empty"
"github.com/google/uuid"
jsoniter "github.com/json-iterator/go"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
// Range of a durpb.Duration in seconds, as specified in
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
daprSeparator = "||"
)
// API is the gRPC interface for the Dapr gRPC API. It implements both the internal and external proto definitions.
type API interface {
CallActor(ctx context.Context, in *daprinternal_pb.CallActorEnvelope) (*daprinternal_pb.InvokeResponse, error)
CallLocal(ctx context.Context, in *daprinternal_pb.LocalCallEnvelope) (*daprinternal_pb.InvokeResponse, error)
UpdateComponent(ctx context.Context, in *daprinternal_pb.Component) (*empty.Empty, error)
PublishEvent(ctx context.Context, in *dapr_pb.PublishEventEnvelope) (*empty.Empty, error)
InvokeService(ctx context.Context, in *dapr_pb.InvokeServiceEnvelope) (*dapr_pb.InvokeServiceResponseEnvelope, error)
InvokeBinding(ctx context.Context, in *dapr_pb.InvokeBindingEnvelope) (*empty.Empty, error)
GetState(ctx context.Context, in *dapr_pb.GetStateEnvelope) (*dapr_pb.GetStateResponseEnvelope, error)
SaveState(ctx context.Context, in *dapr_pb.SaveStateEnvelope) (*empty.Empty, error)
DeleteState(ctx context.Context, in *dapr_pb.DeleteStateEnvelope) (*empty.Empty, error)
}
type api struct {
actor actors.Actors
directMessaging messaging.DirectMessaging
componentsHandler components.ComponentHandler
appChannel channel.AppChannel
stateStores map[string]state.Store
pubSub pubsub.PubSub
id string
sendToOutputBindingFn func(name string, req *bindings.WriteRequest) error
}
// NewAPI returns a new gRPC API
func NewAPI(daprID string, appChannel channel.AppChannel, stateStores map[string]state.Store, pubSub pubsub.PubSub, directMessaging messaging.DirectMessaging, actor actors.Actors, sendToOutputBindingFn func(name string, req *bindings.WriteRequest) error, componentHandler components.ComponentHandler) API {
return &api{
directMessaging: directMessaging,
componentsHandler: componentHandler,
actor: actor,
id: daprID,
appChannel: appChannel,
pubSub: pubSub,
stateStores: stateStores,
sendToOutputBindingFn: sendToOutputBindingFn,
}
}
// CallLocal is used for internal dapr to dapr calls. It is invoked by another Dapr instance with a request to the local app.
func (a *api) CallLocal(ctx context.Context, in *daprinternal_pb.LocalCallEnvelope) (*daprinternal_pb.InvokeResponse, error) {
if a.appChannel == nil {
return nil, errors.New("app channel is not initialized")
}
req := channel.InvokeRequest{
Payload: in.Data.Value,
Method: in.Method,
Metadata: in.Metadata,
}
resp, err := a.appChannel.InvokeMethod(&req)
if err != nil {
return nil, err
}
return &daprinternal_pb.InvokeResponse{
Data: &any.Any{Value: resp.Data},
Metadata: resp.Metadata,
}, nil
}
// CallActor invokes a virtual actor
func (a *api) CallActor(ctx context.Context, in *daprinternal_pb.CallActorEnvelope) (*daprinternal_pb.InvokeResponse, error) {
req := actors.CallRequest{
ActorType: in.ActorType,
ActorID: in.ActorID,
Data: in.Data.Value,
Method: in.Method,
Metadata: in.Metadata,
}
resp, err := a.actor.Call(&req)
if err != nil {
return nil, err
}
return &daprinternal_pb.InvokeResponse{
Data: &any.Any{Value: resp.Data},
Metadata: map[string]string{},
}, nil
}
// UpdateComponent is fired by the Dapr control plane when a component state changes
func (a *api) UpdateComponent(ctx context.Context, in *daprinternal_pb.Component) (*empty.Empty, error) {
c := components_v1alpha1.Component{
ObjectMeta: meta_v1.ObjectMeta{
Name: in.Metadata.Name,
},
Auth: components_v1alpha1.Auth{
SecretStore: in.Auth.SecretStore,
},
}
for _, m := range in.Spec.Metadata {
c.Spec.Metadata = append(c.Spec.Metadata, components_v1alpha1.MetadataItem{
Name: m.Name,
Value: m.Value,
SecretKeyRef: components_v1alpha1.SecretKeyRef{
Key: m.SecretKeyRef.Key,
Name: m.SecretKeyRef.Name,
},
})
}
a.componentsHandler.OnComponentUpdated(c)
return &empty.Empty{}, nil
}
func (a *api) PublishEvent(ctx context.Context, in *dapr_pb.PublishEventEnvelope) (*empty.Empty, error) {
if a.pubSub == nil {
return &empty.Empty{}, errors.New("ERR_PUBSUB_NOT_FOUND")
}
topic := in.Topic
body := []byte{}
if in.Data != nil {
body = in.Data.Value
}
envelope := pubsub.NewCloudEventsEnvelope(uuid.New().String(), a.id, pubsub.DefaultCloudEventType, body)
b, err := jsoniter.ConfigFastest.Marshal(envelope)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_PUBSUB_CLOUD_EVENTS_SER: %s", err)
}
req := pubsub.PublishRequest{
Topic: topic,
Data: b,
}
err = a.pubSub.Publish(&req)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_PUBSUB_PUBLISH_MESSAGE: %s", err)
}
return &empty.Empty{}, nil
}
func (a *api) InvokeService(ctx context.Context, in *dapr_pb.InvokeServiceEnvelope) (*dapr_pb.InvokeServiceResponseEnvelope, error) {
req := messaging.DirectMessageRequest{
Data: in.Data.Value,
Method: in.Method,
Metadata: in.Metadata,
Target: in.Id,
}
resp, err := a.directMessaging.Invoke(&req)
if err != nil {
return nil, err
}
return &dapr_pb.InvokeServiceResponseEnvelope{
Data: &any.Any{Value: resp.Data},
Metadata: resp.Metadata,
}, nil
}
func (a *api) InvokeBinding(ctx context.Context, in *dapr_pb.InvokeBindingEnvelope) (*empty.Empty, error) {
req := &bindings.WriteRequest{
Metadata: in.Metadata,
}
if in.Data != nil {
req.Data = in.Data.Value
}
err := a.sendToOutputBindingFn(in.Name, req)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_INVOKE_OUTPUT_BINDING: %s", err)
}
return &empty.Empty{}, nil
}
func (a *api) GetState(ctx context.Context, in *dapr_pb.GetStateEnvelope) (*dapr_pb.GetStateResponseEnvelope, error) {
if a.stateStores == nil || len(a.stateStores) == 0 {
return nil, errors.New("ERR_STATE_STORE_NOT_CONFIGURED")
}
storeName := in.StoreName
if a.stateStores[storeName] == nil {
return nil, errors.New("ERR_STATE_STORE_NOT_FOUND")
}
req := state.GetRequest{
Key: a.getModifiedStateKey(in.Key),
Options: state.GetStateOption{
Consistency: in.Consistency,
},
}
getResponse, err := a.stateStores[storeName].Get(&req)
if err != nil {
return nil, fmt.Errorf("ERR_STATE_GET: %s", err)
}
response := &dapr_pb.GetStateResponseEnvelope{}
if getResponse != nil {
response.Etag = getResponse.ETag
response.Data = &any.Any{Value: getResponse.Data}
}
return response, nil
}
func (a *api) SaveState(ctx context.Context, in *dapr_pb.SaveStateEnvelope) (*empty.Empty, error) |
func (a *api) DeleteState(ctx context.Context, in *dapr_pb.DeleteStateEnvelope) (*empty.Empty, error) {
if a.stateStores == nil || len(a.stateStores) == 0 {
return &empty.Empty{}, errors.New("ERR_STATE_STORE_NOT_CONFIGURED")
}
storeName := in.StoreName
if a.stateStores[storeName] == nil {
return &empty.Empty{}, errors.New("ERR_STATE_STORE_NOT_FOUND")
}
req := state.DeleteRequest{
Key: a.getModifiedStateKey(in.Key),
ETag: in.Etag,
}
if in.Options != nil {
req.Options = state.DeleteStateOption{
Concurrency: in.Options.Concurrency,
Consistency: in.Options.Consistency,
}
if in.Options.RetryPolicy != nil {
retryPolicy := state.RetryPolicy{
Threshold: int(in.Options.RetryPolicy.Threshold),
Pattern: in.Options.RetryPolicy.Pattern,
}
if in.Options.RetryPolicy.Interval != nil {
dur, err := duration(in.Options.RetryPolicy.Interval)
if err == nil {
retryPolicy.Interval = dur
}
}
req.Options.RetryPolicy = retryPolicy
}
}
err := a.stateStores[storeName].Delete(&req)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_STATE_DELETE: failed deleting state with key %s: %s", in.Key, err)
}
return &empty.Empty{}, nil
}
func (a *api) getModifiedStateKey(key string) string {
if a.id != "" {
return fmt.Sprintf("%s%s%s", a.id, daprSeparator, key)
}
return key
}
func duration(p *durpb.Duration) (time.Duration, error) {
if err := validateDuration(p); err != nil {
return 0, err
}
d := time.Duration(p.Seconds) * time.Second
if int64(d/time.Second) != p.Seconds {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
if p.Nanos != 0 {
d += time.Duration(p.Nanos) * time.Nanosecond
if (d < 0) != (p.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
}
return d, nil
}
func validateDuration(d *durpb.Duration) error {
if d == nil {
return errors.New("duration: nil Duration")
}
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
return fmt.Errorf("duration: %v: seconds out of range", d)
}
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
return fmt.Errorf("duration: %v: nanos out of range", d)
}
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
}
return nil
}
| {
if a.stateStores == nil || len(a.stateStores) == 0 {
return &empty.Empty{}, errors.New("ERR_STATE_STORE_NOT_CONFIGURED")
}
storeName := in.StoreName
if a.stateStores[storeName] == nil {
return &empty.Empty{}, errors.New("ERR_STATE_STORE_NOT_FOUND")
}
reqs := []state.SetRequest{}
for _, s := range in.Requests {
req := state.SetRequest{
Key: a.getModifiedStateKey(s.Key),
Metadata: s.Metadata,
Value: s.Value.Value,
}
if s.Options != nil {
req.Options = state.SetStateOption{
Consistency: s.Options.Consistency,
Concurrency: s.Options.Concurrency,
}
if s.Options.RetryPolicy != nil {
req.Options.RetryPolicy = state.RetryPolicy{
Threshold: int(s.Options.RetryPolicy.Threshold),
Pattern: s.Options.RetryPolicy.Pattern,
}
if s.Options.RetryPolicy.Interval != nil {
dur, err := duration(s.Options.RetryPolicy.Interval)
if err == nil {
req.Options.RetryPolicy.Interval = dur
}
}
}
}
reqs = append(reqs, req)
}
err := a.stateStores[storeName].BulkSet(reqs)
if err != nil {
return &empty.Empty{}, fmt.Errorf("ERR_STATE_SAVE: %s", err)
}
return &empty.Empty{}, nil
} | identifier_body |
relay.py | import time
import zones
import smbus
import GD
import system
################################################################################
##
## Function: ActivateSystemRelay (I2CPort, systemRelay, setHigh)
##
## Parameters: I2CPort - I2C smbus object
## systemRelay - the relay to activate
## setHigh - True for high, False for low
##
## Returns:
##
## Globals modified:
##
## Comments: Looks up the status of a system control bit and sets relay on or off as required. Note: relays are active low drive.
##
################################################################################
def ActivateSystemRelay (I2CPort, systemRelay, setHigh) :
# TEMP TO ALLOW WITHOUT SYSTEM HARDWARE
## if systemRelay not in (GD.SYSTEM_RAD_PUMP, GD.SYSTEM_UFH_PUMP) : return
# Get the I2C parameters from our system control data.
address = system.systemControl [systemRelay].GetAddress ()
mask = system.systemControl [systemRelay].GetBitMask ()
# Read existing relay status for all relay bits at this address.
relayStatus = I2CPort.read_byte (address)
# Do we need to set this bit high or low?
if setHigh == True :
# We need to turn relay on so invert bit mask so bit we want is now zero, for active low, and all others will be high.
# AND it with existing bits to clear the bit we need to set low output for relay.
mask ^= 0xff
relayStatus &= mask
else :
# Need to turn relay off so we can just OR the bit with the existing bits to set high output for relay.
relayStatus |= mask
# Write new relay staus back for all the relay bits at this address.
I2CPort.write_byte (address, relayStatus)
################################################################################
##
## Function: UpdateSystemOutputs (I2CPort)
##
## Parameters:
##
## Returns:
##
## Globals modified:
##
## Comments: Scan through the system output config bits and update a relay that needs to be changed.
##
################################################################################
def UpdateSystemOutputs (I2CPort) :
# Scan through the outputs until we find one that needs updating.
for systemOutput in (GD.SYSTEM_OUTPUT_GROUP) :
# Only do I2C transfer if relay needs updating.
if system.systemControl [systemOutput].CheckIfBitChanged () == True :
# Set relay as required by system status
setHigh = system.systemControl [systemOutput].CheckIfBitHigh ()
ActivateSystemRelay (I2CPort, systemOutput, setHigh)
# Update status for bit now we have done relay update.
system.systemControl [systemOutput].UpdateBitChangedStatus ()
# Now that we have updated a relay we will leave. This is so that we only update 1 relay every time we are called,
# which is once a second. This will minmise power surges on the system as devices will be powered gradually rather
# than all at once.
break
for systemConfig in (GD.TANK_2_MANUAL_OVERRIDE_GROUP) :
system.systemControl [systemConfig].UpdateBitChangedStatus ()
################################################################################
##
## Function: UpdatePulsedOutputLines ()
##
## Parameters:
##
## Returns:
##
## Globals modified:
##
## Comments: Scan through and update all the output line pulse timers. If any are finished set the output line low.
##
################################################################################
def UpdatePulsedOutputLines (I2CPort) :
for systemOutput in GD.SYSTEM_PULSED_OUTPUTS_GROUP :
if system.systemControl [systemOutput].CheckIfBitTimerFinished () == True :
ActivateSystemRelay (I2CPort, systemOutput, False)
print 'SET IT LOW'
################################################################################
##
## Function: PulseLatchingRelay (I2CPort, register, relayBit)
##
## Parameters: I2CPort - I2C smbus object
## register - the I2C address of the relay controller
## relayBit - the binary bit of the bit to be pulsed
##
## Returns:
##
## Globals modified:
##
## Comments: Pulses the required relay specified by relayBit. We are controlling latching relays in the valve relay matrix so
## we have to give the required activate time.
##
################################################################################
def PulseLatchingRelay (I2CPort, register, relayBit) :
# Read existing relay status.
|
################################################################################
##
## Function: ActivateHeatingZoneRelay (I2CPort, relayZone)
##
## Parameters: I2CPort - I2C smbus object
## relayZone - integer - the zone to check if activation required.
##
## Returns:
##
## Globals modified:
##
## Comments:
##
################################################################################
def ActivateHeatingZoneRelay (I2CPort, relayZone) :
# Find out if status of this zone has changed, if it now needs to be on or needs a cleardown.
statusChanged = zones.zoneData[relayZone].CheckIfZoneStatusChanged () == True
statusOn = zones.zoneData[relayZone].CheckIfZoneOnRequested () == True
clearDown = zones.zoneData[relayZone].CheckIfTimeForCleardown () == True
# If the status has changed we need to update our current status.
if statusChanged :
zones.zoneData[relayZone].UpdateCurrentZoneStatus()
# Has the status changed or is it time for cleardown on this zone?
if statusChanged or clearDown :
print 'STATUS CHANGED'
# Select the correct I2C status register for UFH or RAD relays.
register = GD.I2C_ADDRESS_0X38 if relayZone >= 14 else GD.I2C_ADDRESS_0X39
# Read the status register to get the current state of the pump bit. Mask off all except pump bit.
# Bits are active low - so a 0 = on.
relays = I2CPort.read_byte (register)
relays &= 0x80
# Invert the pump bit as these are active low outputs and we are going to OR in bits.
relays ^= 0x80
# OR in the zone required (bits 0-3). Adjust to get UFH zone in range 0-15 from 14-29.
relays |= relayZone
if relayZone >= 14 :
relays -= 14
# Invert bits to make active low outputs.
relays ^= 0xff
# Activate the zone select relays in sequence so only 1 relay at a time is powered up to minimise current surge.
for bitSelect in (0x07, 0x03, 0x01, 0x00) :
I2CPort.write_byte (register, relays | bitSelect)
time.sleep (0.1)
# If we are in cleardown we need to turn off the power relay first.
if clearDown and not statusChanged :
# Set the mode bit to select the power relay (active low bit4).
relays &= ~0x10
# Send to relay register, wait for relays to stabilise.
I2CPort.write_byte (register, relays)
print 'select mode power ', hex(relays^0xff)
time.sleep (0.1)
# Now pulse OFF relay (active low bit6) to ensure power is removed from the valve.
PulseLatchingRelay (I2CPort, register, 0x40)
# Clear the mode bit to select the open relay (active low bit4), Wait for relay to stabilise.
relays |= 0x10
I2CPort.write_byte (register, relays)
print 'select open ', hex(relays^0xff)
time.sleep (0.1)
# We get here if there has been a status change or it is a cleardown. If it is a status change and the new status = ON
# then we will turn the relay on to open the valve when power is turned on. If it is a status change and the new
# status = OFF or there is no status change (must be a cleardown) we will turn the relay off. This will either close the
# valve if we apply power for a status change or simply turn the relay off for a cleardown.
# Do we need to open the valve?
if statusChanged and statusOn :
# Valve needs to open so pulse the ON relay (active low bit5).
PulseLatchingRelay (I2CPort, register, 0x20)
# Set the cleardown timer for the period required before we can clear the valve down. ADJUST THIS LATER
zones.zoneData[relayZone].SetCleardownTimer (30)
else :
# Valve needs to close so pulse OFF relay (active low bit6).
PulseLatchingRelay (I2CPort, register, 0x40)
# Set the cleardown timer for the period required before we can clear the valve down. ADJUST THIS LATER
zones.zoneData[relayZone].SetCleardownTimer (65)
# If we are here because of a status change we need to activate the power relay on. For a cleardown we do not need
# to do anything as a cleardown simply turns off the power and open relays.
if statusChanged :
# Set the mode bit to select power relay (active low bit4), wait for relay to stabilise.
relays &= ~0x10
I2CPort.write_byte (register, relays)
print 'select power ', hex(relays^0xff)
time.sleep (0.1)
# Now pulse ON relay (active low bit5) to turn power to the valve on.
PulseLatchingRelay (I2CPort, register, 0x20)
# Valve is now operating so set pump status required.
zones.zoneData[relayZone].UpdatePumpStatus()
# Not a status change so are we here because of a cleardown?
elif clearDown :
# Cleardown - now it is done cancel the timer.
zones.zoneData[relayZone].CancelCleardownTimer ()
# Relay operations complete so set all relays except pump to inactive.
# Deactivate the zone select relays in sequence so only 1 relay at a time is powered down to minimise back emfs.
for bitSelect in (0x01, 0x03, 0x07, 0x0F, 0x1F) :
I2CPort.write_byte (register, relays | bitSelect)
time.sleep (0.1)
#relays |= 0x7f
#I2CPort.write_byte (register, relays)
print 'relays all off ', hex(relays^0xff)
################################################################################
##
## Function:
##
## Parameters:
##
## Returns:
##
## Globals modified:
##
## Comments:
##
################################################################################
################################################################################
##
## Function:
##
## Parameters:
##
## Returns:
##
## Globals modified:
##
## Comments:
##
################################################################################
| relayStatus = I2CPort.read_byte (register)
# Set the relay to pulse (active low pulse).
relayStatus &= ~relayBit
# Pulse it low.
I2CPort.write_byte (register, relayStatus)
print 'pulse on ', hex(relayStatus^0xff)
# Give it some time to activate.
time.sleep (0.1)
# Now set up to clear the relay pulse (set it back high).
relayStatus |= relayBit
# Restore the level high.
I2CPort.write_byte (register, relayStatus)
print 'pulse off ', hex(relayStatus^0xff)
# Give it time to activate.
time.sleep (0.1) | identifier_body |
relay.py | import time
import zones
import smbus
import GD
import system
################################################################################
##
## Function: ActivateSystemRelay (I2CPort, systemRelay, setHigh)
##
## Parameters: I2CPort - I2C smbus object
## systemRelay - the relay to activate
## setHigh - True for high, False for low
##
## Returns:
##
## Globals modified:
##
## Comments: Looks up the status of a system control bit and sets relay on or off as required. Note: relays are active low drive.
##
################################################################################
def ActivateSystemRelay (I2CPort, systemRelay, setHigh) :
# TEMP TO ALLOW WITHOUT SYSTEM HARDWARE
## if systemRelay not in (GD.SYSTEM_RAD_PUMP, GD.SYSTEM_UFH_PUMP) : return
# Get the I2C parameters from our system control data.
address = system.systemControl [systemRelay].GetAddress ()
mask = system.systemControl [systemRelay].GetBitMask ()
# Read existing relay status for all relay bits at this address.
relayStatus = I2CPort.read_byte (address)
# Do we need to set this bit high or low?
if setHigh == True :
# We need to turn relay on so invert bit mask so bit we want is now zero, for active low, and all others will be high.
# AND it with existing bits to clear the bit we need to set low output for relay.
mask ^= 0xff
relayStatus &= mask
else :
# Need to turn relay off so we can just OR the bit with the existing bits to set high output for relay.
relayStatus |= mask
# Write new relay staus back for all the relay bits at this address.
I2CPort.write_byte (address, relayStatus)
################################################################################
##
## Function: UpdateSystemOutputs (I2CPort)
##
## Parameters:
##
## Returns:
##
## Globals modified:
##
## Comments: Scan through the system output config bits and update a relay that needs to be changed.
##
################################################################################
def UpdateSystemOutputs (I2CPort) :
# Scan through the outputs until we find one that needs updating.
for systemOutput in (GD.SYSTEM_OUTPUT_GROUP) :
# Only do I2C transfer if relay needs updating.
if system.systemControl [systemOutput].CheckIfBitChanged () == True :
# Set relay as required by system status
setHigh = system.systemControl [systemOutput].CheckIfBitHigh ()
ActivateSystemRelay (I2CPort, systemOutput, setHigh)
# Update status for bit now we have done relay update.
system.systemControl [systemOutput].UpdateBitChangedStatus ()
# Now that we have updated a relay we will leave. This is so that we only update 1 relay every time we are called,
# which is once a second. This will minmise power surges on the system as devices will be powered gradually rather
# than all at once.
break
for systemConfig in (GD.TANK_2_MANUAL_OVERRIDE_GROUP) :
system.systemControl [systemConfig].UpdateBitChangedStatus ()
################################################################################
##
## Function: UpdatePulsedOutputLines ()
##
## Parameters:
##
## Returns:
##
## Globals modified:
##
## Comments: Scan through and update all the output line pulse timers. If any are finished set the output line low.
##
################################################################################
def | (I2CPort) :
for systemOutput in GD.SYSTEM_PULSED_OUTPUTS_GROUP :
if system.systemControl [systemOutput].CheckIfBitTimerFinished () == True :
ActivateSystemRelay (I2CPort, systemOutput, False)
print 'SET IT LOW'
################################################################################
##
## Function: PulseLatchingRelay (I2CPort, register, relayBit)
##
## Parameters: I2CPort - I2C smbus object
## register - the I2C address of the relay controller
## relayBit - the binary bit of the bit to be pulsed
##
## Returns:
##
## Globals modified:
##
## Comments: Pulses the required relay specified by relayBit. We are controlling latching relays in the valve relay matrix so
## we have to give the required activate time.
##
################################################################################
def PulseLatchingRelay (I2CPort, register, relayBit) :
# Read existing relay status.
relayStatus = I2CPort.read_byte (register)
# Set the relay to pulse (active low pulse).
relayStatus &= ~relayBit
# Pulse it low.
I2CPort.write_byte (register, relayStatus)
print 'pulse on ', hex(relayStatus^0xff)
# Give it some time to activate.
time.sleep (0.1)
# Now set up to clear the relay pulse (set it back high).
relayStatus |= relayBit
# Restore the level high.
I2CPort.write_byte (register, relayStatus)
print 'pulse off ', hex(relayStatus^0xff)
# Give it time to activate.
time.sleep (0.1)
################################################################################
##
## Function: ActivateHeatingZoneRelay (I2CPort, relayZone)
##
## Parameters: I2CPort - I2C smbus object
## relayZone - integer - the zone to check if activation required.
##
## Returns:
##
## Globals modified:
##
## Comments:
##
################################################################################
def ActivateHeatingZoneRelay (I2CPort, relayZone) :
# Find out if status of this zone has changed, if it now needs to be on or needs a cleardown.
statusChanged = zones.zoneData[relayZone].CheckIfZoneStatusChanged () == True
statusOn = zones.zoneData[relayZone].CheckIfZoneOnRequested () == True
clearDown = zones.zoneData[relayZone].CheckIfTimeForCleardown () == True
# If the status has changed we need to update our current status.
if statusChanged :
zones.zoneData[relayZone].UpdateCurrentZoneStatus()
# Has the status changed or is it time for cleardown on this zone?
if statusChanged or clearDown :
print 'STATUS CHANGED'
# Select the correct I2C status register for UFH or RAD relays.
register = GD.I2C_ADDRESS_0X38 if relayZone >= 14 else GD.I2C_ADDRESS_0X39
# Read the status register to get the current state of the pump bit. Mask off all except pump bit.
# Bits are active low - so a 0 = on.
relays = I2CPort.read_byte (register)
relays &= 0x80
# Invert the pump bit as these are active low outputs and we are going to OR in bits.
relays ^= 0x80
# OR in the zone required (bits 0-3). Adjust to get UFH zone in range 0-15 from 14-29.
relays |= relayZone
if relayZone >= 14 :
relays -= 14
# Invert bits to make active low outputs.
relays ^= 0xff
# Activate the zone select relays in sequence so only 1 relay at a time is powered up to minimise current surge.
for bitSelect in (0x07, 0x03, 0x01, 0x00) :
I2CPort.write_byte (register, relays | bitSelect)
time.sleep (0.1)
# If we are in cleardown we need to turn off the power relay first.
if clearDown and not statusChanged :
# Set the mode bit to select the power relay (active low bit4).
relays &= ~0x10
# Send to relay register, wait for relays to stabilise.
I2CPort.write_byte (register, relays)
print 'select mode power ', hex(relays^0xff)
time.sleep (0.1)
# Now pulse OFF relay (active low bit6) to ensure power is removed from the valve.
PulseLatchingRelay (I2CPort, register, 0x40)
# Clear the mode bit to select the open relay (active low bit4), Wait for relay to stabilise.
relays |= 0x10
I2CPort.write_byte (register, relays)
print 'select open ', hex(relays^0xff)
time.sleep (0.1)
# We get here if there has been a status change or it is a cleardown. If it is a status change and the new status = ON
# then we will turn the relay on to open the valve when power is turned on. If it is a status change and the new
# status = OFF or there is no status change (must be a cleardown) we will turn the relay off. This will either close the
# valve if we apply power for a status change or simply turn the relay off for a cleardown.
# Do we need to open the valve?
if statusChanged and statusOn :
# Valve needs to open so pulse the ON relay (active low bit5).
PulseLatchingRelay (I2CPort, register, 0x20)
# Set the cleardown timer for the period required before we can clear the valve down. ADJUST THIS LATER
zones.zoneData[relayZone].SetCleardownTimer (30)
else :
# Valve needs to close so pulse OFF relay (active low bit6).
PulseLatchingRelay (I2CPort, register, 0x40)
# Set the cleardown timer for the period required before we can clear the valve down. ADJUST THIS LATER
zones.zoneData[relayZone].SetCleardownTimer (65)
# If we are here because of a status change we need to activate the power relay on. For a cleardown we do not need
# to do anything as a cleardown simply turns off the power and open relays.
if statusChanged :
# Set the mode bit to select power relay (active low bit4), wait for relay to stabilise.
relays &= ~0x10
I2CPort.write_byte (register, relays)
print 'select power ', hex(relays^0xff)
time.sleep (0.1)
# Now pulse ON relay (active low bit5) to turn power to the valve on.
PulseLatchingRelay (I2CPort, register, 0x20)
# Valve is now operating so set pump status required.
zones.zoneData[relayZone].UpdatePumpStatus()
# Not a status change so are we here because of a cleardown?
elif clearDown :
# Cleardown - now it is done cancel the timer.
zones.zoneData[relayZone].CancelCleardownTimer ()
# Relay operations complete so set all relays except pump to inactive.
# Deactivate the zone select relays in sequence so only 1 relay at a time is powered down to minimise back emfs.
for bitSelect in (0x01, 0x03, 0x07, 0x0F, 0x1F) :
I2CPort.write_byte (register, relays | bitSelect)
time.sleep (0.1)
#relays |= 0x7f
#I2CPort.write_byte (register, relays)
print 'relays all off ', hex(relays^0xff)
################################################################################
##
## Function:
##
## Parameters:
##
## Returns:
##
## Globals modified:
##
## Comments:
##
################################################################################
################################################################################
##
## Function:
##
## Parameters:
##
## Returns:
##
## Globals modified:
##
## Comments:
##
################################################################################
| UpdatePulsedOutputLines | identifier_name |
relay.py | import time
import zones
import smbus
import GD
import system
################################################################################
##
## Function: ActivateSystemRelay (I2CPort, systemRelay, setHigh)
##
## Parameters: I2CPort - I2C smbus object
## systemRelay - the relay to activate
## setHigh - True for high, False for low
##
## Returns:
##
## Globals modified:
##
## Comments: Looks up the status of a system control bit and sets relay on or off as required. Note: relays are active low drive.
##
################################################################################
def ActivateSystemRelay (I2CPort, systemRelay, setHigh) :
# TEMP TO ALLOW WITHOUT SYSTEM HARDWARE
## if systemRelay not in (GD.SYSTEM_RAD_PUMP, GD.SYSTEM_UFH_PUMP) : return
# Get the I2C parameters from our system control data.
address = system.systemControl [systemRelay].GetAddress ()
mask = system.systemControl [systemRelay].GetBitMask ()
# Read existing relay status for all relay bits at this address.
relayStatus = I2CPort.read_byte (address)
# Do we need to set this bit high or low?
if setHigh == True :
# We need to turn relay on so invert bit mask so bit we want is now zero, for active low, and all others will be high.
# AND it with existing bits to clear the bit we need to set low output for relay.
mask ^= 0xff
relayStatus &= mask
else :
# Need to turn relay off so we can just OR the bit with the existing bits to set high output for relay.
relayStatus |= mask
# Write new relay staus back for all the relay bits at this address.
I2CPort.write_byte (address, relayStatus)
################################################################################
##
## Function: UpdateSystemOutputs (I2CPort)
##
## Parameters:
##
## Returns:
##
## Globals modified:
##
## Comments: Scan through the system output config bits and update a relay that needs to be changed.
##
################################################################################
def UpdateSystemOutputs (I2CPort) :
# Scan through the outputs until we find one that needs updating.
for systemOutput in (GD.SYSTEM_OUTPUT_GROUP) :
# Only do I2C transfer if relay needs updating.
if system.systemControl [systemOutput].CheckIfBitChanged () == True :
# Set relay as required by system status
setHigh = system.systemControl [systemOutput].CheckIfBitHigh ()
ActivateSystemRelay (I2CPort, systemOutput, setHigh)
# Update status for bit now we have done relay update.
system.systemControl [systemOutput].UpdateBitChangedStatus ()
# Now that we have updated a relay we will leave. This is so that we only update 1 relay every time we are called,
# which is once a second. This will minmise power surges on the system as devices will be powered gradually rather
# than all at once.
break
for systemConfig in (GD.TANK_2_MANUAL_OVERRIDE_GROUP) :
system.systemControl [systemConfig].UpdateBitChangedStatus ()
################################################################################
##
## Function: UpdatePulsedOutputLines ()
##
## Parameters:
##
## Returns:
##
## Globals modified:
##
## Comments: Scan through and update all the output line pulse timers. If any are finished set the output line low.
##
################################################################################
def UpdatePulsedOutputLines (I2CPort) :
for systemOutput in GD.SYSTEM_PULSED_OUTPUTS_GROUP :
if system.systemControl [systemOutput].CheckIfBitTimerFinished () == True :
ActivateSystemRelay (I2CPort, systemOutput, False)
print 'SET IT LOW'
################################################################################
##
## Function: PulseLatchingRelay (I2CPort, register, relayBit)
##
## Parameters: I2CPort - I2C smbus object
## register - the I2C address of the relay controller
## relayBit - the binary bit of the bit to be pulsed
##
## Returns:
##
## Globals modified:
##
## Comments: Pulses the required relay specified by relayBit. We are controlling latching relays in the valve relay matrix so
## we have to give the required activate time.
##
################################################################################
def PulseLatchingRelay (I2CPort, register, relayBit) :
# Read existing relay status.
relayStatus = I2CPort.read_byte (register)
# Set the relay to pulse (active low pulse).
relayStatus &= ~relayBit
# Pulse it low.
I2CPort.write_byte (register, relayStatus)
print 'pulse on ', hex(relayStatus^0xff)
# Give it some time to activate.
time.sleep (0.1)
# Now set up to clear the relay pulse (set it back high).
relayStatus |= relayBit
# Restore the level high.
I2CPort.write_byte (register, relayStatus)
print 'pulse off ', hex(relayStatus^0xff)
# Give it time to activate.
time.sleep (0.1)
################################################################################
##
## Function: ActivateHeatingZoneRelay (I2CPort, relayZone)
##
## Parameters: I2CPort - I2C smbus object
## relayZone - integer - the zone to check if activation required.
##
## Returns:
##
## Globals modified:
##
## Comments:
##
################################################################################
def ActivateHeatingZoneRelay (I2CPort, relayZone) :
# Find out if status of this zone has changed, if it now needs to be on or needs a cleardown.
statusChanged = zones.zoneData[relayZone].CheckIfZoneStatusChanged () == True
statusOn = zones.zoneData[relayZone].CheckIfZoneOnRequested () == True
clearDown = zones.zoneData[relayZone].CheckIfTimeForCleardown () == True
# If the status has changed we need to update our current status.
if statusChanged :
|
# Has the status changed or is it time for cleardown on this zone?
if statusChanged or clearDown :
print 'STATUS CHANGED'
# Select the correct I2C status register for UFH or RAD relays.
register = GD.I2C_ADDRESS_0X38 if relayZone >= 14 else GD.I2C_ADDRESS_0X39
# Read the status register to get the current state of the pump bit. Mask off all except pump bit.
# Bits are active low - so a 0 = on.
relays = I2CPort.read_byte (register)
relays &= 0x80
# Invert the pump bit as these are active low outputs and we are going to OR in bits.
relays ^= 0x80
# OR in the zone required (bits 0-3). Adjust to get UFH zone in range 0-15 from 14-29.
relays |= relayZone
if relayZone >= 14 :
relays -= 14
# Invert bits to make active low outputs.
relays ^= 0xff
# Activate the zone select relays in sequence so only 1 relay at a time is powered up to minimise current surge.
for bitSelect in (0x07, 0x03, 0x01, 0x00) :
I2CPort.write_byte (register, relays | bitSelect)
time.sleep (0.1)
# If we are in cleardown we need to turn off the power relay first.
if clearDown and not statusChanged :
# Set the mode bit to select the power relay (active low bit4).
relays &= ~0x10
# Send to relay register, wait for relays to stabilise.
I2CPort.write_byte (register, relays)
print 'select mode power ', hex(relays^0xff)
time.sleep (0.1)
# Now pulse OFF relay (active low bit6) to ensure power is removed from the valve.
PulseLatchingRelay (I2CPort, register, 0x40)
# Clear the mode bit to select the open relay (active low bit4), Wait for relay to stabilise.
relays |= 0x10
I2CPort.write_byte (register, relays)
print 'select open ', hex(relays^0xff)
time.sleep (0.1)
# We get here if there has been a status change or it is a cleardown. If it is a status change and the new status = ON
# then we will turn the relay on to open the valve when power is turned on. If it is a status change and the new
# status = OFF or there is no status change (must be a cleardown) we will turn the relay off. This will either close the
# valve if we apply power for a status change or simply turn the relay off for a cleardown.
# Do we need to open the valve?
if statusChanged and statusOn :
# Valve needs to open so pulse the ON relay (active low bit5).
PulseLatchingRelay (I2CPort, register, 0x20)
# Set the cleardown timer for the period required before we can clear the valve down. ADJUST THIS LATER
zones.zoneData[relayZone].SetCleardownTimer (30)
else :
# Valve needs to close so pulse OFF relay (active low bit6).
PulseLatchingRelay (I2CPort, register, 0x40)
# Set the cleardown timer for the period required before we can clear the valve down. ADJUST THIS LATER
zones.zoneData[relayZone].SetCleardownTimer (65)
# If we are here because of a status change we need to activate the power relay on. For a cleardown we do not need
# to do anything as a cleardown simply turns off the power and open relays.
if statusChanged :
# Set the mode bit to select power relay (active low bit4), wait for relay to stabilise.
relays &= ~0x10
I2CPort.write_byte (register, relays)
print 'select power ', hex(relays^0xff)
time.sleep (0.1)
# Now pulse ON relay (active low bit5) to turn power to the valve on.
PulseLatchingRelay (I2CPort, register, 0x20)
# Valve is now operating so set pump status required.
zones.zoneData[relayZone].UpdatePumpStatus()
# Not a status change so are we here because of a cleardown?
elif clearDown :
# Cleardown - now it is done cancel the timer.
zones.zoneData[relayZone].CancelCleardownTimer ()
# Relay operations complete so set all relays except pump to inactive.
# Deactivate the zone select relays in sequence so only 1 relay at a time is powered down to minimise back emfs.
for bitSelect in (0x01, 0x03, 0x07, 0x0F, 0x1F) :
I2CPort.write_byte (register, relays | bitSelect)
time.sleep (0.1)
#relays |= 0x7f
#I2CPort.write_byte (register, relays)
print 'relays all off ', hex(relays^0xff)
################################################################################
##
## Function:
##
## Parameters:
##
## Returns:
##
## Globals modified:
##
## Comments:
##
################################################################################
################################################################################
##
## Function:
##
## Parameters:
##
## Returns:
##
## Globals modified:
##
## Comments:
##
################################################################################
| zones.zoneData[relayZone].UpdateCurrentZoneStatus() | conditional_block |
relay.py | import time
import zones
import smbus
import GD
import system
################################################################################
##
## Function: ActivateSystemRelay (I2CPort, systemRelay, setHigh)
##
## Parameters: I2CPort - I2C smbus object
## systemRelay - the relay to activate
## setHigh - True for high, False for low
##
## Returns:
##
## Globals modified:
##
## Comments: Looks up the status of a system control bit and sets relay on or off as required. Note: relays are active low drive.
##
################################################################################
def ActivateSystemRelay (I2CPort, systemRelay, setHigh) : |
# TEMP TO ALLOW WITHOUT SYSTEM HARDWARE
## if systemRelay not in (GD.SYSTEM_RAD_PUMP, GD.SYSTEM_UFH_PUMP) : return
# Get the I2C parameters from our system control data.
address = system.systemControl [systemRelay].GetAddress ()
mask = system.systemControl [systemRelay].GetBitMask ()
# Read existing relay status for all relay bits at this address.
relayStatus = I2CPort.read_byte (address)
# Do we need to set this bit high or low?
if setHigh == True :
# We need to turn relay on so invert bit mask so bit we want is now zero, for active low, and all others will be high.
# AND it with existing bits to clear the bit we need to set low output for relay.
mask ^= 0xff
relayStatus &= mask
else :
# Need to turn relay off so we can just OR the bit with the existing bits to set high output for relay.
relayStatus |= mask
# Write new relay staus back for all the relay bits at this address.
I2CPort.write_byte (address, relayStatus)
################################################################################
##
## Function: UpdateSystemOutputs (I2CPort)
##
## Parameters:
##
## Returns:
##
## Globals modified:
##
## Comments: Scan through the system output config bits and update a relay that needs to be changed.
##
################################################################################
def UpdateSystemOutputs (I2CPort) :
# Scan through the outputs until we find one that needs updating.
for systemOutput in (GD.SYSTEM_OUTPUT_GROUP) :
# Only do I2C transfer if relay needs updating.
if system.systemControl [systemOutput].CheckIfBitChanged () == True :
# Set relay as required by system status
setHigh = system.systemControl [systemOutput].CheckIfBitHigh ()
ActivateSystemRelay (I2CPort, systemOutput, setHigh)
# Update status for bit now we have done relay update.
system.systemControl [systemOutput].UpdateBitChangedStatus ()
# Now that we have updated a relay we will leave. This is so that we only update 1 relay every time we are called,
# which is once a second. This will minmise power surges on the system as devices will be powered gradually rather
# than all at once.
break
for systemConfig in (GD.TANK_2_MANUAL_OVERRIDE_GROUP) :
system.systemControl [systemConfig].UpdateBitChangedStatus ()
################################################################################
##
## Function: UpdatePulsedOutputLines ()
##
## Parameters:
##
## Returns:
##
## Globals modified:
##
## Comments: Scan through and update all the output line pulse timers. If any are finished set the output line low.
##
################################################################################
def UpdatePulsedOutputLines (I2CPort) :
for systemOutput in GD.SYSTEM_PULSED_OUTPUTS_GROUP :
if system.systemControl [systemOutput].CheckIfBitTimerFinished () == True :
ActivateSystemRelay (I2CPort, systemOutput, False)
print 'SET IT LOW'
################################################################################
##
## Function: PulseLatchingRelay (I2CPort, register, relayBit)
##
## Parameters: I2CPort - I2C smbus object
## register - the I2C address of the relay controller
## relayBit - the binary bit of the bit to be pulsed
##
## Returns:
##
## Globals modified:
##
## Comments: Pulses the required relay specified by relayBit. We are controlling latching relays in the valve relay matrix so
## we have to give the required activate time.
##
################################################################################
def PulseLatchingRelay (I2CPort, register, relayBit) :
# Read existing relay status.
relayStatus = I2CPort.read_byte (register)
# Set the relay to pulse (active low pulse).
relayStatus &= ~relayBit
# Pulse it low.
I2CPort.write_byte (register, relayStatus)
print 'pulse on ', hex(relayStatus^0xff)
# Give it some time to activate.
time.sleep (0.1)
# Now set up to clear the relay pulse (set it back high).
relayStatus |= relayBit
# Restore the level high.
I2CPort.write_byte (register, relayStatus)
print 'pulse off ', hex(relayStatus^0xff)
# Give it time to activate.
time.sleep (0.1)
################################################################################
##
## Function: ActivateHeatingZoneRelay (I2CPort, relayZone)
##
## Parameters: I2CPort - I2C smbus object
## relayZone - integer - the zone to check if activation required.
##
## Returns:
##
## Globals modified:
##
## Comments:
##
################################################################################
def ActivateHeatingZoneRelay (I2CPort, relayZone) :
# Find out if status of this zone has changed, if it now needs to be on or needs a cleardown.
statusChanged = zones.zoneData[relayZone].CheckIfZoneStatusChanged () == True
statusOn = zones.zoneData[relayZone].CheckIfZoneOnRequested () == True
clearDown = zones.zoneData[relayZone].CheckIfTimeForCleardown () == True
# If the status has changed we need to update our current status.
if statusChanged :
zones.zoneData[relayZone].UpdateCurrentZoneStatus()
# Has the status changed or is it time for cleardown on this zone?
if statusChanged or clearDown :
print 'STATUS CHANGED'
# Select the correct I2C status register for UFH or RAD relays.
register = GD.I2C_ADDRESS_0X38 if relayZone >= 14 else GD.I2C_ADDRESS_0X39
# Read the status register to get the current state of the pump bit. Mask off all except pump bit.
# Bits are active low - so a 0 = on.
relays = I2CPort.read_byte (register)
relays &= 0x80
# Invert the pump bit as these are active low outputs and we are going to OR in bits.
relays ^= 0x80
# OR in the zone required (bits 0-3). Adjust to get UFH zone in range 0-15 from 14-29.
relays |= relayZone
if relayZone >= 14 :
relays -= 14
# Invert bits to make active low outputs.
relays ^= 0xff
# Activate the zone select relays in sequence so only 1 relay at a time is powered up to minimise current surge.
for bitSelect in (0x07, 0x03, 0x01, 0x00) :
I2CPort.write_byte (register, relays | bitSelect)
time.sleep (0.1)
# If we are in cleardown we need to turn off the power relay first.
if clearDown and not statusChanged :
# Set the mode bit to select the power relay (active low bit4).
relays &= ~0x10
# Send to relay register, wait for relays to stabilise.
I2CPort.write_byte (register, relays)
print 'select mode power ', hex(relays^0xff)
time.sleep (0.1)
# Now pulse OFF relay (active low bit6) to ensure power is removed from the valve.
PulseLatchingRelay (I2CPort, register, 0x40)
# Clear the mode bit to select the open relay (active low bit4), Wait for relay to stabilise.
relays |= 0x10
I2CPort.write_byte (register, relays)
print 'select open ', hex(relays^0xff)
time.sleep (0.1)
# We get here if there has been a status change or it is a cleardown. If it is a status change and the new status = ON
# then we will turn the relay on to open the valve when power is turned on. If it is a status change and the new
# status = OFF or there is no status change (must be a cleardown) we will turn the relay off. This will either close the
# valve if we apply power for a status change or simply turn the relay off for a cleardown.
# Do we need to open the valve?
if statusChanged and statusOn :
# Valve needs to open so pulse the ON relay (active low bit5).
PulseLatchingRelay (I2CPort, register, 0x20)
# Set the cleardown timer for the period required before we can clear the valve down. ADJUST THIS LATER
zones.zoneData[relayZone].SetCleardownTimer (30)
else :
# Valve needs to close so pulse OFF relay (active low bit6).
PulseLatchingRelay (I2CPort, register, 0x40)
# Set the cleardown timer for the period required before we can clear the valve down. ADJUST THIS LATER
zones.zoneData[relayZone].SetCleardownTimer (65)
# If we are here because of a status change we need to activate the power relay on. For a cleardown we do not need
# to do anything as a cleardown simply turns off the power and open relays.
if statusChanged :
# Set the mode bit to select power relay (active low bit4), wait for relay to stabilise.
relays &= ~0x10
I2CPort.write_byte (register, relays)
print 'select power ', hex(relays^0xff)
time.sleep (0.1)
# Now pulse ON relay (active low bit5) to turn power to the valve on.
PulseLatchingRelay (I2CPort, register, 0x20)
# Valve is now operating so set pump status required.
zones.zoneData[relayZone].UpdatePumpStatus()
# Not a status change so are we here because of a cleardown?
elif clearDown :
# Cleardown - now it is done cancel the timer.
zones.zoneData[relayZone].CancelCleardownTimer ()
# Relay operations complete so set all relays except pump to inactive.
# Deactivate the zone select relays in sequence so only 1 relay at a time is powered down to minimise back emfs.
for bitSelect in (0x01, 0x03, 0x07, 0x0F, 0x1F) :
I2CPort.write_byte (register, relays | bitSelect)
time.sleep (0.1)
#relays |= 0x7f
#I2CPort.write_byte (register, relays)
print 'relays all off ', hex(relays^0xff)
################################################################################
##
## Function:
##
## Parameters:
##
## Returns:
##
## Globals modified:
##
## Comments:
##
################################################################################
################################################################################
##
## Function:
##
## Parameters:
##
## Returns:
##
## Globals modified:
##
## Comments:
##
################################################################################ | random_line_split | |
util.rs | // Copyright 2019-2023 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{
collections::VecDeque,
future::Future,
sync::{
atomic::{self, AtomicU64},
Arc,
},
};
use crate::ipld::{CidHashSet, Ipld};
use crate::shim::clock::ChainEpoch;
use crate::utils::db::car_stream::Block;
use crate::utils::io::progress_log::WithProgressRaw;
use crate::{
blocks::{BlockHeader, Tipset},
utils::encoding::from_slice_with_fallback,
};
use cid::Cid;
use futures::Stream;
use fvm_ipld_blockstore::Blockstore;
use lazy_static::lazy_static;
use pin_project_lite::pin_project;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Traverses all Cid links, hashing and loading all unique values and using the
/// callback function to interact with the data.
#[async_recursion::async_recursion]
async fn traverse_ipld_links_hash<F, T>(
walked: &mut CidHashSet,
load_block: &mut F,
ipld: &Ipld,
on_inserted: &(impl Fn(usize) + Send + Sync),
) -> Result<(), anyhow::Error>
where
F: FnMut(Cid) -> T + Send,
T: Future<Output = Result<Vec<u8>, anyhow::Error>> + Send,
{
match ipld {
Ipld::Map(m) => {
for (_, v) in m.iter() {
traverse_ipld_links_hash(walked, load_block, v, on_inserted).await?;
}
}
Ipld::List(list) => {
for v in list.iter() {
traverse_ipld_links_hash(walked, load_block, v, on_inserted).await?;
}
}
&Ipld::Link(cid) => {
// WASM blocks are stored as IPLD_RAW. They should be loaded but not traversed.
if cid.codec() == crate::shim::crypto::IPLD_RAW {
if !walked.insert(cid) {
return Ok(());
}
on_inserted(walked.len());
let _ = load_block(cid).await?;
}
if cid.codec() == fvm_ipld_encoding::DAG_CBOR {
if !walked.insert(cid) {
return Ok(());
}
on_inserted(walked.len());
let bytes = load_block(cid).await?;
let ipld = from_slice_with_fallback(&bytes)?;
traverse_ipld_links_hash(walked, load_block, &ipld, on_inserted).await?;
}
}
_ => (),
}
Ok(())
}
/// Load and hash CIDs and resolve recursively.
pub async fn recurse_links_hash<F, T>(
walked: &mut CidHashSet,
root: Cid,
load_block: &mut F,
on_inserted: &(impl Fn(usize) + Send + Sync),
) -> Result<(), anyhow::Error>
where
F: FnMut(Cid) -> T + Send,
T: Future<Output = Result<Vec<u8>, anyhow::Error>> + Send,
{
if !walked.insert(root) {
// Cid has already been traversed
return Ok(());
}
on_inserted(walked.len());
if root.codec() != fvm_ipld_encoding::DAG_CBOR {
return Ok(());
}
let bytes = load_block(root).await?;
let ipld = from_slice_with_fallback(&bytes)?;
traverse_ipld_links_hash(walked, load_block, &ipld, on_inserted).await?;
Ok(())
}
pub type ProgressBarCurrentTotalPair = Arc<(AtomicU64, AtomicU64)>;
lazy_static! {
pub static ref WALK_SNAPSHOT_PROGRESS_DB_GC: ProgressBarCurrentTotalPair = Default::default();
}
/// Walks over tipset and state data and loads all blocks not yet seen.
/// This is tracked based on the callback function loading blocks.
pub async fn walk_snapshot<F, T>(
tipset: &Tipset,
recent_roots: i64,
mut load_block: F,
progress_bar_message: Option<&str>,
progress_tracker: Option<ProgressBarCurrentTotalPair>,
estimated_total_records: Option<u64>,
) -> anyhow::Result<usize>
where
F: FnMut(Cid) -> T + Send,
T: Future<Output = anyhow::Result<Vec<u8>>> + Send,
{
let estimated_total_records = estimated_total_records.unwrap_or_default();
let message = progress_bar_message.unwrap_or("Walking snapshot");
#[allow(deprecated)] // Tracking issue: https://github.com/ChainSafe/forest/issues/3157
let wp = WithProgressRaw::new(message, estimated_total_records);
let mut seen = CidHashSet::default();
let mut blocks_to_walk: VecDeque<Cid> = tipset.cids().to_vec().into();
let mut current_min_height = tipset.epoch();
let incl_roots_epoch = tipset.epoch() - recent_roots;
let on_inserted = {
let wp = wp.clone();
let progress_tracker = progress_tracker.clone();
move |len: usize| {
let progress = len as u64;
let total = progress.max(estimated_total_records);
wp.set(progress);
wp.set_total(total);
if let Some(progress_tracker) = &progress_tracker {
progress_tracker
.0
.store(progress, atomic::Ordering::Relaxed);
progress_tracker.1.store(total, atomic::Ordering::Relaxed);
}
}
};
while let Some(next) = blocks_to_walk.pop_front() {
if !seen.insert(next) {
continue;
};
on_inserted(seen.len());
if !should_save_block_to_snapshot(next) {
continue;
}
let data = load_block(next).await?;
let h = from_slice_with_fallback::<BlockHeader>(&data)?;
if current_min_height > h.epoch() {
current_min_height = h.epoch();
}
if h.epoch() > incl_roots_epoch {
recurse_links_hash(&mut seen, *h.messages(), &mut load_block, &on_inserted).await?;
}
if h.epoch() > 0 {
for p in h.parents().cids() {
blocks_to_walk.push_back(*p);
}
} else {
for p in h.parents().cids() {
load_block(*p).await?;
}
}
if h.epoch() == 0 || h.epoch() > incl_roots_epoch {
recurse_links_hash(&mut seen, *h.state_root(), &mut load_block, &on_inserted).await?;
}
}
Ok(seen.len())
}
fn should_save_block_to_snapshot(cid: Cid) -> bool {
// Don't include identity CIDs.
// We only include raw and dagcbor, for now.
// Raw for "code" CIDs.
if cid.hash().code() == u64::from(cid::multihash::Code::Identity) {
false
} else {
matches!(
cid.codec(),
crate::shim::crypto::IPLD_RAW | fvm_ipld_encoding::DAG_CBOR
)
}
}
/// Depth-first-search iterator for `ipld` leaf nodes.
///
/// This iterator consumes the given `ipld` structure and returns leaf nodes (i.e.,
/// no list or map) in depth-first order. The iterator can be extended at any
/// point by the caller.
///
/// Consider walking this `ipld` graph:
/// ```text
/// List
/// ├ Integer(5)
/// ├ Link(Y)
/// └ String("string")
///
/// Link(Y):
/// Map
/// ├ "key1" => Bool(true)
/// └ "key2" => Float(3.14)
/// ```
///
/// If we walk the above `ipld` graph (replacing `Link(Y)` when it is encountered), the leaf nodes will be seen in this order:
/// 1. `Integer(5)`
/// 2. `Bool(true)`
/// 3. `Float(3.14)`
/// 4. `String("string")`
pub struct DfsIter {
dfs: VecDeque<Ipld>,
}
impl DfsIter {
pub fn new(root: Ipld) -> Self {
DfsIter {
dfs: VecDeque::from([root]),
}
}
pub fn walk_next(&mut self, ipld: Ipld) {
self.dfs.push_front(ipld)
}
}
impl From<Cid> for DfsIter {
fn from(cid: Cid) -> Self {
DfsIter::new(Ipld::Link(cid))
}
}
impl Iterator for DfsIter {
type Item = Ipld;
fn next(&mut self) -> Option<Self::Item> {
while let Some(ipld) = self.dfs.pop_front() {
match ipld {
Ipld::List(list) => list.into_iter().rev().for_each(|elt| self.walk_next(elt)), | }
}
enum Task {
// Yield the block, don't visit it.
Emit(Cid),
// Visit all the elements, recursively.
Iterate(DfsIter),
}
pin_project! {
pub struct ChainStream<DB, T> {
#[pin]
tipset_iter: T,
db: DB,
dfs: VecDeque<Task>, // Depth-first work queue.
seen: CidHashSet,
stateroot_limit: ChainEpoch,
fail_on_dead_links: bool,
}
}
impl<DB, T> ChainStream<DB, T> {
pub fn with_seen(self, seen: CidHashSet) -> Self {
ChainStream { seen, ..self }
}
pub fn into_seen(self) -> CidHashSet {
self.seen
}
}
/// Stream all blocks that are reachable before the `stateroot_limit` epoch. After this limit, only
/// block headers are streamed. Any dead links are reported as errors.
///
/// # Arguments
///
/// * `db` - A database that implements [`Blockstore`] interface.
/// * `tipset_iter` - An iterator of [`Tipset`], descending order `$child -> $parent`.
/// * `stateroot_limit` - An epoch that signifies how far back we need to inspect tipsets.
/// in-depth. This has to be pre-calculated using this formula: `$cur_epoch - $depth`, where
/// `$depth` is the number of `[`Tipset`]` that needs inspection.
pub fn stream_chain<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin>(
db: DB,
tipset_iter: T,
stateroot_limit: ChainEpoch,
) -> ChainStream<DB, T> {
ChainStream {
tipset_iter,
db,
dfs: VecDeque::new(),
seen: CidHashSet::default(),
stateroot_limit,
fail_on_dead_links: true,
}
}
// Stream available graph in a depth-first search. All reachable nodes are touched and dead-links
// are ignored.
pub fn stream_graph<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin>(
db: DB,
tipset_iter: T,
) -> ChainStream<DB, T> {
ChainStream {
tipset_iter,
db,
dfs: VecDeque::new(),
seen: CidHashSet::default(),
stateroot_limit: 0,
fail_on_dead_links: false,
}
}
impl<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin> Stream for ChainStream<DB, T> {
type Item = anyhow::Result<Block>;
fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
use Task::*;
let mut this = self.project();
let stateroot_limit = *this.stateroot_limit;
loop {
while let Some(task) = this.dfs.front_mut() {
match task {
Emit(cid) => {
let cid = *cid;
this.dfs.pop_front();
if let Some(data) = this.db.get(&cid)? {
return Poll::Ready(Some(Ok(Block { cid, data })));
} else if *this.fail_on_dead_links {
return Poll::Ready(Some(Err(anyhow::anyhow!("missing key: {}", cid))));
}
}
Iterate(dfs_iter) => {
while let Some(ipld) = dfs_iter.next() {
if let Ipld::Link(cid) = ipld {
// The link traversal implementation assumes there are three types of encoding:
// 1. DAG_CBOR: needs to be reachable, so we add it to the queue and load.
// 2. IPLD_RAW: WASM blocks, for example. Need to be loaded, but not traversed.
// 3. _: ignore all other links
// Don't revisit what's already been visited.
if should_save_block_to_snapshot(cid) && this.seen.insert(cid) {
if let Some(data) = this.db.get(&cid)? {
if cid.codec() == fvm_ipld_encoding::DAG_CBOR {
let ipld: Ipld = from_slice_with_fallback(&data)?;
dfs_iter.walk_next(ipld);
}
return Poll::Ready(Some(Ok(Block { cid, data })));
} else if *this.fail_on_dead_links {
return Poll::Ready(Some(Err(anyhow::anyhow!(
"missing key: {}",
cid
))));
}
}
}
}
this.dfs.pop_front();
}
}
}
// This consumes a [`Tipset`] from the iterator one at a time. The next iteration of the
// enclosing loop is processing the queue. Once the desired depth has been reached -
// yield the block without walking the graph it represents.
if let Some(tipset) = this.tipset_iter.as_mut().next() {
for block in tipset.into_blocks().into_iter() {
if this.seen.insert(*block.cid()) {
// Make sure we always yield a block otherwise.
this.dfs.push_back(Emit(*block.cid()));
if block.epoch() == 0 {
// The genesis block has some kind of dummy parent that needs to be emitted.
for p in block.parents().cids() {
this.dfs.push_back(Emit(*p));
}
}
// Process block messages.
if block.epoch() > stateroot_limit {
this.dfs
.push_back(Iterate(DfsIter::from(*block.messages())));
}
// Visit the block if it's within required depth. And a special case for `0`
// epoch to match Lotus' implementation.
if block.epoch() == 0 || block.epoch() > stateroot_limit {
// NOTE: In the original `walk_snapshot` implementation we walk the dag
// immediately. Which is what we do here as well, but using a queue.
this.dfs
.push_back(Iterate(DfsIter::from(*block.state_root())));
}
}
}
} else {
// That's it, nothing else to do. End of stream.
return Poll::Ready(None);
}
}
}
} | Ipld::Map(map) => map.into_values().rev().for_each(|elt| self.walk_next(elt)),
other => return Some(other),
}
}
None | random_line_split |
util.rs | // Copyright 2019-2023 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{
collections::VecDeque,
future::Future,
sync::{
atomic::{self, AtomicU64},
Arc,
},
};
use crate::ipld::{CidHashSet, Ipld};
use crate::shim::clock::ChainEpoch;
use crate::utils::db::car_stream::Block;
use crate::utils::io::progress_log::WithProgressRaw;
use crate::{
blocks::{BlockHeader, Tipset},
utils::encoding::from_slice_with_fallback,
};
use cid::Cid;
use futures::Stream;
use fvm_ipld_blockstore::Blockstore;
use lazy_static::lazy_static;
use pin_project_lite::pin_project;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Traverses all Cid links, hashing and loading all unique values and using the
/// callback function to interact with the data.
#[async_recursion::async_recursion]
async fn traverse_ipld_links_hash<F, T>(
walked: &mut CidHashSet,
load_block: &mut F,
ipld: &Ipld,
on_inserted: &(impl Fn(usize) + Send + Sync),
) -> Result<(), anyhow::Error>
where
F: FnMut(Cid) -> T + Send,
T: Future<Output = Result<Vec<u8>, anyhow::Error>> + Send,
{
match ipld {
Ipld::Map(m) => {
for (_, v) in m.iter() {
traverse_ipld_links_hash(walked, load_block, v, on_inserted).await?;
}
}
Ipld::List(list) => {
for v in list.iter() {
traverse_ipld_links_hash(walked, load_block, v, on_inserted).await?;
}
}
&Ipld::Link(cid) => {
// WASM blocks are stored as IPLD_RAW. They should be loaded but not traversed.
if cid.codec() == crate::shim::crypto::IPLD_RAW {
if !walked.insert(cid) {
return Ok(());
}
on_inserted(walked.len());
let _ = load_block(cid).await?;
}
if cid.codec() == fvm_ipld_encoding::DAG_CBOR {
if !walked.insert(cid) {
return Ok(());
}
on_inserted(walked.len());
let bytes = load_block(cid).await?;
let ipld = from_slice_with_fallback(&bytes)?;
traverse_ipld_links_hash(walked, load_block, &ipld, on_inserted).await?;
}
}
_ => (),
}
Ok(())
}
/// Load and hash CIDs and resolve recursively.
pub async fn recurse_links_hash<F, T>(
walked: &mut CidHashSet,
root: Cid,
load_block: &mut F,
on_inserted: &(impl Fn(usize) + Send + Sync),
) -> Result<(), anyhow::Error>
where
F: FnMut(Cid) -> T + Send,
T: Future<Output = Result<Vec<u8>, anyhow::Error>> + Send,
{
if !walked.insert(root) {
// Cid has already been traversed
return Ok(());
}
on_inserted(walked.len());
if root.codec() != fvm_ipld_encoding::DAG_CBOR {
return Ok(());
}
let bytes = load_block(root).await?;
let ipld = from_slice_with_fallback(&bytes)?;
traverse_ipld_links_hash(walked, load_block, &ipld, on_inserted).await?;
Ok(())
}
pub type ProgressBarCurrentTotalPair = Arc<(AtomicU64, AtomicU64)>;
lazy_static! {
pub static ref WALK_SNAPSHOT_PROGRESS_DB_GC: ProgressBarCurrentTotalPair = Default::default();
}
/// Walks over tipset and state data and loads all blocks not yet seen.
/// This is tracked based on the callback function loading blocks.
pub async fn walk_snapshot<F, T>(
tipset: &Tipset,
recent_roots: i64,
mut load_block: F,
progress_bar_message: Option<&str>,
progress_tracker: Option<ProgressBarCurrentTotalPair>,
estimated_total_records: Option<u64>,
) -> anyhow::Result<usize>
where
F: FnMut(Cid) -> T + Send,
T: Future<Output = anyhow::Result<Vec<u8>>> + Send,
{
let estimated_total_records = estimated_total_records.unwrap_or_default();
let message = progress_bar_message.unwrap_or("Walking snapshot");
#[allow(deprecated)] // Tracking issue: https://github.com/ChainSafe/forest/issues/3157
let wp = WithProgressRaw::new(message, estimated_total_records);
let mut seen = CidHashSet::default();
let mut blocks_to_walk: VecDeque<Cid> = tipset.cids().to_vec().into();
let mut current_min_height = tipset.epoch();
let incl_roots_epoch = tipset.epoch() - recent_roots;
let on_inserted = {
let wp = wp.clone();
let progress_tracker = progress_tracker.clone();
move |len: usize| {
let progress = len as u64;
let total = progress.max(estimated_total_records);
wp.set(progress);
wp.set_total(total);
if let Some(progress_tracker) = &progress_tracker {
progress_tracker
.0
.store(progress, atomic::Ordering::Relaxed);
progress_tracker.1.store(total, atomic::Ordering::Relaxed);
}
}
};
while let Some(next) = blocks_to_walk.pop_front() {
if !seen.insert(next) {
continue;
};
on_inserted(seen.len());
if !should_save_block_to_snapshot(next) {
continue;
}
let data = load_block(next).await?;
let h = from_slice_with_fallback::<BlockHeader>(&data)?;
if current_min_height > h.epoch() {
current_min_height = h.epoch();
}
if h.epoch() > incl_roots_epoch {
recurse_links_hash(&mut seen, *h.messages(), &mut load_block, &on_inserted).await?;
}
if h.epoch() > 0 {
for p in h.parents().cids() {
blocks_to_walk.push_back(*p);
}
} else {
for p in h.parents().cids() {
load_block(*p).await?;
}
}
if h.epoch() == 0 || h.epoch() > incl_roots_epoch {
recurse_links_hash(&mut seen, *h.state_root(), &mut load_block, &on_inserted).await?;
}
}
Ok(seen.len())
}
fn should_save_block_to_snapshot(cid: Cid) -> bool |
/// Depth-first-search iterator for `ipld` leaf nodes.
///
/// This iterator consumes the given `ipld` structure and returns leaf nodes (i.e.,
/// no list or map) in depth-first order. The iterator can be extended at any
/// point by the caller.
///
/// Consider walking this `ipld` graph:
/// ```text
/// List
/// ├ Integer(5)
/// ├ Link(Y)
/// └ String("string")
///
/// Link(Y):
/// Map
/// ├ "key1" => Bool(true)
/// └ "key2" => Float(3.14)
/// ```
///
/// If we walk the above `ipld` graph (replacing `Link(Y)` when it is encountered), the leaf nodes will be seen in this order:
/// 1. `Integer(5)`
/// 2. `Bool(true)`
/// 3. `Float(3.14)`
/// 4. `String("string")`
pub struct DfsIter {
dfs: VecDeque<Ipld>,
}
impl DfsIter {
pub fn new(root: Ipld) -> Self {
DfsIter {
dfs: VecDeque::from([root]),
}
}
pub fn walk_next(&mut self, ipld: Ipld) {
self.dfs.push_front(ipld)
}
}
impl From<Cid> for DfsIter {
fn from(cid: Cid) -> Self {
DfsIter::new(Ipld::Link(cid))
}
}
impl Iterator for DfsIter {
type Item = Ipld;
fn next(&mut self) -> Option<Self::Item> {
while let Some(ipld) = self.dfs.pop_front() {
match ipld {
Ipld::List(list) => list.into_iter().rev().for_each(|elt| self.walk_next(elt)),
Ipld::Map(map) => map.into_values().rev().for_each(|elt| self.walk_next(elt)),
other => return Some(other),
}
}
None
}
}
enum Task {
// Yield the block, don't visit it.
Emit(Cid),
// Visit all the elements, recursively.
Iterate(DfsIter),
}
pin_project! {
pub struct ChainStream<DB, T> {
#[pin]
tipset_iter: T,
db: DB,
dfs: VecDeque<Task>, // Depth-first work queue.
seen: CidHashSet,
stateroot_limit: ChainEpoch,
fail_on_dead_links: bool,
}
}
impl<DB, T> ChainStream<DB, T> {
pub fn with_seen(self, seen: CidHashSet) -> Self {
ChainStream { seen, ..self }
}
pub fn into_seen(self) -> CidHashSet {
self.seen
}
}
/// Stream all blocks that are reachable before the `stateroot_limit` epoch. After this limit, only
/// block headers are streamed. Any dead links are reported as errors.
///
/// # Arguments
///
/// * `db` - A database that implements [`Blockstore`] interface.
/// * `tipset_iter` - An iterator of [`Tipset`], descending order `$child -> $parent`.
/// * `stateroot_limit` - An epoch that signifies how far back we need to inspect tipsets.
/// in-depth. This has to be pre-calculated using this formula: `$cur_epoch - $depth`, where
/// `$depth` is the number of `[`Tipset`]` that needs inspection.
pub fn stream_chain<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin>(
db: DB,
tipset_iter: T,
stateroot_limit: ChainEpoch,
) -> ChainStream<DB, T> {
ChainStream {
tipset_iter,
db,
dfs: VecDeque::new(),
seen: CidHashSet::default(),
stateroot_limit,
fail_on_dead_links: true,
}
}
// Stream available graph in a depth-first search. All reachable nodes are touched and dead-links
// are ignored.
pub fn stream_graph<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin>(
db: DB,
tipset_iter: T,
) -> ChainStream<DB, T> {
ChainStream {
tipset_iter,
db,
dfs: VecDeque::new(),
seen: CidHashSet::default(),
stateroot_limit: 0,
fail_on_dead_links: false,
}
}
impl<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin> Stream for ChainStream<DB, T> {
type Item = anyhow::Result<Block>;
fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
use Task::*;
let mut this = self.project();
let stateroot_limit = *this.stateroot_limit;
loop {
while let Some(task) = this.dfs.front_mut() {
match task {
Emit(cid) => {
let cid = *cid;
this.dfs.pop_front();
if let Some(data) = this.db.get(&cid)? {
return Poll::Ready(Some(Ok(Block { cid, data })));
} else if *this.fail_on_dead_links {
return Poll::Ready(Some(Err(anyhow::anyhow!("missing key: {}", cid))));
}
}
Iterate(dfs_iter) => {
while let Some(ipld) = dfs_iter.next() {
if let Ipld::Link(cid) = ipld {
// The link traversal implementation assumes there are three types of encoding:
// 1. DAG_CBOR: needs to be reachable, so we add it to the queue and load.
// 2. IPLD_RAW: WASM blocks, for example. Need to be loaded, but not traversed.
// 3. _: ignore all other links
// Don't revisit what's already been visited.
if should_save_block_to_snapshot(cid) && this.seen.insert(cid) {
if let Some(data) = this.db.get(&cid)? {
if cid.codec() == fvm_ipld_encoding::DAG_CBOR {
let ipld: Ipld = from_slice_with_fallback(&data)?;
dfs_iter.walk_next(ipld);
}
return Poll::Ready(Some(Ok(Block { cid, data })));
} else if *this.fail_on_dead_links {
return Poll::Ready(Some(Err(anyhow::anyhow!(
"missing key: {}",
cid
))));
}
}
}
}
this.dfs.pop_front();
}
}
}
// This consumes a [`Tipset`] from the iterator one at a time. The next iteration of the
// enclosing loop is processing the queue. Once the desired depth has been reached -
// yield the block without walking the graph it represents.
if let Some(tipset) = this.tipset_iter.as_mut().next() {
for block in tipset.into_blocks().into_iter() {
if this.seen.insert(*block.cid()) {
// Make sure we always yield a block otherwise.
this.dfs.push_back(Emit(*block.cid()));
if block.epoch() == 0 {
// The genesis block has some kind of dummy parent that needs to be emitted.
for p in block.parents().cids() {
this.dfs.push_back(Emit(*p));
}
}
// Process block messages.
if block.epoch() > stateroot_limit {
this.dfs
.push_back(Iterate(DfsIter::from(*block.messages())));
}
// Visit the block if it's within required depth. And a special case for `0`
// epoch to match Lotus' implementation.
if block.epoch() == 0 || block.epoch() > stateroot_limit {
// NOTE: In the original `walk_snapshot` implementation we walk the dag
// immediately. Which is what we do here as well, but using a queue.
this.dfs
.push_back(Iterate(DfsIter::from(*block.state_root())));
}
}
}
} else {
// That's it, nothing else to do. End of stream.
return Poll::Ready(None);
}
}
}
}
| {
// Don't include identity CIDs.
// We only include raw and dagcbor, for now.
// Raw for "code" CIDs.
if cid.hash().code() == u64::from(cid::multihash::Code::Identity) {
false
} else {
matches!(
cid.codec(),
crate::shim::crypto::IPLD_RAW | fvm_ipld_encoding::DAG_CBOR
)
}
} | identifier_body |
util.rs | // Copyright 2019-2023 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{
collections::VecDeque,
future::Future,
sync::{
atomic::{self, AtomicU64},
Arc,
},
};
use crate::ipld::{CidHashSet, Ipld};
use crate::shim::clock::ChainEpoch;
use crate::utils::db::car_stream::Block;
use crate::utils::io::progress_log::WithProgressRaw;
use crate::{
blocks::{BlockHeader, Tipset},
utils::encoding::from_slice_with_fallback,
};
use cid::Cid;
use futures::Stream;
use fvm_ipld_blockstore::Blockstore;
use lazy_static::lazy_static;
use pin_project_lite::pin_project;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Traverses all Cid links, hashing and loading all unique values and using the
/// callback function to interact with the data.
#[async_recursion::async_recursion]
async fn traverse_ipld_links_hash<F, T>(
walked: &mut CidHashSet,
load_block: &mut F,
ipld: &Ipld,
on_inserted: &(impl Fn(usize) + Send + Sync),
) -> Result<(), anyhow::Error>
where
F: FnMut(Cid) -> T + Send,
T: Future<Output = Result<Vec<u8>, anyhow::Error>> + Send,
{
match ipld {
Ipld::Map(m) => {
for (_, v) in m.iter() {
traverse_ipld_links_hash(walked, load_block, v, on_inserted).await?;
}
}
Ipld::List(list) => {
for v in list.iter() {
traverse_ipld_links_hash(walked, load_block, v, on_inserted).await?;
}
}
&Ipld::Link(cid) => {
// WASM blocks are stored as IPLD_RAW. They should be loaded but not traversed.
if cid.codec() == crate::shim::crypto::IPLD_RAW {
if !walked.insert(cid) {
return Ok(());
}
on_inserted(walked.len());
let _ = load_block(cid).await?;
}
if cid.codec() == fvm_ipld_encoding::DAG_CBOR {
if !walked.insert(cid) {
return Ok(());
}
on_inserted(walked.len());
let bytes = load_block(cid).await?;
let ipld = from_slice_with_fallback(&bytes)?;
traverse_ipld_links_hash(walked, load_block, &ipld, on_inserted).await?;
}
}
_ => (),
}
Ok(())
}
/// Load and hash CIDs and resolve recursively.
pub async fn recurse_links_hash<F, T>(
walked: &mut CidHashSet,
root: Cid,
load_block: &mut F,
on_inserted: &(impl Fn(usize) + Send + Sync),
) -> Result<(), anyhow::Error>
where
F: FnMut(Cid) -> T + Send,
T: Future<Output = Result<Vec<u8>, anyhow::Error>> + Send,
{
if !walked.insert(root) {
// Cid has already been traversed
return Ok(());
}
on_inserted(walked.len());
if root.codec() != fvm_ipld_encoding::DAG_CBOR {
return Ok(());
}
let bytes = load_block(root).await?;
let ipld = from_slice_with_fallback(&bytes)?;
traverse_ipld_links_hash(walked, load_block, &ipld, on_inserted).await?;
Ok(())
}
pub type ProgressBarCurrentTotalPair = Arc<(AtomicU64, AtomicU64)>;
lazy_static! {
pub static ref WALK_SNAPSHOT_PROGRESS_DB_GC: ProgressBarCurrentTotalPair = Default::default();
}
/// Walks over tipset and state data and loads all blocks not yet seen.
/// This is tracked based on the callback function loading blocks.
pub async fn walk_snapshot<F, T>(
tipset: &Tipset,
recent_roots: i64,
mut load_block: F,
progress_bar_message: Option<&str>,
progress_tracker: Option<ProgressBarCurrentTotalPair>,
estimated_total_records: Option<u64>,
) -> anyhow::Result<usize>
where
F: FnMut(Cid) -> T + Send,
T: Future<Output = anyhow::Result<Vec<u8>>> + Send,
{
let estimated_total_records = estimated_total_records.unwrap_or_default();
let message = progress_bar_message.unwrap_or("Walking snapshot");
#[allow(deprecated)] // Tracking issue: https://github.com/ChainSafe/forest/issues/3157
let wp = WithProgressRaw::new(message, estimated_total_records);
let mut seen = CidHashSet::default();
let mut blocks_to_walk: VecDeque<Cid> = tipset.cids().to_vec().into();
let mut current_min_height = tipset.epoch();
let incl_roots_epoch = tipset.epoch() - recent_roots;
let on_inserted = {
let wp = wp.clone();
let progress_tracker = progress_tracker.clone();
move |len: usize| {
let progress = len as u64;
let total = progress.max(estimated_total_records);
wp.set(progress);
wp.set_total(total);
if let Some(progress_tracker) = &progress_tracker {
progress_tracker
.0
.store(progress, atomic::Ordering::Relaxed);
progress_tracker.1.store(total, atomic::Ordering::Relaxed);
}
}
};
while let Some(next) = blocks_to_walk.pop_front() {
if !seen.insert(next) {
continue;
};
on_inserted(seen.len());
if !should_save_block_to_snapshot(next) {
continue;
}
let data = load_block(next).await?;
let h = from_slice_with_fallback::<BlockHeader>(&data)?;
if current_min_height > h.epoch() {
current_min_height = h.epoch();
}
if h.epoch() > incl_roots_epoch {
recurse_links_hash(&mut seen, *h.messages(), &mut load_block, &on_inserted).await?;
}
if h.epoch() > 0 {
for p in h.parents().cids() {
blocks_to_walk.push_back(*p);
}
} else {
for p in h.parents().cids() {
load_block(*p).await?;
}
}
if h.epoch() == 0 || h.epoch() > incl_roots_epoch {
recurse_links_hash(&mut seen, *h.state_root(), &mut load_block, &on_inserted).await?;
}
}
Ok(seen.len())
}
fn should_save_block_to_snapshot(cid: Cid) -> bool {
// Don't include identity CIDs.
// We only include raw and dagcbor, for now.
// Raw for "code" CIDs.
if cid.hash().code() == u64::from(cid::multihash::Code::Identity) {
false
} else {
matches!(
cid.codec(),
crate::shim::crypto::IPLD_RAW | fvm_ipld_encoding::DAG_CBOR
)
}
}
/// Depth-first-search iterator for `ipld` leaf nodes.
///
/// This iterator consumes the given `ipld` structure and returns leaf nodes (i.e.,
/// no list or map) in depth-first order. The iterator can be extended at any
/// point by the caller.
///
/// Consider walking this `ipld` graph:
/// ```text
/// List
/// ├ Integer(5)
/// ├ Link(Y)
/// └ String("string")
///
/// Link(Y):
/// Map
/// ├ "key1" => Bool(true)
/// └ "key2" => Float(3.14)
/// ```
///
/// If we walk the above `ipld` graph (replacing `Link(Y)` when it is encountered), the leaf nodes will be seen in this order:
/// 1. `Integer(5)`
/// 2. `Bool(true)`
/// 3. `Float(3.14)`
/// 4. `String("string")`
pub struct DfsIter {
dfs: VecDeque<Ipld>,
}
impl DfsIter {
pub fn new(root: Ipld) -> Self {
DfsIter {
dfs: VecDeque::from([root]),
}
}
pub fn walk_next(&mut self, ipld: Ipld) {
self.dfs.push_front(ipld)
}
}
impl From<Cid> for DfsIter {
fn from(cid: Cid) -> Self {
DfsIter::new(Ipld::Link(cid))
}
}
impl Iterator for DfsIter {
type Item = Ipld;
fn next(&mut self) -> Option<Self::Item> {
while let Some(ipld) = self.dfs.pop_front() {
match ipld {
Ipld::List(list) => list.into_iter().rev().for_each(|elt| self.walk_next(elt)),
Ipld::Map(map) => map.into_values().rev().for_each(|elt| self.walk_next(elt)),
other => return Some(other),
}
}
None
}
}
enum Task {
| Yield the block, don't visit it.
Emit(Cid),
// Visit all the elements, recursively.
Iterate(DfsIter),
}
pin_project! {
pub struct ChainStream<DB, T> {
#[pin]
tipset_iter: T,
db: DB,
dfs: VecDeque<Task>, // Depth-first work queue.
seen: CidHashSet,
stateroot_limit: ChainEpoch,
fail_on_dead_links: bool,
}
}
impl<DB, T> ChainStream<DB, T> {
pub fn with_seen(self, seen: CidHashSet) -> Self {
ChainStream { seen, ..self }
}
pub fn into_seen(self) -> CidHashSet {
self.seen
}
}
/// Stream all blocks that are reachable before the `stateroot_limit` epoch. After this limit, only
/// block headers are streamed. Any dead links are reported as errors.
///
/// # Arguments
///
/// * `db` - A database that implements [`Blockstore`] interface.
/// * `tipset_iter` - An iterator of [`Tipset`], descending order `$child -> $parent`.
/// * `stateroot_limit` - An epoch that signifies how far back we need to inspect tipsets.
/// in-depth. This has to be pre-calculated using this formula: `$cur_epoch - $depth`, where
/// `$depth` is the number of `[`Tipset`]` that needs inspection.
pub fn stream_chain<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin>(
db: DB,
tipset_iter: T,
stateroot_limit: ChainEpoch,
) -> ChainStream<DB, T> {
ChainStream {
tipset_iter,
db,
dfs: VecDeque::new(),
seen: CidHashSet::default(),
stateroot_limit,
fail_on_dead_links: true,
}
}
// Stream available graph in a depth-first search. All reachable nodes are touched and dead-links
// are ignored.
pub fn stream_graph<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin>(
db: DB,
tipset_iter: T,
) -> ChainStream<DB, T> {
ChainStream {
tipset_iter,
db,
dfs: VecDeque::new(),
seen: CidHashSet::default(),
stateroot_limit: 0,
fail_on_dead_links: false,
}
}
impl<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin> Stream for ChainStream<DB, T> {
type Item = anyhow::Result<Block>;
fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
use Task::*;
let mut this = self.project();
let stateroot_limit = *this.stateroot_limit;
loop {
while let Some(task) = this.dfs.front_mut() {
match task {
Emit(cid) => {
let cid = *cid;
this.dfs.pop_front();
if let Some(data) = this.db.get(&cid)? {
return Poll::Ready(Some(Ok(Block { cid, data })));
} else if *this.fail_on_dead_links {
return Poll::Ready(Some(Err(anyhow::anyhow!("missing key: {}", cid))));
}
}
Iterate(dfs_iter) => {
while let Some(ipld) = dfs_iter.next() {
if let Ipld::Link(cid) = ipld {
// The link traversal implementation assumes there are three types of encoding:
// 1. DAG_CBOR: needs to be reachable, so we add it to the queue and load.
// 2. IPLD_RAW: WASM blocks, for example. Need to be loaded, but not traversed.
// 3. _: ignore all other links
// Don't revisit what's already been visited.
if should_save_block_to_snapshot(cid) && this.seen.insert(cid) {
if let Some(data) = this.db.get(&cid)? {
if cid.codec() == fvm_ipld_encoding::DAG_CBOR {
let ipld: Ipld = from_slice_with_fallback(&data)?;
dfs_iter.walk_next(ipld);
}
return Poll::Ready(Some(Ok(Block { cid, data })));
} else if *this.fail_on_dead_links {
return Poll::Ready(Some(Err(anyhow::anyhow!(
"missing key: {}",
cid
))));
}
}
}
}
this.dfs.pop_front();
}
}
}
// This consumes a [`Tipset`] from the iterator one at a time. The next iteration of the
// enclosing loop is processing the queue. Once the desired depth has been reached -
// yield the block without walking the graph it represents.
if let Some(tipset) = this.tipset_iter.as_mut().next() {
for block in tipset.into_blocks().into_iter() {
if this.seen.insert(*block.cid()) {
// Make sure we always yield a block otherwise.
this.dfs.push_back(Emit(*block.cid()));
if block.epoch() == 0 {
// The genesis block has some kind of dummy parent that needs to be emitted.
for p in block.parents().cids() {
this.dfs.push_back(Emit(*p));
}
}
// Process block messages.
if block.epoch() > stateroot_limit {
this.dfs
.push_back(Iterate(DfsIter::from(*block.messages())));
}
// Visit the block if it's within required depth. And a special case for `0`
// epoch to match Lotus' implementation.
if block.epoch() == 0 || block.epoch() > stateroot_limit {
// NOTE: In the original `walk_snapshot` implementation we walk the dag
// immediately. Which is what we do here as well, but using a queue.
this.dfs
.push_back(Iterate(DfsIter::from(*block.state_root())));
}
}
}
} else {
// That's it, nothing else to do. End of stream.
return Poll::Ready(None);
}
}
}
}
| // | identifier_name |
octree_gui.rs | use std::collections::VecDeque;
use std::ops::RangeInclusive;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use cgmath::{Rotation, Vector3};
use glium::{Display, Surface};
use glium::glutin;
use glium::glutin::event::WindowEvent;
use glium::glutin::window::WindowBuilder;
use imgui::*;
use imgui::{Context, FontConfig, FontGlyphRanges, FontSource};
use imgui_glium_renderer::Renderer;
use imgui_winit_support::{HiDpiMode, WinitPlatform};
use num_traits::{FromPrimitive, ToPrimitive};
use winit::event::{ElementState, Event, VirtualKeyCode};
use crate::core::{Filter, Message, Payload, System};
use crate::NSE;
use crate::rendering::{Camera, Mesh, Octree, OctreeConfig, OctreeInfo, OctreeOptimizations, RenderSystem, Transformation};
use crate::rendering::fractal_generators::FractalSelection;
pub struct OctreeGuiSystem {
imgui: Arc<Mutex<Context>>,
platform: WinitPlatform,
renderer: Renderer,
display: Arc<Mutex<glium::Display>>,
// octree data
octree_config: OctreeConfig,
octree_optimizations: OctreeOptimizations,
// profiling data
profiling_data: ProfilingData,
frame_times: VecDeque<f32>,
// message passing
messages: Vec<Message>,
}
impl OctreeGuiSystem {
pub fn new(nse: &NSE, _render_system: Arc<Mutex<RenderSystem>>) -> Arc<Mutex<Self>> {
let mut imgui = Context::create();
// configure imgui-rs Context if necessary
imgui.set_ini_filename(None);
let context = glutin::ContextBuilder::new().with_vsync(false);
let builder = WindowBuilder::new()
.with_title("Octree - Config")
.with_decorations(true)
.with_inner_size(glutin::dpi::LogicalSize::new(420f64, 768f64));
let display =
Display::new(builder, context, &nse.event_loop).expect("Failed to initialize display");
let mut platform = WinitPlatform::init(&mut imgui); // step 1
platform.attach_window(
imgui.io_mut(),
&display.gl_window().window(),
HiDpiMode::Default,
); // step 2
let hidpi_factor = platform.hidpi_factor();
let font_size = (13.0 * hidpi_factor) as f32;
imgui.fonts().add_font(&[
FontSource::DefaultFontData {
config: Some(FontConfig {
size_pixels: font_size,
..FontConfig::default()
}),
},
FontSource::TtfData {
data: include_bytes!("resources/mplus-1p-regular.ttf"),
size_pixels: font_size,
config: Some(FontConfig {
rasterizer_multiply: 1.75,
glyph_ranges: FontGlyphRanges::japanese(),
..FontConfig::default()
}),
},
]);
imgui.io_mut().font_global_scale = (1.0 / hidpi_factor) as f32;
let renderer = Renderer::init(&mut imgui, &display).expect("Failed to initialize renderer");
let mut frame_times = VecDeque::new();
frame_times.resize(500, 0.0);
Arc::new(Mutex::new(OctreeGuiSystem {
imgui: Arc::new(Mutex::new(imgui)),
platform,
renderer,
display: Arc::new(Mutex::new(display)),
octree_config: OctreeConfig::default(),
octree_optimizations: OctreeOptimizations::default(),
profiling_data: ProfilingData::default(),
frame_times,
messages: vec![],
}))
}
fn display_octree_ui(&mut self, ui: &Ui, _config: &OctreeConfig, _info: &OctreeInfo) {
if CollapsingHeader::new(im_str!("Settings"))
.default_open(true)
.build(&ui)
{
// reset the reset flag
self.octree_config.reset = None;
let mut modified = false;
ui.text(format!("Fractal Selection"));
let mut selected_fractal = self.octree_config.fractal.as_mut().unwrap().to_usize().unwrap_or(0);
let mut fractal_names = vec![];
for x in 0.. {
match FromPrimitive::from_i32(x) {
Some(FractalSelection::MandelBulb) => fractal_names.push(im_str!("Mandel Bulb")),
Some(FractalSelection::MandelBrot) => fractal_names.push(im_str!("Mandel Brot")),
Some(FractalSelection::SierpinskiPyramid) => fractal_names.push(im_str!("Sierpinski Pyramid")),
Some(FractalSelection::SierpinskiTetrahedron) => fractal_names.push(im_str!("Sierpinski Tetrahedron")),
Some(FractalSelection::MengerSponge) => fractal_names.push(im_str!("Menger Sponge")),
Some(FractalSelection::MidpointDisplacement) => fractal_names.push(im_str!("Midpoint Displacement")),
_ => break, // leave loop
}
}
if ComboBox::new(im_str!("Select Fractal"))
.build_simple_string(
&ui,
&mut selected_fractal,
&fractal_names) {
self.octree_config = OctreeConfig::default();
self.octree_config.fractal = FromPrimitive::from_usize(selected_fractal);
self.octree_config.reset = Some(true);
modified = true;
}
ui.separator();
ui.text(format!("Performance Settings"));
if ui.button(im_str!("Update Now"), [0.0, 0.0]) {
self.messages.push(Message::new(self.octree_config.clone()));
};
ui.same_line(0.0);
if ui.checkbox(im_str!("Continuous Update"), &mut self.octree_config.continuous_update.as_mut().unwrap())
{
modified = true;
}
if Slider::new(im_str!("Subdivision Threshold (px)"))
.range(RangeInclusive::new(1.0, 50.0))
.flags(SliderFlags::LOGARITHMIC)
.build(&ui, &mut self.octree_config.subdiv_threshold.as_mut().unwrap())
{
modified = true;
}
if Slider::new(im_str!("Distance Scale"))
.range(RangeInclusive::new(0.05 as f64, 1.0 as f64))
// .flags(SliderFlags::LOGARITHMIC)
.build(&ui, self.octree_config.distance_scale.as_mut().unwrap())
{
modified = true;
}
if Slider::new(im_str!("Max. Octree Nodes"))
.range(RangeInclusive::new(1e4 as u64, 2e7 as u64))
.build(&ui, self.octree_config.max_rendered_nodes.as_mut().unwrap())
{
modified = true;
}
if modified {
self.messages.push(Message::new(self.octree_config.clone()));
}
if ui.button(im_str!("Reset Octree"), [0.0, 0.0]) {
let prev_selection = self.octree_config.fractal;
self.octree_config = OctreeConfig::default();
self.octree_config.reset = Some(true);
self.octree_config.fractal = prev_selection;
self.messages.push(Message::new(self.octree_config.clone()));
};
}
}
fn display_profiling_ui(&mut self, delta_time: Duration, ui: &Ui) {
let rendered_nodes = &mut self.profiling_data.rendered_nodes.unwrap_or(0);
let render_time = self.profiling_data.render_time.unwrap_or(0) as f64 / 1e6 as f64;
let frame_times = &mut self.frame_times;
frame_times.pop_front();
frame_times.push_back(delta_time.as_secs_f32());
let f_times: Vec<f32> = frame_times.iter().cloned().collect();
if CollapsingHeader::new(im_str!("Profiling"))
.default_open(true)
.build(&ui)
{
// Plot Frame Times
ui.plot_lines(im_str!("Frame Times"), &f_times[..])
.graph_size([0.0, 50.0])
.overlay_text(&im_str!("{} ms", delta_time.as_millis()))
.build();
// print times of seperate systems
if self.profiling_data.system_times.is_some() {
for (system_name, system_time) in self.profiling_data.system_times.as_ref().unwrap()
{
ui.text(im_str!("{}: {}", system_name, system_time.as_millis()));
}
}
ui.separator();
ui.text(im_str!("Rendered Nodes: {}", rendered_nodes));
ui.text(im_str!("Render Time: {:.2} ms", render_time));
}
}
fn display_camera_ui(&mut self, ui: &Ui, _camera: &Camera, camera_transform: &mut Transformation) {
if CollapsingHeader::new(im_str!("Camera"))
.default_open(true)
.build(&ui)
{
let mut view_dir = camera_transform.rotation.rotate_vector(-Vector3::unit_z());
InputFloat3::new(
&ui,
im_str!("View Direction (read only)"),
view_dir.as_mut(),
).read_only(true).build();
InputFloat3::new(
&ui,
im_str!("Camera Position"),
camera_transform.position.as_mut(),
).build();
camera_transform.update();
}
}
}
impl System for OctreeGuiSystem {
fn | (&mut self) -> Vec<Filter> {
vec![
crate::filter!(Octree, Mesh, Transformation),
crate::filter!(Camera, Transformation),
]
}
fn handle_input(&mut self, _event: &Event<()>) {
let platform = &mut self.platform;
let display = self.display.lock().unwrap();
let gl_window = display.gl_window();
let mut imgui = self.imgui.lock().unwrap();
match _event {
Event::MainEventsCleared => {
platform
.prepare_frame(imgui.io_mut(), &gl_window.window()) // step 4
.expect("Failed to prepare frame");
}
Event::WindowEvent {
event: WindowEvent::CloseRequested,
window_id,
} => {
if *window_id == gl_window.window().id() {
println!("Close Octree Config Window");
gl_window.window().set_visible(false);
return;
}
}
Event::WindowEvent { event, .. } => match event {
WindowEvent::KeyboardInput { input, .. } => match input {
winit::event::KeyboardInput {
virtual_keycode,
state,
..
} => match (virtual_keycode, state) {
(Some(VirtualKeyCode::F12), ElementState::Pressed) => {
println!("Open Octree Config Window");
gl_window.window().set_visible(true);
}
_ => (),
},
},
_ => (),
},
_ => (),
}
platform.handle_event(imgui.io_mut(), &gl_window.window(), &_event); // step 3
}
fn consume_messages(&mut self, messages: &Vec<Message>) {
for m in messages {
if m.is_type::<ProfilingData>() {
let data = m.get_payload::<ProfilingData>().unwrap();
self.profiling_data.replace(data);
}
if m.is_type::<OctreeOptimizations>() {
let data = m.get_payload::<OctreeOptimizations>().unwrap();
self.octree_optimizations = data.clone();
}
if m.is_type::<OctreeConfig>() {
let data = m.get_payload::<OctreeConfig>().unwrap();
self.octree_config.merge(data);
}
}
}
fn execute(&mut self, filter: &Vec<Arc<Mutex<Filter>>>, delta_time: Duration) {
let ctx = self.imgui.clone();
let display = self.display.clone();
let mut ctx_lock = ctx.lock().unwrap();
let display_lock = display.lock().unwrap();
let ui = ctx_lock.frame();
let gl_window = display_lock.gl_window();
let octree_entities = &filter[0].lock().unwrap().entities;
let camera_entities = &filter[1].lock().unwrap().entities;
let window = Window::new(im_str!("Octree"))
.collapsible(false)
.movable(false)
.position([10.0, 10.0], Condition::FirstUseEver)
.size([400.0, 740.0], Condition::FirstUseEver);
let window_token = window.begin(&ui).unwrap();
for entity in octree_entities {
let entitiy_mutex = entity.lock().unwrap();
let _octree_transform = entitiy_mutex
.get_component::<Transformation>()
.ok()
.unwrap();
let octree = entitiy_mutex.get_component::<Octree>().ok().unwrap();
self.display_profiling_ui(delta_time, &ui);
ui.new_line();
self.display_octree_ui(&ui, &octree.config, &octree.info);
ui.new_line();
}
for entity in camera_entities {
let mut entitiy_mutex = entity.lock().unwrap();
let mut camera_transform = entitiy_mutex
.get_component::<Transformation>()
.ok()
.unwrap().clone();
let camera = entitiy_mutex.get_component::<Camera>().ok().unwrap();
self.display_camera_ui(&ui, camera, &mut camera_transform);
entitiy_mutex.add_component(camera_transform);
}
window_token.end(&ui);
// construct the UI
self.platform.prepare_render(&ui, &gl_window.window()); // step 5
// render the UI with a renderer
let draw_data = ui.render();
let mut target = display_lock.draw();
target.clear_color_srgb(0.1, 0.1, 0.11, 1.0);
self.renderer
.render(&mut target, draw_data)
.expect("Rendering failed");
target.finish().expect("Failed to swap buffers");
}
fn get_messages(&mut self) -> Vec<Message> {
let ret = self.messages.clone();
self.messages.clear();
ret
}
}
#[derive(Debug, Clone, Default)]
pub struct ProfilingData {
pub rendered_nodes: Option<u32>,
pub instance_data_generation: Option<u64>,
pub render_time: Option<u64>,
// in nano seconds
pub system_times: Option<Vec<(String, Duration)>>,
}
impl ProfilingData {
pub fn replace(&mut self, other: &Self) {
Self::replace_option(&mut self.rendered_nodes, &other.rendered_nodes);
Self::replace_option(
&mut self.instance_data_generation,
&other.instance_data_generation,
);
Self::replace_option(&mut self.render_time, &other.render_time);
Self::replace_option(&mut self.system_times, &other.system_times);
}
fn replace_option<T>(target: &mut Option<T>, source: &Option<T>)
where
T: Clone,
{
match source {
Some(val) => target.replace(val.clone()),
None => None,
};
}
}
impl Payload for ProfilingData {}
| get_filter | identifier_name |
octree_gui.rs | use std::collections::VecDeque;
use std::ops::RangeInclusive;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use cgmath::{Rotation, Vector3};
use glium::{Display, Surface};
use glium::glutin;
use glium::glutin::event::WindowEvent;
use glium::glutin::window::WindowBuilder;
use imgui::*;
use imgui::{Context, FontConfig, FontGlyphRanges, FontSource};
use imgui_glium_renderer::Renderer;
use imgui_winit_support::{HiDpiMode, WinitPlatform};
use num_traits::{FromPrimitive, ToPrimitive};
use winit::event::{ElementState, Event, VirtualKeyCode};
use crate::core::{Filter, Message, Payload, System};
use crate::NSE;
use crate::rendering::{Camera, Mesh, Octree, OctreeConfig, OctreeInfo, OctreeOptimizations, RenderSystem, Transformation};
use crate::rendering::fractal_generators::FractalSelection;
pub struct OctreeGuiSystem {
imgui: Arc<Mutex<Context>>,
platform: WinitPlatform,
renderer: Renderer,
display: Arc<Mutex<glium::Display>>,
// octree data
octree_config: OctreeConfig,
octree_optimizations: OctreeOptimizations,
// profiling data
profiling_data: ProfilingData,
frame_times: VecDeque<f32>,
// message passing
messages: Vec<Message>,
}
impl OctreeGuiSystem {
pub fn new(nse: &NSE, _render_system: Arc<Mutex<RenderSystem>>) -> Arc<Mutex<Self>> {
let mut imgui = Context::create();
// configure imgui-rs Context if necessary
imgui.set_ini_filename(None);
let context = glutin::ContextBuilder::new().with_vsync(false);
let builder = WindowBuilder::new()
.with_title("Octree - Config")
.with_decorations(true)
.with_inner_size(glutin::dpi::LogicalSize::new(420f64, 768f64));
let display =
Display::new(builder, context, &nse.event_loop).expect("Failed to initialize display");
let mut platform = WinitPlatform::init(&mut imgui); // step 1
platform.attach_window(
imgui.io_mut(),
&display.gl_window().window(),
HiDpiMode::Default,
); // step 2
let hidpi_factor = platform.hidpi_factor();
let font_size = (13.0 * hidpi_factor) as f32;
imgui.fonts().add_font(&[
FontSource::DefaultFontData {
config: Some(FontConfig {
size_pixels: font_size,
..FontConfig::default()
}),
},
FontSource::TtfData {
data: include_bytes!("resources/mplus-1p-regular.ttf"),
size_pixels: font_size,
config: Some(FontConfig {
rasterizer_multiply: 1.75,
glyph_ranges: FontGlyphRanges::japanese(),
..FontConfig::default()
}),
},
]);
imgui.io_mut().font_global_scale = (1.0 / hidpi_factor) as f32;
let renderer = Renderer::init(&mut imgui, &display).expect("Failed to initialize renderer");
let mut frame_times = VecDeque::new();
frame_times.resize(500, 0.0);
Arc::new(Mutex::new(OctreeGuiSystem {
imgui: Arc::new(Mutex::new(imgui)),
platform,
renderer,
display: Arc::new(Mutex::new(display)),
octree_config: OctreeConfig::default(),
octree_optimizations: OctreeOptimizations::default(),
profiling_data: ProfilingData::default(),
frame_times,
messages: vec![],
}))
}
fn display_octree_ui(&mut self, ui: &Ui, _config: &OctreeConfig, _info: &OctreeInfo) {
if CollapsingHeader::new(im_str!("Settings"))
.default_open(true)
.build(&ui)
{
// reset the reset flag
self.octree_config.reset = None;
let mut modified = false;
ui.text(format!("Fractal Selection"));
let mut selected_fractal = self.octree_config.fractal.as_mut().unwrap().to_usize().unwrap_or(0);
let mut fractal_names = vec![];
for x in 0.. {
match FromPrimitive::from_i32(x) {
Some(FractalSelection::MandelBulb) => fractal_names.push(im_str!("Mandel Bulb")),
Some(FractalSelection::MandelBrot) => fractal_names.push(im_str!("Mandel Brot")),
Some(FractalSelection::SierpinskiPyramid) => fractal_names.push(im_str!("Sierpinski Pyramid")),
Some(FractalSelection::SierpinskiTetrahedron) => fractal_names.push(im_str!("Sierpinski Tetrahedron")),
Some(FractalSelection::MengerSponge) => fractal_names.push(im_str!("Menger Sponge")),
Some(FractalSelection::MidpointDisplacement) => fractal_names.push(im_str!("Midpoint Displacement")),
_ => break, // leave loop
}
}
if ComboBox::new(im_str!("Select Fractal"))
.build_simple_string(
&ui,
&mut selected_fractal,
&fractal_names) {
self.octree_config = OctreeConfig::default();
self.octree_config.fractal = FromPrimitive::from_usize(selected_fractal);
self.octree_config.reset = Some(true);
modified = true;
}
ui.separator();
ui.text(format!("Performance Settings"));
if ui.button(im_str!("Update Now"), [0.0, 0.0]) {
self.messages.push(Message::new(self.octree_config.clone()));
};
ui.same_line(0.0);
if ui.checkbox(im_str!("Continuous Update"), &mut self.octree_config.continuous_update.as_mut().unwrap())
{
modified = true;
}
if Slider::new(im_str!("Subdivision Threshold (px)"))
.range(RangeInclusive::new(1.0, 50.0))
.flags(SliderFlags::LOGARITHMIC)
.build(&ui, &mut self.octree_config.subdiv_threshold.as_mut().unwrap())
{
modified = true;
}
if Slider::new(im_str!("Distance Scale"))
.range(RangeInclusive::new(0.05 as f64, 1.0 as f64))
// .flags(SliderFlags::LOGARITHMIC)
.build(&ui, self.octree_config.distance_scale.as_mut().unwrap())
{
modified = true;
}
if Slider::new(im_str!("Max. Octree Nodes"))
.range(RangeInclusive::new(1e4 as u64, 2e7 as u64))
.build(&ui, self.octree_config.max_rendered_nodes.as_mut().unwrap())
{
modified = true;
}
if modified {
self.messages.push(Message::new(self.octree_config.clone()));
}
if ui.button(im_str!("Reset Octree"), [0.0, 0.0]) {
let prev_selection = self.octree_config.fractal;
self.octree_config = OctreeConfig::default();
self.octree_config.reset = Some(true);
self.octree_config.fractal = prev_selection;
self.messages.push(Message::new(self.octree_config.clone()));
};
}
}
fn display_profiling_ui(&mut self, delta_time: Duration, ui: &Ui) {
let rendered_nodes = &mut self.profiling_data.rendered_nodes.unwrap_or(0);
let render_time = self.profiling_data.render_time.unwrap_or(0) as f64 / 1e6 as f64;
let frame_times = &mut self.frame_times;
frame_times.pop_front();
frame_times.push_back(delta_time.as_secs_f32());
let f_times: Vec<f32> = frame_times.iter().cloned().collect();
if CollapsingHeader::new(im_str!("Profiling"))
.default_open(true)
.build(&ui)
{
// Plot Frame Times
ui.plot_lines(im_str!("Frame Times"), &f_times[..])
.graph_size([0.0, 50.0])
.overlay_text(&im_str!("{} ms", delta_time.as_millis()))
.build();
// print times of seperate systems
if self.profiling_data.system_times.is_some() {
for (system_name, system_time) in self.profiling_data.system_times.as_ref().unwrap()
{
ui.text(im_str!("{}: {}", system_name, system_time.as_millis()));
}
}
ui.separator();
ui.text(im_str!("Rendered Nodes: {}", rendered_nodes));
ui.text(im_str!("Render Time: {:.2} ms", render_time));
}
}
fn display_camera_ui(&mut self, ui: &Ui, _camera: &Camera, camera_transform: &mut Transformation) {
if CollapsingHeader::new(im_str!("Camera"))
.default_open(true)
.build(&ui)
{
let mut view_dir = camera_transform.rotation.rotate_vector(-Vector3::unit_z());
InputFloat3::new(
&ui,
im_str!("View Direction (read only)"),
view_dir.as_mut(),
).read_only(true).build();
InputFloat3::new(
&ui,
im_str!("Camera Position"),
camera_transform.position.as_mut(),
).build();
camera_transform.update();
}
}
}
impl System for OctreeGuiSystem {
fn get_filter(&mut self) -> Vec<Filter> |
fn handle_input(&mut self, _event: &Event<()>) {
let platform = &mut self.platform;
let display = self.display.lock().unwrap();
let gl_window = display.gl_window();
let mut imgui = self.imgui.lock().unwrap();
match _event {
Event::MainEventsCleared => {
platform
.prepare_frame(imgui.io_mut(), &gl_window.window()) // step 4
.expect("Failed to prepare frame");
}
Event::WindowEvent {
event: WindowEvent::CloseRequested,
window_id,
} => {
if *window_id == gl_window.window().id() {
println!("Close Octree Config Window");
gl_window.window().set_visible(false);
return;
}
}
Event::WindowEvent { event, .. } => match event {
WindowEvent::KeyboardInput { input, .. } => match input {
winit::event::KeyboardInput {
virtual_keycode,
state,
..
} => match (virtual_keycode, state) {
(Some(VirtualKeyCode::F12), ElementState::Pressed) => {
println!("Open Octree Config Window");
gl_window.window().set_visible(true);
}
_ => (),
},
},
_ => (),
},
_ => (),
}
platform.handle_event(imgui.io_mut(), &gl_window.window(), &_event); // step 3
}
fn consume_messages(&mut self, messages: &Vec<Message>) {
for m in messages {
if m.is_type::<ProfilingData>() {
let data = m.get_payload::<ProfilingData>().unwrap();
self.profiling_data.replace(data);
}
if m.is_type::<OctreeOptimizations>() {
let data = m.get_payload::<OctreeOptimizations>().unwrap();
self.octree_optimizations = data.clone();
}
if m.is_type::<OctreeConfig>() {
let data = m.get_payload::<OctreeConfig>().unwrap();
self.octree_config.merge(data);
}
}
}
fn execute(&mut self, filter: &Vec<Arc<Mutex<Filter>>>, delta_time: Duration) {
let ctx = self.imgui.clone();
let display = self.display.clone();
let mut ctx_lock = ctx.lock().unwrap();
let display_lock = display.lock().unwrap();
let ui = ctx_lock.frame();
let gl_window = display_lock.gl_window();
let octree_entities = &filter[0].lock().unwrap().entities;
let camera_entities = &filter[1].lock().unwrap().entities;
let window = Window::new(im_str!("Octree"))
.collapsible(false)
.movable(false)
.position([10.0, 10.0], Condition::FirstUseEver)
.size([400.0, 740.0], Condition::FirstUseEver);
let window_token = window.begin(&ui).unwrap();
for entity in octree_entities {
let entitiy_mutex = entity.lock().unwrap();
let _octree_transform = entitiy_mutex
.get_component::<Transformation>()
.ok()
.unwrap();
let octree = entitiy_mutex.get_component::<Octree>().ok().unwrap();
self.display_profiling_ui(delta_time, &ui);
ui.new_line();
self.display_octree_ui(&ui, &octree.config, &octree.info);
ui.new_line();
}
for entity in camera_entities {
let mut entitiy_mutex = entity.lock().unwrap();
let mut camera_transform = entitiy_mutex
.get_component::<Transformation>()
.ok()
.unwrap().clone();
let camera = entitiy_mutex.get_component::<Camera>().ok().unwrap();
self.display_camera_ui(&ui, camera, &mut camera_transform);
entitiy_mutex.add_component(camera_transform);
}
window_token.end(&ui);
// construct the UI
self.platform.prepare_render(&ui, &gl_window.window()); // step 5
// render the UI with a renderer
let draw_data = ui.render();
let mut target = display_lock.draw();
target.clear_color_srgb(0.1, 0.1, 0.11, 1.0);
self.renderer
.render(&mut target, draw_data)
.expect("Rendering failed");
target.finish().expect("Failed to swap buffers");
}
fn get_messages(&mut self) -> Vec<Message> {
let ret = self.messages.clone();
self.messages.clear();
ret
}
}
#[derive(Debug, Clone, Default)]
pub struct ProfilingData {
pub rendered_nodes: Option<u32>,
pub instance_data_generation: Option<u64>,
pub render_time: Option<u64>,
// in nano seconds
pub system_times: Option<Vec<(String, Duration)>>,
}
impl ProfilingData {
pub fn replace(&mut self, other: &Self) {
Self::replace_option(&mut self.rendered_nodes, &other.rendered_nodes);
Self::replace_option(
&mut self.instance_data_generation,
&other.instance_data_generation,
);
Self::replace_option(&mut self.render_time, &other.render_time);
Self::replace_option(&mut self.system_times, &other.system_times);
}
fn replace_option<T>(target: &mut Option<T>, source: &Option<T>)
where
T: Clone,
{
match source {
Some(val) => target.replace(val.clone()),
None => None,
};
}
}
impl Payload for ProfilingData {}
| {
vec![
crate::filter!(Octree, Mesh, Transformation),
crate::filter!(Camera, Transformation),
]
} | identifier_body |
octree_gui.rs | use std::collections::VecDeque;
use std::ops::RangeInclusive;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use cgmath::{Rotation, Vector3};
use glium::{Display, Surface};
use glium::glutin;
use glium::glutin::event::WindowEvent;
use glium::glutin::window::WindowBuilder;
use imgui::*;
use imgui::{Context, FontConfig, FontGlyphRanges, FontSource};
use imgui_glium_renderer::Renderer;
use imgui_winit_support::{HiDpiMode, WinitPlatform};
use num_traits::{FromPrimitive, ToPrimitive};
use winit::event::{ElementState, Event, VirtualKeyCode};
use crate::core::{Filter, Message, Payload, System};
use crate::NSE;
use crate::rendering::{Camera, Mesh, Octree, OctreeConfig, OctreeInfo, OctreeOptimizations, RenderSystem, Transformation};
use crate::rendering::fractal_generators::FractalSelection;
pub struct OctreeGuiSystem {
imgui: Arc<Mutex<Context>>,
platform: WinitPlatform,
renderer: Renderer,
display: Arc<Mutex<glium::Display>>,
// octree data
octree_config: OctreeConfig,
octree_optimizations: OctreeOptimizations,
// profiling data
profiling_data: ProfilingData,
frame_times: VecDeque<f32>,
// message passing
messages: Vec<Message>,
}
impl OctreeGuiSystem {
pub fn new(nse: &NSE, _render_system: Arc<Mutex<RenderSystem>>) -> Arc<Mutex<Self>> {
let mut imgui = Context::create();
// configure imgui-rs Context if necessary
imgui.set_ini_filename(None);
let context = glutin::ContextBuilder::new().with_vsync(false);
let builder = WindowBuilder::new()
.with_title("Octree - Config")
.with_decorations(true)
.with_inner_size(glutin::dpi::LogicalSize::new(420f64, 768f64));
let display =
Display::new(builder, context, &nse.event_loop).expect("Failed to initialize display");
let mut platform = WinitPlatform::init(&mut imgui); // step 1
platform.attach_window(
imgui.io_mut(),
&display.gl_window().window(),
HiDpiMode::Default,
); // step 2
let hidpi_factor = platform.hidpi_factor();
let font_size = (13.0 * hidpi_factor) as f32;
imgui.fonts().add_font(&[
FontSource::DefaultFontData {
config: Some(FontConfig {
size_pixels: font_size,
..FontConfig::default()
}),
},
FontSource::TtfData {
data: include_bytes!("resources/mplus-1p-regular.ttf"),
size_pixels: font_size,
config: Some(FontConfig {
rasterizer_multiply: 1.75,
glyph_ranges: FontGlyphRanges::japanese(),
..FontConfig::default()
}),
},
]);
imgui.io_mut().font_global_scale = (1.0 / hidpi_factor) as f32;
let renderer = Renderer::init(&mut imgui, &display).expect("Failed to initialize renderer");
let mut frame_times = VecDeque::new();
frame_times.resize(500, 0.0);
Arc::new(Mutex::new(OctreeGuiSystem {
imgui: Arc::new(Mutex::new(imgui)),
platform,
renderer,
display: Arc::new(Mutex::new(display)),
octree_config: OctreeConfig::default(),
octree_optimizations: OctreeOptimizations::default(),
profiling_data: ProfilingData::default(),
frame_times,
messages: vec![],
}))
}
fn display_octree_ui(&mut self, ui: &Ui, _config: &OctreeConfig, _info: &OctreeInfo) {
if CollapsingHeader::new(im_str!("Settings"))
.default_open(true)
.build(&ui)
{
// reset the reset flag
self.octree_config.reset = None;
let mut modified = false;
ui.text(format!("Fractal Selection"));
let mut selected_fractal = self.octree_config.fractal.as_mut().unwrap().to_usize().unwrap_or(0);
let mut fractal_names = vec![];
for x in 0.. {
match FromPrimitive::from_i32(x) {
Some(FractalSelection::MandelBulb) => fractal_names.push(im_str!("Mandel Bulb")),
Some(FractalSelection::MandelBrot) => fractal_names.push(im_str!("Mandel Brot")),
Some(FractalSelection::SierpinskiPyramid) => fractal_names.push(im_str!("Sierpinski Pyramid")),
Some(FractalSelection::SierpinskiTetrahedron) => fractal_names.push(im_str!("Sierpinski Tetrahedron")),
Some(FractalSelection::MengerSponge) => fractal_names.push(im_str!("Menger Sponge")),
Some(FractalSelection::MidpointDisplacement) => fractal_names.push(im_str!("Midpoint Displacement")),
_ => break, // leave loop
}
}
if ComboBox::new(im_str!("Select Fractal"))
.build_simple_string(
&ui,
&mut selected_fractal,
&fractal_names) {
self.octree_config = OctreeConfig::default();
self.octree_config.fractal = FromPrimitive::from_usize(selected_fractal);
self.octree_config.reset = Some(true);
modified = true;
}
ui.separator();
ui.text(format!("Performance Settings"));
if ui.button(im_str!("Update Now"), [0.0, 0.0]) {
self.messages.push(Message::new(self.octree_config.clone()));
};
ui.same_line(0.0);
if ui.checkbox(im_str!("Continuous Update"), &mut self.octree_config.continuous_update.as_mut().unwrap())
{
modified = true;
}
if Slider::new(im_str!("Subdivision Threshold (px)"))
.range(RangeInclusive::new(1.0, 50.0))
.flags(SliderFlags::LOGARITHMIC)
.build(&ui, &mut self.octree_config.subdiv_threshold.as_mut().unwrap())
{
modified = true;
}
if Slider::new(im_str!("Distance Scale"))
.range(RangeInclusive::new(0.05 as f64, 1.0 as f64))
// .flags(SliderFlags::LOGARITHMIC)
.build(&ui, self.octree_config.distance_scale.as_mut().unwrap())
{
modified = true;
}
if Slider::new(im_str!("Max. Octree Nodes"))
.range(RangeInclusive::new(1e4 as u64, 2e7 as u64))
.build(&ui, self.octree_config.max_rendered_nodes.as_mut().unwrap())
{
modified = true;
}
if modified {
self.messages.push(Message::new(self.octree_config.clone()));
}
if ui.button(im_str!("Reset Octree"), [0.0, 0.0]) {
let prev_selection = self.octree_config.fractal;
self.octree_config = OctreeConfig::default();
self.octree_config.reset = Some(true);
self.octree_config.fractal = prev_selection;
self.messages.push(Message::new(self.octree_config.clone()));
};
}
}
fn display_profiling_ui(&mut self, delta_time: Duration, ui: &Ui) {
let rendered_nodes = &mut self.profiling_data.rendered_nodes.unwrap_or(0);
let render_time = self.profiling_data.render_time.unwrap_or(0) as f64 / 1e6 as f64;
let frame_times = &mut self.frame_times;
frame_times.pop_front();
frame_times.push_back(delta_time.as_secs_f32());
let f_times: Vec<f32> = frame_times.iter().cloned().collect();
if CollapsingHeader::new(im_str!("Profiling"))
.default_open(true)
.build(&ui)
{
// Plot Frame Times
ui.plot_lines(im_str!("Frame Times"), &f_times[..])
.graph_size([0.0, 50.0])
.overlay_text(&im_str!("{} ms", delta_time.as_millis()))
.build();
// print times of seperate systems
if self.profiling_data.system_times.is_some() {
for (system_name, system_time) in self.profiling_data.system_times.as_ref().unwrap()
{
ui.text(im_str!("{}: {}", system_name, system_time.as_millis()));
}
}
ui.separator();
ui.text(im_str!("Rendered Nodes: {}", rendered_nodes));
ui.text(im_str!("Render Time: {:.2} ms", render_time));
}
}
fn display_camera_ui(&mut self, ui: &Ui, _camera: &Camera, camera_transform: &mut Transformation) {
if CollapsingHeader::new(im_str!("Camera"))
.default_open(true)
.build(&ui)
{
let mut view_dir = camera_transform.rotation.rotate_vector(-Vector3::unit_z());
InputFloat3::new(
&ui,
im_str!("View Direction (read only)"), | view_dir.as_mut(),
).read_only(true).build();
InputFloat3::new(
&ui,
im_str!("Camera Position"),
camera_transform.position.as_mut(),
).build();
camera_transform.update();
}
}
}
impl System for OctreeGuiSystem {
fn get_filter(&mut self) -> Vec<Filter> {
vec![
crate::filter!(Octree, Mesh, Transformation),
crate::filter!(Camera, Transformation),
]
}
fn handle_input(&mut self, _event: &Event<()>) {
let platform = &mut self.platform;
let display = self.display.lock().unwrap();
let gl_window = display.gl_window();
let mut imgui = self.imgui.lock().unwrap();
match _event {
Event::MainEventsCleared => {
platform
.prepare_frame(imgui.io_mut(), &gl_window.window()) // step 4
.expect("Failed to prepare frame");
}
Event::WindowEvent {
event: WindowEvent::CloseRequested,
window_id,
} => {
if *window_id == gl_window.window().id() {
println!("Close Octree Config Window");
gl_window.window().set_visible(false);
return;
}
}
Event::WindowEvent { event, .. } => match event {
WindowEvent::KeyboardInput { input, .. } => match input {
winit::event::KeyboardInput {
virtual_keycode,
state,
..
} => match (virtual_keycode, state) {
(Some(VirtualKeyCode::F12), ElementState::Pressed) => {
println!("Open Octree Config Window");
gl_window.window().set_visible(true);
}
_ => (),
},
},
_ => (),
},
_ => (),
}
platform.handle_event(imgui.io_mut(), &gl_window.window(), &_event); // step 3
}
fn consume_messages(&mut self, messages: &Vec<Message>) {
for m in messages {
if m.is_type::<ProfilingData>() {
let data = m.get_payload::<ProfilingData>().unwrap();
self.profiling_data.replace(data);
}
if m.is_type::<OctreeOptimizations>() {
let data = m.get_payload::<OctreeOptimizations>().unwrap();
self.octree_optimizations = data.clone();
}
if m.is_type::<OctreeConfig>() {
let data = m.get_payload::<OctreeConfig>().unwrap();
self.octree_config.merge(data);
}
}
}
fn execute(&mut self, filter: &Vec<Arc<Mutex<Filter>>>, delta_time: Duration) {
let ctx = self.imgui.clone();
let display = self.display.clone();
let mut ctx_lock = ctx.lock().unwrap();
let display_lock = display.lock().unwrap();
let ui = ctx_lock.frame();
let gl_window = display_lock.gl_window();
let octree_entities = &filter[0].lock().unwrap().entities;
let camera_entities = &filter[1].lock().unwrap().entities;
let window = Window::new(im_str!("Octree"))
.collapsible(false)
.movable(false)
.position([10.0, 10.0], Condition::FirstUseEver)
.size([400.0, 740.0], Condition::FirstUseEver);
let window_token = window.begin(&ui).unwrap();
for entity in octree_entities {
let entitiy_mutex = entity.lock().unwrap();
let _octree_transform = entitiy_mutex
.get_component::<Transformation>()
.ok()
.unwrap();
let octree = entitiy_mutex.get_component::<Octree>().ok().unwrap();
self.display_profiling_ui(delta_time, &ui);
ui.new_line();
self.display_octree_ui(&ui, &octree.config, &octree.info);
ui.new_line();
}
for entity in camera_entities {
let mut entitiy_mutex = entity.lock().unwrap();
let mut camera_transform = entitiy_mutex
.get_component::<Transformation>()
.ok()
.unwrap().clone();
let camera = entitiy_mutex.get_component::<Camera>().ok().unwrap();
self.display_camera_ui(&ui, camera, &mut camera_transform);
entitiy_mutex.add_component(camera_transform);
}
window_token.end(&ui);
// construct the UI
self.platform.prepare_render(&ui, &gl_window.window()); // step 5
// render the UI with a renderer
let draw_data = ui.render();
let mut target = display_lock.draw();
target.clear_color_srgb(0.1, 0.1, 0.11, 1.0);
self.renderer
.render(&mut target, draw_data)
.expect("Rendering failed");
target.finish().expect("Failed to swap buffers");
}
fn get_messages(&mut self) -> Vec<Message> {
let ret = self.messages.clone();
self.messages.clear();
ret
}
}
#[derive(Debug, Clone, Default)]
pub struct ProfilingData {
pub rendered_nodes: Option<u32>,
pub instance_data_generation: Option<u64>,
pub render_time: Option<u64>,
// in nano seconds
pub system_times: Option<Vec<(String, Duration)>>,
}
impl ProfilingData {
pub fn replace(&mut self, other: &Self) {
Self::replace_option(&mut self.rendered_nodes, &other.rendered_nodes);
Self::replace_option(
&mut self.instance_data_generation,
&other.instance_data_generation,
);
Self::replace_option(&mut self.render_time, &other.render_time);
Self::replace_option(&mut self.system_times, &other.system_times);
}
fn replace_option<T>(target: &mut Option<T>, source: &Option<T>)
where
T: Clone,
{
match source {
Some(val) => target.replace(val.clone()),
None => None,
};
}
}
impl Payload for ProfilingData {} | random_line_split | |
lib.rs | //! This is a platform agnostic Rust driver for the MAX3010x high-sensitivity
//! pulse oximeter and heart-rate sensor for wearable health, based on the
//! [`embedded-hal`] traits.
//!
//! [`embedded-hal`]: https://github.com/rust-embedded/embedded-hal
//!
//! This driver allows you to:
//! - Get the number of samples available on the FIFO. See [`get_available_sample_count()`].
//! - Get the number of samples lost from the FIFO. See [`get_overflow_sample_count()`].
//! - Read samples from the FIFO. See [`read_fifo()`].
//! - Perform a temperature measurement. See [`read_temperature()`].
//! - Change into heart-rate, oximeter or multi-LED modes. See [`into_multi_led()`].
//! - Set the sample averaging. See [`set_sample_averaging()`].
//! - Set the LED pulse amplitude. See [`set_pulse_amplitude()`].
//! - Set the LED pulse width. See [`set_pulse_width()`].
//! - Set the sampling rate. See [`set_sampling_rate()`].
//! - Set the ADC range. See [`set_adc_range()`].
//! - Set the LED time slots in multi-LED mode. [`set_led_time_slots()`].
//! - Enable/disable the FIFO rollover. See [`enable_fifo_rollover()`].
//! - Clear the FIFO. See [`clear_fifo()`].
//! - Wake-up and shutdown the device. See [`shutdown()`].
//! - Perform a software reset. See [`reset()`].
//! - Get the device part and revision id. See [`get_part_id()`].
//! - Interrupts:
//! - Read the status of all interrupts. See [`read_interrupt_status()`].
//! - Set FIFO-almost-full level interrupt. See [`set_fifo_almost_full_level_interrupt()`].
//! - Enable/disable the FIFO-almost-full interrupt. See [`enable_fifo_almost_full_interrupt()`].
//! - Enable/disable the ambient-light-cancellation overflow interrupt. See [`enable_alc_overflow_interrupt()`].
//! - Enable/disable the temperature-ready interrupt. See [`enable_temperature_ready_interrupt()`].
//! - Enable/disable the new-FIFO-data-ready interrupt. See [`enable_new_fifo_data_ready_interrupt()`].
//!
//! [`get_available_sample_count()`]: struct.Max3010x.html#method.get_available_sample_count
//! [`get_overflow_sample_count()`]: struct.Max3010x.html#method.get_overflow_sample_count
//! [`read_fifo()`]: struct.Max3010x.html#method.read_fifo
//! [`read_temperature()`]: struct.Max3010x.html#method.read_temperature
//! [`into_multi_led()`]: struct.Max3010x.html#method.into_multi_led
//! [`set_sample_averaging()`]: struct.Max3010x.html#method.set_sample_averaging
//! [`set_pulse_width()`]: struct.Max3010x.html#method.set_pulse_width
//! [`set_pulse_amplitude()`]: struct.Max3010x.html#method.set_pulse_amplitude
//! [`set_sampling_rate()`]: struct.Max3010x.html#method.set_sampling_rate
//! [`set_adc_range()`]: struct.Max3010x.html#method.set_adc_range
//! [`set_led_time_slots()`]: struct.Max3010x.html#method.set_led_time_slots
//! [`shutdown()`]: struct.Max3010x.html#method.shutdown
//! [`reset()`]: struct.Max3010x.html#method.reset
//! [`set_fifo_almost_full_level_interrupt()`]: struct.Max3010x.html#method.set_fifo_almost_full_level_interrupt
//! [`enable_fifo_rollover()`]: struct.Max3010x.html#method.enable_fifo_rollover
//! [`clear_fifo()`]: struct.Max3010x.html#method.clear_fifo
//! [`read_interrupt_status()`]: struct.Max3010x.html#method.read_interrupt_status
//! [`enable_fifo_almost_full_interrupt()`]: struct.Max3010x.html#method.enable_fifo_almost_full_interrupt
//! [`enable_alc_overflow_interrupt()`]: struct.Max3010x.html#method.enable_alc_overflow_interrupt
//! [`enable_temperature_ready_interrupt()`]: struct.Max3010x.html#method.enable_temperature_ready_interrupt
//! [`enable_new_fifo_data_ready_interrupt()`]: struct.Max3010x.html#method.enable_new_fifo_data_ready_interrupt
//! [`get_part_id()`]: struct.Max3010x.html#method.get_part_id
//!
//! ## The device
//! The `MAX30102` is an integrated pulse oximetry and heart-rate monitor module.
//! It includes internal LEDs, photodetectors, optical elements, and low-noise
//! electronics with ambient light rejection. The `MAX30102` provides a complete
//! system solution to ease the design-in process for mobile and
//! wearable devices.
//!
//! The `MAX30102` operates on a single 1.8V power supply and a separate 3.3V
//! power supply for the internal LEDs. Communication is through a standard
//! I2C-compatible interface. The module can be shut down through software
//! with zero standby current, allowing the power rails to remain
//! powered at all times.
//!
//! Datasheet:
//! - [`MAX30102`](https://datasheets.maximintegrated.com/en/ds/MAX30102.pdf)
//!
//! ## Usage examples (see also examples folder)
//!
//! To use this driver, import this crate and an `embedded_hal` implementation,
//! then instantiate the device.
//!
//! Please find additional examples using hardware in this repository: [driver-examples]
//!
//! [driver-examples]: https://github.com/eldruin/driver-examples
//!
//! ### Read samples in heart-rate mode
//!
//! ```no_run
//! extern crate linux_embedded_hal as hal;
//! extern crate max3010x;
//! use max3010x::{Max3010x, Led, SampleAveraging};
//!
//! # fn main() {
//! let dev = hal::I2cdev::new("/dev/i2c-1").unwrap();
//! let mut sensor = Max3010x::new_max30102(dev);
//! let mut sensor = sensor.into_heart_rate().unwrap();
//! sensor.set_sample_averaging(SampleAveraging::Sa4).unwrap();
//! sensor.set_pulse_amplitude(Led::All, 15).unwrap();
//! sensor.enable_fifo_rollover().unwrap();
//! let mut data = [0; 3];
//! let samples_read = sensor.read_fifo(&mut data).unwrap();
//!
//! // get the I2C device back
//! let dev = sensor.destroy();
//! # }
//! ```
//!
//! ### Set led slots in multi-led mode
//!
//! ```no_run
//! extern crate linux_embedded_hal as hal;
//! extern crate max3010x;
//! use max3010x::{ Max3010x, Led, TimeSlot };
//!
//! # fn main() {
//! let dev = hal::I2cdev::new("/dev/i2c-1").unwrap();
//! let mut max30102 = Max3010x::new_max30102(dev);
//! let mut max30102 = max30102.into_multi_led().unwrap();
//! max30102.set_pulse_amplitude(Led::All, 15).unwrap();
//! max30102.set_led_time_slots([
//! TimeSlot::Led1,
//! TimeSlot::Led2,
//! TimeSlot::Led1,
//! TimeSlot::Disabled
//! ]).unwrap();
//! max30102.enable_fifo_rollover().unwrap();
//! let mut data = [0; 2];
//! let samples_read = max30102.read_fifo(&mut data).unwrap();
//!
//! // get the I2C device back
//! let dev = max30102.destroy();
//! # }
//! ```
//!
#![deny(missing_docs, unsafe_code)]
#![no_std]
extern crate embedded_hal as hal;
use hal::blocking::i2c;
extern crate nb;
use core::marker::PhantomData;
/// All possible errors in this crate
#[derive(Debug)]
pub enum Error<E> {
/// I²C bus error
I2C(E),
/// Invalid arguments provided
InvalidArguments,
}
/// LEDs
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Led {
/// LED1 corresponds to Red in MAX30102
Led1,
/// LED1 corresponds to IR in MAX30102
Led2,
/// Select all available LEDs in the device
All,
}
/// Multi-LED mode sample time slot configuration
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum TimeSlot {
/// Time slot is disabled
Disabled,
/// LED 1 active during time slot (corresponds to Red in MAX30102)
Led1,
/// LED 2 active during time slot (corresponds to IR in MAX30102)
Led2,
}
/// Sample averaging
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum SampleAveraging {
/// 1 (no averaging) (default)
Sa1,
/// 2
Sa2,
/// 4
Sa4,
/// 8
Sa8,
/// 16
Sa16,
/// 32
Sa32,
}
/// Number of empty data samples when the FIFO almost full interrupt is issued.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum FifoAlmostFullLevelInterrupt {
/// Interrupt issue when 0 spaces are left in FIFO. (default)
L0,
/// Interrupt issue when 1 space is left in FIFO.
L1,
/// Interrupt issue when 2 spaces are left in FIFO.
L2,
/// Interrupt issue when 3 spaces are left in FIFO.
L3,
/// Interrupt issue when 4 spaces are left in FIFO.
L4,
/// Interrupt issue when 5 spaces are left in FIFO.
L5,
/// Interrupt issue when 6 spaces are left in FIFO.
L6,
/// Interrupt issue when 7 spaces are left in FIFO.
L7,
/// Interrupt issue when 8 spaces are left in FIFO.
L8,
/// Interrupt issue when 9 spaces are left in FIFO.
L9,
/// Interrupt issue when 10 spaces are left in FIFO.
L10,
/// Interrupt issue when 11 spaces are left in FIFO.
L11,
/// Interrupt issue when 12 spaces are left in FIFO.
L12,
/// Interrupt issue when 13 spaces are left in FIFO.
L13,
/// Interrupt issue when 14 spaces are left in FIFO.
L14,
/// Interrupt issue when 15 spaces are left in FIFO.
L15,
}
/// LED pulse width (determines ADC resolution)
///
/// This is limited by the current mode and the selected sample rate.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum LedPulseWidth {
/// 69 μs pulse width (15-bit ADC resolution)
Pw69,
/// 118 μs pulse width (16-bit ADC resolution)
Pw118,
/// 215 μs pulse width (17-bit ADC resolution)
Pw215,
/// 411 μs pulse width (18-bit ADC resolution)
Pw411,
}
/// Sampling rate
///
/// This is limited by the current mode and the selected LED pulse width.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum SamplingRate {
/// 50 samples per second
Sps50,
/// 100 samples per second
Sps100,
/// 200 samples per second
Sps200,
/// 400 samples per second
Sps400,
/// 800 samples per second
Sps800,
/// 1000 samples per second
Sps1000,
/// 1600 samples per second
Sps1600,
/// 3200 samples per second
Sps3200,
}
/// ADC range
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum AdcRange {
/// Full scale 2048 nA
Fs2k,
/// Full scale 4094 nA
Fs4k,
/// Full scale 8192 nA
Fs8k,
/// Full scale 16394 nA
Fs16k,
}
/// Interrupt status flags
#[derive(Debug, Clone, Copy)]
pub struct InterruptStatus {
/// Power ready interrupt
pub power_ready: bool,
/// FIFO almost full interrupt
pub fifo_almost_full: bool,
/// New FIFO data ready interrupt
pub new_fifo_data_ready: bool,
/// Ambient light cancellation overflow interrupt
pub alc_overflow: bool,
/// Internal die temperature conversion ready interrupt
pub temperature_ready: bool,
}
const DEVICE_ADDRESS: u8 = 0b101_0111;
struct Register;
impl Register {
const INT_STATUS: u8 = 0x0;
const INT_EN1: u8 = 0x02;
const INT_EN2: u8 = 0x03;
const FIFO_WR_PTR: u8 = 0x04;
const OVF_COUNTER: u8 = 0x05;
const FIFO_DATA: u8 = 0x07;
const FIFO_CONFIG: u8 = 0x08;
const MODE: u8 = 0x09;
const SPO2_CONFIG: u8 = 0x0A;
const LED1_PA: u8 = 0x0C;
const LED2_PA: u8 = 0x0D;
const SLOT_CONFIG0: u8 = 0x11;
const TEMP_INT: u8 = 0x1F;
const TEMP_CONFIG: u8 = 0x21;
const REV_ID: u8 = 0xFE;
const PART_ID: u8 = 0xFF;
}
struct BitFlags;
impl BitFlags {
const FIFO_A_FULL_INT: u8 = 0b1000_0000;
const ALC_OVF_INT: u8 = 0b0010_0000;
const DIE_TEMP_RDY_INT: u8 = 0b0000_0010;
const PPG_RDY_INT: u8 = 0b0100_0000;
const PWR_RDY_INT: u8 = 0b0000_0001;
const TEMP_EN: u8 = 0b0000_0001;
const SHUTDOWN: u8 = 0b1000_0000;
const RESET: u8 = 0b0100_0000;
const FIFO_ROLLOVER_EN: u8 = 0b0001_0000;
const ADC_RGE0: u8 = 0b0010_0000;
const ADC_RGE1: u8 = 0b0100_0000;
const LED_PW0: u8 = 0b0000_0001;
const LED_PW1: u8 = 0b0000_0010;
const SPO2_SR0: u8 = 0b0000_0100;
const SPO2_SR1: u8 = 0b0000_1000;
const SPO2_SR2: u8 = 0b0001_0000;
}
#[derive(Debug, Default, Clone, PartialEq)]
struct Config {
bits: u8,
}
impl Config {
fn with_high(&self, mask: u8) -> Self {
Config {
bits: self.bits | mask,
}
}
fn with_low(&self, mask: u8) -> Self {
Config {
bits: self.bits & !mask,
}
}
}
#[doc(hidden)]
pub mod marker {
pub mod mode {
pub struct None(());
pub struct HeartRate(());
pub struct Oxime |
pub struct MultiLed(());
}
pub mod ic {
pub struct Max30102(());
}
}
/// MAX3010x device driver.
#[derive(Debug, Default)]
pub struct Max3010x<I2C, IC, MODE> {
/// The concrete I²C device implementation.
i2c: I2C,
temperature_measurement_started: bool,
mode: Config,
fifo_config: Config,
spo2_config: Config,
int_en1: Config,
int_en2: Config,
_ic: PhantomData<IC>,
_mode: PhantomData<MODE>,
}
impl<I2C, E> Max3010x<I2C, marker::ic::Max30102, marker::mode::None>
where
I2C: i2c::WriteRead<Error = E> + i2c::Write<Error = E>,
{
/// Create new instance of the MAX3010x device.
pub fn new_max30102(i2c: I2C) -> Self {
Max3010x {
i2c,
temperature_measurement_started: false,
mode: Config { bits: 0 },
fifo_config: Config { bits: 0 },
spo2_config: Config { bits: 0 },
int_en1: Config { bits: 0 },
int_en2: Config { bits: 0 },
_ic: PhantomData,
_mode: PhantomData,
}
}
}
impl<I2C, E, IC, MODE> Max3010x<I2C, IC, MODE>
where
    I2C: i2c::Write<Error = E>,
{
    /// Destroy driver instance, return I²C bus instance.
    pub fn destroy(self) -> I2C {
        self.i2c
    }

    /// Write the raw byte sequence to the device at `DEVICE_ADDRESS`,
    /// mapping any bus failure into `Error::I2C`.
    fn write_data(&mut self, data: &[u8]) -> Result<(), Error<E>> {
        self.i2c.write(DEVICE_ADDRESS, data).map_err(Error::I2C)
    }
}
mod config;
mod reading;
mod private {
use super::*;
pub trait Sealed {}
impl Sealed for marker::mode::HeartRate {}
impl Sealed for marker::mode::Oximeter {}
impl Sealed for marker::mode::MultiLed {}
impl Sealed for marker::ic::Max30102 {}
}
| ter(()); | identifier_name |
lib.rs | //! This is a platform agnostic Rust driver for the MAX3010x high-sensitivity
//! pulse oximeter and heart-rate sensor for wearable health, based on the
//! [`embedded-hal`] traits.
//!
//! [`embedded-hal`]: https://github.com/rust-embedded/embedded-hal
//!
//! This driver allows you to:
//! - Get the number of samples available on the FIFO. See [`get_available_sample_count()`].
//! - Get the number of samples lost from the FIFO. See [`get_overflow_sample_count()`].
//! - Read samples from the FIFO. See [`read_fifo()`].
//! - Perform a temperature measurement. See [`read_temperature()`].
//! - Change into heart-rate, oximeter or multi-LED modes. See [`into_multi_led()`].
//! - Set the sample averaging. See [`set_sample_averaging()`].
//! - Set the LED pulse amplitude. See [`set_pulse_amplitude()`].
//! - Set the LED pulse width. See [`set_pulse_width()`].
//! - Set the sampling rate. See [`set_sampling_rate()`].
//! - Set the ADC range. See [`set_adc_range()`].
//! - Set the LED time slots in multi-LED mode. [`set_led_time_slots()`].
//! - Enable/disable the FIFO rollover. See [`enable_fifo_rollover()`].
//! - Clear the FIFO. See [`clear_fifo()`].
//! - Wake-up and shutdown the device. See [`shutdown()`].
//! - Perform a software reset. See [`reset()`].
//! - Get the device part and revision id. See [`get_part_id()`].
//! - Interrupts:
//! - Read the status of all interrupts. See [`read_interrupt_status()`].
//! - Set FIFO-almost-full level interrupt. See [`set_fifo_almost_full_level_interrupt()`].
//! - Enable/disable the FIFO-almost-full interrupt. See [`enable_fifo_almost_full_interrupt()`].
//! - Enable/disable the ambient-light-cancellation overflow interrupt. See [`enable_alc_overflow_interrupt()`].
//! - Enable/disable the temperature-ready interrupt. See [`enable_temperature_ready_interrupt()`].
//! - Enable/disable the new-FIFO-data-ready interrupt. See [`enable_new_fifo_data_ready_interrupt()`].
//!
//! [`get_available_sample_count()`]: struct.Max3010x.html#method.get_available_sample_count
//! [`get_overflow_sample_count()`]: struct.Max3010x.html#method.get_overflow_sample_count
//! [`read_fifo()`]: struct.Max3010x.html#method.read_fifo
//! [`read_temperature()`]: struct.Max3010x.html#method.read_temperature
//! [`into_multi_led()`]: struct.Max3010x.html#method.into_multi_led
//! [`set_sample_averaging()`]: struct.Max3010x.html#method.set_sample_averaging
//! [`set_pulse_width()`]: struct.Max3010x.html#method.set_pulse_width
//! [`set_pulse_amplitude()`]: struct.Max3010x.html#method.set_pulse_amplitude
//! [`set_sampling_rate()`]: struct.Max3010x.html#method.set_sampling_rate
//! [`set_adc_range()`]: struct.Max3010x.html#method.set_adc_range
//! [`set_led_time_slots()`]: struct.Max3010x.html#method.set_led_time_slots
//! [`shutdown()`]: struct.Max3010x.html#method.shutdown
//! [`reset()`]: struct.Max3010x.html#method.reset
//! [`set_fifo_almost_full_level_interrupt()`]: struct.Max3010x.html#method.set_fifo_almost_full_level_interrupt
//! [`enable_fifo_rollover()`]: struct.Max3010x.html#method.enable_fifo_rollover
//! [`clear_fifo()`]: struct.Max3010x.html#method.clear_fifo
//! [`read_interrupt_status()`]: struct.Max3010x.html#method.read_interrupt_status
//! [`enable_fifo_almost_full_interrupt()`]: struct.Max3010x.html#method.enable_fifo_almost_full_interrupt
//! [`enable_alc_overflow_interrupt()`]: struct.Max3010x.html#method.enable_alc_overflow_interrupt
//! [`enable_temperature_ready_interrupt()`]: struct.Max3010x.html#method.enable_temperature_ready_interrupt
//! [`enable_new_fifo_data_ready_interrupt()`]: struct.Max3010x.html#method.enable_new_fifo_data_ready_interrupt
//! [`get_part_id()`]: struct.Max3010x.html#method.get_part_id
//!
//! ## The device
//! The `MAX30102` is an integrated pulse oximetry and heart-rate monitor module.
//! It includes internal LEDs, photodetectors, optical elements, and low-noise
//! electronics with ambient light rejection. The `MAX30102` provides a complete
//! system solution to ease the design-in process for mobile and
//! wearable devices.
//!
//! The `MAX30102` operates on a single 1.8V power supply and a separate 3.3V
//! power supply for the internal LEDs. Communication is through a standard
//! I2C-compatible interface. The module can be shut down through software
//! with zero standby current, allowing the power rails to remain
//! powered at all times.
//!
//! Datasheet:
//! - [`MAX30102`](https://datasheets.maximintegrated.com/en/ds/MAX30102.pdf)
//!
//! ## Usage examples (see also examples folder)
//!
//! To use this driver, import this crate and an `embedded_hal` implementation,
//! then instantiate the device.
//!
//! Please find additional examples using hardware in this repository: [driver-examples]
//!
//! [driver-examples]: https://github.com/eldruin/driver-examples
//!
//! ### Read samples in heart-rate mode
//!
//! ```no_run
//! extern crate linux_embedded_hal as hal;
//! extern crate max3010x;
//! use max3010x::{Max3010x, Led, SampleAveraging};
//!
//! # fn main() {
//! let dev = hal::I2cdev::new("/dev/i2c-1").unwrap();
//! let mut sensor = Max3010x::new_max30102(dev);
//! let mut sensor = sensor.into_heart_rate().unwrap();
//! sensor.set_sample_averaging(SampleAveraging::Sa4).unwrap();
//! sensor.set_pulse_amplitude(Led::All, 15).unwrap();
//! sensor.enable_fifo_rollover().unwrap();
//! let mut data = [0; 3];
//! let samples_read = sensor.read_fifo(&mut data).unwrap();
//!
//! // get the I2C device back
//! let dev = sensor.destroy();
//! # }
//! ```
//!
//! ### Set led slots in multi-led mode
//!
//! ```no_run
//! extern crate linux_embedded_hal as hal;
//! extern crate max3010x;
//! use max3010x::{ Max3010x, Led, TimeSlot };
//!
//! # fn main() {
//! let dev = hal::I2cdev::new("/dev/i2c-1").unwrap();
//! let mut max30102 = Max3010x::new_max30102(dev);
//! let mut max30102 = max30102.into_multi_led().unwrap();
//! max30102.set_pulse_amplitude(Led::All, 15).unwrap();
//! max30102.set_led_time_slots([
//! TimeSlot::Led1,
//! TimeSlot::Led2,
//! TimeSlot::Led1,
//! TimeSlot::Disabled
//! ]).unwrap();
//! max30102.enable_fifo_rollover().unwrap();
//! let mut data = [0; 2];
//! let samples_read = max30102.read_fifo(&mut data).unwrap();
//!
//! // get the I2C device back
//! let dev = max30102.destroy();
//! # }
//! ```
//!
#![deny(missing_docs, unsafe_code)]
#![no_std]
extern crate embedded_hal as hal;
use hal::blocking::i2c;
extern crate nb;
use core::marker::PhantomData;
/// All possible errors in this crate
#[derive(Debug)]
pub enum Error<E> {
/// I²C bus error
I2C(E),
/// Invalid arguments provided
InvalidArguments,
}
/// LEDs
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Led {
/// LED1 corresponds to Red in MAX30102
Led1,
/// LED1 corresponds to IR in MAX30102
Led2,
/// Select all available LEDs in the device
All,
}
/// Multi-LED mode sample time slot configuration
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum TimeSlot {
/// Time slot is disabled
Disabled,
/// LED 1 active during time slot (corresponds to Red in MAX30102)
Led1,
/// LED 2 active during time slot (corresponds to IR in MAX30102)
Led2,
}
/// Sample averaging
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum SampleAveraging {
/// 1 (no averaging) (default)
Sa1,
/// 2
Sa2,
/// 4
Sa4,
/// 8
Sa8,
/// 16
Sa16,
/// 32
Sa32,
}
/// Number of empty data samples when the FIFO almost full interrupt is issued.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum FifoAlmostFullLevelInterrupt {
/// Interrupt issue when 0 spaces are left in FIFO. (default)
L0,
/// Interrupt issue when 1 space is left in FIFO.
L1,
/// Interrupt issue when 2 spaces are left in FIFO.
L2,
/// Interrupt issue when 3 spaces are left in FIFO.
L3,
/// Interrupt issue when 4 spaces are left in FIFO.
L4,
/// Interrupt issue when 5 spaces are left in FIFO.
L5,
/// Interrupt issue when 6 spaces are left in FIFO.
L6,
/// Interrupt issue when 7 spaces are left in FIFO.
L7,
/// Interrupt issue when 8 spaces are left in FIFO.
L8,
/// Interrupt issue when 9 spaces are left in FIFO.
L9,
/// Interrupt issue when 10 spaces are left in FIFO.
L10,
/// Interrupt issue when 11 spaces are left in FIFO.
L11,
/// Interrupt issue when 12 spaces are left in FIFO.
L12,
/// Interrupt issue when 13 spaces are left in FIFO.
L13,
/// Interrupt issue when 14 spaces are left in FIFO.
L14,
/// Interrupt issue when 15 spaces are left in FIFO.
L15,
}
/// LED pulse width (determines ADC resolution)
///
/// This is limited by the current mode and the selected sample rate.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum LedPulseWidth {
/// 69 μs pulse width (15-bit ADC resolution)
Pw69,
/// 118 μs pulse width (16-bit ADC resolution)
Pw118,
/// 215 μs pulse width (17-bit ADC resolution)
Pw215,
/// 411 μs pulse width (18-bit ADC resolution)
Pw411,
}
/// Sampling rate
///
/// This is limited by the current mode and the selected LED pulse width.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum SamplingRate {
/// 50 samples per second
Sps50,
/// 100 samples per second
Sps100,
/// 200 samples per second
Sps200,
/// 400 samples per second
Sps400,
/// 800 samples per second
Sps800,
/// 1000 samples per second
Sps1000,
/// 1600 samples per second
Sps1600,
/// 3200 samples per second
Sps3200,
}
/// ADC range
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum AdcRange {
/// Full scale 2048 nA
Fs2k,
/// Full scale 4094 nA
Fs4k,
/// Full scale 8192 nA
Fs8k,
/// Full scale 16394 nA
Fs16k,
}
/// Interrupt status flags
#[derive(Debug, Clone, Copy)]
pub struct InterruptStatus {
/// Power ready interrupt
pub power_ready: bool,
/// FIFO almost full interrupt
pub fifo_almost_full: bool,
/// New FIFO data ready interrupt
pub new_fifo_data_ready: bool,
/// Ambient light cancellation overflow interrupt | const DEVICE_ADDRESS: u8 = 0b101_0111;
// Device register addresses.
// NOTE(review): values presumably follow the MAX30102 datasheet
// register map — confirm against the datasheet before changing.
struct Register;
impl Register {
    const INT_STATUS: u8 = 0x0;
    const INT_EN1: u8 = 0x02;
    const INT_EN2: u8 = 0x03;
    const FIFO_WR_PTR: u8 = 0x04;
    const OVF_COUNTER: u8 = 0x05;
    // 0x06 (FIFO read pointer on the MAX30102) is not declared —
    // presumably unused by this driver. TODO confirm.
    const FIFO_DATA: u8 = 0x07;
    const FIFO_CONFIG: u8 = 0x08;
    const MODE: u8 = 0x09;
    const SPO2_CONFIG: u8 = 0x0A;
    const LED1_PA: u8 = 0x0C;
    const LED2_PA: u8 = 0x0D;
    const SLOT_CONFIG0: u8 = 0x11;
    const TEMP_INT: u8 = 0x1F;
    const TEMP_CONFIG: u8 = 0x21;
    const REV_ID: u8 = 0xFE;
    const PART_ID: u8 = 0xFF;
}
struct BitFlags;
impl BitFlags {
const FIFO_A_FULL_INT: u8 = 0b1000_0000;
const ALC_OVF_INT: u8 = 0b0010_0000;
const DIE_TEMP_RDY_INT: u8 = 0b0000_0010;
const PPG_RDY_INT: u8 = 0b0100_0000;
const PWR_RDY_INT: u8 = 0b0000_0001;
const TEMP_EN: u8 = 0b0000_0001;
const SHUTDOWN: u8 = 0b1000_0000;
const RESET: u8 = 0b0100_0000;
const FIFO_ROLLOVER_EN: u8 = 0b0001_0000;
const ADC_RGE0: u8 = 0b0010_0000;
const ADC_RGE1: u8 = 0b0100_0000;
const LED_PW0: u8 = 0b0000_0001;
const LED_PW1: u8 = 0b0000_0010;
const SPO2_SR0: u8 = 0b0000_0100;
const SPO2_SR1: u8 = 0b0000_1000;
const SPO2_SR2: u8 = 0b0001_0000;
}
#[derive(Debug, Default, Clone, PartialEq)]
struct Config {
bits: u8,
}
impl Config {
fn with_high(&self, mask: u8) -> Self {
Config {
bits: self.bits | mask,
}
}
fn with_low(&self, mask: u8) -> Self {
Config {
bits: self.bits & !mask,
}
}
}
#[doc(hidden)]
pub mod marker {
pub mod mode {
pub struct None(());
pub struct HeartRate(());
pub struct Oximeter(());
pub struct MultiLed(());
}
pub mod ic {
pub struct Max30102(());
}
}
/// MAX3010x device driver.
#[derive(Debug, Default)]
pub struct Max3010x<I2C, IC, MODE> {
/// The concrete I²C device implementation.
i2c: I2C,
temperature_measurement_started: bool,
mode: Config,
fifo_config: Config,
spo2_config: Config,
int_en1: Config,
int_en2: Config,
_ic: PhantomData<IC>,
_mode: PhantomData<MODE>,
}
impl<I2C, E> Max3010x<I2C, marker::ic::Max30102, marker::mode::None>
where
I2C: i2c::WriteRead<Error = E> + i2c::Write<Error = E>,
{
/// Create new instance of the MAX3010x device.
pub fn new_max30102(i2c: I2C) -> Self {
Max3010x {
i2c,
temperature_measurement_started: false,
mode: Config { bits: 0 },
fifo_config: Config { bits: 0 },
spo2_config: Config { bits: 0 },
int_en1: Config { bits: 0 },
int_en2: Config { bits: 0 },
_ic: PhantomData,
_mode: PhantomData,
}
}
}
impl<I2C, E, IC, MODE> Max3010x<I2C, IC, MODE>
where
I2C: i2c::Write<Error = E>,
{
/// Destroy driver instance, return I²C bus instance.
pub fn destroy(self) -> I2C {
self.i2c
}
fn write_data(&mut self, data: &[u8]) -> Result<(), Error<E>> {
self.i2c.write(DEVICE_ADDRESS, data).map_err(Error::I2C)
}
}
mod config;
mod reading;
mod private {
use super::*;
pub trait Sealed {}
impl Sealed for marker::mode::HeartRate {}
impl Sealed for marker::mode::Oximeter {}
impl Sealed for marker::mode::MultiLed {}
impl Sealed for marker::ic::Max30102 {}
} | pub alc_overflow: bool,
/// Internal die temperature conversion ready interrupt
pub temperature_ready: bool,
}
| random_line_split |
grpc.go | package protocols
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net"
"reflect"
"github.com/feiyuw/simgo/logger"
"github.com/fullstorydev/grpcurl"
"github.com/jhump/protoreflect/desc"
"github.com/jhump/protoreflect/dynamic"
"github.com/jhump/protoreflect/grpcreflect"
"google.golang.org/grpc"
"google.golang.org/grpc/peer"
rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)
// ================================== client ==================================
var (
clientCTX = context.Background() // TODO: add timeout, dialtime options
)
// GrpcClient is a simple gRPC client whose service/method descriptors
// come either from proto files or from server reflection.
type GrpcClient struct {
	addr string                   // connected service addr, eg. 127.0.0.1:1988
	conn *grpc.ClientConn         // connection with grpc server
	desc grpcurl.DescriptorSource // source of service/method descriptors
}
// Create a new grpc client
// if protos set, will get services and methods from proto files
// if addr set but protos empty, will get services and methods from server reflection
func NewGrpcClient(addr string, protos []string, opts ...grpc.DialOption) (*GrpcClient, error) {
var descSource grpcurl.DescriptorSource
if addr == "" {
return nil, fmt.Errorf("addr should not be empty")
}
conn, err := grpc.Dial(addr, opts...)
if err != nil {
return nil, fmt.Errorf("did not connect: %v", err)
}
if len(protos) > 0 {
descSource, err := grpcurl.DescriptorSourceFromProtoFiles([]string{}, protos...)
if err != nil {
return nil, fmt.Errorf("cannot parse proto file: %v", err)
}
return &GrpcClient{addr: addr, conn: conn, desc: descSource}, nil
} | c := rpb.NewServerReflectionClient(conn)
refClient := grpcreflect.NewClient(clientCTX, c)
descSource = grpcurl.DescriptorSourceFromServer(clientCTX, refClient)
return &GrpcClient{addr: addr, conn: conn, desc: descSource}, nil
}
// ListServices returns the fully-qualified names of all services known
// to this client (from proto files or server reflection).
func (gc *GrpcClient) ListServices() ([]string, error) {
	return grpcurl.ListServices(gc.desc)
}
// ListMethods returns the fully-qualified method names of the given
// service.
func (gc *GrpcClient) ListMethods(svcName string) ([]string, error) {
	return grpcurl.ListMethods(gc.desc, svcName)
}
// Close tears down the underlying gRPC connection. Calling it on a
// client without an established connection is a no-op.
func (gc *GrpcClient) Close() error {
	if gc.conn != nil {
		return gc.conn.Close()
	}
	return nil
}
// rpcResponse collects the JSON-formatted messages produced by a gRPC
// invocation; every Write stores one complete response message.
type rpcResponse struct {
	messages []bytes.Buffer
}

// Write implements io.Writer. Each write is treated as one whole
// response message and buffered separately.
func (rr *rpcResponse) Write(p []byte) (int, error) {
	var buf bytes.Buffer
	n, err := buf.Write(p)
	rr.messages = append(rr.messages, buf)
	return n, err
}

// ToJSON decodes the buffered messages: no messages yield an empty map,
// a unary response yields a single map, and a streaming response yields
// a slice of maps.
func (rr *rpcResponse) ToJSON() (interface{}, error) {
	if len(rr.messages) == 0 {
		return map[string]interface{}{}, nil
	}
	if len(rr.messages) == 1 {
		single := make(map[string]interface{})
		if err := json.Unmarshal(rr.messages[0].Bytes(), &single); err != nil {
			return nil, err
		}
		return single, nil
	}
	all := make([]map[string]interface{}, len(rr.messages))
	for i := range rr.messages {
		m := make(map[string]interface{})
		if err := json.Unmarshal(rr.messages[i].Bytes(), &m); err != nil {
			return nil, err
		}
		all[i] = m
	}
	return all, nil
}
// InvokeRPC invokes the fully-qualified method mtdName with reqData as
// the request payload and returns the JSON-decoded response.
//
// reqData may be:
//   - []map[string]interface{}: one JSON message per element (streaming)
//   - a map: marshaled as a single JSON message
//   - a string: raw JSON passed through as-is
//
// Fix: unsupported payload types (including nil reqData) now return an
// error instead of panicking on a failed type assertion.
func (gc *GrpcClient) InvokeRPC(mtdName string, reqData interface{}) (interface{}, error) {
	var in bytes.Buffer
	var out = rpcResponse{messages: []bytes.Buffer{}}
	if reqData == nil {
		// reflect.TypeOf(nil).Kind() would panic below
		return nil, fmt.Errorf("reqData should not be nil")
	}
	switch reflect.TypeOf(reqData).Kind() {
	case reflect.Slice:
		msgs, ok := reqData.([]map[string]interface{})
		if !ok {
			return nil, fmt.Errorf("unsupported slice request type %T", reqData)
		}
		for _, data := range msgs {
			reqBytes, err := json.Marshal(data)
			if err != nil {
				return nil, err
			}
			in.Write(reqBytes)
		}
	case reflect.Map:
		reqBytes, err := json.Marshal(reqData)
		if err != nil {
			return nil, err
		}
		in.Write(reqBytes)
	default:
		s, ok := reqData.(string)
		if !ok {
			return nil, fmt.Errorf("unsupported request type %T", reqData)
		}
		in.WriteString(s)
	}
	rf, formatter, err := grpcurl.RequestParserAndFormatterFor(grpcurl.FormatJSON, gc.desc, true, false, &in)
	if err != nil {
		return nil, err
	}
	h := grpcurl.NewDefaultEventHandler(&out, gc.desc, formatter, false)
	if err = grpcurl.InvokeRPC(clientCTX, gc.desc, gc.conn, mtdName, []string{}, h, rf.Next); err != nil {
		return nil, err
	}
	return out.ToJSON()
}
// ================================== server ==================================
// GrpcServer is a programmable mock gRPC server: its services come from
// proto files, its behavior from per-method handlers.
type GrpcServer struct {
	addr   string
	desc   grpcurl.DescriptorSource
	server *grpc.Server
	// handlerM maps a fully-qualified method name to its handler.
	handlerM map[string]func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error
	// listeners observe every in/out message (see AddListener).
	listeners []func(mtd, direction, from, to, body string) error
}
// NewGrpcServer creates a gRPC server for addr whose services are taken
// from the given proto files. Unary and streaming methods are wired to
// dispatchers that look up the handlers registered via SetMethodHandler.
//
// Fix: the previous version re-created the server with grpc.NewServer()
// right after constructing it with grpc.NewServer(opts...), silently
// dropping every caller-supplied grpc.ServerOption.
func NewGrpcServer(addr string, protos []string, opts ...grpc.ServerOption) (*GrpcServer, error) {
	descFromProto, err := grpcurl.DescriptorSourceFromProtoFiles([]string{}, protos...)
	if err != nil {
		return nil, fmt.Errorf("cannot parse proto file: %v", err)
	}
	gs := &GrpcServer{
		addr:     addr,
		desc:     descFromProto,
		server:   grpc.NewServer(opts...),
		handlerM: map[string]func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error{},
	}
	services, err := grpcurl.ListServices(gs.desc)
	if err != nil {
		return nil, fmt.Errorf("failed to list services")
	}
	for _, svcName := range services {
		dsc, err := gs.desc.FindSymbol(svcName)
		if err != nil {
			return nil, fmt.Errorf("unable to find service: %s, error: %v", svcName, err)
		}
		sd := dsc.(*desc.ServiceDescriptor)
		unaryMethods := []grpc.MethodDesc{}
		streamMethods := []grpc.StreamDesc{}
		for _, mtd := range sd.GetMethods() {
			logger.Debugf("protocols/grpc", "try to add method: %v of service: %s", mtd, svcName)
			if mtd.IsClientStreaming() || mtd.IsServerStreaming() {
				streamMethods = append(streamMethods, grpc.StreamDesc{
					StreamName:    mtd.GetName(),
					Handler:       gs.getStreamHandler(mtd),
					ServerStreams: mtd.IsServerStreaming(),
					ClientStreams: mtd.IsClientStreaming(),
				})
			} else {
				unaryMethods = append(unaryMethods, grpc.MethodDesc{
					MethodName: mtd.GetName(),
					Handler:    gs.getUnaryHandler(mtd),
				})
			}
		}
		svcDesc := grpc.ServiceDesc{
			ServiceName: svcName,
			HandlerType: (*interface{})(nil),
			Methods:     unaryMethods,
			Streams:     streamMethods,
			Metadata:    sd.GetFile().GetName(),
		}
		gs.server.RegisterService(&svcDesc, &mockServer{})
	}
	return gs, nil
}
// Start binds gs.addr and serves in a background goroutine. It returns
// immediately after a successful listen; serve errors are only logged,
// never surfaced to the caller.
func (gs *GrpcServer) Start() error {
	lis, err := net.Listen("tcp", gs.addr)
	if err != nil {
		return err
	}
	logger.Infof("protocols/grpc", "server listening at %v", lis.Addr())
	go func() {
		if err := gs.server.Serve(lis); err != nil {
			logger.Errorf("protocols/grpc", "failed to serve: %v", err)
		}
	}()
	return nil
}
// Close stops the gRPC server (if running) and drops all registered
// method handlers. Calling Close more than once is a no-op.
func (gs *GrpcServer) Close() error {
	if gs.server == nil {
		return nil
	}
	gs.server.Stop()
	gs.server = nil
	gs.handlerM = map[string]func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error{}
	logger.Infof("protocols/grpc", "grpc server %s stopped", gs.addr)
	return nil
}
// AddListener registers a callback that observes every message handled
// by the server. direction is "in" (request) or "out" (response); from
// and to are the peer/server addresses, body the message's string form.
// NOTE(review): listener return errors are ignored by the callers —
// confirm that is intended.
func (gs *GrpcServer) AddListener(listener func(mtd, direction, from, to, body string) error) {
	gs.listeners = append(gs.listeners, listener)
	logger.Infof("protocols/grpc", "new listener added, now %d listeners", len(gs.listeners))
}
// SetMethodHandler installs the handler for the fully-qualified method
// name mtd. One method has exactly one handler; an existing handler for
// the same method is overridden (a warning is logged).
// To return an error to the client from a handler, see
// https://github.com/avinassh/grpc-errors/blob/master/go/server.go
// NOTE: thread unsafe, use lock in ops level
func (gs *GrpcServer) SetMethodHandler(mtd string, handler func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error) error {
	if _, exists := gs.handlerM[mtd]; exists {
		logger.Warnf("protocols/grpc", "handler for method %s exists, will be overrided", mtd)
	}
	gs.handlerM[mtd] = handler
	return nil
}
// RemoveMethodHandler unregisters the handler for mtd, if any.
// NOTE: thread unsafe, use lock in ops level
func (gs *GrpcServer) RemoveMethodHandler(mtd string) error {
	// delete is a no-op for a missing key, so no existence check needed
	delete(gs.handlerM, mtd)
	return nil
}
// ListMethods returns the fully-qualified names of every method of
// every service served by this server.
func (gs *GrpcServer) ListMethods() ([]string, error) {
	services, err := grpcurl.ListServices(gs.desc)
	if err != nil {
		return nil, fmt.Errorf("failed to list services")
	}
	methods := []string{}
	for _, svcName := range services {
		dsc, err := gs.desc.FindSymbol(svcName)
		if err != nil {
			return nil, fmt.Errorf("unable to find service: %s, error: %v", svcName, err)
		}
		for _, mtd := range dsc.(*desc.ServiceDescriptor).GetMethods() {
			methods = append(methods, mtd.GetFullyQualifiedName())
		}
	}
	return methods, nil
}
// getMethodHandler looks up the handler registered for mtd.
func (gs *GrpcServer) getMethodHandler(mtd string) (func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error, error) {
	if handler, ok := gs.handlerM[mtd]; ok {
		return handler, nil
	}
	return nil, fmt.Errorf("handler for method %s not found", mtd)
}
// getUnaryHandler builds the grpc.MethodDesc handler for a unary
// method: it decodes the request into a dynamic message, runs the
// user-registered handler, reports in/out messages to listeners, and
// honors an optional unary server interceptor.
func (gs *GrpcServer) getUnaryHandler(mtd *desc.MethodDescriptor) func(interface{}, context.Context, func(interface{}) error, grpc.UnaryServerInterceptor) (interface{}, error) {
	// Resolve the name once; the closure below runs on every RPC.
	mtdFqn := mtd.GetFullyQualifiedName()
	return func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
		var peerAddr string
		// Peer address is only needed for listener reporting.
		if len(gs.listeners) > 0 {
			p, ok := peer.FromContext(ctx)
			if ok {
				peerAddr = p.Addr.String()
			} else {
				logger.Error("protocols/grpc", "failed to get peer address")
			}
		}
		handler, err := gs.getMethodHandler(mtdFqn)
		if err != nil {
			logger.Errorf("protocols/grpc", "no handler for %s", mtdFqn)
			return nil, err
		}
		// Decode the wire request into a message of the input type.
		in := dynamic.NewMessage(mtd.GetInputType())
		if err := dec(in); err != nil {
			return nil, err
		}
		// handle in message in listener
		// NOTE(review): listener errors are silently discarded here.
		for _, listener := range gs.listeners {
			listener(mtdFqn, "in", peerAddr, gs.addr, in.String())
		}
		// The handler fills `out` (the method's output type) in place.
		out := dynamic.NewMessage(mtd.GetOutputType())
		if err := handler(in, out, nil); err != nil {
			return nil, err
		}
		// handle out message in listener
		for _, listener := range gs.listeners {
			listener(mtdFqn, "out", gs.addr, peerAddr, out.String())
		}
		if interceptor == nil {
			return out, nil
		}
		// Route through the interceptor; the wrapper ignores its req
		// argument and returns the already-computed response.
		info := &grpc.UnaryServerInfo{
			Server:     srv,
			FullMethod: mtdFqn,
		}
		wrapper := func(ctx context.Context, req interface{}) (interface{}, error) {
			return out, nil
		}
		return interceptor(ctx, in, info, wrapper)
	}
}
// getStreamHandler builds the grpc.StreamDesc handler for a streaming
// method. The registered handler receives freshly-created (empty)
// in/out messages plus the raw stream, and is itself responsible for
// all send/receive on the stream.
func (gs *GrpcServer) getStreamHandler(mtd *desc.MethodDescriptor) func(interface{}, grpc.ServerStream) error {
	// Resolve the name once; the closure below runs per stream.
	mtdFqn := mtd.GetFullyQualifiedName()
	return func(srv interface{}, stream grpc.ServerStream) error {
		// TODO: listeners for stream messages
		handler, err := gs.getMethodHandler(mtdFqn)
		if err != nil {
			return err
		}
		in := dynamic.NewMessage(mtd.GetInputType())
		out := dynamic.NewMessage(mtd.GetOutputType())
		if err := handler(in, out, stream); err != nil {
			return err
		}
		return nil
	}
}
// mock server struct for service descriptor
type mockServer struct {
} |
// fetch from server reflection RPC | random_line_split |
grpc.go | package protocols
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net"
"reflect"
"github.com/feiyuw/simgo/logger"
"github.com/fullstorydev/grpcurl"
"github.com/jhump/protoreflect/desc"
"github.com/jhump/protoreflect/dynamic"
"github.com/jhump/protoreflect/grpcreflect"
"google.golang.org/grpc"
"google.golang.org/grpc/peer"
rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)
// ================================== client ==================================
var (
clientCTX = context.Background() // TODO: add timeout, dialtime options
)
// client
type GrpcClient struct {
addr string // connected service addr, eg. 127.0.0.1:1988
conn *grpc.ClientConn // connection with grpc server
desc grpcurl.DescriptorSource
}
// NewGrpcClient creates a gRPC client connected to addr.
//
// If proto files are given, services and methods are resolved from
// them; otherwise they are discovered via server reflection.
//
// Fix: the previous version leaked the just-established connection
// when proto-file parsing failed after a successful Dial.
func NewGrpcClient(addr string, protos []string, opts ...grpc.DialOption) (*GrpcClient, error) {
	if addr == "" {
		return nil, fmt.Errorf("addr should not be empty")
	}
	conn, err := grpc.Dial(addr, opts...)
	if err != nil {
		return nil, fmt.Errorf("did not connect: %v", err)
	}
	if len(protos) > 0 {
		descSource, err := grpcurl.DescriptorSourceFromProtoFiles([]string{}, protos...)
		if err != nil {
			// don't leak the connection on failure
			conn.Close()
			return nil, fmt.Errorf("cannot parse proto file: %v", err)
		}
		return &GrpcClient{addr: addr, conn: conn, desc: descSource}, nil
	}
	// fetch from server reflection RPC
	c := rpb.NewServerReflectionClient(conn)
	refClient := grpcreflect.NewClient(clientCTX, c)
	descSource := grpcurl.DescriptorSourceFromServer(clientCTX, refClient)
	return &GrpcClient{addr: addr, conn: conn, desc: descSource}, nil
}
func (gc *GrpcClient) ListServices() ([]string, error) {
svcs, err := grpcurl.ListServices(gc.desc)
if err != nil {
return nil, err
}
return svcs, nil
}
func (gc *GrpcClient) ListMethods(svcName string) ([]string, error) {
mtds, err := grpcurl.ListMethods(gc.desc, svcName)
if err != nil {
return nil, err
}
return mtds, nil
}
func (gc *GrpcClient) Close() error {
if gc.conn == nil {
return nil
}
return gc.conn.Close()
}
type rpcResponse struct {
messages []bytes.Buffer
}
func (rr *rpcResponse) Write(p []byte) (int, error) {
var msg bytes.Buffer
n, err := msg.Write(p)
rr.messages = append(rr.messages, msg)
return n, err
}
func (rr *rpcResponse) ToJSON() (interface{}, error) {
switch len(rr.messages) {
case 0:
return map[string]interface{}{}, nil
case 1:
resp := make(map[string]interface{})
if err := json.Unmarshal(rr.messages[0].Bytes(), &resp); err != nil {
return nil, err
}
return resp, nil
default:
resp := make([]map[string]interface{}, len(rr.messages))
for idx, msg := range rr.messages {
oneResp := make(map[string]interface{})
if err := json.Unmarshal(msg.Bytes(), &oneResp); err != nil {
return nil, err
}
resp[idx] = oneResp
}
return resp, nil
}
}
func (gc *GrpcClient) InvokeRPC(mtdName string, reqData interface{}) (interface{}, error) {
var in bytes.Buffer
var out = rpcResponse{messages: []bytes.Buffer{}}
switch reflect.TypeOf(reqData).Kind() {
case reflect.Slice:
for _, data := range reqData.([]map[string]interface{}) {
reqBytes, err := json.Marshal(data)
if err != nil {
return nil, err
}
in.Write(reqBytes)
}
case reflect.Map:
reqBytes, err := json.Marshal(reqData)
if err != nil {
return nil, err
}
in.Write(reqBytes)
default:
in.WriteString(reqData.(string))
}
rf, formatter, err := grpcurl.RequestParserAndFormatterFor(grpcurl.FormatJSON, gc.desc, true, false, &in)
if err != nil {
return nil, err
}
h := grpcurl.NewDefaultEventHandler(&out, gc.desc, formatter, false)
if err = grpcurl.InvokeRPC(clientCTX, gc.desc, gc.conn, mtdName, []string{}, h, rf.Next); err != nil {
return nil, err
}
return out.ToJSON()
}
// ================================== server ==================================
type GrpcServer struct {
addr string
desc grpcurl.DescriptorSource
server *grpc.Server
handlerM map[string]func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error
listeners []func(mtd, direction, from, to, body string) error
}
// NewGrpcServer creates a gRPC server for addr whose services are taken
// from the given proto files. Unary and streaming methods are wired to
// dispatchers that look up the handlers registered via SetMethodHandler.
//
// Fix: the previous version re-created the server with grpc.NewServer()
// right after constructing it with grpc.NewServer(opts...), silently
// dropping every caller-supplied grpc.ServerOption.
func NewGrpcServer(addr string, protos []string, opts ...grpc.ServerOption) (*GrpcServer, error) {
	descFromProto, err := grpcurl.DescriptorSourceFromProtoFiles([]string{}, protos...)
	if err != nil {
		return nil, fmt.Errorf("cannot parse proto file: %v", err)
	}
	gs := &GrpcServer{
		addr:     addr,
		desc:     descFromProto,
		server:   grpc.NewServer(opts...),
		handlerM: map[string]func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error{},
	}
	services, err := grpcurl.ListServices(gs.desc)
	if err != nil {
		return nil, fmt.Errorf("failed to list services")
	}
	for _, svcName := range services {
		dsc, err := gs.desc.FindSymbol(svcName)
		if err != nil {
			return nil, fmt.Errorf("unable to find service: %s, error: %v", svcName, err)
		}
		sd := dsc.(*desc.ServiceDescriptor)
		unaryMethods := []grpc.MethodDesc{}
		streamMethods := []grpc.StreamDesc{}
		for _, mtd := range sd.GetMethods() {
			logger.Debugf("protocols/grpc", "try to add method: %v of service: %s", mtd, svcName)
			if mtd.IsClientStreaming() || mtd.IsServerStreaming() {
				streamMethods = append(streamMethods, grpc.StreamDesc{
					StreamName:    mtd.GetName(),
					Handler:       gs.getStreamHandler(mtd),
					ServerStreams: mtd.IsServerStreaming(),
					ClientStreams: mtd.IsClientStreaming(),
				})
			} else {
				unaryMethods = append(unaryMethods, grpc.MethodDesc{
					MethodName: mtd.GetName(),
					Handler:    gs.getUnaryHandler(mtd),
				})
			}
		}
		svcDesc := grpc.ServiceDesc{
			ServiceName: svcName,
			HandlerType: (*interface{})(nil),
			Methods:     unaryMethods,
			Streams:     streamMethods,
			Metadata:    sd.GetFile().GetName(),
		}
		gs.server.RegisterService(&svcDesc, &mockServer{})
	}
	return gs, nil
}
func (gs *GrpcServer) Start() error {
lis, err := net.Listen("tcp", gs.addr)
if err != nil {
return err
}
logger.Infof("protocols/grpc", "server listening at %v", lis.Addr())
go func() {
if err := gs.server.Serve(lis); err != nil {
logger.Errorf("protocols/grpc", "failed to serve: %v", err)
}
}()
return nil
}
func (gs *GrpcServer) Close() error {
if gs.server != nil {
gs.server.Stop()
gs.server = nil
gs.handlerM = map[string]func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error{}
logger.Infof("protocols/grpc", "grpc server %s stopped", gs.addr)
}
return nil
}
func (gs *GrpcServer) AddListener(listener func(mtd, direction, from, to, body string) error) {
gs.listeners = append(gs.listeners, listener)
logger.Infof("protocols/grpc", "new listener added, now %d listeners", len(gs.listeners))
}
// set specified method handler, one method only have one handler, it's the highest priority
// if you want to return error, see https://github.com/avinassh/grpc-errors/blob/master/go/server.go
// NOTE: thread unsafe, use lock in ops level
func (gs *GrpcServer) | (mtd string, handler func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error) error {
if _, exists := gs.handlerM[mtd]; exists {
logger.Warnf("protocols/grpc", "handler for method %s exists, will be overrided", mtd)
}
gs.handlerM[mtd] = handler
return nil
}
// RemoveMethodHandler unregisters the handler for mtd, if any.
// NOTE: thread unsafe, use lock in ops level
func (gs *GrpcServer) RemoveMethodHandler(mtd string) error {
	// delete is a no-op for a missing key, so no existence check needed
	delete(gs.handlerM, mtd)
	return nil
}
func (gs *GrpcServer) ListMethods() ([]string, error) {
methods := []string{}
services, err := grpcurl.ListServices(gs.desc)
if err != nil {
return nil, fmt.Errorf("failed to list services")
}
for _, svcName := range services {
dsc, err := gs.desc.FindSymbol(svcName)
if err != nil {
return nil, fmt.Errorf("unable to find service: %s, error: %v", svcName, err)
}
sd := dsc.(*desc.ServiceDescriptor)
for _, mtd := range sd.GetMethods() {
methods = append(methods, mtd.GetFullyQualifiedName())
}
}
return methods, nil
}
func (gs *GrpcServer) getMethodHandler(mtd string) (func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error, error) {
handler, ok := gs.handlerM[mtd]
if !ok {
return nil, fmt.Errorf("handler for method %s not found", mtd)
}
return handler, nil
}
func (gs *GrpcServer) getUnaryHandler(mtd *desc.MethodDescriptor) func(interface{}, context.Context, func(interface{}) error, grpc.UnaryServerInterceptor) (interface{}, error) {
mtdFqn := mtd.GetFullyQualifiedName()
return func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
var peerAddr string
if len(gs.listeners) > 0 {
p, ok := peer.FromContext(ctx)
if ok {
peerAddr = p.Addr.String()
} else {
logger.Error("protocols/grpc", "failed to get peer address")
}
}
handler, err := gs.getMethodHandler(mtdFqn)
if err != nil {
logger.Errorf("protocols/grpc", "no handler for %s", mtdFqn)
return nil, err
}
in := dynamic.NewMessage(mtd.GetInputType())
if err := dec(in); err != nil {
return nil, err
}
// handle in message in listener
for _, listener := range gs.listeners {
listener(mtdFqn, "in", peerAddr, gs.addr, in.String())
}
out := dynamic.NewMessage(mtd.GetOutputType())
if err := handler(in, out, nil); err != nil {
return nil, err
}
// handle out message in listener
for _, listener := range gs.listeners {
listener(mtdFqn, "out", gs.addr, peerAddr, out.String())
}
if interceptor == nil {
return out, nil
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: mtdFqn,
}
wrapper := func(ctx context.Context, req interface{}) (interface{}, error) {
return out, nil
}
return interceptor(ctx, in, info, wrapper)
}
}
func (gs *GrpcServer) getStreamHandler(mtd *desc.MethodDescriptor) func(interface{}, grpc.ServerStream) error {
mtdFqn := mtd.GetFullyQualifiedName()
return func(srv interface{}, stream grpc.ServerStream) error {
// TODO: listeners for stream messages
handler, err := gs.getMethodHandler(mtdFqn)
if err != nil {
return err
}
in := dynamic.NewMessage(mtd.GetInputType())
out := dynamic.NewMessage(mtd.GetOutputType())
if err := handler(in, out, stream); err != nil {
return err
}
return nil
}
}
// mock server struct for service descriptor
type mockServer struct {
}
| SetMethodHandler | identifier_name |
grpc.go | package protocols
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net"
"reflect"
"github.com/feiyuw/simgo/logger"
"github.com/fullstorydev/grpcurl"
"github.com/jhump/protoreflect/desc"
"github.com/jhump/protoreflect/dynamic"
"github.com/jhump/protoreflect/grpcreflect"
"google.golang.org/grpc"
"google.golang.org/grpc/peer"
rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)
// ================================== client ==================================
var (
clientCTX = context.Background() // TODO: add timeout, dialtime options
)
// client
type GrpcClient struct {
addr string // connected service addr, eg. 127.0.0.1:1988
conn *grpc.ClientConn // connection with grpc server
desc grpcurl.DescriptorSource
}
// Create a new grpc client
// if protos set, will get services and methods from proto files
// if addr set but protos empty, will get services and methods from server reflection
func NewGrpcClient(addr string, protos []string, opts ...grpc.DialOption) (*GrpcClient, error) {
var descSource grpcurl.DescriptorSource
if addr == "" {
return nil, fmt.Errorf("addr should not be empty")
}
conn, err := grpc.Dial(addr, opts...)
if err != nil {
return nil, fmt.Errorf("did not connect: %v", err)
}
if len(protos) > 0 {
descSource, err := grpcurl.DescriptorSourceFromProtoFiles([]string{}, protos...)
if err != nil {
return nil, fmt.Errorf("cannot parse proto file: %v", err)
}
return &GrpcClient{addr: addr, conn: conn, desc: descSource}, nil
}
// fetch from server reflection RPC
c := rpb.NewServerReflectionClient(conn)
refClient := grpcreflect.NewClient(clientCTX, c)
descSource = grpcurl.DescriptorSourceFromServer(clientCTX, refClient)
return &GrpcClient{addr: addr, conn: conn, desc: descSource}, nil
}
func (gc *GrpcClient) ListServices() ([]string, error) {
svcs, err := grpcurl.ListServices(gc.desc)
if err != nil {
return nil, err
}
return svcs, nil
}
func (gc *GrpcClient) ListMethods(svcName string) ([]string, error) {
mtds, err := grpcurl.ListMethods(gc.desc, svcName)
if err != nil {
return nil, err
}
return mtds, nil
}
func (gc *GrpcClient) Close() error {
if gc.conn == nil {
return nil
}
return gc.conn.Close()
}
type rpcResponse struct {
messages []bytes.Buffer
}
func (rr *rpcResponse) Write(p []byte) (int, error) {
var msg bytes.Buffer
n, err := msg.Write(p)
rr.messages = append(rr.messages, msg)
return n, err
}
func (rr *rpcResponse) ToJSON() (interface{}, error) {
switch len(rr.messages) {
case 0:
return map[string]interface{}{}, nil
case 1:
resp := make(map[string]interface{})
if err := json.Unmarshal(rr.messages[0].Bytes(), &resp); err != nil {
return nil, err
}
return resp, nil
default:
resp := make([]map[string]interface{}, len(rr.messages))
for idx, msg := range rr.messages {
oneResp := make(map[string]interface{})
if err := json.Unmarshal(msg.Bytes(), &oneResp); err != nil {
return nil, err
}
resp[idx] = oneResp
}
return resp, nil
}
}
func (gc *GrpcClient) InvokeRPC(mtdName string, reqData interface{}) (interface{}, error) {
var in bytes.Buffer
var out = rpcResponse{messages: []bytes.Buffer{}}
switch reflect.TypeOf(reqData).Kind() {
case reflect.Slice:
for _, data := range reqData.([]map[string]interface{}) {
reqBytes, err := json.Marshal(data)
if err != nil {
return nil, err
}
in.Write(reqBytes)
}
case reflect.Map:
reqBytes, err := json.Marshal(reqData)
if err != nil {
return nil, err
}
in.Write(reqBytes)
default:
in.WriteString(reqData.(string))
}
rf, formatter, err := grpcurl.RequestParserAndFormatterFor(grpcurl.FormatJSON, gc.desc, true, false, &in)
if err != nil {
return nil, err
}
h := grpcurl.NewDefaultEventHandler(&out, gc.desc, formatter, false)
if err = grpcurl.InvokeRPC(clientCTX, gc.desc, gc.conn, mtdName, []string{}, h, rf.Next); err != nil {
return nil, err
}
return out.ToJSON()
}
// ================================== server ==================================
type GrpcServer struct {
addr string
desc grpcurl.DescriptorSource
server *grpc.Server
handlerM map[string]func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error
listeners []func(mtd, direction, from, to, body string) error
}
// create a new grpc server
func NewGrpcServer(addr string, protos []string, opts ...grpc.ServerOption) (*GrpcServer, error) |
func (gs *GrpcServer) Start() error {
lis, err := net.Listen("tcp", gs.addr)
if err != nil {
return err
}
logger.Infof("protocols/grpc", "server listening at %v", lis.Addr())
go func() {
if err := gs.server.Serve(lis); err != nil {
logger.Errorf("protocols/grpc", "failed to serve: %v", err)
}
}()
return nil
}
func (gs *GrpcServer) Close() error {
if gs.server != nil {
gs.server.Stop()
gs.server = nil
gs.handlerM = map[string]func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error{}
logger.Infof("protocols/grpc", "grpc server %s stopped", gs.addr)
}
return nil
}
func (gs *GrpcServer) AddListener(listener func(mtd, direction, from, to, body string) error) {
gs.listeners = append(gs.listeners, listener)
logger.Infof("protocols/grpc", "new listener added, now %d listeners", len(gs.listeners))
}
// set specified method handler, one method only have one handler, it's the highest priority
// if you want to return error, see https://github.com/avinassh/grpc-errors/blob/master/go/server.go
// NOTE: thread unsafe, use lock in ops level
func (gs *GrpcServer) SetMethodHandler(mtd string, handler func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error) error {
if _, exists := gs.handlerM[mtd]; exists {
logger.Warnf("protocols/grpc", "handler for method %s exists, will be overrided", mtd)
}
gs.handlerM[mtd] = handler
return nil
}
// NOTE: thread unsafe, use lock in ops level
func (gs *GrpcServer) RemoveMethodHandler(mtd string) error {
if _, exists := gs.handlerM[mtd]; exists {
delete(gs.handlerM, mtd)
}
return nil
}
func (gs *GrpcServer) ListMethods() ([]string, error) {
methods := []string{}
services, err := grpcurl.ListServices(gs.desc)
if err != nil {
return nil, fmt.Errorf("failed to list services")
}
for _, svcName := range services {
dsc, err := gs.desc.FindSymbol(svcName)
if err != nil {
return nil, fmt.Errorf("unable to find service: %s, error: %v", svcName, err)
}
sd := dsc.(*desc.ServiceDescriptor)
for _, mtd := range sd.GetMethods() {
methods = append(methods, mtd.GetFullyQualifiedName())
}
}
return methods, nil
}
func (gs *GrpcServer) getMethodHandler(mtd string) (func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error, error) {
handler, ok := gs.handlerM[mtd]
if !ok {
return nil, fmt.Errorf("handler for method %s not found", mtd)
}
return handler, nil
}
func (gs *GrpcServer) getUnaryHandler(mtd *desc.MethodDescriptor) func(interface{}, context.Context, func(interface{}) error, grpc.UnaryServerInterceptor) (interface{}, error) {
mtdFqn := mtd.GetFullyQualifiedName()
return func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
var peerAddr string
if len(gs.listeners) > 0 {
p, ok := peer.FromContext(ctx)
if ok {
peerAddr = p.Addr.String()
} else {
logger.Error("protocols/grpc", "failed to get peer address")
}
}
handler, err := gs.getMethodHandler(mtdFqn)
if err != nil {
logger.Errorf("protocols/grpc", "no handler for %s", mtdFqn)
return nil, err
}
in := dynamic.NewMessage(mtd.GetInputType())
if err := dec(in); err != nil {
return nil, err
}
// handle in message in listener
for _, listener := range gs.listeners {
listener(mtdFqn, "in", peerAddr, gs.addr, in.String())
}
out := dynamic.NewMessage(mtd.GetOutputType())
if err := handler(in, out, nil); err != nil {
return nil, err
}
// handle out message in listener
for _, listener := range gs.listeners {
listener(mtdFqn, "out", gs.addr, peerAddr, out.String())
}
if interceptor == nil {
return out, nil
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: mtdFqn,
}
wrapper := func(ctx context.Context, req interface{}) (interface{}, error) {
return out, nil
}
return interceptor(ctx, in, info, wrapper)
}
}
func (gs *GrpcServer) getStreamHandler(mtd *desc.MethodDescriptor) func(interface{}, grpc.ServerStream) error {
mtdFqn := mtd.GetFullyQualifiedName()
return func(srv interface{}, stream grpc.ServerStream) error {
// TODO: listeners for stream messages
handler, err := gs.getMethodHandler(mtdFqn)
if err != nil {
return err
}
in := dynamic.NewMessage(mtd.GetInputType())
out := dynamic.NewMessage(mtd.GetOutputType())
if err := handler(in, out, stream); err != nil {
return err
}
return nil
}
}
// mock server struct for service descriptor
type mockServer struct {
}
| {
descFromProto, err := grpcurl.DescriptorSourceFromProtoFiles([]string{}, protos...)
if err != nil {
return nil, fmt.Errorf("cannot parse proto file: %v", err)
}
gs := &GrpcServer{
addr: addr,
desc: descFromProto,
server: grpc.NewServer(opts...),
handlerM: map[string]func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error{},
}
gs.server = grpc.NewServer()
services, err := grpcurl.ListServices(gs.desc)
if err != nil {
return nil, fmt.Errorf("failed to list services")
}
for _, svcName := range services {
dsc, err := gs.desc.FindSymbol(svcName)
if err != nil {
return nil, fmt.Errorf("unable to find service: %s, error: %v", svcName, err)
}
sd := dsc.(*desc.ServiceDescriptor)
unaryMethods := []grpc.MethodDesc{}
streamMethods := []grpc.StreamDesc{}
for _, mtd := range sd.GetMethods() {
logger.Debugf("protocols/grpc", "try to add method: %v of service: %s", mtd, svcName)
if mtd.IsClientStreaming() || mtd.IsServerStreaming() {
streamMethods = append(streamMethods, grpc.StreamDesc{
StreamName: mtd.GetName(),
Handler: gs.getStreamHandler(mtd),
ServerStreams: mtd.IsServerStreaming(),
ClientStreams: mtd.IsClientStreaming(),
})
} else {
unaryMethods = append(unaryMethods, grpc.MethodDesc{
MethodName: mtd.GetName(),
Handler: gs.getUnaryHandler(mtd),
})
}
}
svcDesc := grpc.ServiceDesc{
ServiceName: svcName,
HandlerType: (*interface{})(nil),
Methods: unaryMethods,
Streams: streamMethods,
Metadata: sd.GetFile().GetName(),
}
gs.server.RegisterService(&svcDesc, &mockServer{})
}
return gs, nil
} | identifier_body |
grpc.go | package protocols
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net"
"reflect"
"github.com/feiyuw/simgo/logger"
"github.com/fullstorydev/grpcurl"
"github.com/jhump/protoreflect/desc"
"github.com/jhump/protoreflect/dynamic"
"github.com/jhump/protoreflect/grpcreflect"
"google.golang.org/grpc"
"google.golang.org/grpc/peer"
rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)
// ================================== client ==================================
var (
clientCTX = context.Background() // TODO: add timeout, dialtime options
)
// client
type GrpcClient struct {
addr string // connected service addr, eg. 127.0.0.1:1988
conn *grpc.ClientConn // connection with grpc server
desc grpcurl.DescriptorSource
}
// Create a new grpc client
// if protos set, will get services and methods from proto files
// if addr set but protos empty, will get services and methods from server reflection
func NewGrpcClient(addr string, protos []string, opts ...grpc.DialOption) (*GrpcClient, error) {
var descSource grpcurl.DescriptorSource
if addr == "" {
return nil, fmt.Errorf("addr should not be empty")
}
conn, err := grpc.Dial(addr, opts...)
if err != nil {
return nil, fmt.Errorf("did not connect: %v", err)
}
if len(protos) > 0 {
descSource, err := grpcurl.DescriptorSourceFromProtoFiles([]string{}, protos...)
if err != nil {
return nil, fmt.Errorf("cannot parse proto file: %v", err)
}
return &GrpcClient{addr: addr, conn: conn, desc: descSource}, nil
}
// fetch from server reflection RPC
c := rpb.NewServerReflectionClient(conn)
refClient := grpcreflect.NewClient(clientCTX, c)
descSource = grpcurl.DescriptorSourceFromServer(clientCTX, refClient)
return &GrpcClient{addr: addr, conn: conn, desc: descSource}, nil
}
func (gc *GrpcClient) ListServices() ([]string, error) {
svcs, err := grpcurl.ListServices(gc.desc)
if err != nil {
return nil, err
}
return svcs, nil
}
func (gc *GrpcClient) ListMethods(svcName string) ([]string, error) {
mtds, err := grpcurl.ListMethods(gc.desc, svcName)
if err != nil {
return nil, err
}
return mtds, nil
}
func (gc *GrpcClient) Close() error {
if gc.conn == nil {
return nil
}
return gc.conn.Close()
}
type rpcResponse struct {
messages []bytes.Buffer
}
func (rr *rpcResponse) Write(p []byte) (int, error) {
var msg bytes.Buffer
n, err := msg.Write(p)
rr.messages = append(rr.messages, msg)
return n, err
}
func (rr *rpcResponse) ToJSON() (interface{}, error) {
switch len(rr.messages) {
case 0:
return map[string]interface{}{}, nil
case 1:
resp := make(map[string]interface{})
if err := json.Unmarshal(rr.messages[0].Bytes(), &resp); err != nil {
return nil, err
}
return resp, nil
default:
resp := make([]map[string]interface{}, len(rr.messages))
for idx, msg := range rr.messages {
oneResp := make(map[string]interface{})
if err := json.Unmarshal(msg.Bytes(), &oneResp); err != nil {
return nil, err
}
resp[idx] = oneResp
}
return resp, nil
}
}
func (gc *GrpcClient) InvokeRPC(mtdName string, reqData interface{}) (interface{}, error) {
var in bytes.Buffer
var out = rpcResponse{messages: []bytes.Buffer{}}
switch reflect.TypeOf(reqData).Kind() {
case reflect.Slice:
for _, data := range reqData.([]map[string]interface{}) {
reqBytes, err := json.Marshal(data)
if err != nil {
return nil, err
}
in.Write(reqBytes)
}
case reflect.Map:
reqBytes, err := json.Marshal(reqData)
if err != nil {
return nil, err
}
in.Write(reqBytes)
default:
in.WriteString(reqData.(string))
}
rf, formatter, err := grpcurl.RequestParserAndFormatterFor(grpcurl.FormatJSON, gc.desc, true, false, &in)
if err != nil {
return nil, err
}
h := grpcurl.NewDefaultEventHandler(&out, gc.desc, formatter, false)
if err = grpcurl.InvokeRPC(clientCTX, gc.desc, gc.conn, mtdName, []string{}, h, rf.Next); err != nil {
return nil, err
}
return out.ToJSON()
}
// ================================== server ==================================
type GrpcServer struct {
addr string
desc grpcurl.DescriptorSource
server *grpc.Server
handlerM map[string]func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error
listeners []func(mtd, direction, from, to, body string) error
}
// create a new grpc server
func NewGrpcServer(addr string, protos []string, opts ...grpc.ServerOption) (*GrpcServer, error) {
descFromProto, err := grpcurl.DescriptorSourceFromProtoFiles([]string{}, protos...)
if err != nil {
return nil, fmt.Errorf("cannot parse proto file: %v", err)
}
gs := &GrpcServer{
addr: addr,
desc: descFromProto,
server: grpc.NewServer(opts...),
handlerM: map[string]func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error{},
}
gs.server = grpc.NewServer()
services, err := grpcurl.ListServices(gs.desc)
if err != nil {
return nil, fmt.Errorf("failed to list services")
}
for _, svcName := range services {
dsc, err := gs.desc.FindSymbol(svcName)
if err != nil {
return nil, fmt.Errorf("unable to find service: %s, error: %v", svcName, err)
}
sd := dsc.(*desc.ServiceDescriptor)
unaryMethods := []grpc.MethodDesc{}
streamMethods := []grpc.StreamDesc{}
for _, mtd := range sd.GetMethods() {
logger.Debugf("protocols/grpc", "try to add method: %v of service: %s", mtd, svcName)
if mtd.IsClientStreaming() || mtd.IsServerStreaming() {
streamMethods = append(streamMethods, grpc.StreamDesc{
StreamName: mtd.GetName(),
Handler: gs.getStreamHandler(mtd),
ServerStreams: mtd.IsServerStreaming(),
ClientStreams: mtd.IsClientStreaming(),
})
} else {
unaryMethods = append(unaryMethods, grpc.MethodDesc{
MethodName: mtd.GetName(),
Handler: gs.getUnaryHandler(mtd),
})
}
}
svcDesc := grpc.ServiceDesc{
ServiceName: svcName,
HandlerType: (*interface{})(nil),
Methods: unaryMethods,
Streams: streamMethods,
Metadata: sd.GetFile().GetName(),
}
gs.server.RegisterService(&svcDesc, &mockServer{})
}
return gs, nil
}
func (gs *GrpcServer) Start() error {
lis, err := net.Listen("tcp", gs.addr)
if err != nil {
return err
}
logger.Infof("protocols/grpc", "server listening at %v", lis.Addr())
go func() {
if err := gs.server.Serve(lis); err != nil {
logger.Errorf("protocols/grpc", "failed to serve: %v", err)
}
}()
return nil
}
func (gs *GrpcServer) Close() error {
if gs.server != nil {
gs.server.Stop()
gs.server = nil
gs.handlerM = map[string]func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error{}
logger.Infof("protocols/grpc", "grpc server %s stopped", gs.addr)
}
return nil
}
func (gs *GrpcServer) AddListener(listener func(mtd, direction, from, to, body string) error) {
gs.listeners = append(gs.listeners, listener)
logger.Infof("protocols/grpc", "new listener added, now %d listeners", len(gs.listeners))
}
// set specified method handler, one method only have one handler, it's the highest priority
// if you want to return error, see https://github.com/avinassh/grpc-errors/blob/master/go/server.go
// NOTE: thread unsafe, use lock in ops level
func (gs *GrpcServer) SetMethodHandler(mtd string, handler func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error) error {
if _, exists := gs.handlerM[mtd]; exists {
logger.Warnf("protocols/grpc", "handler for method %s exists, will be overrided", mtd)
}
gs.handlerM[mtd] = handler
return nil
}
// NOTE: thread unsafe, use lock in ops level
func (gs *GrpcServer) RemoveMethodHandler(mtd string) error {
if _, exists := gs.handlerM[mtd]; exists |
return nil
}
func (gs *GrpcServer) ListMethods() ([]string, error) {
methods := []string{}
services, err := grpcurl.ListServices(gs.desc)
if err != nil {
return nil, fmt.Errorf("failed to list services")
}
for _, svcName := range services {
dsc, err := gs.desc.FindSymbol(svcName)
if err != nil {
return nil, fmt.Errorf("unable to find service: %s, error: %v", svcName, err)
}
sd := dsc.(*desc.ServiceDescriptor)
for _, mtd := range sd.GetMethods() {
methods = append(methods, mtd.GetFullyQualifiedName())
}
}
return methods, nil
}
func (gs *GrpcServer) getMethodHandler(mtd string) (func(in *dynamic.Message, out *dynamic.Message, stream grpc.ServerStream) error, error) {
handler, ok := gs.handlerM[mtd]
if !ok {
return nil, fmt.Errorf("handler for method %s not found", mtd)
}
return handler, nil
}
func (gs *GrpcServer) getUnaryHandler(mtd *desc.MethodDescriptor) func(interface{}, context.Context, func(interface{}) error, grpc.UnaryServerInterceptor) (interface{}, error) {
mtdFqn := mtd.GetFullyQualifiedName()
return func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
var peerAddr string
if len(gs.listeners) > 0 {
p, ok := peer.FromContext(ctx)
if ok {
peerAddr = p.Addr.String()
} else {
logger.Error("protocols/grpc", "failed to get peer address")
}
}
handler, err := gs.getMethodHandler(mtdFqn)
if err != nil {
logger.Errorf("protocols/grpc", "no handler for %s", mtdFqn)
return nil, err
}
in := dynamic.NewMessage(mtd.GetInputType())
if err := dec(in); err != nil {
return nil, err
}
// handle in message in listener
for _, listener := range gs.listeners {
listener(mtdFqn, "in", peerAddr, gs.addr, in.String())
}
out := dynamic.NewMessage(mtd.GetOutputType())
if err := handler(in, out, nil); err != nil {
return nil, err
}
// handle out message in listener
for _, listener := range gs.listeners {
listener(mtdFqn, "out", gs.addr, peerAddr, out.String())
}
if interceptor == nil {
return out, nil
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: mtdFqn,
}
wrapper := func(ctx context.Context, req interface{}) (interface{}, error) {
return out, nil
}
return interceptor(ctx, in, info, wrapper)
}
}
func (gs *GrpcServer) getStreamHandler(mtd *desc.MethodDescriptor) func(interface{}, grpc.ServerStream) error {
mtdFqn := mtd.GetFullyQualifiedName()
return func(srv interface{}, stream grpc.ServerStream) error {
// TODO: listeners for stream messages
handler, err := gs.getMethodHandler(mtdFqn)
if err != nil {
return err
}
in := dynamic.NewMessage(mtd.GetInputType())
out := dynamic.NewMessage(mtd.GetOutputType())
if err := handler(in, out, stream); err != nil {
return err
}
return nil
}
}
// mock server struct for service descriptor
type mockServer struct {
}
| {
delete(gs.handlerM, mtd)
} | conditional_block |
asn1.rs | //! Support for ECDSA signatures encoded as ASN.1 DER.
// Adapted from BearSSL. Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>.
// Relicensed under Apache 2.0 + MIT (from original MIT) with permission.
//
// <https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/ec/ecdsa_atr.c>
// <https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/ec/ecdsa_rta.c>
use crate::{
generic_array::{typenum::Unsigned, ArrayLength, GenericArray},
Error,
};
use core::{
convert::{TryFrom, TryInto},
fmt,
ops::{Add, Range},
};
use elliptic_curve::{consts::U9, weierstrass::Curve};
#[cfg(feature = "alloc")]
use alloc::boxed::Box;
/// Maximum overhead of an ASN.1 DER-encoded ECDSA signature for a given curve:
/// 9-bytes.
///
/// Includes 3-byte ASN.1 DER header:
///
/// - 1-byte: ASN.1 `SEQUENCE` tag (0x30)
/// - 2-byte: length
///
/// ...followed by two ASN.1 `INTEGER` values, which each have a header whose
/// maximum length is the following:
///
/// - 1-byte: ASN.1 `INTEGER` tag (0x02)
/// - 1-byte: length
/// - 1-byte: zero to indicate value is positive (`INTEGER` is signed)
pub type MaxOverhead = U9;
/// Maximum size of an ASN.1 DER encoded signature for the given elliptic curve.
pub type MaxSize<C> =
<<<C as elliptic_curve::Curve>::FieldSize as Add>::Output as Add<MaxOverhead>>::Output;
/// Byte array containing a serialized ASN.1 signature
type DocumentBytes<C> = GenericArray<u8, MaxSize<C>>;
/// ASN.1 `INTEGER` tag
const INTEGER_TAG: u8 = 0x02;
/// ASN.1 `SEQUENCE` tag
const SEQUENCE_TAG: u8 = 0x30;
/// ASN.1 DER-encoded signature.
///
/// Generic over the scalar size of the elliptic curve.
pub struct Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
/// ASN.1 DER-encoded signature data
bytes: DocumentBytes<C>,
/// Range of the `r` value within the signature
r_range: Range<usize>,
/// Range of the `s` value within the signature
s_range: Range<usize>,
}
impl<C> signature::Signature for Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
/// Parse an ASN.1 DER-encoded ECDSA signature from a byte slice
fn from_bytes(bytes: &[u8]) -> Result<Self, Error> {
bytes.try_into()
}
}
#[allow(clippy::len_without_is_empty)]
impl<C> Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
/// Get the length of the signature in bytes
pub fn len(&self) -> usize {
self.s_range.end
}
/// Borrow this signature as a byte slice
pub fn | (&self) -> &[u8] {
&self.bytes.as_slice()[..self.len()]
}
/// Serialize this signature as a boxed byte slice
#[cfg(feature = "alloc")]
pub fn to_bytes(&self) -> Box<[u8]> {
self.as_bytes().to_vec().into_boxed_slice()
}
/// Create an ASN.1 DER encoded signature from big endian `r` and `s` scalars
pub(crate) fn from_scalar_bytes(r: &[u8], s: &[u8]) -> Self {
let r_len = int_length(r);
let s_len = int_length(s);
let scalar_size = C::FieldSize::to_usize();
let mut bytes = DocumentBytes::<C>::default();
// SEQUENCE header
bytes[0] = SEQUENCE_TAG as u8;
let zlen = r_len.checked_add(s_len).unwrap().checked_add(4).unwrap();
let offset = if zlen >= 0x80 {
bytes[1] = 0x81;
bytes[2] = zlen as u8;
3
} else {
bytes[1] = zlen as u8;
2
};
// First INTEGER (r)
serialize_int(r, &mut bytes[offset..], r_len, scalar_size);
let r_end = offset.checked_add(2).unwrap().checked_add(r_len).unwrap();
// Second INTEGER (s)
serialize_int(s, &mut bytes[r_end..], s_len, scalar_size);
let s_end = r_end.checked_add(2).unwrap().checked_add(s_len).unwrap();
bytes[..s_end]
.try_into()
.expect("generated invalid ASN.1 DER")
}
/// Get the `r` component of the signature (leading zeros removed)
pub(crate) fn r(&self) -> &[u8] {
&self.bytes[self.r_range.clone()]
}
/// Get the `s` component of the signature (leading zeros removed)
pub(crate) fn s(&self) -> &[u8] {
&self.bytes[self.s_range.clone()]
}
}
impl<C> AsRef<[u8]> for Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
fn as_ref(&self) -> &[u8] {
self.as_bytes()
}
}
impl<C> fmt::Debug for Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("asn1::Signature")
.field("r", &self.r())
.field("s", &self.s())
.finish()
}
}
impl<C> TryFrom<&[u8]> for Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
type Error = Error;
fn try_from(bytes: &[u8]) -> Result<Self, Error> {
// Signature format is a SEQUENCE of two INTEGER values. We
// support only integers of less than 127 bytes each (signed
// encoding) so the resulting raw signature will have length
// at most 254 bytes.
//
// First byte is SEQUENCE tag.
if bytes[0] != SEQUENCE_TAG as u8 {
return Err(Error::new());
}
// The SEQUENCE length will be encoded over one or two bytes. We
// limit the total SEQUENCE contents to 255 bytes, because it
// makes things simpler; this is enough for subgroup orders up
// to 999 bits.
let mut zlen = bytes[1] as usize;
let offset = if zlen > 0x80 {
if zlen != 0x81 {
return Err(Error::new());
}
zlen = bytes[2] as usize;
3
} else {
2
};
if zlen != bytes.len().checked_sub(offset).unwrap() {
return Err(Error::new());
}
// First INTEGER (r)
let r_range = parse_int(&bytes[offset..], C::FieldSize::to_usize())?;
let r_start = offset.checked_add(r_range.start).unwrap();
let r_end = offset.checked_add(r_range.end).unwrap();
// Second INTEGER (s)
let s_range = parse_int(&bytes[r_end..], C::FieldSize::to_usize())?;
let s_start = r_end.checked_add(s_range.start).unwrap();
let s_end = r_end.checked_add(s_range.end).unwrap();
if s_end != bytes.as_ref().len() {
return Err(Error::new());
}
let mut byte_arr = DocumentBytes::<C>::default();
byte_arr[..s_end].copy_from_slice(bytes.as_ref());
Ok(Signature {
bytes: byte_arr,
r_range: Range {
start: r_start,
end: r_end,
},
s_range: Range {
start: s_start,
end: s_end,
},
})
}
}
#[cfg(all(feature = "digest", feature = "hazmat"))]
impl<C> signature::PrehashSignature for Signature<C>
where
C: Curve + crate::hazmat::DigestPrimitive,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
type Digest = C::Digest;
}
/// Parse an integer from its ASN.1 DER serialization
fn parse_int(bytes: &[u8], scalar_size: usize) -> Result<Range<usize>, Error> {
if bytes.len() < 3 {
return Err(Error::new());
}
if bytes[0] != INTEGER_TAG as u8 {
return Err(Error::new());
}
let len = bytes[1] as usize;
if len >= 0x80 || len.checked_add(2).unwrap() > bytes.len() {
return Err(Error::new());
}
let mut start = 2usize;
let end = start.checked_add(len).unwrap();
start = start
.checked_add(trim_zeroes(&bytes[start..end], scalar_size)?)
.unwrap();
Ok(Range { start, end })
}
/// Serialize scalar as ASN.1 DER
fn serialize_int(scalar: &[u8], out: &mut [u8], len: usize, scalar_size: usize) {
out[0] = INTEGER_TAG as u8;
out[1] = len as u8;
if len > scalar_size {
out[2] = 0x00;
out[3..scalar_size.checked_add(3).unwrap()].copy_from_slice(scalar);
} else {
out[2..len.checked_add(2).unwrap()]
.copy_from_slice(&scalar[scalar_size.checked_sub(len).unwrap()..]);
}
}
/// Compute ASN.1 DER encoded length for the provided scalar. The ASN.1
/// encoding is signed, so its leading bit must have value 0; it must also be
/// of minimal length (so leading bytes of value 0 must be removed, except if
/// that would contradict the rule about the sign bit).
fn int_length(mut x: &[u8]) -> usize {
while !x.is_empty() && x[0] == 0 {
x = &x[1..];
}
if x.is_empty() || x[0] >= 0x80 {
x.len().checked_add(1).unwrap()
} else {
x.len()
}
}
/// Compute an offset within an ASN.1 INTEGER after skipping leading zeroes
fn trim_zeroes(mut bytes: &[u8], scalar_size: usize) -> Result<usize, Error> {
let mut offset = 0;
if bytes.len() > scalar_size {
if bytes.len() != scalar_size.checked_add(1).unwrap() {
return Err(Error::new());
}
if bytes[0] != 0 {
return Err(Error::new());
}
bytes = &bytes[1..];
offset += 1;
}
while !bytes.is_empty() && bytes[0] == 0 {
bytes = &bytes[1..];
offset += 1;
}
Ok(offset)
}
#[cfg(all(feature = "dev", test))]
mod tests {
use crate::dev::curve::Signature;
use signature::Signature as _;
const EXAMPLE_SIGNATURE: [u8; 64] = [
0xf3, 0xac, 0x80, 0x61, 0xb5, 0x14, 0x79, 0x5b, 0x88, 0x43, 0xe3, 0xd6, 0x62, 0x95, 0x27,
0xed, 0x2a, 0xfd, 0x6b, 0x1f, 0x6a, 0x55, 0x5a, 0x7a, 0xca, 0xbb, 0x5e, 0x6f, 0x79, 0xc8,
0xc2, 0xac, 0x8b, 0xf7, 0x78, 0x19, 0xca, 0x5, 0xa6, 0xb2, 0x78, 0x6c, 0x76, 0x26, 0x2b,
0xf7, 0x37, 0x1c, 0xef, 0x97, 0xb2, 0x18, 0xe9, 0x6f, 0x17, 0x5a, 0x3c, 0xcd, 0xda, 0x2a,
0xcc, 0x5, 0x89, 0x3,
];
#[test]
fn test_fixed_to_asn1_signature_roundtrip() {
let signature1 = Signature::from_bytes(&EXAMPLE_SIGNATURE).unwrap();
// Convert to ASN.1 DER and back
let asn1_signature = signature1.to_asn1();
let signature2 = Signature::from_asn1(asn1_signature.as_ref()).unwrap();
assert_eq!(signature1, signature2);
}
}
| as_bytes | identifier_name |
asn1.rs | //! Support for ECDSA signatures encoded as ASN.1 DER.
// Adapted from BearSSL. Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>.
// Relicensed under Apache 2.0 + MIT (from original MIT) with permission.
//
// <https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/ec/ecdsa_atr.c>
// <https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/ec/ecdsa_rta.c>
use crate::{
generic_array::{typenum::Unsigned, ArrayLength, GenericArray},
Error,
};
use core::{
convert::{TryFrom, TryInto},
fmt,
ops::{Add, Range},
};
use elliptic_curve::{consts::U9, weierstrass::Curve};
#[cfg(feature = "alloc")]
use alloc::boxed::Box;
/// Maximum overhead of an ASN.1 DER-encoded ECDSA signature for a given curve:
/// 9-bytes.
///
/// Includes 3-byte ASN.1 DER header:
///
/// - 1-byte: ASN.1 `SEQUENCE` tag (0x30)
/// - 2-byte: length
///
/// ...followed by two ASN.1 `INTEGER` values, which each have a header whose
/// maximum length is the following:
///
/// - 1-byte: ASN.1 `INTEGER` tag (0x02)
/// - 1-byte: length
/// - 1-byte: zero to indicate value is positive (`INTEGER` is signed)
pub type MaxOverhead = U9;
/// Maximum size of an ASN.1 DER encoded signature for the given elliptic curve.
pub type MaxSize<C> =
<<<C as elliptic_curve::Curve>::FieldSize as Add>::Output as Add<MaxOverhead>>::Output;
/// Byte array containing a serialized ASN.1 signature
type DocumentBytes<C> = GenericArray<u8, MaxSize<C>>;
/// ASN.1 `INTEGER` tag
const INTEGER_TAG: u8 = 0x02;
/// ASN.1 `SEQUENCE` tag
const SEQUENCE_TAG: u8 = 0x30;
/// ASN.1 DER-encoded signature.
///
/// Generic over the scalar size of the elliptic curve.
pub struct Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
/// ASN.1 DER-encoded signature data
bytes: DocumentBytes<C>,
/// Range of the `r` value within the signature
r_range: Range<usize>,
/// Range of the `s` value within the signature
s_range: Range<usize>,
}
impl<C> signature::Signature for Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
/// Parse an ASN.1 DER-encoded ECDSA signature from a byte slice
fn from_bytes(bytes: &[u8]) -> Result<Self, Error> {
bytes.try_into()
}
}
#[allow(clippy::len_without_is_empty)]
impl<C> Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
/// Get the length of the signature in bytes
pub fn len(&self) -> usize {
self.s_range.end
}
/// Borrow this signature as a byte slice
pub fn as_bytes(&self) -> &[u8] {
&self.bytes.as_slice()[..self.len()]
}
/// Serialize this signature as a boxed byte slice
#[cfg(feature = "alloc")]
pub fn to_bytes(&self) -> Box<[u8]> {
self.as_bytes().to_vec().into_boxed_slice()
}
/// Create an ASN.1 DER encoded signature from big endian `r` and `s` scalars
pub(crate) fn from_scalar_bytes(r: &[u8], s: &[u8]) -> Self |
/// Get the `r` component of the signature (leading zeros removed)
pub(crate) fn r(&self) -> &[u8] {
&self.bytes[self.r_range.clone()]
}
/// Get the `s` component of the signature (leading zeros removed)
pub(crate) fn s(&self) -> &[u8] {
&self.bytes[self.s_range.clone()]
}
}
impl<C> AsRef<[u8]> for Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
fn as_ref(&self) -> &[u8] {
self.as_bytes()
}
}
impl<C> fmt::Debug for Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("asn1::Signature")
.field("r", &self.r())
.field("s", &self.s())
.finish()
}
}
impl<C> TryFrom<&[u8]> for Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
type Error = Error;
fn try_from(bytes: &[u8]) -> Result<Self, Error> {
// Signature format is a SEQUENCE of two INTEGER values. We
// support only integers of less than 127 bytes each (signed
// encoding) so the resulting raw signature will have length
// at most 254 bytes.
//
// First byte is SEQUENCE tag.
if bytes[0] != SEQUENCE_TAG as u8 {
return Err(Error::new());
}
// The SEQUENCE length will be encoded over one or two bytes. We
// limit the total SEQUENCE contents to 255 bytes, because it
// makes things simpler; this is enough for subgroup orders up
// to 999 bits.
let mut zlen = bytes[1] as usize;
let offset = if zlen > 0x80 {
if zlen != 0x81 {
return Err(Error::new());
}
zlen = bytes[2] as usize;
3
} else {
2
};
if zlen != bytes.len().checked_sub(offset).unwrap() {
return Err(Error::new());
}
// First INTEGER (r)
let r_range = parse_int(&bytes[offset..], C::FieldSize::to_usize())?;
let r_start = offset.checked_add(r_range.start).unwrap();
let r_end = offset.checked_add(r_range.end).unwrap();
// Second INTEGER (s)
let s_range = parse_int(&bytes[r_end..], C::FieldSize::to_usize())?;
let s_start = r_end.checked_add(s_range.start).unwrap();
let s_end = r_end.checked_add(s_range.end).unwrap();
if s_end != bytes.as_ref().len() {
return Err(Error::new());
}
let mut byte_arr = DocumentBytes::<C>::default();
byte_arr[..s_end].copy_from_slice(bytes.as_ref());
Ok(Signature {
bytes: byte_arr,
r_range: Range {
start: r_start,
end: r_end,
},
s_range: Range {
start: s_start,
end: s_end,
},
})
}
}
#[cfg(all(feature = "digest", feature = "hazmat"))]
impl<C> signature::PrehashSignature for Signature<C>
where
C: Curve + crate::hazmat::DigestPrimitive,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
type Digest = C::Digest;
}
/// Parse an integer from its ASN.1 DER serialization
fn parse_int(bytes: &[u8], scalar_size: usize) -> Result<Range<usize>, Error> {
if bytes.len() < 3 {
return Err(Error::new());
}
if bytes[0] != INTEGER_TAG as u8 {
return Err(Error::new());
}
let len = bytes[1] as usize;
if len >= 0x80 || len.checked_add(2).unwrap() > bytes.len() {
return Err(Error::new());
}
let mut start = 2usize;
let end = start.checked_add(len).unwrap();
start = start
.checked_add(trim_zeroes(&bytes[start..end], scalar_size)?)
.unwrap();
Ok(Range { start, end })
}
/// Serialize scalar as ASN.1 DER
fn serialize_int(scalar: &[u8], out: &mut [u8], len: usize, scalar_size: usize) {
out[0] = INTEGER_TAG as u8;
out[1] = len as u8;
if len > scalar_size {
out[2] = 0x00;
out[3..scalar_size.checked_add(3).unwrap()].copy_from_slice(scalar);
} else {
out[2..len.checked_add(2).unwrap()]
.copy_from_slice(&scalar[scalar_size.checked_sub(len).unwrap()..]);
}
}
/// Compute ASN.1 DER encoded length for the provided scalar. The ASN.1
/// encoding is signed, so its leading bit must have value 0; it must also be
/// of minimal length (so leading bytes of value 0 must be removed, except if
/// that would contradict the rule about the sign bit).
fn int_length(mut x: &[u8]) -> usize {
while !x.is_empty() && x[0] == 0 {
x = &x[1..];
}
if x.is_empty() || x[0] >= 0x80 {
x.len().checked_add(1).unwrap()
} else {
x.len()
}
}
/// Compute an offset within an ASN.1 INTEGER after skipping leading zeroes
fn trim_zeroes(mut bytes: &[u8], scalar_size: usize) -> Result<usize, Error> {
let mut offset = 0;
if bytes.len() > scalar_size {
if bytes.len() != scalar_size.checked_add(1).unwrap() {
return Err(Error::new());
}
if bytes[0] != 0 {
return Err(Error::new());
}
bytes = &bytes[1..];
offset += 1;
}
while !bytes.is_empty() && bytes[0] == 0 {
bytes = &bytes[1..];
offset += 1;
}
Ok(offset)
}
#[cfg(all(feature = "dev", test))]
mod tests {
use crate::dev::curve::Signature;
use signature::Signature as _;
const EXAMPLE_SIGNATURE: [u8; 64] = [
0xf3, 0xac, 0x80, 0x61, 0xb5, 0x14, 0x79, 0x5b, 0x88, 0x43, 0xe3, 0xd6, 0x62, 0x95, 0x27,
0xed, 0x2a, 0xfd, 0x6b, 0x1f, 0x6a, 0x55, 0x5a, 0x7a, 0xca, 0xbb, 0x5e, 0x6f, 0x79, 0xc8,
0xc2, 0xac, 0x8b, 0xf7, 0x78, 0x19, 0xca, 0x5, 0xa6, 0xb2, 0x78, 0x6c, 0x76, 0x26, 0x2b,
0xf7, 0x37, 0x1c, 0xef, 0x97, 0xb2, 0x18, 0xe9, 0x6f, 0x17, 0x5a, 0x3c, 0xcd, 0xda, 0x2a,
0xcc, 0x5, 0x89, 0x3,
];
#[test]
fn test_fixed_to_asn1_signature_roundtrip() {
let signature1 = Signature::from_bytes(&EXAMPLE_SIGNATURE).unwrap();
// Convert to ASN.1 DER and back
let asn1_signature = signature1.to_asn1();
let signature2 = Signature::from_asn1(asn1_signature.as_ref()).unwrap();
assert_eq!(signature1, signature2);
}
}
| {
let r_len = int_length(r);
let s_len = int_length(s);
let scalar_size = C::FieldSize::to_usize();
let mut bytes = DocumentBytes::<C>::default();
// SEQUENCE header
bytes[0] = SEQUENCE_TAG as u8;
let zlen = r_len.checked_add(s_len).unwrap().checked_add(4).unwrap();
let offset = if zlen >= 0x80 {
bytes[1] = 0x81;
bytes[2] = zlen as u8;
3
} else {
bytes[1] = zlen as u8;
2
};
// First INTEGER (r)
serialize_int(r, &mut bytes[offset..], r_len, scalar_size);
let r_end = offset.checked_add(2).unwrap().checked_add(r_len).unwrap();
// Second INTEGER (s)
serialize_int(s, &mut bytes[r_end..], s_len, scalar_size);
let s_end = r_end.checked_add(2).unwrap().checked_add(s_len).unwrap();
bytes[..s_end]
.try_into()
.expect("generated invalid ASN.1 DER")
} | identifier_body |
asn1.rs | //! Support for ECDSA signatures encoded as ASN.1 DER.
// Adapted from BearSSL. Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>.
// Relicensed under Apache 2.0 + MIT (from original MIT) with permission.
//
// <https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/ec/ecdsa_atr.c>
// <https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/ec/ecdsa_rta.c>
use crate::{
generic_array::{typenum::Unsigned, ArrayLength, GenericArray},
Error,
};
use core::{
convert::{TryFrom, TryInto},
fmt,
ops::{Add, Range},
};
use elliptic_curve::{consts::U9, weierstrass::Curve};
#[cfg(feature = "alloc")]
use alloc::boxed::Box;
/// Maximum overhead of an ASN.1 DER-encoded ECDSA signature for a given curve:
/// 9-bytes.
///
/// Includes 3-byte ASN.1 DER header:
///
/// - 1-byte: ASN.1 `SEQUENCE` tag (0x30)
/// - 2-byte: length
///
/// ...followed by two ASN.1 `INTEGER` values, which each have a header whose
/// maximum length is the following:
///
/// - 1-byte: ASN.1 `INTEGER` tag (0x02)
/// - 1-byte: length
/// - 1-byte: zero to indicate value is positive (`INTEGER` is signed)
pub type MaxOverhead = U9;
/// Maximum size of an ASN.1 DER encoded signature for the given elliptic curve.
pub type MaxSize<C> =
<<<C as elliptic_curve::Curve>::FieldSize as Add>::Output as Add<MaxOverhead>>::Output;
/// Byte array containing a serialized ASN.1 signature
type DocumentBytes<C> = GenericArray<u8, MaxSize<C>>;
/// ASN.1 `INTEGER` tag
const INTEGER_TAG: u8 = 0x02;
/// ASN.1 `SEQUENCE` tag
const SEQUENCE_TAG: u8 = 0x30;
/// ASN.1 DER-encoded signature.
///
/// Generic over the scalar size of the elliptic curve.
pub struct Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
/// ASN.1 DER-encoded signature data
bytes: DocumentBytes<C>,
/// Range of the `r` value within the signature
r_range: Range<usize>,
/// Range of the `s` value within the signature
s_range: Range<usize>,
}
impl<C> signature::Signature for Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
/// Parse an ASN.1 DER-encoded ECDSA signature from a byte slice
fn from_bytes(bytes: &[u8]) -> Result<Self, Error> {
bytes.try_into()
}
}
#[allow(clippy::len_without_is_empty)]
impl<C> Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
/// Get the length of the signature in bytes
pub fn len(&self) -> usize {
self.s_range.end
}
/// Borrow this signature as a byte slice
pub fn as_bytes(&self) -> &[u8] {
&self.bytes.as_slice()[..self.len()]
}
/// Serialize this signature as a boxed byte slice
#[cfg(feature = "alloc")]
pub fn to_bytes(&self) -> Box<[u8]> {
self.as_bytes().to_vec().into_boxed_slice()
}
/// Create an ASN.1 DER encoded signature from big endian `r` and `s` scalars
pub(crate) fn from_scalar_bytes(r: &[u8], s: &[u8]) -> Self {
let r_len = int_length(r);
let s_len = int_length(s);
let scalar_size = C::FieldSize::to_usize();
let mut bytes = DocumentBytes::<C>::default();
// SEQUENCE header
bytes[0] = SEQUENCE_TAG as u8;
let zlen = r_len.checked_add(s_len).unwrap().checked_add(4).unwrap();
let offset = if zlen >= 0x80 {
bytes[1] = 0x81;
bytes[2] = zlen as u8;
3
} else {
bytes[1] = zlen as u8;
2
};
// First INTEGER (r)
serialize_int(r, &mut bytes[offset..], r_len, scalar_size);
let r_end = offset.checked_add(2).unwrap().checked_add(r_len).unwrap();
// Second INTEGER (s)
serialize_int(s, &mut bytes[r_end..], s_len, scalar_size);
let s_end = r_end.checked_add(2).unwrap().checked_add(s_len).unwrap();
bytes[..s_end]
.try_into()
.expect("generated invalid ASN.1 DER")
}
/// Get the `r` component of the signature (leading zeros removed)
pub(crate) fn r(&self) -> &[u8] {
&self.bytes[self.r_range.clone()]
}
/// Get the `s` component of the signature (leading zeros removed)
pub(crate) fn s(&self) -> &[u8] {
&self.bytes[self.s_range.clone()]
}
}
impl<C> AsRef<[u8]> for Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
fn as_ref(&self) -> &[u8] {
self.as_bytes()
}
}
impl<C> fmt::Debug for Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("asn1::Signature")
.field("r", &self.r())
.field("s", &self.s())
.finish()
}
}
impl<C> TryFrom<&[u8]> for Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
type Error = Error;
fn try_from(bytes: &[u8]) -> Result<Self, Error> {
// Signature format is a SEQUENCE of two INTEGER values. We
// support only integers of less than 127 bytes each (signed
// encoding) so the resulting raw signature will have length
// at most 254 bytes.
//
// First byte is SEQUENCE tag.
if bytes[0] != SEQUENCE_TAG as u8 {
return Err(Error::new());
}
// The SEQUENCE length will be encoded over one or two bytes. We
// limit the total SEQUENCE contents to 255 bytes, because it
// makes things simpler; this is enough for subgroup orders up
// to 999 bits.
let mut zlen = bytes[1] as usize;
let offset = if zlen > 0x80 {
if zlen != 0x81 {
return Err(Error::new());
}
zlen = bytes[2] as usize;
3
} else {
2
};
if zlen != bytes.len().checked_sub(offset).unwrap() {
return Err(Error::new());
}
// First INTEGER (r)
let r_range = parse_int(&bytes[offset..], C::FieldSize::to_usize())?;
let r_start = offset.checked_add(r_range.start).unwrap();
let r_end = offset.checked_add(r_range.end).unwrap();
// Second INTEGER (s)
let s_range = parse_int(&bytes[r_end..], C::FieldSize::to_usize())?;
let s_start = r_end.checked_add(s_range.start).unwrap();
let s_end = r_end.checked_add(s_range.end).unwrap();
if s_end != bytes.as_ref().len() {
return Err(Error::new());
}
let mut byte_arr = DocumentBytes::<C>::default();
byte_arr[..s_end].copy_from_slice(bytes.as_ref());
Ok(Signature {
bytes: byte_arr,
r_range: Range {
start: r_start,
end: r_end,
},
s_range: Range {
start: s_start,
end: s_end,
},
})
}
}
#[cfg(all(feature = "digest", feature = "hazmat"))]
impl<C> signature::PrehashSignature for Signature<C>
where
C: Curve + crate::hazmat::DigestPrimitive,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
type Digest = C::Digest;
}
/// Parse an integer from its ASN.1 DER serialization
fn parse_int(bytes: &[u8], scalar_size: usize) -> Result<Range<usize>, Error> {
if bytes.len() < 3 {
return Err(Error::new());
}
if bytes[0] != INTEGER_TAG as u8 {
return Err(Error::new());
}
let len = bytes[1] as usize;
if len >= 0x80 || len.checked_add(2).unwrap() > bytes.len() {
return Err(Error::new());
}
let mut start = 2usize;
let end = start.checked_add(len).unwrap();
start = start
.checked_add(trim_zeroes(&bytes[start..end], scalar_size)?)
.unwrap();
Ok(Range { start, end })
}
/// Serialize scalar as ASN.1 DER
fn serialize_int(scalar: &[u8], out: &mut [u8], len: usize, scalar_size: usize) {
out[0] = INTEGER_TAG as u8;
out[1] = len as u8;
if len > scalar_size {
out[2] = 0x00;
out[3..scalar_size.checked_add(3).unwrap()].copy_from_slice(scalar);
} else {
out[2..len.checked_add(2).unwrap()]
.copy_from_slice(&scalar[scalar_size.checked_sub(len).unwrap()..]);
}
}
/// Compute ASN.1 DER encoded length for the provided scalar. The ASN.1
/// encoding is signed, so its leading bit must have value 0; it must also be
/// of minimal length (so leading bytes of value 0 must be removed, except if
/// that would contradict the rule about the sign bit).
fn int_length(mut x: &[u8]) -> usize {
while !x.is_empty() && x[0] == 0 {
x = &x[1..];
}
if x.is_empty() || x[0] >= 0x80 {
x.len().checked_add(1).unwrap()
} else {
x.len()
}
}
/// Compute an offset within an ASN.1 INTEGER after skipping leading zeroes
fn trim_zeroes(mut bytes: &[u8], scalar_size: usize) -> Result<usize, Error> {
let mut offset = 0;
if bytes.len() > scalar_size {
if bytes.len() != scalar_size.checked_add(1).unwrap() |
if bytes[0] != 0 {
return Err(Error::new());
}
bytes = &bytes[1..];
offset += 1;
}
while !bytes.is_empty() && bytes[0] == 0 {
bytes = &bytes[1..];
offset += 1;
}
Ok(offset)
}
#[cfg(all(feature = "dev", test))]
mod tests {
use crate::dev::curve::Signature;
use signature::Signature as _;
const EXAMPLE_SIGNATURE: [u8; 64] = [
0xf3, 0xac, 0x80, 0x61, 0xb5, 0x14, 0x79, 0x5b, 0x88, 0x43, 0xe3, 0xd6, 0x62, 0x95, 0x27,
0xed, 0x2a, 0xfd, 0x6b, 0x1f, 0x6a, 0x55, 0x5a, 0x7a, 0xca, 0xbb, 0x5e, 0x6f, 0x79, 0xc8,
0xc2, 0xac, 0x8b, 0xf7, 0x78, 0x19, 0xca, 0x5, 0xa6, 0xb2, 0x78, 0x6c, 0x76, 0x26, 0x2b,
0xf7, 0x37, 0x1c, 0xef, 0x97, 0xb2, 0x18, 0xe9, 0x6f, 0x17, 0x5a, 0x3c, 0xcd, 0xda, 0x2a,
0xcc, 0x5, 0x89, 0x3,
];
#[test]
fn test_fixed_to_asn1_signature_roundtrip() {
let signature1 = Signature::from_bytes(&EXAMPLE_SIGNATURE).unwrap();
// Convert to ASN.1 DER and back
let asn1_signature = signature1.to_asn1();
let signature2 = Signature::from_asn1(asn1_signature.as_ref()).unwrap();
assert_eq!(signature1, signature2);
}
}
| {
return Err(Error::new());
} | conditional_block |
asn1.rs | //! Support for ECDSA signatures encoded as ASN.1 DER.
// Adapted from BearSSL. Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>.
// Relicensed under Apache 2.0 + MIT (from original MIT) with permission.
//
// <https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/ec/ecdsa_atr.c>
// <https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/ec/ecdsa_rta.c>
use crate::{
generic_array::{typenum::Unsigned, ArrayLength, GenericArray},
Error,
};
use core::{
convert::{TryFrom, TryInto},
fmt,
ops::{Add, Range},
};
use elliptic_curve::{consts::U9, weierstrass::Curve};
#[cfg(feature = "alloc")]
use alloc::boxed::Box;
/// Maximum overhead of an ASN.1 DER-encoded ECDSA signature for a given curve:
/// 9-bytes.
///
/// Includes 3-byte ASN.1 DER header:
///
/// - 1-byte: ASN.1 `SEQUENCE` tag (0x30)
/// - 2-byte: length
///
/// ...followed by two ASN.1 `INTEGER` values, which each have a header whose
/// maximum length is the following:
///
/// - 1-byte: ASN.1 `INTEGER` tag (0x02)
/// - 1-byte: length
/// - 1-byte: zero to indicate value is positive (`INTEGER` is signed)
pub type MaxOverhead = U9;
/// Maximum size of an ASN.1 DER encoded signature for the given elliptic curve.
pub type MaxSize<C> =
<<<C as elliptic_curve::Curve>::FieldSize as Add>::Output as Add<MaxOverhead>>::Output;
/// Byte array containing a serialized ASN.1 signature
type DocumentBytes<C> = GenericArray<u8, MaxSize<C>>;
/// ASN.1 `INTEGER` tag
const INTEGER_TAG: u8 = 0x02;
/// ASN.1 `SEQUENCE` tag
const SEQUENCE_TAG: u8 = 0x30;
/// ASN.1 DER-encoded signature.
///
/// Generic over the scalar size of the elliptic curve.
pub struct Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
/// ASN.1 DER-encoded signature data
bytes: DocumentBytes<C>,
/// Range of the `r` value within the signature
r_range: Range<usize>,
/// Range of the `s` value within the signature
s_range: Range<usize>,
}
impl<C> signature::Signature for Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
/// Parse an ASN.1 DER-encoded ECDSA signature from a byte slice
fn from_bytes(bytes: &[u8]) -> Result<Self, Error> {
bytes.try_into()
}
}
#[allow(clippy::len_without_is_empty)]
impl<C> Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
/// Get the length of the signature in bytes
pub fn len(&self) -> usize {
self.s_range.end
}
/// Borrow this signature as a byte slice
pub fn as_bytes(&self) -> &[u8] {
&self.bytes.as_slice()[..self.len()]
}
/// Serialize this signature as a boxed byte slice
#[cfg(feature = "alloc")]
pub fn to_bytes(&self) -> Box<[u8]> {
self.as_bytes().to_vec().into_boxed_slice()
}
/// Create an ASN.1 DER encoded signature from big endian `r` and `s` scalars
pub(crate) fn from_scalar_bytes(r: &[u8], s: &[u8]) -> Self {
let r_len = int_length(r);
let s_len = int_length(s);
let scalar_size = C::FieldSize::to_usize();
let mut bytes = DocumentBytes::<C>::default();
// SEQUENCE header
bytes[0] = SEQUENCE_TAG as u8;
let zlen = r_len.checked_add(s_len).unwrap().checked_add(4).unwrap();
let offset = if zlen >= 0x80 {
bytes[1] = 0x81;
bytes[2] = zlen as u8;
3
} else {
bytes[1] = zlen as u8;
2
};
// First INTEGER (r)
serialize_int(r, &mut bytes[offset..], r_len, scalar_size);
let r_end = offset.checked_add(2).unwrap().checked_add(r_len).unwrap();
// Second INTEGER (s)
serialize_int(s, &mut bytes[r_end..], s_len, scalar_size);
let s_end = r_end.checked_add(2).unwrap().checked_add(s_len).unwrap();
bytes[..s_end]
.try_into()
.expect("generated invalid ASN.1 DER")
}
/// Get the `r` component of the signature (leading zeros removed)
pub(crate) fn r(&self) -> &[u8] {
&self.bytes[self.r_range.clone()]
}
/// Get the `s` component of the signature (leading zeros removed)
pub(crate) fn s(&self) -> &[u8] {
&self.bytes[self.s_range.clone()]
}
}
impl<C> AsRef<[u8]> for Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
fn as_ref(&self) -> &[u8] {
self.as_bytes()
}
}
impl<C> fmt::Debug for Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("asn1::Signature")
.field("r", &self.r())
.field("s", &self.s())
.finish()
}
}
impl<C> TryFrom<&[u8]> for Signature<C>
where
C: Curve,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
type Error = Error;
fn try_from(bytes: &[u8]) -> Result<Self, Error> {
// Signature format is a SEQUENCE of two INTEGER values. We
// support only integers of less than 127 bytes each (signed
// encoding) so the resulting raw signature will have length
// at most 254 bytes.
//
// First byte is SEQUENCE tag.
if bytes[0] != SEQUENCE_TAG as u8 {
return Err(Error::new());
}
// The SEQUENCE length will be encoded over one or two bytes. We
// limit the total SEQUENCE contents to 255 bytes, because it
// makes things simpler; this is enough for subgroup orders up
// to 999 bits.
let mut zlen = bytes[1] as usize;
let offset = if zlen > 0x80 {
if zlen != 0x81 {
return Err(Error::new());
}
zlen = bytes[2] as usize;
3
} else {
2
};
if zlen != bytes.len().checked_sub(offset).unwrap() {
return Err(Error::new());
}
// First INTEGER (r)
let r_range = parse_int(&bytes[offset..], C::FieldSize::to_usize())?;
let r_start = offset.checked_add(r_range.start).unwrap();
let r_end = offset.checked_add(r_range.end).unwrap();
// Second INTEGER (s) | if s_end != bytes.as_ref().len() {
return Err(Error::new());
}
let mut byte_arr = DocumentBytes::<C>::default();
byte_arr[..s_end].copy_from_slice(bytes.as_ref());
Ok(Signature {
bytes: byte_arr,
r_range: Range {
start: r_start,
end: r_end,
},
s_range: Range {
start: s_start,
end: s_end,
},
})
}
}
#[cfg(all(feature = "digest", feature = "hazmat"))]
impl<C> signature::PrehashSignature for Signature<C>
where
C: Curve + crate::hazmat::DigestPrimitive,
C::FieldSize: Add + ArrayLength<u8>,
MaxSize<C>: ArrayLength<u8>,
<C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
{
type Digest = C::Digest;
}
/// Parse an integer from its ASN.1 DER serialization
fn parse_int(bytes: &[u8], scalar_size: usize) -> Result<Range<usize>, Error> {
if bytes.len() < 3 {
return Err(Error::new());
}
if bytes[0] != INTEGER_TAG as u8 {
return Err(Error::new());
}
let len = bytes[1] as usize;
if len >= 0x80 || len.checked_add(2).unwrap() > bytes.len() {
return Err(Error::new());
}
let mut start = 2usize;
let end = start.checked_add(len).unwrap();
start = start
.checked_add(trim_zeroes(&bytes[start..end], scalar_size)?)
.unwrap();
Ok(Range { start, end })
}
/// Serialize scalar as ASN.1 DER
fn serialize_int(scalar: &[u8], out: &mut [u8], len: usize, scalar_size: usize) {
out[0] = INTEGER_TAG as u8;
out[1] = len as u8;
if len > scalar_size {
out[2] = 0x00;
out[3..scalar_size.checked_add(3).unwrap()].copy_from_slice(scalar);
} else {
out[2..len.checked_add(2).unwrap()]
.copy_from_slice(&scalar[scalar_size.checked_sub(len).unwrap()..]);
}
}
/// Compute ASN.1 DER encoded length for the provided scalar. The ASN.1
/// encoding is signed, so its leading bit must have value 0; it must also be
/// of minimal length (so leading bytes of value 0 must be removed, except if
/// that would contradict the rule about the sign bit).
fn int_length(mut x: &[u8]) -> usize {
while !x.is_empty() && x[0] == 0 {
x = &x[1..];
}
if x.is_empty() || x[0] >= 0x80 {
x.len().checked_add(1).unwrap()
} else {
x.len()
}
}
/// Compute an offset within an ASN.1 INTEGER after skipping leading zeroes
fn trim_zeroes(mut bytes: &[u8], scalar_size: usize) -> Result<usize, Error> {
let mut offset = 0;
if bytes.len() > scalar_size {
if bytes.len() != scalar_size.checked_add(1).unwrap() {
return Err(Error::new());
}
if bytes[0] != 0 {
return Err(Error::new());
}
bytes = &bytes[1..];
offset += 1;
}
while !bytes.is_empty() && bytes[0] == 0 {
bytes = &bytes[1..];
offset += 1;
}
Ok(offset)
}
#[cfg(all(feature = "dev", test))]
mod tests {
use crate::dev::curve::Signature;
use signature::Signature as _;
const EXAMPLE_SIGNATURE: [u8; 64] = [
0xf3, 0xac, 0x80, 0x61, 0xb5, 0x14, 0x79, 0x5b, 0x88, 0x43, 0xe3, 0xd6, 0x62, 0x95, 0x27,
0xed, 0x2a, 0xfd, 0x6b, 0x1f, 0x6a, 0x55, 0x5a, 0x7a, 0xca, 0xbb, 0x5e, 0x6f, 0x79, 0xc8,
0xc2, 0xac, 0x8b, 0xf7, 0x78, 0x19, 0xca, 0x5, 0xa6, 0xb2, 0x78, 0x6c, 0x76, 0x26, 0x2b,
0xf7, 0x37, 0x1c, 0xef, 0x97, 0xb2, 0x18, 0xe9, 0x6f, 0x17, 0x5a, 0x3c, 0xcd, 0xda, 0x2a,
0xcc, 0x5, 0x89, 0x3,
];
#[test]
fn test_fixed_to_asn1_signature_roundtrip() {
let signature1 = Signature::from_bytes(&EXAMPLE_SIGNATURE).unwrap();
// Convert to ASN.1 DER and back
let asn1_signature = signature1.to_asn1();
let signature2 = Signature::from_asn1(asn1_signature.as_ref()).unwrap();
assert_eq!(signature1, signature2);
}
} | let s_range = parse_int(&bytes[r_end..], C::FieldSize::to_usize())?;
let s_start = r_end.checked_add(s_range.start).unwrap();
let s_end = r_end.checked_add(s_range.end).unwrap();
| random_line_split |
lsp_plugin.rs | // Copyright 2018 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Implementation of Language Server Plugin
use std::collections::HashMap;
use std::path::Path;
use std::sync::{Arc, Mutex};
use url::Url;
use xi_plugin_lib::{ChunkCache, CoreProxy, Plugin, View};
use xi_rope::rope::RopeDelta;
use crate::conversion_utils::*;
use crate::language_server_client::LanguageServerClient;
use crate::lsp_types::*;
use crate::result_queue::ResultQueue;
use crate::types::{Config, LanguageResponseError, LspResponse};
use crate::utils::*;
use crate::xi_core::{ConfigTable, ViewId};
pub struct ViewInfo {
version: u64,
ls_identifier: String,
}
/// Represents the state of the Language Server Plugin
pub struct LspPlugin {
pub config: Config,
view_info: HashMap<ViewId, ViewInfo>,
core: Option<CoreProxy>,
result_queue: ResultQueue,
language_server_clients: HashMap<String, Arc<Mutex<LanguageServerClient>>>,
}
impl LspPlugin {
pub fn new(config: Config) -> Self {
LspPlugin {
config,
core: None,
result_queue: ResultQueue::new(),
view_info: HashMap::new(),
language_server_clients: HashMap::new(),
}
}
}
impl Plugin for LspPlugin {
type Cache = ChunkCache;
fn initialize(&mut self, core: CoreProxy) {
self.core = Some(core)
}
fn update(
&mut self,
view: &mut View<Self::Cache>,
delta: Option<&RopeDelta>,
_edit_type: String,
_author: String,
) {
let view_info = self.view_info.get_mut(&view.get_id());
if let Some(view_info) = view_info {
// This won't fail since we definitely have a client for the given
// client identifier
let ls_client = &self.language_server_clients[&view_info.ls_identifier];
let mut ls_client = ls_client.lock().unwrap();
let sync_kind = ls_client.get_sync_kind();
view_info.version += 1;
if let Some(changes) = get_change_for_sync_kind(sync_kind, view, delta) {
ls_client.send_did_change(view.get_id(), changes, view_info.version);
}
}
}
fn did_save(&mut self, view: &mut View<Self::Cache>, _old: Option<&Path>) {
trace!("saved view {}", view.get_id());
let document_text = view.get_document().unwrap();
self.with_language_server_for_view(view, |ls_client| {
ls_client.send_did_save(view.get_id(), &document_text);
});
}
fn did_close(&mut self, view: &View<Self::Cache>) {
trace!("close view {}", view.get_id());
self.with_language_server_for_view(view, |ls_client| {
ls_client.send_did_close(view.get_id());
});
}
fn new_view(&mut self, view: &mut View<Self::Cache>) {
trace!("new view {}", view.get_id());
let document_text = view.get_document().unwrap();
let path = view.get_path();
let view_id = view.get_id();
// TODO: Use Language Idenitifier assigned by core when the
// implementation is settled
if let Some(language_id) = self.get_language_for_view(view) {
let path = path.unwrap();
let workspace_root_uri = {
let config = &self.config.language_config.get_mut(&language_id).unwrap();
config.workspace_identifier.clone().and_then(|identifier| {
let path = view.get_path().unwrap();
let q = get_workspace_root_uri(&identifier, path);
q.ok()
})
};
let result = self.get_lsclient_from_workspace_root(&language_id, &workspace_root_uri);
if let Some((identifier, ls_client)) = result {
self.view_info
.insert(view.get_id(), ViewInfo { version: 0, ls_identifier: identifier });
let mut ls_client = ls_client.lock().unwrap();
let document_uri = Url::from_file_path(path).unwrap();
if !ls_client.is_initialized {
ls_client.send_initialize(workspace_root_uri, move |ls_client, result| {
if let Ok(result) = result {
let init_result: InitializeResult =
serde_json::from_value(result).unwrap();
debug!("Init Result: {:?}", init_result);
ls_client.server_capabilities = Some(init_result.capabilities);
ls_client.is_initialized = true;
ls_client.send_did_open(view_id, document_uri, document_text);
}
});
} else {
ls_client.send_did_open(view_id, document_uri, document_text);
}
}
}
}
fn config_changed(&mut self, _view: &mut View<Self::Cache>, _changes: &ConfigTable) {}
fn get_hover(&mut self, view: &mut View<Self::Cache>, request_id: usize, position: usize) {
let view_id = view.get_id();
let position_ls = get_position_of_offset(view, position);
self.with_language_server_for_view(view, |ls_client| match position_ls {
Ok(position) => ls_client.request_hover(view_id, position, move |ls_client, result| {
let res = result
.map_err(|e| LanguageResponseError::LanguageServerError(format!("{:?}", e)))
.and_then(|h| {
let hover: Option<Hover> = serde_json::from_value(h).unwrap();
hover.ok_or(LanguageResponseError::NullResponse)
});
ls_client.result_queue.push_result(request_id, LspResponse::Hover(res));
ls_client.core.schedule_idle(view_id);
}),
Err(err) => {
ls_client.result_queue.push_result(request_id, LspResponse::Hover(Err(err.into())));
ls_client.core.schedule_idle(view_id);
}
});
}
fn idle(&mut self, view: &mut View<Self::Cache>) {
let result = self.result_queue.pop_result();
if let Some((request_id, reponse)) = result {
match reponse {
LspResponse::Hover(res) => {
let res =
res.and_then(|h| core_hover_from_hover(view, h)).map_err(|e| e.into());
self.with_language_server_for_view(view, |ls_client| {
ls_client.core.display_hover(view.get_id(), request_id, &res)
});
}
}
}
}
}
/// Util Methods
impl LspPlugin {
/// Get the Language Server Client given the Workspace root
/// This method checks if a language server is running at the specified root
/// and returns it else it tries to spawn a new language server and returns a
/// Arc reference to it
fn | (
&mut self,
language_id: &str,
workspace_root: &Option<Url>,
) -> Option<(String, Arc<Mutex<LanguageServerClient>>)> {
workspace_root
.clone()
.map(|r| r.into_string())
.or_else(|| {
let config = &self.config.language_config[language_id];
if config.supports_single_file {
// A generic client is the one that supports single files i.e.
// Non-Workspace projects as well
Some(String::from("generic"))
} else {
None
}
})
.and_then(|language_server_identifier| {
let contains =
self.language_server_clients.contains_key(&language_server_identifier);
if contains {
let client = self.language_server_clients[&language_server_identifier].clone();
Some((language_server_identifier, client))
} else {
let config = &self.config.language_config[language_id];
let client = start_new_server(
config.start_command.clone(),
config.start_arguments.clone(),
config.extensions.clone(),
language_id,
// Unwrap is safe
self.core.clone().unwrap(),
self.result_queue.clone(),
);
match client {
Ok(client) => {
let client_clone = client.clone();
self.language_server_clients
.insert(language_server_identifier.clone(), client);
Some((language_server_identifier, client_clone))
}
Err(err) => {
error!(
"Error occured while starting server for Language: {}: {:?}",
language_id, err
);
None
}
}
}
})
}
/// Tries to get language for the View using the extension of the document.
/// Only searches for the languages supported by the Language Plugin as
/// defined in the config
fn get_language_for_view(&mut self, view: &View<ChunkCache>) -> Option<String> {
view.get_path()
.and_then(|path| path.extension())
.and_then(|extension| extension.to_str())
.and_then(|extension_str| {
for (lang, config) in &self.config.language_config {
if config.extensions.iter().any(|x| x == extension_str) {
return Some(lang.clone());
}
}
None
})
}
fn with_language_server_for_view<F, R>(&mut self, view: &View<ChunkCache>, f: F) -> Option<R>
where
F: FnOnce(&mut LanguageServerClient) -> R,
{
let view_info = self.view_info.get_mut(&view.get_id())?;
let ls_client_arc = &self.language_server_clients[&view_info.ls_identifier];
let mut ls_client = ls_client_arc.lock().unwrap();
Some(f(&mut ls_client))
}
}
| get_lsclient_from_workspace_root | identifier_name |
lsp_plugin.rs | // Copyright 2018 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Implementation of Language Server Plugin
use std::collections::HashMap;
use std::path::Path;
use std::sync::{Arc, Mutex};
use url::Url;
use xi_plugin_lib::{ChunkCache, CoreProxy, Plugin, View};
use xi_rope::rope::RopeDelta;
use crate::conversion_utils::*;
use crate::language_server_client::LanguageServerClient;
use crate::lsp_types::*;
use crate::result_queue::ResultQueue;
use crate::types::{Config, LanguageResponseError, LspResponse};
use crate::utils::*;
use crate::xi_core::{ConfigTable, ViewId};
pub struct ViewInfo {
version: u64,
ls_identifier: String,
}
/// Represents the state of the Language Server Plugin
pub struct LspPlugin {
pub config: Config,
view_info: HashMap<ViewId, ViewInfo>,
core: Option<CoreProxy>,
result_queue: ResultQueue,
language_server_clients: HashMap<String, Arc<Mutex<LanguageServerClient>>>,
}
impl LspPlugin {
pub fn new(config: Config) -> Self {
LspPlugin {
config,
core: None,
result_queue: ResultQueue::new(),
view_info: HashMap::new(),
language_server_clients: HashMap::new(),
}
}
}
impl Plugin for LspPlugin {
type Cache = ChunkCache;
fn initialize(&mut self, core: CoreProxy) {
self.core = Some(core)
}
fn update(
&mut self,
view: &mut View<Self::Cache>,
delta: Option<&RopeDelta>,
_edit_type: String,
_author: String,
) {
let view_info = self.view_info.get_mut(&view.get_id());
if let Some(view_info) = view_info {
// This won't fail since we definitely have a client for the given
// client identifier
let ls_client = &self.language_server_clients[&view_info.ls_identifier];
let mut ls_client = ls_client.lock().unwrap();
let sync_kind = ls_client.get_sync_kind();
view_info.version += 1;
if let Some(changes) = get_change_for_sync_kind(sync_kind, view, delta) {
ls_client.send_did_change(view.get_id(), changes, view_info.version);
}
}
}
fn did_save(&mut self, view: &mut View<Self::Cache>, _old: Option<&Path>) {
trace!("saved view {}", view.get_id());
let document_text = view.get_document().unwrap();
self.with_language_server_for_view(view, |ls_client| {
ls_client.send_did_save(view.get_id(), &document_text);
});
}
fn did_close(&mut self, view: &View<Self::Cache>) {
trace!("close view {}", view.get_id());
self.with_language_server_for_view(view, |ls_client| {
ls_client.send_did_close(view.get_id());
});
}
fn new_view(&mut self, view: &mut View<Self::Cache>) {
trace!("new view {}", view.get_id());
let document_text = view.get_document().unwrap();
let path = view.get_path();
let view_id = view.get_id();
// TODO: Use Language Idenitifier assigned by core when the
// implementation is settled
if let Some(language_id) = self.get_language_for_view(view) {
let path = path.unwrap();
let workspace_root_uri = {
let config = &self.config.language_config.get_mut(&language_id).unwrap();
config.workspace_identifier.clone().and_then(|identifier| {
let path = view.get_path().unwrap();
let q = get_workspace_root_uri(&identifier, path);
q.ok()
})
};
let result = self.get_lsclient_from_workspace_root(&language_id, &workspace_root_uri);
if let Some((identifier, ls_client)) = result {
self.view_info
.insert(view.get_id(), ViewInfo { version: 0, ls_identifier: identifier });
let mut ls_client = ls_client.lock().unwrap();
let document_uri = Url::from_file_path(path).unwrap();
if !ls_client.is_initialized {
ls_client.send_initialize(workspace_root_uri, move |ls_client, result| {
if let Ok(result) = result {
let init_result: InitializeResult =
serde_json::from_value(result).unwrap();
debug!("Init Result: {:?}", init_result);
ls_client.server_capabilities = Some(init_result.capabilities);
ls_client.is_initialized = true;
ls_client.send_did_open(view_id, document_uri, document_text);
}
});
} else {
ls_client.send_did_open(view_id, document_uri, document_text);
}
}
}
}
fn config_changed(&mut self, _view: &mut View<Self::Cache>, _changes: &ConfigTable) {}
fn get_hover(&mut self, view: &mut View<Self::Cache>, request_id: usize, position: usize) {
let view_id = view.get_id();
let position_ls = get_position_of_offset(view, position);
self.with_language_server_for_view(view, |ls_client| match position_ls {
Ok(position) => ls_client.request_hover(view_id, position, move |ls_client, result| {
let res = result
.map_err(|e| LanguageResponseError::LanguageServerError(format!("{:?}", e)))
.and_then(|h| {
let hover: Option<Hover> = serde_json::from_value(h).unwrap();
hover.ok_or(LanguageResponseError::NullResponse)
});
ls_client.result_queue.push_result(request_id, LspResponse::Hover(res));
ls_client.core.schedule_idle(view_id);
}),
Err(err) => {
ls_client.result_queue.push_result(request_id, LspResponse::Hover(Err(err.into())));
ls_client.core.schedule_idle(view_id);
}
});
}
fn idle(&mut self, view: &mut View<Self::Cache>) {
let result = self.result_queue.pop_result();
if let Some((request_id, reponse)) = result {
match reponse {
LspResponse::Hover(res) => {
let res =
res.and_then(|h| core_hover_from_hover(view, h)).map_err(|e| e.into());
self.with_language_server_for_view(view, |ls_client| {
ls_client.core.display_hover(view.get_id(), request_id, &res)
});
}
}
}
}
}
/// Util Methods
impl LspPlugin {
/// Get the Language Server Client given the Workspace root
/// This method checks if a language server is running at the specified root
/// and returns it else it tries to spawn a new language server and returns a
/// Arc reference to it
fn get_lsclient_from_workspace_root(
&mut self,
language_id: &str,
workspace_root: &Option<Url>,
) -> Option<(String, Arc<Mutex<LanguageServerClient>>)> {
workspace_root
.clone()
.map(|r| r.into_string())
.or_else(|| {
let config = &self.config.language_config[language_id];
if config.supports_single_file {
// A generic client is the one that supports single files i.e.
// Non-Workspace projects as well
Some(String::from("generic"))
} else {
None
}
})
.and_then(|language_server_identifier| {
let contains =
self.language_server_clients.contains_key(&language_server_identifier);
if contains {
let client = self.language_server_clients[&language_server_identifier].clone();
Some((language_server_identifier, client))
} else |
})
}
/// Tries to get language for the View using the extension of the document.
/// Only searches for the languages supported by the Language Plugin as
/// defined in the config
fn get_language_for_view(&mut self, view: &View<ChunkCache>) -> Option<String> {
view.get_path()
.and_then(|path| path.extension())
.and_then(|extension| extension.to_str())
.and_then(|extension_str| {
for (lang, config) in &self.config.language_config {
if config.extensions.iter().any(|x| x == extension_str) {
return Some(lang.clone());
}
}
None
})
}
fn with_language_server_for_view<F, R>(&mut self, view: &View<ChunkCache>, f: F) -> Option<R>
where
F: FnOnce(&mut LanguageServerClient) -> R,
{
let view_info = self.view_info.get_mut(&view.get_id())?;
let ls_client_arc = &self.language_server_clients[&view_info.ls_identifier];
let mut ls_client = ls_client_arc.lock().unwrap();
Some(f(&mut ls_client))
}
}
| {
let config = &self.config.language_config[language_id];
let client = start_new_server(
config.start_command.clone(),
config.start_arguments.clone(),
config.extensions.clone(),
language_id,
// Unwrap is safe
self.core.clone().unwrap(),
self.result_queue.clone(),
);
match client {
Ok(client) => {
let client_clone = client.clone();
self.language_server_clients
.insert(language_server_identifier.clone(), client);
Some((language_server_identifier, client_clone))
}
Err(err) => {
error!(
"Error occured while starting server for Language: {}: {:?}",
language_id, err
);
None
}
}
} | conditional_block |
lsp_plugin.rs | // Copyright 2018 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Implementation of Language Server Plugin
use std::collections::HashMap;
use std::path::Path;
use std::sync::{Arc, Mutex};
use url::Url;
use xi_plugin_lib::{ChunkCache, CoreProxy, Plugin, View};
use xi_rope::rope::RopeDelta;
use crate::conversion_utils::*;
use crate::language_server_client::LanguageServerClient;
use crate::lsp_types::*;
use crate::result_queue::ResultQueue;
use crate::types::{Config, LanguageResponseError, LspResponse};
use crate::utils::*;
use crate::xi_core::{ConfigTable, ViewId};
pub struct ViewInfo {
version: u64,
ls_identifier: String,
}
/// Represents the state of the Language Server Plugin
pub struct LspPlugin {
pub config: Config,
view_info: HashMap<ViewId, ViewInfo>,
core: Option<CoreProxy>,
result_queue: ResultQueue,
language_server_clients: HashMap<String, Arc<Mutex<LanguageServerClient>>>,
}
impl LspPlugin {
pub fn new(config: Config) -> Self {
LspPlugin {
config,
core: None,
result_queue: ResultQueue::new(),
view_info: HashMap::new(),
language_server_clients: HashMap::new(),
}
}
}
impl Plugin for LspPlugin {
type Cache = ChunkCache;
fn initialize(&mut self, core: CoreProxy) {
self.core = Some(core)
}
fn update(
&mut self,
view: &mut View<Self::Cache>,
delta: Option<&RopeDelta>,
_edit_type: String,
_author: String,
) {
let view_info = self.view_info.get_mut(&view.get_id());
if let Some(view_info) = view_info {
// This won't fail since we definitely have a client for the given
// client identifier
let ls_client = &self.language_server_clients[&view_info.ls_identifier];
let mut ls_client = ls_client.lock().unwrap();
let sync_kind = ls_client.get_sync_kind();
view_info.version += 1;
if let Some(changes) = get_change_for_sync_kind(sync_kind, view, delta) {
ls_client.send_did_change(view.get_id(), changes, view_info.version);
}
}
}
fn did_save(&mut self, view: &mut View<Self::Cache>, _old: Option<&Path>) {
trace!("saved view {}", view.get_id());
let document_text = view.get_document().unwrap();
self.with_language_server_for_view(view, |ls_client| {
ls_client.send_did_save(view.get_id(), &document_text);
});
}
fn did_close(&mut self, view: &View<Self::Cache>) {
trace!("close view {}", view.get_id());
self.with_language_server_for_view(view, |ls_client| {
ls_client.send_did_close(view.get_id());
});
}
fn new_view(&mut self, view: &mut View<Self::Cache>) {
trace!("new view {}", view.get_id());
let document_text = view.get_document().unwrap();
let path = view.get_path();
let view_id = view.get_id();
// TODO: Use Language Idenitifier assigned by core when the
// implementation is settled
if let Some(language_id) = self.get_language_for_view(view) {
let path = path.unwrap();
let workspace_root_uri = {
let config = &self.config.language_config.get_mut(&language_id).unwrap();
config.workspace_identifier.clone().and_then(|identifier| {
let path = view.get_path().unwrap();
let q = get_workspace_root_uri(&identifier, path);
q.ok()
})
};
let result = self.get_lsclient_from_workspace_root(&language_id, &workspace_root_uri);
if let Some((identifier, ls_client)) = result {
self.view_info
.insert(view.get_id(), ViewInfo { version: 0, ls_identifier: identifier });
let mut ls_client = ls_client.lock().unwrap();
let document_uri = Url::from_file_path(path).unwrap();
if !ls_client.is_initialized {
ls_client.send_initialize(workspace_root_uri, move |ls_client, result| {
if let Ok(result) = result {
let init_result: InitializeResult =
serde_json::from_value(result).unwrap();
debug!("Init Result: {:?}", init_result);
ls_client.server_capabilities = Some(init_result.capabilities);
ls_client.is_initialized = true;
ls_client.send_did_open(view_id, document_uri, document_text);
}
});
} else {
ls_client.send_did_open(view_id, document_uri, document_text);
}
}
}
}
fn config_changed(&mut self, _view: &mut View<Self::Cache>, _changes: &ConfigTable) {}
fn get_hover(&mut self, view: &mut View<Self::Cache>, request_id: usize, position: usize) {
let view_id = view.get_id();
let position_ls = get_position_of_offset(view, position);
self.with_language_server_for_view(view, |ls_client| match position_ls {
Ok(position) => ls_client.request_hover(view_id, position, move |ls_client, result| {
let res = result
.map_err(|e| LanguageResponseError::LanguageServerError(format!("{:?}", e)))
.and_then(|h| {
let hover: Option<Hover> = serde_json::from_value(h).unwrap();
hover.ok_or(LanguageResponseError::NullResponse)
});
ls_client.result_queue.push_result(request_id, LspResponse::Hover(res));
ls_client.core.schedule_idle(view_id);
}),
Err(err) => {
ls_client.result_queue.push_result(request_id, LspResponse::Hover(Err(err.into())));
ls_client.core.schedule_idle(view_id);
}
});
}
fn idle(&mut self, view: &mut View<Self::Cache>) {
let result = self.result_queue.pop_result();
if let Some((request_id, reponse)) = result {
match reponse {
LspResponse::Hover(res) => {
let res =
res.and_then(|h| core_hover_from_hover(view, h)).map_err(|e| e.into());
self.with_language_server_for_view(view, |ls_client| {
ls_client.core.display_hover(view.get_id(), request_id, &res)
});
}
}
}
}
}
/// Util Methods
impl LspPlugin {
/// Get the Language Server Client given the Workspace root
/// This method checks if a language server is running at the specified root
/// and returns it else it tries to spawn a new language server and returns a
/// Arc reference to it
fn get_lsclient_from_workspace_root(
&mut self,
language_id: &str,
workspace_root: &Option<Url>,
) -> Option<(String, Arc<Mutex<LanguageServerClient>>)> {
workspace_root
.clone()
.map(|r| r.into_string())
.or_else(|| {
let config = &self.config.language_config[language_id];
if config.supports_single_file {
// A generic client is the one that supports single files i.e.
// Non-Workspace projects as well
Some(String::from("generic"))
} else {
None
}
})
.and_then(|language_server_identifier| {
let contains =
self.language_server_clients.contains_key(&language_server_identifier);
if contains {
let client = self.language_server_clients[&language_server_identifier].clone();
Some((language_server_identifier, client))
} else {
let config = &self.config.language_config[language_id];
let client = start_new_server(
config.start_command.clone(),
config.start_arguments.clone(),
config.extensions.clone(),
language_id,
// Unwrap is safe
self.core.clone().unwrap(),
self.result_queue.clone(),
);
match client {
Ok(client) => {
let client_clone = client.clone(); | Err(err) => {
error!(
"Error occured while starting server for Language: {}: {:?}",
language_id, err
);
None
}
}
}
})
}
/// Tries to get language for the View using the extension of the document.
/// Only searches for the languages supported by the Language Plugin as
/// defined in the config
fn get_language_for_view(&mut self, view: &View<ChunkCache>) -> Option<String> {
view.get_path()
.and_then(|path| path.extension())
.and_then(|extension| extension.to_str())
.and_then(|extension_str| {
for (lang, config) in &self.config.language_config {
if config.extensions.iter().any(|x| x == extension_str) {
return Some(lang.clone());
}
}
None
})
}
fn with_language_server_for_view<F, R>(&mut self, view: &View<ChunkCache>, f: F) -> Option<R>
where
F: FnOnce(&mut LanguageServerClient) -> R,
{
let view_info = self.view_info.get_mut(&view.get_id())?;
let ls_client_arc = &self.language_server_clients[&view_info.ls_identifier];
let mut ls_client = ls_client_arc.lock().unwrap();
Some(f(&mut ls_client))
}
} | self.language_server_clients
.insert(language_server_identifier.clone(), client);
Some((language_server_identifier, client_clone))
} | random_line_split |
lsp_plugin.rs | // Copyright 2018 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Implementation of Language Server Plugin
use std::collections::HashMap;
use std::path::Path;
use std::sync::{Arc, Mutex};
use url::Url;
use xi_plugin_lib::{ChunkCache, CoreProxy, Plugin, View};
use xi_rope::rope::RopeDelta;
use crate::conversion_utils::*;
use crate::language_server_client::LanguageServerClient;
use crate::lsp_types::*;
use crate::result_queue::ResultQueue;
use crate::types::{Config, LanguageResponseError, LspResponse};
use crate::utils::*;
use crate::xi_core::{ConfigTable, ViewId};
pub struct ViewInfo {
version: u64,
ls_identifier: String,
}
/// Represents the state of the Language Server Plugin
pub struct LspPlugin {
pub config: Config,
view_info: HashMap<ViewId, ViewInfo>,
core: Option<CoreProxy>,
result_queue: ResultQueue,
language_server_clients: HashMap<String, Arc<Mutex<LanguageServerClient>>>,
}
impl LspPlugin {
pub fn new(config: Config) -> Self {
LspPlugin {
config,
core: None,
result_queue: ResultQueue::new(),
view_info: HashMap::new(),
language_server_clients: HashMap::new(),
}
}
}
impl Plugin for LspPlugin {
type Cache = ChunkCache;
fn initialize(&mut self, core: CoreProxy) {
self.core = Some(core)
}
fn update(
&mut self,
view: &mut View<Self::Cache>,
delta: Option<&RopeDelta>,
_edit_type: String,
_author: String,
) {
let view_info = self.view_info.get_mut(&view.get_id());
if let Some(view_info) = view_info {
// This won't fail since we definitely have a client for the given
// client identifier
let ls_client = &self.language_server_clients[&view_info.ls_identifier];
let mut ls_client = ls_client.lock().unwrap();
let sync_kind = ls_client.get_sync_kind();
view_info.version += 1;
if let Some(changes) = get_change_for_sync_kind(sync_kind, view, delta) {
ls_client.send_did_change(view.get_id(), changes, view_info.version);
}
}
}
fn did_save(&mut self, view: &mut View<Self::Cache>, _old: Option<&Path>) {
trace!("saved view {}", view.get_id());
let document_text = view.get_document().unwrap();
self.with_language_server_for_view(view, |ls_client| {
ls_client.send_did_save(view.get_id(), &document_text);
});
}
fn did_close(&mut self, view: &View<Self::Cache>) |
fn new_view(&mut self, view: &mut View<Self::Cache>) {
trace!("new view {}", view.get_id());
let document_text = view.get_document().unwrap();
let path = view.get_path();
let view_id = view.get_id();
// TODO: Use Language Idenitifier assigned by core when the
// implementation is settled
if let Some(language_id) = self.get_language_for_view(view) {
let path = path.unwrap();
let workspace_root_uri = {
let config = &self.config.language_config.get_mut(&language_id).unwrap();
config.workspace_identifier.clone().and_then(|identifier| {
let path = view.get_path().unwrap();
let q = get_workspace_root_uri(&identifier, path);
q.ok()
})
};
let result = self.get_lsclient_from_workspace_root(&language_id, &workspace_root_uri);
if let Some((identifier, ls_client)) = result {
self.view_info
.insert(view.get_id(), ViewInfo { version: 0, ls_identifier: identifier });
let mut ls_client = ls_client.lock().unwrap();
let document_uri = Url::from_file_path(path).unwrap();
if !ls_client.is_initialized {
ls_client.send_initialize(workspace_root_uri, move |ls_client, result| {
if let Ok(result) = result {
let init_result: InitializeResult =
serde_json::from_value(result).unwrap();
debug!("Init Result: {:?}", init_result);
ls_client.server_capabilities = Some(init_result.capabilities);
ls_client.is_initialized = true;
ls_client.send_did_open(view_id, document_uri, document_text);
}
});
} else {
ls_client.send_did_open(view_id, document_uri, document_text);
}
}
}
}
fn config_changed(&mut self, _view: &mut View<Self::Cache>, _changes: &ConfigTable) {}
fn get_hover(&mut self, view: &mut View<Self::Cache>, request_id: usize, position: usize) {
let view_id = view.get_id();
let position_ls = get_position_of_offset(view, position);
self.with_language_server_for_view(view, |ls_client| match position_ls {
Ok(position) => ls_client.request_hover(view_id, position, move |ls_client, result| {
let res = result
.map_err(|e| LanguageResponseError::LanguageServerError(format!("{:?}", e)))
.and_then(|h| {
let hover: Option<Hover> = serde_json::from_value(h).unwrap();
hover.ok_or(LanguageResponseError::NullResponse)
});
ls_client.result_queue.push_result(request_id, LspResponse::Hover(res));
ls_client.core.schedule_idle(view_id);
}),
Err(err) => {
ls_client.result_queue.push_result(request_id, LspResponse::Hover(Err(err.into())));
ls_client.core.schedule_idle(view_id);
}
});
}
fn idle(&mut self, view: &mut View<Self::Cache>) {
let result = self.result_queue.pop_result();
if let Some((request_id, reponse)) = result {
match reponse {
LspResponse::Hover(res) => {
let res =
res.and_then(|h| core_hover_from_hover(view, h)).map_err(|e| e.into());
self.with_language_server_for_view(view, |ls_client| {
ls_client.core.display_hover(view.get_id(), request_id, &res)
});
}
}
}
}
}
/// Util Methods
impl LspPlugin {
/// Get the Language Server Client given the Workspace root
/// This method checks if a language server is running at the specified root
/// and returns it else it tries to spawn a new language server and returns a
/// Arc reference to it
fn get_lsclient_from_workspace_root(
&mut self,
language_id: &str,
workspace_root: &Option<Url>,
) -> Option<(String, Arc<Mutex<LanguageServerClient>>)> {
workspace_root
.clone()
.map(|r| r.into_string())
.or_else(|| {
let config = &self.config.language_config[language_id];
if config.supports_single_file {
// A generic client is the one that supports single files i.e.
// Non-Workspace projects as well
Some(String::from("generic"))
} else {
None
}
})
.and_then(|language_server_identifier| {
let contains =
self.language_server_clients.contains_key(&language_server_identifier);
if contains {
let client = self.language_server_clients[&language_server_identifier].clone();
Some((language_server_identifier, client))
} else {
let config = &self.config.language_config[language_id];
let client = start_new_server(
config.start_command.clone(),
config.start_arguments.clone(),
config.extensions.clone(),
language_id,
// Unwrap is safe
self.core.clone().unwrap(),
self.result_queue.clone(),
);
match client {
Ok(client) => {
let client_clone = client.clone();
self.language_server_clients
.insert(language_server_identifier.clone(), client);
Some((language_server_identifier, client_clone))
}
Err(err) => {
error!(
"Error occured while starting server for Language: {}: {:?}",
language_id, err
);
None
}
}
}
})
}
/// Tries to get language for the View using the extension of the document.
/// Only searches for the languages supported by the Language Plugin as
/// defined in the config
fn get_language_for_view(&mut self, view: &View<ChunkCache>) -> Option<String> {
view.get_path()
.and_then(|path| path.extension())
.and_then(|extension| extension.to_str())
.and_then(|extension_str| {
for (lang, config) in &self.config.language_config {
if config.extensions.iter().any(|x| x == extension_str) {
return Some(lang.clone());
}
}
None
})
}
fn with_language_server_for_view<F, R>(&mut self, view: &View<ChunkCache>, f: F) -> Option<R>
where
F: FnOnce(&mut LanguageServerClient) -> R,
{
let view_info = self.view_info.get_mut(&view.get_id())?;
let ls_client_arc = &self.language_server_clients[&view_info.ls_identifier];
let mut ls_client = ls_client_arc.lock().unwrap();
Some(f(&mut ls_client))
}
}
| {
trace!("close view {}", view.get_id());
self.with_language_server_for_view(view, |ls_client| {
ls_client.send_did_close(view.get_id());
});
} | identifier_body |
cluster.ts | import * as path from 'path';
import * as ec2 from 'aws-cdk-lib/aws-ec2';
import * as iam from 'aws-cdk-lib/aws-iam';
import * as kms from 'aws-cdk-lib/aws-kms';
import * as lambda from 'aws-cdk-lib/aws-lambda';
import * as s3 from 'aws-cdk-lib/aws-s3';
import * as secretsmanager from 'aws-cdk-lib/aws-secretsmanager';
import { ArnFormat, CustomResource, Duration, IResource, Lazy, RemovalPolicy, Resource, SecretValue, Stack, Token } from 'aws-cdk-lib/core';
import { AwsCustomResource, AwsCustomResourcePolicy, PhysicalResourceId, Provider } from 'aws-cdk-lib/custom-resources';
import { Construct } from 'constructs';
import { DatabaseSecret } from './database-secret';
import { Endpoint } from './endpoint';
import { ClusterParameterGroup, IClusterParameterGroup } from './parameter-group';
import { CfnCluster } from 'aws-cdk-lib/aws-redshift';
import { ClusterSubnetGroup, IClusterSubnetGroup } from './subnet-group';
/**
* Possible Node Types to use in the cluster
* used for defining `ClusterProps.nodeType`.
*/
export enum NodeType {
/**
* ds2.xlarge
*/
DS2_XLARGE = 'ds2.xlarge',
/**
* ds2.8xlarge
*/
DS2_8XLARGE = 'ds2.8xlarge',
/**
* dc1.large
*/
DC1_LARGE = 'dc1.large',
/**
* dc1.8xlarge
*/
DC1_8XLARGE = 'dc1.8xlarge',
/**
* dc2.large
*/
DC2_LARGE = 'dc2.large',
/**
* dc2.8xlarge
*/
DC2_8XLARGE = 'dc2.8xlarge',
/**
* ra3.xlplus
*/
RA3_XLPLUS = 'ra3.xlplus',
/**
* ra3.4xlarge
*/
RA3_4XLARGE = 'ra3.4xlarge',
/**
* ra3.16xlarge
*/
RA3_16XLARGE = 'ra3.16xlarge',
}
/**
* What cluster type to use.
* Used by `ClusterProps.clusterType`
*/
export enum ClusterType {
/**
* single-node cluster, the `ClusterProps.numberOfNodes` parameter is not required
*/
SINGLE_NODE = 'single-node',
/**
* multi-node cluster, set the amount of nodes using `ClusterProps.numberOfNodes` parameter
*/
MULTI_NODE = 'multi-node',
}
/**
* Username and password combination
*/
export interface Login {
/**
* Username
*/
readonly masterUsername: string;
/**
* Password
*
* Do not put passwords in your CDK code directly.
*
* @default a Secrets Manager generated password
*/
readonly masterPassword?: SecretValue;
/**
* KMS encryption key to encrypt the generated secret.
*
* @default default master key
*/
readonly encryptionKey?: kms.IKey;
}
/**
* Logging bucket and S3 prefix combination
*/
export interface LoggingProperties {
/**
* Bucket to send logs to.
* Logging information includes queries and connection attempts, for the specified Amazon Redshift cluster.
*
*/
readonly loggingBucket: s3.IBucket
/**
* Prefix used for logging.
*
*/
readonly loggingKeyPrefix: string
}
/**
* Options to add the multi user rotation
*/
export interface RotationMultiUserOptions {
/**
* The secret to rotate. It must be a JSON string with the following format:
* ```
* {
* "engine": <required: database engine>,
* "host": <required: instance host name>,
* "username": <required: username>,
* "password": <required: password>,
* "dbname": <optional: database name>,
* "port": <optional: if not specified, default port will be used>,
* "masterarn": <required: the arn of the master secret which will be used to create users/change passwords>
* }
* ```
*/
readonly secret: secretsmanager.ISecret;
/**
* Specifies the number of days after the previous rotation before
* Secrets Manager triggers the next automatic rotation.
*
* @default Duration.days(30)
*/
readonly automaticallyAfter?: Duration;
}
/**
* Create a Redshift Cluster with a given number of nodes.
* Implemented by `Cluster` via `ClusterBase`.
*/
export interface ICluster extends IResource, ec2.IConnectable, secretsmanager.ISecretAttachmentTarget {
/**
* Name of the cluster
*
* @attribute ClusterName
*/
readonly clusterName: string;
/**
* The endpoint to use for read/write operations
*
* @attribute EndpointAddress,EndpointPort
*/
readonly clusterEndpoint: Endpoint;
}
/**
* Properties that describe an existing cluster instance
*/
export interface ClusterAttributes {
/**
* The security groups of the redshift cluster
*
* @default no security groups will be attached to the import
*/
readonly securityGroups?: ec2.ISecurityGroup[];
/**
* Identifier for the cluster
*/
readonly clusterName: string;
/**
* Cluster endpoint address
*/
readonly clusterEndpointAddress: string;
/**
* Cluster endpoint port
*/
readonly clusterEndpointPort: number;
}
/**
* Properties for a new database cluster
*/
export interface ClusterProps {
/**
* An optional identifier for the cluster
*
* @default - A name is automatically generated.
*/
readonly clusterName?: string;
/**
* Additional parameters to pass to the database engine
* https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html
*
* @default - No parameter group.
*/
readonly parameterGroup?: IClusterParameterGroup;
/**
* Number of compute nodes in the cluster. Only specify this property for multi-node clusters.
*
* Value must be at least 2 and no more than 100.
*
* @default - 2 if `clusterType` is ClusterType.MULTI_NODE, undefined otherwise
*/
readonly numberOfNodes?: number;
/**
* The node type to be provisioned for the cluster.
*
* @default `NodeType.DC2_LARGE`
*/
readonly nodeType?: NodeType;
/**
* Settings for the individual instances that are launched
*
* @default `ClusterType.MULTI_NODE`
*/
readonly clusterType?: ClusterType;
/**
* What port to listen on
*
* @default - The default for the engine is used.
*/
readonly port?: number;
/**
* Whether to enable encryption of data at rest in the cluster.
*
* @default true
*/
readonly encrypted?: boolean
/**
* The KMS key to use for encryption of data at rest.
*
* @default - AWS-managed key, if encryption at rest is enabled
*/
readonly encryptionKey?: kms.IKey;
/**
* A preferred maintenance window day/time range. Should be specified as a range ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC).
*
* Example: 'Sun:23:45-Mon:00:15'
*
* @default - 30-minute window selected at random from an 8-hour block of time for
* each AWS Region, occurring on a random day of the week.
* @see https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#Concepts.DBMaintenance
*/
readonly preferredMaintenanceWindow?: string;
/**
* The VPC to place the cluster in.
*/
readonly vpc: ec2.IVpc;
/**
* Where to place the instances within the VPC
*
* @default - private subnets
*/
readonly vpcSubnets?: ec2.SubnetSelection;
/**
* Security group.
*
* @default - a new security group is created.
*/
readonly securityGroups?: ec2.ISecurityGroup[];
/**
* A cluster subnet group to use with this cluster.
*
* @default - a new subnet group will be created.
*/
readonly subnetGroup?: IClusterSubnetGroup;
/**
* Username and password for the administrative user
*/
readonly masterUser: Login;
/**
* A list of AWS Identity and Access Management (IAM) role that can be used by the cluster to access other AWS services.
* The maximum number of roles to attach to a cluster is subject to a quota.
*
* @default - No role is attached to the cluster.
*/
readonly roles?: iam.IRole[];
/**
* A single AWS Identity and Access Management (IAM) role to be used as the default role for the cluster.
* The default role must be included in the roles list.
*
* @default - No default role is specified for the cluster.
*/
readonly defaultRole?: iam.IRole;
/**
* Name of a database which is automatically created inside the cluster
*
* @default - default_db
*/
readonly defaultDatabaseName?: string;
/**
* Bucket details for log files to be sent to, including prefix.
*
* @default - No logging bucket is used
*/
readonly loggingProperties?: LoggingProperties;
/**
* The removal policy to apply when the cluster and its instances are removed
* from the stack or replaced during an update.
*
* @default RemovalPolicy.RETAIN
*/
readonly removalPolicy?: RemovalPolicy
/**
* Whether to make cluster publicly accessible.
*
* @default false
*/
readonly publiclyAccessible?: boolean
/**
* If this flag is set, the cluster resizing type will be set to classic.
* When resizing a cluster, classic resizing will always provision a new cluster and transfer the data there.
*
* Classic resize takes more time to complete, but it can be useful in cases where the change in node count or
* the node type to migrate to doesn't fall within the bounds for elastic resize.
*
* @see https://docs.aws.amazon.com/redshift/latest/mgmt/managing-cluster-operations.html#elastic-resize
*
* @default - Elastic resize type
*/
readonly classicResizing?: boolean
/**
* The Elastic IP (EIP) address for the cluster.
*
* @see https://docs.aws.amazon.com/redshift/latest/mgmt/managing-clusters-vpc.html
*
* @default - No Elastic IP
*/
readonly elasticIp?: string
/**
* If this flag is set, the cluster will be rebooted when changes to the cluster's parameter group that require a restart to apply.
* @default false
*/
readonly rebootForParameterChanges?: boolean
/**
* If this flag is set, Amazon Redshift forces all COPY and UNLOAD traffic between your cluster and your data repositories through your virtual private cloud (VPC).
*
* @see https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html
*
* @default - false
*/
readonly enhancedVpcRouting?: boolean
}
/**
* A new or imported clustered database.
*/
abstract class ClusterBase extends Resource implements ICluster {
/**
* Name of the cluster
*/
public abstract readonly clusterName: string;
/**
* The endpoint to use for read/write operations
*/
public abstract readonly clusterEndpoint: Endpoint;
/**
* Access to the network connections
*/
public abstract readonly connections: ec2.Connections;
/**
* Renders the secret attachment target specifications.
*/
public asSecretAttachmentTarget(): secretsmanager.SecretAttachmentTargetProps {
return {
targetId: this.clusterName,
targetType: secretsmanager.AttachmentTargetType.REDSHIFT_CLUSTER,
};
}
}
/**
* Create a Redshift cluster a given number of nodes.
*
* @resource AWS::Redshift::Cluster
*/
export class Cluster extends ClusterBase {
/**
* Import an existing DatabaseCluster from properties
*/
public static fromClusterAttributes(scope: Construct, id: string, attrs: ClusterAttributes): ICluster {
class Import extends ClusterBase {
public readonly connections = new ec2.Connections({
securityGroups: attrs.securityGroups,
defaultPort: ec2.Port.tcp(attrs.clusterEndpointPort),
});
public readonly clusterName = attrs.clusterName;
public readonly instanceIdentifiers: string[] = [];
public readonly clusterEndpoint = new Endpoint(attrs.clusterEndpointAddress, attrs.clusterEndpointPort);
}
return new Import(scope, id);
}
/**
* Identifier of the cluster
*/
public readonly clusterName: string;
/**
* The endpoint to use for read/write operations
*/
public readonly clusterEndpoint: Endpoint;
/**
* Access to the network connections
*/
public readonly connections: ec2.Connections;
/**
* The secret attached to this cluster
*/
public readonly secret?: secretsmanager.ISecret;
private readonly singleUserRotationApplication: secretsmanager.SecretRotationApplication;
private readonly multiUserRotationApplication: secretsmanager.SecretRotationApplication;
/**
* The VPC where the DB subnet group is created.
*/
private readonly vpc: ec2.IVpc;
/**
* The subnets used by the DB subnet group.
*/
private readonly vpcSubnets?: ec2.SubnetSelection;
/**
* The underlying CfnCluster
*/
private readonly cluster: CfnCluster;
/**
* The cluster's parameter group
*/
protected parameterGroup?: IClusterParameterGroup;
/**
* The ARNs of the roles that will be attached to the cluster.
*
* **NOTE** Please do not access this directly, use the `addIamRole` method instead.
*/
private readonly roles: iam.IRole[];
constructor(scope: Construct, id: string, props: ClusterProps) {
super(scope, id);
this.vpc = props.vpc;
this.vpcSubnets = props.vpcSubnets ?? {
subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS,
};
this.parameterGroup = props.parameterGroup;
this.roles = props?.roles ? [...props.roles] : [];
const removalPolicy = props.removalPolicy ?? RemovalPolicy.RETAIN;
const subnetGroup = props.subnetGroup ?? new ClusterSubnetGroup(this, 'Subnets', {
description: `Subnets for ${id} Redshift cluster`,
vpc: this.vpc,
vpcSubnets: this.vpcSubnets,
removalPolicy: removalPolicy,
});
const securityGroups = props.securityGroups ?? [new ec2.SecurityGroup(this, 'SecurityGroup', {
description: 'Redshift security group',
vpc: this.vpc,
})];
const securityGroupIds = securityGroups.map(sg => sg.securityGroupId);
let secret: DatabaseSecret | undefined;
if (!props.masterUser.masterPassword) {
secret = new DatabaseSecret(this, 'Secret', {
username: props.masterUser.masterUsername,
encryptionKey: props.masterUser.encryptionKey,
});
}
const clusterType = props.clusterType || ClusterType.MULTI_NODE;
const nodeCount = this.validateNodeCount(clusterType, props.numberOfNodes);
if (props.encrypted === false && props.encryptionKey !== undefined) {
throw new Error('Cannot set property encryptionKey without enabling encryption!');
}
this.singleUserRotationApplication = secretsmanager.SecretRotationApplication.REDSHIFT_ROTATION_SINGLE_USER;
this.multiUserRotationApplication = secretsmanager.SecretRotationApplication.REDSHIFT_ROTATION_MULTI_USER;
let loggingProperties;
if (props.loggingProperties) {
loggingProperties = {
bucketName: props.loggingProperties.loggingBucket.bucketName,
s3KeyPrefix: props.loggingProperties.loggingKeyPrefix,
};
props.loggingProperties.loggingBucket.addToResourcePolicy(
new iam.PolicyStatement(
{
actions: [
's3:GetBucketAcl',
's3:PutObject',
],
resources: [
props.loggingProperties.loggingBucket.arnForObjects('*'),
props.loggingProperties.loggingBucket.bucketArn,
],
principals: [
new iam.ServicePrincipal('redshift.amazonaws.com'),
],
},
),
);
}
this.cluster = new CfnCluster(this, 'Resource', {
// Basic
allowVersionUpgrade: true,
automatedSnapshotRetentionPeriod: 1,
clusterType,
clusterIdentifier: props.clusterName,
clusterSubnetGroupName: subnetGroup.clusterSubnetGroupName,
vpcSecurityGroupIds: securityGroupIds,
port: props.port,
clusterParameterGroupName: props.parameterGroup && props.parameterGroup.clusterParameterGroupName,
// Admin (unsafeUnwrap here is safe)
masterUsername: secret?.secretValueFromJson('username').unsafeUnwrap() ?? props.masterUser.masterUsername,
masterUserPassword: secret?.secretValueFromJson('password').unsafeUnwrap()
?? props.masterUser.masterPassword?.unsafeUnwrap()
?? 'default',
preferredMaintenanceWindow: props.preferredMaintenanceWindow,
nodeType: props.nodeType || NodeType.DC2_LARGE,
numberOfNodes: nodeCount,
loggingProperties,
iamRoles: Lazy.list({ produce: () => this.roles.map(role => role.roleArn) }, { omitEmpty: true }),
dbName: props.defaultDatabaseName || 'default_db',
publiclyAccessible: props.publiclyAccessible || false,
// Encryption
kmsKeyId: props.encryptionKey?.keyId,
encrypted: props.encrypted ?? true,
classic: props.classicResizing,
elasticIp: props.elasticIp,
enhancedVpcRouting: props.enhancedVpcRouting,
});
this.cluster.applyRemovalPolicy(removalPolicy, {
applyToUpdateReplacePolicy: true,
});
this.clusterName = this.cluster.ref;
// create a number token that represents the port of the cluster
const portAttribute = Token.asNumber(this.cluster.attrEndpointPort);
this.clusterEndpoint = new Endpoint(this.cluster.attrEndpointAddress, portAttribute);
if (secret) {
this.secret = secret.attach(this);
}
const defaultPort = ec2.Port.tcp(this.clusterEndpoint.port);
this.connections = new ec2.Connections({ securityGroups, defaultPort });
if (props.rebootForParameterChanges) {
this.enableRebootForParameterChanges();
}
// Add default role if specified and also available in the roles list
if (props.defaultRole) {
if (props.roles?.some(x => x === props.defaultRole)) {
this.addDefaultIamRole(props.defaultRole);
} else {
throw new Error('Default role must be included in role list.');
}
}
}
/**
* Adds the single user rotation of the master password to this cluster.
*
* @param [automaticallyAfter=Duration.days(30)] Specifies the number of days after the previous rotation
* before Secrets Manager triggers the next automatic rotation.
*/
public addRotationSingleUser(automaticallyAfter?: Duration): secretsmanager.SecretRotation {
if (!this.secret) {
throw new Error('Cannot add single user rotation for a cluster without secret.');
}
const id = 'RotationSingleUser';
const existing = this.node.tryFindChild(id);
if (existing) {
throw new Error('A single user rotation was already added to this cluster.');
}
return new secretsmanager.SecretRotation(this, id, {
secret: this.secret,
automaticallyAfter,
application: this.singleUserRotationApplication,
vpc: this.vpc,
vpcSubnets: this.vpcSubnets,
target: this,
});
}
/**
* Adds the multi user rotation to this cluster.
*/
public addRotationMultiUser(id: string, options: RotationMultiUserOptions): secretsmanager.SecretRotation {
if (!this.secret) {
throw new Error('Cannot add multi user rotation for a cluster without secret.');
}
return new secretsmanager.SecretRotation(this, id, {
secret: options.secret,
masterSecret: this.secret,
automaticallyAfter: options.automaticallyAfter,
application: this.multiUserRotationApplication,
vpc: this.vpc,
vpcSubnets: this.vpcSubnets,
target: this,
});
}
private validateNodeCount(clusterType: ClusterType, numberOfNodes?: number): number | undefined {
if (clusterType === ClusterType.SINGLE_NODE) {
// This property must not be set for single-node clusters; be generous and treat a value of 1 node as undefined.
if (numberOfNodes !== undefined && numberOfNodes !== 1) {
throw new Error('Number of nodes must be not be supplied or be 1 for cluster type single-node');
}
return undefined;
} else {
if (Token.isUnresolved(numberOfNodes)) {
return numberOfNodes;
}
const nodeCount = numberOfNodes ?? 2;
if (nodeCount < 2 || nodeCount > 100) {
throw new Error('Number of nodes for cluster type multi-node must be at least 2 and no more than 100');
}
return nodeCount;
}
}
/**
* Adds a parameter to the Clusters' parameter group
*
* @param name the parameter name
* @param value the parameter name
*/
public addToParameterGroup(name: string, value: string): void {
if (!this.parameterGroup) {
const param: { [name: string]: string } = {};
param[name] = value;
this.parameterGroup = new ClusterParameterGroup(this, 'ParameterGroup', {
description: this.cluster.clusterIdentifier ? `Parameter Group for the ${this.cluster.clusterIdentifier} Redshift cluster` : 'Cluster parameter group for family redshift-1.0',
parameters: param,
});
this.cluster.clusterParameterGroupName = this.parameterGroup.clusterParameterGroupName;
} else if (this.parameterGroup instanceof ClusterParameterGroup) {
this.parameterGroup.addParameter(name, value);
} else {
throw new Error('Cannot add a parameter to an imported parameter group.');
}
}
/**
* Enables automatic cluster rebooting when changes to the cluster's parameter group require a restart to apply.
*/
public enableRebootForParameterChanges(): void {
if (this.node.tryFindChild('RedshiftClusterRebooterCustomResource')) {
return;
}
const rebootFunction = new lambda.SingletonFunction(this, 'RedshiftClusterRebooterFunction', {
uuid: '511e207f-13df-4b8b-b632-c32b30b65ac2',
runtime: lambda.Runtime.NODEJS_18_X,
code: lambda.Code.fromAsset(path.join(__dirname, 'cluster-parameter-change-reboot-handler')),
handler: 'index.handler',
timeout: Duration.seconds(900),
});
rebootFunction.addToRolePolicy(new iam.PolicyStatement({
actions: ['redshift:DescribeClusters'],
resources: ['*'],
}));
rebootFunction.addToRolePolicy(new iam.PolicyStatement({
actions: ['redshift:RebootCluster'],
resources: [
Stack.of(this).formatArn({
service: 'redshift',
resource: 'cluster', | }));
const provider = new Provider(this, 'ResourceProvider', {
onEventHandler: rebootFunction,
});
const customResource = new CustomResource(this, 'RedshiftClusterRebooterCustomResource', {
resourceType: 'Custom::RedshiftClusterRebooter',
serviceToken: provider.serviceToken,
properties: {
ClusterId: this.clusterName,
ParameterGroupName: Lazy.string({
produce: () => {
if (!this.parameterGroup) {
throw new Error('Cannot enable reboot for parameter changes when there is no associated ClusterParameterGroup.');
}
return this.parameterGroup.clusterParameterGroupName;
},
}),
ParametersString: Lazy.string({
produce: () => {
if (!(this.parameterGroup instanceof ClusterParameterGroup)) {
throw new Error('Cannot enable reboot for parameter changes when using an imported parameter group.');
}
return JSON.stringify(this.parameterGroup.parameters);
},
}),
},
});
Lazy.any({
produce: () => {
if (!this.parameterGroup) {
throw new Error('Cannot enable reboot for parameter changes when there is no associated ClusterParameterGroup.');
}
customResource.node.addDependency(this, this.parameterGroup);
},
});
}
/**
* Adds default IAM role to cluster. The default IAM role must be already associated to the cluster to be added as the default role.
*
* @param defaultIamRole the IAM role to be set as the default role
*/
public addDefaultIamRole(defaultIamRole: iam.IRole): void {
// Get list of IAM roles attached to cluster
const clusterRoleList = this.roles ?? [];
// Check to see if default role is included in list of cluster IAM roles
var roleAlreadyOnCluster = false;
for (var i = 0; i < clusterRoleList.length; i++) {
if (clusterRoleList[i] === defaultIamRole) {
roleAlreadyOnCluster = true;
break;
}
}
if (!roleAlreadyOnCluster) {
throw new Error('Default role must be associated to the Redshift cluster to be set as the default role.');
}
// On UPDATE or CREATE define the default IAM role. On DELETE, remove the default IAM role
const defaultRoleCustomResource = new AwsCustomResource(this, 'default-role', {
onUpdate: {
service: 'Redshift',
action: 'modifyClusterIamRoles',
parameters: {
ClusterIdentifier: this.cluster.ref,
DefaultIamRoleArn: defaultIamRole.roleArn,
},
physicalResourceId: PhysicalResourceId.of(
`${defaultIamRole.roleArn}-${this.cluster.ref}`,
),
},
onDelete: {
service: 'Redshift',
action: 'modifyClusterIamRoles',
parameters: {
ClusterIdentifier: this.cluster.ref,
DefaultIamRoleArn: '',
},
physicalResourceId: PhysicalResourceId.of(
`${defaultIamRole.roleArn}-${this.cluster.ref}`,
),
},
policy: AwsCustomResourcePolicy.fromSdkCalls({
resources: AwsCustomResourcePolicy.ANY_RESOURCE,
}),
installLatestAwsSdk: false,
});
defaultIamRole.grantPassRole(defaultRoleCustomResource.grantPrincipal);
}
/**
* Adds a role to the cluster
*
* @param role the role to add
*/
public addIamRole(role: iam.IRole): void {
const clusterRoleList = this.roles;
if (clusterRoleList.includes(role)) {
throw new Error(`Role '${role.roleArn}' is already attached to the cluster`);
}
clusterRoleList.push(role);
}
} | resourceName: this.clusterName,
arnFormat: ArnFormat.COLON_RESOURCE_NAME,
}),
], | random_line_split |
cluster.ts | import * as path from 'path';
import * as ec2 from 'aws-cdk-lib/aws-ec2';
import * as iam from 'aws-cdk-lib/aws-iam';
import * as kms from 'aws-cdk-lib/aws-kms';
import * as lambda from 'aws-cdk-lib/aws-lambda';
import * as s3 from 'aws-cdk-lib/aws-s3';
import * as secretsmanager from 'aws-cdk-lib/aws-secretsmanager';
import { ArnFormat, CustomResource, Duration, IResource, Lazy, RemovalPolicy, Resource, SecretValue, Stack, Token } from 'aws-cdk-lib/core';
import { AwsCustomResource, AwsCustomResourcePolicy, PhysicalResourceId, Provider } from 'aws-cdk-lib/custom-resources';
import { Construct } from 'constructs';
import { DatabaseSecret } from './database-secret';
import { Endpoint } from './endpoint';
import { ClusterParameterGroup, IClusterParameterGroup } from './parameter-group';
import { CfnCluster } from 'aws-cdk-lib/aws-redshift';
import { ClusterSubnetGroup, IClusterSubnetGroup } from './subnet-group';
/**
* Possible Node Types to use in the cluster
* used for defining `ClusterProps.nodeType`.
*/
export enum NodeType {
/**
* ds2.xlarge
*/
DS2_XLARGE = 'ds2.xlarge',
/**
* ds2.8xlarge
*/
DS2_8XLARGE = 'ds2.8xlarge',
/**
* dc1.large
*/
DC1_LARGE = 'dc1.large',
/**
* dc1.8xlarge
*/
DC1_8XLARGE = 'dc1.8xlarge',
/**
* dc2.large
*/
DC2_LARGE = 'dc2.large',
/**
* dc2.8xlarge
*/
DC2_8XLARGE = 'dc2.8xlarge',
/**
* ra3.xlplus
*/
RA3_XLPLUS = 'ra3.xlplus',
/**
* ra3.4xlarge
*/
RA3_4XLARGE = 'ra3.4xlarge',
/**
* ra3.16xlarge
*/
RA3_16XLARGE = 'ra3.16xlarge',
}
/**
* What cluster type to use.
* Used by `ClusterProps.clusterType`
*/
export enum ClusterType {
/**
* single-node cluster, the `ClusterProps.numberOfNodes` parameter is not required
*/
SINGLE_NODE = 'single-node',
/**
* multi-node cluster, set the amount of nodes using `ClusterProps.numberOfNodes` parameter
*/
MULTI_NODE = 'multi-node',
}
/**
* Username and password combination
*/
export interface Login {
/**
* Username
*/
readonly masterUsername: string;
/**
* Password
*
* Do not put passwords in your CDK code directly.
*
* @default a Secrets Manager generated password
*/
readonly masterPassword?: SecretValue;
/**
* KMS encryption key to encrypt the generated secret.
*
* @default default master key
*/
readonly encryptionKey?: kms.IKey;
}
/**
* Logging bucket and S3 prefix combination
*/
export interface LoggingProperties {
/**
* Bucket to send logs to.
* Logging information includes queries and connection attempts, for the specified Amazon Redshift cluster.
*
*/
readonly loggingBucket: s3.IBucket
/**
* Prefix used for logging.
*
*/
readonly loggingKeyPrefix: string
}
/**
* Options to add the multi user rotation
*/
export interface RotationMultiUserOptions {
/**
* The secret to rotate. It must be a JSON string with the following format:
* ```
* {
* "engine": <required: database engine>,
* "host": <required: instance host name>,
* "username": <required: username>,
* "password": <required: password>,
* "dbname": <optional: database name>,
* "port": <optional: if not specified, default port will be used>,
* "masterarn": <required: the arn of the master secret which will be used to create users/change passwords>
* }
* ```
*/
readonly secret: secretsmanager.ISecret;
/**
* Specifies the number of days after the previous rotation before
* Secrets Manager triggers the next automatic rotation.
*
* @default Duration.days(30)
*/
readonly automaticallyAfter?: Duration;
}
/**
* Create a Redshift Cluster with a given number of nodes.
* Implemented by `Cluster` via `ClusterBase`.
*/
export interface ICluster extends IResource, ec2.IConnectable, secretsmanager.ISecretAttachmentTarget {
/**
* Name of the cluster
*
* @attribute ClusterName
*/
readonly clusterName: string;
/**
* The endpoint to use for read/write operations
*
* @attribute EndpointAddress,EndpointPort
*/
readonly clusterEndpoint: Endpoint;
}
/**
* Properties that describe an existing cluster instance
*/
export interface ClusterAttributes {
/**
* The security groups of the redshift cluster
*
* @default no security groups will be attached to the import
*/
readonly securityGroups?: ec2.ISecurityGroup[];
/**
* Identifier for the cluster
*/
readonly clusterName: string;
/**
* Cluster endpoint address
*/
readonly clusterEndpointAddress: string;
/**
* Cluster endpoint port
*/
readonly clusterEndpointPort: number;
}
/**
* Properties for a new database cluster
*/
export interface ClusterProps {
/**
* An optional identifier for the cluster
*
* @default - A name is automatically generated.
*/
readonly clusterName?: string;
/**
* Additional parameters to pass to the database engine
* https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html
*
* @default - No parameter group.
*/
readonly parameterGroup?: IClusterParameterGroup;
/**
* Number of compute nodes in the cluster. Only specify this property for multi-node clusters.
*
* Value must be at least 2 and no more than 100.
*
* @default - 2 if `clusterType` is ClusterType.MULTI_NODE, undefined otherwise
*/
readonly numberOfNodes?: number;
/**
* The node type to be provisioned for the cluster.
*
* @default `NodeType.DC2_LARGE`
*/
readonly nodeType?: NodeType;
/**
* Settings for the individual instances that are launched
*
* @default `ClusterType.MULTI_NODE`
*/
readonly clusterType?: ClusterType;
/**
* What port to listen on
*
* @default - The default for the engine is used.
*/
readonly port?: number;
/**
* Whether to enable encryption of data at rest in the cluster.
*
* @default true
*/
readonly encrypted?: boolean
/**
* The KMS key to use for encryption of data at rest.
*
* @default - AWS-managed key, if encryption at rest is enabled
*/
readonly encryptionKey?: kms.IKey;
/**
* A preferred maintenance window day/time range. Should be specified as a range ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC).
*
* Example: 'Sun:23:45-Mon:00:15'
*
* @default - 30-minute window selected at random from an 8-hour block of time for
* each AWS Region, occurring on a random day of the week.
* @see https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#Concepts.DBMaintenance
*/
readonly preferredMaintenanceWindow?: string;
/**
* The VPC to place the cluster in.
*/
readonly vpc: ec2.IVpc;
/**
* Where to place the instances within the VPC
*
* @default - private subnets
*/
readonly vpcSubnets?: ec2.SubnetSelection;
/**
* Security group.
*
* @default - a new security group is created.
*/
readonly securityGroups?: ec2.ISecurityGroup[];
/**
* A cluster subnet group to use with this cluster.
*
* @default - a new subnet group will be created.
*/
readonly subnetGroup?: IClusterSubnetGroup;
/**
* Username and password for the administrative user
*/
readonly masterUser: Login;
/**
* A list of AWS Identity and Access Management (IAM) role that can be used by the cluster to access other AWS services.
* The maximum number of roles to attach to a cluster is subject to a quota.
*
* @default - No role is attached to the cluster.
*/
readonly roles?: iam.IRole[];
/**
* A single AWS Identity and Access Management (IAM) role to be used as the default role for the cluster.
* The default role must be included in the roles list.
*
* @default - No default role is specified for the cluster.
*/
readonly defaultRole?: iam.IRole;
/**
* Name of a database which is automatically created inside the cluster
*
* @default - default_db
*/
readonly defaultDatabaseName?: string;
/**
* Bucket details for log files to be sent to, including prefix.
*
* @default - No logging bucket is used
*/
readonly loggingProperties?: LoggingProperties;
/**
* The removal policy to apply when the cluster and its instances are removed
* from the stack or replaced during an update.
*
* @default RemovalPolicy.RETAIN
*/
readonly removalPolicy?: RemovalPolicy
/**
* Whether to make cluster publicly accessible.
*
* @default false
*/
readonly publiclyAccessible?: boolean
/**
* If this flag is set, the cluster resizing type will be set to classic.
* When resizing a cluster, classic resizing will always provision a new cluster and transfer the data there.
*
* Classic resize takes more time to complete, but it can be useful in cases where the change in node count or
* the node type to migrate to doesn't fall within the bounds for elastic resize.
*
* @see https://docs.aws.amazon.com/redshift/latest/mgmt/managing-cluster-operations.html#elastic-resize
*
* @default - Elastic resize type
*/
readonly classicResizing?: boolean
/**
* The Elastic IP (EIP) address for the cluster.
*
* @see https://docs.aws.amazon.com/redshift/latest/mgmt/managing-clusters-vpc.html
*
* @default - No Elastic IP
*/
readonly elasticIp?: string
/**
* If this flag is set, the cluster will be rebooted when changes to the cluster's parameter group that require a restart to apply.
* @default false
*/
readonly rebootForParameterChanges?: boolean
/**
* If this flag is set, Amazon Redshift forces all COPY and UNLOAD traffic between your cluster and your data repositories through your virtual private cloud (VPC).
*
* @see https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html
*
* @default - false
*/
readonly enhancedVpcRouting?: boolean
}
/**
* A new or imported clustered database.
*/
abstract class ClusterBase extends Resource implements ICluster {
/**
* Name of the cluster
*/
public abstract readonly clusterName: string;
/**
* The endpoint to use for read/write operations
*/
public abstract readonly clusterEndpoint: Endpoint;
/**
* Access to the network connections
*/
public abstract readonly connections: ec2.Connections;
/**
* Renders the secret attachment target specifications.
*/
public | (): secretsmanager.SecretAttachmentTargetProps {
return {
targetId: this.clusterName,
targetType: secretsmanager.AttachmentTargetType.REDSHIFT_CLUSTER,
};
}
}
/**
* Create a Redshift cluster a given number of nodes.
*
* @resource AWS::Redshift::Cluster
*/
export class Cluster extends ClusterBase {
/**
* Import an existing DatabaseCluster from properties
*/
public static fromClusterAttributes(scope: Construct, id: string, attrs: ClusterAttributes): ICluster {
class Import extends ClusterBase {
public readonly connections = new ec2.Connections({
securityGroups: attrs.securityGroups,
defaultPort: ec2.Port.tcp(attrs.clusterEndpointPort),
});
public readonly clusterName = attrs.clusterName;
public readonly instanceIdentifiers: string[] = [];
public readonly clusterEndpoint = new Endpoint(attrs.clusterEndpointAddress, attrs.clusterEndpointPort);
}
return new Import(scope, id);
}
/**
* Identifier of the cluster
*/
public readonly clusterName: string;
/**
* The endpoint to use for read/write operations
*/
public readonly clusterEndpoint: Endpoint;
/**
* Access to the network connections
*/
public readonly connections: ec2.Connections;
/**
* The secret attached to this cluster
*/
public readonly secret?: secretsmanager.ISecret;
private readonly singleUserRotationApplication: secretsmanager.SecretRotationApplication;
private readonly multiUserRotationApplication: secretsmanager.SecretRotationApplication;
/**
* The VPC where the DB subnet group is created.
*/
private readonly vpc: ec2.IVpc;
/**
* The subnets used by the DB subnet group.
*/
private readonly vpcSubnets?: ec2.SubnetSelection;
/**
* The underlying CfnCluster
*/
private readonly cluster: CfnCluster;
/**
* The cluster's parameter group
*/
protected parameterGroup?: IClusterParameterGroup;
/**
* The ARNs of the roles that will be attached to the cluster.
*
* **NOTE** Please do not access this directly, use the `addIamRole` method instead.
*/
private readonly roles: iam.IRole[];
constructor(scope: Construct, id: string, props: ClusterProps) {
super(scope, id);
this.vpc = props.vpc;
this.vpcSubnets = props.vpcSubnets ?? {
subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS,
};
this.parameterGroup = props.parameterGroup;
this.roles = props?.roles ? [...props.roles] : [];
const removalPolicy = props.removalPolicy ?? RemovalPolicy.RETAIN;
const subnetGroup = props.subnetGroup ?? new ClusterSubnetGroup(this, 'Subnets', {
description: `Subnets for ${id} Redshift cluster`,
vpc: this.vpc,
vpcSubnets: this.vpcSubnets,
removalPolicy: removalPolicy,
});
const securityGroups = props.securityGroups ?? [new ec2.SecurityGroup(this, 'SecurityGroup', {
description: 'Redshift security group',
vpc: this.vpc,
})];
const securityGroupIds = securityGroups.map(sg => sg.securityGroupId);
let secret: DatabaseSecret | undefined;
if (!props.masterUser.masterPassword) {
secret = new DatabaseSecret(this, 'Secret', {
username: props.masterUser.masterUsername,
encryptionKey: props.masterUser.encryptionKey,
});
}
const clusterType = props.clusterType || ClusterType.MULTI_NODE;
const nodeCount = this.validateNodeCount(clusterType, props.numberOfNodes);
if (props.encrypted === false && props.encryptionKey !== undefined) {
throw new Error('Cannot set property encryptionKey without enabling encryption!');
}
this.singleUserRotationApplication = secretsmanager.SecretRotationApplication.REDSHIFT_ROTATION_SINGLE_USER;
this.multiUserRotationApplication = secretsmanager.SecretRotationApplication.REDSHIFT_ROTATION_MULTI_USER;
let loggingProperties;
if (props.loggingProperties) {
loggingProperties = {
bucketName: props.loggingProperties.loggingBucket.bucketName,
s3KeyPrefix: props.loggingProperties.loggingKeyPrefix,
};
props.loggingProperties.loggingBucket.addToResourcePolicy(
new iam.PolicyStatement(
{
actions: [
's3:GetBucketAcl',
's3:PutObject',
],
resources: [
props.loggingProperties.loggingBucket.arnForObjects('*'),
props.loggingProperties.loggingBucket.bucketArn,
],
principals: [
new iam.ServicePrincipal('redshift.amazonaws.com'),
],
},
),
);
}
this.cluster = new CfnCluster(this, 'Resource', {
// Basic
allowVersionUpgrade: true,
automatedSnapshotRetentionPeriod: 1,
clusterType,
clusterIdentifier: props.clusterName,
clusterSubnetGroupName: subnetGroup.clusterSubnetGroupName,
vpcSecurityGroupIds: securityGroupIds,
port: props.port,
clusterParameterGroupName: props.parameterGroup && props.parameterGroup.clusterParameterGroupName,
// Admin (unsafeUnwrap here is safe)
masterUsername: secret?.secretValueFromJson('username').unsafeUnwrap() ?? props.masterUser.masterUsername,
masterUserPassword: secret?.secretValueFromJson('password').unsafeUnwrap()
?? props.masterUser.masterPassword?.unsafeUnwrap()
?? 'default',
preferredMaintenanceWindow: props.preferredMaintenanceWindow,
nodeType: props.nodeType || NodeType.DC2_LARGE,
numberOfNodes: nodeCount,
loggingProperties,
iamRoles: Lazy.list({ produce: () => this.roles.map(role => role.roleArn) }, { omitEmpty: true }),
dbName: props.defaultDatabaseName || 'default_db',
publiclyAccessible: props.publiclyAccessible || false,
// Encryption
kmsKeyId: props.encryptionKey?.keyId,
encrypted: props.encrypted ?? true,
classic: props.classicResizing,
elasticIp: props.elasticIp,
enhancedVpcRouting: props.enhancedVpcRouting,
});
this.cluster.applyRemovalPolicy(removalPolicy, {
applyToUpdateReplacePolicy: true,
});
this.clusterName = this.cluster.ref;
// create a number token that represents the port of the cluster
const portAttribute = Token.asNumber(this.cluster.attrEndpointPort);
this.clusterEndpoint = new Endpoint(this.cluster.attrEndpointAddress, portAttribute);
if (secret) {
this.secret = secret.attach(this);
}
const defaultPort = ec2.Port.tcp(this.clusterEndpoint.port);
this.connections = new ec2.Connections({ securityGroups, defaultPort });
if (props.rebootForParameterChanges) {
this.enableRebootForParameterChanges();
}
// Add default role if specified and also available in the roles list
if (props.defaultRole) {
if (props.roles?.some(x => x === props.defaultRole)) {
this.addDefaultIamRole(props.defaultRole);
} else {
throw new Error('Default role must be included in role list.');
}
}
}
/**
* Adds the single user rotation of the master password to this cluster.
*
* @param [automaticallyAfter=Duration.days(30)] Specifies the number of days after the previous rotation
* before Secrets Manager triggers the next automatic rotation.
*/
public addRotationSingleUser(automaticallyAfter?: Duration): secretsmanager.SecretRotation {
if (!this.secret) {
throw new Error('Cannot add single user rotation for a cluster without secret.');
}
const id = 'RotationSingleUser';
const existing = this.node.tryFindChild(id);
if (existing) {
throw new Error('A single user rotation was already added to this cluster.');
}
return new secretsmanager.SecretRotation(this, id, {
secret: this.secret,
automaticallyAfter,
application: this.singleUserRotationApplication,
vpc: this.vpc,
vpcSubnets: this.vpcSubnets,
target: this,
});
}
/**
* Adds the multi user rotation to this cluster.
*/
public addRotationMultiUser(id: string, options: RotationMultiUserOptions): secretsmanager.SecretRotation {
if (!this.secret) {
throw new Error('Cannot add multi user rotation for a cluster without secret.');
}
return new secretsmanager.SecretRotation(this, id, {
secret: options.secret,
masterSecret: this.secret,
automaticallyAfter: options.automaticallyAfter,
application: this.multiUserRotationApplication,
vpc: this.vpc,
vpcSubnets: this.vpcSubnets,
target: this,
});
}
private validateNodeCount(clusterType: ClusterType, numberOfNodes?: number): number | undefined {
if (clusterType === ClusterType.SINGLE_NODE) {
// This property must not be set for single-node clusters; be generous and treat a value of 1 node as undefined.
if (numberOfNodes !== undefined && numberOfNodes !== 1) {
throw new Error('Number of nodes must be not be supplied or be 1 for cluster type single-node');
}
return undefined;
} else {
if (Token.isUnresolved(numberOfNodes)) {
return numberOfNodes;
}
const nodeCount = numberOfNodes ?? 2;
if (nodeCount < 2 || nodeCount > 100) {
throw new Error('Number of nodes for cluster type multi-node must be at least 2 and no more than 100');
}
return nodeCount;
}
}
/**
* Adds a parameter to the Clusters' parameter group
*
* @param name the parameter name
* @param value the parameter name
*/
public addToParameterGroup(name: string, value: string): void {
if (!this.parameterGroup) {
const param: { [name: string]: string } = {};
param[name] = value;
this.parameterGroup = new ClusterParameterGroup(this, 'ParameterGroup', {
description: this.cluster.clusterIdentifier ? `Parameter Group for the ${this.cluster.clusterIdentifier} Redshift cluster` : 'Cluster parameter group for family redshift-1.0',
parameters: param,
});
this.cluster.clusterParameterGroupName = this.parameterGroup.clusterParameterGroupName;
} else if (this.parameterGroup instanceof ClusterParameterGroup) {
this.parameterGroup.addParameter(name, value);
} else {
throw new Error('Cannot add a parameter to an imported parameter group.');
}
}
/**
* Enables automatic cluster rebooting when changes to the cluster's parameter group require a restart to apply.
*/
public enableRebootForParameterChanges(): void {
if (this.node.tryFindChild('RedshiftClusterRebooterCustomResource')) {
return;
}
const rebootFunction = new lambda.SingletonFunction(this, 'RedshiftClusterRebooterFunction', {
uuid: '511e207f-13df-4b8b-b632-c32b30b65ac2',
runtime: lambda.Runtime.NODEJS_18_X,
code: lambda.Code.fromAsset(path.join(__dirname, 'cluster-parameter-change-reboot-handler')),
handler: 'index.handler',
timeout: Duration.seconds(900),
});
rebootFunction.addToRolePolicy(new iam.PolicyStatement({
actions: ['redshift:DescribeClusters'],
resources: ['*'],
}));
rebootFunction.addToRolePolicy(new iam.PolicyStatement({
actions: ['redshift:RebootCluster'],
resources: [
Stack.of(this).formatArn({
service: 'redshift',
resource: 'cluster',
resourceName: this.clusterName,
arnFormat: ArnFormat.COLON_RESOURCE_NAME,
}),
],
}));
const provider = new Provider(this, 'ResourceProvider', {
onEventHandler: rebootFunction,
});
const customResource = new CustomResource(this, 'RedshiftClusterRebooterCustomResource', {
resourceType: 'Custom::RedshiftClusterRebooter',
serviceToken: provider.serviceToken,
properties: {
ClusterId: this.clusterName,
ParameterGroupName: Lazy.string({
produce: () => {
if (!this.parameterGroup) {
throw new Error('Cannot enable reboot for parameter changes when there is no associated ClusterParameterGroup.');
}
return this.parameterGroup.clusterParameterGroupName;
},
}),
ParametersString: Lazy.string({
produce: () => {
if (!(this.parameterGroup instanceof ClusterParameterGroup)) {
throw new Error('Cannot enable reboot for parameter changes when using an imported parameter group.');
}
return JSON.stringify(this.parameterGroup.parameters);
},
}),
},
});
Lazy.any({
produce: () => {
if (!this.parameterGroup) {
throw new Error('Cannot enable reboot for parameter changes when there is no associated ClusterParameterGroup.');
}
customResource.node.addDependency(this, this.parameterGroup);
},
});
}
/**
* Adds default IAM role to cluster. The default IAM role must be already associated to the cluster to be added as the default role.
*
* @param defaultIamRole the IAM role to be set as the default role
*/
public addDefaultIamRole(defaultIamRole: iam.IRole): void {
// Get list of IAM roles attached to cluster
const clusterRoleList = this.roles ?? [];
// Check to see if default role is included in list of cluster IAM roles
var roleAlreadyOnCluster = false;
for (var i = 0; i < clusterRoleList.length; i++) {
if (clusterRoleList[i] === defaultIamRole) {
roleAlreadyOnCluster = true;
break;
}
}
if (!roleAlreadyOnCluster) {
throw new Error('Default role must be associated to the Redshift cluster to be set as the default role.');
}
// On UPDATE or CREATE define the default IAM role. On DELETE, remove the default IAM role
const defaultRoleCustomResource = new AwsCustomResource(this, 'default-role', {
onUpdate: {
service: 'Redshift',
action: 'modifyClusterIamRoles',
parameters: {
ClusterIdentifier: this.cluster.ref,
DefaultIamRoleArn: defaultIamRole.roleArn,
},
physicalResourceId: PhysicalResourceId.of(
`${defaultIamRole.roleArn}-${this.cluster.ref}`,
),
},
onDelete: {
service: 'Redshift',
action: 'modifyClusterIamRoles',
parameters: {
ClusterIdentifier: this.cluster.ref,
DefaultIamRoleArn: '',
},
physicalResourceId: PhysicalResourceId.of(
`${defaultIamRole.roleArn}-${this.cluster.ref}`,
),
},
policy: AwsCustomResourcePolicy.fromSdkCalls({
resources: AwsCustomResourcePolicy.ANY_RESOURCE,
}),
installLatestAwsSdk: false,
});
defaultIamRole.grantPassRole(defaultRoleCustomResource.grantPrincipal);
}
/**
* Adds a role to the cluster
*
* @param role the role to add
*/
public addIamRole(role: iam.IRole): void {
const clusterRoleList = this.roles;
if (clusterRoleList.includes(role)) {
throw new Error(`Role '${role.roleArn}' is already attached to the cluster`);
}
clusterRoleList.push(role);
}
}
| asSecretAttachmentTarget | identifier_name |
cluster.ts | import * as path from 'path';
import * as ec2 from 'aws-cdk-lib/aws-ec2';
import * as iam from 'aws-cdk-lib/aws-iam';
import * as kms from 'aws-cdk-lib/aws-kms';
import * as lambda from 'aws-cdk-lib/aws-lambda';
import * as s3 from 'aws-cdk-lib/aws-s3';
import * as secretsmanager from 'aws-cdk-lib/aws-secretsmanager';
import { ArnFormat, CustomResource, Duration, IResource, Lazy, RemovalPolicy, Resource, SecretValue, Stack, Token } from 'aws-cdk-lib/core';
import { AwsCustomResource, AwsCustomResourcePolicy, PhysicalResourceId, Provider } from 'aws-cdk-lib/custom-resources';
import { Construct } from 'constructs';
import { DatabaseSecret } from './database-secret';
import { Endpoint } from './endpoint';
import { ClusterParameterGroup, IClusterParameterGroup } from './parameter-group';
import { CfnCluster } from 'aws-cdk-lib/aws-redshift';
import { ClusterSubnetGroup, IClusterSubnetGroup } from './subnet-group';
/**
* Possible Node Types to use in the cluster
* used for defining `ClusterProps.nodeType`.
*/
export enum NodeType {
/**
* ds2.xlarge
*/
DS2_XLARGE = 'ds2.xlarge',
/**
* ds2.8xlarge
*/
DS2_8XLARGE = 'ds2.8xlarge',
/**
* dc1.large
*/
DC1_LARGE = 'dc1.large',
/**
* dc1.8xlarge
*/
DC1_8XLARGE = 'dc1.8xlarge',
/**
* dc2.large
*/
DC2_LARGE = 'dc2.large',
/**
* dc2.8xlarge
*/
DC2_8XLARGE = 'dc2.8xlarge',
/**
* ra3.xlplus
*/
RA3_XLPLUS = 'ra3.xlplus',
/**
* ra3.4xlarge
*/
RA3_4XLARGE = 'ra3.4xlarge',
/**
* ra3.16xlarge
*/
RA3_16XLARGE = 'ra3.16xlarge',
}
/**
* What cluster type to use.
* Used by `ClusterProps.clusterType`
*/
export enum ClusterType {
/**
* single-node cluster, the `ClusterProps.numberOfNodes` parameter is not required
*/
SINGLE_NODE = 'single-node',
/**
* multi-node cluster, set the amount of nodes using `ClusterProps.numberOfNodes` parameter
*/
MULTI_NODE = 'multi-node',
}
/**
* Username and password combination
*/
export interface Login {
/**
* Username
*/
readonly masterUsername: string;
/**
* Password
*
* Do not put passwords in your CDK code directly.
*
* @default a Secrets Manager generated password
*/
readonly masterPassword?: SecretValue;
/**
* KMS encryption key to encrypt the generated secret.
*
* @default default master key
*/
readonly encryptionKey?: kms.IKey;
}
/**
* Logging bucket and S3 prefix combination
*/
export interface LoggingProperties {
/**
* Bucket to send logs to.
* Logging information includes queries and connection attempts, for the specified Amazon Redshift cluster.
*
*/
readonly loggingBucket: s3.IBucket
/**
* Prefix used for logging.
*
*/
readonly loggingKeyPrefix: string
}
/**
* Options to add the multi user rotation
*/
export interface RotationMultiUserOptions {
/**
* The secret to rotate. It must be a JSON string with the following format:
* ```
* {
* "engine": <required: database engine>,
* "host": <required: instance host name>,
* "username": <required: username>,
* "password": <required: password>,
* "dbname": <optional: database name>,
* "port": <optional: if not specified, default port will be used>,
* "masterarn": <required: the arn of the master secret which will be used to create users/change passwords>
* }
* ```
*/
readonly secret: secretsmanager.ISecret;
/**
* Specifies the number of days after the previous rotation before
* Secrets Manager triggers the next automatic rotation.
*
* @default Duration.days(30)
*/
readonly automaticallyAfter?: Duration;
}
/**
* Create a Redshift Cluster with a given number of nodes.
* Implemented by `Cluster` via `ClusterBase`.
*/
export interface ICluster extends IResource, ec2.IConnectable, secretsmanager.ISecretAttachmentTarget {
/**
* Name of the cluster
*
* @attribute ClusterName
*/
readonly clusterName: string;
/**
* The endpoint to use for read/write operations
*
* @attribute EndpointAddress,EndpointPort
*/
readonly clusterEndpoint: Endpoint;
}
/**
* Properties that describe an existing cluster instance
*/
export interface ClusterAttributes {
/**
* The security groups of the redshift cluster
*
* @default no security groups will be attached to the import
*/
readonly securityGroups?: ec2.ISecurityGroup[];
/**
* Identifier for the cluster
*/
readonly clusterName: string;
/**
* Cluster endpoint address
*/
readonly clusterEndpointAddress: string;
/**
* Cluster endpoint port
*/
readonly clusterEndpointPort: number;
}
/**
* Properties for a new database cluster
*/
export interface ClusterProps {
/**
* An optional identifier for the cluster
*
* @default - A name is automatically generated.
*/
readonly clusterName?: string;
/**
* Additional parameters to pass to the database engine
* https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html
*
* @default - No parameter group.
*/
readonly parameterGroup?: IClusterParameterGroup;
/**
* Number of compute nodes in the cluster. Only specify this property for multi-node clusters.
*
* Value must be at least 2 and no more than 100.
*
* @default - 2 if `clusterType` is ClusterType.MULTI_NODE, undefined otherwise
*/
readonly numberOfNodes?: number;
/**
* The node type to be provisioned for the cluster.
*
* @default `NodeType.DC2_LARGE`
*/
readonly nodeType?: NodeType;
/**
* Settings for the individual instances that are launched
*
* @default `ClusterType.MULTI_NODE`
*/
readonly clusterType?: ClusterType;
/**
* What port to listen on
*
* @default - The default for the engine is used.
*/
readonly port?: number;
/**
* Whether to enable encryption of data at rest in the cluster.
*
* @default true
*/
readonly encrypted?: boolean
/**
* The KMS key to use for encryption of data at rest.
*
* @default - AWS-managed key, if encryption at rest is enabled
*/
readonly encryptionKey?: kms.IKey;
/**
* A preferred maintenance window day/time range. Should be specified as a range ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC).
*
* Example: 'Sun:23:45-Mon:00:15'
*
* @default - 30-minute window selected at random from an 8-hour block of time for
* each AWS Region, occurring on a random day of the week.
* @see https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#Concepts.DBMaintenance
*/
readonly preferredMaintenanceWindow?: string;
/**
* The VPC to place the cluster in.
*/
readonly vpc: ec2.IVpc;
/**
* Where to place the instances within the VPC
*
* @default - private subnets
*/
readonly vpcSubnets?: ec2.SubnetSelection;
/**
* Security group.
*
* @default - a new security group is created.
*/
readonly securityGroups?: ec2.ISecurityGroup[];
/**
* A cluster subnet group to use with this cluster.
*
* @default - a new subnet group will be created.
*/
readonly subnetGroup?: IClusterSubnetGroup;
/**
* Username and password for the administrative user
*/
readonly masterUser: Login;
/**
* A list of AWS Identity and Access Management (IAM) role that can be used by the cluster to access other AWS services.
* The maximum number of roles to attach to a cluster is subject to a quota.
*
* @default - No role is attached to the cluster.
*/
readonly roles?: iam.IRole[];
/**
* A single AWS Identity and Access Management (IAM) role to be used as the default role for the cluster.
* The default role must be included in the roles list.
*
* @default - No default role is specified for the cluster.
*/
readonly defaultRole?: iam.IRole;
/**
* Name of a database which is automatically created inside the cluster
*
* @default - default_db
*/
readonly defaultDatabaseName?: string;
/**
* Bucket details for log files to be sent to, including prefix.
*
* @default - No logging bucket is used
*/
readonly loggingProperties?: LoggingProperties;
/**
* The removal policy to apply when the cluster and its instances are removed
* from the stack or replaced during an update.
*
* @default RemovalPolicy.RETAIN
*/
readonly removalPolicy?: RemovalPolicy
/**
* Whether to make cluster publicly accessible.
*
* @default false
*/
readonly publiclyAccessible?: boolean
/**
* If this flag is set, the cluster resizing type will be set to classic.
* When resizing a cluster, classic resizing will always provision a new cluster and transfer the data there.
*
* Classic resize takes more time to complete, but it can be useful in cases where the change in node count or
* the node type to migrate to doesn't fall within the bounds for elastic resize.
*
* @see https://docs.aws.amazon.com/redshift/latest/mgmt/managing-cluster-operations.html#elastic-resize
*
* @default - Elastic resize type
*/
readonly classicResizing?: boolean
/**
* The Elastic IP (EIP) address for the cluster.
*
* @see https://docs.aws.amazon.com/redshift/latest/mgmt/managing-clusters-vpc.html
*
* @default - No Elastic IP
*/
readonly elasticIp?: string
/**
* If this flag is set, the cluster will be rebooted when changes to the cluster's parameter group that require a restart to apply.
* @default false
*/
readonly rebootForParameterChanges?: boolean
/**
* If this flag is set, Amazon Redshift forces all COPY and UNLOAD traffic between your cluster and your data repositories through your virtual private cloud (VPC).
*
* @see https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html
*
* @default - false
*/
readonly enhancedVpcRouting?: boolean
}
/**
* A new or imported clustered database.
*/
abstract class ClusterBase extends Resource implements ICluster {
/**
* Name of the cluster
*/
public abstract readonly clusterName: string;
/**
* The endpoint to use for read/write operations
*/
public abstract readonly clusterEndpoint: Endpoint;
/**
* Access to the network connections
*/
public abstract readonly connections: ec2.Connections;
/**
* Renders the secret attachment target specifications.
*/
public asSecretAttachmentTarget(): secretsmanager.SecretAttachmentTargetProps {
return {
targetId: this.clusterName,
targetType: secretsmanager.AttachmentTargetType.REDSHIFT_CLUSTER,
};
}
}
/**
* Create a Redshift cluster a given number of nodes.
*
* @resource AWS::Redshift::Cluster
*/
export class Cluster extends ClusterBase {
/**
* Import an existing DatabaseCluster from properties
*/
public static fromClusterAttributes(scope: Construct, id: string, attrs: ClusterAttributes): ICluster {
class Import extends ClusterBase {
public readonly connections = new ec2.Connections({
securityGroups: attrs.securityGroups,
defaultPort: ec2.Port.tcp(attrs.clusterEndpointPort),
});
public readonly clusterName = attrs.clusterName;
public readonly instanceIdentifiers: string[] = [];
public readonly clusterEndpoint = new Endpoint(attrs.clusterEndpointAddress, attrs.clusterEndpointPort);
}
return new Import(scope, id);
}
/**
* Identifier of the cluster
*/
public readonly clusterName: string;
/**
* The endpoint to use for read/write operations
*/
public readonly clusterEndpoint: Endpoint;
/**
* Access to the network connections
*/
public readonly connections: ec2.Connections;
/**
* The secret attached to this cluster
*/
public readonly secret?: secretsmanager.ISecret;
private readonly singleUserRotationApplication: secretsmanager.SecretRotationApplication;
private readonly multiUserRotationApplication: secretsmanager.SecretRotationApplication;
/**
* The VPC where the DB subnet group is created.
*/
private readonly vpc: ec2.IVpc;
/**
* The subnets used by the DB subnet group.
*/
private readonly vpcSubnets?: ec2.SubnetSelection;
/**
* The underlying CfnCluster
*/
private readonly cluster: CfnCluster;
/**
* The cluster's parameter group
*/
protected parameterGroup?: IClusterParameterGroup;
/**
* The ARNs of the roles that will be attached to the cluster.
*
* **NOTE** Please do not access this directly, use the `addIamRole` method instead.
*/
private readonly roles: iam.IRole[];
constructor(scope: Construct, id: string, props: ClusterProps) {
super(scope, id);
this.vpc = props.vpc;
this.vpcSubnets = props.vpcSubnets ?? {
subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS,
};
this.parameterGroup = props.parameterGroup;
this.roles = props?.roles ? [...props.roles] : [];
const removalPolicy = props.removalPolicy ?? RemovalPolicy.RETAIN;
const subnetGroup = props.subnetGroup ?? new ClusterSubnetGroup(this, 'Subnets', {
description: `Subnets for ${id} Redshift cluster`,
vpc: this.vpc,
vpcSubnets: this.vpcSubnets,
removalPolicy: removalPolicy,
});
const securityGroups = props.securityGroups ?? [new ec2.SecurityGroup(this, 'SecurityGroup', {
description: 'Redshift security group',
vpc: this.vpc,
})];
const securityGroupIds = securityGroups.map(sg => sg.securityGroupId);
let secret: DatabaseSecret | undefined;
if (!props.masterUser.masterPassword) {
secret = new DatabaseSecret(this, 'Secret', {
username: props.masterUser.masterUsername,
encryptionKey: props.masterUser.encryptionKey,
});
}
const clusterType = props.clusterType || ClusterType.MULTI_NODE;
const nodeCount = this.validateNodeCount(clusterType, props.numberOfNodes);
if (props.encrypted === false && props.encryptionKey !== undefined) {
throw new Error('Cannot set property encryptionKey without enabling encryption!');
}
this.singleUserRotationApplication = secretsmanager.SecretRotationApplication.REDSHIFT_ROTATION_SINGLE_USER;
this.multiUserRotationApplication = secretsmanager.SecretRotationApplication.REDSHIFT_ROTATION_MULTI_USER;
let loggingProperties;
if (props.loggingProperties) {
loggingProperties = {
bucketName: props.loggingProperties.loggingBucket.bucketName,
s3KeyPrefix: props.loggingProperties.loggingKeyPrefix,
};
props.loggingProperties.loggingBucket.addToResourcePolicy(
new iam.PolicyStatement(
{
actions: [
's3:GetBucketAcl',
's3:PutObject',
],
resources: [
props.loggingProperties.loggingBucket.arnForObjects('*'),
props.loggingProperties.loggingBucket.bucketArn,
],
principals: [
new iam.ServicePrincipal('redshift.amazonaws.com'),
],
},
),
);
}
this.cluster = new CfnCluster(this, 'Resource', {
// Basic
allowVersionUpgrade: true,
automatedSnapshotRetentionPeriod: 1,
clusterType,
clusterIdentifier: props.clusterName,
clusterSubnetGroupName: subnetGroup.clusterSubnetGroupName,
vpcSecurityGroupIds: securityGroupIds,
port: props.port,
clusterParameterGroupName: props.parameterGroup && props.parameterGroup.clusterParameterGroupName,
// Admin (unsafeUnwrap here is safe)
masterUsername: secret?.secretValueFromJson('username').unsafeUnwrap() ?? props.masterUser.masterUsername,
masterUserPassword: secret?.secretValueFromJson('password').unsafeUnwrap()
?? props.masterUser.masterPassword?.unsafeUnwrap()
?? 'default',
preferredMaintenanceWindow: props.preferredMaintenanceWindow,
nodeType: props.nodeType || NodeType.DC2_LARGE,
numberOfNodes: nodeCount,
loggingProperties,
iamRoles: Lazy.list({ produce: () => this.roles.map(role => role.roleArn) }, { omitEmpty: true }),
dbName: props.defaultDatabaseName || 'default_db',
publiclyAccessible: props.publiclyAccessible || false,
// Encryption
kmsKeyId: props.encryptionKey?.keyId,
encrypted: props.encrypted ?? true,
classic: props.classicResizing,
elasticIp: props.elasticIp,
enhancedVpcRouting: props.enhancedVpcRouting,
});
this.cluster.applyRemovalPolicy(removalPolicy, {
applyToUpdateReplacePolicy: true,
});
this.clusterName = this.cluster.ref;
// create a number token that represents the port of the cluster
const portAttribute = Token.asNumber(this.cluster.attrEndpointPort);
this.clusterEndpoint = new Endpoint(this.cluster.attrEndpointAddress, portAttribute);
if (secret) {
this.secret = secret.attach(this);
}
const defaultPort = ec2.Port.tcp(this.clusterEndpoint.port);
this.connections = new ec2.Connections({ securityGroups, defaultPort });
if (props.rebootForParameterChanges) {
this.enableRebootForParameterChanges();
}
// Add default role if specified and also available in the roles list
if (props.defaultRole) {
if (props.roles?.some(x => x === props.defaultRole)) {
this.addDefaultIamRole(props.defaultRole);
} else {
throw new Error('Default role must be included in role list.');
}
}
}
/**
* Adds the single user rotation of the master password to this cluster.
*
* @param [automaticallyAfter=Duration.days(30)] Specifies the number of days after the previous rotation
* before Secrets Manager triggers the next automatic rotation.
*/
public addRotationSingleUser(automaticallyAfter?: Duration): secretsmanager.SecretRotation {
if (!this.secret) {
throw new Error('Cannot add single user rotation for a cluster without secret.');
}
const id = 'RotationSingleUser';
const existing = this.node.tryFindChild(id);
if (existing) |
return new secretsmanager.SecretRotation(this, id, {
secret: this.secret,
automaticallyAfter,
application: this.singleUserRotationApplication,
vpc: this.vpc,
vpcSubnets: this.vpcSubnets,
target: this,
});
}
/**
* Adds the multi user rotation to this cluster.
*/
public addRotationMultiUser(id: string, options: RotationMultiUserOptions): secretsmanager.SecretRotation {
if (!this.secret) {
throw new Error('Cannot add multi user rotation for a cluster without secret.');
}
return new secretsmanager.SecretRotation(this, id, {
secret: options.secret,
masterSecret: this.secret,
automaticallyAfter: options.automaticallyAfter,
application: this.multiUserRotationApplication,
vpc: this.vpc,
vpcSubnets: this.vpcSubnets,
target: this,
});
}
private validateNodeCount(clusterType: ClusterType, numberOfNodes?: number): number | undefined {
if (clusterType === ClusterType.SINGLE_NODE) {
// This property must not be set for single-node clusters; be generous and treat a value of 1 node as undefined.
if (numberOfNodes !== undefined && numberOfNodes !== 1) {
throw new Error('Number of nodes must be not be supplied or be 1 for cluster type single-node');
}
return undefined;
} else {
if (Token.isUnresolved(numberOfNodes)) {
return numberOfNodes;
}
const nodeCount = numberOfNodes ?? 2;
if (nodeCount < 2 || nodeCount > 100) {
throw new Error('Number of nodes for cluster type multi-node must be at least 2 and no more than 100');
}
return nodeCount;
}
}
/**
* Adds a parameter to the Clusters' parameter group
*
* @param name the parameter name
* @param value the parameter name
*/
public addToParameterGroup(name: string, value: string): void {
if (!this.parameterGroup) {
const param: { [name: string]: string } = {};
param[name] = value;
this.parameterGroup = new ClusterParameterGroup(this, 'ParameterGroup', {
description: this.cluster.clusterIdentifier ? `Parameter Group for the ${this.cluster.clusterIdentifier} Redshift cluster` : 'Cluster parameter group for family redshift-1.0',
parameters: param,
});
this.cluster.clusterParameterGroupName = this.parameterGroup.clusterParameterGroupName;
} else if (this.parameterGroup instanceof ClusterParameterGroup) {
this.parameterGroup.addParameter(name, value);
} else {
throw new Error('Cannot add a parameter to an imported parameter group.');
}
}
/**
* Enables automatic cluster rebooting when changes to the cluster's parameter group require a restart to apply.
*/
public enableRebootForParameterChanges(): void {
if (this.node.tryFindChild('RedshiftClusterRebooterCustomResource')) {
return;
}
const rebootFunction = new lambda.SingletonFunction(this, 'RedshiftClusterRebooterFunction', {
uuid: '511e207f-13df-4b8b-b632-c32b30b65ac2',
runtime: lambda.Runtime.NODEJS_18_X,
code: lambda.Code.fromAsset(path.join(__dirname, 'cluster-parameter-change-reboot-handler')),
handler: 'index.handler',
timeout: Duration.seconds(900),
});
rebootFunction.addToRolePolicy(new iam.PolicyStatement({
actions: ['redshift:DescribeClusters'],
resources: ['*'],
}));
rebootFunction.addToRolePolicy(new iam.PolicyStatement({
actions: ['redshift:RebootCluster'],
resources: [
Stack.of(this).formatArn({
service: 'redshift',
resource: 'cluster',
resourceName: this.clusterName,
arnFormat: ArnFormat.COLON_RESOURCE_NAME,
}),
],
}));
const provider = new Provider(this, 'ResourceProvider', {
onEventHandler: rebootFunction,
});
const customResource = new CustomResource(this, 'RedshiftClusterRebooterCustomResource', {
resourceType: 'Custom::RedshiftClusterRebooter',
serviceToken: provider.serviceToken,
properties: {
ClusterId: this.clusterName,
ParameterGroupName: Lazy.string({
produce: () => {
if (!this.parameterGroup) {
throw new Error('Cannot enable reboot for parameter changes when there is no associated ClusterParameterGroup.');
}
return this.parameterGroup.clusterParameterGroupName;
},
}),
ParametersString: Lazy.string({
produce: () => {
if (!(this.parameterGroup instanceof ClusterParameterGroup)) {
throw new Error('Cannot enable reboot for parameter changes when using an imported parameter group.');
}
return JSON.stringify(this.parameterGroup.parameters);
},
}),
},
});
Lazy.any({
produce: () => {
if (!this.parameterGroup) {
throw new Error('Cannot enable reboot for parameter changes when there is no associated ClusterParameterGroup.');
}
customResource.node.addDependency(this, this.parameterGroup);
},
});
}
/**
* Adds default IAM role to cluster. The default IAM role must be already associated to the cluster to be added as the default role.
*
* @param defaultIamRole the IAM role to be set as the default role
*/
public addDefaultIamRole(defaultIamRole: iam.IRole): void {
// Get list of IAM roles attached to cluster
const clusterRoleList = this.roles ?? [];
// Check to see if default role is included in list of cluster IAM roles
var roleAlreadyOnCluster = false;
for (var i = 0; i < clusterRoleList.length; i++) {
if (clusterRoleList[i] === defaultIamRole) {
roleAlreadyOnCluster = true;
break;
}
}
if (!roleAlreadyOnCluster) {
throw new Error('Default role must be associated to the Redshift cluster to be set as the default role.');
}
// On UPDATE or CREATE define the default IAM role. On DELETE, remove the default IAM role
const defaultRoleCustomResource = new AwsCustomResource(this, 'default-role', {
onUpdate: {
service: 'Redshift',
action: 'modifyClusterIamRoles',
parameters: {
ClusterIdentifier: this.cluster.ref,
DefaultIamRoleArn: defaultIamRole.roleArn,
},
physicalResourceId: PhysicalResourceId.of(
`${defaultIamRole.roleArn}-${this.cluster.ref}`,
),
},
onDelete: {
service: 'Redshift',
action: 'modifyClusterIamRoles',
parameters: {
ClusterIdentifier: this.cluster.ref,
DefaultIamRoleArn: '',
},
physicalResourceId: PhysicalResourceId.of(
`${defaultIamRole.roleArn}-${this.cluster.ref}`,
),
},
policy: AwsCustomResourcePolicy.fromSdkCalls({
resources: AwsCustomResourcePolicy.ANY_RESOURCE,
}),
installLatestAwsSdk: false,
});
defaultIamRole.grantPassRole(defaultRoleCustomResource.grantPrincipal);
}
/**
* Adds a role to the cluster
*
* @param role the role to add
*/
public addIamRole(role: iam.IRole): void {
const clusterRoleList = this.roles;
if (clusterRoleList.includes(role)) {
throw new Error(`Role '${role.roleArn}' is already attached to the cluster`);
}
clusterRoleList.push(role);
}
}
| {
throw new Error('A single user rotation was already added to this cluster.');
} | conditional_block |
cluster.ts | import * as path from 'path';
import * as ec2 from 'aws-cdk-lib/aws-ec2';
import * as iam from 'aws-cdk-lib/aws-iam';
import * as kms from 'aws-cdk-lib/aws-kms';
import * as lambda from 'aws-cdk-lib/aws-lambda';
import * as s3 from 'aws-cdk-lib/aws-s3';
import * as secretsmanager from 'aws-cdk-lib/aws-secretsmanager';
import { ArnFormat, CustomResource, Duration, IResource, Lazy, RemovalPolicy, Resource, SecretValue, Stack, Token } from 'aws-cdk-lib/core';
import { AwsCustomResource, AwsCustomResourcePolicy, PhysicalResourceId, Provider } from 'aws-cdk-lib/custom-resources';
import { Construct } from 'constructs';
import { DatabaseSecret } from './database-secret';
import { Endpoint } from './endpoint';
import { ClusterParameterGroup, IClusterParameterGroup } from './parameter-group';
import { CfnCluster } from 'aws-cdk-lib/aws-redshift';
import { ClusterSubnetGroup, IClusterSubnetGroup } from './subnet-group';
/**
* Possible Node Types to use in the cluster
* used for defining `ClusterProps.nodeType`.
*/
export enum NodeType {
/**
* ds2.xlarge
*/
DS2_XLARGE = 'ds2.xlarge',
/**
* ds2.8xlarge
*/
DS2_8XLARGE = 'ds2.8xlarge',
/**
* dc1.large
*/
DC1_LARGE = 'dc1.large',
/**
* dc1.8xlarge
*/
DC1_8XLARGE = 'dc1.8xlarge',
/**
* dc2.large
*/
DC2_LARGE = 'dc2.large',
/**
* dc2.8xlarge
*/
DC2_8XLARGE = 'dc2.8xlarge',
/**
* ra3.xlplus
*/
RA3_XLPLUS = 'ra3.xlplus',
/**
* ra3.4xlarge
*/
RA3_4XLARGE = 'ra3.4xlarge',
/**
* ra3.16xlarge
*/
RA3_16XLARGE = 'ra3.16xlarge',
}
/**
* What cluster type to use.
* Used by `ClusterProps.clusterType`
*/
export enum ClusterType {
/**
* single-node cluster, the `ClusterProps.numberOfNodes` parameter is not required
*/
SINGLE_NODE = 'single-node',
/**
* multi-node cluster, set the amount of nodes using `ClusterProps.numberOfNodes` parameter
*/
MULTI_NODE = 'multi-node',
}
/**
* Username and password combination
*/
export interface Login {
/**
* Username
*/
readonly masterUsername: string;
/**
* Password
*
* Do not put passwords in your CDK code directly.
*
* @default a Secrets Manager generated password
*/
readonly masterPassword?: SecretValue;
/**
* KMS encryption key to encrypt the generated secret.
*
* @default default master key
*/
readonly encryptionKey?: kms.IKey;
}
/**
* Logging bucket and S3 prefix combination
*/
export interface LoggingProperties {
/**
* Bucket to send logs to.
* Logging information includes queries and connection attempts, for the specified Amazon Redshift cluster.
*
*/
readonly loggingBucket: s3.IBucket
/**
* Prefix used for logging.
*
*/
readonly loggingKeyPrefix: string
}
/**
* Options to add the multi user rotation
*/
export interface RotationMultiUserOptions {
/**
* The secret to rotate. It must be a JSON string with the following format:
* ```
* {
* "engine": <required: database engine>,
* "host": <required: instance host name>,
* "username": <required: username>,
* "password": <required: password>,
* "dbname": <optional: database name>,
* "port": <optional: if not specified, default port will be used>,
* "masterarn": <required: the arn of the master secret which will be used to create users/change passwords>
* }
* ```
*/
readonly secret: secretsmanager.ISecret;
/**
* Specifies the number of days after the previous rotation before
* Secrets Manager triggers the next automatic rotation.
*
* @default Duration.days(30)
*/
readonly automaticallyAfter?: Duration;
}
/**
* Create a Redshift Cluster with a given number of nodes.
* Implemented by `Cluster` via `ClusterBase`.
*/
export interface ICluster extends IResource, ec2.IConnectable, secretsmanager.ISecretAttachmentTarget {
/**
* Name of the cluster
*
* @attribute ClusterName
*/
readonly clusterName: string;
/**
* The endpoint to use for read/write operations
*
* @attribute EndpointAddress,EndpointPort
*/
readonly clusterEndpoint: Endpoint;
}
/**
* Properties that describe an existing cluster instance
*/
export interface ClusterAttributes {
/**
* The security groups of the redshift cluster
*
* @default no security groups will be attached to the import
*/
readonly securityGroups?: ec2.ISecurityGroup[];
/**
* Identifier for the cluster
*/
readonly clusterName: string;
/**
* Cluster endpoint address
*/
readonly clusterEndpointAddress: string;
/**
* Cluster endpoint port
*/
readonly clusterEndpointPort: number;
}
/**
* Properties for a new database cluster
*/
export interface ClusterProps {
/**
* An optional identifier for the cluster
*
* @default - A name is automatically generated.
*/
readonly clusterName?: string;
/**
* Additional parameters to pass to the database engine
* https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html
*
* @default - No parameter group.
*/
readonly parameterGroup?: IClusterParameterGroup;
/**
* Number of compute nodes in the cluster. Only specify this property for multi-node clusters.
*
* Value must be at least 2 and no more than 100.
*
* @default - 2 if `clusterType` is ClusterType.MULTI_NODE, undefined otherwise
*/
readonly numberOfNodes?: number;
/**
* The node type to be provisioned for the cluster.
*
* @default `NodeType.DC2_LARGE`
*/
readonly nodeType?: NodeType;
/**
* Settings for the individual instances that are launched
*
* @default `ClusterType.MULTI_NODE`
*/
readonly clusterType?: ClusterType;
/**
* What port to listen on
*
* @default - The default for the engine is used.
*/
readonly port?: number;
/**
* Whether to enable encryption of data at rest in the cluster.
*
* @default true
*/
readonly encrypted?: boolean
/**
* The KMS key to use for encryption of data at rest.
*
* @default - AWS-managed key, if encryption at rest is enabled
*/
readonly encryptionKey?: kms.IKey;
/**
* A preferred maintenance window day/time range. Should be specified as a range ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC).
*
* Example: 'Sun:23:45-Mon:00:15'
*
* @default - 30-minute window selected at random from an 8-hour block of time for
* each AWS Region, occurring on a random day of the week.
* @see https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#Concepts.DBMaintenance
*/
readonly preferredMaintenanceWindow?: string;
/**
* The VPC to place the cluster in.
*/
readonly vpc: ec2.IVpc;
/**
* Where to place the instances within the VPC
*
* @default - private subnets
*/
readonly vpcSubnets?: ec2.SubnetSelection;
/**
* Security group.
*
* @default - a new security group is created.
*/
readonly securityGroups?: ec2.ISecurityGroup[];
/**
* A cluster subnet group to use with this cluster.
*
* @default - a new subnet group will be created.
*/
readonly subnetGroup?: IClusterSubnetGroup;
/**
* Username and password for the administrative user
*/
readonly masterUser: Login;
/**
* A list of AWS Identity and Access Management (IAM) role that can be used by the cluster to access other AWS services.
* The maximum number of roles to attach to a cluster is subject to a quota.
*
* @default - No role is attached to the cluster.
*/
readonly roles?: iam.IRole[];
/**
* A single AWS Identity and Access Management (IAM) role to be used as the default role for the cluster.
* The default role must be included in the roles list.
*
* @default - No default role is specified for the cluster.
*/
readonly defaultRole?: iam.IRole;
/**
* Name of a database which is automatically created inside the cluster
*
* @default - default_db
*/
readonly defaultDatabaseName?: string;
/**
* Bucket details for log files to be sent to, including prefix.
*
* @default - No logging bucket is used
*/
readonly loggingProperties?: LoggingProperties;
/**
* The removal policy to apply when the cluster and its instances are removed
* from the stack or replaced during an update.
*
* @default RemovalPolicy.RETAIN
*/
readonly removalPolicy?: RemovalPolicy
/**
* Whether to make cluster publicly accessible.
*
* @default false
*/
readonly publiclyAccessible?: boolean
/**
* If this flag is set, the cluster resizing type will be set to classic.
* When resizing a cluster, classic resizing will always provision a new cluster and transfer the data there.
*
* Classic resize takes more time to complete, but it can be useful in cases where the change in node count or
* the node type to migrate to doesn't fall within the bounds for elastic resize.
*
* @see https://docs.aws.amazon.com/redshift/latest/mgmt/managing-cluster-operations.html#elastic-resize
*
* @default - Elastic resize type
*/
readonly classicResizing?: boolean
/**
* The Elastic IP (EIP) address for the cluster.
*
* @see https://docs.aws.amazon.com/redshift/latest/mgmt/managing-clusters-vpc.html
*
* @default - No Elastic IP
*/
readonly elasticIp?: string
/**
* If this flag is set, the cluster will be rebooted when changes to the cluster's parameter group that require a restart to apply.
* @default false
*/
readonly rebootForParameterChanges?: boolean
/**
* If this flag is set, Amazon Redshift forces all COPY and UNLOAD traffic between your cluster and your data repositories through your virtual private cloud (VPC).
*
* @see https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html
*
* @default - false
*/
readonly enhancedVpcRouting?: boolean
}
/**
* A new or imported clustered database.
*/
/**
 * A new or imported clustered database.
 *
 * Shared base for both `Cluster` (owned resource) and the anonymous
 * `Import` class produced by `Cluster.fromClusterAttributes`.
 */
abstract class ClusterBase extends Resource implements ICluster {
  /**
   * Name of the cluster
   */
  public abstract readonly clusterName: string;

  /**
   * The endpoint to use for read/write operations
   */
  public abstract readonly clusterEndpoint: Endpoint;

  /**
   * Access to the network connections
   */
  public abstract readonly connections: ec2.Connections;

  /**
   * Renders the secret attachment target specifications.
   *
   * Identifies this cluster (by name) as a Redshift-type rotation target
   * for Secrets Manager secret attachments.
   */
  public asSecretAttachmentTarget(): secretsmanager.SecretAttachmentTargetProps {
    return {
      targetId: this.clusterName,
      targetType: secretsmanager.AttachmentTargetType.REDSHIFT_CLUSTER,
    };
  }
}
/**
* Create a Redshift cluster a given number of nodes.
*
* @resource AWS::Redshift::Cluster
*/
export class Cluster extends ClusterBase {
/**
* Import an existing DatabaseCluster from properties
*/
public static fromClusterAttributes(scope: Construct, id: string, attrs: ClusterAttributes): ICluster |
/**
* Identifier of the cluster
*/
public readonly clusterName: string;
/**
* The endpoint to use for read/write operations
*/
public readonly clusterEndpoint: Endpoint;
/**
* Access to the network connections
*/
public readonly connections: ec2.Connections;
/**
* The secret attached to this cluster
*/
public readonly secret?: secretsmanager.ISecret;
private readonly singleUserRotationApplication: secretsmanager.SecretRotationApplication;
private readonly multiUserRotationApplication: secretsmanager.SecretRotationApplication;
/**
* The VPC where the DB subnet group is created.
*/
private readonly vpc: ec2.IVpc;
/**
* The subnets used by the DB subnet group.
*/
private readonly vpcSubnets?: ec2.SubnetSelection;
/**
* The underlying CfnCluster
*/
private readonly cluster: CfnCluster;
/**
* The cluster's parameter group
*/
protected parameterGroup?: IClusterParameterGroup;
/**
* The ARNs of the roles that will be attached to the cluster.
*
* **NOTE** Please do not access this directly, use the `addIamRole` method instead.
*/
private readonly roles: iam.IRole[];
/**
 * Creates a new Redshift cluster plus its supporting resources
 * (subnet group, security group, optional generated admin secret).
 *
 * @param scope parent construct
 * @param id construct id, also used in generated descriptions
 * @param props cluster configuration
 */
constructor(scope: Construct, id: string, props: ClusterProps) {
  super(scope, id);

  this.vpc = props.vpc;
  // Default to egress-capable private subnets when no selection is given.
  this.vpcSubnets = props.vpcSubnets ?? {
    subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS,
  };
  this.parameterGroup = props.parameterGroup;
  // Copy the caller's array so later addIamRole() calls cannot mutate it.
  this.roles = props?.roles ? [...props.roles] : [];

  // Data-bearing resource: retain on stack deletion unless told otherwise.
  const removalPolicy = props.removalPolicy ?? RemovalPolicy.RETAIN;

  const subnetGroup = props.subnetGroup ?? new ClusterSubnetGroup(this, 'Subnets', {
    description: `Subnets for ${id} Redshift cluster`,
    vpc: this.vpc,
    vpcSubnets: this.vpcSubnets,
    removalPolicy: removalPolicy,
  });

  const securityGroups = props.securityGroups ?? [new ec2.SecurityGroup(this, 'SecurityGroup', {
    description: 'Redshift security group',
    vpc: this.vpc,
  })];

  const securityGroupIds = securityGroups.map(sg => sg.securityGroupId);

  // Generate an admin secret only when no explicit password was supplied.
  let secret: DatabaseSecret | undefined;
  if (!props.masterUser.masterPassword) {
    secret = new DatabaseSecret(this, 'Secret', {
      username: props.masterUser.masterUsername,
      encryptionKey: props.masterUser.encryptionKey,
    });
  }

  const clusterType = props.clusterType || ClusterType.MULTI_NODE;
  const nodeCount = this.validateNodeCount(clusterType, props.numberOfNodes);

  // A KMS key without encryption enabled is a configuration error.
  if (props.encrypted === false && props.encryptionKey !== undefined) {
    throw new Error('Cannot set property encryptionKey without enabling encryption!');
  }

  this.singleUserRotationApplication = secretsmanager.SecretRotationApplication.REDSHIFT_ROTATION_SINGLE_USER;
  this.multiUserRotationApplication = secretsmanager.SecretRotationApplication.REDSHIFT_ROTATION_MULTI_USER;

  // Wire audit logging to the given bucket and let Redshift write to it.
  let loggingProperties;
  if (props.loggingProperties) {
    loggingProperties = {
      bucketName: props.loggingProperties.loggingBucket.bucketName,
      s3KeyPrefix: props.loggingProperties.loggingKeyPrefix,
    };
    props.loggingProperties.loggingBucket.addToResourcePolicy(
      new iam.PolicyStatement(
        {
          actions: [
            's3:GetBucketAcl',
            's3:PutObject',
          ],
          resources: [
            props.loggingProperties.loggingBucket.arnForObjects('*'),
            props.loggingProperties.loggingBucket.bucketArn,
          ],
          principals: [
            new iam.ServicePrincipal('redshift.amazonaws.com'),
          ],
        },
      ),
    );
  }

  this.cluster = new CfnCluster(this, 'Resource', {
    // Basic
    allowVersionUpgrade: true,
    automatedSnapshotRetentionPeriod: 1,
    clusterType,
    clusterIdentifier: props.clusterName,
    clusterSubnetGroupName: subnetGroup.clusterSubnetGroupName,
    vpcSecurityGroupIds: securityGroupIds,
    port: props.port,
    clusterParameterGroupName: props.parameterGroup && props.parameterGroup.clusterParameterGroupName,
    // Admin (unsafeUnwrap here is safe)
    masterUsername: secret?.secretValueFromJson('username').unsafeUnwrap() ?? props.masterUser.masterUsername,
    masterUserPassword: secret?.secretValueFromJson('password').unsafeUnwrap()
      ?? props.masterUser.masterPassword?.unsafeUnwrap()
      ?? 'default',
    preferredMaintenanceWindow: props.preferredMaintenanceWindow,
    nodeType: props.nodeType || NodeType.DC2_LARGE,
    numberOfNodes: nodeCount,
    loggingProperties,
    // Lazy so roles added later via addIamRole() are still rendered.
    iamRoles: Lazy.list({ produce: () => this.roles.map(role => role.roleArn) }, { omitEmpty: true }),
    dbName: props.defaultDatabaseName || 'default_db',
    publiclyAccessible: props.publiclyAccessible || false,
    // Encryption
    kmsKeyId: props.encryptionKey?.keyId,
    encrypted: props.encrypted ?? true,
    classic: props.classicResizing,
    elasticIp: props.elasticIp,
    enhancedVpcRouting: props.enhancedVpcRouting,
  });
  this.cluster.applyRemovalPolicy(removalPolicy, {
    applyToUpdateReplacePolicy: true,
  });

  this.clusterName = this.cluster.ref;

  // create a number token that represents the port of the cluster
  const portAttribute = Token.asNumber(this.cluster.attrEndpointPort);
  this.clusterEndpoint = new Endpoint(this.cluster.attrEndpointAddress, portAttribute);

  if (secret) {
    this.secret = secret.attach(this);
  }

  const defaultPort = ec2.Port.tcp(this.clusterEndpoint.port);
  this.connections = new ec2.Connections({ securityGroups, defaultPort });

  if (props.rebootForParameterChanges) {
    this.enableRebootForParameterChanges();
  }
  // Add default role if specified and also available in the roles list
  if (props.defaultRole) {
    if (props.roles?.some(x => x === props.defaultRole)) {
      this.addDefaultIamRole(props.defaultRole);
    } else {
      throw new Error('Default role must be included in role list.');
    }
  }
}
/**
* Adds the single user rotation of the master password to this cluster.
*
* @param [automaticallyAfter=Duration.days(30)] Specifies the number of days after the previous rotation
* before Secrets Manager triggers the next automatic rotation.
*/
/**
 * Adds the single user rotation of the master password to this cluster.
 *
 * @param [automaticallyAfter=Duration.days(30)] Specifies the number of days after the previous rotation
 * before Secrets Manager triggers the next automatic rotation.
 */
public addRotationSingleUser(automaticallyAfter?: Duration): secretsmanager.SecretRotation {
  if (!this.secret) {
    throw new Error('Cannot add single user rotation for a cluster without secret.');
  }

  const constructId = 'RotationSingleUser';
  // Only one single-user rotation may exist per cluster.
  if (this.node.tryFindChild(constructId)) {
    throw new Error('A single user rotation was already added to this cluster.');
  }

  return new secretsmanager.SecretRotation(this, constructId, {
    application: this.singleUserRotationApplication,
    secret: this.secret,
    automaticallyAfter,
    vpc: this.vpc,
    vpcSubnets: this.vpcSubnets,
    target: this,
  });
}
/**
* Adds the multi user rotation to this cluster.
*/
/**
 * Adds the multi user rotation to this cluster.
 *
 * @param id construct id for the rotation
 * @param options the secret to rotate and the rotation schedule
 */
public addRotationMultiUser(id: string, options: RotationMultiUserOptions): secretsmanager.SecretRotation {
  const masterSecret = this.secret;
  if (!masterSecret) {
    throw new Error('Cannot add multi user rotation for a cluster without secret.');
  }
  return new secretsmanager.SecretRotation(this, id, {
    application: this.multiUserRotationApplication,
    secret: options.secret,
    masterSecret,
    automaticallyAfter: options.automaticallyAfter,
    vpc: this.vpc,
    vpcSubnets: this.vpcSubnets,
    target: this,
  });
}
/**
 * Validates the node count against the chosen cluster type.
 *
 * @param clusterType single-node or multi-node
 * @param numberOfNodes requested node count, possibly a deploy-time token
 * @returns undefined for single-node clusters, otherwise the validated count (default 2)
 * @throws if the count is incompatible with the cluster type
 *
 * Fix: the single-node error message read "must be not be supplied" — a
 * garbled double negative; corrected to "must not be supplied or be 1".
 */
private validateNodeCount(clusterType: ClusterType, numberOfNodes?: number): number | undefined {
  if (clusterType === ClusterType.SINGLE_NODE) {
    // This property must not be set for single-node clusters; be generous and treat a value of 1 node as undefined.
    if (numberOfNodes !== undefined && numberOfNodes !== 1) {
      throw new Error('Number of nodes must not be supplied or be 1 for cluster type single-node');
    }
    return undefined;
  } else {
    // A deploy-time token cannot be range-checked at synth time.
    if (Token.isUnresolved(numberOfNodes)) {
      return numberOfNodes;
    }
    const nodeCount = numberOfNodes ?? 2;
    if (nodeCount < 2 || nodeCount > 100) {
      throw new Error('Number of nodes for cluster type multi-node must be at least 2 and no more than 100');
    }
    return nodeCount;
  }
}
/**
* Adds a parameter to the Clusters' parameter group
*
* @param name the parameter name
* @param value the parameter name
*/
/**
 * Adds a parameter to the Clusters' parameter group
 *
 * Creates a new ClusterParameterGroup on first use; afterwards appends to
 * it. Imported (read-only) parameter groups cannot be modified.
 *
 * @param name the parameter name
 * @param value the parameter value
 * @throws if the associated parameter group was imported
 */
public addToParameterGroup(name: string, value: string): void {
  if (!this.parameterGroup) {
    // First parameter: lazily create an owned parameter group and attach it.
    const param: { [name: string]: string } = {};
    param[name] = value;
    this.parameterGroup = new ClusterParameterGroup(this, 'ParameterGroup', {
      description: this.cluster.clusterIdentifier ? `Parameter Group for the ${this.cluster.clusterIdentifier} Redshift cluster` : 'Cluster parameter group for family redshift-1.0',
      parameters: param,
    });
    this.cluster.clusterParameterGroupName = this.parameterGroup.clusterParameterGroupName;
  } else if (this.parameterGroup instanceof ClusterParameterGroup) {
    this.parameterGroup.addParameter(name, value);
  } else {
    // An IClusterParameterGroup that is not our concrete class is an import.
    throw new Error('Cannot add a parameter to an imported parameter group.');
  }
}
/**
* Enables automatic cluster rebooting when changes to the cluster's parameter group require a restart to apply.
*/
/**
 * Enables automatic cluster rebooting when changes to the cluster's parameter group require a restart to apply.
 *
 * Creates a singleton Lambda plus a custom resource that reboots the
 * cluster whenever the parameter group's contents change.
 */
public enableRebootForParameterChanges(): void {
  // Idempotent: bail out if the rebooter was already wired up.
  if (this.node.tryFindChild('RedshiftClusterRebooterCustomResource')) {
    return;
  }
  const rebootFunction = new lambda.SingletonFunction(this, 'RedshiftClusterRebooterFunction', {
    uuid: '511e207f-13df-4b8b-b632-c32b30b65ac2',
    runtime: lambda.Runtime.NODEJS_18_X,
    code: lambda.Code.fromAsset(path.join(__dirname, 'cluster-parameter-change-reboot-handler')),
    handler: 'index.handler',
    timeout: Duration.seconds(900),
  });
  // DescribeClusters is scoped to '*'; RebootCluster is scoped to this cluster's ARN.
  rebootFunction.addToRolePolicy(new iam.PolicyStatement({
    actions: ['redshift:DescribeClusters'],
    resources: ['*'],
  }));
  rebootFunction.addToRolePolicy(new iam.PolicyStatement({
    actions: ['redshift:RebootCluster'],
    resources: [
      Stack.of(this).formatArn({
        service: 'redshift',
        resource: 'cluster',
        resourceName: this.clusterName,
        arnFormat: ArnFormat.COLON_RESOURCE_NAME,
      }),
    ],
  }));
  const provider = new Provider(this, 'ResourceProvider', {
    onEventHandler: rebootFunction,
  });
  const customResource = new CustomResource(this, 'RedshiftClusterRebooterCustomResource', {
    resourceType: 'Custom::RedshiftClusterRebooter',
    serviceToken: provider.serviceToken,
    properties: {
      ClusterId: this.clusterName,
      // Lazy: picks up a parameter group attached later via addToParameterGroup().
      ParameterGroupName: Lazy.string({
        produce: () => {
          if (!this.parameterGroup) {
            throw new Error('Cannot enable reboot for parameter changes when there is no associated ClusterParameterGroup.');
          }
          return this.parameterGroup.clusterParameterGroupName;
        },
      }),
      // Stringified parameters force a custom-resource update (and thus a
      // reboot) whenever any parameter value changes.
      ParametersString: Lazy.string({
        produce: () => {
          if (!(this.parameterGroup instanceof ClusterParameterGroup)) {
            throw new Error('Cannot enable reboot for parameter changes when using an imported parameter group.');
          }
          return JSON.stringify(this.parameterGroup.parameters);
        },
      }),
    },
  });
  // NOTE(review): this Lazy token is never attached to any construct or
  // property, so it is unclear from this file whether produce() — and hence
  // the addDependency call — ever executes during synthesis. Confirm the
  // dependency on the parameter group is actually recorded.
  Lazy.any({
    produce: () => {
      if (!this.parameterGroup) {
        throw new Error('Cannot enable reboot for parameter changes when there is no associated ClusterParameterGroup.');
      }
      customResource.node.addDependency(this, this.parameterGroup);
    },
  });
}
/**
* Adds default IAM role to cluster. The default IAM role must be already associated to the cluster to be added as the default role.
*
* @param defaultIamRole the IAM role to be set as the default role
*/
public addDefaultIamRole(defaultIamRole: iam.IRole): void {
// Get list of IAM roles attached to cluster
const clusterRoleList = this.roles ?? [];
// Check to see if default role is included in list of cluster IAM roles
var roleAlreadyOnCluster = false;
for (var i = 0; i < clusterRoleList.length; i++) {
if (clusterRoleList[i] === defaultIamRole) {
roleAlreadyOnCluster = true;
break;
}
}
if (!roleAlreadyOnCluster) {
throw new Error('Default role must be associated to the Redshift cluster to be set as the default role.');
}
// On UPDATE or CREATE define the default IAM role. On DELETE, remove the default IAM role
const defaultRoleCustomResource = new AwsCustomResource(this, 'default-role', {
onUpdate: {
service: 'Redshift',
action: 'modifyClusterIamRoles',
parameters: {
ClusterIdentifier: this.cluster.ref,
DefaultIamRoleArn: defaultIamRole.roleArn,
},
physicalResourceId: PhysicalResourceId.of(
`${defaultIamRole.roleArn}-${this.cluster.ref}`,
),
},
onDelete: {
service: 'Redshift',
action: 'modifyClusterIamRoles',
parameters: {
ClusterIdentifier: this.cluster.ref,
DefaultIamRoleArn: '',
},
physicalResourceId: PhysicalResourceId.of(
`${defaultIamRole.roleArn}-${this.cluster.ref}`,
),
},
policy: AwsCustomResourcePolicy.fromSdkCalls({
resources: AwsCustomResourcePolicy.ANY_RESOURCE,
}),
installLatestAwsSdk: false,
});
defaultIamRole.grantPassRole(defaultRoleCustomResource.grantPrincipal);
}
/**
* Adds a role to the cluster
*
* @param role the role to add
*/
/**
 * Adds a role to the cluster
 *
 * @param role the role to add
 * @throws if the role is already attached
 */
public addIamRole(role: iam.IRole): void {
  // Guard against attaching the same role twice.
  if (this.roles.indexOf(role) !== -1) {
    throw new Error(`Role '${role.roleArn}' is already attached to the cluster`);
  }
  this.roles.push(role);
}
}
| {
class Import extends ClusterBase {
public readonly connections = new ec2.Connections({
securityGroups: attrs.securityGroups,
defaultPort: ec2.Port.tcp(attrs.clusterEndpointPort),
});
public readonly clusterName = attrs.clusterName;
public readonly instanceIdentifiers: string[] = [];
public readonly clusterEndpoint = new Endpoint(attrs.clusterEndpointAddress, attrs.clusterEndpointPort);
}
return new Import(scope, id);
} | identifier_body |
ipProcess.py | # -*- coding: utf-8 -*-
'''
Created on Mon May 14 16:31:21 2018
@author: TimL
'''
# Copyright (c) 2019. Induced Polarization Associates, LLC, Seattle, WA
import os
import scipy as sp
import commonSense as cs
from scipy import fftpack as spfftpack
from datetime import datetime
import pickle
def ipProcess():
    '''
    Reads text files in a data folder and saves frequency domain results to a
    pickled file to be opened later for plotting.
    '''
    # Processed result choice.
    # 'raw': raw voltage waveforms from one packet in each file.
    # 'zAnyF': impedance phase and mag at non-zero fft frequencies,
    #          not skipping.
    saveThis = 'raw'
    # Number nonzero frequencies saved to the .pkl file with the zero frequency
    freqCount = 200
    # Whether to save absolute phase results.
    savePhase = False
    # Whether to select a specific file for processing, as opposed to all of
    # them.
    selectedFile = True
    selectedFileNum = 5
    # NOTE(review): hard-coded Windows-only data folder — parameterize before
    # reuse on another machine or OS.
    pklFolder = r'C:\Users\timl\Documents\IP_data_plots\190506_eagle'
    rawFolder = os.path.join(pklFolder, 'rawData')
    # Only plain files (not subdirectories) in the raw-data folder.
    fileList = ([f for f in os.listdir(rawFolder)
                 if os.path.isfile(os.path.join(rawFolder, f))])
    # Catalog the file numbers stored in each file title. Remove from the list
    # files that don't have a number.
    fileNumList = []
    keepFileList = sp.zeros_like(fileList, dtype=bool)
    for t in range(len(fileList)):
        # File names look like 'name_<number>.<ext>': the number sits between
        # the first '_' and the first '.'.
        uscoreIdx = fileList[t].find('_')
        dotIdx = fileList[t].find('.')
        if uscoreIdx >= 0 and dotIdx >= 0:
            try:
                fileNum = int(fileList[t][uscoreIdx+1: dotIdx])
            except ValueError:
                # Not an integer between '_' and '.': silently skip this file.
                pass
            else:
                if not selectedFile or (selectedFile and
                                        fileNum == selectedFileNum):
                    fileNumList.append(fileNum)
                    # Keep this file in the list.
                    keepFileList[t] = True
    # Drop all files from the list that didn't have a number.
    # Walk through in reverse order so as to not disturb index numbers as
    # elements are removed.
    for t in range(len(fileList))[::-1]:
        # '~' is element-wise logical NOT on the numpy bool array entries.
        if ~keepFileList[t]:
            del fileList[t]
    # Convert to arrays.
    fileArr = sp.array(fileList)
    fileNumArr = sp.array(fileNumList, dtype=int)
    del fileList
    del keepFileList
    # Sort by file numbers.
    sortKey = fileNumArr.argsort()
    fileArr = fileArr[sortKey]
    fileNumArr = fileNumArr[sortKey]
    # List of class instances containing recorded data.
    a = []
    for t in range(len(fileArr)):
        a.append(fileClass())
    # Read the data in from the files.
    for t in range(len(a)):
        # Packet choice if saving raw voltages.
        rawPkt = 102  # 1-indexed.
        filePath = os.path.join(rawFolder, fileArr[t])
        a[t].introduce(fileArr[t])
        a[t].readTxt(filePath, saveThis)
        # Remove unwanted fields to cut down on the saved file size.
        if saveThis == 'zAnyF' or saveThis == 'upsideDown':
            del a[t].raw
            del a[t].fft
            del a[t].phaseUnCorr
            del a[t].mag16Bit
            if not savePhase:
                del a[t].phase
            # Array mask for saving.
            mask = sp.zeros(a[t].n, dtype=bool)
            # Save non-zero frequencies and the DC frequency.
            mask[:freqCount + 1] = True
            a[t].freq = a[t].freq[mask]
            a[t].phaseDiff = a[t].phaseDiff[..., mask]
            a[t].magPhys = a[t].magPhys[..., mask]
            a[t].zMag = a[t].zMag[..., mask]
            if savePhase:
                a[t].phase = a[t].phase[..., mask]
        elif saveThis == 'raw':
            # Keep only the raw waveform of one packet; drop everything else.
            del a[t].fft
            del a[t].phaseUnCorr
            del a[t].mag16Bit
            del a[t].phase
            del a[t].freq
            del a[t].phaseDiff
            del a[t].magPhys
            del a[t].zMag
            p = cs.find(a[t].pkt, rawPkt)
            if p == -1:
                # Save the last packet if the requested packet number isn't in
                # the file. (p == -1 indexes the last element below.)
                rawPkt = a[t].pkt[p]
            a[t].raw = a[t].raw[:, p, :]
            # Overwrite the list of packet numbers with the one packet number
            # that was saved.
            a[t].pkt = rawPkt
    # Save the object to a file named after the folder name.
    # NOTE(review): splitting on '\\' assumes Windows paths — os.path.basename
    # would be portable; confirm before running elsewhere.
    lastSlash = pklFolder.rfind('\\')
    saveFile = pklFolder[lastSlash+1:] + '_' + saveThis + '.pkl'
    savePath = os.path.join(pklFolder, saveFile)
    # Saving the list object:
    with open(savePath, 'wb') as f:  # Python 3: open(..., 'wb')
        pickle.dump(a, f)
class fileClass:
def introduce(self, fileName):
    """Announce on stdout which source file this object is built from."""
    print(f'Creating {self} from {fileName}.')
def readTxt(self, filePath, saveThis):
# Read IP measurements from a text file.
with open(filePath, 'r') as fh:
# Number of lines in the file.
lineCount = self.countLines(fh)
# Rewind the pointer in the file back to the beginning.
fh.seek(0)
# Initialize the packet counter.
p = -1
# Initialize the sample index.
s = -1
for lidx, line in enumerate(fh, 1):
# Strip off trailing newline characters.
line = line.rstrip('\n')
if s >= 0:
# Read in raw voltage values.
self.raw[:, p, s] = (
sp.fromstring(line, dtype=float, sep=','))
if s == self.n - 1:
# Reset the counter to below zero.
s = -1
else:
# Increment the sample counter for the next read.
s += 1
elif lidx > 10:
if line[0] == '$':
# Increment the packet index.
p += 1
# Reset the time domain quality parameter index.
qp = 0
# Packet number
self.pkt[p] = int(line[1:])
elif line[0] == '\'':
# CPU UTC Date and Time Strings.
(self.cpuDTStr[p].d,
self.cpuDTStr[p].t) = line[1:].split(',')
# Translate to datetime object.
self.cpuDT[p] = self.str2DateTime(self.cpuDTStr[p])
elif line[0] == '@':
# GPS UTC Date and Time Strings,
# and latitude and longitude fixes.
(self.gpsDTStr[p].d,
self.gpsDTStr[p].t,
self.lat[p],
self.longi[p]) = line[1:].split(',')
# Translate to datetime object.
self.gpsDT[p] = self.str2DateTime(self.gpsDTStr[p])
# Type casting.
self.lat[p] = float(self.lat[p])
self.longi[p] = float(self.longi[p])
elif qp < 7:
qp += 1
if qp == 3 or qp == 4 or qp == 5:
typ = float # Means are saved as floats.
else:
typ = int # Counts are saved as integers.
assignArr = sp.fromstring(line, dtype=typ, sep=',')
if qp == 1:
# Count of measurements clipped on the high end of
# the MccDaq board's input range.
self.clipHi[:, p] = assignArr
elif qp == 2:
# Count of measurements clipped on the low end of
# the MccDaq board's input range.
self.clipLo[:, p] = assignArr
elif qp == 3:
# Mean measurement value over the packet as a
# percentage of the AIn() half range.
self.meanPct[:, p] = assignArr
elif qp == 4:
# (pct) Mean value of sample measurements above
# or equal to the mean.
self.meanUpPct[:, p] = assignArr
elif qp == 5:
# (pct) Mean value of sample measurements below
# the mean.
self.meanDnPct[:, p] = assignArr
elif qp == 6:
# Count of measurements above or equal to the mean.
self.countUp[:, p] = assignArr
elif qp == 7:
# Count of measurements below the mean.
self.countDn[:, p] = assignArr
# Set the sample index to 0 to start.
s = 0
elif lidx == 1:
(self.fileDateStr, # UTC date file was created.
self.fileNum) = line.split(',') # File number in set.
# Type casting.
self.fileNum = int(self.fileNum)
elif lidx == 2:
self.descript = line # Description of the test.
elif lidx == 3:
self.minor = line # Minor note.
elif lidx == 4:
self.major = line # Major note.
elif lidx == 5:
(self.scanChCount, # number of channels in each A/D scan.
self.chCount, # number of channels written to the file.
self.n, # Number of samples in the FFT time series.
self.fs, # (Hz) FFT sampling frequency.
self.xmitFund) = line.split(',') # (Hz) Transmit Square
# wave fundamental frequency.
# Type casting.
self.scanChCount = int(self.scanChCount)
self.chCount = int(self.chCount)
self.n = int(self.n)
self.fs = int(self.fs)
self.xmitFund = float(self.xmitFund)
# Each file contains a file header of length 10 lines,
# followed by packets. Packets contain (11 + n) lines each.
self.pktCount = int((lineCount - 10)/(11 + self.n))
# Dimension arrays indexed by packet.
self.dimArrays()
elif lidx == 6:
|
elif lidx == 7:
# Voltage measurement names.
# 0-indexed by channel number.
self.measStr = line.split(',')
elif lidx == 8:
# Construct arrays using the scipy package.
# 5B amplifier maximum of the input range (V).
# 0-indexed by channel number.
self.In5BHi = sp.fromstring(line, dtype=float, sep=',')
elif lidx == 9:
# 5B amplifier maximum of the output range (V).
# 0-indexed by channel number.
self.Out5BHi = sp.fromstring(line, dtype=float, sep=',')
elif lidx == 10:
# MccDaq board AIn() maximum of the input range (V).
# 0-indexed by channel number.
self.ALoadQHi = sp.fromstring(line, dtype=float, sep=',')
# After the file has been read, perform some calculations.
self.postRead(saveThis)
def dimArrays(self):
    """Allocate per-packet and per-channel result arrays, zero-filled.

    Requires self.chCount, self.pktCount and self.n to have been parsed
    from the file header first.

    Bug fix: the original used ``[cs.emptyClass()]*self.pktCount``, which
    repeats ONE shared instance — attribute writes like
    ``self.cpuDTStr[p].d = ...`` then aliased across every packet. A list
    comprehension creates a distinct instance per packet.
    """
    shape2D = (self.chCount, self.pktCount)
    # 0-indexed by packet number.
    self.pkt = sp.zeros(self.pktCount, dtype=int)
    # One distinct date/time holder per packet (see bug-fix note above).
    self.cpuDTStr = [cs.emptyClass() for _ in range(self.pktCount)]
    self.cpuDT = [0]*self.pktCount
    self.gpsDTStr = [cs.emptyClass() for _ in range(self.pktCount)]
    self.gpsDT = [0]*self.pktCount
    self.lat = sp.zeros(self.pktCount, dtype=float)
    self.longi = sp.zeros(self.pktCount, dtype=float)
    # 0-indexed by channel number.
    # 0-indexed by packet number.
    self.clipHi = sp.zeros(shape2D, dtype=int)
    self.clipLo = sp.zeros(shape2D, dtype=int)
    self.meanPct = sp.zeros(shape2D, dtype=float)
    self.meanUpPct = sp.zeros(shape2D, dtype=float)
    self.meanDnPct = sp.zeros(shape2D, dtype=float)
    self.meanPhys = sp.zeros(shape2D, dtype=float)
    self.meanUpPhys = sp.zeros(shape2D, dtype=float)
    self.meanDnPhys = sp.zeros(shape2D, dtype=float)
    self.countUp = sp.zeros(shape2D, dtype=int)
    self.countDn = sp.zeros(shape2D, dtype=int)
    # 0-indexed by channel number.
    # 0-indexed by packet number.
    # 0-indexed by sample number.
    self.raw = sp.zeros((self.chCount, self.pktCount, self.n), dtype=float)
def str2DateTime(self, dTStr):
    """Convert a date/time string pair object into a datetime.

    dTStr.d is 'YYMMDD' (two-digit year, 2000-based); dTStr.t is
    'HHMMSS.mmm'. A year of 2000 is the 'no fix' sentinel and maps to
    datetime.min.
    """
    year = 2000 + int(dTStr.d[0:2])
    month = int(dTStr.d[2:4])
    day = int(dTStr.d[4:6])
    hour = int(dTStr.t[0:2])
    minute = int(dTStr.t[2:4])
    second = int(dTStr.t[4:6])
    microsec = 1000 * int(dTStr.t[7:10])
    if year == 2000:
        # No valid fix recorded.
        return datetime.min
    return datetime(year, month, day, hour, minute, second, microsec)
def computePhys(self, currentCh):
    """Convert the stored percent-of-range means to physical units.

    currentCh: channel index whose voltage is converted to a current.
    """
    self.meanPhys = self.pct2Phys(self.meanPct, currentCh)
    self.meanUpPhys = self.pct2Phys(self.meanUpPct, currentCh)
    self.meanDnPhys = self.pct2Phys(self.meanDnPct, currentCh)
def pct2Phys(self, pct, currentCh):
phys = sp.zeros_like(pct, dtype=float)
for ch in range(self.chCount):
phys[ch, :] = (pct[ch, :] / 100 *
self.ALoadQHi[ch] * self.In5BHi[ch] /
self.Out5BHi[ch]) # (V)
# Convert the voltage on the current measurement channel to a current.
phys[currentCh, :] /= self.rCurrentMeas # (A)
return phys
def countLines(self, fh):
# Counter lidx starts counting at 1 for the first line.
for lidx, line in enumerate(fh, 1):
pass
return lidx
def postRead(self, saveThis):
# Whether to correct for channel skew.
corrChSkewBool = True
# Channel on which the current is measured. This channel's phase is
# subtracted from the other channels in phase difference calculation.
# This channel's voltage is divided by the current measurement
# resistance to obtain a physical magnitude in Ampere units.
# Other channels voltages are divided by this channel's current to find
# impedance magnitude.
currentCh = 0
# Flip voltage channels upside-down if requested.
if saveThis == 'upsideDown':
for ch in range(self.chCount):
if ch != currentCh:
self.raw[ch, ...] *= -1
self.raw[ch, ...] += 2**16 - 1
self.computePhys(currentCh)
# Compute FFTs.
self.freq = spfftpack.fftfreq(self.n, 1 / self.fs, ) # (Hz)
self.fft = spfftpack.fft(self.raw) / self.n
# Magnitude and uncorrected phase.
self.phaseUnCorr = sp.angle(self.fft) # (rad)
self.mag16Bit = sp.absolute(self.fft)
# Convert magnitude to physical units.
f215 = float(2**15)
self.magPhys = self.mag16Bit / f215
for ch in range(self.chCount):
self.magPhys[ch, :, :] *= (self.ALoadQHi[ch] * self.In5BHi[ch] /
self.Out5BHi[ch]) # (V)
# Convert the voltage on ch0 to a current.
self.magPhys[0, :, :] /= self.rCurrentMeas # (A)
# Correct phase for channel skew.
self.phase = self.phaseUnCorr
if corrChSkewBool:
for ch in range(self.chCount):
deltaT = ch / (self.fs * self.scanChCount) # (s)
corrSlope = 2*sp.pi*deltaT # (rad/Hz)
for p in range(self.pktCount):
self.phase[ch, p, :] = sp.subtract(self.phase[ch, p, :],
self.freq * corrSlope)
# Compute phase differences.
# Be careful about angles looping through +/- pi.
# A phase difference absolute value is less than pi radian.
self.phaseDiff = sp.zeros_like(self.phase, dtype=float)
for ch in range(self.chCount):
self.phaseDiff[ch, :, :] = sp.subtract(self.phase[ch, :, :],
self.phase[currentCh, :, :])
self.phaseDiff[self.phaseDiff < -sp.pi] += 2*sp.pi
self.phaseDiff[self.phaseDiff > sp.pi] -= 2*sp.pi
# Convert phase differences from radian to milliradian.
self.phaseDiff *= 1000 # (mrad)
# Calculate apparent impedance magnitude.
self.zMag = sp.zeros_like(self.magPhys)
for ch in range(self.chCount):
# (Ohm)
self.zMag[ch, :, :] = sp.divide(self.magPhys[ch, :, :],
self.magPhys[currentCh, :, :])
# Convert to milliOhm.
self.zMag *= 1000
# Invoke the main function here.
if __name__ == "__main__":
ipProcess()
| (self.rCurrentMeas, # (Ohm) resistance.
self.rExtraSeries) = line.split(',') # (Ohm).
# Type casting.
self.rCurrentMeas = float(self.rCurrentMeas)
self.rExtraSeries = float(self.rCurrentMeas) | conditional_block |
ipProcess.py | # -*- coding: utf-8 -*-
'''
Created on Mon May 14 16:31:21 2018
@author: TimL
'''
# Copyright (c) 2019. Induced Polarization Associates, LLC, Seattle, WA
import os
import scipy as sp
import commonSense as cs
from scipy import fftpack as spfftpack
from datetime import datetime
import pickle
def ipProcess():
'''
Reads text files in a data folder and saves frequency domain results to a
pickled file to be opened later for plotting.
'''
# Processed result choice.
# 'raw': raw voltage waveforms from one packet in each file.
# 'zAnyF': impedance phase and mag at non-zero fft frequencies,
# not skipping.
saveThis = 'raw'
# Number nonzero frequencies saved to the .pkl file with the zero frequency
freqCount = 200
# Whether to save absolute phase results.
savePhase = False
# Whether to select a specific file for processing, as opposed to all of
# them.
selectedFile = True
selectedFileNum = 5
pklFolder = r'C:\Users\timl\Documents\IP_data_plots\190506_eagle'
rawFolder = os.path.join(pklFolder, 'rawData')
fileList = ([f for f in os.listdir(rawFolder)
if os.path.isfile(os.path.join(rawFolder, f))])
# Catalog the file numbers stored in each file title. Remove from the list
# files that don't have a number.
fileNumList = []
keepFileList = sp.zeros_like(fileList, dtype=bool)
for t in range(len(fileList)):
uscoreIdx = fileList[t].find('_')
dotIdx = fileList[t].find('.')
if uscoreIdx >= 0 and dotIdx >= 0:
try:
fileNum = int(fileList[t][uscoreIdx+1: dotIdx])
except ValueError:
pass
else:
if not selectedFile or (selectedFile and
fileNum == selectedFileNum):
fileNumList.append(fileNum)
# Keep this file in the list.
keepFileList[t] = True
# Drop all files from the list that didn't have a number.
# Walk through in reverse order so as to not disturb index numbers as
# elements are removed.
for t in range(len(fileList))[::-1]:
if ~keepFileList[t]:
del fileList[t]
# Convert to arrays.
fileArr = sp.array(fileList)
fileNumArr = sp.array(fileNumList, dtype=int)
del fileList
del keepFileList
# Sort by file numbers.
sortKey = fileNumArr.argsort()
fileArr = fileArr[sortKey]
fileNumArr = fileNumArr[sortKey]
# List of class instances containing recorded data.
a = []
for t in range(len(fileArr)):
a.append(fileClass())
# Read the data in from the files.
for t in range(len(a)):
# Packet choice if saving raw voltages.
rawPkt = 102 # 1-indexed.
filePath = os.path.join(rawFolder, fileArr[t])
a[t].introduce(fileArr[t])
a[t].readTxt(filePath, saveThis)
# Remove unwanted fields to cut down on the saved file size.
if saveThis == 'zAnyF' or saveThis == 'upsideDown':
del a[t].raw
del a[t].fft
del a[t].phaseUnCorr
del a[t].mag16Bit
if not savePhase:
del a[t].phase
# Array mask for saving.
mask = sp.zeros(a[t].n, dtype=bool)
# Save non-zero frequencies and the DC frequency.
mask[:freqCount + 1] = True
a[t].freq = a[t].freq[mask]
a[t].phaseDiff = a[t].phaseDiff[..., mask]
a[t].magPhys = a[t].magPhys[..., mask]
a[t].zMag = a[t].zMag[..., mask]
if savePhase:
a[t].phase = a[t].phase[..., mask]
elif saveThis == 'raw':
del a[t].fft
del a[t].phaseUnCorr
del a[t].mag16Bit
del a[t].phase
del a[t].freq
del a[t].phaseDiff
del a[t].magPhys
del a[t].zMag
p = cs.find(a[t].pkt, rawPkt)
if p == -1:
# Save the last packet if the requested packet number isn't in
# the file.
rawPkt = a[t].pkt[p]
a[t].raw = a[t].raw[:, p, :]
# Overwrite the list of packet numbers with the one packet number
# that was saved.
a[t].pkt = rawPkt
# Save the object to a file named after the folder name.
lastSlash = pklFolder.rfind('\\')
saveFile = pklFolder[lastSlash+1:] + '_' + saveThis + '.pkl'
savePath = os.path.join(pklFolder, saveFile)
# Saving the list object:
with open(savePath, 'wb') as f: # Python 3: open(..., 'wb')
pickle.dump(a, f)
class fileClass:
def introduce(self, fileName):
print('Creating %s from %s.' % (self, fileName))
def readTxt(self, filePath, saveThis):
# Read IP measurements from a text file.
with open(filePath, 'r') as fh:
# Number of lines in the file.
lineCount = self.countLines(fh)
# Rewind the pointer in the file back to the beginning.
fh.seek(0)
# Initialize the packet counter.
p = -1
# Initialize the sample index.
s = -1
for lidx, line in enumerate(fh, 1):
# Strip off trailing newline characters.
line = line.rstrip('\n')
if s >= 0:
# Read in raw voltage values.
self.raw[:, p, s] = (
sp.fromstring(line, dtype=float, sep=','))
if s == self.n - 1:
# Reset the counter to below zero.
s = -1
else:
# Increment the sample counter for the next read.
s += 1
elif lidx > 10:
if line[0] == '$':
# Increment the packet index.
p += 1
# Reset the time domain quality parameter index.
qp = 0
# Packet number
self.pkt[p] = int(line[1:])
elif line[0] == '\'':
# CPU UTC Date and Time Strings.
(self.cpuDTStr[p].d,
self.cpuDTStr[p].t) = line[1:].split(',')
# Translate to datetime object.
self.cpuDT[p] = self.str2DateTime(self.cpuDTStr[p])
elif line[0] == '@':
# GPS UTC Date and Time Strings,
# and latitude and longitude fixes.
(self.gpsDTStr[p].d,
self.gpsDTStr[p].t,
self.lat[p],
self.longi[p]) = line[1:].split(',')
# Translate to datetime object.
self.gpsDT[p] = self.str2DateTime(self.gpsDTStr[p])
# Type casting.
self.lat[p] = float(self.lat[p])
self.longi[p] = float(self.longi[p])
elif qp < 7:
qp += 1
if qp == 3 or qp == 4 or qp == 5:
typ = float # Means are saved as floats.
else:
typ = int # Counts are saved as integers.
assignArr = sp.fromstring(line, dtype=typ, sep=',')
if qp == 1:
# Count of measurements clipped on the high end of
# the MccDaq board's input range.
self.clipHi[:, p] = assignArr
elif qp == 2:
# Count of measurements clipped on the low end of
# the MccDaq board's input range.
self.clipLo[:, p] = assignArr
elif qp == 3:
# Mean measurement value over the packet as a
# percentage of the AIn() half range.
self.meanPct[:, p] = assignArr
elif qp == 4:
# (pct) Mean value of sample measurements above
# or equal to the mean.
self.meanUpPct[:, p] = assignArr
elif qp == 5:
# (pct) Mean value of sample measurements below
# the mean.
self.meanDnPct[:, p] = assignArr
elif qp == 6:
# Count of measurements above or equal to the mean.
self.countUp[:, p] = assignArr
elif qp == 7:
# Count of measurements below the mean.
self.countDn[:, p] = assignArr
# Set the sample index to 0 to start.
s = 0
elif lidx == 1:
(self.fileDateStr, # UTC date file was created.
self.fileNum) = line.split(',') # File number in set.
# Type casting.
self.fileNum = int(self.fileNum)
elif lidx == 2:
self.descript = line # Description of the test.
elif lidx == 3:
self.minor = line # Minor note.
elif lidx == 4:
self.major = line # Major note.
elif lidx == 5:
(self.scanChCount, # number of channels in each A/D scan.
self.chCount, # number of channels written to the file.
self.n, # Number of samples in the FFT time series.
self.fs, # (Hz) FFT sampling frequency.
self.xmitFund) = line.split(',') # (Hz) Transmit Square
# wave fundamental frequency.
# Type casting.
self.scanChCount = int(self.scanChCount)
self.chCount = int(self.chCount)
self.n = int(self.n)
self.fs = int(self.fs)
self.xmitFund = float(self.xmitFund)
# Each file contains a file header of length 10 lines,
# followed by packets. Packets contain (11 + n) lines each.
self.pktCount = int((lineCount - 10)/(11 + self.n))
# Dimension arrays indexed by packet.
self.dimArrays()
elif lidx == 6:
(self.rCurrentMeas, # (Ohm) resistance.
self.rExtraSeries) = line.split(',') # (Ohm).
# Type casting.
self.rCurrentMeas = float(self.rCurrentMeas)
self.rExtraSeries = float(self.rCurrentMeas)
elif lidx == 7:
# Voltage measurement names.
# 0-indexed by channel number.
self.measStr = line.split(',')
elif lidx == 8:
# Construct arrays using the scipy package.
# 5B amplifier maximum of the input range (V).
# 0-indexed by channel number.
self.In5BHi = sp.fromstring(line, dtype=float, sep=',')
elif lidx == 9:
# 5B amplifier maximum of the output range (V).
# 0-indexed by channel number.
self.Out5BHi = sp.fromstring(line, dtype=float, sep=',')
elif lidx == 10:
# MccDaq board AIn() maximum of the input range (V).
# 0-indexed by channel number.
self.ALoadQHi = sp.fromstring(line, dtype=float, sep=',')
# After the file has been read, perform some calculations.
self.postRead(saveThis)
def dimArrays(self):
# Initialize numpy arrays and python lists as zeros.
shape2D = (self.chCount, self.pktCount)
# 0-indexed by packet number.
self.pkt = sp.zeros(self.pktCount, dtype=int)
self.cpuDTStr = [cs.emptyClass()]*self.pktCount
self.cpuDT = [0]*self.pktCount
self.gpsDTStr = [cs.emptyClass()]*self.pktCount
self.gpsDT = [0]*self.pktCount
self.lat = sp.zeros(self.pktCount, dtype=float)
self.longi = sp.zeros(self.pktCount, dtype=float)
# 0-indexed by channel number.
# 0-indexed by packet number.
self.clipHi = sp.zeros(shape2D, dtype=int)
self.clipLo = sp.zeros(shape2D, dtype=int)
self.meanPct = sp.zeros(shape2D, dtype=float)
self.meanUpPct = sp.zeros(shape2D, dtype=float)
self.meanDnPct = sp.zeros(shape2D, dtype=float)
self.meanPhys = sp.zeros(shape2D, dtype=float)
self.meanUpPhys = sp.zeros(shape2D, dtype=float)
self.meanDnPhys = sp.zeros(shape2D, dtype=float)
self.countUp = sp.zeros(shape2D, dtype=int)
self.countDn = sp.zeros(shape2D, dtype=int)
# 0-indexed by channel number.
# 0-indexed by packet number.
# 0-indexed by sample number.
self.raw = sp.zeros((self.chCount, self.pktCount, self.n), dtype=float)
def str2DateTime(self, dTStr):
YY = 2000 + int(dTStr.d[0: 0+2])
MO = int(dTStr.d[2: 2+2])
DD = int(dTStr.d[4: 4+2])
HH = int(dTStr.t[0: 0+2])
MM = int(dTStr.t[2: 2+2])
SS = int(dTStr.t[4: 4+2])
micro = 1000 * int(dTStr.t[7: 7+3])
if YY == 2000:
return datetime.min
else:
return datetime(YY, MO, DD, HH, MM, SS, micro)
def computePhys(self, currentCh):
self.meanPhys = self.pct2Phys(self.meanPct, currentCh)
self.meanUpPhys = self.pct2Phys(self.meanUpPct, currentCh)
self.meanDnPhys = self.pct2Phys(self.meanDnPct, currentCh)
def pct2Phys(self, pct, currentCh):
|
def countLines(self, fh):
# Counter lidx starts counting at 1 for the first line.
for lidx, line in enumerate(fh, 1):
pass
return lidx
def postRead(self, saveThis):
# Whether to correct for channel skew.
corrChSkewBool = True
# Channel on which the current is measured. This channel's phase is
# subtracted from the other channels in phase difference calculation.
# This channel's voltage is divided by the current measurement
# resistance to obtain a physical magnitude in Ampere units.
# Other channels voltages are divided by this channel's current to find
# impedance magnitude.
currentCh = 0
# Flip voltage channels upside-down if requested.
if saveThis == 'upsideDown':
for ch in range(self.chCount):
if ch != currentCh:
self.raw[ch, ...] *= -1
self.raw[ch, ...] += 2**16 - 1
self.computePhys(currentCh)
# Compute FFTs.
self.freq = spfftpack.fftfreq(self.n, 1 / self.fs, ) # (Hz)
self.fft = spfftpack.fft(self.raw) / self.n
# Magnitude and uncorrected phase.
self.phaseUnCorr = sp.angle(self.fft) # (rad)
self.mag16Bit = sp.absolute(self.fft)
# Convert magnitude to physical units.
f215 = float(2**15)
self.magPhys = self.mag16Bit / f215
for ch in range(self.chCount):
self.magPhys[ch, :, :] *= (self.ALoadQHi[ch] * self.In5BHi[ch] /
self.Out5BHi[ch]) # (V)
# Convert the voltage on ch0 to a current.
self.magPhys[0, :, :] /= self.rCurrentMeas # (A)
# Correct phase for channel skew.
self.phase = self.phaseUnCorr
if corrChSkewBool:
for ch in range(self.chCount):
deltaT = ch / (self.fs * self.scanChCount) # (s)
corrSlope = 2*sp.pi*deltaT # (rad/Hz)
for p in range(self.pktCount):
self.phase[ch, p, :] = sp.subtract(self.phase[ch, p, :],
self.freq * corrSlope)
# Compute phase differences.
# Be careful about angles looping through +/- pi.
# A phase difference absolute value is less than pi radian.
self.phaseDiff = sp.zeros_like(self.phase, dtype=float)
for ch in range(self.chCount):
self.phaseDiff[ch, :, :] = sp.subtract(self.phase[ch, :, :],
self.phase[currentCh, :, :])
self.phaseDiff[self.phaseDiff < -sp.pi] += 2*sp.pi
self.phaseDiff[self.phaseDiff > sp.pi] -= 2*sp.pi
# Convert phase differences from radian to milliradian.
self.phaseDiff *= 1000 # (mrad)
# Calculate apparent impedance magnitude.
self.zMag = sp.zeros_like(self.magPhys)
for ch in range(self.chCount):
# (Ohm)
self.zMag[ch, :, :] = sp.divide(self.magPhys[ch, :, :],
self.magPhys[currentCh, :, :])
# Convert to milliOhm.
self.zMag *= 1000
# Invoke the main function here.
if __name__ == "__main__":
ipProcess()
| phys = sp.zeros_like(pct, dtype=float)
for ch in range(self.chCount):
phys[ch, :] = (pct[ch, :] / 100 *
self.ALoadQHi[ch] * self.In5BHi[ch] /
self.Out5BHi[ch]) # (V)
# Convert the voltage on the current measurement channel to a current.
phys[currentCh, :] /= self.rCurrentMeas # (A)
return phys | identifier_body |
ipProcess.py | # -*- coding: utf-8 -*-
'''
Created on Mon May 14 16:31:21 2018
@author: TimL
'''
# Copyright (c) 2019. Induced Polarization Associates, LLC, Seattle, WA
import os
import scipy as sp
import commonSense as cs
from scipy import fftpack as spfftpack
from datetime import datetime
import pickle
def ipProcess():
'''
Reads text files in a data folder and saves frequency domain results to a
pickled file to be opened later for plotting.
'''
# Processed result choice.
# 'raw': raw voltage waveforms from one packet in each file.
# 'zAnyF': impedance phase and mag at non-zero fft frequencies,
# not skipping.
saveThis = 'raw'
# Number nonzero frequencies saved to the .pkl file with the zero frequency
freqCount = 200
# Whether to save absolute phase results.
savePhase = False
# Whether to select a specific file for processing, as opposed to all of
# them.
selectedFile = True
selectedFileNum = 5
pklFolder = r'C:\Users\timl\Documents\IP_data_plots\190506_eagle'
rawFolder = os.path.join(pklFolder, 'rawData')
fileList = ([f for f in os.listdir(rawFolder)
if os.path.isfile(os.path.join(rawFolder, f))])
# Catalog the file numbers stored in each file title. Remove from the list
# files that don't have a number.
fileNumList = []
keepFileList = sp.zeros_like(fileList, dtype=bool)
for t in range(len(fileList)):
uscoreIdx = fileList[t].find('_')
dotIdx = fileList[t].find('.')
if uscoreIdx >= 0 and dotIdx >= 0:
try:
fileNum = int(fileList[t][uscoreIdx+1: dotIdx])
except ValueError:
pass
else:
if not selectedFile or (selectedFile and
fileNum == selectedFileNum):
fileNumList.append(fileNum)
# Keep this file in the list.
keepFileList[t] = True
# Drop all files from the list that didn't have a number.
# Walk through in reverse order so as to not disturb index numbers as
# elements are removed.
for t in range(len(fileList))[::-1]:
if ~keepFileList[t]:
del fileList[t]
# Convert to arrays.
fileArr = sp.array(fileList)
fileNumArr = sp.array(fileNumList, dtype=int)
del fileList
del keepFileList
# Sort by file numbers.
sortKey = fileNumArr.argsort()
fileArr = fileArr[sortKey]
fileNumArr = fileNumArr[sortKey]
# List of class instances containing recorded data.
a = []
for t in range(len(fileArr)):
a.append(fileClass())
# Read the data in from the files.
for t in range(len(a)):
# Packet choice if saving raw voltages.
rawPkt = 102 # 1-indexed.
filePath = os.path.join(rawFolder, fileArr[t])
a[t].introduce(fileArr[t])
a[t].readTxt(filePath, saveThis)
# Remove unwanted fields to cut down on the saved file size.
if saveThis == 'zAnyF' or saveThis == 'upsideDown':
del a[t].raw
del a[t].fft
del a[t].phaseUnCorr
del a[t].mag16Bit
if not savePhase:
del a[t].phase
# Array mask for saving.
mask = sp.zeros(a[t].n, dtype=bool)
# Save non-zero frequencies and the DC frequency.
mask[:freqCount + 1] = True
a[t].freq = a[t].freq[mask]
a[t].phaseDiff = a[t].phaseDiff[..., mask]
a[t].magPhys = a[t].magPhys[..., mask]
a[t].zMag = a[t].zMag[..., mask]
if savePhase:
a[t].phase = a[t].phase[..., mask]
elif saveThis == 'raw':
del a[t].fft
del a[t].phaseUnCorr
del a[t].mag16Bit
del a[t].phase
del a[t].freq
del a[t].phaseDiff
del a[t].magPhys
del a[t].zMag
p = cs.find(a[t].pkt, rawPkt)
if p == -1:
# Save the last packet if the requested packet number isn't in
# the file.
rawPkt = a[t].pkt[p]
a[t].raw = a[t].raw[:, p, :]
# Overwrite the list of packet numbers with the one packet number
# that was saved.
a[t].pkt = rawPkt
# Save the object to a file named after the folder name.
lastSlash = pklFolder.rfind('\\')
saveFile = pklFolder[lastSlash+1:] + '_' + saveThis + '.pkl'
savePath = os.path.join(pklFolder, saveFile)
# Saving the list object:
with open(savePath, 'wb') as f: # Python 3: open(..., 'wb')
pickle.dump(a, f)
class fileClass:
def introduce(self, fileName):
print('Creating %s from %s.' % (self, fileName))
def readTxt(self, filePath, saveThis):
# Read IP measurements from a text file.
with open(filePath, 'r') as fh:
# Number of lines in the file.
lineCount = self.countLines(fh)
# Rewind the pointer in the file back to the beginning.
fh.seek(0)
# Initialize the packet counter.
p = -1
# Initialize the sample index.
s = -1
for lidx, line in enumerate(fh, 1):
# Strip off trailing newline characters.
line = line.rstrip('\n')
if s >= 0:
# Read in raw voltage values.
self.raw[:, p, s] = (
sp.fromstring(line, dtype=float, sep=','))
if s == self.n - 1:
# Reset the counter to below zero.
s = -1
else:
# Increment the sample counter for the next read.
s += 1
elif lidx > 10:
if line[0] == '$':
# Increment the packet index.
p += 1
# Reset the time domain quality parameter index.
qp = 0
# Packet number
self.pkt[p] = int(line[1:])
elif line[0] == '\'':
# CPU UTC Date and Time Strings.
(self.cpuDTStr[p].d,
self.cpuDTStr[p].t) = line[1:].split(',')
# Translate to datetime object.
self.cpuDT[p] = self.str2DateTime(self.cpuDTStr[p])
elif line[0] == '@':
# GPS UTC Date and Time Strings,
# and latitude and longitude fixes.
(self.gpsDTStr[p].d,
self.gpsDTStr[p].t,
self.lat[p],
self.longi[p]) = line[1:].split(',')
# Translate to datetime object.
self.gpsDT[p] = self.str2DateTime(self.gpsDTStr[p])
# Type casting.
self.lat[p] = float(self.lat[p])
self.longi[p] = float(self.longi[p])
elif qp < 7:
qp += 1
if qp == 3 or qp == 4 or qp == 5:
typ = float # Means are saved as floats.
else:
typ = int # Counts are saved as integers.
assignArr = sp.fromstring(line, dtype=typ, sep=',')
if qp == 1:
# Count of measurements clipped on the high end of
# the MccDaq board's input range.
self.clipHi[:, p] = assignArr
elif qp == 2:
# Count of measurements clipped on the low end of
# the MccDaq board's input range.
self.clipLo[:, p] = assignArr
elif qp == 3:
# Mean measurement value over the packet as a
# percentage of the AIn() half range.
self.meanPct[:, p] = assignArr
elif qp == 4:
# (pct) Mean value of sample measurements above
# or equal to the mean.
self.meanUpPct[:, p] = assignArr
elif qp == 5:
# (pct) Mean value of sample measurements below
# the mean.
self.meanDnPct[:, p] = assignArr
elif qp == 6:
# Count of measurements above or equal to the mean.
self.countUp[:, p] = assignArr
elif qp == 7:
# Count of measurements below the mean.
self.countDn[:, p] = assignArr
# Set the sample index to 0 to start.
s = 0
elif lidx == 1:
(self.fileDateStr, # UTC date file was created.
self.fileNum) = line.split(',') # File number in set.
# Type casting.
self.fileNum = int(self.fileNum)
elif lidx == 2:
self.descript = line # Description of the test.
elif lidx == 3:
self.minor = line # Minor note.
elif lidx == 4:
self.major = line # Major note.
elif lidx == 5:
(self.scanChCount, # number of channels in each A/D scan.
self.chCount, # number of channels written to the file.
self.n, # Number of samples in the FFT time series.
self.fs, # (Hz) FFT sampling frequency.
self.xmitFund) = line.split(',') # (Hz) Transmit Square
# wave fundamental frequency.
# Type casting.
self.scanChCount = int(self.scanChCount)
self.chCount = int(self.chCount)
self.n = int(self.n)
self.fs = int(self.fs)
self.xmitFund = float(self.xmitFund)
# Each file contains a file header of length 10 lines,
# followed by packets. Packets contain (11 + n) lines each.
self.pktCount = int((lineCount - 10)/(11 + self.n))
# Dimension arrays indexed by packet.
self.dimArrays()
elif lidx == 6:
(self.rCurrentMeas, # (Ohm) resistance.
self.rExtraSeries) = line.split(',') # (Ohm).
# Type casting.
self.rCurrentMeas = float(self.rCurrentMeas)
self.rExtraSeries = float(self.rCurrentMeas)
elif lidx == 7:
# Voltage measurement names.
# 0-indexed by channel number.
self.measStr = line.split(',')
elif lidx == 8:
# Construct arrays using the scipy package.
# 5B amplifier maximum of the input range (V).
# 0-indexed by channel number.
self.In5BHi = sp.fromstring(line, dtype=float, sep=',')
elif lidx == 9:
# 5B amplifier maximum of the output range (V).
# 0-indexed by channel number.
self.Out5BHi = sp.fromstring(line, dtype=float, sep=',')
elif lidx == 10:
# MccDaq board AIn() maximum of the input range (V).
# 0-indexed by channel number.
self.ALoadQHi = sp.fromstring(line, dtype=float, sep=',')
# After the file has been read, perform some calculations.
self.postRead(saveThis)
def dimArrays(self):
# Initialize numpy arrays and python lists as zeros.
shape2D = (self.chCount, self.pktCount)
# 0-indexed by packet number.
self.pkt = sp.zeros(self.pktCount, dtype=int)
self.cpuDTStr = [cs.emptyClass()]*self.pktCount
self.cpuDT = [0]*self.pktCount
self.gpsDTStr = [cs.emptyClass()]*self.pktCount
self.gpsDT = [0]*self.pktCount
self.lat = sp.zeros(self.pktCount, dtype=float)
self.longi = sp.zeros(self.pktCount, dtype=float)
# 0-indexed by channel number.
# 0-indexed by packet number.
self.clipHi = sp.zeros(shape2D, dtype=int)
self.clipLo = sp.zeros(shape2D, dtype=int)
self.meanPct = sp.zeros(shape2D, dtype=float)
self.meanUpPct = sp.zeros(shape2D, dtype=float)
self.meanDnPct = sp.zeros(shape2D, dtype=float)
self.meanPhys = sp.zeros(shape2D, dtype=float)
self.meanUpPhys = sp.zeros(shape2D, dtype=float)
self.meanDnPhys = sp.zeros(shape2D, dtype=float)
self.countUp = sp.zeros(shape2D, dtype=int)
self.countDn = sp.zeros(shape2D, dtype=int)
# 0-indexed by channel number.
# 0-indexed by packet number.
# 0-indexed by sample number.
self.raw = sp.zeros((self.chCount, self.pktCount, self.n), dtype=float)
def str2DateTime(self, dTStr):
YY = 2000 + int(dTStr.d[0: 0+2])
MO = int(dTStr.d[2: 2+2])
DD = int(dTStr.d[4: 4+2])
HH = int(dTStr.t[0: 0+2])
MM = int(dTStr.t[2: 2+2])
SS = int(dTStr.t[4: 4+2])
micro = 1000 * int(dTStr.t[7: 7+3])
if YY == 2000:
return datetime.min
else:
return datetime(YY, MO, DD, HH, MM, SS, micro)
def computePhys(self, currentCh):
self.meanPhys = self.pct2Phys(self.meanPct, currentCh)
self.meanUpPhys = self.pct2Phys(self.meanUpPct, currentCh)
self.meanDnPhys = self.pct2Phys(self.meanDnPct, currentCh)
def pct2Phys(self, pct, currentCh):
phys = sp.zeros_like(pct, dtype=float)
for ch in range(self.chCount):
phys[ch, :] = (pct[ch, :] / 100 *
self.ALoadQHi[ch] * self.In5BHi[ch] /
self.Out5BHi[ch]) # (V)
# Convert the voltage on the current measurement channel to a current.
phys[currentCh, :] /= self.rCurrentMeas # (A)
return phys
def countLines(self, fh):
# Counter lidx starts counting at 1 for the first line.
for lidx, line in enumerate(fh, 1):
pass
return lidx
def | (self, saveThis):
# Whether to correct for channel skew.
corrChSkewBool = True
# Channel on which the current is measured. This channel's phase is
# subtracted from the other channels in phase difference calculation.
# This channel's voltage is divided by the current measurement
# resistance to obtain a physical magnitude in Ampere units.
# Other channels voltages are divided by this channel's current to find
# impedance magnitude.
currentCh = 0
# Flip voltage channels upside-down if requested.
if saveThis == 'upsideDown':
for ch in range(self.chCount):
if ch != currentCh:
self.raw[ch, ...] *= -1
self.raw[ch, ...] += 2**16 - 1
self.computePhys(currentCh)
# Compute FFTs.
self.freq = spfftpack.fftfreq(self.n, 1 / self.fs, ) # (Hz)
self.fft = spfftpack.fft(self.raw) / self.n
# Magnitude and uncorrected phase.
self.phaseUnCorr = sp.angle(self.fft) # (rad)
self.mag16Bit = sp.absolute(self.fft)
# Convert magnitude to physical units.
f215 = float(2**15)
self.magPhys = self.mag16Bit / f215
for ch in range(self.chCount):
self.magPhys[ch, :, :] *= (self.ALoadQHi[ch] * self.In5BHi[ch] /
self.Out5BHi[ch]) # (V)
# Convert the voltage on ch0 to a current.
self.magPhys[0, :, :] /= self.rCurrentMeas # (A)
# Correct phase for channel skew.
self.phase = self.phaseUnCorr
if corrChSkewBool:
for ch in range(self.chCount):
deltaT = ch / (self.fs * self.scanChCount) # (s)
corrSlope = 2*sp.pi*deltaT # (rad/Hz)
for p in range(self.pktCount):
self.phase[ch, p, :] = sp.subtract(self.phase[ch, p, :],
self.freq * corrSlope)
# Compute phase differences.
# Be careful about angles looping through +/- pi.
# A phase difference absolute value is less than pi radian.
self.phaseDiff = sp.zeros_like(self.phase, dtype=float)
for ch in range(self.chCount):
self.phaseDiff[ch, :, :] = sp.subtract(self.phase[ch, :, :],
self.phase[currentCh, :, :])
self.phaseDiff[self.phaseDiff < -sp.pi] += 2*sp.pi
self.phaseDiff[self.phaseDiff > sp.pi] -= 2*sp.pi
# Convert phase differences from radian to milliradian.
self.phaseDiff *= 1000 # (mrad)
# Calculate apparent impedance magnitude.
self.zMag = sp.zeros_like(self.magPhys)
for ch in range(self.chCount):
# (Ohm)
self.zMag[ch, :, :] = sp.divide(self.magPhys[ch, :, :],
self.magPhys[currentCh, :, :])
# Convert to milliOhm.
self.zMag *= 1000
# Invoke the main function here.
if __name__ == "__main__":
ipProcess()
| postRead | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.