file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
provision_utils.go | package sparta
import (
"archive/zip"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"text/template"
"github.com/Sirupsen/logrus"
spartaIAM "github.com/mweagle/Sparta/aws/iam"
"github.com/mweagle/cloudformationresources"
gocf "github.com/mweagle/go-cloudformation"
)
const (
// ScratchDirectory is the cwd relative path component
// where intermediate build artifacts are created
ScratchDirectory = ".sparta"
salt = "213EA743-A98F-499D-8FEF-B87015FE13E7"
// The relative path of the custom scripts that is used
// to create the filename relative path when creating the custom archive
provisioningResourcesRelPath = "/resources/provision"
)
// The basename of the scripts that are embedded into CONSTANTS.go
// by `esc` during the generate phase. In order to export these, there
// MUST be a corresponding PROXIED_MODULES entry for the base filename
// in resources/index.js
var customResourceScripts = []string{"sparta_utils.js",
"golang-constants.json"}
var golangCustomResourceTypes = []string{
cloudformationresources.SESLambdaEventSource,
cloudformationresources.S3LambdaEventSource,
cloudformationresources.SNSLambdaEventSource,
cloudformationresources.CloudWatchLogsLambdaEventSource,
cloudformationresources.ZipToS3Bucket,
}
// PushSourceConfigurationActions map stores common IAM Policy Actions for Lambda
// push-source configuration management.
// The configuration is handled by CustomResources inserted into the generated
// CloudFormation template.
var PushSourceConfigurationActions = struct {
SNSLambdaEventSource []string
S3LambdaEventSource []string
SESLambdaEventSource []string
CloudWatchLogsLambdaEventSource []string
}{
SNSLambdaEventSource: []string{"sns:ConfirmSubscription",
"sns:GetTopicAttributes",
"sns:ListSubscriptionsByTopic",
"sns:Subscribe",
"sns:Unsubscribe"},
S3LambdaEventSource: []string{"s3:GetBucketLocation",
"s3:GetBucketNotification",
"s3:PutBucketNotification",
"s3:GetBucketNotificationConfiguration",
"s3:PutBucketNotificationConfiguration"},
SESLambdaEventSource: []string{"ses:CreateReceiptRuleSet",
"ses:CreateReceiptRule",
"ses:DeleteReceiptRule",
"ses:DeleteReceiptRuleSet",
"ses:DescribeReceiptRuleSet"},
CloudWatchLogsLambdaEventSource: []string{"logs:DescribeSubscriptionFilters",
"logs:DeleteSubscriptionFilter",
"logs:PutSubscriptionFilter",
},
}
// temporaryFile creates (or truncates) a stable, named file inside the
// ".sparta" build directory under the current working directory. A stable
// name (rather than an os.CreateTemp random suffix) keeps successive
// builds idempotent: each build overwrites the previous artifact.
//
// The caller owns the returned *os.File and is responsible for closing it.
func temporaryFile(name string) (*os.File, error) {
	workingDir, err := os.Getwd()
	if err != nil {
		return nil, err
	}
	// Put everything in the ./.sparta scratch directory
	buildDir := filepath.Join(workingDir, ".sparta")
	if mkdirErr := os.MkdirAll(buildDir, os.ModePerm); mkdirErr != nil {
		return nil, mkdirErr
	}
	// Use a stable temporary name so repeated builds reuse the same path
	temporaryPath := filepath.Join(buildDir, name)
	tmpFile, err := os.Create(temporaryPath)
	if err != nil {
		// Wrap with %w so callers can inspect the underlying *os.PathError
		return nil, fmt.Errorf("creating temporary file %q: %w", temporaryPath, err)
	}
	return tmpFile, nil
}
// runOSCommand executes the supplied command, mirroring both its stdout
// and stderr streams into the provided logger, and returns whatever error
// cmd.Run() produces.
func runOSCommand(cmd *exec.Cmd, logger *logrus.Logger) error {
	logger.WithFields(logrus.Fields{
		"Arguments": cmd.Args,
		"Dir":       cmd.Dir,
		"Path":      cmd.Path,
		"Env":       cmd.Env,
	}).Debug("Running Command")

	// Route all process output through the logger's io.Writer adapter.
	commandOutput := logger.Writer()
	defer commandOutput.Close()
	cmd.Stdout = commandOutput
	cmd.Stderr = commandOutput
	return cmd.Run()
}
// awsPrincipalToService extracts the service component from an AWS principal
// name (eg, "s3.amazonaws.com" => "S3"): the text before the first '.'
// separator, upper-cased. Names without a '.' are upper-cased whole.
func awsPrincipalToService(awsPrincipalName string) string {
	serviceName := awsPrincipalName
	if separatorIndex := strings.Index(awsPrincipalName, "."); separatorIndex != -1 {
		serviceName = awsPrincipalName[:separatorIndex]
	}
	return strings.ToUpper(serviceName)
}
// ensureCustomResourceHandler ensures that the Lambda function backing the
// CloudFormation CustomResource for the given AWS principal
// (customResourceTypeName, eg "s3.amazonaws.com") exists in the template
// exactly once, and returns its stable CloudFormation resource name.
// The handler's IAM role is created/extended first (via
// ensureIAMRoleForCustomResource) so the role covers sourceArn.
func ensureCustomResourceHandler(serviceName string,
	useCGO bool,
	customResourceTypeName string,
	sourceArn *gocf.StringExpr,
	dependsOn []string,
	template *gocf.Template,
	S3Bucket string,
	S3Key string,
	logger *logrus.Logger) (string, error) {
	// AWS service basename
	awsServiceName := awsPrincipalToService(customResourceTypeName)
	// Use a stable resource CloudFormation resource name to represent
	// the single CustomResource that can configure the different
	// PushSource's for the given principal. The JSON-marshaled key makes
	// the generated name deterministic per (principal, service) pair.
	keyName, err := json.Marshal(ArbitraryJSONObject{
		"Principal":   customResourceTypeName,
		"ServiceName": awsServiceName,
	})
	if err != nil {
		logger.Error("Failed to create configurator resource name: ", err.Error())
		return "", err
	}
	resourceBaseName := fmt.Sprintf("%sCustomResource", awsServiceName)
	subscriberHandlerName := CloudFormationResourceName(resourceBaseName, string(keyName))
	//////////////////////////////////////////////////////////////////////////////
	// IAM Role definition
	iamResourceName, err := ensureIAMRoleForCustomResource(customResourceTypeName,
		sourceArn,
		template,
		logger)
	if nil != err {
		return "", err
	}
	iamRoleRef := gocf.GetAtt(iamResourceName, "Arn")
	// Insert the Lambda handler at most once; later calls for the same
	// principal reuse the already-registered resource.
	_, exists := template.Resources[subscriberHandlerName]
	if !exists {
		logger.WithFields(logrus.Fields{
			"Service": customResourceTypeName,
		}).Debug("Including Lambda CustomResource for AWS Service")
		configuratorDescription := customResourceDescription(serviceName, customResourceTypeName)
		//////////////////////////////////////////////////////////////////////////////
		// Custom Resource Lambda Handler
		// The export name MUST correspond to the createForwarder entry that is dynamically
		// written into the index.js file during compile in createNewSpartaCustomResourceEntry
		handlerName := lambdaExportNameForCustomResourceType(customResourceTypeName)
		logger.WithFields(logrus.Fields{
			"CustomResourceType": customResourceTypeName,
			"ScriptExport":       handlerName,
		}).Debug("Sparta CloudFormation custom resource handler info")
		customResourceHandlerDef := gocf.LambdaFunction{
			Code: &gocf.LambdaFunctionCode{
				S3Bucket: gocf.String(S3Bucket),
				S3Key:    gocf.String(S3Key),
			},
			Description: gocf.String(configuratorDescription),
			Handler:     gocf.String(handlerName),
			Role:        iamRoleRef,
			Timeout:     gocf.Integer(30),
		}
		// CGO builds dispatch through the Python runtime; otherwise NodeJS.
		if useCGO {
			customResourceHandlerDef.Runtime = gocf.String(PythonVersion)
		} else {
			customResourceHandlerDef.Runtime = gocf.String(NodeJSVersion)
		}
		cfResource := template.AddResource(subscriberHandlerName, customResourceHandlerDef)
		// Propagate any caller-supplied DependsOn ordering constraints.
		if nil != dependsOn && (len(dependsOn) > 0) {
			cfResource.DependsOn = append(cfResource.DependsOn, dependsOn...)
		}
	}
	return subscriberHandlerName, nil
}
// ensureIAMRoleForCustomResource ensures that the single IAM::Role for a single
// AWS principal (eg, s3.*.*) exists, and includes statements for the given
// sourceArn. Sparta uses a single IAM::Role for the CustomResource configuration
// lambda, which is the union of all Arns in the application.
// Returns the stable CloudFormation resource name of that role, or an error
// for unsupported principals / malformed role properties.
func ensureIAMRoleForCustomResource(awsPrincipalName string,
	sourceArn *gocf.StringExpr,
	template *gocf.Template,
	logger *logrus.Logger) (string, error) {
	// Map the principal to the IAM actions its configurator needs.
	var principalActions []string
	switch awsPrincipalName {
	case cloudformationresources.SNSLambdaEventSource:
		principalActions = PushSourceConfigurationActions.SNSLambdaEventSource
	case cloudformationresources.S3LambdaEventSource:
		principalActions = PushSourceConfigurationActions.S3LambdaEventSource
	case cloudformationresources.SESLambdaEventSource:
		principalActions = PushSourceConfigurationActions.SESLambdaEventSource
	case cloudformationresources.CloudWatchLogsLambdaEventSource:
		principalActions = PushSourceConfigurationActions.CloudWatchLogsLambdaEventSource
	default:
		return "", fmt.Errorf("Unsupported principal for IAM role creation: %s", awsPrincipalName)
	}
	// What's the stable IAMRoleName?
	resourceBaseName := fmt.Sprintf("CustomResource%sIAMRole", awsPrincipalToService(awsPrincipalName))
	stableRoleName := CloudFormationResourceName(resourceBaseName, awsPrincipalName)
	// Ensure it exists, then check to see if this Source ARN is already specified...
	// Checking equality with Stringable?
	// Create a new Role
	var existingIAMRole *gocf.IAMRole
	existingResource, exists := template.Resources[stableRoleName]
	logger.WithFields(logrus.Fields{
		"PrincipalActions": principalActions,
		"SourceArn":        sourceArn,
	}).Debug("Ensuring IAM Role results")
	if !exists {
		// Insert the IAM role here. We'll walk the policies data in the next section
		// to make sure that the sourceARN we have is in the list
		statements := CommonIAMStatements.Core
		iamPolicyList := gocf.IAMRolePolicyList{}
		iamPolicyList = append(iamPolicyList,
			gocf.IAMRolePolicy{
				PolicyDocument: ArbitraryJSONObject{
					"Version":   "2012-10-17",
					"Statement": statements,
				},
				PolicyName: gocf.String(fmt.Sprintf("%sPolicy", stableRoleName)),
			},
		)
		existingIAMRole = &gocf.IAMRole{
			AssumeRolePolicyDocument: AssumePolicyDocument,
			Policies:                 &iamPolicyList,
		}
		template.AddResource(stableRoleName, existingIAMRole)
		// Create a new IAM Role resource
		logger.WithFields(logrus.Fields{
			"RoleName": stableRoleName,
		}).Debug("Inserting IAM Role")
	} else {
		// NOTE(review): assumes the pre-existing template entry really is an
		// IAM::Role; the type assertion panics otherwise — verify no other
		// code path registers a different type under this name.
		existingIAMRole = existingResource.Properties.(*gocf.IAMRole)
	}
	// Walk the existing statements
	if nil != existingIAMRole.Policies {
		for _, eachPolicy := range *existingIAMRole.Policies {
			// NOTE(review): the assertions below assume PolicyDocument and its
			// "Statement" entry keep the concrete types written in the !exists
			// branch above — confirm if policies can be populated elsewhere.
			policyDoc := eachPolicy.PolicyDocument.(ArbitraryJSONObject)
			statements := policyDoc["Statement"]
			for _, eachStatement := range statements.([]spartaIAM.PolicyStatement) {
				// Already authorized for this ARN? Nothing more to add.
				if sourceArn.String() == eachStatement.Resource.String() {
					logger.WithFields(logrus.Fields{
						"RoleName":  stableRoleName,
						"SourceArn": sourceArn.String(),
					}).Debug("SourceArn already exists for IAM Policy")
					return stableRoleName, nil
				}
			}
		}
		logger.WithFields(logrus.Fields{
			"RoleName": stableRoleName,
			"Action":   principalActions,
			"Resource": sourceArn,
		}).Debug("Inserting Actions for configuration ARN")
		// Add this statement to the first policy, iff the actions are non-empty
		if len(principalActions) > 0 {
			rootPolicy := (*existingIAMRole.Policies)[0]
			rootPolicyDoc := rootPolicy.PolicyDocument.(ArbitraryJSONObject)
			rootPolicyStatements := rootPolicyDoc["Statement"].([]spartaIAM.PolicyStatement)
			rootPolicyDoc["Statement"] = append(rootPolicyStatements, spartaIAM.PolicyStatement{
				Effect:   "Allow",
				Action:   principalActions,
				Resource: sourceArn,
			})
		}
		return stableRoleName, nil
	}
	// Policies was nil: role exists but has no policy list to extend.
	return "", fmt.Errorf("Unable to find Policies entry for IAM role: %s", stableRoleName)
}
// writeCustomResources embeds each of the customResourceScripts assets into
// the supplied ZIP archive, one entry per script basename. Returns the first
// archive-creation or copy error encountered.
func writeCustomResources(zipWriter *zip.Writer,
	logger *logrus.Logger) error {
	for _, scriptBaseName := range customResourceScripts {
		// Resolve the embedded asset path and load its contents first
		// (the esc accessor panics on a missing asset).
		embeddedPath := fmt.Sprintf("%s/%s", provisioningResourcesRelPath, scriptBaseName)
		scriptSource := _escFSMustString(false, embeddedPath)
		archiveEntry, createErr := zipWriter.Create(scriptBaseName)
		if createErr != nil {
			return createErr
		}
		logger.WithFields(logrus.Fields{
			"Name": scriptBaseName,
		}).Debug("Script name")
		if _, writeErr := io.Copy(archiveEntry, strings.NewReader(scriptSource)); writeErr != nil {
			return writeErr
		}
	}
	return nil
}
// createUserCustomResourceEntry returns the NodeJS export statement that binds
// the user-defined CustomResource's sanitized export name to a createForwarder
// pass-through for its golang function.
func createUserCustomResourceEntry(customResource *customResourceInfo, logger *logrus.Logger) string {
	// The resource name is a :: delimited one, so the export name comes from
	// the sanitized (valid-JS-identifier) form.
	exportName := customResource.scriptExportHandlerName()
	logger.WithFields(logrus.Fields{
		"UserFunction":       customResource.userFunctionName,
		"NodeJSFunctionName": exportName,
	}).Debug("Registering User CustomResource function")
	return fmt.Sprintf("exports[\"%s\"] = createForwarder(\"/%s\");\n",
		exportName,
		customResource.userFunctionName)
}
// createNewNodeJSProxyEntry returns the NodeJS export statement that exposes
// the lambda's script export name as a createForwarder pass-through proxy to
// the golang HTTP handler.
func createNewNodeJSProxyEntry(lambdaInfo *LambdaAWSInfo, logger *logrus.Logger) string {
	logger.WithFields(logrus.Fields{
		"FunctionName": lambdaInfo.lambdaFunctionName(),
		"ScriptName":   lambdaInfo.scriptExportHandlerName(),
	}).Info("Registering Sparta JS function")
	// We do know the CF resource name here - could write this into
	// index.js and expose a GET localhost:9000/lambdaMetadata
	// which wraps up DescribeStackResource for the running
	// lambda function
	return fmt.Sprintf("exports[\"%s\"] = createForwarder(\"/%s\");\n",
		lambdaInfo.scriptExportHandlerName(),
		lambdaInfo.lambdaFunctionName())
}
// createNewSpartaNodeJSCustomResourceEntry returns the NodeJS export shim for
// a Sparta-internal CustomResource type. The ::-delimited CloudFormation
// resource name is first sanitized into a valid JS identifier.
// NOTE: the logger parameter is accepted for signature parity but unused.
func createNewSpartaNodeJSCustomResourceEntry(resourceName string, logger *logrus.Logger) string {
	sanitizedName := scriptExportNameForCustomResourceType(resourceName)
	return fmt.Sprintf("exports[\"%s\"] = createForwarder(\"/%s\");\n",
		sanitizedName,
		resourceName)
}
// insertNodeJSProxyResources writes the dynamically generated index.js
// (embedded template + per-lambda createForwarder exports + Sparta constants)
// into the deployment ZIP archive, then embeds the supporting CustomResource
// scripts via writeCustomResources.
func insertNodeJSProxyResources(serviceName string,
	executableOutput string,
	lambdaAWSInfos []*LambdaAWSInfo,
	zipWriter *zip.Writer,
	logger *logrus.Logger) error {
	// Add the string literal adapter, which requires us to add exported
	// functions to the end of index.js. These NodeJS exports will be
	// linked to the AWS Lambda NodeJS function name, and are basically
	// automatically generated pass through proxies to the golang HTTP handler.
	nodeJSWriter, err := zipWriter.Create("index.js")
	if err != nil {
		return errors.New("Failed to create ZIP entry: index.js")
	}
	nodeJSSource := _escFSMustString(false, "/resources/index.js")
	nodeJSSource += "\n// DO NOT EDIT - CONTENT UNTIL EOF IS AUTOMATICALLY GENERATED\n"
	// Track already-emitted export names so shared handlers are only
	// exported once.
	handlerNames := make(map[string]bool, 0)
	for _, eachLambda := range lambdaAWSInfos {
		if _, exists := handlerNames[eachLambda.scriptExportHandlerName()]; !exists {
			nodeJSSource += createNewNodeJSProxyEntry(eachLambda, logger)
			handlerNames[eachLambda.scriptExportHandlerName()] = true
		}
		// USER DEFINED RESOURCES
		for _, eachCustomResource := range eachLambda.customResources {
			if _, exists := handlerNames[eachCustomResource.scriptExportHandlerName()]; !exists {
				nodeJSSource += createUserCustomResourceEntry(eachCustomResource, logger)
				handlerNames[eachCustomResource.scriptExportHandlerName()] = true
			}
		}
	}
	// SPARTA CUSTOM RESOURCES
	for _, eachCustomResourceName := range golangCustomResourceTypes {
		nodeJSSource += createNewSpartaNodeJSCustomResourceEntry(eachCustomResourceName, logger)
	}
	// Finally, replace
	// SPARTA_BINARY_NAME = 'Sparta.lambda.amd64';
	// with the service binary name
	nodeJSSource += fmt.Sprintf("SPARTA_BINARY_NAME='%s';\n", executableOutput)
	// And the service name
	nodeJSSource += fmt.Sprintf("SPARTA_SERVICE_NAME='%s';\n", serviceName)
	logger.WithFields(logrus.Fields{
		"index.js": nodeJSSource,
	}).Debug("Dynamically generated NodeJS adapter")
	stringReader := strings.NewReader(nodeJSSource)
	_, copyErr := io.Copy(nodeJSWriter, stringReader)
	if nil != copyErr {
		return copyErr
	}
	// Next embed the custom resource scripts into the package.
	logger.Debug("Embedding CustomResource scripts")
	return writeCustomResources(zipWriter, logger)
}
// pythonFunctionEntry returns the Python function definition named
// scriptExportName whose body forwards (event, context) to the shared
// lambda_handler dispatcher keyed by lambdaFunctionName.
// NOTE(review): the 4-space indent inside the raw string is required for the
// generated Python to be syntactically valid — confirm against the original
// source, as the surrounding dump stripped whitespace.
func pythonFunctionEntry(scriptExportName string,
	lambdaFunctionName string,
	logger *logrus.Logger) string {
	logger.WithFields(logrus.Fields{
		"ScriptName": scriptExportName,
		"LambdaName": lambdaFunctionName,
	}).Debug("Registering Sparta Python function")
	return fmt.Sprintf(`def %s(event, context):
    return lambda_handler("%s", event, context)
`,
		scriptExportName,
		lambdaFunctionName)
}
// createNewPythonProxyEntry returns the Python function definition that
// proxies the given lambda's invocation through the shared lambda_handler
// dispatcher.
//
// FIX: the generated call previously interpolated the lambda function name
// unquoted (`lambda_handler(%s, ...)`), emitting a bare Python identifier
// that raises NameError at runtime. The name is now quoted, consistent with
// the pythonFunctionEntry helper.
func createNewPythonProxyEntry(lambdaInfo *LambdaAWSInfo, logger *logrus.Logger) string {
	logger.WithFields(logrus.Fields{
		"FunctionName": lambdaInfo.lambdaFunctionName(),
		"ScriptName":   lambdaInfo.scriptExportHandlerName(),
	}).Info("Registering Sparta Python function")
	primaryEntry := fmt.Sprintf(`def %s(event, context):
    return lambda_handler("%s", event, context)
`,
		lambdaInfo.scriptExportHandlerName(),
		lambdaInfo.lambdaFunctionName())
	return primaryEntry
}
func createNewSpartaPythonCustomResourceEntry(resourceName string, logger *logrus.Logger) string {
// The resource name is a :: delimited one, so let's sanitize that
// to make it a valid JS identifier | executableOutput string,
lambdaAWSInfos []*LambdaAWSInfo,
zipWriter *zip.Writer,
logger *logrus.Logger) error {
pythonWriter, err := zipWriter.Create("index.py")
if err != nil {
return errors.New("Failed to create ZIP entry: index.py")
}
pythonTemplate := _escFSMustString(false, "/resources/index.template.py")
pythonSource := "\n#DO NOT EDIT - CONTENT UNTIL EOF IS AUTOMATICALLY GENERATED\n"
// Great, let's assemble all the Python function names, then
// supply them to the template expansion to perform the final
// magic
handlerNames := make(map[string]bool, 0)
for _, eachLambda := range lambdaAWSInfos {
if _, exists := handlerNames[eachLambda.scriptExportHandlerName()]; !exists {
pythonSource += pythonFunctionEntry(eachLambda.scriptExportHandlerName(),
eachLambda.lambdaFunctionName(),
logger)
handlerNames[eachLambda.scriptExportHandlerName()] = true
}
// USER DEFINED RESOURCES
for _, eachCustomResource := range eachLambda.customResources {
if _, exists := handlerNames[eachCustomResource.scriptExportHandlerName()]; !exists {
pythonSource += pythonFunctionEntry(eachCustomResource.scriptExportHandlerName(),
eachCustomResource.userFunctionName,
logger)
pythonSource += createUserCustomResourceEntry(eachCustomResource, logger)
handlerNames[eachCustomResource.scriptExportHandlerName()] = true
}
}
}
// SPARTA CUSTOM RESOURCES
for _, eachCustomResourceName := range golangCustomResourceTypes {
pythonSource += createNewSpartaPythonCustomResourceEntry(eachCustomResourceName, logger)
}
// Finally, pump the index.template.py through
// the Go template engine so that we can substitute the
// library name and the python functions we've built up...
data := struct {
LibraryName string
PythonFunctions string
}{
executableOutput,
pythonSource,
}
pyTemplate, pyTemplateErr := template.New("PythonHandler").Parse(pythonTemplate)
if nil != pyTemplateErr {
return pyTemplateErr
}
var pyDoc bytes.Buffer
pyTemplateErr = pyTemplate.Execute(&pyDoc, data)
if nil != pyTemplateErr {
return pyTemplateErr
}
// Log the Python handler...
logger.WithFields(logrus.Fields{
"index.py": pyDoc.String(),
}).Debug("Dynamically generated Python ctypes adapter")
_, copyErr := io.WriteString(pythonWriter, pyDoc.String())
return copyErr
}
func systemGoVersion(logger *logrus.Logger) (string, error) {
runtimeVersion := runtime.Version()
// Get the golang version from the output:
// Matts-MBP:Sparta mweagle$ go version
// go version go1.8.1 darwin/amd64
golangVersionRE := regexp.MustCompile(`go(\d+\.\d+(\.\d+)?)`)
matches := golangVersionRE.FindStringSubmatch(runtimeVersion)
if len(matches) > 2 {
return matches[1], nil
}
logger.WithFields(logrus.Fields{
"Output": runtimeVersion,
}).Warn("Unable to find Golang version using RegExp - using current version")
return runtimeVersion, nil
} | pyName := scriptExportNameForCustomResourceType(resourceName)
return pythonFunctionEntry(pyName, resourceName, logger)
}
func insertPythonProxyResources(serviceName string, | random_line_split |
provision_utils.go | package sparta
import (
"archive/zip"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"text/template"
"github.com/Sirupsen/logrus"
spartaIAM "github.com/mweagle/Sparta/aws/iam"
"github.com/mweagle/cloudformationresources"
gocf "github.com/mweagle/go-cloudformation"
)
const (
// ScratchDirectory is the cwd relative path component
// where intermediate build artifacts are created
ScratchDirectory = ".sparta"
salt = "213EA743-A98F-499D-8FEF-B87015FE13E7"
// The relative path of the custom scripts that is used
// to create the filename relative path when creating the custom archive
provisioningResourcesRelPath = "/resources/provision"
)
// The basename of the scripts that are embedded into CONSTANTS.go
// by `esc` during the generate phase. In order to export these, there
// MUST be a corresponding PROXIED_MODULES entry for the base filename
// in resources/index.js
var customResourceScripts = []string{"sparta_utils.js",
"golang-constants.json"}
var golangCustomResourceTypes = []string{
cloudformationresources.SESLambdaEventSource,
cloudformationresources.S3LambdaEventSource,
cloudformationresources.SNSLambdaEventSource,
cloudformationresources.CloudWatchLogsLambdaEventSource,
cloudformationresources.ZipToS3Bucket,
}
// PushSourceConfigurationActions map stores common IAM Policy Actions for Lambda
// push-source configuration management.
// The configuration is handled by CustomResources inserted into the generated
// CloudFormation template.
var PushSourceConfigurationActions = struct {
SNSLambdaEventSource []string
S3LambdaEventSource []string
SESLambdaEventSource []string
CloudWatchLogsLambdaEventSource []string
}{
SNSLambdaEventSource: []string{"sns:ConfirmSubscription",
"sns:GetTopicAttributes",
"sns:ListSubscriptionsByTopic",
"sns:Subscribe",
"sns:Unsubscribe"},
S3LambdaEventSource: []string{"s3:GetBucketLocation",
"s3:GetBucketNotification",
"s3:PutBucketNotification",
"s3:GetBucketNotificationConfiguration",
"s3:PutBucketNotificationConfiguration"},
SESLambdaEventSource: []string{"ses:CreateReceiptRuleSet",
"ses:CreateReceiptRule",
"ses:DeleteReceiptRule",
"ses:DeleteReceiptRuleSet",
"ses:DescribeReceiptRuleSet"},
CloudWatchLogsLambdaEventSource: []string{"logs:DescribeSubscriptionFilters",
"logs:DeleteSubscriptionFilter",
"logs:PutSubscriptionFilter",
},
}
// temporaryFile creates (or truncates) a stable, named file inside the
// ".sparta" build directory under the current working directory. A stable
// name (rather than an os.CreateTemp random suffix) keeps successive
// builds idempotent: each build overwrites the previous artifact.
//
// The caller owns the returned *os.File and is responsible for closing it.
func temporaryFile(name string) (*os.File, error) {
	workingDir, err := os.Getwd()
	if err != nil {
		return nil, err
	}
	// Put everything in the ./.sparta scratch directory
	buildDir := filepath.Join(workingDir, ".sparta")
	if mkdirErr := os.MkdirAll(buildDir, os.ModePerm); mkdirErr != nil {
		return nil, mkdirErr
	}
	// Use a stable temporary name so repeated builds reuse the same path
	temporaryPath := filepath.Join(buildDir, name)
	tmpFile, err := os.Create(temporaryPath)
	if err != nil {
		// Wrap with %w so callers can inspect the underlying *os.PathError
		return nil, fmt.Errorf("creating temporary file %q: %w", temporaryPath, err)
	}
	return tmpFile, nil
}
func runOSCommand(cmd *exec.Cmd, logger *logrus.Logger) error {
logger.WithFields(logrus.Fields{
"Arguments": cmd.Args,
"Dir": cmd.Dir,
"Path": cmd.Path,
"Env": cmd.Env,
}).Debug("Running Command")
outputWriter := logger.Writer()
defer outputWriter.Close()
cmd.Stdout = outputWriter
cmd.Stderr = outputWriter
return cmd.Run()
}
// awsPrincipalToService extracts the service component from an AWS principal
// name (eg, "s3.amazonaws.com" => "S3"): the text before the first '.'
// separator, upper-cased. Names without a '.' are upper-cased whole.
func awsPrincipalToService(awsPrincipalName string) string {
	serviceName := awsPrincipalName
	if separatorIndex := strings.Index(awsPrincipalName, "."); separatorIndex != -1 {
		serviceName = awsPrincipalName[:separatorIndex]
	}
	return strings.ToUpper(serviceName)
}
func ensureCustomResourceHandler(serviceName string,
useCGO bool,
customResourceTypeName string,
sourceArn *gocf.StringExpr,
dependsOn []string,
template *gocf.Template,
S3Bucket string,
S3Key string,
logger *logrus.Logger) (string, error) {
// AWS service basename
awsServiceName := awsPrincipalToService(customResourceTypeName)
// Use a stable resource CloudFormation resource name to represent
// the single CustomResource that can configure the different
// PushSource's for the given principal.
keyName, err := json.Marshal(ArbitraryJSONObject{
"Principal": customResourceTypeName,
"ServiceName": awsServiceName,
})
if err != nil {
logger.Error("Failed to create configurator resource name: ", err.Error())
return "", err
}
resourceBaseName := fmt.Sprintf("%sCustomResource", awsServiceName)
subscriberHandlerName := CloudFormationResourceName(resourceBaseName, string(keyName))
//////////////////////////////////////////////////////////////////////////////
// IAM Role definition
iamResourceName, err := ensureIAMRoleForCustomResource(customResourceTypeName,
sourceArn,
template,
logger)
if nil != err {
return "", err
}
iamRoleRef := gocf.GetAtt(iamResourceName, "Arn")
_, exists := template.Resources[subscriberHandlerName]
if !exists {
logger.WithFields(logrus.Fields{
"Service": customResourceTypeName,
}).Debug("Including Lambda CustomResource for AWS Service")
configuratorDescription := customResourceDescription(serviceName, customResourceTypeName)
//////////////////////////////////////////////////////////////////////////////
// Custom Resource Lambda Handler
// The export name MUST correspond to the createForwarder entry that is dynamically
// written into the index.js file during compile in createNewSpartaCustomResourceEntry
handlerName := lambdaExportNameForCustomResourceType(customResourceTypeName)
logger.WithFields(logrus.Fields{
"CustomResourceType": customResourceTypeName,
"ScriptExport": handlerName,
}).Debug("Sparta CloudFormation custom resource handler info")
customResourceHandlerDef := gocf.LambdaFunction{
Code: &gocf.LambdaFunctionCode{
S3Bucket: gocf.String(S3Bucket),
S3Key: gocf.String(S3Key),
},
Description: gocf.String(configuratorDescription),
Handler: gocf.String(handlerName),
Role: iamRoleRef,
Timeout: gocf.Integer(30),
}
if useCGO {
customResourceHandlerDef.Runtime = gocf.String(PythonVersion)
} else {
customResourceHandlerDef.Runtime = gocf.String(NodeJSVersion)
}
cfResource := template.AddResource(subscriberHandlerName, customResourceHandlerDef)
if nil != dependsOn && (len(dependsOn) > 0) {
cfResource.DependsOn = append(cfResource.DependsOn, dependsOn...)
}
}
return subscriberHandlerName, nil
}
// ensureIAMRoleForCustomResource ensures that the single IAM::Role for a single
// AWS principal (eg, s3.*.*) exists, and includes statements for the given
// sourceArn. Sparta uses a single IAM::Role for the CustomResource configuration
// lambda, which is the union of all Arns in the application.
func ensureIAMRoleForCustomResource(awsPrincipalName string,
sourceArn *gocf.StringExpr,
template *gocf.Template,
logger *logrus.Logger) (string, error) {
var principalActions []string
switch awsPrincipalName {
case cloudformationresources.SNSLambdaEventSource:
principalActions = PushSourceConfigurationActions.SNSLambdaEventSource
case cloudformationresources.S3LambdaEventSource:
principalActions = PushSourceConfigurationActions.S3LambdaEventSource
case cloudformationresources.SESLambdaEventSource:
principalActions = PushSourceConfigurationActions.SESLambdaEventSource
case cloudformationresources.CloudWatchLogsLambdaEventSource:
principalActions = PushSourceConfigurationActions.CloudWatchLogsLambdaEventSource
default:
return "", fmt.Errorf("Unsupported principal for IAM role creation: %s", awsPrincipalName)
}
// What's the stable IAMRoleName?
resourceBaseName := fmt.Sprintf("CustomResource%sIAMRole", awsPrincipalToService(awsPrincipalName))
stableRoleName := CloudFormationResourceName(resourceBaseName, awsPrincipalName)
// Ensure it exists, then check to see if this Source ARN is already specified...
// Checking equality with Stringable?
// Create a new Role
var existingIAMRole *gocf.IAMRole
existingResource, exists := template.Resources[stableRoleName]
logger.WithFields(logrus.Fields{
"PrincipalActions": principalActions,
"SourceArn": sourceArn,
}).Debug("Ensuring IAM Role results")
if !exists {
// Insert the IAM role here. We'll walk the policies data in the next section
// to make sure that the sourceARN we have is in the list
statements := CommonIAMStatements.Core
iamPolicyList := gocf.IAMRolePolicyList{}
iamPolicyList = append(iamPolicyList,
gocf.IAMRolePolicy{
PolicyDocument: ArbitraryJSONObject{
"Version": "2012-10-17",
"Statement": statements,
},
PolicyName: gocf.String(fmt.Sprintf("%sPolicy", stableRoleName)),
},
)
existingIAMRole = &gocf.IAMRole{
AssumeRolePolicyDocument: AssumePolicyDocument,
Policies: &iamPolicyList,
}
template.AddResource(stableRoleName, existingIAMRole)
// Create a new IAM Role resource
logger.WithFields(logrus.Fields{
"RoleName": stableRoleName,
}).Debug("Inserting IAM Role")
} else {
existingIAMRole = existingResource.Properties.(*gocf.IAMRole)
}
// Walk the existing statements
if nil != existingIAMRole.Policies {
for _, eachPolicy := range *existingIAMRole.Policies {
policyDoc := eachPolicy.PolicyDocument.(ArbitraryJSONObject)
statements := policyDoc["Statement"]
for _, eachStatement := range statements.([]spartaIAM.PolicyStatement) {
if sourceArn.String() == eachStatement.Resource.String() {
logger.WithFields(logrus.Fields{
"RoleName": stableRoleName,
"SourceArn": sourceArn.String(),
}).Debug("SourceArn already exists for IAM Policy")
return stableRoleName, nil
}
}
}
logger.WithFields(logrus.Fields{
"RoleName": stableRoleName,
"Action": principalActions,
"Resource": sourceArn,
}).Debug("Inserting Actions for configuration ARN")
// Add this statement to the first policy, iff the actions are non-empty
if len(principalActions) > 0 {
rootPolicy := (*existingIAMRole.Policies)[0]
rootPolicyDoc := rootPolicy.PolicyDocument.(ArbitraryJSONObject)
rootPolicyStatements := rootPolicyDoc["Statement"].([]spartaIAM.PolicyStatement)
rootPolicyDoc["Statement"] = append(rootPolicyStatements, spartaIAM.PolicyStatement{
Effect: "Allow",
Action: principalActions,
Resource: sourceArn,
})
}
return stableRoleName, nil
}
return "", fmt.Errorf("Unable to find Policies entry for IAM role: %s", stableRoleName)
}
func writeCustomResources(zipWriter *zip.Writer,
logger *logrus.Logger) error {
for _, eachName := range customResourceScripts {
resourceName := fmt.Sprintf("%s/%s", provisioningResourcesRelPath, eachName)
resourceContent := _escFSMustString(false, resourceName)
stringReader := strings.NewReader(resourceContent)
embedWriter, errCreate := zipWriter.Create(eachName)
if nil != errCreate {
return errCreate
}
logger.WithFields(logrus.Fields{
"Name": eachName,
}).Debug("Script name")
_, copyErr := io.Copy(embedWriter, stringReader)
if nil != copyErr {
return copyErr
}
}
return nil
}
func createUserCustomResourceEntry(customResource *customResourceInfo, logger *logrus.Logger) string {
// The resource name is a :: delimited one, so let's sanitize that
// to make it a valid JS identifier
logger.WithFields(logrus.Fields{
"UserFunction": customResource.userFunctionName,
"NodeJSFunctionName": customResource.scriptExportHandlerName(),
}).Debug("Registering User CustomResource function")
primaryEntry := fmt.Sprintf("exports[\"%s\"] = createForwarder(\"/%s\");\n",
customResource.scriptExportHandlerName(),
customResource.userFunctionName)
return primaryEntry
}
// Return a string representation of a JS function call that can be exposed
// to AWS Lambda
func createNewNodeJSProxyEntry(lambdaInfo *LambdaAWSInfo, logger *logrus.Logger) string {
logger.WithFields(logrus.Fields{
"FunctionName": lambdaInfo.lambdaFunctionName(),
"ScriptName": lambdaInfo.scriptExportHandlerName(),
}).Info("Registering Sparta JS function")
// We do know the CF resource name here - could write this into
// index.js and expose a GET localhost:9000/lambdaMetadata
// which wraps up DescribeStackResource for the running
// lambda function
primaryEntry := fmt.Sprintf("exports[\"%s\"] = createForwarder(\"/%s\");\n",
lambdaInfo.scriptExportHandlerName(),
lambdaInfo.lambdaFunctionName())
return primaryEntry
}
func createNewSpartaNodeJSCustomResourceEntry(resourceName string, logger *logrus.Logger) string {
// The resource name is a :: delimited one, so let's sanitize that
// to make it a valid JS identifier
jsName := scriptExportNameForCustomResourceType(resourceName)
primaryEntry := fmt.Sprintf("exports[\"%s\"] = createForwarder(\"/%s\");\n",
jsName,
resourceName)
return primaryEntry
}
func insertNodeJSProxyResources(serviceName string,
executableOutput string,
lambdaAWSInfos []*LambdaAWSInfo,
zipWriter *zip.Writer,
logger *logrus.Logger) error {
// Add the string literal adapter, which requires us to add exported
// functions to the end of index.js. These NodeJS exports will be
// linked to the AWS Lambda NodeJS function name, and are basically
// automatically generated pass through proxies to the golang HTTP handler.
nodeJSWriter, err := zipWriter.Create("index.js")
if err != nil {
return errors.New("Failed to create ZIP entry: index.js")
}
nodeJSSource := _escFSMustString(false, "/resources/index.js")
nodeJSSource += "\n// DO NOT EDIT - CONTENT UNTIL EOF IS AUTOMATICALLY GENERATED\n"
handlerNames := make(map[string]bool, 0)
for _, eachLambda := range lambdaAWSInfos {
if _, exists := handlerNames[eachLambda.scriptExportHandlerName()]; !exists {
nodeJSSource += createNewNodeJSProxyEntry(eachLambda, logger)
handlerNames[eachLambda.scriptExportHandlerName()] = true
}
// USER DEFINED RESOURCES
for _, eachCustomResource := range eachLambda.customResources {
if _, exists := handlerNames[eachCustomResource.scriptExportHandlerName()]; !exists {
nodeJSSource += createUserCustomResourceEntry(eachCustomResource, logger)
handlerNames[eachCustomResource.scriptExportHandlerName()] = true
}
}
}
// SPARTA CUSTOM RESOURCES
for _, eachCustomResourceName := range golangCustomResourceTypes {
nodeJSSource += createNewSpartaNodeJSCustomResourceEntry(eachCustomResourceName, logger)
}
// Finally, replace
// SPARTA_BINARY_NAME = 'Sparta.lambda.amd64';
// with the service binary name
nodeJSSource += fmt.Sprintf("SPARTA_BINARY_NAME='%s';\n", executableOutput)
// And the service name
nodeJSSource += fmt.Sprintf("SPARTA_SERVICE_NAME='%s';\n", serviceName)
logger.WithFields(logrus.Fields{
"index.js": nodeJSSource,
}).Debug("Dynamically generated NodeJS adapter")
stringReader := strings.NewReader(nodeJSSource)
_, copyErr := io.Copy(nodeJSWriter, stringReader)
if nil != copyErr {
return copyErr
}
// Next embed the custom resource scripts into the package.
logger.Debug("Embedding CustomResource scripts")
return writeCustomResources(zipWriter, logger)
}
func pythonFunctionEntry(scriptExportName string,
lambdaFunctionName string,
logger *logrus.Logger) string {
logger.WithFields(logrus.Fields{
"ScriptName": scriptExportName,
"LambdaName": lambdaFunctionName,
}).Debug("Registering Sparta Python function")
return fmt.Sprintf(`def %s(event, context):
return lambda_handler("%s", event, context)
`,
scriptExportName,
lambdaFunctionName)
}
// Return a string representation of a JS function call that can be exposed
// to AWS Lambda
func | (lambdaInfo *LambdaAWSInfo, logger *logrus.Logger) string {
logger.WithFields(logrus.Fields{
"FunctionName": lambdaInfo.lambdaFunctionName(),
"ScriptName": lambdaInfo.scriptExportHandlerName(),
}).Info("Registering Sparta Python function")
primaryEntry := fmt.Sprintf(`def %s(event, context):
return lambda_handler(%s, event, context)
`,
lambdaInfo.scriptExportHandlerName(),
lambdaInfo.lambdaFunctionName())
return primaryEntry
}
func createNewSpartaPythonCustomResourceEntry(resourceName string, logger *logrus.Logger) string {
// The resource name is a :: delimited one, so let's sanitize that
// to make it a valid JS identifier
pyName := scriptExportNameForCustomResourceType(resourceName)
return pythonFunctionEntry(pyName, resourceName, logger)
}
func insertPythonProxyResources(serviceName string,
executableOutput string,
lambdaAWSInfos []*LambdaAWSInfo,
zipWriter *zip.Writer,
logger *logrus.Logger) error {
pythonWriter, err := zipWriter.Create("index.py")
if err != nil {
return errors.New("Failed to create ZIP entry: index.py")
}
pythonTemplate := _escFSMustString(false, "/resources/index.template.py")
pythonSource := "\n#DO NOT EDIT - CONTENT UNTIL EOF IS AUTOMATICALLY GENERATED\n"
// Great, let's assemble all the Python function names, then
// supply them to the template expansion to perform the final
// magic
handlerNames := make(map[string]bool, 0)
for _, eachLambda := range lambdaAWSInfos {
if _, exists := handlerNames[eachLambda.scriptExportHandlerName()]; !exists {
pythonSource += pythonFunctionEntry(eachLambda.scriptExportHandlerName(),
eachLambda.lambdaFunctionName(),
logger)
handlerNames[eachLambda.scriptExportHandlerName()] = true
}
// USER DEFINED RESOURCES
for _, eachCustomResource := range eachLambda.customResources {
if _, exists := handlerNames[eachCustomResource.scriptExportHandlerName()]; !exists {
pythonSource += pythonFunctionEntry(eachCustomResource.scriptExportHandlerName(),
eachCustomResource.userFunctionName,
logger)
pythonSource += createUserCustomResourceEntry(eachCustomResource, logger)
handlerNames[eachCustomResource.scriptExportHandlerName()] = true
}
}
}
// SPARTA CUSTOM RESOURCES
for _, eachCustomResourceName := range golangCustomResourceTypes {
pythonSource += createNewSpartaPythonCustomResourceEntry(eachCustomResourceName, logger)
}
// Finally, pump the index.template.py through
// the Go template engine so that we can substitute the
// library name and the python functions we've built up...
data := struct {
LibraryName string
PythonFunctions string
}{
executableOutput,
pythonSource,
}
pyTemplate, pyTemplateErr := template.New("PythonHandler").Parse(pythonTemplate)
if nil != pyTemplateErr {
return pyTemplateErr
}
var pyDoc bytes.Buffer
pyTemplateErr = pyTemplate.Execute(&pyDoc, data)
if nil != pyTemplateErr {
return pyTemplateErr
}
// Log the Python handler...
logger.WithFields(logrus.Fields{
"index.py": pyDoc.String(),
}).Debug("Dynamically generated Python ctypes adapter")
_, copyErr := io.WriteString(pythonWriter, pyDoc.String())
return copyErr
}
func systemGoVersion(logger *logrus.Logger) (string, error) {
runtimeVersion := runtime.Version()
// Get the golang version from the output:
// Matts-MBP:Sparta mweagle$ go version
// go version go1.8.1 darwin/amd64
golangVersionRE := regexp.MustCompile(`go(\d+\.\d+(\.\d+)?)`)
matches := golangVersionRE.FindStringSubmatch(runtimeVersion)
if len(matches) > 2 {
return matches[1], nil
}
logger.WithFields(logrus.Fields{
"Output": runtimeVersion,
}).Warn("Unable to find Golang version using RegExp - using current version")
return runtimeVersion, nil
}
| createNewPythonProxyEntry | identifier_name |
vk.rs | use vulkano::device::Queue;
use vulkano::swapchain::Surface;
use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer };
use vulkano::command_buffer::{AutoCommandBufferBuilder, DynamicState};
use vulkano::device::{Device, DeviceExtensions};
use vulkano::format::Format;
use vulkano::framebuffer::{Framebuffer, FramebufferAbstract, Subpass, RenderPassAbstract};
use vulkano::image::SwapchainImage;
use vulkano::image::attachment::AttachmentImage;
use vulkano::instance::Instance;
use vulkano::instance::PhysicalDevice;
use vulkano::pipeline::vertex::TwoBuffersDefinition;
use vulkano::pipeline::viewport::Viewport;
use vulkano::pipeline::{GraphicsPipeline, GraphicsPipelineAbstract};
use vulkano::swapchain::{AcquireError, PresentMode, SurfaceTransform, Swapchain, SwapchainCreationError};
use vulkano::swapchain;
use vulkano::sync::{GpuFuture, FlushError};
use vulkano::sync;
use vulkano_win::VkSurfaceBuild;
use winit::{EventsLoop, Window, WindowBuilder, Event, WindowEvent};
use simple_error::SimpleError;
use std::sync::Arc;
use std::iter;
use std::time::Instant;
use std::error::Error;
use crate::graphics::*;
pub struct VulkanBackend {
show_fps: bool,
device: Arc<Device>,
vs: vs::Shader,
fs: fs::Shader,
swapchain: Arc<Swapchain<winit::Window>>,
images: Vec<Arc<SwapchainImage<Window>>>,
render_pass: Arc<dyn RenderPassAbstract + Send + Sync>,
surface: Arc<Surface<winit::Window>>,
queue: Arc<Queue>,
events_loop: EventsLoop,
phys_dims: [u32; 2],
log_dims: [u32; 2],
}
impl VulkanBackend {
fn window_size_dependent_setup(&self) -> Result<(Arc<(dyn GraphicsPipelineAbstract + Send + Sync)>,
Vec<Arc<dyn FramebufferAbstract + Send + Sync>>),
Box<dyn Error>>{
let dimensions = self.images[0].dimensions();
let depth_buffer = AttachmentImage::transient(
self.device.clone(),
dimensions,
Format::D16Unorm)?;
let framebuffers = self.images.iter().map(|image| {
let buf = Framebuffer::start(self.render_pass.clone())
.add(image.clone())?
.add(depth_buffer.clone())?
.build()?;
Ok(Arc::new(
buf
) as Arc<dyn FramebufferAbstract + Send + Sync>)
}).collect::<Result<Vec<_>, Box<dyn Error>>>()?;
let pipeline = Arc::new(GraphicsPipeline::start()
.vertex_input(TwoBuffersDefinition::<VkVertex, VkColour>::new())
.vertex_shader(self.vs.main_entry_point(), ())
.triangle_list()
.viewports_dynamic_scissors_irrelevant(1)
.viewports(iter::once(Viewport {
origin: [0.0, 0.0],
dimensions: [dimensions[0] as f32, dimensions[1] as f32],
depth_range: 0.0..1.0,
}))
.fragment_shader(self.fs.main_entry_point(), ())
.blend_alpha_blending()
.depth_stencil_simple_depth()
.render_pass(Subpass::from(self.render_pass.clone(), 0)
.ok_or(SimpleError::new("Failed to load subpass"))?)
.build(self.device.clone())?);
Ok((pipeline, framebuffers))
}
fn convert_vertex(&self, vert: Vertex) -> VkVertex {
let mut position = match vert {
Vertex::Xy(x, y) => [x, y, 0.0],
Vertex::Xyz(x, y, z) => [x, y, z]
};
position[0] /= self.log_dims[0] as f32;
position[1] /= self.log_dims[1] as f32;
position[0] -= 0.5;
position[1] -= 0.5;
position[0] *= 2.;
position[1] *= 2.;
VkVertex { position }
}
}
impl GfxProvider for VulkanBackend {
fn | () -> Result<Self, Box<dyn Error>> {
println!("Beginning Vulkan setup...");
let instance = {
let extensions = vulkano_win::required_extensions();
Instance::new(None, &extensions, None)
}?;
// We then choose which physical device to use.
//
// In a real application, there are three things to take into consideration:
//
// - Some devices may not support some of the optional features that may be required by your
// application. You should filter out the devices that don't support your app.
//
// - Not all devices can draw to a certain surface. Once you create your window, you have to
// choose a device that is capable of drawing to it.
//
// - You probably want to leave the choice between the remaining devices to the user.
//
let mut physical_devices = PhysicalDevice::enumerate(&instance);
for device in physical_devices.clone() {
println!("Found device: {} (type: {:?})", device.name(), device.ty());
}
let physical = physical_devices.next().ok_or(SimpleError::new("Found no devices"))?;
// Some debug info.
println!("Using {}.", physical.name());
let events_loop = EventsLoop::new();
let surface = WindowBuilder::new()
// .with_transparency(true)
.with_decorations(false)
.build_vk_surface(&events_loop, instance.clone())?;
let window = surface.window();
let queue_family = physical.queue_families().find(|&q| {
q.supports_graphics() && surface.is_supported(q).unwrap_or(false)
}).ok_or(SimpleError::new("Found no suitable devices"))?;
let device_ext = DeviceExtensions { khr_swapchain: true, ..DeviceExtensions::none() };
let (device, mut queues) = Device::new(physical, physical.supported_features(), &device_ext,
[(queue_family, 0.5)].iter().cloned())?;
let queue = queues.next().ok_or(SimpleError::new("Failed to create queue"))?;
let (phys_dims, log_dims) = if let Some(dimensions) = window.get_inner_size() {
let log: (u32, u32) = dimensions.into();
let phys: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
([phys.0, phys.1], [log.0, log.1])
} else {
return Err("Failed to load window dimensions".into());
};
let (swapchain, images) = {
let caps = surface.capabilities(physical)?;
let usage = caps.supported_usage_flags;
let alpha = caps.supported_composite_alpha.iter().next()
.ok_or(SimpleError::new("Found no transparency-supporting devices"))?;
let format = caps.supported_formats[0].0;
Swapchain::new(device.clone(), surface.clone(), caps.min_image_count, format,
phys_dims, 1, usage, &queue, SurfaceTransform::Identity, alpha,
PresentMode::Fifo, true, None)
}?;
let vs = vs::Shader::load(device.clone())?;
let fs = fs::Shader::load(device.clone())?;
let render_pass = Arc::new(vulkano::single_pass_renderpass!(device.clone(),
attachments: {
color: {
load: Clear,
store: Store,
format: swapchain.format(),
samples: 1,
},
depth: {
load: Clear,
store: DontCare,
format: Format::D16Unorm,
samples: 1,
}
},
pass: {
color: [color],
depth_stencil: {depth}
}
)?);
let show_fps = false;
let images = images.to_vec();
Ok(Self {
show_fps,
device,
vs,
fs,
images,
render_pass,
swapchain,
surface,
queue,
events_loop,
phys_dims,
log_dims
})
}
fn show_fps(mut self) -> Self {
self.show_fps = true;
self
}
fn run(mut self, mut vertex_producer: Box<dyn VertexProducer>) -> Result<(), Box<dyn Error>> {
let (mut pipeline, mut framebuffers) = self.window_size_dependent_setup()?;
let mut recreate_swapchain = false;
let window = self.surface.window();
let mut previous_frame_end = Box::new(sync::now(self.device.clone())) as Box<dyn GpuFuture>;
let mut t0 = Instant::now();
let mut updates = 0;
let fps_freq = 100;
loop {
if self.show_fps {
// The below line panics on my Intel Ultra HD 620 setup,
// but only on debug. It seems to be a bug in Vulkano, specifically
// a race condition caused by the driver behaving differently to how
// they thought it would.
previous_frame_end.cleanup_finished();
updates += 1;
if updates % fps_freq == 0 {
let t = Instant::now();
let ms = t.duration_since(t0).as_millis() as f32 / fps_freq as f32;
let fps = 1000.0 / ms;
println!("{} fps", fps);
t0 = Instant::now();
}
}
// Whenever the window resizes we need to recreate everything dependent on the window size.
// In this example that includes the swapchain, the framebuffers and the dynamic state viewport.
if recreate_swapchain {
// Get the new dimensions of the window.
let (phys_dims, log_dims) = if let Some(dimensions) = window.get_inner_size() {
let log: (u32, u32) = dimensions.into();
let phys: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
([phys.0, phys.1], [log.0, log.1])
} else {
return Err("Failed to load window dimensions".into());
};
self.phys_dims = phys_dims;
self.log_dims = log_dims;
let (new_swapchain, new_images) = match self.swapchain.recreate_with_dimension(phys_dims) {
Ok(r) => r,
Err(SwapchainCreationError::UnsupportedDimensions) => continue,
Err(err) => panic!("{:?}", err)
};
self.swapchain = new_swapchain;
self.images = new_images.to_vec();
let (new_pipeline, new_framebuffers) = self.window_size_dependent_setup()?;
pipeline = new_pipeline;
framebuffers = new_framebuffers;
recreate_swapchain = false;
}
let (image_num, acquire_future) = match swapchain::acquire_next_image(self.swapchain.clone(), None) {
Ok(r) => r,
Err(AcquireError::OutOfDate) => {
recreate_swapchain = true;
continue;
},
Err(err) => panic!("{:?}", err)
};
let clear_values = vec![[1., 0., 1., 1.].into(), 1f32.into()];
let (vertices, colours, indices) = vertex_producer.get_data(RuntimeParams {
window_width: self.log_dims[0] as u16,
window_height: self.log_dims[1] as u16
});
let vertices: Vec<VkVertex> = vertices.into_iter().map(|vert| self.convert_vertex(vert)).collect();
let colours: Vec<VkColour> = colours.into_iter().map(|col| VkColour::from(col)).collect();
let vertex_buffer = CpuAccessibleBuffer::from_iter(self.device.clone(), BufferUsage::all(), vertices.iter().cloned())?;
let colour_buffer = CpuAccessibleBuffer::from_iter(self.device.clone(), BufferUsage::all(), colours.iter().cloned())?;
let index_buffer = CpuAccessibleBuffer::from_iter(self.device.clone(), BufferUsage::all(), indices.iter().cloned())?;
let command_buffer = AutoCommandBufferBuilder::primary_one_time_submit(self.device.clone(), self.queue.family())?
.begin_render_pass(framebuffers[image_num].clone(), false, clear_values)?
.draw_indexed(
pipeline.clone(),
&DynamicState::none(),
vec!(vertex_buffer.clone(), colour_buffer.clone()),
index_buffer.clone(), (), ())?
.end_render_pass()?
.build()?;
let future = previous_frame_end.join(acquire_future)
.then_execute(self.queue.clone(), command_buffer)?
.then_swapchain_present(self.queue.clone(), self.swapchain.clone(), image_num)
.then_signal_fence_and_flush();
match future {
Ok(future) => {
previous_frame_end = Box::new(future) as Box<_>;
}
Err(FlushError::OutOfDate) => {
recreate_swapchain = true;
previous_frame_end = Box::new(sync::now(self.device.clone())) as Box<_>;
}
Err(e) => {
println!("{:?}", e);
previous_frame_end = Box::new(sync::now(self.device.clone())) as Box<_>;
}
}
let mut done = false;
self.events_loop.poll_events(|ev| {
match ev {
Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => done = true,
Event::WindowEvent { event: WindowEvent::Resized(_), .. } => recreate_swapchain = true,
_ => ()
}
});
if done {
return Ok(());
}
}
}
}
#[derive(Default, Debug, Clone)]
struct VkVertex { position: [f32; 3] }
vulkano::impl_vertex!(VkVertex, position);
#[derive(Default, Debug, Clone)]
struct VkColour { colour: [f32; 4] }
vulkano::impl_vertex!(VkColour, colour);
impl From<Colour> for VkColour {
fn from(col: Colour) -> Self {
let mut colour = match col {
Colour::Rgb(r, g, b) => [r, g, b, 1.0],
Colour::Rgba(r, g, b, a) => [r, g, b, a]
};
// Convert from sRGB; the Vulkano API doesn't allow us to change the colour space
for i in colour.iter_mut() {
*i = i.powf(2.2);
}
Self { colour }
}
}
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
#version 450
layout(location = 0) in vec3 position;
layout(location = 1) in vec4 colour;
layout(location = 0) out vec4 fragColour;
void main() {
gl_Position = vec4(position, 1.0);
fragColour = colour;
}
"
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
#version 450
layout(location = 0) in vec4 fragColour;
layout(location = 0) out vec4 f_color;
void main() {
f_color = vec4(fragColour);
}
"
}
} | new | identifier_name |
vk.rs | use vulkano::device::Queue;
use vulkano::swapchain::Surface;
use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer };
use vulkano::command_buffer::{AutoCommandBufferBuilder, DynamicState};
use vulkano::device::{Device, DeviceExtensions};
use vulkano::format::Format;
use vulkano::framebuffer::{Framebuffer, FramebufferAbstract, Subpass, RenderPassAbstract};
use vulkano::image::SwapchainImage;
use vulkano::image::attachment::AttachmentImage;
use vulkano::instance::Instance;
use vulkano::instance::PhysicalDevice;
use vulkano::pipeline::vertex::TwoBuffersDefinition;
use vulkano::pipeline::viewport::Viewport;
use vulkano::pipeline::{GraphicsPipeline, GraphicsPipelineAbstract};
use vulkano::swapchain::{AcquireError, PresentMode, SurfaceTransform, Swapchain, SwapchainCreationError};
use vulkano::swapchain;
use vulkano::sync::{GpuFuture, FlushError};
use vulkano::sync;
use vulkano_win::VkSurfaceBuild;
use winit::{EventsLoop, Window, WindowBuilder, Event, WindowEvent};
use simple_error::SimpleError;
use std::sync::Arc;
use std::iter;
use std::time::Instant;
use std::error::Error;
use crate::graphics::*;
pub struct VulkanBackend {
show_fps: bool,
device: Arc<Device>,
vs: vs::Shader,
fs: fs::Shader,
swapchain: Arc<Swapchain<winit::Window>>,
images: Vec<Arc<SwapchainImage<Window>>>,
render_pass: Arc<dyn RenderPassAbstract + Send + Sync>,
surface: Arc<Surface<winit::Window>>,
queue: Arc<Queue>,
events_loop: EventsLoop,
phys_dims: [u32; 2],
log_dims: [u32; 2],
}
impl VulkanBackend {
fn window_size_dependent_setup(&self) -> Result<(Arc<(dyn GraphicsPipelineAbstract + Send + Sync)>,
Vec<Arc<dyn FramebufferAbstract + Send + Sync>>),
Box<dyn Error>>{
let dimensions = self.images[0].dimensions();
let depth_buffer = AttachmentImage::transient(
self.device.clone(),
dimensions,
Format::D16Unorm)?;
let framebuffers = self.images.iter().map(|image| {
let buf = Framebuffer::start(self.render_pass.clone())
.add(image.clone())?
.add(depth_buffer.clone())?
.build()?;
Ok(Arc::new(
buf
) as Arc<dyn FramebufferAbstract + Send + Sync>)
}).collect::<Result<Vec<_>, Box<dyn Error>>>()?;
let pipeline = Arc::new(GraphicsPipeline::start()
.vertex_input(TwoBuffersDefinition::<VkVertex, VkColour>::new())
.vertex_shader(self.vs.main_entry_point(), ())
.triangle_list()
.viewports_dynamic_scissors_irrelevant(1)
.viewports(iter::once(Viewport {
origin: [0.0, 0.0],
dimensions: [dimensions[0] as f32, dimensions[1] as f32],
depth_range: 0.0..1.0,
}))
.fragment_shader(self.fs.main_entry_point(), ())
.blend_alpha_blending()
.depth_stencil_simple_depth()
.render_pass(Subpass::from(self.render_pass.clone(), 0)
.ok_or(SimpleError::new("Failed to load subpass"))?)
.build(self.device.clone())?);
Ok((pipeline, framebuffers))
}
fn convert_vertex(&self, vert: Vertex) -> VkVertex {
let mut position = match vert {
Vertex::Xy(x, y) => [x, y, 0.0],
Vertex::Xyz(x, y, z) => [x, y, z]
};
position[0] /= self.log_dims[0] as f32;
position[1] /= self.log_dims[1] as f32;
position[0] -= 0.5;
position[1] -= 0.5;
position[0] *= 2.;
position[1] *= 2.;
VkVertex { position }
}
}
impl GfxProvider for VulkanBackend {
fn new() -> Result<Self, Box<dyn Error>> {
println!("Beginning Vulkan setup...");
let instance = {
let extensions = vulkano_win::required_extensions();
Instance::new(None, &extensions, None)
}?;
// We then choose which physical device to use.
//
// In a real application, there are three things to take into consideration:
//
// - Some devices may not support some of the optional features that may be required by your
// application. You should filter out the devices that don't support your app.
//
// - Not all devices can draw to a certain surface. Once you create your window, you have to
// choose a device that is capable of drawing to it.
//
// - You probably want to leave the choice between the remaining devices to the user.
//
let mut physical_devices = PhysicalDevice::enumerate(&instance);
for device in physical_devices.clone() {
println!("Found device: {} (type: {:?})", device.name(), device.ty());
}
let physical = physical_devices.next().ok_or(SimpleError::new("Found no devices"))?;
// Some debug info.
println!("Using {}.", physical.name());
let events_loop = EventsLoop::new();
let surface = WindowBuilder::new()
// .with_transparency(true)
.with_decorations(false)
.build_vk_surface(&events_loop, instance.clone())?;
let window = surface.window();
let queue_family = physical.queue_families().find(|&q| {
q.supports_graphics() && surface.is_supported(q).unwrap_or(false)
}).ok_or(SimpleError::new("Found no suitable devices"))?;
let device_ext = DeviceExtensions { khr_swapchain: true, ..DeviceExtensions::none() };
let (device, mut queues) = Device::new(physical, physical.supported_features(), &device_ext,
[(queue_family, 0.5)].iter().cloned())?;
let queue = queues.next().ok_or(SimpleError::new("Failed to create queue"))?;
let (phys_dims, log_dims) = if let Some(dimensions) = window.get_inner_size() {
let log: (u32, u32) = dimensions.into();
let phys: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
([phys.0, phys.1], [log.0, log.1])
} else {
return Err("Failed to load window dimensions".into());
};
let (swapchain, images) = {
let caps = surface.capabilities(physical)?;
let usage = caps.supported_usage_flags;
let alpha = caps.supported_composite_alpha.iter().next()
.ok_or(SimpleError::new("Found no transparency-supporting devices"))?;
let format = caps.supported_formats[0].0;
Swapchain::new(device.clone(), surface.clone(), caps.min_image_count, format,
phys_dims, 1, usage, &queue, SurfaceTransform::Identity, alpha,
PresentMode::Fifo, true, None)
}?;
let vs = vs::Shader::load(device.clone())?;
let fs = fs::Shader::load(device.clone())?;
let render_pass = Arc::new(vulkano::single_pass_renderpass!(device.clone(),
attachments: {
color: {
load: Clear,
store: Store,
format: swapchain.format(),
samples: 1,
},
depth: {
load: Clear,
store: DontCare,
format: Format::D16Unorm,
samples: 1,
}
},
pass: {
color: [color],
depth_stencil: {depth}
}
)?);
let show_fps = false;
let images = images.to_vec();
Ok(Self {
show_fps,
device,
vs,
fs,
images,
render_pass,
swapchain,
surface,
queue,
events_loop,
phys_dims,
log_dims
})
}
fn show_fps(mut self) -> Self {
self.show_fps = true;
self
}
fn run(mut self, mut vertex_producer: Box<dyn VertexProducer>) -> Result<(), Box<dyn Error>> {
let (mut pipeline, mut framebuffers) = self.window_size_dependent_setup()?;
let mut recreate_swapchain = false;
let window = self.surface.window();
let mut previous_frame_end = Box::new(sync::now(self.device.clone())) as Box<dyn GpuFuture>;
let mut t0 = Instant::now();
let mut updates = 0;
let fps_freq = 100;
loop {
if self.show_fps {
// The below line panics on my Intel Ultra HD 620 setup,
// but only on debug. It seems to be a bug in Vulkano, specifically
// a race condition caused by the driver behaving differently to how
// they thought it would.
previous_frame_end.cleanup_finished();
updates += 1;
if updates % fps_freq == 0 {
let t = Instant::now();
let ms = t.duration_since(t0).as_millis() as f32 / fps_freq as f32;
let fps = 1000.0 / ms;
println!("{} fps", fps);
t0 = Instant::now();
}
}
// Whenever the window resizes we need to recreate everything dependent on the window size.
// In this example that includes the swapchain, the framebuffers and the dynamic state viewport.
if recreate_swapchain {
// Get the new dimensions of the window.
let (phys_dims, log_dims) = if let Some(dimensions) = window.get_inner_size() {
let log: (u32, u32) = dimensions.into();
let phys: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
([phys.0, phys.1], [log.0, log.1])
} else {
return Err("Failed to load window dimensions".into());
};
self.phys_dims = phys_dims;
self.log_dims = log_dims;
let (new_swapchain, new_images) = match self.swapchain.recreate_with_dimension(phys_dims) {
Ok(r) => r,
Err(SwapchainCreationError::UnsupportedDimensions) => continue,
Err(err) => panic!("{:?}", err)
};
self.swapchain = new_swapchain;
self.images = new_images.to_vec();
let (new_pipeline, new_framebuffers) = self.window_size_dependent_setup()?;
pipeline = new_pipeline;
framebuffers = new_framebuffers;
recreate_swapchain = false;
}
let (image_num, acquire_future) = match swapchain::acquire_next_image(self.swapchain.clone(), None) {
Ok(r) => r,
Err(AcquireError::OutOfDate) => {
recreate_swapchain = true;
continue;
},
Err(err) => panic!("{:?}", err)
};
let clear_values = vec![[1., 0., 1., 1.].into(), 1f32.into()];
let (vertices, colours, indices) = vertex_producer.get_data(RuntimeParams {
window_width: self.log_dims[0] as u16,
window_height: self.log_dims[1] as u16
});
let vertices: Vec<VkVertex> = vertices.into_iter().map(|vert| self.convert_vertex(vert)).collect();
let colours: Vec<VkColour> = colours.into_iter().map(|col| VkColour::from(col)).collect();
let vertex_buffer = CpuAccessibleBuffer::from_iter(self.device.clone(), BufferUsage::all(), vertices.iter().cloned())?;
let colour_buffer = CpuAccessibleBuffer::from_iter(self.device.clone(), BufferUsage::all(), colours.iter().cloned())?;
let index_buffer = CpuAccessibleBuffer::from_iter(self.device.clone(), BufferUsage::all(), indices.iter().cloned())?;
let command_buffer = AutoCommandBufferBuilder::primary_one_time_submit(self.device.clone(), self.queue.family())?
.begin_render_pass(framebuffers[image_num].clone(), false, clear_values)?
.draw_indexed(
pipeline.clone(),
&DynamicState::none(),
vec!(vertex_buffer.clone(), colour_buffer.clone()),
index_buffer.clone(), (), ())?
.end_render_pass()?
.build()?;
let future = previous_frame_end.join(acquire_future)
.then_execute(self.queue.clone(), command_buffer)?
.then_swapchain_present(self.queue.clone(), self.swapchain.clone(), image_num)
.then_signal_fence_and_flush();
match future {
Ok(future) => {
previous_frame_end = Box::new(future) as Box<_>;
}
Err(FlushError::OutOfDate) => {
recreate_swapchain = true;
previous_frame_end = Box::new(sync::now(self.device.clone())) as Box<_>;
}
Err(e) => {
println!("{:?}", e);
previous_frame_end = Box::new(sync::now(self.device.clone())) as Box<_>;
}
}
let mut done = false;
self.events_loop.poll_events(|ev| {
match ev {
Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => done = true,
Event::WindowEvent { event: WindowEvent::Resized(_), .. } => recreate_swapchain = true,
_ => ()
}
});
if done {
return Ok(());
}
}
}
}
#[derive(Default, Debug, Clone)]
struct VkVertex { position: [f32; 3] }
vulkano::impl_vertex!(VkVertex, position);
#[derive(Default, Debug, Clone)]
struct VkColour { colour: [f32; 4] }
vulkano::impl_vertex!(VkColour, colour);
impl From<Colour> for VkColour {
fn from(col: Colour) -> Self {
let mut colour = match col {
Colour::Rgb(r, g, b) => [r, g, b, 1.0],
Colour::Rgba(r, g, b, a) => [r, g, b, a]
};
// Convert from sRGB; the Vulkano API doesn't allow us to change the colour space
for i in colour.iter_mut() {
*i = i.powf(2.2);
}
Self { colour }
}
}
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
#version 450
layout(location = 0) in vec3 position;
layout(location = 1) in vec4 colour;
layout(location = 0) out vec4 fragColour; |
void main() {
gl_Position = vec4(position, 1.0);
fragColour = colour;
}
"
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
#version 450
layout(location = 0) in vec4 fragColour;
layout(location = 0) out vec4 f_color;
void main() {
f_color = vec4(fragColour);
}
"
}
} | random_line_split | |
ovs_configurator.py | import subprocess
import yaml
import click
import json
import copy
import jinja2
import random
import os
from baldur.remote import Remote
REPO_PATH = "/opt/code/github/0-complexity/openvcloud_installer"
with open("specs.yaml", "r") as spcs:
specs = yaml.load(spcs)
nodes_specs = specs["nodes"]
disks_specs = specs["disks"]
sizes_specs = specs["sizes"]
large_ssd = sizes_specs["large_ssd"]
small_ssd = sizes_specs["small_ssd"]
hdd = sizes_specs["hdd"]
def prepare_config(config_path):
with open(config_path, "r") as cfg:
config = yaml.load(cfg)
cmd = ["ssh-keygen", "-y", "-f", "/dev/stdin"]
key = config["ssh"]["private-key"]
public_key = subprocess.run(
cmd, stdout=subprocess.PIPE, input=key.encode("utf-8")
).stdout.decode("utf-8")
config["ssh"]["public-key"] = public_key
return config
def render(context, loader, filename):
data = jinja2.Environment(loader=loader).get_template(filename).render(context)
os.makedirs("output", exist_ok=True)
with open("output/{}".format(filename), "w") as f:
f.write(data)
def is_mounted(disk):
if disk.get("mountpoint"):
return True
for child in disk.get("children", []):
if is_mounted(child):
return True
return False
def get_node_disks(nodeip, config):
remote = Remote(nodeip, username="root", pkey=config["ssh"]["private-key"])
res = remote.run("lsblk -b -J -o name,size,rota,mountpoint", check=False)
disks = []
if res.exit_status == 0: | for disk in disks:
disk["mounted"] = is_mounted(disk)
disk["size"] = float(disk["size"]) / (1024 ** 3)
print(disk["name"] + ": " + str(disk["size"]))
if disk["name"] == "sr0":
disk["type"] = "cdrom"
if disk["size"] == float("60.0"):
disk["type"] = "boot"
if disk["size"] == float("30.0"):
disk["type"] = "ssd"
print("ssd:")
if disk["size"] == float("50.0"):
disk["type"] = "ssd"
print("ssd:")
if disk["size"] == float("200.0"):
disk["type"] = "hdd"
print("hdd:")
if disk["size"] == float("100.0"):
disk["type"] = "nvme"
print("nvme")
# print(disk)
return disks
def validate_node_disks(nodename, disks_specs, disks):
if nodename.startswith("cpu"):
specs = disks_specs["cpu"]
else:
specs = disks_specs["storage"]
large_ssd_count = 0
small_ssd_count = 0
hdd_count = 0
nvme_count = 0
for disk in disks:
try:
if disk["type"] == "nvme":
nvme_count += 1
if disk["type"] == "ssd":
if disk["size"] >= large_ssd:
large_ssd_count += 1
continue
if disk["size"] >= small_ssd:
small_ssd_count += 1
else:
if disk["size"] >= hdd:
hdd_count += 1
except KeyError as eee:
print(eee)
print(disk["name"])
continue
if specs.get("large_ssd", {}).get("num", 0) > large_ssd_count:
raise Exception(
"Not enough large ssd disks on {}, found {}, required {}".format(
nodename, large_ssd_count, specs.get("large_ssd", {}).get("num", 0)
)
)
if specs.get("small_ssd", {}).get("num", 0) > small_ssd_count:
raise Exception(
"Not enough small ssd disks on {}, found {}, required {}".format(
nodename, small_ssd_count, specs.get("small_ssd", {}).get("num", 0)
)
)
if specs.get("hdd", {}).get("num", 0) > hdd_count:
raise Exception(
"Not enough hdd disks on {}, found {}, required{}".format(
nodename, hdd_count, specs.get("hdd", {}).get("num", 0)
)
)
if specs.get("nvme", {}).get("num", 0) > nvme_count:
raise Exception(
"Not enough nvme disks on {}, found {}, required {}".format(
nodename, nvme_count, specs.get("nvme", {}).get("num", 0)
)
)
def get_nodes(config, role="cpu"):
nodes = []
for node in config["nodes"]:
if role in node["roles"]:
nodes.append(node)
return nodes
@click.command()
@click.option(
"--config_path", default="system-config.yaml", help="Path to system-config"
)
def main(config_path):
config = prepare_config(config_path)
env_type = config["environment"]["type"]
loader = jinja2.FileSystemLoader("./templates/{}".format(env_type))
nodes_ips = {}
nodes = []
storage_nodes = get_nodes(config, "storage")
cpu_nodes = get_nodes(config, "cpu")
cpucount = specs["nodes"][env_type]["cpu"]
storagecount = specs["nodes"][env_type]["storage"]
cachebackendcount = specs["disks"]["cpu"]["large_ssd"]["num"]
backendcount = specs["disks"]["storage"]["hdd"]["num"]
# validate nodes number
if len(storage_nodes) < storagecount:
raise Exception("Number of storage nodes is not enough")
if len(cpu_nodes) < cpucount:
raise Exception("Number of cpu nodes is not enough")
for idx, node in enumerate(storage_nodes):
node_number = idx + 1
node["alias"] = "storage_%02d" % node_number
for idx, node in enumerate(cpu_nodes):
node_number = idx + 1
node["alias"] = "cpu_%02d" % node_number
nodes.extend(storage_nodes)
nodes.extend(cpu_nodes)
for node in nodes:
# get nodes disks and validate them
node_disks = get_node_disks(
node["management"]["ipaddress"].split("/")[0], config
)
validate_node_disks(node["alias"], disks_specs, node_disks)
# get nodes ips
node_ip_key = "{}_ip".format(node["alias"])
ip = node["storage"]["ipaddress"].split("/")[0]
nodes_ips[node_ip_key] = ip
node["ip"] = ip
node["disks"] = node_disks
render(
{"storage_nodes": storage_nodes, "cpu_nodes": cpu_nodes, "cpucount": cpucount},
loader,
"inventory",
)
chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
password = ""
for i in range(10):
password += random.choice(chars)
data = {}
data["env_name"] = config["environment"]["subdomain"]
data["password"] = password
data["ovs_repo_url"] = config["environment"]["ovs_repo_url"]
data["ovs_version"] = config["environment"]["ovs_version"]
data.update(nodes_ips)
render(data, loader, "all")
with open("./templates/{}/setup.json".format(env_type)) as fd:
setup = json.load(fd)
setup["ci"]["grid_ip"] = storage_nodes[0]["ip"]
# storage backends
for backend in setup["setup"]["backends"]:
if backend["name"].startswith("cachebackend"):
# cachecbakcend01 takes first half of the large ssd on cpu nodes
osds = {}
backend["osds"] = osds
for cpunode in cpu_nodes[:cpucount]:
# disk count
disks = {}
useabledisks = list(
filter(
lambda x: not x["mounted"] and x["type"] == "ssd",
cpunode["disks"],
)
)
if backend["name"].endswith("01"):
# first half
useabledisks = useabledisks[: int(cachebackendcount / 2)]
else:
# second half
useabledisks = useabledisks[
int(cachebackendcount / 2) : cachebackendcount
]
osds[cpunode["ip"]] = disks
for disk in useabledisks:
disks[disk["name"]] = 2
elif backend["name"] in ("backend01", "backend02"):
# cachecbakcend01 takes first half of the large ssd on cpu nodes
osds = {}
backend["osds"] = osds
for storagenode in storage_nodes[:storagecount]:
# disk count
disks = {}
useabledisks = list(
filter(
lambda x: not x["mounted"] and x["type"] == "hdd",
storagenode["disks"],
)
)
if backend["name"].endswith("01"):
# first half
useabledisks = useabledisks[: int(backendcount / 2)]
else:
# second half
useabledisks = useabledisks[int(backendcount / 2) :]
osds[storagenode["ip"]] = disks
for disk in useabledisks:
disks[disk["name"]] = 1
# storagerouters
storagerouters = {}
storageroutertemplate = list(setup["setup"]["storagerouters"].values())[0]
setup["setup"]["storagerouters"] = storagerouters
for storagenode in storage_nodes:
storagerouter = copy.deepcopy(storageroutertemplate)
storagerouters[storagenode["ip"]] = storagerouter
storagerouter["hostname"] = storagenode["name"]
# configure roles
disks = {}
storagerouter["disks"] = disks
writeroles = 0
scrub = 0
db = 0
for disk in storagenode["disks"]:
if disk["mounted"] and scrub == 0:
disks[disk["name"]] = {"roles": ["SCRUB"]}
scrub += 1
elif not disk["mounted"] and disk["type"] == "ssd" and writeroles < 2:
disks[disk["name"]] = {"roles": ["WRITE"]}
writeroles += 1
elif disk["type"] == "nvme" and db == 0:
disks[disk["name"]] = {"roles": ["DB", "DTL"]}
db += 1
for name, vpool in storagerouter["vpools"].items():
vpool["storage_ip"] = storagenode["ip"]
with open("output/setup.json", "w") as f:
json.dump(setup, f)
if __name__ == "__main__":
main() | disks = json.loads(res.stdout)["blockdevices"] | random_line_split |
ovs_configurator.py | import subprocess
import yaml
import click
import json
import copy
import jinja2
import random
import os
from baldur.remote import Remote
REPO_PATH = "/opt/code/github/0-complexity/openvcloud_installer"
with open("specs.yaml", "r") as spcs:
specs = yaml.load(spcs)
nodes_specs = specs["nodes"]
disks_specs = specs["disks"]
sizes_specs = specs["sizes"]
large_ssd = sizes_specs["large_ssd"]
small_ssd = sizes_specs["small_ssd"]
hdd = sizes_specs["hdd"]
def prepare_config(config_path):
with open(config_path, "r") as cfg:
config = yaml.load(cfg)
cmd = ["ssh-keygen", "-y", "-f", "/dev/stdin"]
key = config["ssh"]["private-key"]
public_key = subprocess.run(
cmd, stdout=subprocess.PIPE, input=key.encode("utf-8")
).stdout.decode("utf-8")
config["ssh"]["public-key"] = public_key
return config
def render(context, loader, filename):
data = jinja2.Environment(loader=loader).get_template(filename).render(context)
os.makedirs("output", exist_ok=True)
with open("output/{}".format(filename), "w") as f:
f.write(data)
def is_mounted(disk):
if disk.get("mountpoint"):
return True
for child in disk.get("children", []):
if is_mounted(child):
return True
return False
def get_node_disks(nodeip, config):
remote = Remote(nodeip, username="root", pkey=config["ssh"]["private-key"])
res = remote.run("lsblk -b -J -o name,size,rota,mountpoint", check=False)
disks = []
if res.exit_status == 0:
disks = json.loads(res.stdout)["blockdevices"]
for disk in disks:
disk["mounted"] = is_mounted(disk)
disk["size"] = float(disk["size"]) / (1024 ** 3)
print(disk["name"] + ": " + str(disk["size"]))
if disk["name"] == "sr0":
disk["type"] = "cdrom"
if disk["size"] == float("60.0"):
disk["type"] = "boot"
if disk["size"] == float("30.0"):
disk["type"] = "ssd"
print("ssd:")
if disk["size"] == float("50.0"):
disk["type"] = "ssd"
print("ssd:")
if disk["size"] == float("200.0"):
disk["type"] = "hdd"
print("hdd:")
if disk["size"] == float("100.0"):
disk["type"] = "nvme"
print("nvme")
# print(disk)
return disks
def validate_node_disks(nodename, disks_specs, disks):
if nodename.startswith("cpu"):
specs = disks_specs["cpu"]
else:
specs = disks_specs["storage"]
large_ssd_count = 0
small_ssd_count = 0
hdd_count = 0
nvme_count = 0
for disk in disks:
try:
if disk["type"] == "nvme":
nvme_count += 1
if disk["type"] == "ssd":
if disk["size"] >= large_ssd:
large_ssd_count += 1
continue
if disk["size"] >= small_ssd:
small_ssd_count += 1
else:
if disk["size"] >= hdd:
hdd_count += 1
except KeyError as eee:
print(eee)
print(disk["name"])
continue
if specs.get("large_ssd", {}).get("num", 0) > large_ssd_count:
raise Exception(
"Not enough large ssd disks on {}, found {}, required {}".format(
nodename, large_ssd_count, specs.get("large_ssd", {}).get("num", 0)
)
)
if specs.get("small_ssd", {}).get("num", 0) > small_ssd_count:
raise Exception(
"Not enough small ssd disks on {}, found {}, required {}".format(
nodename, small_ssd_count, specs.get("small_ssd", {}).get("num", 0)
)
)
if specs.get("hdd", {}).get("num", 0) > hdd_count:
raise Exception(
"Not enough hdd disks on {}, found {}, required{}".format(
nodename, hdd_count, specs.get("hdd", {}).get("num", 0)
)
)
if specs.get("nvme", {}).get("num", 0) > nvme_count:
raise Exception(
"Not enough nvme disks on {}, found {}, required {}".format(
nodename, nvme_count, specs.get("nvme", {}).get("num", 0)
)
)
def get_nodes(config, role="cpu"):
|
@click.command()
@click.option(
"--config_path", default="system-config.yaml", help="Path to system-config"
)
def main(config_path):
config = prepare_config(config_path)
env_type = config["environment"]["type"]
loader = jinja2.FileSystemLoader("./templates/{}".format(env_type))
nodes_ips = {}
nodes = []
storage_nodes = get_nodes(config, "storage")
cpu_nodes = get_nodes(config, "cpu")
cpucount = specs["nodes"][env_type]["cpu"]
storagecount = specs["nodes"][env_type]["storage"]
cachebackendcount = specs["disks"]["cpu"]["large_ssd"]["num"]
backendcount = specs["disks"]["storage"]["hdd"]["num"]
# validate nodes number
if len(storage_nodes) < storagecount:
raise Exception("Number of storage nodes is not enough")
if len(cpu_nodes) < cpucount:
raise Exception("Number of cpu nodes is not enough")
for idx, node in enumerate(storage_nodes):
node_number = idx + 1
node["alias"] = "storage_%02d" % node_number
for idx, node in enumerate(cpu_nodes):
node_number = idx + 1
node["alias"] = "cpu_%02d" % node_number
nodes.extend(storage_nodes)
nodes.extend(cpu_nodes)
for node in nodes:
# get nodes disks and validate them
node_disks = get_node_disks(
node["management"]["ipaddress"].split("/")[0], config
)
validate_node_disks(node["alias"], disks_specs, node_disks)
# get nodes ips
node_ip_key = "{}_ip".format(node["alias"])
ip = node["storage"]["ipaddress"].split("/")[0]
nodes_ips[node_ip_key] = ip
node["ip"] = ip
node["disks"] = node_disks
render(
{"storage_nodes": storage_nodes, "cpu_nodes": cpu_nodes, "cpucount": cpucount},
loader,
"inventory",
)
chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
password = ""
for i in range(10):
password += random.choice(chars)
data = {}
data["env_name"] = config["environment"]["subdomain"]
data["password"] = password
data["ovs_repo_url"] = config["environment"]["ovs_repo_url"]
data["ovs_version"] = config["environment"]["ovs_version"]
data.update(nodes_ips)
render(data, loader, "all")
with open("./templates/{}/setup.json".format(env_type)) as fd:
setup = json.load(fd)
setup["ci"]["grid_ip"] = storage_nodes[0]["ip"]
# storage backends
for backend in setup["setup"]["backends"]:
if backend["name"].startswith("cachebackend"):
# cachecbakcend01 takes first half of the large ssd on cpu nodes
osds = {}
backend["osds"] = osds
for cpunode in cpu_nodes[:cpucount]:
# disk count
disks = {}
useabledisks = list(
filter(
lambda x: not x["mounted"] and x["type"] == "ssd",
cpunode["disks"],
)
)
if backend["name"].endswith("01"):
# first half
useabledisks = useabledisks[: int(cachebackendcount / 2)]
else:
# second half
useabledisks = useabledisks[
int(cachebackendcount / 2) : cachebackendcount
]
osds[cpunode["ip"]] = disks
for disk in useabledisks:
disks[disk["name"]] = 2
elif backend["name"] in ("backend01", "backend02"):
# cachecbakcend01 takes first half of the large ssd on cpu nodes
osds = {}
backend["osds"] = osds
for storagenode in storage_nodes[:storagecount]:
# disk count
disks = {}
useabledisks = list(
filter(
lambda x: not x["mounted"] and x["type"] == "hdd",
storagenode["disks"],
)
)
if backend["name"].endswith("01"):
# first half
useabledisks = useabledisks[: int(backendcount / 2)]
else:
# second half
useabledisks = useabledisks[int(backendcount / 2) :]
osds[storagenode["ip"]] = disks
for disk in useabledisks:
disks[disk["name"]] = 1
# storagerouters
storagerouters = {}
storageroutertemplate = list(setup["setup"]["storagerouters"].values())[0]
setup["setup"]["storagerouters"] = storagerouters
for storagenode in storage_nodes:
storagerouter = copy.deepcopy(storageroutertemplate)
storagerouters[storagenode["ip"]] = storagerouter
storagerouter["hostname"] = storagenode["name"]
# configure roles
disks = {}
storagerouter["disks"] = disks
writeroles = 0
scrub = 0
db = 0
for disk in storagenode["disks"]:
if disk["mounted"] and scrub == 0:
disks[disk["name"]] = {"roles": ["SCRUB"]}
scrub += 1
elif not disk["mounted"] and disk["type"] == "ssd" and writeroles < 2:
disks[disk["name"]] = {"roles": ["WRITE"]}
writeroles += 1
elif disk["type"] == "nvme" and db == 0:
disks[disk["name"]] = {"roles": ["DB", "DTL"]}
db += 1
for name, vpool in storagerouter["vpools"].items():
vpool["storage_ip"] = storagenode["ip"]
with open("output/setup.json", "w") as f:
json.dump(setup, f)
if __name__ == "__main__":
main()
| nodes = []
for node in config["nodes"]:
if role in node["roles"]:
nodes.append(node)
return nodes | identifier_body |
ovs_configurator.py | import subprocess
import yaml
import click
import json
import copy
import jinja2
import random
import os
from baldur.remote import Remote
REPO_PATH = "/opt/code/github/0-complexity/openvcloud_installer"
with open("specs.yaml", "r") as spcs:
specs = yaml.load(spcs)
nodes_specs = specs["nodes"]
disks_specs = specs["disks"]
sizes_specs = specs["sizes"]
large_ssd = sizes_specs["large_ssd"]
small_ssd = sizes_specs["small_ssd"]
hdd = sizes_specs["hdd"]
def prepare_config(config_path):
with open(config_path, "r") as cfg:
config = yaml.load(cfg)
cmd = ["ssh-keygen", "-y", "-f", "/dev/stdin"]
key = config["ssh"]["private-key"]
public_key = subprocess.run(
cmd, stdout=subprocess.PIPE, input=key.encode("utf-8")
).stdout.decode("utf-8")
config["ssh"]["public-key"] = public_key
return config
def render(context, loader, filename):
data = jinja2.Environment(loader=loader).get_template(filename).render(context)
os.makedirs("output", exist_ok=True)
with open("output/{}".format(filename), "w") as f:
f.write(data)
def is_mounted(disk):
if disk.get("mountpoint"):
return True
for child in disk.get("children", []):
if is_mounted(child):
return True
return False
def get_node_disks(nodeip, config):
remote = Remote(nodeip, username="root", pkey=config["ssh"]["private-key"])
res = remote.run("lsblk -b -J -o name,size,rota,mountpoint", check=False)
disks = []
if res.exit_status == 0:
disks = json.loads(res.stdout)["blockdevices"]
for disk in disks:
disk["mounted"] = is_mounted(disk)
disk["size"] = float(disk["size"]) / (1024 ** 3)
print(disk["name"] + ": " + str(disk["size"]))
if disk["name"] == "sr0":
disk["type"] = "cdrom"
if disk["size"] == float("60.0"):
disk["type"] = "boot"
if disk["size"] == float("30.0"):
disk["type"] = "ssd"
print("ssd:")
if disk["size"] == float("50.0"):
disk["type"] = "ssd"
print("ssd:")
if disk["size"] == float("200.0"):
disk["type"] = "hdd"
print("hdd:")
if disk["size"] == float("100.0"):
disk["type"] = "nvme"
print("nvme")
# print(disk)
return disks
def validate_node_disks(nodename, disks_specs, disks):
if nodename.startswith("cpu"):
specs = disks_specs["cpu"]
else:
specs = disks_specs["storage"]
large_ssd_count = 0
small_ssd_count = 0
hdd_count = 0
nvme_count = 0
for disk in disks:
try:
if disk["type"] == "nvme":
nvme_count += 1
if disk["type"] == "ssd":
if disk["size"] >= large_ssd:
large_ssd_count += 1
continue
if disk["size"] >= small_ssd:
small_ssd_count += 1
else:
if disk["size"] >= hdd:
hdd_count += 1
except KeyError as eee:
print(eee)
print(disk["name"])
continue
if specs.get("large_ssd", {}).get("num", 0) > large_ssd_count:
raise Exception(
"Not enough large ssd disks on {}, found {}, required {}".format(
nodename, large_ssd_count, specs.get("large_ssd", {}).get("num", 0)
)
)
if specs.get("small_ssd", {}).get("num", 0) > small_ssd_count:
raise Exception(
"Not enough small ssd disks on {}, found {}, required {}".format(
nodename, small_ssd_count, specs.get("small_ssd", {}).get("num", 0)
)
)
if specs.get("hdd", {}).get("num", 0) > hdd_count:
raise Exception(
"Not enough hdd disks on {}, found {}, required{}".format(
nodename, hdd_count, specs.get("hdd", {}).get("num", 0)
)
)
if specs.get("nvme", {}).get("num", 0) > nvme_count:
raise Exception(
"Not enough nvme disks on {}, found {}, required {}".format(
nodename, nvme_count, specs.get("nvme", {}).get("num", 0)
)
)
def get_nodes(config, role="cpu"):
nodes = []
for node in config["nodes"]:
if role in node["roles"]:
nodes.append(node)
return nodes
@click.command()
@click.option(
"--config_path", default="system-config.yaml", help="Path to system-config"
)
def main(config_path):
config = prepare_config(config_path)
env_type = config["environment"]["type"]
loader = jinja2.FileSystemLoader("./templates/{}".format(env_type))
nodes_ips = {}
nodes = []
storage_nodes = get_nodes(config, "storage")
cpu_nodes = get_nodes(config, "cpu")
cpucount = specs["nodes"][env_type]["cpu"]
storagecount = specs["nodes"][env_type]["storage"]
cachebackendcount = specs["disks"]["cpu"]["large_ssd"]["num"]
backendcount = specs["disks"]["storage"]["hdd"]["num"]
# validate nodes number
if len(storage_nodes) < storagecount:
raise Exception("Number of storage nodes is not enough")
if len(cpu_nodes) < cpucount:
raise Exception("Number of cpu nodes is not enough")
for idx, node in enumerate(storage_nodes):
node_number = idx + 1
node["alias"] = "storage_%02d" % node_number
for idx, node in enumerate(cpu_nodes):
node_number = idx + 1
node["alias"] = "cpu_%02d" % node_number
nodes.extend(storage_nodes)
nodes.extend(cpu_nodes)
for node in nodes:
# get nodes disks and validate them
node_disks = get_node_disks(
node["management"]["ipaddress"].split("/")[0], config
)
validate_node_disks(node["alias"], disks_specs, node_disks)
# get nodes ips
node_ip_key = "{}_ip".format(node["alias"])
ip = node["storage"]["ipaddress"].split("/")[0]
nodes_ips[node_ip_key] = ip
node["ip"] = ip
node["disks"] = node_disks
render(
{"storage_nodes": storage_nodes, "cpu_nodes": cpu_nodes, "cpucount": cpucount},
loader,
"inventory",
)
chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
password = ""
for i in range(10):
password += random.choice(chars)
data = {}
data["env_name"] = config["environment"]["subdomain"]
data["password"] = password
data["ovs_repo_url"] = config["environment"]["ovs_repo_url"]
data["ovs_version"] = config["environment"]["ovs_version"]
data.update(nodes_ips)
render(data, loader, "all")
with open("./templates/{}/setup.json".format(env_type)) as fd:
setup = json.load(fd)
setup["ci"]["grid_ip"] = storage_nodes[0]["ip"]
# storage backends
for backend in setup["setup"]["backends"]:
if backend["name"].startswith("cachebackend"):
# cachecbakcend01 takes first half of the large ssd on cpu nodes
osds = {}
backend["osds"] = osds
for cpunode in cpu_nodes[:cpucount]:
# disk count
disks = {}
useabledisks = list(
filter(
lambda x: not x["mounted"] and x["type"] == "ssd",
cpunode["disks"],
)
)
if backend["name"].endswith("01"):
# first half
useabledisks = useabledisks[: int(cachebackendcount / 2)]
else:
# second half
|
osds[cpunode["ip"]] = disks
for disk in useabledisks:
disks[disk["name"]] = 2
elif backend["name"] in ("backend01", "backend02"):
# cachecbakcend01 takes first half of the large ssd on cpu nodes
osds = {}
backend["osds"] = osds
for storagenode in storage_nodes[:storagecount]:
# disk count
disks = {}
useabledisks = list(
filter(
lambda x: not x["mounted"] and x["type"] == "hdd",
storagenode["disks"],
)
)
if backend["name"].endswith("01"):
# first half
useabledisks = useabledisks[: int(backendcount / 2)]
else:
# second half
useabledisks = useabledisks[int(backendcount / 2) :]
osds[storagenode["ip"]] = disks
for disk in useabledisks:
disks[disk["name"]] = 1
# storagerouters
storagerouters = {}
storageroutertemplate = list(setup["setup"]["storagerouters"].values())[0]
setup["setup"]["storagerouters"] = storagerouters
for storagenode in storage_nodes:
storagerouter = copy.deepcopy(storageroutertemplate)
storagerouters[storagenode["ip"]] = storagerouter
storagerouter["hostname"] = storagenode["name"]
# configure roles
disks = {}
storagerouter["disks"] = disks
writeroles = 0
scrub = 0
db = 0
for disk in storagenode["disks"]:
if disk["mounted"] and scrub == 0:
disks[disk["name"]] = {"roles": ["SCRUB"]}
scrub += 1
elif not disk["mounted"] and disk["type"] == "ssd" and writeroles < 2:
disks[disk["name"]] = {"roles": ["WRITE"]}
writeroles += 1
elif disk["type"] == "nvme" and db == 0:
disks[disk["name"]] = {"roles": ["DB", "DTL"]}
db += 1
for name, vpool in storagerouter["vpools"].items():
vpool["storage_ip"] = storagenode["ip"]
with open("output/setup.json", "w") as f:
json.dump(setup, f)
if __name__ == "__main__":
main()
| useabledisks = useabledisks[
int(cachebackendcount / 2) : cachebackendcount
] | conditional_block |
ovs_configurator.py | import subprocess
import yaml
import click
import json
import copy
import jinja2
import random
import os
from baldur.remote import Remote
REPO_PATH = "/opt/code/github/0-complexity/openvcloud_installer"
with open("specs.yaml", "r") as spcs:
specs = yaml.load(spcs)
nodes_specs = specs["nodes"]
disks_specs = specs["disks"]
sizes_specs = specs["sizes"]
large_ssd = sizes_specs["large_ssd"]
small_ssd = sizes_specs["small_ssd"]
hdd = sizes_specs["hdd"]
def prepare_config(config_path):
with open(config_path, "r") as cfg:
config = yaml.load(cfg)
cmd = ["ssh-keygen", "-y", "-f", "/dev/stdin"]
key = config["ssh"]["private-key"]
public_key = subprocess.run(
cmd, stdout=subprocess.PIPE, input=key.encode("utf-8")
).stdout.decode("utf-8")
config["ssh"]["public-key"] = public_key
return config
def render(context, loader, filename):
data = jinja2.Environment(loader=loader).get_template(filename).render(context)
os.makedirs("output", exist_ok=True)
with open("output/{}".format(filename), "w") as f:
f.write(data)
def is_mounted(disk):
if disk.get("mountpoint"):
return True
for child in disk.get("children", []):
if is_mounted(child):
return True
return False
def get_node_disks(nodeip, config):
remote = Remote(nodeip, username="root", pkey=config["ssh"]["private-key"])
res = remote.run("lsblk -b -J -o name,size,rota,mountpoint", check=False)
disks = []
if res.exit_status == 0:
disks = json.loads(res.stdout)["blockdevices"]
for disk in disks:
disk["mounted"] = is_mounted(disk)
disk["size"] = float(disk["size"]) / (1024 ** 3)
print(disk["name"] + ": " + str(disk["size"]))
if disk["name"] == "sr0":
disk["type"] = "cdrom"
if disk["size"] == float("60.0"):
disk["type"] = "boot"
if disk["size"] == float("30.0"):
disk["type"] = "ssd"
print("ssd:")
if disk["size"] == float("50.0"):
disk["type"] = "ssd"
print("ssd:")
if disk["size"] == float("200.0"):
disk["type"] = "hdd"
print("hdd:")
if disk["size"] == float("100.0"):
disk["type"] = "nvme"
print("nvme")
# print(disk)
return disks
def validate_node_disks(nodename, disks_specs, disks):
if nodename.startswith("cpu"):
specs = disks_specs["cpu"]
else:
specs = disks_specs["storage"]
large_ssd_count = 0
small_ssd_count = 0
hdd_count = 0
nvme_count = 0
for disk in disks:
try:
if disk["type"] == "nvme":
nvme_count += 1
if disk["type"] == "ssd":
if disk["size"] >= large_ssd:
large_ssd_count += 1
continue
if disk["size"] >= small_ssd:
small_ssd_count += 1
else:
if disk["size"] >= hdd:
hdd_count += 1
except KeyError as eee:
print(eee)
print(disk["name"])
continue
if specs.get("large_ssd", {}).get("num", 0) > large_ssd_count:
raise Exception(
"Not enough large ssd disks on {}, found {}, required {}".format(
nodename, large_ssd_count, specs.get("large_ssd", {}).get("num", 0)
)
)
if specs.get("small_ssd", {}).get("num", 0) > small_ssd_count:
raise Exception(
"Not enough small ssd disks on {}, found {}, required {}".format(
nodename, small_ssd_count, specs.get("small_ssd", {}).get("num", 0)
)
)
if specs.get("hdd", {}).get("num", 0) > hdd_count:
raise Exception(
"Not enough hdd disks on {}, found {}, required{}".format(
nodename, hdd_count, specs.get("hdd", {}).get("num", 0)
)
)
if specs.get("nvme", {}).get("num", 0) > nvme_count:
raise Exception(
"Not enough nvme disks on {}, found {}, required {}".format(
nodename, nvme_count, specs.get("nvme", {}).get("num", 0)
)
)
def get_nodes(config, role="cpu"):
nodes = []
for node in config["nodes"]:
if role in node["roles"]:
nodes.append(node)
return nodes
@click.command()
@click.option(
"--config_path", default="system-config.yaml", help="Path to system-config"
)
def | (config_path):
config = prepare_config(config_path)
env_type = config["environment"]["type"]
loader = jinja2.FileSystemLoader("./templates/{}".format(env_type))
nodes_ips = {}
nodes = []
storage_nodes = get_nodes(config, "storage")
cpu_nodes = get_nodes(config, "cpu")
cpucount = specs["nodes"][env_type]["cpu"]
storagecount = specs["nodes"][env_type]["storage"]
cachebackendcount = specs["disks"]["cpu"]["large_ssd"]["num"]
backendcount = specs["disks"]["storage"]["hdd"]["num"]
# validate nodes number
if len(storage_nodes) < storagecount:
raise Exception("Number of storage nodes is not enough")
if len(cpu_nodes) < cpucount:
raise Exception("Number of cpu nodes is not enough")
for idx, node in enumerate(storage_nodes):
node_number = idx + 1
node["alias"] = "storage_%02d" % node_number
for idx, node in enumerate(cpu_nodes):
node_number = idx + 1
node["alias"] = "cpu_%02d" % node_number
nodes.extend(storage_nodes)
nodes.extend(cpu_nodes)
for node in nodes:
# get nodes disks and validate them
node_disks = get_node_disks(
node["management"]["ipaddress"].split("/")[0], config
)
validate_node_disks(node["alias"], disks_specs, node_disks)
# get nodes ips
node_ip_key = "{}_ip".format(node["alias"])
ip = node["storage"]["ipaddress"].split("/")[0]
nodes_ips[node_ip_key] = ip
node["ip"] = ip
node["disks"] = node_disks
render(
{"storage_nodes": storage_nodes, "cpu_nodes": cpu_nodes, "cpucount": cpucount},
loader,
"inventory",
)
chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
password = ""
for i in range(10):
password += random.choice(chars)
data = {}
data["env_name"] = config["environment"]["subdomain"]
data["password"] = password
data["ovs_repo_url"] = config["environment"]["ovs_repo_url"]
data["ovs_version"] = config["environment"]["ovs_version"]
data.update(nodes_ips)
render(data, loader, "all")
with open("./templates/{}/setup.json".format(env_type)) as fd:
setup = json.load(fd)
setup["ci"]["grid_ip"] = storage_nodes[0]["ip"]
# storage backends
for backend in setup["setup"]["backends"]:
if backend["name"].startswith("cachebackend"):
# cachecbakcend01 takes first half of the large ssd on cpu nodes
osds = {}
backend["osds"] = osds
for cpunode in cpu_nodes[:cpucount]:
# disk count
disks = {}
useabledisks = list(
filter(
lambda x: not x["mounted"] and x["type"] == "ssd",
cpunode["disks"],
)
)
if backend["name"].endswith("01"):
# first half
useabledisks = useabledisks[: int(cachebackendcount / 2)]
else:
# second half
useabledisks = useabledisks[
int(cachebackendcount / 2) : cachebackendcount
]
osds[cpunode["ip"]] = disks
for disk in useabledisks:
disks[disk["name"]] = 2
elif backend["name"] in ("backend01", "backend02"):
# cachecbakcend01 takes first half of the large ssd on cpu nodes
osds = {}
backend["osds"] = osds
for storagenode in storage_nodes[:storagecount]:
# disk count
disks = {}
useabledisks = list(
filter(
lambda x: not x["mounted"] and x["type"] == "hdd",
storagenode["disks"],
)
)
if backend["name"].endswith("01"):
# first half
useabledisks = useabledisks[: int(backendcount / 2)]
else:
# second half
useabledisks = useabledisks[int(backendcount / 2) :]
osds[storagenode["ip"]] = disks
for disk in useabledisks:
disks[disk["name"]] = 1
# storagerouters
storagerouters = {}
storageroutertemplate = list(setup["setup"]["storagerouters"].values())[0]
setup["setup"]["storagerouters"] = storagerouters
for storagenode in storage_nodes:
storagerouter = copy.deepcopy(storageroutertemplate)
storagerouters[storagenode["ip"]] = storagerouter
storagerouter["hostname"] = storagenode["name"]
# configure roles
disks = {}
storagerouter["disks"] = disks
writeroles = 0
scrub = 0
db = 0
for disk in storagenode["disks"]:
if disk["mounted"] and scrub == 0:
disks[disk["name"]] = {"roles": ["SCRUB"]}
scrub += 1
elif not disk["mounted"] and disk["type"] == "ssd" and writeroles < 2:
disks[disk["name"]] = {"roles": ["WRITE"]}
writeroles += 1
elif disk["type"] == "nvme" and db == 0:
disks[disk["name"]] = {"roles": ["DB", "DTL"]}
db += 1
for name, vpool in storagerouter["vpools"].items():
vpool["storage_ip"] = storagenode["ip"]
with open("output/setup.json", "w") as f:
json.dump(setup, f)
if __name__ == "__main__":
main()
| main | identifier_name |
windows.rs | #![allow(non_camel_case_types)]
#![allow(dead_code)]
use crate::{Interests, Token};
use std::collections::LinkedList;
use std::io::{self, Read, Write};
use std::net;
use std::os::windows::io::{AsRawSocket, RawSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
pub type Event = ffi::OVERLAPPED_ENTRY;
#[derive(Debug)]
pub struct TcpStream {
inner: net::TcpStream,
buffer: Vec<u8>,
wsabuf: Vec<ffi::WSABUF>,
event: Option<ffi::WSAOVERLAPPED>,
token: Option<usize>,
pos: usize,
operations: LinkedList<ffi::Operation>,
}
// On Windows we need to be careful when using IOCP on a server. Since we're "lending"
// access to the OS over memory we crate (we're not giving over ownership,
// but can't touch while it's lent either),
// it's easy to exploit this by issuing a lot of requests while delaying our
// responses. By doing this we would force the server to hand over so many write
// read buffers while waiting for clients to respond that it might run out of memory.
// Now the way we would normally handle this is to have a counter and limit the
// number of outstandig buffers, queueing requests and only handle them when the
// counter is below the high water mark. The same goes for using unlimited timeouts.
// http://www.serverframework.com/asynchronousevents/2011/06/tcp-flow-control-and-asynchronous-writes.html
impl TcpStream {
pub fn connect(adr: impl net::ToSocketAddrs) -> io::Result<Self> {
// This is a shortcut since this will block when establishing the connection.
// There are several ways of avoiding this.
// a) Obtrain the socket using system calls, set it to non_blocking before we connect
// b) use the crate [net2](https://docs.rs/net2/0.2.33/net2/index.html) which
// defines a trait with default implementation for TcpStream which allow us to set
// it to non-blocking before we connect
// Rust creates a WSASocket set to overlapped by default which is just what we need
// https://github.com/rust-lang/rust/blob/f86521e0a33a2b54c4c23dbfc5250013f7a33b11/src/libstd/sys/windows/net.rs#L99
let stream = net::TcpStream::connect(adr)?;
stream.set_nonblocking(true)?;
let mut buffer = vec![0_u8; 1024];
let wsabuf = vec![ffi::WSABUF::new(buffer.len() as u32, buffer.as_mut_ptr())];
Ok(TcpStream {
inner: stream,
buffer,
wsabuf,
event: None,
token: None,
pos: 0,
operations: LinkedList::new(),
})
}
}
impl Read for TcpStream {
fn read(&mut self, buff: &mut [u8]) -> io::Result<usize> {
let mut bytes_read = 0;
for (a, b) in self.buffer.iter().skip(self.pos).zip(buff) {
*b = *a;
bytes_read += 1;
}
self.pos += bytes_read;
Ok(bytes_read)
}
}
impl Write for TcpStream {
fn write(&mut self, buff: &[u8]) -> io::Result<usize> {
self.inner.write(buff)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl AsRawSocket for TcpStream {
fn as_raw_socket(&self) -> RawSocket {
self.inner.as_raw_socket()
}
}
pub struct Registrator {
completion_port: isize,
is_poll_dead: Arc<AtomicBool>,
}
impl Registrator {
pub fn register(
&self,
soc: &mut TcpStream,
token: usize,
interests: Interests,
) -> io::Result<()> {
if self.is_poll_dead.load(Ordering::SeqCst) {
return Err(io::Error::new(
io::ErrorKind::Interrupted,
"Poll instance is dead.",
));
}
ffi::create_io_completion_port(soc.as_raw_socket(), self.completion_port, 0)?;
let op = ffi::Operation::new(token);
soc.operations.push_back(op);
if interests.is_readable() {
ffi::wsa_recv(
soc.as_raw_socket(),
&mut soc.wsabuf,
soc.operations.back_mut().unwrap(),
)?;
} else {
unimplemented!();
}
Ok(())
}
/// NOTE: An alternative solution is to use the `CompletionKey` to signal that
/// this is a close event. We don't use it for anything else so it is a
/// good candidate to use for timers and special events like this
pub fn close_loop(&self) -> io::Result<()> {
if self
.is_poll_dead
.compare_and_swap(false, true, Ordering::SeqCst)
|
let mut overlapped = ffi::WSAOVERLAPPED::zeroed();
ffi::post_queued_completion_status(self.completion_port, 0, 0, &mut overlapped)?;
Ok(())
}
}
// possible Arc<InnerSelector> needed
#[derive(Debug)]
pub struct Selector {
completion_port: isize,
}
impl Selector {
/// Creates a new selector backed by a fresh I/O completion port.
pub fn new() -> io::Result<Self> {
// set up the queue
let completion_port = ffi::create_completion_port()?;
Ok(Selector { completion_port })
}
/// Hands out a `Registrator` that shares this selector's completion port
/// and the given liveness flag (set by `Registrator::close_loop`).
pub fn registrator(&self, is_poll_dead: Arc<AtomicBool>) -> Registrator {
Registrator {
completion_port: self.completion_port,
is_poll_dead,
}
}
/// Blocks until an Event has occured. Never times out. We could take a parameter
/// for a timeout and pass it on but we'll not do that in our example.
///
/// On success `events` holds the dequeued completion entries. `timeout` is
/// in milliseconds; `None` means wait forever.
pub fn select(
&mut self,
events: &mut Vec<ffi::OVERLAPPED_ENTRY>,
timeout: Option<i32>,
) -> io::Result<()> {
// calling GetQueueCompletionStatus will either return a handle to a "port" ready to read or
// block if the queue is empty.
// Windows want timeout as u32 so we cast it as such
let timeout = timeout.map(|t| t as u32);
// first let's clear events for any previous events and wait until we get som more
events.clear();
// NOTE(review): after `clear()` the Vec derefs to an *empty* slice, yet
// `ul_count` advertises the full capacity; the kernel then writes into
// the Vec's spare capacity and `set_len` below publishes it. This only
// works because callers pre-allocate the Vec (the tests create 255
// entries) — worth tightening, e.g. by passing the capacity explicitly.
let ul_count = events.capacity() as u32;
let removed_res = ffi::get_queued_completion_status_ex(
self.completion_port as isize,
events,
ul_count,
timeout,
false,
);
// We need to handle the case that the "error" was a WAIT_TIMEOUT error.
// the code for this error is 258 on Windows. We don't treat this as an error
// but set the events returned to 0.
// (i tried to do this in the `ffi` function but there was an error)
let removed = match removed_res {
Ok(n) => n,
Err(ref e) if e.raw_os_error() == Some(258) => 0,
Err(e) => return Err(e),
};
// Safety: the kernel has initialized exactly `removed` entries at the
// start of the buffer (removed <= capacity).
unsafe {
events.set_len(removed as usize);
}
Ok(())
}
}
impl Drop for Selector {
    /// Closes the completion-port handle when the selector goes away.
    fn drop(&mut self) {
        if let Err(e) = ffi::close_handle(self.completion_port) {
            // Panicking while a panic is already unwinding would abort the
            // process, so only surface the error when none is in flight.
            if !std::thread::panicking() {
                // `panic!(e)` with a non-string payload is deprecated and a
                // hard error in Rust 2021; format the error message instead.
                panic!("{}", e);
            }
        }
    }
}
// Raw Win32/WinSock declarations plus thin safe wrappers used by the
// selector above. Everything in here mirrors the Windows API; see the
// per-item links to the official documentation.
mod ffi {
use super::*;
use std::io;
use std::os::windows::io::RawSocket;
use std::ptr;
/// Mirror of the Win32 `WSABUF`: a (length, pointer) pair describing a
/// buffer handed to overlapped WinSock calls.
#[repr(C)]
#[derive(Clone, Debug)]
pub struct WSABUF {
len: u32,
buf: *mut u8,
}
impl WSABUF {
/// Wraps an existing buffer; the caller keeps ownership and must keep
/// the memory alive (and unmoved) while the kernel may write to it.
pub fn new(len: u32, buf: *mut u8) -> Self {
WSABUF { len, buf }
}
}
/// Mirror of the Win32 `OVERLAPPED_ENTRY` filled in by
/// `GetQueuedCompletionStatusEx` for each dequeued completion packet.
#[repr(C)]
#[derive(Debug, Clone)]
pub struct OVERLAPPED_ENTRY {
lp_completion_key: *mut usize,
lp_overlapped: *mut WSAOVERLAPPED,
internal: usize,
bytes_transferred: u32,
}
impl OVERLAPPED_ENTRY {
/// Recovers the user token for this completion. Relies on
/// `lp_overlapped` actually pointing at an `Operation` (whose first
/// field is the `WSAOVERLAPPED`, guaranteed by `#[repr(C)]`), which
/// holds for packets we queued ourselves via `wsa_recv`.
pub fn id(&self) -> Token {
// TODO: this might be solvable without sacrificing so much of Rust's safety guarantees
let operation: &Operation = unsafe { &*(self.lp_overlapped as *const Operation) };
operation.token
}
/// An all-zero entry, used to pre-fill the event buffer handed to the OS.
pub(crate) fn zeroed() -> Self {
OVERLAPPED_ENTRY {
lp_completion_key: ptr::null_mut(),
lp_overlapped: ptr::null_mut(),
internal: 0,
bytes_transferred: 0,
}
}
}
// Reference: https://docs.microsoft.com/en-us/windows/win32/api/winsock2/ns-winsock2-wsaoverlapped
#[repr(C)]
#[derive(Debug)]
pub struct WSAOVERLAPPED {
/// Reserved for internal use
internal: ULONG_PTR,
/// Reserved
internal_high: ULONG_PTR,
/// Reserved for service providers
offset: DWORD,
/// Reserved for service providers
offset_high: DWORD,
/// If an overlapped I/O operation is issued without an I/O completion routine
/// (the operation's lpCompletionRoutine parameter is set to null), then this parameter
/// should either contain a valid handle to a WSAEVENT object or be null. If the
/// lpCompletionRoutine parameter of the call is non-null then applications are free
/// to use this parameter as necessary.
h_event: HANDLE,
}
impl WSAOVERLAPPED {
/// A zero-initialized overlapped structure, as required before issuing
/// a new overlapped operation.
pub fn zeroed() -> Self {
WSAOVERLAPPED {
internal: ptr::null_mut(),
internal_high: ptr::null_mut(),
offset: 0,
offset_high: 0,
h_event: 0,
}
}
}
/// Operation is a way for us to attach additional context to the `WSAOVERLAPPED`
/// event. Inspired by [BOOST ASIO](https://www.boost.org/doc/libs/1_42_0/boost/asio/detail/win_iocp_io_service.hpp)
/// The `wsaoverlapped` member MUST remain the first field so a pointer to
/// an `Operation` can be passed where the API expects `LPWSAOVERLAPPED`
/// (see `wsa_recv` and `OVERLAPPED_ENTRY::id`).
#[derive(Debug)]
#[repr(C)]
pub struct Operation {
wsaoverlapped: WSAOVERLAPPED,
token: usize,
}
impl Operation {
/// Creates a zeroed operation carrying the registration `token`.
pub(crate) fn new(token: usize) -> Self {
Operation {
wsaoverlapped: WSAOVERLAPPED::zeroed(),
token,
}
}
}
// You can find most of these here: https://docs.microsoft.com/en-us/windows/win32/winprog/windows-data-types
/// The HANDLE type is actually a `*mut c_void` but windows preserves backwards compatibility by allowing
/// a INVALID_HANDLE_VALUE which is `-1`. We can't express that in Rust so it's much easier for us to treat
/// this as an isize instead;
pub type HANDLE = isize;
// NOTE(review): Win32 `BOOL` is a 32-bit integer, while Rust `bool` is one
// byte; this appears to work here in practice but is not a faithful ABI
// mapping — confirm, or use `i32` to be safe.
pub type BOOL = bool;
pub type WORD = u16;
pub type DWORD = u32;
pub type ULONG = u32;
pub type PULONG = *mut ULONG;
// NOTE(review): Windows defines ULONG_PTR as a pointer-sized *integer*;
// modelling it as `*mut usize` is layout-compatible but semantically odd.
pub type ULONG_PTR = *mut usize;
pub type PULONG_PTR = *mut ULONG_PTR;
pub type LPDWORD = *mut DWORD;
pub type LPWSABUF = *mut WSABUF;
pub type LPWSAOVERLAPPED = *mut WSAOVERLAPPED;
pub type LPWSAOVERLAPPED_COMPLETION_ROUTINE = *const extern "C" fn();
// https://referencesource.microsoft.com/#System.Runtime.Remoting/channels/ipc/win32namedpipes.cs,edc09ced20442fea,references
// read this! https://devblogs.microsoft.com/oldnewthing/20040302-00/?p=40443
/// Defined in `win32.h` which you can find on your windows system
pub const INVALID_HANDLE_VALUE: HANDLE = -1;
// https://docs.microsoft.com/en-us/windows/win32/winsock/windows-sockets-error-codes-2
pub const WSA_IO_PENDING: i32 = 997;
// This can also be written as `4294967295` if you look at sources on the internet.
// Interpreted as an i32 the value is -1
// see for yourself: https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=4b93de7d7eb43fa9cd7f5b60933d8935
pub const INFINITE: u32 = 0xFFFFFFFF;
#[link(name = "Kernel32")]
extern "stdcall" {
// https://docs.microsoft.com/en-us/windows/win32/fileio/createiocompletionport
fn CreateIoCompletionPort(
filehandle: HANDLE,
existing_completionport: HANDLE,
completion_key: ULONG_PTR,
number_of_concurrent_threads: DWORD,
) -> HANDLE;
// https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-wsarecv
fn WSARecv(
s: RawSocket,
lpBuffers: LPWSABUF,
dwBufferCount: DWORD,
lpNumberOfBytesRecvd: LPDWORD,
lpFlags: LPDWORD,
lpOverlapped: LPWSAOVERLAPPED,
lpCompletionRoutine: LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) -> i32;
// https://docs.microsoft.com/en-us/windows/win32/fileio/postqueuedcompletionstatus
fn PostQueuedCompletionStatus(
CompletionPort: HANDLE,
dwNumberOfBytesTransferred: DWORD,
dwCompletionKey: ULONG_PTR,
lpOverlapped: LPWSAOVERLAPPED,
) -> i32;
/// https://docs.microsoft.com/nb-no/windows/win32/api/ioapiset/nf-ioapiset-getqueuedcompletionstatus
/// Errors: https://docs.microsoft.com/nb-no/windows/win32/debug/system-error-codes--0-499-
/// From this we can see that error `WAIT_TIMEOUT` has the code 258 which we'll
/// need later on
fn GetQueuedCompletionStatusEx(
CompletionPort: HANDLE,
lpCompletionPortEntries: *mut OVERLAPPED_ENTRY,
ulCount: ULONG,
ulNumEntriesRemoved: PULONG,
dwMilliseconds: DWORD,
fAlertable: BOOL,
) -> i32;
fn GetQueuedCompletionStatus(
CompletionPort: HANDLE,
lpNumberOfBytesTransferred: LPDWORD,
lpCompletionKey: PULONG_PTR,
lpOverlapped: LPWSAOVERLAPPED,
dwMilliseconds: DWORD,
) -> i32;
// https://docs.microsoft.com/nb-no/windows/win32/api/handleapi/nf-handleapi-closehandle
fn CloseHandle(hObject: HANDLE) -> i32;
// https://docs.microsoft.com/nb-no/windows/win32/api/winsock/nf-winsock-wsagetlasterror
fn WSAGetLastError() -> i32;
}
// ===== SAFE WRAPPERS =====
/// Closes a raw Windows handle, mapping a zero return to the last OS error.
pub fn close_handle(handle: isize) -> io::Result<()> {
let res = unsafe { CloseHandle(handle) };
if res == 0 {
Err(std::io::Error::last_os_error().into())
} else {
Ok(())
}
}
/// Creates a fresh, unassociated I/O completion port.
pub fn create_completion_port() -> io::Result<isize> {
unsafe {
// number_of_concurrent_threads = 0 means use the number of physical threads but the argument is
// ignored when existing_completionport is set to null.
let res = CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, ptr::null_mut(), 0);
if (res as *mut usize).is_null() {
return Err(std::io::Error::last_os_error());
}
Ok(res)
}
}
/// Associates socket `s` with the given completion port, stashing `token`
/// as the (unused here) completion key.
/// Returns the file handle to the completion port we passed in
pub fn create_io_completion_port(
s: RawSocket,
completion_port: isize,
token: usize,
) -> io::Result<isize> {
let res =
unsafe { CreateIoCompletionPort(s as isize, completion_port, token as *mut usize, 0) };
if (res as *mut usize).is_null() {
return Err(std::io::Error::last_os_error());
}
Ok(res)
}
/// Creates a socket read event.
/// The `op` pointer is handed to the kernel and must stay valid (and not
/// move) until the matching completion packet has been dequeued.
/// ## Returns
/// The number of bytes received
pub fn wsa_recv(
s: RawSocket,
wsabuffers: &mut [WSABUF],
op: &mut Operation,
) -> Result<(), io::Error> {
let mut flags = 0;
// Valid because `Operation` is #[repr(C)] with `WSAOVERLAPPED` first.
let operation_ptr: *mut Operation = op;
let res = unsafe {
WSARecv(
s,
wsabuffers.as_mut_ptr(),
1,
ptr::null_mut(),
&mut flags,
operation_ptr as *mut WSAOVERLAPPED,
ptr::null_mut(),
)
};
if res != 0 {
let err = unsafe { WSAGetLastError() };
if err == WSA_IO_PENDING {
// Everything is OK, and we can wait this with GetQueuedCompletionStatus
Ok(())
} else {
Err(std::io::Error::last_os_error())
}
} else {
// The socket is already ready so we don't need to queue it
// TODO: Avoid queueing this
Ok(())
}
}
/// Posts a manufactured completion packet to the port; used by
/// `Registrator::close_loop` to wake a blocked `select`.
pub fn post_queued_completion_status(
completion_port: isize,
bytes_to_transfer: u32,
completion_key: usize,
overlapped_ptr: &mut WSAOVERLAPPED,
) -> io::Result<()> {
let res = unsafe {
PostQueuedCompletionStatus(
completion_port,
bytes_to_transfer,
completion_key as *mut usize,
overlapped_ptr,
)
};
if res == 0 {
Err(std::io::Error::last_os_error().into())
} else {
Ok(())
}
}
/// ## Parameters:
/// - *completion_port:* the handle to a completion port created by calling CreateIoCompletionPort
/// - *completion_port_entries:* a pointer to an array of OVERLAPPED_ENTRY structures
/// - *ul_count:* The maximum number of entries to remove
/// - *timeout:* The timeout in milliseconds, if set to NONE, timeout is set to INFINITE
/// - *alertable:* If this parameter is FALSE, the function does not return until the time-out period has elapsed or
/// an entry is retrieved. If the parameter is TRUE and there are no available entries, the function performs
/// an alertable wait. The thread returns when the system queues an I/O completion routine or APC to the thread
/// and the thread executes the function.
///
/// ## Returns
/// The number of items actually removed from the queue
///
/// NOTE(review): `Selector::select` passes a slice whose length is 0 (the
/// Vec was just cleared) while `ul_count` is the Vec's capacity, so the
/// kernel writes past the slice's length into the Vec's spare capacity —
/// confirm this invariant is upheld by every caller.
pub fn get_queued_completion_status_ex(
completion_port: isize,
completion_port_entries: &mut [OVERLAPPED_ENTRY],
ul_count: u32,
timeout: Option<u32>,
alertable: bool,
) -> io::Result<u32> {
let mut ul_num_entries_removed: u32 = 0;
// can't coerce directly to *mut *mut usize and cant cast `&mut` as `*mut`
// let completion_key_ptr: *mut &mut usize = completion_key_ptr;
// // but we can cast a `*mut ...`
// let completion_key_ptr: *mut *mut usize = completion_key_ptr as *mut *mut usize;
let timeout = timeout.unwrap_or(INFINITE);
let res = unsafe {
GetQueuedCompletionStatusEx(
completion_port,
completion_port_entries.as_mut_ptr(),
ul_count,
&mut ul_num_entries_removed,
timeout,
alertable,
)
};
if res == 0 {
Err(io::Error::last_os_error())
} else {
Ok(ul_num_entries_removed)
}
}
}
#[cfg(test)]
// NOTE(review): these are integration tests, not unit tests — they contact
// an external HTTP service (slowwly.robertomurray.co.uk) and will fail
// without network access or if that service disappears.
mod tests {
use super::*;
#[test]
fn selector_new_creates_valid_port() {
let selector = Selector::new().expect("create completion port failed");
// A valid HANDLE is a positive value; INVALID_HANDLE_VALUE is -1.
assert!(selector.completion_port > 0);
}
#[test]
fn selector_register() {
let selector = Selector::new().expect("create completion port failed");
// `false` == the poll is alive; `close_loop` would flip this to true.
let poll_is_alive = Arc::new(AtomicBool::new(false));
let registrator = selector.registrator(poll_is_alive.clone());
let mut sock: TcpStream = TcpStream::connect("slowwly.robertomurray.co.uk:80").unwrap();
// The delay endpoint keeps the response pending so the read stays queued.
let request = "GET /delay/1000/url/http://www.google.com HTTP/1.1\r\n\
Host: slowwly.robertomurray.co.uk\r\n\
Connection: close\r\n\
\r\n";
sock.write_all(request.as_bytes())
.expect("Error writing to stream");
registrator
.register(&mut sock, 1, Interests::READABLE)
.expect("Error registering sock read event");
}
#[test]
fn selector_select() {
let mut selector = Selector::new().expect("create completion port failed");
let poll_is_alive = Arc::new(AtomicBool::new(false));
let registrator = selector.registrator(poll_is_alive.clone());
let mut sock: TcpStream = TcpStream::connect("slowwly.robertomurray.co.uk:80").unwrap();
let request = "GET /delay/1000/url/http://www.google.com HTTP/1.1\r\n\
Host: slowwly.robertomurray.co.uk\r\n\
Connection: close\r\n\
\r\n";
sock.write_all(request.as_bytes())
.expect("Error writing to stream");
registrator
.register(&mut sock, 2, Interests::READABLE)
.expect("Error registering sock read event");
// Pre-allocate the event buffer: `select` relies on this capacity when
// handing the Vec's storage to the kernel.
let entry = ffi::OVERLAPPED_ENTRY::zeroed();
let mut events: Vec<ffi::OVERLAPPED_ENTRY> = vec![entry; 255];
selector.select(&mut events, None).expect("Select failed");
// Every dequeued event should carry the token we registered (2).
for event in events {
println!("COMPL_KEY: {:?}", event.id());
assert_eq!(2, event.id());
}
println!("SOCKET AFTER EVENT RETURN: {:?}", sock);
// After the completion, the data sits in the stream's internal buffer
// and can be drained through our `Read` implementation.
let mut buffer = String::new();
sock.read_to_string(&mut buffer).unwrap();
println!("BUFFERS: {}", buffer);
assert!(!buffer.is_empty())
}
}
| {
return Err(io::Error::new(
io::ErrorKind::Interrupted,
"Poll instance is dead.",
));
} | conditional_block |
windows.rs | #![allow(non_camel_case_types)]
#![allow(dead_code)]
use crate::{Interests, Token};
use std::collections::LinkedList;
use std::io::{self, Read, Write};
use std::net;
use std::os::windows::io::{AsRawSocket, RawSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
pub type Event = ffi::OVERLAPPED_ENTRY;
#[derive(Debug)]
pub struct TcpStream {
inner: net::TcpStream,
buffer: Vec<u8>,
wsabuf: Vec<ffi::WSABUF>,
event: Option<ffi::WSAOVERLAPPED>,
token: Option<usize>,
pos: usize,
operations: LinkedList<ffi::Operation>,
}
// On Windows we need to be careful when using IOCP on a server. Since we're "lending"
// access to the OS over memory we crate (we're not giving over ownership,
// but can't touch while it's lent either),
// it's easy to exploit this by issuing a lot of requests while delaying our
// responses. By doing this we would force the server to hand over so many write
// read buffers while waiting for clients to respond that it might run out of memory.
// Now the way we would normally handle this is to have a counter and limit the
// number of outstandig buffers, queueing requests and only handle them when the
// counter is below the high water mark. The same goes for using unlimited timeouts.
// http://www.serverframework.com/asynchronousevents/2011/06/tcp-flow-control-and-asynchronous-writes.html
impl TcpStream {
pub fn connect(adr: impl net::ToSocketAddrs) -> io::Result<Self> {
// This is a shortcut since this will block when establishing the connection.
// There are several ways of avoiding this.
// a) Obtrain the socket using system calls, set it to non_blocking before we connect
// b) use the crate [net2](https://docs.rs/net2/0.2.33/net2/index.html) which
// defines a trait with default implementation for TcpStream which allow us to set
// it to non-blocking before we connect
// Rust creates a WSASocket set to overlapped by default which is just what we need
// https://github.com/rust-lang/rust/blob/f86521e0a33a2b54c4c23dbfc5250013f7a33b11/src/libstd/sys/windows/net.rs#L99
let stream = net::TcpStream::connect(adr)?;
stream.set_nonblocking(true)?;
let mut buffer = vec![0_u8; 1024];
let wsabuf = vec![ffi::WSABUF::new(buffer.len() as u32, buffer.as_mut_ptr())];
Ok(TcpStream {
inner: stream,
buffer,
wsabuf,
event: None,
token: None,
pos: 0,
operations: LinkedList::new(),
})
}
}
impl Read for TcpStream {
fn read(&mut self, buff: &mut [u8]) -> io::Result<usize> {
let mut bytes_read = 0;
for (a, b) in self.buffer.iter().skip(self.pos).zip(buff) {
*b = *a;
bytes_read += 1;
}
self.pos += bytes_read;
Ok(bytes_read)
}
}
impl Write for TcpStream {
fn write(&mut self, buff: &[u8]) -> io::Result<usize> {
self.inner.write(buff)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl AsRawSocket for TcpStream {
fn as_raw_socket(&self) -> RawSocket {
self.inner.as_raw_socket()
}
}
pub struct Registrator {
completion_port: isize,
is_poll_dead: Arc<AtomicBool>,
}
impl Registrator {
pub fn register(
&self,
soc: &mut TcpStream,
token: usize,
interests: Interests,
) -> io::Result<()> {
if self.is_poll_dead.load(Ordering::SeqCst) {
return Err(io::Error::new(
io::ErrorKind::Interrupted,
"Poll instance is dead.",
));
}
ffi::create_io_completion_port(soc.as_raw_socket(), self.completion_port, 0)?;
let op = ffi::Operation::new(token);
soc.operations.push_back(op);
if interests.is_readable() {
ffi::wsa_recv(
soc.as_raw_socket(),
&mut soc.wsabuf,
soc.operations.back_mut().unwrap(),
)?;
} else {
unimplemented!();
}
Ok(())
}
/// NOTE: An alternative solution is to use the `CompletionKey` to signal that
/// this is a close event. We don't use it for anything else so it is a
/// good candidate to use for timers and special events like this
pub fn close_loop(&self) -> io::Result<()> {
if self
.is_poll_dead
.compare_and_swap(false, true, Ordering::SeqCst)
{
return Err(io::Error::new(
io::ErrorKind::Interrupted,
"Poll instance is dead.",
));
}
let mut overlapped = ffi::WSAOVERLAPPED::zeroed();
ffi::post_queued_completion_status(self.completion_port, 0, 0, &mut overlapped)?;
Ok(())
}
}
// possible Arc<InnerSelector> needed
#[derive(Debug)]
pub struct Selector {
completion_port: isize,
}
impl Selector {
pub fn new() -> io::Result<Self> {
// set up the queue
let completion_port = ffi::create_completion_port()?;
Ok(Selector { completion_port })
}
pub fn registrator(&self, is_poll_dead: Arc<AtomicBool>) -> Registrator {
Registrator {
completion_port: self.completion_port,
is_poll_dead,
}
}
/// Blocks until an Event has occured. Never times out. We could take a parameter
/// for a timeout and pass it on but we'll not do that in our example.
pub fn select(
&mut self,
events: &mut Vec<ffi::OVERLAPPED_ENTRY>,
timeout: Option<i32>,
) -> io::Result<()> {
// calling GetQueueCompletionStatus will either return a handle to a "port" ready to read or
// block if the queue is empty.
// Windows want timeout as u32 so we cast it as such
let timeout = timeout.map(|t| t as u32);
// first let's clear events for any previous events and wait until we get som more
events.clear();
let ul_count = events.capacity() as u32;
let removed_res = ffi::get_queued_completion_status_ex(
self.completion_port as isize,
events,
ul_count,
timeout,
false,
);
// We need to handle the case that the "error" was a WAIT_TIMEOUT error.
// the code for this error is 258 on Windows. We don't treat this as an error
// but set the events returned to 0.
// (i tried to do this in the `ffi` function but there was an error)
let removed = match removed_res {
Ok(n) => n,
Err(ref e) if e.raw_os_error() == Some(258) => 0,
Err(e) => return Err(e),
};
unsafe {
events.set_len(removed as usize);
}
Ok(())
}
}
impl Drop for Selector {
fn drop(&mut self) {
match ffi::close_handle(self.completion_port) {
Ok(_) => (),
Err(e) => {
if !std::thread::panicking() {
panic!(e);
}
}
}
}
}
mod ffi {
use super::*;
use std::io;
use std::os::windows::io::RawSocket;
use std::ptr;
#[repr(C)]
#[derive(Clone, Debug)]
pub struct WSABUF {
len: u32,
buf: *mut u8,
}
impl WSABUF {
pub fn new(len: u32, buf: *mut u8) -> Self {
WSABUF { len, buf }
}
}
#[repr(C)]
#[derive(Debug, Clone)]
pub struct OVERLAPPED_ENTRY {
lp_completion_key: *mut usize,
lp_overlapped: *mut WSAOVERLAPPED,
internal: usize,
bytes_transferred: u32,
}
impl OVERLAPPED_ENTRY {
pub fn id(&self) -> Token {
// TODO: this might be solvable wihtout sacrifising so much of Rust safety guarantees
let operation: &Operation = unsafe { &*(self.lp_overlapped as *const Operation) };
operation.token
}
pub(crate) fn zeroed() -> Self {
OVERLAPPED_ENTRY {
lp_completion_key: ptr::null_mut(),
lp_overlapped: ptr::null_mut(),
internal: 0,
bytes_transferred: 0,
}
}
}
// Reference: https://docs.microsoft.com/en-us/windows/win32/api/winsock2/ns-winsock2-wsaoverlapped
#[repr(C)]
#[derive(Debug)]
pub struct WSAOVERLAPPED {
/// Reserved for internal use
internal: ULONG_PTR,
/// Reserved
internal_high: ULONG_PTR,
/// Reserved for service providers
offset: DWORD,
/// Reserved for service providers
offset_high: DWORD,
/// If an overlapped I/O operation is issued without an I/O completion routine
/// (the operation's lpCompletionRoutine parameter is set to null), then this parameter
/// should either contain a valid handle to a WSAEVENT object or be null. If the
/// lpCompletionRoutine parameter of the call is non-null then applications are free
/// to use this parameter as necessary.
h_event: HANDLE,
}
impl WSAOVERLAPPED {
pub fn zeroed() -> Self {
WSAOVERLAPPED {
internal: ptr::null_mut(),
internal_high: ptr::null_mut(),
offset: 0,
offset_high: 0,
h_event: 0,
}
}
}
/// Operation is a way for us to attach additional context to the `WSAOVERLAPPED`
/// event. Inpired by [BOOST ASIO](https://www.boost.org/doc/libs/1_42_0/boost/asio/detail/win_iocp_io_service.hpp)
#[derive(Debug)]
#[repr(C)]
pub struct Operation {
wsaoverlapped: WSAOVERLAPPED,
token: usize,
}
impl Operation {
pub(crate) fn | (token: usize) -> Self {
Operation {
wsaoverlapped: WSAOVERLAPPED::zeroed(),
token,
}
}
}
// You can find most of these here: https://docs.microsoft.com/en-us/windows/win32/winprog/windows-data-types
/// The HANDLE type is actually a `*mut c_void` but windows preserves backwards compatibility by allowing
/// a INVALID_HANDLE_VALUE which is `-1`. We can't express that in Rust so it's much easier for us to treat
/// this as an isize instead;
pub type HANDLE = isize;
pub type BOOL = bool;
pub type WORD = u16;
pub type DWORD = u32;
pub type ULONG = u32;
pub type PULONG = *mut ULONG;
pub type ULONG_PTR = *mut usize;
pub type PULONG_PTR = *mut ULONG_PTR;
pub type LPDWORD = *mut DWORD;
pub type LPWSABUF = *mut WSABUF;
pub type LPWSAOVERLAPPED = *mut WSAOVERLAPPED;
pub type LPWSAOVERLAPPED_COMPLETION_ROUTINE = *const extern "C" fn();
// https://referencesource.microsoft.com/#System.Runtime.Remoting/channels/ipc/win32namedpipes.cs,edc09ced20442fea,references
// read this! https://devblogs.microsoft.com/oldnewthing/20040302-00/?p=40443
/// Defined in `win32.h` which you can find on your windows system
pub const INVALID_HANDLE_VALUE: HANDLE = -1;
// https://docs.microsoft.com/en-us/windows/win32/winsock/windows-sockets-error-codes-2
pub const WSA_IO_PENDING: i32 = 997;
// This can also be written as `4294967295` if you look at sources on the internet.
// Interpreted as an i32 the value is -1
// see for yourself: https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=4b93de7d7eb43fa9cd7f5b60933d8935
pub const INFINITE: u32 = 0xFFFFFFFF;
#[link(name = "Kernel32")]
extern "stdcall" {
// https://docs.microsoft.com/en-us/windows/win32/fileio/createiocompletionport
fn CreateIoCompletionPort(
filehandle: HANDLE,
existing_completionport: HANDLE,
completion_key: ULONG_PTR,
number_of_concurrent_threads: DWORD,
) -> HANDLE;
// https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-wsarecv
fn WSARecv(
s: RawSocket,
lpBuffers: LPWSABUF,
dwBufferCount: DWORD,
lpNumberOfBytesRecvd: LPDWORD,
lpFlags: LPDWORD,
lpOverlapped: LPWSAOVERLAPPED,
lpCompletionRoutine: LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) -> i32;
// https://docs.microsoft.com/en-us/windows/win32/fileio/postqueuedcompletionstatus
fn PostQueuedCompletionStatus(
CompletionPort: HANDLE,
dwNumberOfBytesTransferred: DWORD,
dwCompletionKey: ULONG_PTR,
lpOverlapped: LPWSAOVERLAPPED,
) -> i32;
/// https://docs.microsoft.com/nb-no/windows/win32/api/ioapiset/nf-ioapiset-getqueuedcompletionstatus
/// Errors: https://docs.microsoft.com/nb-no/windows/win32/debug/system-error-codes--0-499-
/// From this we can see that error `WAIT_TIMEOUT` has the code 258 which we'll
/// need later on
fn GetQueuedCompletionStatusEx(
CompletionPort: HANDLE,
lpCompletionPortEntries: *mut OVERLAPPED_ENTRY,
ulCount: ULONG,
ulNumEntriesRemoved: PULONG,
dwMilliseconds: DWORD,
fAlertable: BOOL,
) -> i32;
fn GetQueuedCompletionStatus(
CompletionPort: HANDLE,
lpNumberOfBytesTransferred: LPDWORD,
lpCompletionKey: PULONG_PTR,
lpOverlapped: LPWSAOVERLAPPED,
dwMilliseconds: DWORD,
) -> i32;
// https://docs.microsoft.com/nb-no/windows/win32/api/handleapi/nf-handleapi-closehandle
fn CloseHandle(hObject: HANDLE) -> i32;
// https://docs.microsoft.com/nb-no/windows/win32/api/winsock/nf-winsock-wsagetlasterror
fn WSAGetLastError() -> i32;
}
// ===== SAFE WRAPPERS =====
pub fn close_handle(handle: isize) -> io::Result<()> {
let res = unsafe { CloseHandle(handle) };
if res == 0 {
Err(std::io::Error::last_os_error().into())
} else {
Ok(())
}
}
pub fn create_completion_port() -> io::Result<isize> {
unsafe {
// number_of_concurrent_threads = 0 means use the number of physical threads but the argument is
// ignored when existing_completionport is set to null.
let res = CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, ptr::null_mut(), 0);
if (res as *mut usize).is_null() {
return Err(std::io::Error::last_os_error());
}
Ok(res)
}
}
/// Returns the file handle to the completion port we passed in
pub fn create_io_completion_port(
s: RawSocket,
completion_port: isize,
token: usize,
) -> io::Result<isize> {
let res =
unsafe { CreateIoCompletionPort(s as isize, completion_port, token as *mut usize, 0) };
if (res as *mut usize).is_null() {
return Err(std::io::Error::last_os_error());
}
Ok(res)
}
/// Creates a socket read event.
/// ## Returns
/// The number of bytes recieved
pub fn wsa_recv(
s: RawSocket,
wsabuffers: &mut [WSABUF],
op: &mut Operation,
) -> Result<(), io::Error> {
let mut flags = 0;
let operation_ptr: *mut Operation = op;
let res = unsafe {
WSARecv(
s,
wsabuffers.as_mut_ptr(),
1,
ptr::null_mut(),
&mut flags,
operation_ptr as *mut WSAOVERLAPPED,
ptr::null_mut(),
)
};
if res != 0 {
let err = unsafe { WSAGetLastError() };
if err == WSA_IO_PENDING {
// Everything is OK, and we can wait this with GetQueuedCompletionStatus
Ok(())
} else {
Err(std::io::Error::last_os_error())
}
} else {
// The socket is already ready so we don't need to queue it
// TODO: Avoid queueing this
Ok(())
}
}
pub fn post_queued_completion_status(
completion_port: isize,
bytes_to_transfer: u32,
completion_key: usize,
overlapped_ptr: &mut WSAOVERLAPPED,
) -> io::Result<()> {
let res = unsafe {
PostQueuedCompletionStatus(
completion_port,
bytes_to_transfer,
completion_key as *mut usize,
overlapped_ptr,
)
};
if res == 0 {
Err(std::io::Error::last_os_error().into())
} else {
Ok(())
}
}
/// ## Parameters:
/// - *completion_port:* the handle to a completion port created by calling CreateIoCompletionPort
/// - *completion_port_entries:* a pointer to an array of OVERLAPPED_ENTRY structures
/// - *ul_count:* The maximum number of entries to remove
/// - *timeout:* The timeout in milliseconds, if set to NONE, timeout is set to INFINITE
/// - *alertable:* If this parameter is FALSE, the function does not return until the time-out period has elapsed or
/// an entry is retrieved. If the parameter is TRUE and there are no available entries, the function performs
/// an alertable wait. The thread returns when the system queues an I/O completion routine or APC to the thread
/// and the thread executes the function.
///
/// ## Returns
/// The number of items actually removed from the queue
pub fn get_queued_completion_status_ex(
completion_port: isize,
completion_port_entries: &mut [OVERLAPPED_ENTRY],
ul_count: u32,
timeout: Option<u32>,
alertable: bool,
) -> io::Result<u32> {
let mut ul_num_entries_removed: u32 = 0;
// can't coerce directly to *mut *mut usize and cant cast `&mut` as `*mut`
// let completion_key_ptr: *mut &mut usize = completion_key_ptr;
// // but we can cast a `*mut ...`
// let completion_key_ptr: *mut *mut usize = completion_key_ptr as *mut *mut usize;
let timeout = timeout.unwrap_or(INFINITE);
let res = unsafe {
GetQueuedCompletionStatusEx(
completion_port,
completion_port_entries.as_mut_ptr(),
ul_count,
&mut ul_num_entries_removed,
timeout,
alertable,
)
};
if res == 0 {
Err(io::Error::last_os_error())
} else {
Ok(ul_num_entries_removed)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn selector_new_creates_valid_port() {
let selector = Selector::new().expect("create completion port failed");
assert!(selector.completion_port > 0);
}
#[test]
fn selector_register() {
let selector = Selector::new().expect("create completion port failed");
let poll_is_alive = Arc::new(AtomicBool::new(false));
let registrator = selector.registrator(poll_is_alive.clone());
let mut sock: TcpStream = TcpStream::connect("slowwly.robertomurray.co.uk:80").unwrap();
let request = "GET /delay/1000/url/http://www.google.com HTTP/1.1\r\n\
Host: slowwly.robertomurray.co.uk\r\n\
Connection: close\r\n\
\r\n";
sock.write_all(request.as_bytes())
.expect("Error writing to stream");
registrator
.register(&mut sock, 1, Interests::READABLE)
.expect("Error registering sock read event");
}
#[test]
fn selector_select() {
let mut selector = Selector::new().expect("create completion port failed");
let poll_is_alive = Arc::new(AtomicBool::new(false));
let registrator = selector.registrator(poll_is_alive.clone());
let mut sock: TcpStream = TcpStream::connect("slowwly.robertomurray.co.uk:80").unwrap();
let request = "GET /delay/1000/url/http://www.google.com HTTP/1.1\r\n\
Host: slowwly.robertomurray.co.uk\r\n\
Connection: close\r\n\
\r\n";
sock.write_all(request.as_bytes())
.expect("Error writing to stream");
registrator
.register(&mut sock, 2, Interests::READABLE)
.expect("Error registering sock read event");
let entry = ffi::OVERLAPPED_ENTRY::zeroed();
let mut events: Vec<ffi::OVERLAPPED_ENTRY> = vec![entry; 255];
selector.select(&mut events, None).expect("Select failed");
for event in events {
println!("COMPL_KEY: {:?}", event.id());
assert_eq!(2, event.id());
}
println!("SOCKET AFTER EVENT RETURN: {:?}", sock);
let mut buffer = String::new();
sock.read_to_string(&mut buffer).unwrap();
println!("BUFFERS: {}", buffer);
assert!(!buffer.is_empty())
}
}
| new | identifier_name |
windows.rs | #![allow(non_camel_case_types)]
#![allow(dead_code)]
use crate::{Interests, Token};
use std::collections::LinkedList;
use std::io::{self, Read, Write};
use std::net;
use std::os::windows::io::{AsRawSocket, RawSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
pub type Event = ffi::OVERLAPPED_ENTRY;
#[derive(Debug)]
pub struct TcpStream {
inner: net::TcpStream,
buffer: Vec<u8>,
wsabuf: Vec<ffi::WSABUF>,
event: Option<ffi::WSAOVERLAPPED>,
token: Option<usize>,
pos: usize,
operations: LinkedList<ffi::Operation>,
}
// On Windows we need to be careful when using IOCP on a server. Since we're "lending"
// access to the OS over memory we crate (we're not giving over ownership,
// but can't touch while it's lent either),
// it's easy to exploit this by issuing a lot of requests while delaying our
// responses. By doing this we would force the server to hand over so many write
// read buffers while waiting for clients to respond that it might run out of memory.
// Now the way we would normally handle this is to have a counter and limit the
// number of outstandig buffers, queueing requests and only handle them when the
// counter is below the high water mark. The same goes for using unlimited timeouts.
// http://www.serverframework.com/asynchronousevents/2011/06/tcp-flow-control-and-asynchronous-writes.html
impl TcpStream {
pub fn connect(adr: impl net::ToSocketAddrs) -> io::Result<Self> {
// This is a shortcut since this will block when establishing the connection.
// There are several ways of avoiding this.
// a) Obtrain the socket using system calls, set it to non_blocking before we connect
// b) use the crate [net2](https://docs.rs/net2/0.2.33/net2/index.html) which
// defines a trait with default implementation for TcpStream which allow us to set
// it to non-blocking before we connect
// Rust creates a WSASocket set to overlapped by default which is just what we need
// https://github.com/rust-lang/rust/blob/f86521e0a33a2b54c4c23dbfc5250013f7a33b11/src/libstd/sys/windows/net.rs#L99
let stream = net::TcpStream::connect(adr)?;
stream.set_nonblocking(true)?;
let mut buffer = vec![0_u8; 1024];
let wsabuf = vec![ffi::WSABUF::new(buffer.len() as u32, buffer.as_mut_ptr())];
Ok(TcpStream {
inner: stream,
buffer,
wsabuf,
event: None,
token: None,
pos: 0,
operations: LinkedList::new(),
})
}
}
impl Read for TcpStream {
fn read(&mut self, buff: &mut [u8]) -> io::Result<usize> {
let mut bytes_read = 0;
for (a, b) in self.buffer.iter().skip(self.pos).zip(buff) {
*b = *a;
bytes_read += 1;
}
self.pos += bytes_read;
Ok(bytes_read)
}
}
impl Write for TcpStream {
fn write(&mut self, buff: &[u8]) -> io::Result<usize> {
self.inner.write(buff)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl AsRawSocket for TcpStream {
fn as_raw_socket(&self) -> RawSocket {
self.inner.as_raw_socket()
}
}
pub struct Registrator {
completion_port: isize,
is_poll_dead: Arc<AtomicBool>,
}
impl Registrator {
pub fn register(
&self,
soc: &mut TcpStream,
token: usize,
interests: Interests,
) -> io::Result<()> {
if self.is_poll_dead.load(Ordering::SeqCst) {
return Err(io::Error::new(
io::ErrorKind::Interrupted,
"Poll instance is dead.",
));
}
ffi::create_io_completion_port(soc.as_raw_socket(), self.completion_port, 0)?;
let op = ffi::Operation::new(token);
soc.operations.push_back(op);
if interests.is_readable() {
ffi::wsa_recv(
soc.as_raw_socket(),
&mut soc.wsabuf,
soc.operations.back_mut().unwrap(),
)?;
} else {
unimplemented!();
}
Ok(())
}
/// NOTE: An alternative solution is to use the `CompletionKey` to signal that
/// this is a close event. We don't use it for anything else so it is a
/// good candidate to use for timers and special events like this
pub fn close_loop(&self) -> io::Result<()> {
if self
.is_poll_dead
.compare_and_swap(false, true, Ordering::SeqCst)
{
return Err(io::Error::new(
io::ErrorKind::Interrupted,
"Poll instance is dead.",
));
}
let mut overlapped = ffi::WSAOVERLAPPED::zeroed();
ffi::post_queued_completion_status(self.completion_port, 0, 0, &mut overlapped)?;
Ok(())
}
}
// possible Arc<InnerSelector> needed
#[derive(Debug)]
pub struct Selector {
completion_port: isize,
}
impl Selector {
pub fn new() -> io::Result<Self> {
// set up the queue
let completion_port = ffi::create_completion_port()?;
Ok(Selector { completion_port })
}
pub fn registrator(&self, is_poll_dead: Arc<AtomicBool>) -> Registrator {
Registrator {
completion_port: self.completion_port,
is_poll_dead,
}
}
/// Blocks until an Event has occured. Never times out. We could take a parameter
/// for a timeout and pass it on but we'll not do that in our example.
pub fn select(
&mut self,
events: &mut Vec<ffi::OVERLAPPED_ENTRY>,
timeout: Option<i32>,
) -> io::Result<()> {
// calling GetQueueCompletionStatus will either return a handle to a "port" ready to read or
// block if the queue is empty.
// Windows want timeout as u32 so we cast it as such
let timeout = timeout.map(|t| t as u32);
// first let's clear events for any previous events and wait until we get som more
events.clear();
let ul_count = events.capacity() as u32;
let removed_res = ffi::get_queued_completion_status_ex(
self.completion_port as isize,
events,
ul_count,
timeout,
false,
);
// We need to handle the case that the "error" was a WAIT_TIMEOUT error.
// the code for this error is 258 on Windows. We don't treat this as an error
// but set the events returned to 0.
// (i tried to do this in the `ffi` function but there was an error)
let removed = match removed_res {
Ok(n) => n,
Err(ref e) if e.raw_os_error() == Some(258) => 0,
Err(e) => return Err(e),
};
unsafe {
events.set_len(removed as usize);
}
Ok(())
}
}
impl Drop for Selector {
fn drop(&mut self) {
match ffi::close_handle(self.completion_port) {
Ok(_) => (),
Err(e) => {
if !std::thread::panicking() {
panic!(e);
}
}
}
}
}
mod ffi {
use super::*;
use std::io;
use std::os::windows::io::RawSocket;
use std::ptr;
#[repr(C)]
#[derive(Clone, Debug)]
pub struct WSABUF {
len: u32,
buf: *mut u8,
}
impl WSABUF {
pub fn new(len: u32, buf: *mut u8) -> Self {
WSABUF { len, buf }
}
}
#[repr(C)]
#[derive(Debug, Clone)]
pub struct OVERLAPPED_ENTRY {
lp_completion_key: *mut usize,
lp_overlapped: *mut WSAOVERLAPPED,
internal: usize,
bytes_transferred: u32,
}
impl OVERLAPPED_ENTRY {
pub fn id(&self) -> Token {
// TODO: this might be solvable wihtout sacrifising so much of Rust safety guarantees
let operation: &Operation = unsafe { &*(self.lp_overlapped as *const Operation) };
operation.token
}
pub(crate) fn zeroed() -> Self {
OVERLAPPED_ENTRY {
lp_completion_key: ptr::null_mut(),
lp_overlapped: ptr::null_mut(),
internal: 0,
bytes_transferred: 0,
}
}
}
// Reference: https://docs.microsoft.com/en-us/windows/win32/api/winsock2/ns-winsock2-wsaoverlapped
#[repr(C)]
#[derive(Debug)]
pub struct WSAOVERLAPPED {
/// Reserved for internal use
internal: ULONG_PTR,
/// Reserved
internal_high: ULONG_PTR,
/// Reserved for service providers
offset: DWORD,
/// Reserved for service providers
offset_high: DWORD,
/// If an overlapped I/O operation is issued without an I/O completion routine
/// (the operation's lpCompletionRoutine parameter is set to null), then this parameter
/// should either contain a valid handle to a WSAEVENT object or be null. If the
/// lpCompletionRoutine parameter of the call is non-null then applications are free
/// to use this parameter as necessary.
h_event: HANDLE,
}
impl WSAOVERLAPPED {
pub fn zeroed() -> Self {
WSAOVERLAPPED {
internal: ptr::null_mut(),
internal_high: ptr::null_mut(),
offset: 0,
offset_high: 0,
h_event: 0,
}
}
}
/// Operation is a way for us to attach additional context to the `WSAOVERLAPPED`
/// event. Inpired by [BOOST ASIO](https://www.boost.org/doc/libs/1_42_0/boost/asio/detail/win_iocp_io_service.hpp)
#[derive(Debug)]
#[repr(C)]
pub struct Operation {
wsaoverlapped: WSAOVERLAPPED,
token: usize,
}
impl Operation {
pub(crate) fn new(token: usize) -> Self {
Operation {
wsaoverlapped: WSAOVERLAPPED::zeroed(),
token,
}
}
}
// You can find most of these here: https://docs.microsoft.com/en-us/windows/win32/winprog/windows-data-types
/// The HANDLE type is actually a `*mut c_void` but windows preserves backwards compatibility by allowing
/// a INVALID_HANDLE_VALUE which is `-1`. We can't express that in Rust so it's much easier for us to treat
/// this as an isize instead;
pub type HANDLE = isize;
pub type BOOL = bool;
pub type WORD = u16;
pub type DWORD = u32;
pub type ULONG = u32;
pub type PULONG = *mut ULONG;
pub type ULONG_PTR = *mut usize;
pub type PULONG_PTR = *mut ULONG_PTR;
pub type LPDWORD = *mut DWORD;
pub type LPWSABUF = *mut WSABUF;
pub type LPWSAOVERLAPPED = *mut WSAOVERLAPPED;
pub type LPWSAOVERLAPPED_COMPLETION_ROUTINE = *const extern "C" fn();
// https://referencesource.microsoft.com/#System.Runtime.Remoting/channels/ipc/win32namedpipes.cs,edc09ced20442fea,references
// read this! https://devblogs.microsoft.com/oldnewthing/20040302-00/?p=40443
/// Defined in `win32.h` which you can find on your windows system
pub const INVALID_HANDLE_VALUE: HANDLE = -1;
// https://docs.microsoft.com/en-us/windows/win32/winsock/windows-sockets-error-codes-2
pub const WSA_IO_PENDING: i32 = 997;
// This can also be written as `4294967295` if you look at sources on the internet.
// Interpreted as an i32 the value is -1
// see for yourself: https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=4b93de7d7eb43fa9cd7f5b60933d8935
pub const INFINITE: u32 = 0xFFFFFFFF;
#[link(name = "Kernel32")]
extern "stdcall" {
// https://docs.microsoft.com/en-us/windows/win32/fileio/createiocompletionport
fn CreateIoCompletionPort(
filehandle: HANDLE,
existing_completionport: HANDLE,
completion_key: ULONG_PTR,
number_of_concurrent_threads: DWORD,
) -> HANDLE;
// https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-wsarecv
fn WSARecv(
s: RawSocket,
lpBuffers: LPWSABUF,
dwBufferCount: DWORD,
lpNumberOfBytesRecvd: LPDWORD,
lpFlags: LPDWORD,
lpOverlapped: LPWSAOVERLAPPED,
lpCompletionRoutine: LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) -> i32;
// https://docs.microsoft.com/en-us/windows/win32/fileio/postqueuedcompletionstatus
fn PostQueuedCompletionStatus(
CompletionPort: HANDLE,
dwNumberOfBytesTransferred: DWORD,
dwCompletionKey: ULONG_PTR,
lpOverlapped: LPWSAOVERLAPPED,
) -> i32;
/// https://docs.microsoft.com/nb-no/windows/win32/api/ioapiset/nf-ioapiset-getqueuedcompletionstatus
/// Errors: https://docs.microsoft.com/nb-no/windows/win32/debug/system-error-codes--0-499-
/// From this we can see that error `WAIT_TIMEOUT` has the code 258 which we'll
/// need later on
fn GetQueuedCompletionStatusEx(
CompletionPort: HANDLE,
lpCompletionPortEntries: *mut OVERLAPPED_ENTRY,
ulCount: ULONG,
ulNumEntriesRemoved: PULONG,
dwMilliseconds: DWORD,
fAlertable: BOOL,
) -> i32;
fn GetQueuedCompletionStatus(
CompletionPort: HANDLE,
lpNumberOfBytesTransferred: LPDWORD,
lpCompletionKey: PULONG_PTR,
lpOverlapped: LPWSAOVERLAPPED,
dwMilliseconds: DWORD,
) -> i32;
// https://docs.microsoft.com/nb-no/windows/win32/api/handleapi/nf-handleapi-closehandle
fn CloseHandle(hObject: HANDLE) -> i32;
// https://docs.microsoft.com/nb-no/windows/win32/api/winsock/nf-winsock-wsagetlasterror
fn WSAGetLastError() -> i32;
}
// ===== SAFE WRAPPERS =====
pub fn close_handle(handle: isize) -> io::Result<()> {
let res = unsafe { CloseHandle(handle) };
if res == 0 {
Err(std::io::Error::last_os_error().into())
} else {
Ok(())
}
}
pub fn create_completion_port() -> io::Result<isize> {
unsafe {
// number_of_concurrent_threads = 0 means use the number of physical threads but the argument is
// ignored when existing_completionport is set to null.
let res = CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, ptr::null_mut(), 0);
if (res as *mut usize).is_null() {
return Err(std::io::Error::last_os_error());
}
Ok(res)
}
}
/// Returns the file handle to the completion port we passed in
pub fn create_io_completion_port(
s: RawSocket,
completion_port: isize,
token: usize,
) -> io::Result<isize> {
let res =
unsafe { CreateIoCompletionPort(s as isize, completion_port, token as *mut usize, 0) };
if (res as *mut usize).is_null() {
return Err(std::io::Error::last_os_error());
}
Ok(res)
}
/// Creates a socket read event.
/// ## Returns
/// The number of bytes recieved
pub fn wsa_recv(
s: RawSocket,
wsabuffers: &mut [WSABUF],
op: &mut Operation,
) -> Result<(), io::Error> {
let mut flags = 0;
let operation_ptr: *mut Operation = op;
let res = unsafe {
WSARecv(
s,
wsabuffers.as_mut_ptr(),
1,
ptr::null_mut(),
&mut flags,
operation_ptr as *mut WSAOVERLAPPED,
ptr::null_mut(),
)
};
if res != 0 {
let err = unsafe { WSAGetLastError() };
if err == WSA_IO_PENDING {
// Everything is OK, and we can wait this with GetQueuedCompletionStatus
Ok(())
} else {
Err(std::io::Error::last_os_error())
}
} else {
// The socket is already ready so we don't need to queue it
// TODO: Avoid queueing this
Ok(())
}
}
pub fn post_queued_completion_status(
completion_port: isize,
bytes_to_transfer: u32,
completion_key: usize,
overlapped_ptr: &mut WSAOVERLAPPED,
) -> io::Result<()> {
let res = unsafe {
PostQueuedCompletionStatus(
completion_port,
bytes_to_transfer,
completion_key as *mut usize,
overlapped_ptr,
)
};
if res == 0 {
Err(std::io::Error::last_os_error().into())
} else {
Ok(())
}
}
/// ## Parameters:
/// - *completion_port:* the handle to a completion port created by calling CreateIoCompletionPort
/// - *completion_port_entries:* a pointer to an array of OVERLAPPED_ENTRY structures
/// - *ul_count:* The maximum number of entries to remove
/// - *timeout:* The timeout in milliseconds, if set to NONE, timeout is set to INFINITE
/// - *alertable:* If this parameter is FALSE, the function does not return until the time-out period has elapsed or
/// an entry is retrieved. If the parameter is TRUE and there are no available entries, the function performs
/// an alertable wait. The thread returns when the system queues an I/O completion routine or APC to the thread
/// and the thread executes the function.
///
/// ## Returns
/// The number of items actually removed from the queue
pub fn get_queued_completion_status_ex(
completion_port: isize,
completion_port_entries: &mut [OVERLAPPED_ENTRY],
ul_count: u32,
timeout: Option<u32>,
alertable: bool,
) -> io::Result<u32> |
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn selector_new_creates_valid_port() {
let selector = Selector::new().expect("create completion port failed");
assert!(selector.completion_port > 0);
}
#[test]
fn selector_register() {
let selector = Selector::new().expect("create completion port failed");
let poll_is_alive = Arc::new(AtomicBool::new(false));
let registrator = selector.registrator(poll_is_alive.clone());
let mut sock: TcpStream = TcpStream::connect("slowwly.robertomurray.co.uk:80").unwrap();
let request = "GET /delay/1000/url/http://www.google.com HTTP/1.1\r\n\
Host: slowwly.robertomurray.co.uk\r\n\
Connection: close\r\n\
\r\n";
sock.write_all(request.as_bytes())
.expect("Error writing to stream");
registrator
.register(&mut sock, 1, Interests::READABLE)
.expect("Error registering sock read event");
}
#[test]
fn selector_select() {
let mut selector = Selector::new().expect("create completion port failed");
let poll_is_alive = Arc::new(AtomicBool::new(false));
let registrator = selector.registrator(poll_is_alive.clone());
let mut sock: TcpStream = TcpStream::connect("slowwly.robertomurray.co.uk:80").unwrap();
let request = "GET /delay/1000/url/http://www.google.com HTTP/1.1\r\n\
Host: slowwly.robertomurray.co.uk\r\n\
Connection: close\r\n\
\r\n";
sock.write_all(request.as_bytes())
.expect("Error writing to stream");
registrator
.register(&mut sock, 2, Interests::READABLE)
.expect("Error registering sock read event");
let entry = ffi::OVERLAPPED_ENTRY::zeroed();
let mut events: Vec<ffi::OVERLAPPED_ENTRY> = vec![entry; 255];
selector.select(&mut events, None).expect("Select failed");
for event in events {
println!("COMPL_KEY: {:?}", event.id());
assert_eq!(2, event.id());
}
println!("SOCKET AFTER EVENT RETURN: {:?}", sock);
let mut buffer = String::new();
sock.read_to_string(&mut buffer).unwrap();
println!("BUFFERS: {}", buffer);
assert!(!buffer.is_empty())
}
}
| {
let mut ul_num_entries_removed: u32 = 0;
// can't coerce directly to *mut *mut usize and cant cast `&mut` as `*mut`
// let completion_key_ptr: *mut &mut usize = completion_key_ptr;
// // but we can cast a `*mut ...`
// let completion_key_ptr: *mut *mut usize = completion_key_ptr as *mut *mut usize;
let timeout = timeout.unwrap_or(INFINITE);
let res = unsafe {
GetQueuedCompletionStatusEx(
completion_port,
completion_port_entries.as_mut_ptr(),
ul_count,
&mut ul_num_entries_removed,
timeout,
alertable,
)
};
if res == 0 {
Err(io::Error::last_os_error())
} else {
Ok(ul_num_entries_removed)
}
} | identifier_body |
windows.rs | #![allow(non_camel_case_types)]
#![allow(dead_code)]
use crate::{Interests, Token};
use std::collections::LinkedList;
use std::io::{self, Read, Write};
use std::net;
use std::os::windows::io::{AsRawSocket, RawSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
pub type Event = ffi::OVERLAPPED_ENTRY;
#[derive(Debug)]
pub struct TcpStream {
inner: net::TcpStream,
buffer: Vec<u8>,
wsabuf: Vec<ffi::WSABUF>,
event: Option<ffi::WSAOVERLAPPED>,
token: Option<usize>,
pos: usize,
operations: LinkedList<ffi::Operation>,
}
// On Windows we need to be careful when using IOCP on a server. Since we're "lending"
// access to the OS over memory we crate (we're not giving over ownership,
// but can't touch while it's lent either),
// it's easy to exploit this by issuing a lot of requests while delaying our
// responses. By doing this we would force the server to hand over so many write
// read buffers while waiting for clients to respond that it might run out of memory.
// Now the way we would normally handle this is to have a counter and limit the
// number of outstandig buffers, queueing requests and only handle them when the
// counter is below the high water mark. The same goes for using unlimited timeouts.
// http://www.serverframework.com/asynchronousevents/2011/06/tcp-flow-control-and-asynchronous-writes.html
impl TcpStream {
pub fn connect(adr: impl net::ToSocketAddrs) -> io::Result<Self> {
// This is a shortcut since this will block when establishing the connection.
// There are several ways of avoiding this.
// a) Obtrain the socket using system calls, set it to non_blocking before we connect
// b) use the crate [net2](https://docs.rs/net2/0.2.33/net2/index.html) which
// defines a trait with default implementation for TcpStream which allow us to set
// it to non-blocking before we connect
// Rust creates a WSASocket set to overlapped by default which is just what we need
// https://github.com/rust-lang/rust/blob/f86521e0a33a2b54c4c23dbfc5250013f7a33b11/src/libstd/sys/windows/net.rs#L99
let stream = net::TcpStream::connect(adr)?;
stream.set_nonblocking(true)?;
let mut buffer = vec![0_u8; 1024];
let wsabuf = vec![ffi::WSABUF::new(buffer.len() as u32, buffer.as_mut_ptr())];
Ok(TcpStream {
inner: stream,
buffer,
wsabuf,
event: None,
token: None,
pos: 0,
operations: LinkedList::new(),
})
}
}
impl Read for TcpStream {
fn read(&mut self, buff: &mut [u8]) -> io::Result<usize> {
let mut bytes_read = 0;
for (a, b) in self.buffer.iter().skip(self.pos).zip(buff) {
*b = *a;
bytes_read += 1;
}
self.pos += bytes_read;
Ok(bytes_read)
}
}
impl Write for TcpStream {
fn write(&mut self, buff: &[u8]) -> io::Result<usize> {
self.inner.write(buff)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl AsRawSocket for TcpStream {
fn as_raw_socket(&self) -> RawSocket {
self.inner.as_raw_socket()
}
}
pub struct Registrator {
completion_port: isize,
is_poll_dead: Arc<AtomicBool>,
}
impl Registrator {
pub fn register(
&self,
soc: &mut TcpStream,
token: usize,
interests: Interests,
) -> io::Result<()> {
if self.is_poll_dead.load(Ordering::SeqCst) {
return Err(io::Error::new(
io::ErrorKind::Interrupted,
"Poll instance is dead.",
));
}
ffi::create_io_completion_port(soc.as_raw_socket(), self.completion_port, 0)?;
let op = ffi::Operation::new(token);
soc.operations.push_back(op);
if interests.is_readable() {
ffi::wsa_recv(
soc.as_raw_socket(),
&mut soc.wsabuf,
soc.operations.back_mut().unwrap(),
)?;
} else {
unimplemented!();
}
Ok(())
}
/// NOTE: An alternative solution is to use the `CompletionKey` to signal that
/// this is a close event. We don't use it for anything else so it is a
/// good candidate to use for timers and special events like this
pub fn close_loop(&self) -> io::Result<()> {
if self
.is_poll_dead
.compare_and_swap(false, true, Ordering::SeqCst)
{
return Err(io::Error::new(
io::ErrorKind::Interrupted,
"Poll instance is dead.",
));
}
let mut overlapped = ffi::WSAOVERLAPPED::zeroed();
ffi::post_queued_completion_status(self.completion_port, 0, 0, &mut overlapped)?;
Ok(())
}
}
// possible Arc<InnerSelector> needed
#[derive(Debug)]
pub struct Selector {
completion_port: isize,
}
impl Selector {
pub fn new() -> io::Result<Self> {
// set up the queue
let completion_port = ffi::create_completion_port()?;
Ok(Selector { completion_port })
}
pub fn registrator(&self, is_poll_dead: Arc<AtomicBool>) -> Registrator {
Registrator {
completion_port: self.completion_port,
is_poll_dead,
}
}
/// Blocks until an Event has occured. Never times out. We could take a parameter
/// for a timeout and pass it on but we'll not do that in our example.
pub fn select(
&mut self,
events: &mut Vec<ffi::OVERLAPPED_ENTRY>,
timeout: Option<i32>,
) -> io::Result<()> {
// calling GetQueueCompletionStatus will either return a handle to a "port" ready to read or
// block if the queue is empty.
// Windows want timeout as u32 so we cast it as such
let timeout = timeout.map(|t| t as u32);
// first let's clear events for any previous events and wait until we get som more
events.clear();
let ul_count = events.capacity() as u32;
let removed_res = ffi::get_queued_completion_status_ex(
self.completion_port as isize,
events,
ul_count,
timeout,
false,
);
// We need to handle the case that the "error" was a WAIT_TIMEOUT error.
// the code for this error is 258 on Windows. We don't treat this as an error
// but set the events returned to 0.
// (i tried to do this in the `ffi` function but there was an error)
let removed = match removed_res {
Ok(n) => n,
Err(ref e) if e.raw_os_error() == Some(258) => 0,
Err(e) => return Err(e),
};
unsafe {
events.set_len(removed as usize);
}
Ok(())
}
}
impl Drop for Selector {
fn drop(&mut self) {
match ffi::close_handle(self.completion_port) {
Ok(_) => (),
Err(e) => {
if !std::thread::panicking() {
panic!(e);
}
}
}
}
}
mod ffi {
use super::*;
use std::io;
use std::os::windows::io::RawSocket;
use std::ptr;
#[repr(C)]
#[derive(Clone, Debug)]
pub struct WSABUF {
len: u32,
buf: *mut u8,
}
impl WSABUF {
pub fn new(len: u32, buf: *mut u8) -> Self {
WSABUF { len, buf }
}
}
#[repr(C)]
#[derive(Debug, Clone)]
pub struct OVERLAPPED_ENTRY {
lp_completion_key: *mut usize,
lp_overlapped: *mut WSAOVERLAPPED,
internal: usize,
bytes_transferred: u32,
}
impl OVERLAPPED_ENTRY {
pub fn id(&self) -> Token {
// TODO: this might be solvable wihtout sacrifising so much of Rust safety guarantees
let operation: &Operation = unsafe { &*(self.lp_overlapped as *const Operation) };
operation.token
}
pub(crate) fn zeroed() -> Self {
OVERLAPPED_ENTRY {
lp_completion_key: ptr::null_mut(),
lp_overlapped: ptr::null_mut(),
internal: 0,
bytes_transferred: 0,
}
}
}
// Reference: https://docs.microsoft.com/en-us/windows/win32/api/winsock2/ns-winsock2-wsaoverlapped
#[repr(C)]
#[derive(Debug)]
pub struct WSAOVERLAPPED {
/// Reserved for internal use
internal: ULONG_PTR,
/// Reserved
internal_high: ULONG_PTR,
/// Reserved for service providers
offset: DWORD,
/// Reserved for service providers
offset_high: DWORD,
/// If an overlapped I/O operation is issued without an I/O completion routine
/// (the operation's lpCompletionRoutine parameter is set to null), then this parameter
/// should either contain a valid handle to a WSAEVENT object or be null. If the
/// lpCompletionRoutine parameter of the call is non-null then applications are free
/// to use this parameter as necessary.
h_event: HANDLE,
}
impl WSAOVERLAPPED {
pub fn zeroed() -> Self {
WSAOVERLAPPED {
internal: ptr::null_mut(),
internal_high: ptr::null_mut(),
offset: 0,
offset_high: 0,
h_event: 0,
}
}
}
/// Operation is a way for us to attach additional context to the `WSAOVERLAPPED`
/// event. Inpired by [BOOST ASIO](https://www.boost.org/doc/libs/1_42_0/boost/asio/detail/win_iocp_io_service.hpp)
#[derive(Debug)]
#[repr(C)]
pub struct Operation {
wsaoverlapped: WSAOVERLAPPED,
token: usize,
}
impl Operation {
pub(crate) fn new(token: usize) -> Self {
Operation {
wsaoverlapped: WSAOVERLAPPED::zeroed(),
token,
}
}
}
// You can find most of these here: https://docs.microsoft.com/en-us/windows/win32/winprog/windows-data-types
/// The HANDLE type is actually a `*mut c_void` but windows preserves backwards compatibility by allowing
/// a INVALID_HANDLE_VALUE which is `-1`. We can't express that in Rust so it's much easier for us to treat
/// this as an isize instead;
pub type HANDLE = isize;
pub type BOOL = bool;
pub type WORD = u16;
pub type DWORD = u32;
pub type ULONG = u32;
pub type PULONG = *mut ULONG;
pub type ULONG_PTR = *mut usize;
pub type PULONG_PTR = *mut ULONG_PTR;
pub type LPDWORD = *mut DWORD;
pub type LPWSABUF = *mut WSABUF;
pub type LPWSAOVERLAPPED = *mut WSAOVERLAPPED;
pub type LPWSAOVERLAPPED_COMPLETION_ROUTINE = *const extern "C" fn();
// https://referencesource.microsoft.com/#System.Runtime.Remoting/channels/ipc/win32namedpipes.cs,edc09ced20442fea,references
// read this! https://devblogs.microsoft.com/oldnewthing/20040302-00/?p=40443
/// Defined in `win32.h` which you can find on your windows system
pub const INVALID_HANDLE_VALUE: HANDLE = -1;
// https://docs.microsoft.com/en-us/windows/win32/winsock/windows-sockets-error-codes-2
pub const WSA_IO_PENDING: i32 = 997;
// This can also be written as `4294967295` if you look at sources on the internet.
// Interpreted as an i32 the value is -1
// see for yourself: https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=4b93de7d7eb43fa9cd7f5b60933d8935 | pub const INFINITE: u32 = 0xFFFFFFFF;
#[link(name = "Kernel32")]
extern "stdcall" {
// https://docs.microsoft.com/en-us/windows/win32/fileio/createiocompletionport
fn CreateIoCompletionPort(
filehandle: HANDLE,
existing_completionport: HANDLE,
completion_key: ULONG_PTR,
number_of_concurrent_threads: DWORD,
) -> HANDLE;
// https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-wsarecv
fn WSARecv(
s: RawSocket,
lpBuffers: LPWSABUF,
dwBufferCount: DWORD,
lpNumberOfBytesRecvd: LPDWORD,
lpFlags: LPDWORD,
lpOverlapped: LPWSAOVERLAPPED,
lpCompletionRoutine: LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) -> i32;
// https://docs.microsoft.com/en-us/windows/win32/fileio/postqueuedcompletionstatus
fn PostQueuedCompletionStatus(
CompletionPort: HANDLE,
dwNumberOfBytesTransferred: DWORD,
dwCompletionKey: ULONG_PTR,
lpOverlapped: LPWSAOVERLAPPED,
) -> i32;
/// https://docs.microsoft.com/nb-no/windows/win32/api/ioapiset/nf-ioapiset-getqueuedcompletionstatus
/// Errors: https://docs.microsoft.com/nb-no/windows/win32/debug/system-error-codes--0-499-
/// From this we can see that error `WAIT_TIMEOUT` has the code 258 which we'll
/// need later on
fn GetQueuedCompletionStatusEx(
CompletionPort: HANDLE,
lpCompletionPortEntries: *mut OVERLAPPED_ENTRY,
ulCount: ULONG,
ulNumEntriesRemoved: PULONG,
dwMilliseconds: DWORD,
fAlertable: BOOL,
) -> i32;
fn GetQueuedCompletionStatus(
CompletionPort: HANDLE,
lpNumberOfBytesTransferred: LPDWORD,
lpCompletionKey: PULONG_PTR,
lpOverlapped: LPWSAOVERLAPPED,
dwMilliseconds: DWORD,
) -> i32;
// https://docs.microsoft.com/nb-no/windows/win32/api/handleapi/nf-handleapi-closehandle
fn CloseHandle(hObject: HANDLE) -> i32;
// https://docs.microsoft.com/nb-no/windows/win32/api/winsock/nf-winsock-wsagetlasterror
fn WSAGetLastError() -> i32;
}
// ===== SAFE WRAPPERS =====
pub fn close_handle(handle: isize) -> io::Result<()> {
let res = unsafe { CloseHandle(handle) };
if res == 0 {
Err(std::io::Error::last_os_error().into())
} else {
Ok(())
}
}
pub fn create_completion_port() -> io::Result<isize> {
unsafe {
// number_of_concurrent_threads = 0 means use the number of physical threads but the argument is
// ignored when existing_completionport is set to null.
let res = CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, ptr::null_mut(), 0);
if (res as *mut usize).is_null() {
return Err(std::io::Error::last_os_error());
}
Ok(res)
}
}
/// Returns the file handle to the completion port we passed in
pub fn create_io_completion_port(
s: RawSocket,
completion_port: isize,
token: usize,
) -> io::Result<isize> {
let res =
unsafe { CreateIoCompletionPort(s as isize, completion_port, token as *mut usize, 0) };
if (res as *mut usize).is_null() {
return Err(std::io::Error::last_os_error());
}
Ok(res)
}
/// Creates a socket read event.
/// ## Returns
/// The number of bytes recieved
pub fn wsa_recv(
s: RawSocket,
wsabuffers: &mut [WSABUF],
op: &mut Operation,
) -> Result<(), io::Error> {
let mut flags = 0;
let operation_ptr: *mut Operation = op;
let res = unsafe {
WSARecv(
s,
wsabuffers.as_mut_ptr(),
1,
ptr::null_mut(),
&mut flags,
operation_ptr as *mut WSAOVERLAPPED,
ptr::null_mut(),
)
};
if res != 0 {
let err = unsafe { WSAGetLastError() };
if err == WSA_IO_PENDING {
// Everything is OK, and we can wait this with GetQueuedCompletionStatus
Ok(())
} else {
Err(std::io::Error::last_os_error())
}
} else {
// The socket is already ready so we don't need to queue it
// TODO: Avoid queueing this
Ok(())
}
}
pub fn post_queued_completion_status(
completion_port: isize,
bytes_to_transfer: u32,
completion_key: usize,
overlapped_ptr: &mut WSAOVERLAPPED,
) -> io::Result<()> {
let res = unsafe {
PostQueuedCompletionStatus(
completion_port,
bytes_to_transfer,
completion_key as *mut usize,
overlapped_ptr,
)
};
if res == 0 {
Err(std::io::Error::last_os_error().into())
} else {
Ok(())
}
}
/// ## Parameters:
/// - *completion_port:* the handle to a completion port created by calling CreateIoCompletionPort
/// - *completion_port_entries:* a pointer to an array of OVERLAPPED_ENTRY structures
/// - *ul_count:* The maximum number of entries to remove
/// - *timeout:* The timeout in milliseconds, if set to NONE, timeout is set to INFINITE
/// - *alertable:* If this parameter is FALSE, the function does not return until the time-out period has elapsed or
/// an entry is retrieved. If the parameter is TRUE and there are no available entries, the function performs
/// an alertable wait. The thread returns when the system queues an I/O completion routine or APC to the thread
/// and the thread executes the function.
///
/// ## Returns
/// The number of items actually removed from the queue
pub fn get_queued_completion_status_ex(
completion_port: isize,
completion_port_entries: &mut [OVERLAPPED_ENTRY],
ul_count: u32,
timeout: Option<u32>,
alertable: bool,
) -> io::Result<u32> {
let mut ul_num_entries_removed: u32 = 0;
// can't coerce directly to *mut *mut usize and cant cast `&mut` as `*mut`
// let completion_key_ptr: *mut &mut usize = completion_key_ptr;
// // but we can cast a `*mut ...`
// let completion_key_ptr: *mut *mut usize = completion_key_ptr as *mut *mut usize;
let timeout = timeout.unwrap_or(INFINITE);
let res = unsafe {
GetQueuedCompletionStatusEx(
completion_port,
completion_port_entries.as_mut_ptr(),
ul_count,
&mut ul_num_entries_removed,
timeout,
alertable,
)
};
if res == 0 {
Err(io::Error::last_os_error())
} else {
Ok(ul_num_entries_removed)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn selector_new_creates_valid_port() {
let selector = Selector::new().expect("create completion port failed");
assert!(selector.completion_port > 0);
}
#[test]
fn selector_register() {
let selector = Selector::new().expect("create completion port failed");
let poll_is_alive = Arc::new(AtomicBool::new(false));
let registrator = selector.registrator(poll_is_alive.clone());
let mut sock: TcpStream = TcpStream::connect("slowwly.robertomurray.co.uk:80").unwrap();
let request = "GET /delay/1000/url/http://www.google.com HTTP/1.1\r\n\
Host: slowwly.robertomurray.co.uk\r\n\
Connection: close\r\n\
\r\n";
sock.write_all(request.as_bytes())
.expect("Error writing to stream");
registrator
.register(&mut sock, 1, Interests::READABLE)
.expect("Error registering sock read event");
}
#[test]
fn selector_select() {
let mut selector = Selector::new().expect("create completion port failed");
let poll_is_alive = Arc::new(AtomicBool::new(false));
let registrator = selector.registrator(poll_is_alive.clone());
let mut sock: TcpStream = TcpStream::connect("slowwly.robertomurray.co.uk:80").unwrap();
let request = "GET /delay/1000/url/http://www.google.com HTTP/1.1\r\n\
Host: slowwly.robertomurray.co.uk\r\n\
Connection: close\r\n\
\r\n";
sock.write_all(request.as_bytes())
.expect("Error writing to stream");
registrator
.register(&mut sock, 2, Interests::READABLE)
.expect("Error registering sock read event");
let entry = ffi::OVERLAPPED_ENTRY::zeroed();
let mut events: Vec<ffi::OVERLAPPED_ENTRY> = vec![entry; 255];
selector.select(&mut events, None).expect("Select failed");
for event in events {
println!("COMPL_KEY: {:?}", event.id());
assert_eq!(2, event.id());
}
println!("SOCKET AFTER EVENT RETURN: {:?}", sock);
let mut buffer = String::new();
sock.read_to_string(&mut buffer).unwrap();
println!("BUFFERS: {}", buffer);
assert!(!buffer.is_empty())
}
} | random_line_split | |
igor_test.go | /*
Copyright 2016-2023 Paolo Galeone. All right reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package igor_test
import (
"database/sql"
"fmt"
"log"
"os"
"reflect"
"strconv"
"testing"
"time"
"github.com/galeone/igor"
)
var db *igor.Database
var e error
// Create a user igor and a db igor writeable by igor before to run tests
// Define models
type Profile struct {
Counter uint64 `igor:"primary_key"`
Website string
Quotes string
Biography string
Github string
Skype string
Jabber string
Yahoo string
Userscript string
Template uint8
MobileTemplate uint8
Dateformat string
Facebook string
Twitter string
Steam string
Push bool
Pushregtime time.Time `sql:"default:(now() at time zone 'utc')"`
Closed bool
}
// TableName returns the table name associated with the structure
func (Profile) TableName() string {
return "profiles"
}
// The User type do not have every field with a counter part on the db side
// as you can see in init(). The non present fields, have a default value associated and handled by the DBMS
type User struct {
Counter uint64 `igor:"primary_key"`
Last time.Time `sql:"default:(now() at time zone 'utc')"`
NotifyStory igor.JSON `sql:"default:'{}'::jsonb"`
Private bool
Lang string `sql:"default:en"`
Username string
Password string
Email string
Name string
Surname string
Gender bool
BirthDate time.Time `sql:"default:(now() at time zone 'utc')"`
BoardLang string `sql:"default:en"`
Timezone string
Viewonline bool
RegistrationTime time.Time `sql:"default:(now() at time zone 'utc')"`
// Relation. Manually fill the field when required
Profile Profile `sql:"-"`
// Nullable foreign key relationship
OtherTableID sql.NullInt64
}
// TableName returns the table name associated with the structure
func (User) TableName() string {
return "users"
}
type NestMe struct {
ID int64 `igor:"primary_key"`
OverwriteMe int64
SliceOfString []string
SliceOfInt64 []int64
}
type NestTable struct {
NestMe
OverwriteMe int64 `sql:"-"`
}
// TableName returns the table name associated with the structure
func (NestTable) TableName() string {
return "nest_table"
}
func init() {
if db, e = igor.Connect("user=donotexists dbname=wat sslmode=error"); e == nil {
panic("Connect with a wrong connection string should fail, but succeeded")
}
connectionString := "host=localhost port=5432 user=igor dbname=igor password=igor sslmode=disable connect_timeout=10"
if db, e = igor.Connect(connectionString); e != nil {
panic(e.Error())
}
// Test igor.Wrap
var connection *sql.DB
if connection, e = sql.Open("postgres", connectionString); e != nil {
panic(fmt.Sprintf("unable to connect to the databse with default connection string: %s", connectionString))
}
if _, e := igor.Wrap(connection); e != nil {
panic(fmt.Sprintf("Wrap: %s", e))
}
// Exec raw query to create tables and test transactions (and Exec)
tx := db.Begin()
e = tx.Exec("DROP TABLE IF EXISTS users CASCADE")
if e != nil {
panic(e.Error())
}
e = tx.Exec("DROP TABLE IF EXISTS nest_table CASCADE; DROP TABLE IF EXISTS other_table CASCADE;")
if e != nil {
panic(e.Error())
}
e = tx.Exec(`
CREATE TABLE other_table(
id bigserial not null primary key,
random_value text
);
CREATE TABLE users (
counter bigserial NOT NULL PRIMARY KEY,
last timestamp without time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
notify_story jsonb DEFAULT '{}'::jsonb NOT NULL,
private boolean DEFAULT false NOT NULL,
lang character varying(2) DEFAULT 'en'::character varying NOT NULL,
username character varying(90) NOT NULL,
password character varying(60) NOT NULL,
name character varying(60) NOT NULL,
surname character varying(60) NOT NULL,
email character varying(350) NOT NULL,
gender boolean NOT NULL,
birth_date date NOT NULL,
board_lang character varying(2) DEFAULT 'en'::character varying NOT NULL,
timezone character varying(35) DEFAULT 'UTC'::character varying NOT NULL,
viewonline boolean DEFAULT true NOT NULL,
remote_addr inet DEFAULT '127.0.0.1'::inet NOT NULL,
http_user_agent text DEFAULT ''::text NOT NULL,
registration_time timestamp(0) with time zone DEFAULT now() NOT NULL,
-- NULLABLE FK
other_table_id bigint references other_table(id)
)`)
if e != nil {
panic(e.Error())
}
// Exec can work with multiple statements if there are not parameters
// and thus we are not using prepared statements.
e = tx.Exec(`DROP TABLE IF EXISTS profiles CASCADE;
CREATE TABLE profiles (
counter bigserial NOT NULL PRIMARY KEY,
website character varying(350) DEFAULT ''::character varying NOT NULL,
quotes text DEFAULT ''::text NOT NULL,
biography text DEFAULT ''::text NOT NULL,
github character varying(350) DEFAULT ''::character varying NOT NULL,
skype character varying(350) DEFAULT ''::character varying NOT NULL,
jabber character varying(350) DEFAULT ''::character varying NOT NULL,
yahoo character varying(350) DEFAULT ''::character varying NOT NULL,
userscript character varying(128) DEFAULT ''::character varying NOT NULL,
template smallint DEFAULT 0 NOT NULL,
dateformat character varying(25) DEFAULT 'd/m/Y, H:i'::character varying NOT NULL,
facebook character varying(350) DEFAULT ''::character varying NOT NULL,
twitter character varying(350) DEFAULT ''::character varying NOT NULL,
steam character varying(350) DEFAULT ''::character varying NOT NULL,
push boolean DEFAULT false NOT NULL,
pushregtime timestamp without time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
mobile_template smallint DEFAULT 1 NOT NULL,
closed boolean DEFAULT false NOT NULL,
template_variables jsonb DEFAULT '{}'::jsonb NOT NULL
)`)
if e != nil {
panic(e.Error())
}
e = tx.Exec("ALTER TABLE profiles ADD CONSTRAINT profiles_users_fk FOREIGN KEY(counter) references users(counter) ON DELETE CASCADE")
if e != nil {
panic(e.Error())
}
e = tx.Exec("CREATE TABLE nest_table(id bigserial not null PRIMARY KEY, slice_of_string text[] not null, slice_of_int64 bigint[] not null)")
if e != nil {
panic(e.Error())
}
if e = tx.Commit(); e != nil {
panic(e.Error())
}
logger := log.New(os.Stdout, "igor-log: ", log.LUTC)
db.Log(logger)
}
// createUser creates a test user (since the primary key is a bigserial, each call creates a new user)
func createUser() User {
user := User{
Username: "igor",
Password: "please store hashed password",
Name: "Paolo",
Surname: "Galeone",
Email: "please validate the @email . com",
Gender: true,
BirthDate: time.Now(),
}
if e = db.Create(&user); e != nil {
panic(fmt.Sprintf("Create(&user) filling fields having no default should work, but got: %s\n", e.Error()))
}
return user
}
// createProfile creates the profile for a test user (since the primary key is a bigserial, each call creates a new user)
func createProfile(id uint64) Profile {
profile := Profile{Counter: id}
if e = db.Create(&profile); e != nil {
panic(fmt.Sprintf("Create(&profile) failed: %s\n", e.Error()))
}
return profile
}
func TestPanicWhenCallingOnEmptyModel(t *testing.T) {
panicNumber := 0
defer func() {
// catch panic of db.Model(nil)
if r := recover(); r != nil {
if panicNumber == 0 {
t.Log("All right")
panicNumber++
} else {
t.Error("Too many panics")
}
}
}()
// must panic
db.Model(nil)
}
func TestCreateWithNestedStruct(t *testing.T) |
func TestModelCreateUpdatesSelectDelete(t *testing.T) {
if db.Create(&User{}) == nil {
t.Error("Create an user without assign a value to fields that have no default should fail")
}
user := createUser()
user.Profile = createProfile(user.Counter)
// First
var p Profile
if e = db.First(&p, uint64(99)); e == nil {
t.Errorf("Expected First to return an error when there are no rows to fetch, but succeeded: %v", p)
}
zeroValue := Profile{}
if !reflect.DeepEqual(p, zeroValue) {
t.Errorf("After a failed First, the input parameter should remain unchanged, but are different. Got %v expected %v", p, zeroValue)
}
if e = db.First(&p, user.Counter); e != nil {
t.Errorf("First failed: %s\n", e.Error())
}
if !reflect.DeepEqual(p, user.Profile) {
t.Error("Fetched profile should be deep equals to the created profile")
}
if user.Lang != "en" {
t.Errorf("Auto update of struct fields having default values on the DBMS should work, but failed. Expected lang=en got %s", user.Lang)
}
// change user language
user.Lang = "it"
if e = db.Updates(&user); e != nil {
t.Errorf("Updates should work but got: %s\n", e.Error())
}
// Scan without parameters should fail
if e = db.Model(User{}).Select("lang").Where(user).Scan(); e == nil {
t.Error("Scan without a parameter should fail, but succeeded")
}
// Select lang stored in the db
var lang string
if e = db.Model(User{}).Select("lang").Where(user).Scan(&lang); e != nil {
t.Errorf("Scan failed: %s\n", e.Error())
}
if lang != "it" {
t.Errorf("The fetched language (%s) is different to the expected one (%s)\n", lang, user.Lang)
}
if e = db.Delete(&user); e != nil {
t.Errorf("Delete of a user (using the primary key) should work, but got: %s\n", e.Error())
}
// Now user is empty. Thus a new .Delete(&user) should fail
if e = db.Delete(&user); e == nil {
t.Error("Delete of an empty object should fail, but succeeded")
}
}
func TestJoinsTableSelectDeleteWhere(t *testing.T) {
// create 6 user and profiles
var ids []uint64
for i := 0; i < 6; i++ {
user := createUser()
ids = append(ids, user.Counter)
createProfile(user.Counter)
}
var users []User
if e = db.Model(User{}).Scan(&users); e != nil {
t.Errorf("Scan on structs should work but got: %s\n", e.Error())
}
if len(users) != 6 {
t.Errorf("Expected 6 users but got: %d\n", len(users))
}
var fetchedIds []uint64
if e = db.Model(User{}).Order("counter asc").Pluck("counter", &fetchedIds); e != nil {
t.Errorf("Pluck should work but got: %s\n", e.Error())
}
for i := 0; i < 6; i++ {
if ids[i] != fetchedIds[i] {
t.Errorf("Expected %d in position %d but got: %d\n", ids[i], i, fetchedIds[i])
}
}
// select $1::int, $2::int, $3::it, counter from users join profiles on user.counter = profiles.counter
// where user.counter = $4
var one, two, three, four int
u := (User{}).TableName()
p := (Profile{}).TableName()
if e = db.Select("?::int, ?::int, ?::int, "+u+".counter", 1, 2, 3).
Table(u).
Joins("JOIN "+p+" ON "+u+".counter = "+p+".counter").
Where(&User{Counter: 4}).Scan(&one, &two, &three, &four); e != nil {
t.Error(e.Error())
}
db.Log(nil)
if one != 1 || two != 2 || three != 3 || four != 4 {
t.Errorf("problem in scanning results, expected 1,2,3,4 got: %d,%d,%d,%d", one, two, three, four)
}
// Count
var count uint8
if e = db.Model(User{}).Count(&count); e != nil {
t.Errorf("problem counting users: %s\n", e.Error())
}
if count != 6 {
t.Errorf("Problem with count. Expected 6 users but counted %d", count)
}
if e = db.Where("counter IN (?)", ids).Delete(User{}); e != nil {
t.Errorf("delete in range should work but got: %s\n", e.Error())
}
// clear slice and pluck again
fetchedIds = nil
_ = db.Model(User{}).Order("counter asc").Pluck("counter", &fetchedIds)
if len(fetchedIds) != 0 {
t.Errorf("delete in range failed, pluck returned ids that must have been deleted")
}
}
func TestJSON(t *testing.T) {
user := createUser()
var emptyJSON = make(igor.JSON)
if !reflect.DeepEqual(user.NotifyStory, emptyJSON) {
t.Errorf("JSON notifyStory should be empty but got: %s instead of %s\n", user.NotifyStory, emptyJSON)
}
var ns = make(igor.JSON)
ns["0"] = struct {
From uint64 `json:"from"`
To uint64 `json:"to"`
Message string `json:"message"`
}{
From: 1,
To: 1,
Message: "hi bob",
}
ns["numbers"] = 1
ns["test"] = 2
user.NotifyStory = ns
if e = db.Updates(&user); e != nil {
t.Errorf("updates should work but got: %s\n", e.Error())
}
// To use JSON with json, use:
// printableJSON, _ := json.Marshal(user.NotifyStory)
// fmt.Printf("%s\n", printableJSON)
var nsNew igor.JSON
if e = db.Model(User{}).Select("notify_story").Where(&user).Scan(&nsNew); e != nil {
t.Errorf("Problem scanning into igor.JSON: %s\n", e.Error())
}
if !reflect.DeepEqual(ns, nsNew) {
t.Errorf("fetched notify story is different from the saved one\n%s vs %s", ns, nsNew)
}
if e = db.Delete(&user); e != nil {
t.Errorf("Delete should work but returned %s", e.Error())
}
}
func TestNotifications(t *testing.T) {
count := 0
if e = db.Listen("notification_without_payload", func(payload ...string) {
count++
t.Log("Received notification on channel: notification_without_payload\n")
}); e != nil {
t.Fatalf("Unable to listen on channel: %s\n", e.Error())
}
for i := 0; i < 4; i++ {
if e = db.Notify("notification_without_payload"); e != nil {
t.Fatalf("Unable to send notification: %s\n", e.Error())
}
}
// wait some time to handle all notifications
time.Sleep(100 * time.Millisecond)
if count != 4 {
t.Errorf("Expected to receive 4 notifications, but counted only: %d\n", count)
}
// listen on an opened channel should fail
if e = db.Listen("notification_without_payload", func(payload ...string) {}); e == nil {
t.Errorf("Listen on an opened channel should fail, but succeeded\n")
}
// Handle payload
// listen on more channels, with payload
count = 0
if e = db.Listen("np", func(payload ...string) {
count++
t.Logf("channel np: received payload: %s\n", payload)
}); e != nil {
t.Fatalf("Unable to listen on channel: %s\n", e.Error())
}
// test sending payload with notify
for i := 0; i < 4; i++ {
if e = db.Notify("np", strconv.Itoa(i)+" payload"); e != nil {
t.Fatalf("Unable to send notification with payload: %s\n", e.Error())
}
}
// wait some time to handle all notifications
time.Sleep(100 * time.Millisecond)
if count != 4 {
t.Errorf("Expected to receive 4 notifications, but counted only: %d\n", count)
}
// test unlisten
if e = db.Unlisten("notification_without_payload"); e != nil {
t.Errorf("Unable to unlisten from notification_without_payload, got: %s\n", e.Error())
}
// test UnlistenAll
if e = db.UnlistenAll(); e != nil {
t.Errorf("Unable to unlistenAll, got: %s\n", e.Error())
}
}
func TestCTE(t *testing.T) {
createUser()
createUser()
createUser()
var usernames []string
e = db.CTE(`WITH full_users_id AS (
SELECT counter FROM users WHERE name = ?)`, "Paolo").Table("full_users_id as fui").Select("username").Joins("JOIN users ON fui.counter = users.counter").Scan(&usernames)
if e != nil {
t.Fatalf(e.Error())
}
if len(usernames) != 3 {
t.Fatalf("Expected 3, but got: %d\n", len(usernames))
}
if e = db.Model(User{}).Where("name", "Paolo").Delete(User{}); e != nil {
t.Errorf("Delete should work but returned %s", e.Error())
}
}
| {
row := NestTable{}
row.ID = 1
row.SliceOfInt64 = []int64{1, 2}
row.SliceOfString = []string{"slice", "support yeah"}
if e = db.Create(&row); e != nil {
t.Errorf("Inserting a new row with a type that uses a nested struct should be possible. But got %v", e)
}
} | identifier_body |
igor_test.go | /*
Copyright 2016-2023 Paolo Galeone. All right reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package igor_test
import (
"database/sql"
"fmt"
"log"
"os"
"reflect"
"strconv"
"testing"
"time"
"github.com/galeone/igor"
)
var db *igor.Database
var e error
// Create a user igor and a db igor writeable by igor before to run tests
// Define models
type Profile struct {
Counter uint64 `igor:"primary_key"`
Website string
Quotes string
Biography string
Github string
Skype string
Jabber string
Yahoo string
Userscript string
Template uint8
MobileTemplate uint8
Dateformat string
Facebook string
Twitter string
Steam string
Push bool
Pushregtime time.Time `sql:"default:(now() at time zone 'utc')"`
Closed bool
}
// TableName returns the table name associated with the structure
func (Profile) TableName() string {
return "profiles"
}
// The User type do not have every field with a counter part on the db side
// as you can see in init(). The non present fields, have a default value associated and handled by the DBMS
type User struct {
Counter uint64 `igor:"primary_key"`
Last time.Time `sql:"default:(now() at time zone 'utc')"`
NotifyStory igor.JSON `sql:"default:'{}'::jsonb"`
Private bool
Lang string `sql:"default:en"`
Username string
Password string
Email string
Name string
Surname string
Gender bool
BirthDate time.Time `sql:"default:(now() at time zone 'utc')"`
BoardLang string `sql:"default:en"`
Timezone string
Viewonline bool
RegistrationTime time.Time `sql:"default:(now() at time zone 'utc')"`
// Relation. Manually fill the field when required
Profile Profile `sql:"-"`
// Nullable foreign key relationship
OtherTableID sql.NullInt64
}
// TableName returns the table name associated with the structure
func (User) TableName() string {
return "users"
}
type NestMe struct {
ID int64 `igor:"primary_key"`
OverwriteMe int64
SliceOfString []string
SliceOfInt64 []int64
}
type NestTable struct {
NestMe
OverwriteMe int64 `sql:"-"`
}
// TableName returns the table name associated with the structure
func (NestTable) | () string {
return "nest_table"
}
func init() {
if db, e = igor.Connect("user=donotexists dbname=wat sslmode=error"); e == nil {
panic("Connect with a wrong connection string should fail, but succeeded")
}
connectionString := "host=localhost port=5432 user=igor dbname=igor password=igor sslmode=disable connect_timeout=10"
if db, e = igor.Connect(connectionString); e != nil {
panic(e.Error())
}
// Test igor.Wrap
var connection *sql.DB
if connection, e = sql.Open("postgres", connectionString); e != nil {
panic(fmt.Sprintf("unable to connect to the databse with default connection string: %s", connectionString))
}
if _, e := igor.Wrap(connection); e != nil {
panic(fmt.Sprintf("Wrap: %s", e))
}
// Exec raw query to create tables and test transactions (and Exec)
tx := db.Begin()
e = tx.Exec("DROP TABLE IF EXISTS users CASCADE")
if e != nil {
panic(e.Error())
}
e = tx.Exec("DROP TABLE IF EXISTS nest_table CASCADE; DROP TABLE IF EXISTS other_table CASCADE;")
if e != nil {
panic(e.Error())
}
e = tx.Exec(`
CREATE TABLE other_table(
id bigserial not null primary key,
random_value text
);
CREATE TABLE users (
counter bigserial NOT NULL PRIMARY KEY,
last timestamp without time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
notify_story jsonb DEFAULT '{}'::jsonb NOT NULL,
private boolean DEFAULT false NOT NULL,
lang character varying(2) DEFAULT 'en'::character varying NOT NULL,
username character varying(90) NOT NULL,
password character varying(60) NOT NULL,
name character varying(60) NOT NULL,
surname character varying(60) NOT NULL,
email character varying(350) NOT NULL,
gender boolean NOT NULL,
birth_date date NOT NULL,
board_lang character varying(2) DEFAULT 'en'::character varying NOT NULL,
timezone character varying(35) DEFAULT 'UTC'::character varying NOT NULL,
viewonline boolean DEFAULT true NOT NULL,
remote_addr inet DEFAULT '127.0.0.1'::inet NOT NULL,
http_user_agent text DEFAULT ''::text NOT NULL,
registration_time timestamp(0) with time zone DEFAULT now() NOT NULL,
-- NULLABLE FK
other_table_id bigint references other_table(id)
)`)
if e != nil {
panic(e.Error())
}
// Exec can work with multiple statements if there are not parameters
// and thus we are not using prepared statements.
e = tx.Exec(`DROP TABLE IF EXISTS profiles CASCADE;
CREATE TABLE profiles (
counter bigserial NOT NULL PRIMARY KEY,
website character varying(350) DEFAULT ''::character varying NOT NULL,
quotes text DEFAULT ''::text NOT NULL,
biography text DEFAULT ''::text NOT NULL,
github character varying(350) DEFAULT ''::character varying NOT NULL,
skype character varying(350) DEFAULT ''::character varying NOT NULL,
jabber character varying(350) DEFAULT ''::character varying NOT NULL,
yahoo character varying(350) DEFAULT ''::character varying NOT NULL,
userscript character varying(128) DEFAULT ''::character varying NOT NULL,
template smallint DEFAULT 0 NOT NULL,
dateformat character varying(25) DEFAULT 'd/m/Y, H:i'::character varying NOT NULL,
facebook character varying(350) DEFAULT ''::character varying NOT NULL,
twitter character varying(350) DEFAULT ''::character varying NOT NULL,
steam character varying(350) DEFAULT ''::character varying NOT NULL,
push boolean DEFAULT false NOT NULL,
pushregtime timestamp without time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
mobile_template smallint DEFAULT 1 NOT NULL,
closed boolean DEFAULT false NOT NULL,
template_variables jsonb DEFAULT '{}'::jsonb NOT NULL
)`)
if e != nil {
panic(e.Error())
}
e = tx.Exec("ALTER TABLE profiles ADD CONSTRAINT profiles_users_fk FOREIGN KEY(counter) references users(counter) ON DELETE CASCADE")
if e != nil {
panic(e.Error())
}
e = tx.Exec("CREATE TABLE nest_table(id bigserial not null PRIMARY KEY, slice_of_string text[] not null, slice_of_int64 bigint[] not null)")
if e != nil {
panic(e.Error())
}
if e = tx.Commit(); e != nil {
panic(e.Error())
}
logger := log.New(os.Stdout, "igor-log: ", log.LUTC)
db.Log(logger)
}
// createUser creates a test user (since the primary key is a bigserial, each call creates a new user)
func createUser() User {
user := User{
Username: "igor",
Password: "please store hashed password",
Name: "Paolo",
Surname: "Galeone",
Email: "please validate the @email . com",
Gender: true,
BirthDate: time.Now(),
}
if e = db.Create(&user); e != nil {
panic(fmt.Sprintf("Create(&user) filling fields having no default should work, but got: %s\n", e.Error()))
}
return user
}
// createProfile creates the profile for a test user (since the primary key is a bigserial, each call creates a new user)
func createProfile(id uint64) Profile {
profile := Profile{Counter: id}
if e = db.Create(&profile); e != nil {
panic(fmt.Sprintf("Create(&profile) failed: %s\n", e.Error()))
}
return profile
}
func TestPanicWhenCallingOnEmptyModel(t *testing.T) {
panicNumber := 0
defer func() {
// catch panic of db.Model(nil)
if r := recover(); r != nil {
if panicNumber == 0 {
t.Log("All right")
panicNumber++
} else {
t.Error("Too many panics")
}
}
}()
// must panic
db.Model(nil)
}
func TestCreateWithNestedStruct(t *testing.T) {
row := NestTable{}
row.ID = 1
row.SliceOfInt64 = []int64{1, 2}
row.SliceOfString = []string{"slice", "support yeah"}
if e = db.Create(&row); e != nil {
t.Errorf("Inserting a new row with a type that uses a nested struct should be possible. But got %v", e)
}
}
func TestModelCreateUpdatesSelectDelete(t *testing.T) {
if db.Create(&User{}) == nil {
t.Error("Create an user without assign a value to fields that have no default should fail")
}
user := createUser()
user.Profile = createProfile(user.Counter)
// First
var p Profile
if e = db.First(&p, uint64(99)); e == nil {
t.Errorf("Expected First to return an error when there are no rows to fetch, but succeeded: %v", p)
}
zeroValue := Profile{}
if !reflect.DeepEqual(p, zeroValue) {
t.Errorf("After a failed First, the input parameter should remain unchanged, but are different. Got %v expected %v", p, zeroValue)
}
if e = db.First(&p, user.Counter); e != nil {
t.Errorf("First failed: %s\n", e.Error())
}
if !reflect.DeepEqual(p, user.Profile) {
t.Error("Fetched profile should be deep equals to the created profile")
}
if user.Lang != "en" {
t.Errorf("Auto update of struct fields having default values on the DBMS should work, but failed. Expected lang=en got %s", user.Lang)
}
// change user language
user.Lang = "it"
if e = db.Updates(&user); e != nil {
t.Errorf("Updates should work but got: %s\n", e.Error())
}
// Scan without parameters should fail
if e = db.Model(User{}).Select("lang").Where(user).Scan(); e == nil {
t.Error("Scan without a parameter should fail, but succeeded")
}
// Select lang stored in the db
var lang string
if e = db.Model(User{}).Select("lang").Where(user).Scan(&lang); e != nil {
t.Errorf("Scan failed: %s\n", e.Error())
}
if lang != "it" {
t.Errorf("The fetched language (%s) is different to the expected one (%s)\n", lang, user.Lang)
}
if e = db.Delete(&user); e != nil {
t.Errorf("Delete of a user (using the primary key) should work, but got: %s\n", e.Error())
}
// Now user is empty. Thus a new .Delete(&user) should fail
if e = db.Delete(&user); e == nil {
t.Error("Delete of an empty object should fail, but succeeded")
}
}
func TestJoinsTableSelectDeleteWhere(t *testing.T) {
// create 6 user and profiles
var ids []uint64
for i := 0; i < 6; i++ {
user := createUser()
ids = append(ids, user.Counter)
createProfile(user.Counter)
}
var users []User
if e = db.Model(User{}).Scan(&users); e != nil {
t.Errorf("Scan on structs should work but got: %s\n", e.Error())
}
if len(users) != 6 {
t.Errorf("Expected 6 users but got: %d\n", len(users))
}
var fetchedIds []uint64
if e = db.Model(User{}).Order("counter asc").Pluck("counter", &fetchedIds); e != nil {
t.Errorf("Pluck should work but got: %s\n", e.Error())
}
for i := 0; i < 6; i++ {
if ids[i] != fetchedIds[i] {
t.Errorf("Expected %d in position %d but got: %d\n", ids[i], i, fetchedIds[i])
}
}
// select $1::int, $2::int, $3::it, counter from users join profiles on user.counter = profiles.counter
// where user.counter = $4
var one, two, three, four int
u := (User{}).TableName()
p := (Profile{}).TableName()
if e = db.Select("?::int, ?::int, ?::int, "+u+".counter", 1, 2, 3).
Table(u).
Joins("JOIN "+p+" ON "+u+".counter = "+p+".counter").
Where(&User{Counter: 4}).Scan(&one, &two, &three, &four); e != nil {
t.Error(e.Error())
}
db.Log(nil)
if one != 1 || two != 2 || three != 3 || four != 4 {
t.Errorf("problem in scanning results, expected 1,2,3,4 got: %d,%d,%d,%d", one, two, three, four)
}
// Count
var count uint8
if e = db.Model(User{}).Count(&count); e != nil {
t.Errorf("problem counting users: %s\n", e.Error())
}
if count != 6 {
t.Errorf("Problem with count. Expected 6 users but counted %d", count)
}
if e = db.Where("counter IN (?)", ids).Delete(User{}); e != nil {
t.Errorf("delete in range should work but got: %s\n", e.Error())
}
// clear slice and pluck again
fetchedIds = nil
_ = db.Model(User{}).Order("counter asc").Pluck("counter", &fetchedIds)
if len(fetchedIds) != 0 {
t.Errorf("delete in range failed, pluck returned ids that must have been deleted")
}
}
func TestJSON(t *testing.T) {
user := createUser()
var emptyJSON = make(igor.JSON)
if !reflect.DeepEqual(user.NotifyStory, emptyJSON) {
t.Errorf("JSON notifyStory should be empty but got: %s instead of %s\n", user.NotifyStory, emptyJSON)
}
var ns = make(igor.JSON)
ns["0"] = struct {
From uint64 `json:"from"`
To uint64 `json:"to"`
Message string `json:"message"`
}{
From: 1,
To: 1,
Message: "hi bob",
}
ns["numbers"] = 1
ns["test"] = 2
user.NotifyStory = ns
if e = db.Updates(&user); e != nil {
t.Errorf("updates should work but got: %s\n", e.Error())
}
// To use JSON with json, use:
// printableJSON, _ := json.Marshal(user.NotifyStory)
// fmt.Printf("%s\n", printableJSON)
var nsNew igor.JSON
if e = db.Model(User{}).Select("notify_story").Where(&user).Scan(&nsNew); e != nil {
t.Errorf("Problem scanning into igor.JSON: %s\n", e.Error())
}
if !reflect.DeepEqual(ns, nsNew) {
t.Errorf("fetched notify story is different from the saved one\n%s vs %s", ns, nsNew)
}
if e = db.Delete(&user); e != nil {
t.Errorf("Delete should work but returned %s", e.Error())
}
}
func TestNotifications(t *testing.T) {
count := 0
if e = db.Listen("notification_without_payload", func(payload ...string) {
count++
t.Log("Received notification on channel: notification_without_payload\n")
}); e != nil {
t.Fatalf("Unable to listen on channel: %s\n", e.Error())
}
for i := 0; i < 4; i++ {
if e = db.Notify("notification_without_payload"); e != nil {
t.Fatalf("Unable to send notification: %s\n", e.Error())
}
}
// wait some time to handle all notifications
time.Sleep(100 * time.Millisecond)
if count != 4 {
t.Errorf("Expected to receive 4 notifications, but counted only: %d\n", count)
}
// listen on an opened channel should fail
if e = db.Listen("notification_without_payload", func(payload ...string) {}); e == nil {
t.Errorf("Listen on an opened channel should fail, but succeeded\n")
}
// Handle payload
// listen on more channels, with payload
count = 0
if e = db.Listen("np", func(payload ...string) {
count++
t.Logf("channel np: received payload: %s\n", payload)
}); e != nil {
t.Fatalf("Unable to listen on channel: %s\n", e.Error())
}
// test sending payload with notify
for i := 0; i < 4; i++ {
if e = db.Notify("np", strconv.Itoa(i)+" payload"); e != nil {
t.Fatalf("Unable to send notification with payload: %s\n", e.Error())
}
}
// wait some time to handle all notifications
time.Sleep(100 * time.Millisecond)
if count != 4 {
t.Errorf("Expected to receive 4 notifications, but counted only: %d\n", count)
}
// test unlisten
if e = db.Unlisten("notification_without_payload"); e != nil {
t.Errorf("Unable to unlisten from notification_without_payload, got: %s\n", e.Error())
}
// test UnlistenAll
if e = db.UnlistenAll(); e != nil {
t.Errorf("Unable to unlistenAll, got: %s\n", e.Error())
}
}
func TestCTE(t *testing.T) {
createUser()
createUser()
createUser()
var usernames []string
e = db.CTE(`WITH full_users_id AS (
SELECT counter FROM users WHERE name = ?)`, "Paolo").Table("full_users_id as fui").Select("username").Joins("JOIN users ON fui.counter = users.counter").Scan(&usernames)
if e != nil {
t.Fatalf(e.Error())
}
if len(usernames) != 3 {
t.Fatalf("Expected 3, but got: %d\n", len(usernames))
}
if e = db.Model(User{}).Where("name", "Paolo").Delete(User{}); e != nil {
t.Errorf("Delete should work but returned %s", e.Error())
}
}
| TableName | identifier_name |
igor_test.go | /*
Copyright 2016-2023 Paolo Galeone. All right reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. |
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package igor_test
import (
"database/sql"
"fmt"
"log"
"os"
"reflect"
"strconv"
"testing"
"time"
"github.com/galeone/igor"
)
var db *igor.Database
var e error
// Create a user igor and a db igor writeable by igor before to run tests
// Define models
type Profile struct {
Counter uint64 `igor:"primary_key"`
Website string
Quotes string
Biography string
Github string
Skype string
Jabber string
Yahoo string
Userscript string
Template uint8
MobileTemplate uint8
Dateformat string
Facebook string
Twitter string
Steam string
Push bool
Pushregtime time.Time `sql:"default:(now() at time zone 'utc')"`
Closed bool
}
// TableName returns the table name associated with the structure
func (Profile) TableName() string {
return "profiles"
}
// The User type do not have every field with a counter part on the db side
// as you can see in init(). The non present fields, have a default value associated and handled by the DBMS
type User struct {
Counter uint64 `igor:"primary_key"`
Last time.Time `sql:"default:(now() at time zone 'utc')"`
NotifyStory igor.JSON `sql:"default:'{}'::jsonb"`
Private bool
Lang string `sql:"default:en"`
Username string
Password string
Email string
Name string
Surname string
Gender bool
BirthDate time.Time `sql:"default:(now() at time zone 'utc')"`
BoardLang string `sql:"default:en"`
Timezone string
Viewonline bool
RegistrationTime time.Time `sql:"default:(now() at time zone 'utc')"`
// Relation. Manually fill the field when required
Profile Profile `sql:"-"`
// Nullable foreign key relationship
OtherTableID sql.NullInt64
}
// TableName returns the table name associated with the structure
func (User) TableName() string {
return "users"
}
type NestMe struct {
ID int64 `igor:"primary_key"`
OverwriteMe int64
SliceOfString []string
SliceOfInt64 []int64
}
type NestTable struct {
NestMe
OverwriteMe int64 `sql:"-"`
}
// TableName returns the table name associated with the structure
func (NestTable) TableName() string {
return "nest_table"
}
func init() {
if db, e = igor.Connect("user=donotexists dbname=wat sslmode=error"); e == nil {
panic("Connect with a wrong connection string should fail, but succeeded")
}
connectionString := "host=localhost port=5432 user=igor dbname=igor password=igor sslmode=disable connect_timeout=10"
if db, e = igor.Connect(connectionString); e != nil {
panic(e.Error())
}
// Test igor.Wrap
var connection *sql.DB
if connection, e = sql.Open("postgres", connectionString); e != nil {
panic(fmt.Sprintf("unable to connect to the databse with default connection string: %s", connectionString))
}
if _, e := igor.Wrap(connection); e != nil {
panic(fmt.Sprintf("Wrap: %s", e))
}
// Exec raw query to create tables and test transactions (and Exec)
tx := db.Begin()
e = tx.Exec("DROP TABLE IF EXISTS users CASCADE")
if e != nil {
panic(e.Error())
}
e = tx.Exec("DROP TABLE IF EXISTS nest_table CASCADE; DROP TABLE IF EXISTS other_table CASCADE;")
if e != nil {
panic(e.Error())
}
e = tx.Exec(`
CREATE TABLE other_table(
id bigserial not null primary key,
random_value text
);
CREATE TABLE users (
counter bigserial NOT NULL PRIMARY KEY,
last timestamp without time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
notify_story jsonb DEFAULT '{}'::jsonb NOT NULL,
private boolean DEFAULT false NOT NULL,
lang character varying(2) DEFAULT 'en'::character varying NOT NULL,
username character varying(90) NOT NULL,
password character varying(60) NOT NULL,
name character varying(60) NOT NULL,
surname character varying(60) NOT NULL,
email character varying(350) NOT NULL,
gender boolean NOT NULL,
birth_date date NOT NULL,
board_lang character varying(2) DEFAULT 'en'::character varying NOT NULL,
timezone character varying(35) DEFAULT 'UTC'::character varying NOT NULL,
viewonline boolean DEFAULT true NOT NULL,
remote_addr inet DEFAULT '127.0.0.1'::inet NOT NULL,
http_user_agent text DEFAULT ''::text NOT NULL,
registration_time timestamp(0) with time zone DEFAULT now() NOT NULL,
-- NULLABLE FK
other_table_id bigint references other_table(id)
)`)
if e != nil {
panic(e.Error())
}
// Exec can work with multiple statements if there are not parameters
// and thus we are not using prepared statements.
e = tx.Exec(`DROP TABLE IF EXISTS profiles CASCADE;
CREATE TABLE profiles (
counter bigserial NOT NULL PRIMARY KEY,
website character varying(350) DEFAULT ''::character varying NOT NULL,
quotes text DEFAULT ''::text NOT NULL,
biography text DEFAULT ''::text NOT NULL,
github character varying(350) DEFAULT ''::character varying NOT NULL,
skype character varying(350) DEFAULT ''::character varying NOT NULL,
jabber character varying(350) DEFAULT ''::character varying NOT NULL,
yahoo character varying(350) DEFAULT ''::character varying NOT NULL,
userscript character varying(128) DEFAULT ''::character varying NOT NULL,
template smallint DEFAULT 0 NOT NULL,
dateformat character varying(25) DEFAULT 'd/m/Y, H:i'::character varying NOT NULL,
facebook character varying(350) DEFAULT ''::character varying NOT NULL,
twitter character varying(350) DEFAULT ''::character varying NOT NULL,
steam character varying(350) DEFAULT ''::character varying NOT NULL,
push boolean DEFAULT false NOT NULL,
pushregtime timestamp without time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
mobile_template smallint DEFAULT 1 NOT NULL,
closed boolean DEFAULT false NOT NULL,
template_variables jsonb DEFAULT '{}'::jsonb NOT NULL
)`)
if e != nil {
panic(e.Error())
}
e = tx.Exec("ALTER TABLE profiles ADD CONSTRAINT profiles_users_fk FOREIGN KEY(counter) references users(counter) ON DELETE CASCADE")
if e != nil {
panic(e.Error())
}
e = tx.Exec("CREATE TABLE nest_table(id bigserial not null PRIMARY KEY, slice_of_string text[] not null, slice_of_int64 bigint[] not null)")
if e != nil {
panic(e.Error())
}
if e = tx.Commit(); e != nil {
panic(e.Error())
}
logger := log.New(os.Stdout, "igor-log: ", log.LUTC)
db.Log(logger)
}
// createUser creates a test user (since the primary key is a bigserial, each call creates a new user)
func createUser() User {
user := User{
Username: "igor",
Password: "please store hashed password",
Name: "Paolo",
Surname: "Galeone",
Email: "please validate the @email . com",
Gender: true,
BirthDate: time.Now(),
}
if e = db.Create(&user); e != nil {
panic(fmt.Sprintf("Create(&user) filling fields having no default should work, but got: %s\n", e.Error()))
}
return user
}
// createProfile creates the profile for a test user (since the primary key is a bigserial, each call creates a new user)
func createProfile(id uint64) Profile {
profile := Profile{Counter: id}
if e = db.Create(&profile); e != nil {
panic(fmt.Sprintf("Create(&profile) failed: %s\n", e.Error()))
}
return profile
}
func TestPanicWhenCallingOnEmptyModel(t *testing.T) {
panicNumber := 0
defer func() {
// catch panic of db.Model(nil)
if r := recover(); r != nil {
if panicNumber == 0 {
t.Log("All right")
panicNumber++
} else {
t.Error("Too many panics")
}
}
}()
// must panic
db.Model(nil)
}
func TestCreateWithNestedStruct(t *testing.T) {
row := NestTable{}
row.ID = 1
row.SliceOfInt64 = []int64{1, 2}
row.SliceOfString = []string{"slice", "support yeah"}
if e = db.Create(&row); e != nil {
t.Errorf("Inserting a new row with a type that uses a nested struct should be possible. But got %v", e)
}
}
func TestModelCreateUpdatesSelectDelete(t *testing.T) {
if db.Create(&User{}) == nil {
t.Error("Create an user without assign a value to fields that have no default should fail")
}
user := createUser()
user.Profile = createProfile(user.Counter)
// First
var p Profile
if e = db.First(&p, uint64(99)); e == nil {
t.Errorf("Expected First to return an error when there are no rows to fetch, but succeeded: %v", p)
}
zeroValue := Profile{}
if !reflect.DeepEqual(p, zeroValue) {
t.Errorf("After a failed First, the input parameter should remain unchanged, but are different. Got %v expected %v", p, zeroValue)
}
if e = db.First(&p, user.Counter); e != nil {
t.Errorf("First failed: %s\n", e.Error())
}
if !reflect.DeepEqual(p, user.Profile) {
t.Error("Fetched profile should be deep equals to the created profile")
}
if user.Lang != "en" {
t.Errorf("Auto update of struct fields having default values on the DBMS should work, but failed. Expected lang=en got %s", user.Lang)
}
// change user language
user.Lang = "it"
if e = db.Updates(&user); e != nil {
t.Errorf("Updates should work but got: %s\n", e.Error())
}
// Scan without parameters should fail
if e = db.Model(User{}).Select("lang").Where(user).Scan(); e == nil {
t.Error("Scan without a parameter should fail, but succeeded")
}
// Select lang stored in the db
var lang string
if e = db.Model(User{}).Select("lang").Where(user).Scan(&lang); e != nil {
t.Errorf("Scan failed: %s\n", e.Error())
}
if lang != "it" {
t.Errorf("The fetched language (%s) is different to the expected one (%s)\n", lang, user.Lang)
}
if e = db.Delete(&user); e != nil {
t.Errorf("Delete of a user (using the primary key) should work, but got: %s\n", e.Error())
}
// Now user is empty. Thus a new .Delete(&user) should fail
if e = db.Delete(&user); e == nil {
t.Error("Delete of an empty object should fail, but succeeded")
}
}
func TestJoinsTableSelectDeleteWhere(t *testing.T) {
// create 6 user and profiles
var ids []uint64
for i := 0; i < 6; i++ {
user := createUser()
ids = append(ids, user.Counter)
createProfile(user.Counter)
}
var users []User
if e = db.Model(User{}).Scan(&users); e != nil {
t.Errorf("Scan on structs should work but got: %s\n", e.Error())
}
if len(users) != 6 {
t.Errorf("Expected 6 users but got: %d\n", len(users))
}
var fetchedIds []uint64
if e = db.Model(User{}).Order("counter asc").Pluck("counter", &fetchedIds); e != nil {
t.Errorf("Pluck should work but got: %s\n", e.Error())
}
for i := 0; i < 6; i++ {
if ids[i] != fetchedIds[i] {
t.Errorf("Expected %d in position %d but got: %d\n", ids[i], i, fetchedIds[i])
}
}
// select $1::int, $2::int, $3::it, counter from users join profiles on user.counter = profiles.counter
// where user.counter = $4
var one, two, three, four int
u := (User{}).TableName()
p := (Profile{}).TableName()
if e = db.Select("?::int, ?::int, ?::int, "+u+".counter", 1, 2, 3).
Table(u).
Joins("JOIN "+p+" ON "+u+".counter = "+p+".counter").
Where(&User{Counter: 4}).Scan(&one, &two, &three, &four); e != nil {
t.Error(e.Error())
}
db.Log(nil)
if one != 1 || two != 2 || three != 3 || four != 4 {
t.Errorf("problem in scanning results, expected 1,2,3,4 got: %d,%d,%d,%d", one, two, three, four)
}
// Count
var count uint8
if e = db.Model(User{}).Count(&count); e != nil {
t.Errorf("problem counting users: %s\n", e.Error())
}
if count != 6 {
t.Errorf("Problem with count. Expected 6 users but counted %d", count)
}
if e = db.Where("counter IN (?)", ids).Delete(User{}); e != nil {
t.Errorf("delete in range should work but got: %s\n", e.Error())
}
// clear slice and pluck again
fetchedIds = nil
_ = db.Model(User{}).Order("counter asc").Pluck("counter", &fetchedIds)
if len(fetchedIds) != 0 {
t.Errorf("delete in range failed, pluck returned ids that must have been deleted")
}
}
func TestJSON(t *testing.T) {
user := createUser()
var emptyJSON = make(igor.JSON)
if !reflect.DeepEqual(user.NotifyStory, emptyJSON) {
t.Errorf("JSON notifyStory should be empty but got: %s instead of %s\n", user.NotifyStory, emptyJSON)
}
var ns = make(igor.JSON)
ns["0"] = struct {
From uint64 `json:"from"`
To uint64 `json:"to"`
Message string `json:"message"`
}{
From: 1,
To: 1,
Message: "hi bob",
}
ns["numbers"] = 1
ns["test"] = 2
user.NotifyStory = ns
if e = db.Updates(&user); e != nil {
t.Errorf("updates should work but got: %s\n", e.Error())
}
// To use JSON with json, use:
// printableJSON, _ := json.Marshal(user.NotifyStory)
// fmt.Printf("%s\n", printableJSON)
var nsNew igor.JSON
if e = db.Model(User{}).Select("notify_story").Where(&user).Scan(&nsNew); e != nil {
t.Errorf("Problem scanning into igor.JSON: %s\n", e.Error())
}
if !reflect.DeepEqual(ns, nsNew) {
t.Errorf("fetched notify story is different from the saved one\n%s vs %s", ns, nsNew)
}
if e = db.Delete(&user); e != nil {
t.Errorf("Delete should work but returned %s", e.Error())
}
}
func TestNotifications(t *testing.T) {
count := 0
if e = db.Listen("notification_without_payload", func(payload ...string) {
count++
t.Log("Received notification on channel: notification_without_payload\n")
}); e != nil {
t.Fatalf("Unable to listen on channel: %s\n", e.Error())
}
for i := 0; i < 4; i++ {
if e = db.Notify("notification_without_payload"); e != nil {
t.Fatalf("Unable to send notification: %s\n", e.Error())
}
}
// wait some time to handle all notifications
time.Sleep(100 * time.Millisecond)
if count != 4 {
t.Errorf("Expected to receive 4 notifications, but counted only: %d\n", count)
}
// listen on an opened channel should fail
if e = db.Listen("notification_without_payload", func(payload ...string) {}); e == nil {
t.Errorf("Listen on an opened channel should fail, but succeeded\n")
}
// Handle payload
// listen on more channels, with payload
count = 0
if e = db.Listen("np", func(payload ...string) {
count++
t.Logf("channel np: received payload: %s\n", payload)
}); e != nil {
t.Fatalf("Unable to listen on channel: %s\n", e.Error())
}
// test sending payload with notify
for i := 0; i < 4; i++ {
if e = db.Notify("np", strconv.Itoa(i)+" payload"); e != nil {
t.Fatalf("Unable to send notification with payload: %s\n", e.Error())
}
}
// wait some time to handle all notifications
time.Sleep(100 * time.Millisecond)
if count != 4 {
t.Errorf("Expected to receive 4 notifications, but counted only: %d\n", count)
}
// test unlisten
if e = db.Unlisten("notification_without_payload"); e != nil {
t.Errorf("Unable to unlisten from notification_without_payload, got: %s\n", e.Error())
}
// test UnlistenAll
if e = db.UnlistenAll(); e != nil {
t.Errorf("Unable to unlistenAll, got: %s\n", e.Error())
}
}
func TestCTE(t *testing.T) {
createUser()
createUser()
createUser()
var usernames []string
e = db.CTE(`WITH full_users_id AS (
SELECT counter FROM users WHERE name = ?)`, "Paolo").Table("full_users_id as fui").Select("username").Joins("JOIN users ON fui.counter = users.counter").Scan(&usernames)
if e != nil {
t.Fatalf(e.Error())
}
if len(usernames) != 3 {
t.Fatalf("Expected 3, but got: %d\n", len(usernames))
}
if e = db.Model(User{}).Where("name", "Paolo").Delete(User{}); e != nil {
t.Errorf("Delete should work but returned %s", e.Error())
}
} | You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 | random_line_split |
igor_test.go | /*
Copyright 2016-2023 Paolo Galeone. All right reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package igor_test
import (
"database/sql"
"fmt"
"log"
"os"
"reflect"
"strconv"
"testing"
"time"
"github.com/galeone/igor"
)
var db *igor.Database
var e error
// Create a user igor and a db igor writeable by igor before to run tests
// Define models
type Profile struct {
Counter uint64 `igor:"primary_key"`
Website string
Quotes string
Biography string
Github string
Skype string
Jabber string
Yahoo string
Userscript string
Template uint8
MobileTemplate uint8
Dateformat string
Facebook string
Twitter string
Steam string
Push bool
Pushregtime time.Time `sql:"default:(now() at time zone 'utc')"`
Closed bool
}
// TableName returns the table name associated with the structure
func (Profile) TableName() string {
return "profiles"
}
// The User type do not have every field with a counter part on the db side
// as you can see in init(). The non present fields, have a default value associated and handled by the DBMS
type User struct {
Counter uint64 `igor:"primary_key"`
Last time.Time `sql:"default:(now() at time zone 'utc')"`
NotifyStory igor.JSON `sql:"default:'{}'::jsonb"`
Private bool
Lang string `sql:"default:en"`
Username string
Password string
Email string
Name string
Surname string
Gender bool
BirthDate time.Time `sql:"default:(now() at time zone 'utc')"`
BoardLang string `sql:"default:en"`
Timezone string
Viewonline bool
RegistrationTime time.Time `sql:"default:(now() at time zone 'utc')"`
// Relation. Manually fill the field when required
Profile Profile `sql:"-"`
// Nullable foreign key relationship
OtherTableID sql.NullInt64
}
// TableName returns the table name associated with the structure
func (User) TableName() string {
return "users"
}
type NestMe struct {
ID int64 `igor:"primary_key"`
OverwriteMe int64
SliceOfString []string
SliceOfInt64 []int64
}
type NestTable struct {
NestMe
OverwriteMe int64 `sql:"-"`
}
// TableName returns the table name associated with the structure
func (NestTable) TableName() string {
return "nest_table"
}
func init() {
if db, e = igor.Connect("user=donotexists dbname=wat sslmode=error"); e == nil {
panic("Connect with a wrong connection string should fail, but succeeded")
}
connectionString := "host=localhost port=5432 user=igor dbname=igor password=igor sslmode=disable connect_timeout=10"
if db, e = igor.Connect(connectionString); e != nil {
panic(e.Error())
}
// Test igor.Wrap
var connection *sql.DB
if connection, e = sql.Open("postgres", connectionString); e != nil {
panic(fmt.Sprintf("unable to connect to the databse with default connection string: %s", connectionString))
}
if _, e := igor.Wrap(connection); e != nil {
panic(fmt.Sprintf("Wrap: %s", e))
}
// Exec raw query to create tables and test transactions (and Exec)
tx := db.Begin()
e = tx.Exec("DROP TABLE IF EXISTS users CASCADE")
if e != nil {
panic(e.Error())
}
e = tx.Exec("DROP TABLE IF EXISTS nest_table CASCADE; DROP TABLE IF EXISTS other_table CASCADE;")
if e != nil {
panic(e.Error())
}
e = tx.Exec(`
CREATE TABLE other_table(
id bigserial not null primary key,
random_value text
);
CREATE TABLE users (
counter bigserial NOT NULL PRIMARY KEY,
last timestamp without time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
notify_story jsonb DEFAULT '{}'::jsonb NOT NULL,
private boolean DEFAULT false NOT NULL,
lang character varying(2) DEFAULT 'en'::character varying NOT NULL,
username character varying(90) NOT NULL,
password character varying(60) NOT NULL,
name character varying(60) NOT NULL,
surname character varying(60) NOT NULL,
email character varying(350) NOT NULL,
gender boolean NOT NULL,
birth_date date NOT NULL,
board_lang character varying(2) DEFAULT 'en'::character varying NOT NULL,
timezone character varying(35) DEFAULT 'UTC'::character varying NOT NULL,
viewonline boolean DEFAULT true NOT NULL,
remote_addr inet DEFAULT '127.0.0.1'::inet NOT NULL,
http_user_agent text DEFAULT ''::text NOT NULL,
registration_time timestamp(0) with time zone DEFAULT now() NOT NULL,
-- NULLABLE FK
other_table_id bigint references other_table(id)
)`)
if e != nil {
panic(e.Error())
}
// Exec can work with multiple statements if there are not parameters
// and thus we are not using prepared statements.
e = tx.Exec(`DROP TABLE IF EXISTS profiles CASCADE;
CREATE TABLE profiles (
counter bigserial NOT NULL PRIMARY KEY,
website character varying(350) DEFAULT ''::character varying NOT NULL,
quotes text DEFAULT ''::text NOT NULL,
biography text DEFAULT ''::text NOT NULL,
github character varying(350) DEFAULT ''::character varying NOT NULL,
skype character varying(350) DEFAULT ''::character varying NOT NULL,
jabber character varying(350) DEFAULT ''::character varying NOT NULL,
yahoo character varying(350) DEFAULT ''::character varying NOT NULL,
userscript character varying(128) DEFAULT ''::character varying NOT NULL,
template smallint DEFAULT 0 NOT NULL,
dateformat character varying(25) DEFAULT 'd/m/Y, H:i'::character varying NOT NULL,
facebook character varying(350) DEFAULT ''::character varying NOT NULL,
twitter character varying(350) DEFAULT ''::character varying NOT NULL,
steam character varying(350) DEFAULT ''::character varying NOT NULL,
push boolean DEFAULT false NOT NULL,
pushregtime timestamp without time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
mobile_template smallint DEFAULT 1 NOT NULL,
closed boolean DEFAULT false NOT NULL,
template_variables jsonb DEFAULT '{}'::jsonb NOT NULL
)`)
if e != nil {
panic(e.Error())
}
e = tx.Exec("ALTER TABLE profiles ADD CONSTRAINT profiles_users_fk FOREIGN KEY(counter) references users(counter) ON DELETE CASCADE")
if e != nil {
panic(e.Error())
}
e = tx.Exec("CREATE TABLE nest_table(id bigserial not null PRIMARY KEY, slice_of_string text[] not null, slice_of_int64 bigint[] not null)")
if e != nil {
panic(e.Error())
}
if e = tx.Commit(); e != nil {
panic(e.Error())
}
logger := log.New(os.Stdout, "igor-log: ", log.LUTC)
db.Log(logger)
}
// createUser creates a test user (since the primary key is a bigserial, each call creates a new user)
func createUser() User {
user := User{
Username: "igor",
Password: "please store hashed password",
Name: "Paolo",
Surname: "Galeone",
Email: "please validate the @email . com",
Gender: true,
BirthDate: time.Now(),
}
if e = db.Create(&user); e != nil {
panic(fmt.Sprintf("Create(&user) filling fields having no default should work, but got: %s\n", e.Error()))
}
return user
}
// createProfile creates the profile for a test user (since the primary key is a bigserial, each call creates a new user)
func createProfile(id uint64) Profile {
profile := Profile{Counter: id}
if e = db.Create(&profile); e != nil {
panic(fmt.Sprintf("Create(&profile) failed: %s\n", e.Error()))
}
return profile
}
func TestPanicWhenCallingOnEmptyModel(t *testing.T) {
panicNumber := 0
defer func() {
// catch panic of db.Model(nil)
if r := recover(); r != nil {
if panicNumber == 0 {
t.Log("All right")
panicNumber++
} else {
t.Error("Too many panics")
}
}
}()
// must panic
db.Model(nil)
}
func TestCreateWithNestedStruct(t *testing.T) {
row := NestTable{}
row.ID = 1
row.SliceOfInt64 = []int64{1, 2}
row.SliceOfString = []string{"slice", "support yeah"}
if e = db.Create(&row); e != nil {
t.Errorf("Inserting a new row with a type that uses a nested struct should be possible. But got %v", e)
}
}
func TestModelCreateUpdatesSelectDelete(t *testing.T) {
if db.Create(&User{}) == nil {
t.Error("Create an user without assign a value to fields that have no default should fail")
}
user := createUser()
user.Profile = createProfile(user.Counter)
// First
var p Profile
if e = db.First(&p, uint64(99)); e == nil {
t.Errorf("Expected First to return an error when there are no rows to fetch, but succeeded: %v", p)
}
zeroValue := Profile{}
if !reflect.DeepEqual(p, zeroValue) {
t.Errorf("After a failed First, the input parameter should remain unchanged, but are different. Got %v expected %v", p, zeroValue)
}
if e = db.First(&p, user.Counter); e != nil {
t.Errorf("First failed: %s\n", e.Error())
}
if !reflect.DeepEqual(p, user.Profile) {
t.Error("Fetched profile should be deep equals to the created profile")
}
if user.Lang != "en" {
t.Errorf("Auto update of struct fields having default values on the DBMS should work, but failed. Expected lang=en got %s", user.Lang)
}
// change user language
user.Lang = "it"
if e = db.Updates(&user); e != nil {
t.Errorf("Updates should work but got: %s\n", e.Error())
}
// Scan without parameters should fail
if e = db.Model(User{}).Select("lang").Where(user).Scan(); e == nil {
t.Error("Scan without a parameter should fail, but succeeded")
}
// Select lang stored in the db
var lang string
if e = db.Model(User{}).Select("lang").Where(user).Scan(&lang); e != nil {
t.Errorf("Scan failed: %s\n", e.Error())
}
if lang != "it" {
t.Errorf("The fetched language (%s) is different to the expected one (%s)\n", lang, user.Lang)
}
if e = db.Delete(&user); e != nil {
t.Errorf("Delete of a user (using the primary key) should work, but got: %s\n", e.Error())
}
// Now user is empty. Thus a new .Delete(&user) should fail
if e = db.Delete(&user); e == nil {
t.Error("Delete of an empty object should fail, but succeeded")
}
}
func TestJoinsTableSelectDeleteWhere(t *testing.T) {
// create 6 user and profiles
var ids []uint64
for i := 0; i < 6; i++ {
user := createUser()
ids = append(ids, user.Counter)
createProfile(user.Counter)
}
var users []User
if e = db.Model(User{}).Scan(&users); e != nil {
t.Errorf("Scan on structs should work but got: %s\n", e.Error())
}
if len(users) != 6 {
t.Errorf("Expected 6 users but got: %d\n", len(users))
}
var fetchedIds []uint64
if e = db.Model(User{}).Order("counter asc").Pluck("counter", &fetchedIds); e != nil {
t.Errorf("Pluck should work but got: %s\n", e.Error())
}
for i := 0; i < 6; i++ {
if ids[i] != fetchedIds[i] {
t.Errorf("Expected %d in position %d but got: %d\n", ids[i], i, fetchedIds[i])
}
}
// select $1::int, $2::int, $3::it, counter from users join profiles on user.counter = profiles.counter
// where user.counter = $4
var one, two, three, four int
u := (User{}).TableName()
p := (Profile{}).TableName()
if e = db.Select("?::int, ?::int, ?::int, "+u+".counter", 1, 2, 3).
Table(u).
Joins("JOIN "+p+" ON "+u+".counter = "+p+".counter").
Where(&User{Counter: 4}).Scan(&one, &two, &three, &four); e != nil {
t.Error(e.Error())
}
db.Log(nil)
if one != 1 || two != 2 || three != 3 || four != 4 {
t.Errorf("problem in scanning results, expected 1,2,3,4 got: %d,%d,%d,%d", one, two, three, four)
}
// Count
var count uint8
if e = db.Model(User{}).Count(&count); e != nil {
t.Errorf("problem counting users: %s\n", e.Error())
}
if count != 6 {
t.Errorf("Problem with count. Expected 6 users but counted %d", count)
}
if e = db.Where("counter IN (?)", ids).Delete(User{}); e != nil {
t.Errorf("delete in range should work but got: %s\n", e.Error())
}
// clear slice and pluck again
fetchedIds = nil
_ = db.Model(User{}).Order("counter asc").Pluck("counter", &fetchedIds)
if len(fetchedIds) != 0 {
t.Errorf("delete in range failed, pluck returned ids that must have been deleted")
}
}
func TestJSON(t *testing.T) {
user := createUser()
var emptyJSON = make(igor.JSON)
if !reflect.DeepEqual(user.NotifyStory, emptyJSON) {
t.Errorf("JSON notifyStory should be empty but got: %s instead of %s\n", user.NotifyStory, emptyJSON)
}
var ns = make(igor.JSON)
ns["0"] = struct {
From uint64 `json:"from"`
To uint64 `json:"to"`
Message string `json:"message"`
}{
From: 1,
To: 1,
Message: "hi bob",
}
ns["numbers"] = 1
ns["test"] = 2
user.NotifyStory = ns
if e = db.Updates(&user); e != nil {
t.Errorf("updates should work but got: %s\n", e.Error())
}
// To use JSON with json, use:
// printableJSON, _ := json.Marshal(user.NotifyStory)
// fmt.Printf("%s\n", printableJSON)
var nsNew igor.JSON
if e = db.Model(User{}).Select("notify_story").Where(&user).Scan(&nsNew); e != nil {
t.Errorf("Problem scanning into igor.JSON: %s\n", e.Error())
}
if !reflect.DeepEqual(ns, nsNew) {
t.Errorf("fetched notify story is different from the saved one\n%s vs %s", ns, nsNew)
}
if e = db.Delete(&user); e != nil {
t.Errorf("Delete should work but returned %s", e.Error())
}
}
func TestNotifications(t *testing.T) {
count := 0
if e = db.Listen("notification_without_payload", func(payload ...string) {
count++
t.Log("Received notification on channel: notification_without_payload\n")
}); e != nil {
t.Fatalf("Unable to listen on channel: %s\n", e.Error())
}
for i := 0; i < 4; i++ {
if e = db.Notify("notification_without_payload"); e != nil {
t.Fatalf("Unable to send notification: %s\n", e.Error())
}
}
// wait some time to handle all notifications
time.Sleep(100 * time.Millisecond)
if count != 4 |
// listen on an opened channel should fail
if e = db.Listen("notification_without_payload", func(payload ...string) {}); e == nil {
t.Errorf("Listen on an opened channel should fail, but succeeded\n")
}
// Handle payload
// listen on more channels, with payload
count = 0
if e = db.Listen("np", func(payload ...string) {
count++
t.Logf("channel np: received payload: %s\n", payload)
}); e != nil {
t.Fatalf("Unable to listen on channel: %s\n", e.Error())
}
// test sending payload with notify
for i := 0; i < 4; i++ {
if e = db.Notify("np", strconv.Itoa(i)+" payload"); e != nil {
t.Fatalf("Unable to send notification with payload: %s\n", e.Error())
}
}
// wait some time to handle all notifications
time.Sleep(100 * time.Millisecond)
if count != 4 {
t.Errorf("Expected to receive 4 notifications, but counted only: %d\n", count)
}
// test unlisten
if e = db.Unlisten("notification_without_payload"); e != nil {
t.Errorf("Unable to unlisten from notification_without_payload, got: %s\n", e.Error())
}
// test UnlistenAll
if e = db.UnlistenAll(); e != nil {
t.Errorf("Unable to unlistenAll, got: %s\n", e.Error())
}
}
func TestCTE(t *testing.T) {
createUser()
createUser()
createUser()
var usernames []string
e = db.CTE(`WITH full_users_id AS (
SELECT counter FROM users WHERE name = ?)`, "Paolo").Table("full_users_id as fui").Select("username").Joins("JOIN users ON fui.counter = users.counter").Scan(&usernames)
if e != nil {
t.Fatalf(e.Error())
}
if len(usernames) != 3 {
t.Fatalf("Expected 3, but got: %d\n", len(usernames))
}
if e = db.Model(User{}).Where("name", "Paolo").Delete(User{}); e != nil {
t.Errorf("Delete should work but returned %s", e.Error())
}
}
| {
t.Errorf("Expected to receive 4 notifications, but counted only: %d\n", count)
} | conditional_block |
strobe.rs | use crate::{
keccak::{keccakf_u8, AlignedKeccakState, KECCAK_BLOCK_SIZE},
prelude::*,
};
use bitflags::bitflags;
use subtle::{self, ConstantTimeEq};
/// Version of Strobe that this crate implements.
pub const STROBE_VERSION: &str = "1.0.2";
bitflags! {
/// Operation flags defined in the Strobe paper. This is defined as a bitflags struct.
pub(crate) struct OpFlags: u8 {
/// Is data being moved inbound
const I = 1<<0;
/// Is data being sent to the application
const A = 1<<1;
/// Does this operation use cipher output
const C = 1<<2;
/// Is data being sent for transport
const T = 1<<3;
/// Use exclusively for metadata operations
const M = 1<<4;
/// Reserved and currently unimplemented. Using this will cause a panic.
const K = 1<<5;
}
}
/// Security parameter. Choice of 128 or 256 bits.
#[derive(Clone, Copy)]
#[repr(usize)]
pub enum SecParam {
B128 = 128,
B256 = 256,
}
/// An empty struct that just indicates that an error occurred in verifying a MAC
#[derive(Debug)]
pub struct AuthError;
/// The main Strobe object. This is currently limited to using Keccak-f\[1600\] as the internal
/// permutation function. For more information on this object, the [protocol specification][spec]
/// is a great resource.
///
/// [spec]: https://strobe.sourceforge.io/specs/
///
/// Description of method input
/// ---------------------------
/// Most operations exposed by `Strobe` take the same set of inputs. The arguments are
///
/// * `data` - The input data to the operation.
/// * `more` - For streaming purposes. Specifies whether you're trying to add more input / get more
/// output to/from the previous operation. For example:
///
/// ```rust
/// # extern crate strobe_rs;
/// # use strobe_rs::{SecParam, Strobe};
/// # fn main() {
/// # let mut s = Strobe::new(b"example-of-more", SecParam::B128);
/// s.ad(b"hello world", false);
/// # }
/// ```
/// is equivalent to
/// ```rust
/// # extern crate strobe_rs;
/// # use strobe_rs::{SecParam, Strobe};
/// # fn main() {
/// # let mut s = Strobe::new(b"example-of-more", SecParam::B128);
/// s.ad(b"hello ", false);
/// s.ad(b"world", true);
/// # }
/// ```
///
/// **NOTE:** If you try to set the `more` flag for an operation that is not preceded by the same
/// operation (e.g., if you try `ad` followed by `send_enc` with `more=true`), then **the function
/// will panic**, since that is an invalid use of the `more` flag.
///
/// Finally, `ratchet` and `meta_ratchet` take a `usize` argument instead of bytes. These functions
/// are individually commented below.
#[derive(Clone)]
pub struct Strobe {
/// Internal Keccak state
pub(crate) st: AlignedKeccakState,
/// Security parameter (128 or 256)
sec: SecParam,
/// This is the `R` parameter in the Strobe spec
rate: usize,
/// Index into `st`
pos: usize,
/// Index into `st`
pos_begin: usize,
/// Represents whether we're a sender or a receiver or uninitialized
is_receiver: Option<bool>,
/// The last operation performed. This is to verify that the `more` flag is only used across
/// identical operations.
prev_flags: Option<OpFlags>,
}
// This defines an operation and meta-operation that mutates its input
macro_rules! def_op_mut {
($name:ident, $meta_name:ident, $flags:expr, $doc_str:expr) => {
#[doc = $doc_str]
pub fn $name(&mut self, data: &mut [u8], more: bool) {
let flags = $flags;
self.operate(flags, data, more);
}
#[doc = $doc_str]
pub fn $meta_name(&mut self, data: &mut [u8], more: bool) {
let flags = $flags | OpFlags::M;
self.operate(flags, data, more);
}
};
}
// This defines an operation and meta-operation that does not mutate its input
macro_rules! def_op_no_mut {
($name:ident, $meta_name:ident, $flags:expr, $doc_str:expr) => {
#[doc = $doc_str]
pub fn $name(&mut self, data: &[u8], more: bool) {
let flags = $flags;
self.operate_no_mutate(flags, data, more);
}
#[doc = $doc_str]
pub fn $meta_name(&mut self, data: &[u8], more: bool) {
let flags = $flags | OpFlags::M;
self.operate_no_mutate(flags, data, more);
}
};
}
impl Strobe {
/// Makes a new `Strobe` object with a given protocol byte string and security parameter.
pub fn new(proto: &[u8], sec: SecParam) -> Strobe {
let rate = KECCAK_BLOCK_SIZE * 8 - (sec as usize) / 4 - 2;
assert!(rate >= 1);
assert!(rate < 254);
// Initialize state: st = F([0x01, R+2, 0x01, 0x00, 0x01, 0x60] + b"STROBEvX.Y.Z")
let mut st_buf = [0u8; KECCAK_BLOCK_SIZE * 8];
st_buf[0..6].copy_from_slice(&[0x01, (rate as u8) + 2, 0x01, 0x00, 0x01, 0x60]);
st_buf[6..13].copy_from_slice(b"STROBEv");
st_buf[13..18].copy_from_slice(STROBE_VERSION.as_bytes());
let mut st = AlignedKeccakState(st_buf);
keccakf_u8(&mut st);
let mut strobe = Strobe {
st,
sec,
rate,
pos: 0,
pos_begin: 0,
is_receiver: None,
prev_flags: None,
};
// Mix the protocol into the state
strobe.meta_ad(proto, false);
strobe
}
/// Returns a string of the form `Strobe-Keccak-<sec>/<b>v<ver>` where `sec` is the bits of
/// security (128 or 256), `b` is the block size (in bits) of the Keccak permutation function,
/// and `ver` is the protocol version.
pub fn version_str(&self) -> String {
    // Name the pieces before formatting, purely for readability
    let sec_bits = self.sec as usize;
    let block_bits = KECCAK_BLOCK_SIZE * 64;
    format!("Strobe-Keccak-{}/{}-v{}", sec_bits, block_bits, STROBE_VERSION)
}
/// Validates that the `more` flag is being used correctly. Panics when validation fails.
///
/// Note: the comparison is on the full flag set (including M), so a meta operation never
/// continues a non-meta one.
fn validate_streaming(&mut self, flags: OpFlags, more: bool) {
    // Streaming only makes sense if this operation is the same as last. For example you can do
    // s.ad("hello", false);
    // s.ad(" world", true).
    // But you can't do
    // s.ad("hello", false);
    // s.key(" world", true).
    if more {
        assert_eq!(
            self.prev_flags,
            Some(flags),
            "`more` can only be used when this operation is the same as the previous operation"
        );
    }
    // Update the last-performed operation (i.e., the one we're about to perform)
    self.prev_flags = Some(flags);
}
// Runs the permutation function on the internal state.
// Applies the Strobe padding before permuting, then resets both cursors.
fn run_f(&mut self) {
    // Pad with the offset where the current operation began
    self.st.0[self.pos] ^= self.pos_begin as u8;
    // Second padding byte immediately after the cursor
    self.st.0[self.pos + 1] ^= 0x04;
    // Final padding bit at the end of the rate region
    self.st.0[self.rate + 1] ^= 0x80;
    keccakf_u8(&mut self.st);
    // A fresh block: both cursors return to the start
    self.pos = 0;
    self.pos_begin = 0;
}
/// XORs the given data into the state. This is a special case of the `duplex` code in the
/// STROBE paper.
fn absorb(&mut self, data: &[u8]) {
    for &byte in data.iter() {
        // Fold the next input byte into the sponge at the cursor
        self.st.0[self.pos] ^= byte;
        self.pos += 1;
        // Permute once a full rate-block has been absorbed
        if self.pos == self.rate {
            self.run_f();
        }
    }
}
/// XORs the given data into the state, then sets the data equal the state. This is a special
/// case of the `duplex` code in the STROBE paper.
fn absorb_and_set(&mut self, data: &mut [u8]) {
    for input_byte in data.iter_mut() {
        // state ^= input, then write the updated state byte back into the buffer
        self.st.0[self.pos] ^= *input_byte;
        *input_byte = self.st.0[self.pos];
        self.pos += 1;
        // Permute once a full rate-block has been processed
        if self.pos == self.rate {
            self.run_f();
        }
    }
}
/// Copies the internal state into the given buffer. This is a special case of `absorb_and_set`
/// where `data` is all zeros.
fn copy_state(&mut self, data: &mut [u8]) {
    for out_byte in data.iter_mut() {
        // Read the state byte at the cursor; the state itself is left untouched
        *out_byte = self.st.0[self.pos];
        self.pos += 1;
        // Permute once a full rate-block has been read out
        if self.pos == self.rate {
            self.run_f();
        }
    }
}
/// Overwrites the state with the given data while XORing the given data with the old state.
/// This is a special case of the `duplex` code in the STROBE paper.
fn exchange(&mut self, data: &mut [u8]) {
    for byte in data.iter_mut() {
        // buffer byte becomes old_state ^ input ...
        *byte ^= self.st.0[self.pos];
        // ... and XORing that back leaves the state holding the original input
        self.st.0[self.pos] ^= *byte;
        self.pos += 1;
        // Permute once a full rate-block has been processed
        if self.pos == self.rate {
            self.run_f();
        }
    }
}
/// Overwrites the state with the given data. This is a special case of `Strobe::exchange`,
/// where we do not want to mutate the input data.
fn overwrite(&mut self, data: &[u8]) {
    for &byte in data.iter() {
        // Replace (not XOR) the state byte at the cursor
        self.st.0[self.pos] = byte;
        self.pos += 1;
        // Permute once a full rate-block has been written
        if self.pos == self.rate {
            self.run_f();
        }
    }
}
/// Copies the state into the given buffer and sets the state to 0. This is a special case of
/// `Strobe::exchange`, where `data` is assumed to be the all-zeros string. This is precisely
/// the case when the current operation is PRF.
fn squeeze(&mut self, data: &mut [u8]) {
    for out_byte in data.iter_mut() {
        // Emit the state byte, then clear it so the output cannot be recovered later
        *out_byte = self.st.0[self.pos];
        self.st.0[self.pos] = 0;
        self.pos += 1;
        // Permute once a full rate-block has been squeezed
        if self.pos == self.rate {
            self.run_f();
        }
    }
}
/// Overwrites the state with a specified number of zeros. This is a special case of
/// `Strobe::exchange`. More specifically, it's a special case of `Strobe::overwrite` and
/// `Strobe::squeeze`. It's like `squeeze` in that we assume we've been given all zeros as
/// input, and like `overwrite` in that we do not mutate (or take) any input.
fn zero_state(&mut self, mut bytes_to_zero: usize) {
static ZEROS: [u8; 8 * KECCAK_BLOCK_SIZE] = [0u8; 8 * KECCAK_BLOCK_SIZE];
// Do the zero-writing in chunks
while bytes_to_zero > 0 {
let slice_len = core::cmp::min(self.rate - self.pos, bytes_to_zero);
self.st.0[self.pos..(self.pos + slice_len)].copy_from_slice(&ZEROS[..slice_len]);
self.pos += slice_len;
bytes_to_zero -= slice_len;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Mixes the current state index and flags into the state, accounting for whether we are
/// sending or receiving
fn begin_op(&mut self, mut flags: OpFlags) {
    // Only transport (T) operations have a direction
    if flags.contains(OpFlags::T) {
        let is_op_receiving = flags.contains(OpFlags::I);
        // If uninitialized, take on the direction of the first directional operation we get
        if self.is_receiver.is_none() {
            self.is_receiver = Some(is_op_receiving);
        }
        // So that the sender and receiver agree, toggle the I flag as necessary
        // This is equivalent to flags ^= is_receiver
        flags.set(OpFlags::I, self.is_receiver.unwrap() != is_op_receiving);
    }
    // Remember where the previous operation began; this operation begins one byte past the
    // current cursor (just after the old-pos byte absorbed below)
    let old_pos_begin = self.pos_begin;
    self.pos_begin = self.pos + 1;
    // Mix in the position and flags
    let to_mix = &mut [old_pos_begin as u8, flags.bits()];
    self.absorb(&to_mix[..]);
    // C- and K-flagged operations must start on a fresh permuted block
    let force_f = flags.contains(OpFlags::C) || flags.contains(OpFlags::K);
    if force_f && self.pos != 0 {
        self.run_f();
    }
}
/// Performs the state / data transformation that corresponds to the given flags. If `more` is
/// given, this will treat `data` as a continuation of the data given in the previous
/// call to `operate`.
///
/// `data` is transformed in place; callers whose operations never mutate their input should
/// use `operate_no_mutate` instead (the final branch panics to enforce this).
pub(crate) fn operate(&mut self, flags: OpFlags, data: &mut [u8], more: bool) {
    // Make sure the K opflag isn't being used, and that the `more` flag isn't being misused
    assert!(!flags.contains(OpFlags::K), "Op flag K not implemented");
    self.validate_streaming(flags, more);
    // If `more` isn't set, this is a new operation. Do the begin_op sequence
    if !more {
        self.begin_op(flags);
    }
    // Meta-ness is only relevant for `begin_op`. Remove it to simplify the below logic.
    let flags = flags & !OpFlags::M;
    // TODO?: Assert that input is empty under some flag conditions
    if flags.contains(OpFlags::C) && flags.contains(OpFlags::T) && !flags.contains(OpFlags::I) {
        // This is equivalent to the `duplex` operation in the Python implementation, with
        // `cafter = True`
        if flags == OpFlags::C | OpFlags::T {
            // This is `send_mac`. Pretend the input is all zeros
            self.copy_state(data)
        } else {
            self.absorb_and_set(data);
        }
    } else if flags == OpFlags::I | OpFlags::A | OpFlags::C {
        // Special case of case below. This is PRF. Use `squeeze` instead of `exchange`.
        self.squeeze(data);
    } else if flags.contains(OpFlags::C) {
        // This is equivalent to the `duplex` operation in the Python implementation, with
        // `cbefore = True`
        self.exchange(data);
    } else {
        // This should normally call `absorb`, but `absorb` does not mutate, so the implementor
        // should have used operate_no_mutate instead
        panic!("operate should not be called for operations that do not require mutation");
    }
}
/// Performs the state transformation that corresponds to the given flags. If `more` is given,
/// this will treat `data` as a continuation of the data given in the previous call to
/// `operate`. This uses non-mutating variants of the specializations of the `duplex` function.
pub(crate) fn operate_no_mutate(&mut self, flags: OpFlags, data: &[u8], more: bool) {
    // Make sure the K opflag isn't being used, and that the `more` flag isn't being misused
    assert!(!flags.contains(OpFlags::K), "Op flag K not implemented");
    self.validate_streaming(flags, more);
    // If `more` isn't set, this is a new operation. Do the begin_op sequence
    if !more {
        self.begin_op(flags);
    }
    // There are no non-mutating variants of things with flags & (C | T | I) == C | T
    if flags.contains(OpFlags::C) && flags.contains(OpFlags::T) && !flags.contains(OpFlags::I) {
        panic!("operate_no_mutate called on something that requires mutation");
    } else if flags.contains(OpFlags::C) {
        // This is equivalent to a non-mutating form of the `duplex` operation in the Python
        // implementation, with `cbefore = True`
        self.overwrite(data);
    } else {
        // This is equivalent to the `duplex` operation in the Python implementation, with
        // `cbefore = cafter = False`
        self.absorb(data);
    };
}
// This is separately defined because it's the only method that can return a `Result`. See docs
// for recv_mac and meta_recv_mac.
//
// `data` holds the received MAC on entry and is overwritten by `operate`; a valid MAC
// XORs against the state to all zeros.
fn generalized_recv_mac(&mut self, data: &mut [u8], is_meta: bool) -> Result<(), AuthError> {
    // These are the (meta_)recv_mac flags
    let flags = if is_meta {
        OpFlags::I | OpFlags::C | OpFlags::T | OpFlags::M
    } else {
        OpFlags::I | OpFlags::C | OpFlags::T
    };
    // recv_mac can never be streamed
    self.operate(flags, data, /* more */ false);
    // Constant-time MAC check. This accumulates the truth values of byte == 0
    let mut all_zero = subtle::Choice::from(1u8);
    for b in data {
        all_zero &= b.ct_eq(&0u8);
    }
    // If the buffer isn't all zeros, that's an invalid MAC
    if !bool::from(all_zero) {
        Err(AuthError)
    } else {
        Ok(())
    }
}
/// Attempts to authenticate the current state against the given MAC. On failure, it returns an
/// `AuthError`. It behooves the user of this library to check this return value and overreact
/// on error.
///
/// Note: the contents of `data` are overwritten during the check.
pub fn recv_mac(&mut self, data: &mut [u8]) -> Result<(), AuthError> {
    self.generalized_recv_mac(data, /* is_meta */ false)
}
/// Attempts to authenticate the current state against the given MAC. On failure, it returns an
/// `AuthError`. It behooves the user of this library to check this return value and overreact
/// on error.
///
/// Meta variant of `recv_mac`; the contents of `data` are overwritten during the check.
pub fn meta_recv_mac(&mut self, data: &mut [u8]) -> Result<(), AuthError> {
    self.generalized_recv_mac(data, /* is_meta */ true)
}
// This is separately defined because it's the only method that takes an integer and mutates
// its input
fn generalized_ratchet(&mut self, num_bytes_to_zero: usize, more: bool, is_meta: bool) {
    // These are the (meta_)ratchet flags
    let flags = if is_meta {
        OpFlags::C | OpFlags::M
    } else {
        OpFlags::C
    };
    // We don't make an `operate` call, since this is a super special case. That means we have
    // to validate the flags and make the `begin_op` call manually.
    self.validate_streaming(flags, more);
    if !more {
        self.begin_op(flags);
    }
    // Zeroing state bytes is what makes the ratchet irreversible
    self.zero_state(num_bytes_to_zero);
}
/// Ratchets the internal state forward in an irreversible way by zeroing bytes.
///
/// Takes a `usize` argument specifying the number of bytes of public state to zero. If the
/// size exceeds `self.rate`, Keccak-f will be called before more bytes are zeroed.
pub fn ratchet(&mut self, num_bytes_to_zero: usize, more: bool) {
    // Thin wrapper over the shared implementation, with is_meta = false
    self.generalized_ratchet(num_bytes_to_zero, more, /* is_meta */ false)
}
/// Ratchets the internal state forward in an irreversible way by zeroing bytes.
///
/// Takes a `usize` argument specifying the number of bytes of public state to zero. If the
/// size exceeds `self.rate`, Keccak-f will be called before more bytes are zeroed.
pub fn meta_ratchet(&mut self, num_bytes_to_zero: usize, more: bool) {
    // Thin wrapper over the shared implementation, with is_meta = true
    self.generalized_ratchet(num_bytes_to_zero, more, /* is_meta */ true)
}
//
// These operations mutate their inputs
//
// send_enc: A | C | T -> dispatched to `absorb_and_set` by `operate`
def_op_mut!(
    send_enc,
    meta_send_enc,
    OpFlags::A | OpFlags::C | OpFlags::T,
    "Sends an encrypted message."
);
// recv_enc: I | A | C | T -> dispatched to `exchange` by `operate`
def_op_mut!(
    recv_enc,
    meta_recv_enc,
    OpFlags::I | OpFlags::A | OpFlags::C | OpFlags::T,
    "Receives an encrypted message."
);
// send_mac: C | T -> dispatched to `copy_state` by `operate`
def_op_mut!(
    send_mac,
    meta_send_mac,
    OpFlags::C | OpFlags::T,
    "Sends a MAC of the internal state. \
     The output is independent of the initial contents of the input buffer."
);
// prf: I | A | C -> dispatched to `squeeze` by `operate`
def_op_mut!(
    prf,
    meta_prf,
    OpFlags::I | OpFlags::A | OpFlags::C,
    "Extracts pseudorandom data as a function of the internal state. \
     The output is independent of the initial contents of the input buffer."
);
//
// These operations do not mutate their inputs
//
// send_clr: A | T -> dispatched to `absorb` by `operate_no_mutate`
def_op_no_mut!(
    send_clr,
    meta_send_clr,
    OpFlags::A | OpFlags::T,
    "Sends a plaintext message."
);
// recv_clr: I | A | T -> dispatched to `absorb` by `operate_no_mutate`
def_op_no_mut!(
    recv_clr,
    meta_recv_clr,
    OpFlags::I | OpFlags::A | OpFlags::T,
    "Receives a plaintext message."
);
// ad: A -> dispatched to `absorb` by `operate_no_mutate`
def_op_no_mut!(
    ad,
    meta_ad,
    OpFlags::A,
    "Mixes associated data into the internal state."
);
// key: A | C -> dispatched to `overwrite` by `operate_no_mutate`
def_op_no_mut!(
    key,
    meta_key,
    OpFlags::A | OpFlags::C,
    "Sets a symmetric cipher key."
);
}
| {
// Special case of case below. This is PRF. Use `squeeze` instead of `exchange`.
self.squeeze(data);
} | conditional_block |
strobe.rs | use crate::{
keccak::{keccakf_u8, AlignedKeccakState, KECCAK_BLOCK_SIZE},
prelude::*,
};
use bitflags::bitflags;
use subtle::{self, ConstantTimeEq};
/// Version of Strobe that this crate implements.
pub const STROBE_VERSION: &str = "1.0.2";
bitflags! {
/// Operation flags defined in the Strobe paper. This is defined as a bitflags struct.
pub(crate) struct OpFlags: u8 {
/// Is data being moved inbound
const I = 1<<0;
/// Is data being sent to the application
const A = 1<<1;
/// Does this operation use cipher output
const C = 1<<2;
/// Is data being sent for transport
const T = 1<<3;
/// Use exclusively for metadata operations
const M = 1<<4;
/// Reserved and currently unimplemented. Using this will cause a panic.
const K = 1<<5;
}
}
/// Security parameter. Choice of 128 or 256 bits.
#[derive(Clone, Copy)]
#[repr(usize)]
pub enum SecParam {
B128 = 128,
B256 = 256,
}
/// An empty struct that just indicates that an error occurred in verifying a MAC
#[derive(Debug)]
pub struct AuthError;
/// The main Strobe object. This is currently limited to using Keccak-f\[1600\] as the internal
/// permutation function. For more information on this object, the [protocol specification][spec]
/// is a great resource.
///
/// [spec]: https://strobe.sourceforge.io/specs/
///
/// Description of method input
/// ---------------------------
/// Most operations exposed by `Strobe` take the same set of inputs. The arguments are
///
/// * `data` - The input data to the operation.
/// * `more` - For streaming purposes. Specifies whether you're trying to add more input / get more
/// output to/from the previous operation. For example:
///
/// ```rust
/// # extern crate strobe_rs;
/// # use strobe_rs::{SecParam, Strobe};
/// # fn main() {
/// # let mut s = Strobe::new(b"example-of-more", SecParam::B128);
/// s.ad(b"hello world", false);
/// # }
/// ```
/// is equivalent to
/// ```rust
/// # extern crate strobe_rs;
/// # use strobe_rs::{SecParam, Strobe};
/// # fn main() {
/// # let mut s = Strobe::new(b"example-of-more", SecParam::B128);
/// s.ad(b"hello ", false);
/// s.ad(b"world", true);
/// # }
/// ```
///
/// **NOTE:** If you try to set the `more` flag for an operation that is not preceded by the same
/// operation (e.g., if you try `ad` followed by `send_enc` with `more=true`), then **the function
/// will panic**, since that is an invalid use of the `more` flag.
///
/// Finally, `ratchet` and `meta_ratchet` take a `usize` argument instead of bytes. These functions
/// are individually commented below.
#[derive(Clone)]
pub struct Strobe {
/// Internal Keccak state
pub(crate) st: AlignedKeccakState,
/// Security parameter (128 or 256)
sec: SecParam,
/// This is the `R` parameter in the Strobe spec
rate: usize,
/// Index into `st`
pos: usize,
/// Index into `st`
pos_begin: usize,
/// Represents whether we're a sender or a receiver or uninitialized
is_receiver: Option<bool>,
/// The last operation performed. This is to verify that the `more` flag is only used across
/// identical operations.
prev_flags: Option<OpFlags>,
}
// This defines an operation and meta-operation that mutates its input
macro_rules! def_op_mut {
($name:ident, $meta_name:ident, $flags:expr, $doc_str:expr) => {
#[doc = $doc_str]
pub fn $name(&mut self, data: &mut [u8], more: bool) {
let flags = $flags;
self.operate(flags, data, more);
}
#[doc = $doc_str]
pub fn $meta_name(&mut self, data: &mut [u8], more: bool) {
let flags = $flags | OpFlags::M;
self.operate(flags, data, more);
}
};
}
// This defines an operation and meta-operation that does not mutate its input.
//
// Expands (inside `impl Strobe`) to two public methods: `$name`, built from `$flags`,
// and `$meta_name`, the same operation with the M (meta) flag OR'd in. Both delegate
// to `Strobe::operate_no_mutate`, which never writes to `data`.
macro_rules! def_op_no_mut {
    ($name:ident, $meta_name:ident, $flags:expr, $doc_str:expr) => {
        #[doc = $doc_str]
        pub fn $name(&mut self, data: &[u8], more: bool) {
            let flags = $flags;
            self.operate_no_mutate(flags, data, more);
        }
        #[doc = $doc_str]
        pub fn $meta_name(&mut self, data: &[u8], more: bool) {
            let flags = $flags | OpFlags::M;
            self.operate_no_mutate(flags, data, more);
        }
    };
}
impl Strobe {
/// Makes a new `Strobe` object with a given protocol byte string and security parameter.
pub fn new(proto: &[u8], sec: SecParam) -> Strobe {
let rate = KECCAK_BLOCK_SIZE * 8 - (sec as usize) / 4 - 2;
assert!(rate >= 1);
assert!(rate < 254);
// Initialize state: st = F([0x01, R+2, 0x01, 0x00, 0x01, 0x60] + b"STROBEvX.Y.Z")
let mut st_buf = [0u8; KECCAK_BLOCK_SIZE * 8];
st_buf[0..6].copy_from_slice(&[0x01, (rate as u8) + 2, 0x01, 0x00, 0x01, 0x60]);
st_buf[6..13].copy_from_slice(b"STROBEv");
st_buf[13..18].copy_from_slice(STROBE_VERSION.as_bytes());
let mut st = AlignedKeccakState(st_buf);
keccakf_u8(&mut st);
let mut strobe = Strobe {
st,
sec,
rate,
pos: 0,
pos_begin: 0,
is_receiver: None,
prev_flags: None,
};
// Mix the protocol into the state
strobe.meta_ad(proto, false);
strobe
}
/// Returns a string of the form `Strobe-Keccak-<sec>/<b>v<ver>` where `sec` is the bits of
/// security (128 or 256), `b` is the block size (in bits) of the Keccak permutation function,
/// and `ver` is the protocol version.
pub fn version_str(&self) -> String {
format!(
"Strobe-Keccak-{}/{}-v{}",
self.sec as usize,
KECCAK_BLOCK_SIZE * 64,
STROBE_VERSION
)
}
/// Validates that the `more` flag is being used correctly. Panics when validation fails.
fn validate_streaming(&mut self, flags: OpFlags, more: bool) {
// Streaming only makes sense if this operation is the same as last. For example you can do
// s.ad("hello", false);
// s.ad(" world", true).
// But you can't do
// s.ad("hello", false);
// s.key(" world", true).
if more {
assert_eq!(
self.prev_flags,
Some(flags),
"`more` can only be used when this operation is the same as the previous operation"
);
}
// Update the last-performed operation (i.e., the one we're about to perform)
self.prev_flags = Some(flags);
}
// Runs the permutation function on the internal state
fn run_f(&mut self) {
self.st.0[self.pos] ^= self.pos_begin as u8;
self.st.0[self.pos + 1] ^= 0x04;
self.st.0[self.rate + 1] ^= 0x80;
keccakf_u8(&mut self.st);
self.pos = 0;
self.pos_begin = 0;
}
/// XORs the given data into the state. This is a special case of the `duplex` code in the
/// STROBE paper.
fn absorb(&mut self, data: &[u8]) {
for b in data {
self.st.0[self.pos] ^= *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// XORs the given data into the state, then sets the data equal the state. This is a special
/// case of the `duplex` code in the STROBE paper.
fn absorb_and_set(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*state_byte ^= *b;
*b = *state_byte;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Copies the internal state into the given buffer. This is a special case of `absorb_and_set`
/// where `data` is all zeros.
fn copy_state(&mut self, data: &mut [u8]) {
for b in data {
*b = self.st.0[self.pos];
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with the given data while XORing the given data with the old state.
/// This is a special case of the `duplex` code in the STROBE paper.
fn exchange(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*b ^= *state_byte;
*state_byte ^= *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with the given data. This is a special case of `Strobe::exchange`,
/// where we do not want to mutate the input data.
fn overwrite(&mut self, data: &[u8]) {
for b in data {
self.st.0[self.pos] = *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Copies the state into the given buffer and sets the state to 0. This is a special case of
/// `Strobe::exchange`, where `data` is assumed to be the all-zeros string. This is precisely
/// the case when the current operation is PRF.
fn squeeze(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*b = *state_byte;
*state_byte = 0;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with a specified number of zeros. This is a special case of
/// `Strobe::exchange`. More specifically, it's a special case of `Strobe::overwrite` and
/// `Strobe::squeeze`. It's like `squeeze` in that we assume we've been given all zeros as
/// input, and like `overwrite` in that we do not mutate (or take) any input.
fn zero_state(&mut self, mut bytes_to_zero: usize) {
static ZEROS: [u8; 8 * KECCAK_BLOCK_SIZE] = [0u8; 8 * KECCAK_BLOCK_SIZE];
// Do the zero-writing in chunks
while bytes_to_zero > 0 {
let slice_len = core::cmp::min(self.rate - self.pos, bytes_to_zero);
self.st.0[self.pos..(self.pos + slice_len)].copy_from_slice(&ZEROS[..slice_len]);
self.pos += slice_len;
bytes_to_zero -= slice_len;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Mixes the current state index and flags into the state, accounting for whether we are
/// sending or receiving
fn begin_op(&mut self, mut flags: OpFlags) {
if flags.contains(OpFlags::T) {
let is_op_receiving = flags.contains(OpFlags::I);
// If uninitialized, take on the direction of the first directional operation we get
if self.is_receiver.is_none() {
self.is_receiver = Some(is_op_receiving);
}
// So that the sender and receiver agree, toggle the I flag as necessary
// This is equivalent to flags ^= is_receiver
flags.set(OpFlags::I, self.is_receiver.unwrap() != is_op_receiving);
}
let old_pos_begin = self.pos_begin;
self.pos_begin = self.pos + 1;
// Mix in the position and flags
let to_mix = &mut [old_pos_begin as u8, flags.bits()];
self.absorb(&to_mix[..]);
let force_f = flags.contains(OpFlags::C) || flags.contains(OpFlags::K);
if force_f && self.pos != 0 {
self.run_f();
}
}
/// Performs the state / data transformation that corresponds to the given flags. If `more` is
/// given, this will treat `data` as a continuation of the data given in the previous
/// call to `operate`.
pub(crate) fn operate(&mut self, flags: OpFlags, data: &mut [u8], more: bool) {
// Make sure the K opflag isn't being used, and that the `more` flag isn't being misused
assert!(!flags.contains(OpFlags::K), "Op flag K not implemented");
self.validate_streaming(flags, more);
// If `more` isn't set, this is a new operation. Do the begin_op sequence
if !more {
self.begin_op(flags);
}
// Meta-ness is only relevant for `begin_op`. Remove it to simplify the below logic.
let flags = flags & !OpFlags::M;
// TODO?: Assert that input is empty under some flag conditions
if flags.contains(OpFlags::C) && flags.contains(OpFlags::T) && !flags.contains(OpFlags::I) {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cafter = True`
if flags == OpFlags::C | OpFlags::T {
// This is `send_mac`. Pretend the input is all zeros
self.copy_state(data)
} else {
self.absorb_and_set(data);
}
} else if flags == OpFlags::I | OpFlags::A | OpFlags::C {
// Special case of case below. This is PRF. Use `squeeze` instead of `exchange`.
self.squeeze(data);
} else if flags.contains(OpFlags::C) {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cbefore = True`
self.exchange(data);
} else {
// This should normally call `absorb`, but `absorb` does not mutate, so the implementor
// should have used operate_no_mutate instead
panic!("operate should not be called for operations that do not require mutation");
}
}
/// Performs the state transformation that corresponds to the given flags. If `more` is given,
/// this will treat `data` as a continuation of the data given in the previous call to
/// `operate`. This uses non-mutating variants of the specializations of the `duplex` function.
pub(crate) fn operate_no_mutate(&mut self, flags: OpFlags, data: &[u8], more: bool) {
// Make sure the K opflag isn't being used, and that the `more` flag isn't being misused
assert!(!flags.contains(OpFlags::K), "Op flag K not implemented");
self.validate_streaming(flags, more);
// If `more` isn't set, this is a new operation. Do the begin_op sequence
if !more {
self.begin_op(flags);
}
// There are no non-mutating variants of things with flags & (C | T | I) == C | T
if flags.contains(OpFlags::C) && flags.contains(OpFlags::T) && !flags.contains(OpFlags::I) {
panic!("operate_no_mutate called on something that requires mutation");
} else if flags.contains(OpFlags::C) {
// This is equivalent to a non-mutating form of the `duplex` operation in the Python
// implementation, with `cbefore = True`
self.overwrite(data);
} else {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cbefore = cafter = False`
self.absorb(data);
};
}
// This is separately defined because it's the only method that can return a `Result`. See docs
// for recv_mac and meta_recv_mac.
fn generalized_recv_mac(&mut self, data: &mut [u8], is_meta: bool) -> Result<(), AuthError> {
// These are the (meta_)recv_mac flags
let flags = if is_meta {
OpFlags::I | OpFlags::C | OpFlags::T | OpFlags::M
} else {
OpFlags::I | OpFlags::C | OpFlags::T
};
// recv_mac can never be streamed
self.operate(flags, data, /* more */ false);
// Constant-time MAC check. This accumulates the truth values of byte == 0
let mut all_zero = subtle::Choice::from(1u8);
for b in data {
all_zero &= b.ct_eq(&0u8);
}
// If the buffer isn't all zeros, that's an invalid MAC
if !bool::from(all_zero) {
Err(AuthError)
} else {
Ok(())
}
}
/// Attempts to authenticate the current state against the given MAC. On failure, it returns an
/// `AuthError`. It behooves the user of this library to check this return value and overreact
/// on error.
pub fn recv_mac(&mut self, data: &mut [u8]) -> Result<(), AuthError> {
self.generalized_recv_mac(data, /* is_meta */ false)
}
/// Attempts to authenticate the current state against the given MAC. On failure, it returns an
/// `AuthError`. It behooves the user of this library to check this return value and overreact
/// on error.
pub fn meta_recv_mac(&mut self, data: &mut [u8]) -> Result<(), AuthError> {
self.generalized_recv_mac(data, /* is_meta */ true)
}
// This is separately defined because it's the only method that takes an integer and mutates
// its input
fn generalized_ratchet(&mut self, num_bytes_to_zero: usize, more: bool, is_meta: bool) {
// These are the (meta_)ratchet flags
let flags = if is_meta {
OpFlags::C | OpFlags::M
} else {
OpFlags::C
};
// We don't make an `operate` call, since this is a super special case. That means we have
// to validate the flags and make the `begin_op` call manually.
self.validate_streaming(flags, more);
if !more {
self.begin_op(flags);
}
self.zero_state(num_bytes_to_zero);
}
/// Ratchets the internal state forward in an irreversible way by zeroing bytes.
///
/// Takes a `usize` argument specifying the number of bytes of public state to zero. If the
/// size exceeds `self.rate`, Keccak-f will be called before more bytes are zeroed.
pub fn ratchet(&mut self, num_bytes_to_zero: usize, more: bool) {
self.generalized_ratchet(num_bytes_to_zero, more, /* is_meta */ false)
}
/// Ratchets the internal state forward in an irreversible way by zeroing bytes.
///
/// Takes a `usize` argument specifying the number of bytes of public state to zero. If the
/// size exceeds `self.rate`, Keccak-f will be called before more bytes are zeroed.
pub fn meta_ratchet(&mut self, num_bytes_to_zero: usize, more: bool) {
self.generalized_ratchet(num_bytes_to_zero, more, /* is_meta */ true)
}
//
// These operations mutate their inputs
//
def_op_mut!(
send_enc,
meta_send_enc,
OpFlags::A | OpFlags::C | OpFlags::T,
"Sends an encrypted message."
);
def_op_mut!(
recv_enc,
meta_recv_enc,
OpFlags::I | OpFlags::A | OpFlags::C | OpFlags::T,
"Receives an encrypted message."
);
def_op_mut!(
send_mac,
meta_send_mac,
OpFlags::C | OpFlags::T,
"Sends a MAC of the internal state. \
The output is independent of the initial contents of the input buffer."
);
def_op_mut!(
prf,
meta_prf,
OpFlags::I | OpFlags::A | OpFlags::C,
"Extracts pseudorandom data as a function of the internal state. \
The output is independent of the initial contents of the input buffer."
);
//
// These operations do not mutate their inputs
//
def_op_no_mut!(
send_clr,
meta_send_clr,
OpFlags::A | OpFlags::T,
"Sends a plaintext message."
);
def_op_no_mut!(
recv_clr,
meta_recv_clr,
OpFlags::I | OpFlags::A | OpFlags::T,
"Receives a plaintext message."
);
def_op_no_mut!(
ad,
meta_ad,
OpFlags::A,
"Mixes associated data into the internal state."
);
def_op_no_mut!(
key,
meta_key,
OpFlags::A | OpFlags::C,
"Sets a symmetric cipher key."
);
} | #[doc = $doc_str]
pub fn $name(&mut self, data: &[u8], more: bool) {
let flags = $flags; | random_line_split |
strobe.rs | use crate::{
keccak::{keccakf_u8, AlignedKeccakState, KECCAK_BLOCK_SIZE},
prelude::*,
};
use bitflags::bitflags;
use subtle::{self, ConstantTimeEq};
/// Version of Strobe that this crate implements.
pub const STROBE_VERSION: &str = "1.0.2";
bitflags! {
/// Operation flags defined in the Strobe paper. This is defined as a bitflags struct.
pub(crate) struct OpFlags: u8 {
/// Is data being moved inbound
const I = 1<<0;
/// Is data being sent to the application
const A = 1<<1;
/// Does this operation use cipher output
const C = 1<<2;
/// Is data being sent for transport
const T = 1<<3;
/// Use exclusively for metadata operations
const M = 1<<4;
/// Reserved and currently unimplemented. Using this will cause a panic.
const K = 1<<5;
}
}
/// Security parameter. Choice of 128 or 256 bits.
#[derive(Clone, Copy)]
#[repr(usize)]
pub enum SecParam {
B128 = 128,
B256 = 256,
}
/// An empty struct that just indicates that an error occurred in verifying a MAC
#[derive(Debug)]
pub struct AuthError;
/// The main Strobe object. This is currently limited to using Keccak-f\[1600\] as the internal
/// permutation function. For more information on this object, the [protocol specification][spec]
/// is a great resource.
///
/// [spec]: https://strobe.sourceforge.io/specs/
///
/// Description of method input
/// ---------------------------
/// Most operations exposed by `Strobe` take the same set of inputs. The arguments are
///
/// * `data` - The input data to the operation.
/// * `more` - For streaming purposes. Specifies whether you're trying to add more input / get more
/// output to/from the previous operation. For example:
///
/// ```rust
/// # extern crate strobe_rs;
/// # use strobe_rs::{SecParam, Strobe};
/// # fn main() {
/// # let mut s = Strobe::new(b"example-of-more", SecParam::B128);
/// s.ad(b"hello world", false);
/// # }
/// ```
/// is equivalent to
/// ```rust
/// # extern crate strobe_rs;
/// # use strobe_rs::{SecParam, Strobe};
/// # fn main() {
/// # let mut s = Strobe::new(b"example-of-more", SecParam::B128);
/// s.ad(b"hello ", false);
/// s.ad(b"world", true);
/// # }
/// ```
///
/// **NOTE:** If you try to set the `more` flag for an operation that is not preceded by the same
/// operation (e.g., if you try `ad` followed by `send_enc` with `more=true`), then **the function
/// will panic**, since that is an invalid use of the `more` flag.
///
/// Finally, `ratchet` and `meta_ratchet` take a `usize` argument instead of bytes. These functions
/// are individually commented below.
#[derive(Clone)]
pub struct Strobe {
/// Internal Keccak state
pub(crate) st: AlignedKeccakState,
/// Security parameter (128 or 256)
sec: SecParam,
/// This is the `R` parameter in the Strobe spec
rate: usize,
/// Index into `st`
pos: usize,
/// Index into `st`
pos_begin: usize,
/// Represents whether we're a sender or a receiver or uninitialized
is_receiver: Option<bool>,
/// The last operation performed. This is to verify that the `more` flag is only used across
/// identical operations.
prev_flags: Option<OpFlags>,
}
// This defines an operation and meta-operation that mutates its input
macro_rules! def_op_mut {
($name:ident, $meta_name:ident, $flags:expr, $doc_str:expr) => {
#[doc = $doc_str]
pub fn $name(&mut self, data: &mut [u8], more: bool) {
let flags = $flags;
self.operate(flags, data, more);
}
#[doc = $doc_str]
pub fn $meta_name(&mut self, data: &mut [u8], more: bool) {
let flags = $flags | OpFlags::M;
self.operate(flags, data, more);
}
};
}
// This defines an operation and meta-operation that does not mutate its input
macro_rules! def_op_no_mut {
($name:ident, $meta_name:ident, $flags:expr, $doc_str:expr) => {
#[doc = $doc_str]
pub fn $name(&mut self, data: &[u8], more: bool) {
let flags = $flags;
self.operate_no_mutate(flags, data, more);
}
#[doc = $doc_str]
pub fn $meta_name(&mut self, data: &[u8], more: bool) {
let flags = $flags | OpFlags::M;
self.operate_no_mutate(flags, data, more);
}
};
}
impl Strobe {
/// Makes a new `Strobe` object with a given protocol byte string and security parameter.
pub fn new(proto: &[u8], sec: SecParam) -> Strobe {
let rate = KECCAK_BLOCK_SIZE * 8 - (sec as usize) / 4 - 2;
assert!(rate >= 1);
assert!(rate < 254);
// Initialize state: st = F([0x01, R+2, 0x01, 0x00, 0x01, 0x60] + b"STROBEvX.Y.Z")
let mut st_buf = [0u8; KECCAK_BLOCK_SIZE * 8];
st_buf[0..6].copy_from_slice(&[0x01, (rate as u8) + 2, 0x01, 0x00, 0x01, 0x60]);
st_buf[6..13].copy_from_slice(b"STROBEv");
st_buf[13..18].copy_from_slice(STROBE_VERSION.as_bytes());
let mut st = AlignedKeccakState(st_buf);
keccakf_u8(&mut st);
let mut strobe = Strobe {
st,
sec,
rate,
pos: 0,
pos_begin: 0,
is_receiver: None,
prev_flags: None,
};
// Mix the protocol into the state
strobe.meta_ad(proto, false);
strobe
}
/// Returns a string of the form `Strobe-Keccak-<sec>/<b>v<ver>` where `sec` is the bits of
/// security (128 or 256), `b` is the block size (in bits) of the Keccak permutation function,
/// and `ver` is the protocol version.
pub fn version_str(&self) -> String {
format!(
"Strobe-Keccak-{}/{}-v{}",
self.sec as usize,
KECCAK_BLOCK_SIZE * 64,
STROBE_VERSION
)
}
/// Validates that the `more` flag is being used correctly. Panics when validation fails.
fn validate_streaming(&mut self, flags: OpFlags, more: bool) {
// Streaming only makes sense if this operation is the same as last. For example you can do
// s.ad("hello", false);
// s.ad(" world", true).
// But you can't do
// s.ad("hello", false);
// s.key(" world", true).
if more {
assert_eq!(
self.prev_flags,
Some(flags),
"`more` can only be used when this operation is the same as the previous operation"
);
}
// Update the last-performed operation (i.e., the one we're about to perform)
self.prev_flags = Some(flags);
}
// Runs the permutation function on the internal state
fn run_f(&mut self) {
self.st.0[self.pos] ^= self.pos_begin as u8;
self.st.0[self.pos + 1] ^= 0x04;
self.st.0[self.rate + 1] ^= 0x80;
keccakf_u8(&mut self.st);
self.pos = 0;
self.pos_begin = 0;
}
/// XORs the given data into the state. This is a special case of the `duplex` code in the
/// STROBE paper.
fn | (&mut self, data: &[u8]) {
for b in data {
self.st.0[self.pos] ^= *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// XORs the given data into the state, then sets the data equal the state. This is a special
/// case of the `duplex` code in the STROBE paper.
fn absorb_and_set(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*state_byte ^= *b;
*b = *state_byte;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Copies the internal state into the given buffer. This is a special case of `absorb_and_set`
/// where `data` is all zeros.
fn copy_state(&mut self, data: &mut [u8]) {
for b in data {
*b = self.st.0[self.pos];
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with the given data while XORing the given data with the old state.
/// This is a special case of the `duplex` code in the STROBE paper.
fn exchange(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*b ^= *state_byte;
*state_byte ^= *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with the given data. This is a special case of `Strobe::exchange`,
/// where we do not want to mutate the input data.
fn overwrite(&mut self, data: &[u8]) {
for b in data {
self.st.0[self.pos] = *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Copies the state into the given buffer and sets the state to 0. This is a special case of
/// `Strobe::exchange`, where `data` is assumed to be the all-zeros string. This is precisely
/// the case when the current operation is PRF.
fn squeeze(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*b = *state_byte;
*state_byte = 0;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with a specified number of zeros. This is a special case of
/// `Strobe::exchange`. More specifically, it's a special case of `Strobe::overwrite` and
/// `Strobe::squeeze`. It's like `squeeze` in that we assume we've been given all zeros as
/// input, and like `overwrite` in that we do not mutate (or take) any input.
fn zero_state(&mut self, mut bytes_to_zero: usize) {
static ZEROS: [u8; 8 * KECCAK_BLOCK_SIZE] = [0u8; 8 * KECCAK_BLOCK_SIZE];
// Do the zero-writing in chunks
while bytes_to_zero > 0 {
let slice_len = core::cmp::min(self.rate - self.pos, bytes_to_zero);
self.st.0[self.pos..(self.pos + slice_len)].copy_from_slice(&ZEROS[..slice_len]);
self.pos += slice_len;
bytes_to_zero -= slice_len;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Mixes the current state index and flags into the state, accounting for whether we are
/// sending or receiving
fn begin_op(&mut self, mut flags: OpFlags) {
if flags.contains(OpFlags::T) {
let is_op_receiving = flags.contains(OpFlags::I);
// If uninitialized, take on the direction of the first directional operation we get
if self.is_receiver.is_none() {
self.is_receiver = Some(is_op_receiving);
}
// So that the sender and receiver agree, toggle the I flag as necessary
// This is equivalent to flags ^= is_receiver
flags.set(OpFlags::I, self.is_receiver.unwrap() != is_op_receiving);
}
let old_pos_begin = self.pos_begin;
self.pos_begin = self.pos + 1;
// Mix in the position and flags
let to_mix = &mut [old_pos_begin as u8, flags.bits()];
self.absorb(&to_mix[..]);
let force_f = flags.contains(OpFlags::C) || flags.contains(OpFlags::K);
if force_f && self.pos != 0 {
self.run_f();
}
}
/// Performs the state / data transformation that corresponds to the given flags. If `more` is
/// given, this will treat `data` as a continuation of the data given in the previous
/// call to `operate`.
pub(crate) fn operate(&mut self, flags: OpFlags, data: &mut [u8], more: bool) {
// Make sure the K opflag isn't being used, and that the `more` flag isn't being misused
assert!(!flags.contains(OpFlags::K), "Op flag K not implemented");
self.validate_streaming(flags, more);
// If `more` isn't set, this is a new operation. Do the begin_op sequence
if !more {
self.begin_op(flags);
}
// Meta-ness is only relevant for `begin_op`. Remove it to simplify the below logic.
let flags = flags & !OpFlags::M;
// TODO?: Assert that input is empty under some flag conditions
if flags.contains(OpFlags::C) && flags.contains(OpFlags::T) && !flags.contains(OpFlags::I) {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cafter = True`
if flags == OpFlags::C | OpFlags::T {
// This is `send_mac`. Pretend the input is all zeros
self.copy_state(data)
} else {
self.absorb_and_set(data);
}
} else if flags == OpFlags::I | OpFlags::A | OpFlags::C {
// Special case of case below. This is PRF. Use `squeeze` instead of `exchange`.
self.squeeze(data);
} else if flags.contains(OpFlags::C) {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cbefore = True`
self.exchange(data);
} else {
// This should normally call `absorb`, but `absorb` does not mutate, so the implementor
// should have used operate_no_mutate instead
panic!("operate should not be called for operations that do not require mutation");
}
}
/// Performs the state transformation that corresponds to the given flags. If `more` is given,
/// this will treat `data` as a continuation of the data given in the previous call to
/// `operate`. This uses non-mutating variants of the specializations of the `duplex` function.
pub(crate) fn operate_no_mutate(&mut self, flags: OpFlags, data: &[u8], more: bool) {
// Make sure the K opflag isn't being used, and that the `more` flag isn't being misused
assert!(!flags.contains(OpFlags::K), "Op flag K not implemented");
self.validate_streaming(flags, more);
// If `more` isn't set, this is a new operation. Do the begin_op sequence
if !more {
self.begin_op(flags);
}
// There are no non-mutating variants of things with flags & (C | T | I) == C | T
if flags.contains(OpFlags::C) && flags.contains(OpFlags::T) && !flags.contains(OpFlags::I) {
panic!("operate_no_mutate called on something that requires mutation");
} else if flags.contains(OpFlags::C) {
// This is equivalent to a non-mutating form of the `duplex` operation in the Python
// implementation, with `cbefore = True`
self.overwrite(data);
} else {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cbefore = cafter = False`
self.absorb(data);
};
}
// This is separately defined because it's the only method that can return a `Result`. See docs
// for recv_mac and meta_recv_mac.
fn generalized_recv_mac(&mut self, data: &mut [u8], is_meta: bool) -> Result<(), AuthError> {
// These are the (meta_)recv_mac flags
let flags = if is_meta {
OpFlags::I | OpFlags::C | OpFlags::T | OpFlags::M
} else {
OpFlags::I | OpFlags::C | OpFlags::T
};
// recv_mac can never be streamed
self.operate(flags, data, /* more */ false);
// Constant-time MAC check. This accumulates the truth values of byte == 0
let mut all_zero = subtle::Choice::from(1u8);
for b in data {
all_zero &= b.ct_eq(&0u8);
}
// If the buffer isn't all zeros, that's an invalid MAC
if !bool::from(all_zero) {
Err(AuthError)
} else {
Ok(())
}
}
/// Attempts to authenticate the current state against the given MAC. On failure, it returns an
/// `AuthError`. It behooves the user of this library to check this return value and overreact
/// on error.
pub fn recv_mac(&mut self, data: &mut [u8]) -> Result<(), AuthError> {
self.generalized_recv_mac(data, /* is_meta */ false)
}
/// Attempts to authenticate the current state against the given MAC. On failure, it returns an
/// `AuthError`. It behooves the user of this library to check this return value and overreact
/// on error.
pub fn meta_recv_mac(&mut self, data: &mut [u8]) -> Result<(), AuthError> {
self.generalized_recv_mac(data, /* is_meta */ true)
}
// This is separately defined because it's the only method that takes an integer and mutates
// its input
fn generalized_ratchet(&mut self, num_bytes_to_zero: usize, more: bool, is_meta: bool) {
// These are the (meta_)ratchet flags
let flags = if is_meta {
OpFlags::C | OpFlags::M
} else {
OpFlags::C
};
// We don't make an `operate` call, since this is a super special case. That means we have
// to validate the flags and make the `begin_op` call manually.
self.validate_streaming(flags, more);
if !more {
self.begin_op(flags);
}
self.zero_state(num_bytes_to_zero);
}
/// Ratchets the internal state forward in an irreversible way by zeroing bytes.
///
/// Takes a `usize` argument specifying the number of bytes of public state to zero. If the
/// size exceeds `self.rate`, Keccak-f will be called before more bytes are zeroed.
pub fn ratchet(&mut self, num_bytes_to_zero: usize, more: bool) {
self.generalized_ratchet(num_bytes_to_zero, more, /* is_meta */ false)
}
/// Ratchets the internal state forward in an irreversible way by zeroing bytes.
///
/// Takes a `usize` argument specifying the number of bytes of public state to zero. If the
/// size exceeds `self.rate`, Keccak-f will be called before more bytes are zeroed.
pub fn meta_ratchet(&mut self, num_bytes_to_zero: usize, more: bool) {
self.generalized_ratchet(num_bytes_to_zero, more, /* is_meta */ true)
}
//
// These operations mutate their inputs
//
def_op_mut!(
send_enc,
meta_send_enc,
OpFlags::A | OpFlags::C | OpFlags::T,
"Sends an encrypted message."
);
def_op_mut!(
recv_enc,
meta_recv_enc,
OpFlags::I | OpFlags::A | OpFlags::C | OpFlags::T,
"Receives an encrypted message."
);
def_op_mut!(
send_mac,
meta_send_mac,
OpFlags::C | OpFlags::T,
"Sends a MAC of the internal state. \
The output is independent of the initial contents of the input buffer."
);
def_op_mut!(
prf,
meta_prf,
OpFlags::I | OpFlags::A | OpFlags::C,
"Extracts pseudorandom data as a function of the internal state. \
The output is independent of the initial contents of the input buffer."
);
//
// These operations do not mutate their inputs
//
def_op_no_mut!(
send_clr,
meta_send_clr,
OpFlags::A | OpFlags::T,
"Sends a plaintext message."
);
def_op_no_mut!(
recv_clr,
meta_recv_clr,
OpFlags::I | OpFlags::A | OpFlags::T,
"Receives a plaintext message."
);
def_op_no_mut!(
ad,
meta_ad,
OpFlags::A,
"Mixes associated data into the internal state."
);
def_op_no_mut!(
key,
meta_key,
OpFlags::A | OpFlags::C,
"Sets a symmetric cipher key."
);
}
| absorb | identifier_name |
strobe.rs | use crate::{
keccak::{keccakf_u8, AlignedKeccakState, KECCAK_BLOCK_SIZE},
prelude::*,
};
use bitflags::bitflags;
use subtle::{self, ConstantTimeEq};
/// Version of Strobe that this crate implements.
pub const STROBE_VERSION: &str = "1.0.2";
bitflags! {
/// Operation flags defined in the Strobe paper. This is defined as a bitflags struct.
pub(crate) struct OpFlags: u8 {
/// Is data being moved inbound
const I = 1<<0;
/// Is data being sent to the application
const A = 1<<1;
/// Does this operation use cipher output
const C = 1<<2;
/// Is data being sent for transport
const T = 1<<3;
/// Use exclusively for metadata operations
const M = 1<<4;
/// Reserved and currently unimplemented. Using this will cause a panic.
const K = 1<<5;
}
}
/// Security parameter. Choice of 128 or 256 bits.
#[derive(Clone, Copy)]
#[repr(usize)]
pub enum SecParam {
B128 = 128,
B256 = 256,
}
/// An empty struct that just indicates that an error occurred in verifying a MAC
#[derive(Debug)]
pub struct AuthError;
/// The main Strobe object. This is currently limited to using Keccak-f\[1600\] as the internal
/// permutation function. For more information on this object, the [protocol specification][spec]
/// is a great resource.
///
/// [spec]: https://strobe.sourceforge.io/specs/
///
/// Description of method input
/// ---------------------------
/// Most operations exposed by `Strobe` take the same set of inputs. The arguments are
///
/// * `data` - The input data to the operation.
/// * `more` - For streaming purposes. Specifies whether you're trying to add more input / get more
/// output to/from the previous operation. For example:
///
/// ```rust
/// # extern crate strobe_rs;
/// # use strobe_rs::{SecParam, Strobe};
/// # fn main() {
/// # let mut s = Strobe::new(b"example-of-more", SecParam::B128);
/// s.ad(b"hello world", false);
/// # }
/// ```
/// is equivalent to
/// ```rust
/// # extern crate strobe_rs;
/// # use strobe_rs::{SecParam, Strobe};
/// # fn main() {
/// # let mut s = Strobe::new(b"example-of-more", SecParam::B128);
/// s.ad(b"hello ", false);
/// s.ad(b"world", true);
/// # }
/// ```
///
/// **NOTE:** If you try to set the `more` flag for an operation that is not preceded by the same
/// operation (e.g., if you try `ad` followed by `send_enc` with `more=true`), then **the function
/// will panic**, since that is an invalid use of the `more` flag.
///
/// Finally, `ratchet` and `meta_ratchet` take a `usize` argument instead of bytes. These functions
/// are individually commented below.
#[derive(Clone)]
pub struct Strobe {
/// Internal Keccak state
pub(crate) st: AlignedKeccakState,
/// Security parameter (128 or 256)
sec: SecParam,
/// This is the `R` parameter in the Strobe spec
rate: usize,
/// Index into `st`
pos: usize,
/// Index into `st`
pos_begin: usize,
/// Represents whether we're a sender or a receiver or uninitialized
is_receiver: Option<bool>,
/// The last operation performed. This is to verify that the `more` flag is only used across
/// identical operations.
prev_flags: Option<OpFlags>,
}
// This defines an operation and meta-operation that mutates its input
macro_rules! def_op_mut {
($name:ident, $meta_name:ident, $flags:expr, $doc_str:expr) => {
#[doc = $doc_str]
pub fn $name(&mut self, data: &mut [u8], more: bool) {
let flags = $flags;
self.operate(flags, data, more);
}
#[doc = $doc_str]
pub fn $meta_name(&mut self, data: &mut [u8], more: bool) {
let flags = $flags | OpFlags::M;
self.operate(flags, data, more);
}
};
}
// This defines an operation and meta-operation that does not mutate its input
macro_rules! def_op_no_mut {
($name:ident, $meta_name:ident, $flags:expr, $doc_str:expr) => {
#[doc = $doc_str]
pub fn $name(&mut self, data: &[u8], more: bool) {
let flags = $flags;
self.operate_no_mutate(flags, data, more);
}
#[doc = $doc_str]
pub fn $meta_name(&mut self, data: &[u8], more: bool) {
let flags = $flags | OpFlags::M;
self.operate_no_mutate(flags, data, more);
}
};
}
impl Strobe {
/// Makes a new `Strobe` object with a given protocol byte string and security parameter.
pub fn new(proto: &[u8], sec: SecParam) -> Strobe {
let rate = KECCAK_BLOCK_SIZE * 8 - (sec as usize) / 4 - 2;
assert!(rate >= 1);
assert!(rate < 254);
// Initialize state: st = F([0x01, R+2, 0x01, 0x00, 0x01, 0x60] + b"STROBEvX.Y.Z")
let mut st_buf = [0u8; KECCAK_BLOCK_SIZE * 8];
st_buf[0..6].copy_from_slice(&[0x01, (rate as u8) + 2, 0x01, 0x00, 0x01, 0x60]);
st_buf[6..13].copy_from_slice(b"STROBEv");
st_buf[13..18].copy_from_slice(STROBE_VERSION.as_bytes());
let mut st = AlignedKeccakState(st_buf);
keccakf_u8(&mut st);
let mut strobe = Strobe {
st,
sec,
rate,
pos: 0,
pos_begin: 0,
is_receiver: None,
prev_flags: None,
};
// Mix the protocol into the state
strobe.meta_ad(proto, false);
strobe
}
/// Returns a string of the form `Strobe-Keccak-<sec>/<b>v<ver>` where `sec` is the bits of
/// security (128 or 256), `b` is the block size (in bits) of the Keccak permutation function,
/// and `ver` is the protocol version.
pub fn version_str(&self) -> String {
format!(
"Strobe-Keccak-{}/{}-v{}",
self.sec as usize,
KECCAK_BLOCK_SIZE * 64,
STROBE_VERSION
)
}
/// Validates that the `more` flag is being used correctly. Panics when validation fails.
fn validate_streaming(&mut self, flags: OpFlags, more: bool) {
// Streaming only makes sense if this operation is the same as last. For example you can do
// s.ad("hello", false);
// s.ad(" world", true).
// But you can't do
// s.ad("hello", false);
// s.key(" world", true).
if more {
assert_eq!(
self.prev_flags,
Some(flags),
"`more` can only be used when this operation is the same as the previous operation"
);
}
// Update the last-performed operation (i.e., the one we're about to perform)
self.prev_flags = Some(flags);
}
// Runs the permutation function on the internal state
fn run_f(&mut self) {
self.st.0[self.pos] ^= self.pos_begin as u8;
self.st.0[self.pos + 1] ^= 0x04;
self.st.0[self.rate + 1] ^= 0x80;
keccakf_u8(&mut self.st);
self.pos = 0;
self.pos_begin = 0;
}
/// XORs the given data into the state. This is a special case of the `duplex` code in the
/// STROBE paper.
fn absorb(&mut self, data: &[u8]) {
for b in data {
self.st.0[self.pos] ^= *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// XORs the given data into the state, then sets the data equal the state. This is a special
/// case of the `duplex` code in the STROBE paper.
fn absorb_and_set(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*state_byte ^= *b;
*b = *state_byte;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Copies the internal state into the given buffer. This is a special case of `absorb_and_set`
/// where `data` is all zeros.
fn copy_state(&mut self, data: &mut [u8]) {
for b in data {
*b = self.st.0[self.pos];
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with the given data while XORing the given data with the old state.
/// This is a special case of the `duplex` code in the STROBE paper.
fn exchange(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*b ^= *state_byte;
*state_byte ^= *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with the given data. This is a special case of `Strobe::exchange`,
/// where we do not want to mutate the input data.
fn overwrite(&mut self, data: &[u8]) {
for b in data {
self.st.0[self.pos] = *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Copies the state into the given buffer and sets the state to 0. This is a special case of
/// `Strobe::exchange`, where `data` is assumed to be the all-zeros string. This is precisely
/// the case when the current operation is PRF.
fn squeeze(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*b = *state_byte;
*state_byte = 0;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with a specified number of zeros. This is a special case of
/// `Strobe::exchange`. More specifically, it's a special case of `Strobe::overwrite` and
/// `Strobe::squeeze`. It's like `squeeze` in that we assume we've been given all zeros as
/// input, and like `overwrite` in that we do not mutate (or take) any input.
fn zero_state(&mut self, mut bytes_to_zero: usize) {
static ZEROS: [u8; 8 * KECCAK_BLOCK_SIZE] = [0u8; 8 * KECCAK_BLOCK_SIZE];
// Do the zero-writing in chunks
while bytes_to_zero > 0 {
let slice_len = core::cmp::min(self.rate - self.pos, bytes_to_zero);
self.st.0[self.pos..(self.pos + slice_len)].copy_from_slice(&ZEROS[..slice_len]);
self.pos += slice_len;
bytes_to_zero -= slice_len;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Mixes the current state index and flags into the state, accounting for whether we are
/// sending or receiving
fn begin_op(&mut self, mut flags: OpFlags) {
if flags.contains(OpFlags::T) {
let is_op_receiving = flags.contains(OpFlags::I);
// If uninitialized, take on the direction of the first directional operation we get
if self.is_receiver.is_none() {
self.is_receiver = Some(is_op_receiving);
}
// So that the sender and receiver agree, toggle the I flag as necessary
// This is equivalent to flags ^= is_receiver
flags.set(OpFlags::I, self.is_receiver.unwrap() != is_op_receiving);
}
let old_pos_begin = self.pos_begin;
self.pos_begin = self.pos + 1;
// Mix in the position and flags
let to_mix = &mut [old_pos_begin as u8, flags.bits()];
self.absorb(&to_mix[..]);
let force_f = flags.contains(OpFlags::C) || flags.contains(OpFlags::K);
if force_f && self.pos != 0 {
self.run_f();
}
}
/// Performs the state / data transformation that corresponds to the given flags. If `more` is
/// given, this will treat `data` as a continuation of the data given in the previous
/// call to `operate`.
pub(crate) fn operate(&mut self, flags: OpFlags, data: &mut [u8], more: bool) {
// Make sure the K opflag isn't being used, and that the `more` flag isn't being misused
assert!(!flags.contains(OpFlags::K), "Op flag K not implemented");
self.validate_streaming(flags, more);
// If `more` isn't set, this is a new operation. Do the begin_op sequence
if !more {
self.begin_op(flags);
}
// Meta-ness is only relevant for `begin_op`. Remove it to simplify the below logic.
let flags = flags & !OpFlags::M;
// TODO?: Assert that input is empty under some flag conditions
if flags.contains(OpFlags::C) && flags.contains(OpFlags::T) && !flags.contains(OpFlags::I) {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cafter = True`
if flags == OpFlags::C | OpFlags::T {
// This is `send_mac`. Pretend the input is all zeros
self.copy_state(data)
} else {
self.absorb_and_set(data);
}
} else if flags == OpFlags::I | OpFlags::A | OpFlags::C {
// Special case of case below. This is PRF. Use `squeeze` instead of `exchange`.
self.squeeze(data);
} else if flags.contains(OpFlags::C) {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cbefore = True`
self.exchange(data);
} else {
// This should normally call `absorb`, but `absorb` does not mutate, so the implementor
// should have used operate_no_mutate instead
panic!("operate should not be called for operations that do not require mutation");
}
}
/// Performs the state transformation that corresponds to the given flags. If `more` is given,
/// this will treat `data` as a continuation of the data given in the previous call to
/// `operate`. This uses non-mutating variants of the specializations of the `duplex` function.
pub(crate) fn operate_no_mutate(&mut self, flags: OpFlags, data: &[u8], more: bool) {
// Make sure the K opflag isn't being used, and that the `more` flag isn't being misused
assert!(!flags.contains(OpFlags::K), "Op flag K not implemented");
self.validate_streaming(flags, more);
// If `more` isn't set, this is a new operation. Do the begin_op sequence
if !more {
self.begin_op(flags);
}
// There are no non-mutating variants of things with flags & (C | T | I) == C | T
if flags.contains(OpFlags::C) && flags.contains(OpFlags::T) && !flags.contains(OpFlags::I) {
panic!("operate_no_mutate called on something that requires mutation");
} else if flags.contains(OpFlags::C) {
// This is equivalent to a non-mutating form of the `duplex` operation in the Python
// implementation, with `cbefore = True`
self.overwrite(data);
} else {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cbefore = cafter = False`
self.absorb(data);
};
}
// This is separately defined because it's the only method that can return a `Result`. See docs
// for recv_mac and meta_recv_mac.
fn generalized_recv_mac(&mut self, data: &mut [u8], is_meta: bool) -> Result<(), AuthError> {
// These are the (meta_)recv_mac flags
let flags = if is_meta {
OpFlags::I | OpFlags::C | OpFlags::T | OpFlags::M
} else {
OpFlags::I | OpFlags::C | OpFlags::T
};
// recv_mac can never be streamed
self.operate(flags, data, /* more */ false);
// Constant-time MAC check. This accumulates the truth values of byte == 0
let mut all_zero = subtle::Choice::from(1u8);
for b in data {
all_zero &= b.ct_eq(&0u8);
}
// If the buffer isn't all zeros, that's an invalid MAC
if !bool::from(all_zero) {
Err(AuthError)
} else {
Ok(())
}
}
/// Attempts to authenticate the current state against the given MAC. On failure, it returns an
/// `AuthError`. It behooves the user of this library to check this return value and overreact
/// on error.
pub fn recv_mac(&mut self, data: &mut [u8]) -> Result<(), AuthError> {
self.generalized_recv_mac(data, /* is_meta */ false)
}
/// Attempts to authenticate the current state against the given MAC. On failure, it returns an
/// `AuthError`. It behooves the user of this library to check this return value and overreact
/// on error.
pub fn meta_recv_mac(&mut self, data: &mut [u8]) -> Result<(), AuthError> {
self.generalized_recv_mac(data, /* is_meta */ true)
}
// This is separately defined because it's the only method that takes an integer and mutates
// its input
fn generalized_ratchet(&mut self, num_bytes_to_zero: usize, more: bool, is_meta: bool) {
// These are the (meta_)ratchet flags
let flags = if is_meta {
OpFlags::C | OpFlags::M
} else {
OpFlags::C
};
// We don't make an `operate` call, since this is a super special case. That means we have
// to validate the flags and make the `begin_op` call manually.
self.validate_streaming(flags, more);
if !more {
self.begin_op(flags);
}
self.zero_state(num_bytes_to_zero);
}
/// Ratchets the internal state forward in an irreversible way by zeroing bytes.
///
/// Takes a `usize` argument specifying the number of bytes of public state to zero. If the
/// size exceeds `self.rate`, Keccak-f will be called before more bytes are zeroed.
pub fn ratchet(&mut self, num_bytes_to_zero: usize, more: bool) {
self.generalized_ratchet(num_bytes_to_zero, more, /* is_meta */ false)
}
/// Ratchets the internal state forward in an irreversible way by zeroing bytes.
///
/// Takes a `usize` argument specifying the number of bytes of public state to zero. If the
/// size exceeds `self.rate`, Keccak-f will be called before more bytes are zeroed.
pub fn meta_ratchet(&mut self, num_bytes_to_zero: usize, more: bool) |
//
// These operations mutate their inputs
//
def_op_mut!(
send_enc,
meta_send_enc,
OpFlags::A | OpFlags::C | OpFlags::T,
"Sends an encrypted message."
);
def_op_mut!(
recv_enc,
meta_recv_enc,
OpFlags::I | OpFlags::A | OpFlags::C | OpFlags::T,
"Receives an encrypted message."
);
def_op_mut!(
send_mac,
meta_send_mac,
OpFlags::C | OpFlags::T,
"Sends a MAC of the internal state. \
The output is independent of the initial contents of the input buffer."
);
def_op_mut!(
prf,
meta_prf,
OpFlags::I | OpFlags::A | OpFlags::C,
"Extracts pseudorandom data as a function of the internal state. \
The output is independent of the initial contents of the input buffer."
);
//
// These operations do not mutate their inputs
//
def_op_no_mut!(
send_clr,
meta_send_clr,
OpFlags::A | OpFlags::T,
"Sends a plaintext message."
);
def_op_no_mut!(
recv_clr,
meta_recv_clr,
OpFlags::I | OpFlags::A | OpFlags::T,
"Receives a plaintext message."
);
def_op_no_mut!(
ad,
meta_ad,
OpFlags::A,
"Mixes associated data into the internal state."
);
def_op_no_mut!(
key,
meta_key,
OpFlags::A | OpFlags::C,
"Sets a symmetric cipher key."
);
}
| {
self.generalized_ratchet(num_bytes_to_zero, more, /* is_meta */ true)
} | identifier_body |
serve_core.py | #!/usr/bin/env python3
#
# usage: tools/serve_core.py build/fw/objs/fw.elf /tmp/console.log
#
# Then you can connect with gdb. The ESP8266 SDK image provides a debugger with
# reasonable support of lx106. Example invocation:
#
# docker run -v $PWD:/cesanta -ti \
# docker.cesanta.com/esp8266-build-oss:latest \
# xt-gdb /cesanta/fw/platforms/esp8266/build/fw.out \
# -ex "target remote localhost:1234"
#
# If you run on OSX or windows, you have to put the IP of your host instead of
# localhost since gdb will run in a virtualmachine.
import argparse
import base64
import binascii
import ctypes
import json
import os
import re
import socketserver
import struct
import sys
import elftools.elf.elffile # apt install python-pyelftools
parser = argparse.ArgumentParser(description='Serve ESP core dump to GDB')
parser.add_argument('--port', default=1234, type=int, help='listening port')
parser.add_argument('--rom', required=False, help='rom section')
parser.add_argument('--rom_addr', required=False, type=lambda x: int(x,16), help='rom map addr')
parser.add_argument('--debug', action='store_true', default=False)
parser.add_argument('--xtensa_addr_fixup', default=False, type=bool)
parser.add_argument('--target_descriptions', default='/opt/serve_core')
parser.add_argument('elf', help='Program executable')
parser.add_argument('log', help='serial log containing core dump snippet')
args = parser.parse_args()
START_DELIM = b'--- BEGIN CORE DUMP ---'
END_DELIM = b'---- END CORE DUMP ----'
class FreeRTOSTask(object):
def __init__(self, e):
self.xHandle = e["h"]
self.pcTaskName = e["n"]
self.eCurrentState = e["st"]
self.uxCurrentPriority = e["cpri"]
self.uxBasePriority = e["bpri"]
self.pxStackBase = e["sb"]
self.pxTopOfStack = e["sp"]
if "regs" in e:
self.regs = base64.decodebytes(bytes(e["regs"]["data"], "ascii"))
else:
self.regs = None
def __str__(self):
return "0x%x '%s' pri %d/%d sp 0x%x (%d free)" % (
self.xHandle, self.pcTaskName, self.uxCurrentPriority, self.uxBasePriority,
self.pxTopOfStack, self.pxTopOfStack - self.pxStackBase)
class Core(object):
def __init__(self, filename):
self._dump = self._read(filename)
self.mem = self._map_core(self._dump)
if args.rom:
self.mem.extend(self._map_firmware(args.rom_addr, args.rom))
self.mem.extend(self._map_elf(args.elf))
self.regs = base64.decodebytes(bytes(self._dump["REGS"]["data"], "ascii"))
if "freertos" in self._dump:
print("Dump contains FreeRTOS task info")
self.tasks = dict((t["h"], FreeRTOSTask(t)) for t in self._dump["freertos"]["tasks"])
else:
self.tasks = {}
self.target_features = self._dump.get("target_features")
def get_cur_task(self):
return self._dump.get("freertos", {}).get("cur", None)
def _search_backwards(self, f, start_offset, pattern):
offset = start_offset
while True:
offset = max(0, offset - 10000)
f.seek(offset)
data = f.read(min(10000, start_offset))
pos = data.rfind(pattern)
if pos >= 0:
return offset + pos
elif offset == 0:
return -1
offset += 5000
def _read(self, filename):
with open(filename, "rb") as f:
f.seek(0, os.SEEK_END)
size = f.tell()
end_pos = self._search_backwards(f, f.tell(), END_DELIM)
if end_pos == -1:
print("Cannot find end delimiter:", END_DELIM, file=sys.stderr)
sys.exit(1)
start_pos = self._search_backwards(f, end_pos, START_DELIM)
if start_pos == -1:
print("Cannot find start delimiter:", START_DELIM, file=sys.stderr)
sys.exit(1)
start_pos += len(START_DELIM)
print("Found core at %d - %d" % (start_pos, end_pos), file=sys.stderr)
f.seek(start_pos)
core_lines = []
while True:
l = f.readline().strip()
if l == END_DELIM:
break
core_lines.append(l.decode("ascii"))
core_json = ''.join(core_lines)
stripped = re.sub(r'(?im)\s+(\[.{1,40}\])?\s*', '', core_json)
return json.loads(stripped)
def _map_core(self, core):
mem = []
for k, v in list(core.items()):
if not isinstance(v, dict) or k == 'REGS' or "addr" not in v:
continue
data = base64.decodebytes(bytes(v["data"], "ascii"))
print("Mapping {0}: {1} @ {2:#02x}".format(k, len(data), v["addr"]), file=sys.stderr)
if "crc32" in v:
crc32 = ctypes.c_uint32(binascii.crc32(data))
expected_crc32 = ctypes.c_uint32(v["crc32"])
if crc32.value != expected_crc32.value:
print("CRC mismatch, section corrupted %s %s" % (crc32, expected_crc32), file=sys.stderr)
sys.exit(1)
mem.append((v["addr"], v["addr"] + len(data), data))
return mem
def _map_firmware(self, addr, filename):
with open(filename, "rb") as f:
data = f.read()
result = []
i = 0
magic, count = struct.unpack('<BB', data[i:i+2])
if magic == 0xea and count == 0x04:
# This is a V2 image, IRAM will be inside.
(magic, count, f1, f2, entry, _, irom_len) = struct.unpack('<BBBBIII', data[i:i+16])
print("Mapping IROM: {0} @ {1:#02x}".format(irom_len, addr), file=sys.stderr)
result.append((addr, addr + irom_len, data[i:i+irom_len+16]))
# The rest (IRAM) will be in the core.
else:
print("Mapping {0} at {1:#02x}".format(filename, addr), file=sys.stderr)
result.append((addr, addr + len(data), data))
return result
def _map_elf(self, elf_file_name):
result = []
f = open(elf_file_name, "rb")
ef = elftools.elf.elffile.ELFFile(f)
for i, sec in enumerate(ef.iter_sections()):
addr, size, off = sec["sh_addr"], sec["sh_size"], sec["sh_offset"]
if addr > 0 and size > 0:
print("Mapping {0} {1}: {2} @ {3:#02x}".format(elf_file_name, sec.name, size, addr), file=sys.stderr)
f.seek(off)
assert f.tell() == off
data = f.read(size)
assert len(data) == size
result.append((addr, addr + size, data))
return result
def read(self, addr, size):
for base, end, data in self.mem:
if addr >= base and addr < end:
return data[addr - base : addr - base + size]
print("Unmapped addr", hex(addr), file=sys.stderr)
return b"\0" * size
class GDBHandler(socketserver.BaseRequestHandler):
def handle(self):
self._core = core = Core(args.log)
self._curtask = None
print("Loaded core dump from last snippet in ", args.log, file=sys.stderr)
while self.expect_packet_start():
pkt = self.read_packet()
if args.debug:
print("<<", pkt, file=sys.stderr)
if pkt == "?": # status -> trap
self.send_str("S09")
elif pkt == "g": # dump registers
if self._curtask and self._curtask.regs:
# Dump specific task's registers
regs = self._curtask.regs
else:
regs = core.regs
self.send_str(self.encode_bytes(regs))
elif pkt[0] == "G": # set registers
core.regs = self.decode_bytes(pkt[1:])
self.send_str("OK")
elif pkt[0] == "m": # read memory
addr, size = [int(n, 16) for n in pkt[1:].split(',')]
if args.xtensa_addr_fixup and addr < 0x10000000 and addr > 0x80000:
print('fixup %08x' % addr, file=sys.stderr)
addr |= 0x40000000
bs = core.read(addr, size)
#if bs == "\0\0\0\0":
# bs = "\x01\0\0\0"
#print >>sys.stderr, "<<", " ".join("{:02x}".format(ord(c)) for c in bs)
self.send_str(self.encode_bytes(bs))
elif pkt.startswith("Hg"):
tid = int(pkt[2:], 16)
self._curtask = core.tasks.get(tid)
self.send_str("OK")
elif pkt.startswith("Hc-1"):
# cannot continue, this is post mortem debugging
self.send_str("E01")
elif pkt == "qC":
t = core.get_cur_task()
if t:
self.send_str("QC%016x" % t)
else:
self.send_str("1")
elif pkt == "qAttached":
self.send_str("1")
elif pkt == "qSymbol::":
self.send_str("OK")
elif pkt == "qfThreadInfo":
if core.tasks:
self.send_str("m%s" % ",".join("%016x" % t for t in core.tasks))
else:
self.send_str("l")
elif pkt == "qsThreadInfo":
self.send_str("l")
elif pkt.startswith("qThreadExtraInfo,"):
self.send_thread_extra_info(int(pkt[17:], 16))
elif pkt[0] == "T":
tid = int(pkt[1:], 16)
if tid in core.tasks:
self.send_str("OK")
else:
self.send_str("ERR00")
elif pkt == "D":
self.send_str("OK")
elif pkt in ("qTStatus", "qOffsets", "vMustReplyEmpty"):
# silently ignore
self.send_str("")
elif pkt.startswith("qSupported"):
features = []
if self._core.target_features:
features.append("qXfer:features:read+")
print("Target features: %s" % self._core.target_features)
else:
features.append("qXfer:features:read-")
self.send_str(";".join(features))
elif pkt.startswith("qXfer:features:read:"):
self.send_file(pkt)
else:
print("Ignoring unknown command '%s'" % (pkt,), file=sys.stderr)
self.send_str("")
print("GDB closed the connection", file=sys.stderr)
sys.exit(0)
def send_file(self, pkt):
_, _, _, fname, off_len = pkt.split(":")
if fname == "target.xml":
fname = self._core.target_features
if "/" in fname:
self.send_str("E00")
return
fname = os.path.join(args.target_descriptions, fname)
off_s, length_s = off_len.split(",")
off, length = int(off_s, 16), int(length_s, 16)
try:
if off == 0:
print("Serving %s" % fname)
with open(fname, "rb") as f:
if f.seek(off) != off:
self.send_str("l")
return
data = f.read(length)
if len(data) == length:
self.send_str("m" + data.decode("ascii"))
elif len(data) > 0:
self.send_str("l" + data.decode("ascii"))
else:
self.send_str("l")
except IOError as e:
print("error reading %s, %d @ %d: %s" % (fname, length, off, e))
self.send_str("E00")
def encode_bytes(self, bs):
return binascii.hexlify(bs).decode("ascii")
def decode_bytes(self, s):
|
def send_ack(self):
self.request.sendall(b"+");
def send_nack(self):
self.request.sendall(b"-");
def send_str(self, s):
if type(s) is bytes:
s = s.decode("ascii")
if args.debug:
print(">>", s, file=sys.stderr)
self.request.sendall("${0}#{1:02x}".format(s, self._checksum(s)).encode("ascii"))
def _checksum(self, s):
if type(s) is str:
return sum(ord(i) for i in s) % 0x100
else: # bytes
return sum(s) % 0x100
def expect_packet_start(self):
return len(self.read_until('$')) > 0
def read_packet(self):
pkt = self.read_until('#')
chk = b""
chk += self.request.recv(1)
chk += self.request.recv(1)
if len(chk) != 2:
return ""
if int(chk, 16) != self._checksum(pkt):
print("Bad checksum for {0}; got: {1} want: {2:02x}".format(pkt, chk, "want:", self._checksum(pkt)), file=sys.stderr)
self.send_nack()
return ""
self.send_ack()
return pkt.decode("ascii")
def read_until(self, limit):
buf = b""
limit = bytes(limit, "ascii")
while True:
ch = self.request.recv(1)
if len(ch) == 0: # eof
return ""
if ch == limit:
return buf
buf += ch
def send_thread_extra_info(self, tid):
task = self._core.tasks.get(tid)
if task:
self.send_str(binascii.hexlify(str(task).encode("ascii")))
else:
self.send_str(binascii.hexlify(("[Invalid task 0x%08x]" % tid).encode("ascii")))
class TCPServer(socketserver.TCPServer):
allow_reuse_address = True
server = TCPServer(('0.0.0.0', args.port), GDBHandler)
print("Waiting for gdb on", args.port)
server.serve_forever()
| return binascii.unhexlify(s) | identifier_body |
serve_core.py | #!/usr/bin/env python3
#
# usage: tools/serve_core.py build/fw/objs/fw.elf /tmp/console.log
#
# Then you can connect with gdb. The ESP8266 SDK image provides a debugger with
# reasonable support of lx106. Example invocation:
#
# docker run -v $PWD:/cesanta -ti \
# docker.cesanta.com/esp8266-build-oss:latest \
# xt-gdb /cesanta/fw/platforms/esp8266/build/fw.out \
# -ex "target remote localhost:1234"
#
# If you run on OSX or windows, you have to put the IP of your host instead of
# localhost since gdb will run in a virtualmachine.
import argparse
import base64
import binascii
import ctypes
import json
import os
import re
import socketserver
import struct
import sys
import elftools.elf.elffile # apt install python-pyelftools
parser = argparse.ArgumentParser(description='Serve ESP core dump to GDB')
parser.add_argument('--port', default=1234, type=int, help='listening port')
parser.add_argument('--rom', required=False, help='rom section')
parser.add_argument('--rom_addr', required=False, type=lambda x: int(x,16), help='rom map addr')
parser.add_argument('--debug', action='store_true', default=False)
parser.add_argument('--xtensa_addr_fixup', default=False, type=bool)
parser.add_argument('--target_descriptions', default='/opt/serve_core')
parser.add_argument('elf', help='Program executable')
parser.add_argument('log', help='serial log containing core dump snippet')
args = parser.parse_args()
START_DELIM = b'--- BEGIN CORE DUMP ---'
END_DELIM = b'---- END CORE DUMP ----'
class FreeRTOSTask(object):
def __init__(self, e):
self.xHandle = e["h"]
self.pcTaskName = e["n"]
self.eCurrentState = e["st"]
self.uxCurrentPriority = e["cpri"]
self.uxBasePriority = e["bpri"]
self.pxStackBase = e["sb"]
self.pxTopOfStack = e["sp"]
if "regs" in e:
self.regs = base64.decodebytes(bytes(e["regs"]["data"], "ascii"))
else:
self.regs = None
def __str__(self):
return "0x%x '%s' pri %d/%d sp 0x%x (%d free)" % (
self.xHandle, self.pcTaskName, self.uxCurrentPriority, self.uxBasePriority,
self.pxTopOfStack, self.pxTopOfStack - self.pxStackBase)
class Core(object):
def __init__(self, filename):
self._dump = self._read(filename)
self.mem = self._map_core(self._dump)
if args.rom:
self.mem.extend(self._map_firmware(args.rom_addr, args.rom))
self.mem.extend(self._map_elf(args.elf))
self.regs = base64.decodebytes(bytes(self._dump["REGS"]["data"], "ascii"))
if "freertos" in self._dump:
print("Dump contains FreeRTOS task info")
self.tasks = dict((t["h"], FreeRTOSTask(t)) for t in self._dump["freertos"]["tasks"])
else:
self.tasks = {}
self.target_features = self._dump.get("target_features")
def get_cur_task(self):
return self._dump.get("freertos", {}).get("cur", None)
def _search_backwards(self, f, start_offset, pattern):
offset = start_offset
while True:
offset = max(0, offset - 10000)
f.seek(offset)
data = f.read(min(10000, start_offset))
pos = data.rfind(pattern)
if pos >= 0:
return offset + pos
elif offset == 0:
return -1
offset += 5000
def _read(self, filename):
with open(filename, "rb") as f:
f.seek(0, os.SEEK_END)
size = f.tell()
end_pos = self._search_backwards(f, f.tell(), END_DELIM)
if end_pos == -1:
print("Cannot find end delimiter:", END_DELIM, file=sys.stderr)
sys.exit(1)
start_pos = self._search_backwards(f, end_pos, START_DELIM)
if start_pos == -1:
print("Cannot find start delimiter:", START_DELIM, file=sys.stderr)
sys.exit(1)
start_pos += len(START_DELIM)
print("Found core at %d - %d" % (start_pos, end_pos), file=sys.stderr)
f.seek(start_pos)
core_lines = []
while True:
l = f.readline().strip()
if l == END_DELIM:
break
core_lines.append(l.decode("ascii"))
core_json = ''.join(core_lines)
stripped = re.sub(r'(?im)\s+(\[.{1,40}\])?\s*', '', core_json)
return json.loads(stripped)
def _map_core(self, core):
mem = []
for k, v in list(core.items()):
if not isinstance(v, dict) or k == 'REGS' or "addr" not in v:
continue
data = base64.decodebytes(bytes(v["data"], "ascii"))
print("Mapping {0}: {1} @ {2:#02x}".format(k, len(data), v["addr"]), file=sys.stderr)
if "crc32" in v:
crc32 = ctypes.c_uint32(binascii.crc32(data))
expected_crc32 = ctypes.c_uint32(v["crc32"])
if crc32.value != expected_crc32.value:
print("CRC mismatch, section corrupted %s %s" % (crc32, expected_crc32), file=sys.stderr)
sys.exit(1)
mem.append((v["addr"], v["addr"] + len(data), data))
return mem
def _map_firmware(self, addr, filename):
with open(filename, "rb") as f:
data = f.read()
result = []
i = 0
magic, count = struct.unpack('<BB', data[i:i+2])
if magic == 0xea and count == 0x04:
# This is a V2 image, IRAM will be inside.
(magic, count, f1, f2, entry, _, irom_len) = struct.unpack('<BBBBIII', data[i:i+16])
print("Mapping IROM: {0} @ {1:#02x}".format(irom_len, addr), file=sys.stderr)
result.append((addr, addr + irom_len, data[i:i+irom_len+16]))
# The rest (IRAM) will be in the core.
else:
print("Mapping {0} at {1:#02x}".format(filename, addr), file=sys.stderr)
result.append((addr, addr + len(data), data))
return result
def _map_elf(self, elf_file_name):
result = []
f = open(elf_file_name, "rb")
ef = elftools.elf.elffile.ELFFile(f)
for i, sec in enumerate(ef.iter_sections()):
addr, size, off = sec["sh_addr"], sec["sh_size"], sec["sh_offset"]
if addr > 0 and size > 0:
print("Mapping {0} {1}: {2} @ {3:#02x}".format(elf_file_name, sec.name, size, addr), file=sys.stderr)
f.seek(off)
assert f.tell() == off
data = f.read(size)
assert len(data) == size
result.append((addr, addr + size, data))
return result
def read(self, addr, size):
for base, end, data in self.mem:
if addr >= base and addr < end:
return data[addr - base : addr - base + size]
print("Unmapped addr", hex(addr), file=sys.stderr)
return b"\0" * size
class GDBHandler(socketserver.BaseRequestHandler):
def handle(self):
self._core = core = Core(args.log)
self._curtask = None
print("Loaded core dump from last snippet in ", args.log, file=sys.stderr)
while self.expect_packet_start():
pkt = self.read_packet()
if args.debug:
print("<<", pkt, file=sys.stderr)
if pkt == "?": # status -> trap
self.send_str("S09")
elif pkt == "g": # dump registers
if self._curtask and self._curtask.regs:
# Dump specific task's registers
regs = self._curtask.regs
else:
regs = core.regs
self.send_str(self.encode_bytes(regs))
elif pkt[0] == "G": # set registers
core.regs = self.decode_bytes(pkt[1:])
self.send_str("OK")
elif pkt[0] == "m": # read memory
addr, size = [int(n, 16) for n in pkt[1:].split(',')]
if args.xtensa_addr_fixup and addr < 0x10000000 and addr > 0x80000:
print('fixup %08x' % addr, file=sys.stderr)
addr |= 0x40000000
bs = core.read(addr, size)
#if bs == "\0\0\0\0":
# bs = "\x01\0\0\0"
#print >>sys.stderr, "<<", " ".join("{:02x}".format(ord(c)) for c in bs)
self.send_str(self.encode_bytes(bs))
elif pkt.startswith("Hg"):
tid = int(pkt[2:], 16)
self._curtask = core.tasks.get(tid)
self.send_str("OK")
elif pkt.startswith("Hc-1"):
# cannot continue, this is post mortem debugging
self.send_str("E01")
elif pkt == "qC":
t = core.get_cur_task()
if t:
self.send_str("QC%016x" % t)
else:
self.send_str("1")
elif pkt == "qAttached":
self.send_str("1")
elif pkt == "qSymbol::":
self.send_str("OK")
elif pkt == "qfThreadInfo":
if core.tasks:
self.send_str("m%s" % ",".join("%016x" % t for t in core.tasks))
else:
self.send_str("l")
elif pkt == "qsThreadInfo":
self.send_str("l")
elif pkt.startswith("qThreadExtraInfo,"):
self.send_thread_extra_info(int(pkt[17:], 16))
elif pkt[0] == "T":
tid = int(pkt[1:], 16)
if tid in core.tasks:
self.send_str("OK")
else:
self.send_str("ERR00")
elif pkt == "D":
self.send_str("OK")
elif pkt in ("qTStatus", "qOffsets", "vMustReplyEmpty"):
# silently ignore
self.send_str("")
elif pkt.startswith("qSupported"):
features = []
if self._core.target_features:
features.append("qXfer:features:read+")
print("Target features: %s" % self._core.target_features)
else:
features.append("qXfer:features:read-")
self.send_str(";".join(features))
elif pkt.startswith("qXfer:features:read:"):
self.send_file(pkt)
else:
print("Ignoring unknown command '%s'" % (pkt,), file=sys.stderr)
self.send_str("")
print("GDB closed the connection", file=sys.stderr)
sys.exit(0)
def send_file(self, pkt):
_, _, _, fname, off_len = pkt.split(":")
if fname == "target.xml":
fname = self._core.target_features
if "/" in fname:
self.send_str("E00")
return
fname = os.path.join(args.target_descriptions, fname)
off_s, length_s = off_len.split(",")
off, length = int(off_s, 16), int(length_s, 16)
try:
if off == 0:
print("Serving %s" % fname)
with open(fname, "rb") as f:
if f.seek(off) != off:
self.send_str("l")
return
data = f.read(length)
if len(data) == length:
self.send_str("m" + data.decode("ascii"))
elif len(data) > 0:
self.send_str("l" + data.decode("ascii"))
else:
self.send_str("l")
except IOError as e:
print("error reading %s, %d @ %d: %s" % (fname, length, off, e))
self.send_str("E00")
def encode_bytes(self, bs):
return binascii.hexlify(bs).decode("ascii")
def decode_bytes(self, s):
return binascii.unhexlify(s)
def send_ack(self):
self.request.sendall(b"+");
def | (self):
self.request.sendall(b"-");
def send_str(self, s):
if type(s) is bytes:
s = s.decode("ascii")
if args.debug:
print(">>", s, file=sys.stderr)
self.request.sendall("${0}#{1:02x}".format(s, self._checksum(s)).encode("ascii"))
def _checksum(self, s):
if type(s) is str:
return sum(ord(i) for i in s) % 0x100
else: # bytes
return sum(s) % 0x100
def expect_packet_start(self):
return len(self.read_until('$')) > 0
def read_packet(self):
pkt = self.read_until('#')
chk = b""
chk += self.request.recv(1)
chk += self.request.recv(1)
if len(chk) != 2:
return ""
if int(chk, 16) != self._checksum(pkt):
print("Bad checksum for {0}; got: {1} want: {2:02x}".format(pkt, chk, "want:", self._checksum(pkt)), file=sys.stderr)
self.send_nack()
return ""
self.send_ack()
return pkt.decode("ascii")
def read_until(self, limit):
buf = b""
limit = bytes(limit, "ascii")
while True:
ch = self.request.recv(1)
if len(ch) == 0: # eof
return ""
if ch == limit:
return buf
buf += ch
def send_thread_extra_info(self, tid):
task = self._core.tasks.get(tid)
if task:
self.send_str(binascii.hexlify(str(task).encode("ascii")))
else:
self.send_str(binascii.hexlify(("[Invalid task 0x%08x]" % tid).encode("ascii")))
class TCPServer(socketserver.TCPServer):
allow_reuse_address = True
server = TCPServer(('0.0.0.0', args.port), GDBHandler)
print("Waiting for gdb on", args.port)
server.serve_forever()
| send_nack | identifier_name |
serve_core.py | #!/usr/bin/env python3
#
# usage: tools/serve_core.py build/fw/objs/fw.elf /tmp/console.log
#
# Then you can connect with gdb. The ESP8266 SDK image provides a debugger with
# reasonable support of lx106. Example invocation:
#
# docker run -v $PWD:/cesanta -ti \
# docker.cesanta.com/esp8266-build-oss:latest \
# xt-gdb /cesanta/fw/platforms/esp8266/build/fw.out \
# -ex "target remote localhost:1234"
#
# If you run on OSX or windows, you have to put the IP of your host instead of
# localhost since gdb will run in a virtualmachine.
import argparse
import base64
import binascii
import ctypes
import json
import os
import re
import socketserver
import struct
import sys
import elftools.elf.elffile # apt install python-pyelftools
parser = argparse.ArgumentParser(description='Serve ESP core dump to GDB')
parser.add_argument('--port', default=1234, type=int, help='listening port')
parser.add_argument('--rom', required=False, help='rom section')
parser.add_argument('--rom_addr', required=False, type=lambda x: int(x,16), help='rom map addr')
parser.add_argument('--debug', action='store_true', default=False)
parser.add_argument('--xtensa_addr_fixup', default=False, type=bool)
parser.add_argument('--target_descriptions', default='/opt/serve_core')
parser.add_argument('elf', help='Program executable')
parser.add_argument('log', help='serial log containing core dump snippet')
args = parser.parse_args()
START_DELIM = b'--- BEGIN CORE DUMP ---'
END_DELIM = b'---- END CORE DUMP ----'
class FreeRTOSTask(object):
def __init__(self, e):
self.xHandle = e["h"]
self.pcTaskName = e["n"]
self.eCurrentState = e["st"]
self.uxCurrentPriority = e["cpri"]
self.uxBasePriority = e["bpri"]
self.pxStackBase = e["sb"]
self.pxTopOfStack = e["sp"]
if "regs" in e:
self.regs = base64.decodebytes(bytes(e["regs"]["data"], "ascii"))
else:
self.regs = None
def __str__(self):
return "0x%x '%s' pri %d/%d sp 0x%x (%d free)" % (
self.xHandle, self.pcTaskName, self.uxCurrentPriority, self.uxBasePriority,
self.pxTopOfStack, self.pxTopOfStack - self.pxStackBase)
class Core(object):
def __init__(self, filename):
self._dump = self._read(filename)
self.mem = self._map_core(self._dump)
if args.rom:
self.mem.extend(self._map_firmware(args.rom_addr, args.rom))
self.mem.extend(self._map_elf(args.elf))
self.regs = base64.decodebytes(bytes(self._dump["REGS"]["data"], "ascii"))
if "freertos" in self._dump:
print("Dump contains FreeRTOS task info")
self.tasks = dict((t["h"], FreeRTOSTask(t)) for t in self._dump["freertos"]["tasks"])
else:
self.tasks = {}
self.target_features = self._dump.get("target_features")
def get_cur_task(self):
return self._dump.get("freertos", {}).get("cur", None)
def _search_backwards(self, f, start_offset, pattern):
offset = start_offset
while True:
offset = max(0, offset - 10000)
f.seek(offset)
data = f.read(min(10000, start_offset))
pos = data.rfind(pattern)
if pos >= 0:
return offset + pos
elif offset == 0:
return -1
offset += 5000
def _read(self, filename):
with open(filename, "rb") as f:
f.seek(0, os.SEEK_END)
size = f.tell()
end_pos = self._search_backwards(f, f.tell(), END_DELIM)
if end_pos == -1:
print("Cannot find end delimiter:", END_DELIM, file=sys.stderr)
sys.exit(1)
start_pos = self._search_backwards(f, end_pos, START_DELIM)
if start_pos == -1:
print("Cannot find start delimiter:", START_DELIM, file=sys.stderr)
sys.exit(1)
start_pos += len(START_DELIM)
print("Found core at %d - %d" % (start_pos, end_pos), file=sys.stderr)
f.seek(start_pos)
core_lines = []
while True:
l = f.readline().strip()
if l == END_DELIM:
break
core_lines.append(l.decode("ascii"))
core_json = ''.join(core_lines)
stripped = re.sub(r'(?im)\s+(\[.{1,40}\])?\s*', '', core_json)
return json.loads(stripped)
def _map_core(self, core):
mem = []
for k, v in list(core.items()):
if not isinstance(v, dict) or k == 'REGS' or "addr" not in v:
continue
data = base64.decodebytes(bytes(v["data"], "ascii"))
print("Mapping {0}: {1} @ {2:#02x}".format(k, len(data), v["addr"]), file=sys.stderr)
if "crc32" in v:
crc32 = ctypes.c_uint32(binascii.crc32(data))
expected_crc32 = ctypes.c_uint32(v["crc32"])
if crc32.value != expected_crc32.value:
print("CRC mismatch, section corrupted %s %s" % (crc32, expected_crc32), file=sys.stderr)
sys.exit(1)
mem.append((v["addr"], v["addr"] + len(data), data))
return mem
def _map_firmware(self, addr, filename):
with open(filename, "rb") as f:
data = f.read()
result = []
i = 0
magic, count = struct.unpack('<BB', data[i:i+2])
if magic == 0xea and count == 0x04:
# This is a V2 image, IRAM will be inside.
(magic, count, f1, f2, entry, _, irom_len) = struct.unpack('<BBBBIII', data[i:i+16])
print("Mapping IROM: {0} @ {1:#02x}".format(irom_len, addr), file=sys.stderr)
result.append((addr, addr + irom_len, data[i:i+irom_len+16]))
# The rest (IRAM) will be in the core.
else:
print("Mapping {0} at {1:#02x}".format(filename, addr), file=sys.stderr)
result.append((addr, addr + len(data), data))
return result
def _map_elf(self, elf_file_name):
result = []
f = open(elf_file_name, "rb")
ef = elftools.elf.elffile.ELFFile(f)
for i, sec in enumerate(ef.iter_sections()):
addr, size, off = sec["sh_addr"], sec["sh_size"], sec["sh_offset"]
if addr > 0 and size > 0:
print("Mapping {0} {1}: {2} @ {3:#02x}".format(elf_file_name, sec.name, size, addr), file=sys.stderr)
f.seek(off)
assert f.tell() == off
data = f.read(size)
assert len(data) == size
result.append((addr, addr + size, data))
return result
def read(self, addr, size):
for base, end, data in self.mem:
if addr >= base and addr < end:
return data[addr - base : addr - base + size]
print("Unmapped addr", hex(addr), file=sys.stderr)
return b"\0" * size
class GDBHandler(socketserver.BaseRequestHandler):
def handle(self):
self._core = core = Core(args.log)
self._curtask = None
print("Loaded core dump from last snippet in ", args.log, file=sys.stderr)
while self.expect_packet_start():
pkt = self.read_packet()
if args.debug:
print("<<", pkt, file=sys.stderr)
if pkt == "?": # status -> trap
self.send_str("S09")
elif pkt == "g": # dump registers
if self._curtask and self._curtask.regs:
# Dump specific task's registers
regs = self._curtask.regs
else:
regs = core.regs
self.send_str(self.encode_bytes(regs))
elif pkt[0] == "G": # set registers
core.regs = self.decode_bytes(pkt[1:])
self.send_str("OK")
elif pkt[0] == "m": # read memory
addr, size = [int(n, 16) for n in pkt[1:].split(',')]
if args.xtensa_addr_fixup and addr < 0x10000000 and addr > 0x80000:
print('fixup %08x' % addr, file=sys.stderr)
addr |= 0x40000000
bs = core.read(addr, size)
#if bs == "\0\0\0\0":
# bs = "\x01\0\0\0"
#print >>sys.stderr, "<<", " ".join("{:02x}".format(ord(c)) for c in bs)
self.send_str(self.encode_bytes(bs))
elif pkt.startswith("Hg"):
tid = int(pkt[2:], 16)
self._curtask = core.tasks.get(tid)
self.send_str("OK")
elif pkt.startswith("Hc-1"):
# cannot continue, this is post mortem debugging
self.send_str("E01")
elif pkt == "qC":
t = core.get_cur_task() | self.send_str("1")
elif pkt == "qAttached":
self.send_str("1")
elif pkt == "qSymbol::":
self.send_str("OK")
elif pkt == "qfThreadInfo":
if core.tasks:
self.send_str("m%s" % ",".join("%016x" % t for t in core.tasks))
else:
self.send_str("l")
elif pkt == "qsThreadInfo":
self.send_str("l")
elif pkt.startswith("qThreadExtraInfo,"):
self.send_thread_extra_info(int(pkt[17:], 16))
elif pkt[0] == "T":
tid = int(pkt[1:], 16)
if tid in core.tasks:
self.send_str("OK")
else:
self.send_str("ERR00")
elif pkt == "D":
self.send_str("OK")
elif pkt in ("qTStatus", "qOffsets", "vMustReplyEmpty"):
# silently ignore
self.send_str("")
elif pkt.startswith("qSupported"):
features = []
if self._core.target_features:
features.append("qXfer:features:read+")
print("Target features: %s" % self._core.target_features)
else:
features.append("qXfer:features:read-")
self.send_str(";".join(features))
elif pkt.startswith("qXfer:features:read:"):
self.send_file(pkt)
else:
print("Ignoring unknown command '%s'" % (pkt,), file=sys.stderr)
self.send_str("")
print("GDB closed the connection", file=sys.stderr)
sys.exit(0)
def send_file(self, pkt):
_, _, _, fname, off_len = pkt.split(":")
if fname == "target.xml":
fname = self._core.target_features
if "/" in fname:
self.send_str("E00")
return
fname = os.path.join(args.target_descriptions, fname)
off_s, length_s = off_len.split(",")
off, length = int(off_s, 16), int(length_s, 16)
try:
if off == 0:
print("Serving %s" % fname)
with open(fname, "rb") as f:
if f.seek(off) != off:
self.send_str("l")
return
data = f.read(length)
if len(data) == length:
self.send_str("m" + data.decode("ascii"))
elif len(data) > 0:
self.send_str("l" + data.decode("ascii"))
else:
self.send_str("l")
except IOError as e:
print("error reading %s, %d @ %d: %s" % (fname, length, off, e))
self.send_str("E00")
def encode_bytes(self, bs):
return binascii.hexlify(bs).decode("ascii")
def decode_bytes(self, s):
return binascii.unhexlify(s)
def send_ack(self):
self.request.sendall(b"+");
def send_nack(self):
self.request.sendall(b"-");
def send_str(self, s):
if type(s) is bytes:
s = s.decode("ascii")
if args.debug:
print(">>", s, file=sys.stderr)
self.request.sendall("${0}#{1:02x}".format(s, self._checksum(s)).encode("ascii"))
def _checksum(self, s):
if type(s) is str:
return sum(ord(i) for i in s) % 0x100
else: # bytes
return sum(s) % 0x100
def expect_packet_start(self):
return len(self.read_until('$')) > 0
def read_packet(self):
pkt = self.read_until('#')
chk = b""
chk += self.request.recv(1)
chk += self.request.recv(1)
if len(chk) != 2:
return ""
if int(chk, 16) != self._checksum(pkt):
print("Bad checksum for {0}; got: {1} want: {2:02x}".format(pkt, chk, "want:", self._checksum(pkt)), file=sys.stderr)
self.send_nack()
return ""
self.send_ack()
return pkt.decode("ascii")
def read_until(self, limit):
buf = b""
limit = bytes(limit, "ascii")
while True:
ch = self.request.recv(1)
if len(ch) == 0: # eof
return ""
if ch == limit:
return buf
buf += ch
def send_thread_extra_info(self, tid):
task = self._core.tasks.get(tid)
if task:
self.send_str(binascii.hexlify(str(task).encode("ascii")))
else:
self.send_str(binascii.hexlify(("[Invalid task 0x%08x]" % tid).encode("ascii")))
class TCPServer(socketserver.TCPServer):
allow_reuse_address = True
server = TCPServer(('0.0.0.0', args.port), GDBHandler)
print("Waiting for gdb on", args.port)
server.serve_forever() | if t:
self.send_str("QC%016x" % t)
else: | random_line_split |
serve_core.py | #!/usr/bin/env python3
#
# usage: tools/serve_core.py build/fw/objs/fw.elf /tmp/console.log
#
# Then you can connect with gdb. The ESP8266 SDK image provides a debugger with
# reasonable support of lx106. Example invocation:
#
# docker run -v $PWD:/cesanta -ti \
# docker.cesanta.com/esp8266-build-oss:latest \
# xt-gdb /cesanta/fw/platforms/esp8266/build/fw.out \
# -ex "target remote localhost:1234"
#
# If you run on OSX or windows, you have to put the IP of your host instead of
# localhost since gdb will run in a virtualmachine.
import argparse
import base64
import binascii
import ctypes
import json
import os
import re
import socketserver
import struct
import sys
import elftools.elf.elffile # apt install python-pyelftools
parser = argparse.ArgumentParser(description='Serve ESP core dump to GDB')
parser.add_argument('--port', default=1234, type=int, help='listening port')
parser.add_argument('--rom', required=False, help='rom section')
parser.add_argument('--rom_addr', required=False, type=lambda x: int(x,16), help='rom map addr')
parser.add_argument('--debug', action='store_true', default=False)
parser.add_argument('--xtensa_addr_fixup', default=False, type=bool)
parser.add_argument('--target_descriptions', default='/opt/serve_core')
parser.add_argument('elf', help='Program executable')
parser.add_argument('log', help='serial log containing core dump snippet')
args = parser.parse_args()
START_DELIM = b'--- BEGIN CORE DUMP ---'
END_DELIM = b'---- END CORE DUMP ----'
class FreeRTOSTask(object):
def __init__(self, e):
self.xHandle = e["h"]
self.pcTaskName = e["n"]
self.eCurrentState = e["st"]
self.uxCurrentPriority = e["cpri"]
self.uxBasePriority = e["bpri"]
self.pxStackBase = e["sb"]
self.pxTopOfStack = e["sp"]
if "regs" in e:
self.regs = base64.decodebytes(bytes(e["regs"]["data"], "ascii"))
else:
self.regs = None
def __str__(self):
return "0x%x '%s' pri %d/%d sp 0x%x (%d free)" % (
self.xHandle, self.pcTaskName, self.uxCurrentPriority, self.uxBasePriority,
self.pxTopOfStack, self.pxTopOfStack - self.pxStackBase)
class Core(object):
def __init__(self, filename):
self._dump = self._read(filename)
self.mem = self._map_core(self._dump)
if args.rom:
self.mem.extend(self._map_firmware(args.rom_addr, args.rom))
self.mem.extend(self._map_elf(args.elf))
self.regs = base64.decodebytes(bytes(self._dump["REGS"]["data"], "ascii"))
if "freertos" in self._dump:
print("Dump contains FreeRTOS task info")
self.tasks = dict((t["h"], FreeRTOSTask(t)) for t in self._dump["freertos"]["tasks"])
else:
self.tasks = {}
self.target_features = self._dump.get("target_features")
def get_cur_task(self):
return self._dump.get("freertos", {}).get("cur", None)
def _search_backwards(self, f, start_offset, pattern):
offset = start_offset
while True:
offset = max(0, offset - 10000)
f.seek(offset)
data = f.read(min(10000, start_offset))
pos = data.rfind(pattern)
if pos >= 0:
return offset + pos
elif offset == 0:
return -1
offset += 5000
def _read(self, filename):
with open(filename, "rb") as f:
f.seek(0, os.SEEK_END)
size = f.tell()
end_pos = self._search_backwards(f, f.tell(), END_DELIM)
if end_pos == -1:
print("Cannot find end delimiter:", END_DELIM, file=sys.stderr)
sys.exit(1)
start_pos = self._search_backwards(f, end_pos, START_DELIM)
if start_pos == -1:
print("Cannot find start delimiter:", START_DELIM, file=sys.stderr)
sys.exit(1)
start_pos += len(START_DELIM)
print("Found core at %d - %d" % (start_pos, end_pos), file=sys.stderr)
f.seek(start_pos)
core_lines = []
while True:
l = f.readline().strip()
if l == END_DELIM:
break
core_lines.append(l.decode("ascii"))
core_json = ''.join(core_lines)
stripped = re.sub(r'(?im)\s+(\[.{1,40}\])?\s*', '', core_json)
return json.loads(stripped)
def _map_core(self, core):
mem = []
for k, v in list(core.items()):
if not isinstance(v, dict) or k == 'REGS' or "addr" not in v:
continue
data = base64.decodebytes(bytes(v["data"], "ascii"))
print("Mapping {0}: {1} @ {2:#02x}".format(k, len(data), v["addr"]), file=sys.stderr)
if "crc32" in v:
crc32 = ctypes.c_uint32(binascii.crc32(data))
expected_crc32 = ctypes.c_uint32(v["crc32"])
if crc32.value != expected_crc32.value:
print("CRC mismatch, section corrupted %s %s" % (crc32, expected_crc32), file=sys.stderr)
sys.exit(1)
mem.append((v["addr"], v["addr"] + len(data), data))
return mem
def _map_firmware(self, addr, filename):
with open(filename, "rb") as f:
data = f.read()
result = []
i = 0
magic, count = struct.unpack('<BB', data[i:i+2])
if magic == 0xea and count == 0x04:
# This is a V2 image, IRAM will be inside.
(magic, count, f1, f2, entry, _, irom_len) = struct.unpack('<BBBBIII', data[i:i+16])
print("Mapping IROM: {0} @ {1:#02x}".format(irom_len, addr), file=sys.stderr)
result.append((addr, addr + irom_len, data[i:i+irom_len+16]))
# The rest (IRAM) will be in the core.
else:
print("Mapping {0} at {1:#02x}".format(filename, addr), file=sys.stderr)
result.append((addr, addr + len(data), data))
return result
def _map_elf(self, elf_file_name):
result = []
f = open(elf_file_name, "rb")
ef = elftools.elf.elffile.ELFFile(f)
for i, sec in enumerate(ef.iter_sections()):
addr, size, off = sec["sh_addr"], sec["sh_size"], sec["sh_offset"]
if addr > 0 and size > 0:
print("Mapping {0} {1}: {2} @ {3:#02x}".format(elf_file_name, sec.name, size, addr), file=sys.stderr)
f.seek(off)
assert f.tell() == off
data = f.read(size)
assert len(data) == size
result.append((addr, addr + size, data))
return result
def read(self, addr, size):
for base, end, data in self.mem:
if addr >= base and addr < end:
return data[addr - base : addr - base + size]
print("Unmapped addr", hex(addr), file=sys.stderr)
return b"\0" * size
class GDBHandler(socketserver.BaseRequestHandler):
def handle(self):
self._core = core = Core(args.log)
self._curtask = None
print("Loaded core dump from last snippet in ", args.log, file=sys.stderr)
while self.expect_packet_start():
pkt = self.read_packet()
if args.debug:
print("<<", pkt, file=sys.stderr)
if pkt == "?": # status -> trap
self.send_str("S09")
elif pkt == "g": # dump registers
if self._curtask and self._curtask.regs:
# Dump specific task's registers
regs = self._curtask.regs
else:
regs = core.regs
self.send_str(self.encode_bytes(regs))
elif pkt[0] == "G": # set registers
core.regs = self.decode_bytes(pkt[1:])
self.send_str("OK")
elif pkt[0] == "m": # read memory
addr, size = [int(n, 16) for n in pkt[1:].split(',')]
if args.xtensa_addr_fixup and addr < 0x10000000 and addr > 0x80000:
print('fixup %08x' % addr, file=sys.stderr)
addr |= 0x40000000
bs = core.read(addr, size)
#if bs == "\0\0\0\0":
# bs = "\x01\0\0\0"
#print >>sys.stderr, "<<", " ".join("{:02x}".format(ord(c)) for c in bs)
self.send_str(self.encode_bytes(bs))
elif pkt.startswith("Hg"):
tid = int(pkt[2:], 16)
self._curtask = core.tasks.get(tid)
self.send_str("OK")
elif pkt.startswith("Hc-1"):
# cannot continue, this is post mortem debugging
self.send_str("E01")
elif pkt == "qC":
t = core.get_cur_task()
if t:
self.send_str("QC%016x" % t)
else:
self.send_str("1")
elif pkt == "qAttached":
self.send_str("1")
elif pkt == "qSymbol::":
|
elif pkt == "qfThreadInfo":
if core.tasks:
self.send_str("m%s" % ",".join("%016x" % t for t in core.tasks))
else:
self.send_str("l")
elif pkt == "qsThreadInfo":
self.send_str("l")
elif pkt.startswith("qThreadExtraInfo,"):
self.send_thread_extra_info(int(pkt[17:], 16))
elif pkt[0] == "T":
tid = int(pkt[1:], 16)
if tid in core.tasks:
self.send_str("OK")
else:
self.send_str("ERR00")
elif pkt == "D":
self.send_str("OK")
elif pkt in ("qTStatus", "qOffsets", "vMustReplyEmpty"):
# silently ignore
self.send_str("")
elif pkt.startswith("qSupported"):
features = []
if self._core.target_features:
features.append("qXfer:features:read+")
print("Target features: %s" % self._core.target_features)
else:
features.append("qXfer:features:read-")
self.send_str(";".join(features))
elif pkt.startswith("qXfer:features:read:"):
self.send_file(pkt)
else:
print("Ignoring unknown command '%s'" % (pkt,), file=sys.stderr)
self.send_str("")
print("GDB closed the connection", file=sys.stderr)
sys.exit(0)
def send_file(self, pkt):
_, _, _, fname, off_len = pkt.split(":")
if fname == "target.xml":
fname = self._core.target_features
if "/" in fname:
self.send_str("E00")
return
fname = os.path.join(args.target_descriptions, fname)
off_s, length_s = off_len.split(",")
off, length = int(off_s, 16), int(length_s, 16)
try:
if off == 0:
print("Serving %s" % fname)
with open(fname, "rb") as f:
if f.seek(off) != off:
self.send_str("l")
return
data = f.read(length)
if len(data) == length:
self.send_str("m" + data.decode("ascii"))
elif len(data) > 0:
self.send_str("l" + data.decode("ascii"))
else:
self.send_str("l")
except IOError as e:
print("error reading %s, %d @ %d: %s" % (fname, length, off, e))
self.send_str("E00")
def encode_bytes(self, bs):
return binascii.hexlify(bs).decode("ascii")
def decode_bytes(self, s):
return binascii.unhexlify(s)
def send_ack(self):
self.request.sendall(b"+");
def send_nack(self):
self.request.sendall(b"-");
def send_str(self, s):
if type(s) is bytes:
s = s.decode("ascii")
if args.debug:
print(">>", s, file=sys.stderr)
self.request.sendall("${0}#{1:02x}".format(s, self._checksum(s)).encode("ascii"))
def _checksum(self, s):
if type(s) is str:
return sum(ord(i) for i in s) % 0x100
else: # bytes
return sum(s) % 0x100
def expect_packet_start(self):
return len(self.read_until('$')) > 0
def read_packet(self):
pkt = self.read_until('#')
chk = b""
chk += self.request.recv(1)
chk += self.request.recv(1)
if len(chk) != 2:
return ""
if int(chk, 16) != self._checksum(pkt):
print("Bad checksum for {0}; got: {1} want: {2:02x}".format(pkt, chk, "want:", self._checksum(pkt)), file=sys.stderr)
self.send_nack()
return ""
self.send_ack()
return pkt.decode("ascii")
def read_until(self, limit):
buf = b""
limit = bytes(limit, "ascii")
while True:
ch = self.request.recv(1)
if len(ch) == 0: # eof
return ""
if ch == limit:
return buf
buf += ch
def send_thread_extra_info(self, tid):
task = self._core.tasks.get(tid)
if task:
self.send_str(binascii.hexlify(str(task).encode("ascii")))
else:
self.send_str(binascii.hexlify(("[Invalid task 0x%08x]" % tid).encode("ascii")))
class TCPServer(socketserver.TCPServer):
allow_reuse_address = True
server = TCPServer(('0.0.0.0', args.port), GDBHandler)
print("Waiting for gdb on", args.port)
server.serve_forever()
| self.send_str("OK") | conditional_block |
methods.ts | import { Meteor } from "meteor/meteor";
import { _ } from "meteor/underscore";
import { v4 as uuidv4 } from 'uuid';
import math from 'mathjs'
import { Report_Data, Report_Structures } from '../../../../imports/api/collections';
import { ReportStructure, ReportData, Table, TableRow, TableColumn, FormulaValue } from '../../../../imports/api/types/reports'
import { getUserDetails } from "./functions";
import { check, Match } from 'meteor/check'
import { enforceRole } from '../roles/enforceRoles'
import { getAccount } from "../accounts/functions";
Meteor.methods({
/*
Used to create a new report data, from rest API
TODO: restrict to account and user roles
*/
Insert_Report_Data: function(json) {
enforceRole(this.userId, 'Editor')
let account = getAccount(this.userId)
check(json, Match.ObjectIncluding({collection_name : String}))
Report_Data.insert({account_id: account._id, ...json})
},
/*
Used to fetch distinct collection names belonging to an account
TODO: restrict to user roles
*/
Fetch_Collection_Names: function() {
enforceRole(this.userId, 'Editor')
const user = getUserDetails(Meteor.user())
let distinct = _.uniq(Report_Data.find({ account_id: user.account_id }, {
sort: { collection_name: 1 }, fields: { collection_name: 1 }
}).fetch().map(function(x) {
return x.collection_name;
}), true);
return distinct
},
/*
Used to fetch distinct collection names belonging to an account
TODO: restrict to account and user roles
*/
Fetch_All_Collection_Keys: function () {
enforceRole(this.userId, 'Editor')
const user = getUserDetails(Meteor.user())
let distinctCollections = _.uniq(Report_Data.find({}, {
sort: { collection_name: 1 }, fields: { collection_name: 1 }
}).fetch().map(function (x) {
return x.collection_name;
}), true);
return distinctCollections.map(collection => {
let keys = []
let obj = Report_Data.findOne({ account_id: user.account_id, collection_name: collection })
_.each(obj, function (val, key) {
if (val) {
keys.push(key);
}
});
return {
collection_name: collection,
keys: keys
}
})
},
/*
Used to fetch distinct collection names belonging to an account
TODO: restrict to account and user roles
*/
Fetch_Single_Collection_Keys: function (collection_name) {
enforceRole(this.userId, 'Editor')
check(collection_name, String)
const user = getUserDetails(Meteor.user())
let keys = []
let obj = Report_Data.findOne({ account_id: user.account_id, collection_name: collection_name })
_.each(obj, function (val, key) {
if (val) {
keys.push(key);
}
});
return {
collection_name: collection_name,
keys: keys
}
},
/*
Used to create a new report, or to update one
*/
Upsert_Report: function(report: ReportStructure) {
enforceRole(this.userId, 'Editor')
check(report, {_id : Match.Maybe(String), account_id : Match.Maybe(String), name : String,
tables : [{id : String, title : String, type : String,
columns : [{id : String, label : String, formulaId : Match.Maybe(String), property : Match.Maybe(String), collection_name : String, relation_key : Match.Maybe(String), enum : String, symbol : Match.Maybe(String)}],
rows : [{id : String,
cells : [{id : String, index : Number, type : String, property : String, propertyValue : String, value : String, expression : String}] }],
collection : String, sort_by : String}],
formulas : [{id : String, tableId : String, columnId : String, columnIndex : Number, expression : String,
// some of the formula types were decided by looking at how they are created in the columnToolBar
values : [{key : String, type : String, operation : String, collection_name : Match.Maybe(String), queryModifier : String, query : {collection_name : String}, property : Match.Maybe(String), path : Match.Maybe(String), columnId : Match.Maybe(String), cellIndex : Match.Maybe(String)}] }],
public : Boolean, tags : [String] })
const user = getUserDetails(Meteor.user())
let action = null;
if(!report._id) {
report.account_id = user.account_id // set the account_id
action = Report_Structures.insert(report)
console.log('Created report', action)
return Report_Structures.findOne({_id: action})
}
if(report._id) {
action = Report_Structures.update({_id: report._id}, report)
console.log('Updated report', action)
return Report_Structures.findOne({_id: report._id})
}
},
/*
For Viewing a Report
Determine if table is collection driven, or static
if collection driven, fetch, and loop
for each "row", process each column
if the column has a formula, do the math
the goal is to populate the rows, and cells within, with correct information
This method, mutates the original report object, and returns it
*/
Compose_Report: function(reportId: string) {
if (!this.userId) {
throw new Meteor.Error('No Permission', 'user is not logged in')
}
check(reportId, String)
let user = getUserDetails(Meteor.user())
let report: ReportStructure | null | undefined = null;
const setReportToDisplay = () => {
report = Report_Structures.findOne({ _id: reportId, account_id: user?.account_id })
}
// used to generate rows, if table is collection driven
const performQuery = (collection: string) => {
check(collection, String)
if (report.public || user.role === 'Editor') {
return Report_Data.find({
account_id: user.account_id,
collection_name: collection
}).fetch()
} else { // must be viewer if not editor. Will need to change if more roles are added
return Report_Data.find({
account_id: user.account_id,
collection_name: collection,
$or: [{ viewer_id: user.viewer_id }, { viewer_id: { $exists: false } }]
}).fetch()
}
}
// generates cells, for a given row, if table is collection driven
const generateCells = (columns: Array<TableColumn>, document: ReportData) => {
check(columns, [{id : String, label : String, formulaId : Match.Maybe(String), property : Match.Maybe(String), collection_name : String, relation_key : Match.Maybe(String), enum : String, symbol : Match.Maybe(String)}])
check(document, Match.ObjectIncluding({collection_name : String, viewer_id : String}))
return columns.map((column, i) => {
let doc = document
let type = '', property = null, propertyValue = null, value: number | Object| string | null | undefined = 0;
// if there is a relation key, we overide the document from table collection, to the column specific collection
if(column.relation_key) {
let query = {
account_id: user.account_id,
collection_name: column.collection_name,
}
if (!report.public && user.role === 'Viewer') {
query['viewer_id'] = user.viewer_id
}
query[column.relation_key] = doc[column.relation_key]
doc = Report_Data.findOne(query)
}
// a column should only have either a formula, or a property assigned, never both
property = column.property
propertyValue = doc[property]
if(!column.formulaId) |
if(column.formulaId) {
type = 'formula'
}
return { index: i, id: uuidv4(), type, property, propertyValue, value }
})
}
// generates rows within a table, if collection driven
const generateRows = (table: Table) => {
check(table, {id : String, title : String, type : String,
columns : [{id : String, label : String, formulaId : Match.Maybe(String), property : Match.Maybe(String), collection_name : String, relation_key : Match.Maybe(String), enum : String, symbol : Match.Maybe(String)}],
rows : [{id : String,
cells : [{id : String, index : Number, type : String, property : String, propertyValue : String, value : String, expression : String}] }],
collection : String, sort_by : String})
// if type is "static", the rows should already be defined
// TODO : allow user to defind rows for static table
if(table.type === 'collection') {
const collection = performQuery(table.collection)
return collection.map((document: ReportData) => ({
id: uuidv4(),
cells: generateCells(table.columns, document)
}))
} else return table.rows
}
const createRowsInTable = () => {
// run for each table, ensuring proper amount of rows
report?.tables.forEach((table: Table) => {
table.rows = <Array<TableRow>> generateRows(table)
return table
});
}
const sortTables = () => {
// run for each table, ensuring each table is sorted
report?.tables.forEach((table: Table) => {
if (table.sort_by) {
const sortedRows = table.rows.sort((a, b) => a.cells.find(c => c.property === table.sort_by)?.propertyValue - b.cells.find(c => c.property === table.sort_by)?.propertyValue)
table.rows = sortedRows
return table
}
})
}
const computeFormulas = async () => {
// we must loop over every table, row, so that formula results can be applied to individual cells, under a column
report?.tables.forEach(table => {
table.rows.forEach(row => {
let expression = '';
report?.formulas.forEach(formula => {
expression = formula.expression;
console.log("Before: ", formula.expression)
// individually process each value, for the final expression
formula.values.forEach((value: FormulaValue) => {
if(value.type === 'query') {
if(value.queryModifier) {
const cellPropertyValue = row.cells[formula.columnIndex].propertyValue
value.query[value.queryModifier] = cellPropertyValue
}
const query = Report_Data.find(value.query).fetch()
if(value.operation === 'sum') {
let values = query.map((obj: any) => obj[value.property])
expression = expression.replace(value.key, math.sum(values))
}
}
if(value.type === 'query_count') {
const count = Report_Data.find(value.query).count()
expression = expression.replace(value.key, String(count))
}
})
// evaluate the expression, after the values have been harvested
const result = math.evaluate(expression)
row.cells[formula.columnIndex].value = result
row.cells[formula.columnIndex].expression = expression
console.log("After: ", expression)
console.log("Eval: ", result, "\n\n" )
})
})
})
}
const run = async () => {
setReportToDisplay()
createRowsInTable()
await computeFormulas()
await sortTables()
// return the mutated report, containing the accurate values to display
return report
}
return run()
}
}) | {
type = 'property'
value = doc[property]
} | conditional_block |
methods.ts | import { Meteor } from "meteor/meteor";
import { _ } from "meteor/underscore";
import { v4 as uuidv4 } from 'uuid';
import math from 'mathjs'
import { Report_Data, Report_Structures } from '../../../../imports/api/collections';
import { ReportStructure, ReportData, Table, TableRow, TableColumn, FormulaValue } from '../../../../imports/api/types/reports'
import { getUserDetails } from "./functions";
import { check, Match } from 'meteor/check'
import { enforceRole } from '../roles/enforceRoles'
import { getAccount } from "../accounts/functions";
Meteor.methods({
/*
Used to create a new report data, from rest API
TODO: restrict to account and user roles
*/
Insert_Report_Data: function(json) {
enforceRole(this.userId, 'Editor')
let account = getAccount(this.userId)
check(json, Match.ObjectIncluding({collection_name : String}))
Report_Data.insert({account_id: account._id, ...json})
},
/*
Used to fetch distinct collection names belonging to an account
TODO: restrict to user roles
*/
Fetch_Collection_Names: function() {
enforceRole(this.userId, 'Editor')
const user = getUserDetails(Meteor.user())
let distinct = _.uniq(Report_Data.find({ account_id: user.account_id }, {
sort: { collection_name: 1 }, fields: { collection_name: 1 }
}).fetch().map(function(x) {
return x.collection_name;
}), true);
return distinct
},
/*
Used to fetch distinct collection names belonging to an account
TODO: restrict to account and user roles
*/
Fetch_All_Collection_Keys: function () {
enforceRole(this.userId, 'Editor')
const user = getUserDetails(Meteor.user())
let distinctCollections = _.uniq(Report_Data.find({}, {
sort: { collection_name: 1 }, fields: { collection_name: 1 }
}).fetch().map(function (x) {
return x.collection_name;
}), true);
return distinctCollections.map(collection => {
let keys = []
let obj = Report_Data.findOne({ account_id: user.account_id, collection_name: collection })
_.each(obj, function (val, key) {
if (val) {
keys.push(key);
}
});
return {
collection_name: collection,
keys: keys
}
})
},
/*
Used to fetch distinct collection names belonging to an account
TODO: restrict to account and user roles
*/
Fetch_Single_Collection_Keys: function (collection_name) {
enforceRole(this.userId, 'Editor')
check(collection_name, String)
const user = getUserDetails(Meteor.user())
let keys = []
let obj = Report_Data.findOne({ account_id: user.account_id, collection_name: collection_name })
_.each(obj, function (val, key) {
if (val) {
keys.push(key);
}
});
return {
collection_name: collection_name,
keys: keys
}
},
/*
Used to create a new report, or to update one
*/
Upsert_Report: function(report: ReportStructure) {
enforceRole(this.userId, 'Editor')
check(report, {_id : Match.Maybe(String), account_id : Match.Maybe(String), name : String,
tables : [{id : String, title : String, type : String,
columns : [{id : String, label : String, formulaId : Match.Maybe(String), property : Match.Maybe(String), collection_name : String, relation_key : Match.Maybe(String), enum : String, symbol : Match.Maybe(String)}],
rows : [{id : String,
cells : [{id : String, index : Number, type : String, property : String, propertyValue : String, value : String, expression : String}] }],
collection : String, sort_by : String}],
formulas : [{id : String, tableId : String, columnId : String, columnIndex : Number, expression : String,
// some of the formula types were decided by looking at how they are created in the columnToolBar
values : [{key : String, type : String, operation : String, collection_name : Match.Maybe(String), queryModifier : String, query : {collection_name : String}, property : Match.Maybe(String), path : Match.Maybe(String), columnId : Match.Maybe(String), cellIndex : Match.Maybe(String)}] }],
public : Boolean, tags : [String] })
const user = getUserDetails(Meteor.user())
let action = null;
if(!report._id) {
report.account_id = user.account_id // set the account_id
action = Report_Structures.insert(report)
console.log('Created report', action)
return Report_Structures.findOne({_id: action})
}
if(report._id) {
action = Report_Structures.update({_id: report._id}, report)
console.log('Updated report', action)
return Report_Structures.findOne({_id: report._id})
}
},
/*
For Viewing a Report
Determine if table is collection driven, or static
if collection driven, fetch, and loop
for each "row", process each column
if the column has a formula, do the math
the goal is to populate the rows, and cells within, with correct information
This method, mutates the original report object, and returns it
*/
Compose_Report: function(reportId: string) {
if (!this.userId) {
throw new Meteor.Error('No Permission', 'user is not logged in')
}
check(reportId, String)
let user = getUserDetails(Meteor.user())
let report: ReportStructure | null | undefined = null;
const setReportToDisplay = () => {
report = Report_Structures.findOne({ _id: reportId, account_id: user?.account_id })
}
// used to generate rows, if table is collection driven
const performQuery = (collection: string) => {
check(collection, String)
if (report.public || user.role === 'Editor') {
return Report_Data.find({
account_id: user.account_id,
collection_name: collection
}).fetch()
} else { // must be viewer if not editor. Will need to change if more roles are added
return Report_Data.find({
account_id: user.account_id,
collection_name: collection,
$or: [{ viewer_id: user.viewer_id }, { viewer_id: { $exists: false } }]
}).fetch()
}
}
// generates cells, for a given row, if table is collection driven
const generateCells = (columns: Array<TableColumn>, document: ReportData) => {
check(columns, [{id : String, label : String, formulaId : Match.Maybe(String), property : Match.Maybe(String), collection_name : String, relation_key : Match.Maybe(String), enum : String, symbol : Match.Maybe(String)}])
check(document, Match.ObjectIncluding({collection_name : String, viewer_id : String}))
return columns.map((column, i) => {
let doc = document
let type = '', property = null, propertyValue = null, value: number | Object| string | null | undefined = 0;
// if there is a relation key, we overide the document from table collection, to the column specific collection
if(column.relation_key) {
let query = {
account_id: user.account_id,
collection_name: column.collection_name,
}
if (!report.public && user.role === 'Viewer') {
query['viewer_id'] = user.viewer_id
}
query[column.relation_key] = doc[column.relation_key]
doc = Report_Data.findOne(query)
}
// a column should only have either a formula, or a property assigned, never both
property = column.property
propertyValue = doc[property]
if(!column.formulaId) {
type = 'property'
value = doc[property]
}
if(column.formulaId) {
type = 'formula'
}
return { index: i, id: uuidv4(), type, property, propertyValue, value }
})
}
// generates rows within a table, if collection driven
const generateRows = (table: Table) => {
check(table, {id : String, title : String, type : String,
columns : [{id : String, label : String, formulaId : Match.Maybe(String), property : Match.Maybe(String), collection_name : String, relation_key : Match.Maybe(String), enum : String, symbol : Match.Maybe(String)}],
rows : [{id : String,
cells : [{id : String, index : Number, type : String, property : String, propertyValue : String, value : String, expression : String}] }],
collection : String, sort_by : String})
// if type is "static", the rows should already be defined
// TODO : allow user to defind rows for static table
if(table.type === 'collection') { | id: uuidv4(),
cells: generateCells(table.columns, document)
}))
} else return table.rows
}
const createRowsInTable = () => {
// run for each table, ensuring proper amount of rows
report?.tables.forEach((table: Table) => {
table.rows = <Array<TableRow>> generateRows(table)
return table
});
}
const sortTables = () => {
// run for each table, ensuring each table is sorted
report?.tables.forEach((table: Table) => {
if (table.sort_by) {
const sortedRows = table.rows.sort((a, b) => a.cells.find(c => c.property === table.sort_by)?.propertyValue - b.cells.find(c => c.property === table.sort_by)?.propertyValue)
table.rows = sortedRows
return table
}
})
}
const computeFormulas = async () => {
// we must loop over every table, row, so that formula results can be applied to individual cells, under a column
report?.tables.forEach(table => {
table.rows.forEach(row => {
let expression = '';
report?.formulas.forEach(formula => {
expression = formula.expression;
console.log("Before: ", formula.expression)
// individually process each value, for the final expression
formula.values.forEach((value: FormulaValue) => {
if(value.type === 'query') {
if(value.queryModifier) {
const cellPropertyValue = row.cells[formula.columnIndex].propertyValue
value.query[value.queryModifier] = cellPropertyValue
}
const query = Report_Data.find(value.query).fetch()
if(value.operation === 'sum') {
let values = query.map((obj: any) => obj[value.property])
expression = expression.replace(value.key, math.sum(values))
}
}
if(value.type === 'query_count') {
const count = Report_Data.find(value.query).count()
expression = expression.replace(value.key, String(count))
}
})
// evaluate the expression, after the values have been harvested
const result = math.evaluate(expression)
row.cells[formula.columnIndex].value = result
row.cells[formula.columnIndex].expression = expression
console.log("After: ", expression)
console.log("Eval: ", result, "\n\n" )
})
})
})
}
const run = async () => {
setReportToDisplay()
createRowsInTable()
await computeFormulas()
await sortTables()
// return the mutated report, containing the accurate values to display
return report
}
return run()
}
}) | const collection = performQuery(table.collection)
return collection.map((document: ReportData) => ({ | random_line_split |
string_parser.py | # -*- coding:utf-8 -*-
import sys
import re
import string
import random
import string
from django.conf import settings
rega_space = re.compile('[\s]+', re.I+re.U+re.DOTALL)
rega_int = re.compile('[^0-9]', re.U+re.I+re.DOTALL)
rega_quotes = re.compile('[\'"`]', re.U+re.I+re.DOTALL)
rega_strict_text = re.compile('[^0-9a-zA-Zа-яА-ЯёЁ/-]', re.U+re.I+re.DOTALL)
rega_html = re.compile('(<[^>]+>)?', re.U+re.I+re.DOTALL)
rega_style = re.compile('(<style[^>]*>.+</style>)?', re.U+re.I+re.DOTALL)
rega_ip = re.compile('([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})')
rega_dashes = re.compile('[-]+', re.I+re.U+re.DOTALL)
def fix_multiple_dashes(text: str):
"""Убирает повторяющиеся дефизы,
например, в ссылках
/sections--parallax/ => /sections-parallax/
:param text: исходный текст
"""
if not text:
return text
return rega_dashes.sub('-', text)
def check_ip(ip: str):
"""Проверка на айпи адрес
:param ip: айпи адрес
:return: айпи адрес
"""
search_ip = rega_ip.search(ip)
if search_ip:
return '%s.%s.%s.%s' % (search_ip.group(1),
search_ip.group(2),
search_ip.group(3),
search_ip.group(4), )
return None
def load_iptables(path='/home/jocker/iptables'):
"""Загрузить таблицу фаервола
:param path: путь к файлу с сохраненной таблицей фаервола
:return: список с заблоченными айпи-адресами
"""
fips = []
with open(path, 'r') as f:
content = f.readlines()
for line in content:
ip = check_ip(line)
if ip:
fips.append(ip)
return fips
def domain2punycode(domain: str) -> str:
"""Преобразуем домен к punycode
обратно 'xn--p1ai'.encode('idna').decode('idna') = 'ру'
:param domain: Домен
:return: домен строкой в punycode
"""
if not domain:
return ''
if '://' in domain:
domain = domain.split('://')[1]
if '?' in domain:
domain = domain.split("?")[0]
if '/' in domain:
domain = domain.split('/')[0]
if '#' in domain:
domain = domain.split('#')[0]
domain = domain.encode('idna')
return domain.decode('utf-8')
def generate_base_auth(passwd: str = 'bugoga', salt: str = '86') -> str:
"""CRYPT HTPASSWD, например, для nginx
т.к. утилита htpasswd только в апаче
Генерация хэша пароля для базовой авторизации
формат файла
jocker:86PZUBArg1zu6
86 - соль, остальное хэш пароля
:param passwd: пароль
:param salt: соль
:return: зашифрованная строка
"""
import crypt
return crypt.crypt(passwd, salt)
def q_string_fill(request, q_string):
"""Заполняем параметры для запроса q_string page, by
:param request: HttpRequest
:param q_string: параметры запроса
"""
page = 1
by = 20
method = request.GET if request.method == 'GET' else request.POST
# by=20, view=grid можно брать из сессии/кук
q_vars = {'page':1, 'by':20, 'size': None}
for var in q_vars:
value = None
# -----------------------------------------
# Значения уже могут содержаться в q_string
# -----------------------------------------
if var in q_string:
q_vars[var] = q_string[var]
# --------
# GET/POST
# --------
if method.get(var):
try:
value = int(request.GET[var])
except ValueError:
value = None
if value:
q_vars[var] = value
# ----------------------------------
# Вместо by можно передаваться size,
# вписываем его как by
# ----------------------------------
if var == 'size' and value:
q_vars['by'] = value
q_string['page'] = q_vars['page']
if q_string['page'] < 1:
q_string['page'] = 1
q_string['by'] = q_vars['by']
if q_string['by'] < 1:
q_string['by'] = 20
q_string['link'] = request.META['PATH_INFO']
if not 'q' in q_string:
q_string['q'] = {}
if method.get('q'):
q_string['q']['q'] = method['q']
# Дополняем всей мусоркой,
# которую передают в параметрах
for key in method.keys():
if not key in ('q', 'page', 'by', 'size'):
value = method.getlist(key)
if len(value) == 1:
q_string['q'][key] = value[0]
else:
q_string['q'][key] = value
def prepare_simple_text(text:str):
"""Убрать из текста хтмл-пробелы типа
которые хуй увидишь - даже в mysql они
печатаются обычным пробелом"""
rega_search = rega_space.search(text)
if rega_search:
text = rega_space.sub(' ', text)
return text.strip()
def string_size(text:str):
"""Размер строки в байтах"""
size = 0
if type(text) == str or type(text) == unicode:
size = sys.getsizeof(text)
return size
def kill_quotes(item, rega=None, replace=''):
"""Замена в строке на replace символы
(обычно на "", но можно на пробел
Своя регулярка, например,
заменить несколько пробелов на один
kill_quotes(" Радужный 42", "rega[\s]+", " ")"""
if not rega:
rega = rega_space
elif rega.startswith('rega[') and rega.enswith(']'):
rega = rega.replace('rega', '')
rega = re.compile(rega, re.U+re.I+re.DOTALL)
elif rega == 'int':
rega = rega_int
elif rega == 'quotes':
rega = rega_quotes
elif rega == 'strict_text':
rega = rega_strict_text
else:
return item
return rega.sub(replace, item)
def get_request_ip(request):
"""Получаем ip пользователя из request"""
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for #.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def GenPasswd(length: int = 7, alphabet: str = None):
"""Генератор пароля"""
passwd = []
if not alphabet:
alphabet = '%s%s' % (string.ascii_lowercase, string.digits)
for i in range(length):
letter = random.randrange(0, len(alphabet))
passwd.append(alphabet[letter])
return ''.join(passwd)
def random_boolean():
"""Случайное bool значение"""
variants = (True, False)
return variants[random.randrange(0, 2)]
def kill_html(text):
"""Убирает все теги из текста
:param text: текст с хтмл
"""
if text:
text = rega_style.sub('', text)
text = rega_html.sub('', text)
return text
def translit(text: str):
"""Транслит текста с русского в латиницу
:param text: текст для транслита
"""
if not text:
return ''
alphabet = string.ascii_lowercase
rus = ('абвгдеёжзийклмнопрстуфхцчшщъыьэюя №')
eng = ('a', 'b', 'v', 'g', 'd', 'e', 'yo', 'j', 'z', 'i', 'y', 'k', 'l', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'f', 'h', 'c', 'ch', 'sh', 'shh', '', 'y', '', 'e', 'yu', 'ya', '-', '-')
chars = ('-0123456789_%s' % (alphabet, ))
result = ''
for item in text:
letter = ''
item = item.lower()
if item in rus:
ind = rus.index(item)
letter = eng[ind]
else:
if item in chars:
letter = item
result += letter
return result
def digit_to_str(digit):
"""Число записываем прописью
:param digit: число, которое будем писать текстом
"""
result = ''
measure = ('', '', 'миллион', 'миллиард', 'триллион',
'квадриллион', 'квинтиллион', 'секстиллион',
'септиллион', 'октиллион', 'нониллион', 'дециллион')
measure_len = len(measure)
if not type(digit) == int:
try:
digit = float(digit)
except ValueError:
digit = 0
try:
digit = int(digit)
except ValueError:
digit = 0
digit_str = str(digit)
summa = summa_format(digit)
if ' ' in summa:
digits = summa.split(' ')
else:
digits = [summa, ]
digits.reverse()
# Каждый триптих обрабатываем, добавляя пояснение (тысячи, милионы)
z = 0
for item in digits:
woman = False
voca = None
triptix = item
if z == 1: # тысячу передаем в женском роде
woman = True
voca = ('тысяча', 'тысяч', 'тысячи')
else:
if z > 1 and z < (measure_len -1):
inf = measure[z]
voca = (inf, inf + 'ов', inf + 'а')
cur_digit = analyze_triptix(triptix, woman)
cur_name = analyze_digit(triptix, voca)
if result:
result = ' ' + result
cur_result = cur_digit
if cur_name:
cur_result += ' ' + cur_name
result = cur_result + result
z += 1
return result
def analyze_triptix(digit, woman=False):
"""Передаем число (максимум трехзначное), пишем его прописью
woman = False по умолчанию - мужской род, например, "один" (м.б одна)
:param digit: число
:param woman: если женский род
"""
result = ''
hundred = dozen = unit = None
hundred_str = dozen_str = unit_str = ''
units = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
units_str = ('', 'один', 'два', 'три', 'четыре', 'пять',
'шесть', 'семь', 'восемь', 'девять')
units_str_woman = ('', 'одна', 'две', 'три', 'четыре', 'пять',
'шесть', 'семь', 'восемь', 'девять')
dozens_ten = (10, 11, 12, 13, 14, 15, 16, 17, 18, 19)
dozens_ten_str = ('десять', 'одиннадцать', 'двенадцать', 'тринадцать',
'четырнадцать', 'пятнадцать', 'шестнадцать',
'семнадцать', 'восемнадцать', 'девятнадцать')
dozens_str = ('', '', 'двадцать', 'тридцать', 'сорок', 'пятьдесят',
'шестьдесят', 'семьдесят', 'восемьдесят', 'девяносто')
hundreds_str = ('', 'сто', 'двести', 'триста', 'четыреста', 'пятьсот',
'шестьсот', 'семьсот', 'восемьсот', 'девятьсот')
digit_str = str(digit)
digit_len = len(digit_str)
# Записываем unit
unit = int(digit_str[-1])
# Записываем dozen & hundred
if digit_len == 2: # Два разряда
dozen = int(digit_str[0])
if digit_len == 3: # Три разряда
hundred = int(digit_str[0])
dozen = int(digit_str[1])
# Если это 10-19
if dozen == 1:
# Создаем новый dozen - dozen_ten
dozen_ten = int("%s%s" % (dozen, unit))
if dozen_ten in dozens_ten:
dozen_str = dozens_ten_str[dozens_ten.index(dozen_ten)]
# Если это 20-99
else:
if dozen in units:
dozen_str = dozens_str[units.index(dozen)]
if unit in units and not dozen == 1:
if woman:
unit_str = units_str_woman[units.index(unit)]
else:
unit_str = units_str[units.index(unit)]
if hundred:
if hundred in units:
hundred_str = hundreds_str[units.index(hundred)]
# Пишем результат
for item in (hundred_str, dozen_str, unit_str):
if result and not result.endswith(' '):
result += ' '
result += item
return result.strip()
def analyze_digit(digit, end:tuple = ('тысяча', 'тысяч', 'тысячи')):
"""Пишет прописью с нужным окончанием слово (день, год, месяц)
digit = цифра, от которой зависит окончание
end = варианты окончаний:
1 миллион, 10 миллионов, 2 миллиона
(u"год", u"лет", u"года")
(u"месяц", u"месяцев", u"месяца")
(u"день", u"дней", u"дня")"""
result = ''
try:
digit = int(digit)
except ValueError:
return result
if not end:
return result
digit_str = str(digit)
digit_str = int(digit_str[-1])
# ---------------------------------------
# На окончания влияет 2 последних символа
# ---------------------------------------
if digit_str == 0 or digit_str >=5:
result = end[1] # тысяч
if digit_str == 1:
result = end[0] # тысяча
if digit_str > 1 and digit_str < 5:
result = end[2] # тысячи
if digit > 9:
digit_str2 = int(str(digit)[-2])
if digit_str2 == 1:
result = end[1] # тысяч
return result
def summa_format(summa):
"""Деньга с пробелами через 3 знака с конца
:param summa: число, которое будем разбивать
"""
if not summa:
return summa
summa_str = str(summa)
if ',' in summa_str:
summa_str = summa_str.replace(',', '.')
rub, kop = summa_str, 0
if '.' in summa_str:
rub, kop = summa_str.split('.')
summa_tmp = ''
summa_len = len(rub)
zero_kop = kop
try:
kop = int(kop)
except ValueError:
kop = 0
| else:
for i in range(summa_len):
if i > 0 and i % 3 == 0:
summa_tmp = ' ' + summa_tmp
summa_tmp = rub[summa_len-i-1] + summa_tmp
summa = summa_tmp
if kop > 0:
summa = '%s.%s' % (summa, zero_kop)
return summa
def ip2long(ip):
"""Преобразуем ip-адрес в число
:param ip: ip адрес
:return: число
"""
#>>> o = map(int, "146.0.238.42".split("."))
#[146, 0, 238, 42]
#>>> res = (16777216 * o[0]) + (65536 * o[1]) + (256 * o[2]) + o[3];
#2449534506
result = None
search_ip = rega_ip.match(ip)
if search_ip:
a, b, c, d = search_ip.group(1), search_ip.group(2), search_ip.group(3), search_ip.group(4)
### << Binary Left Shift
### The left operands value is moved left by the number of bits specified by the right operand. a << = 240 (means 1111 0000)
### >> Binary Right Shift
### The left operands value is moved right by the number of bits specified by the right operand. a >> = 15 (means 0000 1111)
result = (int(a) << 24) + (int(b) << 16) + (int(c) << 8) + int(d)
return result
def date_translater(date):
"""Пишет прописью в нужном падеже дату (день, год, месяц)
date = {"years":years, "months":months, "days":days}"""
period_digit = None
result = ''
if not date:
return result
if "years" in date:
period_digit = date['years']
period_padej = ('год', 'лет', 'года')
if "months" in date:
period_digit = date['months']
period_padej = (u"месяц", u"месяцев", u"месяца")
if "days" in date:
period_digit = date['days']
period_padej = (u"день", u"дней", u"дня")
if not period_digit:
return result
result = '%s ' % (period_digit, )
result += analyze_digit(period_digit, period_padej)
return result
def punto(text: str, direction: str = 'eng2rus'):
"""Перевод английской раскладки в русские буквы,
например, для ввода паролей по логину
:param text: текст для изменения
:param direction: тип для изменения
"""
if not text:
return text
text = '%s' % text
eng = ('q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p', '[', ']',
'a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l', ';', '\'', '\\',
'z', 'x', 'c', 'v', 'b', 'n', 'm', ',', '.', '/')
rus = ('й', 'ц', 'у', 'к', 'е', 'н', 'г', 'ш', 'щ', 'з', 'х', 'ъ',
'ф', 'ы', 'в', 'а', 'п', 'р', 'о', 'л', 'д', 'ж', 'э', 'ё',
'я', 'ч', 'с', 'м', 'и', 'т', 'ь', 'б', 'ю', '/')
def get_letter(letter: str):
"""Возвращаем букву после преобразований
:param letter: буква до преобразований
"""
if direction == 'eng2rus':
source = eng
dest = rus
elif direction == 'rus2eng':
source = rus
dest = eng
ind = None
if letter in source:
ind = source.index(letter)
return dest[ind]
return letter
result = ''
for letter in text:
result += get_letter(letter)
return result
def convert2camelcase(text: str, capitalize_flag: bool = True):
"""Преобразовываем в camelCase
:param text: исходный текст
:param capitalize_flag: нужно ли делать первую букву заглавной
"""
if not text:
return text
if '_' not in text:
return text
new_text = []
flag_underline = False
for i, letter in enumerate(text):
if i == 0:
letter = letter.upper()
if letter == '_':
flag_underline = True
continue
if flag_underline:
flag_underline = False
new_text.append(letter.upper())
else:
new_text.append(letter)
return ''.join(new_text)
def convert2snakecase(text: str):
"""Преобразовываем в snake_case
:param text: исходный текст
"""
if not text:
return text
text_len = len(text)
new_text = []
for i, letter in enumerate(text):
if letter.isupper():
# Если после этой буквы идет нижний регистр,
# тогда нужно подчеркивание
next_index_exists = i + 1 < text_len
if next_index_exists and text[i+1].islower():
new_text.append('_')
new_text.append(letter.lower())
continue
new_text.append(letter)
return ''.join(new_text)
| if summa_len <= 3:
if kop > 0:
return summa_str
return rub
| conditional_block |
string_parser.py | # -*- coding:utf-8 -*-
import sys
import re
import string
import random
import string
from django.conf import settings
rega_space = re.compile('[\s]+', re.I+re.U+re.DOTALL)
rega_int = re.compile('[^0-9]', re.U+re.I+re.DOTALL)
rega_quotes = re.compile('[\'"`]', re.U+re.I+re.DOTALL)
rega_strict_text = re.compile('[^0-9a-zA-Zа-яА-ЯёЁ/-]', re.U+re.I+re.DOTALL)
rega_html = re.compile('(<[^>]+>)?', re.U+re.I+re.DOTALL)
rega_style = re.compile('(<style[^>]*>.+</style>)?', re.U+re.I+re.DOTALL)
rega_ip = re.compile('([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})')
rega_dashes = re.compile('[-]+', re.I+re.U+re.DOTALL)
def fix_multiple_dashes(text: str):
"""Убирает повторяющиеся дефизы,
например, в ссылках
/sections--parallax/ => /sections-parallax/
:param text: исходный текст
"""
if not text:
return text
return rega_dashes.sub('-', text)
def check_ip(ip: str):
"""Проверка на айпи адрес
:param ip: айпи адрес
:return: айпи адрес
"""
search_ip = rega_ip.search(ip)
if search_ip:
return '%s.%s.%s.%s' % (search_ip.group(1),
search_ip.group(2),
search_ip.group(3),
search_ip.group(4), )
return None
def load_iptables(path='/home/jocker/iptables'):
"""Загрузить таблицу фаервола
:param path: путь к файлу с сохраненной таблицей фаервола
:return: список с заблоченными айпи-адресами
"""
fips = []
with open(path, 'r') as f:
content = f.readlines()
for line in content:
ip = check_ip(line)
if ip:
fips.append(ip)
return fips
def domain2punycode(domain: str) -> str:
"""Преобразуем домен к punycode
обратно 'xn--p1ai'.encode('idna').decode('idna') = 'ру'
:param domain: Домен
:return: домен строкой в punycode
"""
if not domain:
return ''
if '://' in domain:
domain = domain.split('://')[1]
if '?' in domain:
domain = domain.split("?")[0]
if '/' in domain:
domain = domain.split('/')[0]
if '#' in domain:
domain = domain.split('#')[0]
domain = domain.encode('idna')
return domain.decode('utf-8')
def generate_base_auth(passwd: str = 'bugoga', salt: str = '86') -> str:
"""CRYPT HTPASSWD, например, для nginx
т.к. утилита htpasswd только в апаче
Генерация хэша пароля для базовой авторизации
формат файла
jocker:86PZUBArg1zu6
86 - соль, остальное хэш пароля
:param passwd: пароль
:param salt: соль
:return: зашифрованная строка
"""
import crypt
return crypt.crypt(passwd, salt)
def q_string_fill(request, q_string):
"""Заполняем параметры для запроса q_string page, by
:param request: HttpRequest
:param q_string: параметры запроса
"""
page = 1
by = 20
method = request.GET if request.method == 'GET' else request.POST
# by=20, view=grid можно брать из сессии/кук
q_vars = {'page':1, 'by':20, 'size': None}
for var in q_vars:
value = None
# -----------------------------------------
# Значения уже могут содержаться в q_string
# -----------------------------------------
if var in q_string:
q_vars[var] = q_string[var]
# --------
# GET/POST
# --------
if method.get(var):
try:
value = int(request.GET[var])
except ValueError:
value = None
if value:
q_vars[var] = value
# ----------------------------------
# Вместо by можно передаваться size,
# вписываем его как by
# ----------------------------------
if var == 'size' and value:
q_vars['by'] = value
q_string['page'] = q_vars['page']
if q_string['page'] < 1:
q_string['page'] = 1
q_string['by'] = q_vars['by']
if q_string['by'] < 1:
q_string['by'] = 20
q_string['link'] = request.META['PATH_INFO']
if not 'q' in q_string:
q_string['q'] = {}
if method.get('q'):
q_string['q']['q'] = method['q']
# Дополняем всей мусоркой,
# которую передают в параметрах
for key in method.keys():
if not key in ('q', 'page', 'by', 'size'):
value = method.getlist(key)
if len(value) == 1:
q_string['q'][key] = value[0]
else:
q_string['q'][key] = value
def prepare_simple_text(text:str):
"""Убрать из текста хтмл-пробелы типа
которые хуй увидишь - даже в mysql они
печатаются обычным пробелом"""
rega_search = rega_space.search(text)
if rega_search:
text = rega_space.sub(' ', text)
return text.strip()
def string_size(text:str):
"""Размер строки в байтах"""
size = 0
if type(text) == str or type(text) == unicode:
size = sys.getsizeof(text)
return size
def kill_quotes(item, rega=None, replace=''):
"""Замена в строке на replace символы
(обычно на "", но можно на пробел
Своя регулярка, например,
заменить несколько пробелов на один
kill_quotes(" Радужный 42", "rega[\s]+", " ")"""
if not rega:
rega = rega_space
elif rega.startswith('rega[') and rega.enswith(']'):
rega = rega.replace('rega', '')
rega = re.compile(rega, re.U+re.I+re.DOTALL)
elif rega == 'int':
rega = rega_int
elif rega == 'quotes':
rega = rega_quotes
elif rega == 'strict_text':
rega = rega_strict_text
else:
return item
return rega.sub(replace, item)
def get_request_ip(request):
"""Получаем ip пользователя из request"""
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for #.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def GenPasswd(length: int = 7, alphabet: str = None):
"""Генератор пароля"""
passwd = []
if not alphabet:
alphabet = '%s%s' % (string.ascii_lowercase, string.digits)
for i in range(length):
letter = random.randrange(0, len(alphabet))
passwd.append(alphabet[letter])
return ''.join(passwd)
def random_boolean():
"""Случайное bool значение"""
variants = (True, False)
return variants[random.randrange(0, 2)]
def kill_html(text):
"""Убирает все теги из текста
:param text: текст с хтмл
"""
if text:
text = rega_style.sub('', text)
text = rega_html.sub('', text)
return text
def translit(text: str):
"""Транслит текста с русского в латиницу
:param text: текст для транслита
"""
if not text:
return ''
alphabet = string.ascii_lowercase
rus = ('абвгдеёжзийклмнопрстуфхцчшщъыьэюя №')
eng = ('a', 'b', 'v', 'g', 'd', 'e', 'yo', 'j', 'z', 'i', 'y', 'k', 'l', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'f', 'h', 'c', 'ch', 'sh', 'shh', '', 'y', '', 'e', 'yu', 'ya', '-', '-')
chars = ('-0123456789_%s' % (alphabet, ))
result = ''
for item in text:
letter = ''
item = item.lower()
if item in rus:
ind = rus.index(item)
letter = eng[ind]
else:
if item in chars:
letter = item
result += letter
return result
def digit_to_str(digit):
"""Число записываем прописью
:param digit: число, которое будем писать текстом
"""
result = ''
measure = ('', '', 'миллион', 'миллиард', 'триллион',
'квадриллион', 'квинтиллион', 'секстиллион',
'септиллион', 'октиллион', 'нониллион', 'дециллион')
measure_len = len(measure) | digit = float(digit)
except ValueError:
digit = 0
try:
digit = int(digit)
except ValueError:
digit = 0
digit_str = str(digit)
summa = summa_format(digit)
if ' ' in summa:
digits = summa.split(' ')
else:
digits = [summa, ]
digits.reverse()
# Каждый триптих обрабатываем, добавляя пояснение (тысячи, милионы)
z = 0
for item in digits:
woman = False
voca = None
triptix = item
if z == 1: # тысячу передаем в женском роде
woman = True
voca = ('тысяча', 'тысяч', 'тысячи')
else:
if z > 1 and z < (measure_len -1):
inf = measure[z]
voca = (inf, inf + 'ов', inf + 'а')
cur_digit = analyze_triptix(triptix, woman)
cur_name = analyze_digit(triptix, voca)
if result:
result = ' ' + result
cur_result = cur_digit
if cur_name:
cur_result += ' ' + cur_name
result = cur_result + result
z += 1
return result
def analyze_triptix(digit, woman=False):
"""Передаем число (максимум трехзначное), пишем его прописью
woman = False по умолчанию - мужской род, например, "один" (м.б одна)
:param digit: число
:param woman: если женский род
"""
result = ''
hundred = dozen = unit = None
hundred_str = dozen_str = unit_str = ''
units = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
units_str = ('', 'один', 'два', 'три', 'четыре', 'пять',
'шесть', 'семь', 'восемь', 'девять')
units_str_woman = ('', 'одна', 'две', 'три', 'четыре', 'пять',
'шесть', 'семь', 'восемь', 'девять')
dozens_ten = (10, 11, 12, 13, 14, 15, 16, 17, 18, 19)
dozens_ten_str = ('десять', 'одиннадцать', 'двенадцать', 'тринадцать',
'четырнадцать', 'пятнадцать', 'шестнадцать',
'семнадцать', 'восемнадцать', 'девятнадцать')
dozens_str = ('', '', 'двадцать', 'тридцать', 'сорок', 'пятьдесят',
'шестьдесят', 'семьдесят', 'восемьдесят', 'девяносто')
hundreds_str = ('', 'сто', 'двести', 'триста', 'четыреста', 'пятьсот',
'шестьсот', 'семьсот', 'восемьсот', 'девятьсот')
digit_str = str(digit)
digit_len = len(digit_str)
# Записываем unit
unit = int(digit_str[-1])
# Записываем dozen & hundred
if digit_len == 2: # Два разряда
dozen = int(digit_str[0])
if digit_len == 3: # Три разряда
hundred = int(digit_str[0])
dozen = int(digit_str[1])
# Если это 10-19
if dozen == 1:
# Создаем новый dozen - dozen_ten
dozen_ten = int("%s%s" % (dozen, unit))
if dozen_ten in dozens_ten:
dozen_str = dozens_ten_str[dozens_ten.index(dozen_ten)]
# Если это 20-99
else:
if dozen in units:
dozen_str = dozens_str[units.index(dozen)]
if unit in units and not dozen == 1:
if woman:
unit_str = units_str_woman[units.index(unit)]
else:
unit_str = units_str[units.index(unit)]
if hundred:
if hundred in units:
hundred_str = hundreds_str[units.index(hundred)]
# Пишем результат
for item in (hundred_str, dozen_str, unit_str):
if result and not result.endswith(' '):
result += ' '
result += item
return result.strip()
def analyze_digit(digit, end:tuple = ('тысяча', 'тысяч', 'тысячи')):
"""Пишет прописью с нужным окончанием слово (день, год, месяц)
digit = цифра, от которой зависит окончание
end = варианты окончаний:
1 миллион, 10 миллионов, 2 миллиона
(u"год", u"лет", u"года")
(u"месяц", u"месяцев", u"месяца")
(u"день", u"дней", u"дня")"""
result = ''
try:
digit = int(digit)
except ValueError:
return result
if not end:
return result
digit_str = str(digit)
digit_str = int(digit_str[-1])
# ---------------------------------------
# На окончания влияет 2 последних символа
# ---------------------------------------
if digit_str == 0 or digit_str >=5:
result = end[1] # тысяч
if digit_str == 1:
result = end[0] # тысяча
if digit_str > 1 and digit_str < 5:
result = end[2] # тысячи
if digit > 9:
digit_str2 = int(str(digit)[-2])
if digit_str2 == 1:
result = end[1] # тысяч
return result
def summa_format(summa):
"""Деньга с пробелами через 3 знака с конца
:param summa: число, которое будем разбивать
"""
if not summa:
return summa
summa_str = str(summa)
if ',' in summa_str:
summa_str = summa_str.replace(',', '.')
rub, kop = summa_str, 0
if '.' in summa_str:
rub, kop = summa_str.split('.')
summa_tmp = ''
summa_len = len(rub)
zero_kop = kop
try:
kop = int(kop)
except ValueError:
kop = 0
if summa_len <= 3:
if kop > 0:
return summa_str
return rub
else:
for i in range(summa_len):
if i > 0 and i % 3 == 0:
summa_tmp = ' ' + summa_tmp
summa_tmp = rub[summa_len-i-1] + summa_tmp
summa = summa_tmp
if kop > 0:
summa = '%s.%s' % (summa, zero_kop)
return summa
def ip2long(ip):
"""Преобразуем ip-адрес в число
:param ip: ip адрес
:return: число
"""
#>>> o = map(int, "146.0.238.42".split("."))
#[146, 0, 238, 42]
#>>> res = (16777216 * o[0]) + (65536 * o[1]) + (256 * o[2]) + o[3];
#2449534506
result = None
search_ip = rega_ip.match(ip)
if search_ip:
a, b, c, d = search_ip.group(1), search_ip.group(2), search_ip.group(3), search_ip.group(4)
### << Binary Left Shift
### The left operands value is moved left by the number of bits specified by the right operand. a << = 240 (means 1111 0000)
### >> Binary Right Shift
### The left operands value is moved right by the number of bits specified by the right operand. a >> = 15 (means 0000 1111)
result = (int(a) << 24) + (int(b) << 16) + (int(c) << 8) + int(d)
return result
def date_translater(date):
"""Пишет прописью в нужном падеже дату (день, год, месяц)
date = {"years":years, "months":months, "days":days}"""
period_digit = None
result = ''
if not date:
return result
if "years" in date:
period_digit = date['years']
period_padej = ('год', 'лет', 'года')
if "months" in date:
period_digit = date['months']
period_padej = (u"месяц", u"месяцев", u"месяца")
if "days" in date:
period_digit = date['days']
period_padej = (u"день", u"дней", u"дня")
if not period_digit:
return result
result = '%s ' % (period_digit, )
result += analyze_digit(period_digit, period_padej)
return result
def punto(text: str, direction: str = 'eng2rus'):
"""Перевод английской раскладки в русские буквы,
например, для ввода паролей по логину
:param text: текст для изменения
:param direction: тип для изменения
"""
if not text:
return text
text = '%s' % text
eng = ('q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p', '[', ']',
'a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l', ';', '\'', '\\',
'z', 'x', 'c', 'v', 'b', 'n', 'm', ',', '.', '/')
rus = ('й', 'ц', 'у', 'к', 'е', 'н', 'г', 'ш', 'щ', 'з', 'х', 'ъ',
'ф', 'ы', 'в', 'а', 'п', 'р', 'о', 'л', 'д', 'ж', 'э', 'ё',
'я', 'ч', 'с', 'м', 'и', 'т', 'ь', 'б', 'ю', '/')
def get_letter(letter: str):
"""Возвращаем букву после преобразований
:param letter: буква до преобразований
"""
if direction == 'eng2rus':
source = eng
dest = rus
elif direction == 'rus2eng':
source = rus
dest = eng
ind = None
if letter in source:
ind = source.index(letter)
return dest[ind]
return letter
result = ''
for letter in text:
result += get_letter(letter)
return result
def convert2camelcase(text: str, capitalize_flag: bool = True):
"""Преобразовываем в camelCase
:param text: исходный текст
:param capitalize_flag: нужно ли делать первую букву заглавной
"""
if not text:
return text
if '_' not in text:
return text
new_text = []
flag_underline = False
for i, letter in enumerate(text):
if i == 0:
letter = letter.upper()
if letter == '_':
flag_underline = True
continue
if flag_underline:
flag_underline = False
new_text.append(letter.upper())
else:
new_text.append(letter)
return ''.join(new_text)
def convert2snakecase(text: str):
"""Преобразовываем в snake_case
:param text: исходный текст
"""
if not text:
return text
text_len = len(text)
new_text = []
for i, letter in enumerate(text):
if letter.isupper():
# Если после этой буквы идет нижний регистр,
# тогда нужно подчеркивание
next_index_exists = i + 1 < text_len
if next_index_exists and text[i+1].islower():
new_text.append('_')
new_text.append(letter.lower())
continue
new_text.append(letter)
return ''.join(new_text) | if not type(digit) == int:
try: | random_line_split |
string_parser.py | # -*- coding:utf-8 -*-
import sys
import re
import string
import random
import string
from django.conf import settings
rega_space = re.compile('[\s]+', re.I+re.U+re.DOTALL)
rega_int = re.compile('[^0-9]', re.U+re.I+re.DOTALL)
rega_quotes = re.compile('[\'"`]', re.U+re.I+re.DOTALL)
rega_strict_text = re.compile('[^0-9a-zA-Zа-яА-ЯёЁ/-]', re.U+re.I+re.DOTALL)
rega_html = re.compile('(<[^>]+>)?', re.U+re.I+re.DOTALL)
rega_style = re.compile('(<style[^>]*>.+</style>)?', re.U+re.I+re.DOTALL)
rega_ip = re.compile('([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})')
rega_dashes = re.compile('[-]+', re.I+re.U+re.DOTALL)
def fix_multiple_dashes(text: str):
"""Убирает повторяющиеся дефизы,
например, в ссылках
/sections--parallax/ => /sections-parallax/
:param text: исходный текст
"""
if not text:
return text
return rega_dashes.sub('-', text)
def check_ip(ip: str):
"""Проверка на айпи адрес
:param ip: айпи адрес
:return: айпи адрес
"""
search_ip = rega_ip.search(ip)
if search_ip:
return '%s.%s.%s.%s' % (search_ip.group(1),
search_ip.group(2),
search_ip.group(3),
search_ip.group(4), )
return None
def load_iptables(path='/home/jocker/iptables'):
"""Загрузить таблицу фаервола
:param path: путь к файлу с сохраненной таблицей фаервола
:return: список с заблоченными айпи-адресами
"""
fips = []
with open(path, 'r') as f:
content = f.readlines()
for line in content:
ip = check_ip(line)
if ip:
fips.append(ip)
return fips
def domain2punycode(domain: str) -> str:
"""Преобразуем домен к punycode
обратно 'xn--p1ai'.encode('idna').decode('idna') = 'ру'
:param domain: Домен
:return: домен строкой в punycode
"""
if not domain:
return ''
if '://' in domain:
domain = domain.split('://')[1]
if '?' in domain:
domain = domain.split("?")[0]
if '/' in domain:
domain = domain.split('/')[0]
if '#' in domain:
domain = domain.split('#')[0]
domain = domain.encode('idna')
return domain.decode('utf-8')
def generate_base_auth(passwd: str = 'bugoga', salt: str = '86') -> str:
"""CRYPT HTPASSWD, например, для nginx
т.к. утилита htpasswd только в апаче
Генерация хэша пароля для базовой авторизации
формат файла
jocker:86PZUBArg1zu6
86 - соль, остальное хэш пароля
:param passwd: пароль
:param salt: соль
:return: зашифрованная строка
"""
import crypt
return crypt.crypt(passwd, salt)
def q_string_fill(request, q_string):
"""Заполняем параметры для запроса q_string page, by
:param request: HttpRequest
:param q_string: параметры запроса
"""
page = 1
by = 20
method = request.GET if request.method == 'GET' else request.POST
# by=20, view=grid можно брать из сессии/кук
q_vars = {'page':1, 'by':20, 'size': None}
for var in q_vars:
value = None
# -----------------------------------------
# Значения уже могут содержаться в q_string
# -----------------------------------------
if var in q_string:
q_vars[var] = q_string[var]
# --------
# GET/POST
# --------
if method.get(var):
try:
value = int(request.GET[var])
except ValueError:
value = None
if value:
q_vars[var] = value
# ----------------------------------
# Вместо by можно передаваться size,
# вписываем его как by
# ----------------------------------
if var == 'size' and value:
q_vars['by'] = value
q_string['page'] = q_vars['page']
if q_string['page'] < 1:
q_string['page'] = 1
q_string['by'] = q_vars['by']
if q_string['by'] < 1:
q_string['by'] = 20
q_string['link'] = request.META['PATH_INFO']
if not 'q' in q_string:
q_string['q'] = {}
if method.get('q'):
q_string['q']['q'] = method['q']
# Дополняем всей мусоркой,
# которую передают в параметрах
for key in method.keys():
if not key in ('q', 'page', 'by', 'size'):
value = method.getlist(key)
if len(value) == 1:
q_string['q'][key] = value[0]
else:
q_string['q'][key] = value
def prepare_simple_text(text:str):
"""Убрать из текста хтмл-пробелы типа
которые хуй увидишь - даже в mysql они
печатаются обычным пробелом"""
rega_search = rega_space.search(text)
if rega_search:
text = rega_space.sub(' ', text)
return text.strip()
def string_size(text:str):
"""Размер строки в байтах"""
size = 0
if type(text) == str or type(text) == unicode:
size = sys.getsizeof(text)
return size
def kill_quotes(item, rega=None, replace=''):
"""Замена в строке на replace символы
(обычно на "", но можно на пробел
Своя регулярка, например,
заменить несколько пробелов на один
kill_quotes(" Радужный 42", "rega[\s]+", " ")"""
if not rega:
rega = rega_space
elif rega.startswith('rega[') and rega.enswith(']'):
rega = rega.replace('rega', '')
rega = re.compile(rega, re.U+re.I+re.DOTALL)
elif rega == 'int':
rega = rega_int
elif rega == 'quotes':
rega = rega_quotes
elif rega == 'strict_text':
rega = rega_strict_text
else:
return item
return rega.sub(replace, item)
def get_request_ip(request):
"""Получаем ip пользователя из request"""
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for #.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def GenPasswd(length: int = 7, alphabet: str = None):
"""Генератор пароля"""
passwd = []
if not alphabet:
alphabet = '%s%s' % (string.ascii_lowercase, string.digits)
for i in range(length):
letter = random.randrange(0, len(alphabet))
passwd.append(alphabet[letter])
return ''.join(passwd)
def random_boolean():
"""Случайное bool значение"""
variants = (True, False)
return variants[random.randrange(0, 2)]
def kill_html(text):
"""Убирает все теги из текста
:param text: текст с хтмл
"""
if text:
text = rega_style.sub('', text)
text = rega_html.sub('', text)
return text
def translit(text: str):
"""Транслит текста с русского в латиницу
:param text: текст для транслита
"""
if not text:
return ''
alphabet = string. | (alphabet, ))
result = ''
for item in text:
letter = ''
item = item.lower()
if item in rus:
ind = rus.index(item)
letter = eng[ind]
else:
if item in chars:
letter = item
result += letter
return result
def digit_to_str(digit):
"""Число записываем прописью
:param digit: число, которое будем писать текстом
"""
result = ''
measure = ('', '', 'миллион', 'миллиард', 'триллион',
'квадриллион', 'квинтиллион', 'секстиллион',
'септиллион', 'октиллион', 'нониллион', 'дециллион')
measure_len = len(measure)
if not type(digit) == int:
try:
digit = float(digit)
except ValueError:
digit = 0
try:
digit = int(digit)
except ValueError:
digit = 0
digit_str = str(digit)
summa = summa_format(digit)
if ' ' in summa:
digits = summa.split(' ')
else:
digits = [summa, ]
digits.reverse()
# Каждый триптих обрабатываем, добавляя пояснение (тысячи, милионы)
z = 0
for item in digits:
woman = False
voca = None
triptix = item
if z == 1: # тысячу передаем в женском роде
woman = True
voca = ('тысяча', 'тысяч', 'тысячи')
else:
if z > 1 and z < (measure_len -1):
inf = measure[z]
voca = (inf, inf + 'ов', inf + 'а')
cur_digit = analyze_triptix(triptix, woman)
cur_name = analyze_digit(triptix, voca)
if result:
result = ' ' + result
cur_result = cur_digit
if cur_name:
cur_result += ' ' + cur_name
result = cur_result + result
z += 1
return result
def analyze_triptix(digit, woman=False):
"""Передаем число (максимум трехзначное), пишем его прописью
woman = False по умолчанию - мужской род, например, "один" (м.б одна)
:param digit: число
:param woman: если женский род
"""
result = ''
hundred = dozen = unit = None
hundred_str = dozen_str = unit_str = ''
units = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
units_str = ('', 'один', 'два', 'три', 'четыре', 'пять',
'шесть', 'семь', 'восемь', 'девять')
units_str_woman = ('', 'одна', 'две', 'три', 'четыре', 'пять',
'шесть', 'семь', 'восемь', 'девять')
dozens_ten = (10, 11, 12, 13, 14, 15, 16, 17, 18, 19)
dozens_ten_str = ('десять', 'одиннадцать', 'двенадцать', 'тринадцать',
'четырнадцать', 'пятнадцать', 'шестнадцать',
'семнадцать', 'восемнадцать', 'девятнадцать')
dozens_str = ('', '', 'двадцать', 'тридцать', 'сорок', 'пятьдесят',
'шестьдесят', 'семьдесят', 'восемьдесят', 'девяносто')
hundreds_str = ('', 'сто', 'двести', 'триста', 'четыреста', 'пятьсот',
'шестьсот', 'семьсот', 'восемьсот', 'девятьсот')
digit_str = str(digit)
digit_len = len(digit_str)
# Записываем unit
unit = int(digit_str[-1])
# Записываем dozen & hundred
if digit_len == 2: # Два разряда
dozen = int(digit_str[0])
if digit_len == 3: # Три разряда
hundred = int(digit_str[0])
dozen = int(digit_str[1])
# Если это 10-19
if dozen == 1:
# Создаем новый dozen - dozen_ten
dozen_ten = int("%s%s" % (dozen, unit))
if dozen_ten in dozens_ten:
dozen_str = dozens_ten_str[dozens_ten.index(dozen_ten)]
# Если это 20-99
else:
if dozen in units:
dozen_str = dozens_str[units.index(dozen)]
if unit in units and not dozen == 1:
if woman:
unit_str = units_str_woman[units.index(unit)]
else:
unit_str = units_str[units.index(unit)]
if hundred:
if hundred in units:
hundred_str = hundreds_str[units.index(hundred)]
# Пишем результат
for item in (hundred_str, dozen_str, unit_str):
if result and not result.endswith(' '):
result += ' '
result += item
return result.strip()
def analyze_digit(digit, end:tuple = ('тысяча', 'тысяч', 'тысячи')):
"""Пишет прописью с нужным окончанием слово (день, год, месяц)
digit = цифра, от которой зависит окончание
end = варианты окончаний:
1 миллион, 10 миллионов, 2 миллиона
(u"год", u"лет", u"года")
(u"месяц", u"месяцев", u"месяца")
(u"день", u"дней", u"дня")"""
result = ''
try:
digit = int(digit)
except ValueError:
return result
if not end:
return result
digit_str = str(digit)
digit_str = int(digit_str[-1])
# ---------------------------------------
# На окончания влияет 2 последних символа
# ---------------------------------------
if digit_str == 0 or digit_str >=5:
result = end[1] # тысяч
if digit_str == 1:
result = end[0] # тысяча
if digit_str > 1 and digit_str < 5:
result = end[2] # тысячи
if digit > 9:
digit_str2 = int(str(digit)[-2])
if digit_str2 == 1:
result = end[1] # тысяч
return result
def summa_format(summa):
"""Деньга с пробелами через 3 знака с конца
:param summa: число, которое будем разбивать
"""
if not summa:
return summa
summa_str = str(summa)
if ',' in summa_str:
summa_str = summa_str.replace(',', '.')
rub, kop = summa_str, 0
if '.' in summa_str:
rub, kop = summa_str.split('.')
summa_tmp = ''
summa_len = len(rub)
zero_kop = kop
try:
kop = int(kop)
except ValueError:
kop = 0
if summa_len <= 3:
if kop > 0:
return summa_str
return rub
else:
for i in range(summa_len):
if i > 0 and i % 3 == 0:
summa_tmp = ' ' + summa_tmp
summa_tmp = rub[summa_len-i-1] + summa_tmp
summa = summa_tmp
if kop > 0:
summa = '%s.%s' % (summa, zero_kop)
return summa
def ip2long(ip):
"""Преобразуем ip-адрес в число
:param ip: ip адрес
:return: число
"""
#>>> o = map(int, "146.0.238.42".split("."))
#[146, 0, 238, 42]
#>>> res = (16777216 * o[0]) + (65536 * o[1]) + (256 * o[2]) + o[3];
#2449534506
result = None
search_ip = rega_ip.match(ip)
if search_ip:
a, b, c, d = search_ip.group(1), search_ip.group(2), search_ip.group(3), search_ip.group(4)
### << Binary Left Shift
### The left operands value is moved left by the number of bits specified by the right operand. a << = 240 (means 1111 0000)
### >> Binary Right Shift
### The left operands value is moved right by the number of bits specified by the right operand. a >> = 15 (means 0000 1111)
result = (int(a) << 24) + (int(b) << 16) + (int(c) << 8) + int(d)
return result
def date_translater(date):
"""Пишет прописью в нужном падеже дату (день, год, месяц)
date = {"years":years, "months":months, "days":days}"""
period_digit = None
result = ''
if not date:
return result
if "years" in date:
period_digit = date['years']
period_padej = ('год', 'лет', 'года')
if "months" in date:
period_digit = date['months']
period_padej = (u"месяц", u"месяцев", u"месяца")
if "days" in date:
period_digit = date['days']
period_padej = (u"день", u"дней", u"дня")
if not period_digit:
return result
result = '%s ' % (period_digit, )
result += analyze_digit(period_digit, period_padej)
return result
def punto(text: str, direction: str = 'eng2rus'):
"""Перевод английской раскладки в русские буквы,
например, для ввода паролей по логину
:param text: текст для изменения
:param direction: тип для изменения
"""
if not text:
return text
text = '%s' % text
eng = ('q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p', '[', ']',
'a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l', ';', '\'', '\\',
'z', 'x', 'c', 'v', 'b', 'n', 'm', ',', '.', '/')
rus = ('й', 'ц', 'у', 'к', 'е', 'н', 'г', 'ш', 'щ', 'з', 'х', 'ъ',
'ф', 'ы', 'в', 'а', 'п', 'р', 'о', 'л', 'д', 'ж', 'э', 'ё',
'я', 'ч', 'с', 'м', 'и', 'т', 'ь', 'б', 'ю', '/')
def get_letter(letter: str):
"""Возвращаем букву после преобразований
:param letter: буква до преобразований
"""
if direction == 'eng2rus':
source = eng
dest = rus
elif direction == 'rus2eng':
source = rus
dest = eng
ind = None
if letter in source:
ind = source.index(letter)
return dest[ind]
return letter
result = ''
for letter in text:
result += get_letter(letter)
return result
def convert2camelcase(text: str, capitalize_flag: bool = True):
"""Преобразовываем в camelCase
:param text: исходный текст
:param capitalize_flag: нужно ли делать первую букву заглавной
"""
if not text:
return text
if '_' not in text:
return text
new_text = []
flag_underline = False
for i, letter in enumerate(text):
if i == 0:
letter = letter.upper()
if letter == '_':
flag_underline = True
continue
if flag_underline:
flag_underline = False
new_text.append(letter.upper())
else:
new_text.append(letter)
return ''.join(new_text)
def convert2snakecase(text: str):
"""Преобразовываем в snake_case
:param text: исходный текст
"""
if not text:
return text
text_len = len(text)
new_text = []
for i, letter in enumerate(text):
if letter.isupper():
# Если после этой буквы идет нижний регистр,
# тогда нужно подчеркивание
next_index_exists = i + 1 < text_len
if next_index_exists and text[i+1].islower():
new_text.append('_')
new_text.append(letter.lower())
continue
new_text.append(letter)
return ''.join(new_text)
| ascii_lowercase
rus = ('абвгдеёжзийклмнопрстуфхцчшщъыьэюя №')
eng = ('a', 'b', 'v', 'g', 'd', 'e', 'yo', 'j', 'z', 'i', 'y', 'k', 'l', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'f', 'h', 'c', 'ch', 'sh', 'shh', '', 'y', '', 'e', 'yu', 'ya', '-', '-')
chars = ('-0123456789_%s' % | identifier_body |
string_parser.py | # -*- coding:utf-8 -*-
import sys
import re
import string
import random
import string
from django.conf import settings
rega_space = re.compile('[\s]+', re.I+re.U+re.DOTALL)
rega_int = re.compile('[^0-9]', re.U+re.I+re.DOTALL)
rega_quotes = re.compile('[\'"`]', re.U+re.I+re.DOTALL)
rega_strict_text = re.compile('[^0-9a-zA-Zа-яА-ЯёЁ/-]', re.U+re.I+re.DOTALL)
rega_html = re.compile('(<[^>]+>)?', re.U+re.I+re.DOTALL)
rega_style = re.compile('(<style[^>]*>.+</style>)?', re.U+re.I+re.DOTALL)
rega_ip = re.compile('([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})')
rega_dashes = re.compile('[-]+', re.I+re.U+re.DOTALL)
def fix_multiple_dashes(text: str):
"""Убирает повторяющиеся дефизы,
например, в ссылках
/sections--parallax/ => /sections-parallax/
:param text: исходный текст
"""
if not text:
return text
return rega_dashes.sub('-', text)
def check_ip(ip: str):
"""Проверка на айпи адрес
:param ip: айпи адрес
:return: айпи адрес
"""
search_ip = rega_ip.search(ip)
if search_ip:
return '%s.%s.%s.%s' % (search_ip.group(1),
search_ip.group(2),
search_ip.group(3),
search_ip.group(4), )
return None
def load_iptables(path='/home/jocker/iptables'):
"""Загрузить таблицу фаервола
:param path: путь к файлу с сохраненной таблицей фаервола
:return: список с заблоченными айпи-адресами
"""
fips = []
with open(path, 'r') as f:
content = f.readlines()
for line in content:
ip = check_ip(line)
if ip:
fips.append(ip)
return fips
def domain2punycode(domain: str) -> str:
"""Преобразуем домен к punycode
обратно 'xn--p1ai'.encode('idna').decode('idna') = 'ру'
:param domain: Домен
:return: домен строк | """
if not domain:
return ''
if '://' in domain:
domain = domain.split('://')[1]
if '?' in domain:
domain = domain.split("?")[0]
if '/' in domain:
domain = domain.split('/')[0]
if '#' in domain:
domain = domain.split('#')[0]
domain = domain.encode('idna')
return domain.decode('utf-8')
def generate_base_auth(passwd: str = 'bugoga', salt: str = '86') -> str:
"""CRYPT HTPASSWD, например, для nginx
т.к. утилита htpasswd только в апаче
Генерация хэша пароля для базовой авторизации
формат файла
jocker:86PZUBArg1zu6
86 - соль, остальное хэш пароля
:param passwd: пароль
:param salt: соль
:return: зашифрованная строка
"""
import crypt
return crypt.crypt(passwd, salt)
def q_string_fill(request, q_string):
"""Заполняем параметры для запроса q_string page, by
:param request: HttpRequest
:param q_string: параметры запроса
"""
page = 1
by = 20
method = request.GET if request.method == 'GET' else request.POST
# by=20, view=grid можно брать из сессии/кук
q_vars = {'page':1, 'by':20, 'size': None}
for var in q_vars:
value = None
# -----------------------------------------
# Значения уже могут содержаться в q_string
# -----------------------------------------
if var in q_string:
q_vars[var] = q_string[var]
# --------
# GET/POST
# --------
if method.get(var):
try:
value = int(request.GET[var])
except ValueError:
value = None
if value:
q_vars[var] = value
# ----------------------------------
# Вместо by можно передаваться size,
# вписываем его как by
# ----------------------------------
if var == 'size' and value:
q_vars['by'] = value
q_string['page'] = q_vars['page']
if q_string['page'] < 1:
q_string['page'] = 1
q_string['by'] = q_vars['by']
if q_string['by'] < 1:
q_string['by'] = 20
q_string['link'] = request.META['PATH_INFO']
if not 'q' in q_string:
q_string['q'] = {}
if method.get('q'):
q_string['q']['q'] = method['q']
# Дополняем всей мусоркой,
# которую передают в параметрах
for key in method.keys():
if not key in ('q', 'page', 'by', 'size'):
value = method.getlist(key)
if len(value) == 1:
q_string['q'][key] = value[0]
else:
q_string['q'][key] = value
def prepare_simple_text(text:str):
"""Убрать из текста хтмл-пробелы типа
которые хуй увидишь - даже в mysql они
печатаются обычным пробелом"""
rega_search = rega_space.search(text)
if rega_search:
text = rega_space.sub(' ', text)
return text.strip()
def string_size(text:str):
"""Размер строки в байтах"""
size = 0
if type(text) == str or type(text) == unicode:
size = sys.getsizeof(text)
return size
def kill_quotes(item, rega=None, replace=''):
"""Замена в строке на replace символы
(обычно на "", но можно на пробел
Своя регулярка, например,
заменить несколько пробелов на один
kill_quotes(" Радужный 42", "rega[\s]+", " ")"""
if not rega:
rega = rega_space
elif rega.startswith('rega[') and rega.enswith(']'):
rega = rega.replace('rega', '')
rega = re.compile(rega, re.U+re.I+re.DOTALL)
elif rega == 'int':
rega = rega_int
elif rega == 'quotes':
rega = rega_quotes
elif rega == 'strict_text':
rega = rega_strict_text
else:
return item
return rega.sub(replace, item)
def get_request_ip(request):
"""Получаем ip пользователя из request"""
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for #.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def GenPasswd(length: int = 7, alphabet: str = None):
"""Генератор пароля"""
passwd = []
if not alphabet:
alphabet = '%s%s' % (string.ascii_lowercase, string.digits)
for i in range(length):
letter = random.randrange(0, len(alphabet))
passwd.append(alphabet[letter])
return ''.join(passwd)
def random_boolean():
"""Случайное bool значение"""
variants = (True, False)
return variants[random.randrange(0, 2)]
def kill_html(text):
"""Убирает все теги из текста
:param text: текст с хтмл
"""
if text:
text = rega_style.sub('', text)
text = rega_html.sub('', text)
return text
def translit(text: str):
"""Транслит текста с русского в латиницу
:param text: текст для транслита
"""
if not text:
return ''
alphabet = string.ascii_lowercase
rus = ('абвгдеёжзийклмнопрстуфхцчшщъыьэюя №')
eng = ('a', 'b', 'v', 'g', 'd', 'e', 'yo', 'j', 'z', 'i', 'y', 'k', 'l', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'f', 'h', 'c', 'ch', 'sh', 'shh', '', 'y', '', 'e', 'yu', 'ya', '-', '-')
chars = ('-0123456789_%s' % (alphabet, ))
result = ''
for item in text:
letter = ''
item = item.lower()
if item in rus:
ind = rus.index(item)
letter = eng[ind]
else:
if item in chars:
letter = item
result += letter
return result
def digit_to_str(digit):
"""Число записываем прописью
:param digit: число, которое будем писать текстом
"""
result = ''
measure = ('', '', 'миллион', 'миллиард', 'триллион',
'квадриллион', 'квинтиллион', 'секстиллион',
'септиллион', 'октиллион', 'нониллион', 'дециллион')
measure_len = len(measure)
if not type(digit) == int:
try:
digit = float(digit)
except ValueError:
digit = 0
try:
digit = int(digit)
except ValueError:
digit = 0
digit_str = str(digit)
summa = summa_format(digit)
if ' ' in summa:
digits = summa.split(' ')
else:
digits = [summa, ]
digits.reverse()
# Каждый триптих обрабатываем, добавляя пояснение (тысячи, милионы)
z = 0
for item in digits:
woman = False
voca = None
triptix = item
if z == 1: # тысячу передаем в женском роде
woman = True
voca = ('тысяча', 'тысяч', 'тысячи')
else:
if z > 1 and z < (measure_len -1):
inf = measure[z]
voca = (inf, inf + 'ов', inf + 'а')
cur_digit = analyze_triptix(triptix, woman)
cur_name = analyze_digit(triptix, voca)
if result:
result = ' ' + result
cur_result = cur_digit
if cur_name:
cur_result += ' ' + cur_name
result = cur_result + result
z += 1
return result
def analyze_triptix(digit, woman=False):
"""Передаем число (максимум трехзначное), пишем его прописью
woman = False по умолчанию - мужской род, например, "один" (м.б одна)
:param digit: число
:param woman: если женский род
"""
result = ''
hundred = dozen = unit = None
hundred_str = dozen_str = unit_str = ''
units = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
units_str = ('', 'один', 'два', 'три', 'четыре', 'пять',
'шесть', 'семь', 'восемь', 'девять')
units_str_woman = ('', 'одна', 'две', 'три', 'четыре', 'пять',
'шесть', 'семь', 'восемь', 'девять')
dozens_ten = (10, 11, 12, 13, 14, 15, 16, 17, 18, 19)
dozens_ten_str = ('десять', 'одиннадцать', 'двенадцать', 'тринадцать',
'четырнадцать', 'пятнадцать', 'шестнадцать',
'семнадцать', 'восемнадцать', 'девятнадцать')
dozens_str = ('', '', 'двадцать', 'тридцать', 'сорок', 'пятьдесят',
'шестьдесят', 'семьдесят', 'восемьдесят', 'девяносто')
hundreds_str = ('', 'сто', 'двести', 'триста', 'четыреста', 'пятьсот',
'шестьсот', 'семьсот', 'восемьсот', 'девятьсот')
digit_str = str(digit)
digit_len = len(digit_str)
# Записываем unit
unit = int(digit_str[-1])
# Записываем dozen & hundred
if digit_len == 2: # Два разряда
dozen = int(digit_str[0])
if digit_len == 3: # Три разряда
hundred = int(digit_str[0])
dozen = int(digit_str[1])
# Если это 10-19
if dozen == 1:
# Создаем новый dozen - dozen_ten
dozen_ten = int("%s%s" % (dozen, unit))
if dozen_ten in dozens_ten:
dozen_str = dozens_ten_str[dozens_ten.index(dozen_ten)]
# Если это 20-99
else:
if dozen in units:
dozen_str = dozens_str[units.index(dozen)]
if unit in units and not dozen == 1:
if woman:
unit_str = units_str_woman[units.index(unit)]
else:
unit_str = units_str[units.index(unit)]
if hundred:
if hundred in units:
hundred_str = hundreds_str[units.index(hundred)]
# Пишем результат
for item in (hundred_str, dozen_str, unit_str):
if result and not result.endswith(' '):
result += ' '
result += item
return result.strip()
def analyze_digit(digit, end:tuple = ('тысяча', 'тысяч', 'тысячи')):
"""Пишет прописью с нужным окончанием слово (день, год, месяц)
digit = цифра, от которой зависит окончание
end = варианты окончаний:
1 миллион, 10 миллионов, 2 миллиона
(u"год", u"лет", u"года")
(u"месяц", u"месяцев", u"месяца")
(u"день", u"дней", u"дня")"""
result = ''
try:
digit = int(digit)
except ValueError:
return result
if not end:
return result
digit_str = str(digit)
digit_str = int(digit_str[-1])
# ---------------------------------------
# На окончания влияет 2 последних символа
# ---------------------------------------
if digit_str == 0 or digit_str >=5:
result = end[1] # тысяч
if digit_str == 1:
result = end[0] # тысяча
if digit_str > 1 and digit_str < 5:
result = end[2] # тысячи
if digit > 9:
digit_str2 = int(str(digit)[-2])
if digit_str2 == 1:
result = end[1] # тысяч
return result
def summa_format(summa):
"""Деньга с пробелами через 3 знака с конца
:param summa: число, которое будем разбивать
"""
if not summa:
return summa
summa_str = str(summa)
if ',' in summa_str:
summa_str = summa_str.replace(',', '.')
rub, kop = summa_str, 0
if '.' in summa_str:
rub, kop = summa_str.split('.')
summa_tmp = ''
summa_len = len(rub)
zero_kop = kop
try:
kop = int(kop)
except ValueError:
kop = 0
if summa_len <= 3:
if kop > 0:
return summa_str
return rub
else:
for i in range(summa_len):
if i > 0 and i % 3 == 0:
summa_tmp = ' ' + summa_tmp
summa_tmp = rub[summa_len-i-1] + summa_tmp
summa = summa_tmp
if kop > 0:
summa = '%s.%s' % (summa, zero_kop)
return summa
def ip2long(ip):
"""Преобразуем ip-адрес в число
:param ip: ip адрес
:return: число
"""
#>>> o = map(int, "146.0.238.42".split("."))
#[146, 0, 238, 42]
#>>> res = (16777216 * o[0]) + (65536 * o[1]) + (256 * o[2]) + o[3];
#2449534506
result = None
search_ip = rega_ip.match(ip)
if search_ip:
a, b, c, d = search_ip.group(1), search_ip.group(2), search_ip.group(3), search_ip.group(4)
### << Binary Left Shift
### The left operands value is moved left by the number of bits specified by the right operand. a << = 240 (means 1111 0000)
### >> Binary Right Shift
### The left operands value is moved right by the number of bits specified by the right operand. a >> = 15 (means 0000 1111)
result = (int(a) << 24) + (int(b) << 16) + (int(c) << 8) + int(d)
return result
def date_translater(date):
"""Пишет прописью в нужном падеже дату (день, год, месяц)
date = {"years":years, "months":months, "days":days}"""
period_digit = None
result = ''
if not date:
return result
if "years" in date:
period_digit = date['years']
period_padej = ('год', 'лет', 'года')
if "months" in date:
period_digit = date['months']
period_padej = (u"месяц", u"месяцев", u"месяца")
if "days" in date:
period_digit = date['days']
period_padej = (u"день", u"дней", u"дня")
if not period_digit:
return result
result = '%s ' % (period_digit, )
result += analyze_digit(period_digit, period_padej)
return result
def punto(text: str, direction: str = 'eng2rus'):
"""Перевод английской раскладки в русские буквы,
например, для ввода паролей по логину
:param text: текст для изменения
:param direction: тип для изменения
"""
if not text:
return text
text = '%s' % text
eng = ('q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p', '[', ']',
'a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l', ';', '\'', '\\',
'z', 'x', 'c', 'v', 'b', 'n', 'm', ',', '.', '/')
rus = ('й', 'ц', 'у', 'к', 'е', 'н', 'г', 'ш', 'щ', 'з', 'х', 'ъ',
'ф', 'ы', 'в', 'а', 'п', 'р', 'о', 'л', 'д', 'ж', 'э', 'ё',
'я', 'ч', 'с', 'м', 'и', 'т', 'ь', 'б', 'ю', '/')
def get_letter(letter: str):
"""Возвращаем букву после преобразований
:param letter: буква до преобразований
"""
if direction == 'eng2rus':
source = eng
dest = rus
elif direction == 'rus2eng':
source = rus
dest = eng
ind = None
if letter in source:
ind = source.index(letter)
return dest[ind]
return letter
result = ''
for letter in text:
result += get_letter(letter)
return result
def convert2camelcase(text: str, capitalize_flag: bool = True):
"""Преобразовываем в camelCase
:param text: исходный текст
:param capitalize_flag: нужно ли делать первую букву заглавной
"""
if not text:
return text
if '_' not in text:
return text
new_text = []
flag_underline = False
for i, letter in enumerate(text):
if i == 0:
letter = letter.upper()
if letter == '_':
flag_underline = True
continue
if flag_underline:
flag_underline = False
new_text.append(letter.upper())
else:
new_text.append(letter)
return ''.join(new_text)
def convert2snakecase(text: str):
"""Преобразовываем в snake_case
:param text: исходный текст
"""
if not text:
return text
text_len = len(text)
new_text = []
for i, letter in enumerate(text):
if letter.isupper():
# Если после этой буквы идет нижний регистр,
# тогда нужно подчеркивание
next_index_exists = i + 1 < text_len
if next_index_exists and text[i+1].islower():
new_text.append('_')
new_text.append(letter.lower())
continue
new_text.append(letter)
return ''.join(new_text)
| ой в punycode
| identifier_name |
main.rs | #[macro_use]
extern crate diesel;
pub mod models;
pub mod schema;
pub mod error;
use self::models::*;
use self::error::{
Error as MCWhitelistError,
WhitelistErrorKind,
};
use diesel::{
mysql::MysqlConnection,
prelude::*,
r2d2::{
ConnectionManager,
Pool,
},
result::{
Error as DieselError,
DatabaseErrorKind
},
RunQueryDsl,
};
use dotenv::dotenv;
use retry::{delay::Fixed, retry, OperationResult};
use serde_json::json;
use lazy_static::lazy_static;
use serenity::{
client::Client,
framework::standard::{
macros::{command, group},
Args, CommandResult, StandardFramework,
},
model::{channel::Message, guild::Member, id::GuildId, user::User},
prelude::{Context, EventHandler},
};
use std::{env, fs::File, vec};
use url::Url;
group!({
name: "general",
options: {},
commands: [
mclink,
unlink
],
});
const MOJANG_GET_HISTORY: &str = "https://api.mojang.com/user/profiles/";
const MOJANG_GET_UUID: &str = "https://api.mojang.com/profiles/minecraft";
struct Handler;
impl EventHandler for Handler {
fn guild_member_removal(&self, _ctx: Context, guild: GuildId, user: User, _member_data_if_available: Option<Member>) {
let discord_vals: DiscordConfig = get_config().discord;
if &discord_vals.guild_id == guild.as_u64() {
println!("{} is leaving Mooncord", user.name);
rem_account(*user.id.as_u64());
}
}
}
lazy_static! {
static ref POOL: Pool<ConnectionManager<MysqlConnection>> = establish_connection();
}
fn issue_cmd(conn: &mut rcon::Connection, cmd: &str) -> OperationResult<String, String> {
match conn.cmd(cmd) {
Ok(val) => {
println!("{}", val);
OperationResult::Ok(val)
}
Err(why) => {
println!("RCON Failure: {:?}", why);
OperationResult::Retry(format!("{:?}", why))
}
}
}
fn establish_connection() -> Pool<ConnectionManager<MysqlConnection>> {
dotenv().ok();
let db_url = env::var("DATABASE_URL").expect("DATABASE_URL env var must be set");
let manager = ConnectionManager::<MysqlConnection>::new(db_url);
Pool::builder()
.build(manager)
.expect("Failed to create pool")
}
fn get_config() -> ConfigSchema {
let f = File::open("./config.yaml").unwrap();
serde_yaml::from_reader(&f).unwrap()
}
fn main() {
let discord_vals: DiscordConfig = get_config().discord;
// Bot login
let mut client: Client =
Client::new(&discord_vals.token, Handler).expect("Error creating client");
client.with_framework(
StandardFramework::new()
.configure(|c| c.prefix("!"))
.group(&GENERAL_GROUP),
);
// Start listening for events, single shard. Shouldn't need more than one shard
if let Err(why) = client.start() {
println!("An error occurred while running the client: {:?}", why);
}
}
fn add_accounts(discordid: u64, mc_user: &MinecraftUser) -> QueryResult<usize> {
use self::schema::minecrafters;
let connection;
let conn_res = POOL.get();
if conn_res.is_err() {
let msg = "Unable to connect to the MySQL server";
return Err(DieselError::DatabaseError(DatabaseErrorKind::UnableToSendCommand, Box::new(msg.to_string())))
}
connection = conn_res.unwrap();
let mcid = &mc_user.id;
let mcname = &mc_user.name;
let new_user = NewMinecraftUser {
discord_id: discordid,
minecraft_uuid: mcid.to_string(),
minecraft_name: mcname.to_string(),
};
let res = diesel::insert_into(minecrafters::table)
.values(&new_user)
.execute(&connection);
res
}
fn whitelist_account(mc_user: &MinecraftUser, towhitelist: bool) -> Result<(), MCWhitelistError> {
let mc_servers: Vec<MinecraftServerIdentity> = get_config().minecraft.servers;
for server in &mc_servers {
let act: String = format!("{}", if towhitelist { "add" } else { "remove" });
let address: String = format!("{}:{}", &server.ip, &server.port);
let cmd: String = format!("whitelist {} {}", act, mc_user.name);
let res = retry(Fixed::from_millis(2000).take(10), || {
match rcon::Connection::connect(&address, &server.pass) {
Ok(mut val) => issue_cmd(&mut val, &cmd),
Err(why) => {
println!("Error connecting to server: {:?}", why);
OperationResult::Retry(format!("{:?}", why))
}
}
});
let err_msg;
let err_kind;
let non_existing = "That player does not exist";
match res {
Ok(msg) => {
if msg != non_existing {
continue;
}
err_msg = "Tried to unwhitelist unexisting player";
err_kind = WhitelistErrorKind::NonExistingPlayer;
}
Err(_) => {
err_msg = "RCON Connection error";
err_kind = WhitelistErrorKind::RCONConnectionError;
}
}
return Err(MCWhitelistError::WhitelistError(err_kind, Box::new(err_msg.to_string())))
}
Ok(())
}
fn sel_mc_account(_discord_id: u64) -> Option<MinecraftUser> {
use self::schema::minecrafters::dsl::*;
let connection = POOL.get().unwrap();
let res = minecrafters.filter(discord_id.eq(_discord_id))
.load::<FullMCUser>(&connection)
.expect("Error loading minecraft user");
if res.len() < 1 {
println!("[WARN] NO PLAYER FOUND BY DISCORD ID");
return None
}
let mcid = &res[0].minecraft_uuid;
let mcname = &res[0].minecraft_name;
let mc_user = MinecraftUser {
id: mcid.to_string(),
name: mcname.to_string(),
};
Some(mc_user)
}
fn rem_account(_discord_id: u64) -> bool {
use self::schema::minecrafters::dsl::*;
// Retrieve MC account for whitelist removal
let user: Option<MinecraftUser> = sel_mc_account(_discord_id);
if user.is_none() {
// User was never whitelisted or manually removed
return false;
}
// Overwrite with val
let user: &MinecraftUser = &user.unwrap();
// Attempt whitelist removal, if result is name not exist get uuid history
let res = whitelist_account(&MinecraftUser {
id: user.id.to_string(),
name: user.name.to_string(),
}, false);
match res {
Err(MCWhitelistError::WhitelistError(WhitelistErrorKind::NonExistingPlayer, _)) => {
println!("[Log] Performing deep search to remove player from whitelist");
let uuid_history: Option<Vec<MinecraftUsernameHistory>> = get_mc_uuid_history(&user.id);
if uuid_history.is_none() {
println!("[WARN] NO UUID HISTORY FOUND");
return false;
}
// Another overwrite
let uuid_history: Vec<MinecraftUsernameHistory> = uuid_history.unwrap();
// Get last value in list, assumed newest username
let new_name: &MinecraftUsernameHistory = uuid_history.last().unwrap();
// Get UUID from new user
let new_uuid: Option<Vec<MinecraftUser>> = get_mc_uuid(&new_name.name);
if new_uuid.is_none() {
println!("[WARN] UUID NOT FOUND");
return false;
}
let new_uuid: &MinecraftUser = &new_uuid.unwrap()[0];
// Issue whitelist removal command
let retry_res = whitelist_account(&new_uuid, false);
match retry_res {
Ok(()) => { }
Err(_) => {
println!("[WARN] FAILED TO REMOVE PLAYER FROM WHITELIST!");
return false;
}
}
}
_ => { }
}
let connection = POOL.get().unwrap();
let num_del = diesel::delete(minecrafters.filter(discord_id.eq(_discord_id)))
.execute(&connection)
.expect("Error deleting user by discord id");
num_del > 0
}
fn get_mc_uuid_history(uuid: &str) -> Option<Vec<MinecraftUsernameHistory>> {
let client = reqwest::Client::new();
// Will panic if cannot connect to Mojang
let address: Url = Url::parse(&format!("{}/{}/names", MOJANG_GET_HISTORY, uuid)).unwrap();
let resp = client.get(address).send();
match resp {
Ok(mut val) => Some(serde_json::from_str(&val.text().unwrap()).unwrap()),
Err(why) => {
println!("Error retrieving profile: {:?}", why);
None
}
}
}
fn get_mc_uuid(username: &str) -> Option<Vec<MinecraftUser>> {
let client = reqwest::Client::new();
let payload = json!([&username]);
println!("{:#?}", payload);
// Will panic if cannot connect to Mojang
let resp = client.post(MOJANG_GET_UUID).json(&payload).send();
match resp {
Ok(mut val) => Some(serde_json::from_str(&val.text().unwrap()).unwrap()),
Err(why) => {
println!("Error retrieving profile: {:?}", why);
None
}
}
}
#[command]
fn unlink(ctx: &mut Context, msg: &Message, _args: Args) -> CommandResult {
let discord_vals: DiscordConfig = get_config().discord;
// Check if channel is subscriber channel (and not a direct message)
if &discord_vals.channel_id == msg.channel_id.as_u64() {
msg.channel_id.broadcast_typing(&ctx)?;
let mut response = "Your Minecraft account has been unlinked successfully.";
let success = rem_account(*msg.author.id.as_u64());
if !success {
response = "You were never whitelisted or there was an error trying to remove you from the whitelist.";
}
msg.reply(
&ctx,
response.to_string(),
)?;
}
Ok(())
}
#[command]
fn mclink(ctx: &mut Context, msg: &Message, mut args: Args) -> CommandResult {
let discord_vals: DiscordConfig = get_config().discord;
let sender_id = *msg.author.id.as_u64();
// Check if channel is minecraft whitelisting channel (and not a direct message)
if &discord_vals.channel_id != msg.channel_id.as_u64() {
return Ok(());
}
// User did not reply with their Minecraft name
if args.is_empty() {
msg.reply(
&ctx,
"Please send me your Minecraft: Java Edition username.\nExample: `!mclink TheDunkel`".to_string(),
)?;
return Ok(());
}
let existing_user = sel_mc_account(sender_id);
if existing_user.is_some() {
msg.reply(
&ctx,
"You have already linked your account.\nYou may only have one linked account at a time.\nTo unlink, please type `!unlink`".to_string(),
)?;
return Ok(());
}
// Retrieve the user's current MC UUID
let json: Option<Vec<MinecraftUser>> = get_mc_uuid(&args.single::<String>().unwrap());
// If resulting array is empty, then username is not found
if json.is_none() {
msg.reply(
&ctx,
"Username not found. Windows 10, Mobile, and Console Editions cannot join.",
)?;
return Ok(());
}
// Overwrite json removing the Some()
let json: Vec<MinecraftUser> = json.unwrap();
let mut response = "There was a system issue linking your profile. Please try again later.";
// Refer to add_account function, act accordingly
let ret_val = add_accounts(sender_id, &json[0]);
match ret_val {
Ok(1) => {
// Issue requests to servers to whitelist
let ret = whitelist_account(&json[0], true);
match ret {
Ok(()) => {
let sender_data: Option<Member> = msg.member(&ctx.cache);
if sender_data.is_some() {
msg.author.direct_message(&ctx, |m| {
// IGNORE THIS I DON'T WANT TO USE THIS RESULT
m.content(format!(
"Your Minecraft account `{}` has been successfully linked.
Please check #minecraft channel pins for server details and FAQ.
**If you leave Mooncord for any reason, you will be removed from the whitelist**",
json[0].name
))
})?;
}
return Ok(())
}
Err(_) => {
response = "Unable to contact one or more game servers. Please try again later.";
rem_account(sender_id);
}
}
}
Err(DieselError::DatabaseError(e, info)) => {
let msg = info.message().to_string();
println!("{}", msg);
match e {
DatabaseErrorKind::UniqueViolation => |
DatabaseErrorKind::UnableToSendCommand => {
response = "Unable to contact MySQL server. Please try again later.";
}
_ => { }
};
}
_ => { }
};
msg.reply(
&ctx,
response.to_string(),
)?;
Ok(())
}
| {
// whack
if msg.contains("discord_id") {
response = "You have already linked your account.\nYou may only have one linked account at a time.\nTo unlink, please type `!unlink`";
} else if msg.contains("minecraft_uuid") {
response = "Somebody has linked this Minecraft account already.\nPlease contact Dunkel#0001 for assistance.";
}
} | conditional_block |
main.rs | #[macro_use]
extern crate diesel;
pub mod models;
pub mod schema;
pub mod error;
use self::models::*;
use self::error::{
Error as MCWhitelistError,
WhitelistErrorKind,
};
use diesel::{
mysql::MysqlConnection,
prelude::*,
r2d2::{
ConnectionManager,
Pool,
},
result::{
Error as DieselError,
DatabaseErrorKind
},
RunQueryDsl,
};
use dotenv::dotenv;
use retry::{delay::Fixed, retry, OperationResult};
use serde_json::json;
use lazy_static::lazy_static;
use serenity::{
client::Client,
framework::standard::{
macros::{command, group},
Args, CommandResult, StandardFramework,
},
model::{channel::Message, guild::Member, id::GuildId, user::User},
prelude::{Context, EventHandler},
};
use std::{env, fs::File, vec};
use url::Url;
group!({
name: "general",
options: {},
commands: [
mclink,
unlink
],
});
const MOJANG_GET_HISTORY: &str = "https://api.mojang.com/user/profiles/";
const MOJANG_GET_UUID: &str = "https://api.mojang.com/profiles/minecraft";
struct Handler;
impl EventHandler for Handler {
fn guild_member_removal(&self, _ctx: Context, guild: GuildId, user: User, _member_data_if_available: Option<Member>) {
let discord_vals: DiscordConfig = get_config().discord;
if &discord_vals.guild_id == guild.as_u64() {
println!("{} is leaving Mooncord", user.name);
rem_account(*user.id.as_u64());
}
}
}
lazy_static! {
static ref POOL: Pool<ConnectionManager<MysqlConnection>> = establish_connection();
}
fn issue_cmd(conn: &mut rcon::Connection, cmd: &str) -> OperationResult<String, String> {
match conn.cmd(cmd) {
Ok(val) => {
println!("{}", val);
OperationResult::Ok(val)
}
Err(why) => {
println!("RCON Failure: {:?}", why);
OperationResult::Retry(format!("{:?}", why))
}
}
}
fn establish_connection() -> Pool<ConnectionManager<MysqlConnection>> {
dotenv().ok();
let db_url = env::var("DATABASE_URL").expect("DATABASE_URL env var must be set");
let manager = ConnectionManager::<MysqlConnection>::new(db_url);
Pool::builder()
.build(manager)
.expect("Failed to create pool")
}
fn get_config() -> ConfigSchema {
let f = File::open("./config.yaml").unwrap();
serde_yaml::from_reader(&f).unwrap()
}
fn main() {
let discord_vals: DiscordConfig = get_config().discord;
// Bot login
let mut client: Client =
Client::new(&discord_vals.token, Handler).expect("Error creating client");
client.with_framework(
StandardFramework::new()
.configure(|c| c.prefix("!"))
.group(&GENERAL_GROUP),
);
// Start listening for events, single shard. Shouldn't need more than one shard
if let Err(why) = client.start() {
println!("An error occurred while running the client: {:?}", why);
}
}
fn add_accounts(discordid: u64, mc_user: &MinecraftUser) -> QueryResult<usize> {
use self::schema::minecrafters;
let connection;
let conn_res = POOL.get();
if conn_res.is_err() {
let msg = "Unable to connect to the MySQL server";
return Err(DieselError::DatabaseError(DatabaseErrorKind::UnableToSendCommand, Box::new(msg.to_string())))
}
connection = conn_res.unwrap();
let mcid = &mc_user.id;
let mcname = &mc_user.name;
let new_user = NewMinecraftUser {
discord_id: discordid,
minecraft_uuid: mcid.to_string(),
minecraft_name: mcname.to_string(),
};
let res = diesel::insert_into(minecrafters::table)
.values(&new_user)
.execute(&connection);
res
}
fn whitelist_account(mc_user: &MinecraftUser, towhitelist: bool) -> Result<(), MCWhitelistError> {
let mc_servers: Vec<MinecraftServerIdentity> = get_config().minecraft.servers;
for server in &mc_servers {
let act: String = format!("{}", if towhitelist { "add" } else { "remove" });
let address: String = format!("{}:{}", &server.ip, &server.port);
let cmd: String = format!("whitelist {} {}", act, mc_user.name);
let res = retry(Fixed::from_millis(2000).take(10), || {
match rcon::Connection::connect(&address, &server.pass) {
Ok(mut val) => issue_cmd(&mut val, &cmd),
Err(why) => {
println!("Error connecting to server: {:?}", why);
OperationResult::Retry(format!("{:?}", why))
}
}
});
let err_msg;
let err_kind;
let non_existing = "That player does not exist";
match res {
Ok(msg) => {
if msg != non_existing {
continue;
}
err_msg = "Tried to unwhitelist unexisting player";
err_kind = WhitelistErrorKind::NonExistingPlayer;
}
Err(_) => {
err_msg = "RCON Connection error";
err_kind = WhitelistErrorKind::RCONConnectionError;
}
}
return Err(MCWhitelistError::WhitelistError(err_kind, Box::new(err_msg.to_string())))
}
Ok(())
}
fn | (_discord_id: u64) -> Option<MinecraftUser> {
use self::schema::minecrafters::dsl::*;
let connection = POOL.get().unwrap();
let res = minecrafters.filter(discord_id.eq(_discord_id))
.load::<FullMCUser>(&connection)
.expect("Error loading minecraft user");
if res.len() < 1 {
println!("[WARN] NO PLAYER FOUND BY DISCORD ID");
return None
}
let mcid = &res[0].minecraft_uuid;
let mcname = &res[0].minecraft_name;
let mc_user = MinecraftUser {
id: mcid.to_string(),
name: mcname.to_string(),
};
Some(mc_user)
}
fn rem_account(_discord_id: u64) -> bool {
use self::schema::minecrafters::dsl::*;
// Retrieve MC account for whitelist removal
let user: Option<MinecraftUser> = sel_mc_account(_discord_id);
if user.is_none() {
// User was never whitelisted or manually removed
return false;
}
// Overwrite with val
let user: &MinecraftUser = &user.unwrap();
// Attempt whitelist removal, if result is name not exist get uuid history
let res = whitelist_account(&MinecraftUser {
id: user.id.to_string(),
name: user.name.to_string(),
}, false);
match res {
Err(MCWhitelistError::WhitelistError(WhitelistErrorKind::NonExistingPlayer, _)) => {
println!("[Log] Performing deep search to remove player from whitelist");
let uuid_history: Option<Vec<MinecraftUsernameHistory>> = get_mc_uuid_history(&user.id);
if uuid_history.is_none() {
println!("[WARN] NO UUID HISTORY FOUND");
return false;
}
// Another overwrite
let uuid_history: Vec<MinecraftUsernameHistory> = uuid_history.unwrap();
// Get last value in list, assumed newest username
let new_name: &MinecraftUsernameHistory = uuid_history.last().unwrap();
// Get UUID from new user
let new_uuid: Option<Vec<MinecraftUser>> = get_mc_uuid(&new_name.name);
if new_uuid.is_none() {
println!("[WARN] UUID NOT FOUND");
return false;
}
let new_uuid: &MinecraftUser = &new_uuid.unwrap()[0];
// Issue whitelist removal command
let retry_res = whitelist_account(&new_uuid, false);
match retry_res {
Ok(()) => { }
Err(_) => {
println!("[WARN] FAILED TO REMOVE PLAYER FROM WHITELIST!");
return false;
}
}
}
_ => { }
}
let connection = POOL.get().unwrap();
let num_del = diesel::delete(minecrafters.filter(discord_id.eq(_discord_id)))
.execute(&connection)
.expect("Error deleting user by discord id");
num_del > 0
}
fn get_mc_uuid_history(uuid: &str) -> Option<Vec<MinecraftUsernameHistory>> {
let client = reqwest::Client::new();
// Will panic if cannot connect to Mojang
let address: Url = Url::parse(&format!("{}/{}/names", MOJANG_GET_HISTORY, uuid)).unwrap();
let resp = client.get(address).send();
match resp {
Ok(mut val) => Some(serde_json::from_str(&val.text().unwrap()).unwrap()),
Err(why) => {
println!("Error retrieving profile: {:?}", why);
None
}
}
}
fn get_mc_uuid(username: &str) -> Option<Vec<MinecraftUser>> {
let client = reqwest::Client::new();
let payload = json!([&username]);
println!("{:#?}", payload);
// Will panic if cannot connect to Mojang
let resp = client.post(MOJANG_GET_UUID).json(&payload).send();
match resp {
Ok(mut val) => Some(serde_json::from_str(&val.text().unwrap()).unwrap()),
Err(why) => {
println!("Error retrieving profile: {:?}", why);
None
}
}
}
#[command]
fn unlink(ctx: &mut Context, msg: &Message, _args: Args) -> CommandResult {
let discord_vals: DiscordConfig = get_config().discord;
// Check if channel is subscriber channel (and not a direct message)
if &discord_vals.channel_id == msg.channel_id.as_u64() {
msg.channel_id.broadcast_typing(&ctx)?;
let mut response = "Your Minecraft account has been unlinked successfully.";
let success = rem_account(*msg.author.id.as_u64());
if !success {
response = "You were never whitelisted or there was an error trying to remove you from the whitelist.";
}
msg.reply(
&ctx,
response.to_string(),
)?;
}
Ok(())
}
#[command]
fn mclink(ctx: &mut Context, msg: &Message, mut args: Args) -> CommandResult {
let discord_vals: DiscordConfig = get_config().discord;
let sender_id = *msg.author.id.as_u64();
// Check if channel is minecraft whitelisting channel (and not a direct message)
if &discord_vals.channel_id != msg.channel_id.as_u64() {
return Ok(());
}
// User did not reply with their Minecraft name
if args.is_empty() {
msg.reply(
&ctx,
"Please send me your Minecraft: Java Edition username.\nExample: `!mclink TheDunkel`".to_string(),
)?;
return Ok(());
}
let existing_user = sel_mc_account(sender_id);
if existing_user.is_some() {
msg.reply(
&ctx,
"You have already linked your account.\nYou may only have one linked account at a time.\nTo unlink, please type `!unlink`".to_string(),
)?;
return Ok(());
}
// Retrieve the user's current MC UUID
let json: Option<Vec<MinecraftUser>> = get_mc_uuid(&args.single::<String>().unwrap());
// If resulting array is empty, then username is not found
if json.is_none() {
msg.reply(
&ctx,
"Username not found. Windows 10, Mobile, and Console Editions cannot join.",
)?;
return Ok(());
}
// Overwrite json removing the Some()
let json: Vec<MinecraftUser> = json.unwrap();
let mut response = "There was a system issue linking your profile. Please try again later.";
// Refer to add_account function, act accordingly
let ret_val = add_accounts(sender_id, &json[0]);
match ret_val {
Ok(1) => {
// Issue requests to servers to whitelist
let ret = whitelist_account(&json[0], true);
match ret {
Ok(()) => {
let sender_data: Option<Member> = msg.member(&ctx.cache);
if sender_data.is_some() {
msg.author.direct_message(&ctx, |m| {
// IGNORE THIS I DON'T WANT TO USE THIS RESULT
m.content(format!(
"Your Minecraft account `{}` has been successfully linked.
Please check #minecraft channel pins for server details and FAQ.
**If you leave Mooncord for any reason, you will be removed from the whitelist**",
json[0].name
))
})?;
}
return Ok(())
}
Err(_) => {
response = "Unable to contact one or more game servers. Please try again later.";
rem_account(sender_id);
}
}
}
Err(DieselError::DatabaseError(e, info)) => {
let msg = info.message().to_string();
println!("{}", msg);
match e {
DatabaseErrorKind::UniqueViolation => {
// whack
if msg.contains("discord_id") {
response = "You have already linked your account.\nYou may only have one linked account at a time.\nTo unlink, please type `!unlink`";
} else if msg.contains("minecraft_uuid") {
response = "Somebody has linked this Minecraft account already.\nPlease contact Dunkel#0001 for assistance.";
}
}
DatabaseErrorKind::UnableToSendCommand => {
response = "Unable to contact MySQL server. Please try again later.";
}
_ => { }
};
}
_ => { }
};
msg.reply(
&ctx,
response.to_string(),
)?;
Ok(())
}
| sel_mc_account | identifier_name |
main.rs | #[macro_use]
extern crate diesel;
pub mod models;
pub mod schema;
pub mod error;
use self::models::*;
use self::error::{
Error as MCWhitelistError,
WhitelistErrorKind,
};
use diesel::{
mysql::MysqlConnection,
prelude::*,
r2d2::{
ConnectionManager,
Pool,
},
result::{
Error as DieselError,
DatabaseErrorKind
},
RunQueryDsl,
};
use dotenv::dotenv;
use retry::{delay::Fixed, retry, OperationResult};
use serde_json::json;
use lazy_static::lazy_static;
use serenity::{
client::Client,
framework::standard::{
macros::{command, group},
Args, CommandResult, StandardFramework,
},
model::{channel::Message, guild::Member, id::GuildId, user::User},
prelude::{Context, EventHandler},
};
use std::{env, fs::File, vec};
use url::Url;
group!({
name: "general",
options: {},
commands: [
mclink,
unlink
],
});
const MOJANG_GET_HISTORY: &str = "https://api.mojang.com/user/profiles/";
const MOJANG_GET_UUID: &str = "https://api.mojang.com/profiles/minecraft";
struct Handler;
impl EventHandler for Handler {
fn guild_member_removal(&self, _ctx: Context, guild: GuildId, user: User, _member_data_if_available: Option<Member>) {
let discord_vals: DiscordConfig = get_config().discord;
if &discord_vals.guild_id == guild.as_u64() {
println!("{} is leaving Mooncord", user.name);
rem_account(*user.id.as_u64());
}
}
}
lazy_static! {
static ref POOL: Pool<ConnectionManager<MysqlConnection>> = establish_connection();
}
fn issue_cmd(conn: &mut rcon::Connection, cmd: &str) -> OperationResult<String, String> {
match conn.cmd(cmd) {
Ok(val) => {
println!("{}", val);
OperationResult::Ok(val)
}
Err(why) => {
println!("RCON Failure: {:?}", why);
OperationResult::Retry(format!("{:?}", why))
}
}
}
fn establish_connection() -> Pool<ConnectionManager<MysqlConnection>> {
dotenv().ok();
let db_url = env::var("DATABASE_URL").expect("DATABASE_URL env var must be set");
let manager = ConnectionManager::<MysqlConnection>::new(db_url);
Pool::builder()
.build(manager)
.expect("Failed to create pool")
}
fn get_config() -> ConfigSchema {
let f = File::open("./config.yaml").unwrap();
serde_yaml::from_reader(&f).unwrap()
}
fn main() {
let discord_vals: DiscordConfig = get_config().discord;
// Bot login
let mut client: Client =
Client::new(&discord_vals.token, Handler).expect("Error creating client");
| );
// Start listening for events, single shard. Shouldn't need more than one shard
if let Err(why) = client.start() {
println!("An error occurred while running the client: {:?}", why);
}
}
fn add_accounts(discordid: u64, mc_user: &MinecraftUser) -> QueryResult<usize> {
use self::schema::minecrafters;
let connection;
let conn_res = POOL.get();
if conn_res.is_err() {
let msg = "Unable to connect to the MySQL server";
return Err(DieselError::DatabaseError(DatabaseErrorKind::UnableToSendCommand, Box::new(msg.to_string())))
}
connection = conn_res.unwrap();
let mcid = &mc_user.id;
let mcname = &mc_user.name;
let new_user = NewMinecraftUser {
discord_id: discordid,
minecraft_uuid: mcid.to_string(),
minecraft_name: mcname.to_string(),
};
let res = diesel::insert_into(minecrafters::table)
.values(&new_user)
.execute(&connection);
res
}
fn whitelist_account(mc_user: &MinecraftUser, towhitelist: bool) -> Result<(), MCWhitelistError> {
let mc_servers: Vec<MinecraftServerIdentity> = get_config().minecraft.servers;
for server in &mc_servers {
let act: String = format!("{}", if towhitelist { "add" } else { "remove" });
let address: String = format!("{}:{}", &server.ip, &server.port);
let cmd: String = format!("whitelist {} {}", act, mc_user.name);
let res = retry(Fixed::from_millis(2000).take(10), || {
match rcon::Connection::connect(&address, &server.pass) {
Ok(mut val) => issue_cmd(&mut val, &cmd),
Err(why) => {
println!("Error connecting to server: {:?}", why);
OperationResult::Retry(format!("{:?}", why))
}
}
});
let err_msg;
let err_kind;
let non_existing = "That player does not exist";
match res {
Ok(msg) => {
if msg != non_existing {
continue;
}
err_msg = "Tried to unwhitelist unexisting player";
err_kind = WhitelistErrorKind::NonExistingPlayer;
}
Err(_) => {
err_msg = "RCON Connection error";
err_kind = WhitelistErrorKind::RCONConnectionError;
}
}
return Err(MCWhitelistError::WhitelistError(err_kind, Box::new(err_msg.to_string())))
}
Ok(())
}
fn sel_mc_account(_discord_id: u64) -> Option<MinecraftUser> {
use self::schema::minecrafters::dsl::*;
let connection = POOL.get().unwrap();
let res = minecrafters.filter(discord_id.eq(_discord_id))
.load::<FullMCUser>(&connection)
.expect("Error loading minecraft user");
if res.len() < 1 {
println!("[WARN] NO PLAYER FOUND BY DISCORD ID");
return None
}
let mcid = &res[0].minecraft_uuid;
let mcname = &res[0].minecraft_name;
let mc_user = MinecraftUser {
id: mcid.to_string(),
name: mcname.to_string(),
};
Some(mc_user)
}
fn rem_account(_discord_id: u64) -> bool {
use self::schema::minecrafters::dsl::*;
// Retrieve MC account for whitelist removal
let user: Option<MinecraftUser> = sel_mc_account(_discord_id);
if user.is_none() {
// User was never whitelisted or manually removed
return false;
}
// Overwrite with val
let user: &MinecraftUser = &user.unwrap();
// Attempt whitelist removal, if result is name not exist get uuid history
let res = whitelist_account(&MinecraftUser {
id: user.id.to_string(),
name: user.name.to_string(),
}, false);
match res {
Err(MCWhitelistError::WhitelistError(WhitelistErrorKind::NonExistingPlayer, _)) => {
println!("[Log] Performing deep search to remove player from whitelist");
let uuid_history: Option<Vec<MinecraftUsernameHistory>> = get_mc_uuid_history(&user.id);
if uuid_history.is_none() {
println!("[WARN] NO UUID HISTORY FOUND");
return false;
}
// Another overwrite
let uuid_history: Vec<MinecraftUsernameHistory> = uuid_history.unwrap();
// Get last value in list, assumed newest username
let new_name: &MinecraftUsernameHistory = uuid_history.last().unwrap();
// Get UUID from new user
let new_uuid: Option<Vec<MinecraftUser>> = get_mc_uuid(&new_name.name);
if new_uuid.is_none() {
println!("[WARN] UUID NOT FOUND");
return false;
}
let new_uuid: &MinecraftUser = &new_uuid.unwrap()[0];
// Issue whitelist removal command
let retry_res = whitelist_account(&new_uuid, false);
match retry_res {
Ok(()) => { }
Err(_) => {
println!("[WARN] FAILED TO REMOVE PLAYER FROM WHITELIST!");
return false;
}
}
}
_ => { }
}
let connection = POOL.get().unwrap();
let num_del = diesel::delete(minecrafters.filter(discord_id.eq(_discord_id)))
.execute(&connection)
.expect("Error deleting user by discord id");
num_del > 0
}
fn get_mc_uuid_history(uuid: &str) -> Option<Vec<MinecraftUsernameHistory>> {
let client = reqwest::Client::new();
// Will panic if cannot connect to Mojang
let address: Url = Url::parse(&format!("{}/{}/names", MOJANG_GET_HISTORY, uuid)).unwrap();
let resp = client.get(address).send();
match resp {
Ok(mut val) => Some(serde_json::from_str(&val.text().unwrap()).unwrap()),
Err(why) => {
println!("Error retrieving profile: {:?}", why);
None
}
}
}
fn get_mc_uuid(username: &str) -> Option<Vec<MinecraftUser>> {
let client = reqwest::Client::new();
let payload = json!([&username]);
println!("{:#?}", payload);
// Will panic if cannot connect to Mojang
let resp = client.post(MOJANG_GET_UUID).json(&payload).send();
match resp {
Ok(mut val) => Some(serde_json::from_str(&val.text().unwrap()).unwrap()),
Err(why) => {
println!("Error retrieving profile: {:?}", why);
None
}
}
}
#[command]
fn unlink(ctx: &mut Context, msg: &Message, _args: Args) -> CommandResult {
let discord_vals: DiscordConfig = get_config().discord;
// Check if channel is subscriber channel (and not a direct message)
if &discord_vals.channel_id == msg.channel_id.as_u64() {
msg.channel_id.broadcast_typing(&ctx)?;
let mut response = "Your Minecraft account has been unlinked successfully.";
let success = rem_account(*msg.author.id.as_u64());
if !success {
response = "You were never whitelisted or there was an error trying to remove you from the whitelist.";
}
msg.reply(
&ctx,
response.to_string(),
)?;
}
Ok(())
}
#[command]
fn mclink(ctx: &mut Context, msg: &Message, mut args: Args) -> CommandResult {
let discord_vals: DiscordConfig = get_config().discord;
let sender_id = *msg.author.id.as_u64();
// Check if channel is minecraft whitelisting channel (and not a direct message)
if &discord_vals.channel_id != msg.channel_id.as_u64() {
return Ok(());
}
// User did not reply with their Minecraft name
if args.is_empty() {
msg.reply(
&ctx,
"Please send me your Minecraft: Java Edition username.\nExample: `!mclink TheDunkel`".to_string(),
)?;
return Ok(());
}
let existing_user = sel_mc_account(sender_id);
if existing_user.is_some() {
msg.reply(
&ctx,
"You have already linked your account.\nYou may only have one linked account at a time.\nTo unlink, please type `!unlink`".to_string(),
)?;
return Ok(());
}
// Retrieve the user's current MC UUID
let json: Option<Vec<MinecraftUser>> = get_mc_uuid(&args.single::<String>().unwrap());
// If resulting array is empty, then username is not found
if json.is_none() {
msg.reply(
&ctx,
"Username not found. Windows 10, Mobile, and Console Editions cannot join.",
)?;
return Ok(());
}
// Overwrite json removing the Some()
let json: Vec<MinecraftUser> = json.unwrap();
let mut response = "There was a system issue linking your profile. Please try again later.";
// Refer to add_account function, act accordingly
let ret_val = add_accounts(sender_id, &json[0]);
match ret_val {
Ok(1) => {
// Issue requests to servers to whitelist
let ret = whitelist_account(&json[0], true);
match ret {
Ok(()) => {
let sender_data: Option<Member> = msg.member(&ctx.cache);
if sender_data.is_some() {
msg.author.direct_message(&ctx, |m| {
// IGNORE THIS I DON'T WANT TO USE THIS RESULT
m.content(format!(
"Your Minecraft account `{}` has been successfully linked.
Please check #minecraft channel pins for server details and FAQ.
**If you leave Mooncord for any reason, you will be removed from the whitelist**",
json[0].name
))
})?;
}
return Ok(())
}
Err(_) => {
response = "Unable to contact one or more game servers. Please try again later.";
rem_account(sender_id);
}
}
}
Err(DieselError::DatabaseError(e, info)) => {
let msg = info.message().to_string();
println!("{}", msg);
match e {
DatabaseErrorKind::UniqueViolation => {
// whack
if msg.contains("discord_id") {
response = "You have already linked your account.\nYou may only have one linked account at a time.\nTo unlink, please type `!unlink`";
} else if msg.contains("minecraft_uuid") {
response = "Somebody has linked this Minecraft account already.\nPlease contact Dunkel#0001 for assistance.";
}
}
DatabaseErrorKind::UnableToSendCommand => {
response = "Unable to contact MySQL server. Please try again later.";
}
_ => { }
};
}
_ => { }
};
msg.reply(
&ctx,
response.to_string(),
)?;
Ok(())
} | client.with_framework(
StandardFramework::new()
.configure(|c| c.prefix("!"))
.group(&GENERAL_GROUP), | random_line_split |
address_transactions.go | package state
import (
"bufio"
"encoding/binary"
"io"
"os"
"path/filepath"
"runtime/debug"
"github.com/pkg/errors"
"github.com/starius/emsort"
"github.com/wavesplatform/gowaves/pkg/keyvalue"
"github.com/wavesplatform/gowaves/pkg/proto"
"go.uber.org/zap"
)
const (
// AddressID size + length of block num + transaction offset length.
addrTxRecordSize = proto.AddressIDSize + blockNumLen + txMetaSize
maxEmsortMem = 200 * 1024 * 1024 // 200 MiB.
txMetaSize = 8 + 1
)
var (
fileSizeKeyBytes = []byte{txsByAddressesFileSizeKeyPrefix}
)
type txMeta struct {
offset uint64
failed bool
}
func (m *txMeta) bytes() []byte {
buf := make([]byte, txMetaSize)
binary.BigEndian.PutUint64(buf, m.offset)
buf[8] = 0
if m.failed {
buf[8] = 1
}
return buf
}
func (m *txMeta) unmarshal(data []byte) error {
if len(data) < txMetaSize {
return errInvalidDataSize
}
m.offset = binary.BigEndian.Uint64(data)
if data[8] == 1 {
m.failed = true
}
return nil
}
type txIter struct {
rw *blockReadWriter
iter *recordIterator
err error
}
func newTxIter(rw *blockReadWriter, iter *recordIterator) *txIter {
return &txIter{rw: rw, iter: iter}
}
func (i *txIter) Transaction() (proto.Transaction, bool, error) {
value, err := i.iter.currentRecord()
if err != nil {
return nil, false, err
}
var meta txMeta
err = meta.unmarshal(value)
if err != nil {
return nil, false, err
}
tx, err := i.rw.readTransactionByOffset(meta.offset)
if err != nil {
return nil, false, err
}
return tx, meta.failed, nil
}
func (i *txIter) Next() bool {
return i.iter.next()
}
func (i *txIter) Error() error {
if err := i.iter.error(); err != nil {
return err
}
return i.err
}
func (i *txIter) Release() {
i.iter.release()
}
func manageFile(file *os.File, db keyvalue.IterableKeyVal) error {
var properFileSize uint64
fileSizeBytes, err := db.Get(fileSizeKeyBytes)
if err == keyvalue.ErrNotFound {
properFileSize = 0
} else if err == nil {
properFileSize = binary.BigEndian.Uint64(fileSizeBytes)
} else {
return err
}
fileStats, err := os.Stat(file.Name())
if err != nil {
return err
}
size := uint64(fileStats.Size())
if size < properFileSize {
return errors.New("data loss: file size is less than it should be")
} else if size == properFileSize {
return nil
}
if err := file.Truncate(int64(properFileSize)); err != nil {
return err
}
if _, err := file.Seek(int64(properFileSize), 0); err != nil {
return err
}
return nil
}
type addressTransactionsParams struct {
dir string // Directory for address_transactions file.
batchedStorMemLimit int // Maximum size of batchedStor db batch.
batchedStorMaxKeys int // Maximum number of keys per flush().
maxFileSize int64 // Maximum size of address_transactions file.
providesData bool // True if transaction iterators can be used.
}
type addressTransactions struct {
stateDB *stateDB
rw *blockReadWriter
stor *batchedStorage
amend bool
filePath string
addrTransactions *os.File
addrTransactionsBuf *bufio.Writer
params *addressTransactionsParams
}
func newAddressTransactions(
db keyvalue.IterableKeyVal,
stateDB *stateDB,
rw *blockReadWriter,
params *addressTransactionsParams,
amend bool,
) (*addressTransactions, error) {
bsParams := &batchedStorParams{
maxBatchSize: maxTransactionIdsBatchSize,
recordSize: txMetaSize,
prefix: transactionIdsPrefix,
}
filePath := filepath.Join(filepath.Clean(params.dir), "address_transactions")
addrTransactionsFile, _, err := openOrCreateForAppending(filePath)
if err != nil {
return nil, err
}
if err := manageFile(addrTransactionsFile, db); err != nil {
return nil, err
}
stor, err := newBatchedStorage(db, stateDB, bsParams, params.batchedStorMemLimit, params.batchedStorMaxKeys, amend)
if err != nil {
return nil, err
}
atx := &addressTransactions{
stateDB: stateDB,
rw: rw,
stor: stor,
filePath: filePath,
addrTransactions: addrTransactionsFile,
addrTransactionsBuf: bufio.NewWriter(addrTransactionsFile),
params: params,
amend: amend,
}
if params.providesData {
if err := atx.persist(); err != nil {
return nil, errors.Wrap(err, "failed to persist")
}
}
return atx, nil
}
func (at *addressTransactions) saveTxIdByAddress(addr proto.Address, txID []byte, blockID proto.BlockID) error {
if at.rw.offsetLen != 8 {
return errors.New("unsupported meta length")
}
newRecord := make([]byte, addrTxRecordSize)
blockNum, err := at.stateDB.newestBlockIdToNum(blockID)
if err != nil {
return err
}
copy(newRecord[:proto.AddressIDSize], addr.ID().Bytes())
pos := proto.AddressIDSize
info, err := at.rw.newestTransactionInfoByID(txID)
if err != nil {
return err
}
meta := txMeta{info.offset, info.failed}
binary.BigEndian.PutUint32(newRecord[pos:], blockNum)
pos += blockNumLen
copy(newRecord[pos:], meta.bytes())
if at.params.providesData {
return at.stor.addRecordBytes(newRecord[:proto.AddressIDSize], newRecord[proto.AddressIDSize:])
}
if _, err := at.addrTransactionsBuf.Write(newRecord); err != nil {
return err
}
return nil
}
func (at *addressTransactions) newTransactionsByAddrIterator(addr proto.Address) (*txIter, error) {
if !at.params.providesData {
return nil, errors.New("state does not provide transactions by addresses now")
}
key := addr.ID().Bytes()
iter, err := at.stor.newBackwardRecordIterator(key)
if err != nil {
return nil, err
}
return newTxIter(at.rw, iter), nil
}
func (at *addressTransactions) startProvidingData() error {
if at.params.providesData |
if err := at.persist(); err != nil {
return err
}
at.params.providesData = true
return nil
}
func (at *addressTransactions) offsetFromBytes(offsetBytes []byte) uint64 {
return binary.BigEndian.Uint64(offsetBytes)
}
func (at *addressTransactions) handleRecord(record []byte) error {
key := record[:proto.AddressIDSize]
newRecordBytes := record[proto.AddressIDSize:]
lastOffsetBytes, err := at.stor.newestLastRecordByKey(key)
if err == errNotFound {
// The first record for this key.
if err := at.stor.addRecordBytes(key, newRecordBytes); err != nil {
return errors.Wrap(err, "batchedStorage: failed to add record")
}
return nil
} else if err != nil {
return errors.Wrap(err, "newestLastRecordByKey() failed")
}
// Make sure the offset we add is greater than any other offsets
// by comparing it to the last (= maximum) offset.
// This makes adding from file to batchedStorage idempotent.
newOffsetBytes := newRecordBytes[blockNumLen:]
offset := at.offsetFromBytes(newOffsetBytes)
lastOffset := at.offsetFromBytes(lastOffsetBytes)
if lastOffset > at.rw.blockchainLen {
return errors.Errorf("invalid offset in storage: %d, max is: %d", lastOffset, at.rw.blockchainLen)
}
if offset <= lastOffset {
return nil
}
if err := at.stor.addRecordBytes(key, newRecordBytes); err != nil {
return errors.Wrap(err, "batchedStorage: failed to add record")
}
return nil
}
func (at *addressTransactions) shouldPersist() (bool, error) {
fileStats, err := os.Stat(at.filePath)
if err != nil {
return false, err
}
size := fileStats.Size()
zap.S().Debugf("TransactionsByAddresses file size: %d; max is %d", size, at.params.maxFileSize)
return size >= at.params.maxFileSize, nil
}
func (at *addressTransactions) persist() error {
fileStats, err := os.Stat(at.filePath)
if err != nil {
return err
}
size := fileStats.Size()
zap.S().Info("Starting to sort TransactionsByAddresses file, will take awhile...")
debug.FreeOSMemory()
// Create file for emsort and set emsort over it.
tempFile, err := os.CreateTemp(os.TempDir(), "emsort")
if err != nil {
return errors.Wrap(err, "failed to create temp file for emsort")
}
defer func(name string) {
err := os.Remove(name)
if err != nil {
zap.S().Warnf("Failed to remove temporary file: %v", err)
}
}(tempFile.Name())
sort, err := emsort.NewFixedSize(addrTxRecordSize, maxEmsortMem, tempFile)
if err != nil {
return errors.Wrap(err, "emsort.NewFixedSize() failed")
}
// Read records from file and append to emsort.
for readPos := int64(0); readPos < size; readPos += addrTxRecordSize {
record := make([]byte, addrTxRecordSize)
if n, err := at.addrTransactions.ReadAt(record, readPos); err != nil {
return err
} else if n != addrTxRecordSize {
return errors.New("failed to read full record")
}
// Filtering optimization: if all blocks are valid,
// we shouldn't check isValid() on records.
isValid := true
if at.amend {
blockNum := binary.BigEndian.Uint32(record[proto.AddressIDSize : proto.AddressIDSize+4])
isValid, err = at.stateDB.isValidBlock(blockNum)
if err != nil {
return errors.Wrap(err, "isValidBlock() failed")
}
}
if !isValid {
// Invalid record, we should skip it.
continue
}
if err := sort.Push(record); err != nil {
return errors.Wrap(err, "emsort.Push() failed")
}
}
// Tell emsort that we have finished appending records.
if err := sort.StopWriting(); err != nil {
return errors.Wrap(err, "emsort.StopWriting() failed")
}
zap.S().Info("Finished to sort TransactionsByAddresses file")
debug.FreeOSMemory()
zap.S().Info("Writing sorted records to database, will take awhile...")
// Read records from emsort in sorted order and save to batchedStorage.
for {
record, err := sort.Pop()
if err == io.EOF {
// All records were read.
break
} else if err != nil {
return errors.Wrap(err, "emsort.Pop() failed")
}
if err := at.handleRecord(record); err != nil {
return errors.Wrap(err, "failed to add record")
}
}
// Write 0 size to database batch.
// This way 0 size will be written to database together with new records.
// If program crashes after batch is flushed but before we truncate the file,
// next time 0 size will be read and file will be truncated upon next start.
if err := at.saveFileSizeToBatch(at.stor.dbBatch, 0); err != nil {
return errors.Wrap(err, "failed to write file size to db batch")
}
// Flush batchedStorage.
if err := at.stor.flush(); err != nil {
return errors.Wrap(err, "batchedStorage(): failed to flush")
}
// Clear batchedStorage.
at.stor.reset()
// Clear address transactions file.
if err := at.addrTransactions.Truncate(0); err != nil {
return err
}
if _, err := at.addrTransactions.Seek(0, 0); err != nil {
return err
}
at.addrTransactionsBuf.Reset(at.addrTransactions)
zap.S().Info("Successfully finished moving records from file to database")
debug.FreeOSMemory()
return nil
}
func (at *addressTransactions) saveFileSizeToBatch(batch keyvalue.Batch, size uint64) error {
fileSizeBytes := make([]byte, 8)
binary.BigEndian.PutUint64(fileSizeBytes, size)
batch.Put(fileSizeKeyBytes, fileSizeBytes)
return nil
}
func (at *addressTransactions) reset() {
if at.params.providesData {
at.stor.reset()
} else {
at.addrTransactionsBuf.Reset(at.addrTransactions)
}
}
func (at *addressTransactions) flush() error {
if at.params.providesData {
return at.stor.flush()
}
if err := at.addrTransactionsBuf.Flush(); err != nil {
return err
}
if err := at.addrTransactions.Sync(); err != nil {
return err
}
fileStats, err := os.Stat(at.filePath)
if err != nil {
return err
}
size := uint64(fileStats.Size())
if err := at.saveFileSizeToBatch(at.stateDB.dbBatch, size); err != nil {
return err
}
return nil
}
func (at *addressTransactions) providesData() bool {
return at.params.providesData
}
func (at *addressTransactions) close() error {
return at.addrTransactions.Close()
}
| {
// Already provides.
return nil
} | conditional_block |
address_transactions.go | package state
import (
"bufio"
"encoding/binary"
"io"
"os"
"path/filepath"
"runtime/debug"
"github.com/pkg/errors"
"github.com/starius/emsort"
"github.com/wavesplatform/gowaves/pkg/keyvalue"
"github.com/wavesplatform/gowaves/pkg/proto"
"go.uber.org/zap"
)
const (
// AddressID size + length of block num + transaction offset length.
addrTxRecordSize = proto.AddressIDSize + blockNumLen + txMetaSize
|
var (
fileSizeKeyBytes = []byte{txsByAddressesFileSizeKeyPrefix}
)
type txMeta struct {
offset uint64
failed bool
}
func (m *txMeta) bytes() []byte {
buf := make([]byte, txMetaSize)
binary.BigEndian.PutUint64(buf, m.offset)
buf[8] = 0
if m.failed {
buf[8] = 1
}
return buf
}
func (m *txMeta) unmarshal(data []byte) error {
if len(data) < txMetaSize {
return errInvalidDataSize
}
m.offset = binary.BigEndian.Uint64(data)
if data[8] == 1 {
m.failed = true
}
return nil
}
type txIter struct {
rw *blockReadWriter
iter *recordIterator
err error
}
func newTxIter(rw *blockReadWriter, iter *recordIterator) *txIter {
return &txIter{rw: rw, iter: iter}
}
func (i *txIter) Transaction() (proto.Transaction, bool, error) {
value, err := i.iter.currentRecord()
if err != nil {
return nil, false, err
}
var meta txMeta
err = meta.unmarshal(value)
if err != nil {
return nil, false, err
}
tx, err := i.rw.readTransactionByOffset(meta.offset)
if err != nil {
return nil, false, err
}
return tx, meta.failed, nil
}
func (i *txIter) Next() bool {
return i.iter.next()
}
func (i *txIter) Error() error {
if err := i.iter.error(); err != nil {
return err
}
return i.err
}
func (i *txIter) Release() {
i.iter.release()
}
func manageFile(file *os.File, db keyvalue.IterableKeyVal) error {
var properFileSize uint64
fileSizeBytes, err := db.Get(fileSizeKeyBytes)
if err == keyvalue.ErrNotFound {
properFileSize = 0
} else if err == nil {
properFileSize = binary.BigEndian.Uint64(fileSizeBytes)
} else {
return err
}
fileStats, err := os.Stat(file.Name())
if err != nil {
return err
}
size := uint64(fileStats.Size())
if size < properFileSize {
return errors.New("data loss: file size is less than it should be")
} else if size == properFileSize {
return nil
}
if err := file.Truncate(int64(properFileSize)); err != nil {
return err
}
if _, err := file.Seek(int64(properFileSize), 0); err != nil {
return err
}
return nil
}
type addressTransactionsParams struct {
dir string // Directory for address_transactions file.
batchedStorMemLimit int // Maximum size of batchedStor db batch.
batchedStorMaxKeys int // Maximum number of keys per flush().
maxFileSize int64 // Maximum size of address_transactions file.
providesData bool // True if transaction iterators can be used.
}
type addressTransactions struct {
stateDB *stateDB
rw *blockReadWriter
stor *batchedStorage
amend bool
filePath string
addrTransactions *os.File
addrTransactionsBuf *bufio.Writer
params *addressTransactionsParams
}
func newAddressTransactions(
db keyvalue.IterableKeyVal,
stateDB *stateDB,
rw *blockReadWriter,
params *addressTransactionsParams,
amend bool,
) (*addressTransactions, error) {
bsParams := &batchedStorParams{
maxBatchSize: maxTransactionIdsBatchSize,
recordSize: txMetaSize,
prefix: transactionIdsPrefix,
}
filePath := filepath.Join(filepath.Clean(params.dir), "address_transactions")
addrTransactionsFile, _, err := openOrCreateForAppending(filePath)
if err != nil {
return nil, err
}
if err := manageFile(addrTransactionsFile, db); err != nil {
return nil, err
}
stor, err := newBatchedStorage(db, stateDB, bsParams, params.batchedStorMemLimit, params.batchedStorMaxKeys, amend)
if err != nil {
return nil, err
}
atx := &addressTransactions{
stateDB: stateDB,
rw: rw,
stor: stor,
filePath: filePath,
addrTransactions: addrTransactionsFile,
addrTransactionsBuf: bufio.NewWriter(addrTransactionsFile),
params: params,
amend: amend,
}
if params.providesData {
if err := atx.persist(); err != nil {
return nil, errors.Wrap(err, "failed to persist")
}
}
return atx, nil
}
func (at *addressTransactions) saveTxIdByAddress(addr proto.Address, txID []byte, blockID proto.BlockID) error {
if at.rw.offsetLen != 8 {
return errors.New("unsupported meta length")
}
newRecord := make([]byte, addrTxRecordSize)
blockNum, err := at.stateDB.newestBlockIdToNum(blockID)
if err != nil {
return err
}
copy(newRecord[:proto.AddressIDSize], addr.ID().Bytes())
pos := proto.AddressIDSize
info, err := at.rw.newestTransactionInfoByID(txID)
if err != nil {
return err
}
meta := txMeta{info.offset, info.failed}
binary.BigEndian.PutUint32(newRecord[pos:], blockNum)
pos += blockNumLen
copy(newRecord[pos:], meta.bytes())
if at.params.providesData {
return at.stor.addRecordBytes(newRecord[:proto.AddressIDSize], newRecord[proto.AddressIDSize:])
}
if _, err := at.addrTransactionsBuf.Write(newRecord); err != nil {
return err
}
return nil
}
func (at *addressTransactions) newTransactionsByAddrIterator(addr proto.Address) (*txIter, error) {
if !at.params.providesData {
return nil, errors.New("state does not provide transactions by addresses now")
}
key := addr.ID().Bytes()
iter, err := at.stor.newBackwardRecordIterator(key)
if err != nil {
return nil, err
}
return newTxIter(at.rw, iter), nil
}
func (at *addressTransactions) startProvidingData() error {
if at.params.providesData {
// Already provides.
return nil
}
if err := at.persist(); err != nil {
return err
}
at.params.providesData = true
return nil
}
func (at *addressTransactions) offsetFromBytes(offsetBytes []byte) uint64 {
return binary.BigEndian.Uint64(offsetBytes)
}
func (at *addressTransactions) handleRecord(record []byte) error {
key := record[:proto.AddressIDSize]
newRecordBytes := record[proto.AddressIDSize:]
lastOffsetBytes, err := at.stor.newestLastRecordByKey(key)
if err == errNotFound {
// The first record for this key.
if err := at.stor.addRecordBytes(key, newRecordBytes); err != nil {
return errors.Wrap(err, "batchedStorage: failed to add record")
}
return nil
} else if err != nil {
return errors.Wrap(err, "newestLastRecordByKey() failed")
}
// Make sure the offset we add is greater than any other offsets
// by comparing it to the last (= maximum) offset.
// This makes adding from file to batchedStorage idempotent.
newOffsetBytes := newRecordBytes[blockNumLen:]
offset := at.offsetFromBytes(newOffsetBytes)
lastOffset := at.offsetFromBytes(lastOffsetBytes)
if lastOffset > at.rw.blockchainLen {
return errors.Errorf("invalid offset in storage: %d, max is: %d", lastOffset, at.rw.blockchainLen)
}
if offset <= lastOffset {
return nil
}
if err := at.stor.addRecordBytes(key, newRecordBytes); err != nil {
return errors.Wrap(err, "batchedStorage: failed to add record")
}
return nil
}
func (at *addressTransactions) shouldPersist() (bool, error) {
fileStats, err := os.Stat(at.filePath)
if err != nil {
return false, err
}
size := fileStats.Size()
zap.S().Debugf("TransactionsByAddresses file size: %d; max is %d", size, at.params.maxFileSize)
return size >= at.params.maxFileSize, nil
}
func (at *addressTransactions) persist() error {
fileStats, err := os.Stat(at.filePath)
if err != nil {
return err
}
size := fileStats.Size()
zap.S().Info("Starting to sort TransactionsByAddresses file, will take awhile...")
debug.FreeOSMemory()
// Create file for emsort and set emsort over it.
tempFile, err := os.CreateTemp(os.TempDir(), "emsort")
if err != nil {
return errors.Wrap(err, "failed to create temp file for emsort")
}
defer func(name string) {
err := os.Remove(name)
if err != nil {
zap.S().Warnf("Failed to remove temporary file: %v", err)
}
}(tempFile.Name())
sort, err := emsort.NewFixedSize(addrTxRecordSize, maxEmsortMem, tempFile)
if err != nil {
return errors.Wrap(err, "emsort.NewFixedSize() failed")
}
// Read records from file and append to emsort.
for readPos := int64(0); readPos < size; readPos += addrTxRecordSize {
record := make([]byte, addrTxRecordSize)
if n, err := at.addrTransactions.ReadAt(record, readPos); err != nil {
return err
} else if n != addrTxRecordSize {
return errors.New("failed to read full record")
}
// Filtering optimization: if all blocks are valid,
// we shouldn't check isValid() on records.
isValid := true
if at.amend {
blockNum := binary.BigEndian.Uint32(record[proto.AddressIDSize : proto.AddressIDSize+4])
isValid, err = at.stateDB.isValidBlock(blockNum)
if err != nil {
return errors.Wrap(err, "isValidBlock() failed")
}
}
if !isValid {
// Invalid record, we should skip it.
continue
}
if err := sort.Push(record); err != nil {
return errors.Wrap(err, "emsort.Push() failed")
}
}
// Tell emsort that we have finished appending records.
if err := sort.StopWriting(); err != nil {
return errors.Wrap(err, "emsort.StopWriting() failed")
}
zap.S().Info("Finished to sort TransactionsByAddresses file")
debug.FreeOSMemory()
zap.S().Info("Writing sorted records to database, will take awhile...")
// Read records from emsort in sorted order and save to batchedStorage.
for {
record, err := sort.Pop()
if err == io.EOF {
// All records were read.
break
} else if err != nil {
return errors.Wrap(err, "emsort.Pop() failed")
}
if err := at.handleRecord(record); err != nil {
return errors.Wrap(err, "failed to add record")
}
}
// Write 0 size to database batch.
// This way 0 size will be written to database together with new records.
// If program crashes after batch is flushed but before we truncate the file,
// next time 0 size will be read and file will be truncated upon next start.
if err := at.saveFileSizeToBatch(at.stor.dbBatch, 0); err != nil {
return errors.Wrap(err, "failed to write file size to db batch")
}
// Flush batchedStorage.
if err := at.stor.flush(); err != nil {
return errors.Wrap(err, "batchedStorage(): failed to flush")
}
// Clear batchedStorage.
at.stor.reset()
// Clear address transactions file.
if err := at.addrTransactions.Truncate(0); err != nil {
return err
}
if _, err := at.addrTransactions.Seek(0, 0); err != nil {
return err
}
at.addrTransactionsBuf.Reset(at.addrTransactions)
zap.S().Info("Successfully finished moving records from file to database")
debug.FreeOSMemory()
return nil
}
func (at *addressTransactions) saveFileSizeToBatch(batch keyvalue.Batch, size uint64) error {
fileSizeBytes := make([]byte, 8)
binary.BigEndian.PutUint64(fileSizeBytes, size)
batch.Put(fileSizeKeyBytes, fileSizeBytes)
return nil
}
func (at *addressTransactions) reset() {
if at.params.providesData {
at.stor.reset()
} else {
at.addrTransactionsBuf.Reset(at.addrTransactions)
}
}
func (at *addressTransactions) flush() error {
if at.params.providesData {
return at.stor.flush()
}
if err := at.addrTransactionsBuf.Flush(); err != nil {
return err
}
if err := at.addrTransactions.Sync(); err != nil {
return err
}
fileStats, err := os.Stat(at.filePath)
if err != nil {
return err
}
size := uint64(fileStats.Size())
if err := at.saveFileSizeToBatch(at.stateDB.dbBatch, size); err != nil {
return err
}
return nil
}
func (at *addressTransactions) providesData() bool {
return at.params.providesData
}
func (at *addressTransactions) close() error {
return at.addrTransactions.Close()
} | maxEmsortMem = 200 * 1024 * 1024 // 200 MiB.
txMetaSize = 8 + 1
) | random_line_split |
address_transactions.go | package state
import (
"bufio"
"encoding/binary"
"io"
"os"
"path/filepath"
"runtime/debug"
"github.com/pkg/errors"
"github.com/starius/emsort"
"github.com/wavesplatform/gowaves/pkg/keyvalue"
"github.com/wavesplatform/gowaves/pkg/proto"
"go.uber.org/zap"
)
const (
// AddressID size + length of block num + transaction offset length.
addrTxRecordSize = proto.AddressIDSize + blockNumLen + txMetaSize
maxEmsortMem = 200 * 1024 * 1024 // 200 MiB.
txMetaSize = 8 + 1
)
var (
fileSizeKeyBytes = []byte{txsByAddressesFileSizeKeyPrefix}
)
type txMeta struct {
offset uint64
failed bool
}
func (m *txMeta) bytes() []byte {
buf := make([]byte, txMetaSize)
binary.BigEndian.PutUint64(buf, m.offset)
buf[8] = 0
if m.failed {
buf[8] = 1
}
return buf
}
func (m *txMeta) unmarshal(data []byte) error {
if len(data) < txMetaSize {
return errInvalidDataSize
}
m.offset = binary.BigEndian.Uint64(data)
if data[8] == 1 {
m.failed = true
}
return nil
}
type txIter struct {
rw *blockReadWriter
iter *recordIterator
err error
}
func newTxIter(rw *blockReadWriter, iter *recordIterator) *txIter {
return &txIter{rw: rw, iter: iter}
}
func (i *txIter) | () (proto.Transaction, bool, error) {
value, err := i.iter.currentRecord()
if err != nil {
return nil, false, err
}
var meta txMeta
err = meta.unmarshal(value)
if err != nil {
return nil, false, err
}
tx, err := i.rw.readTransactionByOffset(meta.offset)
if err != nil {
return nil, false, err
}
return tx, meta.failed, nil
}
func (i *txIter) Next() bool {
return i.iter.next()
}
func (i *txIter) Error() error {
if err := i.iter.error(); err != nil {
return err
}
return i.err
}
func (i *txIter) Release() {
i.iter.release()
}
func manageFile(file *os.File, db keyvalue.IterableKeyVal) error {
var properFileSize uint64
fileSizeBytes, err := db.Get(fileSizeKeyBytes)
if err == keyvalue.ErrNotFound {
properFileSize = 0
} else if err == nil {
properFileSize = binary.BigEndian.Uint64(fileSizeBytes)
} else {
return err
}
fileStats, err := os.Stat(file.Name())
if err != nil {
return err
}
size := uint64(fileStats.Size())
if size < properFileSize {
return errors.New("data loss: file size is less than it should be")
} else if size == properFileSize {
return nil
}
if err := file.Truncate(int64(properFileSize)); err != nil {
return err
}
if _, err := file.Seek(int64(properFileSize), 0); err != nil {
return err
}
return nil
}
type addressTransactionsParams struct {
dir string // Directory for address_transactions file.
batchedStorMemLimit int // Maximum size of batchedStor db batch.
batchedStorMaxKeys int // Maximum number of keys per flush().
maxFileSize int64 // Maximum size of address_transactions file.
providesData bool // True if transaction iterators can be used.
}
type addressTransactions struct {
stateDB *stateDB
rw *blockReadWriter
stor *batchedStorage
amend bool
filePath string
addrTransactions *os.File
addrTransactionsBuf *bufio.Writer
params *addressTransactionsParams
}
func newAddressTransactions(
db keyvalue.IterableKeyVal,
stateDB *stateDB,
rw *blockReadWriter,
params *addressTransactionsParams,
amend bool,
) (*addressTransactions, error) {
bsParams := &batchedStorParams{
maxBatchSize: maxTransactionIdsBatchSize,
recordSize: txMetaSize,
prefix: transactionIdsPrefix,
}
filePath := filepath.Join(filepath.Clean(params.dir), "address_transactions")
addrTransactionsFile, _, err := openOrCreateForAppending(filePath)
if err != nil {
return nil, err
}
if err := manageFile(addrTransactionsFile, db); err != nil {
return nil, err
}
stor, err := newBatchedStorage(db, stateDB, bsParams, params.batchedStorMemLimit, params.batchedStorMaxKeys, amend)
if err != nil {
return nil, err
}
atx := &addressTransactions{
stateDB: stateDB,
rw: rw,
stor: stor,
filePath: filePath,
addrTransactions: addrTransactionsFile,
addrTransactionsBuf: bufio.NewWriter(addrTransactionsFile),
params: params,
amend: amend,
}
if params.providesData {
if err := atx.persist(); err != nil {
return nil, errors.Wrap(err, "failed to persist")
}
}
return atx, nil
}
func (at *addressTransactions) saveTxIdByAddress(addr proto.Address, txID []byte, blockID proto.BlockID) error {
if at.rw.offsetLen != 8 {
return errors.New("unsupported meta length")
}
newRecord := make([]byte, addrTxRecordSize)
blockNum, err := at.stateDB.newestBlockIdToNum(blockID)
if err != nil {
return err
}
copy(newRecord[:proto.AddressIDSize], addr.ID().Bytes())
pos := proto.AddressIDSize
info, err := at.rw.newestTransactionInfoByID(txID)
if err != nil {
return err
}
meta := txMeta{info.offset, info.failed}
binary.BigEndian.PutUint32(newRecord[pos:], blockNum)
pos += blockNumLen
copy(newRecord[pos:], meta.bytes())
if at.params.providesData {
return at.stor.addRecordBytes(newRecord[:proto.AddressIDSize], newRecord[proto.AddressIDSize:])
}
if _, err := at.addrTransactionsBuf.Write(newRecord); err != nil {
return err
}
return nil
}
func (at *addressTransactions) newTransactionsByAddrIterator(addr proto.Address) (*txIter, error) {
if !at.params.providesData {
return nil, errors.New("state does not provide transactions by addresses now")
}
key := addr.ID().Bytes()
iter, err := at.stor.newBackwardRecordIterator(key)
if err != nil {
return nil, err
}
return newTxIter(at.rw, iter), nil
}
func (at *addressTransactions) startProvidingData() error {
if at.params.providesData {
// Already provides.
return nil
}
if err := at.persist(); err != nil {
return err
}
at.params.providesData = true
return nil
}
func (at *addressTransactions) offsetFromBytes(offsetBytes []byte) uint64 {
return binary.BigEndian.Uint64(offsetBytes)
}
func (at *addressTransactions) handleRecord(record []byte) error {
key := record[:proto.AddressIDSize]
newRecordBytes := record[proto.AddressIDSize:]
lastOffsetBytes, err := at.stor.newestLastRecordByKey(key)
if err == errNotFound {
// The first record for this key.
if err := at.stor.addRecordBytes(key, newRecordBytes); err != nil {
return errors.Wrap(err, "batchedStorage: failed to add record")
}
return nil
} else if err != nil {
return errors.Wrap(err, "newestLastRecordByKey() failed")
}
// Make sure the offset we add is greater than any other offsets
// by comparing it to the last (= maximum) offset.
// This makes adding from file to batchedStorage idempotent.
newOffsetBytes := newRecordBytes[blockNumLen:]
offset := at.offsetFromBytes(newOffsetBytes)
lastOffset := at.offsetFromBytes(lastOffsetBytes)
if lastOffset > at.rw.blockchainLen {
return errors.Errorf("invalid offset in storage: %d, max is: %d", lastOffset, at.rw.blockchainLen)
}
if offset <= lastOffset {
return nil
}
if err := at.stor.addRecordBytes(key, newRecordBytes); err != nil {
return errors.Wrap(err, "batchedStorage: failed to add record")
}
return nil
}
func (at *addressTransactions) shouldPersist() (bool, error) {
fileStats, err := os.Stat(at.filePath)
if err != nil {
return false, err
}
size := fileStats.Size()
zap.S().Debugf("TransactionsByAddresses file size: %d; max is %d", size, at.params.maxFileSize)
return size >= at.params.maxFileSize, nil
}
func (at *addressTransactions) persist() error {
fileStats, err := os.Stat(at.filePath)
if err != nil {
return err
}
size := fileStats.Size()
zap.S().Info("Starting to sort TransactionsByAddresses file, will take awhile...")
debug.FreeOSMemory()
// Create file for emsort and set emsort over it.
tempFile, err := os.CreateTemp(os.TempDir(), "emsort")
if err != nil {
return errors.Wrap(err, "failed to create temp file for emsort")
}
defer func(name string) {
err := os.Remove(name)
if err != nil {
zap.S().Warnf("Failed to remove temporary file: %v", err)
}
}(tempFile.Name())
sort, err := emsort.NewFixedSize(addrTxRecordSize, maxEmsortMem, tempFile)
if err != nil {
return errors.Wrap(err, "emsort.NewFixedSize() failed")
}
// Read records from file and append to emsort.
for readPos := int64(0); readPos < size; readPos += addrTxRecordSize {
record := make([]byte, addrTxRecordSize)
if n, err := at.addrTransactions.ReadAt(record, readPos); err != nil {
return err
} else if n != addrTxRecordSize {
return errors.New("failed to read full record")
}
// Filtering optimization: if all blocks are valid,
// we shouldn't check isValid() on records.
isValid := true
if at.amend {
blockNum := binary.BigEndian.Uint32(record[proto.AddressIDSize : proto.AddressIDSize+4])
isValid, err = at.stateDB.isValidBlock(blockNum)
if err != nil {
return errors.Wrap(err, "isValidBlock() failed")
}
}
if !isValid {
// Invalid record, we should skip it.
continue
}
if err := sort.Push(record); err != nil {
return errors.Wrap(err, "emsort.Push() failed")
}
}
// Tell emsort that we have finished appending records.
if err := sort.StopWriting(); err != nil {
return errors.Wrap(err, "emsort.StopWriting() failed")
}
zap.S().Info("Finished to sort TransactionsByAddresses file")
debug.FreeOSMemory()
zap.S().Info("Writing sorted records to database, will take awhile...")
// Read records from emsort in sorted order and save to batchedStorage.
for {
record, err := sort.Pop()
if err == io.EOF {
// All records were read.
break
} else if err != nil {
return errors.Wrap(err, "emsort.Pop() failed")
}
if err := at.handleRecord(record); err != nil {
return errors.Wrap(err, "failed to add record")
}
}
// Write 0 size to database batch.
// This way 0 size will be written to database together with new records.
// If program crashes after batch is flushed but before we truncate the file,
// next time 0 size will be read and file will be truncated upon next start.
if err := at.saveFileSizeToBatch(at.stor.dbBatch, 0); err != nil {
return errors.Wrap(err, "failed to write file size to db batch")
}
// Flush batchedStorage.
if err := at.stor.flush(); err != nil {
return errors.Wrap(err, "batchedStorage(): failed to flush")
}
// Clear batchedStorage.
at.stor.reset()
// Clear address transactions file.
if err := at.addrTransactions.Truncate(0); err != nil {
return err
}
if _, err := at.addrTransactions.Seek(0, 0); err != nil {
return err
}
at.addrTransactionsBuf.Reset(at.addrTransactions)
zap.S().Info("Successfully finished moving records from file to database")
debug.FreeOSMemory()
return nil
}
func (at *addressTransactions) saveFileSizeToBatch(batch keyvalue.Batch, size uint64) error {
fileSizeBytes := make([]byte, 8)
binary.BigEndian.PutUint64(fileSizeBytes, size)
batch.Put(fileSizeKeyBytes, fileSizeBytes)
return nil
}
func (at *addressTransactions) reset() {
if at.params.providesData {
at.stor.reset()
} else {
at.addrTransactionsBuf.Reset(at.addrTransactions)
}
}
func (at *addressTransactions) flush() error {
if at.params.providesData {
return at.stor.flush()
}
if err := at.addrTransactionsBuf.Flush(); err != nil {
return err
}
if err := at.addrTransactions.Sync(); err != nil {
return err
}
fileStats, err := os.Stat(at.filePath)
if err != nil {
return err
}
size := uint64(fileStats.Size())
if err := at.saveFileSizeToBatch(at.stateDB.dbBatch, size); err != nil {
return err
}
return nil
}
func (at *addressTransactions) providesData() bool {
return at.params.providesData
}
func (at *addressTransactions) close() error {
return at.addrTransactions.Close()
}
| Transaction | identifier_name |
address_transactions.go | package state
import (
"bufio"
"encoding/binary"
"io"
"os"
"path/filepath"
"runtime/debug"
"github.com/pkg/errors"
"github.com/starius/emsort"
"github.com/wavesplatform/gowaves/pkg/keyvalue"
"github.com/wavesplatform/gowaves/pkg/proto"
"go.uber.org/zap"
)
const (
// AddressID size + length of block num + transaction offset length.
addrTxRecordSize = proto.AddressIDSize + blockNumLen + txMetaSize
maxEmsortMem = 200 * 1024 * 1024 // 200 MiB.
txMetaSize = 8 + 1
)
var (
fileSizeKeyBytes = []byte{txsByAddressesFileSizeKeyPrefix}
)
type txMeta struct {
offset uint64
failed bool
}
func (m *txMeta) bytes() []byte {
buf := make([]byte, txMetaSize)
binary.BigEndian.PutUint64(buf, m.offset)
buf[8] = 0
if m.failed {
buf[8] = 1
}
return buf
}
func (m *txMeta) unmarshal(data []byte) error {
if len(data) < txMetaSize {
return errInvalidDataSize
}
m.offset = binary.BigEndian.Uint64(data)
if data[8] == 1 {
m.failed = true
}
return nil
}
type txIter struct {
rw *blockReadWriter
iter *recordIterator
err error
}
func newTxIter(rw *blockReadWriter, iter *recordIterator) *txIter {
return &txIter{rw: rw, iter: iter}
}
func (i *txIter) Transaction() (proto.Transaction, bool, error) {
value, err := i.iter.currentRecord()
if err != nil {
return nil, false, err
}
var meta txMeta
err = meta.unmarshal(value)
if err != nil {
return nil, false, err
}
tx, err := i.rw.readTransactionByOffset(meta.offset)
if err != nil {
return nil, false, err
}
return tx, meta.failed, nil
}
func (i *txIter) Next() bool {
return i.iter.next()
}
func (i *txIter) Error() error {
if err := i.iter.error(); err != nil {
return err
}
return i.err
}
func (i *txIter) Release() {
i.iter.release()
}
func manageFile(file *os.File, db keyvalue.IterableKeyVal) error {
var properFileSize uint64
fileSizeBytes, err := db.Get(fileSizeKeyBytes)
if err == keyvalue.ErrNotFound {
properFileSize = 0
} else if err == nil {
properFileSize = binary.BigEndian.Uint64(fileSizeBytes)
} else {
return err
}
fileStats, err := os.Stat(file.Name())
if err != nil {
return err
}
size := uint64(fileStats.Size())
if size < properFileSize {
return errors.New("data loss: file size is less than it should be")
} else if size == properFileSize {
return nil
}
if err := file.Truncate(int64(properFileSize)); err != nil {
return err
}
if _, err := file.Seek(int64(properFileSize), 0); err != nil {
return err
}
return nil
}
type addressTransactionsParams struct {
dir string // Directory for address_transactions file.
batchedStorMemLimit int // Maximum size of batchedStor db batch.
batchedStorMaxKeys int // Maximum number of keys per flush().
maxFileSize int64 // Maximum size of address_transactions file.
providesData bool // True if transaction iterators can be used.
}
type addressTransactions struct {
stateDB *stateDB
rw *blockReadWriter
stor *batchedStorage
amend bool
filePath string
addrTransactions *os.File
addrTransactionsBuf *bufio.Writer
params *addressTransactionsParams
}
func newAddressTransactions(
db keyvalue.IterableKeyVal,
stateDB *stateDB,
rw *blockReadWriter,
params *addressTransactionsParams,
amend bool,
) (*addressTransactions, error) {
bsParams := &batchedStorParams{
maxBatchSize: maxTransactionIdsBatchSize,
recordSize: txMetaSize,
prefix: transactionIdsPrefix,
}
filePath := filepath.Join(filepath.Clean(params.dir), "address_transactions")
addrTransactionsFile, _, err := openOrCreateForAppending(filePath)
if err != nil {
return nil, err
}
if err := manageFile(addrTransactionsFile, db); err != nil {
return nil, err
}
stor, err := newBatchedStorage(db, stateDB, bsParams, params.batchedStorMemLimit, params.batchedStorMaxKeys, amend)
if err != nil {
return nil, err
}
atx := &addressTransactions{
stateDB: stateDB,
rw: rw,
stor: stor,
filePath: filePath,
addrTransactions: addrTransactionsFile,
addrTransactionsBuf: bufio.NewWriter(addrTransactionsFile),
params: params,
amend: amend,
}
if params.providesData {
if err := atx.persist(); err != nil {
return nil, errors.Wrap(err, "failed to persist")
}
}
return atx, nil
}
func (at *addressTransactions) saveTxIdByAddress(addr proto.Address, txID []byte, blockID proto.BlockID) error |
func (at *addressTransactions) newTransactionsByAddrIterator(addr proto.Address) (*txIter, error) {
if !at.params.providesData {
return nil, errors.New("state does not provide transactions by addresses now")
}
key := addr.ID().Bytes()
iter, err := at.stor.newBackwardRecordIterator(key)
if err != nil {
return nil, err
}
return newTxIter(at.rw, iter), nil
}
func (at *addressTransactions) startProvidingData() error {
if at.params.providesData {
// Already provides.
return nil
}
if err := at.persist(); err != nil {
return err
}
at.params.providesData = true
return nil
}
func (at *addressTransactions) offsetFromBytes(offsetBytes []byte) uint64 {
return binary.BigEndian.Uint64(offsetBytes)
}
func (at *addressTransactions) handleRecord(record []byte) error {
key := record[:proto.AddressIDSize]
newRecordBytes := record[proto.AddressIDSize:]
lastOffsetBytes, err := at.stor.newestLastRecordByKey(key)
if err == errNotFound {
// The first record for this key.
if err := at.stor.addRecordBytes(key, newRecordBytes); err != nil {
return errors.Wrap(err, "batchedStorage: failed to add record")
}
return nil
} else if err != nil {
return errors.Wrap(err, "newestLastRecordByKey() failed")
}
// Make sure the offset we add is greater than any other offsets
// by comparing it to the last (= maximum) offset.
// This makes adding from file to batchedStorage idempotent.
newOffsetBytes := newRecordBytes[blockNumLen:]
offset := at.offsetFromBytes(newOffsetBytes)
lastOffset := at.offsetFromBytes(lastOffsetBytes)
if lastOffset > at.rw.blockchainLen {
return errors.Errorf("invalid offset in storage: %d, max is: %d", lastOffset, at.rw.blockchainLen)
}
if offset <= lastOffset {
return nil
}
if err := at.stor.addRecordBytes(key, newRecordBytes); err != nil {
return errors.Wrap(err, "batchedStorage: failed to add record")
}
return nil
}
func (at *addressTransactions) shouldPersist() (bool, error) {
fileStats, err := os.Stat(at.filePath)
if err != nil {
return false, err
}
size := fileStats.Size()
zap.S().Debugf("TransactionsByAddresses file size: %d; max is %d", size, at.params.maxFileSize)
return size >= at.params.maxFileSize, nil
}
func (at *addressTransactions) persist() error {
fileStats, err := os.Stat(at.filePath)
if err != nil {
return err
}
size := fileStats.Size()
zap.S().Info("Starting to sort TransactionsByAddresses file, will take awhile...")
debug.FreeOSMemory()
// Create file for emsort and set emsort over it.
tempFile, err := os.CreateTemp(os.TempDir(), "emsort")
if err != nil {
return errors.Wrap(err, "failed to create temp file for emsort")
}
defer func(name string) {
err := os.Remove(name)
if err != nil {
zap.S().Warnf("Failed to remove temporary file: %v", err)
}
}(tempFile.Name())
sort, err := emsort.NewFixedSize(addrTxRecordSize, maxEmsortMem, tempFile)
if err != nil {
return errors.Wrap(err, "emsort.NewFixedSize() failed")
}
// Read records from file and append to emsort.
for readPos := int64(0); readPos < size; readPos += addrTxRecordSize {
record := make([]byte, addrTxRecordSize)
if n, err := at.addrTransactions.ReadAt(record, readPos); err != nil {
return err
} else if n != addrTxRecordSize {
return errors.New("failed to read full record")
}
// Filtering optimization: if all blocks are valid,
// we shouldn't check isValid() on records.
isValid := true
if at.amend {
blockNum := binary.BigEndian.Uint32(record[proto.AddressIDSize : proto.AddressIDSize+4])
isValid, err = at.stateDB.isValidBlock(blockNum)
if err != nil {
return errors.Wrap(err, "isValidBlock() failed")
}
}
if !isValid {
// Invalid record, we should skip it.
continue
}
if err := sort.Push(record); err != nil {
return errors.Wrap(err, "emsort.Push() failed")
}
}
// Tell emsort that we have finished appending records.
if err := sort.StopWriting(); err != nil {
return errors.Wrap(err, "emsort.StopWriting() failed")
}
zap.S().Info("Finished to sort TransactionsByAddresses file")
debug.FreeOSMemory()
zap.S().Info("Writing sorted records to database, will take awhile...")
// Read records from emsort in sorted order and save to batchedStorage.
for {
record, err := sort.Pop()
if err == io.EOF {
// All records were read.
break
} else if err != nil {
return errors.Wrap(err, "emsort.Pop() failed")
}
if err := at.handleRecord(record); err != nil {
return errors.Wrap(err, "failed to add record")
}
}
// Write 0 size to database batch.
// This way 0 size will be written to database together with new records.
// If program crashes after batch is flushed but before we truncate the file,
// next time 0 size will be read and file will be truncated upon next start.
if err := at.saveFileSizeToBatch(at.stor.dbBatch, 0); err != nil {
return errors.Wrap(err, "failed to write file size to db batch")
}
// Flush batchedStorage.
if err := at.stor.flush(); err != nil {
return errors.Wrap(err, "batchedStorage(): failed to flush")
}
// Clear batchedStorage.
at.stor.reset()
// Clear address transactions file.
if err := at.addrTransactions.Truncate(0); err != nil {
return err
}
if _, err := at.addrTransactions.Seek(0, 0); err != nil {
return err
}
at.addrTransactionsBuf.Reset(at.addrTransactions)
zap.S().Info("Successfully finished moving records from file to database")
debug.FreeOSMemory()
return nil
}
func (at *addressTransactions) saveFileSizeToBatch(batch keyvalue.Batch, size uint64) error {
fileSizeBytes := make([]byte, 8)
binary.BigEndian.PutUint64(fileSizeBytes, size)
batch.Put(fileSizeKeyBytes, fileSizeBytes)
return nil
}
func (at *addressTransactions) reset() {
if at.params.providesData {
at.stor.reset()
} else {
at.addrTransactionsBuf.Reset(at.addrTransactions)
}
}
func (at *addressTransactions) flush() error {
if at.params.providesData {
return at.stor.flush()
}
if err := at.addrTransactionsBuf.Flush(); err != nil {
return err
}
if err := at.addrTransactions.Sync(); err != nil {
return err
}
fileStats, err := os.Stat(at.filePath)
if err != nil {
return err
}
size := uint64(fileStats.Size())
if err := at.saveFileSizeToBatch(at.stateDB.dbBatch, size); err != nil {
return err
}
return nil
}
func (at *addressTransactions) providesData() bool {
return at.params.providesData
}
func (at *addressTransactions) close() error {
return at.addrTransactions.Close()
}
| {
if at.rw.offsetLen != 8 {
return errors.New("unsupported meta length")
}
newRecord := make([]byte, addrTxRecordSize)
blockNum, err := at.stateDB.newestBlockIdToNum(blockID)
if err != nil {
return err
}
copy(newRecord[:proto.AddressIDSize], addr.ID().Bytes())
pos := proto.AddressIDSize
info, err := at.rw.newestTransactionInfoByID(txID)
if err != nil {
return err
}
meta := txMeta{info.offset, info.failed}
binary.BigEndian.PutUint32(newRecord[pos:], blockNum)
pos += blockNumLen
copy(newRecord[pos:], meta.bytes())
if at.params.providesData {
return at.stor.addRecordBytes(newRecord[:proto.AddressIDSize], newRecord[proto.AddressIDSize:])
}
if _, err := at.addrTransactionsBuf.Write(newRecord); err != nil {
return err
}
return nil
} | identifier_body |
mod.rs | //! `proptest`-related features for `nalgebra` data structures.
//!
//! **This module is only available when the `proptest-support` feature is enabled in `nalgebra`**.
//!
//! `proptest` is a library for *property-based testing*. While similar to `QuickCheck`,
//! which may be more familiar to some users, it has a more sophisticated design that
//! provides users with automatic invariant-preserving shrinking. This means that when using
//! `proptest`, you rarely need to write your own shrinkers - which is usually very difficult -
//! and can instead get this "for free". Moreover, `proptest` does not rely on a canonical
//! `Arbitrary` trait implementation like `QuickCheck`, though it does also provide this. For
//! more information, check out the [proptest docs](https://docs.rs/proptest/0.10.1/proptest/)
//! and the [proptest book](https://altsysrq.github.io/proptest-book/intro.html).
//!
//! This module provides users of `nalgebra` with tools to work with `nalgebra` types in
//! `proptest` tests. At present, this integration is at an early stage, and only
//! provides tools for generating matrices and vectors, and not any of the geometry types.
//! There are essentially two ways of using this functionality:
//!
//! - Using the [matrix](fn.matrix.html) function to generate matrices with constraints
//! on dimensions and elements.
//! - Relying on the `Arbitrary` implementation of `OMatrix`.
//!
//! The first variant is almost always preferred in practice. Read on to discover why.
//!
//! ### Using free function strategies
//!
//! In `proptest`, it is usually preferable to have free functions that generate *strategies*.
//! Currently, the [matrix](fn.matrix.html) function fills this role. The analogous function for
//! column vectors is [vector](fn.vector.html). Let's take a quick look at how it may be used:
//! ```
//! use nalgebra::proptest::matrix;
//! use proptest::prelude::*;
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn my_test(a in matrix(-5 ..= 5, 2 ..= 4, 1..=4)) {
//! // Generates matrices with elements in the range -5 ..= 5, rows in 2..=4 and
//! // columns in 1..=4.
//! }
//! }
//!
//! # fn main() { my_test(); }
//! ```
//!
//! In the above example, we generate matrices with constraints on the elements, as well as the
//! on the allowed dimensions. When a failing example is found, the resulting shrinking process
//! will preserve these invariants. We can use this to compose more advanced strategies.
//! For example, let's consider a toy example where we need to generate pairs of matrices
//! with exactly 3 rows fixed at compile-time and the same number of columns, but we want the
//! number of columns to vary. One way to do this is to use `proptest` combinators in combination
//! with [matrix](fn.matrix.html) as follows:
//!
//! ```
//! use nalgebra::{Dyn, OMatrix, Const};
//! use nalgebra::proptest::matrix;
//! use proptest::prelude::*;
//!
//! type MyMatrix = OMatrix<i32, Const::<3>, Dyn>;
//!
//! /// Returns a strategy for pairs of matrices with `U3` rows and the same number of
//! /// columns.
//! fn matrix_pairs() -> impl Strategy<Value=(MyMatrix, MyMatrix)> {
//! matrix(-5 ..= 5, Const::<3>, 0 ..= 10)
//! // We first generate the initial matrix `a`, and then depending on the concrete
//! // instances of `a`, we pick a second matrix with the same number of columns
//! .prop_flat_map(|a| {
//! let b = matrix(-5 .. 5, Const::<3>, a.ncols());
//! // This returns a new tuple strategy where we keep `a` fixed while
//! // the second item is a strategy that generates instances with the same
//! // dimensions as `a`
//! (Just(a), b)
//! })
//! }
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn my_test((a, b) in matrix_pairs()) {
//! // Let's double-check that the two matrices do indeed have the same number of
//! // columns
//! prop_assert_eq!(a.ncols(), b.ncols());
//! }
//! }
//!
//! # fn main() { my_test(); }
//! ```
//!
//! ### The `Arbitrary` implementation
//!
//! If you don't care about the dimensions of matrices, you can write tests like these:
//!
//! ```
//! use nalgebra::{DMatrix, DVector, Dyn, Matrix3, OMatrix, Vector3, U3};
//! use proptest::prelude::*;
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn test_dynamic(matrix: DMatrix<i32>) {
//! // This will generate arbitrary instances of `DMatrix` and also attempt
//! // to shrink/simplify them when test failures are encountered.
//! }
//!
//! # /*
//! #[test]
//! # */
//! fn test_static_and_mixed(matrix: Matrix3<i32>, matrix2: OMatrix<i32, U3, Dyn>) {
//! // Test some property involving these matrices
//! }
//!
//! # /*
//! #[test]
//! # */
//! fn test_vectors(fixed_size_vector: Vector3<i32>, dyn_vector: DVector<i32>) {
//! // Test some property involving these vectors
//! }
//! }
//!
//! # fn main() { test_dynamic(); test_static_and_mixed(); test_vectors(); }
//! ```
//!
//! While this may be convenient, the default strategies for built-in types in `proptest` can
//! generate *any* number, including integers large enough to easily lead to overflow when used in
//! matrix operations, or even infinity or NaN values for floating-point types. Therefore
//! `Arbitrary` is rarely the method of choice for writing property-based tests.
//!
//! ### Notes on shrinking
//!
//! Due to some limitations of the current implementation, shrinking takes place by first
//! shrinking the matrix elements before trying to shrink the dimensions of the matrix.
//! This unfortunately often leads to the fact that a large number of shrinking iterations
//! are necessary to find a (nearly) minimal failing test case. As a workaround for this,
//! you can increase the maximum number of shrinking iterations when debugging. To do this,
//! simply set the `PROPTEST_MAX_SHRINK_ITERS` variable to a high number. For example:
//!
//! ```text
//! PROPTEST_MAX_SHRINK_ITERS=100000 cargo test my_failing_test
//! ```
use crate::allocator::Allocator;
use crate::{Const, DefaultAllocator, Dim, DimName, Dyn, OMatrix, Scalar, U1};
use proptest::arbitrary::Arbitrary;
use proptest::collection::vec;
use proptest::strategy::{BoxedStrategy, Just, NewTree, Strategy, ValueTree};
use proptest::test_runner::TestRunner;
use std::ops::RangeInclusive;
/// Parameters for arbitrary matrix generation.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct MatrixParameters<NParameters, R, C> {
/// The range of rows that may be generated.
pub rows: DimRange<R>,
/// The range of columns that may be generated.
pub cols: DimRange<C>,
/// Parameters for the `Arbitrary` implementation of the scalar values.
pub value_parameters: NParameters,
}
/// A range of allowed dimensions for use in generation of matrices.
///
/// The `DimRange` type is used to encode the range of dimensions that can be used for generation
/// of matrices with `proptest`. In most cases, you do not need to concern yourself with
/// `DimRange` directly, as it supports conversion from other types such as `U3` or inclusive
/// ranges such as `5 ..= 6`. The latter example corresponds to dimensions from (inclusive)
/// `Dyn(5)` to `Dyn(6)` (inclusive).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DimRange<D = Dyn>(RangeInclusive<D>);
impl<D: Dim> DimRange<D> {
/// The lower bound for dimensions generated.
pub fn lower_bound(&self) -> D {
*self.0.start()
}
/// The upper bound for dimensions generated.
pub fn upper_bound(&self) -> D {
*self.0.end()
}
}
impl<D: Dim> From<D> for DimRange<D> {
fn from(dim: D) -> Self {
DimRange(dim..=dim)
}
}
impl<D: Dim> From<RangeInclusive<D>> for DimRange<D> {
fn from(range: RangeInclusive<D>) -> Self {
DimRange(range)
}
}
impl From<RangeInclusive<usize>> for DimRange<Dyn> {
fn from(range: RangeInclusive<usize>) -> Self {
DimRange::from(Dyn(*range.start())..=Dyn(*range.end()))
}
}
impl<D: Dim> DimRange<D> {
/// Converts the `DimRange` into an instance of `RangeInclusive`.
pub fn to_range_inclusive(&self) -> RangeInclusive<usize> {
self.lower_bound().value()..=self.upper_bound().value()
}
}
impl From<usize> for DimRange<Dyn> {
fn from(dim: usize) -> Self {
DimRange::from(Dyn(dim))
}
}
/// The default range used for Dyn dimensions when generating arbitrary matrices.
fn dynamic_dim_range() -> DimRange<Dyn> {
DimRange::from(0..=6)
}
/// Create a strategy to generate matrices containing values drawn from the given strategy,
/// with rows and columns in the provided ranges.
///
/// ## Examples | /// use proptest::prelude::*;
///
/// proptest! {
/// # /*
/// #[test]
/// # */
/// fn my_test(a in matrix(0 .. 5i32, Const::<3>, 0 ..= 5)) {
/// // Let's make sure we've got the correct type first
/// let a: OMatrix<_, Const::<3>, Dyn> = a;
/// prop_assert!(a.nrows() == 3);
/// prop_assert!(a.ncols() <= 5);
/// prop_assert!(a.iter().all(|x_ij| *x_ij >= 0 && *x_ij < 5));
/// }
/// }
///
/// # fn main() { my_test(); }
/// ```
///
/// ## Limitations
/// The current implementation has some limitations that lead to suboptimal shrinking behavior.
/// See the [module-level documentation](index.html) for more.
pub fn matrix<R, C, ScalarStrategy>(
value_strategy: ScalarStrategy,
rows: impl Into<DimRange<R>>,
cols: impl Into<DimRange<C>>,
) -> MatrixStrategy<ScalarStrategy, R, C>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, R, C>,
{
matrix_(value_strategy, rows.into(), cols.into())
}
/// Same as `matrix`, but without the additional anonymous generic types
fn matrix_<R, C, ScalarStrategy>(
value_strategy: ScalarStrategy,
rows: DimRange<R>,
cols: DimRange<C>,
) -> MatrixStrategy<ScalarStrategy, R, C>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, R, C>,
{
let nrows = rows.lower_bound().value()..=rows.upper_bound().value();
let ncols = cols.lower_bound().value()..=cols.upper_bound().value();
// Even though we can use this function to generate fixed-size matrices,
// we currently generate all matrices with heap allocated Vec data.
// TODO: Avoid heap allocation for fixed-size matrices.
// Doing this *properly* would probably require us to implement a custom
// strategy and valuetree with custom shrinking logic, which is not trivial
// Perhaps more problematic, however, is the poor shrinking behavior the current setup leads to.
// Shrinking in proptest basically happens in "reverse" of the combinators, so
// by first generating the dimensions and then the elements, we get shrinking that first
// tries to completely shrink the individual elements before trying to reduce the dimension.
// This is clearly the opposite of what we want. I can't find any good way around this
// short of writing our own custom value tree, which we should probably do at some point.
// TODO: Custom implementation of value tree for better shrinking behavior.
let strategy = nrows
.prop_flat_map(move |nrows| (Just(nrows), ncols.clone()))
.prop_flat_map(move |(nrows, ncols)| {
(
Just(nrows),
Just(ncols),
vec(value_strategy.clone(), nrows * ncols),
)
})
.prop_map(|(nrows, ncols, values)| {
// Note: R/C::from_usize will panic if nrows/ncols does not fit in the dimension type.
// However, this should never fail, because we should only be generating
// this stuff in the first place
OMatrix::from_iterator_generic(R::from_usize(nrows), C::from_usize(ncols), values)
})
.boxed();
MatrixStrategy { strategy }
}
/// Create a strategy to generate column vectors containing values drawn from the given strategy,
/// with length in the provided range.
///
/// This is a convenience function for calling
/// [`matrix(value_strategy, length, U1)`](fn.matrix.html) and should
/// be used when you only want to generate column vectors, as it's simpler and makes the intent
/// clear.
pub fn vector<D, ScalarStrategy>(
value_strategy: ScalarStrategy,
length: impl Into<DimRange<D>>,
) -> MatrixStrategy<ScalarStrategy, D, U1>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
D: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, D>,
{
matrix_(value_strategy, length.into(), Const::<1>.into())
}
impl<NParameters, R, C> Default for MatrixParameters<NParameters, R, C>
where
NParameters: Default,
R: DimName,
C: DimName,
{
fn default() -> Self {
Self {
rows: DimRange::from(R::name()),
cols: DimRange::from(C::name()),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters, R> Default for MatrixParameters<NParameters, R, Dyn>
where
NParameters: Default,
R: DimName,
{
fn default() -> Self {
Self {
rows: DimRange::from(R::name()),
cols: dynamic_dim_range(),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters, C> Default for MatrixParameters<NParameters, Dyn, C>
where
NParameters: Default,
C: DimName,
{
fn default() -> Self {
Self {
rows: dynamic_dim_range(),
cols: DimRange::from(C::name()),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters> Default for MatrixParameters<NParameters, Dyn, Dyn>
where
NParameters: Default,
{
fn default() -> Self {
Self {
rows: dynamic_dim_range(),
cols: dynamic_dim_range(),
value_parameters: NParameters::default(),
}
}
}
impl<T, R, C> Arbitrary for OMatrix<T, R, C>
where
T: Scalar + Arbitrary,
<T as Arbitrary>::Strategy: Clone,
R: Dim,
C: Dim,
MatrixParameters<T::Parameters, R, C>: Default,
DefaultAllocator: Allocator<T, R, C>,
{
type Parameters = MatrixParameters<T::Parameters, R, C>;
fn arbitrary_with(args: Self::Parameters) -> Self::Strategy {
let value_strategy = T::arbitrary_with(args.value_parameters);
matrix(value_strategy, args.rows, args.cols)
}
type Strategy = MatrixStrategy<T::Strategy, R, C>;
}
/// A strategy for generating matrices.
#[derive(Debug, Clone)]
pub struct MatrixStrategy<NStrategy, R: Dim, C: Dim>
where
NStrategy: Strategy,
NStrategy::Value: Scalar,
DefaultAllocator: Allocator<NStrategy::Value, R, C>,
{
// For now we only internally hold a boxed strategy. The reason for introducing this
// separate wrapper struct is so that we can replace the strategy logic with custom logic
// later down the road without introducing significant breaking changes
strategy: BoxedStrategy<OMatrix<NStrategy::Value, R, C>>,
}
impl<NStrategy, R, C> Strategy for MatrixStrategy<NStrategy, R, C>
where
NStrategy: Strategy,
NStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<NStrategy::Value, R, C>,
{
type Tree = MatrixValueTree<NStrategy::Value, R, C>;
type Value = OMatrix<NStrategy::Value, R, C>;
fn new_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
let underlying_tree = self.strategy.new_tree(runner)?;
Ok(MatrixValueTree {
value_tree: underlying_tree,
})
}
}
/// A value tree for matrices.
pub struct MatrixValueTree<T, R, C>
where
T: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<T, R, C>,
{
// For now we only wrap a boxed value tree. The reason for wrapping is that this allows us
// to swap out the value tree logic down the road without significant breaking changes.
value_tree: Box<dyn ValueTree<Value = OMatrix<T, R, C>>>,
}
impl<T, R, C> ValueTree for MatrixValueTree<T, R, C>
where
T: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<T, R, C>,
{
type Value = OMatrix<T, R, C>;
fn current(&self) -> Self::Value {
self.value_tree.current()
}
fn simplify(&mut self) -> bool {
self.value_tree.simplify()
}
fn complicate(&mut self) -> bool {
self.value_tree.complicate()
}
} | /// ```
/// use nalgebra::proptest::matrix;
/// use nalgebra::{OMatrix, Const, Dyn}; | random_line_split |
mod.rs | //! `proptest`-related features for `nalgebra` data structures.
//!
//! **This module is only available when the `proptest-support` feature is enabled in `nalgebra`**.
//!
//! `proptest` is a library for *property-based testing*. While similar to `QuickCheck`,
//! which may be more familiar to some users, it has a more sophisticated design that
//! provides users with automatic invariant-preserving shrinking. This means that when using
//! `proptest`, you rarely need to write your own shrinkers - which is usually very difficult -
//! and can instead get this "for free". Moreover, `proptest` does not rely on a canonical
//! `Arbitrary` trait implementation like `QuickCheck`, though it does also provide this. For
//! more information, check out the [proptest docs](https://docs.rs/proptest/0.10.1/proptest/)
//! and the [proptest book](https://altsysrq.github.io/proptest-book/intro.html).
//!
//! This module provides users of `nalgebra` with tools to work with `nalgebra` types in
//! `proptest` tests. At present, this integration is at an early stage, and only
//! provides tools for generating matrices and vectors, and not any of the geometry types.
//! There are essentially two ways of using this functionality:
//!
//! - Using the [matrix](fn.matrix.html) function to generate matrices with constraints
//! on dimensions and elements.
//! - Relying on the `Arbitrary` implementation of `OMatrix`.
//!
//! The first variant is almost always preferred in practice. Read on to discover why.
//!
//! ### Using free function strategies
//!
//! In `proptest`, it is usually preferable to have free functions that generate *strategies*.
//! Currently, the [matrix](fn.matrix.html) function fills this role. The analogous function for
//! column vectors is [vector](fn.vector.html). Let's take a quick look at how it may be used:
//! ```
//! use nalgebra::proptest::matrix;
//! use proptest::prelude::*;
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn my_test(a in matrix(-5 ..= 5, 2 ..= 4, 1..=4)) {
//! // Generates matrices with elements in the range -5 ..= 5, rows in 2..=4 and
//! // columns in 1..=4.
//! }
//! }
//!
//! # fn main() { my_test(); }
//! ```
//!
//! In the above example, we generate matrices with constraints on the elements, as well as the
//! on the allowed dimensions. When a failing example is found, the resulting shrinking process
//! will preserve these invariants. We can use this to compose more advanced strategies.
//! For example, let's consider a toy example where we need to generate pairs of matrices
//! with exactly 3 rows fixed at compile-time and the same number of columns, but we want the
//! number of columns to vary. One way to do this is to use `proptest` combinators in combination
//! with [matrix](fn.matrix.html) as follows:
//!
//! ```
//! use nalgebra::{Dyn, OMatrix, Const};
//! use nalgebra::proptest::matrix;
//! use proptest::prelude::*;
//!
//! type MyMatrix = OMatrix<i32, Const::<3>, Dyn>;
//!
//! /// Returns a strategy for pairs of matrices with `U3` rows and the same number of
//! /// columns.
//! fn matrix_pairs() -> impl Strategy<Value=(MyMatrix, MyMatrix)> {
//! matrix(-5 ..= 5, Const::<3>, 0 ..= 10)
//! // We first generate the initial matrix `a`, and then depending on the concrete
//! // instances of `a`, we pick a second matrix with the same number of columns
//! .prop_flat_map(|a| {
//! let b = matrix(-5 .. 5, Const::<3>, a.ncols());
//! // This returns a new tuple strategy where we keep `a` fixed while
//! // the second item is a strategy that generates instances with the same
//! // dimensions as `a`
//! (Just(a), b)
//! })
//! }
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn my_test((a, b) in matrix_pairs()) {
//! // Let's double-check that the two matrices do indeed have the same number of
//! // columns
//! prop_assert_eq!(a.ncols(), b.ncols());
//! }
//! }
//!
//! # fn main() { my_test(); }
//! ```
//!
//! ### The `Arbitrary` implementation
//!
//! If you don't care about the dimensions of matrices, you can write tests like these:
//!
//! ```
//! use nalgebra::{DMatrix, DVector, Dyn, Matrix3, OMatrix, Vector3, U3};
//! use proptest::prelude::*;
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn test_dynamic(matrix: DMatrix<i32>) {
//! // This will generate arbitrary instances of `DMatrix` and also attempt
//! // to shrink/simplify them when test failures are encountered.
//! }
//!
//! # /*
//! #[test]
//! # */
//! fn test_static_and_mixed(matrix: Matrix3<i32>, matrix2: OMatrix<i32, U3, Dyn>) {
//! // Test some property involving these matrices
//! }
//!
//! # /*
//! #[test]
//! # */
//! fn test_vectors(fixed_size_vector: Vector3<i32>, dyn_vector: DVector<i32>) {
//! // Test some property involving these vectors
//! }
//! }
//!
//! # fn main() { test_dynamic(); test_static_and_mixed(); test_vectors(); }
//! ```
//!
//! While this may be convenient, the default strategies for built-in types in `proptest` can
//! generate *any* number, including integers large enough to easily lead to overflow when used in
//! matrix operations, or even infinity or NaN values for floating-point types. Therefore
//! `Arbitrary` is rarely the method of choice for writing property-based tests.
//!
//! ### Notes on shrinking
//!
//! Due to some limitations of the current implementation, shrinking takes place by first
//! shrinking the matrix elements before trying to shrink the dimensions of the matrix.
//! This unfortunately often leads to the fact that a large number of shrinking iterations
//! are necessary to find a (nearly) minimal failing test case. As a workaround for this,
//! you can increase the maximum number of shrinking iterations when debugging. To do this,
//! simply set the `PROPTEST_MAX_SHRINK_ITERS` variable to a high number. For example:
//!
//! ```text
//! PROPTEST_MAX_SHRINK_ITERS=100000 cargo test my_failing_test
//! ```
use crate::allocator::Allocator;
use crate::{Const, DefaultAllocator, Dim, DimName, Dyn, OMatrix, Scalar, U1};
use proptest::arbitrary::Arbitrary;
use proptest::collection::vec;
use proptest::strategy::{BoxedStrategy, Just, NewTree, Strategy, ValueTree};
use proptest::test_runner::TestRunner;
use std::ops::RangeInclusive;
/// Parameters for arbitrary matrix generation.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct MatrixParameters<NParameters, R, C> {
/// The range of rows that may be generated.
pub rows: DimRange<R>,
/// The range of columns that may be generated.
pub cols: DimRange<C>,
/// Parameters for the `Arbitrary` implementation of the scalar values.
pub value_parameters: NParameters,
}
/// A range of allowed dimensions for use in generation of matrices.
///
/// The `DimRange` type is used to encode the range of dimensions that can be used for generation
/// of matrices with `proptest`. In most cases, you do not need to concern yourself with
/// `DimRange` directly, as it supports conversion from other types such as `U3` or inclusive
/// ranges such as `5 ..= 6`. The latter example corresponds to dimensions from (inclusive)
/// `Dyn(5)` to `Dyn(6)` (inclusive).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DimRange<D = Dyn>(RangeInclusive<D>);
impl<D: Dim> DimRange<D> {
/// The lower bound for dimensions generated.
pub fn lower_bound(&self) -> D {
*self.0.start()
}
/// The upper bound for dimensions generated.
pub fn upper_bound(&self) -> D {
*self.0.end()
}
}
impl<D: Dim> From<D> for DimRange<D> {
fn from(dim: D) -> Self {
DimRange(dim..=dim)
}
}
impl<D: Dim> From<RangeInclusive<D>> for DimRange<D> {
fn from(range: RangeInclusive<D>) -> Self |
}
impl From<RangeInclusive<usize>> for DimRange<Dyn> {
fn from(range: RangeInclusive<usize>) -> Self {
DimRange::from(Dyn(*range.start())..=Dyn(*range.end()))
}
}
impl<D: Dim> DimRange<D> {
/// Converts the `DimRange` into an instance of `RangeInclusive`.
pub fn to_range_inclusive(&self) -> RangeInclusive<usize> {
self.lower_bound().value()..=self.upper_bound().value()
}
}
impl From<usize> for DimRange<Dyn> {
fn from(dim: usize) -> Self {
DimRange::from(Dyn(dim))
}
}
/// The default range used for Dyn dimensions when generating arbitrary matrices.
fn dynamic_dim_range() -> DimRange<Dyn> {
DimRange::from(0..=6)
}
/// Create a strategy to generate matrices containing values drawn from the given strategy,
/// with rows and columns in the provided ranges.
///
/// ## Examples
/// ```
/// use nalgebra::proptest::matrix;
/// use nalgebra::{OMatrix, Const, Dyn};
/// use proptest::prelude::*;
///
/// proptest! {
/// # /*
/// #[test]
/// # */
/// fn my_test(a in matrix(0 .. 5i32, Const::<3>, 0 ..= 5)) {
/// // Let's make sure we've got the correct type first
/// let a: OMatrix<_, Const::<3>, Dyn> = a;
/// prop_assert!(a.nrows() == 3);
/// prop_assert!(a.ncols() <= 5);
/// prop_assert!(a.iter().all(|x_ij| *x_ij >= 0 && *x_ij < 5));
/// }
/// }
///
/// # fn main() { my_test(); }
/// ```
///
/// ## Limitations
/// The current implementation has some limitations that lead to suboptimal shrinking behavior.
/// See the [module-level documentation](index.html) for more.
pub fn matrix<R, C, ScalarStrategy>(
value_strategy: ScalarStrategy,
rows: impl Into<DimRange<R>>,
cols: impl Into<DimRange<C>>,
) -> MatrixStrategy<ScalarStrategy, R, C>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, R, C>,
{
matrix_(value_strategy, rows.into(), cols.into())
}
/// Same as `matrix`, but without the additional anonymous generic types
fn matrix_<R, C, ScalarStrategy>(
value_strategy: ScalarStrategy,
rows: DimRange<R>,
cols: DimRange<C>,
) -> MatrixStrategy<ScalarStrategy, R, C>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, R, C>,
{
let nrows = rows.lower_bound().value()..=rows.upper_bound().value();
let ncols = cols.lower_bound().value()..=cols.upper_bound().value();
// Even though we can use this function to generate fixed-size matrices,
// we currently generate all matrices with heap allocated Vec data.
// TODO: Avoid heap allocation for fixed-size matrices.
// Doing this *properly* would probably require us to implement a custom
// strategy and valuetree with custom shrinking logic, which is not trivial
// Perhaps more problematic, however, is the poor shrinking behavior the current setup leads to.
// Shrinking in proptest basically happens in "reverse" of the combinators, so
// by first generating the dimensions and then the elements, we get shrinking that first
// tries to completely shrink the individual elements before trying to reduce the dimension.
// This is clearly the opposite of what we want. I can't find any good way around this
// short of writing our own custom value tree, which we should probably do at some point.
// TODO: Custom implementation of value tree for better shrinking behavior.
let strategy = nrows
.prop_flat_map(move |nrows| (Just(nrows), ncols.clone()))
.prop_flat_map(move |(nrows, ncols)| {
(
Just(nrows),
Just(ncols),
vec(value_strategy.clone(), nrows * ncols),
)
})
.prop_map(|(nrows, ncols, values)| {
// Note: R/C::from_usize will panic if nrows/ncols does not fit in the dimension type.
// However, this should never fail, because we should only be generating
// this stuff in the first place
OMatrix::from_iterator_generic(R::from_usize(nrows), C::from_usize(ncols), values)
})
.boxed();
MatrixStrategy { strategy }
}
/// Create a strategy to generate column vectors containing values drawn from the given strategy,
/// with length in the provided range.
///
/// This is a convenience function for calling
/// [`matrix(value_strategy, length, U1)`](fn.matrix.html) and should
/// be used when you only want to generate column vectors, as it's simpler and makes the intent
/// clear.
pub fn vector<D, ScalarStrategy>(
value_strategy: ScalarStrategy,
length: impl Into<DimRange<D>>,
) -> MatrixStrategy<ScalarStrategy, D, U1>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
D: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, D>,
{
matrix_(value_strategy, length.into(), Const::<1>.into())
}
impl<NParameters, R, C> Default for MatrixParameters<NParameters, R, C>
where
NParameters: Default,
R: DimName,
C: DimName,
{
fn default() -> Self {
Self {
rows: DimRange::from(R::name()),
cols: DimRange::from(C::name()),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters, R> Default for MatrixParameters<NParameters, R, Dyn>
where
NParameters: Default,
R: DimName,
{
fn default() -> Self {
Self {
rows: DimRange::from(R::name()),
cols: dynamic_dim_range(),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters, C> Default for MatrixParameters<NParameters, Dyn, C>
where
NParameters: Default,
C: DimName,
{
fn default() -> Self {
Self {
rows: dynamic_dim_range(),
cols: DimRange::from(C::name()),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters> Default for MatrixParameters<NParameters, Dyn, Dyn>
where
NParameters: Default,
{
fn default() -> Self {
Self {
rows: dynamic_dim_range(),
cols: dynamic_dim_range(),
value_parameters: NParameters::default(),
}
}
}
impl<T, R, C> Arbitrary for OMatrix<T, R, C>
where
T: Scalar + Arbitrary,
<T as Arbitrary>::Strategy: Clone,
R: Dim,
C: Dim,
MatrixParameters<T::Parameters, R, C>: Default,
DefaultAllocator: Allocator<T, R, C>,
{
type Parameters = MatrixParameters<T::Parameters, R, C>;
fn arbitrary_with(args: Self::Parameters) -> Self::Strategy {
let value_strategy = T::arbitrary_with(args.value_parameters);
matrix(value_strategy, args.rows, args.cols)
}
type Strategy = MatrixStrategy<T::Strategy, R, C>;
}
/// A strategy for generating matrices.
#[derive(Debug, Clone)]
pub struct MatrixStrategy<NStrategy, R: Dim, C: Dim>
where
NStrategy: Strategy,
NStrategy::Value: Scalar,
DefaultAllocator: Allocator<NStrategy::Value, R, C>,
{
// For now we only internally hold a boxed strategy. The reason for introducing this
// separate wrapper struct is so that we can replace the strategy logic with custom logic
// later down the road without introducing significant breaking changes
strategy: BoxedStrategy<OMatrix<NStrategy::Value, R, C>>,
}
impl<NStrategy, R, C> Strategy for MatrixStrategy<NStrategy, R, C>
where
NStrategy: Strategy,
NStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<NStrategy::Value, R, C>,
{
type Tree = MatrixValueTree<NStrategy::Value, R, C>;
type Value = OMatrix<NStrategy::Value, R, C>;
fn new_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
let underlying_tree = self.strategy.new_tree(runner)?;
Ok(MatrixValueTree {
value_tree: underlying_tree,
})
}
}
/// A value tree for matrices.
pub struct MatrixValueTree<T, R, C>
where
T: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<T, R, C>,
{
// For now we only wrap a boxed value tree. The reason for wrapping is that this allows us
// to swap out the value tree logic down the road without significant breaking changes.
value_tree: Box<dyn ValueTree<Value = OMatrix<T, R, C>>>,
}
impl<T, R, C> ValueTree for MatrixValueTree<T, R, C>
where
T: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<T, R, C>,
{
type Value = OMatrix<T, R, C>;
fn current(&self) -> Self::Value {
self.value_tree.current()
}
fn simplify(&mut self) -> bool {
self.value_tree.simplify()
}
fn complicate(&mut self) -> bool {
self.value_tree.complicate()
}
}
| {
DimRange(range)
} | identifier_body |
mod.rs | //! `proptest`-related features for `nalgebra` data structures.
//!
//! **This module is only available when the `proptest-support` feature is enabled in `nalgebra`**.
//!
//! `proptest` is a library for *property-based testing*. While similar to `QuickCheck`,
//! which may be more familiar to some users, it has a more sophisticated design that
//! provides users with automatic invariant-preserving shrinking. This means that when using
//! `proptest`, you rarely need to write your own shrinkers - which is usually very difficult -
//! and can instead get this "for free". Moreover, `proptest` does not rely on a canonical
//! `Arbitrary` trait implementation like `QuickCheck`, though it does also provide this. For
//! more information, check out the [proptest docs](https://docs.rs/proptest/0.10.1/proptest/)
//! and the [proptest book](https://altsysrq.github.io/proptest-book/intro.html).
//!
//! This module provides users of `nalgebra` with tools to work with `nalgebra` types in
//! `proptest` tests. At present, this integration is at an early stage, and only
//! provides tools for generating matrices and vectors, and not any of the geometry types.
//! There are essentially two ways of using this functionality:
//!
//! - Using the [matrix](fn.matrix.html) function to generate matrices with constraints
//! on dimensions and elements.
//! - Relying on the `Arbitrary` implementation of `OMatrix`.
//!
//! The first variant is almost always preferred in practice. Read on to discover why.
//!
//! ### Using free function strategies
//!
//! In `proptest`, it is usually preferable to have free functions that generate *strategies*.
//! Currently, the [matrix](fn.matrix.html) function fills this role. The analogous function for
//! column vectors is [vector](fn.vector.html). Let's take a quick look at how it may be used:
//! ```
//! use nalgebra::proptest::matrix;
//! use proptest::prelude::*;
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn my_test(a in matrix(-5 ..= 5, 2 ..= 4, 1..=4)) {
//! // Generates matrices with elements in the range -5 ..= 5, rows in 2..=4 and
//! // columns in 1..=4.
//! }
//! }
//!
//! # fn main() { my_test(); }
//! ```
//!
//! In the above example, we generate matrices with constraints on the elements, as well as the
//! on the allowed dimensions. When a failing example is found, the resulting shrinking process
//! will preserve these invariants. We can use this to compose more advanced strategies.
//! For example, let's consider a toy example where we need to generate pairs of matrices
//! with exactly 3 rows fixed at compile-time and the same number of columns, but we want the
//! number of columns to vary. One way to do this is to use `proptest` combinators in combination
//! with [matrix](fn.matrix.html) as follows:
//!
//! ```
//! use nalgebra::{Dyn, OMatrix, Const};
//! use nalgebra::proptest::matrix;
//! use proptest::prelude::*;
//!
//! type MyMatrix = OMatrix<i32, Const::<3>, Dyn>;
//!
//! /// Returns a strategy for pairs of matrices with `U3` rows and the same number of
//! /// columns.
//! fn matrix_pairs() -> impl Strategy<Value=(MyMatrix, MyMatrix)> {
//! matrix(-5 ..= 5, Const::<3>, 0 ..= 10)
//! // We first generate the initial matrix `a`, and then depending on the concrete
//! // instances of `a`, we pick a second matrix with the same number of columns
//! .prop_flat_map(|a| {
//! let b = matrix(-5 .. 5, Const::<3>, a.ncols());
//! // This returns a new tuple strategy where we keep `a` fixed while
//! // the second item is a strategy that generates instances with the same
//! // dimensions as `a`
//! (Just(a), b)
//! })
//! }
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn my_test((a, b) in matrix_pairs()) {
//! // Let's double-check that the two matrices do indeed have the same number of
//! // columns
//! prop_assert_eq!(a.ncols(), b.ncols());
//! }
//! }
//!
//! # fn main() { my_test(); }
//! ```
//!
//! ### The `Arbitrary` implementation
//!
//! If you don't care about the dimensions of matrices, you can write tests like these:
//!
//! ```
//! use nalgebra::{DMatrix, DVector, Dyn, Matrix3, OMatrix, Vector3, U3};
//! use proptest::prelude::*;
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn test_dynamic(matrix: DMatrix<i32>) {
//! // This will generate arbitrary instances of `DMatrix` and also attempt
//! // to shrink/simplify them when test failures are encountered.
//! }
//!
//! # /*
//! #[test]
//! # */
//! fn test_static_and_mixed(matrix: Matrix3<i32>, matrix2: OMatrix<i32, U3, Dyn>) {
//! // Test some property involving these matrices
//! }
//!
//! # /*
//! #[test]
//! # */
//! fn test_vectors(fixed_size_vector: Vector3<i32>, dyn_vector: DVector<i32>) {
//! // Test some property involving these vectors
//! }
//! }
//!
//! # fn main() { test_dynamic(); test_static_and_mixed(); test_vectors(); }
//! ```
//!
//! While this may be convenient, the default strategies for built-in types in `proptest` can
//! generate *any* number, including integers large enough to easily lead to overflow when used in
//! matrix operations, or even infinity or NaN values for floating-point types. Therefore
//! `Arbitrary` is rarely the method of choice for writing property-based tests.
//!
//! ### Notes on shrinking
//!
//! Due to some limitations of the current implementation, shrinking takes place by first
//! shrinking the matrix elements before trying to shrink the dimensions of the matrix.
//! This unfortunately often leads to the fact that a large number of shrinking iterations
//! are necessary to find a (nearly) minimal failing test case. As a workaround for this,
//! you can increase the maximum number of shrinking iterations when debugging. To do this,
//! simply set the `PROPTEST_MAX_SHRINK_ITERS` variable to a high number. For example:
//!
//! ```text
//! PROPTEST_MAX_SHRINK_ITERS=100000 cargo test my_failing_test
//! ```
use crate::allocator::Allocator;
use crate::{Const, DefaultAllocator, Dim, DimName, Dyn, OMatrix, Scalar, U1};
use proptest::arbitrary::Arbitrary;
use proptest::collection::vec;
use proptest::strategy::{BoxedStrategy, Just, NewTree, Strategy, ValueTree};
use proptest::test_runner::TestRunner;
use std::ops::RangeInclusive;
/// Parameters for arbitrary matrix generation.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct MatrixParameters<NParameters, R, C> {
/// The range of rows that may be generated.
pub rows: DimRange<R>,
/// The range of columns that may be generated.
pub cols: DimRange<C>,
/// Parameters for the `Arbitrary` implementation of the scalar values.
pub value_parameters: NParameters,
}
/// A range of allowed dimensions for use in generation of matrices.
///
/// The `DimRange` type is used to encode the range of dimensions that can be used for generation
/// of matrices with `proptest`. In most cases, you do not need to concern yourself with
/// `DimRange` directly, as it supports conversion from other types such as `U3` or inclusive
/// ranges such as `5 ..= 6`. The latter example corresponds to dimensions from (inclusive)
/// `Dyn(5)` to `Dyn(6)` (inclusive).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DimRange<D = Dyn>(RangeInclusive<D>);
impl<D: Dim> DimRange<D> {
/// The lower bound for dimensions generated.
pub fn lower_bound(&self) -> D {
*self.0.start()
}
/// The upper bound for dimensions generated.
pub fn upper_bound(&self) -> D {
*self.0.end()
}
}
impl<D: Dim> From<D> for DimRange<D> {
fn from(dim: D) -> Self {
DimRange(dim..=dim)
}
}
impl<D: Dim> From<RangeInclusive<D>> for DimRange<D> {
fn from(range: RangeInclusive<D>) -> Self {
DimRange(range)
}
}
impl From<RangeInclusive<usize>> for DimRange<Dyn> {
fn from(range: RangeInclusive<usize>) -> Self {
DimRange::from(Dyn(*range.start())..=Dyn(*range.end()))
}
}
impl<D: Dim> DimRange<D> {
/// Converts the `DimRange` into an instance of `RangeInclusive`.
pub fn to_range_inclusive(&self) -> RangeInclusive<usize> {
self.lower_bound().value()..=self.upper_bound().value()
}
}
impl From<usize> for DimRange<Dyn> {
fn from(dim: usize) -> Self {
DimRange::from(Dyn(dim))
}
}
/// The default range used for Dyn dimensions when generating arbitrary matrices.
fn dynamic_dim_range() -> DimRange<Dyn> {
DimRange::from(0..=6)
}
/// Create a strategy to generate matrices containing values drawn from the given strategy,
/// with rows and columns in the provided ranges.
///
/// ## Examples
/// ```
/// use nalgebra::proptest::matrix;
/// use nalgebra::{OMatrix, Const, Dyn};
/// use proptest::prelude::*;
///
/// proptest! {
/// # /*
/// #[test]
/// # */
/// fn my_test(a in matrix(0 .. 5i32, Const::<3>, 0 ..= 5)) {
/// // Let's make sure we've got the correct type first
/// let a: OMatrix<_, Const::<3>, Dyn> = a;
/// prop_assert!(a.nrows() == 3);
/// prop_assert!(a.ncols() <= 5);
/// prop_assert!(a.iter().all(|x_ij| *x_ij >= 0 && *x_ij < 5));
/// }
/// }
///
/// # fn main() { my_test(); }
/// ```
///
/// ## Limitations
/// The current implementation has some limitations that lead to suboptimal shrinking behavior.
/// See the [module-level documentation](index.html) for more.
pub fn matrix<R, C, ScalarStrategy>(
value_strategy: ScalarStrategy,
rows: impl Into<DimRange<R>>,
cols: impl Into<DimRange<C>>,
) -> MatrixStrategy<ScalarStrategy, R, C>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, R, C>,
{
matrix_(value_strategy, rows.into(), cols.into())
}
/// Same as `matrix`, but without the additional anonymous generic types
fn matrix_<R, C, ScalarStrategy>(
value_strategy: ScalarStrategy,
rows: DimRange<R>,
cols: DimRange<C>,
) -> MatrixStrategy<ScalarStrategy, R, C>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, R, C>,
{
let nrows = rows.lower_bound().value()..=rows.upper_bound().value();
let ncols = cols.lower_bound().value()..=cols.upper_bound().value();
// Even though we can use this function to generate fixed-size matrices,
// we currently generate all matrices with heap allocated Vec data.
// TODO: Avoid heap allocation for fixed-size matrices.
// Doing this *properly* would probably require us to implement a custom
// strategy and valuetree with custom shrinking logic, which is not trivial
// Perhaps more problematic, however, is the poor shrinking behavior the current setup leads to.
// Shrinking in proptest basically happens in "reverse" of the combinators, so
// by first generating the dimensions and then the elements, we get shrinking that first
// tries to completely shrink the individual elements before trying to reduce the dimension.
// This is clearly the opposite of what we want. I can't find any good way around this
// short of writing our own custom value tree, which we should probably do at some point.
// TODO: Custom implementation of value tree for better shrinking behavior.
let strategy = nrows
.prop_flat_map(move |nrows| (Just(nrows), ncols.clone()))
.prop_flat_map(move |(nrows, ncols)| {
(
Just(nrows),
Just(ncols),
vec(value_strategy.clone(), nrows * ncols),
)
})
.prop_map(|(nrows, ncols, values)| {
// Note: R/C::from_usize will panic if nrows/ncols does not fit in the dimension type.
// However, this should never fail, because we should only be generating
// this stuff in the first place
OMatrix::from_iterator_generic(R::from_usize(nrows), C::from_usize(ncols), values)
})
.boxed();
MatrixStrategy { strategy }
}
/// Create a strategy to generate column vectors containing values drawn from the given strategy,
/// with length in the provided range.
///
/// This is a convenience function for calling
/// [`matrix(value_strategy, length, U1)`](fn.matrix.html) and should
/// be used when you only want to generate column vectors, as it's simpler and makes the intent
/// clear.
pub fn vector<D, ScalarStrategy>(
value_strategy: ScalarStrategy,
length: impl Into<DimRange<D>>,
) -> MatrixStrategy<ScalarStrategy, D, U1>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
D: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, D>,
{
matrix_(value_strategy, length.into(), Const::<1>.into())
}
impl<NParameters, R, C> Default for MatrixParameters<NParameters, R, C>
where
NParameters: Default,
R: DimName,
C: DimName,
{
fn default() -> Self {
Self {
rows: DimRange::from(R::name()),
cols: DimRange::from(C::name()),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters, R> Default for MatrixParameters<NParameters, R, Dyn>
where
NParameters: Default,
R: DimName,
{
fn | () -> Self {
Self {
rows: DimRange::from(R::name()),
cols: dynamic_dim_range(),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters, C> Default for MatrixParameters<NParameters, Dyn, C>
where
NParameters: Default,
C: DimName,
{
fn default() -> Self {
Self {
rows: dynamic_dim_range(),
cols: DimRange::from(C::name()),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters> Default for MatrixParameters<NParameters, Dyn, Dyn>
where
NParameters: Default,
{
fn default() -> Self {
Self {
rows: dynamic_dim_range(),
cols: dynamic_dim_range(),
value_parameters: NParameters::default(),
}
}
}
impl<T, R, C> Arbitrary for OMatrix<T, R, C>
where
T: Scalar + Arbitrary,
<T as Arbitrary>::Strategy: Clone,
R: Dim,
C: Dim,
MatrixParameters<T::Parameters, R, C>: Default,
DefaultAllocator: Allocator<T, R, C>,
{
type Parameters = MatrixParameters<T::Parameters, R, C>;
fn arbitrary_with(args: Self::Parameters) -> Self::Strategy {
let value_strategy = T::arbitrary_with(args.value_parameters);
matrix(value_strategy, args.rows, args.cols)
}
type Strategy = MatrixStrategy<T::Strategy, R, C>;
}
/// A strategy for generating matrices.
#[derive(Debug, Clone)]
pub struct MatrixStrategy<NStrategy, R: Dim, C: Dim>
where
NStrategy: Strategy,
NStrategy::Value: Scalar,
DefaultAllocator: Allocator<NStrategy::Value, R, C>,
{
// For now we only internally hold a boxed strategy. The reason for introducing this
// separate wrapper struct is so that we can replace the strategy logic with custom logic
// later down the road without introducing significant breaking changes
strategy: BoxedStrategy<OMatrix<NStrategy::Value, R, C>>,
}
impl<NStrategy, R, C> Strategy for MatrixStrategy<NStrategy, R, C>
where
NStrategy: Strategy,
NStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<NStrategy::Value, R, C>,
{
type Tree = MatrixValueTree<NStrategy::Value, R, C>;
type Value = OMatrix<NStrategy::Value, R, C>;
fn new_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
let underlying_tree = self.strategy.new_tree(runner)?;
Ok(MatrixValueTree {
value_tree: underlying_tree,
})
}
}
/// A value tree for matrices.
pub struct MatrixValueTree<T, R, C>
where
T: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<T, R, C>,
{
// For now we only wrap a boxed value tree. The reason for wrapping is that this allows us
// to swap out the value tree logic down the road without significant breaking changes.
value_tree: Box<dyn ValueTree<Value = OMatrix<T, R, C>>>,
}
impl<T, R, C> ValueTree for MatrixValueTree<T, R, C>
where
T: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<T, R, C>,
{
type Value = OMatrix<T, R, C>;
fn current(&self) -> Self::Value {
self.value_tree.current()
}
fn simplify(&mut self) -> bool {
self.value_tree.simplify()
}
fn complicate(&mut self) -> bool {
self.value_tree.complicate()
}
}
| default | identifier_name |
probe_reports.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
failure::{Error, ResultExt},
fidl::endpoints::ServiceMarker,
fidl_fuchsia_overnet::ServiceConsumerProxyInterface,
fidl_fuchsia_overnet_protocol::{
DiagnosticMarker, DiagnosticProxy, LinkDiagnosticInfo, NodeDescription, NodeId,
PeerConnectionDiagnosticInfo, ProbeResult, ProbeSelector,
},
futures::{
future::{select, Either},
prelude::*,
},
std::{collections::HashMap, time::Duration},
structopt::StructOpt,
};
const PROBE_TIMEOUT: Duration = Duration::from_millis(500);
const LIST_PEERS_TIMEOUT: Duration = Duration::from_millis(500);
async fn timeout_after<R>(
fut: impl Unpin + Future<Output = Result<R, Error>>,
dur: Duration,
timeout_result: Error,
) -> Result<R, Error> {
let (tx, rx) = futures::channel::oneshot::channel();
std::thread::spawn(move || {
std::thread::sleep(dur);
let _ = tx.send(timeout_result);
});
match select(fut, rx).await {
Either::Left((r, _)) => r,
Either::Right((Ok(r), _)) => Err(r),
Either::Right((_, _)) => Err(failure::format_err!("Canceled timeout")),
}
}
async fn probe_node(
mut node_id: NodeId,
probe_bits: ProbeSelector,
) -> Result<(NodeId, ProbeResult), Error> {
timeout_after(
async move {
let (s, p) = fidl::Channel::create().context("failed to create zx channel")?;
hoist::connect_as_service_consumer()?.connect_to_service(
&mut node_id,
DiagnosticMarker::NAME,
s,
)?;
let probe_result = DiagnosticProxy::new(
fidl::AsyncChannel::from_channel(p).context("failed to make async channel")?,
)
.probe(probe_bits)
.await?;
Ok((node_id, probe_result))
}
.boxed(),
PROBE_TIMEOUT,
failure::format_err!("Probe timed out"),
)
.await
}
// List peers, but wait for things to settle out first
async fn list_peers() -> Result<(NodeId, Vec<NodeId>), Error> {
let svc = hoist::connect_as_service_consumer()?;
// Do an initial query without timeout
let mut peers = svc.list_peers().await?;
// Now loop until we see an error
loop {
match timeout_after(
async { Ok(svc.list_peers().await?) }.boxed(),
LIST_PEERS_TIMEOUT,
failure::format_err!("Timeout"),
)
.await
{
Ok(r) => peers = r,
Err(_) => break,
}
}
let own_id = (|| -> Result<NodeId, Error> {
for peer in peers.iter() {
if peer.is_self {
return Ok(peer.id);
}
}
failure::bail!("Cannot find myself");
})()?;
let peers = peers.into_iter().map(|peer| peer.id).collect();
Ok((own_id, peers))
}
async fn probe(
mut descriptions: Option<&mut HashMap<NodeId, NodeDescription>>,
mut peer_connections: Option<&mut Vec<PeerConnectionDiagnosticInfo>>,
mut links: Option<&mut Vec<LinkDiagnosticInfo>>,
) -> Result<NodeId, Error> {
let probe_bits = ProbeSelector::empty()
| descriptions.as_ref().map_or(ProbeSelector::empty(), |_| ProbeSelector::NodeDescription)
| peer_connections
.as_ref()
.map_or(ProbeSelector::empty(), |_| ProbeSelector::PeerConnections)
| links.as_ref().map_or(ProbeSelector::empty(), |_| ProbeSelector::Links);
assert_ne!(probe_bits, ProbeSelector::empty());
let (own_id, peers) = list_peers().await?;
let mut futures: futures::stream::FuturesUnordered<_> =
peers.into_iter().map(|peer| probe_node(peer, probe_bits)).collect();
while let Some((node_id, result)) = futures.try_next().await? {
if let Some(node_description) = result.node_description {
if let Some(ref mut descriptions) = descriptions {
descriptions.insert(node_id, node_description);
}
}
if let Some(node_peer_connections) = result.peer_connections {
for peer_connection in node_peer_connections.iter() {
if let Some(source) = peer_connection.source {
if node_id != source {
failure::bail!("Invalid source node id {:?} from {:?}", source, node_id);
}
} else {
failure::bail!("No source node id from {:?}", node_id);
}
if peer_connection.destination.is_none() {
failure::bail!("No destination node id from {:?}", node_id);
}
}
if let Some(ref mut peer_connections) = peer_connections |
}
if let Some(node_links) = result.links {
for link in node_links.iter() {
if let Some(source) = link.source {
if node_id != source {
failure::bail!("Invalid source node id {:?} from {:?}", source, node_id);
}
} else {
failure::bail!("No source node id from {:?}", node_id);
}
if link.destination.is_none() {
failure::bail!("No destination node id from {:?}", node_id);
}
}
if let Some(ref mut links) = links {
links.extend(node_links.into_iter());
}
}
}
Ok(own_id)
}
enum Attr {
HTML(String),
Text(String),
Bool(bool),
}
struct AttrWriter {
attrs: std::collections::BTreeMap<String, Attr>,
}
impl AttrWriter {
fn new() -> Self {
AttrWriter { attrs: std::collections::BTreeMap::new() }
}
fn set_value(&mut self, key: &str, attr: Attr) -> &mut Self {
self.attrs.insert(key.to_string(), attr);
self
}
fn set(&mut self, key: &str, value: &str) -> &mut Self {
self.set_value(key, Attr::Text(value.to_string()))
}
fn set_html(&mut self, key: &str, value: &str) -> &mut Self {
self.set_value(key, Attr::HTML(value.to_string()))
}
fn set_bool(&mut self, key: &str, value: bool) -> &mut Self {
self.set_value(key, Attr::Bool(value))
}
fn render(self) -> String {
let mut out = String::new();
for (key, value) in self.attrs.into_iter() {
out += if out.is_empty() { " [" } else { ", " };
out += &key;
match value {
Attr::HTML(s) => {
out += "=<";
out += &s;
out += ">";
}
Attr::Text(s) => {
out += "=\"";
out += &s;
out += "\"";
}
Attr::Bool(true) => out += "=true",
Attr::Bool(false) => out += "=false",
}
}
if !out.is_empty() {
out += "]";
}
out
}
}
struct LabelAttrWriter {
out: String,
}
impl LabelAttrWriter {
fn new() -> LabelAttrWriter {
LabelAttrWriter { out: "<table border=\"0\">".to_string() }
}
fn set<T: std::fmt::Display>(mut self, name: &str, value: Option<T>) -> Self {
if let Some(value) = value {
self.out += &format!("<tr><td>{}</td><td>{}</td></tr>", name, value);
}
self
}
fn render(self) -> String {
self.out + "</table>"
}
}
#[derive(StructOpt)]
pub struct FullMapArgs {
#[structopt(short, long)]
exclude_self: bool,
}
pub async fn full_map(args: FullMapArgs) -> Result<String, Error> {
let mut descriptions = HashMap::new();
let mut peer_connections = Vec::new();
let mut links = Vec::new();
let own_id =
probe(Some(&mut descriptions), Some(&mut peer_connections), Some(&mut links)).await?;
let mut out = String::new();
out += "digraph G {\n";
for (node_id, description) in descriptions.iter() {
let is_self = node_id.id == own_id.id;
if args.exclude_self && is_self {
continue;
}
let mut attrs = AttrWriter::new();
if is_self {
attrs.set("shape", "box");
}
let mut label = String::new();
if let Some(os) = description.operating_system {
label += &format!("{:?}", os);
label += " ";
}
if let Some(imp) = description.implementation {
label += &format!("{:?}", imp);
label += ":";
}
label += &format!("{}", node_id.id);
attrs.set("label", &label);
out += &format!(" _{}{}\n", node_id.id, attrs.render());
}
for conn in peer_connections.iter() {
let source = conn.source.unwrap();
let dest = conn.destination.unwrap();
if args.exclude_self && (source.id == own_id.id || dest.id == own_id.id) {
continue;
}
let mut attrs = AttrWriter::new();
attrs
.set(
"color",
match conn.is_client {
None => "gray",
Some(true) => "red",
Some(false) => "magenta",
},
)
.set("weight", "0.9")
.set_bool("constraint", true);
attrs.set(
"style",
match conn.is_established {
None => "dotted",
Some(true) => "solid",
Some(false) => "dashed",
},
);
attrs.set_html(
"label",
&LabelAttrWriter::new()
.set("recv", conn.received_packets)
.set("sent", conn.sent_packets)
.set("lost", conn.lost_packets)
.set("rtt", conn.round_trip_time_microseconds)
.set("cwnd", conn.congestion_window_bytes)
.set("msgsent", conn.messages_sent)
.set("msgbsent", conn.bytes_sent)
.set("connect_to_service_sends", conn.connect_to_service_sends)
.set("connect_to_service_send_bytes", conn.connect_to_service_send_bytes)
.set("update_node_description_sends", conn.update_node_description_sends)
.set("update_node_description_send_bytes", conn.update_node_description_send_bytes)
.set("update_link_status_sends", conn.update_link_status_sends)
.set("update_link_status_send_bytes", conn.update_link_status_send_bytes)
.set("update_link_status_ack_sends", conn.update_link_status_ack_sends)
.set("update_link_status_ack_send_bytes", conn.update_link_status_ack_send_bytes)
.render(),
);
out += &format!(" _{} -> _{}{}\n", source.id, dest.id, attrs.render());
}
for link in links {
let source = link.source.unwrap();
let dest = link.destination.unwrap();
if args.exclude_self && (source.id == own_id.id || dest.id == own_id.id) {
continue;
}
let mut attrs = AttrWriter::new();
attrs.set("color", "blue").set("weight", "1.0").set("penwidth", "4.0");
attrs.set_html(
"label",
&LabelAttrWriter::new()
.set("id", link.source_local_id)
.set("recv", link.received_packets)
.set("sent", link.sent_packets)
.set("recvb", link.received_bytes)
.set("sentb", link.sent_bytes)
.set("pings", link.pings_sent)
.set("fwd", link.packets_forwarded)
.set("rtt", link.round_trip_time_microseconds)
.render(),
);
out += &format!(" _{} -> _{}{}\n", source.id, dest.id, attrs.render());
}
out += "}\n";
Ok(out)
}
| {
peer_connections.extend(node_peer_connections.into_iter());
} | conditional_block |
probe_reports.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
failure::{Error, ResultExt},
fidl::endpoints::ServiceMarker,
fidl_fuchsia_overnet::ServiceConsumerProxyInterface,
fidl_fuchsia_overnet_protocol::{
DiagnosticMarker, DiagnosticProxy, LinkDiagnosticInfo, NodeDescription, NodeId,
PeerConnectionDiagnosticInfo, ProbeResult, ProbeSelector,
},
futures::{
future::{select, Either},
prelude::*,
},
std::{collections::HashMap, time::Duration},
structopt::StructOpt,
};
const PROBE_TIMEOUT: Duration = Duration::from_millis(500);
const LIST_PEERS_TIMEOUT: Duration = Duration::from_millis(500);
async fn timeout_after<R>(
fut: impl Unpin + Future<Output = Result<R, Error>>,
dur: Duration,
timeout_result: Error,
) -> Result<R, Error> {
let (tx, rx) = futures::channel::oneshot::channel();
std::thread::spawn(move || {
std::thread::sleep(dur);
let _ = tx.send(timeout_result);
});
match select(fut, rx).await {
Either::Left((r, _)) => r,
Either::Right((Ok(r), _)) => Err(r),
Either::Right((_, _)) => Err(failure::format_err!("Canceled timeout")),
}
}
async fn probe_node(
mut node_id: NodeId,
probe_bits: ProbeSelector,
) -> Result<(NodeId, ProbeResult), Error> {
timeout_after(
async move {
let (s, p) = fidl::Channel::create().context("failed to create zx channel")?;
hoist::connect_as_service_consumer()?.connect_to_service(
&mut node_id,
DiagnosticMarker::NAME,
s,
)?;
let probe_result = DiagnosticProxy::new(
fidl::AsyncChannel::from_channel(p).context("failed to make async channel")?,
)
.probe(probe_bits)
.await?;
Ok((node_id, probe_result))
}
.boxed(),
PROBE_TIMEOUT,
failure::format_err!("Probe timed out"),
)
.await
}
// List peers, but wait for things to settle out first
async fn list_peers() -> Result<(NodeId, Vec<NodeId>), Error> {
let svc = hoist::connect_as_service_consumer()?;
// Do an initial query without timeout
let mut peers = svc.list_peers().await?;
// Now loop until we see an error
loop {
match timeout_after(
async { Ok(svc.list_peers().await?) }.boxed(),
LIST_PEERS_TIMEOUT,
failure::format_err!("Timeout"),
)
.await
{
Ok(r) => peers = r,
Err(_) => break,
}
}
let own_id = (|| -> Result<NodeId, Error> {
for peer in peers.iter() {
if peer.is_self {
return Ok(peer.id);
}
}
failure::bail!("Cannot find myself");
})()?;
let peers = peers.into_iter().map(|peer| peer.id).collect();
Ok((own_id, peers))
}
async fn probe(
mut descriptions: Option<&mut HashMap<NodeId, NodeDescription>>,
mut peer_connections: Option<&mut Vec<PeerConnectionDiagnosticInfo>>,
mut links: Option<&mut Vec<LinkDiagnosticInfo>>,
) -> Result<NodeId, Error> {
let probe_bits = ProbeSelector::empty()
| descriptions.as_ref().map_or(ProbeSelector::empty(), |_| ProbeSelector::NodeDescription)
| peer_connections
.as_ref()
.map_or(ProbeSelector::empty(), |_| ProbeSelector::PeerConnections)
| links.as_ref().map_or(ProbeSelector::empty(), |_| ProbeSelector::Links);
assert_ne!(probe_bits, ProbeSelector::empty());
let (own_id, peers) = list_peers().await?;
let mut futures: futures::stream::FuturesUnordered<_> =
peers.into_iter().map(|peer| probe_node(peer, probe_bits)).collect();
while let Some((node_id, result)) = futures.try_next().await? {
if let Some(node_description) = result.node_description {
if let Some(ref mut descriptions) = descriptions {
descriptions.insert(node_id, node_description);
}
}
if let Some(node_peer_connections) = result.peer_connections {
for peer_connection in node_peer_connections.iter() {
if let Some(source) = peer_connection.source {
if node_id != source {
failure::bail!("Invalid source node id {:?} from {:?}", source, node_id);
}
} else {
failure::bail!("No source node id from {:?}", node_id);
}
if peer_connection.destination.is_none() {
failure::bail!("No destination node id from {:?}", node_id);
}
}
if let Some(ref mut peer_connections) = peer_connections {
peer_connections.extend(node_peer_connections.into_iter());
}
}
if let Some(node_links) = result.links {
for link in node_links.iter() {
if let Some(source) = link.source {
if node_id != source {
failure::bail!("Invalid source node id {:?} from {:?}", source, node_id);
}
} else {
failure::bail!("No source node id from {:?}", node_id);
}
if link.destination.is_none() {
failure::bail!("No destination node id from {:?}", node_id);
}
}
if let Some(ref mut links) = links {
links.extend(node_links.into_iter());
}
}
}
Ok(own_id)
}
enum Attr {
HTML(String),
Text(String),
Bool(bool),
}
struct AttrWriter {
attrs: std::collections::BTreeMap<String, Attr>,
}
impl AttrWriter {
fn new() -> Self {
AttrWriter { attrs: std::collections::BTreeMap::new() }
}
fn set_value(&mut self, key: &str, attr: Attr) -> &mut Self {
self.attrs.insert(key.to_string(), attr);
self
}
fn set(&mut self, key: &str, value: &str) -> &mut Self {
self.set_value(key, Attr::Text(value.to_string()))
}
fn | (&mut self, key: &str, value: &str) -> &mut Self {
self.set_value(key, Attr::HTML(value.to_string()))
}
fn set_bool(&mut self, key: &str, value: bool) -> &mut Self {
self.set_value(key, Attr::Bool(value))
}
fn render(self) -> String {
let mut out = String::new();
for (key, value) in self.attrs.into_iter() {
out += if out.is_empty() { " [" } else { ", " };
out += &key;
match value {
Attr::HTML(s) => {
out += "=<";
out += &s;
out += ">";
}
Attr::Text(s) => {
out += "=\"";
out += &s;
out += "\"";
}
Attr::Bool(true) => out += "=true",
Attr::Bool(false) => out += "=false",
}
}
if !out.is_empty() {
out += "]";
}
out
}
}
struct LabelAttrWriter {
out: String,
}
impl LabelAttrWriter {
fn new() -> LabelAttrWriter {
LabelAttrWriter { out: "<table border=\"0\">".to_string() }
}
fn set<T: std::fmt::Display>(mut self, name: &str, value: Option<T>) -> Self {
if let Some(value) = value {
self.out += &format!("<tr><td>{}</td><td>{}</td></tr>", name, value);
}
self
}
fn render(self) -> String {
self.out + "</table>"
}
}
#[derive(StructOpt)]
pub struct FullMapArgs {
#[structopt(short, long)]
exclude_self: bool,
}
pub async fn full_map(args: FullMapArgs) -> Result<String, Error> {
let mut descriptions = HashMap::new();
let mut peer_connections = Vec::new();
let mut links = Vec::new();
let own_id =
probe(Some(&mut descriptions), Some(&mut peer_connections), Some(&mut links)).await?;
let mut out = String::new();
out += "digraph G {\n";
for (node_id, description) in descriptions.iter() {
let is_self = node_id.id == own_id.id;
if args.exclude_self && is_self {
continue;
}
let mut attrs = AttrWriter::new();
if is_self {
attrs.set("shape", "box");
}
let mut label = String::new();
if let Some(os) = description.operating_system {
label += &format!("{:?}", os);
label += " ";
}
if let Some(imp) = description.implementation {
label += &format!("{:?}", imp);
label += ":";
}
label += &format!("{}", node_id.id);
attrs.set("label", &label);
out += &format!(" _{}{}\n", node_id.id, attrs.render());
}
for conn in peer_connections.iter() {
let source = conn.source.unwrap();
let dest = conn.destination.unwrap();
if args.exclude_self && (source.id == own_id.id || dest.id == own_id.id) {
continue;
}
let mut attrs = AttrWriter::new();
attrs
.set(
"color",
match conn.is_client {
None => "gray",
Some(true) => "red",
Some(false) => "magenta",
},
)
.set("weight", "0.9")
.set_bool("constraint", true);
attrs.set(
"style",
match conn.is_established {
None => "dotted",
Some(true) => "solid",
Some(false) => "dashed",
},
);
attrs.set_html(
"label",
&LabelAttrWriter::new()
.set("recv", conn.received_packets)
.set("sent", conn.sent_packets)
.set("lost", conn.lost_packets)
.set("rtt", conn.round_trip_time_microseconds)
.set("cwnd", conn.congestion_window_bytes)
.set("msgsent", conn.messages_sent)
.set("msgbsent", conn.bytes_sent)
.set("connect_to_service_sends", conn.connect_to_service_sends)
.set("connect_to_service_send_bytes", conn.connect_to_service_send_bytes)
.set("update_node_description_sends", conn.update_node_description_sends)
.set("update_node_description_send_bytes", conn.update_node_description_send_bytes)
.set("update_link_status_sends", conn.update_link_status_sends)
.set("update_link_status_send_bytes", conn.update_link_status_send_bytes)
.set("update_link_status_ack_sends", conn.update_link_status_ack_sends)
.set("update_link_status_ack_send_bytes", conn.update_link_status_ack_send_bytes)
.render(),
);
out += &format!(" _{} -> _{}{}\n", source.id, dest.id, attrs.render());
}
for link in links {
let source = link.source.unwrap();
let dest = link.destination.unwrap();
if args.exclude_self && (source.id == own_id.id || dest.id == own_id.id) {
continue;
}
let mut attrs = AttrWriter::new();
attrs.set("color", "blue").set("weight", "1.0").set("penwidth", "4.0");
attrs.set_html(
"label",
&LabelAttrWriter::new()
.set("id", link.source_local_id)
.set("recv", link.received_packets)
.set("sent", link.sent_packets)
.set("recvb", link.received_bytes)
.set("sentb", link.sent_bytes)
.set("pings", link.pings_sent)
.set("fwd", link.packets_forwarded)
.set("rtt", link.round_trip_time_microseconds)
.render(),
);
out += &format!(" _{} -> _{}{}\n", source.id, dest.id, attrs.render());
}
out += "}\n";
Ok(out)
}
| set_html | identifier_name |
probe_reports.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
failure::{Error, ResultExt},
fidl::endpoints::ServiceMarker,
fidl_fuchsia_overnet::ServiceConsumerProxyInterface,
fidl_fuchsia_overnet_protocol::{
DiagnosticMarker, DiagnosticProxy, LinkDiagnosticInfo, NodeDescription, NodeId,
PeerConnectionDiagnosticInfo, ProbeResult, ProbeSelector,
},
futures::{
future::{select, Either},
prelude::*,
},
std::{collections::HashMap, time::Duration},
structopt::StructOpt,
};
const PROBE_TIMEOUT: Duration = Duration::from_millis(500);
const LIST_PEERS_TIMEOUT: Duration = Duration::from_millis(500);
async fn timeout_after<R>(
fut: impl Unpin + Future<Output = Result<R, Error>>,
dur: Duration,
timeout_result: Error,
) -> Result<R, Error> {
let (tx, rx) = futures::channel::oneshot::channel();
std::thread::spawn(move || {
std::thread::sleep(dur);
let _ = tx.send(timeout_result);
});
match select(fut, rx).await {
Either::Left((r, _)) => r,
Either::Right((Ok(r), _)) => Err(r),
Either::Right((_, _)) => Err(failure::format_err!("Canceled timeout")),
}
}
async fn probe_node(
mut node_id: NodeId,
probe_bits: ProbeSelector,
) -> Result<(NodeId, ProbeResult), Error> {
timeout_after(
async move {
let (s, p) = fidl::Channel::create().context("failed to create zx channel")?;
hoist::connect_as_service_consumer()?.connect_to_service(
&mut node_id,
DiagnosticMarker::NAME,
s,
)?;
let probe_result = DiagnosticProxy::new(
fidl::AsyncChannel::from_channel(p).context("failed to make async channel")?,
)
.probe(probe_bits)
.await?;
Ok((node_id, probe_result))
}
.boxed(),
PROBE_TIMEOUT,
failure::format_err!("Probe timed out"),
)
.await
}
// List peers, but wait for things to settle out first
async fn list_peers() -> Result<(NodeId, Vec<NodeId>), Error> {
let svc = hoist::connect_as_service_consumer()?;
// Do an initial query without timeout
let mut peers = svc.list_peers().await?;
// Now loop until we see an error
loop {
match timeout_after(
async { Ok(svc.list_peers().await?) }.boxed(),
LIST_PEERS_TIMEOUT,
failure::format_err!("Timeout"), | {
Ok(r) => peers = r,
Err(_) => break,
}
}
let own_id = (|| -> Result<NodeId, Error> {
for peer in peers.iter() {
if peer.is_self {
return Ok(peer.id);
}
}
failure::bail!("Cannot find myself");
})()?;
let peers = peers.into_iter().map(|peer| peer.id).collect();
Ok((own_id, peers))
}
async fn probe(
mut descriptions: Option<&mut HashMap<NodeId, NodeDescription>>,
mut peer_connections: Option<&mut Vec<PeerConnectionDiagnosticInfo>>,
mut links: Option<&mut Vec<LinkDiagnosticInfo>>,
) -> Result<NodeId, Error> {
let probe_bits = ProbeSelector::empty()
| descriptions.as_ref().map_or(ProbeSelector::empty(), |_| ProbeSelector::NodeDescription)
| peer_connections
.as_ref()
.map_or(ProbeSelector::empty(), |_| ProbeSelector::PeerConnections)
| links.as_ref().map_or(ProbeSelector::empty(), |_| ProbeSelector::Links);
assert_ne!(probe_bits, ProbeSelector::empty());
let (own_id, peers) = list_peers().await?;
let mut futures: futures::stream::FuturesUnordered<_> =
peers.into_iter().map(|peer| probe_node(peer, probe_bits)).collect();
while let Some((node_id, result)) = futures.try_next().await? {
if let Some(node_description) = result.node_description {
if let Some(ref mut descriptions) = descriptions {
descriptions.insert(node_id, node_description);
}
}
if let Some(node_peer_connections) = result.peer_connections {
for peer_connection in node_peer_connections.iter() {
if let Some(source) = peer_connection.source {
if node_id != source {
failure::bail!("Invalid source node id {:?} from {:?}", source, node_id);
}
} else {
failure::bail!("No source node id from {:?}", node_id);
}
if peer_connection.destination.is_none() {
failure::bail!("No destination node id from {:?}", node_id);
}
}
if let Some(ref mut peer_connections) = peer_connections {
peer_connections.extend(node_peer_connections.into_iter());
}
}
if let Some(node_links) = result.links {
for link in node_links.iter() {
if let Some(source) = link.source {
if node_id != source {
failure::bail!("Invalid source node id {:?} from {:?}", source, node_id);
}
} else {
failure::bail!("No source node id from {:?}", node_id);
}
if link.destination.is_none() {
failure::bail!("No destination node id from {:?}", node_id);
}
}
if let Some(ref mut links) = links {
links.extend(node_links.into_iter());
}
}
}
Ok(own_id)
}
enum Attr {
HTML(String),
Text(String),
Bool(bool),
}
struct AttrWriter {
attrs: std::collections::BTreeMap<String, Attr>,
}
impl AttrWriter {
fn new() -> Self {
AttrWriter { attrs: std::collections::BTreeMap::new() }
}
fn set_value(&mut self, key: &str, attr: Attr) -> &mut Self {
self.attrs.insert(key.to_string(), attr);
self
}
fn set(&mut self, key: &str, value: &str) -> &mut Self {
self.set_value(key, Attr::Text(value.to_string()))
}
fn set_html(&mut self, key: &str, value: &str) -> &mut Self {
self.set_value(key, Attr::HTML(value.to_string()))
}
fn set_bool(&mut self, key: &str, value: bool) -> &mut Self {
self.set_value(key, Attr::Bool(value))
}
fn render(self) -> String {
let mut out = String::new();
for (key, value) in self.attrs.into_iter() {
out += if out.is_empty() { " [" } else { ", " };
out += &key;
match value {
Attr::HTML(s) => {
out += "=<";
out += &s;
out += ">";
}
Attr::Text(s) => {
out += "=\"";
out += &s;
out += "\"";
}
Attr::Bool(true) => out += "=true",
Attr::Bool(false) => out += "=false",
}
}
if !out.is_empty() {
out += "]";
}
out
}
}
struct LabelAttrWriter {
out: String,
}
impl LabelAttrWriter {
fn new() -> LabelAttrWriter {
LabelAttrWriter { out: "<table border=\"0\">".to_string() }
}
fn set<T: std::fmt::Display>(mut self, name: &str, value: Option<T>) -> Self {
if let Some(value) = value {
self.out += &format!("<tr><td>{}</td><td>{}</td></tr>", name, value);
}
self
}
fn render(self) -> String {
self.out + "</table>"
}
}
#[derive(StructOpt)]
pub struct FullMapArgs {
#[structopt(short, long)]
exclude_self: bool,
}
pub async fn full_map(args: FullMapArgs) -> Result<String, Error> {
let mut descriptions = HashMap::new();
let mut peer_connections = Vec::new();
let mut links = Vec::new();
let own_id =
probe(Some(&mut descriptions), Some(&mut peer_connections), Some(&mut links)).await?;
let mut out = String::new();
out += "digraph G {\n";
for (node_id, description) in descriptions.iter() {
let is_self = node_id.id == own_id.id;
if args.exclude_self && is_self {
continue;
}
let mut attrs = AttrWriter::new();
if is_self {
attrs.set("shape", "box");
}
let mut label = String::new();
if let Some(os) = description.operating_system {
label += &format!("{:?}", os);
label += " ";
}
if let Some(imp) = description.implementation {
label += &format!("{:?}", imp);
label += ":";
}
label += &format!("{}", node_id.id);
attrs.set("label", &label);
out += &format!(" _{}{}\n", node_id.id, attrs.render());
}
for conn in peer_connections.iter() {
let source = conn.source.unwrap();
let dest = conn.destination.unwrap();
if args.exclude_self && (source.id == own_id.id || dest.id == own_id.id) {
continue;
}
let mut attrs = AttrWriter::new();
attrs
.set(
"color",
match conn.is_client {
None => "gray",
Some(true) => "red",
Some(false) => "magenta",
},
)
.set("weight", "0.9")
.set_bool("constraint", true);
attrs.set(
"style",
match conn.is_established {
None => "dotted",
Some(true) => "solid",
Some(false) => "dashed",
},
);
attrs.set_html(
"label",
&LabelAttrWriter::new()
.set("recv", conn.received_packets)
.set("sent", conn.sent_packets)
.set("lost", conn.lost_packets)
.set("rtt", conn.round_trip_time_microseconds)
.set("cwnd", conn.congestion_window_bytes)
.set("msgsent", conn.messages_sent)
.set("msgbsent", conn.bytes_sent)
.set("connect_to_service_sends", conn.connect_to_service_sends)
.set("connect_to_service_send_bytes", conn.connect_to_service_send_bytes)
.set("update_node_description_sends", conn.update_node_description_sends)
.set("update_node_description_send_bytes", conn.update_node_description_send_bytes)
.set("update_link_status_sends", conn.update_link_status_sends)
.set("update_link_status_send_bytes", conn.update_link_status_send_bytes)
.set("update_link_status_ack_sends", conn.update_link_status_ack_sends)
.set("update_link_status_ack_send_bytes", conn.update_link_status_ack_send_bytes)
.render(),
);
out += &format!(" _{} -> _{}{}\n", source.id, dest.id, attrs.render());
}
for link in links {
let source = link.source.unwrap();
let dest = link.destination.unwrap();
if args.exclude_self && (source.id == own_id.id || dest.id == own_id.id) {
continue;
}
let mut attrs = AttrWriter::new();
attrs.set("color", "blue").set("weight", "1.0").set("penwidth", "4.0");
attrs.set_html(
"label",
&LabelAttrWriter::new()
.set("id", link.source_local_id)
.set("recv", link.received_packets)
.set("sent", link.sent_packets)
.set("recvb", link.received_bytes)
.set("sentb", link.sent_bytes)
.set("pings", link.pings_sent)
.set("fwd", link.packets_forwarded)
.set("rtt", link.round_trip_time_microseconds)
.render(),
);
out += &format!(" _{} -> _{}{}\n", source.id, dest.id, attrs.render());
}
out += "}\n";
Ok(out)
} | )
.await | random_line_split |
lib.rs | use anyhow::{anyhow, bail, Error};
use std::cmp;
use std::env;
use walrus::ir::Value;
use walrus::{ExportItem, GlobalId, GlobalKind, MemoryId, Module};
use walrus::{FunctionId, InitExpr, ValType};
use wasm_bindgen_wasm_conventions as wasm_conventions;
const PAGE_SIZE: u32 = 1 << 16;
/// Configuration for the transformation pass in this module.
///
/// Created primarily through `new` and then executed through `run`.
pub struct Config {
maximum_memory: u32,
thread_stack_size: u32,
enabled: bool,
}
impl Config {
/// Create a new configuration with default settings.
pub fn new() -> Config {
Config {
maximum_memory: 1 << 30, // 1GB
thread_stack_size: 1 << 20, // 1MB
enabled: env::var("WASM_BINDGEN_THREADS").is_ok(),
}
}
/// Is threaded Wasm enabled?
pub fn is_enabled(&self, module: &Module) -> bool {
if self.enabled {
return true;
}
// Compatibility with older LLVM outputs. Newer LLVM outputs, when
// atomics are enabled, emit a shared memory. That's a good indicator
// that we have work to do. If shared memory isn't enabled, though then
// this isn't an atomic module so there's nothing to do. We still allow,
// though, an environment variable to force us to go down this path to
// remain compatibile with older LLVM outputs.
match wasm_conventions::get_memory(module) {
Ok(memory) => module.memories.get(memory).shared,
Err(_) => false,
}
}
/// Specify the maximum amount of memory the wasm module can ever have.
///
/// We'll be specifying that the memory for this wasm module is shared, and
/// all shared memories must have their maximum limit specified (whereas
/// by default Rust/LLVM/LLD don't specify a maximum).
///
/// The default for this option is 16MB, and this can be used to change
/// the maximum memory we'll be specifying.
///
/// The `max` argument is in units of bytes.
///
/// If the maximum memory is already specified this setting won't have any
/// affect.
pub fn maximum_memory(&mut self, max: u32) -> &mut Config {
self.maximum_memory = max;
self
}
/// Specify the stack size for all threads spawned.
///
/// The stack size is typically set by rustc as an argument to LLD and
/// defaults to 1MB for the main thread. All threads spawned by the
/// main thread, however, need to allocate their own stack!
///
/// This configuration option indicates how large the stack of each child
/// thread will be. This will be allocated as part of the `start` function
/// and will be stored in LLVM's global stack pointer.
pub fn thread_stack_size(&mut self, size: u32) -> &mut Config {
self.thread_stack_size = size;
self
}
/// Execute the transformation on the parsed wasm module specified.
///
/// This function will prepare `Module` to be run on multiple threads,
/// performing steps such as:
///
/// * All data segments are switched to "passive" data segments to ensure
/// they're only initialized once (coming later)
/// * If memory is exported from this module, it is instead switched to
/// being imported (with the same parameters).
/// * The imported memory is required to be `shared`, ensuring it's backed
/// by a `SharedArrayBuffer` on the web.
/// * A `global` for a thread ID is injected.
/// * Four bytes in linear memory are reserved for the counter of thread
/// IDs.
/// * A `start` function is injected (or prepended if one already exists)
/// which initializes memory for the first thread and otherwise allocates
/// thread ids for all threads.
///
/// More and/or less may happen here over time, stay tuned!
pub fn run(&self, module: &mut Module) -> Result<(), Error> {
if !self.is_enabled(module) {
return Ok(());
}
let memory = wasm_conventions::get_memory(module)?;
let stack_pointer = wasm_conventions::get_shadow_stack_pointer(module)
.ok_or_else(|| anyhow!("failed to find shadow stack pointer"))?;
let addr = allocate_static_data(module, memory, 4, 4)?;
let mem = module.memories.get_mut(memory);
assert!(mem.shared);
let prev_max = mem.maximum.unwrap();
assert!(mem.import.is_some());
mem.maximum = Some(cmp::max(self.maximum_memory / PAGE_SIZE, prev_max));
assert!(mem.data_segments.is_empty());
let tls = Tls {
init: delete_synthetic_func(module, "__wasm_init_tls")?,
size: delete_synthetic_global(module, "__tls_size")?,
align: delete_synthetic_global(module, "__tls_align")?,
};
inject_start(
module,
tls,
addr,
stack_pointer,
self.thread_stack_size,
memory,
)?;
Ok(())
}
}
fn delete_synthetic_func(module: &mut Module, name: &str) -> Result<FunctionId, Error> {
match delete_synthetic_export(module, name)? {
walrus::ExportItem::Function(f) => Ok(f),
_ => bail!("`{}` must be a function", name),
}
}
fn delete_synthetic_global(module: &mut Module, name: &str) -> Result<u32, Error> {
let id = match delete_synthetic_export(module, name)? {
walrus::ExportItem::Global(g) => g,
_ => bail!("`{}` must be a global", name),
};
let g = match module.globals.get(id).kind {
walrus::GlobalKind::Local(g) => g,
walrus::GlobalKind::Import(_) => bail!("`{}` must not be an imported global", name),
};
match g {
InitExpr::Value(Value::I32(v)) => Ok(v as u32),
_ => bail!("`{}` was not an `i32` constant", name),
}
}
fn delete_synthetic_export(module: &mut Module, name: &str) -> Result<ExportItem, Error> {
let item = module
.exports
.iter()
.find(|e| e.name == name)
.ok_or_else(|| anyhow!("failed to find `{}`", name))?;
let ret = item.item;
let id = item.id();
module.exports.delete(id);
Ok(ret)
}
fn allocate_static_data(
module: &mut Module,
memory: MemoryId,
size: u32,
align: u32,
) -> Result<u32, Error> {
// First up, look for a `__heap_base` export which is injected by LLD as
// part of the linking process. Note that `__heap_base` should in theory be
// *after* the stack and data, which means it's at the very end of the
// address space and should be safe for us to inject 4 bytes of data at.
let heap_base = module
.exports
.iter()
.filter(|e| e.name == "__heap_base")
.filter_map(|e| match e.item {
ExportItem::Global(id) => Some(id),
_ => None,
})
.next();
let heap_base = match heap_base {
Some(idx) => idx,
None => bail!("failed to find `__heap_base` for injecting thread id"),
};
// Now we need to bump up `__heap_base` by 4 bytes as we'd like to reserve
// those 4 bytes for our thread id counter. Do lots of validation here to
// make sure that `__heap_base` is an non-mutable integer, and then do
// some logic:
//
// * We require that `__heap_base` is aligned to 4 as that's what the atomic
// will require anyway.
// * We *may* have to add another page to the minimum for this module. If by
// reserving 4 bytes the heap base now lies on a different page then we
// probably went past our minimum page requirement, so we'll need to
// update our memory limits to add one.
//
// Otherwise here we'll rewrite the `__heap_base` global's initializer to be
// 4 larger, reserving us those 4 bytes for a thread id counter.
let (address, add_a_page) = {
let global = module.globals.get_mut(heap_base);
if global.ty != ValType::I32 {
bail!("the `__heap_base` global doesn't have the type `i32`");
}
if global.mutable {
bail!("the `__heap_base` global is unexpectedly mutable");
}
let offset = match &mut global.kind {
GlobalKind::Local(InitExpr::Value(Value::I32(n))) => n,
_ => bail!("`__heap_base` not a locally defined `i32`"),
};
let address = (*offset as u32 + (align - 1)) & !(align - 1); // align up
let add_a_page = (address + size) / PAGE_SIZE != address / PAGE_SIZE;
*offset = (address + size) as i32;
(address, add_a_page)
};
if add_a_page {
let memory = module.memories.get_mut(memory);
memory.initial += 1;
memory.maximum = memory.maximum.map(|m| cmp::max(m, memory.initial));
}
Ok(address)
}
struct Tls {
init: walrus::FunctionId,
size: u32,
align: u32,
}
fn inject_start(
module: &mut Module,
tls: Tls,
addr: u32,
stack_pointer: GlobalId,
stack_size: u32,
memory: MemoryId,
) -> Result<(), Error> {
use walrus::ir::*;
assert!(stack_size % PAGE_SIZE == 0);
let mut builder = walrus::FunctionBuilder::new(&mut module.types, &[], &[]);
let local = module.locals.add(ValType::I32);
let mut body = builder.func_body();
// Call previous start function if one is available. Currently this is
// always true because LLVM injects a call to `__wasm_init_memory` as the
// start function which, well, initializes memory.
if let Some(prev) = module.start.take() {
body.call(prev);
}
// Perform an if/else based on whether we're the first thread or not. Our
// thread ID will be zero if we're the first thread, otherwise it'll be
// nonzero (assuming we don't overflow...)
body.i32_const(addr as i32)
.i32_const(1)
.atomic_rmw(
memory,
AtomicOp::Add,
AtomicWidth::I32,
MemArg {
align: 4,
offset: 0,
},
)
.if_else(
None,
// If our thread id is nonzero then we're the second or greater thread, so
// we give ourselves a stack via memory.grow and we update our stack
// pointer as the default stack pointer is surely wrong for us.
|body| {
// local0 = grow_memory(stack_size);
body.i32_const((stack_size / PAGE_SIZE) as i32)
.memory_grow(memory)
.local_set(local);
// if local0 == -1 then trap
body.block(None, |body| {
let target = body.id();
body.local_get(local)
.i32_const(-1)
.binop(BinaryOp::I32Ne)
.br_if(target)
.unreachable();
});
// stack_pointer = local0 + stack_size
body.local_get(local)
.i32_const(PAGE_SIZE as i32)
.binop(BinaryOp::I32Mul)
.i32_const(stack_size as i32)
.binop(BinaryOp::I32Add)
.global_set(stack_pointer);
},
// If the thread id is zero then the default stack pointer works for
// us.
|_| {},
);
// Afterwards we need to initialize our thread-local state.
let malloc = find_wbindgen_malloc(module)?;
body.i32_const(tls.size as i32)
.i32_const(tls.align as i32)
.drop() // TODO: need to actually respect alignment
.call(malloc)
.call(tls.init);
// Finish off our newly generated function.
let id = builder.finish(Vec::new(), &mut module.funcs);
| // ... and finally flag it as the new start function
module.start = Some(id);
Ok(())
}
fn find_wbindgen_malloc(module: &Module) -> Result<FunctionId, Error> {
let e = module
.exports
.iter()
.find(|e| e.name == "__wbindgen_malloc")
.ok_or_else(|| anyhow!("failed to find `__wbindgen_malloc`"))?;
match e.item {
walrus::ExportItem::Function(f) => Ok(f),
_ => bail!("`__wbindgen_malloc` wasn't a funtion"),
}
} | random_line_split | |
lib.rs | use anyhow::{anyhow, bail, Error};
use std::cmp;
use std::env;
use walrus::ir::Value;
use walrus::{ExportItem, GlobalId, GlobalKind, MemoryId, Module};
use walrus::{FunctionId, InitExpr, ValType};
use wasm_bindgen_wasm_conventions as wasm_conventions;
const PAGE_SIZE: u32 = 1 << 16;
/// Configuration for the transformation pass in this module.
///
/// Created primarily through `new` and then executed through `run`.
pub struct Config {
maximum_memory: u32,
thread_stack_size: u32,
enabled: bool,
}
impl Config {
/// Create a new configuration with default settings.
pub fn new() -> Config {
Config {
maximum_memory: 1 << 30, // 1GB
thread_stack_size: 1 << 20, // 1MB
enabled: env::var("WASM_BINDGEN_THREADS").is_ok(),
}
}
/// Is threaded Wasm enabled?
pub fn is_enabled(&self, module: &Module) -> bool {
if self.enabled {
return true;
}
// Compatibility with older LLVM outputs. Newer LLVM outputs, when
// atomics are enabled, emit a shared memory. That's a good indicator
// that we have work to do. If shared memory isn't enabled, though then
// this isn't an atomic module so there's nothing to do. We still allow,
// though, an environment variable to force us to go down this path to
// remain compatibile with older LLVM outputs.
match wasm_conventions::get_memory(module) {
Ok(memory) => module.memories.get(memory).shared,
Err(_) => false,
}
}
/// Specify the maximum amount of memory the wasm module can ever have.
///
/// We'll be specifying that the memory for this wasm module is shared, and
/// all shared memories must have their maximum limit specified (whereas
/// by default Rust/LLVM/LLD don't specify a maximum).
///
/// The default for this option is 16MB, and this can be used to change
/// the maximum memory we'll be specifying.
///
/// The `max` argument is in units of bytes.
///
/// If the maximum memory is already specified this setting won't have any
/// affect.
pub fn maximum_memory(&mut self, max: u32) -> &mut Config {
self.maximum_memory = max;
self
}
/// Specify the stack size for all threads spawned.
///
/// The stack size is typically set by rustc as an argument to LLD and
/// defaults to 1MB for the main thread. All threads spawned by the
/// main thread, however, need to allocate their own stack!
///
/// This configuration option indicates how large the stack of each child
/// thread will be. This will be allocated as part of the `start` function
/// and will be stored in LLVM's global stack pointer.
pub fn thread_stack_size(&mut self, size: u32) -> &mut Config {
self.thread_stack_size = size;
self
}
/// Execute the transformation on the parsed wasm module specified.
///
/// This function will prepare `Module` to be run on multiple threads,
/// performing steps such as:
///
/// * All data segments are switched to "passive" data segments to ensure
/// they're only initialized once (coming later)
/// * If memory is exported from this module, it is instead switched to
/// being imported (with the same parameters).
/// * The imported memory is required to be `shared`, ensuring it's backed
/// by a `SharedArrayBuffer` on the web.
/// * A `global` for a thread ID is injected.
/// * Four bytes in linear memory are reserved for the counter of thread
/// IDs.
/// * A `start` function is injected (or prepended if one already exists)
/// which initializes memory for the first thread and otherwise allocates
/// thread ids for all threads.
///
/// More and/or less may happen here over time, stay tuned!
pub fn run(&self, module: &mut Module) -> Result<(), Error> {
if !self.is_enabled(module) {
return Ok(());
}
let memory = wasm_conventions::get_memory(module)?;
let stack_pointer = wasm_conventions::get_shadow_stack_pointer(module)
.ok_or_else(|| anyhow!("failed to find shadow stack pointer"))?;
let addr = allocate_static_data(module, memory, 4, 4)?;
let mem = module.memories.get_mut(memory);
assert!(mem.shared);
let prev_max = mem.maximum.unwrap();
assert!(mem.import.is_some());
mem.maximum = Some(cmp::max(self.maximum_memory / PAGE_SIZE, prev_max));
assert!(mem.data_segments.is_empty());
let tls = Tls {
init: delete_synthetic_func(module, "__wasm_init_tls")?,
size: delete_synthetic_global(module, "__tls_size")?,
align: delete_synthetic_global(module, "__tls_align")?,
};
inject_start(
module,
tls,
addr,
stack_pointer,
self.thread_stack_size,
memory,
)?;
Ok(())
}
}
fn delete_synthetic_func(module: &mut Module, name: &str) -> Result<FunctionId, Error> |
fn delete_synthetic_global(module: &mut Module, name: &str) -> Result<u32, Error> {
let id = match delete_synthetic_export(module, name)? {
walrus::ExportItem::Global(g) => g,
_ => bail!("`{}` must be a global", name),
};
let g = match module.globals.get(id).kind {
walrus::GlobalKind::Local(g) => g,
walrus::GlobalKind::Import(_) => bail!("`{}` must not be an imported global", name),
};
match g {
InitExpr::Value(Value::I32(v)) => Ok(v as u32),
_ => bail!("`{}` was not an `i32` constant", name),
}
}
fn delete_synthetic_export(module: &mut Module, name: &str) -> Result<ExportItem, Error> {
let item = module
.exports
.iter()
.find(|e| e.name == name)
.ok_or_else(|| anyhow!("failed to find `{}`", name))?;
let ret = item.item;
let id = item.id();
module.exports.delete(id);
Ok(ret)
}
fn allocate_static_data(
module: &mut Module,
memory: MemoryId,
size: u32,
align: u32,
) -> Result<u32, Error> {
// First up, look for a `__heap_base` export which is injected by LLD as
// part of the linking process. Note that `__heap_base` should in theory be
// *after* the stack and data, which means it's at the very end of the
// address space and should be safe for us to inject 4 bytes of data at.
let heap_base = module
.exports
.iter()
.filter(|e| e.name == "__heap_base")
.filter_map(|e| match e.item {
ExportItem::Global(id) => Some(id),
_ => None,
})
.next();
let heap_base = match heap_base {
Some(idx) => idx,
None => bail!("failed to find `__heap_base` for injecting thread id"),
};
// Now we need to bump up `__heap_base` by 4 bytes as we'd like to reserve
// those 4 bytes for our thread id counter. Do lots of validation here to
// make sure that `__heap_base` is an non-mutable integer, and then do
// some logic:
//
// * We require that `__heap_base` is aligned to 4 as that's what the atomic
// will require anyway.
// * We *may* have to add another page to the minimum for this module. If by
// reserving 4 bytes the heap base now lies on a different page then we
// probably went past our minimum page requirement, so we'll need to
// update our memory limits to add one.
//
// Otherwise here we'll rewrite the `__heap_base` global's initializer to be
// 4 larger, reserving us those 4 bytes for a thread id counter.
let (address, add_a_page) = {
let global = module.globals.get_mut(heap_base);
if global.ty != ValType::I32 {
bail!("the `__heap_base` global doesn't have the type `i32`");
}
if global.mutable {
bail!("the `__heap_base` global is unexpectedly mutable");
}
let offset = match &mut global.kind {
GlobalKind::Local(InitExpr::Value(Value::I32(n))) => n,
_ => bail!("`__heap_base` not a locally defined `i32`"),
};
let address = (*offset as u32 + (align - 1)) & !(align - 1); // align up
let add_a_page = (address + size) / PAGE_SIZE != address / PAGE_SIZE;
*offset = (address + size) as i32;
(address, add_a_page)
};
if add_a_page {
let memory = module.memories.get_mut(memory);
memory.initial += 1;
memory.maximum = memory.maximum.map(|m| cmp::max(m, memory.initial));
}
Ok(address)
}
struct Tls {
init: walrus::FunctionId,
size: u32,
align: u32,
}
fn inject_start(
module: &mut Module,
tls: Tls,
addr: u32,
stack_pointer: GlobalId,
stack_size: u32,
memory: MemoryId,
) -> Result<(), Error> {
use walrus::ir::*;
assert!(stack_size % PAGE_SIZE == 0);
let mut builder = walrus::FunctionBuilder::new(&mut module.types, &[], &[]);
let local = module.locals.add(ValType::I32);
let mut body = builder.func_body();
// Call previous start function if one is available. Currently this is
// always true because LLVM injects a call to `__wasm_init_memory` as the
// start function which, well, initializes memory.
if let Some(prev) = module.start.take() {
body.call(prev);
}
// Perform an if/else based on whether we're the first thread or not. Our
// thread ID will be zero if we're the first thread, otherwise it'll be
// nonzero (assuming we don't overflow...)
body.i32_const(addr as i32)
.i32_const(1)
.atomic_rmw(
memory,
AtomicOp::Add,
AtomicWidth::I32,
MemArg {
align: 4,
offset: 0,
},
)
.if_else(
None,
// If our thread id is nonzero then we're the second or greater thread, so
// we give ourselves a stack via memory.grow and we update our stack
// pointer as the default stack pointer is surely wrong for us.
|body| {
// local0 = grow_memory(stack_size);
body.i32_const((stack_size / PAGE_SIZE) as i32)
.memory_grow(memory)
.local_set(local);
// if local0 == -1 then trap
body.block(None, |body| {
let target = body.id();
body.local_get(local)
.i32_const(-1)
.binop(BinaryOp::I32Ne)
.br_if(target)
.unreachable();
});
// stack_pointer = local0 + stack_size
body.local_get(local)
.i32_const(PAGE_SIZE as i32)
.binop(BinaryOp::I32Mul)
.i32_const(stack_size as i32)
.binop(BinaryOp::I32Add)
.global_set(stack_pointer);
},
// If the thread id is zero then the default stack pointer works for
// us.
|_| {},
);
// Afterwards we need to initialize our thread-local state.
let malloc = find_wbindgen_malloc(module)?;
body.i32_const(tls.size as i32)
.i32_const(tls.align as i32)
.drop() // TODO: need to actually respect alignment
.call(malloc)
.call(tls.init);
// Finish off our newly generated function.
let id = builder.finish(Vec::new(), &mut module.funcs);
// ... and finally flag it as the new start function
module.start = Some(id);
Ok(())
}
fn find_wbindgen_malloc(module: &Module) -> Result<FunctionId, Error> {
let e = module
.exports
.iter()
.find(|e| e.name == "__wbindgen_malloc")
.ok_or_else(|| anyhow!("failed to find `__wbindgen_malloc`"))?;
match e.item {
walrus::ExportItem::Function(f) => Ok(f),
_ => bail!("`__wbindgen_malloc` wasn't a funtion"),
}
}
| {
match delete_synthetic_export(module, name)? {
walrus::ExportItem::Function(f) => Ok(f),
_ => bail!("`{}` must be a function", name),
}
} | identifier_body |
lib.rs | use anyhow::{anyhow, bail, Error};
use std::cmp;
use std::env;
use walrus::ir::Value;
use walrus::{ExportItem, GlobalId, GlobalKind, MemoryId, Module};
use walrus::{FunctionId, InitExpr, ValType};
use wasm_bindgen_wasm_conventions as wasm_conventions;
const PAGE_SIZE: u32 = 1 << 16;
/// Configuration for the transformation pass in this module.
///
/// Created primarily through `new` and then executed through `run`.
pub struct Config {
maximum_memory: u32,
thread_stack_size: u32,
enabled: bool,
}
impl Config {
/// Create a new configuration with default settings.
pub fn new() -> Config {
Config {
maximum_memory: 1 << 30, // 1GB
thread_stack_size: 1 << 20, // 1MB
enabled: env::var("WASM_BINDGEN_THREADS").is_ok(),
}
}
/// Is threaded Wasm enabled?
pub fn is_enabled(&self, module: &Module) -> bool {
if self.enabled {
return true;
}
// Compatibility with older LLVM outputs. Newer LLVM outputs, when
// atomics are enabled, emit a shared memory. That's a good indicator
// that we have work to do. If shared memory isn't enabled, though then
// this isn't an atomic module so there's nothing to do. We still allow,
// though, an environment variable to force us to go down this path to
// remain compatibile with older LLVM outputs.
match wasm_conventions::get_memory(module) {
Ok(memory) => module.memories.get(memory).shared,
Err(_) => false,
}
}
/// Specify the maximum amount of memory the wasm module can ever have.
///
/// We'll be specifying that the memory for this wasm module is shared, and
/// all shared memories must have their maximum limit specified (whereas
/// by default Rust/LLVM/LLD don't specify a maximum).
///
/// The default for this option is 16MB, and this can be used to change
/// the maximum memory we'll be specifying.
///
/// The `max` argument is in units of bytes.
///
/// If the maximum memory is already specified this setting won't have any
/// affect.
pub fn maximum_memory(&mut self, max: u32) -> &mut Config {
self.maximum_memory = max;
self
}
/// Specify the stack size for all threads spawned.
///
/// The stack size is typically set by rustc as an argument to LLD and
/// defaults to 1MB for the main thread. All threads spawned by the
/// main thread, however, need to allocate their own stack!
///
/// This configuration option indicates how large the stack of each child
/// thread will be. This will be allocated as part of the `start` function
/// and will be stored in LLVM's global stack pointer.
pub fn thread_stack_size(&mut self, size: u32) -> &mut Config {
self.thread_stack_size = size;
self
}
/// Execute the transformation on the parsed wasm module specified.
///
/// This function will prepare `Module` to be run on multiple threads,
/// performing steps such as:
///
/// * All data segments are switched to "passive" data segments to ensure
/// they're only initialized once (coming later)
/// * If memory is exported from this module, it is instead switched to
/// being imported (with the same parameters).
/// * The imported memory is required to be `shared`, ensuring it's backed
/// by a `SharedArrayBuffer` on the web.
/// * A `global` for a thread ID is injected.
/// * Four bytes in linear memory are reserved for the counter of thread
/// IDs.
/// * A `start` function is injected (or prepended if one already exists)
/// which initializes memory for the first thread and otherwise allocates
/// thread ids for all threads.
///
/// More and/or less may happen here over time, stay tuned!
pub fn run(&self, module: &mut Module) -> Result<(), Error> {
if !self.is_enabled(module) {
return Ok(());
}
let memory = wasm_conventions::get_memory(module)?;
let stack_pointer = wasm_conventions::get_shadow_stack_pointer(module)
.ok_or_else(|| anyhow!("failed to find shadow stack pointer"))?;
let addr = allocate_static_data(module, memory, 4, 4)?;
let mem = module.memories.get_mut(memory);
assert!(mem.shared);
let prev_max = mem.maximum.unwrap();
assert!(mem.import.is_some());
mem.maximum = Some(cmp::max(self.maximum_memory / PAGE_SIZE, prev_max));
assert!(mem.data_segments.is_empty());
let tls = Tls {
init: delete_synthetic_func(module, "__wasm_init_tls")?,
size: delete_synthetic_global(module, "__tls_size")?,
align: delete_synthetic_global(module, "__tls_align")?,
};
inject_start(
module,
tls,
addr,
stack_pointer,
self.thread_stack_size,
memory,
)?;
Ok(())
}
}
fn delete_synthetic_func(module: &mut Module, name: &str) -> Result<FunctionId, Error> {
match delete_synthetic_export(module, name)? {
walrus::ExportItem::Function(f) => Ok(f),
_ => bail!("`{}` must be a function", name),
}
}
fn delete_synthetic_global(module: &mut Module, name: &str) -> Result<u32, Error> {
let id = match delete_synthetic_export(module, name)? {
walrus::ExportItem::Global(g) => g,
_ => bail!("`{}` must be a global", name),
};
let g = match module.globals.get(id).kind {
walrus::GlobalKind::Local(g) => g,
walrus::GlobalKind::Import(_) => bail!("`{}` must not be an imported global", name),
};
match g {
InitExpr::Value(Value::I32(v)) => Ok(v as u32),
_ => bail!("`{}` was not an `i32` constant", name),
}
}
fn delete_synthetic_export(module: &mut Module, name: &str) -> Result<ExportItem, Error> {
let item = module
.exports
.iter()
.find(|e| e.name == name)
.ok_or_else(|| anyhow!("failed to find `{}`", name))?;
let ret = item.item;
let id = item.id();
module.exports.delete(id);
Ok(ret)
}
fn allocate_static_data(
module: &mut Module,
memory: MemoryId,
size: u32,
align: u32,
) -> Result<u32, Error> {
// First up, look for a `__heap_base` export which is injected by LLD as
// part of the linking process. Note that `__heap_base` should in theory be
// *after* the stack and data, which means it's at the very end of the
// address space and should be safe for us to inject 4 bytes of data at.
let heap_base = module
.exports
.iter()
.filter(|e| e.name == "__heap_base")
.filter_map(|e| match e.item {
ExportItem::Global(id) => Some(id),
_ => None,
})
.next();
let heap_base = match heap_base {
Some(idx) => idx,
None => bail!("failed to find `__heap_base` for injecting thread id"),
};
// Now we need to bump up `__heap_base` by 4 bytes as we'd like to reserve
// those 4 bytes for our thread id counter. Do lots of validation here to
// make sure that `__heap_base` is an non-mutable integer, and then do
// some logic:
//
// * We require that `__heap_base` is aligned to 4 as that's what the atomic
// will require anyway.
// * We *may* have to add another page to the minimum for this module. If by
// reserving 4 bytes the heap base now lies on a different page then we
// probably went past our minimum page requirement, so we'll need to
// update our memory limits to add one.
//
// Otherwise here we'll rewrite the `__heap_base` global's initializer to be
// 4 larger, reserving us those 4 bytes for a thread id counter.
let (address, add_a_page) = {
let global = module.globals.get_mut(heap_base);
if global.ty != ValType::I32 {
bail!("the `__heap_base` global doesn't have the type `i32`");
}
if global.mutable {
bail!("the `__heap_base` global is unexpectedly mutable");
}
let offset = match &mut global.kind {
GlobalKind::Local(InitExpr::Value(Value::I32(n))) => n,
_ => bail!("`__heap_base` not a locally defined `i32`"),
};
let address = (*offset as u32 + (align - 1)) & !(align - 1); // align up
let add_a_page = (address + size) / PAGE_SIZE != address / PAGE_SIZE;
*offset = (address + size) as i32;
(address, add_a_page)
};
if add_a_page {
let memory = module.memories.get_mut(memory);
memory.initial += 1;
memory.maximum = memory.maximum.map(|m| cmp::max(m, memory.initial));
}
Ok(address)
}
struct | {
init: walrus::FunctionId,
size: u32,
align: u32,
}
fn inject_start(
module: &mut Module,
tls: Tls,
addr: u32,
stack_pointer: GlobalId,
stack_size: u32,
memory: MemoryId,
) -> Result<(), Error> {
use walrus::ir::*;
assert!(stack_size % PAGE_SIZE == 0);
let mut builder = walrus::FunctionBuilder::new(&mut module.types, &[], &[]);
let local = module.locals.add(ValType::I32);
let mut body = builder.func_body();
// Call previous start function if one is available. Currently this is
// always true because LLVM injects a call to `__wasm_init_memory` as the
// start function which, well, initializes memory.
if let Some(prev) = module.start.take() {
body.call(prev);
}
// Perform an if/else based on whether we're the first thread or not. Our
// thread ID will be zero if we're the first thread, otherwise it'll be
// nonzero (assuming we don't overflow...)
body.i32_const(addr as i32)
.i32_const(1)
.atomic_rmw(
memory,
AtomicOp::Add,
AtomicWidth::I32,
MemArg {
align: 4,
offset: 0,
},
)
.if_else(
None,
// If our thread id is nonzero then we're the second or greater thread, so
// we give ourselves a stack via memory.grow and we update our stack
// pointer as the default stack pointer is surely wrong for us.
|body| {
// local0 = grow_memory(stack_size);
body.i32_const((stack_size / PAGE_SIZE) as i32)
.memory_grow(memory)
.local_set(local);
// if local0 == -1 then trap
body.block(None, |body| {
let target = body.id();
body.local_get(local)
.i32_const(-1)
.binop(BinaryOp::I32Ne)
.br_if(target)
.unreachable();
});
// stack_pointer = local0 + stack_size
body.local_get(local)
.i32_const(PAGE_SIZE as i32)
.binop(BinaryOp::I32Mul)
.i32_const(stack_size as i32)
.binop(BinaryOp::I32Add)
.global_set(stack_pointer);
},
// If the thread id is zero then the default stack pointer works for
// us.
|_| {},
);
// Afterwards we need to initialize our thread-local state.
let malloc = find_wbindgen_malloc(module)?;
body.i32_const(tls.size as i32)
.i32_const(tls.align as i32)
.drop() // TODO: need to actually respect alignment
.call(malloc)
.call(tls.init);
// Finish off our newly generated function.
let id = builder.finish(Vec::new(), &mut module.funcs);
// ... and finally flag it as the new start function
module.start = Some(id);
Ok(())
}
fn find_wbindgen_malloc(module: &Module) -> Result<FunctionId, Error> {
let e = module
.exports
.iter()
.find(|e| e.name == "__wbindgen_malloc")
.ok_or_else(|| anyhow!("failed to find `__wbindgen_malloc`"))?;
match e.item {
walrus::ExportItem::Function(f) => Ok(f),
_ => bail!("`__wbindgen_malloc` wasn't a funtion"),
}
}
| Tls | identifier_name |
lib.rs | #![allow(
clippy::cast_lossless,
clippy::too_many_arguments,
clippy::cognitive_complexity,
clippy::redundant_closure
)]
//! Fast GPU cached text rendering using gfx-rs & ab_glyph.
//!
//! Makes use of three kinds of caching to optimise frame performance.
//!
//! * Caching of glyph positioning output to avoid repeated cost of identical text
//! rendering on sequential frames.
//! * Caches draw calculations to avoid repeated cost of identical text rendering on
//! sequential frames.
//! * GPU cache logic to dynamically maintain a GPU texture of rendered glyphs.
//!
//! # Example
//!
//! ```no_run
//! use gfx_glyph::{ab_glyph::FontArc, GlyphBrushBuilder, Section, Text};
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
//! # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
//! # let gfx_depth: gfx_core::handle::DepthStencilView<gfx_device_gl::Resources, gfx::format::Depth> = unimplemented!();
//! # let gfx_factory: gfx_device_gl::Factory = unimplemented!();
//! # let gfx_encoder: gfx::Encoder<_, _> = gfx_factory.create_command_buffer().into();
//!
//! let dejavu = FontArc::try_from_slice(include_bytes!("../../fonts/DejaVuSans.ttf"))?;
//! let mut glyph_brush = GlyphBrushBuilder::using_font(dejavu).build(gfx_factory.clone());
//!
//! # let some_other_section = Section::default();
//! let section = Section::default().add_text(Text::new("Hello gfx_glyph"));
//!
//! glyph_brush.queue(section);
//! glyph_brush.queue(some_other_section);
//!
//! glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
//! # Ok(()) }
//! ```
mod builder;
mod pipe;
#[macro_use]
mod trace;
mod draw_builder;
pub use crate::{builder::*, draw_builder::*};
pub use glyph_brush::{
ab_glyph, legacy, BuiltInLineBreaker, Extra, FontId, GlyphCruncher, GlyphPositioner,
HorizontalAlign, Layout, LineBreak, LineBreaker, OwnedSection, OwnedText, Section,
SectionGeometry, SectionGlyph, SectionGlyphIter, SectionText, Text, VerticalAlign,
};
use crate::pipe::{glyph_pipe, GlyphVertex, IntoDimensions, RawAndFormat};
use gfx::{
format,
handle::{self, RawDepthStencilView, RawRenderTargetView},
texture,
traits::FactoryExt,
};
use glyph_brush::{ab_glyph::*, BrushAction, BrushError, DefaultSectionHasher};
use log::{log_enabled, warn};
use std::{
borrow::Cow,
error::Error,
fmt,
hash::{BuildHasher, Hash},
};
// Type for the generated glyph cache texture
type TexForm = format::U8Norm;
type TexSurface = <TexForm as format::Formatted>::Surface;
type TexChannel = <TexForm as format::Formatted>::Channel;
type TexFormView = <TexForm as format::Formatted>::View;
type TexSurfaceHandle<R> = handle::Texture<R, TexSurface>;
type TexShaderView<R> = handle::ShaderResourceView<R, TexFormView>;
/// Returns the default 4 dimensional matrix orthographic projection used for drawing.
///
/// # Example
///
/// ```
/// # let (screen_width, screen_height) = (1f32, 2f32);
/// let projection = gfx_glyph::default_transform((screen_width, screen_height));
/// ```
///
/// # Example
///
/// ```no_run
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// let projection = gfx_glyph::default_transform(&gfx_color);
/// ```
#[inline]
pub fn default_transform<D: IntoDimensions>(d: D) -> [[f32; 4]; 4] {
let (w, h) = d.into_dimensions();
[
[2.0 / w, 0.0, 0.0, 0.0],
[0.0, 2.0 / h, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[-1.0, -1.0, 0.0, 1.0],
]
}
/// Object allowing glyph drawing, containing cache state. Manages glyph positioning cacheing,
/// glyph draw caching & efficient GPU texture cache updating and re-sizing on demand.
///
/// Build using a [`GlyphBrushBuilder`](struct.GlyphBrushBuilder.html).
///
/// # Example
/// ```no_run
/// # use gfx_glyph::{GlyphBrushBuilder};
/// use gfx_glyph::{Section, Text};
/// # fn main() -> Result<(), String> {
/// # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// # let factory: gfx_device_gl::Factory = unimplemented!();
/// # let gfx_encoder: gfx::Encoder<_, _> = factory.create_command_buffer().into();
/// # let some_other_section = Section::default();
///
/// let section = Section::default().add_text(Text::new("Hello gfx_glyph"));
///
/// glyph_brush.queue(section);
/// glyph_brush.queue(some_other_section);
///
/// glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
/// # Ok(()) }
/// ```
///
/// # Caching behaviour
///
/// Calls to [`GlyphBrush::queue`](#method.queue),
/// [`GlyphBrush::glyph_bounds`](#method.glyph_bounds), [`GlyphBrush::glyphs`](#method.glyphs)
/// calculate the positioned glyphs for a section.
/// This is cached so future calls to any of the methods for the same section are much
/// cheaper. In the case of [`GlyphBrush::queue`](#method.queue) the calculations will also be
/// used for actual drawing.
///
/// The cache for a section will be **cleared** after a
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw) call when that section has not been used since
/// the previous draw call.
pub struct GlyphBrush<R: gfx::Resources, GF: gfx::Factory<R>, F = FontArc, H = DefaultSectionHasher>
{
font_cache_tex: (
gfx::handle::Texture<R, TexSurface>,
gfx_core::handle::ShaderResourceView<R, f32>,
),
texture_filter_method: texture::FilterMethod,
factory: GF,
program: gfx::handle::Program<R>,
draw_cache: Option<DrawnGlyphBrush<R>>,
glyph_brush: glyph_brush::GlyphBrush<GlyphVertex, Extra, F, H>,
// config
depth_test: gfx::state::Depth,
}
impl<R: gfx::Resources, GF: gfx::Factory<R>, F, H> fmt::Debug for GlyphBrush<R, GF, F, H> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "GlyphBrush")
}
}
impl<R, GF, F, H> GlyphBrush<R, GF, F, H>
where
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
F: Font,
{
/// Adds an additional font to the one(s) initially added on build.
///
/// Returns a new [`FontId`](struct.FontId.html) to reference this font.
pub fn add_font(&mut self, font: F) -> FontId {
self.glyph_brush.add_font(font)
}
}
impl<R, GF, F, H> GlyphCruncher<F, Extra> for GlyphBrush<R, GF, F, H>
where
F: Font,
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
{
#[inline]
fn glyphs_custom_layout<'a, 'b, S, L>(
&'b mut self,
section: S,
custom_layout: &L,
) -> SectionGlyphIter<'b>
where
L: GlyphPositioner + Hash,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush
.glyphs_custom_layout(section, custom_layout)
}
#[inline]
fn fonts(&self) -> &[F] {
self.glyph_brush.fonts()
}
#[inline]
fn glyph_bounds_custom_layout<'a, S, L>(
&mut self,
section: S,
custom_layout: &L,
) -> Option<Rect>
where
L: GlyphPositioner + Hash,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush
.glyph_bounds_custom_layout(section, custom_layout)
}
}
impl<R, GF, F, H> GlyphBrush<R, GF, F, H>
where
F: Font + Sync,
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
{
/// Queues a section/layout to be drawn by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times
/// to queue multiple sections for drawing.
///
/// Benefits from caching, see [caching behaviour](#caching-behaviour).
#[inline]
pub fn queue<'a, S>(&mut self, section: S)
where
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.queue(section)
}
/// Returns a [`DrawBuilder`](struct.DrawBuilder.html) allowing the queued glyphs to be drawn.
///
/// Drawing will trim the cache, see [caching behaviour](#caching-behaviour).
/// # Example
///
/// ```no_run
/// # fn main() -> Result<(), String> {
/// # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// # let factory: gfx_device_gl::Factory = unimplemented!();
/// # let gfx_encoder: gfx::Encoder<_, _> = factory.create_command_buffer().into();
/// glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
/// # Ok(()) }
/// ```
#[inline]
pub fn use_queue(&mut self) -> DrawBuilder<'_, F, R, GF, H, ()> {
DrawBuilder {
brush: self,
transform: None,
depth_target: None,
}
}
/// Queues a section/layout to be drawn by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times
/// to queue multiple sections for drawing.
///
/// Used to provide custom `GlyphPositioner` logic, if using built-in
/// [`Layout`](enum.Layout.html) simply use [`queue`](struct.GlyphBrush.html#method.queue)
///
/// Benefits from caching, see [caching behaviour](#caching-behaviour).
#[inline]
pub fn queue_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G)
where
G: GlyphPositioner,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.queue_custom_layout(section, custom_layout)
}
/// Queues pre-positioned glyphs to be processed by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times.
#[inline]
pub fn queue_pre_positioned(
&mut self,
glyphs: Vec<SectionGlyph>,
extra: Vec<Extra>,
bounds: Rect,
) {
self.glyph_brush.queue_pre_positioned(glyphs, extra, bounds)
}
/// Retains the section in the cache as if it had been used in the last draw-frame.
///
/// Should not be necessary unless using multiple draws per frame with distinct transforms,
/// see [caching behaviour](#caching-behaviour).
#[inline]
pub fn keep_cached_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G)
where
S: Into<Cow<'a, Section<'a>>>,
G: GlyphPositioner,
{
self.glyph_brush
.keep_cached_custom_layout(section, custom_layout)
}
/// Retains the section in the cache as if it had been used in the last draw-frame.
///
/// Should not be necessary unless using multiple draws per frame with distinct transforms,
/// see [caching behaviour](#caching-behaviour).
#[inline]
pub fn keep_cached<'a, S>(&mut self, section: S)
where
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.keep_cached(section)
}
/// Returns the available fonts.
///
/// The `FontId` corresponds to the index of the font data.
#[inline]
pub fn fonts(&self) -> &[F] {
self.glyph_brush.fonts()
}
/// Draws all queued sections
pub(crate) fn draw<C, CV, DV>(
&mut self,
transform: [[f32; 4]; 4],
encoder: &mut gfx::Encoder<R, C>,
target: &CV,
depth_target: Option<&DV>,
) -> Result<(), String>
where
C: gfx::CommandBuffer<R>,
CV: RawAndFormat<Raw = RawRenderTargetView<R>>,
DV: RawAndFormat<Raw = RawDepthStencilView<R>>,
{
let mut brush_action;
loop {
let tex = self.font_cache_tex.0.clone();
brush_action = self.glyph_brush.process_queued(
|rect, tex_data| {
let offset = [rect.min[0] as u16, rect.min[1] as u16];
let size = [rect.width() as u16, rect.height() as u16];
update_texture(encoder, &tex, offset, size, tex_data);
},
to_vertex,
);
match brush_action {
Ok(_) => break,
Err(BrushError::TextureTooSmall { suggested }) => {
let max_image_dimension =
self.factory.get_capabilities().max_texture_size as u32;
let (new_width, new_height) = if (suggested.0 > max_image_dimension
|| suggested.1 > max_image_dimension)
&& (self.glyph_brush.texture_dimensions().0 < max_image_dimension
|| self.glyph_brush.texture_dimensions().1 < max_image_dimension)
{
(max_image_dimension, max_image_dimension)
} else {
suggested
};
if log_enabled!(log::Level::Warn) {
warn!(
"Increasing glyph texture size {old:?} -> {new:?}. \
Consider building with `.initial_cache_size({new:?})` to avoid \
resizing. Called from:\n{trace}",
old = self.glyph_brush.texture_dimensions(),
new = (new_width, new_height),
trace = outer_backtrace!()
);
}
match create_texture(&mut self.factory, new_width, new_height) {
Ok((new_tex, tex_view)) => {
self.glyph_brush.resize_texture(new_width, new_height);
if let Some(ref mut cache) = self.draw_cache {
cache.pipe_data.font_tex.0 = tex_view.clone();
}
self.font_cache_tex.1 = tex_view;
self.font_cache_tex.0 = new_tex;
}
Err(_) => {
return Err(format!(
"Failed to create {new_width}x{new_height} glyph texture"
));
}
}
}
}
}
// refresh pipe data
// - pipe targets may have changed, or had resolutions changes
// - format may have changed
if let Some(mut cache) = self.draw_cache.take() {
if &cache.pipe_data.out != target.as_raw() {
cache.pipe_data.out.clone_from(target.as_raw());
}
if let Some(depth_target) = depth_target {
if cache.pipe_data.out_depth.as_ref() != Some(depth_target.as_raw()) {
cache
.pipe_data
.out_depth
.clone_from(&Some(depth_target.as_raw().clone()));
}
} else |
if cache.pso.0 != target.format() {
cache.pso = (
target.format(),
self.pso_using(target.format(), depth_target.map(|d| d.format())),
);
}
self.draw_cache = Some(cache);
}
match brush_action.unwrap() {
BrushAction::Draw(verts) => {
let draw_cache = if let Some(mut cache) = self.draw_cache.take() {
if cache.pipe_data.vbuf.len() < verts.len() {
cache.pipe_data.vbuf =
new_vertex_buffer(&mut self.factory, encoder, &verts);
} else {
encoder
.update_buffer(&cache.pipe_data.vbuf, &verts, 0)
.unwrap();
}
cache.slice.instances.as_mut().unwrap().0 = verts.len() as _;
cache
} else {
let vbuf = new_vertex_buffer(&mut self.factory, encoder, &verts);
DrawnGlyphBrush {
pipe_data: {
let sampler = self.factory.create_sampler(texture::SamplerInfo::new(
self.texture_filter_method,
texture::WrapMode::Clamp,
));
glyph_pipe::Data {
vbuf,
font_tex: (self.font_cache_tex.1.clone(), sampler),
transform,
out: target.as_raw().clone(),
out_depth: depth_target.map(|d| d.as_raw().clone()),
}
},
pso: (
target.format(),
self.pso_using(target.format(), depth_target.map(|d| d.format())),
),
slice: gfx::Slice {
instances: Some((verts.len() as _, 0)),
..Self::empty_slice()
},
}
};
self.draw_cache = Some(draw_cache);
}
BrushAction::ReDraw => {}
};
if let Some(&mut DrawnGlyphBrush {
ref pso,
ref slice,
ref mut pipe_data,
..
}) = self.draw_cache.as_mut()
{
pipe_data.transform = transform;
encoder.draw(slice, &pso.1, pipe_data);
}
Ok(())
}
fn pso_using(
&mut self,
color_format: gfx::format::Format,
depth_format: Option<gfx::format::Format>,
) -> gfx::PipelineState<R, glyph_pipe::Meta> {
self.factory
.create_pipeline_from_program(
&self.program,
gfx::Primitive::TriangleStrip,
gfx::state::Rasterizer::new_fill(),
glyph_pipe::Init::new(color_format, depth_format, self.depth_test),
)
.unwrap()
}
fn empty_slice() -> gfx::Slice<R> {
gfx::Slice {
start: 0,
end: 4,
buffer: gfx::IndexBuffer::Auto,
base_vertex: 0,
instances: None,
}
}
}
struct DrawnGlyphBrush<R: gfx::Resources> {
pipe_data: glyph_pipe::Data<R>,
pso: (gfx::format::Format, gfx::PipelineState<R, glyph_pipe::Meta>),
slice: gfx::Slice<R>,
}
/// Allocates a vertex buffer 1 per glyph that will be updated on text changes
#[inline]
fn new_vertex_buffer<R: gfx::Resources, F: gfx::Factory<R>, C: gfx::CommandBuffer<R>>(
factory: &mut F,
encoder: &mut gfx::Encoder<R, C>,
verts: &[GlyphVertex],
) -> gfx::handle::Buffer<R, GlyphVertex> {
let buf = factory
.create_buffer(
verts.len(),
gfx::buffer::Role::Vertex,
gfx::memory::Usage::Dynamic,
gfx::memory::Bind::empty(),
)
.unwrap();
encoder.update_buffer(&buf, verts, 0).unwrap();
buf
}
#[inline]
fn to_vertex(
glyph_brush::GlyphVertex {
mut tex_coords,
pixel_coords,
bounds,
extra,
}: glyph_brush::GlyphVertex,
) -> GlyphVertex {
let gl_bounds = bounds;
let mut gl_rect = Rect {
min: point(pixel_coords.min.x, pixel_coords.min.y),
max: point(pixel_coords.max.x, pixel_coords.max.y),
};
// handle overlapping bounds, modify uv_rect to preserve texture aspect
if gl_rect.max.x > gl_bounds.max.x {
let old_width = gl_rect.width();
gl_rect.max.x = gl_bounds.max.x;
tex_coords.max.x = tex_coords.min.x + tex_coords.width() * gl_rect.width() / old_width;
}
if gl_rect.min.x < gl_bounds.min.x {
let old_width = gl_rect.width();
gl_rect.min.x = gl_bounds.min.x;
tex_coords.min.x = tex_coords.max.x - tex_coords.width() * gl_rect.width() / old_width;
}
if gl_rect.max.y > gl_bounds.max.y {
let old_height = gl_rect.height();
gl_rect.max.y = gl_bounds.max.y;
tex_coords.max.y = tex_coords.min.y + tex_coords.height() * gl_rect.height() / old_height;
}
if gl_rect.min.y < gl_bounds.min.y {
let old_height = gl_rect.height();
gl_rect.min.y = gl_bounds.min.y;
tex_coords.min.y = tex_coords.max.y - tex_coords.height() * gl_rect.height() / old_height;
}
GlyphVertex {
left_top: [gl_rect.min.x, gl_rect.max.y, extra.z],
right_bottom: [gl_rect.max.x, gl_rect.min.y],
tex_left_top: [tex_coords.min.x, tex_coords.max.y],
tex_right_bottom: [tex_coords.max.x, tex_coords.min.y],
color: extra.color,
}
}
// Creates a gfx texture with the given data
fn create_texture<GF, R>(
factory: &mut GF,
width: u32,
height: u32,
) -> Result<(TexSurfaceHandle<R>, TexShaderView<R>), Box<dyn Error>>
where
R: gfx::Resources,
GF: gfx::Factory<R>,
{
let kind = texture::Kind::D2(
width as texture::Size,
height as texture::Size,
texture::AaMode::Single,
);
let tex = factory.create_texture(
kind,
1,
gfx::memory::Bind::SHADER_RESOURCE,
gfx::memory::Usage::Dynamic,
Some(<TexChannel as format::ChannelTyped>::get_channel_type()),
)?;
let view =
factory.view_texture_as_shader_resource::<TexForm>(&tex, (0, 0), format::Swizzle::new())?;
Ok((tex, view))
}
// Updates a texture with the given data (used for updating the GlyphCache texture)
#[inline]
fn update_texture<R, C>(
encoder: &mut gfx::Encoder<R, C>,
texture: &handle::Texture<R, TexSurface>,
offset: [u16; 2],
size: [u16; 2],
data: &[u8],
) where
R: gfx::Resources,
C: gfx::CommandBuffer<R>,
{
let info = texture::ImageInfoCommon {
xoffset: offset[0],
yoffset: offset[1],
zoffset: 0,
width: size[0],
height: size[1],
depth: 0,
format: (),
mipmap: 0,
};
encoder
.update_texture::<TexSurface, TexForm>(texture, None, info, data)
.unwrap();
}
| {
cache.pipe_data.out_depth.take();
} | conditional_block |
lib.rs | #![allow(
clippy::cast_lossless,
clippy::too_many_arguments,
clippy::cognitive_complexity,
clippy::redundant_closure
)]
//! Fast GPU cached text rendering using gfx-rs & ab_glyph.
//!
//! Makes use of three kinds of caching to optimise frame performance.
//!
//! * Caching of glyph positioning output to avoid repeated cost of identical text
//! rendering on sequential frames.
//! * Caches draw calculations to avoid repeated cost of identical text rendering on
//! sequential frames.
//! * GPU cache logic to dynamically maintain a GPU texture of rendered glyphs.
//!
//! # Example
//!
//! ```no_run
//! use gfx_glyph::{ab_glyph::FontArc, GlyphBrushBuilder, Section, Text};
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
//! # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
//! # let gfx_depth: gfx_core::handle::DepthStencilView<gfx_device_gl::Resources, gfx::format::Depth> = unimplemented!();
//! # let gfx_factory: gfx_device_gl::Factory = unimplemented!();
//! # let gfx_encoder: gfx::Encoder<_, _> = gfx_factory.create_command_buffer().into();
//!
//! let dejavu = FontArc::try_from_slice(include_bytes!("../../fonts/DejaVuSans.ttf"))?;
//! let mut glyph_brush = GlyphBrushBuilder::using_font(dejavu).build(gfx_factory.clone());
//!
//! # let some_other_section = Section::default();
//! let section = Section::default().add_text(Text::new("Hello gfx_glyph"));
//!
//! glyph_brush.queue(section);
//! glyph_brush.queue(some_other_section);
//!
//! glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
//! # Ok(()) }
//! ```
mod builder;
mod pipe;
#[macro_use]
mod trace;
mod draw_builder;
pub use crate::{builder::*, draw_builder::*};
pub use glyph_brush::{
ab_glyph, legacy, BuiltInLineBreaker, Extra, FontId, GlyphCruncher, GlyphPositioner,
HorizontalAlign, Layout, LineBreak, LineBreaker, OwnedSection, OwnedText, Section,
SectionGeometry, SectionGlyph, SectionGlyphIter, SectionText, Text, VerticalAlign,
};
use crate::pipe::{glyph_pipe, GlyphVertex, IntoDimensions, RawAndFormat};
use gfx::{
format,
handle::{self, RawDepthStencilView, RawRenderTargetView},
texture,
traits::FactoryExt,
};
use glyph_brush::{ab_glyph::*, BrushAction, BrushError, DefaultSectionHasher};
use log::{log_enabled, warn};
use std::{
borrow::Cow,
error::Error,
fmt,
hash::{BuildHasher, Hash},
};
// Type for the generated glyph cache texture
type TexForm = format::U8Norm;
type TexSurface = <TexForm as format::Formatted>::Surface;
type TexChannel = <TexForm as format::Formatted>::Channel;
type TexFormView = <TexForm as format::Formatted>::View;
type TexSurfaceHandle<R> = handle::Texture<R, TexSurface>;
type TexShaderView<R> = handle::ShaderResourceView<R, TexFormView>;
/// Returns the default 4 dimensional matrix orthographic projection used for drawing.
///
/// # Example
///
/// ```
/// # let (screen_width, screen_height) = (1f32, 2f32);
/// let projection = gfx_glyph::default_transform((screen_width, screen_height));
/// ```
///
/// # Example
///
/// ```no_run
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// let projection = gfx_glyph::default_transform(&gfx_color);
/// ```
#[inline]
pub fn default_transform<D: IntoDimensions>(d: D) -> [[f32; 4]; 4] {
let (w, h) = d.into_dimensions();
[
[2.0 / w, 0.0, 0.0, 0.0],
[0.0, 2.0 / h, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[-1.0, -1.0, 0.0, 1.0],
]
}
/// Object allowing glyph drawing, containing cache state. Manages glyph positioning cacheing,
/// glyph draw caching & efficient GPU texture cache updating and re-sizing on demand.
///
/// Build using a [`GlyphBrushBuilder`](struct.GlyphBrushBuilder.html).
///
/// # Example
/// ```no_run
/// # use gfx_glyph::{GlyphBrushBuilder};
/// use gfx_glyph::{Section, Text};
/// # fn main() -> Result<(), String> {
/// # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// # let factory: gfx_device_gl::Factory = unimplemented!();
/// # let gfx_encoder: gfx::Encoder<_, _> = factory.create_command_buffer().into();
/// # let some_other_section = Section::default();
///
/// let section = Section::default().add_text(Text::new("Hello gfx_glyph"));
///
/// glyph_brush.queue(section);
/// glyph_brush.queue(some_other_section);
///
/// glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
/// # Ok(()) }
/// ```
///
/// # Caching behaviour
///
/// Calls to [`GlyphBrush::queue`](#method.queue),
/// [`GlyphBrush::glyph_bounds`](#method.glyph_bounds), [`GlyphBrush::glyphs`](#method.glyphs)
/// calculate the positioned glyphs for a section.
/// This is cached so future calls to any of the methods for the same section are much
/// cheaper. In the case of [`GlyphBrush::queue`](#method.queue) the calculations will also be
/// used for actual drawing.
///
/// The cache for a section will be **cleared** after a
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw) call when that section has not been used since
/// the previous draw call.
pub struct GlyphBrush<R: gfx::Resources, GF: gfx::Factory<R>, F = FontArc, H = DefaultSectionHasher>
{
font_cache_tex: (
gfx::handle::Texture<R, TexSurface>,
gfx_core::handle::ShaderResourceView<R, f32>,
),
texture_filter_method: texture::FilterMethod,
factory: GF,
program: gfx::handle::Program<R>,
draw_cache: Option<DrawnGlyphBrush<R>>,
glyph_brush: glyph_brush::GlyphBrush<GlyphVertex, Extra, F, H>,
// config
depth_test: gfx::state::Depth,
}
impl<R: gfx::Resources, GF: gfx::Factory<R>, F, H> fmt::Debug for GlyphBrush<R, GF, F, H> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "GlyphBrush")
}
}
impl<R, GF, F, H> GlyphBrush<R, GF, F, H>
where
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
F: Font,
{
/// Adds an additional font to the one(s) initially added on build.
///
/// Returns a new [`FontId`](struct.FontId.html) to reference this font.
pub fn add_font(&mut self, font: F) -> FontId {
self.glyph_brush.add_font(font)
}
}
impl<R, GF, F, H> GlyphCruncher<F, Extra> for GlyphBrush<R, GF, F, H>
where
F: Font,
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
{
#[inline]
fn glyphs_custom_layout<'a, 'b, S, L>(
&'b mut self,
section: S,
custom_layout: &L,
) -> SectionGlyphIter<'b>
where
L: GlyphPositioner + Hash,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush
.glyphs_custom_layout(section, custom_layout)
}
#[inline]
fn fonts(&self) -> &[F] {
self.glyph_brush.fonts()
}
#[inline]
fn glyph_bounds_custom_layout<'a, S, L>(
&mut self,
section: S,
custom_layout: &L,
) -> Option<Rect>
where
L: GlyphPositioner + Hash,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush
.glyph_bounds_custom_layout(section, custom_layout)
}
}
impl<R, GF, F, H> GlyphBrush<R, GF, F, H>
where
F: Font + Sync,
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
{
/// Queues a section/layout to be drawn by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times
/// to queue multiple sections for drawing.
///
/// Benefits from caching, see [caching behaviour](#caching-behaviour).
#[inline]
pub fn | <'a, S>(&mut self, section: S)
where
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.queue(section)
}
/// Returns a [`DrawBuilder`](struct.DrawBuilder.html) allowing the queued glyphs to be drawn.
///
/// Drawing will trim the cache, see [caching behaviour](#caching-behaviour).
/// # Example
///
/// ```no_run
/// # fn main() -> Result<(), String> {
/// # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// # let factory: gfx_device_gl::Factory = unimplemented!();
/// # let gfx_encoder: gfx::Encoder<_, _> = factory.create_command_buffer().into();
/// glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
/// # Ok(()) }
/// ```
#[inline]
pub fn use_queue(&mut self) -> DrawBuilder<'_, F, R, GF, H, ()> {
DrawBuilder {
brush: self,
transform: None,
depth_target: None,
}
}
/// Queues a section/layout to be drawn by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times
/// to queue multiple sections for drawing.
///
/// Used to provide custom `GlyphPositioner` logic, if using built-in
/// [`Layout`](enum.Layout.html) simply use [`queue`](struct.GlyphBrush.html#method.queue)
///
/// Benefits from caching, see [caching behaviour](#caching-behaviour).
#[inline]
pub fn queue_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G)
where
G: GlyphPositioner,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.queue_custom_layout(section, custom_layout)
}
/// Queues pre-positioned glyphs to be processed by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times.
#[inline]
pub fn queue_pre_positioned(
&mut self,
glyphs: Vec<SectionGlyph>,
extra: Vec<Extra>,
bounds: Rect,
) {
self.glyph_brush.queue_pre_positioned(glyphs, extra, bounds)
}
/// Retains the section in the cache as if it had been used in the last draw-frame.
///
/// Should not be necessary unless using multiple draws per frame with distinct transforms,
/// see [caching behaviour](#caching-behaviour).
#[inline]
pub fn keep_cached_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G)
where
S: Into<Cow<'a, Section<'a>>>,
G: GlyphPositioner,
{
self.glyph_brush
.keep_cached_custom_layout(section, custom_layout)
}
/// Retains the section in the cache as if it had been used in the last draw-frame.
///
/// Should not be necessary unless using multiple draws per frame with distinct transforms,
/// see [caching behaviour](#caching-behaviour).
#[inline]
pub fn keep_cached<'a, S>(&mut self, section: S)
where
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.keep_cached(section)
}
/// Returns the available fonts.
///
/// The `FontId` corresponds to the index of the font data.
#[inline]
pub fn fonts(&self) -> &[F] {
self.glyph_brush.fonts()
}
/// Draws all queued sections
pub(crate) fn draw<C, CV, DV>(
&mut self,
transform: [[f32; 4]; 4],
encoder: &mut gfx::Encoder<R, C>,
target: &CV,
depth_target: Option<&DV>,
) -> Result<(), String>
where
C: gfx::CommandBuffer<R>,
CV: RawAndFormat<Raw = RawRenderTargetView<R>>,
DV: RawAndFormat<Raw = RawDepthStencilView<R>>,
{
let mut brush_action;
loop {
let tex = self.font_cache_tex.0.clone();
brush_action = self.glyph_brush.process_queued(
|rect, tex_data| {
let offset = [rect.min[0] as u16, rect.min[1] as u16];
let size = [rect.width() as u16, rect.height() as u16];
update_texture(encoder, &tex, offset, size, tex_data);
},
to_vertex,
);
match brush_action {
Ok(_) => break,
Err(BrushError::TextureTooSmall { suggested }) => {
let max_image_dimension =
self.factory.get_capabilities().max_texture_size as u32;
let (new_width, new_height) = if (suggested.0 > max_image_dimension
|| suggested.1 > max_image_dimension)
&& (self.glyph_brush.texture_dimensions().0 < max_image_dimension
|| self.glyph_brush.texture_dimensions().1 < max_image_dimension)
{
(max_image_dimension, max_image_dimension)
} else {
suggested
};
if log_enabled!(log::Level::Warn) {
warn!(
"Increasing glyph texture size {old:?} -> {new:?}. \
Consider building with `.initial_cache_size({new:?})` to avoid \
resizing. Called from:\n{trace}",
old = self.glyph_brush.texture_dimensions(),
new = (new_width, new_height),
trace = outer_backtrace!()
);
}
match create_texture(&mut self.factory, new_width, new_height) {
Ok((new_tex, tex_view)) => {
self.glyph_brush.resize_texture(new_width, new_height);
if let Some(ref mut cache) = self.draw_cache {
cache.pipe_data.font_tex.0 = tex_view.clone();
}
self.font_cache_tex.1 = tex_view;
self.font_cache_tex.0 = new_tex;
}
Err(_) => {
return Err(format!(
"Failed to create {new_width}x{new_height} glyph texture"
));
}
}
}
}
}
// refresh pipe data
// - pipe targets may have changed, or had resolutions changes
// - format may have changed
if let Some(mut cache) = self.draw_cache.take() {
if &cache.pipe_data.out != target.as_raw() {
cache.pipe_data.out.clone_from(target.as_raw());
}
if let Some(depth_target) = depth_target {
if cache.pipe_data.out_depth.as_ref() != Some(depth_target.as_raw()) {
cache
.pipe_data
.out_depth
.clone_from(&Some(depth_target.as_raw().clone()));
}
} else {
cache.pipe_data.out_depth.take();
}
if cache.pso.0 != target.format() {
cache.pso = (
target.format(),
self.pso_using(target.format(), depth_target.map(|d| d.format())),
);
}
self.draw_cache = Some(cache);
}
match brush_action.unwrap() {
BrushAction::Draw(verts) => {
let draw_cache = if let Some(mut cache) = self.draw_cache.take() {
if cache.pipe_data.vbuf.len() < verts.len() {
cache.pipe_data.vbuf =
new_vertex_buffer(&mut self.factory, encoder, &verts);
} else {
encoder
.update_buffer(&cache.pipe_data.vbuf, &verts, 0)
.unwrap();
}
cache.slice.instances.as_mut().unwrap().0 = verts.len() as _;
cache
} else {
let vbuf = new_vertex_buffer(&mut self.factory, encoder, &verts);
DrawnGlyphBrush {
pipe_data: {
let sampler = self.factory.create_sampler(texture::SamplerInfo::new(
self.texture_filter_method,
texture::WrapMode::Clamp,
));
glyph_pipe::Data {
vbuf,
font_tex: (self.font_cache_tex.1.clone(), sampler),
transform,
out: target.as_raw().clone(),
out_depth: depth_target.map(|d| d.as_raw().clone()),
}
},
pso: (
target.format(),
self.pso_using(target.format(), depth_target.map(|d| d.format())),
),
slice: gfx::Slice {
instances: Some((verts.len() as _, 0)),
..Self::empty_slice()
},
}
};
self.draw_cache = Some(draw_cache);
}
BrushAction::ReDraw => {}
};
if let Some(&mut DrawnGlyphBrush {
ref pso,
ref slice,
ref mut pipe_data,
..
}) = self.draw_cache.as_mut()
{
pipe_data.transform = transform;
encoder.draw(slice, &pso.1, pipe_data);
}
Ok(())
}
fn pso_using(
&mut self,
color_format: gfx::format::Format,
depth_format: Option<gfx::format::Format>,
) -> gfx::PipelineState<R, glyph_pipe::Meta> {
self.factory
.create_pipeline_from_program(
&self.program,
gfx::Primitive::TriangleStrip,
gfx::state::Rasterizer::new_fill(),
glyph_pipe::Init::new(color_format, depth_format, self.depth_test),
)
.unwrap()
}
fn empty_slice() -> gfx::Slice<R> {
gfx::Slice {
start: 0,
end: 4,
buffer: gfx::IndexBuffer::Auto,
base_vertex: 0,
instances: None,
}
}
}
struct DrawnGlyphBrush<R: gfx::Resources> {
pipe_data: glyph_pipe::Data<R>,
pso: (gfx::format::Format, gfx::PipelineState<R, glyph_pipe::Meta>),
slice: gfx::Slice<R>,
}
/// Allocates a vertex buffer 1 per glyph that will be updated on text changes
#[inline]
fn new_vertex_buffer<R: gfx::Resources, F: gfx::Factory<R>, C: gfx::CommandBuffer<R>>(
factory: &mut F,
encoder: &mut gfx::Encoder<R, C>,
verts: &[GlyphVertex],
) -> gfx::handle::Buffer<R, GlyphVertex> {
let buf = factory
.create_buffer(
verts.len(),
gfx::buffer::Role::Vertex,
gfx::memory::Usage::Dynamic,
gfx::memory::Bind::empty(),
)
.unwrap();
encoder.update_buffer(&buf, verts, 0).unwrap();
buf
}
#[inline]
fn to_vertex(
glyph_brush::GlyphVertex {
mut tex_coords,
pixel_coords,
bounds,
extra,
}: glyph_brush::GlyphVertex,
) -> GlyphVertex {
let gl_bounds = bounds;
let mut gl_rect = Rect {
min: point(pixel_coords.min.x, pixel_coords.min.y),
max: point(pixel_coords.max.x, pixel_coords.max.y),
};
// handle overlapping bounds, modify uv_rect to preserve texture aspect
if gl_rect.max.x > gl_bounds.max.x {
let old_width = gl_rect.width();
gl_rect.max.x = gl_bounds.max.x;
tex_coords.max.x = tex_coords.min.x + tex_coords.width() * gl_rect.width() / old_width;
}
if gl_rect.min.x < gl_bounds.min.x {
let old_width = gl_rect.width();
gl_rect.min.x = gl_bounds.min.x;
tex_coords.min.x = tex_coords.max.x - tex_coords.width() * gl_rect.width() / old_width;
}
if gl_rect.max.y > gl_bounds.max.y {
let old_height = gl_rect.height();
gl_rect.max.y = gl_bounds.max.y;
tex_coords.max.y = tex_coords.min.y + tex_coords.height() * gl_rect.height() / old_height;
}
if gl_rect.min.y < gl_bounds.min.y {
let old_height = gl_rect.height();
gl_rect.min.y = gl_bounds.min.y;
tex_coords.min.y = tex_coords.max.y - tex_coords.height() * gl_rect.height() / old_height;
}
GlyphVertex {
left_top: [gl_rect.min.x, gl_rect.max.y, extra.z],
right_bottom: [gl_rect.max.x, gl_rect.min.y],
tex_left_top: [tex_coords.min.x, tex_coords.max.y],
tex_right_bottom: [tex_coords.max.x, tex_coords.min.y],
color: extra.color,
}
}
// Creates a gfx texture with the given data
fn create_texture<GF, R>(
factory: &mut GF,
width: u32,
height: u32,
) -> Result<(TexSurfaceHandle<R>, TexShaderView<R>), Box<dyn Error>>
where
R: gfx::Resources,
GF: gfx::Factory<R>,
{
let kind = texture::Kind::D2(
width as texture::Size,
height as texture::Size,
texture::AaMode::Single,
);
let tex = factory.create_texture(
kind,
1,
gfx::memory::Bind::SHADER_RESOURCE,
gfx::memory::Usage::Dynamic,
Some(<TexChannel as format::ChannelTyped>::get_channel_type()),
)?;
let view =
factory.view_texture_as_shader_resource::<TexForm>(&tex, (0, 0), format::Swizzle::new())?;
Ok((tex, view))
}
// Updates a texture with the given data (used for updating the GlyphCache texture)
#[inline]
fn update_texture<R, C>(
encoder: &mut gfx::Encoder<R, C>,
texture: &handle::Texture<R, TexSurface>,
offset: [u16; 2],
size: [u16; 2],
data: &[u8],
) where
R: gfx::Resources,
C: gfx::CommandBuffer<R>,
{
let info = texture::ImageInfoCommon {
xoffset: offset[0],
yoffset: offset[1],
zoffset: 0,
width: size[0],
height: size[1],
depth: 0,
format: (),
mipmap: 0,
};
encoder
.update_texture::<TexSurface, TexForm>(texture, None, info, data)
.unwrap();
}
| queue | identifier_name |
lib.rs | #![allow(
clippy::cast_lossless,
clippy::too_many_arguments,
clippy::cognitive_complexity,
clippy::redundant_closure
)]
//! Fast GPU cached text rendering using gfx-rs & ab_glyph.
//!
//! Makes use of three kinds of caching to optimise frame performance.
//!
//! * Caching of glyph positioning output to avoid repeated cost of identical text
//! rendering on sequential frames.
//! * Caches draw calculations to avoid repeated cost of identical text rendering on
//! sequential frames.
//! * GPU cache logic to dynamically maintain a GPU texture of rendered glyphs.
//!
//! # Example
//!
//! ```no_run
//! use gfx_glyph::{ab_glyph::FontArc, GlyphBrushBuilder, Section, Text};
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
//! # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
//! # let gfx_depth: gfx_core::handle::DepthStencilView<gfx_device_gl::Resources, gfx::format::Depth> = unimplemented!();
//! # let gfx_factory: gfx_device_gl::Factory = unimplemented!();
//! # let gfx_encoder: gfx::Encoder<_, _> = gfx_factory.create_command_buffer().into();
//!
//! let dejavu = FontArc::try_from_slice(include_bytes!("../../fonts/DejaVuSans.ttf"))?;
//! let mut glyph_brush = GlyphBrushBuilder::using_font(dejavu).build(gfx_factory.clone());
//!
//! # let some_other_section = Section::default();
//! let section = Section::default().add_text(Text::new("Hello gfx_glyph"));
//!
//! glyph_brush.queue(section);
//! glyph_brush.queue(some_other_section);
//!
//! glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
//! # Ok(()) }
//! ```
mod builder;
mod pipe;
#[macro_use]
mod trace;
mod draw_builder;
pub use crate::{builder::*, draw_builder::*};
pub use glyph_brush::{
ab_glyph, legacy, BuiltInLineBreaker, Extra, FontId, GlyphCruncher, GlyphPositioner,
HorizontalAlign, Layout, LineBreak, LineBreaker, OwnedSection, OwnedText, Section,
SectionGeometry, SectionGlyph, SectionGlyphIter, SectionText, Text, VerticalAlign,
};
use crate::pipe::{glyph_pipe, GlyphVertex, IntoDimensions, RawAndFormat};
use gfx::{
format,
handle::{self, RawDepthStencilView, RawRenderTargetView},
texture,
traits::FactoryExt,
};
use glyph_brush::{ab_glyph::*, BrushAction, BrushError, DefaultSectionHasher};
use log::{log_enabled, warn};
use std::{
borrow::Cow,
error::Error,
fmt,
hash::{BuildHasher, Hash},
};
// Type for the generated glyph cache texture
type TexForm = format::U8Norm;
type TexSurface = <TexForm as format::Formatted>::Surface;
type TexChannel = <TexForm as format::Formatted>::Channel;
type TexFormView = <TexForm as format::Formatted>::View;
type TexSurfaceHandle<R> = handle::Texture<R, TexSurface>;
type TexShaderView<R> = handle::ShaderResourceView<R, TexFormView>;
/// Returns the default 4 dimensional matrix orthographic projection used for drawing.
///
/// # Example
///
/// ```
/// # let (screen_width, screen_height) = (1f32, 2f32);
/// let projection = gfx_glyph::default_transform((screen_width, screen_height));
/// ```
///
/// # Example
///
/// ```no_run
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// let projection = gfx_glyph::default_transform(&gfx_color);
/// ```
#[inline]
pub fn default_transform<D: IntoDimensions>(d: D) -> [[f32; 4]; 4] {
let (w, h) = d.into_dimensions();
[
[2.0 / w, 0.0, 0.0, 0.0],
[0.0, 2.0 / h, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[-1.0, -1.0, 0.0, 1.0],
]
}
/// Object allowing glyph drawing, containing cache state. Manages glyph positioning cacheing,
/// glyph draw caching & efficient GPU texture cache updating and re-sizing on demand.
///
/// Build using a [`GlyphBrushBuilder`](struct.GlyphBrushBuilder.html).
///
/// # Example
/// ```no_run
/// # use gfx_glyph::{GlyphBrushBuilder};
/// use gfx_glyph::{Section, Text};
/// # fn main() -> Result<(), String> {
/// # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// # let factory: gfx_device_gl::Factory = unimplemented!();
/// # let gfx_encoder: gfx::Encoder<_, _> = factory.create_command_buffer().into();
/// # let some_other_section = Section::default();
///
/// let section = Section::default().add_text(Text::new("Hello gfx_glyph"));
///
/// glyph_brush.queue(section);
/// glyph_brush.queue(some_other_section);
///
/// glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
/// # Ok(()) }
/// ```
///
/// # Caching behaviour
///
/// Calls to [`GlyphBrush::queue`](#method.queue),
/// [`GlyphBrush::glyph_bounds`](#method.glyph_bounds), [`GlyphBrush::glyphs`](#method.glyphs)
/// calculate the positioned glyphs for a section.
/// This is cached so future calls to any of the methods for the same section are much
/// cheaper. In the case of [`GlyphBrush::queue`](#method.queue) the calculations will also be
/// used for actual drawing.
///
/// The cache for a section will be **cleared** after a
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw) call when that section has not been used since
/// the previous draw call.
pub struct GlyphBrush<R: gfx::Resources, GF: gfx::Factory<R>, F = FontArc, H = DefaultSectionHasher>
{
font_cache_tex: ( | gfx::handle::Texture<R, TexSurface>,
gfx_core::handle::ShaderResourceView<R, f32>,
),
texture_filter_method: texture::FilterMethod,
factory: GF,
program: gfx::handle::Program<R>,
draw_cache: Option<DrawnGlyphBrush<R>>,
glyph_brush: glyph_brush::GlyphBrush<GlyphVertex, Extra, F, H>,
// config
depth_test: gfx::state::Depth,
}
impl<R: gfx::Resources, GF: gfx::Factory<R>, F, H> fmt::Debug for GlyphBrush<R, GF, F, H> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "GlyphBrush")
}
}
impl<R, GF, F, H> GlyphBrush<R, GF, F, H>
where
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
F: Font,
{
/// Adds an additional font to the one(s) initially added on build.
///
/// Returns a new [`FontId`](struct.FontId.html) to reference this font.
pub fn add_font(&mut self, font: F) -> FontId {
self.glyph_brush.add_font(font)
}
}
impl<R, GF, F, H> GlyphCruncher<F, Extra> for GlyphBrush<R, GF, F, H>
where
F: Font,
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
{
#[inline]
fn glyphs_custom_layout<'a, 'b, S, L>(
&'b mut self,
section: S,
custom_layout: &L,
) -> SectionGlyphIter<'b>
where
L: GlyphPositioner + Hash,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush
.glyphs_custom_layout(section, custom_layout)
}
#[inline]
fn fonts(&self) -> &[F] {
self.glyph_brush.fonts()
}
#[inline]
fn glyph_bounds_custom_layout<'a, S, L>(
&mut self,
section: S,
custom_layout: &L,
) -> Option<Rect>
where
L: GlyphPositioner + Hash,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush
.glyph_bounds_custom_layout(section, custom_layout)
}
}
impl<R, GF, F, H> GlyphBrush<R, GF, F, H>
where
F: Font + Sync,
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
{
/// Queues a section/layout to be drawn by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times
/// to queue multiple sections for drawing.
///
/// Benefits from caching, see [caching behaviour](#caching-behaviour).
#[inline]
pub fn queue<'a, S>(&mut self, section: S)
where
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.queue(section)
}
/// Returns a [`DrawBuilder`](struct.DrawBuilder.html) allowing the queued glyphs to be drawn.
///
/// Drawing will trim the cache, see [caching behaviour](#caching-behaviour).
/// # Example
///
/// ```no_run
/// # fn main() -> Result<(), String> {
/// # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// # let factory: gfx_device_gl::Factory = unimplemented!();
/// # let gfx_encoder: gfx::Encoder<_, _> = factory.create_command_buffer().into();
/// glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
/// # Ok(()) }
/// ```
#[inline]
pub fn use_queue(&mut self) -> DrawBuilder<'_, F, R, GF, H, ()> {
DrawBuilder {
brush: self,
transform: None,
depth_target: None,
}
}
/// Queues a section/layout to be drawn by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times
/// to queue multiple sections for drawing.
///
/// Used to provide custom `GlyphPositioner` logic, if using built-in
/// [`Layout`](enum.Layout.html) simply use [`queue`](struct.GlyphBrush.html#method.queue)
///
/// Benefits from caching, see [caching behaviour](#caching-behaviour).
#[inline]
pub fn queue_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G)
where
G: GlyphPositioner,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.queue_custom_layout(section, custom_layout)
}
/// Queues pre-positioned glyphs to be processed by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times.
#[inline]
pub fn queue_pre_positioned(
&mut self,
glyphs: Vec<SectionGlyph>,
extra: Vec<Extra>,
bounds: Rect,
) {
self.glyph_brush.queue_pre_positioned(glyphs, extra, bounds)
}
/// Retains the section in the cache as if it had been used in the last draw-frame.
///
/// Should not be necessary unless using multiple draws per frame with distinct transforms,
/// see [caching behaviour](#caching-behaviour).
#[inline]
pub fn keep_cached_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G)
where
S: Into<Cow<'a, Section<'a>>>,
G: GlyphPositioner,
{
self.glyph_brush
.keep_cached_custom_layout(section, custom_layout)
}
/// Retains the section in the cache as if it had been used in the last draw-frame.
///
/// Should not be necessary unless using multiple draws per frame with distinct transforms,
/// see [caching behaviour](#caching-behaviour).
#[inline]
pub fn keep_cached<'a, S>(&mut self, section: S)
where
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.keep_cached(section)
}
/// Returns the available fonts.
///
/// The `FontId` corresponds to the index of the font data.
#[inline]
pub fn fonts(&self) -> &[F] {
self.glyph_brush.fonts()
}
/// Draws all queued sections
pub(crate) fn draw<C, CV, DV>(
&mut self,
transform: [[f32; 4]; 4],
encoder: &mut gfx::Encoder<R, C>,
target: &CV,
depth_target: Option<&DV>,
) -> Result<(), String>
where
C: gfx::CommandBuffer<R>,
CV: RawAndFormat<Raw = RawRenderTargetView<R>>,
DV: RawAndFormat<Raw = RawDepthStencilView<R>>,
{
let mut brush_action;
loop {
let tex = self.font_cache_tex.0.clone();
brush_action = self.glyph_brush.process_queued(
|rect, tex_data| {
let offset = [rect.min[0] as u16, rect.min[1] as u16];
let size = [rect.width() as u16, rect.height() as u16];
update_texture(encoder, &tex, offset, size, tex_data);
},
to_vertex,
);
match brush_action {
Ok(_) => break,
Err(BrushError::TextureTooSmall { suggested }) => {
let max_image_dimension =
self.factory.get_capabilities().max_texture_size as u32;
let (new_width, new_height) = if (suggested.0 > max_image_dimension
|| suggested.1 > max_image_dimension)
&& (self.glyph_brush.texture_dimensions().0 < max_image_dimension
|| self.glyph_brush.texture_dimensions().1 < max_image_dimension)
{
(max_image_dimension, max_image_dimension)
} else {
suggested
};
if log_enabled!(log::Level::Warn) {
warn!(
"Increasing glyph texture size {old:?} -> {new:?}. \
Consider building with `.initial_cache_size({new:?})` to avoid \
resizing. Called from:\n{trace}",
old = self.glyph_brush.texture_dimensions(),
new = (new_width, new_height),
trace = outer_backtrace!()
);
}
match create_texture(&mut self.factory, new_width, new_height) {
Ok((new_tex, tex_view)) => {
self.glyph_brush.resize_texture(new_width, new_height);
if let Some(ref mut cache) = self.draw_cache {
cache.pipe_data.font_tex.0 = tex_view.clone();
}
self.font_cache_tex.1 = tex_view;
self.font_cache_tex.0 = new_tex;
}
Err(_) => {
return Err(format!(
"Failed to create {new_width}x{new_height} glyph texture"
));
}
}
}
}
}
// refresh pipe data
// - pipe targets may have changed, or had resolutions changes
// - format may have changed
if let Some(mut cache) = self.draw_cache.take() {
if &cache.pipe_data.out != target.as_raw() {
cache.pipe_data.out.clone_from(target.as_raw());
}
if let Some(depth_target) = depth_target {
if cache.pipe_data.out_depth.as_ref() != Some(depth_target.as_raw()) {
cache
.pipe_data
.out_depth
.clone_from(&Some(depth_target.as_raw().clone()));
}
} else {
cache.pipe_data.out_depth.take();
}
if cache.pso.0 != target.format() {
cache.pso = (
target.format(),
self.pso_using(target.format(), depth_target.map(|d| d.format())),
);
}
self.draw_cache = Some(cache);
}
match brush_action.unwrap() {
BrushAction::Draw(verts) => {
let draw_cache = if let Some(mut cache) = self.draw_cache.take() {
if cache.pipe_data.vbuf.len() < verts.len() {
cache.pipe_data.vbuf =
new_vertex_buffer(&mut self.factory, encoder, &verts);
} else {
encoder
.update_buffer(&cache.pipe_data.vbuf, &verts, 0)
.unwrap();
}
cache.slice.instances.as_mut().unwrap().0 = verts.len() as _;
cache
} else {
let vbuf = new_vertex_buffer(&mut self.factory, encoder, &verts);
DrawnGlyphBrush {
pipe_data: {
let sampler = self.factory.create_sampler(texture::SamplerInfo::new(
self.texture_filter_method,
texture::WrapMode::Clamp,
));
glyph_pipe::Data {
vbuf,
font_tex: (self.font_cache_tex.1.clone(), sampler),
transform,
out: target.as_raw().clone(),
out_depth: depth_target.map(|d| d.as_raw().clone()),
}
},
pso: (
target.format(),
self.pso_using(target.format(), depth_target.map(|d| d.format())),
),
slice: gfx::Slice {
instances: Some((verts.len() as _, 0)),
..Self::empty_slice()
},
}
};
self.draw_cache = Some(draw_cache);
}
BrushAction::ReDraw => {}
};
if let Some(&mut DrawnGlyphBrush {
ref pso,
ref slice,
ref mut pipe_data,
..
}) = self.draw_cache.as_mut()
{
pipe_data.transform = transform;
encoder.draw(slice, &pso.1, pipe_data);
}
Ok(())
}
fn pso_using(
&mut self,
color_format: gfx::format::Format,
depth_format: Option<gfx::format::Format>,
) -> gfx::PipelineState<R, glyph_pipe::Meta> {
self.factory
.create_pipeline_from_program(
&self.program,
gfx::Primitive::TriangleStrip,
gfx::state::Rasterizer::new_fill(),
glyph_pipe::Init::new(color_format, depth_format, self.depth_test),
)
.unwrap()
}
fn empty_slice() -> gfx::Slice<R> {
gfx::Slice {
start: 0,
end: 4,
buffer: gfx::IndexBuffer::Auto,
base_vertex: 0,
instances: None,
}
}
}
struct DrawnGlyphBrush<R: gfx::Resources> {
pipe_data: glyph_pipe::Data<R>,
pso: (gfx::format::Format, gfx::PipelineState<R, glyph_pipe::Meta>),
slice: gfx::Slice<R>,
}
/// Allocates a vertex buffer 1 per glyph that will be updated on text changes
#[inline]
fn new_vertex_buffer<R: gfx::Resources, F: gfx::Factory<R>, C: gfx::CommandBuffer<R>>(
factory: &mut F,
encoder: &mut gfx::Encoder<R, C>,
verts: &[GlyphVertex],
) -> gfx::handle::Buffer<R, GlyphVertex> {
let buf = factory
.create_buffer(
verts.len(),
gfx::buffer::Role::Vertex,
gfx::memory::Usage::Dynamic,
gfx::memory::Bind::empty(),
)
.unwrap();
encoder.update_buffer(&buf, verts, 0).unwrap();
buf
}
#[inline]
fn to_vertex(
glyph_brush::GlyphVertex {
mut tex_coords,
pixel_coords,
bounds,
extra,
}: glyph_brush::GlyphVertex,
) -> GlyphVertex {
let gl_bounds = bounds;
let mut gl_rect = Rect {
min: point(pixel_coords.min.x, pixel_coords.min.y),
max: point(pixel_coords.max.x, pixel_coords.max.y),
};
// handle overlapping bounds, modify uv_rect to preserve texture aspect
if gl_rect.max.x > gl_bounds.max.x {
let old_width = gl_rect.width();
gl_rect.max.x = gl_bounds.max.x;
tex_coords.max.x = tex_coords.min.x + tex_coords.width() * gl_rect.width() / old_width;
}
if gl_rect.min.x < gl_bounds.min.x {
let old_width = gl_rect.width();
gl_rect.min.x = gl_bounds.min.x;
tex_coords.min.x = tex_coords.max.x - tex_coords.width() * gl_rect.width() / old_width;
}
if gl_rect.max.y > gl_bounds.max.y {
let old_height = gl_rect.height();
gl_rect.max.y = gl_bounds.max.y;
tex_coords.max.y = tex_coords.min.y + tex_coords.height() * gl_rect.height() / old_height;
}
if gl_rect.min.y < gl_bounds.min.y {
let old_height = gl_rect.height();
gl_rect.min.y = gl_bounds.min.y;
tex_coords.min.y = tex_coords.max.y - tex_coords.height() * gl_rect.height() / old_height;
}
GlyphVertex {
left_top: [gl_rect.min.x, gl_rect.max.y, extra.z],
right_bottom: [gl_rect.max.x, gl_rect.min.y],
tex_left_top: [tex_coords.min.x, tex_coords.max.y],
tex_right_bottom: [tex_coords.max.x, tex_coords.min.y],
color: extra.color,
}
}
// Creates a gfx texture with the given data
fn create_texture<GF, R>(
factory: &mut GF,
width: u32,
height: u32,
) -> Result<(TexSurfaceHandle<R>, TexShaderView<R>), Box<dyn Error>>
where
R: gfx::Resources,
GF: gfx::Factory<R>,
{
let kind = texture::Kind::D2(
width as texture::Size,
height as texture::Size,
texture::AaMode::Single,
);
let tex = factory.create_texture(
kind,
1,
gfx::memory::Bind::SHADER_RESOURCE,
gfx::memory::Usage::Dynamic,
Some(<TexChannel as format::ChannelTyped>::get_channel_type()),
)?;
let view =
factory.view_texture_as_shader_resource::<TexForm>(&tex, (0, 0), format::Swizzle::new())?;
Ok((tex, view))
}
// Updates a texture with the given data (used for updating the GlyphCache texture)
#[inline]
fn update_texture<R, C>(
encoder: &mut gfx::Encoder<R, C>,
texture: &handle::Texture<R, TexSurface>,
offset: [u16; 2],
size: [u16; 2],
data: &[u8],
) where
R: gfx::Resources,
C: gfx::CommandBuffer<R>,
{
let info = texture::ImageInfoCommon {
xoffset: offset[0],
yoffset: offset[1],
zoffset: 0,
width: size[0],
height: size[1],
depth: 0,
format: (),
mipmap: 0,
};
encoder
.update_texture::<TexSurface, TexForm>(texture, None, info, data)
.unwrap();
} | random_line_split | |
lib.rs | #![allow(
clippy::cast_lossless,
clippy::too_many_arguments,
clippy::cognitive_complexity,
clippy::redundant_closure
)]
//! Fast GPU cached text rendering using gfx-rs & ab_glyph.
//!
//! Makes use of three kinds of caching to optimise frame performance.
//!
//! * Caching of glyph positioning output to avoid repeated cost of identical text
//! rendering on sequential frames.
//! * Caches draw calculations to avoid repeated cost of identical text rendering on
//! sequential frames.
//! * GPU cache logic to dynamically maintain a GPU texture of rendered glyphs.
//!
//! # Example
//!
//! ```no_run
//! use gfx_glyph::{ab_glyph::FontArc, GlyphBrushBuilder, Section, Text};
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
//! # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
//! # let gfx_depth: gfx_core::handle::DepthStencilView<gfx_device_gl::Resources, gfx::format::Depth> = unimplemented!();
//! # let gfx_factory: gfx_device_gl::Factory = unimplemented!();
//! # let gfx_encoder: gfx::Encoder<_, _> = gfx_factory.create_command_buffer().into();
//!
//! let dejavu = FontArc::try_from_slice(include_bytes!("../../fonts/DejaVuSans.ttf"))?;
//! let mut glyph_brush = GlyphBrushBuilder::using_font(dejavu).build(gfx_factory.clone());
//!
//! # let some_other_section = Section::default();
//! let section = Section::default().add_text(Text::new("Hello gfx_glyph"));
//!
//! glyph_brush.queue(section);
//! glyph_brush.queue(some_other_section);
//!
//! glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
//! # Ok(()) }
//! ```
mod builder;
mod pipe;
#[macro_use]
mod trace;
mod draw_builder;
pub use crate::{builder::*, draw_builder::*};
pub use glyph_brush::{
ab_glyph, legacy, BuiltInLineBreaker, Extra, FontId, GlyphCruncher, GlyphPositioner,
HorizontalAlign, Layout, LineBreak, LineBreaker, OwnedSection, OwnedText, Section,
SectionGeometry, SectionGlyph, SectionGlyphIter, SectionText, Text, VerticalAlign,
};
use crate::pipe::{glyph_pipe, GlyphVertex, IntoDimensions, RawAndFormat};
use gfx::{
format,
handle::{self, RawDepthStencilView, RawRenderTargetView},
texture,
traits::FactoryExt,
};
use glyph_brush::{ab_glyph::*, BrushAction, BrushError, DefaultSectionHasher};
use log::{log_enabled, warn};
use std::{
borrow::Cow,
error::Error,
fmt,
hash::{BuildHasher, Hash},
};
// Type for the generated glyph cache texture
type TexForm = format::U8Norm;
type TexSurface = <TexForm as format::Formatted>::Surface;
type TexChannel = <TexForm as format::Formatted>::Channel;
type TexFormView = <TexForm as format::Formatted>::View;
type TexSurfaceHandle<R> = handle::Texture<R, TexSurface>;
type TexShaderView<R> = handle::ShaderResourceView<R, TexFormView>;
/// Returns the default 4 dimensional matrix orthographic projection used for drawing.
///
/// # Example
///
/// ```
/// # let (screen_width, screen_height) = (1f32, 2f32);
/// let projection = gfx_glyph::default_transform((screen_width, screen_height));
/// ```
///
/// # Example
///
/// ```no_run
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// let projection = gfx_glyph::default_transform(&gfx_color);
/// ```
#[inline]
pub fn default_transform<D: IntoDimensions>(d: D) -> [[f32; 4]; 4] {
let (w, h) = d.into_dimensions();
[
[2.0 / w, 0.0, 0.0, 0.0],
[0.0, 2.0 / h, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[-1.0, -1.0, 0.0, 1.0],
]
}
/// Object allowing glyph drawing, containing cache state. Manages glyph positioning cacheing,
/// glyph draw caching & efficient GPU texture cache updating and re-sizing on demand.
///
/// Build using a [`GlyphBrushBuilder`](struct.GlyphBrushBuilder.html).
///
/// # Example
/// ```no_run
/// # use gfx_glyph::{GlyphBrushBuilder};
/// use gfx_glyph::{Section, Text};
/// # fn main() -> Result<(), String> {
/// # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// # let factory: gfx_device_gl::Factory = unimplemented!();
/// # let gfx_encoder: gfx::Encoder<_, _> = factory.create_command_buffer().into();
/// # let some_other_section = Section::default();
///
/// let section = Section::default().add_text(Text::new("Hello gfx_glyph"));
///
/// glyph_brush.queue(section);
/// glyph_brush.queue(some_other_section);
///
/// glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
/// # Ok(()) }
/// ```
///
/// # Caching behaviour
///
/// Calls to [`GlyphBrush::queue`](#method.queue),
/// [`GlyphBrush::glyph_bounds`](#method.glyph_bounds), [`GlyphBrush::glyphs`](#method.glyphs)
/// calculate the positioned glyphs for a section.
/// This is cached so future calls to any of the methods for the same section are much
/// cheaper. In the case of [`GlyphBrush::queue`](#method.queue) the calculations will also be
/// used for actual drawing.
///
/// The cache for a section will be **cleared** after a
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw) call when that section has not been used since
/// the previous draw call.
pub struct GlyphBrush<R: gfx::Resources, GF: gfx::Factory<R>, F = FontArc, H = DefaultSectionHasher>
{
font_cache_tex: (
gfx::handle::Texture<R, TexSurface>,
gfx_core::handle::ShaderResourceView<R, f32>,
),
texture_filter_method: texture::FilterMethod,
factory: GF,
program: gfx::handle::Program<R>,
draw_cache: Option<DrawnGlyphBrush<R>>,
glyph_brush: glyph_brush::GlyphBrush<GlyphVertex, Extra, F, H>,
// config
depth_test: gfx::state::Depth,
}
impl<R: gfx::Resources, GF: gfx::Factory<R>, F, H> fmt::Debug for GlyphBrush<R, GF, F, H> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "GlyphBrush")
}
}
impl<R, GF, F, H> GlyphBrush<R, GF, F, H>
where
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
F: Font,
{
/// Adds an additional font to the one(s) initially added on build.
///
/// Returns a new [`FontId`](struct.FontId.html) to reference this font.
pub fn add_font(&mut self, font: F) -> FontId {
self.glyph_brush.add_font(font)
}
}
impl<R, GF, F, H> GlyphCruncher<F, Extra> for GlyphBrush<R, GF, F, H>
where
F: Font,
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
{
#[inline]
fn glyphs_custom_layout<'a, 'b, S, L>(
&'b mut self,
section: S,
custom_layout: &L,
) -> SectionGlyphIter<'b>
where
L: GlyphPositioner + Hash,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush
.glyphs_custom_layout(section, custom_layout)
}
#[inline]
fn fonts(&self) -> &[F] {
self.glyph_brush.fonts()
}
#[inline]
fn glyph_bounds_custom_layout<'a, S, L>(
&mut self,
section: S,
custom_layout: &L,
) -> Option<Rect>
where
L: GlyphPositioner + Hash,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush
.glyph_bounds_custom_layout(section, custom_layout)
}
}
impl<R, GF, F, H> GlyphBrush<R, GF, F, H>
where
F: Font + Sync,
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
{
/// Queues a section/layout to be drawn by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times
/// to queue multiple sections for drawing.
///
/// Benefits from caching, see [caching behaviour](#caching-behaviour).
#[inline]
pub fn queue<'a, S>(&mut self, section: S)
where
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.queue(section)
}
/// Returns a [`DrawBuilder`](struct.DrawBuilder.html) allowing the queued glyphs to be drawn.
///
/// Drawing will trim the cache, see [caching behaviour](#caching-behaviour).
/// # Example
///
/// ```no_run
/// # fn main() -> Result<(), String> {
/// # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// # let factory: gfx_device_gl::Factory = unimplemented!();
/// # let gfx_encoder: gfx::Encoder<_, _> = factory.create_command_buffer().into();
/// glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
/// # Ok(()) }
/// ```
#[inline]
pub fn use_queue(&mut self) -> DrawBuilder<'_, F, R, GF, H, ()> {
DrawBuilder {
brush: self,
transform: None,
depth_target: None,
}
}
/// Queues a section/layout to be drawn by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times
/// to queue multiple sections for drawing.
///
/// Used to provide custom `GlyphPositioner` logic, if using built-in
/// [`Layout`](enum.Layout.html) simply use [`queue`](struct.GlyphBrush.html#method.queue)
///
/// Benefits from caching, see [caching behaviour](#caching-behaviour).
#[inline]
pub fn queue_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G)
where
G: GlyphPositioner,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.queue_custom_layout(section, custom_layout)
}
/// Queues pre-positioned glyphs to be processed by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times.
#[inline]
pub fn queue_pre_positioned(
&mut self,
glyphs: Vec<SectionGlyph>,
extra: Vec<Extra>,
bounds: Rect,
) {
self.glyph_brush.queue_pre_positioned(glyphs, extra, bounds)
}
/// Retains the section in the cache as if it had been used in the last draw-frame.
///
/// Should not be necessary unless using multiple draws per frame with distinct transforms,
/// see [caching behaviour](#caching-behaviour).
#[inline]
pub fn keep_cached_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G)
where
S: Into<Cow<'a, Section<'a>>>,
G: GlyphPositioner,
{
self.glyph_brush
.keep_cached_custom_layout(section, custom_layout)
}
/// Retains the section in the cache as if it had been used in the last draw-frame.
///
/// Should not be necessary unless using multiple draws per frame with distinct transforms,
/// see [caching behaviour](#caching-behaviour).
#[inline]
pub fn keep_cached<'a, S>(&mut self, section: S)
where
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.keep_cached(section)
}
/// Returns the available fonts.
///
/// The `FontId` corresponds to the index of the font data.
#[inline]
pub fn fonts(&self) -> &[F] {
self.glyph_brush.fonts()
}
/// Draws all queued sections
pub(crate) fn draw<C, CV, DV>(
&mut self,
transform: [[f32; 4]; 4],
encoder: &mut gfx::Encoder<R, C>,
target: &CV,
depth_target: Option<&DV>,
) -> Result<(), String>
where
C: gfx::CommandBuffer<R>,
CV: RawAndFormat<Raw = RawRenderTargetView<R>>,
DV: RawAndFormat<Raw = RawDepthStencilView<R>>,
{
let mut brush_action;
loop {
let tex = self.font_cache_tex.0.clone();
brush_action = self.glyph_brush.process_queued(
|rect, tex_data| {
let offset = [rect.min[0] as u16, rect.min[1] as u16];
let size = [rect.width() as u16, rect.height() as u16];
update_texture(encoder, &tex, offset, size, tex_data);
},
to_vertex,
);
match brush_action {
Ok(_) => break,
Err(BrushError::TextureTooSmall { suggested }) => {
let max_image_dimension =
self.factory.get_capabilities().max_texture_size as u32;
let (new_width, new_height) = if (suggested.0 > max_image_dimension
|| suggested.1 > max_image_dimension)
&& (self.glyph_brush.texture_dimensions().0 < max_image_dimension
|| self.glyph_brush.texture_dimensions().1 < max_image_dimension)
{
(max_image_dimension, max_image_dimension)
} else {
suggested
};
if log_enabled!(log::Level::Warn) {
warn!(
"Increasing glyph texture size {old:?} -> {new:?}. \
Consider building with `.initial_cache_size({new:?})` to avoid \
resizing. Called from:\n{trace}",
old = self.glyph_brush.texture_dimensions(),
new = (new_width, new_height),
trace = outer_backtrace!()
);
}
match create_texture(&mut self.factory, new_width, new_height) {
Ok((new_tex, tex_view)) => {
self.glyph_brush.resize_texture(new_width, new_height);
if let Some(ref mut cache) = self.draw_cache {
cache.pipe_data.font_tex.0 = tex_view.clone();
}
self.font_cache_tex.1 = tex_view;
self.font_cache_tex.0 = new_tex;
}
Err(_) => {
return Err(format!(
"Failed to create {new_width}x{new_height} glyph texture"
));
}
}
}
}
}
// refresh pipe data
// - pipe targets may have changed, or had resolutions changes
// - format may have changed
if let Some(mut cache) = self.draw_cache.take() {
if &cache.pipe_data.out != target.as_raw() {
cache.pipe_data.out.clone_from(target.as_raw());
}
if let Some(depth_target) = depth_target {
if cache.pipe_data.out_depth.as_ref() != Some(depth_target.as_raw()) {
cache
.pipe_data
.out_depth
.clone_from(&Some(depth_target.as_raw().clone()));
}
} else {
cache.pipe_data.out_depth.take();
}
if cache.pso.0 != target.format() {
cache.pso = (
target.format(),
self.pso_using(target.format(), depth_target.map(|d| d.format())),
);
}
self.draw_cache = Some(cache);
}
match brush_action.unwrap() {
BrushAction::Draw(verts) => {
let draw_cache = if let Some(mut cache) = self.draw_cache.take() {
if cache.pipe_data.vbuf.len() < verts.len() {
cache.pipe_data.vbuf =
new_vertex_buffer(&mut self.factory, encoder, &verts);
} else {
encoder
.update_buffer(&cache.pipe_data.vbuf, &verts, 0)
.unwrap();
}
cache.slice.instances.as_mut().unwrap().0 = verts.len() as _;
cache
} else {
let vbuf = new_vertex_buffer(&mut self.factory, encoder, &verts);
DrawnGlyphBrush {
pipe_data: {
let sampler = self.factory.create_sampler(texture::SamplerInfo::new(
self.texture_filter_method,
texture::WrapMode::Clamp,
));
glyph_pipe::Data {
vbuf,
font_tex: (self.font_cache_tex.1.clone(), sampler),
transform,
out: target.as_raw().clone(),
out_depth: depth_target.map(|d| d.as_raw().clone()),
}
},
pso: (
target.format(),
self.pso_using(target.format(), depth_target.map(|d| d.format())),
),
slice: gfx::Slice {
instances: Some((verts.len() as _, 0)),
..Self::empty_slice()
},
}
};
self.draw_cache = Some(draw_cache);
}
BrushAction::ReDraw => {}
};
if let Some(&mut DrawnGlyphBrush {
ref pso,
ref slice,
ref mut pipe_data,
..
}) = self.draw_cache.as_mut()
{
pipe_data.transform = transform;
encoder.draw(slice, &pso.1, pipe_data);
}
Ok(())
}
fn pso_using(
&mut self,
color_format: gfx::format::Format,
depth_format: Option<gfx::format::Format>,
) -> gfx::PipelineState<R, glyph_pipe::Meta> {
self.factory
.create_pipeline_from_program(
&self.program,
gfx::Primitive::TriangleStrip,
gfx::state::Rasterizer::new_fill(),
glyph_pipe::Init::new(color_format, depth_format, self.depth_test),
)
.unwrap()
}
fn empty_slice() -> gfx::Slice<R> {
gfx::Slice {
start: 0,
end: 4,
buffer: gfx::IndexBuffer::Auto,
base_vertex: 0,
instances: None,
}
}
}
struct DrawnGlyphBrush<R: gfx::Resources> {
pipe_data: glyph_pipe::Data<R>,
pso: (gfx::format::Format, gfx::PipelineState<R, glyph_pipe::Meta>),
slice: gfx::Slice<R>,
}
/// Allocates a vertex buffer 1 per glyph that will be updated on text changes
#[inline]
fn new_vertex_buffer<R: gfx::Resources, F: gfx::Factory<R>, C: gfx::CommandBuffer<R>>(
factory: &mut F,
encoder: &mut gfx::Encoder<R, C>,
verts: &[GlyphVertex],
) -> gfx::handle::Buffer<R, GlyphVertex> {
let buf = factory
.create_buffer(
verts.len(),
gfx::buffer::Role::Vertex,
gfx::memory::Usage::Dynamic,
gfx::memory::Bind::empty(),
)
.unwrap();
encoder.update_buffer(&buf, verts, 0).unwrap();
buf
}
#[inline]
fn to_vertex(
glyph_brush::GlyphVertex {
mut tex_coords,
pixel_coords,
bounds,
extra,
}: glyph_brush::GlyphVertex,
) -> GlyphVertex {
let gl_bounds = bounds;
let mut gl_rect = Rect {
min: point(pixel_coords.min.x, pixel_coords.min.y),
max: point(pixel_coords.max.x, pixel_coords.max.y),
};
// handle overlapping bounds, modify uv_rect to preserve texture aspect
if gl_rect.max.x > gl_bounds.max.x {
let old_width = gl_rect.width();
gl_rect.max.x = gl_bounds.max.x;
tex_coords.max.x = tex_coords.min.x + tex_coords.width() * gl_rect.width() / old_width;
}
if gl_rect.min.x < gl_bounds.min.x {
let old_width = gl_rect.width();
gl_rect.min.x = gl_bounds.min.x;
tex_coords.min.x = tex_coords.max.x - tex_coords.width() * gl_rect.width() / old_width;
}
if gl_rect.max.y > gl_bounds.max.y {
let old_height = gl_rect.height();
gl_rect.max.y = gl_bounds.max.y;
tex_coords.max.y = tex_coords.min.y + tex_coords.height() * gl_rect.height() / old_height;
}
if gl_rect.min.y < gl_bounds.min.y {
let old_height = gl_rect.height();
gl_rect.min.y = gl_bounds.min.y;
tex_coords.min.y = tex_coords.max.y - tex_coords.height() * gl_rect.height() / old_height;
}
GlyphVertex {
left_top: [gl_rect.min.x, gl_rect.max.y, extra.z],
right_bottom: [gl_rect.max.x, gl_rect.min.y],
tex_left_top: [tex_coords.min.x, tex_coords.max.y],
tex_right_bottom: [tex_coords.max.x, tex_coords.min.y],
color: extra.color,
}
}
// Creates a gfx texture with the given data
fn create_texture<GF, R>(
factory: &mut GF,
width: u32,
height: u32,
) -> Result<(TexSurfaceHandle<R>, TexShaderView<R>), Box<dyn Error>>
where
R: gfx::Resources,
GF: gfx::Factory<R>,
|
// Updates a texture with the given data (used for updating the GlyphCache texture)
#[inline]
fn update_texture<R, C>(
encoder: &mut gfx::Encoder<R, C>,
texture: &handle::Texture<R, TexSurface>,
offset: [u16; 2],
size: [u16; 2],
data: &[u8],
) where
R: gfx::Resources,
C: gfx::CommandBuffer<R>,
{
let info = texture::ImageInfoCommon {
xoffset: offset[0],
yoffset: offset[1],
zoffset: 0,
width: size[0],
height: size[1],
depth: 0,
format: (),
mipmap: 0,
};
encoder
.update_texture::<TexSurface, TexForm>(texture, None, info, data)
.unwrap();
}
| {
let kind = texture::Kind::D2(
width as texture::Size,
height as texture::Size,
texture::AaMode::Single,
);
let tex = factory.create_texture(
kind,
1,
gfx::memory::Bind::SHADER_RESOURCE,
gfx::memory::Usage::Dynamic,
Some(<TexChannel as format::ChannelTyped>::get_channel_type()),
)?;
let view =
factory.view_texture_as_shader_resource::<TexForm>(&tex, (0, 0), format::Swizzle::new())?;
Ok((tex, view))
} | identifier_body |
encyclopediaWnd.py | # Copyright (C) 2004-2007 Prairie Games, Inc
# Please see LICENSE.TXT for details
from tgenative import *
from mud.tgepython.console import TGEExport
from mud.gamesettings import *
from mud.worlddocs.utils import GetTWikiName
from tomeGui import TomeGui
TomeGui = TomeGui.instance
import tarfile
import traceback
import re
# Checks for html links, metainfo, quicklinks and tags
PURGE_PARSER = re.compile(r'(<a +[\s\S]*?</a>+\s+)|(%META:TOPICINFO+[\s\S]*?%+\s+)|(\*Quick Links:+.*\s+)|(#[a-zA-Z]*\s+)')
LINK_PARSER = re.compile(r'\[\[(.*?)\]\[(.*?)\]\]')
HEADER1_PARSER = re.compile(r'(---\+)+(.*)')
HEADER2_PARSER = re.compile(r'(---\+\+)+(.*)')
HEADER3_PARSER = re.compile(r'(---\+\+\+)+(.*)')
HEADER4_PARSER = re.compile(r'(---\+\+\+\+)+(.*)')
HEADER5_PARSER = re.compile(r'(---\+\+\+\+\+)+(.*)')
BOLD_PARSER = re.compile(r'\*+(.*?)\*+')
ENCYC = {}
HEADER = """<color:2DCBC9><linkcolor:AAAA00><shadowcolor:000000><shadow:1:1><just:center><lmargin%%:2><rmargin%%:98><font:Arial Bold:20>Encyclopedia
<font:Arial:14><just:right><a:chatlink%s>Add to Chat</a>\n<just:left>"""
HOME = """
<a:ZoneIndex>Zones</a>
<a:SpawnIndex>Spawns (NPCs)</a>
<a:SpawnIndexByLevel>Spawns by Level</a>
<a:FactionIndex>Factions</a>
<a:ItemIndex>Items</a>
<a:ItemSetIndex>Item Sets</a>
<a:QuestIndex>Quests</a> | <a:SkillIndex>Skills</a>
<a:ClassIndex>Classes</a>
<a:RecipeIndex>Recipes</a><br>
<a:EnchantingDisenchantingIndex>Enchanting / Disenchanting</a>
"""
PAGECACHE = {}
ENCWND = None
class EncWindow:
def __init__(self):
self.encText = TGEObject("ENCYC_TEXT")
self.encScroll = TGEObject("ENCYC_SCROLL")
self.history = []
self.positions = {}
self.curIndex = -1
def setPage(self,mypage,append = True):
try:
page = ENCYC[mypage]
except:
return False
try:
text = PAGECACHE[mypage]
except KeyError:
text = ""
pos = self.encScroll.childRelPos.split(" ")
self.positions[self.curIndex] = (pos[0],pos[1])
if append:
if self.curIndex >= 0:
self.history = self.history[:self.curIndex+1]
self.history.append(mypage)
self.curIndex = len(self.history) - 1
else:
self.history.append(mypage)
self.curIndex += 1
if not text:
text = HEADER%mypage + page
# Strip out html links, metainfo, quick links and tags
text = PURGE_PARSER.sub('',text)
# Reformat bold font
text = BOLD_PARSER.sub(r'<font:Arial Bold:14>\1<font:Arial:14>',text)
# HEADERS
text = HEADER5_PARSER.sub(r'<font:Arial Bold:15><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
text = HEADER4_PARSER.sub(r'<font:Arial Bold:16><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
text = HEADER3_PARSER.sub(r'<font:Arial Bold:17><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
text = HEADER2_PARSER.sub(r'<font:Arial Bold:18><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
text = HEADER1_PARSER.sub(r'<font:Arial Bold:20><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
# Text coloring
text = text.replace(r'%GREEN%',"<color:00FF00>")
text = text.replace(r'%BLUE%',"<color:3030FF>")
text = text.replace(r'%RED%',"<color:FF0000>")
text = text.replace(r'%YELLOW%',"<color:FFC000>")
text = text.replace(r'%ENDCOLOR%',"<color:D5E70A>")
text = text.replace('\r',"\\r")
text = text.replace('\n',"\\n") # valid quote
text = text.replace('\a',"\\a") # valid quote
text = text.replace('"','\\"') # invalid quote
text = LINK_PARSER.sub(r'<a:\1>\2</a>',text)
TGEEval(r'ENCYC_TEXT.setText("");')
# get around some tge poop
x = 0
while x < len(text):
add = 1024
t = text[x:x+add]
if t[len(t)-1] == '\\':
add += 1
t = text[x:x+add]
TGEEval(r'ENCYC_TEXT.addText("%s",false);'%t)
x += add
PAGECACHE[mypage] = text
# reformat
TGEEval(r'ENCYC_TEXT.addText("\n",true);')
return True
def home(self):
self.curIndex = -1
self.history = []
self.setPage("Home")
def back(self):
if self.curIndex < 1:
return
pos = self.encScroll.childRelPos.split(" ")
self.positions[self.curIndex] = (pos[0],pos[1])
self.setPage(self.history[self.curIndex-1],False)
self.curIndex -= 1
pos = self.positions[self.curIndex]
self.encScroll.scrollRectVisible(pos[0],pos[1],1,444)
def forward(self):
if self.curIndex >= len(self.history)-1 or not len(self.history):
return
pos = self.encScroll.childRelPos.split(" ")
self.positions[self.curIndex] = (pos[0],pos[1])
self.setPage(self.history[self.curIndex+1],False)
self.curIndex += 1
pos = self.positions[self.curIndex]
self.encScroll.scrollRectVisible(pos[0],pos[1],1,444)
def encyclopediaSearch(searchvalue):
if not ENCWND:
PyExec()
formatted = GetTWikiName(searchvalue)
page = None
if ENCYC.has_key("Item%s"%formatted):
page = "Item%s"%formatted
elif ENCYC.has_key("ItemSet%s"%formatted):
page = "ItemSet%s"%formatted
elif ENCYC.has_key("Spell%s"%formatted):
page = "Spell%s"%formatted
elif ENCYC.has_key("Recipe%s"%formatted):
page = "Recipe%s"%formatted
elif ENCYC.has_key("Skill%s"%formatted):
page = "Skill%s"%formatted
elif ENCYC.has_key("Class%s"%formatted):
page = "Class%s"%formatted
elif ENCYC.has_key("Spawn%s"%formatted):
page = "Spawn%s"%formatted
elif ENCYC.has_key("Quest%s"%formatted):
page = "Quest%s"%formatted
elif ENCYC.has_key("Zone%s"%formatted):
page = "Zone%s"%formatted
elif ENCYC.has_key("Faction%s"%formatted):
page = "Faction%s"%formatted
if page:
ENCWND.setPage(page)
TGEEval("canvas.pushDialog(EncyclopediaWnd);")
else:
TGECall("MessageBoxOK","Entry not found","No entry for %s in encyclopedia."%searchvalue)
return
def encyclopediaGetLink(searchvalue):
if not searchvalue:
return None
if not ENCWND:
PyExec()
formatted = GetTWikiName(searchvalue)
link = None
if ENCYC.has_key(formatted):
link = "<a:%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Item%s"%formatted):
link = "<a:Item%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("ItemSet%s"%formatted):
link = "<a:ItemSet%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Spell%s"%formatted):
link = "<a:Spell%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Recipe%s"%formatted):
link = "<a:Recipe%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Skill%s"%formatted):
link = "<a:Skill%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Class%s"%formatted):
link = "<a:Class%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Spawn%s"%formatted):
link = "<a:Spawn%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Quest%s"%formatted):
link = "<a:Quest%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Zone%s"%formatted):
link = "<a:Zone%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Faction%s"%formatted):
link = "<a:Faction%s>%s</a>"%(formatted,searchvalue)
return link
def OnEncyclopediaOnURL(args):
page = args[1]
if page.startswith('chatlink'):
# If a chatlink is clicked from encyclopedia, then
# the command control is not visible because the
# encyclopedia would have gained focus.
commandCtrl = TomeGui.tomeCommandCtrl
TGECall("PushChatGui")
commandCtrl.visible = True
commandCtrl.makeFirstResponder(True)
txt = commandCtrl.GetValue()
commandCtrl.SetValue("%s <%s>"%(txt,page[8:]))
elif not ENCWND.setPage(page):
TGECall("MessageBoxOK","Invalid Link","Sorry, you just stumbled upon an invalid encyclopedia link, page %s not found."%page)
def externEncyclopediaLinkURL(args):
page = args[1].replace('gamelink','')
if page.startswith('charlink'): # link to tell, not encyc
commandCtrl = TomeGui.tomeCommandCtrl
if not commandCtrl.visible:
TGECall("PushChatGui")
commandCtrl.visible = True
commandCtrl.makeFirstResponder(True)
commandCtrl.SetValue("/tell %s "%page[8:].replace(' ','_'))
elif not ENCWND.setPage(page):
TGECall("MessageBoxOK","Invalid Link","Sorry, you just stumbled upon an invalid encyclopedia link, page %s not found."%page)
else:
TGEEval("canvas.pushDialog(EncyclopediaWnd);")
def OnEncyclopediaHome():
ENCWND.home()
def OnEncyclopediaBack():
ENCWND.back()
def OnEncyclopediaForward():
ENCWND.forward()
def PyExec():
global ENCWND
ENCWND = EncWindow()
#read the encyclopedia
try:
tar = tarfile.open("./%s/data/ui/encyclopedia/momworld.tar.gz"%GAMEROOT,'r:gz')
for tarinfo in tar:
if tarinfo.name.startswith("twiki/data/MoMWorld") and tarinfo.isreg():
f = tar.extractfile(tarinfo)
data = f.read()
ENCYC[tarinfo.name[20:-4]] = data
tar.close()
except:
traceback.print_exc()
ENCYC["Home"]=HOME
ENCWND.setPage("Home")
TGEExport(OnEncyclopediaOnURL,"Py","OnEncyclopediaOnURL","desc",2,2)
TGEExport(externEncyclopediaLinkURL,"Py","ExternEncyclopediaLinkURL","desc",2,2)
TGEExport(OnEncyclopediaHome,"Py","OnEncyclopediaHome","desc",1,1)
TGEExport(OnEncyclopediaForward,"Py","OnEncyclopediaForward","desc",1,1)
TGEExport(OnEncyclopediaBack,"Py","OnEncyclopediaBack","desc",1,1) | <a:SpellIndex>Spells</a> | random_line_split |
encyclopediaWnd.py | # Copyright (C) 2004-2007 Prairie Games, Inc
# Please see LICENSE.TXT for details
from tgenative import *
from mud.tgepython.console import TGEExport
from mud.gamesettings import *
from mud.worlddocs.utils import GetTWikiName
from tomeGui import TomeGui
TomeGui = TomeGui.instance
import tarfile
import traceback
import re
# Checks for html links, metainfo, quicklinks and tags
PURGE_PARSER = re.compile(r'(<a +[\s\S]*?</a>+\s+)|(%META:TOPICINFO+[\s\S]*?%+\s+)|(\*Quick Links:+.*\s+)|(#[a-zA-Z]*\s+)')
LINK_PARSER = re.compile(r'\[\[(.*?)\]\[(.*?)\]\]')
HEADER1_PARSER = re.compile(r'(---\+)+(.*)')
HEADER2_PARSER = re.compile(r'(---\+\+)+(.*)')
HEADER3_PARSER = re.compile(r'(---\+\+\+)+(.*)')
HEADER4_PARSER = re.compile(r'(---\+\+\+\+)+(.*)')
HEADER5_PARSER = re.compile(r'(---\+\+\+\+\+)+(.*)')
BOLD_PARSER = re.compile(r'\*+(.*?)\*+')
ENCYC = {}
HEADER = """<color:2DCBC9><linkcolor:AAAA00><shadowcolor:000000><shadow:1:1><just:center><lmargin%%:2><rmargin%%:98><font:Arial Bold:20>Encyclopedia
<font:Arial:14><just:right><a:chatlink%s>Add to Chat</a>\n<just:left>"""
HOME = """
<a:ZoneIndex>Zones</a>
<a:SpawnIndex>Spawns (NPCs)</a>
<a:SpawnIndexByLevel>Spawns by Level</a>
<a:FactionIndex>Factions</a>
<a:ItemIndex>Items</a>
<a:ItemSetIndex>Item Sets</a>
<a:QuestIndex>Quests</a>
<a:SpellIndex>Spells</a>
<a:SkillIndex>Skills</a>
<a:ClassIndex>Classes</a>
<a:RecipeIndex>Recipes</a><br>
<a:EnchantingDisenchantingIndex>Enchanting / Disenchanting</a>
"""
PAGECACHE = {}
ENCWND = None
class EncWindow:
def __init__(self):
self.encText = TGEObject("ENCYC_TEXT")
self.encScroll = TGEObject("ENCYC_SCROLL")
self.history = []
self.positions = {}
self.curIndex = -1
def setPage(self,mypage,append = True):
try:
page = ENCYC[mypage]
except:
return False
try:
text = PAGECACHE[mypage]
except KeyError:
text = ""
pos = self.encScroll.childRelPos.split(" ")
self.positions[self.curIndex] = (pos[0],pos[1])
if append:
if self.curIndex >= 0:
self.history = self.history[:self.curIndex+1]
self.history.append(mypage)
self.curIndex = len(self.history) - 1
else:
self.history.append(mypage)
self.curIndex += 1
if not text:
text = HEADER%mypage + page
# Strip out html links, metainfo, quick links and tags
text = PURGE_PARSER.sub('',text)
# Reformat bold font
text = BOLD_PARSER.sub(r'<font:Arial Bold:14>\1<font:Arial:14>',text)
# HEADERS
text = HEADER5_PARSER.sub(r'<font:Arial Bold:15><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
text = HEADER4_PARSER.sub(r'<font:Arial Bold:16><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
text = HEADER3_PARSER.sub(r'<font:Arial Bold:17><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
text = HEADER2_PARSER.sub(r'<font:Arial Bold:18><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
text = HEADER1_PARSER.sub(r'<font:Arial Bold:20><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
# Text coloring
text = text.replace(r'%GREEN%',"<color:00FF00>")
text = text.replace(r'%BLUE%',"<color:3030FF>")
text = text.replace(r'%RED%',"<color:FF0000>")
text = text.replace(r'%YELLOW%',"<color:FFC000>")
text = text.replace(r'%ENDCOLOR%',"<color:D5E70A>")
text = text.replace('\r',"\\r")
text = text.replace('\n',"\\n") # valid quote
text = text.replace('\a',"\\a") # valid quote
text = text.replace('"','\\"') # invalid quote
text = LINK_PARSER.sub(r'<a:\1>\2</a>',text)
TGEEval(r'ENCYC_TEXT.setText("");')
# get around some tge poop
x = 0
while x < len(text):
add = 1024
t = text[x:x+add]
if t[len(t)-1] == '\\':
add += 1
t = text[x:x+add]
TGEEval(r'ENCYC_TEXT.addText("%s",false);'%t)
x += add
PAGECACHE[mypage] = text
# reformat
TGEEval(r'ENCYC_TEXT.addText("\n",true);')
return True
def home(self):
self.curIndex = -1
self.history = []
self.setPage("Home")
def back(self):
if self.curIndex < 1:
return
pos = self.encScroll.childRelPos.split(" ")
self.positions[self.curIndex] = (pos[0],pos[1])
self.setPage(self.history[self.curIndex-1],False)
self.curIndex -= 1
pos = self.positions[self.curIndex]
self.encScroll.scrollRectVisible(pos[0],pos[1],1,444)
def forward(self):
if self.curIndex >= len(self.history)-1 or not len(self.history):
return
pos = self.encScroll.childRelPos.split(" ")
self.positions[self.curIndex] = (pos[0],pos[1])
self.setPage(self.history[self.curIndex+1],False)
self.curIndex += 1
pos = self.positions[self.curIndex]
self.encScroll.scrollRectVisible(pos[0],pos[1],1,444)
def encyclopediaSearch(searchvalue):
if not ENCWND:
PyExec()
formatted = GetTWikiName(searchvalue)
page = None
if ENCYC.has_key("Item%s"%formatted):
page = "Item%s"%formatted
elif ENCYC.has_key("ItemSet%s"%formatted):
page = "ItemSet%s"%formatted
elif ENCYC.has_key("Spell%s"%formatted):
page = "Spell%s"%formatted
elif ENCYC.has_key("Recipe%s"%formatted):
page = "Recipe%s"%formatted
elif ENCYC.has_key("Skill%s"%formatted):
page = "Skill%s"%formatted
elif ENCYC.has_key("Class%s"%formatted):
page = "Class%s"%formatted
elif ENCYC.has_key("Spawn%s"%formatted):
page = "Spawn%s"%formatted
elif ENCYC.has_key("Quest%s"%formatted):
page = "Quest%s"%formatted
elif ENCYC.has_key("Zone%s"%formatted):
page = "Zone%s"%formatted
elif ENCYC.has_key("Faction%s"%formatted):
page = "Faction%s"%formatted
if page:
ENCWND.setPage(page)
TGEEval("canvas.pushDialog(EncyclopediaWnd);")
else:
TGECall("MessageBoxOK","Entry not found","No entry for %s in encyclopedia."%searchvalue)
return
def encyclopediaGetLink(searchvalue):
if not searchvalue:
return None
if not ENCWND:
PyExec()
formatted = GetTWikiName(searchvalue)
link = None
if ENCYC.has_key(formatted):
link = "<a:%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Item%s"%formatted):
link = "<a:Item%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("ItemSet%s"%formatted):
link = "<a:ItemSet%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Spell%s"%formatted):
link = "<a:Spell%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Recipe%s"%formatted):
link = "<a:Recipe%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Skill%s"%formatted):
link = "<a:Skill%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Class%s"%formatted):
link = "<a:Class%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Spawn%s"%formatted):
link = "<a:Spawn%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Quest%s"%formatted):
link = "<a:Quest%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Zone%s"%formatted):
link = "<a:Zone%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Faction%s"%formatted):
link = "<a:Faction%s>%s</a>"%(formatted,searchvalue)
return link
def OnEncyclopediaOnURL(args):
page = args[1]
if page.startswith('chatlink'):
# If a chatlink is clicked from encyclopedia, then
# the command control is not visible because the
# encyclopedia would have gained focus.
commandCtrl = TomeGui.tomeCommandCtrl
TGECall("PushChatGui")
commandCtrl.visible = True
commandCtrl.makeFirstResponder(True)
txt = commandCtrl.GetValue()
commandCtrl.SetValue("%s <%s>"%(txt,page[8:]))
elif not ENCWND.setPage(page):
TGECall("MessageBoxOK","Invalid Link","Sorry, you just stumbled upon an invalid encyclopedia link, page %s not found."%page)
def externEncyclopediaLinkURL(args):
|
def OnEncyclopediaHome():
ENCWND.home()
def OnEncyclopediaBack():
ENCWND.back()
def OnEncyclopediaForward():
ENCWND.forward()
def PyExec():
global ENCWND
ENCWND = EncWindow()
#read the encyclopedia
try:
tar = tarfile.open("./%s/data/ui/encyclopedia/momworld.tar.gz"%GAMEROOT,'r:gz')
for tarinfo in tar:
if tarinfo.name.startswith("twiki/data/MoMWorld") and tarinfo.isreg():
f = tar.extractfile(tarinfo)
data = f.read()
ENCYC[tarinfo.name[20:-4]] = data
tar.close()
except:
traceback.print_exc()
ENCYC["Home"]=HOME
ENCWND.setPage("Home")
TGEExport(OnEncyclopediaOnURL,"Py","OnEncyclopediaOnURL","desc",2,2)
TGEExport(externEncyclopediaLinkURL,"Py","ExternEncyclopediaLinkURL","desc",2,2)
TGEExport(OnEncyclopediaHome,"Py","OnEncyclopediaHome","desc",1,1)
TGEExport(OnEncyclopediaForward,"Py","OnEncyclopediaForward","desc",1,1)
TGEExport(OnEncyclopediaBack,"Py","OnEncyclopediaBack","desc",1,1)
| page = args[1].replace('gamelink','')
if page.startswith('charlink'): # link to tell, not encyc
commandCtrl = TomeGui.tomeCommandCtrl
if not commandCtrl.visible:
TGECall("PushChatGui")
commandCtrl.visible = True
commandCtrl.makeFirstResponder(True)
commandCtrl.SetValue("/tell %s "%page[8:].replace(' ','_'))
elif not ENCWND.setPage(page):
TGECall("MessageBoxOK","Invalid Link","Sorry, you just stumbled upon an invalid encyclopedia link, page %s not found."%page)
else:
TGEEval("canvas.pushDialog(EncyclopediaWnd);") | identifier_body |
encyclopediaWnd.py | # Copyright (C) 2004-2007 Prairie Games, Inc
# Please see LICENSE.TXT for details
from tgenative import *
from mud.tgepython.console import TGEExport
from mud.gamesettings import *
from mud.worlddocs.utils import GetTWikiName
from tomeGui import TomeGui
TomeGui = TomeGui.instance
import tarfile
import traceback
import re
# Checks for html links, metainfo, quicklinks and tags
PURGE_PARSER = re.compile(r'(<a +[\s\S]*?</a>+\s+)|(%META:TOPICINFO+[\s\S]*?%+\s+)|(\*Quick Links:+.*\s+)|(#[a-zA-Z]*\s+)')
LINK_PARSER = re.compile(r'\[\[(.*?)\]\[(.*?)\]\]')
HEADER1_PARSER = re.compile(r'(---\+)+(.*)')
HEADER2_PARSER = re.compile(r'(---\+\+)+(.*)')
HEADER3_PARSER = re.compile(r'(---\+\+\+)+(.*)')
HEADER4_PARSER = re.compile(r'(---\+\+\+\+)+(.*)')
HEADER5_PARSER = re.compile(r'(---\+\+\+\+\+)+(.*)')
BOLD_PARSER = re.compile(r'\*+(.*?)\*+')
ENCYC = {}
HEADER = """<color:2DCBC9><linkcolor:AAAA00><shadowcolor:000000><shadow:1:1><just:center><lmargin%%:2><rmargin%%:98><font:Arial Bold:20>Encyclopedia
<font:Arial:14><just:right><a:chatlink%s>Add to Chat</a>\n<just:left>"""
HOME = """
<a:ZoneIndex>Zones</a>
<a:SpawnIndex>Spawns (NPCs)</a>
<a:SpawnIndexByLevel>Spawns by Level</a>
<a:FactionIndex>Factions</a>
<a:ItemIndex>Items</a>
<a:ItemSetIndex>Item Sets</a>
<a:QuestIndex>Quests</a>
<a:SpellIndex>Spells</a>
<a:SkillIndex>Skills</a>
<a:ClassIndex>Classes</a>
<a:RecipeIndex>Recipes</a><br>
<a:EnchantingDisenchantingIndex>Enchanting / Disenchanting</a>
"""
PAGECACHE = {}
ENCWND = None
class EncWindow:
def __init__(self):
self.encText = TGEObject("ENCYC_TEXT")
self.encScroll = TGEObject("ENCYC_SCROLL")
self.history = []
self.positions = {}
self.curIndex = -1
def setPage(self,mypage,append = True):
try:
page = ENCYC[mypage]
except:
return False
try:
text = PAGECACHE[mypage]
except KeyError:
text = ""
pos = self.encScroll.childRelPos.split(" ")
self.positions[self.curIndex] = (pos[0],pos[1])
if append:
if self.curIndex >= 0:
self.history = self.history[:self.curIndex+1]
self.history.append(mypage)
self.curIndex = len(self.history) - 1
else:
self.history.append(mypage)
self.curIndex += 1
if not text:
text = HEADER%mypage + page
# Strip out html links, metainfo, quick links and tags
text = PURGE_PARSER.sub('',text)
# Reformat bold font
text = BOLD_PARSER.sub(r'<font:Arial Bold:14>\1<font:Arial:14>',text)
# HEADERS
text = HEADER5_PARSER.sub(r'<font:Arial Bold:15><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
text = HEADER4_PARSER.sub(r'<font:Arial Bold:16><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
text = HEADER3_PARSER.sub(r'<font:Arial Bold:17><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
text = HEADER2_PARSER.sub(r'<font:Arial Bold:18><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
text = HEADER1_PARSER.sub(r'<font:Arial Bold:20><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
# Text coloring
text = text.replace(r'%GREEN%',"<color:00FF00>")
text = text.replace(r'%BLUE%',"<color:3030FF>")
text = text.replace(r'%RED%',"<color:FF0000>")
text = text.replace(r'%YELLOW%',"<color:FFC000>")
text = text.replace(r'%ENDCOLOR%',"<color:D5E70A>")
text = text.replace('\r',"\\r")
text = text.replace('\n',"\\n") # valid quote
text = text.replace('\a',"\\a") # valid quote
text = text.replace('"','\\"') # invalid quote
text = LINK_PARSER.sub(r'<a:\1>\2</a>',text)
TGEEval(r'ENCYC_TEXT.setText("");')
# get around some tge poop
x = 0
while x < len(text):
add = 1024
t = text[x:x+add]
if t[len(t)-1] == '\\':
add += 1
t = text[x:x+add]
TGEEval(r'ENCYC_TEXT.addText("%s",false);'%t)
x += add
PAGECACHE[mypage] = text
# reformat
TGEEval(r'ENCYC_TEXT.addText("\n",true);')
return True
def home(self):
self.curIndex = -1
self.history = []
self.setPage("Home")
def back(self):
if self.curIndex < 1:
return
pos = self.encScroll.childRelPos.split(" ")
self.positions[self.curIndex] = (pos[0],pos[1])
self.setPage(self.history[self.curIndex-1],False)
self.curIndex -= 1
pos = self.positions[self.curIndex]
self.encScroll.scrollRectVisible(pos[0],pos[1],1,444)
def forward(self):
if self.curIndex >= len(self.history)-1 or not len(self.history):
return
pos = self.encScroll.childRelPos.split(" ")
self.positions[self.curIndex] = (pos[0],pos[1])
self.setPage(self.history[self.curIndex+1],False)
self.curIndex += 1
pos = self.positions[self.curIndex]
self.encScroll.scrollRectVisible(pos[0],pos[1],1,444)
def encyclopediaSearch(searchvalue):
if not ENCWND:
PyExec()
formatted = GetTWikiName(searchvalue)
page = None
if ENCYC.has_key("Item%s"%formatted):
page = "Item%s"%formatted
elif ENCYC.has_key("ItemSet%s"%formatted):
page = "ItemSet%s"%formatted
elif ENCYC.has_key("Spell%s"%formatted):
page = "Spell%s"%formatted
elif ENCYC.has_key("Recipe%s"%formatted):
page = "Recipe%s"%formatted
elif ENCYC.has_key("Skill%s"%formatted):
page = "Skill%s"%formatted
elif ENCYC.has_key("Class%s"%formatted):
page = "Class%s"%formatted
elif ENCYC.has_key("Spawn%s"%formatted):
page = "Spawn%s"%formatted
elif ENCYC.has_key("Quest%s"%formatted):
page = "Quest%s"%formatted
elif ENCYC.has_key("Zone%s"%formatted):
page = "Zone%s"%formatted
elif ENCYC.has_key("Faction%s"%formatted):
page = "Faction%s"%formatted
if page:
ENCWND.setPage(page)
TGEEval("canvas.pushDialog(EncyclopediaWnd);")
else:
|
return
def encyclopediaGetLink(searchvalue):
if not searchvalue:
return None
if not ENCWND:
PyExec()
formatted = GetTWikiName(searchvalue)
link = None
if ENCYC.has_key(formatted):
link = "<a:%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Item%s"%formatted):
link = "<a:Item%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("ItemSet%s"%formatted):
link = "<a:ItemSet%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Spell%s"%formatted):
link = "<a:Spell%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Recipe%s"%formatted):
link = "<a:Recipe%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Skill%s"%formatted):
link = "<a:Skill%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Class%s"%formatted):
link = "<a:Class%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Spawn%s"%formatted):
link = "<a:Spawn%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Quest%s"%formatted):
link = "<a:Quest%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Zone%s"%formatted):
link = "<a:Zone%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Faction%s"%formatted):
link = "<a:Faction%s>%s</a>"%(formatted,searchvalue)
return link
def OnEncyclopediaOnURL(args):
page = args[1]
if page.startswith('chatlink'):
# If a chatlink is clicked from encyclopedia, then
# the command control is not visible because the
# encyclopedia would have gained focus.
commandCtrl = TomeGui.tomeCommandCtrl
TGECall("PushChatGui")
commandCtrl.visible = True
commandCtrl.makeFirstResponder(True)
txt = commandCtrl.GetValue()
commandCtrl.SetValue("%s <%s>"%(txt,page[8:]))
elif not ENCWND.setPage(page):
TGECall("MessageBoxOK","Invalid Link","Sorry, you just stumbled upon an invalid encyclopedia link, page %s not found."%page)
def externEncyclopediaLinkURL(args):
page = args[1].replace('gamelink','')
if page.startswith('charlink'): # link to tell, not encyc
commandCtrl = TomeGui.tomeCommandCtrl
if not commandCtrl.visible:
TGECall("PushChatGui")
commandCtrl.visible = True
commandCtrl.makeFirstResponder(True)
commandCtrl.SetValue("/tell %s "%page[8:].replace(' ','_'))
elif not ENCWND.setPage(page):
TGECall("MessageBoxOK","Invalid Link","Sorry, you just stumbled upon an invalid encyclopedia link, page %s not found."%page)
else:
TGEEval("canvas.pushDialog(EncyclopediaWnd);")
def OnEncyclopediaHome():
ENCWND.home()
def OnEncyclopediaBack():
ENCWND.back()
def OnEncyclopediaForward():
ENCWND.forward()
def PyExec():
global ENCWND
ENCWND = EncWindow()
#read the encyclopedia
try:
tar = tarfile.open("./%s/data/ui/encyclopedia/momworld.tar.gz"%GAMEROOT,'r:gz')
for tarinfo in tar:
if tarinfo.name.startswith("twiki/data/MoMWorld") and tarinfo.isreg():
f = tar.extractfile(tarinfo)
data = f.read()
ENCYC[tarinfo.name[20:-4]] = data
tar.close()
except:
traceback.print_exc()
ENCYC["Home"]=HOME
ENCWND.setPage("Home")
TGEExport(OnEncyclopediaOnURL,"Py","OnEncyclopediaOnURL","desc",2,2)
TGEExport(externEncyclopediaLinkURL,"Py","ExternEncyclopediaLinkURL","desc",2,2)
TGEExport(OnEncyclopediaHome,"Py","OnEncyclopediaHome","desc",1,1)
TGEExport(OnEncyclopediaForward,"Py","OnEncyclopediaForward","desc",1,1)
TGEExport(OnEncyclopediaBack,"Py","OnEncyclopediaBack","desc",1,1)
| TGECall("MessageBoxOK","Entry not found","No entry for %s in encyclopedia."%searchvalue) | conditional_block |
encyclopediaWnd.py | # Copyright (C) 2004-2007 Prairie Games, Inc
# Please see LICENSE.TXT for details
from tgenative import *
from mud.tgepython.console import TGEExport
from mud.gamesettings import *
from mud.worlddocs.utils import GetTWikiName
from tomeGui import TomeGui
TomeGui = TomeGui.instance
import tarfile
import traceback
import re
# Checks for html links, metainfo, quicklinks and tags
PURGE_PARSER = re.compile(r'(<a +[\s\S]*?</a>+\s+)|(%META:TOPICINFO+[\s\S]*?%+\s+)|(\*Quick Links:+.*\s+)|(#[a-zA-Z]*\s+)')
LINK_PARSER = re.compile(r'\[\[(.*?)\]\[(.*?)\]\]')
HEADER1_PARSER = re.compile(r'(---\+)+(.*)')
HEADER2_PARSER = re.compile(r'(---\+\+)+(.*)')
HEADER3_PARSER = re.compile(r'(---\+\+\+)+(.*)')
HEADER4_PARSER = re.compile(r'(---\+\+\+\+)+(.*)')
HEADER5_PARSER = re.compile(r'(---\+\+\+\+\+)+(.*)')
BOLD_PARSER = re.compile(r'\*+(.*?)\*+')
ENCYC = {}
HEADER = """<color:2DCBC9><linkcolor:AAAA00><shadowcolor:000000><shadow:1:1><just:center><lmargin%%:2><rmargin%%:98><font:Arial Bold:20>Encyclopedia
<font:Arial:14><just:right><a:chatlink%s>Add to Chat</a>\n<just:left>"""
HOME = """
<a:ZoneIndex>Zones</a>
<a:SpawnIndex>Spawns (NPCs)</a>
<a:SpawnIndexByLevel>Spawns by Level</a>
<a:FactionIndex>Factions</a>
<a:ItemIndex>Items</a>
<a:ItemSetIndex>Item Sets</a>
<a:QuestIndex>Quests</a>
<a:SpellIndex>Spells</a>
<a:SkillIndex>Skills</a>
<a:ClassIndex>Classes</a>
<a:RecipeIndex>Recipes</a><br>
<a:EnchantingDisenchantingIndex>Enchanting / Disenchanting</a>
"""
PAGECACHE = {}
ENCWND = None
class EncWindow:
def | (self):
self.encText = TGEObject("ENCYC_TEXT")
self.encScroll = TGEObject("ENCYC_SCROLL")
self.history = []
self.positions = {}
self.curIndex = -1
def setPage(self,mypage,append = True):
try:
page = ENCYC[mypage]
except:
return False
try:
text = PAGECACHE[mypage]
except KeyError:
text = ""
pos = self.encScroll.childRelPos.split(" ")
self.positions[self.curIndex] = (pos[0],pos[1])
if append:
if self.curIndex >= 0:
self.history = self.history[:self.curIndex+1]
self.history.append(mypage)
self.curIndex = len(self.history) - 1
else:
self.history.append(mypage)
self.curIndex += 1
if not text:
text = HEADER%mypage + page
# Strip out html links, metainfo, quick links and tags
text = PURGE_PARSER.sub('',text)
# Reformat bold font
text = BOLD_PARSER.sub(r'<font:Arial Bold:14>\1<font:Arial:14>',text)
# HEADERS
text = HEADER5_PARSER.sub(r'<font:Arial Bold:15><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
text = HEADER4_PARSER.sub(r'<font:Arial Bold:16><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
text = HEADER3_PARSER.sub(r'<font:Arial Bold:17><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
text = HEADER2_PARSER.sub(r'<font:Arial Bold:18><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
text = HEADER1_PARSER.sub(r'<font:Arial Bold:20><color:2DCBC9><just:center>\2<font:Arial:14><just:left><color:D5E70A>',text)
# Text coloring
text = text.replace(r'%GREEN%',"<color:00FF00>")
text = text.replace(r'%BLUE%',"<color:3030FF>")
text = text.replace(r'%RED%',"<color:FF0000>")
text = text.replace(r'%YELLOW%',"<color:FFC000>")
text = text.replace(r'%ENDCOLOR%',"<color:D5E70A>")
text = text.replace('\r',"\\r")
text = text.replace('\n',"\\n") # valid quote
text = text.replace('\a',"\\a") # valid quote
text = text.replace('"','\\"') # invalid quote
text = LINK_PARSER.sub(r'<a:\1>\2</a>',text)
TGEEval(r'ENCYC_TEXT.setText("");')
# get around some tge poop
x = 0
while x < len(text):
add = 1024
t = text[x:x+add]
if t[len(t)-1] == '\\':
add += 1
t = text[x:x+add]
TGEEval(r'ENCYC_TEXT.addText("%s",false);'%t)
x += add
PAGECACHE[mypage] = text
# reformat
TGEEval(r'ENCYC_TEXT.addText("\n",true);')
return True
def home(self):
self.curIndex = -1
self.history = []
self.setPage("Home")
def back(self):
if self.curIndex < 1:
return
pos = self.encScroll.childRelPos.split(" ")
self.positions[self.curIndex] = (pos[0],pos[1])
self.setPage(self.history[self.curIndex-1],False)
self.curIndex -= 1
pos = self.positions[self.curIndex]
self.encScroll.scrollRectVisible(pos[0],pos[1],1,444)
def forward(self):
if self.curIndex >= len(self.history)-1 or not len(self.history):
return
pos = self.encScroll.childRelPos.split(" ")
self.positions[self.curIndex] = (pos[0],pos[1])
self.setPage(self.history[self.curIndex+1],False)
self.curIndex += 1
pos = self.positions[self.curIndex]
self.encScroll.scrollRectVisible(pos[0],pos[1],1,444)
def encyclopediaSearch(searchvalue):
if not ENCWND:
PyExec()
formatted = GetTWikiName(searchvalue)
page = None
if ENCYC.has_key("Item%s"%formatted):
page = "Item%s"%formatted
elif ENCYC.has_key("ItemSet%s"%formatted):
page = "ItemSet%s"%formatted
elif ENCYC.has_key("Spell%s"%formatted):
page = "Spell%s"%formatted
elif ENCYC.has_key("Recipe%s"%formatted):
page = "Recipe%s"%formatted
elif ENCYC.has_key("Skill%s"%formatted):
page = "Skill%s"%formatted
elif ENCYC.has_key("Class%s"%formatted):
page = "Class%s"%formatted
elif ENCYC.has_key("Spawn%s"%formatted):
page = "Spawn%s"%formatted
elif ENCYC.has_key("Quest%s"%formatted):
page = "Quest%s"%formatted
elif ENCYC.has_key("Zone%s"%formatted):
page = "Zone%s"%formatted
elif ENCYC.has_key("Faction%s"%formatted):
page = "Faction%s"%formatted
if page:
ENCWND.setPage(page)
TGEEval("canvas.pushDialog(EncyclopediaWnd);")
else:
TGECall("MessageBoxOK","Entry not found","No entry for %s in encyclopedia."%searchvalue)
return
def encyclopediaGetLink(searchvalue):
if not searchvalue:
return None
if not ENCWND:
PyExec()
formatted = GetTWikiName(searchvalue)
link = None
if ENCYC.has_key(formatted):
link = "<a:%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Item%s"%formatted):
link = "<a:Item%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("ItemSet%s"%formatted):
link = "<a:ItemSet%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Spell%s"%formatted):
link = "<a:Spell%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Recipe%s"%formatted):
link = "<a:Recipe%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Skill%s"%formatted):
link = "<a:Skill%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Class%s"%formatted):
link = "<a:Class%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Spawn%s"%formatted):
link = "<a:Spawn%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Quest%s"%formatted):
link = "<a:Quest%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Zone%s"%formatted):
link = "<a:Zone%s>%s</a>"%(formatted,searchvalue)
elif ENCYC.has_key("Faction%s"%formatted):
link = "<a:Faction%s>%s</a>"%(formatted,searchvalue)
return link
def OnEncyclopediaOnURL(args):
page = args[1]
if page.startswith('chatlink'):
# If a chatlink is clicked from encyclopedia, then
# the command control is not visible because the
# encyclopedia would have gained focus.
commandCtrl = TomeGui.tomeCommandCtrl
TGECall("PushChatGui")
commandCtrl.visible = True
commandCtrl.makeFirstResponder(True)
txt = commandCtrl.GetValue()
commandCtrl.SetValue("%s <%s>"%(txt,page[8:]))
elif not ENCWND.setPage(page):
TGECall("MessageBoxOK","Invalid Link","Sorry, you just stumbled upon an invalid encyclopedia link, page %s not found."%page)
def externEncyclopediaLinkURL(args):
page = args[1].replace('gamelink','')
if page.startswith('charlink'): # link to tell, not encyc
commandCtrl = TomeGui.tomeCommandCtrl
if not commandCtrl.visible:
TGECall("PushChatGui")
commandCtrl.visible = True
commandCtrl.makeFirstResponder(True)
commandCtrl.SetValue("/tell %s "%page[8:].replace(' ','_'))
elif not ENCWND.setPage(page):
TGECall("MessageBoxOK","Invalid Link","Sorry, you just stumbled upon an invalid encyclopedia link, page %s not found."%page)
else:
TGEEval("canvas.pushDialog(EncyclopediaWnd);")
def OnEncyclopediaHome():
ENCWND.home()
def OnEncyclopediaBack():
ENCWND.back()
def OnEncyclopediaForward():
ENCWND.forward()
def PyExec():
global ENCWND
ENCWND = EncWindow()
#read the encyclopedia
try:
tar = tarfile.open("./%s/data/ui/encyclopedia/momworld.tar.gz"%GAMEROOT,'r:gz')
for tarinfo in tar:
if tarinfo.name.startswith("twiki/data/MoMWorld") and tarinfo.isreg():
f = tar.extractfile(tarinfo)
data = f.read()
ENCYC[tarinfo.name[20:-4]] = data
tar.close()
except:
traceback.print_exc()
ENCYC["Home"]=HOME
ENCWND.setPage("Home")
TGEExport(OnEncyclopediaOnURL,"Py","OnEncyclopediaOnURL","desc",2,2)
TGEExport(externEncyclopediaLinkURL,"Py","ExternEncyclopediaLinkURL","desc",2,2)
TGEExport(OnEncyclopediaHome,"Py","OnEncyclopediaHome","desc",1,1)
TGEExport(OnEncyclopediaForward,"Py","OnEncyclopediaForward","desc",1,1)
TGEExport(OnEncyclopediaBack,"Py","OnEncyclopediaBack","desc",1,1)
| __init__ | identifier_name |
report.go | // Package reporttsi provides a report about the series cardinality in one or more TSI indexes.
package reporttsi
import (
"errors"
"flag"
"fmt"
"io"
"math"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"strconv"
"sync/atomic"
"text/tabwriter"
"github.com/freetsdb/freetsdb/logger"
"github.com/freetsdb/freetsdb/tsdb"
"github.com/freetsdb/freetsdb/tsdb/index/tsi1"
)
const (
// Number of series IDs to stored in slice before we convert to a roaring
// bitmap. Roaring bitmaps have a non-trivial initial cost to construct.
useBitmapN = 25
)
// Command represents the program execution for "freets_inspect reporttsi".
type Command struct {
// Standard input/output, overridden for testing.
Stderr io.Writer
Stdout io.Writer
dbPath string
shardPaths map[uint64]string
shardIdxs map[uint64]*tsi1.Index
cardinalities map[uint64]map[string]*cardinality
seriesFilePath string // optional. Defaults to dbPath/_series
sfile *tsdb.SeriesFile
topN int
byMeasurement bool
byTagKey bool
// How many goroutines to dedicate to calculating cardinality.
concurrency int
}
// NewCommand returns a new instance of Command with default setting applied.
func NewCommand() *Command {
return &Command{
Stderr: os.Stderr,
Stdout: os.Stdout,
shardPaths: map[uint64]string{},
shardIdxs: map[uint64]*tsi1.Index{},
cardinalities: map[uint64]map[string]*cardinality{},
topN: 0,
byMeasurement: true,
byTagKey: false,
concurrency: runtime.GOMAXPROCS(0),
}
}
// Run executes the command.
func (cmd *Command) Run(args ...string) error {
fs := flag.NewFlagSet("reporttsi", flag.ExitOnError)
fs.StringVar(&cmd.dbPath, "db-path", "", "Path to database. Required.")
fs.StringVar(&cmd.seriesFilePath, "series-file", "", "Optional path to series file. Defaults /path/to/db-path/_series")
fs.BoolVar(&cmd.byMeasurement, "measurements", true, "Segment cardinality by measurements")
// TODO(edd): Not yet implemented.
// fs.BoolVar(&cmd.byTagKey, "tag-key", false, "Segment cardinality by tag keys (overrides `measurements`")
fs.IntVar(&cmd.topN, "top", 0, "Limit results to top n")
fs.IntVar(&cmd.concurrency, "c", runtime.GOMAXPROCS(0), "Set worker concurrency. Defaults to GOMAXPROCS setting.")
fs.SetOutput(cmd.Stdout)
if err := fs.Parse(args); err != nil {
return err
}
if cmd.byTagKey {
return errors.New("Segmenting cardinality by tag key is not yet implemented")
}
if cmd.dbPath == "" {
return errors.New("path to database must be provided")
}
if cmd.seriesFilePath == "" {
cmd.seriesFilePath = path.Join(cmd.dbPath, tsdb.SeriesFileDirectory)
}
// Walk database directory to get shards.
if err := filepath.Walk(cmd.dbPath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return nil
}
// TODO(edd): this would be a problem if the retention policy was named
// "index".
if info.Name() == tsdb.SeriesFileDirectory || info.Name() == "index" {
return filepath.SkipDir
}
id, err := strconv.Atoi(info.Name())
if err != nil {
return nil
}
cmd.shardPaths[uint64(id)] = path
return nil
}); err != nil {
return err
}
if len(cmd.shardPaths) == 0 {
fmt.Fprintf(cmd.Stderr, "No shards under %s\n", cmd.dbPath)
return nil
}
return cmd.run()
}
func (cmd *Command) run() error {
cmd.sfile = tsdb.NewSeriesFile(cmd.seriesFilePath)
cmd.sfile.Logger = logger.New(os.Stderr)
if err := cmd.sfile.Open(); err != nil {
return err
}
defer cmd.sfile.Close()
// Open all the indexes.
for id, pth := range cmd.shardPaths {
pth = path.Join(pth, "index")
// Verify directory is an index before opening it.
if ok, err := tsi1.IsIndexDir(pth); err != nil {
return err
} else if !ok {
return fmt.Errorf("not a TSI index directory: %q", pth)
}
cmd.shardIdxs[id] = tsi1.NewIndex(cmd.sfile,
"",
tsi1.WithPath(pth),
tsi1.DisableCompactions(),
)
if err := cmd.shardIdxs[id].Open(); err != nil {
return err
}
defer cmd.shardIdxs[id].Close()
// Initialise cardinality set to store cardinalities for this shard.
cmd.cardinalities[id] = map[string]*cardinality{}
}
// Calculate cardinalities of shards.
fn := cmd.cardinalityByMeasurement
// if cmd.byTagKey {
// TODO(edd)
// }
// Blocks until all work done.
cmd.calculateCardinalities(fn)
// Print summary.
if err := cmd.printSummaryByMeasurement(); err != nil {
return err
}
allIDs := make([]uint64, 0, len(cmd.shardIdxs))
for id := range cmd.shardIdxs {
allIDs = append(allIDs, id)
}
sort.Slice(allIDs, func(i int, j int) bool { return allIDs[i] < allIDs[j] })
for _, id := range allIDs {
if err := cmd.printShardByMeasurement(id); err != nil {
return err
}
}
return nil
}
// calculateCardinalities calculates the cardinalities of the set of shard being
// worked on concurrently. The provided function determines how cardinality is
// calculated and broken down.
func (cmd *Command) calculateCardinalities(fn func(id uint64) error) error {
// Get list of shards to work on.
shardIDs := make([]uint64, 0, len(cmd.shardIdxs))
for id := range cmd.shardIdxs {
shardIDs = append(shardIDs, id)
}
errC := make(chan error, len(shardIDs))
var maxi uint32 // index of maximumm shard being worked on.
for k := 0; k < cmd.concurrency; k++ {
go func() {
for {
i := int(atomic.AddUint32(&maxi, 1) - 1) // Get next partition to work on.
if i >= len(shardIDs) |
errC <- fn(shardIDs[i])
}
}()
}
// Check for error
for i := 0; i < cap(errC); i++ {
if err := <-errC; err != nil {
return err
}
}
return nil
}
type cardinality struct {
name []byte
short []uint32
set *tsdb.SeriesIDSet
}
func (c *cardinality) add(x uint64) {
if c.set != nil {
c.set.AddNoLock(x)
return
}
c.short = append(c.short, uint32(x)) // Series IDs never get beyond 2^32
// Cheaper to store in bitmap.
if len(c.short) > useBitmapN {
c.set = tsdb.NewSeriesIDSet()
for i := 0; i < len(c.short); i++ {
c.set.AddNoLock(uint64(c.short[i]))
}
c.short = nil
return
}
}
func (c *cardinality) cardinality() int64 {
if c == nil || (c.short == nil && c.set == nil) {
return 0
}
if c.short != nil {
return int64(len(c.short))
}
return int64(c.set.Cardinality())
}
type cardinalities []*cardinality
func (a cardinalities) Len() int { return len(a) }
func (a cardinalities) Less(i, j int) bool { return a[i].cardinality() < a[j].cardinality() }
func (a cardinalities) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (cmd *Command) cardinalityByMeasurement(shardID uint64) error {
idx := cmd.shardIdxs[shardID]
itr, err := idx.MeasurementIterator()
if err != nil {
return err
} else if itr == nil {
return nil
}
defer itr.Close()
OUTER:
for {
name, err := itr.Next()
if err != nil {
return err
} else if name == nil {
break OUTER
}
// Get series ID set to track cardinality under measurement.
c, ok := cmd.cardinalities[shardID][string(name)]
if !ok {
c = &cardinality{name: name}
cmd.cardinalities[shardID][string(name)] = c
}
sitr, err := idx.MeasurementSeriesIDIterator(name)
if err != nil {
return err
} else if sitr == nil {
continue
}
var e tsdb.SeriesIDElem
for e, err = sitr.Next(); err == nil && e.SeriesID != 0; e, err = sitr.Next() {
if e.SeriesID > math.MaxUint32 {
panic(fmt.Sprintf("series ID is too large: %d (max %d). Corrupted series file?", e.SeriesID, uint32(math.MaxUint32)))
}
c.add(e.SeriesID)
}
sitr.Close()
if err != nil {
return err
}
}
return nil
}
type result struct {
name []byte
count int64
// For low cardinality measurements just track series using map
lowCardinality map[uint32]struct{}
// For higher cardinality measurements track using bitmap.
set *tsdb.SeriesIDSet
}
func (r *result) addShort(ids []uint32) {
// There is already a bitset of this result.
if r.set != nil {
for _, id := range ids {
r.set.AddNoLock(uint64(id))
}
return
}
// Still tracking low cardinality sets
if r.lowCardinality == nil {
r.lowCardinality = map[uint32]struct{}{}
}
for _, id := range ids {
r.lowCardinality[id] = struct{}{}
}
// Cardinality is large enough that we will benefit from using a bitmap
if len(r.lowCardinality) > useBitmapN {
r.set = tsdb.NewSeriesIDSet()
for id := range r.lowCardinality {
r.set.AddNoLock(uint64(id))
}
r.lowCardinality = nil
}
}
func (r *result) merge(other *tsdb.SeriesIDSet) {
if r.set == nil {
r.set = tsdb.NewSeriesIDSet()
for id := range r.lowCardinality {
r.set.AddNoLock(uint64(id))
}
r.lowCardinality = nil
}
r.set.Merge(other)
}
type results []*result
func (a results) Len() int { return len(a) }
func (a results) Less(i, j int) bool { return a[i].count < a[j].count }
func (a results) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (cmd *Command) printSummaryByMeasurement() error {
// Get global set of measurement names across shards.
idxs := &tsdb.IndexSet{SeriesFile: cmd.sfile}
for _, idx := range cmd.shardIdxs {
idxs.Indexes = append(idxs.Indexes, idx)
}
mitr, err := idxs.MeasurementIterator()
if err != nil {
return err
} else if mitr == nil {
return errors.New("got nil measurement iterator for index set")
}
defer mitr.Close()
var name []byte
var totalCardinality int64
measurements := results{}
for name, err = mitr.Next(); err == nil && name != nil; name, err = mitr.Next() {
res := &result{name: name}
for _, shardCards := range cmd.cardinalities {
other, ok := shardCards[string(name)]
if !ok {
continue // this shard doesn't have anything for this measurement.
}
if other.short != nil && other.set != nil {
panic("cardinality stored incorrectly")
}
if other.short != nil { // low cardinality case
res.addShort(other.short)
} else if other.set != nil { // High cardinality case
res.merge(other.set)
}
// Shard does not have any series for this measurement.
}
// Determine final cardinality and allow intermediate structures to be
// GCd.
if res.lowCardinality != nil {
res.count = int64(len(res.lowCardinality))
} else {
res.count = int64(res.set.Cardinality())
}
totalCardinality += res.count
res.set = nil
res.lowCardinality = nil
measurements = append(measurements, res)
}
if err != nil {
return err
}
// sort measurements by cardinality.
sort.Sort(sort.Reverse(measurements))
if cmd.topN > 0 {
// There may not be "topN" measurement cardinality to sub-slice.
n := int(math.Min(float64(cmd.topN), float64(len(measurements))))
measurements = measurements[:n]
}
tw := tabwriter.NewWriter(cmd.Stdout, 4, 4, 1, '\t', 0)
fmt.Fprintf(tw, "Summary\nDatabase Path: %s\nCardinality (exact): %d\n\n", cmd.dbPath, totalCardinality)
fmt.Fprint(tw, "Measurement\tCardinality (exact)\n\n")
for _, res := range measurements {
fmt.Fprintf(tw, "%q\t\t%d\n", res.name, res.count)
}
if err := tw.Flush(); err != nil {
return err
}
fmt.Fprint(cmd.Stdout, "\n\n")
return nil
}
func (cmd *Command) printShardByMeasurement(id uint64) error {
allMap, ok := cmd.cardinalities[id]
if !ok {
return nil
}
var totalCardinality int64
all := make(cardinalities, 0, len(allMap))
for _, card := range allMap {
n := card.cardinality()
if n == 0 {
continue
}
totalCardinality += n
all = append(all, card)
}
sort.Sort(sort.Reverse(all))
// Trim to top-n
if cmd.topN > 0 {
// There may not be "topN" measurement cardinality to sub-slice.
n := int(math.Min(float64(cmd.topN), float64(len(all))))
all = all[:n]
}
tw := tabwriter.NewWriter(cmd.Stdout, 4, 4, 1, '\t', 0)
fmt.Fprintf(tw, "===============\nShard ID: %d\nPath: %s\nCardinality (exact): %d\n\n", id, cmd.shardPaths[id], totalCardinality)
fmt.Fprint(tw, "Measurement\tCardinality (exact)\n\n")
for _, card := range all {
fmt.Fprintf(tw, "%q\t\t%d\n", card.name, card.cardinality())
}
fmt.Fprint(tw, "===============\n\n")
if err := tw.Flush(); err != nil {
return err
}
fmt.Fprint(cmd.Stdout, "\n\n")
return nil
}
| {
return // No more work.
} | conditional_block |
report.go | // Package reporttsi provides a report about the series cardinality in one or more TSI indexes.
package reporttsi
import (
"errors"
"flag"
"fmt"
"io"
"math"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"strconv"
"sync/atomic"
"text/tabwriter"
"github.com/freetsdb/freetsdb/logger"
"github.com/freetsdb/freetsdb/tsdb"
"github.com/freetsdb/freetsdb/tsdb/index/tsi1"
)
const (
// Number of series IDs to stored in slice before we convert to a roaring
// bitmap. Roaring bitmaps have a non-trivial initial cost to construct.
useBitmapN = 25
)
// Command represents the program execution for "freets_inspect reporttsi".
type Command struct {
// Standard input/output, overridden for testing.
Stderr io.Writer
Stdout io.Writer
dbPath string
shardPaths map[uint64]string
shardIdxs map[uint64]*tsi1.Index
cardinalities map[uint64]map[string]*cardinality
seriesFilePath string // optional. Defaults to dbPath/_series
sfile *tsdb.SeriesFile
topN int
byMeasurement bool
byTagKey bool
// How many goroutines to dedicate to calculating cardinality.
concurrency int
}
// NewCommand returns a new instance of Command with default setting applied.
func NewCommand() *Command {
return &Command{
Stderr: os.Stderr,
Stdout: os.Stdout,
shardPaths: map[uint64]string{},
shardIdxs: map[uint64]*tsi1.Index{},
cardinalities: map[uint64]map[string]*cardinality{},
topN: 0,
byMeasurement: true,
byTagKey: false,
concurrency: runtime.GOMAXPROCS(0),
}
}
// Run executes the command.
func (cmd *Command) Run(args ...string) error {
fs := flag.NewFlagSet("reporttsi", flag.ExitOnError)
fs.StringVar(&cmd.dbPath, "db-path", "", "Path to database. Required.")
fs.StringVar(&cmd.seriesFilePath, "series-file", "", "Optional path to series file. Defaults /path/to/db-path/_series")
fs.BoolVar(&cmd.byMeasurement, "measurements", true, "Segment cardinality by measurements")
// TODO(edd): Not yet implemented.
// fs.BoolVar(&cmd.byTagKey, "tag-key", false, "Segment cardinality by tag keys (overrides `measurements`")
fs.IntVar(&cmd.topN, "top", 0, "Limit results to top n")
fs.IntVar(&cmd.concurrency, "c", runtime.GOMAXPROCS(0), "Set worker concurrency. Defaults to GOMAXPROCS setting.")
fs.SetOutput(cmd.Stdout)
if err := fs.Parse(args); err != nil {
return err
}
if cmd.byTagKey {
return errors.New("Segmenting cardinality by tag key is not yet implemented")
}
if cmd.dbPath == "" {
return errors.New("path to database must be provided")
}
if cmd.seriesFilePath == "" {
cmd.seriesFilePath = path.Join(cmd.dbPath, tsdb.SeriesFileDirectory)
}
// Walk database directory to get shards.
if err := filepath.Walk(cmd.dbPath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return nil
}
// TODO(edd): this would be a problem if the retention policy was named
// "index".
if info.Name() == tsdb.SeriesFileDirectory || info.Name() == "index" {
return filepath.SkipDir
}
id, err := strconv.Atoi(info.Name())
if err != nil {
return nil
}
cmd.shardPaths[uint64(id)] = path
return nil
}); err != nil {
return err
}
if len(cmd.shardPaths) == 0 {
fmt.Fprintf(cmd.Stderr, "No shards under %s\n", cmd.dbPath)
return nil
}
return cmd.run()
}
func (cmd *Command) run() error {
cmd.sfile = tsdb.NewSeriesFile(cmd.seriesFilePath)
cmd.sfile.Logger = logger.New(os.Stderr)
if err := cmd.sfile.Open(); err != nil {
return err
}
defer cmd.sfile.Close()
// Open all the indexes.
for id, pth := range cmd.shardPaths {
pth = path.Join(pth, "index")
// Verify directory is an index before opening it.
if ok, err := tsi1.IsIndexDir(pth); err != nil {
return err
} else if !ok {
return fmt.Errorf("not a TSI index directory: %q", pth)
}
cmd.shardIdxs[id] = tsi1.NewIndex(cmd.sfile,
"",
tsi1.WithPath(pth),
tsi1.DisableCompactions(),
)
if err := cmd.shardIdxs[id].Open(); err != nil {
return err
}
defer cmd.shardIdxs[id].Close()
// Initialise cardinality set to store cardinalities for this shard.
cmd.cardinalities[id] = map[string]*cardinality{}
}
// Calculate cardinalities of shards.
fn := cmd.cardinalityByMeasurement
// if cmd.byTagKey {
// TODO(edd)
// }
// Blocks until all work done.
cmd.calculateCardinalities(fn)
// Print summary.
if err := cmd.printSummaryByMeasurement(); err != nil {
return err
}
allIDs := make([]uint64, 0, len(cmd.shardIdxs))
for id := range cmd.shardIdxs {
allIDs = append(allIDs, id)
}
sort.Slice(allIDs, func(i int, j int) bool { return allIDs[i] < allIDs[j] })
for _, id := range allIDs {
if err := cmd.printShardByMeasurement(id); err != nil {
return err
}
}
return nil
}
// calculateCardinalities calculates the cardinalities of the set of shard being
// worked on concurrently. The provided function determines how cardinality is
// calculated and broken down.
func (cmd *Command) calculateCardinalities(fn func(id uint64) error) error {
// Get list of shards to work on.
shardIDs := make([]uint64, 0, len(cmd.shardIdxs))
for id := range cmd.shardIdxs {
shardIDs = append(shardIDs, id)
}
errC := make(chan error, len(shardIDs))
var maxi uint32 // index of maximumm shard being worked on.
for k := 0; k < cmd.concurrency; k++ {
go func() {
for {
i := int(atomic.AddUint32(&maxi, 1) - 1) // Get next partition to work on.
if i >= len(shardIDs) {
return // No more work.
}
errC <- fn(shardIDs[i])
}
}()
}
// Check for error
for i := 0; i < cap(errC); i++ {
if err := <-errC; err != nil {
return err
}
}
return nil
}
type cardinality struct {
name []byte
short []uint32
set *tsdb.SeriesIDSet
}
func (c *cardinality) add(x uint64) {
if c.set != nil {
c.set.AddNoLock(x)
return
}
c.short = append(c.short, uint32(x)) // Series IDs never get beyond 2^32
// Cheaper to store in bitmap.
if len(c.short) > useBitmapN {
c.set = tsdb.NewSeriesIDSet()
for i := 0; i < len(c.short); i++ {
c.set.AddNoLock(uint64(c.short[i]))
}
c.short = nil
return
}
}
func (c *cardinality) cardinality() int64 {
if c == nil || (c.short == nil && c.set == nil) {
return 0
}
if c.short != nil {
return int64(len(c.short))
}
return int64(c.set.Cardinality())
}
type cardinalities []*cardinality
func (a cardinalities) Len() int { return len(a) }
func (a cardinalities) Less(i, j int) bool { return a[i].cardinality() < a[j].cardinality() }
func (a cardinalities) | (i, j int) { a[i], a[j] = a[j], a[i] }
func (cmd *Command) cardinalityByMeasurement(shardID uint64) error {
idx := cmd.shardIdxs[shardID]
itr, err := idx.MeasurementIterator()
if err != nil {
return err
} else if itr == nil {
return nil
}
defer itr.Close()
OUTER:
for {
name, err := itr.Next()
if err != nil {
return err
} else if name == nil {
break OUTER
}
// Get series ID set to track cardinality under measurement.
c, ok := cmd.cardinalities[shardID][string(name)]
if !ok {
c = &cardinality{name: name}
cmd.cardinalities[shardID][string(name)] = c
}
sitr, err := idx.MeasurementSeriesIDIterator(name)
if err != nil {
return err
} else if sitr == nil {
continue
}
var e tsdb.SeriesIDElem
for e, err = sitr.Next(); err == nil && e.SeriesID != 0; e, err = sitr.Next() {
if e.SeriesID > math.MaxUint32 {
panic(fmt.Sprintf("series ID is too large: %d (max %d). Corrupted series file?", e.SeriesID, uint32(math.MaxUint32)))
}
c.add(e.SeriesID)
}
sitr.Close()
if err != nil {
return err
}
}
return nil
}
type result struct {
name []byte
count int64
// For low cardinality measurements just track series using map
lowCardinality map[uint32]struct{}
// For higher cardinality measurements track using bitmap.
set *tsdb.SeriesIDSet
}
func (r *result) addShort(ids []uint32) {
// There is already a bitset of this result.
if r.set != nil {
for _, id := range ids {
r.set.AddNoLock(uint64(id))
}
return
}
// Still tracking low cardinality sets
if r.lowCardinality == nil {
r.lowCardinality = map[uint32]struct{}{}
}
for _, id := range ids {
r.lowCardinality[id] = struct{}{}
}
// Cardinality is large enough that we will benefit from using a bitmap
if len(r.lowCardinality) > useBitmapN {
r.set = tsdb.NewSeriesIDSet()
for id := range r.lowCardinality {
r.set.AddNoLock(uint64(id))
}
r.lowCardinality = nil
}
}
func (r *result) merge(other *tsdb.SeriesIDSet) {
if r.set == nil {
r.set = tsdb.NewSeriesIDSet()
for id := range r.lowCardinality {
r.set.AddNoLock(uint64(id))
}
r.lowCardinality = nil
}
r.set.Merge(other)
}
type results []*result
func (a results) Len() int { return len(a) }
func (a results) Less(i, j int) bool { return a[i].count < a[j].count }
func (a results) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (cmd *Command) printSummaryByMeasurement() error {
// Get global set of measurement names across shards.
idxs := &tsdb.IndexSet{SeriesFile: cmd.sfile}
for _, idx := range cmd.shardIdxs {
idxs.Indexes = append(idxs.Indexes, idx)
}
mitr, err := idxs.MeasurementIterator()
if err != nil {
return err
} else if mitr == nil {
return errors.New("got nil measurement iterator for index set")
}
defer mitr.Close()
var name []byte
var totalCardinality int64
measurements := results{}
for name, err = mitr.Next(); err == nil && name != nil; name, err = mitr.Next() {
res := &result{name: name}
for _, shardCards := range cmd.cardinalities {
other, ok := shardCards[string(name)]
if !ok {
continue // this shard doesn't have anything for this measurement.
}
if other.short != nil && other.set != nil {
panic("cardinality stored incorrectly")
}
if other.short != nil { // low cardinality case
res.addShort(other.short)
} else if other.set != nil { // High cardinality case
res.merge(other.set)
}
// Shard does not have any series for this measurement.
}
// Determine final cardinality and allow intermediate structures to be
// GCd.
if res.lowCardinality != nil {
res.count = int64(len(res.lowCardinality))
} else {
res.count = int64(res.set.Cardinality())
}
totalCardinality += res.count
res.set = nil
res.lowCardinality = nil
measurements = append(measurements, res)
}
if err != nil {
return err
}
// sort measurements by cardinality.
sort.Sort(sort.Reverse(measurements))
if cmd.topN > 0 {
// There may not be "topN" measurement cardinality to sub-slice.
n := int(math.Min(float64(cmd.topN), float64(len(measurements))))
measurements = measurements[:n]
}
tw := tabwriter.NewWriter(cmd.Stdout, 4, 4, 1, '\t', 0)
fmt.Fprintf(tw, "Summary\nDatabase Path: %s\nCardinality (exact): %d\n\n", cmd.dbPath, totalCardinality)
fmt.Fprint(tw, "Measurement\tCardinality (exact)\n\n")
for _, res := range measurements {
fmt.Fprintf(tw, "%q\t\t%d\n", res.name, res.count)
}
if err := tw.Flush(); err != nil {
return err
}
fmt.Fprint(cmd.Stdout, "\n\n")
return nil
}
func (cmd *Command) printShardByMeasurement(id uint64) error {
allMap, ok := cmd.cardinalities[id]
if !ok {
return nil
}
var totalCardinality int64
all := make(cardinalities, 0, len(allMap))
for _, card := range allMap {
n := card.cardinality()
if n == 0 {
continue
}
totalCardinality += n
all = append(all, card)
}
sort.Sort(sort.Reverse(all))
// Trim to top-n
if cmd.topN > 0 {
// There may not be "topN" measurement cardinality to sub-slice.
n := int(math.Min(float64(cmd.topN), float64(len(all))))
all = all[:n]
}
tw := tabwriter.NewWriter(cmd.Stdout, 4, 4, 1, '\t', 0)
fmt.Fprintf(tw, "===============\nShard ID: %d\nPath: %s\nCardinality (exact): %d\n\n", id, cmd.shardPaths[id], totalCardinality)
fmt.Fprint(tw, "Measurement\tCardinality (exact)\n\n")
for _, card := range all {
fmt.Fprintf(tw, "%q\t\t%d\n", card.name, card.cardinality())
}
fmt.Fprint(tw, "===============\n\n")
if err := tw.Flush(); err != nil {
return err
}
fmt.Fprint(cmd.Stdout, "\n\n")
return nil
}
| Swap | identifier_name |
report.go | // Package reporttsi provides a report about the series cardinality in one or more TSI indexes.
package reporttsi
import (
"errors"
"flag"
"fmt"
"io"
"math"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"strconv"
"sync/atomic"
"text/tabwriter"
"github.com/freetsdb/freetsdb/logger"
"github.com/freetsdb/freetsdb/tsdb"
"github.com/freetsdb/freetsdb/tsdb/index/tsi1"
)
const (
// Number of series IDs to stored in slice before we convert to a roaring
// bitmap. Roaring bitmaps have a non-trivial initial cost to construct.
useBitmapN = 25
)
// Command represents the program execution for "freets_inspect reporttsi".
type Command struct {
// Standard input/output, overridden for testing.
Stderr io.Writer
Stdout io.Writer
dbPath string
shardPaths map[uint64]string
shardIdxs map[uint64]*tsi1.Index
cardinalities map[uint64]map[string]*cardinality
seriesFilePath string // optional. Defaults to dbPath/_series
sfile *tsdb.SeriesFile
topN int
byMeasurement bool
byTagKey bool
// How many goroutines to dedicate to calculating cardinality.
concurrency int
}
// NewCommand returns a new instance of Command with default setting applied.
func NewCommand() *Command {
return &Command{
Stderr: os.Stderr,
Stdout: os.Stdout,
shardPaths: map[uint64]string{},
shardIdxs: map[uint64]*tsi1.Index{},
cardinalities: map[uint64]map[string]*cardinality{},
topN: 0,
byMeasurement: true,
byTagKey: false,
concurrency: runtime.GOMAXPROCS(0),
}
}
// Run executes the command.
func (cmd *Command) Run(args ...string) error {
fs := flag.NewFlagSet("reporttsi", flag.ExitOnError)
fs.StringVar(&cmd.dbPath, "db-path", "", "Path to database. Required.")
fs.StringVar(&cmd.seriesFilePath, "series-file", "", "Optional path to series file. Defaults /path/to/db-path/_series")
fs.BoolVar(&cmd.byMeasurement, "measurements", true, "Segment cardinality by measurements")
// TODO(edd): Not yet implemented.
// fs.BoolVar(&cmd.byTagKey, "tag-key", false, "Segment cardinality by tag keys (overrides `measurements`")
fs.IntVar(&cmd.topN, "top", 0, "Limit results to top n")
fs.IntVar(&cmd.concurrency, "c", runtime.GOMAXPROCS(0), "Set worker concurrency. Defaults to GOMAXPROCS setting.")
fs.SetOutput(cmd.Stdout)
if err := fs.Parse(args); err != nil {
return err
}
if cmd.byTagKey {
return errors.New("Segmenting cardinality by tag key is not yet implemented")
}
if cmd.dbPath == "" {
return errors.New("path to database must be provided")
}
if cmd.seriesFilePath == "" {
cmd.seriesFilePath = path.Join(cmd.dbPath, tsdb.SeriesFileDirectory)
}
// Walk database directory to get shards.
if err := filepath.Walk(cmd.dbPath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return nil
}
// TODO(edd): this would be a problem if the retention policy was named
// "index".
if info.Name() == tsdb.SeriesFileDirectory || info.Name() == "index" {
return filepath.SkipDir
}
id, err := strconv.Atoi(info.Name())
if err != nil {
return nil
}
cmd.shardPaths[uint64(id)] = path
return nil
}); err != nil {
return err
}
if len(cmd.shardPaths) == 0 {
fmt.Fprintf(cmd.Stderr, "No shards under %s\n", cmd.dbPath)
return nil
}
return cmd.run()
}
func (cmd *Command) run() error {
cmd.sfile = tsdb.NewSeriesFile(cmd.seriesFilePath)
cmd.sfile.Logger = logger.New(os.Stderr)
if err := cmd.sfile.Open(); err != nil {
return err | defer cmd.sfile.Close()
// Open all the indexes.
for id, pth := range cmd.shardPaths {
pth = path.Join(pth, "index")
// Verify directory is an index before opening it.
if ok, err := tsi1.IsIndexDir(pth); err != nil {
return err
} else if !ok {
return fmt.Errorf("not a TSI index directory: %q", pth)
}
cmd.shardIdxs[id] = tsi1.NewIndex(cmd.sfile,
"",
tsi1.WithPath(pth),
tsi1.DisableCompactions(),
)
if err := cmd.shardIdxs[id].Open(); err != nil {
return err
}
defer cmd.shardIdxs[id].Close()
// Initialise cardinality set to store cardinalities for this shard.
cmd.cardinalities[id] = map[string]*cardinality{}
}
// Calculate cardinalities of shards.
fn := cmd.cardinalityByMeasurement
// if cmd.byTagKey {
// TODO(edd)
// }
// Blocks until all work done.
cmd.calculateCardinalities(fn)
// Print summary.
if err := cmd.printSummaryByMeasurement(); err != nil {
return err
}
allIDs := make([]uint64, 0, len(cmd.shardIdxs))
for id := range cmd.shardIdxs {
allIDs = append(allIDs, id)
}
sort.Slice(allIDs, func(i int, j int) bool { return allIDs[i] < allIDs[j] })
for _, id := range allIDs {
if err := cmd.printShardByMeasurement(id); err != nil {
return err
}
}
return nil
}
// calculateCardinalities calculates the cardinalities of the set of shard being
// worked on concurrently. The provided function determines how cardinality is
// calculated and broken down.
func (cmd *Command) calculateCardinalities(fn func(id uint64) error) error {
// Get list of shards to work on.
shardIDs := make([]uint64, 0, len(cmd.shardIdxs))
for id := range cmd.shardIdxs {
shardIDs = append(shardIDs, id)
}
errC := make(chan error, len(shardIDs))
var maxi uint32 // index of maximumm shard being worked on.
for k := 0; k < cmd.concurrency; k++ {
go func() {
for {
i := int(atomic.AddUint32(&maxi, 1) - 1) // Get next partition to work on.
if i >= len(shardIDs) {
return // No more work.
}
errC <- fn(shardIDs[i])
}
}()
}
// Check for error
for i := 0; i < cap(errC); i++ {
if err := <-errC; err != nil {
return err
}
}
return nil
}
type cardinality struct {
name []byte
short []uint32
set *tsdb.SeriesIDSet
}
func (c *cardinality) add(x uint64) {
if c.set != nil {
c.set.AddNoLock(x)
return
}
c.short = append(c.short, uint32(x)) // Series IDs never get beyond 2^32
// Cheaper to store in bitmap.
if len(c.short) > useBitmapN {
c.set = tsdb.NewSeriesIDSet()
for i := 0; i < len(c.short); i++ {
c.set.AddNoLock(uint64(c.short[i]))
}
c.short = nil
return
}
}
func (c *cardinality) cardinality() int64 {
if c == nil || (c.short == nil && c.set == nil) {
return 0
}
if c.short != nil {
return int64(len(c.short))
}
return int64(c.set.Cardinality())
}
type cardinalities []*cardinality
func (a cardinalities) Len() int { return len(a) }
func (a cardinalities) Less(i, j int) bool { return a[i].cardinality() < a[j].cardinality() }
func (a cardinalities) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (cmd *Command) cardinalityByMeasurement(shardID uint64) error {
idx := cmd.shardIdxs[shardID]
itr, err := idx.MeasurementIterator()
if err != nil {
return err
} else if itr == nil {
return nil
}
defer itr.Close()
OUTER:
for {
name, err := itr.Next()
if err != nil {
return err
} else if name == nil {
break OUTER
}
// Get series ID set to track cardinality under measurement.
c, ok := cmd.cardinalities[shardID][string(name)]
if !ok {
c = &cardinality{name: name}
cmd.cardinalities[shardID][string(name)] = c
}
sitr, err := idx.MeasurementSeriesIDIterator(name)
if err != nil {
return err
} else if sitr == nil {
continue
}
var e tsdb.SeriesIDElem
for e, err = sitr.Next(); err == nil && e.SeriesID != 0; e, err = sitr.Next() {
if e.SeriesID > math.MaxUint32 {
panic(fmt.Sprintf("series ID is too large: %d (max %d). Corrupted series file?", e.SeriesID, uint32(math.MaxUint32)))
}
c.add(e.SeriesID)
}
sitr.Close()
if err != nil {
return err
}
}
return nil
}
type result struct {
name []byte
count int64
// For low cardinality measurements just track series using map
lowCardinality map[uint32]struct{}
// For higher cardinality measurements track using bitmap.
set *tsdb.SeriesIDSet
}
func (r *result) addShort(ids []uint32) {
// There is already a bitset of this result.
if r.set != nil {
for _, id := range ids {
r.set.AddNoLock(uint64(id))
}
return
}
// Still tracking low cardinality sets
if r.lowCardinality == nil {
r.lowCardinality = map[uint32]struct{}{}
}
for _, id := range ids {
r.lowCardinality[id] = struct{}{}
}
// Cardinality is large enough that we will benefit from using a bitmap
if len(r.lowCardinality) > useBitmapN {
r.set = tsdb.NewSeriesIDSet()
for id := range r.lowCardinality {
r.set.AddNoLock(uint64(id))
}
r.lowCardinality = nil
}
}
func (r *result) merge(other *tsdb.SeriesIDSet) {
if r.set == nil {
r.set = tsdb.NewSeriesIDSet()
for id := range r.lowCardinality {
r.set.AddNoLock(uint64(id))
}
r.lowCardinality = nil
}
r.set.Merge(other)
}
type results []*result
func (a results) Len() int { return len(a) }
func (a results) Less(i, j int) bool { return a[i].count < a[j].count }
func (a results) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (cmd *Command) printSummaryByMeasurement() error {
// Get global set of measurement names across shards.
idxs := &tsdb.IndexSet{SeriesFile: cmd.sfile}
for _, idx := range cmd.shardIdxs {
idxs.Indexes = append(idxs.Indexes, idx)
}
mitr, err := idxs.MeasurementIterator()
if err != nil {
return err
} else if mitr == nil {
return errors.New("got nil measurement iterator for index set")
}
defer mitr.Close()
var name []byte
var totalCardinality int64
measurements := results{}
for name, err = mitr.Next(); err == nil && name != nil; name, err = mitr.Next() {
res := &result{name: name}
for _, shardCards := range cmd.cardinalities {
other, ok := shardCards[string(name)]
if !ok {
continue // this shard doesn't have anything for this measurement.
}
if other.short != nil && other.set != nil {
panic("cardinality stored incorrectly")
}
if other.short != nil { // low cardinality case
res.addShort(other.short)
} else if other.set != nil { // High cardinality case
res.merge(other.set)
}
// Shard does not have any series for this measurement.
}
// Determine final cardinality and allow intermediate structures to be
// GCd.
if res.lowCardinality != nil {
res.count = int64(len(res.lowCardinality))
} else {
res.count = int64(res.set.Cardinality())
}
totalCardinality += res.count
res.set = nil
res.lowCardinality = nil
measurements = append(measurements, res)
}
if err != nil {
return err
}
// sort measurements by cardinality.
sort.Sort(sort.Reverse(measurements))
if cmd.topN > 0 {
// There may not be "topN" measurement cardinality to sub-slice.
n := int(math.Min(float64(cmd.topN), float64(len(measurements))))
measurements = measurements[:n]
}
tw := tabwriter.NewWriter(cmd.Stdout, 4, 4, 1, '\t', 0)
fmt.Fprintf(tw, "Summary\nDatabase Path: %s\nCardinality (exact): %d\n\n", cmd.dbPath, totalCardinality)
fmt.Fprint(tw, "Measurement\tCardinality (exact)\n\n")
for _, res := range measurements {
fmt.Fprintf(tw, "%q\t\t%d\n", res.name, res.count)
}
if err := tw.Flush(); err != nil {
return err
}
fmt.Fprint(cmd.Stdout, "\n\n")
return nil
}
func (cmd *Command) printShardByMeasurement(id uint64) error {
allMap, ok := cmd.cardinalities[id]
if !ok {
return nil
}
var totalCardinality int64
all := make(cardinalities, 0, len(allMap))
for _, card := range allMap {
n := card.cardinality()
if n == 0 {
continue
}
totalCardinality += n
all = append(all, card)
}
sort.Sort(sort.Reverse(all))
// Trim to top-n
if cmd.topN > 0 {
// There may not be "topN" measurement cardinality to sub-slice.
n := int(math.Min(float64(cmd.topN), float64(len(all))))
all = all[:n]
}
tw := tabwriter.NewWriter(cmd.Stdout, 4, 4, 1, '\t', 0)
fmt.Fprintf(tw, "===============\nShard ID: %d\nPath: %s\nCardinality (exact): %d\n\n", id, cmd.shardPaths[id], totalCardinality)
fmt.Fprint(tw, "Measurement\tCardinality (exact)\n\n")
for _, card := range all {
fmt.Fprintf(tw, "%q\t\t%d\n", card.name, card.cardinality())
}
fmt.Fprint(tw, "===============\n\n")
if err := tw.Flush(); err != nil {
return err
}
fmt.Fprint(cmd.Stdout, "\n\n")
return nil
} | } | random_line_split |
report.go | // Package reporttsi provides a report about the series cardinality in one or more TSI indexes.
package reporttsi
import (
"errors"
"flag"
"fmt"
"io"
"math"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"strconv"
"sync/atomic"
"text/tabwriter"
"github.com/freetsdb/freetsdb/logger"
"github.com/freetsdb/freetsdb/tsdb"
"github.com/freetsdb/freetsdb/tsdb/index/tsi1"
)
const (
// Number of series IDs to stored in slice before we convert to a roaring
// bitmap. Roaring bitmaps have a non-trivial initial cost to construct.
useBitmapN = 25
)
// Command represents the program execution for "freets_inspect reporttsi".
type Command struct {
// Standard input/output, overridden for testing.
Stderr io.Writer
Stdout io.Writer
dbPath string
shardPaths map[uint64]string
shardIdxs map[uint64]*tsi1.Index
cardinalities map[uint64]map[string]*cardinality
seriesFilePath string // optional. Defaults to dbPath/_series
sfile *tsdb.SeriesFile
topN int
byMeasurement bool
byTagKey bool
// How many goroutines to dedicate to calculating cardinality.
concurrency int
}
// NewCommand returns a new instance of Command with default setting applied.
func NewCommand() *Command {
return &Command{
Stderr: os.Stderr,
Stdout: os.Stdout,
shardPaths: map[uint64]string{},
shardIdxs: map[uint64]*tsi1.Index{},
cardinalities: map[uint64]map[string]*cardinality{},
topN: 0,
byMeasurement: true,
byTagKey: false,
concurrency: runtime.GOMAXPROCS(0),
}
}
// Run executes the command.
func (cmd *Command) Run(args ...string) error {
fs := flag.NewFlagSet("reporttsi", flag.ExitOnError)
fs.StringVar(&cmd.dbPath, "db-path", "", "Path to database. Required.")
fs.StringVar(&cmd.seriesFilePath, "series-file", "", "Optional path to series file. Defaults /path/to/db-path/_series")
fs.BoolVar(&cmd.byMeasurement, "measurements", true, "Segment cardinality by measurements")
// TODO(edd): Not yet implemented.
// fs.BoolVar(&cmd.byTagKey, "tag-key", false, "Segment cardinality by tag keys (overrides `measurements`")
fs.IntVar(&cmd.topN, "top", 0, "Limit results to top n")
fs.IntVar(&cmd.concurrency, "c", runtime.GOMAXPROCS(0), "Set worker concurrency. Defaults to GOMAXPROCS setting.")
fs.SetOutput(cmd.Stdout)
if err := fs.Parse(args); err != nil {
return err
}
if cmd.byTagKey {
return errors.New("Segmenting cardinality by tag key is not yet implemented")
}
if cmd.dbPath == "" {
return errors.New("path to database must be provided")
}
if cmd.seriesFilePath == "" {
cmd.seriesFilePath = path.Join(cmd.dbPath, tsdb.SeriesFileDirectory)
}
// Walk database directory to get shards.
if err := filepath.Walk(cmd.dbPath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return nil
}
// TODO(edd): this would be a problem if the retention policy was named
// "index".
if info.Name() == tsdb.SeriesFileDirectory || info.Name() == "index" {
return filepath.SkipDir
}
id, err := strconv.Atoi(info.Name())
if err != nil {
return nil
}
cmd.shardPaths[uint64(id)] = path
return nil
}); err != nil {
return err
}
if len(cmd.shardPaths) == 0 {
fmt.Fprintf(cmd.Stderr, "No shards under %s\n", cmd.dbPath)
return nil
}
return cmd.run()
}
func (cmd *Command) run() error {
cmd.sfile = tsdb.NewSeriesFile(cmd.seriesFilePath)
cmd.sfile.Logger = logger.New(os.Stderr)
if err := cmd.sfile.Open(); err != nil {
return err
}
defer cmd.sfile.Close()
// Open all the indexes.
for id, pth := range cmd.shardPaths {
pth = path.Join(pth, "index")
// Verify directory is an index before opening it.
if ok, err := tsi1.IsIndexDir(pth); err != nil {
return err
} else if !ok {
return fmt.Errorf("not a TSI index directory: %q", pth)
}
cmd.shardIdxs[id] = tsi1.NewIndex(cmd.sfile,
"",
tsi1.WithPath(pth),
tsi1.DisableCompactions(),
)
if err := cmd.shardIdxs[id].Open(); err != nil {
return err
}
defer cmd.shardIdxs[id].Close()
// Initialise cardinality set to store cardinalities for this shard.
cmd.cardinalities[id] = map[string]*cardinality{}
}
// Calculate cardinalities of shards.
fn := cmd.cardinalityByMeasurement
// if cmd.byTagKey {
// TODO(edd)
// }
// Blocks until all work done.
cmd.calculateCardinalities(fn)
// Print summary.
if err := cmd.printSummaryByMeasurement(); err != nil {
return err
}
allIDs := make([]uint64, 0, len(cmd.shardIdxs))
for id := range cmd.shardIdxs {
allIDs = append(allIDs, id)
}
sort.Slice(allIDs, func(i int, j int) bool { return allIDs[i] < allIDs[j] })
for _, id := range allIDs {
if err := cmd.printShardByMeasurement(id); err != nil {
return err
}
}
return nil
}
// calculateCardinalities calculates the cardinalities of the set of shard being
// worked on concurrently. The provided function determines how cardinality is
// calculated and broken down.
func (cmd *Command) calculateCardinalities(fn func(id uint64) error) error {
// Get list of shards to work on.
shardIDs := make([]uint64, 0, len(cmd.shardIdxs))
for id := range cmd.shardIdxs {
shardIDs = append(shardIDs, id)
}
errC := make(chan error, len(shardIDs))
var maxi uint32 // index of maximumm shard being worked on.
for k := 0; k < cmd.concurrency; k++ {
go func() {
for {
i := int(atomic.AddUint32(&maxi, 1) - 1) // Get next partition to work on.
if i >= len(shardIDs) {
return // No more work.
}
errC <- fn(shardIDs[i])
}
}()
}
// Check for error
for i := 0; i < cap(errC); i++ {
if err := <-errC; err != nil {
return err
}
}
return nil
}
type cardinality struct {
name []byte
short []uint32
set *tsdb.SeriesIDSet
}
func (c *cardinality) add(x uint64) {
if c.set != nil {
c.set.AddNoLock(x)
return
}
c.short = append(c.short, uint32(x)) // Series IDs never get beyond 2^32
// Cheaper to store in bitmap.
if len(c.short) > useBitmapN {
c.set = tsdb.NewSeriesIDSet()
for i := 0; i < len(c.short); i++ {
c.set.AddNoLock(uint64(c.short[i]))
}
c.short = nil
return
}
}
func (c *cardinality) cardinality() int64 {
if c == nil || (c.short == nil && c.set == nil) {
return 0
}
if c.short != nil {
return int64(len(c.short))
}
return int64(c.set.Cardinality())
}
type cardinalities []*cardinality
func (a cardinalities) Len() int { return len(a) }
func (a cardinalities) Less(i, j int) bool { return a[i].cardinality() < a[j].cardinality() }
func (a cardinalities) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (cmd *Command) cardinalityByMeasurement(shardID uint64) error {
idx := cmd.shardIdxs[shardID]
itr, err := idx.MeasurementIterator()
if err != nil {
return err
} else if itr == nil {
return nil
}
defer itr.Close()
OUTER:
for {
name, err := itr.Next()
if err != nil {
return err
} else if name == nil {
break OUTER
}
// Get series ID set to track cardinality under measurement.
c, ok := cmd.cardinalities[shardID][string(name)]
if !ok {
c = &cardinality{name: name}
cmd.cardinalities[shardID][string(name)] = c
}
sitr, err := idx.MeasurementSeriesIDIterator(name)
if err != nil {
return err
} else if sitr == nil {
continue
}
var e tsdb.SeriesIDElem
for e, err = sitr.Next(); err == nil && e.SeriesID != 0; e, err = sitr.Next() {
if e.SeriesID > math.MaxUint32 {
panic(fmt.Sprintf("series ID is too large: %d (max %d). Corrupted series file?", e.SeriesID, uint32(math.MaxUint32)))
}
c.add(e.SeriesID)
}
sitr.Close()
if err != nil {
return err
}
}
return nil
}
type result struct {
name []byte
count int64
// For low cardinality measurements just track series using map
lowCardinality map[uint32]struct{}
// For higher cardinality measurements track using bitmap.
set *tsdb.SeriesIDSet
}
func (r *result) addShort(ids []uint32) {
// There is already a bitset of this result.
if r.set != nil {
for _, id := range ids {
r.set.AddNoLock(uint64(id))
}
return
}
// Still tracking low cardinality sets
if r.lowCardinality == nil {
r.lowCardinality = map[uint32]struct{}{}
}
for _, id := range ids {
r.lowCardinality[id] = struct{}{}
}
// Cardinality is large enough that we will benefit from using a bitmap
if len(r.lowCardinality) > useBitmapN {
r.set = tsdb.NewSeriesIDSet()
for id := range r.lowCardinality {
r.set.AddNoLock(uint64(id))
}
r.lowCardinality = nil
}
}
func (r *result) merge(other *tsdb.SeriesIDSet) {
if r.set == nil {
r.set = tsdb.NewSeriesIDSet()
for id := range r.lowCardinality {
r.set.AddNoLock(uint64(id))
}
r.lowCardinality = nil
}
r.set.Merge(other)
}
type results []*result
func (a results) Len() int { return len(a) }
func (a results) Less(i, j int) bool { return a[i].count < a[j].count }
func (a results) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (cmd *Command) printSummaryByMeasurement() error {
// Get global set of measurement names across shards.
idxs := &tsdb.IndexSet{SeriesFile: cmd.sfile}
for _, idx := range cmd.shardIdxs {
idxs.Indexes = append(idxs.Indexes, idx)
}
mitr, err := idxs.MeasurementIterator()
if err != nil {
return err
} else if mitr == nil {
return errors.New("got nil measurement iterator for index set")
}
defer mitr.Close()
var name []byte
var totalCardinality int64
measurements := results{}
for name, err = mitr.Next(); err == nil && name != nil; name, err = mitr.Next() {
res := &result{name: name}
for _, shardCards := range cmd.cardinalities {
other, ok := shardCards[string(name)]
if !ok {
continue // this shard doesn't have anything for this measurement.
}
if other.short != nil && other.set != nil {
panic("cardinality stored incorrectly")
}
if other.short != nil { // low cardinality case
res.addShort(other.short)
} else if other.set != nil { // High cardinality case
res.merge(other.set)
}
// Shard does not have any series for this measurement.
}
// Determine final cardinality and allow intermediate structures to be
// GCd.
if res.lowCardinality != nil {
res.count = int64(len(res.lowCardinality))
} else {
res.count = int64(res.set.Cardinality())
}
totalCardinality += res.count
res.set = nil
res.lowCardinality = nil
measurements = append(measurements, res)
}
if err != nil {
return err
}
// sort measurements by cardinality.
sort.Sort(sort.Reverse(measurements))
if cmd.topN > 0 {
// There may not be "topN" measurement cardinality to sub-slice.
n := int(math.Min(float64(cmd.topN), float64(len(measurements))))
measurements = measurements[:n]
}
tw := tabwriter.NewWriter(cmd.Stdout, 4, 4, 1, '\t', 0)
fmt.Fprintf(tw, "Summary\nDatabase Path: %s\nCardinality (exact): %d\n\n", cmd.dbPath, totalCardinality)
fmt.Fprint(tw, "Measurement\tCardinality (exact)\n\n")
for _, res := range measurements {
fmt.Fprintf(tw, "%q\t\t%d\n", res.name, res.count)
}
if err := tw.Flush(); err != nil {
return err
}
fmt.Fprint(cmd.Stdout, "\n\n")
return nil
}
func (cmd *Command) printShardByMeasurement(id uint64) error | {
allMap, ok := cmd.cardinalities[id]
if !ok {
return nil
}
var totalCardinality int64
all := make(cardinalities, 0, len(allMap))
for _, card := range allMap {
n := card.cardinality()
if n == 0 {
continue
}
totalCardinality += n
all = append(all, card)
}
sort.Sort(sort.Reverse(all))
// Trim to top-n
if cmd.topN > 0 {
// There may not be "topN" measurement cardinality to sub-slice.
n := int(math.Min(float64(cmd.topN), float64(len(all))))
all = all[:n]
}
tw := tabwriter.NewWriter(cmd.Stdout, 4, 4, 1, '\t', 0)
fmt.Fprintf(tw, "===============\nShard ID: %d\nPath: %s\nCardinality (exact): %d\n\n", id, cmd.shardPaths[id], totalCardinality)
fmt.Fprint(tw, "Measurement\tCardinality (exact)\n\n")
for _, card := range all {
fmt.Fprintf(tw, "%q\t\t%d\n", card.name, card.cardinality())
}
fmt.Fprint(tw, "===============\n\n")
if err := tw.Flush(); err != nil {
return err
}
fmt.Fprint(cmd.Stdout, "\n\n")
return nil
} | identifier_body | |
tier_fs.go | package pyramid
import (
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"github.com/treeverse/lakefs/cache"
"github.com/treeverse/lakefs/logging"
"github.com/google/uuid"
"github.com/treeverse/lakefs/block"
)
// TierFS is a filesystem where written files are never edited.
// All files are stored in the block storage. Local paths are treated as a
// cache layer that will be evicted according to the eviction control.
type TierFS struct {
logger logging.Logger
adaptor block.Adapter
eviction eviction
keyLock cache.OnlyOne
syncDir *directory
fsName string
fsLocalBaseDir string
remotePrefix string
}
type Config struct {
// fsName is the unique filesystem name for this TierFS instance.
// If two TierFS instances have the same name, behaviour is undefined.
fsName string
adaptor block.Adapter
logger logging.Logger
// Prefix for all metadata file lakeFS stores in the block storage.
fsBlockStoragePrefix string
// The directory where TierFS files are kept locally.
localBaseDir string
// Maximum number of bytes an instance of TierFS can allocate to local files.
// This is not a hard limit - there might be short period of times where TierFS
// uses more disk due to ongoing writes and slow disk cleanups.
allocatedDiskBytes int64
}
const workspaceDir = "workspace"
// NewFS creates a new TierFS.
// It will traverse the existing local folders and will update
// the local disk cache to reflect existing files.
func NewFS(c *Config) (FS, error) {
fsLocalBaseDir := path.Join(c.localBaseDir, c.fsName)
if err := os.MkdirAll(fsLocalBaseDir, os.ModePerm); err != nil {
return nil, fmt.Errorf("creating base dir: %w", err)
}
tierFS := &TierFS{
adaptor: c.adaptor,
fsName: c.fsName,
logger: c.logger,
fsLocalBaseDir: fsLocalBaseDir,
syncDir: &directory{ceilingDir: fsLocalBaseDir},
keyLock: cache.NewChanOnlyOne(),
remotePrefix: path.Join(c.fsBlockStoragePrefix, c.fsName),
}
eviction, err := newRistrettoEviction(c.allocatedDiskBytes, tierFS.removeFromLocal)
if err != nil {
return nil, fmt.Errorf("creating eviction control: %w", err)
}
if err := handleExistingFiles(eviction, fsLocalBaseDir); err != nil {
return nil, fmt.Errorf("handling existing files: %w", err)
}
tierFS.eviction = eviction
return tierFS, nil
}
// handleExistingFiles should only be called during init of the TierFS.
// It does 2 things:
// 1. Adds stored files to the eviction control
// 2. Remove workspace directories and all its content if it
// exist under the namespace dir.
func handleExistingFiles(eviction eviction, fsLocalBaseDir string) error {
if err := filepath.Walk(fsLocalBaseDir, func(rPath string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
if info.Name() == workspaceDir {
// skipping workspaces and saving them for later delete
if err := os.RemoveAll(rPath); err != nil {
return fmt.Errorf("removing dir: %w", err)
}
return filepath.SkipDir
}
return nil
}
if err := storeLocalFile(rPath, info.Size(), eviction); err != nil {
return err
}
return nil
}); err != nil {
return fmt.Errorf("walking the fs dir: %w", err)
}
return nil
}
func (tfs *TierFS) removeFromLocal(rPath relativePath, filesize int64) {
// This will be called by the cache eviction mechanism during entry insert.
// We don't want to wait while the file is being removed from the local disk.
evictionHistograms.WithLabelValues(tfs.fsName).Observe(float64(filesize))
go tfs.removeFromLocalInternal(rPath)
}
func (tfs *TierFS) removeFromLocalInternal(rPath relativePath) {
p := path.Join(tfs.fsLocalBaseDir, string(rPath))
if err := os.Remove(p); err != nil {
tfs.logger.WithError(err).WithField("path", p).Error("Removing file failed")
errorsTotal.WithLabelValues(tfs.fsName, "FileRemoval")
return
}
if err := tfs.syncDir.deleteDirRecIfEmpty(path.Dir(p)); err != nil {
tfs.logger.WithError(err).Error("Failed deleting empty dir")
errorsTotal.WithLabelValues(tfs.fsName, "DirRemoval")
}
}
func (tfs *TierFS) store(namespace, originalPath, filename string) error {
f, err := os.Open(originalPath)
if err != nil {
return fmt.Errorf("open file %s: %w", originalPath, err)
}
stat, err := f.Stat()
if err != nil {
return fmt.Errorf("file stat %s: %w", originalPath, err)
}
if err := tfs.adaptor.Put(tfs.objPointer(namespace, filename), stat.Size(), f, block.PutOpts{}); err != nil {
return fmt.Errorf("adapter put %s: %w", filename, err)
}
if err := f.Close(); err != nil {
return fmt.Errorf("closing file %s: %w", filename, err)
}
fileRef := tfs.newLocalFileRef(namespace, filename)
if tfs.eviction.store(fileRef.fsRelativePath, stat.Size()) {
// file was stored by the policy
return tfs.syncDir.renameFile(originalPath, fileRef.fullPath)
} else {
return os.Remove(originalPath)
}
}
// Create creates a new file in TierFS.
// File isn't stored in TierFS until a successful close operation.
// Open(namespace, filename) calls will return an error before the close was called.
func (tfs *TierFS) Create(namespace string) (StoredFile, error) {
if err := validateNamespace(namespace); err != nil {
return nil, fmt.Errorf("invalid args: %w", err)
}
if err := tfs.createNSWorkspaceDir(namespace); err != nil {
return nil, fmt.Errorf("create namespace dir: %w", err)
}
tempPath := tfs.workspaceTempFilePath(namespace)
fh, err := os.Create(tempPath)
if err != nil {
return nil, fmt.Errorf("creating file: %w", err)
}
return &WRFile{
File: fh,
store: func(filename string) error {
return tfs.store(namespace, tempPath, filename)
},
}, nil
}
// Open returns the a file descriptor to the local file.
// If the file is missing from the local disk, it will try to fetch it from the block storage.
func (tfs *TierFS) Open(namespace, filename string) (File, error) {
if err := validateArgs(namespace, filename); err != nil {
return nil, err
}
fileRef := tfs.newLocalFileRef(namespace, filename)
fh, err := os.Open(fileRef.fullPath)
if err == nil {
cacheAccess.WithLabelValues(tfs.fsName, "Hit").Inc()
return tfs.openFile(fileRef, fh)
}
if !os.IsNotExist(err) {
return nil, fmt.Errorf("open file: %w", err)
}
cacheAccess.WithLabelValues(tfs.fsName, "Miss").Inc()
fh, err = tfs.readFromBlockStorage(fileRef)
if err != nil {
return nil, err
}
return tfs.openFile(fileRef, fh)
}
// openFile converts an os.File to pyramid.ROFile and updates the eviction control.
func (tfs *TierFS) openFile(fileRef localFileRef, fh *os.File) (*ROFile, error) {
stat, err := fh.Stat()
if err != nil {
return nil, fmt.Errorf("file stat: %w", err)
}
if !tfs.eviction.store(fileRef.fsRelativePath, stat.Size()) {
// This is where we get less strict.
// Ideally, newly fetched file will never be rejected by the cache.
// But if it did, we prefer to serve the file and delete it.
// When the user will close the file, the file will be deleted from the disk too.
if err := os.Remove(fileRef.fullPath); err != nil {
return nil, err
}
}
return &ROFile{
File: fh,
rPath: fileRef.fsRelativePath,
eviction: tfs.eviction,
}, nil
}
// readFromBlockStorage reads the referenced file from the block storage
// and places it in the local FS for further reading.
// It returns a file handle to the local file.
func (tfs *TierFS) readFromBlockStorage(fileRef localFileRef) (*os.File, error) {
_, err := tfs.keyLock.Compute(fileRef.filename, func() (interface{}, error) {
var err error
reader, err := tfs.adaptor.Get(tfs.objPointer(fileRef.namespace, fileRef.filename), 0)
if err != nil {
return nil, fmt.Errorf("read from block storage: %w", err)
}
defer reader.Close()
writer, err := tfs.syncDir.createFile(fileRef.fullPath)
if err != nil {
return nil, fmt.Errorf("creating file: %w", err)
}
written, err := io.Copy(writer, reader)
if err != nil {
return nil, fmt.Errorf("copying date to file: %w", err)
}
if err = writer.Close(); err != nil {
err = fmt.Errorf("writer close: %w", err)
}
downloadHistograms.WithLabelValues(tfs.fsName).Observe(float64(written))
return nil, err
})
if err != nil {
return nil, err
}
fh, err := os.Open(fileRef.fullPath)
if err != nil {
return nil, fmt.Errorf("open file: %w", err)
}
return fh, nil
}
func storeLocalFile(rPath string, size int64, eviction eviction) error {
if !eviction.store(relativePath(rPath), size) {
return fmt.Errorf("removing file: %w", os.Remove(rPath))
}
return nil
}
func validateArgs(namespace, filename string) error {
if err := validateNamespace(namespace); err != nil {
return err
}
return validateFilename(filename)
}
var (
errSeparatorInFS = errors.New("path contains separator")
errPathInWorkspace = errors.New("file cannot be located in the workspace")
errEmptyDirInPath = errors.New("file path cannot contain an empty directory")
)
func validateFilename(filename string) error {
if strings.HasPrefix(filename, workspaceDir+string(os.PathSeparator)) {
return errPathInWorkspace
}
if strings.Contains(filename, strings.Repeat(string(os.PathSeparator), 2)) {
return errEmptyDirInPath
}
return nil
}
func validateNamespace(ns string) error {
if strings.ContainsRune(ns, os.PathSeparator) {
return errSeparatorInFS
}
return nil
}
// relativePath is the path of the file under TierFS
type relativePath string
// localFileRef consists of all possible local file references
type localFileRef struct {
namespace string
filename string
fullPath string
fsRelativePath relativePath
}
func (tfs *TierFS) newLocalFileRef(namespace, filename string) localFileRef {
relative := path.Join(namespace, filename)
return localFileRef{
namespace: namespace,
filename: filename,
fsRelativePath: relativePath(relative),
fullPath: path.Join(tfs.fsLocalBaseDir, relative),
}
}
func (tfs *TierFS) objPointer(namespace, filename string) block.ObjectPointer {
if runtime.GOOS == "windows" {
filename = strings.ReplaceAll(filename, `\\`, "/")
}
return block.ObjectPointer{
StorageNamespace: namespace,
Identifier: tfs.blockStoragePath(filename),
}
}
func (tfs *TierFS) blockStoragePath(filename string) string {
return path.Join(tfs.remotePrefix, filename)
}
func (tfs *TierFS) createNSWorkspaceDir(namespace string) error {
return os.MkdirAll(tfs.workspaceDirPath(namespace), os.ModePerm)
}
func (tfs *TierFS) workspaceDirPath(namespace string) string {
return path.Join(tfs.fsLocalBaseDir, namespace, workspaceDir)
}
func (tfs *TierFS) | (namespace string) string {
return path.Join(tfs.workspaceDirPath(namespace), uuid.Must(uuid.NewRandom()).String())
}
| workspaceTempFilePath | identifier_name |
tier_fs.go | package pyramid
import (
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"github.com/treeverse/lakefs/cache"
"github.com/treeverse/lakefs/logging"
"github.com/google/uuid"
"github.com/treeverse/lakefs/block"
)
// TierFS is a filesystem where written files are never edited.
// All files are stored in the block storage. Local paths are treated as a
// cache layer that will be evicted according to the eviction control.
type TierFS struct {
logger logging.Logger
adaptor block.Adapter
eviction eviction
keyLock cache.OnlyOne
syncDir *directory
fsName string
fsLocalBaseDir string
remotePrefix string
}
type Config struct {
// fsName is the unique filesystem name for this TierFS instance.
// If two TierFS instances have the same name, behaviour is undefined.
fsName string
adaptor block.Adapter
logger logging.Logger
// Prefix for all metadata file lakeFS stores in the block storage.
fsBlockStoragePrefix string
// The directory where TierFS files are kept locally.
localBaseDir string
// Maximum number of bytes an instance of TierFS can allocate to local files.
// This is not a hard limit - there might be short period of times where TierFS
// uses more disk due to ongoing writes and slow disk cleanups.
allocatedDiskBytes int64
}
const workspaceDir = "workspace"
// NewFS creates a new TierFS.
// It will traverse the existing local folders and will update
// the local disk cache to reflect existing files.
func NewFS(c *Config) (FS, error) {
fsLocalBaseDir := path.Join(c.localBaseDir, c.fsName)
if err := os.MkdirAll(fsLocalBaseDir, os.ModePerm); err != nil {
return nil, fmt.Errorf("creating base dir: %w", err)
}
tierFS := &TierFS{
adaptor: c.adaptor,
fsName: c.fsName,
logger: c.logger,
fsLocalBaseDir: fsLocalBaseDir,
syncDir: &directory{ceilingDir: fsLocalBaseDir},
keyLock: cache.NewChanOnlyOne(),
remotePrefix: path.Join(c.fsBlockStoragePrefix, c.fsName),
}
eviction, err := newRistrettoEviction(c.allocatedDiskBytes, tierFS.removeFromLocal)
if err != nil {
return nil, fmt.Errorf("creating eviction control: %w", err)
}
if err := handleExistingFiles(eviction, fsLocalBaseDir); err != nil |
tierFS.eviction = eviction
return tierFS, nil
}
// handleExistingFiles should only be called during init of the TierFS.
// It does 2 things:
// 1. Adds stored files to the eviction control
// 2. Remove workspace directories and all its content if it
// exist under the namespace dir.
func handleExistingFiles(eviction eviction, fsLocalBaseDir string) error {
if err := filepath.Walk(fsLocalBaseDir, func(rPath string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
if info.Name() == workspaceDir {
// skipping workspaces and saving them for later delete
if err := os.RemoveAll(rPath); err != nil {
return fmt.Errorf("removing dir: %w", err)
}
return filepath.SkipDir
}
return nil
}
if err := storeLocalFile(rPath, info.Size(), eviction); err != nil {
return err
}
return nil
}); err != nil {
return fmt.Errorf("walking the fs dir: %w", err)
}
return nil
}
func (tfs *TierFS) removeFromLocal(rPath relativePath, filesize int64) {
// This will be called by the cache eviction mechanism during entry insert.
// We don't want to wait while the file is being removed from the local disk.
evictionHistograms.WithLabelValues(tfs.fsName).Observe(float64(filesize))
go tfs.removeFromLocalInternal(rPath)
}
func (tfs *TierFS) removeFromLocalInternal(rPath relativePath) {
p := path.Join(tfs.fsLocalBaseDir, string(rPath))
if err := os.Remove(p); err != nil {
tfs.logger.WithError(err).WithField("path", p).Error("Removing file failed")
errorsTotal.WithLabelValues(tfs.fsName, "FileRemoval")
return
}
if err := tfs.syncDir.deleteDirRecIfEmpty(path.Dir(p)); err != nil {
tfs.logger.WithError(err).Error("Failed deleting empty dir")
errorsTotal.WithLabelValues(tfs.fsName, "DirRemoval")
}
}
func (tfs *TierFS) store(namespace, originalPath, filename string) error {
f, err := os.Open(originalPath)
if err != nil {
return fmt.Errorf("open file %s: %w", originalPath, err)
}
stat, err := f.Stat()
if err != nil {
return fmt.Errorf("file stat %s: %w", originalPath, err)
}
if err := tfs.adaptor.Put(tfs.objPointer(namespace, filename), stat.Size(), f, block.PutOpts{}); err != nil {
return fmt.Errorf("adapter put %s: %w", filename, err)
}
if err := f.Close(); err != nil {
return fmt.Errorf("closing file %s: %w", filename, err)
}
fileRef := tfs.newLocalFileRef(namespace, filename)
if tfs.eviction.store(fileRef.fsRelativePath, stat.Size()) {
// file was stored by the policy
return tfs.syncDir.renameFile(originalPath, fileRef.fullPath)
} else {
return os.Remove(originalPath)
}
}
// Create creates a new file in TierFS.
// File isn't stored in TierFS until a successful close operation.
// Open(namespace, filename) calls will return an error before the close was called.
func (tfs *TierFS) Create(namespace string) (StoredFile, error) {
if err := validateNamespace(namespace); err != nil {
return nil, fmt.Errorf("invalid args: %w", err)
}
if err := tfs.createNSWorkspaceDir(namespace); err != nil {
return nil, fmt.Errorf("create namespace dir: %w", err)
}
tempPath := tfs.workspaceTempFilePath(namespace)
fh, err := os.Create(tempPath)
if err != nil {
return nil, fmt.Errorf("creating file: %w", err)
}
return &WRFile{
File: fh,
store: func(filename string) error {
return tfs.store(namespace, tempPath, filename)
},
}, nil
}
// Open returns the a file descriptor to the local file.
// If the file is missing from the local disk, it will try to fetch it from the block storage.
func (tfs *TierFS) Open(namespace, filename string) (File, error) {
if err := validateArgs(namespace, filename); err != nil {
return nil, err
}
fileRef := tfs.newLocalFileRef(namespace, filename)
fh, err := os.Open(fileRef.fullPath)
if err == nil {
cacheAccess.WithLabelValues(tfs.fsName, "Hit").Inc()
return tfs.openFile(fileRef, fh)
}
if !os.IsNotExist(err) {
return nil, fmt.Errorf("open file: %w", err)
}
cacheAccess.WithLabelValues(tfs.fsName, "Miss").Inc()
fh, err = tfs.readFromBlockStorage(fileRef)
if err != nil {
return nil, err
}
return tfs.openFile(fileRef, fh)
}
// openFile converts an os.File to pyramid.ROFile and updates the eviction control.
func (tfs *TierFS) openFile(fileRef localFileRef, fh *os.File) (*ROFile, error) {
stat, err := fh.Stat()
if err != nil {
return nil, fmt.Errorf("file stat: %w", err)
}
if !tfs.eviction.store(fileRef.fsRelativePath, stat.Size()) {
// This is where we get less strict.
// Ideally, newly fetched file will never be rejected by the cache.
// But if it did, we prefer to serve the file and delete it.
// When the user will close the file, the file will be deleted from the disk too.
if err := os.Remove(fileRef.fullPath); err != nil {
return nil, err
}
}
return &ROFile{
File: fh,
rPath: fileRef.fsRelativePath,
eviction: tfs.eviction,
}, nil
}
// readFromBlockStorage reads the referenced file from the block storage
// and places it in the local FS for further reading.
// It returns a file handle to the local file.
func (tfs *TierFS) readFromBlockStorage(fileRef localFileRef) (*os.File, error) {
_, err := tfs.keyLock.Compute(fileRef.filename, func() (interface{}, error) {
var err error
reader, err := tfs.adaptor.Get(tfs.objPointer(fileRef.namespace, fileRef.filename), 0)
if err != nil {
return nil, fmt.Errorf("read from block storage: %w", err)
}
defer reader.Close()
writer, err := tfs.syncDir.createFile(fileRef.fullPath)
if err != nil {
return nil, fmt.Errorf("creating file: %w", err)
}
written, err := io.Copy(writer, reader)
if err != nil {
return nil, fmt.Errorf("copying date to file: %w", err)
}
if err = writer.Close(); err != nil {
err = fmt.Errorf("writer close: %w", err)
}
downloadHistograms.WithLabelValues(tfs.fsName).Observe(float64(written))
return nil, err
})
if err != nil {
return nil, err
}
fh, err := os.Open(fileRef.fullPath)
if err != nil {
return nil, fmt.Errorf("open file: %w", err)
}
return fh, nil
}
func storeLocalFile(rPath string, size int64, eviction eviction) error {
if !eviction.store(relativePath(rPath), size) {
return fmt.Errorf("removing file: %w", os.Remove(rPath))
}
return nil
}
func validateArgs(namespace, filename string) error {
if err := validateNamespace(namespace); err != nil {
return err
}
return validateFilename(filename)
}
var (
errSeparatorInFS = errors.New("path contains separator")
errPathInWorkspace = errors.New("file cannot be located in the workspace")
errEmptyDirInPath = errors.New("file path cannot contain an empty directory")
)
func validateFilename(filename string) error {
if strings.HasPrefix(filename, workspaceDir+string(os.PathSeparator)) {
return errPathInWorkspace
}
if strings.Contains(filename, strings.Repeat(string(os.PathSeparator), 2)) {
return errEmptyDirInPath
}
return nil
}
func validateNamespace(ns string) error {
if strings.ContainsRune(ns, os.PathSeparator) {
return errSeparatorInFS
}
return nil
}
// relativePath is the path of the file under TierFS
type relativePath string
// localFileRef consists of all possible local file references
type localFileRef struct {
namespace string
filename string
fullPath string
fsRelativePath relativePath
}
func (tfs *TierFS) newLocalFileRef(namespace, filename string) localFileRef {
relative := path.Join(namespace, filename)
return localFileRef{
namespace: namespace,
filename: filename,
fsRelativePath: relativePath(relative),
fullPath: path.Join(tfs.fsLocalBaseDir, relative),
}
}
func (tfs *TierFS) objPointer(namespace, filename string) block.ObjectPointer {
if runtime.GOOS == "windows" {
filename = strings.ReplaceAll(filename, `\\`, "/")
}
return block.ObjectPointer{
StorageNamespace: namespace,
Identifier: tfs.blockStoragePath(filename),
}
}
func (tfs *TierFS) blockStoragePath(filename string) string {
return path.Join(tfs.remotePrefix, filename)
}
func (tfs *TierFS) createNSWorkspaceDir(namespace string) error {
return os.MkdirAll(tfs.workspaceDirPath(namespace), os.ModePerm)
}
func (tfs *TierFS) workspaceDirPath(namespace string) string {
return path.Join(tfs.fsLocalBaseDir, namespace, workspaceDir)
}
func (tfs *TierFS) workspaceTempFilePath(namespace string) string {
return path.Join(tfs.workspaceDirPath(namespace), uuid.Must(uuid.NewRandom()).String())
}
| {
return nil, fmt.Errorf("handling existing files: %w", err)
} | conditional_block |
tier_fs.go | package pyramid
import (
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"github.com/treeverse/lakefs/cache"
"github.com/treeverse/lakefs/logging"
"github.com/google/uuid"
"github.com/treeverse/lakefs/block"
)
// TierFS is a filesystem where written files are never edited.
// All files are stored in the block storage. Local paths are treated as a
// cache layer that will be evicted according to the eviction control.
type TierFS struct {
logger logging.Logger
adaptor block.Adapter
eviction eviction
keyLock cache.OnlyOne
syncDir *directory
fsName string
fsLocalBaseDir string
remotePrefix string
}
type Config struct {
// fsName is the unique filesystem name for this TierFS instance.
// If two TierFS instances have the same name, behaviour is undefined.
fsName string
adaptor block.Adapter
logger logging.Logger
// Prefix for all metadata file lakeFS stores in the block storage.
fsBlockStoragePrefix string
// The directory where TierFS files are kept locally.
localBaseDir string
// Maximum number of bytes an instance of TierFS can allocate to local files.
// This is not a hard limit - there might be short period of times where TierFS
// uses more disk due to ongoing writes and slow disk cleanups.
allocatedDiskBytes int64
}
const workspaceDir = "workspace"
// NewFS creates a new TierFS.
// It will traverse the existing local folders and will update
// the local disk cache to reflect existing files.
func NewFS(c *Config) (FS, error) {
fsLocalBaseDir := path.Join(c.localBaseDir, c.fsName)
if err := os.MkdirAll(fsLocalBaseDir, os.ModePerm); err != nil {
return nil, fmt.Errorf("creating base dir: %w", err)
}
tierFS := &TierFS{
adaptor: c.adaptor,
fsName: c.fsName,
logger: c.logger,
fsLocalBaseDir: fsLocalBaseDir,
syncDir: &directory{ceilingDir: fsLocalBaseDir},
keyLock: cache.NewChanOnlyOne(),
remotePrefix: path.Join(c.fsBlockStoragePrefix, c.fsName),
}
eviction, err := newRistrettoEviction(c.allocatedDiskBytes, tierFS.removeFromLocal)
if err != nil {
return nil, fmt.Errorf("creating eviction control: %w", err)
}
if err := handleExistingFiles(eviction, fsLocalBaseDir); err != nil {
return nil, fmt.Errorf("handling existing files: %w", err)
}
tierFS.eviction = eviction
return tierFS, nil
}
// handleExistingFiles should only be called during init of the TierFS.
// It does 2 things:
// 1. Adds stored files to the eviction control
// 2. Remove workspace directories and all its content if it
// exist under the namespace dir.
func handleExistingFiles(eviction eviction, fsLocalBaseDir string) error {
if err := filepath.Walk(fsLocalBaseDir, func(rPath string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
if info.Name() == workspaceDir {
// skipping workspaces and saving them for later delete
if err := os.RemoveAll(rPath); err != nil {
return fmt.Errorf("removing dir: %w", err)
}
return filepath.SkipDir
}
return nil
}
if err := storeLocalFile(rPath, info.Size(), eviction); err != nil {
return err
}
return nil
}); err != nil {
return fmt.Errorf("walking the fs dir: %w", err)
}
return nil
}
func (tfs *TierFS) removeFromLocal(rPath relativePath, filesize int64) {
// This will be called by the cache eviction mechanism during entry insert.
// We don't want to wait while the file is being removed from the local disk.
evictionHistograms.WithLabelValues(tfs.fsName).Observe(float64(filesize))
go tfs.removeFromLocalInternal(rPath)
}
func (tfs *TierFS) removeFromLocalInternal(rPath relativePath) |
func (tfs *TierFS) store(namespace, originalPath, filename string) error {
f, err := os.Open(originalPath)
if err != nil {
return fmt.Errorf("open file %s: %w", originalPath, err)
}
stat, err := f.Stat()
if err != nil {
return fmt.Errorf("file stat %s: %w", originalPath, err)
}
if err := tfs.adaptor.Put(tfs.objPointer(namespace, filename), stat.Size(), f, block.PutOpts{}); err != nil {
return fmt.Errorf("adapter put %s: %w", filename, err)
}
if err := f.Close(); err != nil {
return fmt.Errorf("closing file %s: %w", filename, err)
}
fileRef := tfs.newLocalFileRef(namespace, filename)
if tfs.eviction.store(fileRef.fsRelativePath, stat.Size()) {
// file was stored by the policy
return tfs.syncDir.renameFile(originalPath, fileRef.fullPath)
} else {
return os.Remove(originalPath)
}
}
// Create creates a new file in TierFS.
// File isn't stored in TierFS until a successful close operation.
// Open(namespace, filename) calls will return an error before the close was called.
func (tfs *TierFS) Create(namespace string) (StoredFile, error) {
if err := validateNamespace(namespace); err != nil {
return nil, fmt.Errorf("invalid args: %w", err)
}
if err := tfs.createNSWorkspaceDir(namespace); err != nil {
return nil, fmt.Errorf("create namespace dir: %w", err)
}
tempPath := tfs.workspaceTempFilePath(namespace)
fh, err := os.Create(tempPath)
if err != nil {
return nil, fmt.Errorf("creating file: %w", err)
}
return &WRFile{
File: fh,
store: func(filename string) error {
return tfs.store(namespace, tempPath, filename)
},
}, nil
}
// Open returns the a file descriptor to the local file.
// If the file is missing from the local disk, it will try to fetch it from the block storage.
func (tfs *TierFS) Open(namespace, filename string) (File, error) {
if err := validateArgs(namespace, filename); err != nil {
return nil, err
}
fileRef := tfs.newLocalFileRef(namespace, filename)
fh, err := os.Open(fileRef.fullPath)
if err == nil {
cacheAccess.WithLabelValues(tfs.fsName, "Hit").Inc()
return tfs.openFile(fileRef, fh)
}
if !os.IsNotExist(err) {
return nil, fmt.Errorf("open file: %w", err)
}
cacheAccess.WithLabelValues(tfs.fsName, "Miss").Inc()
fh, err = tfs.readFromBlockStorage(fileRef)
if err != nil {
return nil, err
}
return tfs.openFile(fileRef, fh)
}
// openFile converts an os.File to pyramid.ROFile and updates the eviction control.
func (tfs *TierFS) openFile(fileRef localFileRef, fh *os.File) (*ROFile, error) {
stat, err := fh.Stat()
if err != nil {
return nil, fmt.Errorf("file stat: %w", err)
}
if !tfs.eviction.store(fileRef.fsRelativePath, stat.Size()) {
// This is where we get less strict.
// Ideally, newly fetched file will never be rejected by the cache.
// But if it did, we prefer to serve the file and delete it.
// When the user will close the file, the file will be deleted from the disk too.
if err := os.Remove(fileRef.fullPath); err != nil {
return nil, err
}
}
return &ROFile{
File: fh,
rPath: fileRef.fsRelativePath,
eviction: tfs.eviction,
}, nil
}
// readFromBlockStorage reads the referenced file from the block storage
// and places it in the local FS for further reading.
// It returns a file handle to the local file.
func (tfs *TierFS) readFromBlockStorage(fileRef localFileRef) (*os.File, error) {
_, err := tfs.keyLock.Compute(fileRef.filename, func() (interface{}, error) {
var err error
reader, err := tfs.adaptor.Get(tfs.objPointer(fileRef.namespace, fileRef.filename), 0)
if err != nil {
return nil, fmt.Errorf("read from block storage: %w", err)
}
defer reader.Close()
writer, err := tfs.syncDir.createFile(fileRef.fullPath)
if err != nil {
return nil, fmt.Errorf("creating file: %w", err)
}
written, err := io.Copy(writer, reader)
if err != nil {
return nil, fmt.Errorf("copying date to file: %w", err)
}
if err = writer.Close(); err != nil {
err = fmt.Errorf("writer close: %w", err)
}
downloadHistograms.WithLabelValues(tfs.fsName).Observe(float64(written))
return nil, err
})
if err != nil {
return nil, err
}
fh, err := os.Open(fileRef.fullPath)
if err != nil {
return nil, fmt.Errorf("open file: %w", err)
}
return fh, nil
}
func storeLocalFile(rPath string, size int64, eviction eviction) error {
if !eviction.store(relativePath(rPath), size) {
return fmt.Errorf("removing file: %w", os.Remove(rPath))
}
return nil
}
func validateArgs(namespace, filename string) error {
if err := validateNamespace(namespace); err != nil {
return err
}
return validateFilename(filename)
}
var (
errSeparatorInFS = errors.New("path contains separator")
errPathInWorkspace = errors.New("file cannot be located in the workspace")
errEmptyDirInPath = errors.New("file path cannot contain an empty directory")
)
func validateFilename(filename string) error {
if strings.HasPrefix(filename, workspaceDir+string(os.PathSeparator)) {
return errPathInWorkspace
}
if strings.Contains(filename, strings.Repeat(string(os.PathSeparator), 2)) {
return errEmptyDirInPath
}
return nil
}
func validateNamespace(ns string) error {
if strings.ContainsRune(ns, os.PathSeparator) {
return errSeparatorInFS
}
return nil
}
// relativePath is the path of the file under TierFS
type relativePath string
// localFileRef consists of all possible local file references
type localFileRef struct {
namespace string
filename string
fullPath string
fsRelativePath relativePath
}
func (tfs *TierFS) newLocalFileRef(namespace, filename string) localFileRef {
relative := path.Join(namespace, filename)
return localFileRef{
namespace: namespace,
filename: filename,
fsRelativePath: relativePath(relative),
fullPath: path.Join(tfs.fsLocalBaseDir, relative),
}
}
func (tfs *TierFS) objPointer(namespace, filename string) block.ObjectPointer {
if runtime.GOOS == "windows" {
filename = strings.ReplaceAll(filename, `\\`, "/")
}
return block.ObjectPointer{
StorageNamespace: namespace,
Identifier: tfs.blockStoragePath(filename),
}
}
func (tfs *TierFS) blockStoragePath(filename string) string {
return path.Join(tfs.remotePrefix, filename)
}
func (tfs *TierFS) createNSWorkspaceDir(namespace string) error {
return os.MkdirAll(tfs.workspaceDirPath(namespace), os.ModePerm)
}
func (tfs *TierFS) workspaceDirPath(namespace string) string {
return path.Join(tfs.fsLocalBaseDir, namespace, workspaceDir)
}
func (tfs *TierFS) workspaceTempFilePath(namespace string) string {
return path.Join(tfs.workspaceDirPath(namespace), uuid.Must(uuid.NewRandom()).String())
}
| {
p := path.Join(tfs.fsLocalBaseDir, string(rPath))
if err := os.Remove(p); err != nil {
tfs.logger.WithError(err).WithField("path", p).Error("Removing file failed")
errorsTotal.WithLabelValues(tfs.fsName, "FileRemoval")
return
}
if err := tfs.syncDir.deleteDirRecIfEmpty(path.Dir(p)); err != nil {
tfs.logger.WithError(err).Error("Failed deleting empty dir")
errorsTotal.WithLabelValues(tfs.fsName, "DirRemoval")
}
} | identifier_body |
tier_fs.go | package pyramid
import (
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"github.com/treeverse/lakefs/cache"
"github.com/treeverse/lakefs/logging"
"github.com/google/uuid"
"github.com/treeverse/lakefs/block"
)
// TierFS is a filesystem where written files are never edited.
// All files are stored in the block storage. Local paths are treated as a
// cache layer that will be evicted according to the eviction control.
type TierFS struct {
	logger   logging.Logger
	adaptor  block.Adapter // block-storage backend holding the canonical copy of every file
	eviction eviction      // local-disk cache admission/eviction policy
	keyLock  cache.OnlyOne // serializes per-key work; used to de-dup concurrent downloads in readFromBlockStorage
	syncDir  *directory    // coordinates local file/dir create, rename and empty-dir cleanup under fsLocalBaseDir

	fsName         string // unique name of this TierFS instance; also used as a metrics label
	fsLocalBaseDir string // root directory of this instance's local cache
	remotePrefix   string // key prefix for all objects this instance writes to the block storage
}
// Config groups the construction parameters for a TierFS instance.
type Config struct {
	// fsName is the unique filesystem name for this TierFS instance.
	// If two TierFS instances have the same name, behaviour is undefined.
	fsName string

	adaptor block.Adapter
	logger  logging.Logger

	// fsBlockStoragePrefix is the prefix for all metadata files lakeFS
	// stores in the block storage.
	// BUG FIX: this field was missing although NewFS reads
	// c.fsBlockStoragePrefix.
	fsBlockStoragePrefix string

	// localBaseDir is the directory where TierFS files are kept locally.
	// BUG FIX: this field was missing although NewFS reads c.localBaseDir.
	localBaseDir string

	// Maximum number of bytes an instance of TierFS can allocate to local files.
	// This is not a hard limit - there might be short period of times where TierFS
	// uses more disk due to ongoing writes and slow disk cleanups.
	allocatedDiskBytes int64
}
const workspaceDir = "workspace"
// NewFS creates a new TierFS.
// It will traverse the existing local folders and will update
// the local disk cache to reflect existing files.
func NewFS(c *Config) (FS, error) {
	fsLocalBaseDir := path.Join(c.localBaseDir, c.fsName)
	if err := os.MkdirAll(fsLocalBaseDir, os.ModePerm); err != nil {
		return nil, fmt.Errorf("creating base dir: %w", err)
	}
	tierFS := &TierFS{
		adaptor:        c.adaptor,
		fsName:         c.fsName,
		logger:         c.logger,
		fsLocalBaseDir: fsLocalBaseDir,
		syncDir:        &directory{ceilingDir: fsLocalBaseDir},
		keyLock:        cache.NewChanOnlyOne(),
		remotePrefix:   path.Join(c.fsBlockStoragePrefix, c.fsName),
	}
	// The eviction callback closes over tierFS, so eviction is created after
	// the struct and only attached once pre-existing files are registered.
	eviction, err := newRistrettoEviction(c.allocatedDiskBytes, tierFS.removeFromLocal)
	if err != nil {
		return nil, fmt.Errorf("creating eviction control: %w", err)
	}
	if err := handleExistingFiles(eviction, fsLocalBaseDir); err != nil {
		return nil, fmt.Errorf("handling existing files: %w", err)
	}
	tierFS.eviction = eviction
	return tierFS, nil
}
// handleExistingFiles should only be called during init of the TierFS.
// It does 2 things:
// 1. Adds stored files to the eviction control
// 2. Remove workspace directories and all its content if it
// exist under the namespace dir.
func handleExistingFiles(eviction eviction, fsLocalBaseDir string) error {
	if err := filepath.Walk(fsLocalBaseDir, func(rPath string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			if info.Name() == workspaceDir {
				// skipping workspaces and saving them for later delete
				if err := os.RemoveAll(rPath); err != nil {
					return fmt.Errorf("removing dir: %w", err)
				}
				return filepath.SkipDir
			}
			return nil
		}
		// Regular file: register it with the eviction control so cached
		// content survives process restarts.
		if err := storeLocalFile(rPath, info.Size(), eviction); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return fmt.Errorf("walking the fs dir: %w", err)
	}
	return nil
}
// removeFromLocal is the eviction callback: it records the evicted file's
// size and deletes the file from the local disk asynchronously.
func (tfs *TierFS) removeFromLocal(rPath relativePath, filesize int64) {
	// This will be called by the cache eviction mechanism during entry insert.
	// We don't want to wait while the file is being removed from the local disk.
	evictionHistograms.WithLabelValues(tfs.fsName).Observe(float64(filesize))
	go tfs.removeFromLocalInternal(rPath)
}
// removeFromLocalInternal deletes the evicted file from the local disk and
// prunes any directories left empty, counting failures in the error metric.
func (tfs *TierFS) removeFromLocalInternal(rPath relativePath) {
	p := path.Join(tfs.fsLocalBaseDir, string(rPath))
	if err := os.Remove(p); err != nil {
		tfs.logger.WithError(err).WithField("path", p).Error("Removing file failed")
		// BUG FIX: WithLabelValues only resolves the labeled counter; without
		// Inc() the error was never actually counted.
		errorsTotal.WithLabelValues(tfs.fsName, "FileRemoval").Inc()
		return
	}
	if err := tfs.syncDir.deleteDirRecIfEmpty(path.Dir(p)); err != nil {
		tfs.logger.WithError(err).Error("Failed deleting empty dir")
		// BUG FIX: same missing Inc() as above.
		errorsTotal.WithLabelValues(tfs.fsName, "DirRemoval").Inc()
	}
}
// store uploads the file at originalPath to the block storage under
// namespace/filename, then either moves it into the local cache (if the
// eviction policy admits it) or removes the local copy.
func (tfs *TierFS) store(namespace, originalPath, filename string) error {
	f, err := os.Open(originalPath)
	if err != nil {
		return fmt.Errorf("open file %s: %w", originalPath, err)
	}
	stat, err := f.Stat()
	if err != nil {
		// BUG FIX: f leaked on this path and on Put failure below; close it
		// explicitly on every exit (Close's error only matters after a
		// successful upload).
		f.Close()
		return fmt.Errorf("file stat %s: %w", originalPath, err)
	}
	if err := tfs.adaptor.Put(tfs.objPointer(namespace, filename), stat.Size(), f, block.PutOpts{}); err != nil {
		f.Close()
		return fmt.Errorf("adapter put %s: %w", filename, err)
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("closing file %s: %w", filename, err)
	}
	fileRef := tfs.newLocalFileRef(namespace, filename)
	if tfs.eviction.store(fileRef.fsRelativePath, stat.Size()) {
		// file was stored by the policy
		return tfs.syncDir.renameFile(originalPath, fileRef.fullPath)
	}
	// Rejected by the policy: drop the local copy; the canonical copy is
	// already in the block storage.
	return os.Remove(originalPath)
}
// Create creates a new file in TierFS.
// File isn't stored in TierFS until a successful close operation.
// Open(namespace, filename) calls will return an error before the close was called.
func (tfs *TierFS) Create(namespace string) (StoredFile, error) {
	if err := validateNamespace(namespace); err != nil {
		return nil, fmt.Errorf("invalid args: %w", err)
	}
	if err := tfs.createNSWorkspaceDir(namespace); err != nil {
		return nil, fmt.Errorf("create namespace dir: %w", err)
	}
	// Writes go to a uniquely-named workspace file; the store callback
	// uploads/moves it into place once the caller stores the file.
	tempPath := tfs.workspaceTempFilePath(namespace)
	fh, err := os.Create(tempPath)
	if err != nil {
		return nil, fmt.Errorf("creating file: %w", err)
	}
	return &WRFile{
		File: fh,
		store: func(filename string) error {
			return tfs.store(namespace, tempPath, filename)
		},
	}, nil
}
// Open returns a file descriptor to the local file.
// If the file is missing from the local disk, it will try to fetch it from the block storage.
func (tfs *TierFS) Open(namespace, filename string) (File, error) {
	if err := validateArgs(namespace, filename); err != nil {
		return nil, err
	}
	fileRef := tfs.newLocalFileRef(namespace, filename)
	// Fast path: file is already present in the local cache.
	fh, err := os.Open(fileRef.fullPath)
	if err == nil {
		cacheAccess.WithLabelValues(tfs.fsName, "Hit").Inc()
		return tfs.openFile(fileRef, fh)
	}
	if !os.IsNotExist(err) {
		return nil, fmt.Errorf("open file: %w", err)
	}
	// Cache miss: download from the block storage into the local cache.
	cacheAccess.WithLabelValues(tfs.fsName, "Miss").Inc()
	fh, err = tfs.readFromBlockStorage(fileRef)
	if err != nil {
		return nil, err
	}
	return tfs.openFile(fileRef, fh)
}
// openFile converts an os.File to pyramid.ROFile and updates the eviction control.
// The returned ROFile carries the eviction handle and the relative path —
// presumably so reads can refresh the entry's access stats; see ROFile.
func (tfs *TierFS) openFile(fileRef localFileRef, fh *os.File) (*ROFile, error) {
	stat, err := fh.Stat()
	if err != nil {
		return nil, fmt.Errorf("file stat: %w", err)
	}
	if !tfs.eviction.store(fileRef.fsRelativePath, stat.Size()) {
		// This is where we get less strict.
		// Ideally, newly fetched file will never be rejected by the cache.
		// But if it did, we prefer to serve the file and delete it.
		// When the user will close the file, the file will be deleted from the disk too.
		if err := os.Remove(fileRef.fullPath); err != nil {
			return nil, err
		}
	}
	return &ROFile{
		File:     fh,
		rPath:    fileRef.fsRelativePath,
		eviction: tfs.eviction,
	}, nil
}
// readFromBlockStorage reads the referenced file from the block storage
// and places it in the local FS for further reading.
// It returns a file handle to the local file.
//
// The per-key lock ensures only one goroutine downloads a given filename
// at a time; concurrent callers wait and then open the downloaded file.
func (tfs *TierFS) readFromBlockStorage(fileRef localFileRef) (*os.File, error) {
	_, err := tfs.keyLock.Compute(fileRef.filename, func() (interface{}, error) {
		reader, err := tfs.adaptor.Get(tfs.objPointer(fileRef.namespace, fileRef.filename), 0)
		if err != nil {
			return nil, fmt.Errorf("read from block storage: %w", err)
		}
		defer reader.Close()
		writer, err := tfs.syncDir.createFile(fileRef.fullPath)
		if err != nil {
			return nil, fmt.Errorf("creating file: %w", err)
		}
		written, err := io.Copy(writer, reader)
		if err != nil {
			// BUG FIX: writer leaked when io.Copy failed; close it before
			// returning. (Also fixed the "copying date" message typo.)
			writer.Close()
			return nil, fmt.Errorf("copying data to file: %w", err)
		}
		if err := writer.Close(); err != nil {
			return nil, fmt.Errorf("writer close: %w", err)
		}
		// Only record the download once the file is fully on disk.
		downloadHistograms.WithLabelValues(tfs.fsName).Observe(float64(written))
		return nil, nil
	})
	if err != nil {
		return nil, err
	}
	fh, err := os.Open(fileRef.fullPath)
	if err != nil {
		return nil, fmt.Errorf("open file: %w", err)
	}
	return fh, nil
}
// storeLocalFile registers an existing local file with the eviction control.
// If the policy declines to admit the file it is removed from disk, since it
// would otherwise never be tracked (and never evicted).
func storeLocalFile(rPath string, size int64, eviction eviction) error {
	if !eviction.store(relativePath(rPath), size) {
		// BUG FIX: the previous code wrapped os.Remove's result with %w
		// unconditionally, producing a malformed non-nil error even when the
		// removal succeeded — which aborted the caller's directory walk.
		if err := os.Remove(rPath); err != nil {
			return fmt.Errorf("removing file: %w", err)
		}
	}
	return nil
}
// validateArgs checks both the namespace and the filename for validity,
// returning the first validation failure encountered.
func validateArgs(namespace, filename string) error {
	if err := validateNamespace(namespace); err != nil {
		return err
	}
	if err := validateFilename(filename); err != nil {
		return err
	}
	return nil
}
// Validation errors returned for malformed namespace or filename arguments.
var (
	errSeparatorInFS   = errors.New("path contains separator")
	errPathInWorkspace = errors.New("file cannot be located in the workspace")
	errEmptyDirInPath  = errors.New("file path cannot contain an empty directory")
)
// validateFilename rejects filenames that would land inside the workspace
// directory, or that contain an empty path element (a doubled separator).
// NOTE(review): these checks use os.PathSeparator while the rest of the
// package joins paths with "/" via path.Join — confirm intended Windows
// behavior.
func validateFilename(filename string) error {
	sep := string(os.PathSeparator)
	switch {
	case strings.HasPrefix(filename, workspaceDir+sep):
		return errPathInWorkspace
	case strings.Contains(filename, sep+sep):
		return errEmptyDirInPath
	default:
		return nil
	}
}
// validateNamespace rejects namespaces containing a path separator.
func validateNamespace(ns string) error {
	if !strings.ContainsRune(ns, os.PathSeparator) {
		return nil
	}
	return errSeparatorInFS
}
// relativePath is the path of the file under TierFS, relative to the
// instance's local base dir; it also serves as the eviction-control key.
type relativePath string
// localFileRef consists of all possible local file references
type localFileRef struct {
	namespace string // namespace the file belongs to
	filename  string // filename within the namespace

	fullPath       string       // absolute path of the file on the local disk
	fsRelativePath relativePath // path relative to the base dir; the eviction key
}
// newLocalFileRef builds the set of path references (relative and absolute)
// used to address a namespaced file on the local disk.
func (tfs *TierFS) newLocalFileRef(namespace, filename string) localFileRef {
	rel := path.Join(namespace, filename)
	return localFileRef{
		namespace:      namespace,
		filename:       filename,
		fsRelativePath: relativePath(rel),
		fullPath:       path.Join(tfs.fsLocalBaseDir, rel),
	}
}
// objPointer derives the block-storage pointer for a namespaced file.
// Object identifiers always use forward slashes, so on Windows the local
// path separator must be normalized before building the remote key.
func (tfs *TierFS) objPointer(namespace, filename string) block.ObjectPointer {
	if runtime.GOOS == "windows" {
		// BUG FIX: the raw-string literal `\\` is TWO backslash characters,
		// so single Windows separators (`a\b`) were never replaced. A raw
		// string with a single backslash performs the intended rewrite.
		filename = strings.ReplaceAll(filename, `\`, "/")
	}
	return block.ObjectPointer{
		StorageNamespace: namespace,
		Identifier:       tfs.blockStoragePath(filename),
	}
}
// blockStoragePath maps a filename to its object key under the remote prefix.
func (tfs *TierFS) blockStoragePath(filename string) string {
	remoteKey := path.Join(tfs.remotePrefix, filename)
	return remoteKey
}
// createNSWorkspaceDir ensures the namespace's workspace directory exists
// (MkdirAll is a no-op when it already does).
func (tfs *TierFS) createNSWorkspaceDir(namespace string) error {
	return os.MkdirAll(tfs.workspaceDirPath(namespace), os.ModePerm)
}
// workspaceDirPath returns the directory that holds in-flight files for
// the given namespace.
func (tfs *TierFS) workspaceDirPath(namespace string) string {
	nsDir := path.Join(tfs.fsLocalBaseDir, namespace)
	return path.Join(nsDir, workspaceDir)
}
func (tfs *TierFS) workspaceTempFilePath(namespace string) string {
return path.Join(tfs.workspaceDirPath(namespace), uuid.Must(uuid.NewRandom()).String())
} | fsBlockStoragePrefix string
// The directory where TierFS files are kept locally.
localBaseDir string
| random_line_split |
bufimage.go | // Copyright 2020-2023 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bufimage
import (
"fmt"
"sort"
"github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref"
imagev1 "github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/image/v1"
"github.com/bufbuild/buf/private/pkg/normalpath"
"github.com/bufbuild/buf/private/pkg/protodescriptor"
"github.com/bufbuild/buf/private/pkg/protoencoding"
"google.golang.org/protobuf/types/descriptorpb"
"google.golang.org/protobuf/types/pluginpb"
)
// ImageFile is a Protobuf file within an image.
type ImageFile interface {
	bufmoduleref.FileInfo
	// Proto is the backing *descriptorpb.FileDescriptorProto for this File.
	//
	// FileDescriptor should be preferred to Proto. We keep this method around
	// because we have code that does modification to the ImageFile via this.
	//
	// This will never be nil.
	// The value Path() is equal to Proto.GetName() .
	Proto() *descriptorpb.FileDescriptorProto
	// FileDescriptor is the backing FileDescriptor for this File.
	//
	// This will never be nil.
	// The value Path() is equal to FileDescriptor.GetName() .
	FileDescriptor() protodescriptor.FileDescriptor
	// IsSyntaxUnspecified will be true if the syntax was not explicitly specified.
	IsSyntaxUnspecified() bool
	// UnusedDependencyIndexes returns the indexes of the unused dependencies within
	// FileDescriptor.GetDependency().
	//
	// All indexes will be valid.
	// Will return nil if empty.
	UnusedDependencyIndexes() []int32
	// ImageFileWithIsImport returns a copy of the ImageFile with the new ImageFile
	// now marked as an import.
	//
	// If the original ImageFile was already an import, this returns
	// the original ImageFile.
	ImageFileWithIsImport(isImport bool) ImageFile
	// isImageFile restricts implementations to this package.
	isImageFile()
}
// NewImageFile returns a new ImageFile.
//
// If externalPath is empty, path is used.
//
// This is a thin wrapper around the unexported constructor newImageFile.
//
// TODO: moduleIdentity and commit should be options since they are optional.
func NewImageFile(
	fileDescriptor protodescriptor.FileDescriptor,
	moduleIdentity bufmoduleref.ModuleIdentity,
	commit string,
	externalPath string,
	isImport bool,
	isSyntaxUnspecified bool,
	unusedDependencyIndexes []int32,
) (ImageFile, error) {
	return newImageFile(
		fileDescriptor,
		moduleIdentity,
		commit,
		externalPath,
		isImport,
		isSyntaxUnspecified,
		unusedDependencyIndexes,
	)
}
// Image is a buf image.
type Image interface {
	// Files are the files that comprise the image.
	//
	// This contains all files, including imports if available.
	// The returned files are in correct DAG order.
	//
	// All files that have the same ModuleIdentity will also have the same commit, or no commit.
	// This is enforced at construction time.
	Files() []ImageFile
	// GetFile gets the file for the root relative file path.
	//
	// If the file does not exist, nil is returned.
	// The path is expected to be normalized and validated.
	// Note that all values of GetDependency() can be used here.
	GetFile(path string) ImageFile
	// isImage restricts implementations to this package.
	isImage()
}
// NewImage returns a new Image for the given ImageFiles.
//
// The input ImageFiles are expected to be in correct DAG order!
// TODO: Consider checking the above, and if not, reordering the Files.
// If imageFiles is empty, an error is returned.
func NewImage(imageFiles []ImageFile) (Image, error) {
	return newImage(imageFiles, false)
}
// MergeImages returns a new Image for the given Images. ImageFiles
// treated as non-imports in at least one of the given Images will
// be treated as non-imports in the returned Image. The first non-import
// version of a file will be used in the result.
//
// Reorders the ImageFiles to be in DAG order.
// Duplicates can exist across the Images, but only if duplicates are non-imports.
func MergeImages(images ...Image) (Image, error) {
	switch len(images) {
	case 0:
		return nil, nil
	case 1:
		return images[0], nil
	default:
		var paths []string
		imageFileSet := make(map[string]ImageFile)
		for _, image := range images {
			for _, currentImageFile := range image.Files() {
				storedImageFile, ok := imageFileSet[currentImageFile.Path()]
				if !ok {
					// First occurrence of this path: take the file as-is.
					imageFileSet[currentImageFile.Path()] = currentImageFile
					paths = append(paths, currentImageFile.Path())
					continue
				}
				if !storedImageFile.IsImport() && !currentImageFile.IsImport() {
					return nil, fmt.Errorf("%s is a non-import in multiple images", currentImageFile.Path())
				}
				if storedImageFile.IsImport() && !currentImageFile.IsImport() {
					// Prefer the non-import version of a duplicated file.
					imageFileSet[currentImageFile.Path()] = currentImageFile
				}
			}
		}
		// We need to preserve order for deterministic results, so we add
		// the files in the order they're given, but base our selection
		// on the imageFileSet.
		imageFiles := make([]ImageFile, 0, len(imageFileSet))
		for _, path := range paths {
			imageFiles = append(imageFiles, imageFileSet[path] /* Guaranteed to exist */)
		}
		return newImage(imageFiles, true)
	}
}
// NewImageForProto returns a new Image for the given proto Image.
//
// The input Files are expected to be in correct DAG order!
// TODO: Consider checking the above, and if not, reordering the Files.
//
// TODO: do we want to add the ability to do external path resolution here?
func NewImageForProto(protoImage *imagev1.Image, options ...NewImageForProtoOption) (Image, error) {
	var newImageOptions newImageForProtoOptions
	for _, option := range options {
		option(&newImageOptions)
	}
	if newImageOptions.noReparse && newImageOptions.computeUnusedImports {
		return nil, fmt.Errorf("cannot use both WithNoReparse and WithComputeUnusedImports options; they are mutually exclusive")
	}
	// Reparse reconstitutes custom options from unrecognized bytes unless
	// explicitly disabled via WithNoReparse.
	if !newImageOptions.noReparse {
		if err := reparseImageProto(protoImage, newImageOptions.computeUnusedImports); err != nil {
			return nil, err
		}
	}
	if err := validateProtoImage(protoImage); err != nil {
		return nil, err
	}
	imageFiles := make([]ImageFile, len(protoImage.File))
	for i, protoImageFile := range protoImage.File {
		var isImport bool
		var isSyntaxUnspecified bool
		var unusedDependencyIndexes []int32
		var moduleIdentity bufmoduleref.ModuleIdentity
		var commit string
		var err error
		// The buf extension carries import/module metadata when present.
		if protoImageFileExtension := protoImageFile.GetBufExtension(); protoImageFileExtension != nil {
			isImport = protoImageFileExtension.GetIsImport()
			isSyntaxUnspecified = protoImageFileExtension.GetIsSyntaxUnspecified()
			unusedDependencyIndexes = protoImageFileExtension.GetUnusedDependency()
			if protoModuleInfo := protoImageFileExtension.GetModuleInfo(); protoModuleInfo != nil {
				if protoModuleName := protoModuleInfo.GetName(); protoModuleName != nil {
					moduleIdentity, err = bufmoduleref.NewModuleIdentity(
						protoModuleName.GetRemote(),
						protoModuleName.GetOwner(),
						protoModuleName.GetRepository(),
					)
					if err != nil {
						return nil, err
					}
					// we only want to set this if there is a module name
					commit = protoModuleInfo.GetCommit()
				}
			}
		}
		imageFile, err := NewImageFile(
			protoImageFile,
			moduleIdentity,
			commit,
			protoImageFile.GetName(),
			isImport,
			isSyntaxUnspecified,
			unusedDependencyIndexes,
		)
		if err != nil {
			return nil, err
		}
		imageFiles[i] = imageFile
	}
	return NewImage(imageFiles)
}
// NewImageForCodeGeneratorRequest returns a new Image from a given CodeGeneratorRequest.
//
// The input Files are expected to be in correct DAG order!
// TODO: Consider checking the above, and if not, reordering the Files.
func NewImageForCodeGeneratorRequest(request *pluginpb.CodeGeneratorRequest, options ...NewImageForProtoOption) (Image, error) {
	if err := protodescriptor.ValidateCodeGeneratorRequestExceptFileDescriptorProtos(request); err != nil {
		return nil, err
	}
	protoImageFiles := make([]*imagev1.ImageFile, len(request.GetProtoFile()))
	for i, fileDescriptorProto := range request.GetProtoFile() {
		// we filter whether something is an import or not in ImageWithOnlyPaths
		// we cannot determine if the syntax was unset
		protoImageFiles[i] = fileDescriptorProtoToProtoImageFile(fileDescriptorProto, false, false, nil, nil, "")
	}
	image, err := NewImageForProto(
		&imagev1.Image{
			File: protoImageFiles,
		},
		options...,
	)
	if err != nil {
		return nil, err
	}
	// Restrict the image to the explicitly requested files.
	return ImageWithOnlyPaths(
		image,
		request.GetFileToGenerate(),
		nil,
	)
}
// NewImageForProtoOption is an option for use with NewImageForProto.
type NewImageForProtoOption func(*newImageForProtoOptions)

// WithNoReparse instructs NewImageForProto to skip the reparse step. The reparse
// step is usually needed when unmarshalling the image from bytes. It reconstitutes
// custom options, from unrecognized bytes to known extension fields.
//
// Mutually exclusive with WithUnusedImportsComputation.
func WithNoReparse() NewImageForProtoOption {
	return func(options *newImageForProtoOptions) {
		options.noReparse = true
	}
}
// WithUnusedImportsComputation instructs NewImageForProto to compute unused imports
// for the files. These are usually computed by the compiler and stored in the image.
// But some sources of images may not include this information, so this option can be
// used to ensure that information is present in the image and accurate.
//
// This option is NOT compatible with WithNoReparse: the image must be re-parsed for
// there to be adequate information for computing unused imports.
func WithUnusedImportsComputation() NewImageForProtoOption {
	return func(options *newImageForProtoOptions) {
		options.computeUnusedImports = true
	}
}
// ImageWithoutImports returns a copy of the Image without imports.
//
// The backing Files are not copied.
func ImageWithoutImports(image Image) Image {
	files := image.Files()
	nonImports := make([]ImageFile, 0, len(files))
	for _, file := range files {
		if file.IsImport() {
			continue
		}
		nonImports = append(nonImports, file)
	}
	return newImageNoValidate(nonImports)
}
// ImageWithOnlyPaths returns a copy of the Image that only includes the files
// with the given root relative file paths or directories.
//
// Note that paths can be either files or directories - whether or not a path
// is included is a result of normalpath.EqualsOrContainsPath.
//
// excludePaths are removed from the result — presumably with the same
// file-or-directory matching; see imageWithOnlyPaths for the details.
//
// If a root relative file path does not exist, this errors.
func ImageWithOnlyPaths(
	image Image,
	paths []string,
	excludePaths []string,
) (Image, error) {
	return imageWithOnlyPaths(image, paths, excludePaths, false)
}
// ImageWithOnlyPathsAllowNotExist returns a copy of the Image that only includes the files
// with the given root relative file paths.
//
// Note that paths can be either files or directories - whether or not a path
// is included is a result of normalpath.EqualsOrContainsPath.
//
// If a root relative file path does not exist, this skips this path
// (unlike ImageWithOnlyPaths, which errors).
func ImageWithOnlyPathsAllowNotExist(
	image Image,
	paths []string,
	excludePaths []string,
) (Image, error) {
	return imageWithOnlyPaths(image, paths, excludePaths, true)
}
// ImageByDir returns multiple images that have non-imports split
// by directory.
//
// That is, each Image will only contain a single directory's files
// as it's non-imports, along with all required imports for the
// files in that directory.
func ImageByDir(image Image) ([]Image, error) {
	// Collect the paths of all non-import files.
	var nonImportPaths []string
	for _, imageFile := range image.Files() {
		if imageFile.IsImport() {
			continue
		}
		nonImportPaths = append(nonImportPaths, imageFile.Path())
	}
	dirToPaths := normalpath.ByDir(nonImportPaths...)
	// Sort the directories for a deterministic order of the returned Images.
	dirs := make([]string, 0, len(dirToPaths))
	for dir := range dirToPaths {
		dirs = append(dirs, dir)
	}
	sort.Strings(dirs)
	result := make([]Image, 0, len(dirs))
	for _, dir := range dirs {
		dirPaths, ok := dirToPaths[dir]
		if !ok {
			// this should never happen
			return nil, fmt.Errorf("no dir for %q in dirToPaths", dir)
		}
		dirImage, err := ImageWithOnlyPaths(image, dirPaths, nil)
		if err != nil {
			return nil, err
		}
		result = append(result, dirImage)
	}
	return result, nil
}
// ImageToProtoImage returns a new ProtoImage for the Image.
func ImageToProtoImage(image Image) *imagev1.Image |
// ImageToFileDescriptorSet returns a new FileDescriptorSet for the Image.
func ImageToFileDescriptorSet(image Image) *descriptorpb.FileDescriptorSet {
	fileDescriptors := ImageToFileDescriptors(image)
	return protodescriptor.FileDescriptorSetForFileDescriptors(fileDescriptors...)
}
// ImageToFileDescriptors returns the FileDescriptors for the Image.
func ImageToFileDescriptors(image Image) []protodescriptor.FileDescriptor {
	files := image.Files()
	return imageFilesToFileDescriptors(files)
}
// ImageToFileDescriptorProtos returns the FileDescriptorProtos for the Image.
func ImageToFileDescriptorProtos(image Image) []*descriptorpb.FileDescriptorProto {
	files := image.Files()
	return imageFilesToFileDescriptorProtos(files)
}
// ImageToCodeGeneratorRequest returns a new CodeGeneratorRequest for the Image.
//
// All non-imports are added as files to generate.
// If includeImports is set, all non-well-known-type imports are also added as files to generate.
// If includeWellKnownTypes is set, well-known-type imports are also added as files to generate.
// includeWellKnownTypes has no effect if includeImports is not set.
func ImageToCodeGeneratorRequest(
	image Image,
	parameter string,
	compilerVersion *pluginpb.Version,
	includeImports bool,
	includeWellKnownTypes bool,
) *pluginpb.CodeGeneratorRequest {
	// The two nil maps are the cross-image dedup sets used only by
	// ImagesToCodeGeneratorRequests; a single image needs no dedup.
	return imageToCodeGeneratorRequest(
		image,
		parameter,
		compilerVersion,
		includeImports,
		includeWellKnownTypes,
		nil,
		nil,
	)
}
// ImagesToCodeGeneratorRequests converts the Images to CodeGeneratorRequests.
//
// All non-imports are added as files to generate.
// If includeImports is set, all non-well-known-type imports are also added as files to generate.
// If includeImports is set, only one CodeGeneratorRequest will contain any given file as a FileToGenerate.
// If includeWellKnownTypes is set, well-known-type imports are also added as files to generate.
// includeWellKnownTypes has no effect if includeImports is not set.
func ImagesToCodeGeneratorRequests(
	images []Image,
	parameter string,
	compilerVersion *pluginpb.Version,
	includeImports bool,
	includeWellKnownTypes bool,
) []*pluginpb.CodeGeneratorRequest {
	requests := make([]*pluginpb.CodeGeneratorRequest, len(images))
	// alreadyUsedPaths is a map of paths that have already been added to an image.
	//
	// We track this if includeImports is set, so that when we find an import, we can
	// see if the import was already added to a CodeGeneratorRequest via another Image
	// in the Image slice. If the import was already added, we do not add duplicates
	// across CodeGeneratorRequests.
	var alreadyUsedPaths map[string]struct{}
	// nonImportPaths is a map of non-import paths.
	//
	// We track this if includeImports is set. If we find a non-import file in Image A
	// and this file is an import in Image B, the file will have already been added to
	// a CodeGeneratorRequest via Image A, so do not add the duplicate to any other
	// CodeGeneratorRequest.
	var nonImportPaths map[string]struct{}
	if includeImports {
		// We don't need to track these if includeImports is false, so we only populate
		// the maps if includeImports is true. If includeImports is false, only non-imports
		// will be added to each CodeGeneratorRequest, so figuring out whether or not
		// we should add a given import to a given CodeGeneratorRequest is unnecessary.
		//
		// imageToCodeGeneratorRequest checks if these maps are nil before every access.
		alreadyUsedPaths = make(map[string]struct{})
		nonImportPaths = make(map[string]struct{})
		for _, image := range images {
			for _, imageFile := range image.Files() {
				if !imageFile.IsImport() {
					nonImportPaths[imageFile.Path()] = struct{}{}
				}
			}
		}
	}
	// Build one request per image, sharing the dedup sets across all of them.
	for i, image := range images {
		requests[i] = imageToCodeGeneratorRequest(
			image,
			parameter,
			compilerVersion,
			includeImports,
			includeWellKnownTypes,
			alreadyUsedPaths,
			nonImportPaths,
		)
	}
	return requests
}
// ProtoImageToFileDescriptors returns the FileDescriptors for the proto Image.
func ProtoImageToFileDescriptors(protoImage *imagev1.Image) []protodescriptor.FileDescriptor {
	protoFiles := protoImage.File
	return protoImageFilesToFileDescriptors(protoFiles)
}
// ImageModuleDependency is a dependency of an image.
//
// This could conceivably be part of ImageFile or bufmoduleref.FileInfo.
// For ImageFile, this would be a field that is ignored when translated to proto,
// and is calculated on creation from proto. IsImport would become ImportType.
// You could go a step further and make this optionally part of the proto definition.
//
// You could even go down to bufmoduleref.FileInfo if you used the AST, but this
// could be error prone.
//
// However, for simplicity now (and to not rewrite the whole codebase), we make
// this a separate type that is calculated off of an Image after the fact.
//
// If this became part of ImageFile or bufmoduleref.FileInfo, you would get
// all the ImageDependencies from the ImageFiles, and then sort | uniq them
// to get the ImageDependencies for an Image. This would remove the requirement
// of this associated type to have a ModuleIdentity and commit, so in
// the IsDirect example below, d.proto would not be "ignored" - it would
// be an ImageFile like any other, with ImportType DIRECT.
//
// Note that if we ever do this, there is validation in newImage that enforces
// that all ImageFiles with the same ModuleIdentity have the same commit. This
// validation will likely have to be moved around.
type ImageModuleDependency interface {
	// String() returns remote/owner/repository[:commit].
	fmt.Stringer
	// Required. Will never be nil.
	ModuleIdentity() bufmoduleref.ModuleIdentity
	// Optional. May be empty.
	Commit() string
	// IsDirect returns true if the dependency is a direct dependency.
	//
	// A dependency is direct if it is only an import of non-imports in the image.
	//
	// Example:
	//
	// a.proto, module buf.build/foo/a, is non-import, imports b.proto
	// b.proto, module buf.build/foo/b, is import, imports c.proto
	// c.proto, module buf.build/foo/c, is import
	//
	// In this case, the list would contain only buf.build/foo/b, as buf.build/foo/a
	// for a.proto is a non-import, and buf.build/foo/c for c.proto is only imported
	// by an import
	IsDirect() bool
	// isImageModuleDependency restricts implementations to this package.
	isImageModuleDependency()
}
// ImageModuleDependencies returns all ImageModuleDependencies for the Image.
//
// Does not return any ImageModuleDependencies for non-imports, that is the
// ModuleIdentities and commits represented by non-imports are not represented
// in this list.
func ImageModuleDependencies(image Image) []ImageModuleDependency {
	// Paths imported by at least one non-import file; used below to decide
	// whether a dependency is direct.
	importsOfNonImports := make(map[string]struct{})
	for _, imageFile := range image.Files() {
		if !imageFile.IsImport() {
			for _, dependency := range imageFile.FileDescriptor().GetDependency() {
				importsOfNonImports[dependency] = struct{}{}
			}
		}
	}
	// We know that all ImageFiles with the same ModuleIdentity
	// have the same commit or no commit, so using String() will properly identify
	// unique dependencies.
	stringToImageModuleDependency := make(map[string]ImageModuleDependency)
	for _, imageFile := range image.Files() {
		if imageFile.IsImport() {
			if moduleIdentity := imageFile.ModuleIdentity(); moduleIdentity != nil {
				_, isDirect := importsOfNonImports[imageFile.Path()]
				imageModuleDependency := newImageModuleDependency(
					moduleIdentity,
					imageFile.Commit(),
					isDirect,
				)
				stringToImageModuleDependency[imageModuleDependency.String()] = imageModuleDependency
			}
		}
	}
	imageModuleDependencies := make([]ImageModuleDependency, 0, len(stringToImageModuleDependency))
	for _, imageModuleDependency := range stringToImageModuleDependency {
		imageModuleDependencies = append(
			imageModuleDependencies,
			imageModuleDependency,
		)
	}
	sortImageModuleDependencies(imageModuleDependencies)
	return imageModuleDependencies
}
// newImageForProtoOptions collects the settings applied by
// NewImageForProtoOption functions.
type newImageForProtoOptions struct {
	noReparse            bool // skip the reparse step (WithNoReparse)
	computeUnusedImports bool // recompute unused imports (WithUnusedImportsComputation)
}
// reparseImageProto re-parses unrecognized fields in the proto Image
// (reconstituting custom options) and, when computeUnusedImports is set,
// recomputes each file's unused-dependency list in its buf extension.
func reparseImageProto(protoImage *imagev1.Image, computeUnusedImports bool) error {
	// TODO right now, NewResolver sets AllowUnresolvable to true all the time
	// we want to make this into a check, and we verify if we need this for the individual command
	resolver := protoencoding.NewLazyResolver(
		ProtoImageToFileDescriptors(
			protoImage,
		)...,
	)
	if err := protoencoding.ReparseUnrecognized(resolver, protoImage.ProtoReflect()); err != nil {
		return fmt.Errorf("could not reparse image: %v", err)
	}
	if computeUnusedImports {
		tracker := &importTracker{
			resolver: resolver,
			used:     map[string]map[string]struct{}{},
		}
		tracker.findUsedImports(protoImage)
		// Now we can populate the list of unused dependencies
		for _, file := range protoImage.File {
			bufExt := file.BufExtension
			if bufExt == nil {
				bufExt = &imagev1.ImageFileExtension{}
				file.BufExtension = bufExt
			}
			bufExt.UnusedDependency = nil // reset
			usedImports := tracker.used[file.GetName()]
			for i, dep := range file.Dependency {
				if _, ok := usedImports[dep]; !ok {
					// it's fine if it's public
					isPublic := false
					for _, publicDepIndex := range file.PublicDependency {
						if i == int(publicDepIndex) {
							isPublic = true
							break
						}
					}
					if !isPublic {
						bufExt.UnusedDependency = append(bufExt.UnusedDependency, int32(i))
					}
				}
			}
		}
	}
	return nil
}
| {
imageFiles := image.Files()
protoImage := &imagev1.Image{
File: make([]*imagev1.ImageFile, len(imageFiles)),
}
for i, imageFile := range imageFiles {
protoImage.File[i] = imageFileToProtoImageFile(imageFile)
}
return protoImage
} | identifier_body |
bufimage.go | // Copyright 2020-2023 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bufimage
import (
"fmt"
"sort"
"github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref"
imagev1 "github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/image/v1"
"github.com/bufbuild/buf/private/pkg/normalpath"
"github.com/bufbuild/buf/private/pkg/protodescriptor"
"github.com/bufbuild/buf/private/pkg/protoencoding"
"google.golang.org/protobuf/types/descriptorpb"
"google.golang.org/protobuf/types/pluginpb"
)
// ImageFile is a Protobuf file within an image.
type ImageFile interface {
	bufmoduleref.FileInfo
	// Proto is the backing *descriptorpb.FileDescriptorProto for this File.
	//
	// FileDescriptor should be preferred to Proto. We keep this method around
	// because we have code that does modification to the ImageFile via this.
	//
	// This will never be nil.
	// The value Path() is equal to Proto.GetName() .
	Proto() *descriptorpb.FileDescriptorProto
	// FileDescriptor is the backing FileDescriptor for this File.
	//
	// This will never be nil.
	// The value Path() is equal to FileDescriptor.GetName() .
	FileDescriptor() protodescriptor.FileDescriptor
	// IsSyntaxUnspecified will be true if the syntax was not explicitly specified.
	IsSyntaxUnspecified() bool
	// UnusedDependencyIndexes returns the indexes of the unused dependencies within
	// FileDescriptor.GetDependency().
	//
	// All indexes will be valid.
	// Will return nil if empty.
	UnusedDependencyIndexes() []int32
	// ImageFileWithIsImport returns a copy of the ImageFile with the new ImageFile
	// now marked as an import.
	//
	// If the original ImageFile was already an import, this returns
	// the original ImageFile.
	ImageFileWithIsImport(isImport bool) ImageFile
	// isImageFile is unexported so that only types within this package
	// can implement ImageFile.
	isImageFile()
}
// NewImageFile returns a new ImageFile.
//
// If externalPath is empty, path is used.
//
// This is a thin wrapper that delegates construction (and any validation)
// to the unexported newImageFile constructor.
//
// TODO: moduleIdentity and commit should be options since they are optional.
func NewImageFile(
	fileDescriptor protodescriptor.FileDescriptor,
	moduleIdentity bufmoduleref.ModuleIdentity,
	commit string,
	externalPath string,
	isImport bool,
	isSyntaxUnspecified bool,
	unusedDependencyIndexes []int32,
) (ImageFile, error) {
	return newImageFile(
		fileDescriptor,
		moduleIdentity,
		commit,
		externalPath,
		isImport,
		isSyntaxUnspecified,
		unusedDependencyIndexes,
	)
}
// Image is a buf image.
type Image interface {
	// Files are the files that comprise the image.
	//
	// This contains all files, including imports if available.
	// The returned files are in correct DAG order.
	//
	// All files that have the same ModuleIdentity will also have the same commit, or no commit.
	// This is enforced at construction time.
	Files() []ImageFile
	// GetFile gets the file for the root relative file path.
	//
	// If the file does not exist, nil is returned.
	// The path is expected to be normalized and validated.
	// Note that all values of GetDependency() can be used here.
	GetFile(path string) ImageFile
	// isImage is unexported so that only types within this package
	// can implement Image.
	isImage()
}
// NewImage returns a new Image for the given ImageFiles.
//
// The input ImageFiles are expected to be in correct DAG order!
// TODO: Consider checking the above, and if not, reordering the Files.
// If imageFiles is empty, returns error
func NewImage(imageFiles []ImageFile) (Image, error) {
	// The second argument presumably controls reordering (MergeImages
	// passes true and documents that it reorders) — confirm in newImage.
	return newImage(imageFiles, false)
}
// MergeImages returns a new Image for the given Images. ImageFiles
// treated as non-imports in at least one of the given Images will
// be treated as non-imports in the returned Image. The first non-import
// version of a file will be used in the result.
//
// Reorders the ImageFiles to be in DAG order.
// Duplicates can exist across the Images, but only if duplicates are non-imports.
func MergeImages(images ...Image) (Image, error) {
	if len(images) == 0 {
		return nil, nil
	}
	if len(images) == 1 {
		return images[0], nil
	}
	// pathToImageFile holds the ImageFile selected for each path, while
	// orderedPaths preserves first-seen order for deterministic results.
	pathToImageFile := make(map[string]ImageFile)
	var orderedPaths []string
	for _, image := range images {
		for _, imageFile := range image.Files() {
			path := imageFile.Path()
			existing, seen := pathToImageFile[path]
			if !seen {
				pathToImageFile[path] = imageFile
				orderedPaths = append(orderedPaths, path)
				continue
			}
			// Two non-import copies of the same file cannot be merged.
			if !existing.IsImport() && !imageFile.IsImport() {
				return nil, fmt.Errorf("%s is a non-import in multiple images", path)
			}
			// Prefer the non-import version of a file over the import version.
			if existing.IsImport() && !imageFile.IsImport() {
				pathToImageFile[path] = imageFile
			}
		}
	}
	mergedImageFiles := make([]ImageFile, 0, len(pathToImageFile))
	for _, path := range orderedPaths {
		mergedImageFiles = append(mergedImageFiles, pathToImageFile[path] /* Guaranteed to exist */)
	}
	return newImage(mergedImageFiles, true)
}
// NewImageForProto returns a new Image for the given proto Image.
//
// The input Files are expected to be in correct DAG order!
// TODO: Consider checking the above, and if not, reordering the Files.
//
// TODO: do we want to add the ability to do external path resolution here?
func NewImageForProto(protoImage *imagev1.Image, options ...NewImageForProtoOption) (Image, error) {
	var newImageOptions newImageForProtoOptions
	for _, option := range options {
		option(&newImageOptions)
	}
	if newImageOptions.noReparse && newImageOptions.computeUnusedImports {
		// Name the actual exported option: the message previously said
		// "WithComputeUnusedImports", which does not exist; the option is
		// WithUnusedImportsComputation.
		return nil, fmt.Errorf("cannot use both WithNoReparse and WithUnusedImportsComputation options; they are mutually exclusive")
	}
	// Reparsing reconstitutes custom options from unrecognized bytes and is
	// a prerequisite for unused-import computation (see reparseImageProto).
	if !newImageOptions.noReparse {
		if err := reparseImageProto(protoImage, newImageOptions.computeUnusedImports); err != nil {
			return nil, err
		}
	}
	if err := validateProtoImage(protoImage); err != nil {
		return nil, err
	}
	imageFiles := make([]ImageFile, len(protoImage.File))
	for i, protoImageFile := range protoImage.File {
		// Pull per-file metadata out of the buf extension, when present.
		var isImport bool
		var isSyntaxUnspecified bool
		var unusedDependencyIndexes []int32
		var moduleIdentity bufmoduleref.ModuleIdentity
		var commit string
		var err error
		if protoImageFileExtension := protoImageFile.GetBufExtension(); protoImageFileExtension != nil {
			isImport = protoImageFileExtension.GetIsImport()
			isSyntaxUnspecified = protoImageFileExtension.GetIsSyntaxUnspecified()
			unusedDependencyIndexes = protoImageFileExtension.GetUnusedDependency()
			if protoModuleInfo := protoImageFileExtension.GetModuleInfo(); protoModuleInfo != nil {
				if protoModuleName := protoModuleInfo.GetName(); protoModuleName != nil {
					moduleIdentity, err = bufmoduleref.NewModuleIdentity(
						protoModuleName.GetRemote(),
						protoModuleName.GetOwner(),
						protoModuleName.GetRepository(),
					)
					if err != nil {
						return nil, err
					}
					// we only want to set this if there is a module name
					commit = protoModuleInfo.GetCommit()
				}
			}
		}
		imageFile, err := NewImageFile(
			protoImageFile,
			moduleIdentity,
			commit,
			protoImageFile.GetName(),
			isImport,
			isSyntaxUnspecified,
			unusedDependencyIndexes,
		)
		if err != nil {
			return nil, err
		}
		imageFiles[i] = imageFile
	}
	return NewImage(imageFiles)
}
// NewImageForCodeGeneratorRequest returns a new Image from a given CodeGeneratorRequest.
//
// The input Files are expected to be in correct DAG order!
// TODO: Consider checking the above, and if not, reordering the Files.
func NewImageForCodeGeneratorRequest(request *pluginpb.CodeGeneratorRequest, options ...NewImageForProtoOption) (Image, error) {
if err := protodescriptor.ValidateCodeGeneratorRequestExceptFileDescriptorProtos(request); err != nil {
return nil, err
}
protoImageFiles := make([]*imagev1.ImageFile, len(request.GetProtoFile()))
for i, fileDescriptorProto := range request.GetProtoFile() {
// we filter whether something is an import or not in ImageWithOnlyPaths
// we cannot determine if the syntax was unset
protoImageFiles[i] = fileDescriptorProtoToProtoImageFile(fileDescriptorProto, false, false, nil, nil, "")
}
image, err := NewImageForProto(
&imagev1.Image{
File: protoImageFiles,
},
options...,
)
if err != nil |
return ImageWithOnlyPaths(
image,
request.GetFileToGenerate(),
nil,
)
}
// NewImageForProtoOption is an option for use with NewImageForProto.
type NewImageForProtoOption func(*newImageForProtoOptions)

// WithNoReparse instructs NewImageForProto to skip the reparse step. The reparse
// step is usually needed when unmarshalling the image from bytes. It reconstitutes
// custom options, from unrecognized bytes to known extension fields.
//
// This option is mutually exclusive with WithUnusedImportsComputation;
// NewImageForProto errors if both are set.
func WithNoReparse() NewImageForProtoOption {
	return func(options *newImageForProtoOptions) {
		options.noReparse = true
	}
}
// WithUnusedImportsComputation instructs NewImageForProto to compute unused imports
// for the files. These are usually computed by the compiler and stored in the image.
// But some sources of images may not include this information, so this option can be
// used to ensure that information is present in the image and accurate.
//
// This option is NOT compatible with WithNoReparse: the image must be re-parsed for
// there to be adequate information for computing unused imports.
// NewImageForProto errors if both are set.
func WithUnusedImportsComputation() NewImageForProtoOption {
	return func(options *newImageForProtoOptions) {
		options.computeUnusedImports = true
	}
}
// ImageWithoutImports returns a copy of the Image without imports.
//
// The backing Files are not copied.
func ImageWithoutImports(image Image) Image {
	allFiles := image.Files()
	nonImports := make([]ImageFile, 0, len(allFiles))
	for _, file := range allFiles {
		if file.IsImport() {
			continue
		}
		nonImports = append(nonImports, file)
	}
	// The inputs are already validated, so skip validation on rebuild.
	return newImageNoValidate(nonImports)
}
// ImageWithOnlyPaths returns a copy of the Image that only includes the files
// with the given root relative file paths or directories.
//
// Note that paths can be either files or directories - whether or not a path
// is included is a result of normalpath.EqualsOrContainsPath.
//
// If a root relative file path does not exist, this errors.
func ImageWithOnlyPaths(
	image Image,
	paths []string,
	excludePaths []string,
) (Image, error) {
	// allowNotExist=false: missing paths are an error here, unlike
	// ImageWithOnlyPathsAllowNotExist.
	return imageWithOnlyPaths(image, paths, excludePaths, false)
}
// ImageWithOnlyPathsAllowNotExist returns a copy of the Image that only includes the files
// with the given root relative file paths.
//
// Note that paths can be either files or directories - whether or not a path
// is included is a result of normalpath.EqualsOrContainsPath.
//
// If a root relative file path does not exist, this skips this path.
func ImageWithOnlyPathsAllowNotExist(
	image Image,
	paths []string,
	excludePaths []string,
) (Image, error) {
	// allowNotExist=true: missing paths are silently skipped, unlike
	// ImageWithOnlyPaths.
	return imageWithOnlyPaths(image, paths, excludePaths, true)
}
// ImageByDir returns multiple images that have non-imports split
// by directory.
//
// That is, each Image will only contain a single directory's files
// as its non-imports, along with all required imports for the
// files in that directory.
func ImageByDir(image Image) ([]Image, error) {
	// Collect the non-import paths; imports are pulled in per directory
	// by ImageWithOnlyPaths below.
	var nonImportPaths []string
	for _, imageFile := range image.Files() {
		if !imageFile.IsImport() {
			nonImportPaths = append(nonImportPaths, imageFile.Path())
		}
	}
	dirToPaths := normalpath.ByDir(nonImportPaths...)
	// Sort the directories to produce a deterministic order of returned Images.
	dirs := make([]string, 0, len(dirToPaths))
	for dir := range dirToPaths {
		dirs = append(dirs, dir)
	}
	sort.Strings(dirs)
	newImages := make([]Image, 0, len(dirs))
	for _, dir := range dirs {
		dirPaths, ok := dirToPaths[dir]
		if !ok {
			// this should never happen
			return nil, fmt.Errorf("no dir for %q in dirToPaths", dir)
		}
		dirImage, err := ImageWithOnlyPaths(image, dirPaths, nil)
		if err != nil {
			return nil, err
		}
		newImages = append(newImages, dirImage)
	}
	return newImages, nil
}
// ImageToProtoImage returns a new ProtoImage for the Image.
func ImageToProtoImage(image Image) *imagev1.Image {
	files := image.Files()
	protoFiles := make([]*imagev1.ImageFile, len(files))
	for i, file := range files {
		protoFiles[i] = imageFileToProtoImageFile(file)
	}
	return &imagev1.Image{
		File: protoFiles,
	}
}
// ImageToFileDescriptorSet returns a new FileDescriptorSet for the Image.
//
// The set is built from all files in the Image, imports included
// (per the Files() contract on Image).
func ImageToFileDescriptorSet(image Image) *descriptorpb.FileDescriptorSet {
	return protodescriptor.FileDescriptorSetForFileDescriptors(ImageToFileDescriptors(image)...)
}
// ImageToFileDescriptors returns the FileDescriptors for the Image.
//
// Converts image.Files() via the unexported imageFilesToFileDescriptors helper.
func ImageToFileDescriptors(image Image) []protodescriptor.FileDescriptor {
	return imageFilesToFileDescriptors(image.Files())
}
// ImageToFileDescriptorProtos returns the FileDescriptorProtos for the Image.
//
// Converts image.Files() via the unexported imageFilesToFileDescriptorProtos helper.
func ImageToFileDescriptorProtos(image Image) []*descriptorpb.FileDescriptorProto {
	return imageFilesToFileDescriptorProtos(image.Files())
}
// ImageToCodeGeneratorRequest returns a new CodeGeneratorRequest for the Image.
//
// All non-imports are added as files to generate.
// If includeImports is set, all non-well-known-type imports are also added as files to generate.
// If includeWellKnownTypes is set, well-known-type imports are also added as files to generate.
// includeWellKnownTypes has no effect if includeImports is not set.
func ImageToCodeGeneratorRequest(
	image Image,
	parameter string,
	compilerVersion *pluginpb.Version,
	includeImports bool,
	includeWellKnownTypes bool,
) *pluginpb.CodeGeneratorRequest {
	return imageToCodeGeneratorRequest(
		image,
		parameter,
		compilerVersion,
		includeImports,
		includeWellKnownTypes,
		// nil, nil: no cross-request dedup maps; those are only populated by
		// ImagesToCodeGeneratorRequests, which deduplicates across multiple Images.
		nil,
		nil,
	)
}
// ImagesToCodeGeneratorRequests converts the Images to CodeGeneratorRequests.
//
// All non-imports are added as files to generate.
// If includeImports is set, all non-well-known-type imports are also added as files to generate.
// If includeImports is set, only one CodeGeneratorRequest will contain any given file as a FileToGenerate.
// If includeWellKnownTypes is set, well-known-type imports are also added as files to generate.
// includeWellKnownTypes has no effect if includeImports is not set.
func ImagesToCodeGeneratorRequests(
	images []Image,
	parameter string,
	compilerVersion *pluginpb.Version,
	includeImports bool,
	includeWellKnownTypes bool,
) []*pluginpb.CodeGeneratorRequest {
	// When includeImports is set, the same file must not appear as a
	// FileToGenerate in more than one CodeGeneratorRequest:
	//
	//   - alreadyUsedPaths records every path already claimed by an earlier
	//     request in the slice, so later requests do not claim it again.
	//   - nonImportPaths records every path that is a non-import in ANY image,
	//     so an import copy of such a file is never claimed (the non-import
	//     copy already was, via its own image).
	//
	// Both maps stay nil when includeImports is false: in that mode only
	// non-imports are emitted, so no cross-request dedup is needed.
	// imageToCodeGeneratorRequest checks these maps for nil before every access.
	var alreadyUsedPaths map[string]struct{}
	var nonImportPaths map[string]struct{}
	if includeImports {
		alreadyUsedPaths = make(map[string]struct{})
		nonImportPaths = make(map[string]struct{})
		for _, image := range images {
			for _, imageFile := range image.Files() {
				if !imageFile.IsImport() {
					nonImportPaths[imageFile.Path()] = struct{}{}
				}
			}
		}
	}
	requests := make([]*pluginpb.CodeGeneratorRequest, 0, len(images))
	for _, image := range images {
		requests = append(
			requests,
			imageToCodeGeneratorRequest(
				image,
				parameter,
				compilerVersion,
				includeImports,
				includeWellKnownTypes,
				alreadyUsedPaths,
				nonImportPaths,
			),
		)
	}
	return requests
}
// ProtoImageToFileDescriptors returns the FileDescriptors for the proto Image.
//
// Converts protoImage.File via the unexported protoImageFilesToFileDescriptors helper.
func ProtoImageToFileDescriptors(protoImage *imagev1.Image) []protodescriptor.FileDescriptor {
	return protoImageFilesToFileDescriptors(protoImage.File)
}
// ImageModuleDependency is a dependency of an image.
// (Comment fixed to start with the type name, per Go doc convention;
// it previously said "ImageDependency".)
//
// This could conceivably be part of ImageFile or bufmoduleref.FileInfo.
// For ImageFile, this would be a field that is ignored when translated to proto,
// and is calculated on creation from proto. IsImport would become ImportType.
// You could go a step further and make this optionally part of the proto definition.
//
// You could even go down to bufmoduleref.FileInfo if you used the AST, but this
// could be error prone.
//
// However, for simplicity now (and to not rewrite the whole codebase), we make
// this a separate type that is calculated off of an Image after the fact.
//
// If this became part of ImageFile or bufmoduleref.FileInfo, you would get
// all the ImageDependencies from the ImageFiles, and then sort | uniq them
// to get the ImageDependencies for an Image. This would remove the requirement
// of this associated type to have a ModuleIdentity and commit, so in
// the IsDirect example below, d.proto would not be "ignored" - it would
// be an ImageFile like any other, with ImportType DIRECT.
//
// Note that if we ever do this, there is validation in newImage that enforces
// that all ImageFiles with the same ModuleIdentity have the same commit. This
// validation will likely have to be moved around.
type ImageModuleDependency interface {
	// String() returns remote/owner/repository[:commit].
	fmt.Stringer
	// Required. Will never be nil.
	ModuleIdentity() bufmoduleref.ModuleIdentity
	// Optional. May be empty.
	Commit() string
	// IsDirect returns true if the dependency is a direct dependency.
	//
	// A dependency is direct if it is only an import of non-imports in the image.
	//
	// Example:
	//
	// a.proto, module buf.build/foo/a, is non-import, imports b.proto
	// b.proto, module buf.build/foo/b, is import, imports c.proto
	// c.proto, module buf.build/foo/c, is import
	//
	// In this case, the list would contain only buf.build/foo/b, as buf.build/foo/a
	// for a.proto is a non-import, and buf.build/foo/c for c.proto is only imported
	// by an import
	IsDirect() bool
	// isImageModuleDependency is unexported so that only types within this
	// package can implement ImageModuleDependency.
	isImageModuleDependency()
}
// ImageModuleDependencies returns all ImageModuleDependencies for the Image.
// (Comment fixed to start with the function name, per Go doc convention;
// it previously said "ImageModuleDependency".)
//
// Does not return any ImageModuleDependencies for non-imports, that is the
// ModuleIdentities and commits represented by non-imports are not represented
// in this list.
func ImageModuleDependencies(image Image) []ImageModuleDependency {
	// A dependency is "direct" if some non-import file imports it, so first
	// collect the set of paths imported by non-imports.
	importsOfNonImports := make(map[string]struct{})
	for _, imageFile := range image.Files() {
		if !imageFile.IsImport() {
			for _, dependency := range imageFile.FileDescriptor().GetDependency() {
				importsOfNonImports[dependency] = struct{}{}
			}
		}
	}
	// We know that all ImageFiles with the same ModuleIdentity
	// have the same commit or no commit, so using String() will properly identify
	// unique dependencies.
	stringToImageModuleDependency := make(map[string]ImageModuleDependency)
	for _, imageFile := range image.Files() {
		if imageFile.IsImport() {
			// Imports without a ModuleIdentity are skipped.
			if moduleIdentity := imageFile.ModuleIdentity(); moduleIdentity != nil {
				_, isDirect := importsOfNonImports[imageFile.Path()]
				imageModuleDependency := newImageModuleDependency(
					moduleIdentity,
					imageFile.Commit(),
					isDirect,
				)
				stringToImageModuleDependency[imageModuleDependency.String()] = imageModuleDependency
			}
		}
	}
	imageModuleDependencies := make([]ImageModuleDependency, 0, len(stringToImageModuleDependency))
	for _, imageModuleDependency := range stringToImageModuleDependency {
		imageModuleDependencies = append(
			imageModuleDependencies,
			imageModuleDependency,
		)
	}
	// Map iteration order is random; sort for deterministic output.
	sortImageModuleDependencies(imageModuleDependencies)
	return imageModuleDependencies
}
// newImageForProtoOptions holds the settings accumulated from
// NewImageForProtoOption functions passed to NewImageForProto.
type newImageForProtoOptions struct {
	// noReparse skips the reparse step; set by WithNoReparse.
	noReparse bool
	// computeUnusedImports recomputes unused imports during reparse;
	// set by WithUnusedImportsComputation.
	computeUnusedImports bool
}
// reparseImageProto reparses unrecognized fields in the proto Image against a
// resolver built from the image's own files, and, when computeUnusedImports is
// set, recomputes each file's UnusedDependency list in its buf extension.
func reparseImageProto(protoImage *imagev1.Image, computeUnusedImports bool) error {
	// TODO right now, NewResolver sets AllowUnresolvable to true all the time
	// we want to make this into a check, and we verify if we need this for the individual command
	resolver := protoencoding.NewLazyResolver(ProtoImageToFileDescriptors(protoImage)...)
	if err := protoencoding.ReparseUnrecognized(resolver, protoImage.ProtoReflect()); err != nil {
		return fmt.Errorf("could not reparse image: %v", err)
	}
	if !computeUnusedImports {
		return nil
	}
	tracker := &importTracker{
		resolver: resolver,
		used:     map[string]map[string]struct{}{},
	}
	tracker.findUsedImports(protoImage)
	// Now we can populate the list of unused dependencies on each file.
	for _, file := range protoImage.File {
		bufExt := file.BufExtension
		if bufExt == nil {
			bufExt = &imagev1.ImageFileExtension{}
			file.BufExtension = bufExt
		}
		bufExt.UnusedDependency = nil // reset
		// Public dependencies are never reported as unused, so index them first.
		publicDepIndexes := make(map[int]struct{}, len(file.PublicDependency))
		for _, publicDepIndex := range file.PublicDependency {
			publicDepIndexes[int(publicDepIndex)] = struct{}{}
		}
		usedImports := tracker.used[file.GetName()]
		for i, dep := range file.Dependency {
			if _, used := usedImports[dep]; used {
				continue
			}
			if _, isPublic := publicDepIndexes[i]; isPublic {
				continue
			}
			bufExt.UnusedDependency = append(bufExt.UnusedDependency, int32(i))
		}
	}
	return nil
}
| {
return nil, err
} | conditional_block |
bufimage.go | // Copyright 2020-2023 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bufimage
import (
"fmt"
"sort"
"github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref"
imagev1 "github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/image/v1"
"github.com/bufbuild/buf/private/pkg/normalpath"
"github.com/bufbuild/buf/private/pkg/protodescriptor"
"github.com/bufbuild/buf/private/pkg/protoencoding"
"google.golang.org/protobuf/types/descriptorpb"
"google.golang.org/protobuf/types/pluginpb"
)
// ImageFile is a Protobuf file within an image.
type ImageFile interface {
bufmoduleref.FileInfo
// Proto is the backing *descriptorpb.FileDescriptorProto for this File.
//
// FileDescriptor should be preferred to Proto. We keep this method around
// because we have code that does modification to the ImageFile via this.
//
// This will never be nil.
// The value Path() is equal to Proto.GetName() .
Proto() *descriptorpb.FileDescriptorProto
// FileDescriptor is the backing FileDescriptor for this File.
//
// This will never be nil.
// The value Path() is equal to FileDescriptor.GetName() .
FileDescriptor() protodescriptor.FileDescriptor
// IsSyntaxUnspecified will be true if the syntax was not explicitly specified.
IsSyntaxUnspecified() bool
// UnusedDependencyIndexes returns the indexes of the unused dependencies within
// FileDescriptor.GetDependency().
//
// All indexes will be valid.
// Will return nil if empty.
UnusedDependencyIndexes() []int32
// ImageFileWithIsImport returns a copy of the ImageFile with the new ImageFile
// now marked as an import.
//
// If the original ImageFile was already an import, this returns
// the original ImageFile.
ImageFileWithIsImport(isImport bool) ImageFile
isImageFile()
}
// NewImageFile returns a new ImageFile.
//
// If externalPath is empty, path is used.
//
// TODO: moduleIdentity and commit should be options since they are optional.
func NewImageFile(
fileDescriptor protodescriptor.FileDescriptor,
moduleIdentity bufmoduleref.ModuleIdentity,
commit string,
externalPath string,
isImport bool,
isSyntaxUnspecified bool,
unusedDependencyIndexes []int32,
) (ImageFile, error) {
return newImageFile(
fileDescriptor,
moduleIdentity,
commit,
externalPath,
isImport,
isSyntaxUnspecified,
unusedDependencyIndexes,
)
}
// Image is a buf image.
type Image interface {
// Files are the files that comprise the image.
//
// This contains all files, including imports if available.
// The returned files are in correct DAG order.
//
// All files that have the same ModuleIdentity will also have the same commit, or no commit.
// This is enforced at construction time.
Files() []ImageFile
// GetFile gets the file for the root relative file path.
//
// If the file does not exist, nil is returned.
// The path is expected to be normalized and validated.
// Note that all values of GetDependency() can be used here.
GetFile(path string) ImageFile
isImage()
}
// NewImage returns a new Image for the given ImageFiles.
//
// The input ImageFiles are expected to be in correct DAG order!
// TODO: Consider checking the above, and if not, reordering the Files.
// If imageFiles is empty, returns error
func NewImage(imageFiles []ImageFile) (Image, error) {
return newImage(imageFiles, false)
}
// MergeImages returns a new Image for the given Images. ImageFiles
// treated as non-imports in at least one of the given Images will
// be treated as non-imports in the returned Image. The first non-import
// version of a file will be used in the result.
//
// Reorders the ImageFiles to be in DAG order.
// Duplicates can exist across the Images, but only if duplicates are non-imports.
func MergeImages(images ...Image) (Image, error) {
switch len(images) {
case 0:
return nil, nil
case 1:
return images[0], nil
default:
var paths []string
imageFileSet := make(map[string]ImageFile)
for _, image := range images {
for _, currentImageFile := range image.Files() {
storedImageFile, ok := imageFileSet[currentImageFile.Path()]
if !ok {
imageFileSet[currentImageFile.Path()] = currentImageFile
paths = append(paths, currentImageFile.Path())
continue
}
if !storedImageFile.IsImport() && !currentImageFile.IsImport() {
return nil, fmt.Errorf("%s is a non-import in multiple images", currentImageFile.Path())
}
if storedImageFile.IsImport() && !currentImageFile.IsImport() {
imageFileSet[currentImageFile.Path()] = currentImageFile
}
}
}
// We need to preserve order for deterministic results, so we add
// the files in the order they're given, but base our selection
// on the imageFileSet.
imageFiles := make([]ImageFile, 0, len(imageFileSet))
for _, path := range paths {
imageFiles = append(imageFiles, imageFileSet[path] /* Guaranteed to exist */)
}
return newImage(imageFiles, true)
}
}
// NewImageForProto returns a new Image for the given proto Image.
//
// The input Files are expected to be in correct DAG order!
// TODO: Consider checking the above, and if not, reordering the Files.
//
// TODO: do we want to add the ability to do external path resolution here?
func NewImageForProto(protoImage *imagev1.Image, options ...NewImageForProtoOption) (Image, error) {
	var newImageOptions newImageForProtoOptions
	for _, option := range options {
		option(&newImageOptions)
	}
	if newImageOptions.noReparse && newImageOptions.computeUnusedImports {
		// Name the actual exported option: the message previously said
		// "WithComputeUnusedImports", which does not exist; the option is
		// WithUnusedImportsComputation.
		return nil, fmt.Errorf("cannot use both WithNoReparse and WithUnusedImportsComputation options; they are mutually exclusive")
	}
	// Reparsing reconstitutes custom options from unrecognized bytes and is
	// a prerequisite for unused-import computation (see reparseImageProto).
	if !newImageOptions.noReparse {
		if err := reparseImageProto(protoImage, newImageOptions.computeUnusedImports); err != nil {
			return nil, err
		}
	}
	if err := validateProtoImage(protoImage); err != nil {
		return nil, err
	}
	imageFiles := make([]ImageFile, len(protoImage.File))
	for i, protoImageFile := range protoImage.File {
		// Pull per-file metadata out of the buf extension, when present.
		var isImport bool
		var isSyntaxUnspecified bool
		var unusedDependencyIndexes []int32
		var moduleIdentity bufmoduleref.ModuleIdentity
		var commit string
		var err error
		if protoImageFileExtension := protoImageFile.GetBufExtension(); protoImageFileExtension != nil {
			isImport = protoImageFileExtension.GetIsImport()
			isSyntaxUnspecified = protoImageFileExtension.GetIsSyntaxUnspecified()
			unusedDependencyIndexes = protoImageFileExtension.GetUnusedDependency()
			if protoModuleInfo := protoImageFileExtension.GetModuleInfo(); protoModuleInfo != nil {
				if protoModuleName := protoModuleInfo.GetName(); protoModuleName != nil {
					moduleIdentity, err = bufmoduleref.NewModuleIdentity(
						protoModuleName.GetRemote(),
						protoModuleName.GetOwner(),
						protoModuleName.GetRepository(),
					)
					if err != nil {
						return nil, err
					}
					// we only want to set this if there is a module name
					commit = protoModuleInfo.GetCommit()
				}
			}
		}
		imageFile, err := NewImageFile(
			protoImageFile,
			moduleIdentity,
			commit,
			protoImageFile.GetName(),
			isImport,
			isSyntaxUnspecified,
			unusedDependencyIndexes,
		)
		if err != nil {
			return nil, err
		}
		imageFiles[i] = imageFile
	}
	return NewImage(imageFiles)
}
// NewImageForCodeGeneratorRequest returns a new Image from a given CodeGeneratorRequest.
//
// The input Files are expected to be in correct DAG order!
// TODO: Consider checking the above, and if not, reordering the Files.
func NewImageForCodeGeneratorRequest(request *pluginpb.CodeGeneratorRequest, options ...NewImageForProtoOption) (Image, error) {
	if err := protodescriptor.ValidateCodeGeneratorRequestExceptFileDescriptorProtos(request); err != nil {
		return nil, err
	}
	fileDescriptorProtos := request.GetProtoFile()
	protoImageFiles := make([]*imagev1.ImageFile, len(fileDescriptorProtos))
	for i, fileDescriptorProto := range fileDescriptorProtos {
		// Import filtering happens below in ImageWithOnlyPaths; we also
		// cannot determine here whether the syntax was unset.
		protoImageFiles[i] = fileDescriptorProtoToProtoImageFile(fileDescriptorProto, false, false, nil, nil, "")
	}
	image, err := NewImageForProto(
		&imagev1.Image{
			File: protoImageFiles,
		},
		options...,
	)
	if err != nil {
		return nil, err
	}
	// Narrow to the files to generate (plus their required imports).
	return ImageWithOnlyPaths(image, request.GetFileToGenerate(), nil)
}
// NewImageForProtoOption is an option for use with NewImageForProto.
type NewImageForProtoOption func(*newImageForProtoOptions)
// WithNoReparse instructs NewImageForProto to skip the reparse step. The reparse
// step is usually needed when unmarshalling the image from bytes. It reconstitutes
// custom options, from unrecognized bytes to known extension fields.
func | () NewImageForProtoOption {
return func(options *newImageForProtoOptions) {
options.noReparse = true
}
}
// WithUnusedImportsComputation instructs NewImageForProto to compute unused imports
// for the files. These are usually computed by the compiler and stored in the image.
// But some sources of images may not include this information, so this option can be
// used to ensure that information is present in the image and accurate.
//
// This option is NOT compatible with WithNoReparse: the image must be re-parsed for
// there to be adequate information for computing unused imports.
func WithUnusedImportsComputation() NewImageForProtoOption {
return func(options *newImageForProtoOptions) {
options.computeUnusedImports = true
}
}
// ImageWithoutImports returns a copy of the Image without imports.
//
// The backing Files are not copied.
func ImageWithoutImports(image Image) Image {
imageFiles := image.Files()
newImageFiles := make([]ImageFile, 0, len(imageFiles))
for _, imageFile := range imageFiles {
if !imageFile.IsImport() {
newImageFiles = append(newImageFiles, imageFile)
}
}
return newImageNoValidate(newImageFiles)
}
// ImageWithOnlyPaths returns a copy of the Image that only includes the files
// with the given root relative file paths or directories.
//
// Note that paths can be either files or directories - whether or not a path
// is included is a result of normalpath.EqualsOrContainsPath.
//
// If a root relative file path does not exist, this errors.
func ImageWithOnlyPaths(
image Image,
paths []string,
excludePaths []string,
) (Image, error) {
return imageWithOnlyPaths(image, paths, excludePaths, false)
}
// ImageWithOnlyPathsAllowNotExist returns a copy of the Image that only includes the files
// with the given root relative file paths.
//
// Note that paths can be either files or directories - whether or not a path
// is included is a result of normalpath.EqualsOrContainsPath.
//
// If a root relative file path does not exist, this skips this path.
func ImageWithOnlyPathsAllowNotExist(
image Image,
paths []string,
excludePaths []string,
) (Image, error) {
return imageWithOnlyPaths(image, paths, excludePaths, true)
}
// ImageByDir returns multiple images that have non-imports split
// by directory.
//
// That is, each Image will only contain a single directory's files
// as it's non-imports, along with all required imports for the
// files in that directory.
func ImageByDir(image Image) ([]Image, error) {
imageFiles := image.Files()
paths := make([]string, 0, len(imageFiles))
for _, imageFile := range imageFiles {
if !imageFile.IsImport() {
paths = append(paths, imageFile.Path())
}
}
dirToPaths := normalpath.ByDir(paths...)
// we need this to produce a deterministic order of the returned Images
dirs := make([]string, 0, len(dirToPaths))
for dir := range dirToPaths {
dirs = append(dirs, dir)
}
sort.Strings(dirs)
newImages := make([]Image, 0, len(dirToPaths))
for _, dir := range dirs {
paths, ok := dirToPaths[dir]
if !ok {
// this should never happen
return nil, fmt.Errorf("no dir for %q in dirToPaths", dir)
}
newImage, err := ImageWithOnlyPaths(image, paths, nil)
if err != nil {
return nil, err
}
newImages = append(newImages, newImage)
}
return newImages, nil
}
// ImageToProtoImage returns a new ProtoImage for the Image.
func ImageToProtoImage(image Image) *imagev1.Image {
imageFiles := image.Files()
protoImage := &imagev1.Image{
File: make([]*imagev1.ImageFile, len(imageFiles)),
}
for i, imageFile := range imageFiles {
protoImage.File[i] = imageFileToProtoImageFile(imageFile)
}
return protoImage
}
// ImageToFileDescriptorSet returns a new FileDescriptorSet for the Image.
func ImageToFileDescriptorSet(image Image) *descriptorpb.FileDescriptorSet {
return protodescriptor.FileDescriptorSetForFileDescriptors(ImageToFileDescriptors(image)...)
}
// ImageToFileDescriptors returns the FileDescriptors for the Image.
func ImageToFileDescriptors(image Image) []protodescriptor.FileDescriptor {
return imageFilesToFileDescriptors(image.Files())
}
// ImageToFileDescriptorProtos returns the FileDescriptorProtos for the Image.
func ImageToFileDescriptorProtos(image Image) []*descriptorpb.FileDescriptorProto {
return imageFilesToFileDescriptorProtos(image.Files())
}
// ImageToCodeGeneratorRequest returns a new CodeGeneratorRequest for the Image.
//
// All non-imports are added as files to generate.
// If includeImports is set, all non-well-known-type imports are also added as files to generate.
// If includeWellKnownTypes is set, well-known-type imports are also added as files to generate.
// includeWellKnownTypes has no effect if includeImports is not set.
func ImageToCodeGeneratorRequest(
image Image,
parameter string,
compilerVersion *pluginpb.Version,
includeImports bool,
includeWellKnownTypes bool,
) *pluginpb.CodeGeneratorRequest {
return imageToCodeGeneratorRequest(
image,
parameter,
compilerVersion,
includeImports,
includeWellKnownTypes,
nil,
nil,
)
}
// ImagesToCodeGeneratorRequests converts the Images to CodeGeneratorRequests.
//
// All non-imports are added as files to generate.
// If includeImports is set, all non-well-known-type imports are also added as files to generate.
// If includeImports is set, only one CodeGeneratorRequest will contain any given file as a FileToGenerate.
// If includeWellKnownTypes is set, well-known-type imports are also added as files to generate.
// includeWellKnownTypes has no effect if includeImports is not set.
func ImagesToCodeGeneratorRequests(
images []Image,
parameter string,
compilerVersion *pluginpb.Version,
includeImports bool,
includeWellKnownTypes bool,
) []*pluginpb.CodeGeneratorRequest {
requests := make([]*pluginpb.CodeGeneratorRequest, len(images))
// alreadyUsedPaths is a map of paths that have already been added to an image.
//
// We track this if includeImports is set, so that when we find an import, we can
// see if the import was already added to a CodeGeneratorRequest via another Image
// in the Image slice. If the import was already added, we do not add duplicates
// across CodeGeneratorRequests.
var alreadyUsedPaths map[string]struct{}
// nonImportPaths is a map of non-import paths.
//
// We track this if includeImports is set. If we find a non-import file in Image A
// and this file is an import in Image B, the file will have already been added to
// a CodeGeneratorRequest via Image A, so do not add the duplicate to any other
// CodeGeneratorRequest.
var nonImportPaths map[string]struct{}
if includeImports {
// We don't need to track these if includeImports is false, so we only populate
// the maps if includeImports is true. If includeImports is false, only non-imports
// will be added to each CodeGeneratorRequest, so figuring out whether or not
// we should add a given import to a given CodeGeneratorRequest is unnecessary.
//
// imageToCodeGeneratorRequest checks if these maps are nil before every access.
alreadyUsedPaths = make(map[string]struct{})
nonImportPaths = make(map[string]struct{})
for _, image := range images {
for _, imageFile := range image.Files() {
if !imageFile.IsImport() {
nonImportPaths[imageFile.Path()] = struct{}{}
}
}
}
}
for i, image := range images {
requests[i] = imageToCodeGeneratorRequest(
image,
parameter,
compilerVersion,
includeImports,
includeWellKnownTypes,
alreadyUsedPaths,
nonImportPaths,
)
}
return requests
}
// ProtoImageToFileDescriptors returns the FileDescriptors for the proto Image.
func ProtoImageToFileDescriptors(protoImage *imagev1.Image) []protodescriptor.FileDescriptor {
return protoImageFilesToFileDescriptors(protoImage.File)
}
// ImageDependency is a dependency of an image.
//
// This could conceivably be part of ImageFile or bufmoduleref.FileInfo.
// For ImageFile, this would be a field that is ignored when translated to proto,
// and is calculated on creation from proto. IsImport would become ImportType.
// You could go a step further and make this optionally part of the proto definition.
//
// You could even go down to bufmoduleref.FileInfo if you used the AST, but this
// could be error prone.
//
// However, for simplicity now (and to not rewrite the whole codebase), we make
// this a separate type that is calculated off of an Image after the fact.
//
// If this became part of ImageFile or bufmoduleref.FileInfo, you would get
// all the ImageDependencies from the ImageFiles, and then sort | uniq them
// to get the ImageDependencies for an Image. This would remove the requirement
// of this associated type to have a ModuleIdentity and commit, so in
// the IsDirect example below, d.proto would not be "ignored" - it would
// be an ImageFile like any other, with ImportType DIRECT.
//
// Note that if we ever do this, there is validation in newImage that enforces
// that all ImageFiles with the same ModuleIdentity have the same commit. This
// validation will likely have to be moved around.
type ImageModuleDependency interface {
// String() returns remote/owner/repository[:commit].
fmt.Stringer
// Required. Will never be nil.
ModuleIdentity() bufmoduleref.ModuleIdentity
// Optional. May be empty.
Commit() string
// IsDirect returns true if the dependency is a direct dependency.
//
// A dependency is direct if it is only an import of non-imports in the image.
//
// Example:
//
// a.proto, module buf.build/foo/a, is non-import, imports b.proto
// b.proto, module buf.build/foo/b, is import, imports c.proto
// c.proto, module buf.build/foo/c, is import
//
// In this case, the list would contain only buf.build/foo/b, as buf.build/foo/a
// for a.proto is a non-import, and buf.build/foo/c for c.proto is only imported
// by an import
IsDirect() bool
isImageModuleDependency()
}
// ImageModuleDependency returns all ImageModuleDependencies for the Image.
//
// Does not return any ImageModuleDependencies for non-imports, that is the
// ModuleIdentities and commits represented by non-imports are not represented
// in this list.
func ImageModuleDependencies(image Image) []ImageModuleDependency {
importsOfNonImports := make(map[string]struct{})
for _, imageFile := range image.Files() {
if !imageFile.IsImport() {
for _, dependency := range imageFile.FileDescriptor().GetDependency() {
importsOfNonImports[dependency] = struct{}{}
}
}
}
// We know that all ImageFiles with the same ModuleIdentity
// have the same commit or no commit, so using String() will properly identify
// unique dependencies.
stringToImageModuleDependency := make(map[string]ImageModuleDependency)
for _, imageFile := range image.Files() {
if imageFile.IsImport() {
if moduleIdentity := imageFile.ModuleIdentity(); moduleIdentity != nil {
_, isDirect := importsOfNonImports[imageFile.Path()]
imageModuleDependency := newImageModuleDependency(
moduleIdentity,
imageFile.Commit(),
isDirect,
)
stringToImageModuleDependency[imageModuleDependency.String()] = imageModuleDependency
}
}
}
imageModuleDependencies := make([]ImageModuleDependency, 0, len(stringToImageModuleDependency))
for _, imageModuleDependency := range stringToImageModuleDependency {
imageModuleDependencies = append(
imageModuleDependencies,
imageModuleDependency,
)
}
sortImageModuleDependencies(imageModuleDependencies)
return imageModuleDependencies
}
type newImageForProtoOptions struct {
noReparse bool
computeUnusedImports bool
}
func reparseImageProto(protoImage *imagev1.Image, computeUnusedImports bool) error {
// TODO right now, NewResolver sets AllowUnresolvable to true all the time
// we want to make this into a check, and we verify if we need this for the individual command
resolver := protoencoding.NewLazyResolver(
ProtoImageToFileDescriptors(
protoImage,
)...,
)
if err := protoencoding.ReparseUnrecognized(resolver, protoImage.ProtoReflect()); err != nil {
return fmt.Errorf("could not reparse image: %v", err)
}
if computeUnusedImports {
tracker := &importTracker{
resolver: resolver,
used: map[string]map[string]struct{}{},
}
tracker.findUsedImports(protoImage)
// Now we can populated list of unused dependencies
for _, file := range protoImage.File {
bufExt := file.BufExtension
if bufExt == nil {
bufExt = &imagev1.ImageFileExtension{}
file.BufExtension = bufExt
}
bufExt.UnusedDependency = nil // reset
usedImports := tracker.used[file.GetName()]
for i, dep := range file.Dependency {
if _, ok := usedImports[dep]; !ok {
// it's fine if it's public
isPublic := false
for _, publicDepIndex := range file.PublicDependency {
if i == int(publicDepIndex) {
isPublic = true
break
}
}
if !isPublic {
bufExt.UnusedDependency = append(bufExt.UnusedDependency, int32(i))
}
}
}
}
}
return nil
}
| WithNoReparse | identifier_name |
bufimage.go | // Copyright 2020-2023 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bufimage
import (
"fmt"
"sort"
"github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref"
imagev1 "github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/image/v1"
"github.com/bufbuild/buf/private/pkg/normalpath"
"github.com/bufbuild/buf/private/pkg/protodescriptor"
"github.com/bufbuild/buf/private/pkg/protoencoding"
"google.golang.org/protobuf/types/descriptorpb"
"google.golang.org/protobuf/types/pluginpb"
)
// ImageFile is a Protobuf file within an image.
type ImageFile interface {
bufmoduleref.FileInfo
// Proto is the backing *descriptorpb.FileDescriptorProto for this File.
//
// FileDescriptor should be preferred to Proto. We keep this method around
// because we have code that does modification to the ImageFile via this.
//
// This will never be nil.
// The value Path() is equal to Proto.GetName() .
Proto() *descriptorpb.FileDescriptorProto
// FileDescriptor is the backing FileDescriptor for this File.
//
// This will never be nil.
// The value Path() is equal to FileDescriptor.GetName() .
FileDescriptor() protodescriptor.FileDescriptor
// IsSyntaxUnspecified will be true if the syntax was not explicitly specified.
IsSyntaxUnspecified() bool
// UnusedDependencyIndexes returns the indexes of the unused dependencies within
// FileDescriptor.GetDependency().
//
// All indexes will be valid.
// Will return nil if empty.
UnusedDependencyIndexes() []int32
// ImageFileWithIsImport returns a copy of the ImageFile with the new ImageFile
// now marked as an import.
//
// If the original ImageFile was already an import, this returns
// the original ImageFile.
ImageFileWithIsImport(isImport bool) ImageFile
isImageFile()
}
// NewImageFile returns a new ImageFile.
//
// If externalPath is empty, path is used.
//
// TODO: moduleIdentity and commit should be options since they are optional.
func NewImageFile(
fileDescriptor protodescriptor.FileDescriptor,
moduleIdentity bufmoduleref.ModuleIdentity,
commit string,
externalPath string,
isImport bool,
isSyntaxUnspecified bool,
unusedDependencyIndexes []int32,
) (ImageFile, error) {
return newImageFile(
fileDescriptor,
moduleIdentity,
commit,
externalPath,
isImport,
isSyntaxUnspecified,
unusedDependencyIndexes,
)
}
// Image is a buf image.
type Image interface {
// Files are the files that comprise the image.
//
// This contains all files, including imports if available.
// The returned files are in correct DAG order.
//
// All files that have the same ModuleIdentity will also have the same commit, or no commit.
// This is enforced at construction time.
Files() []ImageFile
// GetFile gets the file for the root relative file path.
//
// If the file does not exist, nil is returned.
// The path is expected to be normalized and validated.
// Note that all values of GetDependency() can be used here.
GetFile(path string) ImageFile
isImage()
}
// NewImage returns a new Image for the given ImageFiles.
//
// The input ImageFiles are expected to be in correct DAG order!
// TODO: Consider checking the above, and if not, reordering the Files.
// If imageFiles is empty, returns error
func NewImage(imageFiles []ImageFile) (Image, error) {
return newImage(imageFiles, false) | // MergeImages returns a new Image for the given Images. ImageFiles
// treated as non-imports in at least one of the given Images will
// be treated as non-imports in the returned Image. The first non-import
// version of a file will be used in the result.
//
// Reorders the ImageFiles to be in DAG order.
// Duplicates can exist across the Images, but only if duplicates are non-imports.
func MergeImages(images ...Image) (Image, error) {
switch len(images) {
case 0:
return nil, nil
case 1:
return images[0], nil
default:
var paths []string
imageFileSet := make(map[string]ImageFile)
for _, image := range images {
for _, currentImageFile := range image.Files() {
storedImageFile, ok := imageFileSet[currentImageFile.Path()]
if !ok {
imageFileSet[currentImageFile.Path()] = currentImageFile
paths = append(paths, currentImageFile.Path())
continue
}
if !storedImageFile.IsImport() && !currentImageFile.IsImport() {
return nil, fmt.Errorf("%s is a non-import in multiple images", currentImageFile.Path())
}
if storedImageFile.IsImport() && !currentImageFile.IsImport() {
imageFileSet[currentImageFile.Path()] = currentImageFile
}
}
}
// We need to preserve order for deterministic results, so we add
// the files in the order they're given, but base our selection
// on the imageFileSet.
imageFiles := make([]ImageFile, 0, len(imageFileSet))
for _, path := range paths {
imageFiles = append(imageFiles, imageFileSet[path] /* Guaranteed to exist */)
}
return newImage(imageFiles, true)
}
}
// NewImageForProto returns a new Image for the given proto Image.
//
// The input Files are expected to be in correct DAG order!
// TODO: Consider checking the above, and if not, reordering the Files.
//
// TODO: do we want to add the ability to do external path resolution here?
func NewImageForProto(protoImage *imagev1.Image, options ...NewImageForProtoOption) (Image, error) {
var newImageOptions newImageForProtoOptions
for _, option := range options {
option(&newImageOptions)
}
if newImageOptions.noReparse && newImageOptions.computeUnusedImports {
return nil, fmt.Errorf("cannot use both WithNoReparse and WithComputeUnusedImports options; they are mutually exclusive")
}
if !newImageOptions.noReparse {
if err := reparseImageProto(protoImage, newImageOptions.computeUnusedImports); err != nil {
return nil, err
}
}
if err := validateProtoImage(protoImage); err != nil {
return nil, err
}
imageFiles := make([]ImageFile, len(protoImage.File))
for i, protoImageFile := range protoImage.File {
var isImport bool
var isSyntaxUnspecified bool
var unusedDependencyIndexes []int32
var moduleIdentity bufmoduleref.ModuleIdentity
var commit string
var err error
if protoImageFileExtension := protoImageFile.GetBufExtension(); protoImageFileExtension != nil {
isImport = protoImageFileExtension.GetIsImport()
isSyntaxUnspecified = protoImageFileExtension.GetIsSyntaxUnspecified()
unusedDependencyIndexes = protoImageFileExtension.GetUnusedDependency()
if protoModuleInfo := protoImageFileExtension.GetModuleInfo(); protoModuleInfo != nil {
if protoModuleName := protoModuleInfo.GetName(); protoModuleName != nil {
moduleIdentity, err = bufmoduleref.NewModuleIdentity(
protoModuleName.GetRemote(),
protoModuleName.GetOwner(),
protoModuleName.GetRepository(),
)
if err != nil {
return nil, err
}
// we only want to set this if there is a module name
commit = protoModuleInfo.GetCommit()
}
}
}
imageFile, err := NewImageFile(
protoImageFile,
moduleIdentity,
commit,
protoImageFile.GetName(),
isImport,
isSyntaxUnspecified,
unusedDependencyIndexes,
)
if err != nil {
return nil, err
}
imageFiles[i] = imageFile
}
return NewImage(imageFiles)
}
// NewImageForCodeGeneratorRequest returns a new Image from a given CodeGeneratorRequest.
//
// The input Files are expected to be in correct DAG order!
// TODO: Consider checking the above, and if not, reordering the Files.
func NewImageForCodeGeneratorRequest(request *pluginpb.CodeGeneratorRequest, options ...NewImageForProtoOption) (Image, error) {
if err := protodescriptor.ValidateCodeGeneratorRequestExceptFileDescriptorProtos(request); err != nil {
return nil, err
}
protoImageFiles := make([]*imagev1.ImageFile, len(request.GetProtoFile()))
for i, fileDescriptorProto := range request.GetProtoFile() {
// we filter whether something is an import or not in ImageWithOnlyPaths
// we cannot determine if the syntax was unset
protoImageFiles[i] = fileDescriptorProtoToProtoImageFile(fileDescriptorProto, false, false, nil, nil, "")
}
image, err := NewImageForProto(
&imagev1.Image{
File: protoImageFiles,
},
options...,
)
if err != nil {
return nil, err
}
return ImageWithOnlyPaths(
image,
request.GetFileToGenerate(),
nil,
)
}
// NewImageForProtoOption is an option for use with NewImageForProto.
type NewImageForProtoOption func(*newImageForProtoOptions)
// WithNoReparse instructs NewImageForProto to skip the reparse step. The reparse
// step is usually needed when unmarshalling the image from bytes. It reconstitutes
// custom options, from unrecognized bytes to known extension fields.
func WithNoReparse() NewImageForProtoOption {
return func(options *newImageForProtoOptions) {
options.noReparse = true
}
}
// WithUnusedImportsComputation instructs NewImageForProto to compute unused imports
// for the files. These are usually computed by the compiler and stored in the image.
// But some sources of images may not include this information, so this option can be
// used to ensure that information is present in the image and accurate.
//
// This option is NOT compatible with WithNoReparse: the image must be re-parsed for
// there to be adequate information for computing unused imports.
func WithUnusedImportsComputation() NewImageForProtoOption {
return func(options *newImageForProtoOptions) {
options.computeUnusedImports = true
}
}
// ImageWithoutImports returns a copy of the Image without imports.
//
// The backing Files are not copied.
func ImageWithoutImports(image Image) Image {
imageFiles := image.Files()
newImageFiles := make([]ImageFile, 0, len(imageFiles))
for _, imageFile := range imageFiles {
if !imageFile.IsImport() {
newImageFiles = append(newImageFiles, imageFile)
}
}
return newImageNoValidate(newImageFiles)
}
// ImageWithOnlyPaths returns a copy of the Image that only includes the files
// with the given root relative file paths or directories.
//
// Note that paths can be either files or directories - whether or not a path
// is included is a result of normalpath.EqualsOrContainsPath.
//
// If a root relative file path does not exist, this errors.
func ImageWithOnlyPaths(
image Image,
paths []string,
excludePaths []string,
) (Image, error) {
return imageWithOnlyPaths(image, paths, excludePaths, false)
}
// ImageWithOnlyPathsAllowNotExist returns a copy of the Image that only includes the files
// with the given root relative file paths.
//
// Note that paths can be either files or directories - whether or not a path
// is included is a result of normalpath.EqualsOrContainsPath.
//
// If a root relative file path does not exist, this skips this path.
func ImageWithOnlyPathsAllowNotExist(
image Image,
paths []string,
excludePaths []string,
) (Image, error) {
return imageWithOnlyPaths(image, paths, excludePaths, true)
}
// ImageByDir returns multiple images that have non-imports split
// by directory.
//
// That is, each Image will only contain a single directory's files
// as it's non-imports, along with all required imports for the
// files in that directory.
func ImageByDir(image Image) ([]Image, error) {
imageFiles := image.Files()
paths := make([]string, 0, len(imageFiles))
for _, imageFile := range imageFiles {
if !imageFile.IsImport() {
paths = append(paths, imageFile.Path())
}
}
dirToPaths := normalpath.ByDir(paths...)
// we need this to produce a deterministic order of the returned Images
dirs := make([]string, 0, len(dirToPaths))
for dir := range dirToPaths {
dirs = append(dirs, dir)
}
sort.Strings(dirs)
newImages := make([]Image, 0, len(dirToPaths))
for _, dir := range dirs {
paths, ok := dirToPaths[dir]
if !ok {
// this should never happen
return nil, fmt.Errorf("no dir for %q in dirToPaths", dir)
}
newImage, err := ImageWithOnlyPaths(image, paths, nil)
if err != nil {
return nil, err
}
newImages = append(newImages, newImage)
}
return newImages, nil
}
// ImageToProtoImage returns a new ProtoImage for the Image.
func ImageToProtoImage(image Image) *imagev1.Image {
imageFiles := image.Files()
protoImage := &imagev1.Image{
File: make([]*imagev1.ImageFile, len(imageFiles)),
}
for i, imageFile := range imageFiles {
protoImage.File[i] = imageFileToProtoImageFile(imageFile)
}
return protoImage
}
// ImageToFileDescriptorSet returns a new FileDescriptorSet for the Image.
func ImageToFileDescriptorSet(image Image) *descriptorpb.FileDescriptorSet {
return protodescriptor.FileDescriptorSetForFileDescriptors(ImageToFileDescriptors(image)...)
}
// ImageToFileDescriptors returns the FileDescriptors for the Image.
func ImageToFileDescriptors(image Image) []protodescriptor.FileDescriptor {
return imageFilesToFileDescriptors(image.Files())
}
// ImageToFileDescriptorProtos returns the FileDescriptorProtos for the Image.
func ImageToFileDescriptorProtos(image Image) []*descriptorpb.FileDescriptorProto {
return imageFilesToFileDescriptorProtos(image.Files())
}
// ImageToCodeGeneratorRequest returns a new CodeGeneratorRequest for the Image.
//
// All non-imports are added as files to generate.
// If includeImports is set, all non-well-known-type imports are also added as files to generate.
// If includeWellKnownTypes is set, well-known-type imports are also added as files to generate.
// includeWellKnownTypes has no effect if includeImports is not set.
func ImageToCodeGeneratorRequest(
image Image,
parameter string,
compilerVersion *pluginpb.Version,
includeImports bool,
includeWellKnownTypes bool,
) *pluginpb.CodeGeneratorRequest {
return imageToCodeGeneratorRequest(
image,
parameter,
compilerVersion,
includeImports,
includeWellKnownTypes,
nil,
nil,
)
}
// ImagesToCodeGeneratorRequests converts the Images to CodeGeneratorRequests.
//
// All non-imports are added as files to generate.
// If includeImports is set, all non-well-known-type imports are also added as files to generate.
// If includeImports is set, only one CodeGeneratorRequest will contain any given file as a FileToGenerate.
// If includeWellKnownTypes is set, well-known-type imports are also added as files to generate.
// includeWellKnownTypes has no effect if includeImports is not set.
func ImagesToCodeGeneratorRequests(
images []Image,
parameter string,
compilerVersion *pluginpb.Version,
includeImports bool,
includeWellKnownTypes bool,
) []*pluginpb.CodeGeneratorRequest {
requests := make([]*pluginpb.CodeGeneratorRequest, len(images))
// alreadyUsedPaths is a map of paths that have already been added to an image.
//
// We track this if includeImports is set, so that when we find an import, we can
// see if the import was already added to a CodeGeneratorRequest via another Image
// in the Image slice. If the import was already added, we do not add duplicates
// across CodeGeneratorRequests.
var alreadyUsedPaths map[string]struct{}
// nonImportPaths is a map of non-import paths.
//
// We track this if includeImports is set. If we find a non-import file in Image A
// and this file is an import in Image B, the file will have already been added to
// a CodeGeneratorRequest via Image A, so do not add the duplicate to any other
// CodeGeneratorRequest.
var nonImportPaths map[string]struct{}
if includeImports {
// We don't need to track these if includeImports is false, so we only populate
// the maps if includeImports is true. If includeImports is false, only non-imports
// will be added to each CodeGeneratorRequest, so figuring out whether or not
// we should add a given import to a given CodeGeneratorRequest is unnecessary.
//
// imageToCodeGeneratorRequest checks if these maps are nil before every access.
alreadyUsedPaths = make(map[string]struct{})
nonImportPaths = make(map[string]struct{})
for _, image := range images {
for _, imageFile := range image.Files() {
if !imageFile.IsImport() {
nonImportPaths[imageFile.Path()] = struct{}{}
}
}
}
}
for i, image := range images {
requests[i] = imageToCodeGeneratorRequest(
image,
parameter,
compilerVersion,
includeImports,
includeWellKnownTypes,
alreadyUsedPaths,
nonImportPaths,
)
}
return requests
}
// ProtoImageToFileDescriptors returns the FileDescriptors for the proto Image.
func ProtoImageToFileDescriptors(protoImage *imagev1.Image) []protodescriptor.FileDescriptor {
return protoImageFilesToFileDescriptors(protoImage.File)
}
// ImageDependency is a dependency of an image.
//
// This could conceivably be part of ImageFile or bufmoduleref.FileInfo.
// For ImageFile, this would be a field that is ignored when translated to proto,
// and is calculated on creation from proto. IsImport would become ImportType.
// You could go a step further and make this optionally part of the proto definition.
//
// You could even go down to bufmoduleref.FileInfo if you used the AST, but this
// could be error prone.
//
// However, for simplicity now (and to not rewrite the whole codebase), we make
// this a separate type that is calculated off of an Image after the fact.
//
// If this became part of ImageFile or bufmoduleref.FileInfo, you would get
// all the ImageDependencies from the ImageFiles, and then sort | uniq them
// to get the ImageDependencies for an Image. This would remove the requirement
// of this associated type to have a ModuleIdentity and commit, so in
// the IsDirect example below, d.proto would not be "ignored" - it would
// be an ImageFile like any other, with ImportType DIRECT.
//
// Note that if we ever do this, there is validation in newImage that enforces
// that all ImageFiles with the same ModuleIdentity have the same commit. This
// validation will likely have to be moved around.
type ImageModuleDependency interface {
// String() returns remote/owner/repository[:commit].
fmt.Stringer
// Required. Will never be nil.
ModuleIdentity() bufmoduleref.ModuleIdentity
// Optional. May be empty.
Commit() string
// IsDirect returns true if the dependency is a direct dependency.
//
// A dependency is direct if it is only an import of non-imports in the image.
//
// Example:
//
// a.proto, module buf.build/foo/a, is non-import, imports b.proto
// b.proto, module buf.build/foo/b, is import, imports c.proto
// c.proto, module buf.build/foo/c, is import
//
// In this case, the list would contain only buf.build/foo/b, as buf.build/foo/a
// for a.proto is a non-import, and buf.build/foo/c for c.proto is only imported
// by an import
IsDirect() bool
isImageModuleDependency()
}
// ImageModuleDependency returns all ImageModuleDependencies for the Image.
//
// Does not return any ImageModuleDependencies for non-imports, that is the
// ModuleIdentities and commits represented by non-imports are not represented
// in this list.
func ImageModuleDependencies(image Image) []ImageModuleDependency {
importsOfNonImports := make(map[string]struct{})
for _, imageFile := range image.Files() {
if !imageFile.IsImport() {
for _, dependency := range imageFile.FileDescriptor().GetDependency() {
importsOfNonImports[dependency] = struct{}{}
}
}
}
// We know that all ImageFiles with the same ModuleIdentity
// have the same commit or no commit, so using String() will properly identify
// unique dependencies.
stringToImageModuleDependency := make(map[string]ImageModuleDependency)
for _, imageFile := range image.Files() {
if imageFile.IsImport() {
if moduleIdentity := imageFile.ModuleIdentity(); moduleIdentity != nil {
_, isDirect := importsOfNonImports[imageFile.Path()]
imageModuleDependency := newImageModuleDependency(
moduleIdentity,
imageFile.Commit(),
isDirect,
)
stringToImageModuleDependency[imageModuleDependency.String()] = imageModuleDependency
}
}
}
imageModuleDependencies := make([]ImageModuleDependency, 0, len(stringToImageModuleDependency))
for _, imageModuleDependency := range stringToImageModuleDependency {
imageModuleDependencies = append(
imageModuleDependencies,
imageModuleDependency,
)
}
sortImageModuleDependencies(imageModuleDependencies)
return imageModuleDependencies
}
type newImageForProtoOptions struct {
noReparse bool
computeUnusedImports bool
}
func reparseImageProto(protoImage *imagev1.Image, computeUnusedImports bool) error {
// TODO right now, NewResolver sets AllowUnresolvable to true all the time
// we want to make this into a check, and we verify if we need this for the individual command
resolver := protoencoding.NewLazyResolver(
ProtoImageToFileDescriptors(
protoImage,
)...,
)
if err := protoencoding.ReparseUnrecognized(resolver, protoImage.ProtoReflect()); err != nil {
return fmt.Errorf("could not reparse image: %v", err)
}
if computeUnusedImports {
tracker := &importTracker{
resolver: resolver,
used: map[string]map[string]struct{}{},
}
tracker.findUsedImports(protoImage)
// Now we can populated list of unused dependencies
for _, file := range protoImage.File {
bufExt := file.BufExtension
if bufExt == nil {
bufExt = &imagev1.ImageFileExtension{}
file.BufExtension = bufExt
}
bufExt.UnusedDependency = nil // reset
usedImports := tracker.used[file.GetName()]
for i, dep := range file.Dependency {
if _, ok := usedImports[dep]; !ok {
// it's fine if it's public
isPublic := false
for _, publicDepIndex := range file.PublicDependency {
if i == int(publicDepIndex) {
isPublic = true
break
}
}
if !isPublic {
bufExt.UnusedDependency = append(bufExt.UnusedDependency, int32(i))
}
}
}
}
}
return nil
} | }
| random_line_split |
decorators.py | """
Copyright (c) 2018, xamoom GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
decorator
contains all decorators of janus. See more in janus.py
spec: http://jsonapi.org/
"""
import traceback
from janus.janus_logging import janus_logger
from janus.janus import DataMessage
from janus.janus import JsonApiMessage
from janus.janus import ErrorMessage
from janus.janus import JanusResponse
class jsonapi(object):
|
class describe(object):
def __init__( self,
success_status=200,
before_send_hook=None,
include_traceback_in_errors=False,
error_hook=None):
self.success_status = success_status
self.before_send_hook = before_send_hook
self.include_traceback_in_errors = include_traceback_in_errors
self.error_hook = error_hook
def __call__(self, f):
def wrapped_f(*a, **ka):
try:
#if this decorator is used the function must return all messages that should be described as a list
messages = f(*a, **ka)
#first check if there is an response object
#if not nothing to return so HTTP 204
#otherwise process response
if messages == None:
if self.before_send_hook != None:
self.before_send_hook(204,None,None)
return None
else:
if isinstance(messages, (list, tuple)) == False:
raise Exception('Methods using the "describe" decorator have to return a list of subclasses of DataMessage to describe.')
msg_descriptions = []
for msg in messages:
if issubclass(msg,DataMessage) == False:
raise Exception('All returned classes in the returned list have to be a subclass of DataMessage.')
msg_descriptions.append(msg().describe())
meta = {'message-types':msg_descriptions}
message = JsonApiMessage(meta=meta).to_json() #render json response
if self.before_send_hook != None: #fire before send hook
self.before_send_hook(self.success_status,message,None)
return message
except Exception as e:
err_msg = ErrorMessage.from_exception(e)
tb = traceback.format_exc()
if self.include_traceback_in_errors:
if err_msg.meta == None: err_msg.meta = {}
err_msg.traceback = tb
if self.error_hook != None:
self.error_hook(int(err_msg.status),err_msg,tb)
message = JsonApiMessage(errors=err_msg).to_json()
return message
return wrapped_f
| def __init__( self,
meta=None,
links=None,
included=None,
success_status=200,
before_send_hook=None,
include_traceback_in_errors=False,
error_hook=None,
cached_get_hook=None,
cached_set_hook=None,
include_relationships=False,
options_hook=None,
nest_in_responses=False,
logging=False):
self.meta = meta
self.links = links
self.included = included
self.message = None #gets set with the JanusResponse passed from calling function
self.success_status = success_status
self.before_send_hook = before_send_hook
self.include_traceback_in_errors = include_traceback_in_errors
self.error_hook = error_hook
self.include_relationships = include_relationships
self.options_hook = options_hook
self.cached_get_hook = cached_get_hook
self.cached_set_hook = cached_set_hook
self.nest_in_responses = nest_in_responses
if logging:
janus_logger.enable()
else:
janus_logger.disable()
def __call__(self, f):
def wrapped_f(*a, **ka):
try:
#first check if this is not a HTTP OPTIONS call using a method defined based on the WS framework.
#if it is one return empty array and do nothing else.
if self.options_hook != None:
if self.options_hook() == True:
janus_logger.debug("This was an OPTIONS request.")
return {}
response_obj = f(*a, **ka)
#first check if there is an response object
#if not nothing to return so HTTP 204
#otherwise process response
if response_obj == None:
if self.before_send_hook != None:
self.before_send_hook(204,None,None)
janus_logger.debug("Decorated function returned None. Nothing to map.")
return None
else:
#check response object
if isinstance(response_obj,JanusResponse) == False:
#janus_logger.error("Expected JanusResponse got " + str(type(response_obj)))
#raise Exception('Return value has to be instance of JanusResponse')
janus_logger.info("Not a JanusResponse. Will return this as it is. No mapping.")
return response_obj
message = None
#caching
loaded_from_cache = False
if self.cached_get_hook != None:
cached_object = self.cached_get_hook(response_obj)
if cached_object != None:
loaded_from_cache = True
message = cached_object #returned cached, already mapped, response
janus_logger.info("Will return cached message: " + str(loaded_from_cache))
if loaded_from_cache == False: #nothing in cache or cache deactivated
self.message = response_obj.message #get the message type to return
obj = response_obj.data #get the data to return
data = DataMessage.from_object(obj,self.message,do_nesting=self.nest_in_responses) #generate data message with data
#take care of includes
if response_obj.include_relationships != None: self.include_relationships = response_obj.include_relationships
included = None
janus_logger.info("Should map included: " + str(self.include_relationships))
if self.include_relationships:
included = self.__load_included(data,self.nest_in_responses)
#is there custome meta?
if response_obj.meta != None:
if self.meta == None:
self.meta = response_obj.meta
else:
self.meta.update(response_obj.meta)
message = JsonApiMessage(data=data,included=included,meta=self.meta,do_nesting=self.nest_in_responses).to_json() #render json response
#caching
if self.cached_set_hook != None and loaded_from_cache == False:
janus_logger.debug("Caching message")
self.cached_set_hook(response_obj,message)
if self.before_send_hook != None: #fire before send hook
self.before_send_hook(self.success_status,message,response_obj)
return message
except Exception as e:
err_msg = ErrorMessage.from_exception(e)
tb = traceback.format_exc()
if self.include_traceback_in_errors:
if err_msg.meta == None: err_msg.meta = {}
err_msg.traceback = tb
if self.error_hook != None:
self.error_hook(int(err_msg.status),err_msg,tb)
message = JsonApiMessage(errors=err_msg,meta=self.meta).to_json()
janus_logger.error("Traceback: " + tb)
return message
return wrapped_f
def __load_included(self, data_message,do_nesting=False):
included = []
if isinstance(data_message,list):
for d in data_message:
included = included + d.get_included(do_nesting=do_nesting)
else:
included = data_message.get_included(do_nesting=do_nesting)
#clean dublicates from included
clean_included = []
for item in included:
if (item in clean_included) == False:
clean_included.append(item)
return clean_included | identifier_body |
decorators.py | """
Copyright (c) 2018, xamoom GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
decorator
contains all decorators of janus. See more in janus.py
spec: http://jsonapi.org/
"""
import traceback
from janus.janus_logging import janus_logger
from janus.janus import DataMessage
from janus.janus import JsonApiMessage
from janus.janus import ErrorMessage
from janus.janus import JanusResponse
class jsonapi(object):
def __init__( self,
meta=None,
links=None,
included=None,
success_status=200,
before_send_hook=None,
include_traceback_in_errors=False,
error_hook=None,
cached_get_hook=None,
cached_set_hook=None,
include_relationships=False,
options_hook=None,
nest_in_responses=False,
logging=False):
self.meta = meta
self.links = links
self.included = included
self.message = None #gets set with the JanusResponse passed from calling function
self.success_status = success_status
self.before_send_hook = before_send_hook
self.include_traceback_in_errors = include_traceback_in_errors
self.error_hook = error_hook
self.include_relationships = include_relationships
self.options_hook = options_hook
self.cached_get_hook = cached_get_hook
self.cached_set_hook = cached_set_hook
self.nest_in_responses = nest_in_responses
if logging:
janus_logger.enable()
else:
janus_logger.disable()
def __call__(self, f):
def wrapped_f(*a, **ka):
try:
#first check if this is not a HTTP OPTIONS call using a method defined based on the WS framework.
#if it is one return empty array and do nothing else.
if self.options_hook != None:
if self.options_hook() == True:
janus_logger.debug("This was an OPTIONS request.")
return {}
response_obj = f(*a, **ka)
#first check if there is an response object
#if not nothing to return so HTTP 204
#otherwise process response
if response_obj == None:
if self.before_send_hook != None:
self.before_send_hook(204,None,None)
janus_logger.debug("Decorated function returned None. Nothing to map.")
return None
else:
#check response object
if isinstance(response_obj,JanusResponse) == False:
#janus_logger.error("Expected JanusResponse got " + str(type(response_obj)))
#raise Exception('Return value has to be instance of JanusResponse')
janus_logger.info("Not a JanusResponse. Will return this as it is. No mapping.")
return response_obj
message = None
#caching
loaded_from_cache = False
if self.cached_get_hook != None:
cached_object = self.cached_get_hook(response_obj)
if cached_object != None:
loaded_from_cache = True
message = cached_object #returned cached, already mapped, response
janus_logger.info("Will return cached message: " + str(loaded_from_cache))
if loaded_from_cache == False: #nothing in cache or cache deactivated
self.message = response_obj.message #get the message type to return
obj = response_obj.data #get the data to return
data = DataMessage.from_object(obj,self.message,do_nesting=self.nest_in_responses) #generate data message with data
#take care of includes
if response_obj.include_relationships != None: self.include_relationships = response_obj.include_relationships
included = None
janus_logger.info("Should map included: " + str(self.include_relationships))
if self.include_relationships:
included = self.__load_included(data,self.nest_in_responses)
#is there custome meta?
if response_obj.meta != None:
if self.meta == None:
self.meta = response_obj.meta
else:
self.meta.update(response_obj.meta)
message = JsonApiMessage(data=data,included=included,meta=self.meta,do_nesting=self.nest_in_responses).to_json() #render json response
#caching
if self.cached_set_hook != None and loaded_from_cache == False:
janus_logger.debug("Caching message")
self.cached_set_hook(response_obj,message)
if self.before_send_hook != None: #fire before send hook
self.before_send_hook(self.success_status,message,response_obj)
return message
except Exception as e:
err_msg = ErrorMessage.from_exception(e)
tb = traceback.format_exc()
if self.include_traceback_in_errors:
if err_msg.meta == None: err_msg.meta = {}
err_msg.traceback = tb
if self.error_hook != None:
self.error_hook(int(err_msg.status),err_msg,tb)
message = JsonApiMessage(errors=err_msg,meta=self.meta).to_json() |
return message
return wrapped_f
def __load_included(self, data_message,do_nesting=False):
included = []
if isinstance(data_message,list):
for d in data_message:
included = included + d.get_included(do_nesting=do_nesting)
else:
included = data_message.get_included(do_nesting=do_nesting)
#clean dublicates from included
clean_included = []
for item in included:
if (item in clean_included) == False:
clean_included.append(item)
return clean_included
class describe(object):
def __init__( self,
success_status=200,
before_send_hook=None,
include_traceback_in_errors=False,
error_hook=None):
self.success_status = success_status
self.before_send_hook = before_send_hook
self.include_traceback_in_errors = include_traceback_in_errors
self.error_hook = error_hook
def __call__(self, f):
def wrapped_f(*a, **ka):
try:
#if this decorator is used the function must return all messages that should be described as a list
messages = f(*a, **ka)
#first check if there is an response object
#if not nothing to return so HTTP 204
#otherwise process response
if messages == None:
if self.before_send_hook != None:
self.before_send_hook(204,None,None)
return None
else:
if isinstance(messages, (list, tuple)) == False:
raise Exception('Methods using the "describe" decorator have to return a list of subclasses of DataMessage to describe.')
msg_descriptions = []
for msg in messages:
if issubclass(msg,DataMessage) == False:
raise Exception('All returned classes in the returned list have to be a subclass of DataMessage.')
msg_descriptions.append(msg().describe())
meta = {'message-types':msg_descriptions}
message = JsonApiMessage(meta=meta).to_json() #render json response
if self.before_send_hook != None: #fire before send hook
self.before_send_hook(self.success_status,message,None)
return message
except Exception as e:
err_msg = ErrorMessage.from_exception(e)
tb = traceback.format_exc()
if self.include_traceback_in_errors:
if err_msg.meta == None: err_msg.meta = {}
err_msg.traceback = tb
if self.error_hook != None:
self.error_hook(int(err_msg.status),err_msg,tb)
message = JsonApiMessage(errors=err_msg).to_json()
return message
return wrapped_f |
janus_logger.error("Traceback: " + tb) | random_line_split |
decorators.py | """
Copyright (c) 2018, xamoom GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
decorator
contains all decorators of janus. See more in janus.py
spec: http://jsonapi.org/
"""
import traceback
from janus.janus_logging import janus_logger
from janus.janus import DataMessage
from janus.janus import JsonApiMessage
from janus.janus import ErrorMessage
from janus.janus import JanusResponse
class jsonapi(object):
def __init__( self,
meta=None,
links=None,
included=None,
success_status=200,
before_send_hook=None,
include_traceback_in_errors=False,
error_hook=None,
cached_get_hook=None,
cached_set_hook=None,
include_relationships=False,
options_hook=None,
nest_in_responses=False,
logging=False):
self.meta = meta
self.links = links
self.included = included
self.message = None #gets set with the JanusResponse passed from calling function
self.success_status = success_status
self.before_send_hook = before_send_hook
self.include_traceback_in_errors = include_traceback_in_errors
self.error_hook = error_hook
self.include_relationships = include_relationships
self.options_hook = options_hook
self.cached_get_hook = cached_get_hook
self.cached_set_hook = cached_set_hook
self.nest_in_responses = nest_in_responses
if logging:
janus_logger.enable()
else:
janus_logger.disable()
def __call__(self, f):
def wrapped_f(*a, **ka):
try:
#first check if this is not a HTTP OPTIONS call using a method defined based on the WS framework.
#if it is one return empty array and do nothing else.
if self.options_hook != None:
if self.options_hook() == True:
janus_logger.debug("This was an OPTIONS request.")
return {}
response_obj = f(*a, **ka)
#first check if there is an response object
#if not nothing to return so HTTP 204
#otherwise process response
if response_obj == None:
if self.before_send_hook != None:
self.before_send_hook(204,None,None)
janus_logger.debug("Decorated function returned None. Nothing to map.")
return None
else:
#check response object
if isinstance(response_obj,JanusResponse) == False:
#janus_logger.error("Expected JanusResponse got " + str(type(response_obj)))
#raise Exception('Return value has to be instance of JanusResponse')
janus_logger.info("Not a JanusResponse. Will return this as it is. No mapping.")
return response_obj
message = None
#caching
loaded_from_cache = False
if self.cached_get_hook != None:
cached_object = self.cached_get_hook(response_obj)
if cached_object != None:
loaded_from_cache = True
message = cached_object #returned cached, already mapped, response
janus_logger.info("Will return cached message: " + str(loaded_from_cache))
if loaded_from_cache == False: #nothing in cache or cache deactivated
self.message = response_obj.message #get the message type to return
obj = response_obj.data #get the data to return
data = DataMessage.from_object(obj,self.message,do_nesting=self.nest_in_responses) #generate data message with data
#take care of includes
if response_obj.include_relationships != None: self.include_relationships = response_obj.include_relationships
included = None
janus_logger.info("Should map included: " + str(self.include_relationships))
if self.include_relationships:
included = self.__load_included(data,self.nest_in_responses)
#is there custome meta?
if response_obj.meta != None:
if self.meta == None:
self.meta = response_obj.meta
else:
self.meta.update(response_obj.meta)
message = JsonApiMessage(data=data,included=included,meta=self.meta,do_nesting=self.nest_in_responses).to_json() #render json response
#caching
if self.cached_set_hook != None and loaded_from_cache == False:
janus_logger.debug("Caching message")
self.cached_set_hook(response_obj,message)
if self.before_send_hook != None: #fire before send hook
self.before_send_hook(self.success_status,message,response_obj)
return message
except Exception as e:
err_msg = ErrorMessage.from_exception(e)
tb = traceback.format_exc()
if self.include_traceback_in_errors:
if err_msg.meta == None: err_msg.meta = {}
err_msg.traceback = tb
if self.error_hook != None:
self.error_hook(int(err_msg.status),err_msg,tb)
message = JsonApiMessage(errors=err_msg,meta=self.meta).to_json()
janus_logger.error("Traceback: " + tb)
return message
return wrapped_f
def __load_included(self, data_message,do_nesting=False):
included = []
if isinstance(data_message,list):
for d in data_message:
included = included + d.get_included(do_nesting=do_nesting)
else:
included = data_message.get_included(do_nesting=do_nesting)
#clean dublicates from included
clean_included = []
for item in included:
if (item in clean_included) == False:
clean_included.append(item)
return clean_included
class describe(object):
def __init__( self,
success_status=200,
before_send_hook=None,
include_traceback_in_errors=False,
error_hook=None):
self.success_status = success_status
self.before_send_hook = before_send_hook
self.include_traceback_in_errors = include_traceback_in_errors
self.error_hook = error_hook
def __call__(self, f):
def wrapped_f(*a, **ka):
try:
#if this decorator is used the function must return all messages that should be described as a list
messages = f(*a, **ka)
#first check if there is an response object
#if not nothing to return so HTTP 204
#otherwise process response
if messages == None:
if self.before_send_hook != None:
|
return None
else:
if isinstance(messages, (list, tuple)) == False:
raise Exception('Methods using the "describe" decorator have to return a list of subclasses of DataMessage to describe.')
msg_descriptions = []
for msg in messages:
if issubclass(msg,DataMessage) == False:
raise Exception('All returned classes in the returned list have to be a subclass of DataMessage.')
msg_descriptions.append(msg().describe())
meta = {'message-types':msg_descriptions}
message = JsonApiMessage(meta=meta).to_json() #render json response
if self.before_send_hook != None: #fire before send hook
self.before_send_hook(self.success_status,message,None)
return message
except Exception as e:
err_msg = ErrorMessage.from_exception(e)
tb = traceback.format_exc()
if self.include_traceback_in_errors:
if err_msg.meta == None: err_msg.meta = {}
err_msg.traceback = tb
if self.error_hook != None:
self.error_hook(int(err_msg.status),err_msg,tb)
message = JsonApiMessage(errors=err_msg).to_json()
return message
return wrapped_f
| self.before_send_hook(204,None,None) | conditional_block |
decorators.py | """
Copyright (c) 2018, xamoom GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
decorator
contains all decorators of janus. See more in janus.py
spec: http://jsonapi.org/
"""
import traceback
from janus.janus_logging import janus_logger
from janus.janus import DataMessage
from janus.janus import JsonApiMessage
from janus.janus import ErrorMessage
from janus.janus import JanusResponse
class jsonapi(object):
def __init__( self,
meta=None,
links=None,
included=None,
success_status=200,
before_send_hook=None,
include_traceback_in_errors=False,
error_hook=None,
cached_get_hook=None,
cached_set_hook=None,
include_relationships=False,
options_hook=None,
nest_in_responses=False,
logging=False):
self.meta = meta
self.links = links
self.included = included
self.message = None #gets set with the JanusResponse passed from calling function
self.success_status = success_status
self.before_send_hook = before_send_hook
self.include_traceback_in_errors = include_traceback_in_errors
self.error_hook = error_hook
self.include_relationships = include_relationships
self.options_hook = options_hook
self.cached_get_hook = cached_get_hook
self.cached_set_hook = cached_set_hook
self.nest_in_responses = nest_in_responses
if logging:
janus_logger.enable()
else:
janus_logger.disable()
def __call__(self, f):
def | (*a, **ka):
try:
#first check if this is not a HTTP OPTIONS call using a method defined based on the WS framework.
#if it is one return empty array and do nothing else.
if self.options_hook != None:
if self.options_hook() == True:
janus_logger.debug("This was an OPTIONS request.")
return {}
response_obj = f(*a, **ka)
#first check if there is an response object
#if not nothing to return so HTTP 204
#otherwise process response
if response_obj == None:
if self.before_send_hook != None:
self.before_send_hook(204,None,None)
janus_logger.debug("Decorated function returned None. Nothing to map.")
return None
else:
#check response object
if isinstance(response_obj,JanusResponse) == False:
#janus_logger.error("Expected JanusResponse got " + str(type(response_obj)))
#raise Exception('Return value has to be instance of JanusResponse')
janus_logger.info("Not a JanusResponse. Will return this as it is. No mapping.")
return response_obj
message = None
#caching
loaded_from_cache = False
if self.cached_get_hook != None:
cached_object = self.cached_get_hook(response_obj)
if cached_object != None:
loaded_from_cache = True
message = cached_object #returned cached, already mapped, response
janus_logger.info("Will return cached message: " + str(loaded_from_cache))
if loaded_from_cache == False: #nothing in cache or cache deactivated
self.message = response_obj.message #get the message type to return
obj = response_obj.data #get the data to return
data = DataMessage.from_object(obj,self.message,do_nesting=self.nest_in_responses) #generate data message with data
#take care of includes
if response_obj.include_relationships != None: self.include_relationships = response_obj.include_relationships
included = None
janus_logger.info("Should map included: " + str(self.include_relationships))
if self.include_relationships:
included = self.__load_included(data,self.nest_in_responses)
#is there custome meta?
if response_obj.meta != None:
if self.meta == None:
self.meta = response_obj.meta
else:
self.meta.update(response_obj.meta)
message = JsonApiMessage(data=data,included=included,meta=self.meta,do_nesting=self.nest_in_responses).to_json() #render json response
#caching
if self.cached_set_hook != None and loaded_from_cache == False:
janus_logger.debug("Caching message")
self.cached_set_hook(response_obj,message)
if self.before_send_hook != None: #fire before send hook
self.before_send_hook(self.success_status,message,response_obj)
return message
except Exception as e:
err_msg = ErrorMessage.from_exception(e)
tb = traceback.format_exc()
if self.include_traceback_in_errors:
if err_msg.meta == None: err_msg.meta = {}
err_msg.traceback = tb
if self.error_hook != None:
self.error_hook(int(err_msg.status),err_msg,tb)
message = JsonApiMessage(errors=err_msg,meta=self.meta).to_json()
janus_logger.error("Traceback: " + tb)
return message
return wrapped_f
def __load_included(self, data_message,do_nesting=False):
included = []
if isinstance(data_message,list):
for d in data_message:
included = included + d.get_included(do_nesting=do_nesting)
else:
included = data_message.get_included(do_nesting=do_nesting)
#clean dublicates from included
clean_included = []
for item in included:
if (item in clean_included) == False:
clean_included.append(item)
return clean_included
class describe(object):
def __init__( self,
success_status=200,
before_send_hook=None,
include_traceback_in_errors=False,
error_hook=None):
self.success_status = success_status
self.before_send_hook = before_send_hook
self.include_traceback_in_errors = include_traceback_in_errors
self.error_hook = error_hook
def __call__(self, f):
def wrapped_f(*a, **ka):
try:
#if this decorator is used the function must return all messages that should be described as a list
messages = f(*a, **ka)
#first check if there is an response object
#if not nothing to return so HTTP 204
#otherwise process response
if messages == None:
if self.before_send_hook != None:
self.before_send_hook(204,None,None)
return None
else:
if isinstance(messages, (list, tuple)) == False:
raise Exception('Methods using the "describe" decorator have to return a list of subclasses of DataMessage to describe.')
msg_descriptions = []
for msg in messages:
if issubclass(msg,DataMessage) == False:
raise Exception('All returned classes in the returned list have to be a subclass of DataMessage.')
msg_descriptions.append(msg().describe())
meta = {'message-types':msg_descriptions}
message = JsonApiMessage(meta=meta).to_json() #render json response
if self.before_send_hook != None: #fire before send hook
self.before_send_hook(self.success_status,message,None)
return message
except Exception as e:
err_msg = ErrorMessage.from_exception(e)
tb = traceback.format_exc()
if self.include_traceback_in_errors:
if err_msg.meta == None: err_msg.meta = {}
err_msg.traceback = tb
if self.error_hook != None:
self.error_hook(int(err_msg.status),err_msg,tb)
message = JsonApiMessage(errors=err_msg).to_json()
return message
return wrapped_f
| wrapped_f | identifier_name |
compression.py | import tensorflow as tf
from sklearn.cluster import KMeans
import tensorflow.keras as keras
from copy import deepcopy
import numpy as np
import h5py
from collections import defaultdict, namedtuple
from heapq import heappush, heappop, heapify
import struct
tf.enable_eager_execution()
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype(np.float32)
x_test = x_test.reshape(-1, 28, 28, 1).astype(np.float32)
x_train, x_test = x_train / 255.0, x_test / 255.0
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
print(x_test.shape)
COMPRESSION_RATE = 0.9
BATCH_SIZE = 50
NUM_BATCHES = 1000
NUM_EPOCH = 1
BITS = 5
MAX_SPAN = 2 ** BITS
LEARNING_RATE = 0.001
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (5, 5), activation='relu', input_shape=[28, 28, 1]),
tf.keras.layers.Conv2D(64, (5, 5), activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=LEARNING_RATE),
loss='categorical_crossentropy',
metrics=['accuracy'])
# history = model.fit(x_train, y_train, validation_split=0.2, epochs=5, batch_size=50)
# score = model.evaluate(x_test, y_test)
# print(score[1])
# model.save_weights('./result/my_model.h5', save_format='h5')
model.load_weights('./result/my_model.h5')
score = model.evaluate(x_test, y_test)
print(score[1])
def get_batch(batch_size):
index = np.random.randint(0, np.shape(x_train)[0], batch_size)
return x_train[index, :], y_train[index]
def prune_weights(weight):
for i in range(weight.shape[-1]):
tmp = deepcopy(weight[..., i])
tmp = np.abs(tmp)
tmp = np.sort(np.array(tmp))
# compute threshold
threshold = tmp[int(tmp.shape[0] * COMPRESSION_RATE)]
weight[..., i][np.abs(weight[..., i]) < threshold] = 0
sparse_matrix = deepcopy(weight)
sparse_matrix[sparse_matrix != 0] = 1
return weight, sparse_matrix
Sparse_layer = {}
# Pruning
for layer_id in range(len(model.layers)):
layer = model.layers[layer_id]
weight = layer.get_weights()
# weight:weight[0]
# bias:weight[1]
if len(weight) > 0:
if layer_id != 0:
w = deepcopy(weight)
new_weight, sparse_matrix = prune_weights(w[0])
Sparse_layer[layer_id] = sparse_matrix
w[0] = new_weight
layer.set_weights(w)
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
# Retrain
for epoch in range(NUM_EPOCH):
for j in range(x_train.shape[0] // BATCH_SIZE):
begin = j*BATCH_SIZE
if j*BATCH_SIZE + BATCH_SIZE > x_train.shape[0]:
end = x_train.shape[0]
else:
end = j*BATCH_SIZE + BATCH_SIZE
X, Y = x_train[begin:end], y_train[begin:end]
# train on each batch
model.train_on_batch(X, Y)
# apply Sparse connection
for layer_id in Sparse_layer:
w = model.layers[layer_id].get_weights()
w[0] = w[0] * Sparse_layer[layer_id]
model.layers[layer_id].set_weights(w)
score = model.evaluate(x_test, y_test, verbose=0)
print('val loss: {}'.format(score[0]))
print('val acc: {}'.format(score[1]))
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
cluster_index = dict()
cluster_centroids = dict()
# Weight Share and Quantization
for layer_id in Sparse_layer:
layer = model.layers[layer_id]
weight = layer.get_weights()
w = deepcopy(weight)
shape = w[0].shape
weight_array = w[0].flatten()
nonzero_weight = w[0][Sparse_layer[layer_id] != 0].flatten()
nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
max_weight = max(nonzero_weight)
min_weight = min(nonzero_weight)
space = np.linspace(min_weight, max_weight, num=2 ** BITS)
kmeans = KMeans(n_clusters=len(space), init=space.reshape(-1, 1), n_init=1, precompute_distances=True,
algorithm="full")
kmeans.fit(nonzero_weight.reshape(-1, 1))
# cluster index of each weight
layer_cluster_index = kmeans.labels_
# value of the centroids
layer_centroids = kmeans.cluster_centers_.flatten()
# Add to dict
cluster_index[layer_id] = layer_cluster_index
cluster_centroids[layer_id] = layer_centroids
# set new weight
new_weight = kmeans.cluster_centers_[kmeans.labels_].flatten()
for idx in range(len(nonzero_index)):
index = nonzero_index[idx]
weight_array[index] = new_weight[idx]
# new_weight = kmeans.cluster_centers_[kmeans.labels_].reshape(shape)
# w[0] = new_weight
w[0] = weight_array.reshape(shape)
layer.set_weights(w)
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
# calculate gradient and get the fine-tuned centroids
# for epoch in range(NUM_EPOCH):
# for j in range(x_train.shape[0] // BATCH_SIZE):
# begin = j * BATCH_SIZE
# if j * BATCH_SIZE + BATCH_SIZE > x_train.shape[0]:
# end = x_train.shape[0]
# else:
# end = j * BATCH_SIZE + BATCH_SIZE
# X, Y = x_train[begin:end], y_train[begin:end]
# with tf.GradientTape() as tape:
# y_predict = model(X)
# loss = tf.losses.softmax_cross_entropy(onehot_labels=Y, logits=y_predict)
# grads = tape.gradient(loss, model.variables)
# gradient_num = 0
# for layer_id in Sparse_layer:
# gradient_num += 2
# gradient = grads[gradient_num].numpy().flatten()
#
# # Get the gradient of the nonzero position
# nonzero_gradient = gradient[Sparse_layer[layer_id].flatten() != 0].flatten()
# nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
# # print(len(nonzero_gradient))
#
# gradient_index = np.zeros(2 ** BITS)
# # Calculate the sum of gradient of the same cluster
# for i in range(len(nonzero_gradient)):
# gradient_index[cluster_index[layer_id][i]] += gradient[i]
# # Update centroid
# fine_tuned_centroids = cluster_centroids[layer_id]-LEARNING_RATE*gradient_index
# cluster_centroids[layer_id] = fine_tuned_centroids
#
# w = model.layers[layer_id].get_weights()
# shape = w[0].shape
# weight_array = w[0].flatten()
# new_weight = fine_tuned_centroids[cluster_index[layer_id]]
# for idx in range(len(nonzero_index)):
# index = nonzero_index[idx]
# weight_array[index] = new_weight[idx]
#
# w[0] = weight_array.reshape(shape)
# model.layers[layer_id].set_weights(w)
# score = model.evaluate(x_test, y_test, verbose=0)
# print('val loss: {}'.format(score[0]))
# print('val acc: {}'.format(score[1]))
print('-------------------')
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
layer_relative_index = dict()
layer_weight_cluster_index = dict()
Node = namedtuple('Node', ['frequency', 'value', 'left', 'right'])
Node.__lt__ = lambda x, y: x.frequency < y.frequency
def | (root):
"""
Encodes a huffman tree to string of '0's and '1's
"""
# converter = {'float32':float2bitstr, 'int32':int2bitstr}
code_list = []
def encode_node(node):
if node.value is not None: # node is leaf node
code_list.append('1')
lst = list(int2bitstr(node.value))
code_list.extend(lst)
else:
code_list.append('0')
encode_node(node.left)
encode_node(node.right)
encode_node(root)
return ''.join(code_list)
def int2bitstr(integer):
four_bytes = struct.pack('>I', integer) # bytes
return ''.join(f'{byte:08b}' for byte in four_bytes) # string of '0's and '1's
def bitstr2int(bitstr):
byte_arr = bytearray(int(bitstr[i:i + 8], 2) for i in range(0, len(bitstr), 8))
return struct.unpack('>I', byte_arr)[0]
def huffman_encode(arr):
# count the frequency of each number in array
frequency_map = defaultdict(int)
for value in np.nditer(arr):
value = int(value)
frequency_map[value] += 1
heap = [Node(frequency, value, None, None) for value, frequency in frequency_map.items()]
heapify(heap)
# Merge nodes
while len(heap) > 1:
node1 = heappop(heap)
node2 = heappop(heap)
merged = Node(node1.frequency + node2.frequency, None, node1, node2)
heappush(heap, merged)
# Generate code value mapping
value2code = dict()
def generate_code(node, code):
if node is None:
return
if node.value is not None:
value2code[node.value] = code
return
generate_code(node.left, code + '0')
generate_code(node.right, code + '1')
root = heappop(heap)
generate_code(root, '')
data_encoding = ''.join(value2code[int(value)] for value in np.nditer(arr))
codebook_encoding = encode_huffman_tree(root)
return data_encoding, codebook_encoding
# Matrix sparsity with relative index
for layer_id in Sparse_layer:
layer = model.layers[layer_id]
weight = layer.get_weights()
w = deepcopy(weight)
shape = w[0].shape
weight_array = w[0].flatten()
# nonzero_weight = w[0][Sparse_layer[layer_id] != 0].flatten()
# print(len(nonzero_weight))
nonzero_weight_cluster_index = cluster_index[layer_id]
print(len(nonzero_weight_cluster_index))
nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
first = nonzero_index[0]
relative = np.insert(np.diff(nonzero_index), 0, first)
relative_diff_index = relative.tolist()
weight_cluster_index = nonzero_weight_cluster_index.tolist()
shift = 0
for i in np.where(relative > MAX_SPAN)[0].tolist():
while relative_diff_index[i + shift] > MAX_SPAN:
relative_diff_index.insert(i + shift, MAX_SPAN)
weight_cluster_index.insert(i + shift, 0)
shift += 1
relative_diff_index[i + shift] -= MAX_SPAN
layer_relative_index[layer_id] = np.array(relative_diff_index)
data_encoding, codebook_encoding = huffman_encode(np.array(weight_cluster_index))
# layer_weight_cluster_index[layer_id] = np.array(weight_cluster_index)
layer_weight_cluster_index[layer_id] = np.array([data_encoding, codebook_encoding])
print('----------------')
# print(layer_weight_value[5])
# encode
file_name = './result/compressed_model2'
file = h5py.File('{}.h5'.format(file_name), mode='w')
for layer_id in range(len(model.layers)):
layer = model.layers[layer_id]
weight = layer.get_weights()
if len(weight) > 0:
file_layer = file.create_group(layer.name)
shape = weight[0].shape
if layer_id != 0:
print(len(weight[0].shape))
pshape = file_layer.create_dataset('shape', np.array(shape).shape, dtype='int32')
pindex = file_layer.create_dataset('index', layer_relative_index[layer_id].shape, dtype='int32')
# pcluster_index = file_layer.create_dataset('cluster_index', layer_weight_cluster_index[layer_id].shape,
# dtype='int32')
pcluster_index = file_layer.create_dataset('cluster_index', layer_weight_cluster_index[layer_id].shape,
dtype=h5py.special_dtype(vlen=str))
pcentroid = file_layer.create_dataset('centroid', cluster_centroids[layer_id].shape, dtype='float32')
pshape[:] = np.array(shape)
pindex[:] = layer_relative_index[layer_id]
pcluster_index[:] = layer_weight_cluster_index[layer_id]
pcentroid[:] = cluster_centroids[layer_id]
else:
pweight = file_layer.create_dataset('weight', weight[0].shape, dtype='float32')
pweight[:] = weight[0]
pbias = file_layer.create_dataset('bias', weight[1].shape, dtype='float32')
pbias[:] = weight[1]
file.flush()
file.close()
| encode_huffman_tree | identifier_name |
compression.py | import tensorflow as tf
from sklearn.cluster import KMeans
import tensorflow.keras as keras
from copy import deepcopy
import numpy as np
import h5py
from collections import defaultdict, namedtuple
from heapq import heappush, heappop, heapify
import struct
tf.enable_eager_execution()
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype(np.float32)
x_test = x_test.reshape(-1, 28, 28, 1).astype(np.float32)
x_train, x_test = x_train / 255.0, x_test / 255.0
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
print(x_test.shape)
COMPRESSION_RATE = 0.9
BATCH_SIZE = 50
NUM_BATCHES = 1000
NUM_EPOCH = 1
BITS = 5
MAX_SPAN = 2 ** BITS
LEARNING_RATE = 0.001
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (5, 5), activation='relu', input_shape=[28, 28, 1]),
tf.keras.layers.Conv2D(64, (5, 5), activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=LEARNING_RATE),
loss='categorical_crossentropy',
metrics=['accuracy'])
# history = model.fit(x_train, y_train, validation_split=0.2, epochs=5, batch_size=50)
# score = model.evaluate(x_test, y_test)
# print(score[1])
# model.save_weights('./result/my_model.h5', save_format='h5')
model.load_weights('./result/my_model.h5')
score = model.evaluate(x_test, y_test)
print(score[1])
def get_batch(batch_size):
index = np.random.randint(0, np.shape(x_train)[0], batch_size)
return x_train[index, :], y_train[index]
def prune_weights(weight):
for i in range(weight.shape[-1]):
tmp = deepcopy(weight[..., i])
tmp = np.abs(tmp)
tmp = np.sort(np.array(tmp))
# compute threshold
threshold = tmp[int(tmp.shape[0] * COMPRESSION_RATE)]
weight[..., i][np.abs(weight[..., i]) < threshold] = 0
sparse_matrix = deepcopy(weight)
sparse_matrix[sparse_matrix != 0] = 1
return weight, sparse_matrix
Sparse_layer = {}
# Pruning
for layer_id in range(len(model.layers)):
layer = model.layers[layer_id]
weight = layer.get_weights()
# weight:weight[0]
# bias:weight[1]
if len(weight) > 0:
if layer_id != 0:
w = deepcopy(weight)
new_weight, sparse_matrix = prune_weights(w[0])
Sparse_layer[layer_id] = sparse_matrix
w[0] = new_weight
layer.set_weights(w)
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
# Retrain
for epoch in range(NUM_EPOCH):
for j in range(x_train.shape[0] // BATCH_SIZE):
begin = j*BATCH_SIZE
if j*BATCH_SIZE + BATCH_SIZE > x_train.shape[0]:
end = x_train.shape[0]
else:
end = j*BATCH_SIZE + BATCH_SIZE
X, Y = x_train[begin:end], y_train[begin:end]
# train on each batch
model.train_on_batch(X, Y)
# apply Sparse connection
for layer_id in Sparse_layer:
w = model.layers[layer_id].get_weights()
w[0] = w[0] * Sparse_layer[layer_id]
model.layers[layer_id].set_weights(w)
score = model.evaluate(x_test, y_test, verbose=0)
print('val loss: {}'.format(score[0]))
print('val acc: {}'.format(score[1]))
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
cluster_index = dict()
cluster_centroids = dict()
# Weight Share and Quantization
for layer_id in Sparse_layer:
layer = model.layers[layer_id]
weight = layer.get_weights()
w = deepcopy(weight)
shape = w[0].shape
weight_array = w[0].flatten()
nonzero_weight = w[0][Sparse_layer[layer_id] != 0].flatten()
nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
max_weight = max(nonzero_weight)
min_weight = min(nonzero_weight)
space = np.linspace(min_weight, max_weight, num=2 ** BITS)
kmeans = KMeans(n_clusters=len(space), init=space.reshape(-1, 1), n_init=1, precompute_distances=True,
algorithm="full")
kmeans.fit(nonzero_weight.reshape(-1, 1))
# cluster index of each weight
layer_cluster_index = kmeans.labels_
# value of the centroids
layer_centroids = kmeans.cluster_centers_.flatten()
# Add to dict
cluster_index[layer_id] = layer_cluster_index
cluster_centroids[layer_id] = layer_centroids
# set new weight
new_weight = kmeans.cluster_centers_[kmeans.labels_].flatten()
for idx in range(len(nonzero_index)):
index = nonzero_index[idx]
weight_array[index] = new_weight[idx]
# new_weight = kmeans.cluster_centers_[kmeans.labels_].reshape(shape)
# w[0] = new_weight
w[0] = weight_array.reshape(shape)
layer.set_weights(w)
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
# calculate gradient and get the fine-tuned centroids
# for epoch in range(NUM_EPOCH):
# for j in range(x_train.shape[0] // BATCH_SIZE):
# begin = j * BATCH_SIZE
# if j * BATCH_SIZE + BATCH_SIZE > x_train.shape[0]:
# end = x_train.shape[0]
# else:
# end = j * BATCH_SIZE + BATCH_SIZE
# X, Y = x_train[begin:end], y_train[begin:end]
# with tf.GradientTape() as tape:
# y_predict = model(X)
# loss = tf.losses.softmax_cross_entropy(onehot_labels=Y, logits=y_predict)
# grads = tape.gradient(loss, model.variables)
# gradient_num = 0
# for layer_id in Sparse_layer:
# gradient_num += 2
# gradient = grads[gradient_num].numpy().flatten()
#
# # Get the gradient of the nonzero position
# nonzero_gradient = gradient[Sparse_layer[layer_id].flatten() != 0].flatten()
# nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
# # print(len(nonzero_gradient))
#
# gradient_index = np.zeros(2 ** BITS)
# # Calculate the sum of gradient of the same cluster
# for i in range(len(nonzero_gradient)):
# gradient_index[cluster_index[layer_id][i]] += gradient[i]
# # Update centroid
# fine_tuned_centroids = cluster_centroids[layer_id]-LEARNING_RATE*gradient_index
# cluster_centroids[layer_id] = fine_tuned_centroids
#
# w = model.layers[layer_id].get_weights()
# shape = w[0].shape
# weight_array = w[0].flatten()
# new_weight = fine_tuned_centroids[cluster_index[layer_id]]
# for idx in range(len(nonzero_index)):
# index = nonzero_index[idx]
# weight_array[index] = new_weight[idx]
#
# w[0] = weight_array.reshape(shape)
# model.layers[layer_id].set_weights(w)
# score = model.evaluate(x_test, y_test, verbose=0)
# print('val loss: {}'.format(score[0]))
# print('val acc: {}'.format(score[1]))
print('-------------------')
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
layer_relative_index = dict()
layer_weight_cluster_index = dict()
Node = namedtuple('Node', ['frequency', 'value', 'left', 'right'])
Node.__lt__ = lambda x, y: x.frequency < y.frequency
def encode_huffman_tree(root):
"""
Encodes a huffman tree to string of '0's and '1's
"""
# converter = {'float32':float2bitstr, 'int32':int2bitstr}
code_list = []
def encode_node(node):
if node.value is not None: # node is leaf node
code_list.append('1')
lst = list(int2bitstr(node.value))
code_list.extend(lst)
else:
code_list.append('0')
encode_node(node.left)
encode_node(node.right)
encode_node(root)
return ''.join(code_list)
def int2bitstr(integer):
four_bytes = struct.pack('>I', integer) # bytes
return ''.join(f'{byte:08b}' for byte in four_bytes) # string of '0's and '1's
def bitstr2int(bitstr):
byte_arr = bytearray(int(bitstr[i:i + 8], 2) for i in range(0, len(bitstr), 8))
return struct.unpack('>I', byte_arr)[0]
def huffman_encode(arr):
# count the frequency of each number in array
|
# Matrix sparsity with relative index
for layer_id in Sparse_layer:
layer = model.layers[layer_id]
weight = layer.get_weights()
w = deepcopy(weight)
shape = w[0].shape
weight_array = w[0].flatten()
# nonzero_weight = w[0][Sparse_layer[layer_id] != 0].flatten()
# print(len(nonzero_weight))
nonzero_weight_cluster_index = cluster_index[layer_id]
print(len(nonzero_weight_cluster_index))
nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
first = nonzero_index[0]
relative = np.insert(np.diff(nonzero_index), 0, first)
relative_diff_index = relative.tolist()
weight_cluster_index = nonzero_weight_cluster_index.tolist()
shift = 0
for i in np.where(relative > MAX_SPAN)[0].tolist():
while relative_diff_index[i + shift] > MAX_SPAN:
relative_diff_index.insert(i + shift, MAX_SPAN)
weight_cluster_index.insert(i + shift, 0)
shift += 1
relative_diff_index[i + shift] -= MAX_SPAN
layer_relative_index[layer_id] = np.array(relative_diff_index)
data_encoding, codebook_encoding = huffman_encode(np.array(weight_cluster_index))
# layer_weight_cluster_index[layer_id] = np.array(weight_cluster_index)
layer_weight_cluster_index[layer_id] = np.array([data_encoding, codebook_encoding])
print('----------------')
# print(layer_weight_value[5])
# encode
file_name = './result/compressed_model2'
file = h5py.File('{}.h5'.format(file_name), mode='w')
for layer_id in range(len(model.layers)):
layer = model.layers[layer_id]
weight = layer.get_weights()
if len(weight) > 0:
file_layer = file.create_group(layer.name)
shape = weight[0].shape
if layer_id != 0:
print(len(weight[0].shape))
pshape = file_layer.create_dataset('shape', np.array(shape).shape, dtype='int32')
pindex = file_layer.create_dataset('index', layer_relative_index[layer_id].shape, dtype='int32')
# pcluster_index = file_layer.create_dataset('cluster_index', layer_weight_cluster_index[layer_id].shape,
# dtype='int32')
pcluster_index = file_layer.create_dataset('cluster_index', layer_weight_cluster_index[layer_id].shape,
dtype=h5py.special_dtype(vlen=str))
pcentroid = file_layer.create_dataset('centroid', cluster_centroids[layer_id].shape, dtype='float32')
pshape[:] = np.array(shape)
pindex[:] = layer_relative_index[layer_id]
pcluster_index[:] = layer_weight_cluster_index[layer_id]
pcentroid[:] = cluster_centroids[layer_id]
else:
pweight = file_layer.create_dataset('weight', weight[0].shape, dtype='float32')
pweight[:] = weight[0]
pbias = file_layer.create_dataset('bias', weight[1].shape, dtype='float32')
pbias[:] = weight[1]
file.flush()
file.close()
| frequency_map = defaultdict(int)
for value in np.nditer(arr):
value = int(value)
frequency_map[value] += 1
heap = [Node(frequency, value, None, None) for value, frequency in frequency_map.items()]
heapify(heap)
# Merge nodes
while len(heap) > 1:
node1 = heappop(heap)
node2 = heappop(heap)
merged = Node(node1.frequency + node2.frequency, None, node1, node2)
heappush(heap, merged)
# Generate code value mapping
value2code = dict()
def generate_code(node, code):
if node is None:
return
if node.value is not None:
value2code[node.value] = code
return
generate_code(node.left, code + '0')
generate_code(node.right, code + '1')
root = heappop(heap)
generate_code(root, '')
data_encoding = ''.join(value2code[int(value)] for value in np.nditer(arr))
codebook_encoding = encode_huffman_tree(root)
return data_encoding, codebook_encoding | identifier_body |
compression.py | import tensorflow as tf
from sklearn.cluster import KMeans
import tensorflow.keras as keras
from copy import deepcopy
import numpy as np
import h5py
from collections import defaultdict, namedtuple
from heapq import heappush, heappop, heapify
import struct
tf.enable_eager_execution()
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype(np.float32)
x_test = x_test.reshape(-1, 28, 28, 1).astype(np.float32)
x_train, x_test = x_train / 255.0, x_test / 255.0
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
print(x_test.shape)
COMPRESSION_RATE = 0.9
BATCH_SIZE = 50
NUM_BATCHES = 1000
NUM_EPOCH = 1
BITS = 5
MAX_SPAN = 2 ** BITS
LEARNING_RATE = 0.001
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (5, 5), activation='relu', input_shape=[28, 28, 1]),
tf.keras.layers.Conv2D(64, (5, 5), activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=LEARNING_RATE),
loss='categorical_crossentropy',
metrics=['accuracy'])
# history = model.fit(x_train, y_train, validation_split=0.2, epochs=5, batch_size=50)
# score = model.evaluate(x_test, y_test)
# print(score[1])
# model.save_weights('./result/my_model.h5', save_format='h5')
model.load_weights('./result/my_model.h5')
score = model.evaluate(x_test, y_test)
print(score[1])
def get_batch(batch_size):
index = np.random.randint(0, np.shape(x_train)[0], batch_size)
return x_train[index, :], y_train[index]
def prune_weights(weight):
for i in range(weight.shape[-1]):
tmp = deepcopy(weight[..., i])
tmp = np.abs(tmp)
tmp = np.sort(np.array(tmp))
# compute threshold
threshold = tmp[int(tmp.shape[0] * COMPRESSION_RATE)]
weight[..., i][np.abs(weight[..., i]) < threshold] = 0
sparse_matrix = deepcopy(weight)
sparse_matrix[sparse_matrix != 0] = 1
return weight, sparse_matrix
Sparse_layer = {}
# Pruning
for layer_id in range(len(model.layers)):
layer = model.layers[layer_id]
weight = layer.get_weights()
# weight:weight[0]
# bias:weight[1]
if len(weight) > 0:
if layer_id != 0:
w = deepcopy(weight)
new_weight, sparse_matrix = prune_weights(w[0])
Sparse_layer[layer_id] = sparse_matrix
w[0] = new_weight
layer.set_weights(w)
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
# Retrain
for epoch in range(NUM_EPOCH):
for j in range(x_train.shape[0] // BATCH_SIZE):
begin = j*BATCH_SIZE
if j*BATCH_SIZE + BATCH_SIZE > x_train.shape[0]:
end = x_train.shape[0]
else:
end = j*BATCH_SIZE + BATCH_SIZE
X, Y = x_train[begin:end], y_train[begin:end]
# train on each batch
model.train_on_batch(X, Y)
# apply Sparse connection
for layer_id in Sparse_layer:
w = model.layers[layer_id].get_weights()
w[0] = w[0] * Sparse_layer[layer_id]
model.layers[layer_id].set_weights(w)
score = model.evaluate(x_test, y_test, verbose=0)
print('val loss: {}'.format(score[0]))
print('val acc: {}'.format(score[1]))
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
cluster_index = dict()
cluster_centroids = dict()
# Weight Share and Quantization
for layer_id in Sparse_layer:
layer = model.layers[layer_id]
weight = layer.get_weights()
w = deepcopy(weight)
shape = w[0].shape
weight_array = w[0].flatten()
nonzero_weight = w[0][Sparse_layer[layer_id] != 0].flatten()
nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
max_weight = max(nonzero_weight)
min_weight = min(nonzero_weight)
space = np.linspace(min_weight, max_weight, num=2 ** BITS)
kmeans = KMeans(n_clusters=len(space), init=space.reshape(-1, 1), n_init=1, precompute_distances=True,
algorithm="full")
kmeans.fit(nonzero_weight.reshape(-1, 1))
# cluster index of each weight
layer_cluster_index = kmeans.labels_
# value of the centroids
layer_centroids = kmeans.cluster_centers_.flatten()
# Add to dict
cluster_index[layer_id] = layer_cluster_index
cluster_centroids[layer_id] = layer_centroids
# set new weight
new_weight = kmeans.cluster_centers_[kmeans.labels_].flatten()
for idx in range(len(nonzero_index)):
index = nonzero_index[idx]
weight_array[index] = new_weight[idx]
# new_weight = kmeans.cluster_centers_[kmeans.labels_].reshape(shape)
# w[0] = new_weight
w[0] = weight_array.reshape(shape)
layer.set_weights(w)
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
# calculate gradient and get the fine-tuned centroids
# for epoch in range(NUM_EPOCH):
# for j in range(x_train.shape[0] // BATCH_SIZE):
# begin = j * BATCH_SIZE
# if j * BATCH_SIZE + BATCH_SIZE > x_train.shape[0]:
# end = x_train.shape[0]
# else:
# end = j * BATCH_SIZE + BATCH_SIZE
# X, Y = x_train[begin:end], y_train[begin:end]
# with tf.GradientTape() as tape:
# y_predict = model(X)
# loss = tf.losses.softmax_cross_entropy(onehot_labels=Y, logits=y_predict)
# grads = tape.gradient(loss, model.variables)
# gradient_num = 0 | # gradient = grads[gradient_num].numpy().flatten()
#
# # Get the gradient of the nonzero position
# nonzero_gradient = gradient[Sparse_layer[layer_id].flatten() != 0].flatten()
# nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
# # print(len(nonzero_gradient))
#
# gradient_index = np.zeros(2 ** BITS)
# # Calculate the sum of gradient of the same cluster
# for i in range(len(nonzero_gradient)):
# gradient_index[cluster_index[layer_id][i]] += gradient[i]
# # Update centroid
# fine_tuned_centroids = cluster_centroids[layer_id]-LEARNING_RATE*gradient_index
# cluster_centroids[layer_id] = fine_tuned_centroids
#
# w = model.layers[layer_id].get_weights()
# shape = w[0].shape
# weight_array = w[0].flatten()
# new_weight = fine_tuned_centroids[cluster_index[layer_id]]
# for idx in range(len(nonzero_index)):
# index = nonzero_index[idx]
# weight_array[index] = new_weight[idx]
#
# w[0] = weight_array.reshape(shape)
# model.layers[layer_id].set_weights(w)
# score = model.evaluate(x_test, y_test, verbose=0)
# print('val loss: {}'.format(score[0]))
# print('val acc: {}'.format(score[1]))
print('-------------------')
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
layer_relative_index = dict()
layer_weight_cluster_index = dict()
Node = namedtuple('Node', ['frequency', 'value', 'left', 'right'])
Node.__lt__ = lambda x, y: x.frequency < y.frequency
def encode_huffman_tree(root):
"""
Encodes a huffman tree to string of '0's and '1's
"""
# converter = {'float32':float2bitstr, 'int32':int2bitstr}
code_list = []
def encode_node(node):
if node.value is not None: # node is leaf node
code_list.append('1')
lst = list(int2bitstr(node.value))
code_list.extend(lst)
else:
code_list.append('0')
encode_node(node.left)
encode_node(node.right)
encode_node(root)
return ''.join(code_list)
def int2bitstr(integer):
four_bytes = struct.pack('>I', integer) # bytes
return ''.join(f'{byte:08b}' for byte in four_bytes) # string of '0's and '1's
def bitstr2int(bitstr):
byte_arr = bytearray(int(bitstr[i:i + 8], 2) for i in range(0, len(bitstr), 8))
return struct.unpack('>I', byte_arr)[0]
def huffman_encode(arr):
# count the frequency of each number in array
frequency_map = defaultdict(int)
for value in np.nditer(arr):
value = int(value)
frequency_map[value] += 1
heap = [Node(frequency, value, None, None) for value, frequency in frequency_map.items()]
heapify(heap)
# Merge nodes
while len(heap) > 1:
node1 = heappop(heap)
node2 = heappop(heap)
merged = Node(node1.frequency + node2.frequency, None, node1, node2)
heappush(heap, merged)
# Generate code value mapping
value2code = dict()
def generate_code(node, code):
if node is None:
return
if node.value is not None:
value2code[node.value] = code
return
generate_code(node.left, code + '0')
generate_code(node.right, code + '1')
root = heappop(heap)
generate_code(root, '')
data_encoding = ''.join(value2code[int(value)] for value in np.nditer(arr))
codebook_encoding = encode_huffman_tree(root)
return data_encoding, codebook_encoding
# Matrix sparsity with relative index
for layer_id in Sparse_layer:
layer = model.layers[layer_id]
weight = layer.get_weights()
w = deepcopy(weight)
shape = w[0].shape
weight_array = w[0].flatten()
# nonzero_weight = w[0][Sparse_layer[layer_id] != 0].flatten()
# print(len(nonzero_weight))
nonzero_weight_cluster_index = cluster_index[layer_id]
print(len(nonzero_weight_cluster_index))
nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
first = nonzero_index[0]
relative = np.insert(np.diff(nonzero_index), 0, first)
relative_diff_index = relative.tolist()
weight_cluster_index = nonzero_weight_cluster_index.tolist()
shift = 0
for i in np.where(relative > MAX_SPAN)[0].tolist():
while relative_diff_index[i + shift] > MAX_SPAN:
relative_diff_index.insert(i + shift, MAX_SPAN)
weight_cluster_index.insert(i + shift, 0)
shift += 1
relative_diff_index[i + shift] -= MAX_SPAN
layer_relative_index[layer_id] = np.array(relative_diff_index)
data_encoding, codebook_encoding = huffman_encode(np.array(weight_cluster_index))
# layer_weight_cluster_index[layer_id] = np.array(weight_cluster_index)
layer_weight_cluster_index[layer_id] = np.array([data_encoding, codebook_encoding])
print('----------------')
# print(layer_weight_value[5])
# encode
file_name = './result/compressed_model2'
file = h5py.File('{}.h5'.format(file_name), mode='w')
for layer_id in range(len(model.layers)):
layer = model.layers[layer_id]
weight = layer.get_weights()
if len(weight) > 0:
file_layer = file.create_group(layer.name)
shape = weight[0].shape
if layer_id != 0:
print(len(weight[0].shape))
pshape = file_layer.create_dataset('shape', np.array(shape).shape, dtype='int32')
pindex = file_layer.create_dataset('index', layer_relative_index[layer_id].shape, dtype='int32')
# pcluster_index = file_layer.create_dataset('cluster_index', layer_weight_cluster_index[layer_id].shape,
# dtype='int32')
pcluster_index = file_layer.create_dataset('cluster_index', layer_weight_cluster_index[layer_id].shape,
dtype=h5py.special_dtype(vlen=str))
pcentroid = file_layer.create_dataset('centroid', cluster_centroids[layer_id].shape, dtype='float32')
pshape[:] = np.array(shape)
pindex[:] = layer_relative_index[layer_id]
pcluster_index[:] = layer_weight_cluster_index[layer_id]
pcentroid[:] = cluster_centroids[layer_id]
else:
pweight = file_layer.create_dataset('weight', weight[0].shape, dtype='float32')
pweight[:] = weight[0]
pbias = file_layer.create_dataset('bias', weight[1].shape, dtype='float32')
pbias[:] = weight[1]
file.flush()
file.close() | # for layer_id in Sparse_layer:
# gradient_num += 2 | random_line_split |
compression.py | import tensorflow as tf
from sklearn.cluster import KMeans
import tensorflow.keras as keras
from copy import deepcopy
import numpy as np
import h5py
from collections import defaultdict, namedtuple
from heapq import heappush, heappop, heapify
import struct
tf.enable_eager_execution()
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype(np.float32)
x_test = x_test.reshape(-1, 28, 28, 1).astype(np.float32)
x_train, x_test = x_train / 255.0, x_test / 255.0
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
print(x_test.shape)
COMPRESSION_RATE = 0.9
BATCH_SIZE = 50
NUM_BATCHES = 1000
NUM_EPOCH = 1
BITS = 5
MAX_SPAN = 2 ** BITS
LEARNING_RATE = 0.001
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (5, 5), activation='relu', input_shape=[28, 28, 1]),
tf.keras.layers.Conv2D(64, (5, 5), activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=LEARNING_RATE),
loss='categorical_crossentropy',
metrics=['accuracy'])
# history = model.fit(x_train, y_train, validation_split=0.2, epochs=5, batch_size=50)
# score = model.evaluate(x_test, y_test)
# print(score[1])
# model.save_weights('./result/my_model.h5', save_format='h5')
model.load_weights('./result/my_model.h5')
score = model.evaluate(x_test, y_test)
print(score[1])
def get_batch(batch_size):
index = np.random.randint(0, np.shape(x_train)[0], batch_size)
return x_train[index, :], y_train[index]
def prune_weights(weight):
for i in range(weight.shape[-1]):
tmp = deepcopy(weight[..., i])
tmp = np.abs(tmp)
tmp = np.sort(np.array(tmp))
# compute threshold
threshold = tmp[int(tmp.shape[0] * COMPRESSION_RATE)]
weight[..., i][np.abs(weight[..., i]) < threshold] = 0
sparse_matrix = deepcopy(weight)
sparse_matrix[sparse_matrix != 0] = 1
return weight, sparse_matrix
Sparse_layer = {}
# Pruning
for layer_id in range(len(model.layers)):
layer = model.layers[layer_id]
weight = layer.get_weights()
# weight:weight[0]
# bias:weight[1]
if len(weight) > 0:
if layer_id != 0:
w = deepcopy(weight)
new_weight, sparse_matrix = prune_weights(w[0])
Sparse_layer[layer_id] = sparse_matrix
w[0] = new_weight
layer.set_weights(w)
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
# Retrain
for epoch in range(NUM_EPOCH):
for j in range(x_train.shape[0] // BATCH_SIZE):
begin = j*BATCH_SIZE
if j*BATCH_SIZE + BATCH_SIZE > x_train.shape[0]:
|
else:
end = j*BATCH_SIZE + BATCH_SIZE
X, Y = x_train[begin:end], y_train[begin:end]
# train on each batch
model.train_on_batch(X, Y)
# apply Sparse connection
for layer_id in Sparse_layer:
w = model.layers[layer_id].get_weights()
w[0] = w[0] * Sparse_layer[layer_id]
model.layers[layer_id].set_weights(w)
score = model.evaluate(x_test, y_test, verbose=0)
print('val loss: {}'.format(score[0]))
print('val acc: {}'.format(score[1]))
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
cluster_index = dict()
cluster_centroids = dict()
# Weight Share and Quantization
for layer_id in Sparse_layer:
layer = model.layers[layer_id]
weight = layer.get_weights()
w = deepcopy(weight)
shape = w[0].shape
weight_array = w[0].flatten()
nonzero_weight = w[0][Sparse_layer[layer_id] != 0].flatten()
nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
max_weight = max(nonzero_weight)
min_weight = min(nonzero_weight)
space = np.linspace(min_weight, max_weight, num=2 ** BITS)
kmeans = KMeans(n_clusters=len(space), init=space.reshape(-1, 1), n_init=1, precompute_distances=True,
algorithm="full")
kmeans.fit(nonzero_weight.reshape(-1, 1))
# cluster index of each weight
layer_cluster_index = kmeans.labels_
# value of the centroids
layer_centroids = kmeans.cluster_centers_.flatten()
# Add to dict
cluster_index[layer_id] = layer_cluster_index
cluster_centroids[layer_id] = layer_centroids
# set new weight
new_weight = kmeans.cluster_centers_[kmeans.labels_].flatten()
for idx in range(len(nonzero_index)):
index = nonzero_index[idx]
weight_array[index] = new_weight[idx]
# new_weight = kmeans.cluster_centers_[kmeans.labels_].reshape(shape)
# w[0] = new_weight
w[0] = weight_array.reshape(shape)
layer.set_weights(w)
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
# calculate gradient and get the fine-tuned centroids
# for epoch in range(NUM_EPOCH):
# for j in range(x_train.shape[0] // BATCH_SIZE):
# begin = j * BATCH_SIZE
# if j * BATCH_SIZE + BATCH_SIZE > x_train.shape[0]:
# end = x_train.shape[0]
# else:
# end = j * BATCH_SIZE + BATCH_SIZE
# X, Y = x_train[begin:end], y_train[begin:end]
# with tf.GradientTape() as tape:
# y_predict = model(X)
# loss = tf.losses.softmax_cross_entropy(onehot_labels=Y, logits=y_predict)
# grads = tape.gradient(loss, model.variables)
# gradient_num = 0
# for layer_id in Sparse_layer:
# gradient_num += 2
# gradient = grads[gradient_num].numpy().flatten()
#
# # Get the gradient of the nonzero position
# nonzero_gradient = gradient[Sparse_layer[layer_id].flatten() != 0].flatten()
# nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
# # print(len(nonzero_gradient))
#
# gradient_index = np.zeros(2 ** BITS)
# # Calculate the sum of gradient of the same cluster
# for i in range(len(nonzero_gradient)):
# gradient_index[cluster_index[layer_id][i]] += gradient[i]
# # Update centroid
# fine_tuned_centroids = cluster_centroids[layer_id]-LEARNING_RATE*gradient_index
# cluster_centroids[layer_id] = fine_tuned_centroids
#
# w = model.layers[layer_id].get_weights()
# shape = w[0].shape
# weight_array = w[0].flatten()
# new_weight = fine_tuned_centroids[cluster_index[layer_id]]
# for idx in range(len(nonzero_index)):
# index = nonzero_index[idx]
# weight_array[index] = new_weight[idx]
#
# w[0] = weight_array.reshape(shape)
# model.layers[layer_id].set_weights(w)
# score = model.evaluate(x_test, y_test, verbose=0)
# print('val loss: {}'.format(score[0]))
# print('val acc: {}'.format(score[1]))
print('-------------------')
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
layer_relative_index = dict()
layer_weight_cluster_index = dict()
Node = namedtuple('Node', ['frequency', 'value', 'left', 'right'])
Node.__lt__ = lambda x, y: x.frequency < y.frequency
def encode_huffman_tree(root):
"""
Encodes a huffman tree to string of '0's and '1's
"""
# converter = {'float32':float2bitstr, 'int32':int2bitstr}
code_list = []
def encode_node(node):
if node.value is not None: # node is leaf node
code_list.append('1')
lst = list(int2bitstr(node.value))
code_list.extend(lst)
else:
code_list.append('0')
encode_node(node.left)
encode_node(node.right)
encode_node(root)
return ''.join(code_list)
def int2bitstr(integer):
four_bytes = struct.pack('>I', integer) # bytes
return ''.join(f'{byte:08b}' for byte in four_bytes) # string of '0's and '1's
def bitstr2int(bitstr):
byte_arr = bytearray(int(bitstr[i:i + 8], 2) for i in range(0, len(bitstr), 8))
return struct.unpack('>I', byte_arr)[0]
def huffman_encode(arr):
# count the frequency of each number in array
frequency_map = defaultdict(int)
for value in np.nditer(arr):
value = int(value)
frequency_map[value] += 1
heap = [Node(frequency, value, None, None) for value, frequency in frequency_map.items()]
heapify(heap)
# Merge nodes
while len(heap) > 1:
node1 = heappop(heap)
node2 = heappop(heap)
merged = Node(node1.frequency + node2.frequency, None, node1, node2)
heappush(heap, merged)
# Generate code value mapping
value2code = dict()
def generate_code(node, code):
if node is None:
return
if node.value is not None:
value2code[node.value] = code
return
generate_code(node.left, code + '0')
generate_code(node.right, code + '1')
root = heappop(heap)
generate_code(root, '')
data_encoding = ''.join(value2code[int(value)] for value in np.nditer(arr))
codebook_encoding = encode_huffman_tree(root)
return data_encoding, codebook_encoding
# Matrix sparsity with relative index
for layer_id in Sparse_layer:
layer = model.layers[layer_id]
weight = layer.get_weights()
w = deepcopy(weight)
shape = w[0].shape
weight_array = w[0].flatten()
# nonzero_weight = w[0][Sparse_layer[layer_id] != 0].flatten()
# print(len(nonzero_weight))
nonzero_weight_cluster_index = cluster_index[layer_id]
print(len(nonzero_weight_cluster_index))
nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
first = nonzero_index[0]
relative = np.insert(np.diff(nonzero_index), 0, first)
relative_diff_index = relative.tolist()
weight_cluster_index = nonzero_weight_cluster_index.tolist()
shift = 0
for i in np.where(relative > MAX_SPAN)[0].tolist():
while relative_diff_index[i + shift] > MAX_SPAN:
relative_diff_index.insert(i + shift, MAX_SPAN)
weight_cluster_index.insert(i + shift, 0)
shift += 1
relative_diff_index[i + shift] -= MAX_SPAN
layer_relative_index[layer_id] = np.array(relative_diff_index)
data_encoding, codebook_encoding = huffman_encode(np.array(weight_cluster_index))
# layer_weight_cluster_index[layer_id] = np.array(weight_cluster_index)
layer_weight_cluster_index[layer_id] = np.array([data_encoding, codebook_encoding])
print('----------------')
# print(layer_weight_value[5])
# encode
file_name = './result/compressed_model2'
file = h5py.File('{}.h5'.format(file_name), mode='w')
for layer_id in range(len(model.layers)):
layer = model.layers[layer_id]
weight = layer.get_weights()
if len(weight) > 0:
file_layer = file.create_group(layer.name)
shape = weight[0].shape
if layer_id != 0:
print(len(weight[0].shape))
pshape = file_layer.create_dataset('shape', np.array(shape).shape, dtype='int32')
pindex = file_layer.create_dataset('index', layer_relative_index[layer_id].shape, dtype='int32')
# pcluster_index = file_layer.create_dataset('cluster_index', layer_weight_cluster_index[layer_id].shape,
# dtype='int32')
pcluster_index = file_layer.create_dataset('cluster_index', layer_weight_cluster_index[layer_id].shape,
dtype=h5py.special_dtype(vlen=str))
pcentroid = file_layer.create_dataset('centroid', cluster_centroids[layer_id].shape, dtype='float32')
pshape[:] = np.array(shape)
pindex[:] = layer_relative_index[layer_id]
pcluster_index[:] = layer_weight_cluster_index[layer_id]
pcentroid[:] = cluster_centroids[layer_id]
else:
pweight = file_layer.create_dataset('weight', weight[0].shape, dtype='float32')
pweight[:] = weight[0]
pbias = file_layer.create_dataset('bias', weight[1].shape, dtype='float32')
pbias[:] = weight[1]
file.flush()
file.close()
| end = x_train.shape[0] | conditional_block |
radixsort.go | // Copyright 2014-5 Randall Farmer. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package radixsort
import (
"bytes"
"sort"
)
const radix = 8
const mask = (1 << radix) - 1
// qSortCutoff is when we bail out to a quicksort. It's changed to 1 for
// certain tests so we can more easily exercise the radix sorting. This was
// around the break-even point in some sloppy tests.
var qSortCutoff = 1 << 7
const keyPanicMessage = "sort failed: Key and Less aren't consistent with each other"
const keyUint64Help = " (for float data, sortutil Key functions may help resolve this)"
const panicMessage = "sort failed: could be a data race, a radixsort bug, or a subtle bug in the interface implementation"
// maxRadixDepth limits how deeply the radix part of string sorts can
// recurse before we bail to quicksort. Each recursion uses 2KB stack.
const maxRadixDepth = 32
// task describes a range of data to be sorted and additional
// information the sorter needs: bitshift in a numeric sort, byte offset in
// a string sort, or maximum depth (expressed as -maxDepth-1) for a
// quicksort.
type task struct{ offs, pos, end int }
// ByUint64 sorts data by a uint64 key.
func ByUint64(data Uint64Interface) {
l := data.Len()
shift := guessIntShift(data, l)
parallelSort(data, radixSortUint64, task{offs: int(shift), end: l})
// check results!
for i := 1; i < l; i++ {
if data.Less(i, i-1) {
if data.Key(i) > data.Key(i-1) {
panic(keyPanicMessage + keyUint64Help)
}
panic(panicMessage)
}
}
}
// int64Key generates a uint64 from an int64
func int64Key(i int64) uint64 { return uint64(i) ^ 1<<63 }
// intwrapper tunrs an Int64Interface into a Uint64Interface for
// guessIntShift
type intwrapper struct{ Int64Interface }
func (iw intwrapper) Key(i int) uint64 {
return int64Key(iw.Int64Interface.Key(i))
}
// ByInt64 sorts data by an int64 key.
func ByInt64(data Int64Interface) {
l := data.Len()
shift := guessIntShift(intwrapper{data}, l)
parallelSort(data, radixSortInt64, task{offs: int(shift), end: l})
// check results!
for i := 1; i < l; i++ {
if data.Less(i, i-1) {
if data.Key(i) > data.Key(i-1) {
panic(keyPanicMessage + keyUint64Help)
}
panic(panicMessage)
}
}
}
// ByString sorts data by a string key.
func ByString(data StringInterface) {
l := data.Len()
parallelSort(data, radixSortString, task{end: l})
// check results!
for i := 1; i < l; i++ {
if data.Less(i, i-1) {
if data.Key(i) > data.Key(i-1) {
panic(keyPanicMessage)
}
panic(panicMessage)
}
}
}
// ByBytes sorts data by a []byte key.
func ByBytes(data BytesInterface) {
l := data.Len()
parallelSort(data, radixSortBytes, task{end: l})
// check results!
for i := 1; i < l; i++ {
if data.Less(i, i-1) {
if bytes.Compare(data.Key(i), data.Key(i-1)) > 0 {
panic(keyPanicMessage)
}
panic(panicMessage)
}
}
}
// guessIntShift saves a pass when the data is distributed roughly uniformly
// in a small range (think shuffled indices into a small array), and rarely
// hurts much otherwise: either it just returns 64-radix quickly, or it
// returns too small a shift and the sort notices after one useless counting
// pass.
func guessIntShift(data Uint64Interface, l int) uint {
if l < qSortCutoff {
return 64 - radix
}
step := l >> 5
if l > 1<<16 {
step = l >> 8
}
if step == 0 { // only for tests w/qSortCutoff lowered
step = 1
}
min := data.Key(l - 1)
max := min
for i := 0; i < l; i += step {
k := data.Key(i)
if k < min {
min = k
}
if k > max {
max = k
}
}
diff := min ^ max
log2diff := 0
for diff != 0 {
log2diff++
diff >>= 1
}
shiftGuess := log2diff - radix
if shiftGuess < 0 {
return 0
}
return uint(shiftGuess)
}
/*
Thanks to (and please refer to):
Victor J. Duvanenko, "Parallel In-Place Radix Sort Simplified", 2011, at
http://www.drdobbs.com/parallel/parallel-in-place-radix-sort-simplified/229000734
for lots of practical discussion of performance
Michael Herf, "Radix Tricks", 2001, at
http://stereopsis.com/radix.html
for the idea for Float32Key()/Float64Key() (via Pierre Tardiman, "Radix Sort
Revisited", 2000, at http://codercorner.com/RadixSortRevisited.htm) and more
performance talk.
A handy slide deck summarizing Robert Sedgewick and Kevin Wayne's Algorithms
on string sorts:
http://algs4.cs.princeton.edu/lectures/51StringSorts.pdf
for a grounding in string sorts and pointer to American flag sort
McIlroy, Bostic, and McIlroy, "Engineering Radix Sort", 1993 at
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.22.6990
for laying out American flag sort
- We're not using American flag sort's trick of keeping our own stack. It
might help on some data, but just bailing to qsort after 32 bytes is
enough to keep stack use from exploding.
- I suspect the quicksort phase could be sped up, especially for strings.
If you collected the next, say, eight bytes of each string in an array,
sorted those, and only compared full strings as a tiebreaker, you could
likely avoid following a lot of pointers and use cache better. That's a
lot of work and a lot of code, though.
- I'm sure with a radically different approach--like with a type like this:
type Index struct { Indices, Keys uint64 }
you could do a bunch of other cool things.
*/
// All three radixSort functions below do a counting pass and a swapping
// pass, then recurse. They fall back to comparison sort for small buckets
// and equal ranges, and the int sorts try to skip bits that are identical
// across the whole range being sorted.
func radixSortUint64(dataI sort.Interface, t task, sortRange func(task)) |
func radixSortInt64(dataI sort.Interface, t task, sortRange func(task)) {
data := dataI.(Int64Interface)
shift, a, b := uint(t.offs), t.pos, t.end
if b-a < qSortCutoff {
qSort(data, a, b)
return
}
// use a single pass over the keys to bucket data and find min/max
// (for skipping over bits that are always identical)
var bucketStarts, bucketEnds [1 << radix]int
min := int64Key(data.Key(a))
max := min
for i := a; i < b; i++ {
k := int64Key(data.Key(i))
bucketStarts[(k>>shift)&mask]++
if k < min {
min = k
}
if k > max {
max = k
}
}
// skip past common prefixes, bail if all keys equal
diff := min ^ max
if diff == 0 {
return
}
if diff>>shift == 0 || diff>>(shift+radix) != 0 {
// find highest 1 bit in diff
log2diff := 0
for diff != 0 {
log2diff++
diff >>= 1
}
nextShift := log2diff - radix
if nextShift < 0 {
nextShift = 0
}
sortRange(task{int(nextShift), a, b})
return
}
pos := a
for i, c := range bucketStarts {
bucketStarts[i] = pos
pos += c
bucketEnds[i] = pos
}
for curBucket, bucketEnd := range bucketEnds {
i := bucketStarts[curBucket]
for i < bucketEnd {
destBucket := (int64Key(data.Key(i)) >> shift) & mask
if destBucket == uint64(curBucket) {
i++
bucketStarts[destBucket]++
continue
}
data.Swap(i, bucketStarts[destBucket])
bucketStarts[destBucket]++
}
}
if shift == 0 {
// each bucket is a unique key
return
}
nextShift := shift - radix
if shift < radix {
nextShift = 0
}
pos = a
for _, end := range bucketEnds {
if end > pos+1 {
sortRange(task{int(nextShift), pos, end})
}
pos = end
}
}
func radixSortString(dataI sort.Interface, t task, sortRange func(task)) {
data := dataI.(StringInterface)
offset, a, b := t.offs, t.pos, t.end
if offset < 0 {
// in a parallel quicksort of items w/long common key prefix
quickSortWorker(data, t, sortRange)
return
}
if b-a < qSortCutoff {
qSort(data, a, b)
return
}
if offset == maxRadixDepth {
qSortPar(data, t, sortRange)
return
}
// swap too-short strings to start and count bucket sizes
bucketStarts, bucketEnds := [256]int{}, [256]int{}
for i := a; i < b; i++ {
k := data.Key(i)
if len(k) <= offset {
// swap too-short strings to start
data.Swap(a, i)
a++
continue
}
bucketStarts[k[offset]]++
}
pos := a
for i, c := range bucketStarts {
bucketStarts[i] = pos
pos += c
bucketEnds[i] = pos
if bucketStarts[i] == a && bucketEnds[i] == b {
// everything was in the same bucket
sortRange(task{offset + 1, a, b})
return
}
}
i := a
for curBucket, bucketEnd := range bucketEnds {
start := i
i = bucketStarts[curBucket]
for i < bucketEnd {
destBucket := data.Key(i)[offset]
if destBucket == byte(curBucket) {
i++
bucketStarts[destBucket]++
continue
}
data.Swap(i, bucketStarts[destBucket])
bucketStarts[destBucket]++
}
if i > start+1 {
sortRange(task{offset + 1, start, i})
}
}
}
func radixSortBytes(dataI sort.Interface, t task, sortRange func(task)) {
data := dataI.(BytesInterface)
offset, a, b := t.offs, t.pos, t.end
if offset < 0 {
// in a parallel quicksort of items w/long common key prefix
quickSortWorker(data, t, sortRange)
return
}
if b-a < qSortCutoff {
qSort(data, a, b)
return
}
if offset == maxRadixDepth {
qSortPar(data, t, sortRange)
return
}
// swap too-short strings to start and count bucket sizes
bucketStarts, bucketEnds := [256]int{}, [256]int{}
for i := a; i < b; i++ {
k := data.Key(i)
if len(k) <= offset {
// swap too-short strings to start
data.Swap(a, i)
a++
continue
}
bucketStarts[k[offset]]++
}
pos := a
for i, c := range bucketStarts {
bucketStarts[i] = pos
pos += c
bucketEnds[i] = pos
if bucketStarts[i] == a && bucketEnds[i] == b {
// everything was in the same bucket
sortRange(task{offset + 1, a, b})
return
}
}
i := a
for curBucket, bucketEnd := range bucketEnds {
start := i
i = bucketStarts[curBucket]
for i < bucketEnd {
destBucket := data.Key(i)[offset]
if destBucket == byte(curBucket) {
i++
bucketStarts[destBucket]++
continue
}
data.Swap(i, bucketStarts[destBucket])
bucketStarts[destBucket]++
}
if i > start+1 {
sortRange(task{offset + 1, start, i})
}
}
}
| {
data := dataI.(Uint64Interface)
shift, a, b := uint(t.offs), t.pos, t.end
if b-a < qSortCutoff {
qSort(data, a, b)
return
}
// use a single pass over the keys to bucket data and find min/max
// (for skipping over bits that are always identical)
var bucketStarts, bucketEnds [1 << radix]int
min := data.Key(a)
max := min
for i := a; i < b; i++ {
k := data.Key(i)
bucketStarts[(k>>shift)&mask]++
if k < min {
min = k
}
if k > max {
max = k
}
}
// skip past common prefixes, bail if all keys equal
diff := min ^ max
if diff == 0 {
return
}
if diff>>shift == 0 || diff>>(shift+radix) != 0 {
// find highest 1 bit in diff
log2diff := 0
for diff != 0 {
log2diff++
diff >>= 1
}
nextShift := log2diff - radix
if nextShift < 0 {
nextShift = 0
}
sortRange(task{int(nextShift), a, b})
return
}
pos := a
for i, c := range bucketStarts {
bucketStarts[i] = pos
pos += c
bucketEnds[i] = pos
}
for curBucket, bucketEnd := range bucketEnds {
i := bucketStarts[curBucket]
for i < bucketEnd {
destBucket := (data.Key(i) >> shift) & mask
if destBucket == uint64(curBucket) {
i++
bucketStarts[destBucket]++
continue
}
data.Swap(i, bucketStarts[destBucket])
bucketStarts[destBucket]++
}
}
if shift == 0 {
// each bucket is a unique key
return
}
nextShift := shift - radix
if shift < radix {
nextShift = 0
}
pos = a
for _, end := range bucketEnds {
if end > pos+1 {
sortRange(task{int(nextShift), pos, end})
}
pos = end
}
} | identifier_body |
radixsort.go | // Copyright 2014-5 Randall Farmer. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package radixsort
import (
"bytes"
"sort"
)
const radix = 8
const mask = (1 << radix) - 1
// qSortCutoff is when we bail out to a quicksort. It's changed to 1 for | var qSortCutoff = 1 << 7
const keyPanicMessage = "sort failed: Key and Less aren't consistent with each other"
const keyUint64Help = " (for float data, sortutil Key functions may help resolve this)"
const panicMessage = "sort failed: could be a data race, a radixsort bug, or a subtle bug in the interface implementation"
// maxRadixDepth limits how deeply the radix part of string sorts can
// recurse before we bail to quicksort. Each recursion uses 2KB stack.
const maxRadixDepth = 32
// task describes a range of data to be sorted and additional
// information the sorter needs: bitshift in a numeric sort, byte offset in
// a string sort, or maximum depth (expressed as -maxDepth-1) for a
// quicksort.
type task struct{ offs, pos, end int }
// ByUint64 sorts data by a uint64 key.
func ByUint64(data Uint64Interface) {
l := data.Len()
shift := guessIntShift(data, l)
parallelSort(data, radixSortUint64, task{offs: int(shift), end: l})
// check results!
for i := 1; i < l; i++ {
if data.Less(i, i-1) {
if data.Key(i) > data.Key(i-1) {
panic(keyPanicMessage + keyUint64Help)
}
panic(panicMessage)
}
}
}
// int64Key generates a uint64 from an int64
func int64Key(i int64) uint64 { return uint64(i) ^ 1<<63 }
// intwrapper tunrs an Int64Interface into a Uint64Interface for
// guessIntShift
type intwrapper struct{ Int64Interface }
func (iw intwrapper) Key(i int) uint64 {
return int64Key(iw.Int64Interface.Key(i))
}
// ByInt64 sorts data by an int64 key.
func ByInt64(data Int64Interface) {
l := data.Len()
shift := guessIntShift(intwrapper{data}, l)
parallelSort(data, radixSortInt64, task{offs: int(shift), end: l})
// check results!
for i := 1; i < l; i++ {
if data.Less(i, i-1) {
if data.Key(i) > data.Key(i-1) {
panic(keyPanicMessage + keyUint64Help)
}
panic(panicMessage)
}
}
}
// ByString sorts data by a string key.
func ByString(data StringInterface) {
l := data.Len()
parallelSort(data, radixSortString, task{end: l})
// check results!
for i := 1; i < l; i++ {
if data.Less(i, i-1) {
if data.Key(i) > data.Key(i-1) {
panic(keyPanicMessage)
}
panic(panicMessage)
}
}
}
// ByBytes sorts data by a []byte key.
func ByBytes(data BytesInterface) {
l := data.Len()
parallelSort(data, radixSortBytes, task{end: l})
// check results!
for i := 1; i < l; i++ {
if data.Less(i, i-1) {
if bytes.Compare(data.Key(i), data.Key(i-1)) > 0 {
panic(keyPanicMessage)
}
panic(panicMessage)
}
}
}
// guessIntShift saves a pass when the data is distributed roughly uniformly
// in a small range (think shuffled indices into a small array), and rarely
// hurts much otherwise: either it just returns 64-radix quickly, or it
// returns too small a shift and the sort notices after one useless counting
// pass.
func guessIntShift(data Uint64Interface, l int) uint {
if l < qSortCutoff {
return 64 - radix
}
step := l >> 5
if l > 1<<16 {
step = l >> 8
}
if step == 0 { // only for tests w/qSortCutoff lowered
step = 1
}
min := data.Key(l - 1)
max := min
for i := 0; i < l; i += step {
k := data.Key(i)
if k < min {
min = k
}
if k > max {
max = k
}
}
diff := min ^ max
log2diff := 0
for diff != 0 {
log2diff++
diff >>= 1
}
shiftGuess := log2diff - radix
if shiftGuess < 0 {
return 0
}
return uint(shiftGuess)
}
/*
Thanks to (and please refer to):
Victor J. Duvanenko, "Parallel In-Place Radix Sort Simplified", 2011, at
http://www.drdobbs.com/parallel/parallel-in-place-radix-sort-simplified/229000734
for lots of practical discussion of performance
Michael Herf, "Radix Tricks", 2001, at
http://stereopsis.com/radix.html
for the idea for Float32Key()/Float64Key() (via Pierre Tardiman, "Radix Sort
Revisited", 2000, at http://codercorner.com/RadixSortRevisited.htm) and more
performance talk.
A handy slide deck summarizing Robert Sedgewick and Kevin Wayne's Algorithms
on string sorts:
http://algs4.cs.princeton.edu/lectures/51StringSorts.pdf
for a grounding in string sorts and pointer to American flag sort
McIlroy, Bostic, and McIlroy, "Engineering Radix Sort", 1993 at
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.22.6990
for laying out American flag sort
- We're not using American flag sort's trick of keeping our own stack. It
might help on some data, but just bailing to qsort after 32 bytes is
enough to keep stack use from exploding.
- I suspect the quicksort phase could be sped up, especially for strings.
If you collected the next, say, eight bytes of each string in an array,
sorted those, and only compared full strings as a tiebreaker, you could
likely avoid following a lot of pointers and use cache better. That's a
lot of work and a lot of code, though.
- I'm sure with a radically different approach--like with a type like this:
type Index struct { Indices, Keys uint64 }
you could do a bunch of other cool things.
*/
// All three radixSort functions below do a counting pass and a swapping
// pass, then recurse. They fall back to comparison sort for small buckets
// and equal ranges, and the int sorts try to skip bits that are identical
// across the whole range being sorted.
func radixSortUint64(dataI sort.Interface, t task, sortRange func(task)) {
data := dataI.(Uint64Interface)
shift, a, b := uint(t.offs), t.pos, t.end
if b-a < qSortCutoff {
qSort(data, a, b)
return
}
// use a single pass over the keys to bucket data and find min/max
// (for skipping over bits that are always identical)
var bucketStarts, bucketEnds [1 << radix]int
min := data.Key(a)
max := min
for i := a; i < b; i++ {
k := data.Key(i)
bucketStarts[(k>>shift)&mask]++
if k < min {
min = k
}
if k > max {
max = k
}
}
// skip past common prefixes, bail if all keys equal
diff := min ^ max
if diff == 0 {
return
}
if diff>>shift == 0 || diff>>(shift+radix) != 0 {
// find highest 1 bit in diff
log2diff := 0
for diff != 0 {
log2diff++
diff >>= 1
}
nextShift := log2diff - radix
if nextShift < 0 {
nextShift = 0
}
sortRange(task{int(nextShift), a, b})
return
}
pos := a
for i, c := range bucketStarts {
bucketStarts[i] = pos
pos += c
bucketEnds[i] = pos
}
for curBucket, bucketEnd := range bucketEnds {
i := bucketStarts[curBucket]
for i < bucketEnd {
destBucket := (data.Key(i) >> shift) & mask
if destBucket == uint64(curBucket) {
i++
bucketStarts[destBucket]++
continue
}
data.Swap(i, bucketStarts[destBucket])
bucketStarts[destBucket]++
}
}
if shift == 0 {
// each bucket is a unique key
return
}
nextShift := shift - radix
if shift < radix {
nextShift = 0
}
pos = a
for _, end := range bucketEnds {
if end > pos+1 {
sortRange(task{int(nextShift), pos, end})
}
pos = end
}
}
func radixSortInt64(dataI sort.Interface, t task, sortRange func(task)) {
data := dataI.(Int64Interface)
shift, a, b := uint(t.offs), t.pos, t.end
if b-a < qSortCutoff {
qSort(data, a, b)
return
}
// use a single pass over the keys to bucket data and find min/max
// (for skipping over bits that are always identical)
var bucketStarts, bucketEnds [1 << radix]int
min := int64Key(data.Key(a))
max := min
for i := a; i < b; i++ {
k := int64Key(data.Key(i))
bucketStarts[(k>>shift)&mask]++
if k < min {
min = k
}
if k > max {
max = k
}
}
// skip past common prefixes, bail if all keys equal
diff := min ^ max
if diff == 0 {
return
}
if diff>>shift == 0 || diff>>(shift+radix) != 0 {
// find highest 1 bit in diff
log2diff := 0
for diff != 0 {
log2diff++
diff >>= 1
}
nextShift := log2diff - radix
if nextShift < 0 {
nextShift = 0
}
sortRange(task{int(nextShift), a, b})
return
}
pos := a
for i, c := range bucketStarts {
bucketStarts[i] = pos
pos += c
bucketEnds[i] = pos
}
for curBucket, bucketEnd := range bucketEnds {
i := bucketStarts[curBucket]
for i < bucketEnd {
destBucket := (int64Key(data.Key(i)) >> shift) & mask
if destBucket == uint64(curBucket) {
i++
bucketStarts[destBucket]++
continue
}
data.Swap(i, bucketStarts[destBucket])
bucketStarts[destBucket]++
}
}
if shift == 0 {
// each bucket is a unique key
return
}
nextShift := shift - radix
if shift < radix {
nextShift = 0
}
pos = a
for _, end := range bucketEnds {
if end > pos+1 {
sortRange(task{int(nextShift), pos, end})
}
pos = end
}
}
func radixSortString(dataI sort.Interface, t task, sortRange func(task)) {
data := dataI.(StringInterface)
offset, a, b := t.offs, t.pos, t.end
if offset < 0 {
// in a parallel quicksort of items w/long common key prefix
quickSortWorker(data, t, sortRange)
return
}
if b-a < qSortCutoff {
qSort(data, a, b)
return
}
if offset == maxRadixDepth {
qSortPar(data, t, sortRange)
return
}
// swap too-short strings to start and count bucket sizes
bucketStarts, bucketEnds := [256]int{}, [256]int{}
for i := a; i < b; i++ {
k := data.Key(i)
if len(k) <= offset {
// swap too-short strings to start
data.Swap(a, i)
a++
continue
}
bucketStarts[k[offset]]++
}
pos := a
for i, c := range bucketStarts {
bucketStarts[i] = pos
pos += c
bucketEnds[i] = pos
if bucketStarts[i] == a && bucketEnds[i] == b {
// everything was in the same bucket
sortRange(task{offset + 1, a, b})
return
}
}
i := a
for curBucket, bucketEnd := range bucketEnds {
start := i
i = bucketStarts[curBucket]
for i < bucketEnd {
destBucket := data.Key(i)[offset]
if destBucket == byte(curBucket) {
i++
bucketStarts[destBucket]++
continue
}
data.Swap(i, bucketStarts[destBucket])
bucketStarts[destBucket]++
}
if i > start+1 {
sortRange(task{offset + 1, start, i})
}
}
}
func radixSortBytes(dataI sort.Interface, t task, sortRange func(task)) {
data := dataI.(BytesInterface)
offset, a, b := t.offs, t.pos, t.end
if offset < 0 {
// in a parallel quicksort of items w/long common key prefix
quickSortWorker(data, t, sortRange)
return
}
if b-a < qSortCutoff {
qSort(data, a, b)
return
}
if offset == maxRadixDepth {
qSortPar(data, t, sortRange)
return
}
// swap too-short strings to start and count bucket sizes
bucketStarts, bucketEnds := [256]int{}, [256]int{}
for i := a; i < b; i++ {
k := data.Key(i)
if len(k) <= offset {
// swap too-short strings to start
data.Swap(a, i)
a++
continue
}
bucketStarts[k[offset]]++
}
pos := a
for i, c := range bucketStarts {
bucketStarts[i] = pos
pos += c
bucketEnds[i] = pos
if bucketStarts[i] == a && bucketEnds[i] == b {
// everything was in the same bucket
sortRange(task{offset + 1, a, b})
return
}
}
i := a
for curBucket, bucketEnd := range bucketEnds {
start := i
i = bucketStarts[curBucket]
for i < bucketEnd {
destBucket := data.Key(i)[offset]
if destBucket == byte(curBucket) {
i++
bucketStarts[destBucket]++
continue
}
data.Swap(i, bucketStarts[destBucket])
bucketStarts[destBucket]++
}
if i > start+1 {
sortRange(task{offset + 1, start, i})
}
}
} | // certain tests so we can more easily exercise the radix sorting. This was
// around the break-even point in some sloppy tests. | random_line_split |
radixsort.go | // Copyright 2014-5 Randall Farmer. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package radixsort
import (
"bytes"
"sort"
)
const radix = 8
const mask = (1 << radix) - 1
// qSortCutoff is when we bail out to a quicksort. It's changed to 1 for
// certain tests so we can more easily exercise the radix sorting. This was
// around the break-even point in some sloppy tests.
var qSortCutoff = 1 << 7
const keyPanicMessage = "sort failed: Key and Less aren't consistent with each other"
const keyUint64Help = " (for float data, sortutil Key functions may help resolve this)"
const panicMessage = "sort failed: could be a data race, a radixsort bug, or a subtle bug in the interface implementation"
// maxRadixDepth limits how deeply the radix part of string sorts can
// recurse before we bail to quicksort. Each recursion uses 2KB stack.
const maxRadixDepth = 32
// task describes a range of data to be sorted and additional
// information the sorter needs: bitshift in a numeric sort, byte offset in
// a string sort, or maximum depth (expressed as -maxDepth-1) for a
// quicksort.
type task struct{ offs, pos, end int }
// ByUint64 sorts data by a uint64 key.
func ByUint64(data Uint64Interface) {
l := data.Len()
shift := guessIntShift(data, l)
parallelSort(data, radixSortUint64, task{offs: int(shift), end: l})
// check results!
for i := 1; i < l; i++ {
if data.Less(i, i-1) {
if data.Key(i) > data.Key(i-1) {
panic(keyPanicMessage + keyUint64Help)
}
panic(panicMessage)
}
}
}
// int64Key generates a uint64 from an int64
func int64Key(i int64) uint64 { return uint64(i) ^ 1<<63 }
// intwrapper tunrs an Int64Interface into a Uint64Interface for
// guessIntShift
type intwrapper struct{ Int64Interface }
func (iw intwrapper) Key(i int) uint64 {
return int64Key(iw.Int64Interface.Key(i))
}
// ByInt64 sorts data by an int64 key.
func ByInt64(data Int64Interface) {
l := data.Len()
shift := guessIntShift(intwrapper{data}, l)
parallelSort(data, radixSortInt64, task{offs: int(shift), end: l})
// check results!
for i := 1; i < l; i++ {
if data.Less(i, i-1) {
if data.Key(i) > data.Key(i-1) {
panic(keyPanicMessage + keyUint64Help)
}
panic(panicMessage)
}
}
}
// ByString sorts data by a string key.
func ByString(data StringInterface) {
l := data.Len()
parallelSort(data, radixSortString, task{end: l})
// check results!
for i := 1; i < l; i++ {
if data.Less(i, i-1) {
if data.Key(i) > data.Key(i-1) {
panic(keyPanicMessage)
}
panic(panicMessage)
}
}
}
// ByBytes sorts data by a []byte key.
func ByBytes(data BytesInterface) {
l := data.Len()
parallelSort(data, radixSortBytes, task{end: l})
// check results!
for i := 1; i < l; i++ {
if data.Less(i, i-1) {
if bytes.Compare(data.Key(i), data.Key(i-1)) > 0 {
panic(keyPanicMessage)
}
panic(panicMessage)
}
}
}
// guessIntShift saves a pass when the data is distributed roughly uniformly
// in a small range (think shuffled indices into a small array), and rarely
// hurts much otherwise: either it just returns 64-radix quickly, or it
// returns too small a shift and the sort notices after one useless counting
// pass.
func guessIntShift(data Uint64Interface, l int) uint {
if l < qSortCutoff {
return 64 - radix
}
step := l >> 5
if l > 1<<16 {
step = l >> 8
}
if step == 0 { // only for tests w/qSortCutoff lowered
step = 1
}
min := data.Key(l - 1)
max := min
for i := 0; i < l; i += step {
k := data.Key(i)
if k < min {
min = k
}
if k > max {
max = k
}
}
diff := min ^ max
log2diff := 0
for diff != 0 {
log2diff++
diff >>= 1
}
shiftGuess := log2diff - radix
if shiftGuess < 0 {
return 0
}
return uint(shiftGuess)
}
/*
Thanks to (and please refer to):
Victor J. Duvanenko, "Parallel In-Place Radix Sort Simplified", 2011, at
http://www.drdobbs.com/parallel/parallel-in-place-radix-sort-simplified/229000734
for lots of practical discussion of performance
Michael Herf, "Radix Tricks", 2001, at
http://stereopsis.com/radix.html
for the idea for Float32Key()/Float64Key() (via Pierre Tardiman, "Radix Sort
Revisited", 2000, at http://codercorner.com/RadixSortRevisited.htm) and more
performance talk.
A handy slide deck summarizing Robert Sedgewick and Kevin Wayne's Algorithms
on string sorts:
http://algs4.cs.princeton.edu/lectures/51StringSorts.pdf
for a grounding in string sorts and pointer to American flag sort
McIlroy, Bostic, and McIlroy, "Engineering Radix Sort", 1993 at
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.22.6990
for laying out American flag sort
- We're not using American flag sort's trick of keeping our own stack. It
might help on some data, but just bailing to qsort after 32 bytes is
enough to keep stack use from exploding.
- I suspect the quicksort phase could be sped up, especially for strings.
If you collected the next, say, eight bytes of each string in an array,
sorted those, and only compared full strings as a tiebreaker, you could
likely avoid following a lot of pointers and use cache better. That's a
lot of work and a lot of code, though.
- I'm sure with a radically different approach--like with a type like this:
type Index struct { Indices, Keys uint64 }
you could do a bunch of other cool things.
*/
// All three radixSort functions below do a counting pass and a swapping
// pass, then recurse. They fall back to comparison sort for small buckets
// and equal ranges, and the int sorts try to skip bits that are identical
// across the whole range being sorted.
func radixSortUint64(dataI sort.Interface, t task, sortRange func(task)) {
data := dataI.(Uint64Interface)
shift, a, b := uint(t.offs), t.pos, t.end
if b-a < qSortCutoff {
qSort(data, a, b)
return
}
// use a single pass over the keys to bucket data and find min/max
// (for skipping over bits that are always identical)
var bucketStarts, bucketEnds [1 << radix]int
min := data.Key(a)
max := min
for i := a; i < b; i++ {
k := data.Key(i)
bucketStarts[(k>>shift)&mask]++
if k < min {
min = k
}
if k > max {
max = k
}
}
// skip past common prefixes, bail if all keys equal
diff := min ^ max
if diff == 0 {
return
}
if diff>>shift == 0 || diff>>(shift+radix) != 0 {
// find highest 1 bit in diff
log2diff := 0
for diff != 0 {
log2diff++
diff >>= 1
}
nextShift := log2diff - radix
if nextShift < 0 {
nextShift = 0
}
sortRange(task{int(nextShift), a, b})
return
}
pos := a
for i, c := range bucketStarts {
bucketStarts[i] = pos
pos += c
bucketEnds[i] = pos
}
for curBucket, bucketEnd := range bucketEnds {
i := bucketStarts[curBucket]
for i < bucketEnd {
destBucket := (data.Key(i) >> shift) & mask
if destBucket == uint64(curBucket) {
i++
bucketStarts[destBucket]++
continue
}
data.Swap(i, bucketStarts[destBucket])
bucketStarts[destBucket]++
}
}
if shift == 0 {
// each bucket is a unique key
return
}
nextShift := shift - radix
if shift < radix {
nextShift = 0
}
pos = a
for _, end := range bucketEnds {
if end > pos+1 {
sortRange(task{int(nextShift), pos, end})
}
pos = end
}
}
func radixSortInt64(dataI sort.Interface, t task, sortRange func(task)) {
data := dataI.(Int64Interface)
shift, a, b := uint(t.offs), t.pos, t.end
if b-a < qSortCutoff {
qSort(data, a, b)
return
}
// use a single pass over the keys to bucket data and find min/max
// (for skipping over bits that are always identical)
var bucketStarts, bucketEnds [1 << radix]int
min := int64Key(data.Key(a))
max := min
for i := a; i < b; i++ {
k := int64Key(data.Key(i))
bucketStarts[(k>>shift)&mask]++
if k < min |
if k > max {
max = k
}
}
// skip past common prefixes, bail if all keys equal
diff := min ^ max
if diff == 0 {
return
}
if diff>>shift == 0 || diff>>(shift+radix) != 0 {
// find highest 1 bit in diff
log2diff := 0
for diff != 0 {
log2diff++
diff >>= 1
}
nextShift := log2diff - radix
if nextShift < 0 {
nextShift = 0
}
sortRange(task{int(nextShift), a, b})
return
}
pos := a
for i, c := range bucketStarts {
bucketStarts[i] = pos
pos += c
bucketEnds[i] = pos
}
for curBucket, bucketEnd := range bucketEnds {
i := bucketStarts[curBucket]
for i < bucketEnd {
destBucket := (int64Key(data.Key(i)) >> shift) & mask
if destBucket == uint64(curBucket) {
i++
bucketStarts[destBucket]++
continue
}
data.Swap(i, bucketStarts[destBucket])
bucketStarts[destBucket]++
}
}
if shift == 0 {
// each bucket is a unique key
return
}
nextShift := shift - radix
if shift < radix {
nextShift = 0
}
pos = a
for _, end := range bucketEnds {
if end > pos+1 {
sortRange(task{int(nextShift), pos, end})
}
pos = end
}
}
func radixSortString(dataI sort.Interface, t task, sortRange func(task)) {
data := dataI.(StringInterface)
offset, a, b := t.offs, t.pos, t.end
if offset < 0 {
// in a parallel quicksort of items w/long common key prefix
quickSortWorker(data, t, sortRange)
return
}
if b-a < qSortCutoff {
qSort(data, a, b)
return
}
if offset == maxRadixDepth {
qSortPar(data, t, sortRange)
return
}
// swap too-short strings to start and count bucket sizes
bucketStarts, bucketEnds := [256]int{}, [256]int{}
for i := a; i < b; i++ {
k := data.Key(i)
if len(k) <= offset {
// swap too-short strings to start
data.Swap(a, i)
a++
continue
}
bucketStarts[k[offset]]++
}
pos := a
for i, c := range bucketStarts {
bucketStarts[i] = pos
pos += c
bucketEnds[i] = pos
if bucketStarts[i] == a && bucketEnds[i] == b {
// everything was in the same bucket
sortRange(task{offset + 1, a, b})
return
}
}
i := a
for curBucket, bucketEnd := range bucketEnds {
start := i
i = bucketStarts[curBucket]
for i < bucketEnd {
destBucket := data.Key(i)[offset]
if destBucket == byte(curBucket) {
i++
bucketStarts[destBucket]++
continue
}
data.Swap(i, bucketStarts[destBucket])
bucketStarts[destBucket]++
}
if i > start+1 {
sortRange(task{offset + 1, start, i})
}
}
}
func radixSortBytes(dataI sort.Interface, t task, sortRange func(task)) {
data := dataI.(BytesInterface)
offset, a, b := t.offs, t.pos, t.end
if offset < 0 {
// in a parallel quicksort of items w/long common key prefix
quickSortWorker(data, t, sortRange)
return
}
if b-a < qSortCutoff {
qSort(data, a, b)
return
}
if offset == maxRadixDepth {
qSortPar(data, t, sortRange)
return
}
// swap too-short strings to start and count bucket sizes
bucketStarts, bucketEnds := [256]int{}, [256]int{}
for i := a; i < b; i++ {
k := data.Key(i)
if len(k) <= offset {
// swap too-short strings to start
data.Swap(a, i)
a++
continue
}
bucketStarts[k[offset]]++
}
pos := a
for i, c := range bucketStarts {
bucketStarts[i] = pos
pos += c
bucketEnds[i] = pos
if bucketStarts[i] == a && bucketEnds[i] == b {
// everything was in the same bucket
sortRange(task{offset + 1, a, b})
return
}
}
i := a
for curBucket, bucketEnd := range bucketEnds {
start := i
i = bucketStarts[curBucket]
for i < bucketEnd {
destBucket := data.Key(i)[offset]
if destBucket == byte(curBucket) {
i++
bucketStarts[destBucket]++
continue
}
data.Swap(i, bucketStarts[destBucket])
bucketStarts[destBucket]++
}
if i > start+1 {
sortRange(task{offset + 1, start, i})
}
}
}
| {
min = k
} | conditional_block |
radixsort.go | // Copyright 2014-5 Randall Farmer. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package radixsort
import (
"bytes"
"sort"
)
const radix = 8
const mask = (1 << radix) - 1
// qSortCutoff is when we bail out to a quicksort. It's changed to 1 for
// certain tests so we can more easily exercise the radix sorting. This was
// around the break-even point in some sloppy tests.
var qSortCutoff = 1 << 7
const keyPanicMessage = "sort failed: Key and Less aren't consistent with each other"
const keyUint64Help = " (for float data, sortutil Key functions may help resolve this)"
const panicMessage = "sort failed: could be a data race, a radixsort bug, or a subtle bug in the interface implementation"
// maxRadixDepth limits how deeply the radix part of string sorts can
// recurse before we bail to quicksort. Each recursion uses 2KB stack.
const maxRadixDepth = 32
// task describes a range of data to be sorted and additional
// information the sorter needs: bitshift in a numeric sort, byte offset in
// a string sort, or maximum depth (expressed as -maxDepth-1) for a
// quicksort.
type task struct{ offs, pos, end int }
// ByUint64 sorts data by a uint64 key.
func ByUint64(data Uint64Interface) {
l := data.Len()
shift := guessIntShift(data, l)
parallelSort(data, radixSortUint64, task{offs: int(shift), end: l})
// check results!
for i := 1; i < l; i++ {
if data.Less(i, i-1) {
if data.Key(i) > data.Key(i-1) {
panic(keyPanicMessage + keyUint64Help)
}
panic(panicMessage)
}
}
}
// int64Key generates a uint64 from an int64
func int64Key(i int64) uint64 { return uint64(i) ^ 1<<63 }
// intwrapper tunrs an Int64Interface into a Uint64Interface for
// guessIntShift
type intwrapper struct{ Int64Interface }
func (iw intwrapper) Key(i int) uint64 {
return int64Key(iw.Int64Interface.Key(i))
}
// ByInt64 sorts data by an int64 key.
func ByInt64(data Int64Interface) {
l := data.Len()
shift := guessIntShift(intwrapper{data}, l)
parallelSort(data, radixSortInt64, task{offs: int(shift), end: l})
// check results!
for i := 1; i < l; i++ {
if data.Less(i, i-1) {
if data.Key(i) > data.Key(i-1) {
panic(keyPanicMessage + keyUint64Help)
}
panic(panicMessage)
}
}
}
// ByString sorts data by a string key.
func ByString(data StringInterface) {
l := data.Len()
parallelSort(data, radixSortString, task{end: l})
// check results!
for i := 1; i < l; i++ {
if data.Less(i, i-1) {
if data.Key(i) > data.Key(i-1) {
panic(keyPanicMessage)
}
panic(panicMessage)
}
}
}
// ByBytes sorts data by a []byte key.
func ByBytes(data BytesInterface) {
l := data.Len()
parallelSort(data, radixSortBytes, task{end: l})
// check results!
for i := 1; i < l; i++ {
if data.Less(i, i-1) {
if bytes.Compare(data.Key(i), data.Key(i-1)) > 0 {
panic(keyPanicMessage)
}
panic(panicMessage)
}
}
}
// guessIntShift saves a pass when the data is distributed roughly uniformly
// in a small range (think shuffled indices into a small array), and rarely
// hurts much otherwise: either it just returns 64-radix quickly, or it
// returns too small a shift and the sort notices after one useless counting
// pass.
func | (data Uint64Interface, l int) uint {
if l < qSortCutoff {
return 64 - radix
}
step := l >> 5
if l > 1<<16 {
step = l >> 8
}
if step == 0 { // only for tests w/qSortCutoff lowered
step = 1
}
min := data.Key(l - 1)
max := min
for i := 0; i < l; i += step {
k := data.Key(i)
if k < min {
min = k
}
if k > max {
max = k
}
}
diff := min ^ max
log2diff := 0
for diff != 0 {
log2diff++
diff >>= 1
}
shiftGuess := log2diff - radix
if shiftGuess < 0 {
return 0
}
return uint(shiftGuess)
}
/*
Thanks to (and please refer to):
Victor J. Duvanenko, "Parallel In-Place Radix Sort Simplified", 2011, at
http://www.drdobbs.com/parallel/parallel-in-place-radix-sort-simplified/229000734
for lots of practical discussion of performance
Michael Herf, "Radix Tricks", 2001, at
http://stereopsis.com/radix.html
for the idea for Float32Key()/Float64Key() (via Pierre Tardiman, "Radix Sort
Revisited", 2000, at http://codercorner.com/RadixSortRevisited.htm) and more
performance talk.
A handy slide deck summarizing Robert Sedgewick and Kevin Wayne's Algorithms
on string sorts:
http://algs4.cs.princeton.edu/lectures/51StringSorts.pdf
for a grounding in string sorts and pointer to American flag sort
McIlroy, Bostic, and McIlroy, "Engineering Radix Sort", 1993 at
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.22.6990
for laying out American flag sort
- We're not using American flag sort's trick of keeping our own stack. It
might help on some data, but just bailing to qsort after 32 bytes is
enough to keep stack use from exploding.
- I suspect the quicksort phase could be sped up, especially for strings.
If you collected the next, say, eight bytes of each string in an array,
sorted those, and only compared full strings as a tiebreaker, you could
likely avoid following a lot of pointers and use cache better. That's a
lot of work and a lot of code, though.
- I'm sure with a radically different approach--like with a type like this:
type Index struct { Indices, Keys uint64 }
you could do a bunch of other cool things.
*/
// All three radixSort functions below do a counting pass and a swapping
// pass, then recurse. They fall back to comparison sort for small buckets
// and equal ranges, and the int sorts try to skip bits that are identical
// across the whole range being sorted.
func radixSortUint64(dataI sort.Interface, t task, sortRange func(task)) {
data := dataI.(Uint64Interface)
shift, a, b := uint(t.offs), t.pos, t.end
if b-a < qSortCutoff {
qSort(data, a, b)
return
}
// use a single pass over the keys to bucket data and find min/max
// (for skipping over bits that are always identical)
var bucketStarts, bucketEnds [1 << radix]int
min := data.Key(a)
max := min
for i := a; i < b; i++ {
k := data.Key(i)
bucketStarts[(k>>shift)&mask]++
if k < min {
min = k
}
if k > max {
max = k
}
}
// skip past common prefixes, bail if all keys equal
diff := min ^ max
if diff == 0 {
return
}
if diff>>shift == 0 || diff>>(shift+radix) != 0 {
// find highest 1 bit in diff
log2diff := 0
for diff != 0 {
log2diff++
diff >>= 1
}
nextShift := log2diff - radix
if nextShift < 0 {
nextShift = 0
}
sortRange(task{int(nextShift), a, b})
return
}
pos := a
for i, c := range bucketStarts {
bucketStarts[i] = pos
pos += c
bucketEnds[i] = pos
}
for curBucket, bucketEnd := range bucketEnds {
i := bucketStarts[curBucket]
for i < bucketEnd {
destBucket := (data.Key(i) >> shift) & mask
if destBucket == uint64(curBucket) {
i++
bucketStarts[destBucket]++
continue
}
data.Swap(i, bucketStarts[destBucket])
bucketStarts[destBucket]++
}
}
if shift == 0 {
// each bucket is a unique key
return
}
nextShift := shift - radix
if shift < radix {
nextShift = 0
}
pos = a
for _, end := range bucketEnds {
if end > pos+1 {
sortRange(task{int(nextShift), pos, end})
}
pos = end
}
}
func radixSortInt64(dataI sort.Interface, t task, sortRange func(task)) {
data := dataI.(Int64Interface)
shift, a, b := uint(t.offs), t.pos, t.end
if b-a < qSortCutoff {
qSort(data, a, b)
return
}
// use a single pass over the keys to bucket data and find min/max
// (for skipping over bits that are always identical)
var bucketStarts, bucketEnds [1 << radix]int
min := int64Key(data.Key(a))
max := min
for i := a; i < b; i++ {
k := int64Key(data.Key(i))
bucketStarts[(k>>shift)&mask]++
if k < min {
min = k
}
if k > max {
max = k
}
}
// skip past common prefixes, bail if all keys equal
diff := min ^ max
if diff == 0 {
return
}
if diff>>shift == 0 || diff>>(shift+radix) != 0 {
// find highest 1 bit in diff
log2diff := 0
for diff != 0 {
log2diff++
diff >>= 1
}
nextShift := log2diff - radix
if nextShift < 0 {
nextShift = 0
}
sortRange(task{int(nextShift), a, b})
return
}
pos := a
for i, c := range bucketStarts {
bucketStarts[i] = pos
pos += c
bucketEnds[i] = pos
}
for curBucket, bucketEnd := range bucketEnds {
i := bucketStarts[curBucket]
for i < bucketEnd {
destBucket := (int64Key(data.Key(i)) >> shift) & mask
if destBucket == uint64(curBucket) {
i++
bucketStarts[destBucket]++
continue
}
data.Swap(i, bucketStarts[destBucket])
bucketStarts[destBucket]++
}
}
if shift == 0 {
// each bucket is a unique key
return
}
nextShift := shift - radix
if shift < radix {
nextShift = 0
}
pos = a
for _, end := range bucketEnds {
if end > pos+1 {
sortRange(task{int(nextShift), pos, end})
}
pos = end
}
}
func radixSortString(dataI sort.Interface, t task, sortRange func(task)) {
data := dataI.(StringInterface)
offset, a, b := t.offs, t.pos, t.end
if offset < 0 {
// in a parallel quicksort of items w/long common key prefix
quickSortWorker(data, t, sortRange)
return
}
if b-a < qSortCutoff {
qSort(data, a, b)
return
}
if offset == maxRadixDepth {
qSortPar(data, t, sortRange)
return
}
// swap too-short strings to start and count bucket sizes
bucketStarts, bucketEnds := [256]int{}, [256]int{}
for i := a; i < b; i++ {
k := data.Key(i)
if len(k) <= offset {
// swap too-short strings to start
data.Swap(a, i)
a++
continue
}
bucketStarts[k[offset]]++
}
pos := a
for i, c := range bucketStarts {
bucketStarts[i] = pos
pos += c
bucketEnds[i] = pos
if bucketStarts[i] == a && bucketEnds[i] == b {
// everything was in the same bucket
sortRange(task{offset + 1, a, b})
return
}
}
i := a
for curBucket, bucketEnd := range bucketEnds {
start := i
i = bucketStarts[curBucket]
for i < bucketEnd {
destBucket := data.Key(i)[offset]
if destBucket == byte(curBucket) {
i++
bucketStarts[destBucket]++
continue
}
data.Swap(i, bucketStarts[destBucket])
bucketStarts[destBucket]++
}
if i > start+1 {
sortRange(task{offset + 1, start, i})
}
}
}
func radixSortBytes(dataI sort.Interface, t task, sortRange func(task)) {
data := dataI.(BytesInterface)
offset, a, b := t.offs, t.pos, t.end
if offset < 0 {
// in a parallel quicksort of items w/long common key prefix
quickSortWorker(data, t, sortRange)
return
}
if b-a < qSortCutoff {
qSort(data, a, b)
return
}
if offset == maxRadixDepth {
qSortPar(data, t, sortRange)
return
}
// swap too-short strings to start and count bucket sizes
bucketStarts, bucketEnds := [256]int{}, [256]int{}
for i := a; i < b; i++ {
k := data.Key(i)
if len(k) <= offset {
// swap too-short strings to start
data.Swap(a, i)
a++
continue
}
bucketStarts[k[offset]]++
}
pos := a
for i, c := range bucketStarts {
bucketStarts[i] = pos
pos += c
bucketEnds[i] = pos
if bucketStarts[i] == a && bucketEnds[i] == b {
// everything was in the same bucket
sortRange(task{offset + 1, a, b})
return
}
}
i := a
for curBucket, bucketEnd := range bucketEnds {
start := i
i = bucketStarts[curBucket]
for i < bucketEnd {
destBucket := data.Key(i)[offset]
if destBucket == byte(curBucket) {
i++
bucketStarts[destBucket]++
continue
}
data.Swap(i, bucketStarts[destBucket])
bucketStarts[destBucket]++
}
if i > start+1 {
sortRange(task{offset + 1, start, i})
}
}
}
| guessIntShift | identifier_name |
std_fs.rs | // Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
#![allow(clippy::disallowed_methods)]
use std::fs;
use std::io;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use deno_core::unsync::spawn_blocking;
use deno_io::fs::File;
use deno_io::fs::FsResult;
use deno_io::fs::FsStat;
use deno_io::StdFileResourceInner;
use crate::interface::FsDirEntry;
use crate::interface::FsFileType;
use crate::FileSystem;
use crate::OpenOptions;
#[cfg(not(unix))]
use deno_io::fs::FsError;
#[derive(Debug, Clone)]
pub struct RealFs;
#[async_trait::async_trait(?Send)]
impl FileSystem for RealFs {
fn cwd(&self) -> FsResult<PathBuf> {
std::env::current_dir().map_err(Into::into)
}
fn tmp_dir(&self) -> FsResult<PathBuf> {
Ok(std::env::temp_dir())
}
fn chdir(&self, path: &Path) -> FsResult<()> {
std::env::set_current_dir(path).map_err(Into::into)
}
#[cfg(not(unix))]
fn umask(&self, _mask: Option<u32>) -> FsResult<u32> {
// TODO implement umask for Windows
// see https://github.com/nodejs/node/blob/master/src/node_process_methods.cc
// and https://docs.microsoft.com/fr-fr/cpp/c-runtime-library/reference/umask?view=vs-2019
Err(FsError::NotSupported)
}
#[cfg(unix)]
fn umask(&self, mask: Option<u32>) -> FsResult<u32> {
use nix::sys::stat::mode_t;
use nix::sys::stat::umask;
use nix::sys::stat::Mode;
let r = if let Some(mask) = mask {
// If mask provided, return previous.
umask(Mode::from_bits_truncate(mask as mode_t))
} else {
// If no mask provided, we query the current. Requires two syscalls.
let prev = umask(Mode::from_bits_truncate(0o777));
let _ = umask(prev);
prev
};
#[cfg(target_os = "linux")]
{
Ok(r.bits())
}
#[cfg(any(
target_os = "macos",
target_os = "openbsd",
target_os = "freebsd"
))]
{
Ok(r.bits() as u32)
}
}
fn open_sync(
&self,
path: &Path,
options: OpenOptions,
) -> FsResult<Rc<dyn File>> {
let opts = open_options(options);
let std_file = opts.open(path)?;
Ok(Rc::new(StdFileResourceInner::file(std_file)))
}
async fn open_async(
&self,
path: PathBuf,
options: OpenOptions,
) -> FsResult<Rc<dyn File>> {
let opts = open_options(options);
let std_file = spawn_blocking(move || opts.open(path)).await??;
Ok(Rc::new(StdFileResourceInner::file(std_file)))
}
fn mkdir_sync(
&self,
path: &Path,
recursive: bool,
mode: u32,
) -> FsResult<()> {
mkdir(path, recursive, mode)
}
async fn mkdir_async(
&self,
path: PathBuf,
recursive: bool,
mode: u32,
) -> FsResult<()> {
spawn_blocking(move || mkdir(&path, recursive, mode)).await?
}
fn chmod_sync(&self, path: &Path, mode: u32) -> FsResult<()> {
chmod(path, mode)
}
async fn chmod_async(&self, path: PathBuf, mode: u32) -> FsResult<()> {
spawn_blocking(move || chmod(&path, mode)).await?
}
fn chown_sync(
&self,
path: &Path,
uid: Option<u32>,
gid: Option<u32>,
) -> FsResult<()> {
chown(path, uid, gid)
}
async fn chown_async(
&self,
path: PathBuf,
uid: Option<u32>,
gid: Option<u32>,
) -> FsResult<()> {
spawn_blocking(move || chown(&path, uid, gid)).await?
}
fn remove_sync(&self, path: &Path, recursive: bool) -> FsResult<()> {
remove(path, recursive)
}
async fn remove_async(&self, path: PathBuf, recursive: bool) -> FsResult<()> {
spawn_blocking(move || remove(&path, recursive)).await?
}
fn copy_file_sync(&self, from: &Path, to: &Path) -> FsResult<()> {
copy_file(from, to)
}
async fn copy_file_async(&self, from: PathBuf, to: PathBuf) -> FsResult<()> {
spawn_blocking(move || copy_file(&from, &to)).await?
}
fn stat_sync(&self, path: &Path) -> FsResult<FsStat> {
stat(path).map(Into::into)
}
async fn stat_async(&self, path: PathBuf) -> FsResult<FsStat> {
spawn_blocking(move || stat(&path)).await?.map(Into::into)
}
fn lstat_sync(&self, path: &Path) -> FsResult<FsStat> {
lstat(path).map(Into::into)
}
async fn lstat_async(&self, path: PathBuf) -> FsResult<FsStat> {
spawn_blocking(move || lstat(&path)).await?.map(Into::into)
}
fn realpath_sync(&self, path: &Path) -> FsResult<PathBuf> {
realpath(path)
}
async fn realpath_async(&self, path: PathBuf) -> FsResult<PathBuf> {
spawn_blocking(move || realpath(&path)).await?
}
fn read_dir_sync(&self, path: &Path) -> FsResult<Vec<FsDirEntry>> {
read_dir(path)
}
async fn read_dir_async(&self, path: PathBuf) -> FsResult<Vec<FsDirEntry>> {
spawn_blocking(move || read_dir(&path)).await?
}
fn rename_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
fs::rename(oldpath, newpath).map_err(Into::into)
}
async fn rename_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
) -> FsResult<()> {
spawn_blocking(move || fs::rename(oldpath, newpath))
.await?
.map_err(Into::into)
}
fn link_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
fs::hard_link(oldpath, newpath).map_err(Into::into)
}
async fn link_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
) -> FsResult<()> {
spawn_blocking(move || fs::hard_link(oldpath, newpath))
.await?
.map_err(Into::into)
}
fn symlink_sync(
&self,
oldpath: &Path,
newpath: &Path,
file_type: Option<FsFileType>,
) -> FsResult<()> {
symlink(oldpath, newpath, file_type)
}
async fn symlink_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
file_type: Option<FsFileType>,
) -> FsResult<()> {
spawn_blocking(move || symlink(&oldpath, &newpath, file_type)).await?
}
fn read_link_sync(&self, path: &Path) -> FsResult<PathBuf> {
fs::read_link(path).map_err(Into::into)
}
async fn read_link_async(&self, path: PathBuf) -> FsResult<PathBuf> {
spawn_blocking(move || fs::read_link(path))
.await?
.map_err(Into::into)
}
fn truncate_sync(&self, path: &Path, len: u64) -> FsResult<()> {
truncate(path, len)
}
async fn truncate_async(&self, path: PathBuf, len: u64) -> FsResult<()> {
spawn_blocking(move || truncate(&path, len)).await?
}
fn utime_sync(
&self,
path: &Path,
atime_secs: i64,
atime_nanos: u32,
mtime_secs: i64,
mtime_nanos: u32,
) -> FsResult<()> {
let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
filetime::set_file_times(path, atime, mtime).map_err(Into::into)
}
async fn utime_async(
&self,
path: PathBuf,
atime_secs: i64,
atime_nanos: u32,
mtime_secs: i64,
mtime_nanos: u32,
) -> FsResult<()> |
fn write_file_sync(
&self,
path: &Path,
options: OpenOptions,
data: &[u8],
) -> FsResult<()> {
let opts = open_options(options);
let mut file = opts.open(path)?;
#[cfg(unix)]
if let Some(mode) = options.mode {
use std::os::unix::fs::PermissionsExt;
file.set_permissions(fs::Permissions::from_mode(mode))?;
}
file.write_all(data)?;
Ok(())
}
async fn write_file_async(
&self,
path: PathBuf,
options: OpenOptions,
data: Vec<u8>,
) -> FsResult<()> {
spawn_blocking(move || {
let opts = open_options(options);
let mut file = opts.open(path)?;
#[cfg(unix)]
if let Some(mode) = options.mode {
use std::os::unix::fs::PermissionsExt;
file.set_permissions(fs::Permissions::from_mode(mode))?;
}
file.write_all(&data)?;
Ok(())
})
.await?
}
fn read_file_sync(&self, path: &Path) -> FsResult<Vec<u8>> {
fs::read(path).map_err(Into::into)
}
async fn read_file_async(&self, path: PathBuf) -> FsResult<Vec<u8>> {
spawn_blocking(move || fs::read(path))
.await?
.map_err(Into::into)
}
}
fn mkdir(path: &Path, recursive: bool, mode: u32) -> FsResult<()> {
let mut builder = fs::DirBuilder::new();
builder.recursive(recursive);
#[cfg(unix)]
{
use std::os::unix::fs::DirBuilderExt;
builder.mode(mode);
}
#[cfg(not(unix))]
{
_ = mode;
}
builder.create(path).map_err(Into::into)
}
#[cfg(unix)]
fn chmod(path: &Path, mode: u32) -> FsResult<()> {
use std::os::unix::fs::PermissionsExt;
let permissions = fs::Permissions::from_mode(mode);
fs::set_permissions(path, permissions)?;
Ok(())
}
// TODO: implement chmod for Windows (#4357)
#[cfg(not(unix))]
fn chmod(path: &Path, _mode: u32) -> FsResult<()> {
// Still check file/dir exists on Windows
std::fs::metadata(path)?;
Err(FsError::NotSupported)
}
#[cfg(unix)]
fn chown(path: &Path, uid: Option<u32>, gid: Option<u32>) -> FsResult<()> {
use nix::unistd::chown;
use nix::unistd::Gid;
use nix::unistd::Uid;
let owner = uid.map(Uid::from_raw);
let group = gid.map(Gid::from_raw);
let res = chown(path, owner, group);
if let Err(err) = res {
return Err(io::Error::from_raw_os_error(err as i32).into());
}
Ok(())
}
// TODO: implement chown for Windows
#[cfg(not(unix))]
fn chown(_path: &Path, _uid: Option<u32>, _gid: Option<u32>) -> FsResult<()> {
Err(FsError::NotSupported)
}
fn remove(path: &Path, recursive: bool) -> FsResult<()> {
// TODO: this is racy. This should open fds, and then `unlink` those.
let metadata = fs::symlink_metadata(path)?;
let file_type = metadata.file_type();
let res = if file_type.is_dir() {
if recursive {
fs::remove_dir_all(path)
} else {
fs::remove_dir(path)
}
} else if file_type.is_symlink() {
#[cfg(unix)]
{
fs::remove_file(path)
}
#[cfg(not(unix))]
{
use std::os::windows::prelude::MetadataExt;
use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY;
if metadata.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0 {
fs::remove_dir(path)
} else {
fs::remove_file(path)
}
}
} else {
fs::remove_file(path)
};
res.map_err(Into::into)
}
fn copy_file(from: &Path, to: &Path) -> FsResult<()> {
#[cfg(target_os = "macos")]
{
use libc::clonefile;
use libc::stat;
use libc::unlink;
use std::ffi::CString;
use std::io::Read;
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::fs::PermissionsExt;
use std::os::unix::prelude::OsStrExt;
let from_str = CString::new(from.as_os_str().as_bytes()).unwrap();
let to_str = CString::new(to.as_os_str().as_bytes()).unwrap();
// SAFETY: `from` and `to` are valid C strings.
// std::fs::copy does open() + fcopyfile() on macOS. We try to use
// clonefile() instead, which is more efficient.
unsafe {
let mut st = std::mem::zeroed();
let ret = stat(from_str.as_ptr(), &mut st);
if ret != 0 {
return Err(io::Error::last_os_error().into());
}
if st.st_size > 128 * 1024 {
// Try unlink. If it fails, we are going to try clonefile() anyway.
let _ = unlink(to_str.as_ptr());
// Matches rust stdlib behavior for io::copy.
// https://github.com/rust-lang/rust/blob/3fdd578d72a24d4efc2fe2ad18eec3b6ba72271e/library/std/src/sys/unix/fs.rs#L1613-L1616
if clonefile(from_str.as_ptr(), to_str.as_ptr(), 0) == 0 {
return Ok(());
}
} else {
// Do a regular copy. fcopyfile() is an overkill for < 128KB
// files.
let mut buf = [0u8; 128 * 1024];
let mut from_file = fs::File::open(from)?;
let perm = from_file.metadata()?.permissions();
let mut to_file = fs::OpenOptions::new()
// create the file with the correct mode right away
.mode(perm.mode())
.write(true)
.create(true)
.truncate(true)
.open(to)?;
let writer_metadata = to_file.metadata()?;
if writer_metadata.is_file() {
// Set the correct file permissions, in case the file already existed.
// Don't set the permissions on already existing non-files like
// pipes/FIFOs or device nodes.
to_file.set_permissions(perm)?;
}
loop {
let nread = from_file.read(&mut buf)?;
if nread == 0 {
break;
}
to_file.write_all(&buf[..nread])?;
}
return Ok(());
}
}
// clonefile() failed, fall back to std::fs::copy().
}
fs::copy(from, to)?;
Ok(())
}
#[cfg(not(windows))]
fn stat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::metadata(path)?;
Ok(FsStat::from_std(metadata))
}
#[cfg(windows)]
fn stat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::metadata(path)?;
let mut fsstat = FsStat::from_std(metadata);
use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
let path = path.canonicalize()?;
stat_extra(&mut fsstat, &path, FILE_FLAG_BACKUP_SEMANTICS)?;
Ok(fsstat)
}
#[cfg(not(windows))]
fn lstat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::symlink_metadata(path)?;
Ok(FsStat::from_std(metadata))
}
#[cfg(windows)]
fn lstat(path: &Path) -> FsResult<FsStat> {
use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
use winapi::um::winbase::FILE_FLAG_OPEN_REPARSE_POINT;
let metadata = fs::symlink_metadata(path)?;
let mut fsstat = FsStat::from_std(metadata);
stat_extra(
&mut fsstat,
path,
FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT,
)?;
Ok(fsstat)
}
#[cfg(windows)]
fn stat_extra(
fsstat: &mut FsStat,
path: &Path,
file_flags: winapi::shared::minwindef::DWORD,
) -> FsResult<()> {
use std::os::windows::prelude::OsStrExt;
use winapi::um::fileapi::CreateFileW;
use winapi::um::fileapi::OPEN_EXISTING;
use winapi::um::handleapi::CloseHandle;
use winapi::um::handleapi::INVALID_HANDLE_VALUE;
use winapi::um::winnt::FILE_SHARE_DELETE;
use winapi::um::winnt::FILE_SHARE_READ;
use winapi::um::winnt::FILE_SHARE_WRITE;
unsafe fn get_dev(
handle: winapi::shared::ntdef::HANDLE,
) -> std::io::Result<u64> {
use winapi::shared::minwindef::FALSE;
use winapi::um::fileapi::GetFileInformationByHandle;
use winapi::um::fileapi::BY_HANDLE_FILE_INFORMATION;
let info = {
let mut info =
std::mem::MaybeUninit::<BY_HANDLE_FILE_INFORMATION>::zeroed();
if GetFileInformationByHandle(handle, info.as_mut_ptr()) == FALSE {
return Err(std::io::Error::last_os_error());
}
info.assume_init()
};
Ok(info.dwVolumeSerialNumber as u64)
}
// SAFETY: winapi calls
unsafe {
let mut path: Vec<_> = path.as_os_str().encode_wide().collect();
path.push(0);
let file_handle = CreateFileW(
path.as_ptr(),
0,
FILE_SHARE_READ | FILE_SHARE_DELETE | FILE_SHARE_WRITE,
std::ptr::null_mut(),
OPEN_EXISTING,
file_flags,
std::ptr::null_mut(),
);
if file_handle == INVALID_HANDLE_VALUE {
return Err(std::io::Error::last_os_error().into());
}
let result = get_dev(file_handle);
CloseHandle(file_handle);
fsstat.dev = result?;
Ok(())
}
}
fn realpath(path: &Path) -> FsResult<PathBuf> {
Ok(deno_core::strip_unc_prefix(path.canonicalize()?))
}
fn read_dir(path: &Path) -> FsResult<Vec<FsDirEntry>> {
let entries = fs::read_dir(path)?
.filter_map(|entry| {
let entry = entry.ok()?;
let name = entry.file_name().into_string().ok()?;
let metadata = entry.file_type();
macro_rules! method_or_false {
($method:ident) => {
if let Ok(metadata) = &metadata {
metadata.$method()
} else {
false
}
};
}
Some(FsDirEntry {
name,
is_file: method_or_false!(is_file),
is_directory: method_or_false!(is_dir),
is_symlink: method_or_false!(is_symlink),
})
})
.collect();
Ok(entries)
}
#[cfg(not(windows))]
fn symlink(
oldpath: &Path,
newpath: &Path,
_file_type: Option<FsFileType>,
) -> FsResult<()> {
std::os::unix::fs::symlink(oldpath, newpath)?;
Ok(())
}
#[cfg(windows)]
fn symlink(
oldpath: &Path,
newpath: &Path,
file_type: Option<FsFileType>,
) -> FsResult<()> {
let file_type = match file_type {
Some(file_type) => file_type,
None => {
let old_meta = fs::metadata(oldpath);
match old_meta {
Ok(metadata) => {
if metadata.is_file() {
FsFileType::File
} else if metadata.is_dir() {
FsFileType::Directory
} else {
return Err(FsError::Io(io::Error::new(
io::ErrorKind::InvalidInput,
"On Windows the target must be a file or directory",
)));
}
}
Err(err) if err.kind() == io::ErrorKind::NotFound => {
return Err(FsError::Io(io::Error::new(
io::ErrorKind::InvalidInput,
"On Windows an `options` argument is required if the target does not exist",
)))
}
Err(err) => return Err(err.into()),
}
}
};
match file_type {
FsFileType::File => {
std::os::windows::fs::symlink_file(oldpath, newpath)?;
}
FsFileType::Directory => {
std::os::windows::fs::symlink_dir(oldpath, newpath)?;
}
};
Ok(())
}
fn truncate(path: &Path, len: u64) -> FsResult<()> {
let file = fs::OpenOptions::new().write(true).open(path)?;
file.set_len(len)?;
Ok(())
}
fn open_options(options: OpenOptions) -> fs::OpenOptions {
let mut open_options = fs::OpenOptions::new();
if let Some(mode) = options.mode {
// mode only used if creating the file on Unix
// if not specified, defaults to 0o666
#[cfg(unix)]
{
use std::os::unix::fs::OpenOptionsExt;
open_options.mode(mode & 0o777);
}
#[cfg(not(unix))]
let _ = mode; // avoid unused warning
}
open_options.read(options.read);
open_options.create(options.create);
open_options.write(options.write);
open_options.truncate(options.truncate);
open_options.append(options.append);
open_options.create_new(options.create_new);
open_options
}
| {
let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
spawn_blocking(move || {
filetime::set_file_times(path, atime, mtime).map_err(Into::into)
})
.await?
} | identifier_body |
std_fs.rs | // Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
#![allow(clippy::disallowed_methods)]
use std::fs;
use std::io;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use deno_core::unsync::spawn_blocking;
use deno_io::fs::File;
use deno_io::fs::FsResult;
use deno_io::fs::FsStat;
use deno_io::StdFileResourceInner;
use crate::interface::FsDirEntry;
use crate::interface::FsFileType;
use crate::FileSystem;
use crate::OpenOptions;
#[cfg(not(unix))]
use deno_io::fs::FsError;
#[derive(Debug, Clone)]
pub struct RealFs;
#[async_trait::async_trait(?Send)]
impl FileSystem for RealFs {
fn cwd(&self) -> FsResult<PathBuf> {
std::env::current_dir().map_err(Into::into)
}
fn tmp_dir(&self) -> FsResult<PathBuf> {
Ok(std::env::temp_dir())
}
fn chdir(&self, path: &Path) -> FsResult<()> {
std::env::set_current_dir(path).map_err(Into::into)
}
#[cfg(not(unix))]
fn umask(&self, _mask: Option<u32>) -> FsResult<u32> {
// TODO implement umask for Windows
// see https://github.com/nodejs/node/blob/master/src/node_process_methods.cc
// and https://docs.microsoft.com/fr-fr/cpp/c-runtime-library/reference/umask?view=vs-2019
Err(FsError::NotSupported)
}
#[cfg(unix)]
fn umask(&self, mask: Option<u32>) -> FsResult<u32> {
use nix::sys::stat::mode_t;
use nix::sys::stat::umask;
use nix::sys::stat::Mode;
let r = if let Some(mask) = mask {
// If mask provided, return previous.
umask(Mode::from_bits_truncate(mask as mode_t))
} else {
// If no mask provided, we query the current. Requires two syscalls.
let prev = umask(Mode::from_bits_truncate(0o777));
let _ = umask(prev);
prev
};
#[cfg(target_os = "linux")]
{
Ok(r.bits())
}
#[cfg(any(
target_os = "macos",
target_os = "openbsd",
target_os = "freebsd"
))]
{
Ok(r.bits() as u32)
}
}
fn open_sync(
&self,
path: &Path,
options: OpenOptions,
) -> FsResult<Rc<dyn File>> {
let opts = open_options(options);
let std_file = opts.open(path)?;
Ok(Rc::new(StdFileResourceInner::file(std_file)))
}
async fn open_async(
&self,
path: PathBuf,
options: OpenOptions,
) -> FsResult<Rc<dyn File>> {
let opts = open_options(options);
let std_file = spawn_blocking(move || opts.open(path)).await??;
Ok(Rc::new(StdFileResourceInner::file(std_file)))
}
fn mkdir_sync(
&self,
path: &Path,
recursive: bool,
mode: u32,
) -> FsResult<()> {
mkdir(path, recursive, mode)
}
async fn mkdir_async(
&self,
path: PathBuf,
recursive: bool,
mode: u32,
) -> FsResult<()> {
spawn_blocking(move || mkdir(&path, recursive, mode)).await?
}
fn chmod_sync(&self, path: &Path, mode: u32) -> FsResult<()> {
chmod(path, mode)
}
async fn chmod_async(&self, path: PathBuf, mode: u32) -> FsResult<()> {
spawn_blocking(move || chmod(&path, mode)).await?
}
fn chown_sync(
&self,
path: &Path,
uid: Option<u32>,
gid: Option<u32>,
) -> FsResult<()> {
chown(path, uid, gid)
}
async fn chown_async(
&self,
path: PathBuf,
uid: Option<u32>,
gid: Option<u32>,
) -> FsResult<()> {
spawn_blocking(move || chown(&path, uid, gid)).await?
}
fn remove_sync(&self, path: &Path, recursive: bool) -> FsResult<()> {
remove(path, recursive)
}
async fn remove_async(&self, path: PathBuf, recursive: bool) -> FsResult<()> {
spawn_blocking(move || remove(&path, recursive)).await?
}
fn copy_file_sync(&self, from: &Path, to: &Path) -> FsResult<()> {
copy_file(from, to)
}
async fn copy_file_async(&self, from: PathBuf, to: PathBuf) -> FsResult<()> {
spawn_blocking(move || copy_file(&from, &to)).await?
}
fn stat_sync(&self, path: &Path) -> FsResult<FsStat> {
stat(path).map(Into::into)
}
async fn stat_async(&self, path: PathBuf) -> FsResult<FsStat> {
spawn_blocking(move || stat(&path)).await?.map(Into::into)
}
fn lstat_sync(&self, path: &Path) -> FsResult<FsStat> {
lstat(path).map(Into::into)
}
async fn lstat_async(&self, path: PathBuf) -> FsResult<FsStat> {
spawn_blocking(move || lstat(&path)).await?.map(Into::into)
}
fn realpath_sync(&self, path: &Path) -> FsResult<PathBuf> {
realpath(path)
}
async fn realpath_async(&self, path: PathBuf) -> FsResult<PathBuf> {
spawn_blocking(move || realpath(&path)).await?
}
fn read_dir_sync(&self, path: &Path) -> FsResult<Vec<FsDirEntry>> {
read_dir(path)
}
async fn read_dir_async(&self, path: PathBuf) -> FsResult<Vec<FsDirEntry>> {
spawn_blocking(move || read_dir(&path)).await?
}
fn rename_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
fs::rename(oldpath, newpath).map_err(Into::into)
}
async fn rename_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
) -> FsResult<()> {
spawn_blocking(move || fs::rename(oldpath, newpath))
.await?
.map_err(Into::into)
}
fn link_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
fs::hard_link(oldpath, newpath).map_err(Into::into)
}
async fn link_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
) -> FsResult<()> {
spawn_blocking(move || fs::hard_link(oldpath, newpath))
.await?
.map_err(Into::into)
}
fn symlink_sync(
&self,
oldpath: &Path,
newpath: &Path,
file_type: Option<FsFileType>,
) -> FsResult<()> {
symlink(oldpath, newpath, file_type)
}
async fn symlink_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
file_type: Option<FsFileType>,
) -> FsResult<()> {
spawn_blocking(move || symlink(&oldpath, &newpath, file_type)).await?
}
fn read_link_sync(&self, path: &Path) -> FsResult<PathBuf> {
fs::read_link(path).map_err(Into::into)
}
async fn read_link_async(&self, path: PathBuf) -> FsResult<PathBuf> {
spawn_blocking(move || fs::read_link(path))
.await?
.map_err(Into::into)
}
fn truncate_sync(&self, path: &Path, len: u64) -> FsResult<()> {
truncate(path, len)
}
async fn truncate_async(&self, path: PathBuf, len: u64) -> FsResult<()> {
spawn_blocking(move || truncate(&path, len)).await?
}
fn utime_sync(
&self,
path: &Path,
atime_secs: i64,
atime_nanos: u32,
mtime_secs: i64,
mtime_nanos: u32,
) -> FsResult<()> {
let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
filetime::set_file_times(path, atime, mtime).map_err(Into::into)
}
async fn utime_async(
&self,
path: PathBuf,
atime_secs: i64,
atime_nanos: u32,
mtime_secs: i64,
mtime_nanos: u32,
) -> FsResult<()> {
let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
spawn_blocking(move || {
filetime::set_file_times(path, atime, mtime).map_err(Into::into)
})
.await?
}
fn write_file_sync(
&self,
path: &Path,
options: OpenOptions,
data: &[u8],
) -> FsResult<()> {
let opts = open_options(options);
let mut file = opts.open(path)?;
#[cfg(unix)]
if let Some(mode) = options.mode {
use std::os::unix::fs::PermissionsExt;
file.set_permissions(fs::Permissions::from_mode(mode))?;
}
file.write_all(data)?;
Ok(())
}
async fn write_file_async(
&self,
path: PathBuf,
options: OpenOptions,
data: Vec<u8>,
) -> FsResult<()> {
spawn_blocking(move || {
let opts = open_options(options);
let mut file = opts.open(path)?;
#[cfg(unix)]
if let Some(mode) = options.mode {
use std::os::unix::fs::PermissionsExt;
file.set_permissions(fs::Permissions::from_mode(mode))?;
}
file.write_all(&data)?;
Ok(())
})
.await?
}
fn read_file_sync(&self, path: &Path) -> FsResult<Vec<u8>> {
fs::read(path).map_err(Into::into)
}
async fn read_file_async(&self, path: PathBuf) -> FsResult<Vec<u8>> {
spawn_blocking(move || fs::read(path))
.await?
.map_err(Into::into)
}
}
fn mkdir(path: &Path, recursive: bool, mode: u32) -> FsResult<()> {
let mut builder = fs::DirBuilder::new();
builder.recursive(recursive);
#[cfg(unix)]
{
use std::os::unix::fs::DirBuilderExt;
builder.mode(mode);
}
#[cfg(not(unix))]
{
_ = mode;
}
builder.create(path).map_err(Into::into)
}
#[cfg(unix)]
fn chmod(path: &Path, mode: u32) -> FsResult<()> {
use std::os::unix::fs::PermissionsExt;
let permissions = fs::Permissions::from_mode(mode);
fs::set_permissions(path, permissions)?;
Ok(())
}
// TODO: implement chmod for Windows (#4357)
#[cfg(not(unix))]
fn chmod(path: &Path, _mode: u32) -> FsResult<()> {
// Still check file/dir exists on Windows
std::fs::metadata(path)?;
Err(FsError::NotSupported)
}
#[cfg(unix)]
fn chown(path: &Path, uid: Option<u32>, gid: Option<u32>) -> FsResult<()> {
use nix::unistd::chown;
use nix::unistd::Gid;
use nix::unistd::Uid;
let owner = uid.map(Uid::from_raw);
let group = gid.map(Gid::from_raw);
let res = chown(path, owner, group);
if let Err(err) = res {
return Err(io::Error::from_raw_os_error(err as i32).into());
}
Ok(())
}
// TODO: implement chown for Windows
#[cfg(not(unix))]
fn chown(_path: &Path, _uid: Option<u32>, _gid: Option<u32>) -> FsResult<()> {
Err(FsError::NotSupported)
}
fn remove(path: &Path, recursive: bool) -> FsResult<()> {
// TODO: this is racy. This should open fds, and then `unlink` those.
let metadata = fs::symlink_metadata(path)?;
let file_type = metadata.file_type();
let res = if file_type.is_dir() {
if recursive {
fs::remove_dir_all(path)
} else {
fs::remove_dir(path)
}
} else if file_type.is_symlink() {
#[cfg(unix)]
{
fs::remove_file(path)
}
#[cfg(not(unix))]
{
use std::os::windows::prelude::MetadataExt;
use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY;
if metadata.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0 {
fs::remove_dir(path)
} else {
fs::remove_file(path)
}
}
} else {
fs::remove_file(path)
};
res.map_err(Into::into)
}
fn copy_file(from: &Path, to: &Path) -> FsResult<()> {
#[cfg(target_os = "macos")]
{
use libc::clonefile;
use libc::stat;
use libc::unlink;
use std::ffi::CString;
use std::io::Read;
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::fs::PermissionsExt;
use std::os::unix::prelude::OsStrExt;
let from_str = CString::new(from.as_os_str().as_bytes()).unwrap();
let to_str = CString::new(to.as_os_str().as_bytes()).unwrap();
// SAFETY: `from` and `to` are valid C strings.
// std::fs::copy does open() + fcopyfile() on macOS. We try to use
// clonefile() instead, which is more efficient.
unsafe {
let mut st = std::mem::zeroed();
let ret = stat(from_str.as_ptr(), &mut st);
if ret != 0 {
return Err(io::Error::last_os_error().into());
}
if st.st_size > 128 * 1024 {
// Try unlink. If it fails, we are going to try clonefile() anyway.
let _ = unlink(to_str.as_ptr());
// Matches rust stdlib behavior for io::copy.
// https://github.com/rust-lang/rust/blob/3fdd578d72a24d4efc2fe2ad18eec3b6ba72271e/library/std/src/sys/unix/fs.rs#L1613-L1616
if clonefile(from_str.as_ptr(), to_str.as_ptr(), 0) == 0 {
return Ok(());
}
} else {
// Do a regular copy. fcopyfile() is an overkill for < 128KB
// files.
let mut buf = [0u8; 128 * 1024];
let mut from_file = fs::File::open(from)?;
let perm = from_file.metadata()?.permissions();
let mut to_file = fs::OpenOptions::new()
// create the file with the correct mode right away
.mode(perm.mode())
.write(true)
.create(true)
.truncate(true)
.open(to)?;
let writer_metadata = to_file.metadata()?;
if writer_metadata.is_file() {
// Set the correct file permissions, in case the file already existed.
// Don't set the permissions on already existing non-files like
// pipes/FIFOs or device nodes.
to_file.set_permissions(perm)?;
}
loop {
let nread = from_file.read(&mut buf)?;
if nread == 0 {
break;
}
to_file.write_all(&buf[..nread])?;
}
return Ok(());
}
}
// clonefile() failed, fall back to std::fs::copy().
}
fs::copy(from, to)?;
Ok(())
}
#[cfg(not(windows))]
fn stat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::metadata(path)?;
Ok(FsStat::from_std(metadata))
}
#[cfg(windows)]
fn | (path: &Path) -> FsResult<FsStat> {
let metadata = fs::metadata(path)?;
let mut fsstat = FsStat::from_std(metadata);
use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
let path = path.canonicalize()?;
stat_extra(&mut fsstat, &path, FILE_FLAG_BACKUP_SEMANTICS)?;
Ok(fsstat)
}
#[cfg(not(windows))]
fn lstat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::symlink_metadata(path)?;
Ok(FsStat::from_std(metadata))
}
#[cfg(windows)]
fn lstat(path: &Path) -> FsResult<FsStat> {
use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
use winapi::um::winbase::FILE_FLAG_OPEN_REPARSE_POINT;
let metadata = fs::symlink_metadata(path)?;
let mut fsstat = FsStat::from_std(metadata);
stat_extra(
&mut fsstat,
path,
FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT,
)?;
Ok(fsstat)
}
#[cfg(windows)]
fn stat_extra(
fsstat: &mut FsStat,
path: &Path,
file_flags: winapi::shared::minwindef::DWORD,
) -> FsResult<()> {
use std::os::windows::prelude::OsStrExt;
use winapi::um::fileapi::CreateFileW;
use winapi::um::fileapi::OPEN_EXISTING;
use winapi::um::handleapi::CloseHandle;
use winapi::um::handleapi::INVALID_HANDLE_VALUE;
use winapi::um::winnt::FILE_SHARE_DELETE;
use winapi::um::winnt::FILE_SHARE_READ;
use winapi::um::winnt::FILE_SHARE_WRITE;
unsafe fn get_dev(
handle: winapi::shared::ntdef::HANDLE,
) -> std::io::Result<u64> {
use winapi::shared::minwindef::FALSE;
use winapi::um::fileapi::GetFileInformationByHandle;
use winapi::um::fileapi::BY_HANDLE_FILE_INFORMATION;
let info = {
let mut info =
std::mem::MaybeUninit::<BY_HANDLE_FILE_INFORMATION>::zeroed();
if GetFileInformationByHandle(handle, info.as_mut_ptr()) == FALSE {
return Err(std::io::Error::last_os_error());
}
info.assume_init()
};
Ok(info.dwVolumeSerialNumber as u64)
}
// SAFETY: winapi calls
unsafe {
let mut path: Vec<_> = path.as_os_str().encode_wide().collect();
path.push(0);
let file_handle = CreateFileW(
path.as_ptr(),
0,
FILE_SHARE_READ | FILE_SHARE_DELETE | FILE_SHARE_WRITE,
std::ptr::null_mut(),
OPEN_EXISTING,
file_flags,
std::ptr::null_mut(),
);
if file_handle == INVALID_HANDLE_VALUE {
return Err(std::io::Error::last_os_error().into());
}
let result = get_dev(file_handle);
CloseHandle(file_handle);
fsstat.dev = result?;
Ok(())
}
}
fn realpath(path: &Path) -> FsResult<PathBuf> {
Ok(deno_core::strip_unc_prefix(path.canonicalize()?))
}
fn read_dir(path: &Path) -> FsResult<Vec<FsDirEntry>> {
let entries = fs::read_dir(path)?
.filter_map(|entry| {
let entry = entry.ok()?;
let name = entry.file_name().into_string().ok()?;
let metadata = entry.file_type();
macro_rules! method_or_false {
($method:ident) => {
if let Ok(metadata) = &metadata {
metadata.$method()
} else {
false
}
};
}
Some(FsDirEntry {
name,
is_file: method_or_false!(is_file),
is_directory: method_or_false!(is_dir),
is_symlink: method_or_false!(is_symlink),
})
})
.collect();
Ok(entries)
}
#[cfg(not(windows))]
fn symlink(
oldpath: &Path,
newpath: &Path,
_file_type: Option<FsFileType>,
) -> FsResult<()> {
std::os::unix::fs::symlink(oldpath, newpath)?;
Ok(())
}
#[cfg(windows)]
fn symlink(
oldpath: &Path,
newpath: &Path,
file_type: Option<FsFileType>,
) -> FsResult<()> {
let file_type = match file_type {
Some(file_type) => file_type,
None => {
let old_meta = fs::metadata(oldpath);
match old_meta {
Ok(metadata) => {
if metadata.is_file() {
FsFileType::File
} else if metadata.is_dir() {
FsFileType::Directory
} else {
return Err(FsError::Io(io::Error::new(
io::ErrorKind::InvalidInput,
"On Windows the target must be a file or directory",
)));
}
}
Err(err) if err.kind() == io::ErrorKind::NotFound => {
return Err(FsError::Io(io::Error::new(
io::ErrorKind::InvalidInput,
"On Windows an `options` argument is required if the target does not exist",
)))
}
Err(err) => return Err(err.into()),
}
}
};
match file_type {
FsFileType::File => {
std::os::windows::fs::symlink_file(oldpath, newpath)?;
}
FsFileType::Directory => {
std::os::windows::fs::symlink_dir(oldpath, newpath)?;
}
};
Ok(())
}
fn truncate(path: &Path, len: u64) -> FsResult<()> {
let file = fs::OpenOptions::new().write(true).open(path)?;
file.set_len(len)?;
Ok(())
}
fn open_options(options: OpenOptions) -> fs::OpenOptions {
let mut open_options = fs::OpenOptions::new();
if let Some(mode) = options.mode {
// mode only used if creating the file on Unix
// if not specified, defaults to 0o666
#[cfg(unix)]
{
use std::os::unix::fs::OpenOptionsExt;
open_options.mode(mode & 0o777);
}
#[cfg(not(unix))]
let _ = mode; // avoid unused warning
}
open_options.read(options.read);
open_options.create(options.create);
open_options.write(options.write);
open_options.truncate(options.truncate);
open_options.append(options.append);
open_options.create_new(options.create_new);
open_options
}
| stat | identifier_name |
std_fs.rs | // Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
#![allow(clippy::disallowed_methods)]
use std::fs;
use std::io;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use deno_core::unsync::spawn_blocking;
use deno_io::fs::File;
use deno_io::fs::FsResult;
use deno_io::fs::FsStat;
use deno_io::StdFileResourceInner;
use crate::interface::FsDirEntry;
use crate::interface::FsFileType;
use crate::FileSystem;
use crate::OpenOptions;
#[cfg(not(unix))]
use deno_io::fs::FsError;
#[derive(Debug, Clone)]
pub struct RealFs;
#[async_trait::async_trait(?Send)]
impl FileSystem for RealFs {
fn cwd(&self) -> FsResult<PathBuf> {
std::env::current_dir().map_err(Into::into)
}
fn tmp_dir(&self) -> FsResult<PathBuf> {
Ok(std::env::temp_dir())
}
fn chdir(&self, path: &Path) -> FsResult<()> {
std::env::set_current_dir(path).map_err(Into::into)
}
#[cfg(not(unix))]
fn umask(&self, _mask: Option<u32>) -> FsResult<u32> {
// TODO implement umask for Windows
// see https://github.com/nodejs/node/blob/master/src/node_process_methods.cc
// and https://docs.microsoft.com/fr-fr/cpp/c-runtime-library/reference/umask?view=vs-2019
Err(FsError::NotSupported)
}
#[cfg(unix)]
fn umask(&self, mask: Option<u32>) -> FsResult<u32> {
use nix::sys::stat::mode_t;
use nix::sys::stat::umask;
use nix::sys::stat::Mode;
let r = if let Some(mask) = mask {
// If mask provided, return previous.
umask(Mode::from_bits_truncate(mask as mode_t))
} else {
// If no mask provided, we query the current. Requires two syscalls.
let prev = umask(Mode::from_bits_truncate(0o777));
let _ = umask(prev);
prev
};
#[cfg(target_os = "linux")]
{
Ok(r.bits())
}
#[cfg(any(
target_os = "macos",
target_os = "openbsd",
target_os = "freebsd"
))]
{
Ok(r.bits() as u32)
}
}
fn open_sync(
&self,
path: &Path,
options: OpenOptions,
) -> FsResult<Rc<dyn File>> {
let opts = open_options(options);
let std_file = opts.open(path)?;
Ok(Rc::new(StdFileResourceInner::file(std_file)))
}
async fn open_async(
&self,
path: PathBuf,
options: OpenOptions,
) -> FsResult<Rc<dyn File>> {
let opts = open_options(options);
let std_file = spawn_blocking(move || opts.open(path)).await??;
Ok(Rc::new(StdFileResourceInner::file(std_file)))
}
fn mkdir_sync(
&self,
path: &Path,
recursive: bool,
mode: u32,
) -> FsResult<()> {
mkdir(path, recursive, mode)
}
async fn mkdir_async(
&self,
path: PathBuf,
recursive: bool,
mode: u32,
) -> FsResult<()> {
spawn_blocking(move || mkdir(&path, recursive, mode)).await?
}
fn chmod_sync(&self, path: &Path, mode: u32) -> FsResult<()> {
chmod(path, mode)
}
async fn chmod_async(&self, path: PathBuf, mode: u32) -> FsResult<()> {
spawn_blocking(move || chmod(&path, mode)).await?
}
fn chown_sync(
&self,
path: &Path,
uid: Option<u32>,
gid: Option<u32>,
) -> FsResult<()> {
chown(path, uid, gid)
}
async fn chown_async(
&self,
path: PathBuf,
uid: Option<u32>,
gid: Option<u32>,
) -> FsResult<()> {
spawn_blocking(move || chown(&path, uid, gid)).await?
}
fn remove_sync(&self, path: &Path, recursive: bool) -> FsResult<()> {
remove(path, recursive)
}
async fn remove_async(&self, path: PathBuf, recursive: bool) -> FsResult<()> {
spawn_blocking(move || remove(&path, recursive)).await?
}
fn copy_file_sync(&self, from: &Path, to: &Path) -> FsResult<()> {
copy_file(from, to)
}
async fn copy_file_async(&self, from: PathBuf, to: PathBuf) -> FsResult<()> {
spawn_blocking(move || copy_file(&from, &to)).await?
}
fn stat_sync(&self, path: &Path) -> FsResult<FsStat> {
stat(path).map(Into::into)
}
async fn stat_async(&self, path: PathBuf) -> FsResult<FsStat> {
spawn_blocking(move || stat(&path)).await?.map(Into::into)
}
fn lstat_sync(&self, path: &Path) -> FsResult<FsStat> {
lstat(path).map(Into::into)
}
async fn lstat_async(&self, path: PathBuf) -> FsResult<FsStat> {
spawn_blocking(move || lstat(&path)).await?.map(Into::into)
}
fn realpath_sync(&self, path: &Path) -> FsResult<PathBuf> {
realpath(path)
}
async fn realpath_async(&self, path: PathBuf) -> FsResult<PathBuf> {
spawn_blocking(move || realpath(&path)).await?
}
fn read_dir_sync(&self, path: &Path) -> FsResult<Vec<FsDirEntry>> {
read_dir(path)
}
async fn read_dir_async(&self, path: PathBuf) -> FsResult<Vec<FsDirEntry>> {
spawn_blocking(move || read_dir(&path)).await?
}
fn rename_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
fs::rename(oldpath, newpath).map_err(Into::into)
}
async fn rename_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
) -> FsResult<()> {
spawn_blocking(move || fs::rename(oldpath, newpath))
.await?
.map_err(Into::into)
}
fn link_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
fs::hard_link(oldpath, newpath).map_err(Into::into)
}
async fn link_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
) -> FsResult<()> {
spawn_blocking(move || fs::hard_link(oldpath, newpath))
.await?
.map_err(Into::into)
}
fn symlink_sync(
&self,
oldpath: &Path,
newpath: &Path,
file_type: Option<FsFileType>,
) -> FsResult<()> {
symlink(oldpath, newpath, file_type)
}
async fn symlink_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
file_type: Option<FsFileType>,
) -> FsResult<()> {
spawn_blocking(move || symlink(&oldpath, &newpath, file_type)).await?
}
fn read_link_sync(&self, path: &Path) -> FsResult<PathBuf> {
fs::read_link(path).map_err(Into::into)
}
async fn read_link_async(&self, path: PathBuf) -> FsResult<PathBuf> {
spawn_blocking(move || fs::read_link(path))
.await?
.map_err(Into::into)
}
fn truncate_sync(&self, path: &Path, len: u64) -> FsResult<()> {
truncate(path, len)
}
async fn truncate_async(&self, path: PathBuf, len: u64) -> FsResult<()> {
spawn_blocking(move || truncate(&path, len)).await?
}
fn utime_sync(
&self,
path: &Path,
atime_secs: i64,
atime_nanos: u32,
mtime_secs: i64,
mtime_nanos: u32,
) -> FsResult<()> {
let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
filetime::set_file_times(path, atime, mtime).map_err(Into::into)
}
async fn utime_async(
&self,
path: PathBuf,
atime_secs: i64,
atime_nanos: u32,
mtime_secs: i64,
mtime_nanos: u32,
) -> FsResult<()> {
let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
spawn_blocking(move || {
filetime::set_file_times(path, atime, mtime).map_err(Into::into)
})
.await?
}
fn write_file_sync(
&self,
path: &Path,
options: OpenOptions,
data: &[u8],
) -> FsResult<()> {
let opts = open_options(options);
let mut file = opts.open(path)?;
#[cfg(unix)]
if let Some(mode) = options.mode {
use std::os::unix::fs::PermissionsExt;
file.set_permissions(fs::Permissions::from_mode(mode))?;
}
file.write_all(data)?;
Ok(())
}
async fn write_file_async(
&self,
path: PathBuf,
options: OpenOptions,
data: Vec<u8>,
) -> FsResult<()> {
spawn_blocking(move || {
let opts = open_options(options);
let mut file = opts.open(path)?;
#[cfg(unix)]
if let Some(mode) = options.mode {
use std::os::unix::fs::PermissionsExt;
file.set_permissions(fs::Permissions::from_mode(mode))?;
}
file.write_all(&data)?;
Ok(())
})
.await?
}
fn read_file_sync(&self, path: &Path) -> FsResult<Vec<u8>> {
fs::read(path).map_err(Into::into)
}
async fn read_file_async(&self, path: PathBuf) -> FsResult<Vec<u8>> {
spawn_blocking(move || fs::read(path))
.await?
.map_err(Into::into)
}
}
fn mkdir(path: &Path, recursive: bool, mode: u32) -> FsResult<()> {
let mut builder = fs::DirBuilder::new();
builder.recursive(recursive);
#[cfg(unix)]
{
use std::os::unix::fs::DirBuilderExt;
builder.mode(mode);
}
#[cfg(not(unix))]
{
_ = mode;
}
builder.create(path).map_err(Into::into)
}
#[cfg(unix)]
fn chmod(path: &Path, mode: u32) -> FsResult<()> {
use std::os::unix::fs::PermissionsExt;
let permissions = fs::Permissions::from_mode(mode);
fs::set_permissions(path, permissions)?;
Ok(())
}
// TODO: implement chmod for Windows (#4357)
#[cfg(not(unix))]
fn chmod(path: &Path, _mode: u32) -> FsResult<()> {
// Still check file/dir exists on Windows
std::fs::metadata(path)?;
Err(FsError::NotSupported)
}
#[cfg(unix)]
fn chown(path: &Path, uid: Option<u32>, gid: Option<u32>) -> FsResult<()> {
use nix::unistd::chown;
use nix::unistd::Gid;
use nix::unistd::Uid;
let owner = uid.map(Uid::from_raw);
let group = gid.map(Gid::from_raw);
let res = chown(path, owner, group);
if let Err(err) = res {
return Err(io::Error::from_raw_os_error(err as i32).into());
}
Ok(())
}
// TODO: implement chown for Windows
#[cfg(not(unix))]
fn chown(_path: &Path, _uid: Option<u32>, _gid: Option<u32>) -> FsResult<()> {
Err(FsError::NotSupported)
}
fn remove(path: &Path, recursive: bool) -> FsResult<()> {
// TODO: this is racy. This should open fds, and then `unlink` those.
let metadata = fs::symlink_metadata(path)?;
let file_type = metadata.file_type();
let res = if file_type.is_dir() {
if recursive {
fs::remove_dir_all(path)
} else {
fs::remove_dir(path)
}
} else if file_type.is_symlink() {
#[cfg(unix)]
{
fs::remove_file(path)
}
#[cfg(not(unix))]
{
use std::os::windows::prelude::MetadataExt;
use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY;
if metadata.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0 {
fs::remove_dir(path)
} else {
fs::remove_file(path)
}
}
} else {
fs::remove_file(path)
};
res.map_err(Into::into)
}
fn copy_file(from: &Path, to: &Path) -> FsResult<()> {
#[cfg(target_os = "macos")]
{
use libc::clonefile;
use libc::stat;
use libc::unlink;
use std::ffi::CString;
use std::io::Read;
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::fs::PermissionsExt;
use std::os::unix::prelude::OsStrExt;
let from_str = CString::new(from.as_os_str().as_bytes()).unwrap();
let to_str = CString::new(to.as_os_str().as_bytes()).unwrap();
// SAFETY: `from` and `to` are valid C strings.
// std::fs::copy does open() + fcopyfile() on macOS. We try to use
// clonefile() instead, which is more efficient.
unsafe {
let mut st = std::mem::zeroed();
let ret = stat(from_str.as_ptr(), &mut st);
if ret != 0 {
return Err(io::Error::last_os_error().into());
}
if st.st_size > 128 * 1024 {
// Try unlink. If it fails, we are going to try clonefile() anyway.
let _ = unlink(to_str.as_ptr());
// Matches rust stdlib behavior for io::copy.
// https://github.com/rust-lang/rust/blob/3fdd578d72a24d4efc2fe2ad18eec3b6ba72271e/library/std/src/sys/unix/fs.rs#L1613-L1616
if clonefile(from_str.as_ptr(), to_str.as_ptr(), 0) == 0 {
return Ok(());
}
} else {
// Do a regular copy. fcopyfile() is an overkill for < 128KB
// files.
let mut buf = [0u8; 128 * 1024];
let mut from_file = fs::File::open(from)?;
let perm = from_file.metadata()?.permissions();
let mut to_file = fs::OpenOptions::new()
// create the file with the correct mode right away
.mode(perm.mode())
.write(true)
.create(true)
.truncate(true)
.open(to)?;
let writer_metadata = to_file.metadata()?;
if writer_metadata.is_file() {
// Set the correct file permissions, in case the file already existed.
// Don't set the permissions on already existing non-files like
// pipes/FIFOs or device nodes.
to_file.set_permissions(perm)?;
}
loop {
let nread = from_file.read(&mut buf)?;
if nread == 0 {
break;
}
to_file.write_all(&buf[..nread])?;
}
return Ok(());
}
}
// clonefile() failed, fall back to std::fs::copy().
}
fs::copy(from, to)?;
Ok(())
}
#[cfg(not(windows))]
fn stat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::metadata(path)?;
Ok(FsStat::from_std(metadata))
}
#[cfg(windows)]
fn stat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::metadata(path)?;
let mut fsstat = FsStat::from_std(metadata);
use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
let path = path.canonicalize()?;
stat_extra(&mut fsstat, &path, FILE_FLAG_BACKUP_SEMANTICS)?;
Ok(fsstat)
}
#[cfg(not(windows))]
fn lstat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::symlink_metadata(path)?;
Ok(FsStat::from_std(metadata))
}
#[cfg(windows)]
fn lstat(path: &Path) -> FsResult<FsStat> {
use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
use winapi::um::winbase::FILE_FLAG_OPEN_REPARSE_POINT;
let metadata = fs::symlink_metadata(path)?;
let mut fsstat = FsStat::from_std(metadata);
stat_extra(
&mut fsstat,
path,
FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT,
)?;
Ok(fsstat)
}
#[cfg(windows)]
fn stat_extra(
fsstat: &mut FsStat,
path: &Path,
file_flags: winapi::shared::minwindef::DWORD,
) -> FsResult<()> {
use std::os::windows::prelude::OsStrExt;
use winapi::um::fileapi::CreateFileW;
use winapi::um::fileapi::OPEN_EXISTING;
use winapi::um::handleapi::CloseHandle;
use winapi::um::handleapi::INVALID_HANDLE_VALUE;
use winapi::um::winnt::FILE_SHARE_DELETE;
use winapi::um::winnt::FILE_SHARE_READ;
use winapi::um::winnt::FILE_SHARE_WRITE;
unsafe fn get_dev(
handle: winapi::shared::ntdef::HANDLE,
) -> std::io::Result<u64> {
use winapi::shared::minwindef::FALSE;
use winapi::um::fileapi::GetFileInformationByHandle;
use winapi::um::fileapi::BY_HANDLE_FILE_INFORMATION;
let info = {
let mut info =
std::mem::MaybeUninit::<BY_HANDLE_FILE_INFORMATION>::zeroed();
if GetFileInformationByHandle(handle, info.as_mut_ptr()) == FALSE {
return Err(std::io::Error::last_os_error());
}
info.assume_init()
};
Ok(info.dwVolumeSerialNumber as u64)
}
// SAFETY: winapi calls
unsafe {
let mut path: Vec<_> = path.as_os_str().encode_wide().collect();
path.push(0);
let file_handle = CreateFileW(
path.as_ptr(),
0,
FILE_SHARE_READ | FILE_SHARE_DELETE | FILE_SHARE_WRITE,
std::ptr::null_mut(),
OPEN_EXISTING,
file_flags,
std::ptr::null_mut(),
);
if file_handle == INVALID_HANDLE_VALUE {
return Err(std::io::Error::last_os_error().into());
}
let result = get_dev(file_handle);
CloseHandle(file_handle);
fsstat.dev = result?;
Ok(())
}
}
fn realpath(path: &Path) -> FsResult<PathBuf> {
Ok(deno_core::strip_unc_prefix(path.canonicalize()?))
}
fn read_dir(path: &Path) -> FsResult<Vec<FsDirEntry>> {
let entries = fs::read_dir(path)?
.filter_map(|entry| {
let entry = entry.ok()?;
let name = entry.file_name().into_string().ok()?;
let metadata = entry.file_type();
macro_rules! method_or_false {
($method:ident) => {
if let Ok(metadata) = &metadata {
metadata.$method()
} else {
false
}
};
}
Some(FsDirEntry {
name,
is_file: method_or_false!(is_file),
is_directory: method_or_false!(is_dir),
is_symlink: method_or_false!(is_symlink),
})
}) | #[cfg(not(windows))]
fn symlink(
oldpath: &Path,
newpath: &Path,
_file_type: Option<FsFileType>,
) -> FsResult<()> {
std::os::unix::fs::symlink(oldpath, newpath)?;
Ok(())
}
#[cfg(windows)]
fn symlink(
oldpath: &Path,
newpath: &Path,
file_type: Option<FsFileType>,
) -> FsResult<()> {
let file_type = match file_type {
Some(file_type) => file_type,
None => {
let old_meta = fs::metadata(oldpath);
match old_meta {
Ok(metadata) => {
if metadata.is_file() {
FsFileType::File
} else if metadata.is_dir() {
FsFileType::Directory
} else {
return Err(FsError::Io(io::Error::new(
io::ErrorKind::InvalidInput,
"On Windows the target must be a file or directory",
)));
}
}
Err(err) if err.kind() == io::ErrorKind::NotFound => {
return Err(FsError::Io(io::Error::new(
io::ErrorKind::InvalidInput,
"On Windows an `options` argument is required if the target does not exist",
)))
}
Err(err) => return Err(err.into()),
}
}
};
match file_type {
FsFileType::File => {
std::os::windows::fs::symlink_file(oldpath, newpath)?;
}
FsFileType::Directory => {
std::os::windows::fs::symlink_dir(oldpath, newpath)?;
}
};
Ok(())
}
fn truncate(path: &Path, len: u64) -> FsResult<()> {
let file = fs::OpenOptions::new().write(true).open(path)?;
file.set_len(len)?;
Ok(())
}
fn open_options(options: OpenOptions) -> fs::OpenOptions {
let mut open_options = fs::OpenOptions::new();
if let Some(mode) = options.mode {
// mode only used if creating the file on Unix
// if not specified, defaults to 0o666
#[cfg(unix)]
{
use std::os::unix::fs::OpenOptionsExt;
open_options.mode(mode & 0o777);
}
#[cfg(not(unix))]
let _ = mode; // avoid unused warning
}
open_options.read(options.read);
open_options.create(options.create);
open_options.write(options.write);
open_options.truncate(options.truncate);
open_options.append(options.append);
open_options.create_new(options.create_new);
open_options
} | .collect();
Ok(entries)
}
| random_line_split |
std_fs.rs | // Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
#![allow(clippy::disallowed_methods)]
use std::fs;
use std::io;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use deno_core::unsync::spawn_blocking;
use deno_io::fs::File;
use deno_io::fs::FsResult;
use deno_io::fs::FsStat;
use deno_io::StdFileResourceInner;
use crate::interface::FsDirEntry;
use crate::interface::FsFileType;
use crate::FileSystem;
use crate::OpenOptions;
#[cfg(not(unix))]
use deno_io::fs::FsError;
#[derive(Debug, Clone)]
pub struct RealFs;
#[async_trait::async_trait(?Send)]
impl FileSystem for RealFs {
fn cwd(&self) -> FsResult<PathBuf> {
std::env::current_dir().map_err(Into::into)
}
fn tmp_dir(&self) -> FsResult<PathBuf> {
Ok(std::env::temp_dir())
}
fn chdir(&self, path: &Path) -> FsResult<()> {
std::env::set_current_dir(path).map_err(Into::into)
}
#[cfg(not(unix))]
fn umask(&self, _mask: Option<u32>) -> FsResult<u32> {
// TODO implement umask for Windows
// see https://github.com/nodejs/node/blob/master/src/node_process_methods.cc
// and https://docs.microsoft.com/fr-fr/cpp/c-runtime-library/reference/umask?view=vs-2019
Err(FsError::NotSupported)
}
#[cfg(unix)]
// Sets the process umask to `mask` (if given) and returns the previous
// mask; with `None`, only queries the current mask.
fn umask(&self, mask: Option<u32>) -> FsResult<u32> {
use nix::sys::stat::mode_t;
use nix::sys::stat::umask;
use nix::sys::stat::Mode;
// umask(2) always installs a new mask and returns the old one, so a pure
// query needs a set-and-restore dance.
let r = if let Some(mask) = mask {
// A mask was provided: install it and return the previous one.
umask(Mode::from_bits_truncate(mask as mode_t))
} else {
// If no mask provided, we query the current. Requires two syscalls.
let prev = umask(Mode::from_bits_truncate(0o777));
let _ = umask(prev);
prev
};
#[cfg(target_os = "linux")]
{
Ok(r.bits())
}
#[cfg(any(
target_os = "macos",
target_os = "openbsd",
target_os = "freebsd"
))]
{
// On these targets `Mode::bits()` is mode_t (u16); widen to u32.
Ok(r.bits() as u32)
}
}
fn open_sync(
&self,
path: &Path,
options: OpenOptions,
) -> FsResult<Rc<dyn File>> {
let opts = open_options(options);
let std_file = opts.open(path)?;
Ok(Rc::new(StdFileResourceInner::file(std_file)))
}
async fn open_async(
&self,
path: PathBuf,
options: OpenOptions,
) -> FsResult<Rc<dyn File>> {
let opts = open_options(options);
let std_file = spawn_blocking(move || opts.open(path)).await??;
Ok(Rc::new(StdFileResourceInner::file(std_file)))
}
fn mkdir_sync(
&self,
path: &Path,
recursive: bool,
mode: u32,
) -> FsResult<()> {
mkdir(path, recursive, mode)
}
async fn mkdir_async(
&self,
path: PathBuf,
recursive: bool,
mode: u32,
) -> FsResult<()> {
spawn_blocking(move || mkdir(&path, recursive, mode)).await?
}
fn chmod_sync(&self, path: &Path, mode: u32) -> FsResult<()> {
chmod(path, mode)
}
async fn chmod_async(&self, path: PathBuf, mode: u32) -> FsResult<()> {
spawn_blocking(move || chmod(&path, mode)).await?
}
fn chown_sync(
&self,
path: &Path,
uid: Option<u32>,
gid: Option<u32>,
) -> FsResult<()> {
chown(path, uid, gid)
}
async fn chown_async(
&self,
path: PathBuf,
uid: Option<u32>,
gid: Option<u32>,
) -> FsResult<()> {
spawn_blocking(move || chown(&path, uid, gid)).await?
}
fn remove_sync(&self, path: &Path, recursive: bool) -> FsResult<()> {
remove(path, recursive)
}
async fn remove_async(&self, path: PathBuf, recursive: bool) -> FsResult<()> {
spawn_blocking(move || remove(&path, recursive)).await?
}
fn copy_file_sync(&self, from: &Path, to: &Path) -> FsResult<()> {
copy_file(from, to)
}
async fn copy_file_async(&self, from: PathBuf, to: PathBuf) -> FsResult<()> {
spawn_blocking(move || copy_file(&from, &to)).await?
}
fn stat_sync(&self, path: &Path) -> FsResult<FsStat> {
stat(path).map(Into::into)
}
async fn stat_async(&self, path: PathBuf) -> FsResult<FsStat> {
spawn_blocking(move || stat(&path)).await?.map(Into::into)
}
fn lstat_sync(&self, path: &Path) -> FsResult<FsStat> {
lstat(path).map(Into::into)
}
async fn lstat_async(&self, path: PathBuf) -> FsResult<FsStat> {
spawn_blocking(move || lstat(&path)).await?.map(Into::into)
}
fn realpath_sync(&self, path: &Path) -> FsResult<PathBuf> {
realpath(path)
}
async fn realpath_async(&self, path: PathBuf) -> FsResult<PathBuf> {
spawn_blocking(move || realpath(&path)).await?
}
fn read_dir_sync(&self, path: &Path) -> FsResult<Vec<FsDirEntry>> {
read_dir(path)
}
async fn read_dir_async(&self, path: PathBuf) -> FsResult<Vec<FsDirEntry>> {
spawn_blocking(move || read_dir(&path)).await?
}
fn rename_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
fs::rename(oldpath, newpath).map_err(Into::into)
}
async fn rename_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
) -> FsResult<()> {
spawn_blocking(move || fs::rename(oldpath, newpath))
.await?
.map_err(Into::into)
}
fn link_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
fs::hard_link(oldpath, newpath).map_err(Into::into)
}
async fn link_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
) -> FsResult<()> {
spawn_blocking(move || fs::hard_link(oldpath, newpath))
.await?
.map_err(Into::into)
}
fn symlink_sync(
&self,
oldpath: &Path,
newpath: &Path,
file_type: Option<FsFileType>,
) -> FsResult<()> {
symlink(oldpath, newpath, file_type)
}
async fn symlink_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
file_type: Option<FsFileType>,
) -> FsResult<()> {
spawn_blocking(move || symlink(&oldpath, &newpath, file_type)).await?
}
fn read_link_sync(&self, path: &Path) -> FsResult<PathBuf> {
fs::read_link(path).map_err(Into::into)
}
async fn read_link_async(&self, path: PathBuf) -> FsResult<PathBuf> {
spawn_blocking(move || fs::read_link(path))
.await?
.map_err(Into::into)
}
fn truncate_sync(&self, path: &Path, len: u64) -> FsResult<()> {
truncate(path, len)
}
async fn truncate_async(&self, path: PathBuf, len: u64) -> FsResult<()> {
spawn_blocking(move || truncate(&path, len)).await?
}
fn utime_sync(
&self,
path: &Path,
atime_secs: i64,
atime_nanos: u32,
mtime_secs: i64,
mtime_nanos: u32,
) -> FsResult<()> {
let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
filetime::set_file_times(path, atime, mtime).map_err(Into::into)
}
async fn utime_async(
&self,
path: PathBuf,
atime_secs: i64,
atime_nanos: u32,
mtime_secs: i64,
mtime_nanos: u32,
) -> FsResult<()> {
let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
spawn_blocking(move || {
filetime::set_file_times(path, atime, mtime).map_err(Into::into)
})
.await?
}
fn write_file_sync(
&self,
path: &Path,
options: OpenOptions,
data: &[u8],
) -> FsResult<()> {
let opts = open_options(options);
let mut file = opts.open(path)?;
#[cfg(unix)]
if let Some(mode) = options.mode {
use std::os::unix::fs::PermissionsExt;
file.set_permissions(fs::Permissions::from_mode(mode))?;
}
file.write_all(data)?;
Ok(())
}
async fn write_file_async(
&self,
path: PathBuf,
options: OpenOptions,
data: Vec<u8>,
) -> FsResult<()> {
spawn_blocking(move || {
let opts = open_options(options);
let mut file = opts.open(path)?;
#[cfg(unix)]
if let Some(mode) = options.mode {
use std::os::unix::fs::PermissionsExt;
file.set_permissions(fs::Permissions::from_mode(mode))?;
}
file.write_all(&data)?;
Ok(())
})
.await?
}
fn read_file_sync(&self, path: &Path) -> FsResult<Vec<u8>> {
fs::read(path).map_err(Into::into)
}
async fn read_file_async(&self, path: PathBuf) -> FsResult<Vec<u8>> {
spawn_blocking(move || fs::read(path))
.await?
.map_err(Into::into)
}
}
// Creates the directory at `path`, optionally creating missing parents
// when `recursive` is set. On Unix the new directory receives `mode`;
// on other targets `mode` is ignored.
fn mkdir(path: &Path, recursive: bool, mode: u32) -> FsResult<()> {
let mut builder = fs::DirBuilder::new();
builder.recursive(recursive);
#[cfg(unix)]
{
use std::os::unix::fs::DirBuilderExt;
builder.mode(mode);
}
#[cfg(not(unix))]
{
// Consume `mode` so non-Unix builds do not warn about an unused binding.
_ = mode;
}
builder.create(path).map_err(Into::into)
}
#[cfg(unix)]
// Sets the Unix permission bits of `path` to `mode` (chmod(2) semantics).
fn chmod(path: &Path, mode: u32) -> FsResult<()> {
use std::os::unix::fs::PermissionsExt;
let permissions = fs::Permissions::from_mode(mode);
fs::set_permissions(path, permissions)?;
Ok(())
}
// TODO: implement chmod for Windows (#4357)
#[cfg(not(unix))]
fn chmod(path: &Path, _mode: u32) -> FsResult<()> {
// Still check file/dir exists on Windows
// (so a missing path reports NotFound rather than NotSupported).
std::fs::metadata(path)?;
Err(FsError::NotSupported)
}
#[cfg(unix)]
// Changes the owner (`uid`) and/or group (`gid`) of `path`; `None` leaves
// the corresponding id unchanged, matching chown(2).
fn chown(path: &Path, uid: Option<u32>, gid: Option<u32>) -> FsResult<()> {
use nix::unistd::chown;
use nix::unistd::Gid;
use nix::unistd::Uid;
let owner = uid.map(Uid::from_raw);
let group = gid.map(Gid::from_raw);
let res = chown(path, owner, group);
if let Err(err) = res {
// nix errors are errno values; convert back to a std io::Error.
return Err(io::Error::from_raw_os_error(err as i32).into());
}
Ok(())
}
// TODO: implement chown for Windows
#[cfg(not(unix))]
fn chown(_path: &Path, _uid: Option<u32>, _gid: Option<u32>) -> FsResult<()> {
Err(FsError::NotSupported)
}
// Removes the file, directory, or symlink at `path`. A directory is
// removed with `remove_dir_all` when `recursive` is set, otherwise with
// `remove_dir` (which fails on non-empty directories).
fn remove(path: &Path, recursive: bool) -> FsResult<()> {
// TODO: this is racy. This should open fds, and then `unlink` those.
let metadata = fs::symlink_metadata(path)?;
let file_type = metadata.file_type();
let res = if file_type.is_dir() {
if recursive {
fs::remove_dir_all(path)
} else {
fs::remove_dir(path)
}
} else if file_type.is_symlink() {
#[cfg(unix)]
{
fs::remove_file(path)
}
#[cfg(not(unix))]
{
// On Windows, a symlink to a directory must be removed with
// `remove_dir`; `remove_file` only works for file symlinks.
use std::os::windows::prelude::MetadataExt;
use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY;
if metadata.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0 {
fs::remove_dir(path)
} else {
fs::remove_file(path)
}
}
} else {
fs::remove_file(path)
};
res.map_err(Into::into)
}
// Copies the file `from` to `to`. On macOS it first attempts an efficient
// clonefile(2) for large files (falling back to `fs::copy`), and does a
// small buffered copy for files under 128 KiB; elsewhere it is `fs::copy`.
fn copy_file(from: &Path, to: &Path) -> FsResult<()> {
#[cfg(target_os = "macos")]
{
use libc::clonefile;
use libc::stat;
use libc::unlink;
use std::ffi::CString;
use std::io::Read;
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::fs::PermissionsExt;
use std::os::unix::prelude::OsStrExt;
let from_str = CString::new(from.as_os_str().as_bytes()).unwrap();
let to_str = CString::new(to.as_os_str().as_bytes()).unwrap();
// SAFETY: `from` and `to` are valid C strings.
// std::fs::copy does open() + fcopyfile() on macOS. We try to use
// clonefile() instead, which is more efficient.
unsafe {
let mut st = std::mem::zeroed();
let ret = stat(from_str.as_ptr(), &mut st);
if ret != 0 {
return Err(io::Error::last_os_error().into());
}
if st.st_size > 128 * 1024 {
// clonefile() fails if the destination exists.
// Try unlink. If it fails, we are going to try clonefile() anyway.
let _ = unlink(to_str.as_ptr());
// Matches rust stdlib behavior for io::copy.
// https://github.com/rust-lang/rust/blob/3fdd578d72a24d4efc2fe2ad18eec3b6ba72271e/library/std/src/sys/unix/fs.rs#L1613-L1616
if clonefile(from_str.as_ptr(), to_str.as_ptr(), 0) == 0 {
return Ok(());
}
} else {
// Do a regular copy. fcopyfile() is an overkill for < 128KB
// files.
let mut buf = [0u8; 128 * 1024];
let mut from_file = fs::File::open(from)?;
let perm = from_file.metadata()?.permissions();
let mut to_file = fs::OpenOptions::new()
// create the file with the correct mode right away
.mode(perm.mode())
.write(true)
.create(true)
.truncate(true)
.open(to)?;
let writer_metadata = to_file.metadata()?;
if writer_metadata.is_file() {
// Set the correct file permissions, in case the file already existed.
// Don't set the permissions on already existing non-files like
// pipes/FIFOs or device nodes.
to_file.set_permissions(perm)?;
}
loop {
let nread = from_file.read(&mut buf)?;
if nread == 0 {
break;
}
to_file.write_all(&buf[..nread])?;
}
return Ok(());
}
}
// clonefile() failed, fall back to std::fs::copy().
}
// Non-macOS targets (and the macOS clonefile fallback) land here.
fs::copy(from, to)?;
Ok(())
}
#[cfg(not(windows))]
// Queries metadata for `path`, following symlinks.
fn stat(path: &Path) -> FsResult<FsStat> {
Ok(FsStat::from_std(fs::metadata(path)?))
}
#[cfg(windows)]
// Queries metadata for `path`, following symlinks, then fills in the
// Windows-specific fields (the volume serial as `dev`) via `stat_extra`.
fn stat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::metadata(path)?;
let mut fsstat = FsStat::from_std(metadata);
use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
// Canonicalize so the handle is opened on the resolved target.
let path = path.canonicalize()?;
stat_extra(&mut fsstat, &path, FILE_FLAG_BACKUP_SEMANTICS)?;
Ok(fsstat)
}
#[cfg(not(windows))]
// Queries metadata for `path` itself, without following symlinks.
fn lstat(path: &Path) -> FsResult<FsStat> {
Ok(FsStat::from_std(fs::symlink_metadata(path)?))
}
#[cfg(windows)]
// Queries metadata for `path` itself (no symlink following), then fills in
// Windows-specific fields. FILE_FLAG_OPEN_REPARSE_POINT makes the handle
// refer to the link rather than its target.
fn lstat(path: &Path) -> FsResult<FsStat> {
use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
use winapi::um::winbase::FILE_FLAG_OPEN_REPARSE_POINT;
let metadata = fs::symlink_metadata(path)?;
let mut fsstat = FsStat::from_std(metadata);
stat_extra(
&mut fsstat,
path,
FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT,
)?;
Ok(fsstat)
}
#[cfg(windows)]
// Augments `fsstat` with information std's metadata does not expose: opens
// a metadata-only handle on `path` and stores the volume serial number in
// `fsstat.dev`. `file_flags` controls symlink/directory handle behavior.
fn stat_extra(
fsstat: &mut FsStat,
path: &Path,
file_flags: winapi::shared::minwindef::DWORD,
) -> FsResult<()> {
use std::os::windows::prelude::OsStrExt;
use winapi::um::fileapi::CreateFileW;
use winapi::um::fileapi::OPEN_EXISTING;
use winapi::um::handleapi::CloseHandle;
use winapi::um::handleapi::INVALID_HANDLE_VALUE;
use winapi::um::winnt::FILE_SHARE_DELETE;
use winapi::um::winnt::FILE_SHARE_READ;
use winapi::um::winnt::FILE_SHARE_WRITE;
// Returns the volume serial number for an open file handle.
unsafe fn get_dev(
handle: winapi::shared::ntdef::HANDLE,
) -> std::io::Result<u64> {
use winapi::shared::minwindef::FALSE;
use winapi::um::fileapi::GetFileInformationByHandle;
use winapi::um::fileapi::BY_HANDLE_FILE_INFORMATION;
let info = {
let mut info =
std::mem::MaybeUninit::<BY_HANDLE_FILE_INFORMATION>::zeroed();
if GetFileInformationByHandle(handle, info.as_mut_ptr()) == FALSE {
return Err(std::io::Error::last_os_error());
}
info.assume_init()
};
Ok(info.dwVolumeSerialNumber as u64)
}
// SAFETY: winapi calls
unsafe {
// CreateFileW needs a NUL-terminated UTF-16 path.
let mut path: Vec<_> = path.as_os_str().encode_wide().collect();
path.push(0);
let file_handle = CreateFileW(
path.as_ptr(),
// Desired access 0: we only read metadata, never file contents.
0,
FILE_SHARE_READ | FILE_SHARE_DELETE | FILE_SHARE_WRITE,
std::ptr::null_mut(),
OPEN_EXISTING,
file_flags,
std::ptr::null_mut(),
);
if file_handle == INVALID_HANDLE_VALUE {
return Err(std::io::Error::last_os_error().into());
}
// Close the handle before propagating any get_dev error.
let result = get_dev(file_handle);
CloseHandle(file_handle);
fsstat.dev = result?;
Ok(())
}
}
// Canonicalizes `path`, stripping Windows' verbatim (`\\?\`) prefix so the
// result looks like a conventional path on all platforms.
fn realpath(path: &Path) -> FsResult<PathBuf> {
Ok(deno_core::strip_unc_prefix(path.canonicalize()?))
}
// Lists the entries of the directory at `path`.
//
// Entries that fail to read, or whose name is not valid Unicode, are
// silently skipped. When an entry's file type cannot be determined, all
// three type flags are reported as false.
fn read_dir(path: &Path) -> FsResult<Vec<FsDirEntry>> {
let mut out = Vec::new();
for item in fs::read_dir(path)? {
let item = match item {
Ok(item) => item,
Err(_) => continue,
};
let name = match item.file_name().into_string() {
Ok(name) => name,
Err(_) => continue,
};
let file_type = item.file_type().ok();
out.push(FsDirEntry {
name,
is_file: file_type.map_or(false, |t| t.is_file()),
is_directory: file_type.map_or(false, |t| t.is_dir()),
is_symlink: file_type.map_or(false, |t| t.is_symlink()),
});
}
Ok(out)
}
#[cfg(not(windows))]
// Creates a symlink at `newpath` pointing to `oldpath`. The `file_type`
// hint is only meaningful on Windows and is ignored here.
fn symlink(
oldpath: &Path,
newpath: &Path,
_file_type: Option<FsFileType>,
) -> FsResult<()> {
std::os::unix::fs::symlink(oldpath, newpath).map_err(Into::into)
}
#[cfg(windows)]
// Creates a symlink at `newpath` pointing to `oldpath`. Windows requires
// knowing whether the link targets a file or a directory; when no
// `file_type` hint is given, it is inferred from the target's metadata.
fn symlink(
oldpath: &Path,
newpath: &Path,
file_type: Option<FsFileType>,
) -> FsResult<()> {
let file_type = match file_type {
Some(file_type) => file_type,
None => {
// No hint: stat the target to decide between a file and a dir link.
let old_meta = fs::metadata(oldpath);
match old_meta {
Ok(metadata) => {
if metadata.is_file() {
FsFileType::File
} else if metadata.is_dir() {
FsFileType::Directory
} else {
return Err(FsError::Io(io::Error::new(
io::ErrorKind::InvalidInput,
"On Windows the target must be a file or directory",
)));
}
}
Err(err) if err.kind() == io::ErrorKind::NotFound => {
return Err(FsError::Io(io::Error::new(
io::ErrorKind::InvalidInput,
"On Windows an `options` argument is required if the target does not exist",
)))
}
Err(err) => return Err(err.into()),
}
}
};
match file_type {
FsFileType::File => {
std::os::windows::fs::symlink_file(oldpath, newpath)?;
}
FsFileType::Directory => {
std::os::windows::fs::symlink_dir(oldpath, newpath)?;
}
};
Ok(())
}
// Resizes the file at `path` to exactly `len` bytes (truncating or
// zero-extending). The file must already exist and be writable.
fn truncate(path: &Path, len: u64) -> FsResult<()> {
fs::OpenOptions::new().write(true).open(path)?.set_len(len)?;
Ok(())
}
// Translates the crate's cross-platform `OpenOptions` into the standard
// library's `fs::OpenOptions`. On Unix, a caller-supplied `mode` (masked
// to the permission bits) is applied when the file is created; otherwise
// creation defaults to 0o666. Non-Unix targets ignore `mode`.
fn open_options(options: OpenOptions) -> fs::OpenOptions {
let mut std_options = fs::OpenOptions::new();
#[cfg(unix)]
if let Some(mode) = options.mode {
use std::os::unix::fs::OpenOptionsExt;
// Only the permission bits are meaningful here.
std_options.mode(mode & 0o777);
}
std_options
.read(options.read)
.create(options.create)
.write(options.write)
.truncate(options.truncate)
.append(options.append)
.create_new(options.create_new);
std_options
}
| {
// If mask provided, return previous.
umask(Mode::from_bits_truncate(mask as mode_t))
} | conditional_block |
folder.js | // Roaming motor behind the folder app.
// It is used whenever a user hits a directory file.
// The following code is covered by the AGPLv3 license.
(function() {
// Globals.
//
function encodePath(path) {
return encodeURIComponent(path).replace(/%2F/g, unescape);
}
// Useful DOM Elements.
var toolbar = document.getElementById('toolbar');
var search = document.getElementById('search');
var filelist = document.getElementById('filelist');
// Current working directory.
var cwd = decodeURIComponent(document.location.pathname);
if (cwd[cwd.length-1] !== '/') cwd += '/';
window.cwd = cwd;
// Fast search init.
var leaves = [];
(function() {
var links = document.querySelectorAll('#filelist>li>a');
for (var i = 0; i < links.length; i++) {
var path = links[i].textContent;
var href = links[i].href;
var type = 'file';
if (href[href.length-1] === '/') { path += '/'; type = 'folder'; }
leaves.push({path:path, meta:{type: type}});
}
})();
// Toolbar controls.
//
(function() {
var methodFromFileType = { 'text': 'PUT', 'folder': 'MKCOL' };
// File and folder creation.
function handle(type) {
// Handling newfile, newfolder.
var newFile = document.getElementById('new' + type);
newFile.addEventListener('click', function() {
var name = search.value;
if (name === '') {
search.value = type; search.focus(); search.select(); return false;
}
var xhr = new XMLHttpRequest();
var newPath = cwd + name;
xhr.open(methodFromFileType[type], newPath, true);
xhr.withCredentials = true;
xhr.onload = function(r) {
if (xhr.status < 200 || xhr.status >= 300) {
alert("Creating new file failed: " + xhr.status + " " + xhr.statusText);
} else if (newPath) {
document.location = encodePath(newPath);
}
};
xhr.send(null);
});
};
handle('folder');
handle('text');
// handle('binary');
// handle('link');
// Multiple file uploads.
var uploader = document.getElementById('uploader');
var upform = document.getElementById('upform');
var chooser = upform.content;
upform.action = cwd;
uploader.onload = function (event) {
try {
if (uploader.contentDocument.location.pathname === cwd) {
window.location.reload(true);
}
} catch(e) { alert("Failed upload"); }
};
chooser.onchange = function (event) {
if (chooser.value.length > 0) upform.submit();
};
document.getElementById('upload').onclick = function (event) {
chooser.click();
};
// Drag-and-drop file upload.
var fileDropHoverDepth = 0;
filelist.addEventListener('dragover', function(event) {
event.dataTransfer.dropEffect = 'copy';
event.preventDefault();
return false;
});
filelist.addEventListener('dragenter', function(event) {
fileDropHoverDepth++;
filelist.classList.add('dropfile');
});
var removeDragOverClass = function(event) {
fileDropHoverDepth--;
if (fileDropHoverDepth === 0) {
filelist.classList.remove('dropfile');
}
};
filelist.addEventListener('dragleave', removeDragOverClass);
filelist.addEventListener('dragend', removeDragOverClass);
filelist.addEventListener('drop', function(event) {
event.stopPropagation();
event.preventDefault();
var files = event.dataTransfer.files;
chooser.files = files;
filelist.classList.remove('dropfile');
fileDropHoverDepth = 0;
});
})();
// File navigation.
//
// State.
var pointer = -1; // Item selected (-1 means "none").
var slots; // DOM slots wherein you may show a cursor, or a space.
// (Those are initialized by the `init` function).
var fileSelectionEventListenerReset = [];
var fileHoverEventListenerReset = [];
var hoverSetsCursor = null;
// Initializes the `slots` list and sets up event listeners on them.
function initSlots() {
slots = document.querySelectorAll('#filelist>li');
setCursor(pointer);
// File selection.
for (var i = 0, len = fileSelectionEventListenerReset.length; i < len; i++) {
fileSelectionEventListenerReset[i]();
}
fileSelectionEventListenerReset = [];
for (var i = 0; i < slots.length; i++) {
slots[i].addEventListener('click', (function(i) {
var onclick = function onclick(e) {
var nomod = !(e.ctrlKey || e.shiftKey || e.altKey || e.metaKey);
if (nomod) toggleAddToSelection(i);
};
var slot = slots[i];
fileSelectionEventListenerReset.push(function resetOnClick() {
slot.removeEventListener('click', onclick);
});
return onclick;
}(i)));
}
initHoverListn(slots);
selectedFile = {};
nSelectedFiles = 0; // Number of selected files.
}
function initHoverListn() {
for (var i = 0, len = fileHoverEventListenerReset.length; i < len; i++) {
fileHoverEventListenerReset[i]();
}
fileHoverEventListenerReset = [];
for (var i = 0; i < slots.length; i++) {
slots[i].addEventListener('mouseenter', (function(i) {
var onhover = function() { hoverSetCursor(i); };
var slot = slots[i];
fileHoverEventListenerReset.push(function resetOnHover() {
slot.removeEventListener('mouseenter', onhover);
});
return onhover;
}(i)));
}
}
// Initialization occurs when the drop down entries are reset (or started). The
// entries already have the cursor.
function initFileSelection() {
// If there is an entry, set the pointer to the first entry.
if (filelist.children.length > 0) { // If there is at least one entry…
pointer = 0; // … set the pointer to the first item.
}
// Populate slots.
initSlots();
// Set the event listener.
addEventListener('keydown', keyListener, false);
}
initFileSelection();
function hoverSetCursor (entry) {
if (hoverSetsCursor != null) return;
setCursor(entry);
}
function keyboardSetCursor (entry) {
clearTimeout(hoverSetsCursor);
setCursor(entry);
var box = slots[pointer].getBoundingClientRect();
var viewportHeight = window.innerHeight;
// We remove 30px for the top toolbar.
var boxAbove = box.top - toolbar.clientHeight;
var boxBelow = box.bottom - viewportHeight;
var slotAbove = boxAbove < 0;
var slotBelow = boxBelow > 0;
var slotInvisible = slotAbove || slotBelow;
if (slotInvisible) {
hoverSetsCursor = false;
if (slotAbove) {
filelist.scrollTop += boxAbove;
} else if (slotBelow) {
filelist.scrollTop += boxBelow;
}
// Temporarily deactivate "hover selects slot".
hoverSetsCursor = setTimeout(function() { hoverSetsCursor = null; }, 100);
}
}
// Set the cursor to the entry specified.
//
// entry :: Number
function setCursor (entry) {
if (slots.length === 0) return;
entry %= slots.length;
if (entry < 0) { entry = slots.length - 1; }
if (pointer >= 0) { slots[pointer].classList.remove('focus'); }
pointer = entry;
slots[pointer].classList.add('focus');
}
var cursorIncrement = 1;
var cursorIncrementTimeout;
function tweakCursorIncrement () {
// If the timeout is not cleared, we must increase the cursor increment.
if (cursorIncrementTimeout != null) cursorIncrement += 0.1;
clearTimeout(cursorIncrementTimeout);
cursorIncrementTimeout = setTimeout(function() { cursorIncrement = 1; }, 100);
}
function nextEntry () {
tweakCursorIncrement();
keyboardSetCursor(pointer + (cursorIncrement >>> 0));
}
function prevEntry () {
tweakCursorIncrement();
keyboardSetCursor(pointer - (cursorIncrement >>> 0));
}
// When the search widget is focused, if the user presses up/down keys, and
// the enter key.
function keyListener (e) {
var nomod = !(e.ctrlKey || e.shiftKey || e.altKey || e.metaKey);
var empty = search.value.length === 0;
if (nomod) {
if (e.keyCode === 40) { // Down.
nextEntry();
e.preventDefault(); // Don't change the search cursor position.
} else if (e.keyCode === 38) { // Up.
prevEntry();
e.preventDefault(); // Don't change the search cursor position.
} else if (e.keyCode === 13 || (empty && e.keyCode === 39)) {
// Enter or (Empty and Right).
window.location = slots[pointer].firstElementChild.href;
} else if (empty && (e.keyCode === 8 || e.keyCode === 37)) {
// Empty and (Backspace or Left).
var loc = window.location;
window.location = loc.protocol + '//' + loc.host +
loc.pathname.replace(/\/[^\/]+[\/]*$/,'/') + loc.search;
}
// Additional keys when the search widget is not focused.
if (document.activeElement !== search) {
if (e.keyCode === 74) { // J (same as down).
nextEntry();
} else if (e.keyCode === 75) { // K (same as up).
prevEntry();
} else if (e.keyCode === 88) { // X (click).
slots[pointer].click();
} else if (e.keyCode === 191) { // /.
search.focus();
e.preventDefault(); // Don't input it in the search bar.
}
} else { // The search widget is focused.
if (e.keyCode === 27) { // ESC.
search.blur();
}
}
}
}
// Fuzzy matching.
//
// Comparator for Array#sort: orders Score objects by descending star count.
function sorter (file1, file2) { return file2.stars - file1.stars; }
// Score pairs a leaf with its match rating against the current query.
//
// `leaf` is an Object like {path: '/', meta: {type: 'folder'}}
// `stars` is a Number to compare leaves according to the query.
// `indexes` is the positions of matched letters.
function Score(leaf, stars, indexes) {
  // Folders should always end with a trailing slash; patch it on when the
  // server omits it. Note: this mutates the shared `leaf` object in place.
  if (leaf.meta.type == 'folder' && leaf.path[leaf.path.length - 1] !== '/') {
    leaf.path += '/'; // FIXME server side bug
  }
  this.leaf = leaf;
  // `(stars|0)` coerces any non-numeric input to an integer; default is 0.
  this.stars = (stars|0) || 0;
  this.indexes = indexes || [];
}
Score.prototype = {
  // Adds `n` stars (coerced to an integer) to this score.
  add: function(n) { this.stars += (n|0); },
};
// Return a Score object, {leaf, stars, indexes}:
//
// - `leaf` is an Object like {path: '/', meta: {type: 'folder'}}
// - `stars` is a Number to compare leaves according to the query.
// - `indexes` is the positions of matched letters (collected back-to-front,
//   so the array is in descending order).
//
// `query` is a String to fuzzy match.
function score(leaf, query) {
  var queryLower = query.toLowerCase();
  var leafLower = leaf.path.toLowerCase();
  var score = new Score(leaf);
  var index = queryLower.length - 1;
  var countlettersmatched = 0; // Consecutive letters matched.
  var alpha = /[a-zA-Z0-9]/;
  var lookingAhead = false; // Grant one last run and terminate.
  // The idea is to begin with the end of the `query`, and for each letter
  // matched, the letter is captured, its position influences the score, and we
  // go to the next letter.
  for (var i = leafLower.length - 1; i >= 0; i--) {
    var l = leafLower[i]; // letter
    // Bonus: a match that ends at a word boundary (non-alphanumeric
    // separator) is worth two extra stars.
    if (countlettersmatched > 0 && !alpha.test(l)) {
      score.add(2);
    }
    if (lookingAhead) break;
    if (l === queryLower[index]) {
      score.indexes.push(i);
      // Consecutive matches are rewarded progressively.
      score.add(1 + countlettersmatched);
      countlettersmatched++;
      index--;
    } else {
      countlettersmatched = 0;
    }
    if (index < 0) lookingAhead = true; // Grant last run now.
  }
  // One extra star when the whole query was consumed.
  if (lookingAhead) { score.add(1); }
  return score;
}
// List of Scores = {leaf, stars, indexes}, ordered by the stars.
// Leaves that do not match the whole query are discarded.
//
// `leaves` is an Array of Objects with paths from here to the leaf.
// `query` is a String to fuzzy match.
// Fuzzy-match every leaf against `query` and return the matching Scores,
// best first. A leaf only counts as a match when every character of the
// query was found (i.e. as many matched indexes as query letters).
function fuzzy (leaves, query) {
  var matches = [];
  leaves.forEach(function(leaf) {
    if (leaf.path.length === 0) return;
    var result = score(leaf, query);
    if (result.indexes.length === query.length) {
      matches.push(result);
    }
  });
  return matches.sort(sorter);
}
// Escape the characters that are meaningful in HTML text content so that
// file names cannot inject markup into the listing. (The previous version's
// replacement strings had lost their entities and were identity no-ops,
// escaping nothing.) Safe for text content only, not attribute values.
function htmlEscape(html) {
  return html.replace(/&/g, '&amp;')
             .replace(/</g, '&lt;')
             .replace(/>/g, '&gt;');
}
// Return an html string for the score's path with all matched letters
// wrapped in bold tags; the unmatched text is HTML-escaped.
function scorify (score) {
  var path = score.leaf.path;
  var html = '';
  var beforelet = '<b>';
  var afterlet = '</b>';
  var index = 0;
  // `score.indexes` is in descending order (built back-to-front), so walk
  // it in reverse to emit the path left to right.
  for (var i = score.indexes.length - 1; i >= 0; i--) {
    html += htmlEscape(path.slice(index, score.indexes[i])) + beforelet
      + htmlEscape(path[score.indexes[i]]) + afterlet;
    index = score.indexes[i] + 1;
  }
  // Trailing unmatched tail of the path.
  html += htmlEscape(path.slice(index));
  return html;
}
function countSlashes(path) {
var count = 0; // count slashes.
for (var i = 0; i < path.length; i++) {
if (path[i] === '/' && i !== path.length - 1) { count++; }
}
return count;
}
// List of Score = {leaf, stars, indexes}, ordered by the stars.
// Leaves that do not match the whole query are discarded.
//
// `leaves` is an Array of Objects with paths from here to the leaf.
// `query` is a String to fuzzy match.
function exactSearch(leaves, query) {
var found = [];
var querySlashes = countSlashes(query);
if (query.length === 0) { querySlashes = -1; }
for (var i = 0; i < leaves.length; i++) {
leaves[i].path = leaves[i].path.replace(new RegExp('^' + cwd), '');
if (leaves[i].path.length === 0) continue;
var pathSlashes = countSlashes(leaves[i].path);
if (leaves[i].path.slice(0, query.length) === query &&
(pathSlashes === querySlashes || pathSlashes === querySlashes + 1)) {
found.push(new Score(leaves[i]));
}
}
return found;
}
// Display the search results on the page.
function showfuzzy () {
var html = '';
var query = search.value;
var scores = [];
// If it ends with a slash, do an exact search.
if (query.length === 0 || query[query.length - 1] === '/') {
scores = exactSearch(leaves, query);
}
if (scores.length === 0) {
scores = fuzzy(leaves, query);
}
for (var i = 0; i < scores.length; i++) {
// There is no remaining query (if the query is not complete, it is
// not shown).
var path = scorify(scores[i]);
html += '<li class=' +
(scores[i].leaf.meta.type === 'folder' ? 'folder' : 'file' ) +
'><a href="' +
encodePath(cwd + scores[i].leaf.path) + '">' + path + '</a></li>';
}
filelist.innerHTML = html;
resetSelection();
initFileSelection();
}
search.addEventListener('input', showfuzzy, false);
// Fetch list of files.
(function() {
if (folderContent) { setupLeaves(); }
var xhr = new XMLHttpRequest();
xhr.open('GET', cwd + '?app=data', true);
xhr.setRequestHeader('Depth', '2');
xhr.withCredentials = true;
xhr.onload = function() {
var response = xhr.responseText;
if (response) {
folderContent = JSON.parse(xhr.responseText);
setupLeaves();
}
};
xhr.send(null);
}());
function setupLeaves() { leaves = populateLeaves(folderContent); }
function populateLeaves(files, leaves, path) {
leaves = leaves || [];
path = path || '';
for (var filename in files) {
if (path === '') { var subpath = filename; }
else { var subpath = path + '/' + filename; }
var subfiles = files[filename].files;
leaves.push({
path: subpath,
meta: {type: files[filename].meta.type}
});
if (subfiles) { populateLeaves(subfiles, leaves, subpath); }
}
return leaves;
}
// File menus.
//
// File deletion.
var deleteFileBut = document.getElementById('deletefiles');
function onDeleteFile(event) {
if (confirm('Do you really wish to delete'
+ (nSelectedFiles === 1
? (' this file?')
: (' those ' + nSelectedFiles + ' files?')))) {
Object.keys(selectedFile).forEach(function (selectedFileIndex) {
var slot = slots[+selectedFileIndex];
var href = slot.firstElementChild.getAttribute('href');
if (href[0] !== '/') { href = cwd + href; }
var xhr = new XMLHttpRequest();
xhr.open('DELETE', href, true);
xhr.withCredentials = true;
xhr.onload = function() {
if (xhr.status < 200 || xhr.status >= 300) {
alert('The file ' + href + ' did not get deleted: ' + xhr.status + ' '
+ xhr.statusText);
} else {
// Remove the item from the list.
slot.parentNode.removeChild(slot);
initSlots();
}
};
xhr.send();
});
}
}
deleteFileBut.addEventListener('click', onDeleteFile);
// File configuration.
var confFileBut = document.getElementById('conffile');
function queryParams() {
return window.location.search.slice(1).split('&').reduce(function(acc, param) {
var parts = param.split('=');
var key = parts[0], value = parts[1];
if (acc[key] instanceof Array) {
acc[key].push(value);
} else if (acc[key] !== undefined) {
acc[key] = [acc[key], value];
} else {
acc[key] = value;
}
return acc;
}, Object.create(null));
}
function searchFromQueryParams(params) {
var search = '?';
for (key in params) {
if (params[key] instanceof Array) {
for (var i = 0; i < params[key].length; i++) {
search += encodeURIComponent(key) + '=' + encodeURIComponent(params[key][i]) + '&';
}
} else {
search += encodeURIComponent(key) + '=' + (params[key] !== undefined? encodeURIComponent(params[key]): '') + '&';
}
}
return search.slice(0, -1);
}
function onConfFile(event) {
var selIdx = 0;
for (var i in selectedFile) { selIdx = i; break; }
window.location = slots[selIdx].firstElementChild.href + searchFromQueryParams({app: 'conf'});
}
confFileBut.addEventListener('click', onConfFile);
// Map from slot index to truthy values, true if they're selected.
var selectedFile = Object.create(null);
var nSelectedFiles = 0; // Number of selected files.
// Add slot of index i to the selection of files,
// or remove from the selection if it is selected.
function toggleAddToSelection(i) {
if (!!selectedFile[i]) {
| e {
// Select it.
selectedFile[i] = true;
nSelectedFiles++;
slots[i].classList.add('selected-file');
if (nSelectedFiles === 1) {
// Show the file-specific buttons.
deleteFileBut.style.display = 'block';
deleteFileBut.title = 'Delete file';
confFileBut.style.display = 'block';
confFileBut.title = 'Configure file';
} else if (nSelectedFiles > 1) {
deleteFileBut.title = 'Delete files';
confFileBut.style.display = 'none';
}
}
}
// Unselect all selected files.
function resetSelection() {
for (var selectedFileIndex in selectedFile) {
toggleAddToSelection(selectedFileIndex);
}
}
}) ();
| // It is already selected.
delete selectedFile[i];
nSelectedFiles--;
slots[i].classList.remove('selected-file');
if (nSelectedFiles <= 0) {
// Hide the file-specific buttons.
deleteFileBut.style.display = 'none';
confFileBut.style.display = 'none';
} else if (nSelectedFiles === 1) {
deleteFileBut.title = 'Delete file';
confFileBut.style.display = 'block';
confFileBut.title = 'Configure file';
}
} els | conditional_block |
folder.js | // Roaming motor behind the folder app.
// It is used whenever a user hits a directory file.
// The following code is covered by the AGPLv3 license.
(function() {
// Globals.
//
function encodePath(path) {
return encodeURIComponent(path).replace(/%2F/g, unescape);
}
// Useful DOM Elements.
var toolbar = document.getElementById('toolbar');
var search = document.getElementById('search');
var filelist = document.getElementById('filelist');
// Current working directory.
var cwd = decodeURIComponent(document.location.pathname);
if (cwd[cwd.length-1] !== '/') cwd += '/';
window.cwd = cwd;
// Fast search init.
var leaves = [];
(function() {
var links = document.querySelectorAll('#filelist>li>a');
for (var i = 0; i < links.length; i++) {
var path = links[i].textContent;
var href = links[i].href;
var type = 'file';
if (href[href.length-1] === '/') { path += '/'; type = 'folder'; }
leaves.push({path:path, meta:{type: type}});
}
})();
// Toolbar controls.
//
(function() {
var methodFromFileType = { 'text': 'PUT', 'folder': 'MKCOL' };
// File and folder creation.
function handle(type) {
// Handling newfile, newfolder.
var newFile = document.getElementById('new' + type);
newFile.addEventListener('click', function() {
var name = search.value;
if (name === '') {
search.value = type; search.focus(); search.select(); return false;
}
var xhr = new XMLHttpRequest();
var newPath = cwd + name;
xhr.open(methodFromFileType[type], newPath, true);
xhr.withCredentials = true;
xhr.onload = function(r) {
if (xhr.status < 200 || xhr.status >= 300) {
alert("Creating new file failed: " + xhr.status + " " + xhr.statusText);
} else if (newPath) {
document.location = encodePath(newPath);
}
};
xhr.send(null);
});
};
handle('folder');
handle('text');
// handle('binary');
// handle('link');
// Multiple file uploads.
var uploader = document.getElementById('uploader');
var upform = document.getElementById('upform');
var chooser = upform.content;
upform.action = cwd;
uploader.onload = function (event) {
try {
if (uploader.contentDocument.location.pathname === cwd) {
window.location.reload(true);
}
} catch(e) { alert("Failed upload"); }
};
chooser.onchange = function (event) {
if (chooser.value.length > 0) upform.submit();
};
document.getElementById('upload').onclick = function (event) {
chooser.click();
};
// Drag-and-drop file upload.
var fileDropHoverDepth = 0;
filelist.addEventListener('dragover', function(event) {
event.dataTransfer.dropEffect = 'copy';
event.preventDefault();
return false;
});
filelist.addEventListener('dragenter', function(event) {
fileDropHoverDepth++;
filelist.classList.add('dropfile');
});
var removeDragOverClass = function(event) {
fileDropHoverDepth--;
if (fileDropHoverDepth === 0) {
filelist.classList.remove('dropfile');
}
};
filelist.addEventListener('dragleave', removeDragOverClass);
filelist.addEventListener('dragend', removeDragOverClass);
filelist.addEventListener('drop', function(event) {
event.stopPropagation();
event.preventDefault();
var files = event.dataTransfer.files;
chooser.files = files;
filelist.classList.remove('dropfile');
fileDropHoverDepth = 0;
});
})();
// File navigation.
//
// State.
var pointer = -1; // Item selected (-1 means "none").
var slots; // DOM slots wherein you may show a cursor, or a space.
// (Those are initialized by the `init` function).
var fileSelectionEventListenerReset = [];
var fileHoverEventListenerReset = [];
var hoverSetsCursor = null;
// Initializes the `slots` list and sets up event listeners on them.
function initSlots() {
slots = document.querySelectorAll('#filelist>li');
setCursor(pointer);
// File selection.
for (var i = 0, len = fileSelectionEventListenerReset.length; i < len; i++) {
fileSelectionEventListenerReset[i]();
}
fileSelectionEventListenerReset = [];
for (var i = 0; i < slots.length; i++) {
slots[i].addEventListener('click', (function(i) {
var onclick = function onclick(e) {
var nomod = !(e.ctrlKey || e.shiftKey || e.altKey || e.metaKey);
if (nomod) toggleAddToSelection(i);
};
var slot = slots[i];
fileSelectionEventListenerReset.push(function resetOnClick() {
slot.removeEventListener('click', onclick);
});
return onclick;
}(i)));
}
initHoverListn(slots);
selectedFile = {};
nSelectedFiles = 0; // Number of selected files.
}
function initHoverListn() {
for (var i = 0, len = fileHoverEventListenerReset.length; i < len; i++) {
fileHoverEventListenerReset[i]();
}
fileHoverEventListenerReset = [];
for (var i = 0; i < slots.length; i++) {
slots[i].addEventListener('mouseenter', (function(i) {
var onhover = function() { hoverSetCursor(i); };
var slot = slots[i];
fileHoverEventListenerReset.push(function resetOnHover() {
slot.removeEventListener('mouseenter', onhover);
});
return onhover;
}(i)));
}
}
// Initialization occurs when the drop down entries are reset (or started). The
// entries already have the cursor.
function initFileSelection() {
// If there is an entry, set the pointer to the first entry.
if (filelist.children.length > 0) { // If there is at least one entry…
pointer = 0; // … set the pointer to the first item.
}
// Populate slots.
initSlots();
// Set the event listener.
addEventListener('keydown', keyListener, false);
}
initFileSelection();
function hoverSetCursor (entry) {
if (hoverSetsCursor != null) return;
setCursor(entry);
}
function keyboardSetCursor (entry) {
clearTimeout(hoverSetsCursor);
setCursor(entry);
var box = slots[pointer].getBoundingClientRect();
var viewportHeight = window.innerHeight;
// We remove 30px for the top toolbar.
var boxAbove = box.top - toolbar.clientHeight;
var boxBelow = box.bottom - viewportHeight;
var slotAbove = boxAbove < 0;
var slotBelow = boxBelow > 0;
var slotInvisible = slotAbove || slotBelow;
if (slotInvisible) {
hoverSetsCursor = false;
if (slotAbove) {
filelist.scrollTop += boxAbove;
} else if (slotBelow) {
filelist.scrollTop += boxBelow;
}
// Temporarily deactivate "hover selects slot".
hoverSetsCursor = setTimeout(function() { hoverSetsCursor = null; }, 100);
}
}
// Set the cursor to the entry specified.
//
// entry :: Number
function setCursor (entry) {
if (slots.length === 0) return;
entry %= slots.length;
if (entry < 0) { entry = slots.length - 1; }
if (pointer >= 0) { slots[pointer].classList.remove('focus'); }
pointer = entry;
slots[pointer].classList.add('focus');
}
var cursorIncrement = 1;
var cursorIncrementTimeout;
function tweakCursorIncrement () {
// If the timeout is not cleared, we must increase the cursor increment.
if (cursorIncrementTimeout != null) cursorIncrement += 0.1;
clearTimeout(cursorIncrementTimeout);
cursorIncrementTimeout = setTimeout(function() { cursorIncrement = 1; }, 100);
}
function nextEntry () {
tweakCursorIncrement();
keyboardSetCursor(pointer + (cursorIncrement >>> 0));
}
function prevEntry () {
tweakCursorIncrement();
keyboardSetCursor(pointer - (cursorIncrement >>> 0));
}
// When the search widget is focused, if the user presses up/down keys, and
// the enter key.
function keyListener (e) {
var nomod = !(e.ctrlKey || e.shiftKey || e.altKey || e.metaKey);
var empty = search.value.length === 0;
if (nomod) {
if (e.keyCode === 40) { // Down.
nextEntry();
e.preventDefault(); // Don't change the search cursor position.
} else if (e.keyCode === 38) { // Up.
prevEntry();
e.preventDefault(); // Don't change the search cursor position.
} else if (e.keyCode === 13 || (empty && e.keyCode === 39)) {
// Enter or (Empty and Right).
window.location = slots[pointer].firstElementChild.href;
} else if (empty && (e.keyCode === 8 || e.keyCode === 37)) {
// Empty and (Backspace or Left).
var loc = window.location;
window.location = loc.protocol + '//' + loc.host +
loc.pathname.replace(/\/[^\/]+[\/]*$/,'/') + loc.search;
}
// Additional keys when the search widget is not focused.
if (document.activeElement !== search) {
if (e.keyCode === 74) { // J (same as down).
nextEntry();
} else if (e.keyCode === 75) { // K (same as up).
prevEntry();
} else if (e.keyCode === 88) { // X (click).
slots[pointer].click();
} else if (e.keyCode === 191) { // /.
search.focus();
e.preventDefault(); // Don't input it in the search bar.
}
} else { // The search widget is focused.
if (e.keyCode === 27) { // ESC.
search.blur();
}
}
}
}
// Fuzzy matching.
//
function sorter (file1, file2) { return file2.stars - file1.stars; }
// `leaf` is an Object like {path: '/', meta: {type: 'folder'}}
// `stars` is a Number to compare leaves according to the query.
// `indexes` is the positions of matched letters.
function Score(leaf, stars, indexes) {
if (leaf.meta.type == 'folder' && leaf.path[leaf.path.length - 1] !== '/') {
leaf.path += '/'; // FIXME server side bug
}
this.leaf = leaf;
this.stars = (stars|0) || 0;
this.indexes = indexes || [];
}
Score.prototype = {
add: function(n) { this.stars += (n|0); },
};
// Return a Score object, {leaf, stars, indexes}:
//
// - `leaf` is an Object like {path: '/', meta: {type: 'folder'}}
// - `stars` is a Number to compare leaves according to the query.
// - `indexes` is the positions of matched letters.
//
// `leaf` is an Object like {path: '/', meta: {type: 'folder'}}
// `query` is a String to fuzzy match.
function score(leaf, query) {
var queryLower = query.toLowerCase();
var leafLower = leaf.path.toLowerCase();
var score = new Score(leaf);
var index = queryLower.length - 1;
var countlettersmatched = 0; // Consecutive letters matched.
var alpha = /[a-zA-Z0-9]/;
var lookingAhead = false; // Grant one last run and terminate.
// The idea is to begin with the end of the `query`, and for each letter
// matched, the letter is captured, its position influences the score, and we
// go to the next letter.
for (var i = leafLower.length - 1; i >= 0; i--) {
var l = leafLower[i]; // letter
if (countlettersmatched > 0 && !alpha.test(l)) {
score.add(2);
}
if (lookingAhead) break;
if (l === queryLower[index]) {
score.indexes.push(i);
score.add(1 + countlettersmatched);
countlettersmatched++;
index--;
} else {
countlettersmatched = 0;
}
if (index < 0) lookingAhead = true; // Grant last run now.
}
if (lookingAhead) { score.add(1); }
return score;
}
// List of Scores = {leaf, stars, indexes}, ordered by the stars.
// Leaves that do not match the whole query are discarded.
//
// `leaves` is an Array of Objects with paths from here to the leaf.
// `query` is a String to fuzzy match.
function fuzzy (leaves, query) {
var fuzzied = [];
for (var i = 0; i < leaves.length; i++) {
if (leaves[i].path.length === 0) continue;
var sc = score(leaves[i], query);
if (sc.indexes.length === query.length) {
fuzzied.push(sc);
}
}
return fuzzied.sort(sorter);
}
function htmlEscape(html) {
return html.replace(/&/g, '&')
.replace(/</g, '<')
.replace(/>/g, '>');
}
// Return an html string with all matched letters in bold.
function scorify (score) {
var path = score.leaf.path;
var html = '';
var beforelet = '<b>';
var afterlet = '</b>';
var index = 0;
for (var i = score.indexes.length - 1; i >= 0; i--) {
html += htmlEscape(path.slice(index, score.indexes[i])) + beforelet
+ htmlEscape(path[score.indexes[i]]) + afterlet;
index = score.indexes[i] + 1;
}
html += htmlEscape(path.slice(index));
return html;
}
function countSlashes(path) {
var count = 0; // count slashes.
for (var i = 0; i < path.length; i++) {
if (path[i] === '/' && i !== path.length - 1) { count++; }
}
return count;
}
// List of Score = {leaf, stars, indexes}, ordered by the stars.
// Leaves that do not match the whole query are discarded.
//
// `leaves` is an Array of Objects with paths from here to the leaf.
// `query` is a String to fuzzy match.
function exactSearch(leaves, query) {
var found = [];
var querySlashes = countSlashes(query);
if (query.length === 0) { querySlashes = -1; }
for (var i = 0; i < leaves.length; i++) {
leaves[i].path = leaves[i].path.replace(new RegExp('^' + cwd), '');
if (leaves[i].path.length === 0) continue;
var pathSlashes = countSlashes(leaves[i].path);
if (leaves[i].path.slice(0, query.length) === query &&
(pathSlashes === querySlashes || pathSlashes === querySlashes + 1)) {
found.push(new Score(leaves[i]));
}
}
return found;
}
// Display the search results on the page.
function showfuzzy () {
var html = '';
var query = search.value;
var scores = [];
// If it ends with a slash, do an exact search.
if (query.length === 0 || query[query.length - 1] === '/') {
scores = exactSearch(leaves, query);
}
if (scores.length === 0) {
scores = fuzzy(leaves, query);
}
for (var i = 0; i < scores.length; i++) {
// There is no remaining query (if the query is not complete, it is
// not shown).
var path = scorify(scores[i]);
html += '<li class=' +
(scores[i].leaf.meta.type === 'folder' ? 'folder' : 'file' ) +
'><a href="' +
encodePath(cwd + scores[i].leaf.path) + '">' + path + '</a></li>';
}
filelist.innerHTML = html;
resetSelection();
initFileSelection();
}
search.addEventListener('input', showfuzzy, false);
// Fetch list of files.
(function() {
if (folderContent) { setupLeaves(); }
var xhr = new XMLHttpRequest();
xhr.open('GET', cwd + '?app=data', true);
xhr.setRequestHeader('Depth', '2');
xhr.withCredentials = true;
xhr.onload = function() {
var response = xhr.responseText;
if (response) {
folderContent = JSON.parse(xhr.responseText);
setupLeaves();
}
};
xhr.send(null);
}());
function setupLeaves() { leaves = populateLeaves(folderContent); }
function populateLeaves(files, leaves, path) {
leaves = leaves || [];
path = path || '';
for (var filename in files) {
if (path === '') { var subpath = filename; }
else { var subpath = path + '/' + filename; }
var subfiles = files[filename].files;
leaves.push({
path: subpath,
meta: {type: files[filename].meta.type}
});
if (subfiles) { populateLeaves(subfiles, leaves, subpath); }
}
return leaves;
}
// File menus.
//
// File deletion.
var deleteFileBut = document.getElementById('deletefiles');
function onDeleteFile(event) {
if (confirm('Do you really wish to delete'
+ (nSelectedFiles === 1
? (' this file?')
: (' those ' + nSelectedFiles + ' files?')))) {
Object.keys(selectedFile).forEach(function (selectedFileIndex) {
var slot = slots[+selectedFileIndex];
var href = slot.firstElementChild.getAttribute('href');
if (href[0] !== '/') { href = cwd + href; }
var xhr = new XMLHttpRequest();
xhr.open('DELETE', href, true);
xhr.withCredentials = true;
xhr.onload = function() {
if (xhr.status < 200 || xhr.status >= 300) {
alert('The file ' + href + ' did not get deleted: ' + xhr.status + ' '
+ xhr.statusText);
} else {
// Remove the item from the list.
slot.parentNode.removeChild(slot);
initSlots();
}
};
xhr.send();
});
}
}
deleteFileBut.addEventListener('click', onDeleteFile);
// File configuration.
var confFileBut = document.getElementById('conffile');
function queryParams() {
return window.location.search.slice(1).split('&').reduce(function(acc, param) {
var parts = param.split('=');
var key = parts[0], value = parts[1];
if (acc[key] instanceof Array) {
acc[key].push(value);
} else if (acc[key] !== undefined) {
acc[key] = [acc[key], value];
} else {
acc[key] = value;
}
return acc;
}, Object.create(null));
}
function searchFromQueryParams(params) {
| ction onConfFile(event) {
var selIdx = 0;
for (var i in selectedFile) { selIdx = i; break; }
window.location = slots[selIdx].firstElementChild.href + searchFromQueryParams({app: 'conf'});
}
confFileBut.addEventListener('click', onConfFile);
// Map from slot index to truthy values, true if they're selected.
var selectedFile = Object.create(null);
var nSelectedFiles = 0; // Number of selected files.
// Add slot of index i to the selection of files,
// or remove from the selection if it is selected.
function toggleAddToSelection(i) {
if (!!selectedFile[i]) {
// It is already selected.
delete selectedFile[i];
nSelectedFiles--;
slots[i].classList.remove('selected-file');
if (nSelectedFiles <= 0) {
// Hide the file-specific buttons.
deleteFileBut.style.display = 'none';
confFileBut.style.display = 'none';
} else if (nSelectedFiles === 1) {
deleteFileBut.title = 'Delete file';
confFileBut.style.display = 'block';
confFileBut.title = 'Configure file';
}
} else {
// Select it.
selectedFile[i] = true;
nSelectedFiles++;
slots[i].classList.add('selected-file');
if (nSelectedFiles === 1) {
// Show the file-specific buttons.
deleteFileBut.style.display = 'block';
deleteFileBut.title = 'Delete file';
confFileBut.style.display = 'block';
confFileBut.title = 'Configure file';
} else if (nSelectedFiles > 1) {
deleteFileBut.title = 'Delete files';
confFileBut.style.display = 'none';
}
}
}
// Unselect all selected files.
function resetSelection() {
for (var selectedFileIndex in selectedFile) {
toggleAddToSelection(selectedFileIndex);
}
}
}) ();
| var search = '?';
for (key in params) {
if (params[key] instanceof Array) {
for (var i = 0; i < params[key].length; i++) {
search += encodeURIComponent(key) + '=' + encodeURIComponent(params[key][i]) + '&';
}
} else {
search += encodeURIComponent(key) + '=' + (params[key] !== undefined? encodeURIComponent(params[key]): '') + '&';
}
}
return search.slice(0, -1);
}
fun | identifier_body |
folder.js | // Roaming motor behind the folder app.
// It is used whenever a user hits a directory file.
// The following code is covered by the AGPLv3 license.
(function() {
// Globals.
//
function encodePath(path) {
return encodeURIComponent(path).replace(/%2F/g, unescape);
}
// Useful DOM Elements.
var toolbar = document.getElementById('toolbar');
var search = document.getElementById('search');
var filelist = document.getElementById('filelist');
// Current working directory.
var cwd = decodeURIComponent(document.location.pathname);
if (cwd[cwd.length-1] !== '/') cwd += '/';
window.cwd = cwd;
// Fast search init.
var leaves = [];
(function() {
var links = document.querySelectorAll('#filelist>li>a');
for (var i = 0; i < links.length; i++) {
var path = links[i].textContent;
var href = links[i].href;
var type = 'file';
if (href[href.length-1] === '/') { path += '/'; type = 'folder'; }
leaves.push({path:path, meta:{type: type}});
}
})();
// Toolbar controls.
//
(function() {
var methodFromFileType = { 'text': 'PUT', 'folder': 'MKCOL' };
// File and folder creation.
function handle(type) {
// Handling newfile, newfolder.
var newFile = document.getElementById('new' + type);
newFile.addEventListener('click', function() {
var name = search.value;
if (name === '') {
search.value = type; search.focus(); search.select(); return false;
}
var xhr = new XMLHttpRequest();
var newPath = cwd + name;
xhr.open(methodFromFileType[type], newPath, true);
xhr.withCredentials = true;
xhr.onload = function(r) {
if (xhr.status < 200 || xhr.status >= 300) {
alert("Creating new file failed: " + xhr.status + " " + xhr.statusText);
} else if (newPath) {
document.location = encodePath(newPath);
}
};
xhr.send(null);
});
};
handle('folder');
handle('text');
// handle('binary');
// handle('link');
// Multiple file uploads.
var uploader = document.getElementById('uploader');
var upform = document.getElementById('upform');
var chooser = upform.content;
upform.action = cwd;
uploader.onload = function (event) {
try {
if (uploader.contentDocument.location.pathname === cwd) {
window.location.reload(true);
}
} catch(e) { alert("Failed upload"); }
};
chooser.onchange = function (event) {
if (chooser.value.length > 0) upform.submit();
};
document.getElementById('upload').onclick = function (event) {
chooser.click();
};
// Drag-and-drop file upload.
var fileDropHoverDepth = 0;
filelist.addEventListener('dragover', function(event) {
event.dataTransfer.dropEffect = 'copy';
event.preventDefault();
return false;
});
filelist.addEventListener('dragenter', function(event) {
fileDropHoverDepth++;
filelist.classList.add('dropfile');
});
var removeDragOverClass = function(event) {
fileDropHoverDepth--;
if (fileDropHoverDepth === 0) {
filelist.classList.remove('dropfile');
}
};
filelist.addEventListener('dragleave', removeDragOverClass);
filelist.addEventListener('dragend', removeDragOverClass);
filelist.addEventListener('drop', function(event) {
event.stopPropagation();
event.preventDefault();
var files = event.dataTransfer.files;
chooser.files = files;
filelist.classList.remove('dropfile');
fileDropHoverDepth = 0;
});
})();
// File navigation.
//
// State.
var pointer = -1; // Item selected (-1 means "none").
var slots; // DOM slots wherein you may show a cursor, or a space.
// (Those are initialized by the `init` function).
var fileSelectionEventListenerReset = [];
var fileHoverEventListenerReset = [];
var hoverSetsCursor = null;
// Initializes the `slots` list and sets up event listeners on them.
function initSlots() {
slots = document.querySelectorAll('#filelist>li');
setCursor(pointer);
// File selection.
for (var i = 0, len = fileSelectionEventListenerReset.length; i < len; i++) {
fileSelectionEventListenerReset[i]();
}
fileSelectionEventListenerReset = [];
for (var i = 0; i < slots.length; i++) {
slots[i].addEventListener('click', (function(i) {
var onclick = function onclick(e) {
var nomod = !(e.ctrlKey || e.shiftKey || e.altKey || e.metaKey);
if (nomod) toggleAddToSelection(i);
};
var slot = slots[i];
fileSelectionEventListenerReset.push(function resetOnClick() {
slot.removeEventListener('click', onclick);
});
return onclick;
}(i)));
}
initHoverListn(slots);
selectedFile = {};
nSelectedFiles = 0; // Number of selected files.
}
function initHoverListn() {
for (var i = 0, len = fileHoverEventListenerReset.length; i < len; i++) {
fileHoverEventListenerReset[i]();
}
fileHoverEventListenerReset = [];
for (var i = 0; i < slots.length; i++) {
slots[i].addEventListener('mouseenter', (function(i) {
var onhover = function() { hoverSetCursor(i); };
var slot = slots[i];
fileHoverEventListenerReset.push(function resetOnHover() {
slot.removeEventListener('mouseenter', onhover);
});
return onhover;
}(i)));
}
}
// Initialization occurs when the drop down entries are reset (or started). The
// entries already have the cursor.
function initFileSelection() {
// If there is an entry, set the pointer to the first entry.
if (filelist.children.length > 0) { // If there is at least one entry…
pointer = 0; // … set the pointer to the first item.
}
// Populate slots.
initSlots();
// Set the event listener.
addEventListener('keydown', keyListener, false);
}
initFileSelection();
function hoverSetCursor (entry) {
if (hoverSetsCursor != null) return;
setCursor(entry);
}
function keyboardSetCursor (entry) {
clearTimeout(hoverSetsCursor);
setCursor(entry);
var box = slots[pointer].getBoundingClientRect();
var viewportHeight = window.innerHeight;
// We remove 30px for the top toolbar.
var boxAbove = box.top - toolbar.clientHeight;
var boxBelow = box.bottom - viewportHeight;
var slotAbove = boxAbove < 0;
var slotBelow = boxBelow > 0;
var slotInvisible = slotAbove || slotBelow;
if (slotInvisible) {
hoverSetsCursor = false;
if (slotAbove) {
filelist.scrollTop += boxAbove;
} else if (slotBelow) {
filelist.scrollTop += boxBelow;
}
// Temporarily deactivate "hover selects slot".
hoverSetsCursor = setTimeout(function() { hoverSetsCursor = null; }, 100);
}
}
// Set the cursor to the entry specified.
//
// entry :: Number
function setCursor (entry) {
if (slots.length === 0) return;
entry %= slots.length;
if (entry < 0) { entry = slots.length - 1; }
if (pointer >= 0) { slots[pointer].classList.remove('focus'); }
pointer = entry;
slots[pointer].classList.add('focus');
}
var cursorIncrement = 1;
var cursorIncrementTimeout;
function tweakCursorIncrement () {
// If the timeout is not cleared, we must increase the cursor increment.
if (cursorIncrementTimeout != null) cursorIncrement += 0.1;
clearTimeout(cursorIncrementTimeout);
cursorIncrementTimeout = setTimeout(function() { cursorIncrement = 1; }, 100);
}
function nextEntry () {
tweakCursorIncrement();
keyboardSetCursor(pointer + (cursorIncrement >>> 0));
}
function prevEntry () {
tweakCursorIncrement();
keyboardSetCursor(pointer - (cursorIncrement >>> 0));
}
// When the search widget is focused, if the user presses up/down keys, and
// the enter key.
function keyListener (e) {
var nomod = !(e.ctrlKey || e.shiftKey || e.altKey || e.metaKey);
var empty = search.value.length === 0;
if (nomod) {
if (e.keyCode === 40) { // Down.
nextEntry();
e.preventDefault(); // Don't change the search cursor position.
} else if (e.keyCode === 38) { // Up.
prevEntry();
e.preventDefault(); // Don't change the search cursor position.
} else if (e.keyCode === 13 || (empty && e.keyCode === 39)) {
// Enter or (Empty and Right).
window.location = slots[pointer].firstElementChild.href;
} else if (empty && (e.keyCode === 8 || e.keyCode === 37)) {
// Empty and (Backspace or Left).
var loc = window.location;
window.location = loc.protocol + '//' + loc.host +
loc.pathname.replace(/\/[^\/]+[\/]*$/,'/') + loc.search;
}
// Additional keys when the search widget is not focused.
if (document.activeElement !== search) {
if (e.keyCode === 74) { // J (same as down).
nextEntry();
} else if (e.keyCode === 75) { // K (same as up).
prevEntry();
} else if (e.keyCode === 88) { // X (click).
slots[pointer].click();
} else if (e.keyCode === 191) { // /.
search.focus();
e.preventDefault(); // Don't input it in the search bar.
}
} else { // The search widget is focused.
if (e.keyCode === 27) { // ESC.
search.blur();
}
}
}
}
// Fuzzy matching.
//
function sorter (file1, file2) { return file2.stars - file1.stars; }
// `leaf` is an Object like {path: '/', meta: {type: 'folder'}}
// `stars` is a Number to compare leaves according to the query.
// `indexes` is the positions of matched letters.
function Score(leaf, stars, indexes) {
if (leaf.meta.type == 'folder' && leaf.path[leaf.path.length - 1] !== '/') {
leaf.path += '/'; // FIXME server side bug
}
this.leaf = leaf;
this.stars = (stars|0) || 0;
this.indexes = indexes || [];
}
Score.prototype = {
add: function(n) { this.stars += (n|0); },
};
// Return a Score object, {leaf, stars, indexes}:
//
// - `leaf` is an Object like {path: '/', meta: {type: 'folder'}}
// - `stars` is a Number to compare leaves according to the query.
// - `indexes` is the positions of matched letters.
//
// `leaf` is an Object like {path: '/', meta: {type: 'folder'}}
// `query` is a String to fuzzy match.
function score(leaf, query) {
var queryLower = query.toLowerCase();
var leafLower = leaf.path.toLowerCase();
var score = new Score(leaf);
var index = queryLower.length - 1;
var countlettersmatched = 0; // Consecutive letters matched.
var alpha = /[a-zA-Z0-9]/;
var lookingAhead = false; // Grant one last run and terminate.
// The idea is to begin with the end of the `query`, and for each letter
// matched, the letter is captured, its position influences the score, and we
// go to the next letter.
for (var i = leafLower.length - 1; i >= 0; i--) {
var l = leafLower[i]; // letter
if (countlettersmatched > 0 && !alpha.test(l)) {
score.add(2);
}
if (lookingAhead) break;
if (l === queryLower[index]) {
score.indexes.push(i);
score.add(1 + countlettersmatched);
countlettersmatched++;
index--;
} else {
countlettersmatched = 0;
}
if (index < 0) lookingAhead = true; // Grant last run now.
}
if (lookingAhead) { score.add(1); }
return score;
}
// List of Scores = {leaf, stars, indexes}, ordered by the stars.
// Leaves that do not match the whole query are discarded.
//
// `leaves` is an Array of Objects with paths from here to the leaf.
// `query` is a String to fuzzy match.
function fuzzy (leaves, query) {
var fuzzied = [];
for (var i = 0; i < leaves.length; i++) {
if (leaves[i].path.length === 0) continue;
var sc = score(leaves[i], query);
if (sc.indexes.length === query.length) {
fuzzied.push(sc);
}
}
return fuzzied.sort(sorter);
}
function htmlEscape(html) {
return html.replace(/&/g, '&')
.replace(/</g, '<')
.replace(/>/g, '>');
}
// Return an html string with all matched letters in bold.
function scorify (score) {
var path = score.leaf.path;
var html = '';
var beforelet = '<b>';
var afterlet = '</b>';
var index = 0;
for (var i = score.indexes.length - 1; i >= 0; i--) {
html += htmlEscape(path.slice(index, score.indexes[i])) + beforelet
+ htmlEscape(path[score.indexes[i]]) + afterlet;
index = score.indexes[i] + 1;
}
html += htmlEscape(path.slice(index));
return html;
}
function countSlashes(path) {
var count = 0; // count slashes.
for (var i = 0; i < path.length; i++) {
if (path[i] === '/' && i !== path.length - 1) { count++; }
}
return count;
}
// List of Score = {leaf, stars, indexes}, ordered by the stars.
// Leaves that do not match the whole query are discarded.
//
// `leaves` is an Array of Objects with paths from here to the leaf.
// `query` is a String to fuzzy match.
function exactSearch(leaves, query) {
var found = [];
var querySlashes = countSlashes(query);
if (query.length === 0) { querySlashes = -1; }
for (var i = 0; i < leaves.length; i++) {
leaves[i].path = leaves[i].path.replace(new RegExp('^' + cwd), '');
if (leaves[i].path.length === 0) continue;
var pathSlashes = countSlashes(leaves[i].path);
if (leaves[i].path.slice(0, query.length) === query &&
(pathSlashes === querySlashes || pathSlashes === querySlashes + 1)) {
found.push(new Score(leaves[i]));
}
}
return found;
}
// Display the search results on the page.
function showfuzzy () {
var html = '';
var query = search.value;
var scores = [];
// If it ends with a slash, do an exact search.
if (query.length === 0 || query[query.length - 1] === '/') {
scores = exactSearch(leaves, query);
}
if (scores.length === 0) {
scores = fuzzy(leaves, query);
}
for (var i = 0; i < scores.length; i++) {
// There is no remaining query (if the query is not complete, it is
// not shown).
var path = scorify(scores[i]);
html += '<li class=' +
(scores[i].leaf.meta.type === 'folder' ? 'folder' : 'file' ) +
'><a href="' +
encodePath(cwd + scores[i].leaf.path) + '">' + path + '</a></li>';
}
filelist.innerHTML = html;
resetSelection();
initFileSelection();
}
search.addEventListener('input', showfuzzy, false);
// Fetch list of files.
(function() {
if (folderContent) { setupLeaves(); }
var xhr = new XMLHttpRequest();
xhr.open('GET', cwd + '?app=data', true);
xhr.setRequestHeader('Depth', '2');
xhr.withCredentials = true;
xhr.onload = function() {
var response = xhr.responseText;
if (response) {
folderContent = JSON.parse(xhr.responseText);
setupLeaves();
}
};
xhr.send(null);
}());
function setupLeaves() { leaves = populateLeaves(folderContent); }
function populateLeaves(files, leaves, path) {
leaves = leaves || [];
path = path || '';
for (var filename in files) {
if (path === '') { var subpath = filename; }
else { var subpath = path + '/' + filename; }
var subfiles = files[filename].files;
leaves.push({
path: subpath,
meta: {type: files[filename].meta.type}
});
if (subfiles) { populateLeaves(subfiles, leaves, subpath); }
}
return leaves;
}
// File menus.
//
// File deletion.
var deleteFileBut = document.getElementById('deletefiles');
function onDeleteFile(event) {
if (confirm('Do you really wish to delete'
+ (nSelectedFiles === 1
? (' this file?')
: (' those ' + nSelectedFiles + ' files?')))) {
Object.keys(selectedFile).forEach(function (selectedFileIndex) {
var slot = slots[+selectedFileIndex];
var href = slot.firstElementChild.getAttribute('href');
if (href[0] !== '/') { href = cwd + href; }
var xhr = new XMLHttpRequest();
xhr.open('DELETE', href, true);
xhr.withCredentials = true;
xhr.onload = function() {
if (xhr.status < 200 || xhr.status >= 300) {
alert('The file ' + href + ' did not get deleted: ' + xhr.status + ' '
+ xhr.statusText);
} else {
// Remove the item from the list.
slot.parentNode.removeChild(slot);
initSlots();
}
};
xhr.send();
});
}
}
deleteFileBut.addEventListener('click', onDeleteFile);
// File configuration.
var confFileBut = document.getElementById('conffile');
function queryParams() { | } else if (acc[key] !== undefined) {
acc[key] = [acc[key], value];
} else {
acc[key] = value;
}
return acc;
}, Object.create(null));
}
function searchFromQueryParams(params) {
var search = '?';
for (key in params) {
if (params[key] instanceof Array) {
for (var i = 0; i < params[key].length; i++) {
search += encodeURIComponent(key) + '=' + encodeURIComponent(params[key][i]) + '&';
}
} else {
search += encodeURIComponent(key) + '=' + (params[key] !== undefined? encodeURIComponent(params[key]): '') + '&';
}
}
return search.slice(0, -1);
}
function onConfFile(event) {
var selIdx = 0;
for (var i in selectedFile) { selIdx = i; break; }
window.location = slots[selIdx].firstElementChild.href + searchFromQueryParams({app: 'conf'});
}
confFileBut.addEventListener('click', onConfFile);
// Map from slot index to truthy values, true if they're selected.
var selectedFile = Object.create(null);
var nSelectedFiles = 0; // Number of selected files.
// Add slot of index i to the selection of files,
// or remove from the selection if it is selected.
function toggleAddToSelection(i) {
if (!!selectedFile[i]) {
// It is already selected.
delete selectedFile[i];
nSelectedFiles--;
slots[i].classList.remove('selected-file');
if (nSelectedFiles <= 0) {
// Hide the file-specific buttons.
deleteFileBut.style.display = 'none';
confFileBut.style.display = 'none';
} else if (nSelectedFiles === 1) {
deleteFileBut.title = 'Delete file';
confFileBut.style.display = 'block';
confFileBut.title = 'Configure file';
}
} else {
// Select it.
selectedFile[i] = true;
nSelectedFiles++;
slots[i].classList.add('selected-file');
if (nSelectedFiles === 1) {
// Show the file-specific buttons.
deleteFileBut.style.display = 'block';
deleteFileBut.title = 'Delete file';
confFileBut.style.display = 'block';
confFileBut.title = 'Configure file';
} else if (nSelectedFiles > 1) {
deleteFileBut.title = 'Delete files';
confFileBut.style.display = 'none';
}
}
}
// Unselect all selected files.
function resetSelection() {
for (var selectedFileIndex in selectedFile) {
toggleAddToSelection(selectedFileIndex);
}
}
}) (); | return window.location.search.slice(1).split('&').reduce(function(acc, param) {
var parts = param.split('=');
var key = parts[0], value = parts[1];
if (acc[key] instanceof Array) {
acc[key].push(value); | random_line_split |
folder.js | // Roaming motor behind the folder app.
// It is used whenever a user hits a directory file.
// The following code is covered by the AGPLv3 license.
(function() {
// Globals.
//
function encodePath(path) {
return encodeURIComponent(path).replace(/%2F/g, unescape);
}
// Useful DOM Elements.
var toolbar = document.getElementById('toolbar');
var search = document.getElementById('search');
var filelist = document.getElementById('filelist');
// Current working directory.
var cwd = decodeURIComponent(document.location.pathname);
if (cwd[cwd.length-1] !== '/') cwd += '/';
window.cwd = cwd;
// Fast search init.
var leaves = [];
(function() {
var links = document.querySelectorAll('#filelist>li>a');
for (var i = 0; i < links.length; i++) {
var path = links[i].textContent;
var href = links[i].href;
var type = 'file';
if (href[href.length-1] === '/') { path += '/'; type = 'folder'; }
leaves.push({path:path, meta:{type: type}});
}
})();
// Toolbar controls.
//
(function() {
var methodFromFileType = { 'text': 'PUT', 'folder': 'MKCOL' };
// File and folder creation.
function handle(type) {
// Handling newfile, newfolder.
var newFile = document.getElementById('new' + type);
newFile.addEventListener('click', function() {
var name = search.value;
if (name === '') {
search.value = type; search.focus(); search.select(); return false;
}
var xhr = new XMLHttpRequest();
var newPath = cwd + name;
xhr.open(methodFromFileType[type], newPath, true);
xhr.withCredentials = true;
xhr.onload = function(r) {
if (xhr.status < 200 || xhr.status >= 300) {
alert("Creating new file failed: " + xhr.status + " " + xhr.statusText);
} else if (newPath) {
document.location = encodePath(newPath);
}
};
xhr.send(null);
});
};
handle('folder');
handle('text');
// handle('binary');
// handle('link');
// Multiple file uploads.
var uploader = document.getElementById('uploader');
var upform = document.getElementById('upform');
var chooser = upform.content;
upform.action = cwd;
uploader.onload = function (event) {
try {
if (uploader.contentDocument.location.pathname === cwd) {
window.location.reload(true);
}
} catch(e) { alert("Failed upload"); }
};
chooser.onchange = function (event) {
if (chooser.value.length > 0) upform.submit();
};
document.getElementById('upload').onclick = function (event) {
chooser.click();
};
// Drag-and-drop file upload.
var fileDropHoverDepth = 0;
filelist.addEventListener('dragover', function(event) {
event.dataTransfer.dropEffect = 'copy';
event.preventDefault();
return false;
});
filelist.addEventListener('dragenter', function(event) {
fileDropHoverDepth++;
filelist.classList.add('dropfile');
});
var removeDragOverClass = function(event) {
fileDropHoverDepth--;
if (fileDropHoverDepth === 0) {
filelist.classList.remove('dropfile');
}
};
filelist.addEventListener('dragleave', removeDragOverClass);
filelist.addEventListener('dragend', removeDragOverClass);
filelist.addEventListener('drop', function(event) {
event.stopPropagation();
event.preventDefault();
var files = event.dataTransfer.files;
chooser.files = files;
filelist.classList.remove('dropfile');
fileDropHoverDepth = 0;
});
})();
// File navigation.
//
// State.
var pointer = -1; // Item selected (-1 means "none").
var slots; // DOM slots wherein you may show a cursor, or a space.
// (Those are initialized by the `init` function).
var fileSelectionEventListenerReset = [];
var fileHoverEventListenerReset = [];
var hoverSetsCursor = null;
// Initializes the `slots` list and sets up event listeners on them.
function initSlots() {
slots = document.querySelectorAll('#filelist>li');
setCursor(pointer);
// File selection.
for (var i = 0, len = fileSelectionEventListenerReset.length; i < len; i++) {
fileSelectionEventListenerReset[i]();
}
fileSelectionEventListenerReset = [];
for (var i = 0; i < slots.length; i++) {
slots[i].addEventListener('click', (function(i) {
var onclick = function onclick(e) {
var nomod = !(e.ctrlKey || e.shiftKey || e.altKey || e.metaKey);
if (nomod) toggleAddToSelection(i);
};
var slot = slots[i];
fileSelectionEventListenerReset.push(function resetOnClick() {
slot.removeEventListener('click', onclick);
});
return onclick;
}(i)));
}
initHoverListn(slots);
selectedFile = {};
nSelectedFiles = 0; // Number of selected files.
}
function initHoverListn() {
for (var i = 0, len = fileHoverEventListenerReset.length; i < len; i++) {
fileHoverEventListenerReset[i]();
}
fileHoverEventListenerReset = [];
for (var i = 0; i < slots.length; i++) {
slots[i].addEventListener('mouseenter', (function(i) {
var onhover = function() { hoverSetCursor(i); };
var slot = slots[i];
fileHoverEventListenerReset.push(function resetOnHover() {
slot.removeEventListener('mouseenter', onhover);
});
return onhover;
}(i)));
}
}
// Initialization occurs when the drop down entries are reset (or started). The
// entries already have the cursor.
function initFileSelection() {
// If there is an entry, set the pointer to the first entry.
if (filelist.children.length > 0) { // If there is at least one entry…
pointer = 0; // … set the pointer to the first item.
}
// Populate slots.
initSlots();
// Set the event listener.
addEventListener('keydown', keyListener, false);
}
initFileSelection();
function hoverSetCursor (entry) {
if (hoverSetsCursor != null) return;
setCursor(entry);
}
function keyboardSetCursor (entry) {
clearTimeout(hoverSetsCursor);
setCursor(entry);
var box = slots[pointer].getBoundingClientRect();
var viewportHeight = window.innerHeight;
// We remove 30px for the top toolbar.
var boxAbove = box.top - toolbar.clientHeight;
var boxBelow = box.bottom - viewportHeight;
var slotAbove = boxAbove < 0;
var slotBelow = boxBelow > 0;
var slotInvisible = slotAbove || slotBelow;
if (slotInvisible) {
hoverSetsCursor = false;
if (slotAbove) {
filelist.scrollTop += boxAbove;
} else if (slotBelow) {
filelist.scrollTop += boxBelow;
}
// Temporarily deactivate "hover selects slot".
hoverSetsCursor = setTimeout(function() { hoverSetsCursor = null; }, 100);
}
}
// Set the cursor to the entry specified.
//
// entry :: Number
function setCursor (entry) {
if (slots.length === 0) return;
entry %= slots.length;
if (entry < 0) { entry = slots.length - 1; }
if (pointer >= 0) { slots[pointer].classList.remove('focus'); }
pointer = entry;
slots[pointer].classList.add('focus');
}
var cursorIncrement = 1;
var cursorIncrementTimeout;
function tweakCursorIncrement () {
// If the timeout is not cleared, we must increase the cursor increment.
if (cursorIncrementTimeout != null) cursorIncrement += 0.1;
clearTimeout(cursorIncrementTimeout);
cursorIncrementTimeout = setTimeout(function() { cursorIncrement = 1; }, 100);
}
function nextEntry () {
tweakCursorIncrement();
keyboardSetCursor(pointer + (cursorIncrement >>> 0));
}
function prevEntry () {
tweakCursorIncrement();
keyboardSetCursor(pointer - (cursorIncrement >>> 0));
}
// When the search widget is focused, if the user presses up/down keys, and
// the enter key.
function keyListener (e) {
var nomod = !(e.ctrlKey || e.shiftKey || e.altKey || e.metaKey);
var empty = search.value.length === 0;
if (nomod) {
if (e.keyCode === 40) { // Down.
nextEntry();
e.preventDefault(); // Don't change the search cursor position.
} else if (e.keyCode === 38) { // Up.
prevEntry();
e.preventDefault(); // Don't change the search cursor position.
} else if (e.keyCode === 13 || (empty && e.keyCode === 39)) {
// Enter or (Empty and Right).
window.location = slots[pointer].firstElementChild.href;
} else if (empty && (e.keyCode === 8 || e.keyCode === 37)) {
// Empty and (Backspace or Left).
var loc = window.location;
window.location = loc.protocol + '//' + loc.host +
loc.pathname.replace(/\/[^\/]+[\/]*$/,'/') + loc.search;
}
// Additional keys when the search widget is not focused.
if (document.activeElement !== search) {
if (e.keyCode === 74) { // J (same as down).
nextEntry();
} else if (e.keyCode === 75) { // K (same as up).
prevEntry();
} else if (e.keyCode === 88) { // X (click).
slots[pointer].click();
} else if (e.keyCode === 191) { // /.
search.focus();
e.preventDefault(); // Don't input it in the search bar.
}
} else { // The search widget is focused.
if (e.keyCode === 27) { // ESC.
search.blur();
}
}
}
}
// Fuzzy matching.
//
function sorter (file1, file2) { return file2.stars - file1.stars; }
// `leaf` is an Object like {path: '/', meta: {type: 'folder'}}
// `stars` is a Number to compare leaves according to the query.
// `indexes` is the positions of matched letters.
function Score(leaf, stars, indexes) {
if (leaf.meta.type == 'folder' && leaf.path[leaf.path.length - 1] !== '/') {
leaf.path += '/'; // FIXME server side bug
}
this.leaf = leaf;
this.stars = (stars|0) || 0;
this.indexes = indexes || [];
}
Score.prototype = {
add: function(n) { this.stars += (n|0); },
};
// Return a Score object, {leaf, stars, indexes}:
//
// - `leaf` is an Object like {path: '/', meta: {type: 'folder'}}
// - `stars` is a Number to compare leaves according to the query.
// - `indexes` is the positions of matched letters.
//
// `leaf` is an Object like {path: '/', meta: {type: 'folder'}}
// `query` is a String to fuzzy match.
function score(leaf, query) {
var queryLower = query.toLowerCase();
var leafLower = leaf.path.toLowerCase();
var score = new Score(leaf);
var index = queryLower.length - 1;
var countlettersmatched = 0; // Consecutive letters matched.
var alpha = /[a-zA-Z0-9]/;
var lookingAhead = false; // Grant one last run and terminate.
// The idea is to begin with the end of the `query`, and for each letter
// matched, the letter is captured, its position influences the score, and we
// go to the next letter.
for (var i = leafLower.length - 1; i >= 0; i--) {
var l = leafLower[i]; // letter
if (countlettersmatched > 0 && !alpha.test(l)) {
score.add(2);
}
if (lookingAhead) break;
if (l === queryLower[index]) {
score.indexes.push(i);
score.add(1 + countlettersmatched);
countlettersmatched++;
index--;
} else {
countlettersmatched = 0;
}
if (index < 0) lookingAhead = true; // Grant last run now.
}
if (lookingAhead) { score.add(1); }
return score;
}
// List of Scores = {leaf, stars, indexes}, ordered by the stars.
// Leaves that do not match the whole query are discarded.
//
// `leaves` is an Array of Objects with paths from here to the leaf.
// `query` is a String to fuzzy match.
function fuzzy (leaves, query) {
var fuzzied = [];
for (var i = 0; i < leaves.length; i++) {
if (leaves[i].path.length === 0) continue;
var sc = score(leaves[i], query);
if (sc.indexes.length === query.length) {
fuzzied.push(sc);
}
}
return fuzzied.sort(sorter);
}
function htmlEscape(html) {
return html.replace(/&/g, '&')
.replace(/</g, '<')
.replace(/>/g, '>');
}
// Return an html string with all matched letters in bold.
function scor | ore) {
var path = score.leaf.path;
var html = '';
var beforelet = '<b>';
var afterlet = '</b>';
var index = 0;
for (var i = score.indexes.length - 1; i >= 0; i--) {
html += htmlEscape(path.slice(index, score.indexes[i])) + beforelet
+ htmlEscape(path[score.indexes[i]]) + afterlet;
index = score.indexes[i] + 1;
}
html += htmlEscape(path.slice(index));
return html;
}
function countSlashes(path) {
var count = 0; // count slashes.
for (var i = 0; i < path.length; i++) {
if (path[i] === '/' && i !== path.length - 1) { count++; }
}
return count;
}
// List of Score = {leaf, stars, indexes}, ordered by the stars.
// Leaves that do not match the whole query are discarded.
//
// `leaves` is an Array of Objects with paths from here to the leaf.
// `query` is a String to fuzzy match.
function exactSearch(leaves, query) {
var found = [];
var querySlashes = countSlashes(query);
if (query.length === 0) { querySlashes = -1; }
for (var i = 0; i < leaves.length; i++) {
leaves[i].path = leaves[i].path.replace(new RegExp('^' + cwd), '');
if (leaves[i].path.length === 0) continue;
var pathSlashes = countSlashes(leaves[i].path);
if (leaves[i].path.slice(0, query.length) === query &&
(pathSlashes === querySlashes || pathSlashes === querySlashes + 1)) {
found.push(new Score(leaves[i]));
}
}
return found;
}
// Display the search results on the page.
function showfuzzy () {
var html = '';
var query = search.value;
var scores = [];
// If it ends with a slash, do an exact search.
if (query.length === 0 || query[query.length - 1] === '/') {
scores = exactSearch(leaves, query);
}
if (scores.length === 0) {
scores = fuzzy(leaves, query);
}
for (var i = 0; i < scores.length; i++) {
// There is no remaining query (if the query is not complete, it is
// not shown).
var path = scorify(scores[i]);
html += '<li class=' +
(scores[i].leaf.meta.type === 'folder' ? 'folder' : 'file' ) +
'><a href="' +
encodePath(cwd + scores[i].leaf.path) + '">' + path + '</a></li>';
}
filelist.innerHTML = html;
resetSelection();
initFileSelection();
}
search.addEventListener('input', showfuzzy, false);
// Fetch list of files.
(function() {
if (folderContent) { setupLeaves(); }
var xhr = new XMLHttpRequest();
xhr.open('GET', cwd + '?app=data', true);
xhr.setRequestHeader('Depth', '2');
xhr.withCredentials = true;
xhr.onload = function() {
var response = xhr.responseText;
if (response) {
folderContent = JSON.parse(xhr.responseText);
setupLeaves();
}
};
xhr.send(null);
}());
function setupLeaves() { leaves = populateLeaves(folderContent); }
function populateLeaves(files, leaves, path) {
leaves = leaves || [];
path = path || '';
for (var filename in files) {
if (path === '') { var subpath = filename; }
else { var subpath = path + '/' + filename; }
var subfiles = files[filename].files;
leaves.push({
path: subpath,
meta: {type: files[filename].meta.type}
});
if (subfiles) { populateLeaves(subfiles, leaves, subpath); }
}
return leaves;
}
// File menus.
//
// File deletion.
var deleteFileBut = document.getElementById('deletefiles');
function onDeleteFile(event) {
if (confirm('Do you really wish to delete'
+ (nSelectedFiles === 1
? (' this file?')
: (' those ' + nSelectedFiles + ' files?')))) {
Object.keys(selectedFile).forEach(function (selectedFileIndex) {
var slot = slots[+selectedFileIndex];
var href = slot.firstElementChild.getAttribute('href');
if (href[0] !== '/') { href = cwd + href; }
var xhr = new XMLHttpRequest();
xhr.open('DELETE', href, true);
xhr.withCredentials = true;
xhr.onload = function() {
if (xhr.status < 200 || xhr.status >= 300) {
alert('The file ' + href + ' did not get deleted: ' + xhr.status + ' '
+ xhr.statusText);
} else {
// Remove the item from the list.
slot.parentNode.removeChild(slot);
initSlots();
}
};
xhr.send();
});
}
}
deleteFileBut.addEventListener('click', onDeleteFile);
// File configuration.
var confFileBut = document.getElementById('conffile');
function queryParams() {
return window.location.search.slice(1).split('&').reduce(function(acc, param) {
var parts = param.split('=');
var key = parts[0], value = parts[1];
if (acc[key] instanceof Array) {
acc[key].push(value);
} else if (acc[key] !== undefined) {
acc[key] = [acc[key], value];
} else {
acc[key] = value;
}
return acc;
}, Object.create(null));
}
function searchFromQueryParams(params) {
var search = '?';
for (key in params) {
if (params[key] instanceof Array) {
for (var i = 0; i < params[key].length; i++) {
search += encodeURIComponent(key) + '=' + encodeURIComponent(params[key][i]) + '&';
}
} else {
search += encodeURIComponent(key) + '=' + (params[key] !== undefined? encodeURIComponent(params[key]): '') + '&';
}
}
return search.slice(0, -1);
}
function onConfFile(event) {
var selIdx = 0;
for (var i in selectedFile) { selIdx = i; break; }
window.location = slots[selIdx].firstElementChild.href + searchFromQueryParams({app: 'conf'});
}
confFileBut.addEventListener('click', onConfFile);
// Map from slot index to truthy values, true if they're selected.
var selectedFile = Object.create(null);
var nSelectedFiles = 0; // Number of selected files.
// Add slot of index i to the selection of files,
// or remove from the selection if it is selected.
function toggleAddToSelection(i) {
if (!!selectedFile[i]) {
// It is already selected.
delete selectedFile[i];
nSelectedFiles--;
slots[i].classList.remove('selected-file');
if (nSelectedFiles <= 0) {
// Hide the file-specific buttons.
deleteFileBut.style.display = 'none';
confFileBut.style.display = 'none';
} else if (nSelectedFiles === 1) {
deleteFileBut.title = 'Delete file';
confFileBut.style.display = 'block';
confFileBut.title = 'Configure file';
}
} else {
// Select it.
selectedFile[i] = true;
nSelectedFiles++;
slots[i].classList.add('selected-file');
if (nSelectedFiles === 1) {
// Show the file-specific buttons.
deleteFileBut.style.display = 'block';
deleteFileBut.title = 'Delete file';
confFileBut.style.display = 'block';
confFileBut.title = 'Configure file';
} else if (nSelectedFiles > 1) {
deleteFileBut.title = 'Delete files';
confFileBut.style.display = 'none';
}
}
}
// Unselect all selected files.
function resetSelection() {
for (var selectedFileIndex in selectedFile) {
toggleAddToSelection(selectedFileIndex);
}
}
}) ();
| ify (sc | identifier_name |
main.py | import sys, pygame, pygame.freetype, time, math, random, re
from includes.constants import *
from includes.helpers import *
from includes.elements import *
pygame.init()
pygame.display.set_caption('Retro Tetris')
startTime = time.time()
def displayText(text, color, pos):
textSurface, rect = GAME_FONT.render(text, color)
screen.fill(pygame.Color("black"), (pos[0], pos[1], rect.width, rect.height))
screen.blit(textSurface, pos)
return (textSurface, rect)
def updateTime(color, pos):
elapsedTimeInSecs = math.floor(time.time() - startTime)
formattedTime = formatSec(elapsedTimeInSecs)
screen.fill(pygame.Color("black"), (35, 50, 75, 18))
newTextSurface, rect = GAME_FONT.render(formattedTime, color)
screen.blit(newTextSurface, pos)
def moveBlockX(currentElement, d):
"""
Shift the current element horizontally by 1 unit
Also make sure that the rotation does not place the element out of the map
"""
# If element is at the very right/left of the map do not allow further X-shift in that dir.
for sq in currentElement:
if (d == 'l' and sq.pos.left == 150) or (d == 'r' and sq.pos.left == 425):
return
for sq in currentElement:
screen.blit(background, sq.pos, (150, 0, 26, 26))
for sq in currentElement:
sq.moveHorizontally(d)
screen.blit(sq.image, sq.pos)
def rotateElement(element):
"""
Rotate 90 deg each square that the tetris element is made up of around a pivot point
The point is the center of the bounding box
"""
# Make sure that the rotation will not place the tetromino out of map at the top
for sq in element:
if sq.pos[1] <= 0:
return
for sq in element:
screen.blit(background, sq.pos, (150, 0, 26, 26))
# Also, make sure that element stays within the map after rotation
tmpArr = []
for sq in element:
sq.rotate()
tmpArr.append(sq.pos.left)
leftmostElement = min(tmpArr)
rightmostElement = max(tmpArr)
if leftmostElement < 150:
xShift = 150 - leftmostElement
elif rightmostElement > 425:
xShift = 425 - rightmostElement
else:
xShift = 0
for sq in element:
sq.pos.left += xShift
screen.blit(sq.image, sq.pos)
def displayNextElement(element):
"""
Display the next element for the player
"""
screen.fill(pygame.Color("black"), (455, 50, 140, 140))
# Position the element to the top right of the screen
# Also make sure that the element is centered
l = []
for sq in element:
l.append(sq.pos[0])
# There is a 150px wide section at both sides of the screen
minMaxDiff = max(l) - min(l)
# First shift the element to the very right of the map
diffCorrigated = 425 - max(l)
# Then add margins to both sides so that it will be centered in the 150px region
diffCorrigated += 150 - (125 - minMaxDiff) / 2
for i in range(len(element)):
element[i].pos = element[i].pos.move(diffCorrigated, 75)
screen.blit(element[i].image, element[i].pos)
def getTextWidth(text):
|
def updateScore(currentScore, highScore):
"""
Update current score and high score on the screen & also make sure they're centered
"""
screen.fill(pygame.Color("black"), (10, 210, 130, 20))
hsWidth = getTextWidth(str(highScore))
hsPos = (150 - hsWidth) // 2, 210
displayText(str(highScore), GOLD, hsPos)
screen.fill(pygame.Color("black"), (10, 130, 130, 20))
csWidth = getTextWidth(str(currentScore))
csPos = (150 - csWidth) // 2, 130
displayText(str(currentScore), GOLD, csPos)
def getNextElements():
(nextShape, nextColor) = selectRandomElement(extra = True)
nextElementGroup = nextShape(nextColor, ex = False)
nextElement = nextElementGroup.squares
nextElementDisplay = nextShape(nextColor, ex = False).squares
return [nextElementGroup, nextElement, nextElementDisplay]
def updateBothScores(currentScore, highScore, n):
currentScore += n
if currentScore > highScore:
highScore = currentScore
updateHighScore(highScore)
updateScore(currentScore, highScore)
return [currentScore, highScore]
def displayGameOver():
"""
When the game is over clear the screen and display a game over message
"""
screen.fill(pygame.Color('black'), (0, 0, SCREEN_SIZE[0], SCREEN_SIZE[1]))
goText, rect1 = GAME_FONT.render('Game Over', WHITE)
paText, rect2 = GAME_FONT.render('Press [Enter] to play again', WHITE)
width1 = goText.get_width()
height1 = goText.get_height()
width2 = paText.get_width()
height2 = paText.get_height()
posX1 = (SCREEN_SIZE[0] - width1) / 2
posY1 = (SCREEN_SIZE[1] - height1) / 2
posX2 = (SCREEN_SIZE[0] - width2) / 2
posY2 = (SCREEN_SIZE[1] - height2) / 2 + 30
screen.blit(goText, (posX1, posY1))
screen.blit(paText, (posX2, posY2))
# Create display and set screen size
screen = pygame.display.set_mode(SCREEN_SIZE)
background = pygame.image.load('sprites/bgImage.png').convert()
def graphicsInit():
"""
Display graphical elements and text on the screen
"""
screen.blit(background, (round((SCREEN_SIZE[0] - 300) / 2), 0))
hsWidth = getTextWidth(str(getHighScore()))
hsPos = (150 - hsWidth) // 2, 210
# Initialize font, display elapsed time, current score and high score
(timeLabel, x) = displayText('Time', WHITE, (50, 20))
(elapsedTime, x) = displayText('00:00:00', GOLD, (35, 50))
(currentScoreLabel, x) = displayText('Current Score', WHITE, (10, 100))
(cs, x) = displayText('0', GOLD, (70, 130))
(highScoreLabel, x) = displayText('High Score', WHITE, (25, 180))
(highScore, x) = displayText(str(getHighScore()), GOLD, hsPos)
(linesLabel, x) = displayText('Lines', WHITE, (50, 260))
(linesCount, x) = displayText('0', GOLD, (70, 290))
(nextLabel, x) = displayText('Next', WHITE, (500, 20))
graphicsInit()
def soundInit():
"""
Initialize sounds, set volumes
"""
# Play background music forever
pygame.mixer.music.set_volume(0.5)
pygame.mixer.music.load('sounds/bgMusic.mp3')
pygame.mixer.music.play(-1)
lineSound = pygame.mixer.Sound('sounds/success.wav')
lineSound.set_volume(0.9)
gameOverSound = pygame.mixer.Sound('sounds/gameover.wav')
gameOverSound.set_volume(0.9)
return [lineSound, gameOverSound]
lineSound, gameOverSound = soundInit()
def main():
"""
Main event loop of the game
"""
prevMoveSec = 0
precision = 0
currentScore = 0
lines = 0
gameOver = False
highScore = getHighScore()
currentElementGroup = selectRandomElement()
currentElement = currentElementGroup.squares
(nextElementGroup, nextElement, nextElementDisplay) = getNextElements()
# Move the image of the next element to the top right corner
displayNextElement(nextElementDisplay)
while True:
if not gameOver:
posBefore = []
for sq in currentElement:
posBefore.append((sq.pos[0], sq.pos[1]))
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
# Handle keyboard events
if event.type == pygame.KEYDOWN:
# Move the current element left/right by one unit (if not already at the sides)
if (event.key == pygame.K_LEFT and
not checkCollision(currentElement, Game.tetrominos.copy(), -25, 0)):
moveBlockX(currentElement, 'l')
elif (event.key == pygame.K_RIGHT and not checkCollision(currentElement,
Game.tetrominos.copy(), 25, 0)):
moveBlockX(currentElement, 'r')
# Rotate the current element in clockwise direction
elif (event.key == pygame.K_UP and not checkCollision(currentElement,
Game.tetrominos.copy(), 0, 0, rotate = True)):
rotateElement(currentElement)
# Speed up the falling of a tetromino
elif event.key == pygame.K_DOWN:
precision = 1
elif event.type == pygame.KEYUP:
if event.key == pygame.K_DOWN:
precision = 0
eTime = round(time.time() - startTime, precision)
# At every second move current tetromino down by 1 unit
if eTime > prevMoveSec:
if precision:
currentScore, highScore = updateBothScores(currentScore, highScore, 1)
for sq in currentElement:
screen.blit(background, sq.pos, (150, 0, 26, 26))
for sq in currentElement:
sq.move()
screen.blit(sq.image, sq.pos)
# Check if there is a filled row
posYs = Game.checkForRows()
if posYs and currentElementGroup.didCollide():
pygame.mixer.Channel(0).play(lineSound)
Game.clearRow(posYs, screen, background)
Game.shiftRows(posYs, screen, background)
# Update number of filled lines and current score
lines += len(posYs)
displayText(str(lines), GOLD, (70, 290))
n = len(posYs) * (len(posYs) - 1) * 100
currentScore, highScore = updateBothScores(currentScore, highScore, n)
prevMoveSec = eTime
# Check if the current tetromino reached the bottom of the map
if currentElementGroup.didCollide():
# Check for game end: if the player cannot move the current element anymore
posAfter = []
for sq in currentElement:
posAfter.append((sq.pos[0], sq.pos[1]))
# Game over, reset states
if posAfter == posBefore:
pygame.mixer.Sound.play(gameOverSound)
pygame.mixer.music.stop()
Game.tetrominos = []
displayGameOver()
gameOver = True
if not gameOver:
currentElementGroup = nextElementGroup
currentElement = currentElementGroup.squares
(nextElementGroup, nextElement, nextElementDisplay) = getNextElements()
displayNextElement(nextElementDisplay)
Game.tetrominos.extend(currentElement)
if not gameOver:
updateTime(GOLD, (35, 50))
pygame.display.update()
else:
# Listen for [Enter] -> play again
# Also reset states
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.type == pygame.QUIT:
sys.exit()
if event.key == pygame.K_RETURN:
gameOver = False
prevMoveSec = 0
precision = 0
currentScore = 0
lines = 0
graphicsInit()
currentElementGroup = selectRandomElement()
currentElement = currentElementGroup.squares
(nextElementGroup, nextElement, nextElementDisplay) = getNextElements()
displayNextElement(nextElementDisplay)
soundInit()
if __name__ == '__main__':
tetris = main()
| hsTxt, _ = GAME_FONT.render(text, BLACK)
return hsTxt.get_width() | identifier_body |
main.py | import sys, pygame, pygame.freetype, time, math, random, re
from includes.constants import *
from includes.helpers import *
from includes.elements import *
pygame.init()
pygame.display.set_caption('Retro Tetris')
startTime = time.time()
def displayText(text, color, pos):
textSurface, rect = GAME_FONT.render(text, color)
screen.fill(pygame.Color("black"), (pos[0], pos[1], rect.width, rect.height))
screen.blit(textSurface, pos)
return (textSurface, rect)
def updateTime(color, pos):
elapsedTimeInSecs = math.floor(time.time() - startTime)
formattedTime = formatSec(elapsedTimeInSecs)
screen.fill(pygame.Color("black"), (35, 50, 75, 18))
newTextSurface, rect = GAME_FONT.render(formattedTime, color)
screen.blit(newTextSurface, pos)
def moveBlockX(currentElement, d):
"""
Shift the current element horizontally by 1 unit
Also make sure that the rotation does not place the element out of the map
"""
# If element is at the very right/left of the map do not allow further X-shift in that dir.
for sq in currentElement:
if (d == 'l' and sq.pos.left == 150) or (d == 'r' and sq.pos.left == 425):
return
for sq in currentElement:
screen.blit(background, sq.pos, (150, 0, 26, 26))
for sq in currentElement:
sq.moveHorizontally(d)
screen.blit(sq.image, sq.pos)
def rotateElement(element):
"""
Rotate 90 deg each square that the tetris element is made up of around a pivot point | """
# Make sure that the rotation will not place the tetromino out of map at the top
for sq in element:
if sq.pos[1] <= 0:
return
for sq in element:
screen.blit(background, sq.pos, (150, 0, 26, 26))
# Also, make sure that element stays within the map after rotation
tmpArr = []
for sq in element:
sq.rotate()
tmpArr.append(sq.pos.left)
leftmostElement = min(tmpArr)
rightmostElement = max(tmpArr)
if leftmostElement < 150:
xShift = 150 - leftmostElement
elif rightmostElement > 425:
xShift = 425 - rightmostElement
else:
xShift = 0
for sq in element:
sq.pos.left += xShift
screen.blit(sq.image, sq.pos)
def displayNextElement(element):
"""
Display the next element for the player
"""
screen.fill(pygame.Color("black"), (455, 50, 140, 140))
# Position the element to the top right of the screen
# Also make sure that the element is centered
l = []
for sq in element:
l.append(sq.pos[0])
# There is a 150px wide section at both sides of the screen
minMaxDiff = max(l) - min(l)
# First shift the element to the very right of the map
diffCorrigated = 425 - max(l)
# Then add margins to both sides so that it will be centered in the 150px region
diffCorrigated += 150 - (125 - minMaxDiff) / 2
for i in range(len(element)):
element[i].pos = element[i].pos.move(diffCorrigated, 75)
screen.blit(element[i].image, element[i].pos)
def getTextWidth(text):
hsTxt, _ = GAME_FONT.render(text, BLACK)
return hsTxt.get_width()
def updateScore(currentScore, highScore):
"""
Update current score and high score on the screen & also make sure they're centered
"""
screen.fill(pygame.Color("black"), (10, 210, 130, 20))
hsWidth = getTextWidth(str(highScore))
hsPos = (150 - hsWidth) // 2, 210
displayText(str(highScore), GOLD, hsPos)
screen.fill(pygame.Color("black"), (10, 130, 130, 20))
csWidth = getTextWidth(str(currentScore))
csPos = (150 - csWidth) // 2, 130
displayText(str(currentScore), GOLD, csPos)
def getNextElements():
(nextShape, nextColor) = selectRandomElement(extra = True)
nextElementGroup = nextShape(nextColor, ex = False)
nextElement = nextElementGroup.squares
nextElementDisplay = nextShape(nextColor, ex = False).squares
return [nextElementGroup, nextElement, nextElementDisplay]
def updateBothScores(currentScore, highScore, n):
currentScore += n
if currentScore > highScore:
highScore = currentScore
updateHighScore(highScore)
updateScore(currentScore, highScore)
return [currentScore, highScore]
def displayGameOver():
"""
When the game is over clear the screen and display a game over message
"""
screen.fill(pygame.Color('black'), (0, 0, SCREEN_SIZE[0], SCREEN_SIZE[1]))
goText, rect1 = GAME_FONT.render('Game Over', WHITE)
paText, rect2 = GAME_FONT.render('Press [Enter] to play again', WHITE)
width1 = goText.get_width()
height1 = goText.get_height()
width2 = paText.get_width()
height2 = paText.get_height()
posX1 = (SCREEN_SIZE[0] - width1) / 2
posY1 = (SCREEN_SIZE[1] - height1) / 2
posX2 = (SCREEN_SIZE[0] - width2) / 2
posY2 = (SCREEN_SIZE[1] - height2) / 2 + 30
screen.blit(goText, (posX1, posY1))
screen.blit(paText, (posX2, posY2))
# Create display and set screen size
screen = pygame.display.set_mode(SCREEN_SIZE)
background = pygame.image.load('sprites/bgImage.png').convert()
def graphicsInit():
"""
Display graphical elements and text on the screen
"""
screen.blit(background, (round((SCREEN_SIZE[0] - 300) / 2), 0))
hsWidth = getTextWidth(str(getHighScore()))
hsPos = (150 - hsWidth) // 2, 210
# Initialize font, display elapsed time, current score and high score
(timeLabel, x) = displayText('Time', WHITE, (50, 20))
(elapsedTime, x) = displayText('00:00:00', GOLD, (35, 50))
(currentScoreLabel, x) = displayText('Current Score', WHITE, (10, 100))
(cs, x) = displayText('0', GOLD, (70, 130))
(highScoreLabel, x) = displayText('High Score', WHITE, (25, 180))
(highScore, x) = displayText(str(getHighScore()), GOLD, hsPos)
(linesLabel, x) = displayText('Lines', WHITE, (50, 260))
(linesCount, x) = displayText('0', GOLD, (70, 290))
(nextLabel, x) = displayText('Next', WHITE, (500, 20))
graphicsInit()
def soundInit():
"""
Initialize sounds, set volumes
"""
# Play background music forever
pygame.mixer.music.set_volume(0.5)
pygame.mixer.music.load('sounds/bgMusic.mp3')
pygame.mixer.music.play(-1)
lineSound = pygame.mixer.Sound('sounds/success.wav')
lineSound.set_volume(0.9)
gameOverSound = pygame.mixer.Sound('sounds/gameover.wav')
gameOverSound.set_volume(0.9)
return [lineSound, gameOverSound]
lineSound, gameOverSound = soundInit()
def main():
"""
Main event loop of the game
"""
prevMoveSec = 0
precision = 0
currentScore = 0
lines = 0
gameOver = False
highScore = getHighScore()
currentElementGroup = selectRandomElement()
currentElement = currentElementGroup.squares
(nextElementGroup, nextElement, nextElementDisplay) = getNextElements()
# Move the image of the next element to the top right corner
displayNextElement(nextElementDisplay)
while True:
if not gameOver:
posBefore = []
for sq in currentElement:
posBefore.append((sq.pos[0], sq.pos[1]))
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
# Handle keyboard events
if event.type == pygame.KEYDOWN:
# Move the current element left/right by one unit (if not already at the sides)
if (event.key == pygame.K_LEFT and
not checkCollision(currentElement, Game.tetrominos.copy(), -25, 0)):
moveBlockX(currentElement, 'l')
elif (event.key == pygame.K_RIGHT and not checkCollision(currentElement,
Game.tetrominos.copy(), 25, 0)):
moveBlockX(currentElement, 'r')
# Rotate the current element in clockwise direction
elif (event.key == pygame.K_UP and not checkCollision(currentElement,
Game.tetrominos.copy(), 0, 0, rotate = True)):
rotateElement(currentElement)
# Speed up the falling of a tetromino
elif event.key == pygame.K_DOWN:
precision = 1
elif event.type == pygame.KEYUP:
if event.key == pygame.K_DOWN:
precision = 0
eTime = round(time.time() - startTime, precision)
# At every second move current tetromino down by 1 unit
if eTime > prevMoveSec:
if precision:
currentScore, highScore = updateBothScores(currentScore, highScore, 1)
for sq in currentElement:
screen.blit(background, sq.pos, (150, 0, 26, 26))
for sq in currentElement:
sq.move()
screen.blit(sq.image, sq.pos)
# Check if there is a filled row
posYs = Game.checkForRows()
if posYs and currentElementGroup.didCollide():
pygame.mixer.Channel(0).play(lineSound)
Game.clearRow(posYs, screen, background)
Game.shiftRows(posYs, screen, background)
# Update number of filled lines and current score
lines += len(posYs)
displayText(str(lines), GOLD, (70, 290))
n = len(posYs) * (len(posYs) - 1) * 100
currentScore, highScore = updateBothScores(currentScore, highScore, n)
prevMoveSec = eTime
# Check if the current tetromino reached the bottom of the map
if currentElementGroup.didCollide():
# Check for game end: if the player cannot move the current element anymore
posAfter = []
for sq in currentElement:
posAfter.append((sq.pos[0], sq.pos[1]))
# Game over, reset states
if posAfter == posBefore:
pygame.mixer.Sound.play(gameOverSound)
pygame.mixer.music.stop()
Game.tetrominos = []
displayGameOver()
gameOver = True
if not gameOver:
currentElementGroup = nextElementGroup
currentElement = currentElementGroup.squares
(nextElementGroup, nextElement, nextElementDisplay) = getNextElements()
displayNextElement(nextElementDisplay)
Game.tetrominos.extend(currentElement)
if not gameOver:
updateTime(GOLD, (35, 50))
pygame.display.update()
else:
# Listen for [Enter] -> play again
# Also reset states
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.type == pygame.QUIT:
sys.exit()
if event.key == pygame.K_RETURN:
gameOver = False
prevMoveSec = 0
precision = 0
currentScore = 0
lines = 0
graphicsInit()
currentElementGroup = selectRandomElement()
currentElement = currentElementGroup.squares
(nextElementGroup, nextElement, nextElementDisplay) = getNextElements()
displayNextElement(nextElementDisplay)
soundInit()
if __name__ == '__main__':
tetris = main() | The point is the center of the bounding box | random_line_split |
main.py | import sys, pygame, pygame.freetype, time, math, random, re
from includes.constants import *
from includes.helpers import *
from includes.elements import *
pygame.init()
pygame.display.set_caption('Retro Tetris')
startTime = time.time()
def displayText(text, color, pos):
textSurface, rect = GAME_FONT.render(text, color)
screen.fill(pygame.Color("black"), (pos[0], pos[1], rect.width, rect.height))
screen.blit(textSurface, pos)
return (textSurface, rect)
def updateTime(color, pos):
elapsedTimeInSecs = math.floor(time.time() - startTime)
formattedTime = formatSec(elapsedTimeInSecs)
screen.fill(pygame.Color("black"), (35, 50, 75, 18))
newTextSurface, rect = GAME_FONT.render(formattedTime, color)
screen.blit(newTextSurface, pos)
def moveBlockX(currentElement, d):
"""
Shift the current element horizontally by 1 unit
Also make sure that the rotation does not place the element out of the map
"""
# If element is at the very right/left of the map do not allow further X-shift in that dir.
for sq in currentElement:
if (d == 'l' and sq.pos.left == 150) or (d == 'r' and sq.pos.left == 425):
return
for sq in currentElement:
screen.blit(background, sq.pos, (150, 0, 26, 26))
for sq in currentElement:
sq.moveHorizontally(d)
screen.blit(sq.image, sq.pos)
def rotateElement(element):
"""
Rotate 90 deg each square that the tetris element is made up of around a pivot point
The point is the center of the bounding box
"""
# Make sure that the rotation will not place the tetromino out of map at the top
for sq in element:
if sq.pos[1] <= 0:
return
for sq in element:
screen.blit(background, sq.pos, (150, 0, 26, 26))
# Also, make sure that element stays within the map after rotation
tmpArr = []
for sq in element:
sq.rotate()
tmpArr.append(sq.pos.left)
leftmostElement = min(tmpArr)
rightmostElement = max(tmpArr)
if leftmostElement < 150:
xShift = 150 - leftmostElement
elif rightmostElement > 425:
xShift = 425 - rightmostElement
else:
xShift = 0
for sq in element:
sq.pos.left += xShift
screen.blit(sq.image, sq.pos)
def displayNextElement(element):
"""
Display the next element for the player
"""
screen.fill(pygame.Color("black"), (455, 50, 140, 140))
# Position the element to the top right of the screen
# Also make sure that the element is centered
l = []
for sq in element:
l.append(sq.pos[0])
# There is a 150px wide section at both sides of the screen
minMaxDiff = max(l) - min(l)
# First shift the element to the very right of the map
diffCorrigated = 425 - max(l)
# Then add margins to both sides so that it will be centered in the 150px region
diffCorrigated += 150 - (125 - minMaxDiff) / 2
for i in range(len(element)):
element[i].pos = element[i].pos.move(diffCorrigated, 75)
screen.blit(element[i].image, element[i].pos)
def getTextWidth(text):
hsTxt, _ = GAME_FONT.render(text, BLACK)
return hsTxt.get_width()
def updateScore(currentScore, highScore):
"""
Update current score and high score on the screen & also make sure they're centered
"""
screen.fill(pygame.Color("black"), (10, 210, 130, 20))
hsWidth = getTextWidth(str(highScore))
hsPos = (150 - hsWidth) // 2, 210
displayText(str(highScore), GOLD, hsPos)
screen.fill(pygame.Color("black"), (10, 130, 130, 20))
csWidth = getTextWidth(str(currentScore))
csPos = (150 - csWidth) // 2, 130
displayText(str(currentScore), GOLD, csPos)
def getNextElements():
(nextShape, nextColor) = selectRandomElement(extra = True)
nextElementGroup = nextShape(nextColor, ex = False)
nextElement = nextElementGroup.squares
nextElementDisplay = nextShape(nextColor, ex = False).squares
return [nextElementGroup, nextElement, nextElementDisplay]
def updateBothScores(currentScore, highScore, n):
currentScore += n
if currentScore > highScore:
|
updateScore(currentScore, highScore)
return [currentScore, highScore]
def displayGameOver():
"""
When the game is over clear the screen and display a game over message
"""
screen.fill(pygame.Color('black'), (0, 0, SCREEN_SIZE[0], SCREEN_SIZE[1]))
goText, rect1 = GAME_FONT.render('Game Over', WHITE)
paText, rect2 = GAME_FONT.render('Press [Enter] to play again', WHITE)
width1 = goText.get_width()
height1 = goText.get_height()
width2 = paText.get_width()
height2 = paText.get_height()
posX1 = (SCREEN_SIZE[0] - width1) / 2
posY1 = (SCREEN_SIZE[1] - height1) / 2
posX2 = (SCREEN_SIZE[0] - width2) / 2
posY2 = (SCREEN_SIZE[1] - height2) / 2 + 30
screen.blit(goText, (posX1, posY1))
screen.blit(paText, (posX2, posY2))
# Create display and set screen size
screen = pygame.display.set_mode(SCREEN_SIZE)
background = pygame.image.load('sprites/bgImage.png').convert()
def graphicsInit():
"""
Display graphical elements and text on the screen
"""
screen.blit(background, (round((SCREEN_SIZE[0] - 300) / 2), 0))
hsWidth = getTextWidth(str(getHighScore()))
hsPos = (150 - hsWidth) // 2, 210
# Initialize font, display elapsed time, current score and high score
(timeLabel, x) = displayText('Time', WHITE, (50, 20))
(elapsedTime, x) = displayText('00:00:00', GOLD, (35, 50))
(currentScoreLabel, x) = displayText('Current Score', WHITE, (10, 100))
(cs, x) = displayText('0', GOLD, (70, 130))
(highScoreLabel, x) = displayText('High Score', WHITE, (25, 180))
(highScore, x) = displayText(str(getHighScore()), GOLD, hsPos)
(linesLabel, x) = displayText('Lines', WHITE, (50, 260))
(linesCount, x) = displayText('0', GOLD, (70, 290))
(nextLabel, x) = displayText('Next', WHITE, (500, 20))
graphicsInit()
def soundInit():
"""
Initialize sounds, set volumes
"""
# Play background music forever
pygame.mixer.music.set_volume(0.5)
pygame.mixer.music.load('sounds/bgMusic.mp3')
pygame.mixer.music.play(-1)
lineSound = pygame.mixer.Sound('sounds/success.wav')
lineSound.set_volume(0.9)
gameOverSound = pygame.mixer.Sound('sounds/gameover.wav')
gameOverSound.set_volume(0.9)
return [lineSound, gameOverSound]
lineSound, gameOverSound = soundInit()
def main():
"""
Main event loop of the game
"""
prevMoveSec = 0
precision = 0
currentScore = 0
lines = 0
gameOver = False
highScore = getHighScore()
currentElementGroup = selectRandomElement()
currentElement = currentElementGroup.squares
(nextElementGroup, nextElement, nextElementDisplay) = getNextElements()
# Move the image of the next element to the top right corner
displayNextElement(nextElementDisplay)
while True:
if not gameOver:
posBefore = []
for sq in currentElement:
posBefore.append((sq.pos[0], sq.pos[1]))
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
# Handle keyboard events
if event.type == pygame.KEYDOWN:
# Move the current element left/right by one unit (if not already at the sides)
if (event.key == pygame.K_LEFT and
not checkCollision(currentElement, Game.tetrominos.copy(), -25, 0)):
moveBlockX(currentElement, 'l')
elif (event.key == pygame.K_RIGHT and not checkCollision(currentElement,
Game.tetrominos.copy(), 25, 0)):
moveBlockX(currentElement, 'r')
# Rotate the current element in clockwise direction
elif (event.key == pygame.K_UP and not checkCollision(currentElement,
Game.tetrominos.copy(), 0, 0, rotate = True)):
rotateElement(currentElement)
# Speed up the falling of a tetromino
elif event.key == pygame.K_DOWN:
precision = 1
elif event.type == pygame.KEYUP:
if event.key == pygame.K_DOWN:
precision = 0
eTime = round(time.time() - startTime, precision)
# At every second move current tetromino down by 1 unit
if eTime > prevMoveSec:
if precision:
currentScore, highScore = updateBothScores(currentScore, highScore, 1)
for sq in currentElement:
screen.blit(background, sq.pos, (150, 0, 26, 26))
for sq in currentElement:
sq.move()
screen.blit(sq.image, sq.pos)
# Check if there is a filled row
posYs = Game.checkForRows()
if posYs and currentElementGroup.didCollide():
pygame.mixer.Channel(0).play(lineSound)
Game.clearRow(posYs, screen, background)
Game.shiftRows(posYs, screen, background)
# Update number of filled lines and current score
lines += len(posYs)
displayText(str(lines), GOLD, (70, 290))
n = len(posYs) * (len(posYs) - 1) * 100
currentScore, highScore = updateBothScores(currentScore, highScore, n)
prevMoveSec = eTime
# Check if the current tetromino reached the bottom of the map
if currentElementGroup.didCollide():
# Check for game end: if the player cannot move the current element anymore
posAfter = []
for sq in currentElement:
posAfter.append((sq.pos[0], sq.pos[1]))
# Game over, reset states
if posAfter == posBefore:
pygame.mixer.Sound.play(gameOverSound)
pygame.mixer.music.stop()
Game.tetrominos = []
displayGameOver()
gameOver = True
if not gameOver:
currentElementGroup = nextElementGroup
currentElement = currentElementGroup.squares
(nextElementGroup, nextElement, nextElementDisplay) = getNextElements()
displayNextElement(nextElementDisplay)
Game.tetrominos.extend(currentElement)
if not gameOver:
updateTime(GOLD, (35, 50))
pygame.display.update()
else:
# Listen for [Enter] -> play again
# Also reset states
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.type == pygame.QUIT:
sys.exit()
if event.key == pygame.K_RETURN:
gameOver = False
prevMoveSec = 0
precision = 0
currentScore = 0
lines = 0
graphicsInit()
currentElementGroup = selectRandomElement()
currentElement = currentElementGroup.squares
(nextElementGroup, nextElement, nextElementDisplay) = getNextElements()
displayNextElement(nextElementDisplay)
soundInit()
if __name__ == '__main__':
tetris = main()
| highScore = currentScore
updateHighScore(highScore) | conditional_block |
main.py | import sys, pygame, pygame.freetype, time, math, random, re
from includes.constants import *
from includes.helpers import *
from includes.elements import *
pygame.init()
pygame.display.set_caption('Retro Tetris')
startTime = time.time()
def displayText(text, color, pos):
textSurface, rect = GAME_FONT.render(text, color)
screen.fill(pygame.Color("black"), (pos[0], pos[1], rect.width, rect.height))
screen.blit(textSurface, pos)
return (textSurface, rect)
def updateTime(color, pos):
elapsedTimeInSecs = math.floor(time.time() - startTime)
formattedTime = formatSec(elapsedTimeInSecs)
screen.fill(pygame.Color("black"), (35, 50, 75, 18))
newTextSurface, rect = GAME_FONT.render(formattedTime, color)
screen.blit(newTextSurface, pos)
def moveBlockX(currentElement, d):
"""
Shift the current element horizontally by 1 unit
Also make sure that the rotation does not place the element out of the map
"""
# If element is at the very right/left of the map do not allow further X-shift in that dir.
for sq in currentElement:
if (d == 'l' and sq.pos.left == 150) or (d == 'r' and sq.pos.left == 425):
return
for sq in currentElement:
screen.blit(background, sq.pos, (150, 0, 26, 26))
for sq in currentElement:
sq.moveHorizontally(d)
screen.blit(sq.image, sq.pos)
def | (element):
"""
Rotate 90 deg each square that the tetris element is made up of around a pivot point
The point is the center of the bounding box
"""
# Make sure that the rotation will not place the tetromino out of map at the top
for sq in element:
if sq.pos[1] <= 0:
return
for sq in element:
screen.blit(background, sq.pos, (150, 0, 26, 26))
# Also, make sure that element stays within the map after rotation
tmpArr = []
for sq in element:
sq.rotate()
tmpArr.append(sq.pos.left)
leftmostElement = min(tmpArr)
rightmostElement = max(tmpArr)
if leftmostElement < 150:
xShift = 150 - leftmostElement
elif rightmostElement > 425:
xShift = 425 - rightmostElement
else:
xShift = 0
for sq in element:
sq.pos.left += xShift
screen.blit(sq.image, sq.pos)
def displayNextElement(element):
"""
Display the next element for the player
"""
screen.fill(pygame.Color("black"), (455, 50, 140, 140))
# Position the element to the top right of the screen
# Also make sure that the element is centered
l = []
for sq in element:
l.append(sq.pos[0])
# There is a 150px wide section at both sides of the screen
minMaxDiff = max(l) - min(l)
# First shift the element to the very right of the map
diffCorrigated = 425 - max(l)
# Then add margins to both sides so that it will be centered in the 150px region
diffCorrigated += 150 - (125 - minMaxDiff) / 2
for i in range(len(element)):
element[i].pos = element[i].pos.move(diffCorrigated, 75)
screen.blit(element[i].image, element[i].pos)
def getTextWidth(text):
hsTxt, _ = GAME_FONT.render(text, BLACK)
return hsTxt.get_width()
def updateScore(currentScore, highScore):
"""
Update current score and high score on the screen & also make sure they're centered
"""
screen.fill(pygame.Color("black"), (10, 210, 130, 20))
hsWidth = getTextWidth(str(highScore))
hsPos = (150 - hsWidth) // 2, 210
displayText(str(highScore), GOLD, hsPos)
screen.fill(pygame.Color("black"), (10, 130, 130, 20))
csWidth = getTextWidth(str(currentScore))
csPos = (150 - csWidth) // 2, 130
displayText(str(currentScore), GOLD, csPos)
def getNextElements():
(nextShape, nextColor) = selectRandomElement(extra = True)
nextElementGroup = nextShape(nextColor, ex = False)
nextElement = nextElementGroup.squares
nextElementDisplay = nextShape(nextColor, ex = False).squares
return [nextElementGroup, nextElement, nextElementDisplay]
def updateBothScores(currentScore, highScore, n):
currentScore += n
if currentScore > highScore:
highScore = currentScore
updateHighScore(highScore)
updateScore(currentScore, highScore)
return [currentScore, highScore]
def displayGameOver():
"""
When the game is over clear the screen and display a game over message
"""
screen.fill(pygame.Color('black'), (0, 0, SCREEN_SIZE[0], SCREEN_SIZE[1]))
goText, rect1 = GAME_FONT.render('Game Over', WHITE)
paText, rect2 = GAME_FONT.render('Press [Enter] to play again', WHITE)
width1 = goText.get_width()
height1 = goText.get_height()
width2 = paText.get_width()
height2 = paText.get_height()
posX1 = (SCREEN_SIZE[0] - width1) / 2
posY1 = (SCREEN_SIZE[1] - height1) / 2
posX2 = (SCREEN_SIZE[0] - width2) / 2
posY2 = (SCREEN_SIZE[1] - height2) / 2 + 30
screen.blit(goText, (posX1, posY1))
screen.blit(paText, (posX2, posY2))
# Create display and set screen size
screen = pygame.display.set_mode(SCREEN_SIZE)
background = pygame.image.load('sprites/bgImage.png').convert()
def graphicsInit():
"""
Display graphical elements and text on the screen
"""
screen.blit(background, (round((SCREEN_SIZE[0] - 300) / 2), 0))
hsWidth = getTextWidth(str(getHighScore()))
hsPos = (150 - hsWidth) // 2, 210
# Initialize font, display elapsed time, current score and high score
(timeLabel, x) = displayText('Time', WHITE, (50, 20))
(elapsedTime, x) = displayText('00:00:00', GOLD, (35, 50))
(currentScoreLabel, x) = displayText('Current Score', WHITE, (10, 100))
(cs, x) = displayText('0', GOLD, (70, 130))
(highScoreLabel, x) = displayText('High Score', WHITE, (25, 180))
(highScore, x) = displayText(str(getHighScore()), GOLD, hsPos)
(linesLabel, x) = displayText('Lines', WHITE, (50, 260))
(linesCount, x) = displayText('0', GOLD, (70, 290))
(nextLabel, x) = displayText('Next', WHITE, (500, 20))
graphicsInit()
def soundInit():
"""
Initialize sounds, set volumes
"""
# Play background music forever
pygame.mixer.music.set_volume(0.5)
pygame.mixer.music.load('sounds/bgMusic.mp3')
pygame.mixer.music.play(-1)
lineSound = pygame.mixer.Sound('sounds/success.wav')
lineSound.set_volume(0.9)
gameOverSound = pygame.mixer.Sound('sounds/gameover.wav')
gameOverSound.set_volume(0.9)
return [lineSound, gameOverSound]
lineSound, gameOverSound = soundInit()
def main():
"""
Main event loop of the game
"""
prevMoveSec = 0
precision = 0
currentScore = 0
lines = 0
gameOver = False
highScore = getHighScore()
currentElementGroup = selectRandomElement()
currentElement = currentElementGroup.squares
(nextElementGroup, nextElement, nextElementDisplay) = getNextElements()
# Move the image of the next element to the top right corner
displayNextElement(nextElementDisplay)
while True:
if not gameOver:
posBefore = []
for sq in currentElement:
posBefore.append((sq.pos[0], sq.pos[1]))
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
# Handle keyboard events
if event.type == pygame.KEYDOWN:
# Move the current element left/right by one unit (if not already at the sides)
if (event.key == pygame.K_LEFT and
not checkCollision(currentElement, Game.tetrominos.copy(), -25, 0)):
moveBlockX(currentElement, 'l')
elif (event.key == pygame.K_RIGHT and not checkCollision(currentElement,
Game.tetrominos.copy(), 25, 0)):
moveBlockX(currentElement, 'r')
# Rotate the current element in clockwise direction
elif (event.key == pygame.K_UP and not checkCollision(currentElement,
Game.tetrominos.copy(), 0, 0, rotate = True)):
rotateElement(currentElement)
# Speed up the falling of a tetromino
elif event.key == pygame.K_DOWN:
precision = 1
elif event.type == pygame.KEYUP:
if event.key == pygame.K_DOWN:
precision = 0
eTime = round(time.time() - startTime, precision)
# At every second move current tetromino down by 1 unit
if eTime > prevMoveSec:
if precision:
currentScore, highScore = updateBothScores(currentScore, highScore, 1)
for sq in currentElement:
screen.blit(background, sq.pos, (150, 0, 26, 26))
for sq in currentElement:
sq.move()
screen.blit(sq.image, sq.pos)
# Check if there is a filled row
posYs = Game.checkForRows()
if posYs and currentElementGroup.didCollide():
pygame.mixer.Channel(0).play(lineSound)
Game.clearRow(posYs, screen, background)
Game.shiftRows(posYs, screen, background)
# Update number of filled lines and current score
lines += len(posYs)
displayText(str(lines), GOLD, (70, 290))
n = len(posYs) * (len(posYs) - 1) * 100
currentScore, highScore = updateBothScores(currentScore, highScore, n)
prevMoveSec = eTime
# Check if the current tetromino reached the bottom of the map
if currentElementGroup.didCollide():
# Check for game end: if the player cannot move the current element anymore
posAfter = []
for sq in currentElement:
posAfter.append((sq.pos[0], sq.pos[1]))
# Game over, reset states
if posAfter == posBefore:
pygame.mixer.Sound.play(gameOverSound)
pygame.mixer.music.stop()
Game.tetrominos = []
displayGameOver()
gameOver = True
if not gameOver:
currentElementGroup = nextElementGroup
currentElement = currentElementGroup.squares
(nextElementGroup, nextElement, nextElementDisplay) = getNextElements()
displayNextElement(nextElementDisplay)
Game.tetrominos.extend(currentElement)
if not gameOver:
updateTime(GOLD, (35, 50))
pygame.display.update()
else:
# Listen for [Enter] -> play again
# Also reset states
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.type == pygame.QUIT:
sys.exit()
if event.key == pygame.K_RETURN:
gameOver = False
prevMoveSec = 0
precision = 0
currentScore = 0
lines = 0
graphicsInit()
currentElementGroup = selectRandomElement()
currentElement = currentElementGroup.squares
(nextElementGroup, nextElement, nextElementDisplay) = getNextElements()
displayNextElement(nextElementDisplay)
soundInit()
if __name__ == '__main__':
tetris = main()
| rotateElement | identifier_name |
register.rs | /*
* Copyright 2018 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::net::IpAddr;
use failure::{Error, SyncFailure};
use clap::{value_t, App, AppSettings, Arg, ArgMatches, SubCommand};
use derive_getters::Getters;
use hex;
use web3::types::H256;
use crate::command::*;
use crate::config::SetupConfig;
use crate::contract_func::contract::events::app_deployed::parse_log as parse_deployed;
use crate::contract_func::contract::functions::add_node;
use crate::contract_func::{call_contract, get_transaction_logs, wait_sync, wait_tx_included};
use crate::contract_status::{find_by_tendermint_key, status};
use crate::ethereum_params::EthereumParams;
use crate::step_counter::StepCounter;
use crate::types::{NodeAddress, IP_LEN, TENDERMINT_NODE_ID_LEN};
use crate::utils;
use web3::transports::Http;
use web3::types::H160;
const API_PORT: &str = "api_port";
const CAPACITY: &str = "capacity";
const PRIVATE: &str = "private";
const NO_STATUS_CHECK: &str = "no_status_check";
#[derive(Debug, Getters)]
pub struct Register {
node_ip: IpAddr,
tendermint_key: H256,
tendermint_node_id: H160,
api_port: u16,
capacity: u16,
private: bool,
no_status_check: bool,
eth: EthereumParams,
}
#[derive(PartialEq, Debug)]
pub enum Registered {
TransactionSent(H256),
Deployed {
app_ids: Vec<u64>,
ports: Vec<u16>,
tx: H256,
},
Enqueued(H256),
AlreadyRegistered,
}
impl Register {
/// Creates `Register` structure
pub fn new(
node_address: IpAddr,
tendermint_key: H256,
tendermint_node_id: H160,
api_port: u16,
capacity: u16,
private: bool,
no_status_check: bool,
eth: EthereumParams,
) -> Result<Register, Error> {
Ok(Register {
node_ip: node_address,
tendermint_key,
tendermint_node_id,
api_port,
capacity,
private,
no_status_check,
eth,
})
}
/// Serializes a node IP address and a tendermint key into the hash of node's key address
fn serialize_node_address(&self) -> Result<NodeAddress, Error> {
// serialize tendermint key
let key_str = format!("{:?}", self.tendermint_node_id);
let key_str = key_str.as_str().trim_start_matches("0x");
let key_bytes = hex::decode(key_str.to_owned())?;
let mut key_bytes = key_bytes.as_slice()[0..TENDERMINT_NODE_ID_LEN].to_vec();
// serialize IP address
let ip_str = self.node_ip.to_string();
let split = ip_str.split('.');
let mut addr_bytes: [u8; IP_LEN] = [0; IP_LEN];
for (i, part) in split.enumerate() {
addr_bytes[i] = part.parse()?;
}
let mut addr_vec = addr_bytes.to_vec();
// concatenate tendermint key and IP address
key_bytes.append(&mut addr_vec);
let serialized = hex::encode(key_bytes);
let hash_addr: NodeAddress = serialized.parse()?;
Ok(hash_addr)
}
/// Registers a node in Fluence smart contract
pub fn register(&self, show_progress: bool) -> Result<Registered, Error> {
let (_eloop, transport) = Http::new(self.eth.eth_url.as_str()).map_err(SyncFailure::new)?;
let web3 = &web3::Web3::new(transport);
let publish_to_contract_fn = || -> Result<H256, Error> {
let hash_addr: NodeAddress = self.serialize_node_address()?;
let (call_data, _) = add_node::call(
self.tendermint_key,
hash_addr,
u64::from(self.api_port),
u64::from(self.capacity),
self.private,
);
call_contract(web3, &self.eth, call_data, None)
};
let check_node_registered_fn = || -> Result<bool, Error> {
let contract_status = status::get_status(web3, self.eth.contract_address)?;
Ok(find_by_tendermint_key(&contract_status, self.tendermint_key).is_some())
};
let wait_event_fn = |tx: &H256| -> Result<Registered, Error> {
let logs = get_transaction_logs(self.eth.eth_url.as_str(), tx, parse_deployed)?;
let status = if logs.is_empty() {
Registered::Enqueued(*tx)
} else {
let (app_ids, ports) = logs
.iter()
.filter_map(|e| {
let idx = e.node_i_ds.iter().position(|id| *id == self.tendermint_key);
idx.and_then(|i| e.ports.get(i)).map(|port| {
let port = Into::<u64>::into(*port) as u16;
let app_id = Into::<u64>::into(e.app_id);
(app_id, port)
})
})
.unzip();
Registered::Deployed {
app_ids,
ports,
tx: *tx,
}
};
Ok(status)
};
// sending transaction with the hash of file with code to ethereum
if show_progress {
let mut step_counter = StepCounter::new(1);
if !self.no_status_check {
step_counter.register()
};
if self.eth.wait_eth_sync {
step_counter.register()
};
if self.eth.wait_tx_include {
step_counter.register()
};
if self.eth.wait_eth_sync {
utils::with_progress(
"Waiting while Ethereum node is syncing...",
step_counter.format_next_step().as_str(),
"Ethereum node synced.",
|| wait_sync(web3),
)?;
};
let is_registered = if !self.no_status_check {
utils::with_progress(
"Check if node in smart contract is already registered...",
step_counter.format_next_step().as_str(),
"Smart contract checked.",
|| check_node_registered_fn(),
)?
} else | ;
if is_registered {
Ok(Registered::AlreadyRegistered)
} else {
let tx = utils::with_progress(
"Registering the node in the smart contract...",
step_counter.format_next_step().as_str(),
"Transaction with node registration was sent.",
publish_to_contract_fn,
)?;
if self.eth.wait_tx_include {
utils::print_tx_hash(tx);
utils::with_progress(
"Waiting for the transaction to be included in a block...",
step_counter.format_next_step().as_str(),
"Transaction was included.",
|| {
wait_tx_included(&tx, web3)?;
wait_event_fn(&tx)
},
)
} else {
Ok(Registered::TransactionSent(tx))
}
}
} else {
if self.eth.wait_eth_sync {
wait_sync(web3)?;
}
let is_registered = if !self.no_status_check {
check_node_registered_fn()?
} else {
false
};
if is_registered {
Ok(Registered::AlreadyRegistered)
} else {
let tx = publish_to_contract_fn()?;
if self.eth.wait_tx_include {
wait_event_fn(&tx)
} else {
Ok(Registered::TransactionSent(tx))
}
}
}
}
}
pub fn parse(args: &ArgMatches, config: SetupConfig) -> Result<Register, Error> {
let tendermint_key: H256 = parse_tendermint_key(args)?;
let tendermint_node_id: H160 = parse_tendermint_node_id(args)?;
let api_port = value_t!(args, API_PORT, u16)?;
let capacity = value_t!(args, CAPACITY, u16)?;
let private: bool = args.is_present(PRIVATE);
let no_status_check: bool = args.is_present(NO_STATUS_CHECK);
let eth = parse_ethereum_args(args, config)?;
let node_address = parse_node_ip(&args)?;
Register::new(
node_address,
tendermint_key,
tendermint_node_id,
api_port,
capacity,
private,
no_status_check,
eth,
)
}
/// Parses arguments from console and initialize parameters for Publisher
pub fn subcommand<'a, 'b>() -> App<'a, 'b> {
let args = &[
node_ip().display_order(1),
tendermint_key().display_order(2),
base64_tendermint_key().display_order(3),
tendermint_node_id().display_order(4),
Arg::with_name(API_PORT)
.alias(API_PORT)
.long(API_PORT)
.default_value("20096")
.takes_value(true)
.help("Node API port")
.display_order(5),
Arg::with_name(CAPACITY)
.alias(CAPACITY)
.default_value("20196")
.long(CAPACITY)
.takes_value(true)
.help("Maximum number of apps to be run on the node")
.display_order(5),
Arg::with_name(PRIVATE)
.long(PRIVATE)
.short("p")
.takes_value(false)
.help("Marks node as private, used for pinning apps to nodes")
.display_order(6),
Arg::with_name(NO_STATUS_CHECK)
.long(NO_STATUS_CHECK)
.short("N")
.takes_value(false)
.help("Disable checking if a node is already registered. Registration can be faster but will spend some gas if the node is registered.")
.display_order(7),
];
SubCommand::with_name("register")
.about("Register a node in the smart contract")
.args(with_ethereum_args(args).as_slice())
.setting(AppSettings::ArgRequiredElseHelp)
}
#[cfg(test)]
pub mod tests {
use failure::Error;
use ethkey::Secret;
use rand::prelude::*;
use web3::types::*;
use crate::command::EthereumArgs;
use crate::config::SetupConfig;
use crate::credentials::Credentials;
use crate::ethereum_params::EthereumParams;
use super::Register;
pub fn generate_eth_args(credentials: Credentials) -> EthereumArgs {
let account: Address = "4180fc65d613ba7e1a385181a219f1dbfe7bf11d".parse().unwrap();
EthereumArgs::with_acc_creds(account, credentials)
}
pub fn generate_register(credentials: Credentials) -> Register {
let mut rng = rand::thread_rng();
let rnd_num: u64 = rng.gen();
let tendermint_key: H256 = H256::from(rnd_num);
let tendermint_node_id: H160 = H160::from(rnd_num);
let eth = generate_eth_args(credentials);
let config = SetupConfig::default().unwrap();
let eth_params = EthereumParams::generate(eth, config).unwrap();
Register::new(
"127.0.0.1".parse().unwrap(),
tendermint_key,
tendermint_node_id,
25006,
25100,
false,
false,
eth_params,
)
.unwrap()
}
pub fn generate_with<F>(func: F, credentials: Credentials) -> Register
where
F: FnOnce(&mut Register),
{
let mut register = generate_register(credentials);
func(&mut register);
register
}
pub fn generate_with_account(account: Address, credentials: Credentials) -> Register {
generate_with(|r| r.eth.account = account, credentials)
}
#[test]
fn register_success() -> Result<(), Error> {
let register = generate_with_account(
"fa0de43c68bea2167181cd8a83f990d02a049336".parse()?,
Credentials::No,
);
register.register(false)?;
Ok(())
}
#[test]
fn register_out_of_gas() -> Result<(), Error> {
let register = generate_with(|r| r.eth.gas = 1, Credentials::No);
let result = register.register(false);
assert_eq!(result.is_err(), true);
Ok(())
}
#[test]
fn register_success_with_secret() -> Result<(), Error> {
let secret_arr: H256 =
"a349fe22d5c6f8ad3a1ad91ddb65e8946435b52254ce8c330f7ed796e83bfd92".parse()?;
let secret = Secret::from(secret_arr);
let register = generate_with_account(
"dce48d51717ad5eb87fb56ff55ec609cf37b9aad".parse()?,
Credentials::Secret(secret),
);
register.register(false)?;
Ok(())
}
}
| {
false
} | conditional_block |
register.rs | /*
* Copyright 2018 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::net::IpAddr;
use failure::{Error, SyncFailure};
use clap::{value_t, App, AppSettings, Arg, ArgMatches, SubCommand};
use derive_getters::Getters;
use hex;
use web3::types::H256;
use crate::command::*;
use crate::config::SetupConfig;
use crate::contract_func::contract::events::app_deployed::parse_log as parse_deployed;
use crate::contract_func::contract::functions::add_node;
use crate::contract_func::{call_contract, get_transaction_logs, wait_sync, wait_tx_included};
use crate::contract_status::{find_by_tendermint_key, status};
use crate::ethereum_params::EthereumParams;
use crate::step_counter::StepCounter;
use crate::types::{NodeAddress, IP_LEN, TENDERMINT_NODE_ID_LEN};
use crate::utils;
use web3::transports::Http;
use web3::types::H160;
const API_PORT: &str = "api_port";
const CAPACITY: &str = "capacity";
const PRIVATE: &str = "private";
const NO_STATUS_CHECK: &str = "no_status_check";
#[derive(Debug, Getters)]
pub struct Register {
node_ip: IpAddr,
tendermint_key: H256,
tendermint_node_id: H160,
api_port: u16,
capacity: u16,
private: bool,
no_status_check: bool,
eth: EthereumParams,
}
#[derive(PartialEq, Debug)]
pub enum Registered {
TransactionSent(H256),
Deployed {
app_ids: Vec<u64>,
ports: Vec<u16>,
tx: H256,
},
Enqueued(H256),
AlreadyRegistered,
}
impl Register {
/// Creates `Register` structure
pub fn new(
node_address: IpAddr,
tendermint_key: H256,
tendermint_node_id: H160,
api_port: u16,
capacity: u16,
private: bool,
no_status_check: bool,
eth: EthereumParams,
) -> Result<Register, Error> {
Ok(Register {
node_ip: node_address,
tendermint_key,
tendermint_node_id,
api_port,
capacity,
private,
no_status_check,
eth,
})
}
/// Serializes a node IP address and a tendermint key into the hash of node's key address
fn serialize_node_address(&self) -> Result<NodeAddress, Error> {
// serialize tendermint key
let key_str = format!("{:?}", self.tendermint_node_id);
let key_str = key_str.as_str().trim_start_matches("0x");
let key_bytes = hex::decode(key_str.to_owned())?;
let mut key_bytes = key_bytes.as_slice()[0..TENDERMINT_NODE_ID_LEN].to_vec();
// serialize IP address
let ip_str = self.node_ip.to_string();
let split = ip_str.split('.');
let mut addr_bytes: [u8; IP_LEN] = [0; IP_LEN];
for (i, part) in split.enumerate() {
addr_bytes[i] = part.parse()?;
}
let mut addr_vec = addr_bytes.to_vec();
// concatenate tendermint key and IP address
key_bytes.append(&mut addr_vec);
let serialized = hex::encode(key_bytes);
let hash_addr: NodeAddress = serialized.parse()?;
Ok(hash_addr)
}
/// Registers a node in Fluence smart contract
pub fn register(&self, show_progress: bool) -> Result<Registered, Error> {
let (_eloop, transport) = Http::new(self.eth.eth_url.as_str()).map_err(SyncFailure::new)?;
let web3 = &web3::Web3::new(transport);
let publish_to_contract_fn = || -> Result<H256, Error> {
let hash_addr: NodeAddress = self.serialize_node_address()?;
let (call_data, _) = add_node::call(
self.tendermint_key,
hash_addr,
u64::from(self.api_port),
u64::from(self.capacity),
self.private,
);
call_contract(web3, &self.eth, call_data, None)
};
let check_node_registered_fn = || -> Result<bool, Error> {
let contract_status = status::get_status(web3, self.eth.contract_address)?;
Ok(find_by_tendermint_key(&contract_status, self.tendermint_key).is_some())
};
let wait_event_fn = |tx: &H256| -> Result<Registered, Error> {
let logs = get_transaction_logs(self.eth.eth_url.as_str(), tx, parse_deployed)?;
let status = if logs.is_empty() {
Registered::Enqueued(*tx)
} else {
let (app_ids, ports) = logs
.iter()
.filter_map(|e| {
let idx = e.node_i_ds.iter().position(|id| *id == self.tendermint_key);
idx.and_then(|i| e.ports.get(i)).map(|port| {
let port = Into::<u64>::into(*port) as u16;
let app_id = Into::<u64>::into(e.app_id);
(app_id, port)
})
})
.unzip();
Registered::Deployed {
app_ids,
ports,
tx: *tx,
}
};
Ok(status)
};
// sending transaction with the hash of file with code to ethereum
if show_progress {
let mut step_counter = StepCounter::new(1);
if !self.no_status_check {
step_counter.register()
};
if self.eth.wait_eth_sync {
step_counter.register()
};
if self.eth.wait_tx_include {
step_counter.register()
};
if self.eth.wait_eth_sync {
utils::with_progress(
"Waiting while Ethereum node is syncing...",
step_counter.format_next_step().as_str(),
"Ethereum node synced.",
|| wait_sync(web3),
)?;
};
let is_registered = if !self.no_status_check {
utils::with_progress(
"Check if node in smart contract is already registered...",
step_counter.format_next_step().as_str(),
"Smart contract checked.",
|| check_node_registered_fn(),
)?
} else {
false
};
if is_registered {
Ok(Registered::AlreadyRegistered)
} else {
let tx = utils::with_progress(
"Registering the node in the smart contract...",
step_counter.format_next_step().as_str(),
"Transaction with node registration was sent.",
publish_to_contract_fn,
)?;
if self.eth.wait_tx_include {
utils::print_tx_hash(tx);
utils::with_progress(
"Waiting for the transaction to be included in a block...",
step_counter.format_next_step().as_str(),
"Transaction was included.",
|| {
wait_tx_included(&tx, web3)?;
wait_event_fn(&tx)
},
)
} else {
Ok(Registered::TransactionSent(tx))
}
}
} else {
if self.eth.wait_eth_sync {
wait_sync(web3)?;
}
let is_registered = if !self.no_status_check {
check_node_registered_fn()?
} else {
false
};
if is_registered {
Ok(Registered::AlreadyRegistered)
} else {
let tx = publish_to_contract_fn()?;
if self.eth.wait_tx_include {
wait_event_fn(&tx)
} else {
Ok(Registered::TransactionSent(tx))
}
}
}
}
}
pub fn parse(args: &ArgMatches, config: SetupConfig) -> Result<Register, Error> {
let tendermint_key: H256 = parse_tendermint_key(args)?;
let tendermint_node_id: H160 = parse_tendermint_node_id(args)?;
let api_port = value_t!(args, API_PORT, u16)?;
let capacity = value_t!(args, CAPACITY, u16)?;
let private: bool = args.is_present(PRIVATE);
let no_status_check: bool = args.is_present(NO_STATUS_CHECK);
let eth = parse_ethereum_args(args, config)?;
let node_address = parse_node_ip(&args)?;
Register::new(
node_address,
tendermint_key,
tendermint_node_id,
api_port,
capacity,
private,
no_status_check,
eth,
)
}
/// Parses arguments from console and initialize parameters for Publisher
pub fn subcommand<'a, 'b>() -> App<'a, 'b> {
let args = &[
node_ip().display_order(1),
tendermint_key().display_order(2),
base64_tendermint_key().display_order(3),
tendermint_node_id().display_order(4),
Arg::with_name(API_PORT)
.alias(API_PORT)
.long(API_PORT)
.default_value("20096")
.takes_value(true)
.help("Node API port")
.display_order(5),
Arg::with_name(CAPACITY)
.alias(CAPACITY)
.default_value("20196")
.long(CAPACITY)
.takes_value(true)
.help("Maximum number of apps to be run on the node")
.display_order(5),
Arg::with_name(PRIVATE)
.long(PRIVATE)
.short("p")
.takes_value(false)
.help("Marks node as private, used for pinning apps to nodes")
.display_order(6),
Arg::with_name(NO_STATUS_CHECK)
.long(NO_STATUS_CHECK)
.short("N")
.takes_value(false)
.help("Disable checking if a node is already registered. Registration can be faster but will spend some gas if the node is registered.")
.display_order(7),
];
SubCommand::with_name("register")
.about("Register a node in the smart contract")
.args(with_ethereum_args(args).as_slice())
.setting(AppSettings::ArgRequiredElseHelp)
}
#[cfg(test)]
pub mod tests {
use failure::Error;
use ethkey::Secret;
use rand::prelude::*;
use web3::types::*;
use crate::command::EthereumArgs;
use crate::config::SetupConfig;
use crate::credentials::Credentials;
use crate::ethereum_params::EthereumParams;
use super::Register;
pub fn generate_eth_args(credentials: Credentials) -> EthereumArgs |
pub fn generate_register(credentials: Credentials) -> Register {
let mut rng = rand::thread_rng();
let rnd_num: u64 = rng.gen();
let tendermint_key: H256 = H256::from(rnd_num);
let tendermint_node_id: H160 = H160::from(rnd_num);
let eth = generate_eth_args(credentials);
let config = SetupConfig::default().unwrap();
let eth_params = EthereumParams::generate(eth, config).unwrap();
Register::new(
"127.0.0.1".parse().unwrap(),
tendermint_key,
tendermint_node_id,
25006,
25100,
false,
false,
eth_params,
)
.unwrap()
}
pub fn generate_with<F>(func: F, credentials: Credentials) -> Register
where
F: FnOnce(&mut Register),
{
let mut register = generate_register(credentials);
func(&mut register);
register
}
pub fn generate_with_account(account: Address, credentials: Credentials) -> Register {
generate_with(|r| r.eth.account = account, credentials)
}
#[test]
fn register_success() -> Result<(), Error> {
let register = generate_with_account(
"fa0de43c68bea2167181cd8a83f990d02a049336".parse()?,
Credentials::No,
);
register.register(false)?;
Ok(())
}
#[test]
fn register_out_of_gas() -> Result<(), Error> {
let register = generate_with(|r| r.eth.gas = 1, Credentials::No);
let result = register.register(false);
assert_eq!(result.is_err(), true);
Ok(())
}
#[test]
fn register_success_with_secret() -> Result<(), Error> {
let secret_arr: H256 =
"a349fe22d5c6f8ad3a1ad91ddb65e8946435b52254ce8c330f7ed796e83bfd92".parse()?;
let secret = Secret::from(secret_arr);
let register = generate_with_account(
"dce48d51717ad5eb87fb56ff55ec609cf37b9aad".parse()?,
Credentials::Secret(secret),
);
register.register(false)?;
Ok(())
}
}
| {
let account: Address = "4180fc65d613ba7e1a385181a219f1dbfe7bf11d".parse().unwrap();
EthereumArgs::with_acc_creds(account, credentials)
} | identifier_body |
register.rs | /*
* Copyright 2018 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::net::IpAddr;
use failure::{Error, SyncFailure};
use clap::{value_t, App, AppSettings, Arg, ArgMatches, SubCommand};
use derive_getters::Getters;
use hex;
use web3::types::H256;
use crate::command::*;
use crate::config::SetupConfig;
use crate::contract_func::contract::events::app_deployed::parse_log as parse_deployed;
use crate::contract_func::contract::functions::add_node;
use crate::contract_func::{call_contract, get_transaction_logs, wait_sync, wait_tx_included};
use crate::contract_status::{find_by_tendermint_key, status};
use crate::ethereum_params::EthereumParams;
use crate::step_counter::StepCounter;
use crate::types::{NodeAddress, IP_LEN, TENDERMINT_NODE_ID_LEN};
use crate::utils;
use web3::transports::Http; | const NO_STATUS_CHECK: &str = "no_status_check";
#[derive(Debug, Getters)]
pub struct Register {
node_ip: IpAddr,
tendermint_key: H256,
tendermint_node_id: H160,
api_port: u16,
capacity: u16,
private: bool,
no_status_check: bool,
eth: EthereumParams,
}
#[derive(PartialEq, Debug)]
pub enum Registered {
TransactionSent(H256),
Deployed {
app_ids: Vec<u64>,
ports: Vec<u16>,
tx: H256,
},
Enqueued(H256),
AlreadyRegistered,
}
impl Register {
/// Creates `Register` structure
pub fn new(
node_address: IpAddr,
tendermint_key: H256,
tendermint_node_id: H160,
api_port: u16,
capacity: u16,
private: bool,
no_status_check: bool,
eth: EthereumParams,
) -> Result<Register, Error> {
Ok(Register {
node_ip: node_address,
tendermint_key,
tendermint_node_id,
api_port,
capacity,
private,
no_status_check,
eth,
})
}
/// Serializes a node IP address and a tendermint key into the hash of node's key address
fn serialize_node_address(&self) -> Result<NodeAddress, Error> {
// serialize tendermint key
let key_str = format!("{:?}", self.tendermint_node_id);
let key_str = key_str.as_str().trim_start_matches("0x");
let key_bytes = hex::decode(key_str.to_owned())?;
let mut key_bytes = key_bytes.as_slice()[0..TENDERMINT_NODE_ID_LEN].to_vec();
// serialize IP address
let ip_str = self.node_ip.to_string();
let split = ip_str.split('.');
let mut addr_bytes: [u8; IP_LEN] = [0; IP_LEN];
for (i, part) in split.enumerate() {
addr_bytes[i] = part.parse()?;
}
let mut addr_vec = addr_bytes.to_vec();
// concatenate tendermint key and IP address
key_bytes.append(&mut addr_vec);
let serialized = hex::encode(key_bytes);
let hash_addr: NodeAddress = serialized.parse()?;
Ok(hash_addr)
}
/// Registers a node in Fluence smart contract
pub fn register(&self, show_progress: bool) -> Result<Registered, Error> {
let (_eloop, transport) = Http::new(self.eth.eth_url.as_str()).map_err(SyncFailure::new)?;
let web3 = &web3::Web3::new(transport);
let publish_to_contract_fn = || -> Result<H256, Error> {
let hash_addr: NodeAddress = self.serialize_node_address()?;
let (call_data, _) = add_node::call(
self.tendermint_key,
hash_addr,
u64::from(self.api_port),
u64::from(self.capacity),
self.private,
);
call_contract(web3, &self.eth, call_data, None)
};
let check_node_registered_fn = || -> Result<bool, Error> {
let contract_status = status::get_status(web3, self.eth.contract_address)?;
Ok(find_by_tendermint_key(&contract_status, self.tendermint_key).is_some())
};
let wait_event_fn = |tx: &H256| -> Result<Registered, Error> {
let logs = get_transaction_logs(self.eth.eth_url.as_str(), tx, parse_deployed)?;
let status = if logs.is_empty() {
Registered::Enqueued(*tx)
} else {
let (app_ids, ports) = logs
.iter()
.filter_map(|e| {
let idx = e.node_i_ds.iter().position(|id| *id == self.tendermint_key);
idx.and_then(|i| e.ports.get(i)).map(|port| {
let port = Into::<u64>::into(*port) as u16;
let app_id = Into::<u64>::into(e.app_id);
(app_id, port)
})
})
.unzip();
Registered::Deployed {
app_ids,
ports,
tx: *tx,
}
};
Ok(status)
};
// sending transaction with the hash of file with code to ethereum
if show_progress {
let mut step_counter = StepCounter::new(1);
if !self.no_status_check {
step_counter.register()
};
if self.eth.wait_eth_sync {
step_counter.register()
};
if self.eth.wait_tx_include {
step_counter.register()
};
if self.eth.wait_eth_sync {
utils::with_progress(
"Waiting while Ethereum node is syncing...",
step_counter.format_next_step().as_str(),
"Ethereum node synced.",
|| wait_sync(web3),
)?;
};
let is_registered = if !self.no_status_check {
utils::with_progress(
"Check if node in smart contract is already registered...",
step_counter.format_next_step().as_str(),
"Smart contract checked.",
|| check_node_registered_fn(),
)?
} else {
false
};
if is_registered {
Ok(Registered::AlreadyRegistered)
} else {
let tx = utils::with_progress(
"Registering the node in the smart contract...",
step_counter.format_next_step().as_str(),
"Transaction with node registration was sent.",
publish_to_contract_fn,
)?;
if self.eth.wait_tx_include {
utils::print_tx_hash(tx);
utils::with_progress(
"Waiting for the transaction to be included in a block...",
step_counter.format_next_step().as_str(),
"Transaction was included.",
|| {
wait_tx_included(&tx, web3)?;
wait_event_fn(&tx)
},
)
} else {
Ok(Registered::TransactionSent(tx))
}
}
} else {
if self.eth.wait_eth_sync {
wait_sync(web3)?;
}
let is_registered = if !self.no_status_check {
check_node_registered_fn()?
} else {
false
};
if is_registered {
Ok(Registered::AlreadyRegistered)
} else {
let tx = publish_to_contract_fn()?;
if self.eth.wait_tx_include {
wait_event_fn(&tx)
} else {
Ok(Registered::TransactionSent(tx))
}
}
}
}
}
pub fn parse(args: &ArgMatches, config: SetupConfig) -> Result<Register, Error> {
let tendermint_key: H256 = parse_tendermint_key(args)?;
let tendermint_node_id: H160 = parse_tendermint_node_id(args)?;
let api_port = value_t!(args, API_PORT, u16)?;
let capacity = value_t!(args, CAPACITY, u16)?;
let private: bool = args.is_present(PRIVATE);
let no_status_check: bool = args.is_present(NO_STATUS_CHECK);
let eth = parse_ethereum_args(args, config)?;
let node_address = parse_node_ip(&args)?;
Register::new(
node_address,
tendermint_key,
tendermint_node_id,
api_port,
capacity,
private,
no_status_check,
eth,
)
}
/// Parses arguments from console and initialize parameters for Publisher
pub fn subcommand<'a, 'b>() -> App<'a, 'b> {
let args = &[
node_ip().display_order(1),
tendermint_key().display_order(2),
base64_tendermint_key().display_order(3),
tendermint_node_id().display_order(4),
Arg::with_name(API_PORT)
.alias(API_PORT)
.long(API_PORT)
.default_value("20096")
.takes_value(true)
.help("Node API port")
.display_order(5),
Arg::with_name(CAPACITY)
.alias(CAPACITY)
.default_value("20196")
.long(CAPACITY)
.takes_value(true)
.help("Maximum number of apps to be run on the node")
.display_order(5),
Arg::with_name(PRIVATE)
.long(PRIVATE)
.short("p")
.takes_value(false)
.help("Marks node as private, used for pinning apps to nodes")
.display_order(6),
Arg::with_name(NO_STATUS_CHECK)
.long(NO_STATUS_CHECK)
.short("N")
.takes_value(false)
.help("Disable checking if a node is already registered. Registration can be faster but will spend some gas if the node is registered.")
.display_order(7),
];
SubCommand::with_name("register")
.about("Register a node in the smart contract")
.args(with_ethereum_args(args).as_slice())
.setting(AppSettings::ArgRequiredElseHelp)
}
#[cfg(test)]
pub mod tests {
use failure::Error;
use ethkey::Secret;
use rand::prelude::*;
use web3::types::*;
use crate::command::EthereumArgs;
use crate::config::SetupConfig;
use crate::credentials::Credentials;
use crate::ethereum_params::EthereumParams;
use super::Register;
pub fn generate_eth_args(credentials: Credentials) -> EthereumArgs {
let account: Address = "4180fc65d613ba7e1a385181a219f1dbfe7bf11d".parse().unwrap();
EthereumArgs::with_acc_creds(account, credentials)
}
pub fn generate_register(credentials: Credentials) -> Register {
let mut rng = rand::thread_rng();
let rnd_num: u64 = rng.gen();
let tendermint_key: H256 = H256::from(rnd_num);
let tendermint_node_id: H160 = H160::from(rnd_num);
let eth = generate_eth_args(credentials);
let config = SetupConfig::default().unwrap();
let eth_params = EthereumParams::generate(eth, config).unwrap();
Register::new(
"127.0.0.1".parse().unwrap(),
tendermint_key,
tendermint_node_id,
25006,
25100,
false,
false,
eth_params,
)
.unwrap()
}
pub fn generate_with<F>(func: F, credentials: Credentials) -> Register
where
F: FnOnce(&mut Register),
{
let mut register = generate_register(credentials);
func(&mut register);
register
}
pub fn generate_with_account(account: Address, credentials: Credentials) -> Register {
generate_with(|r| r.eth.account = account, credentials)
}
#[test]
fn register_success() -> Result<(), Error> {
let register = generate_with_account(
"fa0de43c68bea2167181cd8a83f990d02a049336".parse()?,
Credentials::No,
);
register.register(false)?;
Ok(())
}
#[test]
fn register_out_of_gas() -> Result<(), Error> {
let register = generate_with(|r| r.eth.gas = 1, Credentials::No);
let result = register.register(false);
assert_eq!(result.is_err(), true);
Ok(())
}
#[test]
fn register_success_with_secret() -> Result<(), Error> {
let secret_arr: H256 =
"a349fe22d5c6f8ad3a1ad91ddb65e8946435b52254ce8c330f7ed796e83bfd92".parse()?;
let secret = Secret::from(secret_arr);
let register = generate_with_account(
"dce48d51717ad5eb87fb56ff55ec609cf37b9aad".parse()?,
Credentials::Secret(secret),
);
register.register(false)?;
Ok(())
}
} | use web3::types::H160;
const API_PORT: &str = "api_port";
const CAPACITY: &str = "capacity";
const PRIVATE: &str = "private"; | random_line_split |
register.rs | /*
* Copyright 2018 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::net::IpAddr;
use failure::{Error, SyncFailure};
use clap::{value_t, App, AppSettings, Arg, ArgMatches, SubCommand};
use derive_getters::Getters;
use hex;
use web3::types::H256;
use crate::command::*;
use crate::config::SetupConfig;
use crate::contract_func::contract::events::app_deployed::parse_log as parse_deployed;
use crate::contract_func::contract::functions::add_node;
use crate::contract_func::{call_contract, get_transaction_logs, wait_sync, wait_tx_included};
use crate::contract_status::{find_by_tendermint_key, status};
use crate::ethereum_params::EthereumParams;
use crate::step_counter::StepCounter;
use crate::types::{NodeAddress, IP_LEN, TENDERMINT_NODE_ID_LEN};
use crate::utils;
use web3::transports::Http;
use web3::types::H160;
const API_PORT: &str = "api_port";
const CAPACITY: &str = "capacity";
const PRIVATE: &str = "private";
const NO_STATUS_CHECK: &str = "no_status_check";
#[derive(Debug, Getters)]
pub struct Register {
node_ip: IpAddr,
tendermint_key: H256,
tendermint_node_id: H160,
api_port: u16,
capacity: u16,
private: bool,
no_status_check: bool,
eth: EthereumParams,
}
#[derive(PartialEq, Debug)]
pub enum Registered {
TransactionSent(H256),
Deployed {
app_ids: Vec<u64>,
ports: Vec<u16>,
tx: H256,
},
Enqueued(H256),
AlreadyRegistered,
}
impl Register {
/// Creates `Register` structure
pub fn new(
node_address: IpAddr,
tendermint_key: H256,
tendermint_node_id: H160,
api_port: u16,
capacity: u16,
private: bool,
no_status_check: bool,
eth: EthereumParams,
) -> Result<Register, Error> {
Ok(Register {
node_ip: node_address,
tendermint_key,
tendermint_node_id,
api_port,
capacity,
private,
no_status_check,
eth,
})
}
/// Serializes a node IP address and a tendermint key into the hash of node's key address
fn serialize_node_address(&self) -> Result<NodeAddress, Error> {
// serialize tendermint key
let key_str = format!("{:?}", self.tendermint_node_id);
let key_str = key_str.as_str().trim_start_matches("0x");
let key_bytes = hex::decode(key_str.to_owned())?;
let mut key_bytes = key_bytes.as_slice()[0..TENDERMINT_NODE_ID_LEN].to_vec();
// serialize IP address
let ip_str = self.node_ip.to_string();
let split = ip_str.split('.');
let mut addr_bytes: [u8; IP_LEN] = [0; IP_LEN];
for (i, part) in split.enumerate() {
addr_bytes[i] = part.parse()?;
}
let mut addr_vec = addr_bytes.to_vec();
// concatenate tendermint key and IP address
key_bytes.append(&mut addr_vec);
let serialized = hex::encode(key_bytes);
let hash_addr: NodeAddress = serialized.parse()?;
Ok(hash_addr)
}
/// Registers a node in Fluence smart contract
pub fn register(&self, show_progress: bool) -> Result<Registered, Error> {
let (_eloop, transport) = Http::new(self.eth.eth_url.as_str()).map_err(SyncFailure::new)?;
let web3 = &web3::Web3::new(transport);
let publish_to_contract_fn = || -> Result<H256, Error> {
let hash_addr: NodeAddress = self.serialize_node_address()?;
let (call_data, _) = add_node::call(
self.tendermint_key,
hash_addr,
u64::from(self.api_port),
u64::from(self.capacity),
self.private,
);
call_contract(web3, &self.eth, call_data, None)
};
let check_node_registered_fn = || -> Result<bool, Error> {
let contract_status = status::get_status(web3, self.eth.contract_address)?;
Ok(find_by_tendermint_key(&contract_status, self.tendermint_key).is_some())
};
let wait_event_fn = |tx: &H256| -> Result<Registered, Error> {
let logs = get_transaction_logs(self.eth.eth_url.as_str(), tx, parse_deployed)?;
let status = if logs.is_empty() {
Registered::Enqueued(*tx)
} else {
let (app_ids, ports) = logs
.iter()
.filter_map(|e| {
let idx = e.node_i_ds.iter().position(|id| *id == self.tendermint_key);
idx.and_then(|i| e.ports.get(i)).map(|port| {
let port = Into::<u64>::into(*port) as u16;
let app_id = Into::<u64>::into(e.app_id);
(app_id, port)
})
})
.unzip();
Registered::Deployed {
app_ids,
ports,
tx: *tx,
}
};
Ok(status)
};
// sending transaction with the hash of file with code to ethereum
if show_progress {
let mut step_counter = StepCounter::new(1);
if !self.no_status_check {
step_counter.register()
};
if self.eth.wait_eth_sync {
step_counter.register()
};
if self.eth.wait_tx_include {
step_counter.register()
};
if self.eth.wait_eth_sync {
utils::with_progress(
"Waiting while Ethereum node is syncing...",
step_counter.format_next_step().as_str(),
"Ethereum node synced.",
|| wait_sync(web3),
)?;
};
let is_registered = if !self.no_status_check {
utils::with_progress(
"Check if node in smart contract is already registered...",
step_counter.format_next_step().as_str(),
"Smart contract checked.",
|| check_node_registered_fn(),
)?
} else {
false
};
if is_registered {
Ok(Registered::AlreadyRegistered)
} else {
let tx = utils::with_progress(
"Registering the node in the smart contract...",
step_counter.format_next_step().as_str(),
"Transaction with node registration was sent.",
publish_to_contract_fn,
)?;
if self.eth.wait_tx_include {
utils::print_tx_hash(tx);
utils::with_progress(
"Waiting for the transaction to be included in a block...",
step_counter.format_next_step().as_str(),
"Transaction was included.",
|| {
wait_tx_included(&tx, web3)?;
wait_event_fn(&tx)
},
)
} else {
Ok(Registered::TransactionSent(tx))
}
}
} else {
if self.eth.wait_eth_sync {
wait_sync(web3)?;
}
let is_registered = if !self.no_status_check {
check_node_registered_fn()?
} else {
false
};
if is_registered {
Ok(Registered::AlreadyRegistered)
} else {
let tx = publish_to_contract_fn()?;
if self.eth.wait_tx_include {
wait_event_fn(&tx)
} else {
Ok(Registered::TransactionSent(tx))
}
}
}
}
}
pub fn parse(args: &ArgMatches, config: SetupConfig) -> Result<Register, Error> {
let tendermint_key: H256 = parse_tendermint_key(args)?;
let tendermint_node_id: H160 = parse_tendermint_node_id(args)?;
let api_port = value_t!(args, API_PORT, u16)?;
let capacity = value_t!(args, CAPACITY, u16)?;
let private: bool = args.is_present(PRIVATE);
let no_status_check: bool = args.is_present(NO_STATUS_CHECK);
let eth = parse_ethereum_args(args, config)?;
let node_address = parse_node_ip(&args)?;
Register::new(
node_address,
tendermint_key,
tendermint_node_id,
api_port,
capacity,
private,
no_status_check,
eth,
)
}
/// Parses arguments from console and initialize parameters for Publisher
pub fn | <'a, 'b>() -> App<'a, 'b> {
let args = &[
node_ip().display_order(1),
tendermint_key().display_order(2),
base64_tendermint_key().display_order(3),
tendermint_node_id().display_order(4),
Arg::with_name(API_PORT)
.alias(API_PORT)
.long(API_PORT)
.default_value("20096")
.takes_value(true)
.help("Node API port")
.display_order(5),
Arg::with_name(CAPACITY)
.alias(CAPACITY)
.default_value("20196")
.long(CAPACITY)
.takes_value(true)
.help("Maximum number of apps to be run on the node")
.display_order(5),
Arg::with_name(PRIVATE)
.long(PRIVATE)
.short("p")
.takes_value(false)
.help("Marks node as private, used for pinning apps to nodes")
.display_order(6),
Arg::with_name(NO_STATUS_CHECK)
.long(NO_STATUS_CHECK)
.short("N")
.takes_value(false)
.help("Disable checking if a node is already registered. Registration can be faster but will spend some gas if the node is registered.")
.display_order(7),
];
SubCommand::with_name("register")
.about("Register a node in the smart contract")
.args(with_ethereum_args(args).as_slice())
.setting(AppSettings::ArgRequiredElseHelp)
}
#[cfg(test)]
pub mod tests {
use failure::Error;
use ethkey::Secret;
use rand::prelude::*;
use web3::types::*;
use crate::command::EthereumArgs;
use crate::config::SetupConfig;
use crate::credentials::Credentials;
use crate::ethereum_params::EthereumParams;
use super::Register;
pub fn generate_eth_args(credentials: Credentials) -> EthereumArgs {
let account: Address = "4180fc65d613ba7e1a385181a219f1dbfe7bf11d".parse().unwrap();
EthereumArgs::with_acc_creds(account, credentials)
}
pub fn generate_register(credentials: Credentials) -> Register {
let mut rng = rand::thread_rng();
let rnd_num: u64 = rng.gen();
let tendermint_key: H256 = H256::from(rnd_num);
let tendermint_node_id: H160 = H160::from(rnd_num);
let eth = generate_eth_args(credentials);
let config = SetupConfig::default().unwrap();
let eth_params = EthereumParams::generate(eth, config).unwrap();
Register::new(
"127.0.0.1".parse().unwrap(),
tendermint_key,
tendermint_node_id,
25006,
25100,
false,
false,
eth_params,
)
.unwrap()
}
pub fn generate_with<F>(func: F, credentials: Credentials) -> Register
where
F: FnOnce(&mut Register),
{
let mut register = generate_register(credentials);
func(&mut register);
register
}
pub fn generate_with_account(account: Address, credentials: Credentials) -> Register {
generate_with(|r| r.eth.account = account, credentials)
}
#[test]
fn register_success() -> Result<(), Error> {
let register = generate_with_account(
"fa0de43c68bea2167181cd8a83f990d02a049336".parse()?,
Credentials::No,
);
register.register(false)?;
Ok(())
}
#[test]
fn register_out_of_gas() -> Result<(), Error> {
let register = generate_with(|r| r.eth.gas = 1, Credentials::No);
let result = register.register(false);
assert_eq!(result.is_err(), true);
Ok(())
}
#[test]
fn register_success_with_secret() -> Result<(), Error> {
let secret_arr: H256 =
"a349fe22d5c6f8ad3a1ad91ddb65e8946435b52254ce8c330f7ed796e83bfd92".parse()?;
let secret = Secret::from(secret_arr);
let register = generate_with_account(
"dce48d51717ad5eb87fb56ff55ec609cf37b9aad".parse()?,
Credentials::Secret(secret),
);
register.register(false)?;
Ok(())
}
}
| subcommand | identifier_name |
bamToNucleosomePositions.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from singlecellmultiomics.molecule import MoleculeIterator, CHICMolecule
from singlecellmultiomics.fragment import CHICFragment
from singlecellmultiomics.bamProcessing.bamFunctions import get_reference_path_from_bam, get_contigs
from collections import defaultdict
import pyBigWig
import pysam
from singlecellmultiomics.bamProcessing.bamBinCounts import get_binned_counts
import pandas as pd
import argparse
from singlecellmultiomics.bamProcessing import get_contig_sizes
from singlecellmultiomics.utils import is_autosome, pool_wrapper
import numpy as np
from multiprocessing import Pool
from more_itertools import windowed
from glob import glob
import pyBigWig
from scipy.ndimage import gaussian_filter1d
from scipy.signal import find_peaks
import argparse
from itertools import product
import asyncio
def get_aligned_chic_len(molecule):
for f in molecule:
f.R2_primer_length = 6
mlen = molecule.estimated_max_length
if mlen is None:
return None
return mlen + 2 # 2 extra bases because 1 base is potentially lost on both sides
def _generate_molecule_coordinate_dict(args, max_fragment_size=800):
bam_path, sel_contig= args
molecule_coordinate_dict = defaultdict(list) # cell->[ (start,end), (start,end) ..]
with pysam.AlignmentFile(bam_path) as alignments:
# pysam.FastaFile(get_reference_path_from_bam(alignments)) as reference:
for molecule in MoleculeIterator(alignments,
contig=sel_contig,
molecule_class=CHICMolecule,
fragment_class = CHICFragment,
#molecule_class_args = {'reference':reference}
):
contig, start, strand = molecule.get_cut_site()
if not molecule.is_completely_matching:
continue
if molecule.get_mean_mapping_qual()<55:
continue
slen = get_aligned_chic_len(molecule)
if slen is None or slen>max_fragment_size:
continue
if strand:
end = start - slen
else:
end = start + slen
molecule_coordinate_dict[molecule.sample].append( (start,end) )
for sample in molecule_coordinate_dict:
molecule_coordinate_dict[sample] = sorted(molecule_coordinate_dict[sample])
return sel_contig, molecule_coordinate_dict
def generate_molecule_coordinate_dict(bams, n_threads=None):
molecule_coordinate_dicts = {}
contigs = get_contigs(bams[0])
with Pool(n_threads) as workers:
for contig,d in workers.imap( _generate_molecule_coordinate_dict,
product(bams,filter(is_autosome, contigs))):
if not contig in molecule_coordinate_dicts:
molecule_coordinate_dicts[contig] = d
else:
molecule_coordinate_dicts[contig].update( d )
print(f'Finished {contig}')
return molecule_coordinate_dicts
#bin_size = 100_000
#distances_per_bin = defaultdict(lambda: defaultdict(list) )
def contig_coordinate_dict_to_votes(d, n, min_total_cuts_per_cell=3, p_nuc_bin_size=5, max_linker_length=90, linker_vote_radius=25):
#molecule_coordinate_dicts[contig] = d
# n = size of contig
vote_nucleosome = np.zeros(int( n/p_nuc_bin_size) )
vote_linker = np.zeros(int( n/p_nuc_bin_size) )
for cell in d.keys():
if len(d[cell])<min_total_cuts_per_cell:
continue
for (start_a, end_a),(start_b,end_b) in windowed( d[cell],2):
o_start_a = start_a
#Check if the fragments are not overlapping:
(start_a, end_a) = min(start_a, end_a), max(start_a, end_a)
(start_b, end_b) = min(start_b, end_b), max(start_b, end_b)
# The locations of the nucleosomes are constrained:
# \\ start_a ----- end_a \\ \\ start_b ---- end_b \\
if end_a - start_a >= 147 and end_a - start_a < (147*2+10): # it could contain a single nucleosome, but not more
s = int(((start_a + 70 ))/p_nuc_bin_size)
e = int(((end_a - 70))/p_nuc_bin_size)
vote_nucleosome[ s:(e+1) ] += 1/(e-s)
# Any starting point of a molecule is _always_ part of a linker
# lets say += 25bp
c=o_start_a
s = int(((c-linker_vote_radius))/p_nuc_bin_size)
e = int(((c+linker_vote_radius))/p_nuc_bin_size)+1
vote_linker[s:(e+1)] += 1/(e-s)
if end_a > start_b:
continue
if start_b - end_a > max_linker_length: # The distance is larger than 90bp, which is a very long linker. Skip the linker vote
continue
s = int(((end_a))/p_nuc_bin_size)
e = int(((start_b))/p_nuc_bin_size)
vote_linker[s:(e+1)] += 1/(e-s+1)
return vote_nucleosome, vote_linker
async def write_to_bw(handle, starts, ends, values, contig,size=None):
if size is not None:
print( ends[ends>=size] )
handle.addEntries(
[contig]*len(starts), #Contig
starts.astype(np.int64) , #Start
ends= ends.astype(np.int64) , #end
values= values.astype(np.float64)
)
async def coordinate_dicts_to_nucleosome_position_files(bams, molecule_coordinate_dicts, p_nuc_bin_size = 5, alias='npos', n_threads=None ):
contigs = get_contigs(bams[0])
sizes = get_contig_sizes(bams[0])
# Create output bigwig file handles and worker pool
linker_vote_write_path= f'{alias}_linkers.bw'
nuc_vote_write_path= f'{alias}_nucleosomes.bw'
merged_vote_write_path= f'{alias}_nucleosomes_min_linkers.bw'
centroids = f'{alias}_nucleosome_centers.bed'
with Pool(n_threads) as workers, \
pyBigWig.open(linker_vote_write_path,'w') as linkers_out, \
pyBigWig.open(nuc_vote_write_path,'w') as nuc_out, \
pyBigWig.open(merged_vote_write_path,'w') as merged_out, \
open(centroids,'w') as centroids_out:
| h.addHeader(list(zip(contigs, [sizes[c] for c in contigs])))
# Obtain nucleosome and linker votes for each contig
for ret_contig, (vote_nucleosome, vote_linker) in zip(
molecule_coordinate_dicts.keys(),
workers.imap(pool_wrapper,
(
(contig_coordinate_dict_to_votes,
{
'd':d,
'n':sizes[contig],
'min_total_cuts_per_cell':3,
'p_nuc_bin_size':5,
'max_linker_length':90,
'linker_vote_radius':25
})
for contig, d in molecule_coordinate_dicts.items()
))):
print(f'Writing votes for {ret_contig}')
contig_len = sizes[ret_contig]
smooth_linkers = gaussian_filter1d(vote_linker,2)
smooth_nuc = gaussian_filter1d(vote_nucleosome,2)
# Write nucleosome vote track:
starts = np.array( np.linspace(0, contig_len-p_nuc_bin_size-1, len(vote_nucleosome)) )
size = sizes[ret_contig]
# Check if all numbers are preceding...
d = np.diff(starts)
d = d[d<1]
if len(d):
print(d)
else:
print(f'{len(starts)} Coordinates are in correct order')
await asyncio.gather(
write_to_bw(nuc_out, starts, starts+p_nuc_bin_size, np.nan_to_num(smooth_nuc), ret_contig, size),
write_to_bw(linkers_out, starts, starts+p_nuc_bin_size, np.nan_to_num(smooth_linkers), ret_contig, size),
write_to_bw(merged_out, starts, starts+p_nuc_bin_size, np.nan_to_num(smooth_nuc - smooth_linkers), ret_contig, size=size)
)
# Use the find peak function to select properly spaced nucleosome positions.
print(f'Writing centroids for {ret_contig}')
mindist = 147
min_dist_bins = int( mindist / p_nuc_bin_size )
estimated_nucleosome_positions = find_peaks( gaussian_filter1d(smooth_nuc - smooth_linkers,2),distance=min_dist_bins)[0]
for nucpos, score in zip( estimated_nucleosome_positions*p_nuc_bin_size, smooth_nuc[estimated_nucleosome_positions]):
centroids_out.write(f'{ret_contig}\t{nucpos}\t{nucpos+p_nuc_bin_size}\t{score}\n')
#contig_coordinate_dict_to_votes(d, n, min_total_cuts_per_cell=3, p_nuc_bin_size=5, max_linker_length=90, linker_vote_radius=25)
if __name__ == '__main__':
argparser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Estimate nucleosome positions from scCHiC seq bam file(s)')
argparser.add_argument('alignmentfiles', type=str, nargs='+')
argparser.add_argument('-o', type=str, required=True, help='output prefix')
argparser.add_argument('-bin_size', type=int, default=3, help='Nucleosome position precision (bp), increasing this value increases memory consumption linearly')
argparser.add_argument('-n_threads', type=int, help='Amount of worker threads')
args = argparser.parse_args()
async def main(args):
# Run the analysis:
print('Creating molecule coordinate database')
d = generate_molecule_coordinate_dict(args.alignmentfiles, args.n_threads)
print('Estimating nucleosome positions')
await coordinate_dicts_to_nucleosome_position_files(args.alignmentfiles,
d,
p_nuc_bin_size = args.bin_size,
n_threads=args.n_threads,
alias=args.o)
loop = asyncio.get_event_loop()
loop.run_until_complete(main(args))
#asyncio.run(main(args)) # python >= 3.7 | # Add headers for all output bigwigfiles
for h in (linkers_out, nuc_out, merged_out):
# Write header | random_line_split |
bamToNucleosomePositions.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from singlecellmultiomics.molecule import MoleculeIterator, CHICMolecule
from singlecellmultiomics.fragment import CHICFragment
from singlecellmultiomics.bamProcessing.bamFunctions import get_reference_path_from_bam, get_contigs
from collections import defaultdict
import pyBigWig
import pysam
from singlecellmultiomics.bamProcessing.bamBinCounts import get_binned_counts
import pandas as pd
import argparse
from singlecellmultiomics.bamProcessing import get_contig_sizes
from singlecellmultiomics.utils import is_autosome, pool_wrapper
import numpy as np
from multiprocessing import Pool
from more_itertools import windowed
from glob import glob
import pyBigWig
from scipy.ndimage import gaussian_filter1d
from scipy.signal import find_peaks
import argparse
from itertools import product
import asyncio
def get_aligned_chic_len(molecule):
|
def _generate_molecule_coordinate_dict(args, max_fragment_size=800):
bam_path, sel_contig= args
molecule_coordinate_dict = defaultdict(list) # cell->[ (start,end), (start,end) ..]
with pysam.AlignmentFile(bam_path) as alignments:
# pysam.FastaFile(get_reference_path_from_bam(alignments)) as reference:
for molecule in MoleculeIterator(alignments,
contig=sel_contig,
molecule_class=CHICMolecule,
fragment_class = CHICFragment,
#molecule_class_args = {'reference':reference}
):
contig, start, strand = molecule.get_cut_site()
if not molecule.is_completely_matching:
continue
if molecule.get_mean_mapping_qual()<55:
continue
slen = get_aligned_chic_len(molecule)
if slen is None or slen>max_fragment_size:
continue
if strand:
end = start - slen
else:
end = start + slen
molecule_coordinate_dict[molecule.sample].append( (start,end) )
for sample in molecule_coordinate_dict:
molecule_coordinate_dict[sample] = sorted(molecule_coordinate_dict[sample])
return sel_contig, molecule_coordinate_dict
def generate_molecule_coordinate_dict(bams, n_threads=None):
molecule_coordinate_dicts = {}
contigs = get_contigs(bams[0])
with Pool(n_threads) as workers:
for contig,d in workers.imap( _generate_molecule_coordinate_dict,
product(bams,filter(is_autosome, contigs))):
if not contig in molecule_coordinate_dicts:
molecule_coordinate_dicts[contig] = d
else:
molecule_coordinate_dicts[contig].update( d )
print(f'Finished {contig}')
return molecule_coordinate_dicts
#bin_size = 100_000
#distances_per_bin = defaultdict(lambda: defaultdict(list) )
def contig_coordinate_dict_to_votes(d, n, min_total_cuts_per_cell=3, p_nuc_bin_size=5, max_linker_length=90, linker_vote_radius=25):
#molecule_coordinate_dicts[contig] = d
# n = size of contig
vote_nucleosome = np.zeros(int( n/p_nuc_bin_size) )
vote_linker = np.zeros(int( n/p_nuc_bin_size) )
for cell in d.keys():
if len(d[cell])<min_total_cuts_per_cell:
continue
for (start_a, end_a),(start_b,end_b) in windowed( d[cell],2):
o_start_a = start_a
#Check if the fragments are not overlapping:
(start_a, end_a) = min(start_a, end_a), max(start_a, end_a)
(start_b, end_b) = min(start_b, end_b), max(start_b, end_b)
# The locations of the nucleosomes are constrained:
# \\ start_a ----- end_a \\ \\ start_b ---- end_b \\
if end_a - start_a >= 147 and end_a - start_a < (147*2+10): # it could contain a single nucleosome, but not more
s = int(((start_a + 70 ))/p_nuc_bin_size)
e = int(((end_a - 70))/p_nuc_bin_size)
vote_nucleosome[ s:(e+1) ] += 1/(e-s)
# Any starting point of a molecule is _always_ part of a linker
# lets say += 25bp
c=o_start_a
s = int(((c-linker_vote_radius))/p_nuc_bin_size)
e = int(((c+linker_vote_radius))/p_nuc_bin_size)+1
vote_linker[s:(e+1)] += 1/(e-s)
if end_a > start_b:
continue
if start_b - end_a > max_linker_length: # The distance is larger than 90bp, which is a very long linker. Skip the linker vote
continue
s = int(((end_a))/p_nuc_bin_size)
e = int(((start_b))/p_nuc_bin_size)
vote_linker[s:(e+1)] += 1/(e-s+1)
return vote_nucleosome, vote_linker
async def write_to_bw(handle, starts, ends, values, contig,size=None):
if size is not None:
print( ends[ends>=size] )
handle.addEntries(
[contig]*len(starts), #Contig
starts.astype(np.int64) , #Start
ends= ends.astype(np.int64) , #end
values= values.astype(np.float64)
)
async def coordinate_dicts_to_nucleosome_position_files(bams, molecule_coordinate_dicts, p_nuc_bin_size = 5, alias='npos', n_threads=None ):
contigs = get_contigs(bams[0])
sizes = get_contig_sizes(bams[0])
# Create output bigwig file handles and worker pool
linker_vote_write_path= f'{alias}_linkers.bw'
nuc_vote_write_path= f'{alias}_nucleosomes.bw'
merged_vote_write_path= f'{alias}_nucleosomes_min_linkers.bw'
centroids = f'{alias}_nucleosome_centers.bed'
with Pool(n_threads) as workers, \
pyBigWig.open(linker_vote_write_path,'w') as linkers_out, \
pyBigWig.open(nuc_vote_write_path,'w') as nuc_out, \
pyBigWig.open(merged_vote_write_path,'w') as merged_out, \
open(centroids,'w') as centroids_out:
# Add headers for all output bigwigfiles
for h in (linkers_out, nuc_out, merged_out):
# Write header
h.addHeader(list(zip(contigs, [sizes[c] for c in contigs])))
# Obtain nucleosome and linker votes for each contig
for ret_contig, (vote_nucleosome, vote_linker) in zip(
molecule_coordinate_dicts.keys(),
workers.imap(pool_wrapper,
(
(contig_coordinate_dict_to_votes,
{
'd':d,
'n':sizes[contig],
'min_total_cuts_per_cell':3,
'p_nuc_bin_size':5,
'max_linker_length':90,
'linker_vote_radius':25
})
for contig, d in molecule_coordinate_dicts.items()
))):
print(f'Writing votes for {ret_contig}')
contig_len = sizes[ret_contig]
smooth_linkers = gaussian_filter1d(vote_linker,2)
smooth_nuc = gaussian_filter1d(vote_nucleosome,2)
# Write nucleosome vote track:
starts = np.array( np.linspace(0, contig_len-p_nuc_bin_size-1, len(vote_nucleosome)) )
size = sizes[ret_contig]
# Check if all numbers are preceding...
d = np.diff(starts)
d = d[d<1]
if len(d):
print(d)
else:
print(f'{len(starts)} Coordinates are in correct order')
await asyncio.gather(
write_to_bw(nuc_out, starts, starts+p_nuc_bin_size, np.nan_to_num(smooth_nuc), ret_contig, size),
write_to_bw(linkers_out, starts, starts+p_nuc_bin_size, np.nan_to_num(smooth_linkers), ret_contig, size),
write_to_bw(merged_out, starts, starts+p_nuc_bin_size, np.nan_to_num(smooth_nuc - smooth_linkers), ret_contig, size=size)
)
# Use the find peak function to select properly spaced nucleosome positions.
print(f'Writing centroids for {ret_contig}')
mindist = 147
min_dist_bins = int( mindist / p_nuc_bin_size )
estimated_nucleosome_positions = find_peaks( gaussian_filter1d(smooth_nuc - smooth_linkers,2),distance=min_dist_bins)[0]
for nucpos, score in zip( estimated_nucleosome_positions*p_nuc_bin_size, smooth_nuc[estimated_nucleosome_positions]):
centroids_out.write(f'{ret_contig}\t{nucpos}\t{nucpos+p_nuc_bin_size}\t{score}\n')
#contig_coordinate_dict_to_votes(d, n, min_total_cuts_per_cell=3, p_nuc_bin_size=5, max_linker_length=90, linker_vote_radius=25)
if __name__ == '__main__':
argparser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Estimate nucleosome positions from scCHiC seq bam file(s)')
argparser.add_argument('alignmentfiles', type=str, nargs='+')
argparser.add_argument('-o', type=str, required=True, help='output prefix')
argparser.add_argument('-bin_size', type=int, default=3, help='Nucleosome position precision (bp), increasing this value increases memory consumption linearly')
argparser.add_argument('-n_threads', type=int, help='Amount of worker threads')
args = argparser.parse_args()
async def main(args):
# Run the analysis:
print('Creating molecule coordinate database')
d = generate_molecule_coordinate_dict(args.alignmentfiles, args.n_threads)
print('Estimating nucleosome positions')
await coordinate_dicts_to_nucleosome_position_files(args.alignmentfiles,
d,
p_nuc_bin_size = args.bin_size,
n_threads=args.n_threads,
alias=args.o)
loop = asyncio.get_event_loop()
loop.run_until_complete(main(args))
#asyncio.run(main(args)) # python >= 3.7
| for f in molecule:
f.R2_primer_length = 6
mlen = molecule.estimated_max_length
if mlen is None:
return None
return mlen + 2 # 2 extra bases because 1 base is potentially lost on both sides | identifier_body |
bamToNucleosomePositions.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from singlecellmultiomics.molecule import MoleculeIterator, CHICMolecule
from singlecellmultiomics.fragment import CHICFragment
from singlecellmultiomics.bamProcessing.bamFunctions import get_reference_path_from_bam, get_contigs
from collections import defaultdict
import pyBigWig
import pysam
from singlecellmultiomics.bamProcessing.bamBinCounts import get_binned_counts
import pandas as pd
import argparse
from singlecellmultiomics.bamProcessing import get_contig_sizes
from singlecellmultiomics.utils import is_autosome, pool_wrapper
import numpy as np
from multiprocessing import Pool
from more_itertools import windowed
from glob import glob
import pyBigWig
from scipy.ndimage import gaussian_filter1d
from scipy.signal import find_peaks
import argparse
from itertools import product
import asyncio
def get_aligned_chic_len(molecule):
for f in molecule:
f.R2_primer_length = 6
mlen = molecule.estimated_max_length
if mlen is None:
return None
return mlen + 2 # 2 extra bases because 1 base is potentially lost on both sides
def _generate_molecule_coordinate_dict(args, max_fragment_size=800):
bam_path, sel_contig= args
molecule_coordinate_dict = defaultdict(list) # cell->[ (start,end), (start,end) ..]
with pysam.AlignmentFile(bam_path) as alignments:
# pysam.FastaFile(get_reference_path_from_bam(alignments)) as reference:
for molecule in MoleculeIterator(alignments,
contig=sel_contig,
molecule_class=CHICMolecule,
fragment_class = CHICFragment,
#molecule_class_args = {'reference':reference}
):
contig, start, strand = molecule.get_cut_site()
if not molecule.is_completely_matching:
continue
if molecule.get_mean_mapping_qual()<55:
continue
slen = get_aligned_chic_len(molecule)
if slen is None or slen>max_fragment_size:
continue
if strand:
end = start - slen
else:
end = start + slen
molecule_coordinate_dict[molecule.sample].append( (start,end) )
for sample in molecule_coordinate_dict:
molecule_coordinate_dict[sample] = sorted(molecule_coordinate_dict[sample])
return sel_contig, molecule_coordinate_dict
def generate_molecule_coordinate_dict(bams, n_threads=None):
molecule_coordinate_dicts = {}
contigs = get_contigs(bams[0])
with Pool(n_threads) as workers:
for contig,d in workers.imap( _generate_molecule_coordinate_dict,
product(bams,filter(is_autosome, contigs))):
if not contig in molecule_coordinate_dicts:
molecule_coordinate_dicts[contig] = d
else:
molecule_coordinate_dicts[contig].update( d )
print(f'Finished {contig}')
return molecule_coordinate_dicts
#bin_size = 100_000
#distances_per_bin = defaultdict(lambda: defaultdict(list) )
def contig_coordinate_dict_to_votes(d, n, min_total_cuts_per_cell=3, p_nuc_bin_size=5, max_linker_length=90, linker_vote_radius=25):
#molecule_coordinate_dicts[contig] = d
# n = size of contig
vote_nucleosome = np.zeros(int( n/p_nuc_bin_size) )
vote_linker = np.zeros(int( n/p_nuc_bin_size) )
for cell in d.keys():
if len(d[cell])<min_total_cuts_per_cell:
continue
for (start_a, end_a),(start_b,end_b) in windowed( d[cell],2):
o_start_a = start_a
#Check if the fragments are not overlapping:
(start_a, end_a) = min(start_a, end_a), max(start_a, end_a)
(start_b, end_b) = min(start_b, end_b), max(start_b, end_b)
# The locations of the nucleosomes are constrained:
# \\ start_a ----- end_a \\ \\ start_b ---- end_b \\
if end_a - start_a >= 147 and end_a - start_a < (147*2+10): # it could contain a single nucleosome, but not more
s = int(((start_a + 70 ))/p_nuc_bin_size)
e = int(((end_a - 70))/p_nuc_bin_size)
vote_nucleosome[ s:(e+1) ] += 1/(e-s)
# Any starting point of a molecule is _always_ part of a linker
# lets say += 25bp
c=o_start_a
s = int(((c-linker_vote_radius))/p_nuc_bin_size)
e = int(((c+linker_vote_radius))/p_nuc_bin_size)+1
vote_linker[s:(e+1)] += 1/(e-s)
if end_a > start_b:
continue
if start_b - end_a > max_linker_length: # The distance is larger than 90bp, which is a very long linker. Skip the linker vote
continue
s = int(((end_a))/p_nuc_bin_size)
e = int(((start_b))/p_nuc_bin_size)
vote_linker[s:(e+1)] += 1/(e-s+1)
return vote_nucleosome, vote_linker
async def write_to_bw(handle, starts, ends, values, contig,size=None):
if size is not None:
print( ends[ends>=size] )
handle.addEntries(
[contig]*len(starts), #Contig
starts.astype(np.int64) , #Start
ends= ends.astype(np.int64) , #end
values= values.astype(np.float64)
)
async def | (bams, molecule_coordinate_dicts, p_nuc_bin_size = 5, alias='npos', n_threads=None ):
contigs = get_contigs(bams[0])
sizes = get_contig_sizes(bams[0])
# Create output bigwig file handles and worker pool
linker_vote_write_path= f'{alias}_linkers.bw'
nuc_vote_write_path= f'{alias}_nucleosomes.bw'
merged_vote_write_path= f'{alias}_nucleosomes_min_linkers.bw'
centroids = f'{alias}_nucleosome_centers.bed'
with Pool(n_threads) as workers, \
pyBigWig.open(linker_vote_write_path,'w') as linkers_out, \
pyBigWig.open(nuc_vote_write_path,'w') as nuc_out, \
pyBigWig.open(merged_vote_write_path,'w') as merged_out, \
open(centroids,'w') as centroids_out:
# Add headers for all output bigwigfiles
for h in (linkers_out, nuc_out, merged_out):
# Write header
h.addHeader(list(zip(contigs, [sizes[c] for c in contigs])))
# Obtain nucleosome and linker votes for each contig
for ret_contig, (vote_nucleosome, vote_linker) in zip(
molecule_coordinate_dicts.keys(),
workers.imap(pool_wrapper,
(
(contig_coordinate_dict_to_votes,
{
'd':d,
'n':sizes[contig],
'min_total_cuts_per_cell':3,
'p_nuc_bin_size':5,
'max_linker_length':90,
'linker_vote_radius':25
})
for contig, d in molecule_coordinate_dicts.items()
))):
print(f'Writing votes for {ret_contig}')
contig_len = sizes[ret_contig]
smooth_linkers = gaussian_filter1d(vote_linker,2)
smooth_nuc = gaussian_filter1d(vote_nucleosome,2)
# Write nucleosome vote track:
starts = np.array( np.linspace(0, contig_len-p_nuc_bin_size-1, len(vote_nucleosome)) )
size = sizes[ret_contig]
# Check if all numbers are preceding...
d = np.diff(starts)
d = d[d<1]
if len(d):
print(d)
else:
print(f'{len(starts)} Coordinates are in correct order')
await asyncio.gather(
write_to_bw(nuc_out, starts, starts+p_nuc_bin_size, np.nan_to_num(smooth_nuc), ret_contig, size),
write_to_bw(linkers_out, starts, starts+p_nuc_bin_size, np.nan_to_num(smooth_linkers), ret_contig, size),
write_to_bw(merged_out, starts, starts+p_nuc_bin_size, np.nan_to_num(smooth_nuc - smooth_linkers), ret_contig, size=size)
)
# Use the find peak function to select properly spaced nucleosome positions.
print(f'Writing centroids for {ret_contig}')
mindist = 147
min_dist_bins = int( mindist / p_nuc_bin_size )
estimated_nucleosome_positions = find_peaks( gaussian_filter1d(smooth_nuc - smooth_linkers,2),distance=min_dist_bins)[0]
for nucpos, score in zip( estimated_nucleosome_positions*p_nuc_bin_size, smooth_nuc[estimated_nucleosome_positions]):
centroids_out.write(f'{ret_contig}\t{nucpos}\t{nucpos+p_nuc_bin_size}\t{score}\n')
#contig_coordinate_dict_to_votes(d, n, min_total_cuts_per_cell=3, p_nuc_bin_size=5, max_linker_length=90, linker_vote_radius=25)
if __name__ == '__main__':
argparser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Estimate nucleosome positions from scCHiC seq bam file(s)')
argparser.add_argument('alignmentfiles', type=str, nargs='+')
argparser.add_argument('-o', type=str, required=True, help='output prefix')
argparser.add_argument('-bin_size', type=int, default=3, help='Nucleosome position precision (bp), increasing this value increases memory consumption linearly')
argparser.add_argument('-n_threads', type=int, help='Amount of worker threads')
args = argparser.parse_args()
async def main(args):
# Run the analysis:
print('Creating molecule coordinate database')
d = generate_molecule_coordinate_dict(args.alignmentfiles, args.n_threads)
print('Estimating nucleosome positions')
await coordinate_dicts_to_nucleosome_position_files(args.alignmentfiles,
d,
p_nuc_bin_size = args.bin_size,
n_threads=args.n_threads,
alias=args.o)
loop = asyncio.get_event_loop()
loop.run_until_complete(main(args))
#asyncio.run(main(args)) # python >= 3.7
| coordinate_dicts_to_nucleosome_position_files | identifier_name |
bamToNucleosomePositions.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from singlecellmultiomics.molecule import MoleculeIterator, CHICMolecule
from singlecellmultiomics.fragment import CHICFragment
from singlecellmultiomics.bamProcessing.bamFunctions import get_reference_path_from_bam, get_contigs
from collections import defaultdict
import pyBigWig
import pysam
from singlecellmultiomics.bamProcessing.bamBinCounts import get_binned_counts
import pandas as pd
import argparse
from singlecellmultiomics.bamProcessing import get_contig_sizes
from singlecellmultiomics.utils import is_autosome, pool_wrapper
import numpy as np
from multiprocessing import Pool
from more_itertools import windowed
from glob import glob
import pyBigWig
from scipy.ndimage import gaussian_filter1d
from scipy.signal import find_peaks
import argparse
from itertools import product
import asyncio
def get_aligned_chic_len(molecule):
for f in molecule:
f.R2_primer_length = 6
mlen = molecule.estimated_max_length
if mlen is None:
return None
return mlen + 2 # 2 extra bases because 1 base is potentially lost on both sides
def _generate_molecule_coordinate_dict(args, max_fragment_size=800):
bam_path, sel_contig= args
molecule_coordinate_dict = defaultdict(list) # cell->[ (start,end), (start,end) ..]
with pysam.AlignmentFile(bam_path) as alignments:
# pysam.FastaFile(get_reference_path_from_bam(alignments)) as reference:
for molecule in MoleculeIterator(alignments,
contig=sel_contig,
molecule_class=CHICMolecule,
fragment_class = CHICFragment,
#molecule_class_args = {'reference':reference}
):
contig, start, strand = molecule.get_cut_site()
if not molecule.is_completely_matching:
continue
if molecule.get_mean_mapping_qual()<55:
continue
slen = get_aligned_chic_len(molecule)
if slen is None or slen>max_fragment_size:
continue
if strand:
end = start - slen
else:
end = start + slen
molecule_coordinate_dict[molecule.sample].append( (start,end) )
for sample in molecule_coordinate_dict:
molecule_coordinate_dict[sample] = sorted(molecule_coordinate_dict[sample])
return sel_contig, molecule_coordinate_dict
def generate_molecule_coordinate_dict(bams, n_threads=None):
molecule_coordinate_dicts = {}
contigs = get_contigs(bams[0])
with Pool(n_threads) as workers:
for contig,d in workers.imap( _generate_molecule_coordinate_dict,
product(bams,filter(is_autosome, contigs))):
if not contig in molecule_coordinate_dicts:
molecule_coordinate_dicts[contig] = d
else:
molecule_coordinate_dicts[contig].update( d )
print(f'Finished {contig}')
return molecule_coordinate_dicts
#bin_size = 100_000
#distances_per_bin = defaultdict(lambda: defaultdict(list) )
def contig_coordinate_dict_to_votes(d, n, min_total_cuts_per_cell=3, p_nuc_bin_size=5, max_linker_length=90, linker_vote_radius=25):
#molecule_coordinate_dicts[contig] = d
# n = size of contig
vote_nucleosome = np.zeros(int( n/p_nuc_bin_size) )
vote_linker = np.zeros(int( n/p_nuc_bin_size) )
for cell in d.keys():
if len(d[cell])<min_total_cuts_per_cell:
continue
for (start_a, end_a),(start_b,end_b) in windowed( d[cell],2):
o_start_a = start_a
#Check if the fragments are not overlapping:
(start_a, end_a) = min(start_a, end_a), max(start_a, end_a)
(start_b, end_b) = min(start_b, end_b), max(start_b, end_b)
# The locations of the nucleosomes are constrained:
# \\ start_a ----- end_a \\ \\ start_b ---- end_b \\
if end_a - start_a >= 147 and end_a - start_a < (147*2+10): # it could contain a single nucleosome, but not more
|
# Any starting point of a molecule is _always_ part of a linker
# lets say += 25bp
c=o_start_a
s = int(((c-linker_vote_radius))/p_nuc_bin_size)
e = int(((c+linker_vote_radius))/p_nuc_bin_size)+1
vote_linker[s:(e+1)] += 1/(e-s)
if end_a > start_b:
continue
if start_b - end_a > max_linker_length: # The distance is larger than 90bp, which is a very long linker. Skip the linker vote
continue
s = int(((end_a))/p_nuc_bin_size)
e = int(((start_b))/p_nuc_bin_size)
vote_linker[s:(e+1)] += 1/(e-s+1)
return vote_nucleosome, vote_linker
async def write_to_bw(handle, starts, ends, values, contig,size=None):
if size is not None:
print( ends[ends>=size] )
handle.addEntries(
[contig]*len(starts), #Contig
starts.astype(np.int64) , #Start
ends= ends.astype(np.int64) , #end
values= values.astype(np.float64)
)
async def coordinate_dicts_to_nucleosome_position_files(bams, molecule_coordinate_dicts, p_nuc_bin_size = 5, alias='npos', n_threads=None ):
contigs = get_contigs(bams[0])
sizes = get_contig_sizes(bams[0])
# Create output bigwig file handles and worker pool
linker_vote_write_path= f'{alias}_linkers.bw'
nuc_vote_write_path= f'{alias}_nucleosomes.bw'
merged_vote_write_path= f'{alias}_nucleosomes_min_linkers.bw'
centroids = f'{alias}_nucleosome_centers.bed'
with Pool(n_threads) as workers, \
pyBigWig.open(linker_vote_write_path,'w') as linkers_out, \
pyBigWig.open(nuc_vote_write_path,'w') as nuc_out, \
pyBigWig.open(merged_vote_write_path,'w') as merged_out, \
open(centroids,'w') as centroids_out:
# Add headers for all output bigwigfiles
for h in (linkers_out, nuc_out, merged_out):
# Write header
h.addHeader(list(zip(contigs, [sizes[c] for c in contigs])))
# Obtain nucleosome and linker votes for each contig
for ret_contig, (vote_nucleosome, vote_linker) in zip(
molecule_coordinate_dicts.keys(),
workers.imap(pool_wrapper,
(
(contig_coordinate_dict_to_votes,
{
'd':d,
'n':sizes[contig],
'min_total_cuts_per_cell':3,
'p_nuc_bin_size':5,
'max_linker_length':90,
'linker_vote_radius':25
})
for contig, d in molecule_coordinate_dicts.items()
))):
print(f'Writing votes for {ret_contig}')
contig_len = sizes[ret_contig]
smooth_linkers = gaussian_filter1d(vote_linker,2)
smooth_nuc = gaussian_filter1d(vote_nucleosome,2)
# Write nucleosome vote track:
starts = np.array( np.linspace(0, contig_len-p_nuc_bin_size-1, len(vote_nucleosome)) )
size = sizes[ret_contig]
# Check if all numbers are preceding...
d = np.diff(starts)
d = d[d<1]
if len(d):
print(d)
else:
print(f'{len(starts)} Coordinates are in correct order')
await asyncio.gather(
write_to_bw(nuc_out, starts, starts+p_nuc_bin_size, np.nan_to_num(smooth_nuc), ret_contig, size),
write_to_bw(linkers_out, starts, starts+p_nuc_bin_size, np.nan_to_num(smooth_linkers), ret_contig, size),
write_to_bw(merged_out, starts, starts+p_nuc_bin_size, np.nan_to_num(smooth_nuc - smooth_linkers), ret_contig, size=size)
)
# Use the find peak function to select properly spaced nucleosome positions.
print(f'Writing centroids for {ret_contig}')
mindist = 147
min_dist_bins = int( mindist / p_nuc_bin_size )
estimated_nucleosome_positions = find_peaks( gaussian_filter1d(smooth_nuc - smooth_linkers,2),distance=min_dist_bins)[0]
for nucpos, score in zip( estimated_nucleosome_positions*p_nuc_bin_size, smooth_nuc[estimated_nucleosome_positions]):
centroids_out.write(f'{ret_contig}\t{nucpos}\t{nucpos+p_nuc_bin_size}\t{score}\n')
#contig_coordinate_dict_to_votes(d, n, min_total_cuts_per_cell=3, p_nuc_bin_size=5, max_linker_length=90, linker_vote_radius=25)
if __name__ == '__main__':
argparser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Estimate nucleosome positions from scCHiC seq bam file(s)')
argparser.add_argument('alignmentfiles', type=str, nargs='+')
argparser.add_argument('-o', type=str, required=True, help='output prefix')
argparser.add_argument('-bin_size', type=int, default=3, help='Nucleosome position precision (bp), increasing this value increases memory consumption linearly')
argparser.add_argument('-n_threads', type=int, help='Amount of worker threads')
args = argparser.parse_args()
async def main(args):
# Run the analysis:
print('Creating molecule coordinate database')
d = generate_molecule_coordinate_dict(args.alignmentfiles, args.n_threads)
print('Estimating nucleosome positions')
await coordinate_dicts_to_nucleosome_position_files(args.alignmentfiles,
d,
p_nuc_bin_size = args.bin_size,
n_threads=args.n_threads,
alias=args.o)
loop = asyncio.get_event_loop()
loop.run_until_complete(main(args))
#asyncio.run(main(args)) # python >= 3.7
| s = int(((start_a + 70 ))/p_nuc_bin_size)
e = int(((end_a - 70))/p_nuc_bin_size)
vote_nucleosome[ s:(e+1) ] += 1/(e-s) | conditional_block |
ast.js | /**
* Abstract Syntax Tree for a localization message in 'Banana' format
* @param {string} message
* @param {Object} options options
* @param {boolean} [options.wikilinks] whether the wiki style link syntax should be parsed or not
*/
export default function BananaMessage (message, { wikilinks = false } = {}) {
let pos = 0
// Try parsers until one works, if none work return null
function choice (parserSyntax) {
return () => {
for (let i = 0; i < parserSyntax.length; i++) {
const result = parserSyntax[i]()
if (result !== null) {
return result
}
}
return null
}
}
// Try several parserSyntax-es in a row.
// All must succeed; otherwise, return null.
// This is the only eager one.
function sequence (parserSyntax) {
const originalPos = pos
const result = []
for (let i = 0; i < parserSyntax.length; i++) {
const res = parserSyntax[i]()
if (res === null) {
pos = originalPos
return null
}
result.push(res)
}
return result
}
// Run the same parser over and over until it fails.
// Must succeed a minimum of n times; otherwise, return null.
function nOrMore (n, p) {
return () => {
const originalPos = pos
const result = []
let parsed = p()
while (parsed !== null) {
result.push(parsed)
parsed = p()
}
if (result.length < n) {
pos = originalPos
return null
}
return result
}
}
// Helpers -- just make parserSyntax out of simpler JS builtin types
function makeStringParser (s) {
const len = s.length
return () => {
let result = null
if (message.slice(pos, pos + len) === s) {
result = s
pos += len
}
return result
}
}
function makeRegexParser (regex) {
return () => {
const matches = message.slice(pos).match(regex)
if (matches === null) {
return null
}
pos += matches[0].length
return matches[0]
}
}
const whitespace = makeRegexParser(/^\s+/)
const pipe = makeStringParser('|')
const colon = makeStringParser(':')
const backslash = makeStringParser('\\')
const anyCharacter = makeRegexParser(/^./)
const dollar = makeStringParser('$')
const digits = makeRegexParser(/^\d+/)
const doubleQuote = makeStringParser('"')
const singleQuote = makeStringParser('\'')
// A literal is any character except the special characters in the message markup
// Special characters are: [, ], {, }, $, \, <, >
// If wikilinks parsing is disabled, treat [ and ] as regular text.
const regularLiteral = wikilinks ? makeRegexParser(/^[^{}[\]$<\\]/) : makeRegexParser(/^[^{}$<\\]/)
const regularLiteralWithoutBar = wikilinks ? makeRegexParser(/^[^{}[\]$\\|]/) : makeRegexParser(/^[^{}$\\|]/)
const regularLiteralWithoutSpace = wikilinks ? makeRegexParser(/^[^{}[\]$\s]/) : makeRegexParser(/^[^{}$\s]/)
// There is a general pattern:
// parse a thing;
// if it worked, apply transform,
// otherwise return null.
// But using this as a combinator seems to cause problems
// when combined with nOrMore().
// May be some scoping issue.
function transform (p, fn) {
return () => {
const result = p()
return result === null ? null : fn(result)
}
}
// Used to define "literals" within template parameters. The pipe
// character is the parameter delimeter, so by default
// it is not a literal in the parameter
function literalWithoutBar () {
const result = nOrMore(1, escapedOrLiteralWithoutBar)()
return result === null ? null : result.join('')
}
// Used to define "literals" within template parameters.
// The pipe character is the parameter delimeter, so by default
// it is not a literal in the parameter
function literal () {
const result = nOrMore(1, escapedOrRegularLiteral)()
return result === null ? null : result.join('')
}
const escapedOrLiteralWithoutSpace = choice([
escapedLiteral,
regularLiteralWithoutSpace
])
// Used to define "literals" without spaces, in space-delimited situations
function literalWithoutSpace () {
const result = nOrMore(1, escapedOrLiteralWithoutSpace)()
return result === null ? null : result.join('')
}
function escapedLiteral () {
const result = sequence([backslash, anyCharacter])
return result === null ? null : result[1]
}
choice([escapedLiteral, regularLiteralWithoutSpace])
const escapedOrLiteralWithoutBar = choice([escapedLiteral, regularLiteralWithoutBar])
const escapedOrRegularLiteral = choice([escapedLiteral, regularLiteral])
function replacement () {
const result = sequence([dollar, digits])
if (result === null) {
return null
}
return ['REPLACE', parseInt(result[1], 10) - 1]
}
const templateName = transform(
// see $wgLegalTitleChars
// not allowing : due to the need to catch "PLURAL:$1"
makeRegexParser(/^[ !"$&'()*,./0-9;=?@A-Z^_`a-z~\x80-\xFF+-]+/),
function (result) {
return result.toString()
}
)
function templateParam () {
const result = sequence([pipe, nOrMore(0, paramExpression)])
if (result === null) {
return null
}
const expr = result[1]
// use a "CONCAT" operator if there are multiple nodes,
// otherwise return the first node, raw.
return expr.length > 1 ? ['CONCAT'].concat(expr) : expr[0]
}
function templateWithReplacement () {
const result = sequence([templateName, colon, replacement])
return result === null ? null : [result[0], result[2]]
}
function templateWithOutReplacement () {
const result = sequence([templateName, colon, paramExpression])
return result === null ? null : [result[0], result[2]]
}
function templateWithOutFirstParameter () {
const result = sequence([templateName, colon])
return result === null ? null : [result[0], '']
}
const templateContents = choice([
function () {
const res = sequence([
// templates can have placeholders for dynamic
// replacement eg: {{PLURAL:$1|one car|$1 cars}}
// or no placeholders eg:{{GRAMMAR:genitive|{{SITENAME}}}
// Templates can also have empty first param eg:{{GENDER:|A|B|C}}
// to indicate current user in the context. We need to parse them without
// error, but can only fallback to gender neutral form.
choice([templateWithReplacement, templateWithOutReplacement, templateWithOutFirstParameter]),
nOrMore(0, templateParam)
])
return res === null ? null : res[0].concat(res[1])
},
function () {
const res = sequence([templateName, nOrMore(0, templateParam)])
if (res === null) {
return null
}
return [res[0]].concat(res[1])
}
])
const openTemplate = makeStringParser('{{')
const closeTemplate = makeStringParser('}}')
const openWikilink = makeStringParser('[[')
const closeWikilink = makeStringParser(']]')
const openExtlink = makeStringParser('[')
const closeExtlink = makeStringParser(']')
/**
* An expression in the form of {{...}}
*/
function template () {
const result = sequence([openTemplate, templateContents, closeTemplate])
return result === null ? null : result[1]
}
function pipedWikilink () {
const result = sequence([
nOrMore(1, paramExpression),
pipe,
nOrMore(1, expression)
])
return result === null
? null
: [
['CONCAT'].concat(result[0]),
['CONCAT'].concat(result[2])
]
}
function unpipedWikilink () |
const wikilinkContents = choice([
pipedWikilink,
unpipedWikilink
])
function wikilink () {
let result = null
const parsedResult = sequence([
openWikilink,
wikilinkContents,
closeWikilink
])
if (parsedResult !== null) {
const parsedLinkContents = parsedResult[1]
result = ['WIKILINK'].concat(parsedLinkContents)
}
return result
}
// this extlink MUST have inner contents, e.g. [foo] not allowed; [foo bar] [foo <i>bar</i>], etc. are allowed
function extlink () {
let result = null
const parsedResult = sequence([
openExtlink,
nOrMore(1, nonWhitespaceExpression),
whitespace,
nOrMore(1, expression),
closeExtlink
])
if (parsedResult !== null) {
// When the entire link target is a single parameter, we can't use CONCAT, as we allow
// passing fancy parameters (like a whole jQuery object or a function) to use for the
// link. Check only if it's a single match, since we can either do CONCAT or not for
// singles with the same effect.
const target = parsedResult[1].length === 1
? parsedResult[1][0]
: ['CONCAT'].concat(parsedResult[1])
result = [
'EXTLINK',
target,
['CONCAT'].concat(parsedResult[3])
]
}
return result
}
const asciiAlphabetLiteral = makeRegexParser(/^[A-Za-z]+/)
/**
* Checks if HTML is allowed
*
* @param {string} startTagName HTML start tag name
* @param {string} endTagName HTML start tag name
* @param {Object} attributes array of consecutive key value pairs,
* with index 2 * n being a name and 2 * n + 1 the associated value
* @return {boolean} true if this is HTML is allowed, false otherwise
*/
function isAllowedHtml (startTagName, endTagName, attributes, settings = {
// Whitelist for allowed HTML elements in wikitext.
// Self-closing tags are not currently supported.
allowedHtmlElements: ['b', 'bdi', 'del', 'i', 'ins', 'u', 'font', 'big', 'small', 'sub',
'sup', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'cite', 'code', 'em', 's', 'strike', 'strong',
'tt', 'var', 'div', 'center', 'blockquote', 'ol', 'ul', 'dl', 'table', 'caption', 'pre',
'ruby', 'rb', 'rp', 'rt', 'rtc', 'p', 'span', 'abbr', 'dfn', 'kbd', 'samp', 'data', 'time',
'mark', 'li', 'dt', 'dd'],
// Key tag name, value allowed attributes for that tag.
// Sourced from Parsoid's Sanitizer::setupAttributeWhitelist
allowedHtmlCommonAttributes: [
// HTML
'id',
'class',
'style',
'lang',
'dir',
'title',
// WAI-ARIA
'aria-describedby',
'aria-flowto',
'aria-hidden',
'aria-label',
'aria-labelledby',
'aria-owns',
'role',
// RDFa
// These attributes are specified in section 9 of
// https://www.w3.org/TR/2008/REC-rdfa-syntax-20081014
'about',
'property',
'resource',
'datatype',
'typeof',
// Microdata. These are specified by
// https://html.spec.whatwg.org/multipage/microdata.html#the-microdata-model
'itemid',
'itemprop',
'itemref',
'itemscope',
'itemtype'
],
// Attributes allowed for specific elements.
// Key is element name in lower case
// Value is array of allowed attributes for that element
allowedHtmlAttributesByElement: {}
}) {
startTagName = startTagName.toLowerCase()
endTagName = endTagName.toLowerCase()
if (startTagName !== endTagName || settings.allowedHtmlElements.indexOf(startTagName) === -1) {
return false
}
const badStyle = /[\000-\010\013\016-\037\177]|expression|filter\s*:|accelerator\s*:|-o-link\s*:|-o-link-source\s*:|-o-replace\s*:|url\s*\(|image\s*\(|image-set\s*\(/i
for (let i = 0, len = attributes.length; i < len; i += 2) {
const attributeName = attributes[i]
if (settings.allowedHtmlCommonAttributes.indexOf(attributeName) === -1 &&
(settings.allowedHtmlAttributesByElement[startTagName] || []).indexOf(attributeName) === -1) {
return false
}
if (attributeName === 'style' && attributes[i + 1].search(badStyle) !== -1) {
return false
}
}
return true
}
function doubleQuotedHtmlAttributeValue () {
const htmlDoubleQuoteAttributeValue = makeRegexParser(/^[^"]*/)
const parsedResult = sequence([
doubleQuote,
htmlDoubleQuoteAttributeValue,
doubleQuote
])
return parsedResult === null ? null : parsedResult[1]
}
function singleQuotedHtmlAttributeValue () {
const htmlSingleQuoteAttributeValue = makeRegexParser(/^[^']*/)
const parsedResult = sequence([
singleQuote,
htmlSingleQuoteAttributeValue,
singleQuote
])
return parsedResult === null ? null : parsedResult[1]
}
function htmlAttribute () {
const htmlAttributeEquals = makeRegexParser(/^\s*=\s*/)
const parsedResult = sequence([
whitespace,
asciiAlphabetLiteral,
htmlAttributeEquals,
choice([
doubleQuotedHtmlAttributeValue,
singleQuotedHtmlAttributeValue
])
])
return parsedResult === null ? null : [parsedResult[1], parsedResult[3]]
}
function htmlAttributes () {
const parsedResult = nOrMore(0, htmlAttribute)()
// Un-nest attributes array due to structure of emitter operations.
return Array.prototype.concat.apply(['HTMLATTRIBUTES'], parsedResult)
}
// Parse, validate and escape HTML content in messages using a whitelisted tag names
// and attributes.
function html () {
let result = null
// Break into three sequence calls. That should allow accurate reconstruction of the original HTML, and requiring an exact tag name match.
// 1. open through closeHtmlTag
// 2. expression
// 3. openHtmlEnd through close
// This will allow recording the positions to reconstruct if HTML is to be treated as text.
const startOpenTagPos = pos
const openHtmlStartTag = makeStringParser('<')
const optionalForwardSlash = makeRegexParser(/^\/?/)
const closeHtmlTag = makeRegexParser(/^\s*>/)
const parsedOpenTagResult = sequence([
openHtmlStartTag,
asciiAlphabetLiteral,
htmlAttributes,
optionalForwardSlash,
closeHtmlTag
])
if (parsedOpenTagResult === null) {
return null
}
const endOpenTagPos = pos
const startTagName = parsedOpenTagResult[1]
const parsedHtmlContents = nOrMore(0, expression)()
const startCloseTagPos = pos
const openHtmlEndTag = makeStringParser('</')
const parsedCloseTagResult = sequence([
openHtmlEndTag,
asciiAlphabetLiteral,
closeHtmlTag
])
if (parsedCloseTagResult === null) {
// Closing tag failed. Return the start tag and contents.
return ['CONCAT', message.slice(startOpenTagPos, endOpenTagPos)]
.concat(parsedHtmlContents)
}
const endCloseTagPos = pos
const endTagName = parsedCloseTagResult[1]
const wrappedAttributes = parsedOpenTagResult[2]
const attributes = wrappedAttributes.slice(1)
if (isAllowedHtml(startTagName, endTagName, attributes)) {
result = ['HTMLELEMENT', startTagName, wrappedAttributes]
.concat(parsedHtmlContents)
} else {
// HTML is not allowed, so contents will remain how
// it was, while HTML markup at this level will be
// treated as text
// E.g. assuming script tags are not allowed:
//
// <script>[[Foo|bar]]</script>
//
// results in '<script>' and '</script>'
// (not treated as an HTML tag), surrounding a fully
// parsed HTML link.
//
// Concatenate everything from the tag, flattening the contents.
const escapeHTML = (unsafeContent) => unsafeContent
.replace(/&/g, '&')
.replace(/</g, '<')
.replace(/>/g, '>')
.replace(/"/g, '"')
.replace(/'/g, ''')
result = ['CONCAT', escapeHTML(message.slice(startOpenTagPos, endOpenTagPos))]
.concat(parsedHtmlContents, escapeHTML(message.slice(startCloseTagPos, endCloseTagPos)))
}
return result
}
const nonWhitespaceExpression = choice([
template,
replacement,
wikilink,
extlink,
literalWithoutSpace
])
const expression = choice([
template,
replacement,
wikilink,
extlink,
html,
literal
])
const paramExpression = choice([template, replacement, literalWithoutBar])
function start () {
const result = nOrMore(0, expression)()
if (result === null) {
return null
}
return ['CONCAT'].concat(result)
}
const result = start()
/*
* For success, the pos must have gotten to the end of the input
* and returned a non-null.
* n.b. This is part of language infrastructure, so we do not throw an internationalizable message.
*/
if (result === null || pos !== message.length) {
throw new Error('Parse error at position ' + pos.toString() + ' in input: ' + message)
}
return result
}
| {
const result = sequence([
nOrMore(1, paramExpression)
])
return result === null
? null
: [
['CONCAT'].concat(result[0])
]
} | identifier_body |
ast.js | /**
* Abstract Syntax Tree for a localization message in 'Banana' format
* @param {string} message
* @param {Object} options options
* @param {boolean} [options.wikilinks] whether the wiki style link syntax should be parsed or not
*/
export default function BananaMessage (message, { wikilinks = false } = {}) {
let pos = 0
// Try parsers until one works, if none work return null
function choice (parserSyntax) {
return () => {
for (let i = 0; i < parserSyntax.length; i++) {
const result = parserSyntax[i]()
if (result !== null) {
return result
}
}
return null
}
}
// Try several parserSyntax-es in a row.
// All must succeed; otherwise, return null.
// This is the only eager one.
function sequence (parserSyntax) {
const originalPos = pos
const result = []
for (let i = 0; i < parserSyntax.length; i++) {
const res = parserSyntax[i]()
if (res === null) {
pos = originalPos
return null
}
result.push(res)
}
return result
}
// Run the same parser over and over until it fails.
// Must succeed a minimum of n times; otherwise, return null.
function nOrMore (n, p) {
return () => {
const originalPos = pos
const result = []
let parsed = p()
while (parsed !== null) {
result.push(parsed)
parsed = p()
}
if (result.length < n) {
pos = originalPos
return null
}
return result
}
}
// Helpers -- just make parserSyntax out of simpler JS builtin types
function makeStringParser (s) {
const len = s.length
return () => {
let result = null
if (message.slice(pos, pos + len) === s) {
result = s
pos += len
}
return result
}
}
function makeRegexParser (regex) {
return () => {
const matches = message.slice(pos).match(regex)
if (matches === null) {
return null
}
pos += matches[0].length
return matches[0]
}
}
const whitespace = makeRegexParser(/^\s+/)
const pipe = makeStringParser('|')
const colon = makeStringParser(':')
const backslash = makeStringParser('\\')
const anyCharacter = makeRegexParser(/^./)
const dollar = makeStringParser('$')
const digits = makeRegexParser(/^\d+/)
const doubleQuote = makeStringParser('"')
const singleQuote = makeStringParser('\'')
// A literal is any character except the special characters in the message markup
// Special characters are: [, ], {, }, $, \, <, >
// If wikilinks parsing is disabled, treat [ and ] as regular text.
const regularLiteral = wikilinks ? makeRegexParser(/^[^{}[\]$<\\]/) : makeRegexParser(/^[^{}$<\\]/)
const regularLiteralWithoutBar = wikilinks ? makeRegexParser(/^[^{}[\]$\\|]/) : makeRegexParser(/^[^{}$\\|]/)
const regularLiteralWithoutSpace = wikilinks ? makeRegexParser(/^[^{}[\]$\s]/) : makeRegexParser(/^[^{}$\s]/)
// There is a general pattern:
// parse a thing;
// if it worked, apply transform,
// otherwise return null.
// But using this as a combinator seems to cause problems
// when combined with nOrMore().
// May be some scoping issue.
function transform (p, fn) {
return () => {
const result = p()
return result === null ? null : fn(result)
}
}
// Used to define "literals" within template parameters. The pipe
// character is the parameter delimeter, so by default
// it is not a literal in the parameter
function literalWithoutBar () {
const result = nOrMore(1, escapedOrLiteralWithoutBar)()
return result === null ? null : result.join('')
}
// Used to define "literals" within template parameters.
// The pipe character is the parameter delimeter, so by default
// it is not a literal in the parameter
function literal () {
const result = nOrMore(1, escapedOrRegularLiteral)()
return result === null ? null : result.join('')
}
const escapedOrLiteralWithoutSpace = choice([
escapedLiteral,
regularLiteralWithoutSpace
])
// Used to define "literals" without spaces, in space-delimited situations
function literalWithoutSpace () {
const result = nOrMore(1, escapedOrLiteralWithoutSpace)()
return result === null ? null : result.join('')
}
function escapedLiteral () {
const result = sequence([backslash, anyCharacter])
return result === null ? null : result[1]
}
choice([escapedLiteral, regularLiteralWithoutSpace])
const escapedOrLiteralWithoutBar = choice([escapedLiteral, regularLiteralWithoutBar])
const escapedOrRegularLiteral = choice([escapedLiteral, regularLiteral])
function replacement () {
const result = sequence([dollar, digits])
if (result === null) {
return null
}
return ['REPLACE', parseInt(result[1], 10) - 1]
}
const templateName = transform(
// see $wgLegalTitleChars
// not allowing : due to the need to catch "PLURAL:$1"
makeRegexParser(/^[ !"$&'()*,./0-9;=?@A-Z^_`a-z~\x80-\xFF+-]+/),
function (result) {
return result.toString()
}
)
function templateParam () {
const result = sequence([pipe, nOrMore(0, paramExpression)])
if (result === null) {
return null
}
const expr = result[1]
// use a "CONCAT" operator if there are multiple nodes,
// otherwise return the first node, raw.
return expr.length > 1 ? ['CONCAT'].concat(expr) : expr[0]
}
function templateWithReplacement () {
const result = sequence([templateName, colon, replacement])
return result === null ? null : [result[0], result[2]]
}
function templateWithOutReplacement () {
const result = sequence([templateName, colon, paramExpression])
return result === null ? null : [result[0], result[2]]
}
function templateWithOutFirstParameter () {
const result = sequence([templateName, colon])
return result === null ? null : [result[0], '']
}
const templateContents = choice([
function () {
const res = sequence([
// templates can have placeholders for dynamic
// replacement eg: {{PLURAL:$1|one car|$1 cars}}
// or no placeholders eg:{{GRAMMAR:genitive|{{SITENAME}}}
// Templates can also have empty first param eg:{{GENDER:|A|B|C}}
// to indicate current user in the context. We need to parse them without
// error, but can only fallback to gender neutral form.
choice([templateWithReplacement, templateWithOutReplacement, templateWithOutFirstParameter]),
nOrMore(0, templateParam)
]) | return res === null ? null : res[0].concat(res[1])
},
function () {
const res = sequence([templateName, nOrMore(0, templateParam)])
if (res === null) {
return null
}
return [res[0]].concat(res[1])
}
])
const openTemplate = makeStringParser('{{')
const closeTemplate = makeStringParser('}}')
const openWikilink = makeStringParser('[[')
const closeWikilink = makeStringParser(']]')
const openExtlink = makeStringParser('[')
const closeExtlink = makeStringParser(']')
/**
* An expression in the form of {{...}}
*/
function template () {
const result = sequence([openTemplate, templateContents, closeTemplate])
return result === null ? null : result[1]
}
function pipedWikilink () {
const result = sequence([
nOrMore(1, paramExpression),
pipe,
nOrMore(1, expression)
])
return result === null
? null
: [
['CONCAT'].concat(result[0]),
['CONCAT'].concat(result[2])
]
}
function unpipedWikilink () {
const result = sequence([
nOrMore(1, paramExpression)
])
return result === null
? null
: [
['CONCAT'].concat(result[0])
]
}
const wikilinkContents = choice([
pipedWikilink,
unpipedWikilink
])
function wikilink () {
let result = null
const parsedResult = sequence([
openWikilink,
wikilinkContents,
closeWikilink
])
if (parsedResult !== null) {
const parsedLinkContents = parsedResult[1]
result = ['WIKILINK'].concat(parsedLinkContents)
}
return result
}
// this extlink MUST have inner contents, e.g. [foo] not allowed; [foo bar] [foo <i>bar</i>], etc. are allowed
function extlink () {
let result = null
const parsedResult = sequence([
openExtlink,
nOrMore(1, nonWhitespaceExpression),
whitespace,
nOrMore(1, expression),
closeExtlink
])
if (parsedResult !== null) {
// When the entire link target is a single parameter, we can't use CONCAT, as we allow
// passing fancy parameters (like a whole jQuery object or a function) to use for the
// link. Check only if it's a single match, since we can either do CONCAT or not for
// singles with the same effect.
const target = parsedResult[1].length === 1
? parsedResult[1][0]
: ['CONCAT'].concat(parsedResult[1])
result = [
'EXTLINK',
target,
['CONCAT'].concat(parsedResult[3])
]
}
return result
}
const asciiAlphabetLiteral = makeRegexParser(/^[A-Za-z]+/)
/**
 * Checks whether a parsed HTML element may be emitted as real markup.
 *
 * The start and end tag names must match (case-insensitively), the element
 * must be on the whitelist, and every attribute must be allowed either
 * globally or for this specific element. `style` values are additionally
 * screened against a blacklist of dangerous CSS constructs.
 *
 * @param {string} startTagName HTML start tag name
 * @param {string} endTagName HTML end tag name
 * @param {Object} attributes array of consecutive key value pairs,
 *  with index 2 * n being a name and 2 * n + 1 the associated value
 * @param {Object} [settings] whitelists of elements and attributes
 * @return {boolean} true if this is HTML is allowed, false otherwise
 */
function isAllowedHtml (startTagName, endTagName, attributes, settings = {
  // Whitelist for allowed HTML elements in wikitext.
  // Self-closing tags are not currently supported.
  allowedHtmlElements: ['b', 'bdi', 'del', 'i', 'ins', 'u', 'font', 'big', 'small', 'sub',
    'sup', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'cite', 'code', 'em', 's', 'strike', 'strong',
    'tt', 'var', 'div', 'center', 'blockquote', 'ol', 'ul', 'dl', 'table', 'caption', 'pre',
    'ruby', 'rb', 'rp', 'rt', 'rtc', 'p', 'span', 'abbr', 'dfn', 'kbd', 'samp', 'data', 'time',
    'mark', 'li', 'dt', 'dd'],
  // Key tag name, value allowed attributes for that tag.
  // Sourced from Parsoid's Sanitizer::setupAttributeWhitelist
  allowedHtmlCommonAttributes: [
    // HTML
    'id',
    'class',
    'style',
    'lang',
    'dir',
    'title',
    // WAI-ARIA
    'aria-describedby',
    'aria-flowto',
    'aria-hidden',
    'aria-label',
    'aria-labelledby',
    'aria-owns',
    'role',
    // RDFa
    // These attributes are specified in section 9 of
    // https://www.w3.org/TR/2008/REC-rdfa-syntax-20081014
    'about',
    'property',
    'resource',
    'datatype',
    'typeof',
    // Microdata. These are specified by
    // https://html.spec.whatwg.org/multipage/microdata.html#the-microdata-model
    'itemid',
    'itemprop',
    'itemref',
    'itemscope',
    'itemtype'
  ],
  // Attributes allowed for specific elements.
  // Key is element name in lower case
  // Value is array of allowed attributes for that element
  allowedHtmlAttributesByElement: {}
}) {
  const lowerStart = startTagName.toLowerCase()
  const lowerEnd = endTagName.toLowerCase()
  // Open/close tags must agree, and the element itself must be whitelisted.
  if (lowerStart !== lowerEnd || settings.allowedHtmlElements.indexOf(lowerStart) === -1) {
    return false
  }
  // Reject style values that could smuggle in script execution or external resources.
  const badStyle = /[\000-\010\013\016-\037\177]|expression|filter\s*:|accelerator\s*:|-o-link\s*:|-o-link-source\s*:|-o-replace\s*:|url\s*\(|image\s*\(|image-set\s*\(/i
  const perElementAttributes = settings.allowedHtmlAttributesByElement[lowerStart] || []
  for (let i = 0; i < attributes.length; i += 2) {
    const attributeName = attributes[i]
    const allowedGlobally = settings.allowedHtmlCommonAttributes.indexOf(attributeName) !== -1
    if (!allowedGlobally && perElementAttributes.indexOf(attributeName) === -1) {
      return false
    }
    if (attributeName === 'style' && attributes[i + 1].search(badStyle) !== -1) {
      return false
    }
  }
  return true
}
// Parses a double-quoted attribute value and returns the text between the quotes.
function doubleQuotedHtmlAttributeValue () {
  // Everything up to (but not including) the next double quote.
  const valueParser = makeRegexParser(/^[^"]*/)
  const parsed = sequence([doubleQuote, valueParser, doubleQuote])
  if (parsed === null) {
    return null
  }
  return parsed[1]
}
// Parses a single-quoted attribute value and returns the text between the quotes.
function singleQuotedHtmlAttributeValue () {
  // Everything up to (but not including) the next single quote.
  const valueParser = makeRegexParser(/^[^']*/)
  const parsed = sequence([singleQuote, valueParser, singleQuote])
  if (parsed === null) {
    return null
  }
  return parsed[1]
}
// Parses a single `name="value"` / `name='value'` attribute (with leading
// whitespace) and returns a [name, value] pair, or null on failure.
function htmlAttribute () {
  const htmlAttributeEquals = makeRegexParser(/^\s*=\s*/)
  const quotedValue = choice([
    doubleQuotedHtmlAttributeValue,
    singleQuotedHtmlAttributeValue
  ])
  const parsed = sequence([
    whitespace,
    asciiAlphabetLiteral,
    htmlAttributeEquals,
    quotedValue
  ])
  if (parsed === null) {
    return null
  }
  return [parsed[1], parsed[3]]
}
// Parses zero or more attributes and flattens the [name, value] pairs into
// a single ['HTMLATTRIBUTES', name, value, ...] operation for the emitter.
function htmlAttributes () {
  const pairs = nOrMore(0, htmlAttribute)()
  // Un-nest attributes array due to structure of emitter operations.
  return ['HTMLATTRIBUTES'].concat(...pairs)
}
// Parse, validate and escape HTML content in messages using a whitelisted tag names
// and attributes.
function html () {
  let result = null
  // Break into three sequence calls. That should allow accurate reconstruction of the original HTML, and requiring an exact tag name match.
  // 1. open through closeHtmlTag
  // 2. expression
  // 3. openHtmlEnd through close
  // This will allow recording the positions to reconstruct if HTML is to be treated as text.
  const startOpenTagPos = pos
  const openHtmlStartTag = makeStringParser('<')
  const optionalForwardSlash = makeRegexParser(/^\/?/)
  const closeHtmlTag = makeRegexParser(/^\s*>/)
  const parsedOpenTagResult = sequence([
    openHtmlStartTag,
    asciiAlphabetLiteral,
    htmlAttributes,
    optionalForwardSlash,
    closeHtmlTag
  ])
  if (parsedOpenTagResult === null) {
    return null
  }
  const endOpenTagPos = pos
  const startTagName = parsedOpenTagResult[1]
  // Recursively parse the element's contents as ordinary expressions.
  const parsedHtmlContents = nOrMore(0, expression)()
  const startCloseTagPos = pos
  const openHtmlEndTag = makeStringParser('</')
  const parsedCloseTagResult = sequence([
    openHtmlEndTag,
    asciiAlphabetLiteral,
    closeHtmlTag
  ])
  if (parsedCloseTagResult === null) {
    // Closing tag failed. Return the start tag and contents.
    return ['CONCAT', message.slice(startOpenTagPos, endOpenTagPos)]
      .concat(parsedHtmlContents)
  }
  const endCloseTagPos = pos
  const endTagName = parsedCloseTagResult[1]
  const wrappedAttributes = parsedOpenTagResult[2]
  const attributes = wrappedAttributes.slice(1)
  if (isAllowedHtml(startTagName, endTagName, attributes)) {
    result = ['HTMLELEMENT', startTagName, wrappedAttributes]
      .concat(parsedHtmlContents)
  } else {
    // HTML is not allowed, so contents will remain how
    // it was, while HTML markup at this level will be
    // treated as text
    // E.g. assuming script tags are not allowed:
    //
    // <script>[[Foo|bar]]</script>
    //
    // results in '&lt;script&gt;' and '&lt;/script&gt;'
    // (not treated as an HTML tag), surrounding a fully
    // parsed HTML link.
    //
    // Concatenate everything from the tag, flattening the contents.
    //
    // BUG FIX: the replacement strings must be HTML entities. Previously
    // each .replace() substituted the literal character for itself, making
    // escapeHTML a no-op and letting disallowed markup through unescaped.
    const escapeHTML = (unsafeContent) => unsafeContent
      .replace(/&/g, '&amp;')
      .replace(/</g, '&lt;')
      .replace(/>/g, '&gt;')
      .replace(/"/g, '&quot;')
      .replace(/'/g, '&#039;')
    result = ['CONCAT', escapeHTML(message.slice(startOpenTagPos, endOpenTagPos))]
      .concat(parsedHtmlContents, escapeHTML(message.slice(startCloseTagPos, endCloseTagPos)))
  }
  return result
}
const nonWhitespaceExpression = choice([
template,
replacement,
wikilink,
extlink,
literalWithoutSpace
])
const expression = choice([
template,
replacement,
wikilink,
extlink,
html,
literal
])
const paramExpression = choice([template, replacement, literalWithoutBar])
// Top-level grammar rule: zero or more expressions wrapped in a CONCAT
// operation, or null if the underlying parse failed.
function start () {
  const parsedExpressions = nOrMore(0, expression)()
  return parsedExpressions === null
    ? null
    : ['CONCAT'].concat(parsedExpressions)
}
// Run the parse from the top-level rule.
const result = start()
/*
 * For success, the pos must have gotten to the end of the input
 * and returned a non-null.
 * n.b. This is part of language infrastructure, so we do not throw an internationalizable message.
 */
if (result === null || pos !== message.length) {
throw new Error('Parse error at position ' + pos.toString() + ' in input: ' + message)
}
return result
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.