file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
scaledobject_controller.go | scaledObject *kedav1alpha1.ScaledObject) (string, error) {
// Check scale target Name is specified
if scaledObject.Spec.ScaleTargetRef.Name == "" {
err := fmt.Errorf("ScaledObject.spec.scaleTargetRef.name is missing")
return "ScaledObject doesn't have correct scaleTargetRef specification", err
}
// Check the label needed for Metrics servers is present on ScaledObject
err := r.ensureScaledObjectLabel(logger, scaledObject)
if err != nil {
return "Failed to update ScaledObject with scaledObjectName label", err
}
// Check if resource targeted for scaling exists and exposes /scale subresource
gvkr, err := r.checkTargetResourceIsScalable(logger, scaledObject)
if err != nil {
return "ScaledObject doesn't have correct scaleTargetRef specification", err
}
// Create a new HPA or update existing one according to ScaledObject
newHPACreated, err := r.ensureHPAForScaledObjectExists(logger, scaledObject, &gvkr)
if err != nil {
return "Failed to ensure HPA is correctly created for ScaledObject", err
}
scaleObjectSpecChanged := false
if !newHPACreated {
// Lets Check whether ScaledObject generation was changed, ie. there were changes in ScaledObject.Spec
// if it was changed we should start a new ScaleLoop
// (we can omit this check if a new HPA was created, which fires new ScaleLoop anyway)
scaleObjectSpecChanged, err = r.scaledObjectGenerationChanged(logger, scaledObject)
if err != nil {
return "Failed to check whether ScaledObject's Generation was changed", err
}
}
// Notify ScaleHandler if a new HPA was created or if ScaledObject was updated
if newHPACreated || scaleObjectSpecChanged {
if r.requestScaleLoop(logger, scaledObject) != nil {
return "Failed to start a new scale loop with scaling logic", err
}
logger.Info("Initializing Scaling logic according to ScaledObject Specification")
}
return "ScaledObject is defined correctly and is ready for scaling", nil
}
// ensureScaledObjectLabel makes sure the ScaledObject carries the
// scaledObjectName=<scaledObject.Name> label; the MetricsAdapter relies on
// this label to map an HPA metrics query back to the owning ScaledObject.
// The object is written back to the cluster only when the label is absent
// or holds a stale value.
func (r *ScaledObjectReconciler) ensureScaledObjectLabel(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error {
	const labelScaledObjectName = "scaledObjectName"

	if scaledObject.Labels != nil {
		// Fast path: label already present with the correct value.
		if current, ok := scaledObject.Labels[labelScaledObjectName]; ok && current == scaledObject.Name {
			return nil
		}
		scaledObject.Labels[labelScaledObjectName] = scaledObject.Name
	} else {
		scaledObject.Labels = map[string]string{labelScaledObjectName: scaledObject.Name}
	}

	logger.V(1).Info("Adding scaledObjectName label on ScaledObject", "value", scaledObject.Name)
	return r.Client.Update(context.TODO(), scaledObject)
}
// checkTargetResourceIsScalable checks if resource targeted for scaling exists and exposes /scale subresource.
// It returns the parsed Group/Version/Kind/Resource of the target. On first success it also
// persists the discovered GVK string, the GVKR, and the target's original replica count
// (as reported by the /scale subresource) into the ScaledObject status.
func (r *ScaledObjectReconciler) checkTargetResourceIsScalable(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (kedav1alpha1.GroupVersionKindResource, error) {
	// Resolve apiVersion/kind from the ScaleTargetRef into a full GVKR via the REST mapper.
	gvkr, err := kedautil.ParseGVKR(r.restMapper, scaledObject.Spec.ScaleTargetRef.APIVersion, scaledObject.Spec.ScaleTargetRef.Kind)
	if err != nil {
		logger.Error(err, "Failed to parse Group, Version, Kind, Resource", "apiVersion", scaledObject.Spec.ScaleTargetRef.APIVersion, "kind", scaledObject.Spec.ScaleTargetRef.Kind)
		return gvkr, err
	}
	gvkString := gvkr.GVKString()
	logger.V(1).Info("Parsed Group, Version, Kind, Resource", "GVK", gvkString, "Resource", gvkr.Resource)
	// let's try to detect /scale subresource
	scale, errScale := (*r.scaleClient).Scales(scaledObject.Namespace).Get(context.TODO(), gvkr.GroupResource(), scaledObject.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
	if errScale != nil {
		// not able to get /scale subresource -> let's check if the resource even exist in the cluster
		// (an unstructured GET avoids needing a typed client for arbitrary target kinds)
		unstruct := &unstructured.Unstructured{}
		unstruct.SetGroupVersionKind(gvkr.GroupVersionKind())
		if err := r.Client.Get(context.TODO(), client.ObjectKey{Namespace: scaledObject.Namespace, Name: scaledObject.Spec.ScaleTargetRef.Name}, unstruct); err != nil {
			// resource doesn't exist
			logger.Error(err, "Target resource doesn't exist", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
			return gvkr, err
		}
		// resource exist but doesn't expose /scale subresource
		logger.Error(errScale, "Target resource doesn't expose /scale subresource", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
		return gvkr, errScale
	}
	// if it is not already present in ScaledObject Status:
	// - store discovered GVK and GVKR
	// - store original scaleTarget's replica count (before scaling with KEDA)
	if scaledObject.Status.ScaleTargetKind != gvkString || scaledObject.Status.OriginalReplicaCount == nil {
		// Mutate a deep copy so the status-update helper receives the full desired status.
		status := scaledObject.Status.DeepCopy()
		if scaledObject.Status.ScaleTargetKind != gvkString {
			status.ScaleTargetKind = gvkString
			status.ScaleTargetGVKR = &gvkr
		}
		if scaledObject.Status.OriginalReplicaCount == nil {
			// Capture the replica count as it is before KEDA starts scaling the target.
			status.OriginalReplicaCount = &scale.Spec.Replicas
		}
		if err := kedacontrollerutil.UpdateScaledObjectStatus(r.Client, logger, scaledObject, status); err != nil {
			return gvkr, err
		}
		logger.Info("Detected resource targeted for scaling", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
	}
	return gvkr, nil
}
// ensureHPAForScaledObjectExists guarantees that an up-to-date HPA backing the
// given ScaledObject is present in the cluster. It returns true only when a
// brand new HPA had to be created (the caller then fires a new ScaleLoop).
func (r *ScaledObjectReconciler) ensureHPAForScaledObjectExists(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, gvkr *kedav1alpha1.GroupVersionKindResource) (bool, error) {
	hpaName := getHPAName(scaledObject)
	foundHpa := &autoscalingv2beta2.HorizontalPodAutoscaler{}

	switch err := r.Client.Get(context.TODO(), types.NamespacedName{Name: hpaName, Namespace: scaledObject.Namespace}, foundHpa); {
	case err != nil && errors.IsNotFound(err):
		// No HPA for this ScaledObject yet -> create one from scratch.
		if createErr := r.createAndDeployNewHPA(logger, scaledObject, gvkr); createErr != nil {
			return false, createErr
		}
		// scaledObject.spec.behavior is supported only on k8s >= 1.18; emit a warning otherwise.
		r.checkMinK8sVersionforHPABehavior(logger, scaledObject)
		// Signal the caller that a fresh ScaleLoop should be started.
		return true, nil
	case err != nil:
		logger.Error(err, "Failed to get HPA from cluster")
		return false, err
	}

	// An HPA already exists -> reconcile it against the current ScaledObject spec.
	if err := r.updateHPAIfNeeded(logger, scaledObject, foundHpa, gvkr); err != nil {
		logger.Error(err, "Failed to check HPA for possible update")
		return false, err
	}
	return false, nil
}
// requestScaleLoop notifies the ScaleHandler that the given ScaledObject was
// created or updated so the corresponding ScaleLoop can be (re)started, and
// records the object's current Generation for later change detection.
func (r *ScaledObjectReconciler) requestScaleLoop(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error {
	logger.V(1).Info("Notify scaleHandler of an update in scaledObject")

	key, err := cache.MetaNamespaceKeyFunc(scaledObject)
	if err != nil {
		logger.Error(err, "Error getting key for scaledObject")
		return err
	}

	if err := r.scaleHandler.HandleScalableObject(scaledObject); err != nil {
		return err
	}

	// Remember the Generation we just acted on so that
	// scaledObjectGenerationChanged can detect future spec changes.
	r.scaledObjectsGenerations.Store(key, scaledObject.Generation)
	return nil
}
// stopScaleLoop stops the ScaleLoop handler for the respective ScaledObject
// and removes the object's cached Generation.
func (r *ScaledObjectReconciler) stopScaleLoop(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error {
	// Compute the cache key first; if this fails we must not touch the handler.
	key, err := cache.MetaNamespaceKeyFunc(scaledObject)
	if err != nil {
		logger.Error(err, "Error getting key for scaledObject")
		return err
	}
	if err := r.scaleHandler.DeleteScalableObject(scaledObject); err != nil {
		return err
	}
	// delete ScaledObject's current Generation: the object is no longer watched,
	// so generation-change tracking for it is dropped as well.
	r.scaledObjectsGenerations.Delete(key)
	return nil
}
// scaledObjectGenerationChanged returns true if ScaledObject's Generation was changed, ie. ScaledObject.Spec was changed
func (r *ScaledObjectReconciler) | scaledObjectGenerationChanged | identifier_name | |
create_secret.go | a secret using specified subcommand.",
Run: cmdutil.DefaultSubCommandRun(errOut),
}
cmd.AddCommand(NewCmdCreateSecretDockerRegistry(f, cmdOut))
cmd.AddCommand(NewCmdCreateSecretTLS(f, cmdOut))
cmd.AddCommand(NewCmdCreateSecretGeneric(f, cmdOut))
return cmd
}
var (
secretLong = templates.LongDesc(i18n.T(`
Create a secret based on a file, directory, or specified literal value.
A single secret may package one or more key/value pairs.
When creating a secret based on a file, the key will default to the basename of the file, and the value will
default to the file content. If the basename is an invalid key, you may specify an alternate key.
When creating a secret based on a directory, each file whose basename is a valid key in the directory will be
packaged into the secret. Any directory entries except regular files are ignored (e.g. subdirectories,
symlinks, devices, pipes, etc).`))
secretExample = templates.Examples(i18n.T(`
# Create a new secret named my-secret with keys for each file in folder bar
kubectl create secret generic my-secret --from-file=path/to/bar
# Create a new secret named my-secret with specified keys instead of names on disk
kubectl create secret generic my-secret --from-file=ssh-privatekey=~/.ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub
# Create a new secret named my-secret with key1=supersecret and key2=topsecret
kubectl create secret generic my-secret --from-literal=key1=supersecret --from-literal=key2=topsecret
# Create a new secret named my-secret from an env file
kubectl create secret generic my-secret --from-env-file=path/to/bar.env`))
)
// NewCmdCreateSecretGeneric is a command to create generic secrets from files, directories, or literal values
func NewCmdCreateSecretGeneric(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command | }
// CreateSecretGeneric implements the "create secret generic" subcommand: it
// resolves the secret name from the positional args, builds the matching
// generator from the command-line flags, and delegates to the generic
// create-subcommand runner.
func CreateSecretGeneric(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
	name, err := NameFromCommandArgs(cmd, args)
	if err != nil {
		return err
	}

	generatorName := cmdutil.GetFlagString(cmd, "generator")
	if generatorName != cmdutil.SecretV1GeneratorName {
		// Only the v1 secret generator is understood by this command.
		return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
	}

	var generator kubectl.StructuredGenerator = &kubectl.SecretGeneratorV1{
		Name:           name,
		Type:           cmdutil.GetFlagString(cmd, "type"),
		FileSources:    cmdutil.GetFlagStringSlice(cmd, "from-file"),
		LiteralSources: cmdutil.GetFlagStringArray(cmd, "from-literal"),
		EnvFileSource:  cmdutil.GetFlagString(cmd, "from-env-file"),
	}

	return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
		Name:                name,
		StructuredGenerator: generator,
		DryRun:              cmdutil.GetDryRunFlag(cmd),
		OutputFormat:        cmdutil.GetFlagString(cmd, "output"),
	})
}
var (
secretForDockerRegistryLong = templates.LongDesc(i18n.T(`
Create a new secret for use with Docker registries.
Dockercfg secrets are used to authenticate against Docker registries.
When using the Docker command line to push images, you can authenticate to a given registry by running
$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.
That produces a ~/.dockercfg file that is used by subsequent 'docker push' and 'docker pull' commands to
authenticate to the registry. The email address is optional.
When creating applications, you may have a Docker registry that requires authentication. In order for the
nodes to pull images on your behalf, they have to have the credentials. You can provide this information
by creating a dockercfg secret and attaching it to your service account.`))
secretForDockerRegistryExample = templates.Examples(i18n.T(`
# If you don't already have a .dockercfg file, you can create a dockercfg secret directly by using:
kubectl create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL`))
)
// NewCmdCreateSecretDockerRegistry is a macro command for creating secrets to work with Docker registries.
// It wires the docker-* flags onto the subcommand and delegates the actual
// secret creation to CreateSecretDockerRegistry.
func NewCmdCreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
	cmd := &cobra.Command{
		Use: "docker-registry NAME --docker-username=user --docker-password=password --docker-email=email [--docker-server=string] [--from-literal=key1=value1] [--dry-run]",
		Short: i18n.T("Create a secret for use with a Docker registry"),
		Long: secretForDockerRegistryLong,
		Example: secretForDockerRegistryExample,
		Run: func(cmd *cobra.Command, args []string) {
			err := CreateSecretDockerRegistry(f, cmdOut, cmd, args)
			cmdutil.CheckErr(err)
		},
	}
	// Shared flag sets: apply annotation, validation, printing, and generator selection.
	cmdutil.AddApplyAnnotationFlags(cmd)
	cmdutil.AddValidateFlags(cmd)
	cmdutil.AddPrinterFlags(cmd)
	cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretForDockerRegistryV1GeneratorName)
	// Registry credentials. MarkFlagRequired's error return is ignored here,
	// which is safe only because the flag was registered on the line above.
	cmd.Flags().String("docker-username", "", i18n.T("Username for Docker registry authentication"))
	cmd.MarkFlagRequired("docker-username")
	cmd.Flags().String("docker-password", "", i18n.T("Password for Docker registry authentication"))
	cmd.MarkFlagRequired("docker-password")
	cmd.Flags().String("docker-email", "", i18n.T("Email for Docker registry"))
	// Defaults to Docker Hub's v1 index when no registry server is given.
	cmd.Flags().String("docker-server", "https://index.docker.io/v1/", i18n.T("Server location for Docker registry"))
	cmdutil.AddInclude3rdPartyFlags(cmd)
	return cmd
}
// CreateSecretDockerRegistry is the implementation of the create secret docker-registry command.
// It validates that all docker-* flags are non-empty, builds the registry-secret
// generator, and delegates to the generic create-subcommand runner.
func CreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
	name, err := NameFromCommandArgs(cmd, args)
	if err != nil {
		return err
	}
	// NOTE(review): the long help text describes the email address as optional,
	// yet "docker-email" is enforced as required here — confirm intended behavior.
	requiredFlags := []string{"docker-username", "docker-password", "docker-email", "docker-server"}
	for _, requiredFlag := range requiredFlags {
		if value := cmdutil.GetFlagString(cmd, requiredFlag); len(value) == 0 {
			return cmdutil.UsageError(cmd, "flag %s is required", requiredFlag)
		}
	}
	var generator kubectl.StructuredGenerator
	switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
	case cmdutil.SecretForDockerRegistryV1GeneratorName:
		generator = &kubectl.SecretForDockerRegistryGeneratorV1{
			Name: name,
			Username: cmdutil.GetFlagString(cmd, "docker-username"),
			Email: cmdutil.GetFlagString(cmd, "docker-email"),
			Password: cmdutil.GetFlagString(cmd, "docker-password"),
			Server: cmdutil.GetFlagString(cmd, "docker-server"),
		}
	default:
		// Any generator other than the v1 docker-registry generator is rejected.
		return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
	}
	return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
		Name: name,
		StructuredGenerator: generator,
		DryRun: cmdutil.GetDryRunFlag(cmd),
		OutputFormat: cmdutil.GetFlagString(cmd, "output"),
	})
}
var (
secretForTLSLong = templates.LongDesc(i18n.T(`
Create a TLS secret from the given public/private key pair.
| {
cmd := &cobra.Command{
Use: "generic NAME [--type=string] [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]",
Short: i18n.T("Create a secret from a local file, directory or literal value"),
Long: secretLong,
Example: secretExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretGeneric(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretV1GeneratorName)
cmd.Flags().StringSlice("from-file", []string{}, "Key files can be specified using their file path, in which case a default name will be given to them, or optionally with a name and file path, in which case the given name will be used. Specifying a directory will iterate each named file in the directory that is a valid secret key.")
cmd.Flags().StringArray("from-literal", []string{}, "Specify a key and literal value to insert in secret (i.e. mykey=somevalue)")
cmd.Flags().String("from-env-file", "", "Specify the path to a file to read lines of key=val pairs to create a secret (i.e. a Docker .env file).")
cmd.Flags().String("type", "", i18n.T("The type of secret to create"))
return cmd | identifier_body |
create_secret.go | &cobra.Command{
Use: "generic NAME [--type=string] [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]",
Short: i18n.T("Create a secret from a local file, directory or literal value"),
Long: secretLong,
Example: secretExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretGeneric(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretV1GeneratorName)
cmd.Flags().StringSlice("from-file", []string{}, "Key files can be specified using their file path, in which case a default name will be given to them, or optionally with a name and file path, in which case the given name will be used. Specifying a directory will iterate each named file in the directory that is a valid secret key.")
cmd.Flags().StringArray("from-literal", []string{}, "Specify a key and literal value to insert in secret (i.e. mykey=somevalue)")
cmd.Flags().String("from-env-file", "", "Specify the path to a file to read lines of key=val pairs to create a secret (i.e. a Docker .env file).")
cmd.Flags().String("type", "", i18n.T("The type of secret to create"))
return cmd
}
// CreateSecretGeneric is the implementation of the create secret generic command
func CreateSecretGeneric(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretV1GeneratorName:
generator = &kubectl.SecretGeneratorV1{
Name: name,
Type: cmdutil.GetFlagString(cmd, "type"),
FileSources: cmdutil.GetFlagStringSlice(cmd, "from-file"),
LiteralSources: cmdutil.GetFlagStringArray(cmd, "from-literal"),
EnvFileSource: cmdutil.GetFlagString(cmd, "from-env-file"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetDryRunFlag(cmd),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
}
var (
secretForDockerRegistryLong = templates.LongDesc(i18n.T(`
Create a new secret for use with Docker registries.
Dockercfg secrets are used to authenticate against Docker registries.
When using the Docker command line to push images, you can authenticate to a given registry by running
$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.
That produces a ~/.dockercfg file that is used by subsequent 'docker push' and 'docker pull' commands to
authenticate to the registry. The email address is optional.
When creating applications, you may have a Docker registry that requires authentication. In order for the
nodes to pull images on your behalf, they have to have the credentials. You can provide this information
by creating a dockercfg secret and attaching it to your service account.`))
secretForDockerRegistryExample = templates.Examples(i18n.T(`
# If you don't already have a .dockercfg file, you can create a dockercfg secret directly by using:
kubectl create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL`))
)
// NewCmdCreateSecretDockerRegistry is a macro command for creating secrets to work with Docker registries
func NewCmdCreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "docker-registry NAME --docker-username=user --docker-password=password --docker-email=email [--docker-server=string] [--from-literal=key1=value1] [--dry-run]",
Short: i18n.T("Create a secret for use with a Docker registry"),
Long: secretForDockerRegistryLong,
Example: secretForDockerRegistryExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretDockerRegistry(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretForDockerRegistryV1GeneratorName)
cmd.Flags().String("docker-username", "", i18n.T("Username for Docker registry authentication"))
cmd.MarkFlagRequired("docker-username")
cmd.Flags().String("docker-password", "", i18n.T("Password for Docker registry authentication"))
cmd.MarkFlagRequired("docker-password")
cmd.Flags().String("docker-email", "", i18n.T("Email for Docker registry"))
cmd.Flags().String("docker-server", "https://index.docker.io/v1/", i18n.T("Server location for Docker registry"))
cmdutil.AddInclude3rdPartyFlags(cmd)
return cmd
}
// CreateSecretDockerRegistry is the implementation of the create secret docker-registry command
func CreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
requiredFlags := []string{"docker-username", "docker-password", "docker-email", "docker-server"}
for _, requiredFlag := range requiredFlags {
if value := cmdutil.GetFlagString(cmd, requiredFlag); len(value) == 0 {
return cmdutil.UsageError(cmd, "flag %s is required", requiredFlag)
}
}
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretForDockerRegistryV1GeneratorName:
generator = &kubectl.SecretForDockerRegistryGeneratorV1{
Name: name,
Username: cmdutil.GetFlagString(cmd, "docker-username"),
Email: cmdutil.GetFlagString(cmd, "docker-email"),
Password: cmdutil.GetFlagString(cmd, "docker-password"),
Server: cmdutil.GetFlagString(cmd, "docker-server"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetDryRunFlag(cmd),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
}
var (
secretForTLSLong = templates.LongDesc(i18n.T(`
Create a TLS secret from the given public/private key pair.
The public/private key pair must exist before hand. The public key certificate must be .PEM encoded and match the given private key.`))
secretForTLSExample = templates.Examples(i18n.T(`
# Create a new TLS secret named tls-secret with the given key pair:
kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/to/tls.key`))
)
// NewCmdCreateSecretTLS is a macro command for creating TLS secrets from an
// existing public/private key pair. It registers the cert/key flags and
// delegates the actual work to CreateSecretTLS.
func NewCmdCreateSecretTLS(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
	tlsCmd := &cobra.Command{
		Use:     "tls NAME --cert=path/to/cert/file --key=path/to/key/file [--dry-run]",
		Short:   i18n.T("Create a TLS secret"),
		Long:    secretForTLSLong,
		Example: secretForTLSExample,
		Run: func(c *cobra.Command, args []string) {
			cmdutil.CheckErr(CreateSecretTLS(f, cmdOut, c, args))
		},
	}
	// Shared flag sets: apply annotation, validation, printing, and generator selection.
	cmdutil.AddApplyAnnotationFlags(tlsCmd)
	cmdutil.AddValidateFlags(tlsCmd)
	cmdutil.AddPrinterFlags(tlsCmd)
	cmdutil.AddGeneratorFlags(tlsCmd, cmdutil.SecretForTLSV1GeneratorName)
	tlsCmd.Flags().String("cert", "", i18n.T("Path to PEM encoded public key certificate."))
	tlsCmd.Flags().String("key", "", i18n.T("Path to private key associated with given certificate."))
	return tlsCmd
}
// CreateSecretTLS is the implementation of the create secret tls command
func CreateSecretTLS(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
requiredFlags := []string{"cert", "key"}
for _, requiredFlag := range requiredFlags | {
if value := cmdutil.GetFlagString(cmd, requiredFlag); len(value) == 0 {
return cmdutil.UsageError(cmd, "flag %s is required", requiredFlag)
}
} | conditional_block | |
create_secret.go | a secret using specified subcommand.",
Run: cmdutil.DefaultSubCommandRun(errOut),
}
cmd.AddCommand(NewCmdCreateSecretDockerRegistry(f, cmdOut))
cmd.AddCommand(NewCmdCreateSecretTLS(f, cmdOut))
cmd.AddCommand(NewCmdCreateSecretGeneric(f, cmdOut))
return cmd
}
var (
secretLong = templates.LongDesc(i18n.T(`
Create a secret based on a file, directory, or specified literal value.
A single secret may package one or more key/value pairs.
When creating a secret based on a file, the key will default to the basename of the file, and the value will
default to the file content. If the basename is an invalid key, you may specify an alternate key.
When creating a secret based on a directory, each file whose basename is a valid key in the directory will be
packaged into the secret. Any directory entries except regular files are ignored (e.g. subdirectories,
symlinks, devices, pipes, etc).`))
secretExample = templates.Examples(i18n.T(`
# Create a new secret named my-secret with keys for each file in folder bar
kubectl create secret generic my-secret --from-file=path/to/bar
# Create a new secret named my-secret with specified keys instead of names on disk
kubectl create secret generic my-secret --from-file=ssh-privatekey=~/.ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub
# Create a new secret named my-secret with key1=supersecret and key2=topsecret
kubectl create secret generic my-secret --from-literal=key1=supersecret --from-literal=key2=topsecret
# Create a new secret named my-secret from an env file
kubectl create secret generic my-secret --from-env-file=path/to/bar.env`)) | Use: "generic NAME [--type=string] [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]",
Short: i18n.T("Create a secret from a local file, directory or literal value"),
Long: secretLong,
Example: secretExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretGeneric(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretV1GeneratorName)
cmd.Flags().StringSlice("from-file", []string{}, "Key files can be specified using their file path, in which case a default name will be given to them, or optionally with a name and file path, in which case the given name will be used. Specifying a directory will iterate each named file in the directory that is a valid secret key.")
cmd.Flags().StringArray("from-literal", []string{}, "Specify a key and literal value to insert in secret (i.e. mykey=somevalue)")
cmd.Flags().String("from-env-file", "", "Specify the path to a file to read lines of key=val pairs to create a secret (i.e. a Docker .env file).")
cmd.Flags().String("type", "", i18n.T("The type of secret to create"))
return cmd
}
// CreateSecretGeneric is the implementation of the create secret generic command
func CreateSecretGeneric(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretV1GeneratorName:
generator = &kubectl.SecretGeneratorV1{
Name: name,
Type: cmdutil.GetFlagString(cmd, "type"),
FileSources: cmdutil.GetFlagStringSlice(cmd, "from-file"),
LiteralSources: cmdutil.GetFlagStringArray(cmd, "from-literal"),
EnvFileSource: cmdutil.GetFlagString(cmd, "from-env-file"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetDryRunFlag(cmd),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
}
var (
secretForDockerRegistryLong = templates.LongDesc(i18n.T(`
Create a new secret for use with Docker registries.
Dockercfg secrets are used to authenticate against Docker registries.
When using the Docker command line to push images, you can authenticate to a given registry by running
$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.
That produces a ~/.dockercfg file that is used by subsequent 'docker push' and 'docker pull' commands to
authenticate to the registry. The email address is optional.
When creating applications, you may have a Docker registry that requires authentication. In order for the
nodes to pull images on your behalf, they have to have the credentials. You can provide this information
by creating a dockercfg secret and attaching it to your service account.`))
secretForDockerRegistryExample = templates.Examples(i18n.T(`
# If you don't already have a .dockercfg file, you can create a dockercfg secret directly by using:
kubectl create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL`))
)
// NewCmdCreateSecretDockerRegistry is a macro command for creating secrets to work with Docker registries
func NewCmdCreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "docker-registry NAME --docker-username=user --docker-password=password --docker-email=email [--docker-server=string] [--from-literal=key1=value1] [--dry-run]",
Short: i18n.T("Create a secret for use with a Docker registry"),
Long: secretForDockerRegistryLong,
Example: secretForDockerRegistryExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretDockerRegistry(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretForDockerRegistryV1GeneratorName)
cmd.Flags().String("docker-username", "", i18n.T("Username for Docker registry authentication"))
cmd.MarkFlagRequired("docker-username")
cmd.Flags().String("docker-password", "", i18n.T("Password for Docker registry authentication"))
cmd.MarkFlagRequired("docker-password")
cmd.Flags().String("docker-email", "", i18n.T("Email for Docker registry"))
cmd.Flags().String("docker-server", "https://index.docker.io/v1/", i18n.T("Server location for Docker registry"))
cmdutil.AddInclude3rdPartyFlags(cmd)
return cmd
}
// CreateSecretDockerRegistry is the implementation of the create secret docker-registry command
func CreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
requiredFlags := []string{"docker-username", "docker-password", "docker-email", "docker-server"}
for _, requiredFlag := range requiredFlags {
if value := cmdutil.GetFlagString(cmd, requiredFlag); len(value) == 0 {
return cmdutil.UsageError(cmd, "flag %s is required", requiredFlag)
}
}
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretForDockerRegistryV1GeneratorName:
generator = &kubectl.SecretForDockerRegistryGeneratorV1{
Name: name,
Username: cmdutil.GetFlagString(cmd, "docker-username"),
Email: cmdutil.GetFlagString(cmd, "docker-email"),
Password: cmdutil.GetFlagString(cmd, "docker-password"),
Server: cmdutil.GetFlagString(cmd, "docker-server"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetDryRunFlag(cmd),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
}
var (
secretForTLSLong = templates.LongDesc(i18n.T(`
Create a TLS secret from the given public/private key pair.
| )
// NewCmdCreateSecretGeneric is a command to create generic secrets from files, directories, or literal values
func NewCmdCreateSecretGeneric(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
cmd := &cobra.Command{ | random_line_split |
create_secret.go | secret using specified subcommand.",
Run: cmdutil.DefaultSubCommandRun(errOut),
}
cmd.AddCommand(NewCmdCreateSecretDockerRegistry(f, cmdOut))
cmd.AddCommand(NewCmdCreateSecretTLS(f, cmdOut))
cmd.AddCommand(NewCmdCreateSecretGeneric(f, cmdOut))
return cmd
}
var (
secretLong = templates.LongDesc(i18n.T(`
Create a secret based on a file, directory, or specified literal value.
A single secret may package one or more key/value pairs.
When creating a secret based on a file, the key will default to the basename of the file, and the value will
default to the file content. If the basename is an invalid key, you may specify an alternate key.
When creating a secret based on a directory, each file whose basename is a valid key in the directory will be
packaged into the secret. Any directory entries except regular files are ignored (e.g. subdirectories,
symlinks, devices, pipes, etc).`))
secretExample = templates.Examples(i18n.T(`
# Create a new secret named my-secret with keys for each file in folder bar
kubectl create secret generic my-secret --from-file=path/to/bar
# Create a new secret named my-secret with specified keys instead of names on disk
kubectl create secret generic my-secret --from-file=ssh-privatekey=~/.ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub
# Create a new secret named my-secret with key1=supersecret and key2=topsecret
kubectl create secret generic my-secret --from-literal=key1=supersecret --from-literal=key2=topsecret
# Create a new secret named my-secret from an env file
kubectl create secret generic my-secret --from-env-file=path/to/bar.env`))
)
// NewCmdCreateSecretGeneric is a command to create generic secrets from files, directories, or literal values
func NewCmdCreateSecretGeneric(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "generic NAME [--type=string] [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]",
Short: i18n.T("Create a secret from a local file, directory or literal value"),
Long: secretLong,
Example: secretExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretGeneric(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretV1GeneratorName)
cmd.Flags().StringSlice("from-file", []string{}, "Key files can be specified using their file path, in which case a default name will be given to them, or optionally with a name and file path, in which case the given name will be used. Specifying a directory will iterate each named file in the directory that is a valid secret key.")
cmd.Flags().StringArray("from-literal", []string{}, "Specify a key and literal value to insert in secret (i.e. mykey=somevalue)")
cmd.Flags().String("from-env-file", "", "Specify the path to a file to read lines of key=val pairs to create a secret (i.e. a Docker .env file).")
cmd.Flags().String("type", "", i18n.T("The type of secret to create"))
return cmd
}
// CreateSecretGeneric is the implementation of the create secret generic command
func | (f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretV1GeneratorName:
generator = &kubectl.SecretGeneratorV1{
Name: name,
Type: cmdutil.GetFlagString(cmd, "type"),
FileSources: cmdutil.GetFlagStringSlice(cmd, "from-file"),
LiteralSources: cmdutil.GetFlagStringArray(cmd, "from-literal"),
EnvFileSource: cmdutil.GetFlagString(cmd, "from-env-file"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetDryRunFlag(cmd),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
}
var (
secretForDockerRegistryLong = templates.LongDesc(i18n.T(`
Create a new secret for use with Docker registries.
Dockercfg secrets are used to authenticate against Docker registries.
When using the Docker command line to push images, you can authenticate to a given registry by running
$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.
That produces a ~/.dockercfg file that is used by subsequent 'docker push' and 'docker pull' commands to
authenticate to the registry. The email address is optional.
When creating applications, you may have a Docker registry that requires authentication. In order for the
nodes to pull images on your behalf, they have to have the credentials. You can provide this information
by creating a dockercfg secret and attaching it to your service account.`))
secretForDockerRegistryExample = templates.Examples(i18n.T(`
# If you don't already have a .dockercfg file, you can create a dockercfg secret directly by using:
kubectl create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL`))
)
// NewCmdCreateSecretDockerRegistry is a macro command for creating secrets to work with Docker registries
func NewCmdCreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "docker-registry NAME --docker-username=user --docker-password=password --docker-email=email [--docker-server=string] [--from-literal=key1=value1] [--dry-run]",
Short: i18n.T("Create a secret for use with a Docker registry"),
Long: secretForDockerRegistryLong,
Example: secretForDockerRegistryExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretDockerRegistry(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretForDockerRegistryV1GeneratorName)
cmd.Flags().String("docker-username", "", i18n.T("Username for Docker registry authentication"))
cmd.MarkFlagRequired("docker-username")
cmd.Flags().String("docker-password", "", i18n.T("Password for Docker registry authentication"))
cmd.MarkFlagRequired("docker-password")
cmd.Flags().String("docker-email", "", i18n.T("Email for Docker registry"))
cmd.Flags().String("docker-server", "https://index.docker.io/v1/", i18n.T("Server location for Docker registry"))
cmdutil.AddInclude3rdPartyFlags(cmd)
return cmd
}
// CreateSecretDockerRegistry is the implementation of the create secret docker-registry command
func CreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
requiredFlags := []string{"docker-username", "docker-password", "docker-email", "docker-server"}
for _, requiredFlag := range requiredFlags {
if value := cmdutil.GetFlagString(cmd, requiredFlag); len(value) == 0 {
return cmdutil.UsageError(cmd, "flag %s is required", requiredFlag)
}
}
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretForDockerRegistryV1GeneratorName:
generator = &kubectl.SecretForDockerRegistryGeneratorV1{
Name: name,
Username: cmdutil.GetFlagString(cmd, "docker-username"),
Email: cmdutil.GetFlagString(cmd, "docker-email"),
Password: cmdutil.GetFlagString(cmd, "docker-password"),
Server: cmdutil.GetFlagString(cmd, "docker-server"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetDryRunFlag(cmd),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
}
var (
secretForTLSLong = templates.LongDesc(i18n.T(`
Create a TLS secret from the given public/private key pair.
| CreateSecretGeneric | identifier_name |
utils.js | .dispose()
$bitmap.dispose()
`.replace(/\r?\n|\r|\u2028|\u2029/g, "\r\n");
execFileSync("powershell.exe", [
"-NoLogo",
"-NoProfile",
"-NonInteractive",
"-WindowStyle", "Hidden",
"-Command", "-",
], {input, encoding: "utf8", windowsHide: true});
break;
default:
throw new Error("Desktop capture requires Windows or macOS");
}
},
/**
* Save a screenshot of the workspace window.
* @param {"png"|"jpg"|"pdf"} [format="png"] - Screenshot format
* @param {Number} [quality=75] - JPEG quality (0–100)
* @return {Promise<Uint8Array>}
*/
async captureWindow(format = "png", quality = 75){
const {remote} = require("electron");
const page = remote.getCurrentWebContents();
switch(format = String(format).toLowerCase()){
case "pdf":
const width = Math.ceil(CSS.px(window.innerWidth * 1000).to("mm").value);
const height = Math.ceil(CSS.px(window.innerHeight * 1000).to("mm").value);
const {buffer} = await page.printToPDF({
marginsType: 1,
printBackground: true,
pageSize: {width, height},
});
return new Uint8Array(buffer);
case "jpg":
case "jpeg":
quality = isNaN(quality) ? 75 : Math.max(Math.min(100, ~~quality), 0);
return (await page.capturePage()).toJPEG(quality);
case "png":
return (await page.capturePage()).toPNG();
default:
throw new TypeError(`Unsupported file-format: ${format}`);
}
},
/**
* Open a file to a specific line and column in the user's editor-pane.
* @param {String} path
* @param {Number} line
* @param {Number} columns
*/
jumpToFile(path, line, column){
const jumpURL = "atom://core/open/file?filename="
+ encodeURIComponent(path)
+ `&line=${line}&column=${column}`;
return require("electron").shell.openExternal(jumpURL);
},
/**
* Open a list of files relative to currently-running test.
*
* @param {...String} paths
* @return {Promise<TextEditor|TextEditor[]>}
*/
async open(...paths){
if(!paths.length) return atom.workspace.open();
const testPath = dirname((AtomMocha.runner.currentRunnable || {file: __filename}).file);
const editors = await Promise.all(paths.map(path => {
path = path.replace(/[\\/]/g, sep);
return atom.workspace.open(isAbsolute(path) ? path : resolve(testPath, normalize(path)));
}));
return (paths.length > 1) ? editors : editors[0];
},
/**
* Return a {@link Promise} which resolves once an event has been emitted.
*
* @param {EventEmitter} source - Something with an {@link Emitter} object
* @param {String} eventName - Name of event to listen for
* @return {Promise}
*/
async waitForEvent(source, eventName){
return new Promise(resolve => {
const disposable = source.emitter.on(eventName, result => {
disposable.dispose();
resolve(result);
});
});
},
/**
* Wrapper for creating a new DOM element, optionally assigning it a hash of properties upon construction.
*
* @param {String} nodeType - Element type to create.
* @param {Object} obj - An optional hash of properties to assign the newly-created object.
* @return {Element}
*/
New(nodeType, obj){
function absorb(a, b){
for(const i in b)
if(Object(a[i]) === a[i] && Object(b[i]) === b[i])
absorb(a[i], b[i]);
else a[i] = b[i];
}
const node = document.createElement(nodeType);
if(obj) absorb(node, obj);
return node;
},
/**
* Curried method to append multiple nodes at once.
*
* @example addTo(node)(el1, el2, …)
* @example node = addTo(node)(…)[0]
* @return {Function}
*/
addTo(parent){
let count = 0;
let target = parent;
const fn = (...nodes) => {
let lastElement;
for(let node of nodes){
if("string" === typeof node)
node = document.createTextNode(node);
else if(node)
lastElement =
fn[++count] = node;
node && target.appendChild(node);
}
target = lastElement || target;
return fn;
};
fn[count] = target;
return fn;
},
/**
* Return the containing element of a node that matches the given selector.
*
* If the node itself matches, it'll be returned unless ignoreSelf is set.
*
* @param {Node} node - A document node to inspect the hierarchy of
* @param {String} selector - A CSS selector string
* @param {Boolean} ignoreSelf - If given a truthy value, only the parents of a node will be queried
* @return {Element} The closest matching element, or NULL if none of the node's parents matched the selector.
*/
nearest(node, selector, ignoreSelf){
let match;
let parent = ignoreSelf ? node.parentNode : node;
const matches = document.querySelectorAll(selector);
const numMatches = matches.length;
if(numMatches) while(parent){
for(match = 0; match < numMatches; ++match)
if(matches[match] === parent) return parent;
parent = parent.parentNode;
}
return null;
},
/**
* Locate the root directory shared by multiple paths.
*
* @param {Array} paths - A list of filesystem paths
* @return {String}
*/
findBasePath(paths){
const POSIX = paths[0].indexOf("/") !== -1;
let matched = [];
// Spare ourselves the trouble if there's only one path
if(1 === paths.length){
matched = (paths[0].replace(/[\\/]+$/, "")).split(/[\\/]/g);
matched.pop();
}
// Otherwise, comb each array
else{
const rows = paths.map(d => d.split(/[\\/]/g));
const width = Math.max(...rows.map(d => d.length));
const height = rows.length;
let x;
X: for(x = 0; x < width; ++x){
const str = rows[0][x];
for(let y = 1; y < height; ++y)
if(str !== rows[y][x]) break X;
matched.push(str);
}
}
return matched.join(POSIX ? "/" : "\\");
},
/**
* Return the width of the scrollbars being displayed by this user's OS/device.
*
* @return {Number}
*/
getScrollbarWidth(){
const el = document.createElement("div");
const {style} = el;
const size = 120;
style.width =
style.height = size+"px";
style.overflow = "auto";
el.innerHTML = Array(size*5).join(" W ");
(document.body || document.documentElement).appendChild(el);
const result = el.offsetWidth - el.scrollWidth;
el.parentNode.removeChild(el);
return result;
},
/**
* Generate a RegEx from its string-based representation.
*
* Useful for "deserialising" a regex from JSON. Optional flags can be given
* to override trailing modifiers found in the source, if any.
*
* @example "/\\S+/i" -> /\S+/i
* @example "\\d+\\.\\d+$" -> /\d+\.\d+$/
* @param {String} src
* @param {String} flags
* @return {RegExp}
*/
regexF | flags){
src = (src || "").toString();
if(!src) return null;
const matchEnd = src.match(/\/([gimuy]*)$/);
// Input is a "complete" regular expression
if(matchEnd && /^\//.test(src))
return new RegExp(
src.replace(/^\/|\/([gimuy]*)$/gi, ""),
flags != null ? flags : matchEnd[1]
);
return new RegExp(src, flags);
},
/**
* Escape special regex characters within a string.
*
* @example "file.js" -> "file\\.js"
* @param {String} input
* @return {String}
*/
escapeRegExp(input){
return input.replace(/([/\\^$*+?{}[\]().|])/g, "\\$1");
},
/**
* Replace HTML metacharacters with numeric character references.
*
| romString(src, | identifier_name |
utils.js | context.dispose()
$bitmap.dispose()
`.replace(/\r?\n|\r|\u2028|\u2029/g, "\r\n");
execFileSync("powershell.exe", [
"-NoLogo",
"-NoProfile",
"-NonInteractive",
"-WindowStyle", "Hidden",
"-Command", "-",
], {input, encoding: "utf8", windowsHide: true});
break;
default:
throw new Error("Desktop capture requires Windows or macOS");
}
},
/**
* Save a screenshot of the workspace window.
* @param {"png"|"jpg"|"pdf"} [format="png"] - Screenshot format
* @param {Number} [quality=75] - JPEG quality (0–100)
* @return {Promise<Uint8Array>}
*/
async captureWindow(format = "png", quality = 75){
const {remote} = require("electron");
const page = remote.getCurrentWebContents();
switch(format = String(format).toLowerCase()){
case "pdf":
const width = Math.ceil(CSS.px(window.innerWidth * 1000).to("mm").value);
const height = Math.ceil(CSS.px(window.innerHeight * 1000).to("mm").value);
const {buffer} = await page.printToPDF({
marginsType: 1,
printBackground: true,
pageSize: {width, height},
});
return new Uint8Array(buffer);
case "jpg":
case "jpeg":
quality = isNaN(quality) ? 75 : Math.max(Math.min(100, ~~quality), 0);
return (await page.capturePage()).toJPEG(quality);
case "png":
return (await page.capturePage()).toPNG();
default:
throw new TypeError(`Unsupported file-format: ${format}`);
}
},
/**
* Open a file to a specific line and column in the user's editor-pane.
* @param {String} path
* @param {Number} line
* @param {Number} columns
*/
jumpToFile(path, line, column){
const jumpURL = "atom://core/open/file?filename="
+ encodeURIComponent(path)
+ `&line=${line}&column=${column}`;
return require("electron").shell.openExternal(jumpURL);
},
/**
* Open a list of files relative to currently-running test.
*
* @param {...String} paths
* @return {Promise<TextEditor|TextEditor[]>}
*/
async open(...paths){
if(!paths.length) return atom.workspace.open();
const testPath = dirname((AtomMocha.runner.currentRunnable || {file: __filename}).file);
const editors = await Promise.all(paths.map(path => {
path = path.replace(/[\\/]/g, sep);
return atom.workspace.open(isAbsolute(path) ? path : resolve(testPath, normalize(path)));
}));
return (paths.length > 1) ? editors : editors[0];
},
/**
* Return a {@link Promise} which resolves once an event has been emitted.
*
* @param {EventEmitter} source - Something with an {@link Emitter} object
* @param {String} eventName - Name of event to listen for
* @return {Promise}
*/
async waitForEvent(source, eventName){
return new Promise(resolve => {
const disposable = source.emitter.on(eventName, result => {
disposable.dispose();
resolve(result);
});
});
},
/**
* Wrapper for creating a new DOM element, optionally assigning it a hash of properties upon construction.
*
* @param {String} nodeType - Element type to create.
* @param {Object} obj - An optional hash of properties to assign the newly-created object.
* @return {Element}
*/
New(nodeType, obj){
function absorb(a, b){
for(const i in b)
if(Object(a[i]) === a[i] && Object(b[i]) === b[i])
absorb(a[i], b[i]);
else a[i] = b[i];
}
const node = document.createElement(nodeType);
if(obj) absorb(node, obj);
return node;
},
/**
* Curried method to append multiple nodes at once.
*
* @example addTo(node)(el1, el2, …)
* @example node = addTo(node)(…)[0]
* @return {Function}
*/
addTo(parent){
let count = 0;
let target = parent;
const fn = (...nodes) => {
let lastElement;
for(let node of nodes){
if("string" === typeof node)
node = document.createTextNode(node);
else if(node)
lastElement =
fn[++count] = node;
node && target.appendChild(node);
}
target = lastElement || target;
return fn;
};
fn[count] = target;
return fn;
},
/**
* Return the containing element of a node that matches the given selector.
*
* If the node itself matches, it'll be returned unless ignoreSelf is set.
*
* @param {Node} node - A document node to inspect the hierarchy of
* @param {String} selector - A CSS selector string
* @param {Boolean} ignoreSelf - If given a truthy value, only the parents of a node will be queried
* @return {Element} The closest matching element, or NULL if none of the node's parents matched the selector.
*/
nearest(node, selector, ignoreSelf){
let match;
let parent = ignoreSelf ? node.parentNode : node;
const matches = document.querySelectorAll(selector);
const numMatches = matches.length;
if(numMatches) while(parent){
for(match = 0; match < numMatches; ++match)
if(matches[match] === parent) return parent;
parent = parent.parentNode;
}
return null;
},
/**
* Locate the root directory shared by multiple paths.
*
* @param {Array} paths - A list of filesystem paths
* @return {String}
*/
findBasePath(paths){
co | matched.push(str);
}
}
return matched.join(POSIX ? "/" : "\\");
},
/**
* Return the width of the scrollbars being displayed by this user's OS/device.
*
* @return {Number}
*/
getScrollbarWidth(){
const el = document.createElement("div");
const {style} = el;
const size = 120;
style.width =
style.height = size+"px";
style.overflow = "auto";
el.innerHTML = Array(size*5).join(" W ");
(document.body || document.documentElement).appendChild(el);
const result = el.offsetWidth - el.scrollWidth;
el.parentNode.removeChild(el);
return result;
},
/**
* Generate a RegEx from its string-based representation.
*
* Useful for "deserialising" a regex from JSON. Optional flags can be given
* to override trailing modifiers found in the source, if any.
*
* @example "/\\S+/i" -> /\S+/i
* @example "\\d+\\.\\d+$" -> /\d+\.\d+$/
* @param {String} src
* @param {String} flags
* @return {RegExp}
*/
regexFromString(src, flags){
src = (src || "").toString();
if(!src) return null;
const matchEnd = src.match(/\/([gimuy]*)$/);
// Input is a "complete" regular expression
if(matchEnd && /^\//.test(src))
return new RegExp(
src.replace(/^\/|\/([gimuy]*)$/gi, ""),
flags != null ? flags : matchEnd[1]
);
return new RegExp(src, flags);
},
/**
* Escape special regex characters within a string.
*
* @example "file.js" -> "file\\.js"
* @param {String} input
* @return {String}
*/
escapeRegExp(input){
return input.replace(/([/\\^$*+?{}[\]().|])/g, "\\$1");
},
/**
* Replace HTML metacharacters with numeric character references.
*
| nst POSIX = paths[0].indexOf("/") !== -1;
let matched = [];
// Spare ourselves the trouble if there's only one path
if(1 === paths.length){
matched = (paths[0].replace(/[\\/]+$/, "")).split(/[\\/]/g);
matched.pop();
}
// Otherwise, comb each array
else{
const rows = paths.map(d => d.split(/[\\/]/g));
const width = Math.max(...rows.map(d => d.length));
const height = rows.length;
let x;
X: for(x = 0; x < width; ++x){
const str = rows[0][x];
for(let y = 1; y < height; ++y)
if(str !== rows[y][x]) break X; | identifier_body |
utils.js | the newly-created object.
* @return {Element}
*/
New(nodeType, obj){
function absorb(a, b){
for(const i in b)
if(Object(a[i]) === a[i] && Object(b[i]) === b[i])
absorb(a[i], b[i]);
else a[i] = b[i];
}
const node = document.createElement(nodeType);
if(obj) absorb(node, obj);
return node;
},
/**
* Curried method to append multiple nodes at once.
*
* @example addTo(node)(el1, el2, …)
* @example node = addTo(node)(…)[0]
* @return {Function}
*/
addTo(parent){
let count = 0;
let target = parent;
const fn = (...nodes) => {
let lastElement;
for(let node of nodes){
if("string" === typeof node)
node = document.createTextNode(node);
else if(node)
lastElement =
fn[++count] = node;
node && target.appendChild(node);
}
target = lastElement || target;
return fn;
};
fn[count] = target;
return fn;
},
/**
* Return the containing element of a node that matches the given selector.
*
* If the node itself matches, it'll be returned unless ignoreSelf is set.
*
* @param {Node} node - A document node to inspect the hierarchy of
* @param {String} selector - A CSS selector string
* @param {Boolean} ignoreSelf - If given a truthy value, only the parents of a node will be queried
* @return {Element} The closest matching element, or NULL if none of the node's parents matched the selector.
*/
nearest(node, selector, ignoreSelf){
let match;
let parent = ignoreSelf ? node.parentNode : node;
const matches = document.querySelectorAll(selector);
const numMatches = matches.length;
if(numMatches) while(parent){
for(match = 0; match < numMatches; ++match)
if(matches[match] === parent) return parent;
parent = parent.parentNode;
}
return null;
},
/**
* Locate the root directory shared by multiple paths.
*
* @param {Array} paths - A list of filesystem paths
* @return {String}
*/
findBasePath(paths){
const POSIX = paths[0].indexOf("/") !== -1;
let matched = [];
// Spare ourselves the trouble if there's only one path
if(1 === paths.length){
matched = (paths[0].replace(/[\\/]+$/, "")).split(/[\\/]/g);
matched.pop();
}
// Otherwise, comb each array
else{
const rows = paths.map(d => d.split(/[\\/]/g));
const width = Math.max(...rows.map(d => d.length));
const height = rows.length;
let x;
X: for(x = 0; x < width; ++x){
const str = rows[0][x];
for(let y = 1; y < height; ++y)
if(str !== rows[y][x]) break X;
matched.push(str);
}
}
return matched.join(POSIX ? "/" : "\\");
},
/**
* Return the width of the scrollbars being displayed by this user's OS/device.
*
* @return {Number}
*/
getScrollbarWidth(){
const el = document.createElement("div");
const {style} = el;
const size = 120;
style.width =
style.height = size+"px";
style.overflow = "auto";
el.innerHTML = Array(size*5).join(" W ");
(document.body || document.documentElement).appendChild(el);
const result = el.offsetWidth - el.scrollWidth;
el.parentNode.removeChild(el);
return result;
},
/**
* Generate a RegEx from its string-based representation.
*
* Useful for "deserialising" a regex from JSON. Optional flags can be given
* to override trailing modifiers found in the source, if any.
*
* @example "/\\S+/i" -> /\S+/i
* @example "\\d+\\.\\d+$" -> /\d+\.\d+$/
* @param {String} src
* @param {String} flags
* @return {RegExp}
*/
regexFromString(src, flags){
src = (src || "").toString();
if(!src) return null;
const matchEnd = src.match(/\/([gimuy]*)$/);
// Input is a "complete" regular expression
if(matchEnd && /^\//.test(src))
return new RegExp(
src.replace(/^\/|\/([gimuy]*)$/gi, ""),
flags != null ? flags : matchEnd[1]
);
return new RegExp(src, flags);
},
/**
* Escape special regex characters within a string.
*
* @example "file.js" -> "file\\.js"
* @param {String} input
* @return {String}
*/
escapeRegExp(input){
return input.replace(/([/\\^$*+?{}[\]().|])/g, "\\$1");
},
/**
* Replace HTML metacharacters with numeric character references.
*
* Affected characters are: & < > "
*
* NOTE: Named entities are NOT checked, and will be double-escaped.
* Exceptions are made for `"`, `<` and `>`, due to their
* abundant use. Numeric entities, even with invalid codepoints, are
* also safe from double-encoding.
*
* @example "name"<email> -> "name"<email>
* @param {String} input
* @return {String}
*/
escapeHTML(input){
return input.replace(/["<>]|&(?!quot;|gt;|lt;|#x?[A-F\d]+;)/gi, s => "&#"+s.charCodeAt(0)+";");
},
/**
* Parse a list of keywords into an object of boolean "true" values.
*
* @example parseKeywords("top left") -> {top: true, left: true}
* @param {Mixed} keywords - A space-delimited string or an array of strings
* @return {Object}
*/
parseKeywords(keywords){
if(!Array.isArray(keywords)){
if(!keywords) return null;
keywords = [keywords];
}
const output = {};
for(const k of keywords)
k.split(/\s+/g).filter(i => i).forEach(k => output[k] = true);
return output;
},
/**
* Return a {@link Promise} which auto-resolves after a delay.
*
* @param {Number} [delay=100] - Delay in milliseconds
* @return {Promise<void>}
*/
wait(delay = 100){
return new Promise(resolve => {
setTimeout(() => resolve(), delay);
});
},
/**
* Keep calling a function until it returns a truthy value.
*
* @example poll(async () => (await fetch(url)).done);
* @param {Function} fn
* @param {Object} [opts={}]
* @param {Number} [opts.rate=100]
* @param {Number} [opts.timeout=0]
* @param {Boolean} [opts.negate=false]
* @return {Promise<void>}
*/
async poll(fn, opts = {}){
const {rate = 100, timeout = 0, negate = false} = opts;
const start = Date.now();
for(;;){
const result = await fn();
if(!negate === !!result) return result;
if(timeout && Date.now() - start > timeout)
throw new Error("Timed out");
await new Promise($ => setTimeout($, rate));
}
},
/**
* Strip excess whitespace from a multiline string.
*
* Intended to be used with tagged template literals,
* but will work on any multiline string value.
*
* @example
* const HTML = deindent;
* let output = HTML `
* <div>
* (Text)
* </div>
* `;
* output == "<div>\n\t(Text)\n</div>";
*
* @param {Object|String} input
* @param {...String} [args]
* @return {String}
*/
deindent(input, ...args){
// Avoid breaking on String.raw if called as an ordinary function
if("object" !== typeof input || "object" !== typeof input.raw)
return deindent `${input}`;
const depthTable = [];
let maxDepth = Number.NEGATIVE_INFINITY;
let minDepth = Number.POSITIVE_INFINITY;
| // Normalise newlines and strip leading or trailing blank lines
const chunk = String.raw.call(null, input, ...args)
.replace(/\r(\n?)/g, "$1") | random_line_split | |
HFIF_RunTime.py | result*np.sign(xValue)
def getNormInduData(xData, pclMatrix):
    """Percentile-normalise one vector of industry features.

    Each element xData[j] is mapped through calPercentile against the
    j-th column of pclMatrix (the pre-computed percentile grid for that
    feature), yielding a float array of the same length.
    """
    return np.array([calPercentile(value, pclMatrix[:, j])
                     for j, value in enumerate(xData)])
def btstr(btpara):
    """Decode a UTF-8 byte string (e.g. from np.loadtxt dtype=bytes) to str."""
    return btpara.decode('utf-8')
def myLoss(y_true, y_pred):
    """Directionally weighted L1 loss used when loading the Keras models.

    The absolute error is scaled by y_true, so mistakes on
    large-magnitude targets are penalised more heavily.
    """
    # earlier variant (squared): backend.mean(backend.square((y_pred - y_true)*y_true), axis=-1)
    weightedError = (y_pred - y_true) * y_true
    return backend.mean(backend.abs(weightedError), axis=-1)
def myMetric(y_true, y_pred):
    """Monitoring metric: 10x the mean of y_pred*y_true over the last axis.

    Positive when predictions tend to agree in sign with the targets.
    """
    return 10 * backend.mean(y_pred * y_true, axis=-1)
def getCfgFareFactor(ffPath):
    """Read ffPath/cfgForeFactor.csv and split its entries into two tuples.

    The file is loaded as raw bytes and each entry decoded with btstr.
    Returns (first four entries, remaining entries).
    NOTE(review): "Fare" in the name looks like a typo for "Fore", but the
    name is kept for caller compatibility.
    """
    raw = np.loadtxt(os.path.join(ffPath, 'cfgForeFactor.csv'), dtype=bytes)
    cfgData = tuple(btstr(item) for item in raw)
    return cfgData[:4], cfgData[4:]
def getTSAvgAmnt(pdAvgAmntFile):
    """Fill the global dictTSAvgAmnt with per-code average turnover per slice.

    The CSV maps instrument code (index column) to a daily average amount;
    each value is divided by the number of timeSpan-second slices in
    14400 s (4 trading hours) and truncated to int.
    """
    global dictTSAvgAmnt, timeSpan
    frame = pd.read_csv(pdAvgAmntFile, header=0, index_col=0, engine='python')
    slicesPerDay = 14400 / timeSpan  # 14400 s = 4 trading hours
    for code in frame.index:
        dictTSAvgAmnt[code] = int(frame.loc[code][0] / slicesPerDay)
def registerAllSymbol():
    """Subscribe the data vendor to every symbol used by any ForeFactor.

    Collects the union of all ForeFactor code sets and registers them in
    one call, then notifies the operator that registration is pending.

    Bug fix: the accumulator was initialised as a dict literal ``{}`` and
    then combined with ``| set(...)``; ``dict | set`` raises TypeError, so
    this crashed whenever listForeFactor was non-empty. It is now a real
    set built with set.update().
    """
    global dataVendor, listForeFactor
    codelist = set()
    for ff in listForeFactor:
        codelist.update(ff.dictCodeInfo.keys())
    dataVendor.RegisterSymbol(codelist)
    print('Register symbol, please wait...')
class EventManager:
    """Minimal publish/subscribe event loop backed by one worker thread.

    Events are enqueued via SendEvent() and consumed by a background
    thread, which fans each event out to every handler registered for
    that event's ``type_``.
    NOTE(review): there is no Stop() — once started, the worker runs
    until the process exits.
    """
    def __init__(self):
        self.__eventQueue = Queue()   # pending events
        self.__active = False         # worker-loop run flag
        self.__thread = Thread(target=self.__Run)
        self.__handlers = {}          # event type -> list of handler callables

    def __Run(self):
        # Consume events while active; the 1 s timeout keeps the loop from
        # blocking forever on an empty queue.
        while self.__active:
            try:
                event = self.__eventQueue.get(block=True, timeout=1)
            except Empty:
                continue
            self.__EventProcess(event)

    def __EventProcess(self, event):
        # Dispatch to every handler registered for this event's type.
        for handler in self.__handlers.get(event.type_, ()):
            handler(event)

    def Start(self):
        """Begin consuming queued events on the background thread."""
        self.__active = True
        self.__thread.start()

    def AddEventListener(self, type_, handler):
        """Register handler for events of the given type (no duplicates)."""
        handlerList = self.__handlers.setdefault(type_, [])
        if handler not in handlerList:
            handlerList.append(handler)

    def SendEvent(self, event):
        """Queue an event for asynchronous processing."""
        self.__eventQueue.put(event)
class MyEvent:
    """Lightweight event record: a dispatch tag plus an arbitrary payload."""
    def __init__(self, Eventtype, Data):
        self.data = Data          # payload delivered to the handlers
        self.type_ = Eventtype    # dispatch key; underscore avoids shadowing type()
class MSSQL:
    """Thin pymssql wrapper that writes forecast params to tblFundPricingParam."""
    def __init__(self, host, user, pwd, db):
        # Only stores settings; Connect() opens the actual session.
        self.host = host
        self.user = user
        self.pwd = pwd
        self.db = db

    def Connect(self):
        """Open an autocommit connection and cursor; return True on success."""
        try:
            self.conn = pymssql.connect(host=self.host, user=self.user,
                                        password=self.pwd, database=self.db,
                                        charset="UTF-8")
            self.conn.autocommit(True)
            self.cur = self.conn.cursor()
        except:
            return False
        return True if self.cur else False

    def UpdateFF(self, sname, pm):
        """Write the three horizon params (1/2/3 min) for one strategy row.

        NOTE(review): SQL is assembled by string interpolation; sname comes
        from local config, but parameterised queries would be safer.
        """
        self.cur.execute(
            f"update tblFundPricingParam set ff_1m_v=({pm[0]}),"
            f"ff_2m_v=({pm[1]}),ff_3m_v=({pm[2]}) "
            f"where strategyName='{sname}'")

    def UpdateAllFF(self):
        """Push every ForeFactor's params in a single UPDATE via CASE expressions."""
        global listForeFactor
        cases = [f'ff_{h}m_v=case strategyName' for h in (1, 2, 3)]
        quotedNames = []
        for ff in listForeFactor:
            for strategyName in ff.listStrategyName:
                quotedNames.append(f"'{strategyName}'")
                for i in range(3):
                    cases[i] += f" when '{strategyName}' then {ff.pm[i]}"
        stmt = ('update tblFundPricingParam set '
                + ','.join(c + ' end' for c in cases)
                + ' where strategyName in (' + ','.join(quotedNames) + ')')
        self.cur.execute(stmt)
def TDFCallBack(pMarketdata):
    """Market-data feed callback: wrap each tick in a "quote" event and
    hand it to the global event manager for asynchronous processing.

    pMarketdata is the raw record delivered by the feed (presumably a TDF
    struct with szWindCode/nTime/nMatch/iTurnover fields — the consumer
    is ReceiveQuote; confirm against the feed API).
    """
    eventManager.SendEvent(MyEvent("quote",pMarketdata))
def MyNormData(normEvent):
    """Event handler that recomputes pricing parameters for all models.

    Under the global lock, calls CalPM() on every ForeFactor and collects
    the resulting pm vectors. When normEvent.data is truthy and the clock
    is inside the continuous-trading windows, pushes all parameters to
    the database in one statement via sql.UpdateAllFF().
    """
    global listForeFactor,lock,sql
    isPush=normEvent.data  # truthy -> also write the fresh params to the DB
    listPm=[]
    # Wall-clock time encoded as an HHMMSS integer (e.g. 93015 = 09:30:15).
    intNTime=int(datetime.datetime.now().strftime('%H%M%S'))
    if intNTime<91000 or intNTime>150000:
        # Outside 09:10:00-15:00:00 — skip entirely.
        print('not trading time.')
        return
    lock.acquire()
    try:
        for ff in listForeFactor:
            ff.CalPM()
            listPm.append(ff.pm)
        print(intNTime,*tuple(listPm))
        # Only push during continuous trading (09:31-11:30 and 13:01-15:00),
        # avoiding the auction/lunch-break fringes.
        if isPush and ((intNTime>93100 and intNTime<113000) or (intNTime>130100 and intNTime<150000)):
            sql.UpdateAllFF()
    finally:
        lock.release()
def ReceiveQuote(quoteEvent):
    """Quote-event handler: cache the latest tick per Wind code.

    Decodes the byte-string Wind code and stores a tuple of scaled raw
    fields (nTime/1000, nMatch/10000, iTurnover) — presumably time,
    last price and cumulative turnover; confirm against the feed spec.
    Access to the shared dict is guarded by the global lock.
    """
    global dictQuote, lock
    tick = quoteEvent.data
    with lock:
        windCode = tick.szWindCode.decode()
        dictQuote[windCode] = (tick.nTime / 1000,
                               tick.nMatch / 10000,
                               tick.iTurnover)
class ForeFactor:
def __init__(self, workPath,cfgFile):
self.workPath = workPath
self.cfgFile = cfgFile
self.dictCodeInfo = {}
self.nIndu=0
self.listModel=[]
self.listStrategyName=[]
self.pclMatrix=np.array([])
#output
self.lastInduData=np.array([])
self.inputData=np.array([])
self.pm=np.zeros(3)
self._getCfg()
def _getCfg(self):
global nXData,timeSpan
data = xlrd.open_workbook(os.path.join(self.workPath,self.cfgFile))
sheetCodeInfo = data.sheets()[0]
arrShares = sheetCodeInfo.col_values(1)[1:]
arrCode = sheetCodeInfo.col_values(0)[1:]
arrIndustry = sheetCodeInfo.col_values(2)[1:]
self.nIndu=len(set(arrIndustry))
self.inputData=np.zeros((nXData,self.nIndu*2))
for i in range(len(arrCode)):
self.dictCodeInfo[arrCode[i]]=[arrShares[i],arrIndustry[i]]
arrCfg=data.sheets()[1].col_values(1)
self.listStrategyName=arrCfg[10].split(',')
(filepath,tempfilename) = os.path.split(self.cfgFile)
(filename,extension) = os.path.splitext(tempfilename)
modelPath=os.path.join(self.workPath,filename)
testP=np.zeros((1,nXData,self.nIndu*2))
for i in range(3):
modelfile=os.path.join(modelPath,'model_'+filename+'_1min_'+str(i+1)+'min.h5')
model=models.load_model(modelfile,custom_objects={'myLoss': myLoss,'myMetric':myMetric})
model.predict(testP)
self.listModel.append(model)
self.pclMatrix=np.loadtxt(os.path.join(modelPath,'pclMatrix_'+filename+'.csv'),delimiter=',')
getTSAvgAmnt(os.path.join(modelPath,'avgAmnt_'+filename+'.csv'))
def CalPM(self):
global dictQuote,dictTSAvgAmnt,nXData
crow=np.zeros(self.nIndu*2)
inputRow=np.zeros(self.nIndu*2)
npAveTSpanAmnt=np.zeros(self.nIndu)
for (symbol,weiIndu) in self.dictCodeInfo.items():
if (symbol not in dictQuote):# or (symbol not in dictTSAvgAmnt):
#print('np Symbol: '+ symbol)
#return
continue
wei=weiIndu[0]
intIndu=int(weiIndu[1]+0.1)
lpri=dictQuote[symbol][1]
lamt=dictQuote[symbol][2]
crow[2*intIndu-2]+=wei*lpri
crow[2*intIndu-1]+=lamt
npAveTSpanAmnt[intIndu-1]+=dictTSAvgAmnt[symbol]
#if lpri<0.01:
#print('price 0: '+symbol)
#continue
if crow[0]<1:
print('wait quote')
return
if self.lastInduData.size==0:
self.lastInduData=crow
for i in range(self.nIndu):
inputRow[2*i]=(crow[2*i]/self.lastInduData[2*i]-1)*10000
inputRow[2*i+1]=(crow[2*i+1]-self | random_line_split | |
HFIF_RunTime.py | result*np.sign(xValue)
def getNormInduData(xData,pclMatrix):
xShape=len(xData)
normInduData=np.zeros(xShape)
for j in range(xShape):
arrPercentile=pclMatrix[:,j]
normInduData[j]=calPercentile(xData[j],arrPercentile)
return normInduData
def btstr(btpara):
return str(btpara,encoding='utf-8')
def myLoss(y_true, y_pred):
#return backend.mean(backend.square((y_pred - y_true)*y_true), axis=-1)
return backend.mean(backend.abs((y_pred - y_true)*y_true), axis=-1)
def myMetric(y_true, y_pred):
return backend.mean(y_pred*y_true, axis=-1)*10
def getCfgFareFactor(ffPath):
cfgFile=os.path.join(ffPath,'cfgForeFactor.csv')
cfgData=tuple(map(btstr,np.loadtxt(cfgFile,dtype=bytes)))
return cfgData[:4],cfgData[4:]
def getTSAvgAmnt(pdAvgAmntFile):
global dictTSAvgAmnt,timeSpan
pdAvgAmnt=pd.read_csv(pdAvgAmntFile,header=0,index_col=0,engine='python')
for code in pdAvgAmnt.index:
dictTSAvgAmnt[code]=int(pdAvgAmnt.loc[code][0]/(14400/timeSpan))
def registerAllSymbol():
global dataVendor,listForeFactor
codelist={}
for ff in listForeFactor:
codelist=codelist|set(ff.dictCodeInfo.keys())
dataVendor.RegisterSymbol(codelist)
print('Register symbol, please wait...')
class EventManager:
def __init__(self):
self.__eventQueue = Queue()
self.__active = False
self.__thread = Thread(target = self.__Run)
self.__handlers = {}
def __Run(self):
while self.__active == True:
try:
event = self.__eventQueue.get(block = True, timeout = 1)
self.__EventProcess(event)
except Empty:
pass
def __EventProcess(self, event):
if event.type_ in self.__handlers:
for handler in self.__handlers[event.type_]:
handler(event)
def Start(self):
self.__active = True
self.__thread.start()
def AddEventListener(self, type_, handler):
try:
handlerList = self.__handlers[type_]
except KeyError:
handlerList = []
self.__handlers[type_] = handlerList
if handler not in handlerList:
handlerList.append(handler)
def SendEvent(self, event):
self.__eventQueue.put(event)
class MyEvent:
def __init__(self, Eventtype,Data):
self.type_ = Eventtype # 事件类型
self.data = Data # 字典用于保存具体的事件数据
class MSSQL:
def __init__(self,host,user,pwd,db):
self.host = host
self.user = user
self.pwd = pwd
self.db = db
def Connect(self):
try:
self.conn = pymssql.connect(host=self.host,user=self.user,password=self.pwd,database=self.db,charset="UTF-8")
self.conn.autocommit(True)
self.cur = self.conn.cursor()
if not self.cur:
return False
else:
return True
except:
return False
def UpdateFF(self,sname,pm):
sql="update tblFundPricingParam set ff_1m_v=("+str(pm[0])+"),ff_2m_v=("+str(pm[1])+"),ff_3m_v=("+str(pm[2])+") where strategyName='"+sname+"'"
self.cur.execute(sql)
def UpdateAllFF(self):
global listForeFactor
listUpdate=[]
lsn=[]
for i in range(3):
listUpdate.append('ff_'+str(i+1)+'m_v=case strategyName')
for ff in listForeFactor:
for strategyName in ff.listStrategyName:
lsn.append('\''+strategyName+'\'')
for i in range(3):
listUpdate[i]+=' when \''+strategyName+'\' then '+str(ff.pm[i])
for i in range(3):
listUpdate[i]+=' end'
sql='update tblFundPricingParam set '+','.join(listUpdate)+' where strategyName in ('+','.join(lsn)+')'
self.cur.execute(sql)
def TDFCallBack(pMarketdata):
eventManager.SendEvent(MyEvent("quote",pMarketdata))
def MyNormData(normEvent):
global listForeFactor,lock,sql
isPush=normEvent.data
listPm=[]
intNTime=int(datetime.datetime.now().strftime('%H%M%S'))
if intNTime<91000 or intNTime>150000:
print('not trading time.')
return
lock.acquire()
try:
for ff in listForeFactor:
ff.CalPM()
listPm.app | stPm))
if isPush and ((intNTime>93100 and intNTime<113000) or (intNTime>130100 and intNTime<150000)):
sql.UpdateAllFF()
finally:
lock.release()
def ReceiveQuote(quoteEvent):
global dictQuote,lock
dt =quoteEvent.data
lock.acquire()
try:
code=bytes.decode(dt.szWindCode)
dictQuote[code]=(dt.nTime/1000,dt.nMatch/10000,dt.iTurnover)
finally:
lock.release()
class ForeFactor:
def __init__(self, workPath,cfgFile):
self.workPath = workPath
self.cfgFile = cfgFile
self.dictCodeInfo = {}
self.nIndu=0
self.listModel=[]
self.listStrategyName=[]
self.pclMatrix=np.array([])
#output
self.lastInduData=np.array([])
self.inputData=np.array([])
self.pm=np.zeros(3)
self._getCfg()
def _getCfg(self):
global nXData,timeSpan
data = xlrd.open_workbook(os.path.join(self.workPath,self.cfgFile))
sheetCodeInfo = data.sheets()[0]
arrShares = sheetCodeInfo.col_values(1)[1:]
arrCode = sheetCodeInfo.col_values(0)[1:]
arrIndustry = sheetCodeInfo.col_values(2)[1:]
self.nIndu=len(set(arrIndustry))
self.inputData=np.zeros((nXData,self.nIndu*2))
for i in range(len(arrCode)):
self.dictCodeInfo[arrCode[i]]=[arrShares[i],arrIndustry[i]]
arrCfg=data.sheets()[1].col_values(1)
self.listStrategyName=arrCfg[10].split(',')
(filepath,tempfilename) = os.path.split(self.cfgFile)
(filename,extension) = os.path.splitext(tempfilename)
modelPath=os.path.join(self.workPath,filename)
testP=np.zeros((1,nXData,self.nIndu*2))
for i in range(3):
modelfile=os.path.join(modelPath,'model_'+filename+'_1min_'+str(i+1)+'min.h5')
model=models.load_model(modelfile,custom_objects={'myLoss': myLoss,'myMetric':myMetric})
model.predict(testP)
self.listModel.append(model)
self.pclMatrix=np.loadtxt(os.path.join(modelPath,'pclMatrix_'+filename+'.csv'),delimiter=',')
getTSAvgAmnt(os.path.join(modelPath,'avgAmnt_'+filename+'.csv'))
def CalPM(self):
global dictQuote,dictTSAvgAmnt,nXData
crow=np.zeros(self.nIndu*2)
inputRow=np.zeros(self.nIndu*2)
npAveTSpanAmnt=np.zeros(self.nIndu)
for (symbol,weiIndu) in self.dictCodeInfo.items():
if (symbol not in dictQuote):# or (symbol not in dictTSAvgAmnt):
#print('np Symbol: '+ symbol)
#return
continue
wei=weiIndu[0]
intIndu=int(weiIndu[1]+0.1)
lpri=dictQuote[symbol][1]
lamt=dictQuote[symbol][2]
crow[2*intIndu-2]+=wei*lpri
crow[2*intIndu-1]+=lamt
npAveTSpanAmnt[intIndu-1]+=dictTSAvgAmnt[symbol]
#if lpri<0.01:
#print('price 0: '+symbol)
#continue
if crow[0]<1:
print('wait quote')
return
if self.lastInduData.size==0:
self.lastInduData=crow
for i in range(self.nIndu):
inputRow[2*i]=(crow[2*i]/self.lastInduData[2*i]-1)*10000
inputRow[2*i+1]=(crow[2*i+1]- | end(ff.pm)
print(intNTime,*tuple(li | conditional_block |
HFIF_RunTime.py | result*np.sign(xValue)
def getNormInduData(xData,pclMatrix):
xShape=len(xData)
normInduData=np.zeros(xShape)
for j in range(xShape):
arrPercentile=pclMatrix[:,j]
normInduData[j]=calPercentile(xData[j],arrPercentile)
return normInduData
def btstr(btpara):
return str(btpara,encoding='utf-8')
def myLoss(y_true, y_pred):
#return backend.mean(backend.square((y_pred - y_true)*y_true), axis=-1)
return backend.mean(backend.abs((y_pred - y_true)*y_true), axis=-1)
def myMetric(y_true, y_pred):
return backend.mean(y_pred*y_true, axis=-1)*10
def getCfgFareFactor(ffPath):
cfgFile=os.path.join(ffPath,'cfgForeFactor.csv')
cfgData=tuple(map(btstr,np.loadtxt(cfgFile,dtype=bytes)))
return cfgData[:4],cfgData[4:]
def getTSAvgAmnt(pdAvgAmntFile):
global dictTSAvgAmnt,timeSpan
pdAvgAmnt=pd.read_csv(pdAvgAmntFile,header=0,index_col=0,engine='python')
for code in pdAvgAmnt.index:
dictTSAvgAmnt[code]=int(pdAvgAmnt.loc[code][0]/(14400/timeSpan))
def registerAllSymbol():
global dataVendor,listForeFactor
codelist={}
for ff in listForeFactor:
codelist=codelist|set(ff.dictCodeInfo.keys())
dataVendor.RegisterSymbol(codelist)
print('Register symbol, please wait...')
class EventManager:
| self.__active = True
self.__thread.start()
def AddEventListener(self, type_, handler):
try:
handlerList = self.__handlers[type_]
except KeyError:
handlerList = []
self.__handlers[type_] = handlerList
if handler not in handlerList:
handlerList.append(handler)
def SendEvent(self, event):
self.__eventQueue.put(event)
class MyEvent:
def __init__(self, Eventtype,Data):
self.type_ = Eventtype # 事件类型
self.data = Data # 字典用于保存具体的事件数据
class MSSQL:
def __init__(self,host,user,pwd,db):
self.host = host
self.user = user
self.pwd = pwd
self.db = db
def Connect(self):
try:
self.conn = pymssql.connect(host=self.host,user=self.user,password=self.pwd,database=self.db,charset="UTF-8")
self.conn.autocommit(True)
self.cur = self.conn.cursor()
if not self.cur:
return False
else:
return True
except:
return False
def UpdateFF(self,sname,pm):
sql="update tblFundPricingParam set ff_1m_v=("+str(pm[0])+"),ff_2m_v=("+str(pm[1])+"),ff_3m_v=("+str(pm[2])+") where strategyName='"+sname+"'"
self.cur.execute(sql)
def UpdateAllFF(self):
global listForeFactor
listUpdate=[]
lsn=[]
for i in range(3):
listUpdate.append('ff_'+str(i+1)+'m_v=case strategyName')
for ff in listForeFactor:
for strategyName in ff.listStrategyName:
lsn.append('\''+strategyName+'\'')
for i in range(3):
listUpdate[i]+=' when \''+strategyName+'\' then '+str(ff.pm[i])
for i in range(3):
listUpdate[i]+=' end'
sql='update tblFundPricingParam set '+','.join(listUpdate)+' where strategyName in ('+','.join(lsn)+')'
self.cur.execute(sql)
def TDFCallBack(pMarketdata):
eventManager.SendEvent(MyEvent("quote",pMarketdata))
def MyNormData(normEvent):
global listForeFactor,lock,sql
isPush=normEvent.data
listPm=[]
intNTime=int(datetime.datetime.now().strftime('%H%M%S'))
if intNTime<91000 or intNTime>150000:
print('not trading time.')
return
lock.acquire()
try:
for ff in listForeFactor:
ff.CalPM()
listPm.append(ff.pm)
print(intNTime,*tuple(listPm))
if isPush and ((intNTime>93100 and intNTime<113000) or (intNTime>130100 and intNTime<150000)):
sql.UpdateAllFF()
finally:
lock.release()
def ReceiveQuote(quoteEvent):
global dictQuote,lock
dt =quoteEvent.data
lock.acquire()
try:
code=bytes.decode(dt.szWindCode)
dictQuote[code]=(dt.nTime/1000,dt.nMatch/10000,dt.iTurnover)
finally:
lock.release()
class ForeFactor:
def __init__(self, workPath,cfgFile):
self.workPath = workPath
self.cfgFile = cfgFile
self.dictCodeInfo = {}
self.nIndu=0
self.listModel=[]
self.listStrategyName=[]
self.pclMatrix=np.array([])
#output
self.lastInduData=np.array([])
self.inputData=np.array([])
self.pm=np.zeros(3)
self._getCfg()
def _getCfg(self):
global nXData,timeSpan
data = xlrd.open_workbook(os.path.join(self.workPath,self.cfgFile))
sheetCodeInfo = data.sheets()[0]
arrShares = sheetCodeInfo.col_values(1)[1:]
arrCode = sheetCodeInfo.col_values(0)[1:]
arrIndustry = sheetCodeInfo.col_values(2)[1:]
self.nIndu=len(set(arrIndustry))
self.inputData=np.zeros((nXData,self.nIndu*2))
for i in range(len(arrCode)):
self.dictCodeInfo[arrCode[i]]=[arrShares[i],arrIndustry[i]]
arrCfg=data.sheets()[1].col_values(1)
self.listStrategyName=arrCfg[10].split(',')
(filepath,tempfilename) = os.path.split(self.cfgFile)
(filename,extension) = os.path.splitext(tempfilename)
modelPath=os.path.join(self.workPath,filename)
testP=np.zeros((1,nXData,self.nIndu*2))
for i in range(3):
modelfile=os.path.join(modelPath,'model_'+filename+'_1min_'+str(i+1)+'min.h5')
model=models.load_model(modelfile,custom_objects={'myLoss': myLoss,'myMetric':myMetric})
model.predict(testP)
self.listModel.append(model)
self.pclMatrix=np.loadtxt(os.path.join(modelPath,'pclMatrix_'+filename+'.csv'),delimiter=',')
getTSAvgAmnt(os.path.join(modelPath,'avgAmnt_'+filename+'.csv'))
def CalPM(self):
global dictQuote,dictTSAvgAmnt,nXData
crow=np.zeros(self.nIndu*2)
inputRow=np.zeros(self.nIndu*2)
npAveTSpanAmnt=np.zeros(self.nIndu)
for (symbol,weiIndu) in self.dictCodeInfo.items():
if (symbol not in dictQuote):# or (symbol not in dictTSAvgAmnt):
#print('np Symbol: '+ symbol)
#return
continue
wei=weiIndu[0]
intIndu=int(weiIndu[1]+0.1)
lpri=dictQuote[symbol][1]
lamt=dictQuote[symbol][2]
crow[2*intIndu-2]+=wei*lpri
crow[2*intIndu-1]+=lamt
npAveTSpanAmnt[intIndu-1]+=dictTSAvgAmnt[symbol]
#if lpri<0.01:
#print('price 0: '+symbol)
#continue
if crow[0]<1:
print('wait quote')
return
if self.lastInduData.size==0:
self.lastInduData=crow
for i in range(self.nIndu):
inputRow[2*i]=(crow[2*i]/self.lastInduData[2*i]-1)*10000
inputRow[2*i+1]=(crow[2*i+1]- | def __init__(self):
self.__eventQueue = Queue()
self.__active = False
self.__thread = Thread(target = self.__Run)
self.__handlers = {}
def __Run(self):
while self.__active == True:
try:
event = self.__eventQueue.get(block = True, timeout = 1)
self.__EventProcess(event)
except Empty:
pass
def __EventProcess(self, event):
if event.type_ in self.__handlers:
for handler in self.__handlers[event.type_]:
handler(event)
def Start(self):
| identifier_body |
HFIF_RunTime.py | result*np.sign(xValue)
def getNormInduData(xData,pclMatrix):
xShape=len(xData)
normInduData=np.zeros(xShape)
for j in range(xShape):
arrPercentile=pclMatrix[:,j]
normInduData[j]=calPercentile(xData[j],arrPercentile)
return normInduData
def btstr(btpara):
return str(btpara,encoding='utf-8')
def myLoss(y_true, y_pred):
#return backend.mean(backend.square((y_pred - y_true)*y_true), axis=-1)
return backend.mean(backend.abs((y_pred - y_true)*y_true), axis=-1)
def myMetric(y_true, y_pred):
return backend.mean(y_pred*y_true, axis=-1)*10
def getCfgFareFactor(ffPath):
cfgFile=os.path.join(ffPath,'cfgForeFactor.csv')
cfgData=tuple(map(btstr,np.loadtxt(cfgFile,dtype=bytes)))
return cfgData[:4],cfgData[4:]
def getTSAvgAmnt(pdAvgAmntFile):
global dictTSAvgAmnt,timeSpan
pdAvgAmnt=pd.read_csv(pdAvgAmntFile,header=0,index_col=0,engine='python')
for code in pdAvgAmnt.index:
dictTSAvgAmnt[code]=int(pdAvgAmnt.loc[code][0]/(14400/timeSpan))
def registerAllSymbol():
global dataVendor,listForeFactor
codelist={}
for ff in listForeFactor:
codelist=codelist|set(ff.dictCodeInfo.keys())
dataVendor.RegisterSymbol(codelist)
print('Register symbol, please wait...')
class EventManager:
def __init__(self):
self.__eventQueue = Queue()
self.__active = False
self.__thread = Thread(target = self.__Run)
self.__handlers = {}
def __Run(self):
while self.__active == True:
try:
event = self.__eventQueue.get(block = True, timeout = 1)
self.__EventProcess(event)
except Empty:
pass
def __EventProcess(self, event):
if event.type_ in self.__handlers:
for handler in self.__handlers[event.type_]:
handler(event)
def Start(self):
self.__active = True
self.__thread.start()
def AddEventListener(self, type_, handler):
try:
handlerList = self.__handlers[type_]
except KeyError:
handlerList = []
self.__handlers[type_] = handlerList
if handler not in handlerList:
handlerList.append(handler)
def SendEvent(self, event):
self.__eventQueue.put(event)
class MyEvent:
def __init__(self, Eventtype,Data):
self.type_ = Eventtype # 事件类型
self.data = Data # 字典用于保存具体的事件数据
class MSSQL:
def __init__(self,host,user,pwd,db):
self.host = host
self.user = user
self.pwd = pwd
self.db = db
def Connect(self):
try:
| self.conn = pymssql.connect(host=self.host,user=self.user,password=self.pwd,database=self.db,charset="UTF-8")
self.conn.autocommit(True)
self.cur = self.conn.cursor()
if not self.cur:
return False
else:
return True
except:
return False
def UpdateFF(self,sname,pm):
sql="update tblFundPricingParam set ff_1m_v=("+str(pm[0])+"),ff_2m_v=("+str(pm[1])+"),ff_3m_v=("+str(pm[2])+") where strategyName='"+sname+"'"
self.cur.execute(sql)
def UpdateAllFF(self):
global listForeFactor
listUpdate=[]
lsn=[]
for i in range(3):
listUpdate.append('ff_'+str(i+1)+'m_v=case strategyName')
for ff in listForeFactor:
for strategyName in ff.listStrategyName:
lsn.append('\''+strategyName+'\'')
for i in range(3):
listUpdate[i]+=' when \''+strategyName+'\' then '+str(ff.pm[i])
for i in range(3):
listUpdate[i]+=' end'
sql='update tblFundPricingParam set '+','.join(listUpdate)+' where strategyName in ('+','.join(lsn)+')'
self.cur.execute(sql)
def TDFCallBack(pMarketdata):
eventManager.SendEvent(MyEvent("quote",pMarketdata))
def MyNormData(normEvent):
global listForeFactor,lock,sql
isPush=normEvent.data
listPm=[]
intNTime=int(datetime.datetime.now().strftime('%H%M%S'))
if intNTime<91000 or intNTime>150000:
print('not trading time.')
return
lock.acquire()
try:
for ff in listForeFactor:
ff.CalPM()
listPm.append(ff.pm)
print(intNTime,*tuple(listPm))
if isPush and ((intNTime>93100 and intNTime<113000) or (intNTime>130100 and intNTime<150000)):
sql.UpdateAllFF()
finally:
lock.release()
def ReceiveQuote(quoteEvent):
global dictQuote,lock
dt =quoteEvent.data
lock.acquire()
try:
code=bytes.decode(dt.szWindCode)
dictQuote[code]=(dt.nTime/1000,dt.nMatch/10000,dt.iTurnover)
finally:
lock.release()
class ForeFactor:
def __init__(self, workPath,cfgFile):
self.workPath = workPath
self.cfgFile = cfgFile
self.dictCodeInfo = {}
self.nIndu=0
self.listModel=[]
self.listStrategyName=[]
self.pclMatrix=np.array([])
#output
self.lastInduData=np.array([])
self.inputData=np.array([])
self.pm=np.zeros(3)
self._getCfg()
def _getCfg(self):
global nXData,timeSpan
data = xlrd.open_workbook(os.path.join(self.workPath,self.cfgFile))
sheetCodeInfo = data.sheets()[0]
arrShares = sheetCodeInfo.col_values(1)[1:]
arrCode = sheetCodeInfo.col_values(0)[1:]
arrIndustry = sheetCodeInfo.col_values(2)[1:]
self.nIndu=len(set(arrIndustry))
self.inputData=np.zeros((nXData,self.nIndu*2))
for i in range(len(arrCode)):
self.dictCodeInfo[arrCode[i]]=[arrShares[i],arrIndustry[i]]
arrCfg=data.sheets()[1].col_values(1)
self.listStrategyName=arrCfg[10].split(',')
(filepath,tempfilename) = os.path.split(self.cfgFile)
(filename,extension) = os.path.splitext(tempfilename)
modelPath=os.path.join(self.workPath,filename)
testP=np.zeros((1,nXData,self.nIndu*2))
for i in range(3):
modelfile=os.path.join(modelPath,'model_'+filename+'_1min_'+str(i+1)+'min.h5')
model=models.load_model(modelfile,custom_objects={'myLoss': myLoss,'myMetric':myMetric})
model.predict(testP)
self.listModel.append(model)
self.pclMatrix=np.loadtxt(os.path.join(modelPath,'pclMatrix_'+filename+'.csv'),delimiter=',')
getTSAvgAmnt(os.path.join(modelPath,'avgAmnt_'+filename+'.csv'))
def CalPM(self):
global dictQuote,dictTSAvgAmnt,nXData
crow=np.zeros(self.nIndu*2)
inputRow=np.zeros(self.nIndu*2)
npAveTSpanAmnt=np.zeros(self.nIndu)
for (symbol,weiIndu) in self.dictCodeInfo.items():
if (symbol not in dictQuote):# or (symbol not in dictTSAvgAmnt):
#print('np Symbol: '+ symbol)
#return
continue
wei=weiIndu[0]
intIndu=int(weiIndu[1]+0.1)
lpri=dictQuote[symbol][1]
lamt=dictQuote[symbol][2]
crow[2*intIndu-2]+=wei*lpri
crow[2*intIndu-1]+=lamt
npAveTSpanAmnt[intIndu-1]+=dictTSAvgAmnt[symbol]
#if lpri<0.01:
#print('price 0: '+symbol)
#continue
if crow[0]<1:
print('wait quote')
return
if self.lastInduData.size==0:
self.lastInduData=crow
for i in range(self.nIndu):
inputRow[2*i]=(crow[2*i]/self.lastInduData[2*i]-1)*10000
inputRow[2*i+1]=(crow[2*i+1]-self | identifier_name | |
utils.py | return dt.replace(tzinfo=timezone('UTC')).astimezone(timezone(tz))
def getWeekDays(dt):
if not dt.tzinfo:
dt = dt.replace(tzinfo=timezone(TIMEZONE))
else:
dt = dt.astimezone(timezone(TIMEZONE))
# weekday of Monday is 0
monday = dt - timedelta(days=dt.weekday())
weekdays = [monday]
for i in range(1, 7):
weekdays.append(monday + timedelta(days=i))
return weekdays
def addMonths(dt,months):
month = dt.month - 1 + months
year = dt.year + month // 12
month = month % 12 + 1
day = min(dt.day,calendar.monthrange(year,month)[1])
return dt.replace(year=year, month = month,day=day)
def toTimestamp(dt):
return int(time.mktime(dt.timetuple()))
def toDatetime(ts):
return datetime.fromtimestamp(ts)
gcService = None
def getGoogleCalendarService():
global gcService
if not gcService:
import httplib2
from apiclient import discovery
from get_google_calendar_credentials import get_google_calendar_credentials
credentials = get_google_calendar_credentials()
http = credentials.authorize(httplib2.Http())
gcService = discovery.build('calendar', 'v3', http=http)
return gcService
gsService = None
def getGoogleSheetService():
global gsService
if not gsService:
import httplib2
from apiclient import discovery
from get_google_sheet_credentials import get_google_sheet_credentials
credentials = get_google_sheet_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
gsService = discovery.build('sheets', 'v4', http=http,
discoveryServiceUrl=discoveryUrl)
return gsService
def updateSheet(body, rangeName = 'Sheet1', valueInputOption='USER_ENTERED'):
spreadsheetId = SPREADSHEETID
service = getGoogleSheetService()
result = service.spreadsheets().values().update(
spreadsheetId=spreadsheetId, range=rangeName,
valueInputOption=valueInputOption, body=body).execute()
if rangeName.startswith('Sheet1'):
setting({CLIENTS_CACHE_VALID: False})
print('{0} cells updated.'.format(result.get('updatedCells')))
return result
# create google calendar event
def createEvent(name, time, lastHours=1):
service = getGoogleCalendarService()
endTime = time + timedelta(hours=lastHours)
event = {
'summary': name,
# 'location': '800 Howard St., San Francisco, CA 94103',
# 'description': 'A chance to hear more about Google\'s developer products.',
'start': {
# 'dateTime': '2015-05-28T09:00:00-07:00',
'dateTime': time.strftime('%Y-%m-%dT%H:%M:%S'),
'timeZone': TIMEZONE,
},
'end': {
'dateTime': endTime.strftime('%Y-%m-%dT%H:%M:%S'),
'timeZone': TIMEZONE,
},
# 'recurrence': [
# 'RRULE:FREQ=DAILY;COUNT=2'
# ],
# 'attendees': [
# {'email': 'lpage@example.com'},
# {'email': 'sbrin@example.com'},
# ],
# 'reminders': {
# 'useDefault': False,
# 'overrides': [
# {'method': 'email', 'minutes': 24 * 60},
# {'method': 'popup', 'minutes': 10},
# ],
# },
}
event = service.events().insert(calendarId='primary', body=event).execute()
print('Event created: %s' % (event.get('htmlLink')))
return event
def sendSms(phone, msg):
from twilio.rest import Client
from config import SPREADSHEETID, sms_account_sid, sms_auth_token, sms_from
client = Client(sms_account_sid, sms_auth_token)
client.api.account.messages.create(
to=phone,
from_=sms_from,
body=msg)
def getSheetValues(rangeName = 'Sheet1'):
service = getGoogleSheetService()
spreadsheetId = SPREADSHEETID
rangeName = 'Sheet1'
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheetId, range=rangeName).execute()
sheetRows = result.get('values', [])
return sheetRows
def getClientFilter():
if not setting(CLIENTS_CACHE_VALID):
from cassandra.cqlengine import connection
from cassandra.cqlengine.query import BatchQuery
session = connection.get_connection().session
session.execute('TRUNCATE %s.client;'%(config.db_keyspace))
values = getSheetValues()
with BatchQuery() as b:
for i, row in enumerate(values):
# 0 is head
if i == 0:
continue
lineNumber = i + 1
row2 = [listGet(row, j, '').strip() for j in range(4)]
notEmpty = False
for cell in row2:
if cell:
notEmpty = True
break
if notEmpty:
models.client.batch(b).create(id=lineNumber, phone=row2[0], name=row2[1], full_name=row2[2], facebook_id=row2[3])
values = [['Cached at ' + utc2local(datetime.utcnow()).strftime("%Y-%m-%d %H:%M:%S")], ['line number', 'phone', 'name', 'full name', 'facebook id']]
for row in models.client.all():
values.append([row.id, row.phone, row.name, row.full_name, row.facebook_id])
body = {
'values': values
}
updateSheet(body, 'Cached')
setting({CLIENTS_CACHE_VALID: True})
return models.client.objects()
def getGoogleStrTime(dt):
return dt.replace(tzinfo=None).isoformat() + 'Z' # 'Z' indicates UTC time
def getEventsByPhone(phone):
service = getGoogleCalendarService()
today = utc2local(datetime.utcnow())
today = today.replace(hour=0, minute=0, second=0, microsecond=0)
today = local2utc(today)
today = getGoogleStrTime(today)
eventsResult = service.events().list(
calendarId='primary', singleEvents=True, q =phone, timeMin=today,
orderBy='startTime').execute()
events = eventsResult.get('items')
if not events:
return None
events.reverse()
return events
def getEventById(evid):
service = getGoogleCalendarService()
return service.events().get(calendarId='primary', eventId=evid).execute() | user = models.user.objects.filter(id=id).first()
cache = {} if not user else json.loads(user.cache)
return cache.get(name, default)
def userCacheSet(id, name, value):
user = models.user.objects.filter(id=id).first()
if not user:
models.user.create(id=id, cache=json.dumps({}))
user = models.user.objects.filter(id=id).first()
cache = json.loads(user.cache)
if value == None:
if name in cache:
del cache[name]
else:
cache[name] = value
user.cache = json.dumps(cache)
user.save()
def setting(name, default=None):
item = models.key_value.objects.filter(key='setting').first()
dc = {} if not item else json.loads(item.value)
if isinstance(name, dict):
# set
toset = name
for k, v in toset.items():
if v == None:
# remove
if k in dc:
del dc[k]
else:
dc[k] = v
dcStr = json.dumps(dc)
if not item:
item = models.key_value(key='setting')
item.value = dcStr
item.save()
else:
# get
return dc.get(name, default)
# get: name, default(nullable)
# set: nameValues(dict), minutes(nullable)
def cache(*args):
if isinstance(args[0], dict):
# set
nameValues = args[0]
minutes = listGet(args, 1)
for k, v in nameValues.items():
item = models.cache.objects.filter(name=k).first()
if not item:
item = models.cache()
item.name = k
item.value = v
if minutes:
item.expired_at = datetime.now() + timedelta(minutes=minutes)
else:
item.expired_at = None
item.save()
else:
# get
name = args[0]
default = listGet(args, 1)
# | def getBookingDateFromEvent(event, fmt = '%Y-%m-%d %H:%M'):
start = datetime.strptime(event['start']['dateTime'][:19], "%Y-%m-%dT%H:%M:%S")
bookingDatetime = start.strftime(fmt)
return bookingDatetime
def userCacheGet(id, name, default=None): | random_line_split |
utils.py | return dt.replace(tzinfo=timezone('UTC')).astimezone(timezone(tz))
def getWeekDays(dt):
if not dt.tzinfo:
dt = dt.replace(tzinfo=timezone(TIMEZONE))
else:
dt = dt.astimezone(timezone(TIMEZONE))
# weekday of Monday is 0
monday = dt - timedelta(days=dt.weekday())
weekdays = [monday]
for i in range(1, 7):
weekdays.append(monday + timedelta(days=i))
return weekdays
def addMonths(dt,months):
month = dt.month - 1 + months
year = dt.year + month // 12
month = month % 12 + 1
day = min(dt.day,calendar.monthrange(year,month)[1])
return dt.replace(year=year, month = month,day=day)
def toTimestamp(dt):
return int(time.mktime(dt.timetuple()))
def toDatetime(ts):
return datetime.fromtimestamp(ts)
gcService = None
def getGoogleCalendarService():
global gcService
if not gcService:
import httplib2
from apiclient import discovery
from get_google_calendar_credentials import get_google_calendar_credentials
credentials = get_google_calendar_credentials()
http = credentials.authorize(httplib2.Http())
gcService = discovery.build('calendar', 'v3', http=http)
return gcService
gsService = None
def getGoogleSheetService():
global gsService
if not gsService:
import httplib2
from apiclient import discovery
from get_google_sheet_credentials import get_google_sheet_credentials
credentials = get_google_sheet_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
gsService = discovery.build('sheets', 'v4', http=http,
discoveryServiceUrl=discoveryUrl)
return gsService
def updateSheet(body, rangeName = 'Sheet1', valueInputOption='USER_ENTERED'):
spreadsheetId = SPREADSHEETID
service = getGoogleSheetService()
result = service.spreadsheets().values().update(
spreadsheetId=spreadsheetId, range=rangeName,
valueInputOption=valueInputOption, body=body).execute()
if rangeName.startswith('Sheet1'):
setting({CLIENTS_CACHE_VALID: False})
print('{0} cells updated.'.format(result.get('updatedCells')))
return result
# create google calendar event
def createEvent(name, time, lastHours=1):
service = getGoogleCalendarService()
endTime = time + timedelta(hours=lastHours)
event = {
'summary': name,
# 'location': '800 Howard St., San Francisco, CA 94103',
# 'description': 'A chance to hear more about Google\'s developer products.',
'start': {
# 'dateTime': '2015-05-28T09:00:00-07:00',
'dateTime': time.strftime('%Y-%m-%dT%H:%M:%S'),
'timeZone': TIMEZONE,
},
'end': {
'dateTime': endTime.strftime('%Y-%m-%dT%H:%M:%S'),
'timeZone': TIMEZONE,
},
# 'recurrence': [
# 'RRULE:FREQ=DAILY;COUNT=2'
# ],
# 'attendees': [
# {'email': 'lpage@example.com'},
# {'email': 'sbrin@example.com'},
# ],
# 'reminders': {
# 'useDefault': False,
# 'overrides': [
# {'method': 'email', 'minutes': 24 * 60},
# {'method': 'popup', 'minutes': 10},
# ],
# },
}
event = service.events().insert(calendarId='primary', body=event).execute()
print('Event created: %s' % (event.get('htmlLink')))
return event
def sendSms(phone, msg):
from twilio.rest import Client
from config import SPREADSHEETID, sms_account_sid, sms_auth_token, sms_from
client = Client(sms_account_sid, sms_auth_token)
client.api.account.messages.create(
to=phone,
from_=sms_from,
body=msg)
def getSheetValues(rangeName = 'Sheet1'):
service = getGoogleSheetService()
spreadsheetId = SPREADSHEETID
rangeName = 'Sheet1'
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheetId, range=rangeName).execute()
sheetRows = result.get('values', [])
return sheetRows
def getClientFilter():
if not setting(CLIENTS_CACHE_VALID):
from cassandra.cqlengine import connection
from cassandra.cqlengine.query import BatchQuery
session = connection.get_connection().session
session.execute('TRUNCATE %s.client;'%(config.db_keyspace))
values = getSheetValues()
with BatchQuery() as b:
for i, row in enumerate(values):
# 0 is head
if i == 0:
continue
lineNumber = i + 1
row2 = [listGet(row, j, '').strip() for j in range(4)]
notEmpty = False
for cell in row2:
if cell:
notEmpty = True
break
if notEmpty:
models.client.batch(b).create(id=lineNumber, phone=row2[0], name=row2[1], full_name=row2[2], facebook_id=row2[3])
values = [['Cached at ' + utc2local(datetime.utcnow()).strftime("%Y-%m-%d %H:%M:%S")], ['line number', 'phone', 'name', 'full name', 'facebook id']]
for row in models.client.all():
values.append([row.id, row.phone, row.name, row.full_name, row.facebook_id])
body = {
'values': values
}
updateSheet(body, 'Cached')
setting({CLIENTS_CACHE_VALID: True})
return models.client.objects()
def getGoogleStrTime(dt):
return dt.replace(tzinfo=None).isoformat() + 'Z' # 'Z' indicates UTC time
def getEventsByPhone(phone):
service = getGoogleCalendarService()
today = utc2local(datetime.utcnow())
today = today.replace(hour=0, minute=0, second=0, microsecond=0)
today = local2utc(today)
today = getGoogleStrTime(today)
eventsResult = service.events().list(
calendarId='primary', singleEvents=True, q =phone, timeMin=today,
orderBy='startTime').execute()
events = eventsResult.get('items')
if not events:
return None
events.reverse()
return events
def getEventById(evid):
service = getGoogleCalendarService()
return service.events().get(calendarId='primary', eventId=evid).execute()
def getBookingDateFromEvent(event, fmt = '%Y-%m-%d %H:%M'):
start = datetime.strptime(event['start']['dateTime'][:19], "%Y-%m-%dT%H:%M:%S")
bookingDatetime = start.strftime(fmt)
return bookingDatetime
def userCacheGet(id, name, default=None):
user = models.user.objects.filter(id=id).first()
cache = {} if not user else json.loads(user.cache)
return cache.get(name, default)
def userCacheSet(id, name, value):
user = models.user.objects.filter(id=id).first()
if not user:
models.user.create(id=id, cache=json.dumps({}))
user = models.user.objects.filter(id=id).first()
cache = json.loads(user.cache)
if value == None:
if name in cache:
del cache[name]
else:
cache[name] = value
user.cache = json.dumps(cache)
user.save()
def setting(name, default=None):
item = models.key_value.objects.filter(key='setting').first()
dc = {} if not item else json.loads(item.value)
if isinstance(name, dict):
# set
toset = name
for k, v in toset.items():
if v == None:
# remove
if k in dc:
del dc[k]
else:
dc[k] = v
dcStr = json.dumps(dc)
if not item:
item = models.key_value(key='setting')
item.value = dcStr
item.save()
else:
# get
|
# get: name, default(nullable)
# set: nameValues(dict), minutes(nullable)
def cache(*args):
if isinstance(args[0], dict):
# set
nameValues = args[0]
minutes = listGet(args, 1)
for k, v in nameValues.items():
item = models.cache.objects.filter(name=k).first()
if not item:
item = models.cache()
item.name = k
item.value = v
if minutes:
item.expired_at = datetime.now() + timedelta(minutes=minutes)
else:
item.expired_at = None
item.save()
else:
# get
name = args[0]
default = listGet(args, 1)
| return dc.get(name, default) | conditional_block |
utils.py | return dt.replace(tzinfo=timezone('UTC')).astimezone(timezone(tz))
def getWeekDays(dt):
if not dt.tzinfo:
dt = dt.replace(tzinfo=timezone(TIMEZONE))
else:
dt = dt.astimezone(timezone(TIMEZONE))
# weekday of Monday is 0
monday = dt - timedelta(days=dt.weekday())
weekdays = [monday]
for i in range(1, 7):
weekdays.append(monday + timedelta(days=i))
return weekdays
def addMonths(dt,months):
month = dt.month - 1 + months
year = dt.year + month // 12
month = month % 12 + 1
day = min(dt.day,calendar.monthrange(year,month)[1])
return dt.replace(year=year, month = month,day=day)
def toTimestamp(dt):
return int(time.mktime(dt.timetuple()))
def | (ts):
return datetime.fromtimestamp(ts)
gcService = None
def getGoogleCalendarService():
global gcService
if not gcService:
import httplib2
from apiclient import discovery
from get_google_calendar_credentials import get_google_calendar_credentials
credentials = get_google_calendar_credentials()
http = credentials.authorize(httplib2.Http())
gcService = discovery.build('calendar', 'v3', http=http)
return gcService
gsService = None
def getGoogleSheetService():
global gsService
if not gsService:
import httplib2
from apiclient import discovery
from get_google_sheet_credentials import get_google_sheet_credentials
credentials = get_google_sheet_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
gsService = discovery.build('sheets', 'v4', http=http,
discoveryServiceUrl=discoveryUrl)
return gsService
def updateSheet(body, rangeName = 'Sheet1', valueInputOption='USER_ENTERED'):
spreadsheetId = SPREADSHEETID
service = getGoogleSheetService()
result = service.spreadsheets().values().update(
spreadsheetId=spreadsheetId, range=rangeName,
valueInputOption=valueInputOption, body=body).execute()
if rangeName.startswith('Sheet1'):
setting({CLIENTS_CACHE_VALID: False})
print('{0} cells updated.'.format(result.get('updatedCells')))
return result
# create google calendar event
def createEvent(name, time, lastHours=1):
service = getGoogleCalendarService()
endTime = time + timedelta(hours=lastHours)
event = {
'summary': name,
# 'location': '800 Howard St., San Francisco, CA 94103',
# 'description': 'A chance to hear more about Google\'s developer products.',
'start': {
# 'dateTime': '2015-05-28T09:00:00-07:00',
'dateTime': time.strftime('%Y-%m-%dT%H:%M:%S'),
'timeZone': TIMEZONE,
},
'end': {
'dateTime': endTime.strftime('%Y-%m-%dT%H:%M:%S'),
'timeZone': TIMEZONE,
},
# 'recurrence': [
# 'RRULE:FREQ=DAILY;COUNT=2'
# ],
# 'attendees': [
# {'email': 'lpage@example.com'},
# {'email': 'sbrin@example.com'},
# ],
# 'reminders': {
# 'useDefault': False,
# 'overrides': [
# {'method': 'email', 'minutes': 24 * 60},
# {'method': 'popup', 'minutes': 10},
# ],
# },
}
event = service.events().insert(calendarId='primary', body=event).execute()
print('Event created: %s' % (event.get('htmlLink')))
return event
def sendSms(phone, msg):
from twilio.rest import Client
from config import SPREADSHEETID, sms_account_sid, sms_auth_token, sms_from
client = Client(sms_account_sid, sms_auth_token)
client.api.account.messages.create(
to=phone,
from_=sms_from,
body=msg)
def getSheetValues(rangeName = 'Sheet1'):
service = getGoogleSheetService()
spreadsheetId = SPREADSHEETID
rangeName = 'Sheet1'
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheetId, range=rangeName).execute()
sheetRows = result.get('values', [])
return sheetRows
def getClientFilter():
if not setting(CLIENTS_CACHE_VALID):
from cassandra.cqlengine import connection
from cassandra.cqlengine.query import BatchQuery
session = connection.get_connection().session
session.execute('TRUNCATE %s.client;'%(config.db_keyspace))
values = getSheetValues()
with BatchQuery() as b:
for i, row in enumerate(values):
# 0 is head
if i == 0:
continue
lineNumber = i + 1
row2 = [listGet(row, j, '').strip() for j in range(4)]
notEmpty = False
for cell in row2:
if cell:
notEmpty = True
break
if notEmpty:
models.client.batch(b).create(id=lineNumber, phone=row2[0], name=row2[1], full_name=row2[2], facebook_id=row2[3])
values = [['Cached at ' + utc2local(datetime.utcnow()).strftime("%Y-%m-%d %H:%M:%S")], ['line number', 'phone', 'name', 'full name', 'facebook id']]
for row in models.client.all():
values.append([row.id, row.phone, row.name, row.full_name, row.facebook_id])
body = {
'values': values
}
updateSheet(body, 'Cached')
setting({CLIENTS_CACHE_VALID: True})
return models.client.objects()
def getGoogleStrTime(dt):
return dt.replace(tzinfo=None).isoformat() + 'Z' # 'Z' indicates UTC time
def getEventsByPhone(phone):
service = getGoogleCalendarService()
today = utc2local(datetime.utcnow())
today = today.replace(hour=0, minute=0, second=0, microsecond=0)
today = local2utc(today)
today = getGoogleStrTime(today)
eventsResult = service.events().list(
calendarId='primary', singleEvents=True, q =phone, timeMin=today,
orderBy='startTime').execute()
events = eventsResult.get('items')
if not events:
return None
events.reverse()
return events
def getEventById(evid):
service = getGoogleCalendarService()
return service.events().get(calendarId='primary', eventId=evid).execute()
def getBookingDateFromEvent(event, fmt = '%Y-%m-%d %H:%M'):
start = datetime.strptime(event['start']['dateTime'][:19], "%Y-%m-%dT%H:%M:%S")
bookingDatetime = start.strftime(fmt)
return bookingDatetime
def userCacheGet(id, name, default=None):
user = models.user.objects.filter(id=id).first()
cache = {} if not user else json.loads(user.cache)
return cache.get(name, default)
def userCacheSet(id, name, value):
user = models.user.objects.filter(id=id).first()
if not user:
models.user.create(id=id, cache=json.dumps({}))
user = models.user.objects.filter(id=id).first()
cache = json.loads(user.cache)
if value == None:
if name in cache:
del cache[name]
else:
cache[name] = value
user.cache = json.dumps(cache)
user.save()
def setting(name, default=None):
item = models.key_value.objects.filter(key='setting').first()
dc = {} if not item else json.loads(item.value)
if isinstance(name, dict):
# set
toset = name
for k, v in toset.items():
if v == None:
# remove
if k in dc:
del dc[k]
else:
dc[k] = v
dcStr = json.dumps(dc)
if not item:
item = models.key_value(key='setting')
item.value = dcStr
item.save()
else:
# get
return dc.get(name, default)
# get: name, default(nullable)
# set: nameValues(dict), minutes(nullable)
def cache(*args):
if isinstance(args[0], dict):
# set
nameValues = args[0]
minutes = listGet(args, 1)
for k, v in nameValues.items():
item = models.cache.objects.filter(name=k).first()
if not item:
item = models.cache()
item.name = k
item.value = v
if minutes:
item.expired_at = datetime.now() + timedelta(minutes=minutes)
else:
item.expired_at = None
item.save()
else:
# get
name = args[0]
default = listGet(args, 1)
| toDatetime | identifier_name |
utils.py | return dt.replace(tzinfo=timezone('UTC')).astimezone(timezone(tz))
def getWeekDays(dt):
if not dt.tzinfo:
dt = dt.replace(tzinfo=timezone(TIMEZONE))
else:
dt = dt.astimezone(timezone(TIMEZONE))
# weekday of Monday is 0
monday = dt - timedelta(days=dt.weekday())
weekdays = [monday]
for i in range(1, 7):
weekdays.append(monday + timedelta(days=i))
return weekdays
def addMonths(dt,months):
|
def toTimestamp(dt):
return int(time.mktime(dt.timetuple()))
def toDatetime(ts):
return datetime.fromtimestamp(ts)
gcService = None
def getGoogleCalendarService():
global gcService
if not gcService:
import httplib2
from apiclient import discovery
from get_google_calendar_credentials import get_google_calendar_credentials
credentials = get_google_calendar_credentials()
http = credentials.authorize(httplib2.Http())
gcService = discovery.build('calendar', 'v3', http=http)
return gcService
gsService = None
def getGoogleSheetService():
global gsService
if not gsService:
import httplib2
from apiclient import discovery
from get_google_sheet_credentials import get_google_sheet_credentials
credentials = get_google_sheet_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
gsService = discovery.build('sheets', 'v4', http=http,
discoveryServiceUrl=discoveryUrl)
return gsService
def updateSheet(body, rangeName = 'Sheet1', valueInputOption='USER_ENTERED'):
spreadsheetId = SPREADSHEETID
service = getGoogleSheetService()
result = service.spreadsheets().values().update(
spreadsheetId=spreadsheetId, range=rangeName,
valueInputOption=valueInputOption, body=body).execute()
if rangeName.startswith('Sheet1'):
setting({CLIENTS_CACHE_VALID: False})
print('{0} cells updated.'.format(result.get('updatedCells')))
return result
# create google calendar event
def createEvent(name, time, lastHours=1):
service = getGoogleCalendarService()
endTime = time + timedelta(hours=lastHours)
event = {
'summary': name,
# 'location': '800 Howard St., San Francisco, CA 94103',
# 'description': 'A chance to hear more about Google\'s developer products.',
'start': {
# 'dateTime': '2015-05-28T09:00:00-07:00',
'dateTime': time.strftime('%Y-%m-%dT%H:%M:%S'),
'timeZone': TIMEZONE,
},
'end': {
'dateTime': endTime.strftime('%Y-%m-%dT%H:%M:%S'),
'timeZone': TIMEZONE,
},
# 'recurrence': [
# 'RRULE:FREQ=DAILY;COUNT=2'
# ],
# 'attendees': [
# {'email': 'lpage@example.com'},
# {'email': 'sbrin@example.com'},
# ],
# 'reminders': {
# 'useDefault': False,
# 'overrides': [
# {'method': 'email', 'minutes': 24 * 60},
# {'method': 'popup', 'minutes': 10},
# ],
# },
}
event = service.events().insert(calendarId='primary', body=event).execute()
print('Event created: %s' % (event.get('htmlLink')))
return event
def sendSms(phone, msg):
from twilio.rest import Client
from config import SPREADSHEETID, sms_account_sid, sms_auth_token, sms_from
client = Client(sms_account_sid, sms_auth_token)
client.api.account.messages.create(
to=phone,
from_=sms_from,
body=msg)
def getSheetValues(rangeName = 'Sheet1'):
service = getGoogleSheetService()
spreadsheetId = SPREADSHEETID
rangeName = 'Sheet1'
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheetId, range=rangeName).execute()
sheetRows = result.get('values', [])
return sheetRows
def getClientFilter():
if not setting(CLIENTS_CACHE_VALID):
from cassandra.cqlengine import connection
from cassandra.cqlengine.query import BatchQuery
session = connection.get_connection().session
session.execute('TRUNCATE %s.client;'%(config.db_keyspace))
values = getSheetValues()
with BatchQuery() as b:
for i, row in enumerate(values):
# 0 is head
if i == 0:
continue
lineNumber = i + 1
row2 = [listGet(row, j, '').strip() for j in range(4)]
notEmpty = False
for cell in row2:
if cell:
notEmpty = True
break
if notEmpty:
models.client.batch(b).create(id=lineNumber, phone=row2[0], name=row2[1], full_name=row2[2], facebook_id=row2[3])
values = [['Cached at ' + utc2local(datetime.utcnow()).strftime("%Y-%m-%d %H:%M:%S")], ['line number', 'phone', 'name', 'full name', 'facebook id']]
for row in models.client.all():
values.append([row.id, row.phone, row.name, row.full_name, row.facebook_id])
body = {
'values': values
}
updateSheet(body, 'Cached')
setting({CLIENTS_CACHE_VALID: True})
return models.client.objects()
def getGoogleStrTime(dt):
return dt.replace(tzinfo=None).isoformat() + 'Z' # 'Z' indicates UTC time
def getEventsByPhone(phone):
service = getGoogleCalendarService()
today = utc2local(datetime.utcnow())
today = today.replace(hour=0, minute=0, second=0, microsecond=0)
today = local2utc(today)
today = getGoogleStrTime(today)
eventsResult = service.events().list(
calendarId='primary', singleEvents=True, q =phone, timeMin=today,
orderBy='startTime').execute()
events = eventsResult.get('items')
if not events:
return None
events.reverse()
return events
def getEventById(evid):
service = getGoogleCalendarService()
return service.events().get(calendarId='primary', eventId=evid).execute()
def getBookingDateFromEvent(event, fmt = '%Y-%m-%d %H:%M'):
start = datetime.strptime(event['start']['dateTime'][:19], "%Y-%m-%dT%H:%M:%S")
bookingDatetime = start.strftime(fmt)
return bookingDatetime
def userCacheGet(id, name, default=None):
user = models.user.objects.filter(id=id).first()
cache = {} if not user else json.loads(user.cache)
return cache.get(name, default)
def userCacheSet(id, name, value):
user = models.user.objects.filter(id=id).first()
if not user:
models.user.create(id=id, cache=json.dumps({}))
user = models.user.objects.filter(id=id).first()
cache = json.loads(user.cache)
if value == None:
if name in cache:
del cache[name]
else:
cache[name] = value
user.cache = json.dumps(cache)
user.save()
def setting(name, default=None):
item = models.key_value.objects.filter(key='setting').first()
dc = {} if not item else json.loads(item.value)
if isinstance(name, dict):
# set
toset = name
for k, v in toset.items():
if v == None:
# remove
if k in dc:
del dc[k]
else:
dc[k] = v
dcStr = json.dumps(dc)
if not item:
item = models.key_value(key='setting')
item.value = dcStr
item.save()
else:
# get
return dc.get(name, default)
# get: name, default(nullable)
# set: nameValues(dict), minutes(nullable)
def cache(*args):
if isinstance(args[0], dict):
# set
nameValues = args[0]
minutes = listGet(args, 1)
for k, v in nameValues.items():
item = models.cache.objects.filter(name=k).first()
if not item:
item = models.cache()
item.name = k
item.value = v
if minutes:
item.expired_at = datetime.now() + timedelta(minutes=minutes)
else:
item.expired_at = None
item.save()
else:
# get
name = args[0]
default = listGet(args, 1)
| month = dt.month - 1 + months
year = dt.year + month // 12
month = month % 12 + 1
day = min(dt.day,calendar.monthrange(year,month)[1])
return dt.replace(year=year, month = month,day=day) | identifier_body |
peers_tests.rs | , "`ulimit -n` is too low: {}", n)
}
let ctx = MmCtxBuilder::new().with_conf(conf).into_mm_arc();
unwrap!(ctx.log.thread_gravity_on());
let seed = fomat!((small_rng().next_u64()));
unwrap!(ctx.secp256k1_key_pair.pin(unwrap!(key_pair_from_seed(&seed))));
if let Some(seednodes) = ctx.conf["seednodes"].as_array() {
let mut seeds = unwrap!(ctx.seeds.lock());
assert!(seeds.is_empty()); // `fn lp_initpeers` was not invoked.
assert!(!seednodes.is_empty());
seeds.push(unwrap!(unwrap!(seednodes[0].as_str()).parse()))
}
unwrap!(super::initialize(&ctx, 9999, port).await);
ctx
}
async fn destruction_check(mm: MmArc) {
mm.stop();
if let Err(err) = wait_for_log_re(&mm, 1., "delete_dugout finished!").await {
// NB: We want to know if/when the `peers` destruction doesn't happen, but we don't want to panic about it.
pintln!((err))
}
}
async fn peers_exchange(conf: Json) {
let fallback_on = conf["http-fallback"] == "on";
let fallback = if fallback_on { 1 } else { 255 };
let alice = peer(conf.clone(), 2111).await;
let bob = peer(conf, 2112).await;
if !fallback_on {
unwrap!(wait_for_log_re(&alice, 99., r"\[dht-boot] DHT bootstrap \.\.\. Done\.").await);
unwrap!(wait_for_log_re(&bob, 33., r"\[dht-boot] DHT bootstrap \.\.\. Done\.").await);
}
let tested_lengths: &[usize] = &[
2222, // Send multiple chunks.
1, // Reduce the number of chunks *in the same subject*.
// 992 /* (1000 - bencode overhead - checksum) */ * 253 /* Compatible with (1u8..) */ - 1 /* space for number_of_chunks */
];
let mut rng = small_rng();
for message_len in tested_lengths.iter() {
// Send a message to Bob.
let message: Vec<u8> = (0..*message_len).map(|_| rng.gen()).collect();
log! ("Sending " (message.len()) " bytes …");
let bob_id = unwrap!(bob.public_id());
let sending_f = unwrap!(
super::send(
alice.clone(),
bob_id,
Vec::from(&b"test_dht"[..]),
fallback,
message.clone()
)
.await
);
// Get that message from Alice.
let validator = super::FixedValidator::Exact(ByteBuf::from(&message[..]));
let rc = super::recv(bob.clone(), Vec::from(&b"test_dht"[..]), fallback, validator);
let rc = select(Box::pin(rc), Timer::sleep(99.)).await;
let received = match rc {
Either::Left((rc, _)) => unwrap!(rc),
Either::Right(_) => panic!("Out of time waiting for reply"),
};
assert_eq!(received, message);
if fallback_on {
// TODO: Refine the log test.
// TODO: Check that the HTTP fallback was NOT used if `!fallback_on`.
unwrap!(wait_for_log_re(&alice, 0.1, r"transmit] TBD, time to use the HTTP fallback\.\.\.").await)
// TODO: Check the time delta, with fallback 1 the delivery shouldn't take long.
}
let hn1 = crate::send_handlers_num();
drop(sending_f);
let hn2 = crate::send_handlers_num();
if cfg!(feature = "native") {
// Dropping SendHandlerRef results in the removal of the corresponding `Arc<SendHandler>`.
assert!(hn1 > 0 && hn2 == hn1 - 1, "hn1 {} hn2 {}", hn1, hn2)
} else {
// `SEND_HANDLERS` only tracks the arcs in the native helper.
assert!(hn1 == 0 && hn2 == 0, "hn1 {} hn2 {}", hn1, hn2)
}
}
destruction_check(alice).await;
destruction_check(bob).await;
}
/// Send and receive messages of various length and chunking via the DHT.
pub async fn peers_dht() { peers_exchange(json! ({"dht": "on"})).await }
#[cfg(not(feature = "native"))]
#[no_mangle]
pub extern "C" fn test_peers_dht(cb_id: i32) {
use std::ptr::null;
common::executor::spawn(async move {
peers_dht().await;
unsafe { call_back(cb_id, null(), 0) }
})
}
/// Using a minimal one second HTTP fallback which should happen before the DHT kicks in.
#[cfg(feature = "native")]
pub fn peers_http_fallback_recv() {
let ctx = MmCtxBuilder::new().into_mm_arc();
let addr = SocketAddr::new(unwrap!("127.0.0.1".parse()), 30204);
let server = unwrap!(super::http_fallback::new_http_fallback(ctx.weak(), addr));
unwrap!(CORE.lock()).spawn(server);
block_on(peers_exchange(json! ({
"http-fallback": "on",
"seednodes": ["127.0.0.1"],
"http-fallback-port": 30204
})))
}
#[cfg(not(feature = "native"))]
pub fn peers_http_fallback_recv() {}
#[cfg(feature = "native")]
pub fn peers_direct_send() {
use common::for_tests::wait_for_log;
// Unstable results on our MacOS CI server,
// which isn't a problem in general (direct UDP communication is a best effort optimization)
// but is bad for the CI tests.
// Might experiment more with MacOS in the future.
if cfg!(target_os = "macos") {
return;
}
// NB: Still need the DHT enabled in order for the pings to work.
let alice = block_on(peer(json! ({"dht": "on"}), 2121));
let bob = block_on(peer(json! ({"dht": "on"}), 2122));
let bob_id = unwrap!(bob.public_id());
// Bob isn't a friend yet.
let alice_pctx = unwrap!(super::PeersContext::from_ctx(&alice));
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(!alice_trans.friends.contains_key(&bob_id))
}
let mut rng = small_rng();
let message: Vec<u8> = (0..33).map(|_| rng.gen()).collect();
let _send_f = block_on(super::send(
alice.clone(),
bob_id,
Vec::from(&b"subj"[..]),
255,
message.clone(),
));
let recv_f = super::recv(
bob.clone(),
Vec::from(&b"subj"[..]),
255,
super::FixedValidator::AnythingGoes,
);
// Confirm that Bob was added into the friendlist and that we don't know its address yet.
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(alice_trans.friends.contains_key(&bob_id))
}
let bob_pctx = unwrap!(super::PeersContext::from_ctx(&bob));
assert_eq!(0, alice_pctx.direct_pings.load(Ordering::Relaxed));
assert_eq!(0, bob_pctx.direct_pings.load(Ordering::Relaxed));
// Hint at the Bob's endpoint.
unwrap!(super::investigate_peer(&alice, "127.0.0.1", 2122));
// Direct pings triggered by `investigate_peer`.
// NB: The sleep here is larger than expected because the actual pings start to fly only after the DHT initialization kicks in.
unwrap!(wait_for_log(&bob.log, 22., &|_| bob_pctx
.direct_pings
.load(Ordering::Relaxed)
> 0));
// Bob's reply.
unwrap!(wait_for_log(&alice.log, 22., &|_| alice_pctx
.direct_pings
.load(Ordering::Relaxed)
> 0));
// Confirm that Bob now has the address.
let bob_addr = SocketAddr::new(Ipv4Addr::new(127, 0, 0, 1).into(), 2122);
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(alice_trans.friends[&bob_id].endpoints.contains_key(&bob_addr))
}
// Finally see if Bob got the message.
unwrap!(wait_for_log(&bob.log, 1., &|_| bob_pctx | .direct_chunks
.load(Ordering::Relaxed)
> 0));
let start = now_float(); | random_line_split | |
peers_tests.rs | lim_cur as u32)
} else {
None
}
}
#[cfg(not(any(target_os = "macos", target_os = "linux")))]
fn ulimit_n() -> Option<u32> { None }
async fn peer(conf: Json, port: u16) -> MmArc {
if let Some(n) = ulimit_n() {
assert!(n > 2000, "`ulimit -n` is too low: {}", n)
}
let ctx = MmCtxBuilder::new().with_conf(conf).into_mm_arc();
unwrap!(ctx.log.thread_gravity_on());
let seed = fomat!((small_rng().next_u64()));
unwrap!(ctx.secp256k1_key_pair.pin(unwrap!(key_pair_from_seed(&seed))));
if let Some(seednodes) = ctx.conf["seednodes"].as_array() {
let mut seeds = unwrap!(ctx.seeds.lock());
assert!(seeds.is_empty()); // `fn lp_initpeers` was not invoked.
assert!(!seednodes.is_empty());
seeds.push(unwrap!(unwrap!(seednodes[0].as_str()).parse()))
}
unwrap!(super::initialize(&ctx, 9999, port).await);
ctx
}
async fn destruction_check(mm: MmArc) {
mm.stop();
if let Err(err) = wait_for_log_re(&mm, 1., "delete_dugout finished!").await {
// NB: We want to know if/when the `peers` destruction doesn't happen, but we don't want to panic about it.
pintln!((err))
}
}
async fn peers_exchange(conf: Json) {
let fallback_on = conf["http-fallback"] == "on";
let fallback = if fallback_on { 1 } else { 255 };
let alice = peer(conf.clone(), 2111).await;
let bob = peer(conf, 2112).await;
if !fallback_on {
unwrap!(wait_for_log_re(&alice, 99., r"\[dht-boot] DHT bootstrap \.\.\. Done\.").await);
unwrap!(wait_for_log_re(&bob, 33., r"\[dht-boot] DHT bootstrap \.\.\. Done\.").await);
}
let tested_lengths: &[usize] = &[
2222, // Send multiple chunks.
1, // Reduce the number of chunks *in the same subject*.
// 992 /* (1000 - bencode overhead - checksum) */ * 253 /* Compatible with (1u8..) */ - 1 /* space for number_of_chunks */
];
let mut rng = small_rng();
for message_len in tested_lengths.iter() {
// Send a message to Bob.
let message: Vec<u8> = (0..*message_len).map(|_| rng.gen()).collect();
log! ("Sending " (message.len()) " bytes …");
let bob_id = unwrap!(bob.public_id());
let sending_f = unwrap!(
super::send(
alice.clone(),
bob_id,
Vec::from(&b"test_dht"[..]),
fallback,
message.clone()
)
.await
);
// Get that message from Alice.
let validator = super::FixedValidator::Exact(ByteBuf::from(&message[..]));
let rc = super::recv(bob.clone(), Vec::from(&b"test_dht"[..]), fallback, validator);
let rc = select(Box::pin(rc), Timer::sleep(99.)).await;
let received = match rc {
Either::Left((rc, _)) => unwrap!(rc),
Either::Right(_) => panic!("Out of time waiting for reply"),
};
assert_eq!(received, message);
if fallback_on {
// TODO: Refine the log test.
// TODO: Check that the HTTP fallback was NOT used if `!fallback_on`.
unwrap!(wait_for_log_re(&alice, 0.1, r"transmit] TBD, time to use the HTTP fallback\.\.\.").await)
// TODO: Check the time delta, with fallback 1 the delivery shouldn't take long.
}
let hn1 = crate::send_handlers_num();
drop(sending_f);
let hn2 = crate::send_handlers_num();
if cfg!(feature = "native") {
// Dropping SendHandlerRef results in the removal of the corresponding `Arc<SendHandler>`.
assert!(hn1 > 0 && hn2 == hn1 - 1, "hn1 {} hn2 {}", hn1, hn2)
} else {
// `SEND_HANDLERS` only tracks the arcs in the native helper.
assert!(hn1 == 0 && hn2 == 0, "hn1 {} hn2 {}", hn1, hn2)
}
}
destruction_check(alice).await;
destruction_check(bob).await;
}
/// Send and receive messages of various length and chunking via the DHT.
pub async fn peers_dht() { peers_exchange(json! ({"dht": "on"})).await }
#[cfg(not(feature = "native"))]
#[no_mangle]
pub extern "C" fn test_peers_dht(cb_id: i32) {
| /// Using a minimal one second HTTP fallback which should happen before the DHT kicks in.
#[cfg(feature = "native")]
pub fn peers_http_fallback_recv() {
let ctx = MmCtxBuilder::new().into_mm_arc();
let addr = SocketAddr::new(unwrap!("127.0.0.1".parse()), 30204);
let server = unwrap!(super::http_fallback::new_http_fallback(ctx.weak(), addr));
unwrap!(CORE.lock()).spawn(server);
block_on(peers_exchange(json! ({
"http-fallback": "on",
"seednodes": ["127.0.0.1"],
"http-fallback-port": 30204
})))
}
#[cfg(not(feature = "native"))]
pub fn peers_http_fallback_recv() {}
#[cfg(feature = "native")]
pub fn peers_direct_send() {
use common::for_tests::wait_for_log;
// Unstable results on our MacOS CI server,
// which isn't a problem in general (direct UDP communication is a best effort optimization)
// but is bad for the CI tests.
// Might experiment more with MacOS in the future.
if cfg!(target_os = "macos") {
return;
}
// NB: Still need the DHT enabled in order for the pings to work.
let alice = block_on(peer(json! ({"dht": "on"}), 2121));
let bob = block_on(peer(json! ({"dht": "on"}), 2122));
let bob_id = unwrap!(bob.public_id());
// Bob isn't a friend yet.
let alice_pctx = unwrap!(super::PeersContext::from_ctx(&alice));
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(!alice_trans.friends.contains_key(&bob_id))
}
let mut rng = small_rng();
let message: Vec<u8> = (0..33).map(|_| rng.gen()).collect();
let _send_f = block_on(super::send(
alice.clone(),
bob_id,
Vec::from(&b"subj"[..]),
255,
message.clone(),
));
let recv_f = super::recv(
bob.clone(),
Vec::from(&b"subj"[..]),
255,
super::FixedValidator::AnythingGoes,
);
// Confirm that Bob was added into the friendlist and that we don't know its address yet.
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(alice_trans.friends.contains_key(&bob_id))
}
let bob_pctx = unwrap!(super::PeersContext::from_ctx(&bob));
assert_eq!(0, alice_pctx.direct_pings.load(Ordering::Relaxed));
assert_eq!(0, bob_pctx.direct_pings.load(Ordering::Relaxed));
// Hint at the Bob's endpoint.
unwrap!(super::investigate_peer(&alice, "127.0.0.1", 2122));
// Direct pings triggered by `investigate_peer`.
// NB: The sleep here is larger than expected because the actual pings start to fly only after the DHT initialization kicks in.
unwrap!(wait_for_log(&bob.log, 22., &|_| bob_pctx
.direct_pings
.load(Ordering::Relaxed)
> 0));
// Bob's reply.
unwrap!(wait_for_log(&alice.log, 22., &|_| alice_pctx
.direct_pings
.load(Ordering::Relaxed)
> 0));
// Confirm that Bob now has the address.
let bob_addr = SocketAddr::new(Ipv4Addr::new(127, 0, 0, 1).into(), 2122);
{
| use std::ptr::null;
common::executor::spawn(async move {
peers_dht().await;
unsafe { call_back(cb_id, null(), 0) }
})
}
| identifier_body |
peers_tests.rs | lim_cur as u32)
} else {
None
}
}
#[cfg(not(any(target_os = "macos", target_os = "linux")))]
fn ulimit_n() -> Option<u32> { None }
async fn peer(conf: Json, port: u16) -> MmArc {
if let Some(n) = ulimit_n() {
assert!(n > 2000, "`ulimit -n` is too low: {}", n)
}
let ctx = MmCtxBuilder::new().with_conf(conf).into_mm_arc();
unwrap!(ctx.log.thread_gravity_on());
let seed = fomat!((small_rng().next_u64()));
unwrap!(ctx.secp256k1_key_pair.pin(unwrap!(key_pair_from_seed(&seed))));
if let Some(seednodes) = ctx.conf["seednodes"].as_array() {
let mut seeds = unwrap!(ctx.seeds.lock());
assert!(seeds.is_empty()); // `fn lp_initpeers` was not invoked.
assert!(!seednodes.is_empty());
seeds.push(unwrap!(unwrap!(seednodes[0].as_str()).parse()))
}
unwrap!(super::initialize(&ctx, 9999, port).await);
ctx
}
async fn destruction_check(mm: MmArc) {
mm.stop();
if let Err(err) = wait_for_log_re(&mm, 1., "delete_dugout finished!").await {
// NB: We want to know if/when the `peers` destruction doesn't happen, but we don't want to panic about it.
pintln!((err))
}
}
async fn peers_exchange(conf: Json) {
let fallback_on = conf["http-fallback"] == "on";
let fallback = if fallback_on { 1 } else { 255 };
let alice = peer(conf.clone(), 2111).await;
let bob = peer(conf, 2112).await;
if !fallback_on {
unwrap!(wait_for_log_re(&alice, 99., r"\[dht-boot] DHT bootstrap \.\.\. Done\.").await);
unwrap!(wait_for_log_re(&bob, 33., r"\[dht-boot] DHT bootstrap \.\.\. Done\.").await);
}
let tested_lengths: &[usize] = &[
2222, // Send multiple chunks.
1, // Reduce the number of chunks *in the same subject*.
// 992 /* (1000 - bencode overhead - checksum) */ * 253 /* Compatible with (1u8..) */ - 1 /* space for number_of_chunks */
];
let mut rng = small_rng();
for message_len in tested_lengths.iter() {
// Send a message to Bob.
let message: Vec<u8> = (0..*message_len).map(|_| rng.gen()).collect();
log! ("Sending " (message.len()) " bytes …");
let bob_id = unwrap!(bob.public_id());
let sending_f = unwrap!(
super::send(
alice.clone(),
bob_id,
Vec::from(&b"test_dht"[..]),
fallback,
message.clone()
)
.await
);
// Get that message from Alice.
let validator = super::FixedValidator::Exact(ByteBuf::from(&message[..]));
let rc = super::recv(bob.clone(), Vec::from(&b"test_dht"[..]), fallback, validator);
let rc = select(Box::pin(rc), Timer::sleep(99.)).await;
let received = match rc {
Either::Left((rc, _)) => unwrap!(rc),
Either::Right(_) => panic!("Out of time waiting for reply"),
};
assert_eq!(received, message);
if fallback_on {
// TODO: Refine the log test.
// TODO: Check that the HTTP fallback was NOT used if `!fallback_on`.
unwrap!(wait_for_log_re(&alice, 0.1, r"transmit] TBD, time to use the HTTP fallback\.\.\.").await)
// TODO: Check the time delta, with fallback 1 the delivery shouldn't take long.
}
let hn1 = crate::send_handlers_num();
drop(sending_f);
let hn2 = crate::send_handlers_num();
if cfg!(feature = "native") {
// Dropping SendHandlerRef results in the removal of the corresponding `Arc<SendHandler>`.
assert!(hn1 > 0 && hn2 == hn1 - 1, "hn1 {} hn2 {}", hn1, hn2)
} else {
// `SEND_HANDLERS` only tracks the arcs in the native helper.
assert!(hn1 == 0 && hn2 == 0, "hn1 {} hn2 {}", hn1, hn2)
}
}
destruction_check(alice).await;
destruction_check(bob).await;
}
/// Send and receive messages of various length and chunking via the DHT.
pub async fn peers_dht() { peers_exchange(json! ({"dht": "on"})).await }
#[cfg(not(feature = "native"))]
#[no_mangle]
pub extern "C" fn test_peers_dht(cb_id: i32) {
use std::ptr::null;
common::executor::spawn(async move {
peers_dht().await;
unsafe { call_back(cb_id, null(), 0) }
})
}
/// Using a minimal one second HTTP fallback which should happen before the DHT kicks in.
#[cfg(feature = "native")]
pub fn peers_http_fallback_recv() {
let ctx = MmCtxBuilder::new().into_mm_arc();
let addr = SocketAddr::new(unwrap!("127.0.0.1".parse()), 30204);
let server = unwrap!(super::http_fallback::new_http_fallback(ctx.weak(), addr));
unwrap!(CORE.lock()).spawn(server);
block_on(peers_exchange(json! ({
"http-fallback": "on",
"seednodes": ["127.0.0.1"],
"http-fallback-port": 30204
})))
}
#[cfg(not(feature = "native"))]
pub fn peers_http_fallback_recv() {}
#[cfg(feature = "native")]
pub fn peers_direct_send() {
use common::for_tests::wait_for_log;
// Unstable results on our MacOS CI server,
// which isn't a problem in general (direct UDP communication is a best effort optimization)
// but is bad for the CI tests.
// Might experiment more with MacOS in the future.
if cfg!(target_os = "macos") {
| // NB: Still need the DHT enabled in order for the pings to work.
let alice = block_on(peer(json! ({"dht": "on"}), 2121));
let bob = block_on(peer(json! ({"dht": "on"}), 2122));
let bob_id = unwrap!(bob.public_id());
// Bob isn't a friend yet.
let alice_pctx = unwrap!(super::PeersContext::from_ctx(&alice));
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(!alice_trans.friends.contains_key(&bob_id))
}
let mut rng = small_rng();
let message: Vec<u8> = (0..33).map(|_| rng.gen()).collect();
let _send_f = block_on(super::send(
alice.clone(),
bob_id,
Vec::from(&b"subj"[..]),
255,
message.clone(),
));
let recv_f = super::recv(
bob.clone(),
Vec::from(&b"subj"[..]),
255,
super::FixedValidator::AnythingGoes,
);
// Confirm that Bob was added into the friendlist and that we don't know its address yet.
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(alice_trans.friends.contains_key(&bob_id))
}
let bob_pctx = unwrap!(super::PeersContext::from_ctx(&bob));
assert_eq!(0, alice_pctx.direct_pings.load(Ordering::Relaxed));
assert_eq!(0, bob_pctx.direct_pings.load(Ordering::Relaxed));
// Hint at the Bob's endpoint.
unwrap!(super::investigate_peer(&alice, "127.0.0.1", 2122));
// Direct pings triggered by `investigate_peer`.
// NB: The sleep here is larger than expected because the actual pings start to fly only after the DHT initialization kicks in.
unwrap!(wait_for_log(&bob.log, 22., &|_| bob_pctx
.direct_pings
.load(Ordering::Relaxed)
> 0));
// Bob's reply.
unwrap!(wait_for_log(&alice.log, 22., &|_| alice_pctx
.direct_pings
.load(Ordering::Relaxed)
> 0));
// Confirm that Bob now has the address.
let bob_addr = SocketAddr::new(Ipv4Addr::new(127, 0, 0, 1).into(), 2122);
{
| return;
}
| conditional_block |
peers_tests.rs | _cur as u32)
} else {
None
}
}
#[cfg(not(any(target_os = "macos", target_os = "linux")))]
fn ulimit_n() -> Option<u32> { None }
async fn peer(conf: Json, port: u16) -> MmArc {
if let Some(n) = ulimit_n() {
assert!(n > 2000, "`ulimit -n` is too low: {}", n)
}
let ctx = MmCtxBuilder::new().with_conf(conf).into_mm_arc();
unwrap!(ctx.log.thread_gravity_on());
let seed = fomat!((small_rng().next_u64()));
unwrap!(ctx.secp256k1_key_pair.pin(unwrap!(key_pair_from_seed(&seed))));
if let Some(seednodes) = ctx.conf["seednodes"].as_array() {
let mut seeds = unwrap!(ctx.seeds.lock());
assert!(seeds.is_empty()); // `fn lp_initpeers` was not invoked.
assert!(!seednodes.is_empty());
seeds.push(unwrap!(unwrap!(seednodes[0].as_str()).parse()))
}
unwrap!(super::initialize(&ctx, 9999, port).await);
ctx
}
async fn | (mm: MmArc) {
mm.stop();
if let Err(err) = wait_for_log_re(&mm, 1., "delete_dugout finished!").await {
// NB: We want to know if/when the `peers` destruction doesn't happen, but we don't want to panic about it.
pintln!((err))
}
}
async fn peers_exchange(conf: Json) {
let fallback_on = conf["http-fallback"] == "on";
let fallback = if fallback_on { 1 } else { 255 };
let alice = peer(conf.clone(), 2111).await;
let bob = peer(conf, 2112).await;
if !fallback_on {
unwrap!(wait_for_log_re(&alice, 99., r"\[dht-boot] DHT bootstrap \.\.\. Done\.").await);
unwrap!(wait_for_log_re(&bob, 33., r"\[dht-boot] DHT bootstrap \.\.\. Done\.").await);
}
let tested_lengths: &[usize] = &[
2222, // Send multiple chunks.
1, // Reduce the number of chunks *in the same subject*.
// 992 /* (1000 - bencode overhead - checksum) */ * 253 /* Compatible with (1u8..) */ - 1 /* space for number_of_chunks */
];
let mut rng = small_rng();
for message_len in tested_lengths.iter() {
// Send a message to Bob.
let message: Vec<u8> = (0..*message_len).map(|_| rng.gen()).collect();
log! ("Sending " (message.len()) " bytes …");
let bob_id = unwrap!(bob.public_id());
let sending_f = unwrap!(
super::send(
alice.clone(),
bob_id,
Vec::from(&b"test_dht"[..]),
fallback,
message.clone()
)
.await
);
// Get that message from Alice.
let validator = super::FixedValidator::Exact(ByteBuf::from(&message[..]));
let rc = super::recv(bob.clone(), Vec::from(&b"test_dht"[..]), fallback, validator);
let rc = select(Box::pin(rc), Timer::sleep(99.)).await;
let received = match rc {
Either::Left((rc, _)) => unwrap!(rc),
Either::Right(_) => panic!("Out of time waiting for reply"),
};
assert_eq!(received, message);
if fallback_on {
// TODO: Refine the log test.
// TODO: Check that the HTTP fallback was NOT used if `!fallback_on`.
unwrap!(wait_for_log_re(&alice, 0.1, r"transmit] TBD, time to use the HTTP fallback\.\.\.").await)
// TODO: Check the time delta, with fallback 1 the delivery shouldn't take long.
}
let hn1 = crate::send_handlers_num();
drop(sending_f);
let hn2 = crate::send_handlers_num();
if cfg!(feature = "native") {
// Dropping SendHandlerRef results in the removal of the corresponding `Arc<SendHandler>`.
assert!(hn1 > 0 && hn2 == hn1 - 1, "hn1 {} hn2 {}", hn1, hn2)
} else {
// `SEND_HANDLERS` only tracks the arcs in the native helper.
assert!(hn1 == 0 && hn2 == 0, "hn1 {} hn2 {}", hn1, hn2)
}
}
destruction_check(alice).await;
destruction_check(bob).await;
}
/// Send and receive messages of various length and chunking via the DHT.
pub async fn peers_dht() { peers_exchange(json! ({"dht": "on"})).await }
#[cfg(not(feature = "native"))]
#[no_mangle]
pub extern "C" fn test_peers_dht(cb_id: i32) {
use std::ptr::null;
common::executor::spawn(async move {
peers_dht().await;
unsafe { call_back(cb_id, null(), 0) }
})
}
/// Using a minimal one second HTTP fallback which should happen before the DHT kicks in.
#[cfg(feature = "native")]
pub fn peers_http_fallback_recv() {
let ctx = MmCtxBuilder::new().into_mm_arc();
let addr = SocketAddr::new(unwrap!("127.0.0.1".parse()), 30204);
let server = unwrap!(super::http_fallback::new_http_fallback(ctx.weak(), addr));
unwrap!(CORE.lock()).spawn(server);
block_on(peers_exchange(json! ({
"http-fallback": "on",
"seednodes": ["127.0.0.1"],
"http-fallback-port": 30204
})))
}
#[cfg(not(feature = "native"))]
pub fn peers_http_fallback_recv() {}
#[cfg(feature = "native")]
pub fn peers_direct_send() {
use common::for_tests::wait_for_log;
// Unstable results on our MacOS CI server,
// which isn't a problem in general (direct UDP communication is a best effort optimization)
// but is bad for the CI tests.
// Might experiment more with MacOS in the future.
if cfg!(target_os = "macos") {
return;
}
// NB: Still need the DHT enabled in order for the pings to work.
let alice = block_on(peer(json! ({"dht": "on"}), 2121));
let bob = block_on(peer(json! ({"dht": "on"}), 2122));
let bob_id = unwrap!(bob.public_id());
// Bob isn't a friend yet.
let alice_pctx = unwrap!(super::PeersContext::from_ctx(&alice));
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(!alice_trans.friends.contains_key(&bob_id))
}
let mut rng = small_rng();
let message: Vec<u8> = (0..33).map(|_| rng.gen()).collect();
let _send_f = block_on(super::send(
alice.clone(),
bob_id,
Vec::from(&b"subj"[..]),
255,
message.clone(),
));
let recv_f = super::recv(
bob.clone(),
Vec::from(&b"subj"[..]),
255,
super::FixedValidator::AnythingGoes,
);
// Confirm that Bob was added into the friendlist and that we don't know its address yet.
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(alice_trans.friends.contains_key(&bob_id))
}
let bob_pctx = unwrap!(super::PeersContext::from_ctx(&bob));
assert_eq!(0, alice_pctx.direct_pings.load(Ordering::Relaxed));
assert_eq!(0, bob_pctx.direct_pings.load(Ordering::Relaxed));
// Hint at the Bob's endpoint.
unwrap!(super::investigate_peer(&alice, "127.0.0.1", 2122));
// Direct pings triggered by `investigate_peer`.
// NB: The sleep here is larger than expected because the actual pings start to fly only after the DHT initialization kicks in.
unwrap!(wait_for_log(&bob.log, 22., &|_| bob_pctx
.direct_pings
.load(Ordering::Relaxed)
> 0));
// Bob's reply.
unwrap!(wait_for_log(&alice.log, 22., &|_| alice_pctx
.direct_pings
.load(Ordering::Relaxed)
> 0));
// Confirm that Bob now has the address.
let bob_addr = SocketAddr::new(Ipv4Addr::new(127, 0, 0, 1).into(), 2122);
{
| destruction_check | identifier_name |
fbb_churn_eval_ensemb_alberto.py | ).write.save(path, format='parquet', mode='overwrite')
## Reading the Extra Features
dfExtraFeat = spark.read.parquet('/data/udf/vf_es/churn/extra_feats_mod/extra_feats/year={}/month={}/day={}'
.format(int(trcycle_ini[0:4]), int(trcycle_ini[4:6]), int(trcycle_ini[6:8])))
# Taking only the clients with a fbb service
dfExtraFeatfbb = dfExtraFeat.join(inittrdf_ini, ["num_cliente"], "leftsemi")
dfExtraFeatfbb = dfExtraFeatfbb.cache()
print "[Info Main FbbChurn] " + time.ctime() + " Count of the ExtraFeats: ", dfExtraFeatfbb.count()
# Taking the Extra Features of interest and adding their values for num_client when necessary
dfExtraFeatSel, selColumnas = addExtraFeatsEvol(dfExtraFeatfbb)
print "[Info Main FbbChurn] " + time.ctime() + " Calculating the total value of the extra feats for each number client"
dfillNa = fillNa(spark)
for kkey in dfillNa.keys():
if kkey not in dfExtraFeatSel.columns:
dfillNa.pop(kkey, None)
inittrdf = inittrdf_ini.join(dfExtraFeatSel, ["msisdn", "num_cliente", 'rgu'], how="left").na.fill(dfillNa)
print "[Info Main FbbChurn] " + time.ctime() + " Saving inittrdf to HDFS " +str(inittrdf.count())
#inittrdf.repartition(200).write.save(path, format='parquet', mode='overwrite')
#path1 = '/data/udf/vf_es/churn/fbb_tmp/unbaltrdf_' + tr_ttdates
#path2 = '/data/udf/vf_es/churn/fbb_tmp/valdf_' + tr_ttdates
#if (pathExist(path1)) and (pathExist(path2)):
#print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path1) + " and " + str(path2) + " already exist. Reading them."
#unbaltrdf = spark.read.parquet(path1)
#valdf = spark.read.parquet(path2)
#else:
#print "[Info Main FbbChurn] " + time.ctime() + " Number of clients after joining the Extra Feats to the training set" + str(inittrdf.count())
[unbaltrdf, valdf] = inittrdf.randomSplit([0.7, 0.3], 1234)
#print "[Info Main FbbChurn] " + time.ctime() + " Stat description of the target variable printed above"
unbaltrdf = unbaltrdf.cache()
valdf = valdf.cache()
#print "[Info Main FbbChurn] " + time.ctime() + " Saving unbaltrdf to HDFS " + str(unbaltrdf.count())
#print "[Info Main FbbChurn] " + time.ctime() + " Saving valdf to HDFS " + str(valdf.count())
#unbaltrdf.repartition(300).write.save(path1,format='parquet', mode='overwrite')
#valdf.repartition(300).write.save(path2,format='parquet', mode='overwrite')
# 1.2. Balanced df for training
#path = "/data/udf/vf_es/churn/fbb_tmp/trdf_" + tr_ttdates
#if (pathExist(path)):
unbaltrdf.groupBy('label').agg(count('*')).show()
print "[Info Main FbbChurn]" + time.ctime() + " Count on label column for unbalanced tr set showed above"
trdf = balance_df2(unbaltrdf, 'label')
trdf.groupBy('label').agg(count('*')).show()
#print "[Info Main FbbChurn] " + time.ctime() + " Saving trdf to HDFS "
#trdf.repartition(300).write.save(path, format='parquet',mode='overwrite')
# 1.3. Feature selection
allFeats = trdf.columns
# Getting only the numeric variables
catCols = [item[0] for item in trdf.dtypes if item[1].startswith('string')]
numerical_feats = list(set(allFeats) - set(list(
set().union(getIdFeats(), getIdFeats_tr(), getNoInputFeats(), catCols, [c + "_enc" for c in getCatFeatsCrm()],
["label"]))))
noninf_feats = getNonInfFeats(trdf, numerical_feats)
for f in noninf_feats:
print "[Info Main FbbChurn] Non-informative feat: " + f
####################
### 2. TEST DATA ###
####################
#path = "/data/udf/vf_es/churn/fbb_tmp/ttdf_ini_" + tr_ttdates
#if (pathExist(path)):
#print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path) + " already exists. Reading it."
#ttdf_ini = spark.read.parquet(path)
#else:
ttdf_ini = getFbbChurnLabeledCarCycles(spark, origin, ttcycle_ini, selcols,horizon)
#print "[Info Main FbbChurn] " + time.ctime() + " Saving ttdf_ini to HDFS "
#ttdf_ini.repartition(200).write.save(path,format='parquet', mode='overwrite')
#ttdf_ini.describe('label').show()
#path = "/data/udf/vf_es/churn/fbb_tmp/ttdf_" + tr_ttdates
#if (pathExist(path)):
# print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path) + " already exists. Reading it."
# ttdf = spark.read.parquet(path)
#else:
dfExtraFeat_tt = spark.read.parquet('/data/udf/vf_es/churn/extra_feats_mod/extra_feats/year={}/month={}/day={}'
.format(int(ttcycle_ini[0:4]), int(ttcycle_ini[4:6]), int(ttcycle_ini[6:8])))
dfExtraFeatfbb_tt = dfExtraFeat_tt.join(ttdf_ini.select('num_cliente'), on='num_cliente', how='leftsemi')
print(dfExtraFeatfbb_tt.select('num_cliente').distinct().count(), ttdf_ini.select('num_cliente').distinct().count())
dfExtraFeatfbb_tt = dfExtraFeatfbb_tt.cache()
print("[Info Main FbbChurn] " + time.ctime() + " Count of the ExtraFeats ", dfExtraFeatfbb_tt.count())
dfExtraFeat_ttSel, selColumnas = addExtraFeatsEvol(dfExtraFeatfbb_tt)
#print "[Info Main FbbChurn] " + time.ctime() + " Calculating the total value of the extra feats for each number client in tt"
dfillNa = fillNa(spark)
for kkey in dfillNa.keys():
if kkey not in dfExtraFeat_ttSel.columns:
dfillNa.pop(kkey, None)
ttdf = ttdf_ini.join(dfExtraFeat_ttSel, ["msisdn", "num_cliente", 'rgu'], how="left").na.fill(dfillNa)
print "[Info Main FbbChurn] " + time.ctime() + " Number of clients after joining the Extra Feats to the test set " + str(ttdf.count())
print "[Info Main FbbChurn] " + time.ctime() + " Saving ttdf to HDFS "
#tdf = ttdf.repartition(300)
#ttdf.repartition(300).write.save(path, format='parquet', mode='overwrite')
####################
### 3. MODELLING ###
####################
featCols = list(set(numerical_feats) - set(noninf_feats))
for f in featCols:
print "[Info Main FbbChurn] Input feat: " + f
assembler = VectorAssembler(inputCols=featCols, outputCol="features")
classifier = RandomForestClassifier(featuresCol="features", \
labelCol="label", \
maxDepth=15, \
maxBins=32, \
minInstancesPerNode=200, \
impurity="gini", \
featureSubsetStrategy="sqrt", \
subsamplingRate=0.7, \
numTrees=800, \
seed=1234)
pipeline = Pipeline(stages=[assembler, classifier])
model = pipeline.fit(trdf)
feat_importance = getOrderedRelevantFeats(model, featCols, 'f', 'rf')
| for fimp in feat_importance:
print "[Info Main FbbChurn] Imp feat " + str(fimp[0]) + ": " + str(fimp[1])
| random_line_split | |
fbb_churn_eval_ensemb_alberto.py | SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.minExecutors=%s" % (MIN_N_EXECUTORS)
SPARK_COMMON_OPTS += " --conf spark.executor.cores=%s" % (N_CORES_EXECUTOR)
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.executorIdleTimeout=%s" % (EXECUTOR_IDLE_MAX_TIME)
# SPARK_COMMON_OPTS += " --conf spark.ui.port=58235"
SPARK_COMMON_OPTS += " --conf spark.port.maxRetries=100"
SPARK_COMMON_OPTS += " --conf spark.app.name='%s'" % (app_name)
SPARK_COMMON_OPTS += " --conf spark.submit.deployMode=client"
SPARK_COMMON_OPTS += " --conf spark.ui.showConsoleProgress=true"
SPARK_COMMON_OPTS += " --conf spark.sql.broadcastTimeout=1200"
SPARK_COMMON_OPTS += " --conf spark.yarn.executor.memoryOverhead={}".format(executor_overhead)
SPARK_COMMON_OPTS += " --conf spark.yarn.executor.driverOverhead={}".format(driver_overhead)
SPARK_COMMON_OPTS += " --conf spark.shuffle.service.enabled = true"
BDA_ENV = os.environ.get('BDA_USER_HOME', '')
# Attach bda-core-ra codebase
SPARK_COMMON_OPTS+=" --files {}/scripts/properties/red_agent/nodes.properties,{}/scripts/properties/red_agent/nodes-de.properties,{}/scripts/properties/red_agent/nodes-es.properties,{}/scripts/properties/red_agent/nodes-ie.properties,{}/scripts/properties/red_agent/nodes-it.properties,{}/scripts/properties/red_agent/nodes-pt.properties,{}/scripts/properties/red_agent/nodes-uk.properties".format(*[BDA_ENV]*7)
os.environ["SPARK_COMMON_OPTS"] = SPARK_COMMON_OPTS
os.environ["PYSPARK_SUBMIT_ARGS"] = "%s pyspark-shell " % SPARK_COMMON_OPTS
#os.environ["SPARK_EXTRA_CONF_PARAMETERS"] = '--conf spark.yarn.jars=hdfs:///data/raw/public/lib_spark_2_1_0_jars_SPARK-18971/*'
def initialize(app_name, min_n_executors = 1, max_n_executors = 15, n_cores = 4, executor_memory = "16g", driver_memory="8g"):
import time
start_time = time.time()
print("_initialize spark")
#import pykhaos.utils.pyspark_configuration as pyspark_config
sc, spark, sql_context = get_spark_session(app_name=app_name, log_level="OFF", min_n_executors = min_n_executors, max_n_executors = max_n_executors, n_cores = n_cores,
executor_memory = executor_memory, driver_memory=driver_memory)
print("Ended spark session: {} secs | default parallelism={}".format(time.time() - start_time,
sc.defaultParallelism))
return spark
if __name__ == "__main__":
set_paths()
from pykhaos.utils.date_functions import *
from utils_fbb_churn import *
# create Spark context with Spark configuration
print '[' + time.ctime() + ']', 'Process started'
global sqlContext
spark = initialize("VF_ES AMDOCS FBB Churn Prediction ", executor_memory="16g", min_n_executors=10)
print('Spark Configuration used', spark.sparkContext.getConf().getAll())
selcols = getIdFeats() + getCrmFeats() + getBillingFeats() + getMobSopoFeats() + getOrdersFeats()
now = datetime.now()
date_name = str(now.year) + str(now.month).rjust(2, '0') + str(now.day).rjust(2, '0')
origin = '/user/hive/warehouse/tests_es.db/jvmm_amdocs_ids_'
## ARGUMENTS
###############
parser = argparse.ArgumentParser(
description='Generate score table for fbb model',
epilog='Please report bugs and issues to Beatriz <beatriz.gonzalez2@vodafone.com>')
parser.add_argument('-s', '--training_day', metavar='<TRAINING_DAY>', type=str, required=True,
help='Training day YYYYMMDD. Date of the CAR taken to train the model.')
parser.add_argument('-p', '--prediction_day', metavar='<PREDICTION_DAY>', type=str, required=True,
help='Prediction day YYYYMMDD.')
parser.add_argument('-o', '--horizon', metavar='<horizon>', type=int, required=True,
help='Number of cycles used to gather the portability requests from the training day.')
args = parser.parse_args()
print(args)
# Cycle used for CAR and Extra Feats in the training set
trcycle_ini = args.training_day# '20181130' # Training data
# Number of cycles to gather dismiss requests
horizon = args.horizon #4
# Cycle used for CAR and Extra Feats in the test set
ttcycle_ini = args.prediction_day#'20181231' # Test data
tr_ttdates = trcycle_ini + '_' + ttcycle_ini
########################
### 1. TRAINING DATA ###
########################
# 1.1. Loading training data
inittrdf_ini = getFbbChurnLabeledCarCycles(spark, origin, trcycle_ini, selcols, horizon)
#inittrdf_ini.repartition(200).write.save(path, format='parquet', mode='overwrite')
## Reading the Extra Features
dfExtraFeat = spark.read.parquet('/data/udf/vf_es/churn/extra_feats_mod/extra_feats/year={}/month={}/day={}'
.format(int(trcycle_ini[0:4]), int(trcycle_ini[4:6]), int(trcycle_ini[6:8])))
# Taking only the clients with a fbb service
dfExtraFeatfbb = dfExtraFeat.join(inittrdf_ini, ["num_cliente"], "leftsemi")
dfExtraFeatfbb = dfExtraFeatfbb.cache()
print "[Info Main FbbChurn] " + time.ctime() + " Count of the ExtraFeats: ", dfExtraFeatfbb.count()
# Taking the Extra Features of interest and adding their values for num_client when necessary
dfExtraFeatSel, selColumnas = addExtraFeatsEvol(dfExtraFeatfbb)
print "[Info Main FbbChurn] " + time.ctime() + " Calculating the total value of the extra feats for each number client"
dfillNa = fillNa(spark)
for kkey in dfillNa.keys():
if kkey not in dfExtraFeatSel.columns:
dfillNa.pop(kkey, None)
inittrdf = inittrdf_ini.join(dfExtraFeatSel, ["msisdn", "num_cliente", 'rgu'], how="left").na.fill(dfillNa)
print "[Info Main FbbChurn] " + time.ctime() + " Saving inittrdf to HDFS " +str(inittrdf.count())
#inittrdf.repartition(200).write.save(path, format='parquet', mode='overwrite')
#path1 = '/data/udf/vf_es/churn/fbb_tmp/unbaltrdf_' + tr_ttdates
#path2 = '/data/udf/vf_es/churn/fbb_tmp/valdf_' + tr_ttdates
#if (pathExist(path1)) and (pathExist(path2)):
#print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path1) + " and " + str(path2) + " already exist. Reading them."
#unbaltrdf = spark.read.parquet(path1)
#valdf = spark.read.parquet(path2)
#else:
#print "[Info Main FbbChurn] " + time.ctime() + " Number of clients after joining the Extra Feats to the training set" + str(inittrdf.count())
[unbaltrdf, valdf] = inittrdf.randomSplit([0.7, 0.3], 1234)
#print "[Info Main FbbChurn] | MAX_N_EXECUTORS = max_n_executors
MIN_N_EXECUTORS = min_n_executors
N_CORES_EXECUTOR = n_cores
EXECUTOR_IDLE_MAX_TIME = 120
EXECUTOR_MEMORY = executor_memory
DRIVER_MEMORY = driver_memory
N_CORES_DRIVER = 1
MEMORY_OVERHEAD = N_CORES_EXECUTOR * 2048
QUEUE = "root.BDPtenants.es.medium"
BDA_CORE_VERSION = "1.0.0"
SPARK_COMMON_OPTS = os.environ.get('SPARK_COMMON_OPTS', '')
SPARK_COMMON_OPTS += " --executor-memory %s --driver-memory %s" % (EXECUTOR_MEMORY, DRIVER_MEMORY)
SPARK_COMMON_OPTS += " --conf spark.shuffle.manager=tungsten-sort"
SPARK_COMMON_OPTS += " --queue %s" % QUEUE
# Dynamic allocation configuration
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.enabled=true"
SPARK_COMMON_OPTS += " --conf spark.shuffle.service.enabled=true"
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.maxExecutors=%s" % (MAX_N_EXECUTORS) | identifier_body | |
fbb_churn_eval_ensemb_alberto.py | )
SPARK_COMMON_OPTS += " --conf spark.shuffle.service.enabled = true"
BDA_ENV = os.environ.get('BDA_USER_HOME', '')
# Attach bda-core-ra codebase
SPARK_COMMON_OPTS+=" --files {}/scripts/properties/red_agent/nodes.properties,{}/scripts/properties/red_agent/nodes-de.properties,{}/scripts/properties/red_agent/nodes-es.properties,{}/scripts/properties/red_agent/nodes-ie.properties,{}/scripts/properties/red_agent/nodes-it.properties,{}/scripts/properties/red_agent/nodes-pt.properties,{}/scripts/properties/red_agent/nodes-uk.properties".format(*[BDA_ENV]*7)
os.environ["SPARK_COMMON_OPTS"] = SPARK_COMMON_OPTS
os.environ["PYSPARK_SUBMIT_ARGS"] = "%s pyspark-shell " % SPARK_COMMON_OPTS
#os.environ["SPARK_EXTRA_CONF_PARAMETERS"] = '--conf spark.yarn.jars=hdfs:///data/raw/public/lib_spark_2_1_0_jars_SPARK-18971/*'
def initialize(app_name, min_n_executors = 1, max_n_executors = 15, n_cores = 4, executor_memory = "16g", driver_memory="8g"):
import time
start_time = time.time()
print("_initialize spark")
#import pykhaos.utils.pyspark_configuration as pyspark_config
sc, spark, sql_context = get_spark_session(app_name=app_name, log_level="OFF", min_n_executors = min_n_executors, max_n_executors = max_n_executors, n_cores = n_cores,
executor_memory = executor_memory, driver_memory=driver_memory)
print("Ended spark session: {} secs | default parallelism={}".format(time.time() - start_time,
sc.defaultParallelism))
return spark
if __name__ == "__main__":
set_paths()
from pykhaos.utils.date_functions import *
from utils_fbb_churn import *
# create Spark context with Spark configuration
print '[' + time.ctime() + ']', 'Process started'
global sqlContext
spark = initialize("VF_ES AMDOCS FBB Churn Prediction ", executor_memory="16g", min_n_executors=10)
print('Spark Configuration used', spark.sparkContext.getConf().getAll())
selcols = getIdFeats() + getCrmFeats() + getBillingFeats() + getMobSopoFeats() + getOrdersFeats()
now = datetime.now()
date_name = str(now.year) + str(now.month).rjust(2, '0') + str(now.day).rjust(2, '0')
origin = '/user/hive/warehouse/tests_es.db/jvmm_amdocs_ids_'
## ARGUMENTS
###############
parser = argparse.ArgumentParser(
description='Generate score table for fbb model',
epilog='Please report bugs and issues to Beatriz <beatriz.gonzalez2@vodafone.com>')
parser.add_argument('-s', '--training_day', metavar='<TRAINING_DAY>', type=str, required=True,
help='Training day YYYYMMDD. Date of the CAR taken to train the model.')
parser.add_argument('-p', '--prediction_day', metavar='<PREDICTION_DAY>', type=str, required=True,
help='Prediction day YYYYMMDD.')
parser.add_argument('-o', '--horizon', metavar='<horizon>', type=int, required=True,
help='Number of cycles used to gather the portability requests from the training day.')
args = parser.parse_args()
print(args)
# Cycle used for CAR and Extra Feats in the training set
trcycle_ini = args.training_day# '20181130' # Training data
# Number of cycles to gather dismiss requests
horizon = args.horizon #4
# Cycle used for CAR and Extra Feats in the test set
ttcycle_ini = args.prediction_day#'20181231' # Test data
tr_ttdates = trcycle_ini + '_' + ttcycle_ini
########################
### 1. TRAINING DATA ###
########################
# 1.1. Loading training data
inittrdf_ini = getFbbChurnLabeledCarCycles(spark, origin, trcycle_ini, selcols, horizon)
#inittrdf_ini.repartition(200).write.save(path, format='parquet', mode='overwrite')
## Reading the Extra Features
dfExtraFeat = spark.read.parquet('/data/udf/vf_es/churn/extra_feats_mod/extra_feats/year={}/month={}/day={}'
.format(int(trcycle_ini[0:4]), int(trcycle_ini[4:6]), int(trcycle_ini[6:8])))
# Taking only the clients with a fbb service
dfExtraFeatfbb = dfExtraFeat.join(inittrdf_ini, ["num_cliente"], "leftsemi")
dfExtraFeatfbb = dfExtraFeatfbb.cache()
print "[Info Main FbbChurn] " + time.ctime() + " Count of the ExtraFeats: ", dfExtraFeatfbb.count()
# Taking the Extra Features of interest and adding their values for num_client when necessary
dfExtraFeatSel, selColumnas = addExtraFeatsEvol(dfExtraFeatfbb)
print "[Info Main FbbChurn] " + time.ctime() + " Calculating the total value of the extra feats for each number client"
dfillNa = fillNa(spark)
for kkey in dfillNa.keys():
if kkey not in dfExtraFeatSel.columns:
dfillNa.pop(kkey, None)
inittrdf = inittrdf_ini.join(dfExtraFeatSel, ["msisdn", "num_cliente", 'rgu'], how="left").na.fill(dfillNa)
print "[Info Main FbbChurn] " + time.ctime() + " Saving inittrdf to HDFS " +str(inittrdf.count())
#inittrdf.repartition(200).write.save(path, format='parquet', mode='overwrite')
#path1 = '/data/udf/vf_es/churn/fbb_tmp/unbaltrdf_' + tr_ttdates
#path2 = '/data/udf/vf_es/churn/fbb_tmp/valdf_' + tr_ttdates
#if (pathExist(path1)) and (pathExist(path2)):
#print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path1) + " and " + str(path2) + " already exist. Reading them."
#unbaltrdf = spark.read.parquet(path1)
#valdf = spark.read.parquet(path2)
#else:
#print "[Info Main FbbChurn] " + time.ctime() + " Number of clients after joining the Extra Feats to the training set" + str(inittrdf.count())
[unbaltrdf, valdf] = inittrdf.randomSplit([0.7, 0.3], 1234)
#print "[Info Main FbbChurn] " + time.ctime() + " Stat description of the target variable printed above"
unbaltrdf = unbaltrdf.cache()
valdf = valdf.cache()
#print "[Info Main FbbChurn] " + time.ctime() + " Saving unbaltrdf to HDFS " + str(unbaltrdf.count())
#print "[Info Main FbbChurn] " + time.ctime() + " Saving valdf to HDFS " + str(valdf.count())
#unbaltrdf.repartition(300).write.save(path1,format='parquet', mode='overwrite')
#valdf.repartition(300).write.save(path2,format='parquet', mode='overwrite')
# 1.2. Balanced df for training
#path = "/data/udf/vf_es/churn/fbb_tmp/trdf_" + tr_ttdates
#if (pathExist(path)):
unbaltrdf.groupBy('label').agg(count('*')).show()
print "[Info Main FbbChurn]" + time.ctime() + " Count on label column for unbalanced tr set showed above"
trdf = balance_df2(unbaltrdf, 'label')
trdf.groupBy('label').agg(count('*')).show()
#print "[Info Main FbbChurn] " + time.ctime() + " Saving trdf to HDFS "
#trdf.repartition(300).write.save(path, format='parquet',mode='overwrite')
# 1.3. Feature selection
allFeats = trdf.columns
# Getting only the numeric variables
catCols = [item[0] for item in trdf.dtypes if item[1].startswith('string')]
numerical_feats = list(set(allFeats) - set(list(
set().union(getIdFeats(), getIdFeats_tr(), getNoInputFeats(), catCols, [c + "_enc" for c in getCatFeatsCrm()],
["label"]))))
noninf_feats = getNonInfFeats(trdf, numerical_feats)
for f in noninf_feats:
| print "[Info Main FbbChurn] Non-informative feat: " + f | conditional_block | |
fbb_churn_eval_ensemb_alberto.py | (min_n_executors = 1, max_n_executors = 15, n_cores = 8, executor_memory = "16g", driver_memory="8g",
app_name = "Python app", driver_overhead="1g", executor_overhead='3g'):
MAX_N_EXECUTORS = max_n_executors
MIN_N_EXECUTORS = min_n_executors
N_CORES_EXECUTOR = n_cores
EXECUTOR_IDLE_MAX_TIME = 120
EXECUTOR_MEMORY = executor_memory
DRIVER_MEMORY = driver_memory
N_CORES_DRIVER = 1
MEMORY_OVERHEAD = N_CORES_EXECUTOR * 2048
QUEUE = "root.BDPtenants.es.medium"
BDA_CORE_VERSION = "1.0.0"
SPARK_COMMON_OPTS = os.environ.get('SPARK_COMMON_OPTS', '')
SPARK_COMMON_OPTS += " --executor-memory %s --driver-memory %s" % (EXECUTOR_MEMORY, DRIVER_MEMORY)
SPARK_COMMON_OPTS += " --conf spark.shuffle.manager=tungsten-sort"
SPARK_COMMON_OPTS += " --queue %s" % QUEUE
# Dynamic allocation configuration
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.enabled=true"
SPARK_COMMON_OPTS += " --conf spark.shuffle.service.enabled=true"
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.maxExecutors=%s" % (MAX_N_EXECUTORS)
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.minExecutors=%s" % (MIN_N_EXECUTORS)
SPARK_COMMON_OPTS += " --conf spark.executor.cores=%s" % (N_CORES_EXECUTOR)
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.executorIdleTimeout=%s" % (EXECUTOR_IDLE_MAX_TIME)
# SPARK_COMMON_OPTS += " --conf spark.ui.port=58235"
SPARK_COMMON_OPTS += " --conf spark.port.maxRetries=100"
SPARK_COMMON_OPTS += " --conf spark.app.name='%s'" % (app_name)
SPARK_COMMON_OPTS += " --conf spark.submit.deployMode=client"
SPARK_COMMON_OPTS += " --conf spark.ui.showConsoleProgress=true"
SPARK_COMMON_OPTS += " --conf spark.sql.broadcastTimeout=1200"
SPARK_COMMON_OPTS += " --conf spark.yarn.executor.memoryOverhead={}".format(executor_overhead)
SPARK_COMMON_OPTS += " --conf spark.yarn.executor.driverOverhead={}".format(driver_overhead)
SPARK_COMMON_OPTS += " --conf spark.shuffle.service.enabled = true"
BDA_ENV = os.environ.get('BDA_USER_HOME', '')
# Attach bda-core-ra codebase
SPARK_COMMON_OPTS+=" --files {}/scripts/properties/red_agent/nodes.properties,{}/scripts/properties/red_agent/nodes-de.properties,{}/scripts/properties/red_agent/nodes-es.properties,{}/scripts/properties/red_agent/nodes-ie.properties,{}/scripts/properties/red_agent/nodes-it.properties,{}/scripts/properties/red_agent/nodes-pt.properties,{}/scripts/properties/red_agent/nodes-uk.properties".format(*[BDA_ENV]*7)
os.environ["SPARK_COMMON_OPTS"] = SPARK_COMMON_OPTS
os.environ["PYSPARK_SUBMIT_ARGS"] = "%s pyspark-shell " % SPARK_COMMON_OPTS
#os.environ["SPARK_EXTRA_CONF_PARAMETERS"] = '--conf spark.yarn.jars=hdfs:///data/raw/public/lib_spark_2_1_0_jars_SPARK-18971/*'
def initialize(app_name, min_n_executors = 1, max_n_executors = 15, n_cores = 4, executor_memory = "16g", driver_memory="8g"):
import time
start_time = time.time()
print("_initialize spark")
#import pykhaos.utils.pyspark_configuration as pyspark_config
sc, spark, sql_context = get_spark_session(app_name=app_name, log_level="OFF", min_n_executors = min_n_executors, max_n_executors = max_n_executors, n_cores = n_cores,
executor_memory = executor_memory, driver_memory=driver_memory)
print("Ended spark session: {} secs | default parallelism={}".format(time.time() - start_time,
sc.defaultParallelism))
return spark
if __name__ == "__main__":
set_paths()
from pykhaos.utils.date_functions import *
from utils_fbb_churn import *
# create Spark context with Spark configuration
print '[' + time.ctime() + ']', 'Process started'
global sqlContext
spark = initialize("VF_ES AMDOCS FBB Churn Prediction ", executor_memory="16g", min_n_executors=10)
print('Spark Configuration used', spark.sparkContext.getConf().getAll())
selcols = getIdFeats() + getCrmFeats() + getBillingFeats() + getMobSopoFeats() + getOrdersFeats()
now = datetime.now()
date_name = str(now.year) + str(now.month).rjust(2, '0') + str(now.day).rjust(2, '0')
origin = '/user/hive/warehouse/tests_es.db/jvmm_amdocs_ids_'
## ARGUMENTS
###############
parser = argparse.ArgumentParser(
description='Generate score table for fbb model',
epilog='Please report bugs and issues to Beatriz <beatriz.gonzalez2@vodafone.com>')
parser.add_argument('-s', '--training_day', metavar='<TRAINING_DAY>', type=str, required=True,
help='Training day YYYYMMDD. Date of the CAR taken to train the model.')
parser.add_argument('-p', '--prediction_day', metavar='<PREDICTION_DAY>', type=str, required=True,
help='Prediction day YYYYMMDD.')
parser.add_argument('-o', '--horizon', metavar='<horizon>', type=int, required=True,
help='Number of cycles used to gather the portability requests from the training day.')
args = parser.parse_args()
print(args)
# Cycle used for CAR and Extra Feats in the training set
trcycle_ini = args.training_day# '20181130' # Training data
# Number of cycles to gather dismiss requests
horizon = args.horizon #4
# Cycle used for CAR and Extra Feats in the test set
ttcycle_ini = args.prediction_day#'20181231' # Test data
tr_ttdates = trcycle_ini + '_' + ttcycle_ini
########################
### 1. TRAINING DATA ###
########################
# 1.1. Loading training data
inittrdf_ini = getFbbChurnLabeledCarCycles(spark, origin, trcycle_ini, selcols, horizon)
#inittrdf_ini.repartition(200).write.save(path, format='parquet', mode='overwrite')
## Reading the Extra Features
dfExtraFeat = spark.read.parquet('/data/udf/vf_es/churn/extra_feats_mod/extra_feats/year={}/month={}/day={}'
.format(int(trcycle_ini[0:4]), int(trcycle_ini[4:6]), int(trcycle_ini[6:8])))
# Taking only the clients with a fbb service
dfExtraFeatfbb = dfExtraFeat.join(inittrdf_ini, ["num_cliente"], "leftsemi")
dfExtraFeatfbb = dfExtraFeatfbb.cache()
print "[Info Main FbbChurn] " + time.ctime() + " Count of the ExtraFeats: ", dfExtraFeatfbb.count()
# Taking the Extra Features of interest and adding their values for num_client when necessary
dfExtraFeatSel, selColumnas = addExtraFeatsEvol(dfExtraFeatfbb)
print "[Info Main FbbChurn] " + time.ctime() + " Calculating the total value of the extra feats for each number client"
dfillNa = fillNa(spark)
for kkey in dfillNa.keys():
if kkey not in dfExtraFeatSel.columns:
dfillNa.pop(kkey, None)
inittrdf = inittrdf_ini.join(dfExtraFeatSel, ["msisdn", "num_cliente", 'rgu'], how="left").na.fill(dfillNa)
print "[Info Main FbbChurn] " + time.ctime() + " Saving inittrdf to HDFS " +str(inittrdf.count())
#inittrdf.repartition(200).write.save(path, format='parquet', mode='overwrite')
#path1 = '/data/udf/vf_es/churn/fbb_tmp/unbaltrdf_' + tr_ttdates
#path2 = '/data/udf/vf_es/churn/fbb_tmp/valdf_' + tr_ttdates
#if (pathExist(path1)) and (pathExist(path2)):
#print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path1) + " and " + str(path2) + " already exist. Reading them."
#unbaltrdf = spark.read.parquet(path1)
#valdf = spark.read.parquet(path2)
#else:
#print "[Info Main FbbChurn] " + time.ctime() + " | setting_bdp | identifier_name | |
feature_extraction.py | 1), detect valleys (local minima) instead of peaks.
show : bool, optional (default = False)
if True (1), plot data in matplotlib figure.
ax : a matplotlib.axes.Axes instance, optional (default = None).
Returns
-------
ind : 1D array_like
indeces of the peaks in `x`.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
"""
x = np.atleast_1d(x).astype('float64')
if x.size < 3:
return np.array([], dtype=int)
if valley:
x = -x
# find indices of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
indnan = np.where(np.isnan(x))[0]
if indnan.size:
x[indnan] = np.inf
dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if not edge:
ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
# handle NaN's
if ind.size and indnan.size:
# NaN's and values close to NaN's cannot be peaks
ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan - 1, indnan + 1))), invert=True)]
# first and last values of x cannot be peaks
if ind.size and ind[0] == 0:
ind = ind[1:]
if ind.size and ind[-1] == x.size - 1:
ind = ind[:-1]
# remove peaks < minimum peak height
if ind.size and mph is not None:
ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
if ind.size and threshold > 0:
dx = np.min(np.vstack([x[ind] - x[ind - 1], x[ind] - x[ind + 1]]), axis=0)
ind = np.delete(ind, np.where(dx < threshold)[0])
# detect small peaks closer than minimum peak distance
if ind.size and mpd > 1:
ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height
idel = np.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
# keep peaks with the same height if kpsh is True
idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
& (x[ind[i]] > x[ind] if kpsh else True)
idel[i] = 0 # Keep current peak
# remove the small peaks and sort back the indices by their occurrence
ind = np.sort(ind[~idel])
if show:
if indnan.size:
x[indnan] = np.nan
if valley:
x = -x
_plot(x, mph, mpd, threshold, edge, valley, ax, ind)
return ind
def get_values(y_values, T, N, f_s):
y_values = y_values
x_values = [(1 / f_s) * kk for kk in range(0, len(y_values))]
return x_values, y_values
def get_fft_values(y_values, T, N, f_s):
f_values = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)
fft_values_ = fft(y_values)
fft_values = 2.0 / N * np.abs(fft_values_[0:N // 2])
return f_values, fft_values
def get_psd_values(y_values, T, N, f_s):
f_values, psd_values = welch(y_values, fs=f_s)
return f_values, psd_values
def autocorr(x):
result = np.correlate(x, x, mode='full')
return result[len(result) // 2:]
def get_autocorr_values(y_values, T, N, f_s):
autocorr_values = autocorr(y_values)
x_values = np.array([T * jj for jj in range(0, N)])
return x_values, autocorr_values
def get_first_n_peaks(x, y, no_peaks=5):
x_, y_ = list(x), list(y)
if len(x_) >= no_peaks:
return x_[:no_peaks], y_[:no_peaks]
else:
missing_no_peaks = no_peaks - len(x_)
return x_ + [0] * missing_no_peaks, y_ + [0] * missing_no_peaks
def get_features(x_values, y_values, mph):
indices_peaks = detect_peaks(y_values, mph=mph)
peaks_x, peaks_y = get_first_n_peaks(x_values[indices_peaks], y_values[indices_peaks])
return peaks_x + peaks_y
def calculate_entropy(list_values):
counter_values = Counter(list_values).most_common()
probabilities = [elem[1] / len(list_values) for elem in counter_values]
entropy = scipy.stats.entropy(probabilities)
return entropy
def calculate_statistics(list_values):
n5 = np.nanpercentile(list_values, 5)
n25 = np.nanpercentile(list_values, 25)
n75 = np.nanpercentile(list_values, 75)
n95 = np.nanpercentile(list_values, 95)
median = np.nanpercentile(list_values, 50)
mean = np.nanmean(list_values)
std = np.nanstd(list_values)
var = np.nanvar(list_values)
rms = np.nanmean(np.sqrt(list_values ** 2))
return [n5, n25, n75, n95, median, mean, std, var, rms]
def calculate_crossings(list_values):
zero_crossing_indices = np.nonzero(np.diff(np.array(list_values) > 0))[0]
no_zero_crossings = len(zero_crossing_indices)
mean_crossing_indices = np.nonzero(np.diff(np.array(list_values) > np.nanmean(list_values)))[0]
no_mean_crossings = len(mean_crossing_indices)
return [no_zero_crossings, no_mean_crossings]
def get_single_features(list_values):
entropy = calculate_entropy(list_values)
crossings = calculate_crossings(list_values)
statistics = calculate_statistics(list_values)
return [entropy] + crossings + statistics
def extract_features(dataset, labels, T, N, f_s, denominator):
percentile = 5
list_of_features = []
list_of_labels = []
for signal_no in range(0, len(dataset)):
features = []
list_of_labels.append(labels[signal_no])
for signal_comp in range(0, dataset.shape[2]):
signal = dataset[signal_no, :, signal_comp]
signal_min = np.nanpercentile(signal, percentile)
signal_max = np.nanpercentile(signal, 100 - percentile)
# ijk = (100 - 2*percentile)/10
mph = signal_min + (signal_max - signal_min) / denominator
# Peak features
features += get_features(*get_psd_values(signal, T, N, f_s), mph)
features += get_features(*get_fft_values(signal, T, N, f_s), mph)
features += get_features(*get_autocorr_values(signal, T, N, f_s), mph)
# Single features
features += get_single_features(signal)
list_of_features.append(features)
return np.array(list_of_features), np.array(list_of_labels)
def evaluate_model(clf, X_train, y_train, X_test, y_test):
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
acc = accuracy(y_test, y_pred)
score = acc * 100.0
return score
# Summarize scores
def summarize_results(scores, classifiers):
# summarize mean and standard deviation
for score, clf in zip(scores, classifiers):
m, s = np.mean(score), np.std(score)
print("-"*40)
print('Average score: %.3f%% (+/-%.3f)' % (m, s))
print(clf, "\n")
# Box-plot of scores
# plt.boxplot(scores, labels=[clf.__class__.__name__ if clf.__class__.__name__ != 'Pipeline'
# else clf[-1].__class__.__name__ for clf in classifiers])
# plt.title("Accuracy")
# plt.show()
# Run an experiment
def | run_experiment | identifier_name | |
feature_extraction.py | edge='rising',
kpsh=False, valley=False, show=False, ax=None):
"""Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
mph : {None, number}, optional (default = None)
detect peaks that are greater than minimum peak height.
mpd : positive integer, optional (default = 1)
detect peaks that are at least separated by minimum peak distance (in
number of data).
threshold : positive number, optional (default = 0)
detect peaks (valleys) that are greater (smaller) than `threshold`
in relation to their immediate neighbors.
edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
for a flat peak, keep only the rising edge ('rising'), only the
falling edge ('falling'), both edges ('both'), or don't detect a
flat peak (None).
kpsh : bool, optional (default = False)
keep peaks with same height even if they are closer than `mpd`.
valley : bool, optional (default = False)
if True (1), detect valleys (local minima) instead of peaks.
show : bool, optional (default = False)
if True (1), plot data in matplotlib figure.
ax : a matplotlib.axes.Axes instance, optional (default = None).
Returns
-------
ind : 1D array_like
indeces of the peaks in `x`.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
"""
x = np.atleast_1d(x).astype('float64')
if x.size < 3:
return np.array([], dtype=int)
if valley:
x = -x
# find indices of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
indnan = np.where(np.isnan(x))[0]
if indnan.size:
x[indnan] = np.inf
dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if not edge:
ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
# handle NaN's
if ind.size and indnan.size:
# NaN's and values close to NaN's cannot be peaks
ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan - 1, indnan + 1))), invert=True)]
# first and last values of x cannot be peaks
if ind.size and ind[0] == 0:
ind = ind[1:]
if ind.size and ind[-1] == x.size - 1:
ind = ind[:-1]
# remove peaks < minimum peak height
if ind.size and mph is not None:
ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
if ind.size and threshold > 0:
dx = np.min(np.vstack([x[ind] - x[ind - 1], x[ind] - x[ind + 1]]), axis=0)
ind = np.delete(ind, np.where(dx < threshold)[0])
# detect small peaks closer than minimum peak distance
if ind.size and mpd > 1:
ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height
idel = np.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
# keep peaks with the same height if kpsh is True
|
# remove the small peaks and sort back the indices by their occurrence
ind = np.sort(ind[~idel])
if show:
if indnan.size:
x[indnan] = np.nan
if valley:
x = -x
_plot(x, mph, mpd, threshold, edge, valley, ax, ind)
return ind
def get_values(y_values, T, N, f_s):
y_values = y_values
x_values = [(1 / f_s) * kk for kk in range(0, len(y_values))]
return x_values, y_values
def get_fft_values(y_values, T, N, f_s):
f_values = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)
fft_values_ = fft(y_values)
fft_values = 2.0 / N * np.abs(fft_values_[0:N // 2])
return f_values, fft_values
def get_psd_values(y_values, T, N, f_s):
f_values, psd_values = welch(y_values, fs=f_s)
return f_values, psd_values
def autocorr(x):
result = np.correlate(x, x, mode='full')
return result[len(result) // 2:]
def get_autocorr_values(y_values, T, N, f_s):
autocorr_values = autocorr(y_values)
x_values = np.array([T * jj for jj in range(0, N)])
return x_values, autocorr_values
def get_first_n_peaks(x, y, no_peaks=5):
x_, y_ = list(x), list(y)
if len(x_) >= no_peaks:
return x_[:no_peaks], y_[:no_peaks]
else:
missing_no_peaks = no_peaks - len(x_)
return x_ + [0] * missing_no_peaks, y_ + [0] * missing_no_peaks
def get_features(x_values, y_values, mph):
indices_peaks = detect_peaks(y_values, mph=mph)
peaks_x, peaks_y = get_first_n_peaks(x_values[indices_peaks], y_values[indices_peaks])
return peaks_x + peaks_y
def calculate_entropy(list_values):
counter_values = Counter(list_values).most_common()
probabilities = [elem[1] / len(list_values) for elem in counter_values]
entropy = scipy.stats.entropy(probabilities)
return entropy
def calculate_statistics(list_values):
n5 = np.nanpercentile(list_values, 5)
n25 = np.nanpercentile(list_values, 25)
n75 = np.nanpercentile(list_values, 75)
n95 = np.nanpercentile(list_values, 95)
median = np.nanpercentile(list_values, 50)
mean = np.nanmean(list_values)
std = np.nanstd(list_values)
var = np.nanvar(list_values)
rms = np.nanmean(np.sqrt(list_values ** 2))
return [n5, n25, n75, n95, median, mean, std, var, rms]
def calculate_crossings(list_values):
zero_crossing_indices = np.nonzero(np.diff(np.array(list_values) > 0))[0]
no_zero_crossings = len(zero_crossing_indices)
mean_crossing_indices = np.nonzero(np.diff(np.array(list_values) > np.nanmean(list_values)))[0]
no_mean_crossings = len(mean_crossing_indices)
return [no_zero_crossings, no_mean_crossings]
def get_single_features(list_values):
entropy = calculate_entropy(list_values)
crossings = calculate_crossings(list_values)
statistics = calculate_statistics(list_values)
return [entropy] + crossings + statistics
def extract_features(dataset, labels, T, N, f_s, denominator):
percentile = 5
list_of_features = []
list_of_labels = []
for signal_no in range(0, len(dataset)):
features = []
list_of_labels.append(labels[signal_no])
for signal_comp in range(0, dataset.shape[2]):
signal = dataset[signal_no, :, signal_comp]
signal_min = np.nanpercentile(signal, percentile)
signal_max = np.nanpercentile(signal, 100 - percentile)
# ijk = (100 - 2*percentile)/10
mph = signal_min + (signal_max - signal_min) / denominator
# Peak features
features += get_features(*get_psd_values(signal, T, N, f_s), mph)
features += get_features(*get_fft_values(signal, T, N, f_s), mph)
features += get_features(*get_autocorr_values(signal, T, N, f_s), mph | idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
& (x[ind[i]] > x[ind] if kpsh else True)
idel[i] = 0 # Keep current peak | conditional_block |
feature_extraction.py | edge='rising',
kpsh=False, valley=False, show=False, ax=None):
"""Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
mph : {None, number}, optional (default = None)
detect peaks that are greater than minimum peak height.
mpd : positive integer, optional (default = 1)
detect peaks that are at least separated by minimum peak distance (in
number of data).
threshold : positive number, optional (default = 0)
detect peaks (valleys) that are greater (smaller) than `threshold`
in relation to their immediate neighbors.
edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
for a flat peak, keep only the rising edge ('rising'), only the
falling edge ('falling'), both edges ('both'), or don't detect a
flat peak (None).
kpsh : bool, optional (default = False)
keep peaks with same height even if they are closer than `mpd`.
valley : bool, optional (default = False)
if True (1), detect valleys (local minima) instead of peaks.
show : bool, optional (default = False)
if True (1), plot data in matplotlib figure.
ax : a matplotlib.axes.Axes instance, optional (default = None).
Returns
-------
ind : 1D array_like
indeces of the peaks in `x`.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
"""
x = np.atleast_1d(x).astype('float64')
if x.size < 3:
return np.array([], dtype=int)
if valley:
x = -x
# find indices of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
indnan = np.where(np.isnan(x))[0]
if indnan.size:
x[indnan] = np.inf
dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if not edge:
ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
# handle NaN's
if ind.size and indnan.size:
# NaN's and values close to NaN's cannot be peaks
ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan - 1, indnan + 1))), invert=True)]
# first and last values of x cannot be peaks | if ind.size and ind[-1] == x.size - 1:
ind = ind[:-1]
# remove peaks < minimum peak height
if ind.size and mph is not None:
ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
if ind.size and threshold > 0:
dx = np.min(np.vstack([x[ind] - x[ind - 1], x[ind] - x[ind + 1]]), axis=0)
ind = np.delete(ind, np.where(dx < threshold)[0])
# detect small peaks closer than minimum peak distance
if ind.size and mpd > 1:
ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height
idel = np.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
# keep peaks with the same height if kpsh is True
idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
& (x[ind[i]] > x[ind] if kpsh else True)
idel[i] = 0 # Keep current peak
# remove the small peaks and sort back the indices by their occurrence
ind = np.sort(ind[~idel])
if show:
if indnan.size:
x[indnan] = np.nan
if valley:
x = -x
_plot(x, mph, mpd, threshold, edge, valley, ax, ind)
return ind
def get_values(y_values, T, N, f_s):
y_values = y_values
x_values = [(1 / f_s) * kk for kk in range(0, len(y_values))]
return x_values, y_values
def get_fft_values(y_values, T, N, f_s):
f_values = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)
fft_values_ = fft(y_values)
fft_values = 2.0 / N * np.abs(fft_values_[0:N // 2])
return f_values, fft_values
def get_psd_values(y_values, T, N, f_s):
f_values, psd_values = welch(y_values, fs=f_s)
return f_values, psd_values
def autocorr(x):
result = np.correlate(x, x, mode='full')
return result[len(result) // 2:]
def get_autocorr_values(y_values, T, N, f_s):
autocorr_values = autocorr(y_values)
x_values = np.array([T * jj for jj in range(0, N)])
return x_values, autocorr_values
def get_first_n_peaks(x, y, no_peaks=5):
x_, y_ = list(x), list(y)
if len(x_) >= no_peaks:
return x_[:no_peaks], y_[:no_peaks]
else:
missing_no_peaks = no_peaks - len(x_)
return x_ + [0] * missing_no_peaks, y_ + [0] * missing_no_peaks
def get_features(x_values, y_values, mph):
indices_peaks = detect_peaks(y_values, mph=mph)
peaks_x, peaks_y = get_first_n_peaks(x_values[indices_peaks], y_values[indices_peaks])
return peaks_x + peaks_y
def calculate_entropy(list_values):
counter_values = Counter(list_values).most_common()
probabilities = [elem[1] / len(list_values) for elem in counter_values]
entropy = scipy.stats.entropy(probabilities)
return entropy
def calculate_statistics(list_values):
n5 = np.nanpercentile(list_values, 5)
n25 = np.nanpercentile(list_values, 25)
n75 = np.nanpercentile(list_values, 75)
n95 = np.nanpercentile(list_values, 95)
median = np.nanpercentile(list_values, 50)
mean = np.nanmean(list_values)
std = np.nanstd(list_values)
var = np.nanvar(list_values)
rms = np.nanmean(np.sqrt(list_values ** 2))
return [n5, n25, n75, n95, median, mean, std, var, rms]
def calculate_crossings(list_values):
zero_crossing_indices = np.nonzero(np.diff(np.array(list_values) > 0))[0]
no_zero_crossings = len(zero_crossing_indices)
mean_crossing_indices = np.nonzero(np.diff(np.array(list_values) > np.nanmean(list_values)))[0]
no_mean_crossings = len(mean_crossing_indices)
return [no_zero_crossings, no_mean_crossings]
def get_single_features(list_values):
entropy = calculate_entropy(list_values)
crossings = calculate_crossings(list_values)
statistics = calculate_statistics(list_values)
return [entropy] + crossings + statistics
def extract_features(dataset, labels, T, N, f_s, denominator):
percentile = 5
list_of_features = []
list_of_labels = []
for signal_no in range(0, len(dataset)):
features = []
list_of_labels.append(labels[signal_no])
for signal_comp in range(0, dataset.shape[2]):
signal = dataset[signal_no, :, signal_comp]
signal_min = np.nanpercentile(signal, percentile)
signal_max = np.nanpercentile(signal, 100 - percentile)
# ijk = (100 - 2*percentile)/10
mph = signal_min + (signal_max - signal_min) / denominator
# Peak features
features += get_features(*get_psd_values(signal, T, N, f_s), mph)
features += get_features(*get_fft_values(signal, T, N, f_s), mph)
features += get_features(*get_autocorr_values(signal, T, N, f_s), mph | if ind.size and ind[0] == 0:
ind = ind[1:] | random_line_split |
feature_extraction.py | edge='rising',
kpsh=False, valley=False, show=False, ax=None):
| valley : bool, optional (default = False)
if True (1), detect valleys (local minima) instead of peaks.
show : bool, optional (default = False)
if True (1), plot data in matplotlib figure.
ax : a matplotlib.axes.Axes instance, optional (default = None).
Returns
-------
ind : 1D array_like
indeces of the peaks in `x`.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
"""
x = np.atleast_1d(x).astype('float64')
if x.size < 3:
return np.array([], dtype=int)
if valley:
x = -x
# find indices of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
indnan = np.where(np.isnan(x))[0]
if indnan.size:
x[indnan] = np.inf
dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if not edge:
ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
# handle NaN's
if ind.size and indnan.size:
# NaN's and values close to NaN's cannot be peaks
ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan - 1, indnan + 1))), invert=True)]
# first and last values of x cannot be peaks
if ind.size and ind[0] == 0:
ind = ind[1:]
if ind.size and ind[-1] == x.size - 1:
ind = ind[:-1]
# remove peaks < minimum peak height
if ind.size and mph is not None:
ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
if ind.size and threshold > 0:
dx = np.min(np.vstack([x[ind] - x[ind - 1], x[ind] - x[ind + 1]]), axis=0)
ind = np.delete(ind, np.where(dx < threshold)[0])
# detect small peaks closer than minimum peak distance
if ind.size and mpd > 1:
ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height
idel = np.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
# keep peaks with the same height if kpsh is True
idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
& (x[ind[i]] > x[ind] if kpsh else True)
idel[i] = 0 # Keep current peak
# remove the small peaks and sort back the indices by their occurrence
ind = np.sort(ind[~idel])
if show:
if indnan.size:
x[indnan] = np.nan
if valley:
x = -x
_plot(x, mph, mpd, threshold, edge, valley, ax, ind)
return ind
def get_values(y_values, T, N, f_s):
y_values = y_values
x_values = [(1 / f_s) * kk for kk in range(0, len(y_values))]
return x_values, y_values
def get_fft_values(y_values, T, N, f_s):
f_values = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)
fft_values_ = fft(y_values)
fft_values = 2.0 / N * np.abs(fft_values_[0:N // 2])
return f_values, fft_values
def get_psd_values(y_values, T, N, f_s):
f_values, psd_values = welch(y_values, fs=f_s)
return f_values, psd_values
def autocorr(x):
result = np.correlate(x, x, mode='full')
return result[len(result) // 2:]
def get_autocorr_values(y_values, T, N, f_s):
autocorr_values = autocorr(y_values)
x_values = np.array([T * jj for jj in range(0, N)])
return x_values, autocorr_values
def get_first_n_peaks(x, y, no_peaks=5):
x_, y_ = list(x), list(y)
if len(x_) >= no_peaks:
return x_[:no_peaks], y_[:no_peaks]
else:
missing_no_peaks = no_peaks - len(x_)
return x_ + [0] * missing_no_peaks, y_ + [0] * missing_no_peaks
def get_features(x_values, y_values, mph):
indices_peaks = detect_peaks(y_values, mph=mph)
peaks_x, peaks_y = get_first_n_peaks(x_values[indices_peaks], y_values[indices_peaks])
return peaks_x + peaks_y
def calculate_entropy(list_values):
counter_values = Counter(list_values).most_common()
probabilities = [elem[1] / len(list_values) for elem in counter_values]
entropy = scipy.stats.entropy(probabilities)
return entropy
def calculate_statistics(list_values):
n5 = np.nanpercentile(list_values, 5)
n25 = np.nanpercentile(list_values, 25)
n75 = np.nanpercentile(list_values, 75)
n95 = np.nanpercentile(list_values, 95)
median = np.nanpercentile(list_values, 50)
mean = np.nanmean(list_values)
std = np.nanstd(list_values)
var = np.nanvar(list_values)
rms = np.nanmean(np.sqrt(list_values ** 2))
return [n5, n25, n75, n95, median, mean, std, var, rms]
def calculate_crossings(list_values):
zero_crossing_indices = np.nonzero(np.diff(np.array(list_values) > 0))[0]
no_zero_crossings = len(zero_crossing_indices)
mean_crossing_indices = np.nonzero(np.diff(np.array(list_values) > np.nanmean(list_values)))[0]
no_mean_crossings = len(mean_crossing_indices)
return [no_zero_crossings, no_mean_crossings]
def get_single_features(list_values):
entropy = calculate_entropy(list_values)
crossings = calculate_crossings(list_values)
statistics = calculate_statistics(list_values)
return [entropy] + crossings + statistics
def extract_features(dataset, labels, T, N, f_s, denominator):
percentile = 5
list_of_features = []
list_of_labels = []
for signal_no in range(0, len(dataset)):
features = []
list_of_labels.append(labels[signal_no])
for signal_comp in range(0, dataset.shape[2]):
signal = dataset[signal_no, :, signal_comp]
signal_min = np.nanpercentile(signal, percentile)
signal_max = np.nanpercentile(signal, 100 - percentile)
# ijk = (100 - 2*percentile)/10
mph = signal_min + (signal_max - signal_min) / denominator
# Peak features
features += get_features(*get_psd_values(signal, T, N, f_s), mph)
features += get_features(*get_fft_values(signal, T, N, f_s), mph)
features += get_features(*get_autocorr_values(signal, T, N, f_s), | """Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
mph : {None, number}, optional (default = None)
detect peaks that are greater than minimum peak height.
mpd : positive integer, optional (default = 1)
detect peaks that are at least separated by minimum peak distance (in
number of data).
threshold : positive number, optional (default = 0)
detect peaks (valleys) that are greater (smaller) than `threshold`
in relation to their immediate neighbors.
edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
for a flat peak, keep only the rising edge ('rising'), only the
falling edge ('falling'), both edges ('both'), or don't detect a
flat peak (None).
kpsh : bool, optional (default = False)
keep peaks with same height even if they are closer than `mpd`. | identifier_body |
main.js | entries.forEach(function(entry) {
graph.addNode({
id: entry.name,
label: entry.name,
x: entry.name.charCodeAt(0), // Positions are refined below
y: entry.name.charCodeAt(1),
size: 5 + Math.pow(entry.students ? entry.students.length : 0, 0.8)
// TODO: Assign node colors in some meaningful way
});
if (Object.keys(entry).indexOf('year') !== -1) {
yearMap[entry.name] = "'" + ('' + entry.year).substring(2);
}
});
// Add edges
var inMap = {};
var outMap = {};
var edgesToColors = {};
var seenCourses = {};
entries.forEach(function(teacher) {
if (teacher.students) {
teacher.students.forEach(function(student) {
var edgeId = teacher.name + ':' + student.name + ':' + student.class;
var edgeColor = classToColor(student.class);
graph.addEdge({
id: edgeId,
source: teacher.name,
target: student.name,
type: 'arrow',
size: 1,
color: edgeColor
});
edgesToColors[edgeId] = edgeColor;
if (Object.keys(seenCourses).indexOf(student.class) === -1) {
seenCourses[student.class] = true;
}
// Save in/out info for detailed "info" view (on node hover)
if (!inMap[student.name]) {
inMap[student.name] = [];
}
if (!outMap[teacher.name]) {
outMap[teacher.name] = [];
}
inMap[student.name].push(teacher.name + ' (' + student.class + (student.semester ? ', ' + student.semester : '') + ')');
outMap[teacher.name].push(student.name + ' (' + student.class + (student.semester ? ', ' + student.semester : '') + ')');
// Approximate tree-forming: if student is above teacher, swap their y-coordinates
// TODO: Make this better
if (layout !== 'forceDirected') {
var teacherNode = graph.nodes(teacher.name);
var studentNode = graph.nodes(student.name);
if (studentNode.y < teacherNode.y) {
var tmp = studentNode.y;
studentNode.y = teacherNode.y;
teacherNode.y = tmp;
}
}
});
}
});
// Fill in filtering dropdown
var seenCoursesList = Object.keys(seenCourses);
seenCoursesList.sort();
seenCoursesList.forEach(function(course) {
$('#filter').innerHTML += '<option value="' + course + '">' + course + '</option>';
});
// Bind node hover handler
s.bind('overNode', function(e) {
if (activeSearchHit) {
return;
}
var node = e.data.node;
showPersonInfo(node, inMap, outMap);
var edges = s.graph.edges();
edges.forEach(function(edge) {
var idParts = edge.id.split(':');
var teacher = idParts[0];
var student = idParts[1];
if (teacher != node.id && student != node.id) {
edge.color = 'transparent';
} else {
edge.size = 3;
}
});
s.refresh();
});
// Bind node un-hover handler
s.bind('outNode', function(e) {
if (activeSearchHit) {
return;
}
$('#info').style.display = 'none';
if (activeFilter) { // Hack to reapply filter
var activeFilterCopy = activeFilter;
activeFilter = '';
filterByCourse(activeFilterCopy);
} else {
var edges = s.graph.edges();
edges.forEach(function(edge) {
edge.color = edgesToColors[edge.id];
edge.size = 1;
});
s.refresh();
}
});
// Bind search handler
$('#search').onkeydown = function(e) {
if (e.keyCode == 13) {
if (highlightSearchHit($('#search').value)) {
$('#layout-wrapper').style.display = 'none';
$('#filter-wrapper').style.display = 'none';
$('#search-wrapper').style.display = 'none';
$('#search-cancel').style.display = 'inline';
}
}
};
// $('body').onkeydown = function(e) {
// if (e.keyCode == 27) {
// cancelSearchHit();
// }
// };
// Set up autocomplete for search
var names = entries.map(function(e) { return e.name; });
names.sort();
new Awesomplete($('#search'), {
list: names,
minChars: 1,
autoFirst: true
});
$('body').addEventListener('awesomplete-selectcomplete', function(e) {
$('#search').onkeydown({ keyCode: 13 }); // trigger search handler
});
// Zoom out a tiny bit then render
var c = s.cameras[0];
c.ratio *= 1.2;
defaultCameraSettings = {
x: c.x,
y: c.y,
ratio: c.ratio,
angle: c.angle
};
s.refresh();
if (!layout || layout === 'forceDirected') {
s.startForceAtlas2({
gravity: 0.5,
linLogMode: true
});
window.setTimeout(function() { s.killForceAtlas2(); }, 5000);
} else {
// Make sure no nodes overlap
s.configNoverlap({
gridSize: 50,
nodeMargin: 20
});
s.startNoverlap();
}
var elapsedTime = ((new Date()) - startTime) / 1000;
console.log('main() finished in ' + elapsedTime + 's')
};
var showColorLegend = function() {
var newHTML = '';
$.each(classToColorMap, function(className, color) {
newHTML += '<span style="color: ' + color + '" onclick="filterByCourse(\'' + className + '\')"><br>' + className + '</span>';
});
newHTML += '<span style="color: ' + defaultClassColor + '" onclick="filterByCourse(\'\')"><br>Other</span>';
$('#legend').innerHTML = newHTML;
};
var highlightSearchHit = function(name) {
cancelSearchHit();
node = s.graph.nodes(name);
if (node) {
$('#search').value = '';
s.dispatchEvent('overNode', { node: node });
s.cameras[0].goTo(defaultCameraSettings);
activeSearchHit = node.id;
node.color = '#FFA726';
s.refresh();
return true;
}
return false;
};
var cancelSearchHit = function() {
if (activeSearchHit) {
activeSearchHit = '';
$('#layout-wrapper').style.display = 'inline';
$('#filter-wrapper').style.display = 'inline';
$('#search-wrapper').style.display = 'inline';
$('#search-cancel').style.display = 'none';
$('#search').focus();
s.dispatchEvent('outNode', { node: node });
s.graph.nodes().forEach(function(node) {
node.color = 'black';
});
s.refresh();
}
};
var showPersonInfo = function(node, inMap, outMap) {
var name = node.id;
var newHTML = '';
newHTML += '<b>' + name + (Object.keys(yearMap).indexOf(name) !== -1 ? ' (' + yearMap[name] + ')' : '') + '</b>';
if (inMap[name] && inMap[name].length) {
newHTML += '<p>Teachers:<ul>';
inMap[name].forEach(function(teacher) {
newHTML += '<li>' + teacher + '</li>';
| outMap[name].forEach(function(student) {
newHTML += '<li>' + student + '</li>';
});
newHTML += '</ul>';
}
$('#info').innerHTML = newHTML;
$('#info').style.display = 'block';
};
var filterByCourse = function(course) {
if ($('#filter').value !== course) {
$('#filter').value = course;
}
if (course) {
activeFilter = course;
s.graph.edges().forEach(function(edge) {
var idParts = edge.id.split(':');
if (idParts[2] !== course) {
edge.color = 'transparent';
edge.size = 1;
} else {
edge.color = classToColor(idParts[2]);
edge.size = 1;
}
});
s.refresh();
$('#layout-wrapper').style.display = 'none';
$('#search-wrapper').style.display = 'none';
} else {
activeFilter = '';
s.graph.edges().forEach(function(edge) {
var idParts = edge.id.split(':');
edge.color = classToColor(idParts[2]);
edge.size = 1;
});
s.refresh();
$('#layout-wrapper').style.display = 'inline';
$('#search-wrapper').style.display = 'inline';
}
}
var goToLayout = function(layoutName) {
window.location.href = window.location.href.split('?')[0] + '?layout=' + layoutName;
}
var computeLongest = function() {
entries.forEach(function(e) { longestPath | });
newHTML += '</ul>';
}
if (outMap[name] && outMap[name].length) {
newHTML += '<p>Students:<ul>';
| random_line_split |
main.js | entries.forEach(function(entry) {
graph.addNode({
id: entry.name,
label: entry.name,
x: entry.name.charCodeAt(0), // Positions are refined below
y: entry.name.charCodeAt(1),
size: 5 + Math.pow(entry.students ? entry.students.length : 0, 0.8)
// TODO: Assign node colors in some meaningful way
});
if (Object.keys(entry).indexOf('year') !== -1) {
yearMap[entry.name] = "'" + ('' + entry.year).substring(2);
}
});
// Add edges
var inMap = {};
var outMap = {};
var edgesToColors = {};
var seenCourses = {};
entries.forEach(function(teacher) {
if (teacher.students) {
teacher.students.forEach(function(student) {
var edgeId = teacher.name + ':' + student.name + ':' + student.class;
var edgeColor = classToColor(student.class);
graph.addEdge({
id: edgeId,
source: teacher.name,
target: student.name,
type: 'arrow',
size: 1,
color: edgeColor
});
edgesToColors[edgeId] = edgeColor;
if (Object.keys(seenCourses).indexOf(student.class) === -1) {
seenCourses[student.class] = true;
}
// Save in/out info for detailed "info" view (on node hover)
if (!inMap[student.name]) {
inMap[student.name] = [];
}
if (!outMap[teacher.name]) {
outMap[teacher.name] = [];
}
inMap[student.name].push(teacher.name + ' (' + student.class + (student.semester ? ', ' + student.semester : '') + ')');
outMap[teacher.name].push(student.name + ' (' + student.class + (student.semester ? ', ' + student.semester : '') + ')');
// Approximate tree-forming: if student is above teacher, swap their y-coordinates
// TODO: Make this better
if (layout !== 'forceDirected') {
var teacherNode = graph.nodes(teacher.name);
var studentNode = graph.nodes(student.name);
if (studentNode.y < teacherNode.y) {
var tmp = studentNode.y;
studentNode.y = teacherNode.y;
teacherNode.y = tmp;
}
}
});
}
});
// Fill in filtering dropdown
var seenCoursesList = Object.keys(seenCourses);
seenCoursesList.sort();
seenCoursesList.forEach(function(course) {
$('#filter').innerHTML += '<option value="' + course + '">' + course + '</option>';
});
// Bind node hover handler
s.bind('overNode', function(e) {
if (activeSearchHit) {
return;
}
var node = e.data.node;
showPersonInfo(node, inMap, outMap);
var edges = s.graph.edges();
edges.forEach(function(edge) {
var idParts = edge.id.split(':');
var teacher = idParts[0];
var student = idParts[1];
if (teacher != node.id && student != node.id) {
edge.color = 'transparent';
} else {
edge.size = 3;
}
});
s.refresh();
});
// Bind node un-hover handler
s.bind('outNode', function(e) {
if (activeSearchHit) {
return;
}
$('#info').style.display = 'none';
if (activeFilter) { // Hack to reapply filter
var activeFilterCopy = activeFilter;
activeFilter = '';
filterByCourse(activeFilterCopy);
} else {
var edges = s.graph.edges();
edges.forEach(function(edge) {
edge.color = edgesToColors[edge.id];
edge.size = 1;
});
s.refresh();
}
});
// Bind search handler
$('#search').onkeydown = function(e) {
if (e.keyCode == 13) {
if (highlightSearchHit($('#search').value)) {
$('#layout-wrapper').style.display = 'none';
$('#filter-wrapper').style.display = 'none';
$('#search-wrapper').style.display = 'none';
$('#search-cancel').style.display = 'inline';
}
}
};
// $('body').onkeydown = function(e) {
// if (e.keyCode == 27) {
// cancelSearchHit();
// }
// };
// Set up autocomplete for search
var names = entries.map(function(e) { return e.name; });
names.sort();
new Awesomplete($('#search'), {
list: names,
minChars: 1,
autoFirst: true
});
$('body').addEventListener('awesomplete-selectcomplete', function(e) {
$('#search').onkeydown({ keyCode: 13 }); // trigger search handler
});
// Zoom out a tiny bit then render
var c = s.cameras[0];
c.ratio *= 1.2;
defaultCameraSettings = {
x: c.x,
y: c.y,
ratio: c.ratio,
angle: c.angle
};
s.refresh();
if (!layout || layout === 'forceDirected') {
s.startForceAtlas2({
gravity: 0.5,
linLogMode: true
});
window.setTimeout(function() { s.killForceAtlas2(); }, 5000);
} else {
// Make sure no nodes overlap
s.configNoverlap({
gridSize: 50,
nodeMargin: 20
});
s.startNoverlap();
}
var elapsedTime = ((new Date()) - startTime) / 1000;
console.log('main() finished in ' + elapsedTime + 's')
};
var showColorLegend = function() {
var newHTML = '';
$.each(classToColorMap, function(className, color) {
newHTML += '<span style="color: ' + color + '" onclick="filterByCourse(\'' + className + '\')"><br>' + className + '</span>';
});
newHTML += '<span style="color: ' + defaultClassColor + '" onclick="filterByCourse(\'\')"><br>Other</span>';
$('#legend').innerHTML = newHTML;
};
var highlightSearchHit = function(name) {
cancelSearchHit();
node = s.graph.nodes(name);
if (node) {
$('#search').value = '';
s.dispatchEvent('overNode', { node: node });
s.cameras[0].goTo(defaultCameraSettings);
activeSearchHit = node.id;
node.color = '#FFA726';
s.refresh();
return true;
}
return false;
};
var cancelSearchHit = function() {
if (activeSearchHit) {
activeSearchHit = '';
$('#layout-wrapper').style.display = 'inline';
$('#filter-wrapper').style.display = 'inline';
$('#search-wrapper').style.display = 'inline';
$('#search-cancel').style.display = 'none';
$('#search').focus();
s.dispatchEvent('outNode', { node: node });
s.graph.nodes().forEach(function(node) {
node.color = 'black';
});
s.refresh();
}
};
var showPersonInfo = function(node, inMap, outMap) {
var name = node.id;
var newHTML = '';
newHTML += '<b>' + name + (Object.keys(yearMap).indexOf(name) !== -1 ? ' (' + yearMap[name] + ')' : '') + '</b>';
if (inMap[name] && inMap[name].length) {
newHTML += '<p>Teachers:<ul>';
inMap[name].forEach(function(teacher) {
newHTML += '<li>' + teacher + '</li>';
});
newHTML += '</ul>';
}
if (outMap[name] && outMap[name].length) {
newHTML += '<p>Students:<ul>';
outMap[name].forEach(function(student) {
newHTML += '<li>' + student + '</li>';
});
newHTML += '</ul>';
}
$('#info').innerHTML = newHTML;
$('#info').style.display = 'block';
};
var filterByCourse = function(course) {
if ($('#filter').value !== course) {
$('#filter').value = course;
}
if (course) | else {
activeFilter = '';
s.graph.edges().forEach(function(edge) {
var idParts = edge.id.split(':');
edge.color = classToColor(idParts[2]);
edge.size = 1;
});
s.refresh();
$('#layout-wrapper').style.display = 'inline';
$('#search-wrapper').style.display = 'inline';
}
}
var goToLayout = function(layoutName) {
window.location.href = window.location.href.split('?')[0] + '?layout=' + layoutName;
}
var computeLongest = function() {
entries.forEach(function(e) { longestPath(e | {
activeFilter = course;
s.graph.edges().forEach(function(edge) {
var idParts = edge.id.split(':');
if (idParts[2] !== course) {
edge.color = 'transparent';
edge.size = 1;
} else {
edge.color = classToColor(idParts[2]);
edge.size = 1;
}
});
s.refresh();
$('#layout-wrapper').style.display = 'none';
$('#search-wrapper').style.display = 'none';
} | conditional_block |
Trade.go | _service_district,omitempty"`
// o2oServiceTown
O2oServiceTown string `json:"o2o_service_town,omitempty" xml:"o2o_service_town,omitempty"`
// o2oServiceAddress
O2oServiceAddress string `json:"o2o_service_address,omitempty" xml:"o2o_service_address,omitempty"`
// o2oStepTradeDetailNew
O2oStepTradeDetailNew string `json:"o2o_step_trade_detail_new,omitempty" xml:"o2o_step_trade_detail_new,omitempty"`
// o2oXiaopiao
O2oXiaopiao string `json:"o2o_xiaopiao,omitempty" xml:"o2o_xiaopiao,omitempty"`
// o2oContract
O2oContract string `json:"o2o_contract,omitempty" xml:"o2o_contract,omitempty"`
// 新零售门店编码
RetailStoreCode string `json:"retail_store_code,omitempty" xml:"retail_store_code,omitempty"`
// 新零售线下订单id
RetailOutOrderId string `json:"retail_out_order_id,omitempty" xml:"retail_out_order_id,omitempty"`
// rechargeFee
RechargeFee string `json:"recharge_fee,omitempty" xml:"recharge_fee,omitempty"`
// platformSubsidyFee
PlatformSubsidyFee string `json:"platform_subsidy_fee,omitempty" xml:"platform_subsidy_fee,omitempty"`
// nrOffline
NrOffline string `json:"nr_offline,omitempty" xml:"nr_offline,omitempty"`
// 网厅订单垂直表信息
WttParam string `json:"wtt_param,omitempty" xml:"wtt_param,omitempty"`
// logisticsInfos
LogisticsInfos []LogisticsInfo `json:"logistics_infos,omitempty" xml:"logistics_infos,omitempty"`
// nrStoreOrderId
NrStoreOrderId string `json:"nr_store_order_id,omitempty" xml:"nr_store_order_id,omitempty"`
// 门店 ID
NrShopId string `json:"nr_shop_id,omitempty" xml:"nr_shop_id,omitempty"`
// 门店名称
NrShopName string `json:"nr_shop_name,omitempty" xml:"nr_shop_name,omitempty"`
// 导购员ID
NrShopGuideId string `json:"nr_shop_guide_id,omitempty" xml:"nr_shop_guide_id,omitempty"`
// 导购员名称
NrShopGuideName string `json:"nr_shop_guide_name,omitempty" xml:"nr_shop_guide_name,omitempty"`
// sortInfo
SortInfo string `json:"sort_info,omitempty" xml:"sort_info,omitempty"`
// 1已排序 2不排序
Sorted int64 `json:"sorted,omitempty" xml:"sorted,omitempty"`
// 一小时达不处理订单
NrNoHandle string `json:"nr_no_handle,omitempty" xml:"nr_no_handle,omitempty"`
// isGift
IsGift bool `json:"is_gift,omitempty" xml:"is_gift,omitempty"`
// doneeNick
DoneeNick string `json:"donee_nick,omitempty" xml:"donee_nick,omitempty"`
// doneeUid
DoneeOpenUid string `json:"donee_open_uid,omitempty" xml:"donee_open_uid,omitempty"`
// suningShopCode
SuningShopCode string `json:"suning_shop_code,omitempty" xml:"suning_shop_code,omitempty"`
// suningShopValid
SuningShopValid int64 `json:"suning_shop_valid,omitempty" xml:"suning_shop_valid,omitempty"`
// retailStoreId
RetailStoreId string `json:"retail_store_id,omitempty" xml:"retail_store_id,omitempty"`
// isIstore
IsIstore bool `json:"is_istore,omitempty" xml:"is_istore,omitempty"`
// ua
Ua string `json:"ua,omitempty" xml:"ua,omitempty"`
// 截单时间
CutoffMinutes string `json:"cutoff_minutes,omitempty" xml:"cutoff_minutes,omitempty"`
// 时效:天
EsTime string `json:"es_time,omitempty" xml:"es_time,omitempty"`
// 发货时间
DeliveryTime string `json:"delivery_time,omitempty" xml:"delivery_time,omitempty"`
// 揽收时间
CollectTime string `json:"collect_time,omitempty" xml:"collect_time,omitempty"`
// 派送时间
DispatchTime string `json:"dispatch_time,omitempty" xml:"dispatch_time,omitempty"`
// 签收时间
SignTime string `json:"sign_time,omitempty" xml:"sign_time,omitempty"`
// 派送CP
DeliveryCps string `json:"delivery_cps,omitempty" xml:"delivery_cps,omitempty"`
// linkedmall透传参数
LinkedmallExtInfo string `json:"linkedmall_ext_info,omitempty" xml:"linkedmall_ext_info,omitempty"`
// 新零售全渠道订单:订单类型,自提订单:pickUp,电商发货:tmall,门店发货(配送、骑手):storeSend
RtOmniSendType string `json:"rt_omni_send_type,omitempty" xml:"rt_omni_send_type,omitempty"`
// 新零售全渠道订单:发货门店ID
RtOmniStoreId string `json:"rt_omni_store_id,omitempty" xml:"rt_omni_store_id,omitempty"`
// 新零售全渠道订单:商家自有发货门店编码
RtOmniOuterStoreId string `json:"rt_omni_outer_store_id,omitempty" xml:"rt_omni_outer_store_id,omitempty"`
// 同城预约配送开始时间
TcpsStart string `json:"tcps_start,omitempty" xml:"tcps_start,omitempty"`
// 同城业务类型,com.tmall.dsd:定时送,storeDsd-fn-3-1:淘速达3公里蜂鸟配送
TcpsCode string `json:"tcps_code,omitempty" xml:"tcps_code,omitempty"`
// 同城预约配送结束时间
TcpsEnd string `json:"tcps_end,omitempty" xml:"tcps_end,omitempty"`
//
MTariffFee string `json:"m_tariff_fee,omitempty" xml:"m_tariff_fee,omitempty"`
// 时效服务身份,如tmallPromise代表天猫时效承诺
TimingPromise string `json:"timing_promise,omitempty" xml:"timing_promise,omitempty"`
// 时效服务字段,服务字段,会有多个服务值,以英文半角逗号","切割
PromiseService string `json:"promise_service,omitempty" xml:"promise_service,omitempty"`
// 苏宁预约安装,用户安装时间段
OiRange string `json:"oi_range,omitempty" xml:"oi_range,omitempty"`
// 苏宁预约安装,用户安装时间
OiDate string `json:"oi_date,omitempty" xml:"oi_date,omitempty"`
// 苏宁预约安装,暂不安装
HoldInstall string `json:"hold_install,omitempty" xml:"hold_install,omitempty"`
// 外部会员id
OuterPartnerMemberId string `json:"outer_partner_member_id,omitempty" xml:"outer_partner_member_id,omitempty"`
// 叶子分类
RootCat string `json:"root_cat,omitempty" xml:"root_cat,omitempty"`
// 1-gifting订单
Gifting string `json:"gifting,omitempty" xml:"gifting,omitempty"`
// 1-coffee gifting订单
GiftingTakeout string `json:"gifting_takeout,omitempty" xml:"gifting_takeout,omitempty"`
// 订单来源
AppName string `json:"app_name,omitempty" xml:"app_name,omitempty"`
// 居然之家同城站订单类型 deposit:预约到店,direct:直接购买,tail:尾款核销
EasyHomeCityType string `json:"easy_home_city_type,omitempty" xml:"easy_home_city_type,omitempty"`
// 同城站关联订单号
NrDepositOrderId string `json:"nr_deposit_order_id,omitempty" xml:"nr_deposit_order_id,omitempty"`
// 摊位id
NrStoreCode string `json:"nr_store_code,omitempty" xml:"nr_store_code,omitempty"`
// 使用淘金币的数量,以分为单位,和订单标propoint中间那一段一样,没有返回null
Propoint string `json:"propoint,omitempty" xml:"propoint,omitempty"`
// 1-周期送订单
ZqsOrderTag string `json:"zqs_order_tag,omitempty" xml:"zqs_order_tag,omitempty"`
// 天鲜配冰柜id
TxpFreezerId string `json:"txp_freezer_id,omitempty" xml:"txp_freezer_id,omitempty"`
// 天鲜配自提方式
TxpReceiveMethod string `json:"txp_receive_method,omitempty" xml:"txp_receive_method,omitempty"`
// 同城购门店ID
BrandLightShopStoreId string `json:"brand_light_shop_store_id,omitempty" xml:"brand_light_shop_store_id,omitempty"`
| // 同城购订单source | random_line_split | |
asistencia.component.ts | this.showContents = false
}
})
this.searchCriteria['value']= `${this.searchCriteria['start']} - ${this.searchCriteria['end']}`
this._dateRangeOptions.settings = {
autoUpdateInput: true,
locale: { format: 'YYYY-MM-DD' }
}
this.loadDeps()
moment.locale('es-MX')
}
searchAsistencia( dep, inicio, fin ){
if( dep != 'MX' && dep != 'CO' ){
this.depLoadFlag = false
this.getAsistencia( dep, inicio, fin )
}else{
this.depLoadFlag = true
this.asistData = {}
this.datesData = []
this.depLoaders = {}
for( let pcrc of this.deps ){
if( pcrc.id != 29 && pcrc.id != 56 && pcrc.sede == dep ){
this.depLoaders[pcrc.Departamento] = true
let params = `${pcrc.id}/${inicio}/${fin}`
this.getAllDeps( pcrc, params, () => {
this.orderNames( this.asistData, 1)
})
}
}
this.orderNames( this.asistData, 1)
}
}
getAllDeps( pcrc, params, callback ){
this._api.restfulGet( params, 'Asistencia/pya' )
.subscribe( res =>{
this.depLoaders[pcrc.Departamento] = false
if( res['data'] != null ){
Object.assign(this.asistData,res['data']);
this.datesData = (res['Fechas'])
}
callback()
},
(err) => {
this.error = err
this.depLoaders[pcrc.Departamento] = false
this.toastr.error(`${ this.error }`, 'Error!');
callback()
});
}
getAsistencia( dep, inicio, fin, asesor?:any, flag=false ){
this.filterExpanded = false
this.searchFilter = ''
let params = {
dep : dep ,
inicio : inicio ,
fin : fin ,
asesor : asesor ,
noSup : null ,
order : null ,
}
if( asesor ){
if( !flag ){
this.asistData[asesor]['data'][inicio]['loading'] = true
}else{
this.loading = true
}
}else{
this.loading = true
}
this._api.restfulPut( params, 'Asistencia/pya' )
.subscribe( res =>{
if( asesor && !flag){
// console.log( res )
this.singleUpdate( res )
}else{
this.asistSubject.next({ res })
}
},
(err) => {
this.error = err
this.loading = false
this.toastr.error(`${ this.error }`, 'Error!');
});
}
singleUpdate( data ){
for( let asesor in data.data ){
// tslint:disable-next-line:forin
for(let fecha in data.Fechas ){
this.asistData[ asesor ]['data'][ fecha ] = data.data[ asesor ][ 'data' ][ fecha ]
}
}
}
compareDates( date ){
let header = moment(date)
let td = moment( this.today.format('YYYY-MM-DD') )
if(header >= td){
return false
}else{
return true
}
}
loadDeps(){
this._api.restfulGet( '','Headcount/deps' )
.subscribe( res => {
this.depsSubject.next({ res })
})
}
@Input() loadData(): Observable<any>{
return this.asistSubject.asObservable();
}
@Input() getDeps(): Observable<any>{
return this.depsSubject.asObservable();
}
setVal( inicio, fin ){
this.searchCriteria['start'] = inicio.format('YYYY-MM-DD')
this.searchCriteria['end'] = fin.format('YYYY-MM-DD')
}
pcrcChange( select ){
this.searchCriteria['skill']=event.target['value']
}
applyFilter( rac ){
if(this.searchFilter == ''){
return true
}
for(let item of this.searchFields){
if(rac[item].toLowerCase().includes(this.searchFilter.toLowerCase())){
return true
}
}
return false
}
ngOnInit() {
this.getDeps()
.subscribe( res => {
this.deps = res.res
// console.log( res.res )
this.cd.markForCheck()
})
this.loadData()
.subscribe( res => {
this.asistData = res.res['data']
this.datesData = res.res['Fechas']
this.orderNames( this.asistData, 1)
this.loading = false
// console.log( res.res )
// console.log( this.asistData )
this.cd.markForCheck()
})
}
printTimeInterval(date, start, end){
let inicio = moment.tz(`${date} ${start}`, 'this._zh.defaultZone')
let fin = moment.tz(`${date} ${end}`, 'this._zh.defaultZone')
let inicioCUN = inicio.clone().tz('America/Bogota')
let finCUN = fin.clone().tz('America/Bogota')
let result = `${inicioCUN.format('HH:mm')} - ${finCUN.format('HH:mm')}`
return result
}
printTime(date, time){
let tiempo = moment.tz(`${date} ${time}`, 'this._zh.defaultZone')
let tiempoCUN = tiempo.clone().tz('America/Bogota')
let result = tiempoCUN.format('HH:mm:ss')
return result
}
formatDate(datetime, format){
let time = moment.tz(datetime, 'this._zh.defaultZone')
let cunTime = time.clone().tz('America/Bogota')
return cunTime.format(format)
}
orderNames( data, ord=1 ){
// console.log(data)
let sortArray:any = []
let tmpSlot:any = []
let flag:boolean
let pushFlag:boolean
let x:number
let lastInput:any
let compare:any = []
for(let id in data){
if(sortArray.length == 0){
sortArray[0] = id
}else{
flag = false
for(x=0; x<sortArray.length; x++){
if(!flag){
if(ord == 1){
compare[1] = data[id]['Nombre']
compare[2] = data[sortArray[x]]['Nombre']
}else{
compare[1] = data[sortArray[x]]['Nombre']
compare[2] = data[id]['Nombre']
}
if(compare[1] < compare[2]){
tmpSlot[0] = sortArray[x]
sortArray[x] = id
flag = true
if(x == (sortArray.length)-1){
pushFlag=true
lastInput = tmpSlot[0]
}
}else{
if(x == (sortArray.length)-1){
pushFlag=true
lastInput = id
}
}
}else{
tmpSlot[1] = sortArray[x]
sortArray[x] = tmpSlot[0]
tmpSlot[0] = tmpSlot[1]
}
}
if(pushFlag){
sortArray.push(lastInput)
}
}
}
this.orederedKeys = sortArray
}
ausentNotif( event ){
this.toastr.error(`${ event.msg }`, `${ event.title.toUpperCase() }!`);
}
perCumplimiento( rac, date, log ){
let inicio = this.asistData[rac].data[date][`${log}s`]
let fin = this.asistData[rac].data[date][`${log}e`]
let ji = this.asistData[rac].data[date][`${log}_login`]
let jf = this.asistData[rac].data[date][`${log}_logout`]
if( inicio == null ||
fin == null ||
ji == null ||
jf == null ){
return 0
}
let s = moment( inicio )
let e = moment( fin )
let js = moment( ji )
let je = moment( jf )
let total = e.diff(s, 'seconds')
let did = je.diff(js, 'seconds')
let result:number = did / total * 100
return (Math.floor(result))
}
| ( time ){
let td = moment(time)
if( td < moment(`${moment().format('YYYY-MM-DD')} 05:00:00`)){
return td.add(1, 'days')
}else{
return td
}
}
showDom(rac, date, block){
if(this.checkSet(rac, date, block)){
this.shownDom[`${rac}_${date}_${block}`] = undefined
}else{
this.shownDom[`${rac}_${date}_${block}`] = true
}
}
checkSet(rac, date, block){
if(this.isset(this.shownDom,`${rac}_${date | timeDateXform | identifier_name |
asistencia.component.ts | this.showContents = false
}
})
this.searchCriteria['value']= `${this.searchCriteria['start']} - ${this.searchCriteria['end']}`
this._dateRangeOptions.settings = {
autoUpdateInput: true,
locale: { format: 'YYYY-MM-DD' }
}
this.loadDeps()
moment.locale('es-MX')
}
searchAsistencia( dep, inicio, fin ){
if( dep != 'MX' && dep != 'CO' ){
this.depLoadFlag = false
this.getAsistencia( dep, inicio, fin )
}else{
this.depLoadFlag = true
this.asistData = {}
this.datesData = []
this.depLoaders = {}
for( let pcrc of this.deps ){
if( pcrc.id != 29 && pcrc.id != 56 && pcrc.sede == dep ){
this.depLoaders[pcrc.Departamento] = true
let params = `${pcrc.id}/${inicio}/${fin}`
this.getAllDeps( pcrc, params, () => {
this.orderNames( this.asistData, 1)
})
}
}
this.orderNames( this.asistData, 1)
}
}
getAllDeps( pcrc, params, callback ){
this._api.restfulGet( params, 'Asistencia/pya' )
.subscribe( res =>{
this.depLoaders[pcrc.Departamento] = false
if( res['data'] != null ){
Object.assign(this.asistData,res['data']);
this.datesData = (res['Fechas'])
}
callback()
},
(err) => {
this.error = err
this.depLoaders[pcrc.Departamento] = false
this.toastr.error(`${ this.error }`, 'Error!');
callback()
});
}
getAsistencia( dep, inicio, fin, asesor?:any, flag=false ){
this.filterExpanded = false
this.searchFilter = ''
let params = {
dep : dep ,
inicio : inicio ,
fin : fin ,
asesor : asesor ,
noSup : null ,
order : null ,
}
if( asesor ){
if( !flag ) | else{
this.loading = true
}
}else{
this.loading = true
}
this._api.restfulPut( params, 'Asistencia/pya' )
.subscribe( res =>{
if( asesor && !flag){
// console.log( res )
this.singleUpdate( res )
}else{
this.asistSubject.next({ res })
}
},
(err) => {
this.error = err
this.loading = false
this.toastr.error(`${ this.error }`, 'Error!');
});
}
singleUpdate( data ){
for( let asesor in data.data ){
// tslint:disable-next-line:forin
for(let fecha in data.Fechas ){
this.asistData[ asesor ]['data'][ fecha ] = data.data[ asesor ][ 'data' ][ fecha ]
}
}
}
compareDates( date ){
let header = moment(date)
let td = moment( this.today.format('YYYY-MM-DD') )
if(header >= td){
return false
}else{
return true
}
}
loadDeps(){
this._api.restfulGet( '','Headcount/deps' )
.subscribe( res => {
this.depsSubject.next({ res })
})
}
@Input() loadData(): Observable<any>{
return this.asistSubject.asObservable();
}
@Input() getDeps(): Observable<any>{
return this.depsSubject.asObservable();
}
setVal( inicio, fin ){
this.searchCriteria['start'] = inicio.format('YYYY-MM-DD')
this.searchCriteria['end'] = fin.format('YYYY-MM-DD')
}
pcrcChange( select ){
this.searchCriteria['skill']=event.target['value']
}
applyFilter( rac ){
if(this.searchFilter == ''){
return true
}
for(let item of this.searchFields){
if(rac[item].toLowerCase().includes(this.searchFilter.toLowerCase())){
return true
}
}
return false
}
ngOnInit() {
this.getDeps()
.subscribe( res => {
this.deps = res.res
// console.log( res.res )
this.cd.markForCheck()
})
this.loadData()
.subscribe( res => {
this.asistData = res.res['data']
this.datesData = res.res['Fechas']
this.orderNames( this.asistData, 1)
this.loading = false
// console.log( res.res )
// console.log( this.asistData )
this.cd.markForCheck()
})
}
printTimeInterval(date, start, end){
let inicio = moment.tz(`${date} ${start}`, 'this._zh.defaultZone')
let fin = moment.tz(`${date} ${end}`, 'this._zh.defaultZone')
let inicioCUN = inicio.clone().tz('America/Bogota')
let finCUN = fin.clone().tz('America/Bogota')
let result = `${inicioCUN.format('HH:mm')} - ${finCUN.format('HH:mm')}`
return result
}
printTime(date, time){
let tiempo = moment.tz(`${date} ${time}`, 'this._zh.defaultZone')
let tiempoCUN = tiempo.clone().tz('America/Bogota')
let result = tiempoCUN.format('HH:mm:ss')
return result
}
formatDate(datetime, format){
let time = moment.tz(datetime, 'this._zh.defaultZone')
let cunTime = time.clone().tz('America/Bogota')
return cunTime.format(format)
}
orderNames( data, ord=1 ){
// console.log(data)
let sortArray:any = []
let tmpSlot:any = []
let flag:boolean
let pushFlag:boolean
let x:number
let lastInput:any
let compare:any = []
for(let id in data){
if(sortArray.length == 0){
sortArray[0] = id
}else{
flag = false
for(x=0; x<sortArray.length; x++){
if(!flag){
if(ord == 1){
compare[1] = data[id]['Nombre']
compare[2] = data[sortArray[x]]['Nombre']
}else{
compare[1] = data[sortArray[x]]['Nombre']
compare[2] = data[id]['Nombre']
}
if(compare[1] < compare[2]){
tmpSlot[0] = sortArray[x]
sortArray[x] = id
flag = true
if(x == (sortArray.length)-1){
pushFlag=true
lastInput = tmpSlot[0]
}
}else{
if(x == (sortArray.length)-1){
pushFlag=true
lastInput = id
}
}
}else{
tmpSlot[1] = sortArray[x]
sortArray[x] = tmpSlot[0]
tmpSlot[0] = tmpSlot[1]
}
}
if(pushFlag){
sortArray.push(lastInput)
}
}
}
this.orederedKeys = sortArray
}
ausentNotif( event ){
this.toastr.error(`${ event.msg }`, `${ event.title.toUpperCase() }!`);
}
perCumplimiento( rac, date, log ){
let inicio = this.asistData[rac].data[date][`${log}s`]
let fin = this.asistData[rac].data[date][`${log}e`]
let ji = this.asistData[rac].data[date][`${log}_login`]
let jf = this.asistData[rac].data[date][`${log}_logout`]
if( inicio == null ||
fin == null ||
ji == null ||
jf == null ){
return 0
}
let s = moment( inicio )
let e = moment( fin )
let js = moment( ji )
let je = moment( jf )
let total = e.diff(s, 'seconds')
let did = je.diff(js, 'seconds')
let result:number = did / total * 100
return (Math.floor(result))
}
timeDateXform( time ){
let td = moment(time)
if( td < moment(`${moment().format('YYYY-MM-DD')} 05:00:00`)){
return td.add(1, 'days')
}else{
return td
}
}
showDom(rac, date, block){
if(this.checkSet(rac, date, block)){
this.shownDom[`${rac}_${date}_${block}`] = undefined
}else{
this.shownDom[`${rac}_${date}_${block}`] = true
}
}
checkSet(rac, date, block){
if(this.isset(this.shownDom,`${rac}_${ | {
this.asistData[asesor]['data'][inicio]['loading'] = true
} | conditional_block |
asistencia.component.ts | .showContents = false
}
})
this.searchCriteria['value']= `${this.searchCriteria['start']} - ${this.searchCriteria['end']}`
this._dateRangeOptions.settings = {
autoUpdateInput: true,
locale: { format: 'YYYY-MM-DD' }
}
this.loadDeps()
moment.locale('es-MX')
}
searchAsistencia( dep, inicio, fin ){
if( dep != 'MX' && dep != 'CO' ){
this.depLoadFlag = false
this.getAsistencia( dep, inicio, fin )
}else{
this.depLoadFlag = true
this.asistData = {}
this.datesData = []
this.depLoaders = {}
for( let pcrc of this.deps ){
if( pcrc.id != 29 && pcrc.id != 56 && pcrc.sede == dep ){
this.depLoaders[pcrc.Departamento] = true
let params = `${pcrc.id}/${inicio}/${fin}`
this.getAllDeps( pcrc, params, () => {
this.orderNames( this.asistData, 1)
})
}
}
this.orderNames( this.asistData, 1)
}
}
getAllDeps( pcrc, params, callback ){
this._api.restfulGet( params, 'Asistencia/pya' )
.subscribe( res =>{
this.depLoaders[pcrc.Departamento] = false
if( res['data'] != null ){
Object.assign(this.asistData,res['data']);
this.datesData = (res['Fechas'])
}
callback()
},
(err) => {
this.error = err
this.depLoaders[pcrc.Departamento] = false
this.toastr.error(`${ this.error }`, 'Error!');
callback()
});
}
getAsistencia( dep, inicio, fin, asesor?:any, flag=false ){
this.filterExpanded = false
this.searchFilter = ''
let params = {
dep : dep ,
inicio : inicio ,
fin : fin ,
asesor : asesor ,
noSup : null ,
order : null ,
}
if( asesor ){
if( !flag ){
this.asistData[asesor]['data'][inicio]['loading'] = true
}else{
this.loading = true
}
}else{
this.loading = true
}
this._api.restfulPut( params, 'Asistencia/pya' )
.subscribe( res =>{
if( asesor && !flag){
// console.log( res )
this.singleUpdate( res )
}else{
this.asistSubject.next({ res })
}
},
(err) => {
this.error = err
this.loading = false
this.toastr.error(`${ this.error }`, 'Error!');
});
}
singleUpdate( data ){
for( let asesor in data.data ){
// tslint:disable-next-line:forin
for(let fecha in data.Fechas ){
this.asistData[ asesor ]['data'][ fecha ] = data.data[ asesor ][ 'data' ][ fecha ]
}
}
}
compareDates( date ){
let header = moment(date)
let td = moment( this.today.format('YYYY-MM-DD') )
if(header >= td){
return false
}else{
return true
}
}
loadDeps(){
this._api.restfulGet( '','Headcount/deps' )
.subscribe( res => {
this.depsSubject.next({ res })
})
}
@Input() loadData(): Observable<any>{
return this.asistSubject.asObservable();
}
@Input() getDeps(): Observable<any>{
return this.depsSubject.asObservable();
}
setVal( inicio, fin ){
this.searchCriteria['start'] = inicio.format('YYYY-MM-DD')
this.searchCriteria['end'] = fin.format('YYYY-MM-DD')
}
pcrcChange( select ){
this.searchCriteria['skill']=event.target['value']
}
applyFilter( rac ){
if(this.searchFilter == ''){
return true
}
for(let item of this.searchFields){
if(rac[item].toLowerCase().includes(this.searchFilter.toLowerCase())){
return true
}
}
return false
}
ngOnInit() {
this.getDeps()
.subscribe( res => {
this.deps = res.res
// console.log( res.res )
this.cd.markForCheck()
})
this.loadData()
.subscribe( res => {
this.asistData = res.res['data']
this.datesData = res.res['Fechas']
this.orderNames( this.asistData, 1)
this.loading = false
// console.log( res.res )
// console.log( this.asistData )
this.cd.markForCheck()
})
}
printTimeInterval(date, start, end){
let inicio = moment.tz(`${date} ${start}`, 'this._zh.defaultZone')
let fin = moment.tz(`${date} ${end}`, 'this._zh.defaultZone')
let inicioCUN = inicio.clone().tz('America/Bogota')
let finCUN = fin.clone().tz('America/Bogota')
let result = `${inicioCUN.format('HH:mm')} - ${finCUN.format('HH:mm')}`
return result
}
printTime(date, time){
let tiempo = moment.tz(`${date} ${time}`, 'this._zh.defaultZone')
let tiempoCUN = tiempo.clone().tz('America/Bogota')
let result = tiempoCUN.format('HH:mm:ss')
return result
}
formatDate(datetime, format){
let time = moment.tz(datetime, 'this._zh.defaultZone')
let cunTime = time.clone().tz('America/Bogota')
return cunTime.format(format)
}
orderNames( data, ord=1 ){
// console.log(data)
let sortArray:any = []
let tmpSlot:any = []
let flag:boolean
let pushFlag:boolean
let x:number
let lastInput:any
let compare:any = []
for(let id in data){
if(sortArray.length == 0){
sortArray[0] = id
}else{
flag = false
for(x=0; x<sortArray.length; x++){
if(!flag){
if(ord == 1){
compare[1] = data[id]['Nombre']
compare[2] = data[sortArray[x]]['Nombre']
}else{
compare[1] = data[sortArray[x]]['Nombre']
compare[2] = data[id]['Nombre']
}
if(compare[1] < compare[2]){
tmpSlot[0] = sortArray[x]
sortArray[x] = id
flag = true
if(x == (sortArray.length)-1){
pushFlag=true
lastInput = tmpSlot[0]
}
}else{
if(x == (sortArray.length)-1){
pushFlag=true
lastInput = id
}
}
}else{
tmpSlot[1] = sortArray[x]
sortArray[x] = tmpSlot[0]
tmpSlot[0] = tmpSlot[1]
}
}
if(pushFlag){
sortArray.push(lastInput)
}
}
}
this.orederedKeys = sortArray
}
ausentNotif( event ){
this.toastr.error(`${ event.msg }`, `${ event.title.toUpperCase() }!`);
}
perCumplimiento( rac, date, log ){
let inicio = this.asistData[rac].data[date][`${log}s`]
let fin = this.asistData[rac].data[date][`${log}e`]
let ji = this.asistData[rac].data[date][`${log}_login`]
let jf = this.asistData[rac].data[date][`${log}_logout`]
if( inicio == null ||
fin == null ||
ji == null ||
jf == null ){
return 0
}
let s = moment( inicio )
let e = moment( fin )
let js = moment( ji )
let je = moment( jf )
let total = e.diff(s, 'seconds')
let did = je.diff(js, 'seconds')
let result:number = did / total * 100
return (Math.floor(result))
}
timeDateXform( time ){
let td = moment(time)
if( td < moment(`${moment().format('YYYY-MM-DD')} 05:00:00`)){
return td.add(1, 'days')
}else{
return td
}
}
showDom(rac, date, block) |
checkSet(rac, date, block){
if(this.isset(this.shownDom,`${rac}_${ | {
if(this.checkSet(rac, date, block)){
this.shownDom[`${rac}_${date}_${block}`] = undefined
}else{
this.shownDom[`${rac}_${date}_${block}`] = true
}
} | identifier_body |
asistencia.component.ts | this.showContents = false
}
})
this.searchCriteria['value']= `${this.searchCriteria['start']} - ${this.searchCriteria['end']}`
this._dateRangeOptions.settings = {
autoUpdateInput: true,
locale: { format: 'YYYY-MM-DD' }
}
this.loadDeps()
moment.locale('es-MX')
}
searchAsistencia( dep, inicio, fin ){
if( dep != 'MX' && dep != 'CO' ){
this.depLoadFlag = false
this.getAsistencia( dep, inicio, fin )
}else{
this.depLoadFlag = true
this.asistData = {}
this.datesData = []
this.depLoaders = {}
for( let pcrc of this.deps ){
if( pcrc.id != 29 && pcrc.id != 56 && pcrc.sede == dep ){
this.depLoaders[pcrc.Departamento] = true
let params = `${pcrc.id}/${inicio}/${fin}`
this.getAllDeps( pcrc, params, () => {
this.orderNames( this.asistData, 1)
})
}
}
this.orderNames( this.asistData, 1)
}
}
getAllDeps( pcrc, params, callback ){
this._api.restfulGet( params, 'Asistencia/pya' )
.subscribe( res =>{
this.depLoaders[pcrc.Departamento] = false
if( res['data'] != null ){
Object.assign(this.asistData,res['data']);
this.datesData = (res['Fechas'])
}
callback()
},
(err) => {
this.error = err
this.depLoaders[pcrc.Departamento] = false
this.toastr.error(`${ this.error }`, 'Error!');
callback()
});
}
getAsistencia( dep, inicio, fin, asesor?:any, flag=false ){
this.filterExpanded = false
this.searchFilter = ''
let params = {
dep : dep ,
inicio : inicio ,
fin : fin ,
asesor : asesor ,
noSup : null ,
order : null ,
}
if( asesor ){
if( !flag ){
this.asistData[asesor]['data'][inicio]['loading'] = true
}else{
this.loading = true
}
}else{
this.loading = true
}
this._api.restfulPut( params, 'Asistencia/pya' )
.subscribe( res =>{
if( asesor && !flag){
// console.log( res )
this.singleUpdate( res )
}else{
this.asistSubject.next({ res })
}
},
(err) => {
this.error = err
this.loading = false
this.toastr.error(`${ this.error }`, 'Error!');
});
}
singleUpdate( data ){
for( let asesor in data.data ){
// tslint:disable-next-line:forin
for(let fecha in data.Fechas ){
this.asistData[ asesor ]['data'][ fecha ] = data.data[ asesor ][ 'data' ][ fecha ]
}
}
}
compareDates( date ){
let header = moment(date)
let td = moment( this.today.format('YYYY-MM-DD') )
if(header >= td){
return false
}else{
return true
}
}
loadDeps(){
this._api.restfulGet( '','Headcount/deps' )
.subscribe( res => {
this.depsSubject.next({ res })
})
}
@Input() loadData(): Observable<any>{
return this.asistSubject.asObservable();
}
@Input() getDeps(): Observable<any>{
return this.depsSubject.asObservable();
}
setVal( inicio, fin ){
this.searchCriteria['start'] = inicio.format('YYYY-MM-DD')
this.searchCriteria['end'] = fin.format('YYYY-MM-DD')
}
pcrcChange( select ){
this.searchCriteria['skill']=event.target['value']
}
applyFilter( rac ){
if(this.searchFilter == ''){
return true
}
for(let item of this.searchFields){
if(rac[item].toLowerCase().includes(this.searchFilter.toLowerCase())){
return true
}
}
return false
}
ngOnInit() {
this.getDeps()
.subscribe( res => {
this.deps = res.res
// console.log( res.res )
this.cd.markForCheck()
})
this.loadData()
.subscribe( res => {
this.asistData = res.res['data']
this.datesData = res.res['Fechas']
this.orderNames( this.asistData, 1)
this.loading = false
// console.log( res.res )
// console.log( this.asistData )
this.cd.markForCheck()
})
}
printTimeInterval(date, start, end){
let inicio = moment.tz(`${date} ${start}`, 'this._zh.defaultZone')
let fin = moment.tz(`${date} ${end}`, 'this._zh.defaultZone')
let inicioCUN = inicio.clone().tz('America/Bogota')
let finCUN = fin.clone().tz('America/Bogota')
let result = `${inicioCUN.format('HH:mm')} - ${finCUN.format('HH:mm')}`
return result
}
printTime(date, time){
let tiempo = moment.tz(`${date} ${time}`, 'this._zh.defaultZone')
let tiempoCUN = tiempo.clone().tz('America/Bogota')
let result = tiempoCUN.format('HH:mm:ss')
return result
}
| let cunTime = time.clone().tz('America/Bogota')
return cunTime.format(format)
}
orderNames( data, ord=1 ){
// console.log(data)
let sortArray:any = []
let tmpSlot:any = []
let flag:boolean
let pushFlag:boolean
let x:number
let lastInput:any
let compare:any = []
for(let id in data){
if(sortArray.length == 0){
sortArray[0] = id
}else{
flag = false
for(x=0; x<sortArray.length; x++){
if(!flag){
if(ord == 1){
compare[1] = data[id]['Nombre']
compare[2] = data[sortArray[x]]['Nombre']
}else{
compare[1] = data[sortArray[x]]['Nombre']
compare[2] = data[id]['Nombre']
}
if(compare[1] < compare[2]){
tmpSlot[0] = sortArray[x]
sortArray[x] = id
flag = true
if(x == (sortArray.length)-1){
pushFlag=true
lastInput = tmpSlot[0]
}
}else{
if(x == (sortArray.length)-1){
pushFlag=true
lastInput = id
}
}
}else{
tmpSlot[1] = sortArray[x]
sortArray[x] = tmpSlot[0]
tmpSlot[0] = tmpSlot[1]
}
}
if(pushFlag){
sortArray.push(lastInput)
}
}
}
this.orederedKeys = sortArray
}
ausentNotif( event ){
this.toastr.error(`${ event.msg }`, `${ event.title.toUpperCase() }!`);
}
perCumplimiento( rac, date, log ){
let inicio = this.asistData[rac].data[date][`${log}s`]
let fin = this.asistData[rac].data[date][`${log}e`]
let ji = this.asistData[rac].data[date][`${log}_login`]
let jf = this.asistData[rac].data[date][`${log}_logout`]
if( inicio == null ||
fin == null ||
ji == null ||
jf == null ){
return 0
}
let s = moment( inicio )
let e = moment( fin )
let js = moment( ji )
let je = moment( jf )
let total = e.diff(s, 'seconds')
let did = je.diff(js, 'seconds')
let result:number = did / total * 100
return (Math.floor(result))
}
timeDateXform( time ){
let td = moment(time)
if( td < moment(`${moment().format('YYYY-MM-DD')} 05:00:00`)){
return td.add(1, 'days')
}else{
return td
}
}
showDom(rac, date, block){
if(this.checkSet(rac, date, block)){
this.shownDom[`${rac}_${date}_${block}`] = undefined
}else{
this.shownDom[`${rac}_${date}_${block}`] = true
}
}
checkSet(rac, date, block){
if(this.isset(this.shownDom,`${rac}_${date | formatDate(datetime, format){
let time = moment.tz(datetime, 'this._zh.defaultZone') | random_line_split |
simple-handler.go | asmExecJsOnce sync.Once
wasmExecJsContent []byte
wasmExecJsTs time.Time
lastBuildTime time.Time // time of last successful build
lastBuildContentGZ []byte // last successful build gzipped
mu sync.RWMutex
}
// New returns an SimpleHandler ready to serve using the specified directory.
// The dev flag indicates if development functionality is enabled.
// Settings on SimpleHandler may be tuned more specifically after creation, this function just
// returns sensible defaults for development or production according to if dev is true or false.
func New(dir string, dev bool) *SimpleHandler {
if !filepath.IsAbs(dir) {
panic(fmt.Errorf("dir %q is not an absolute path", dir))
}
ret := &SimpleHandler{
Dir: dir,
}
ret.IsPage = DefaultIsPageFunc
ret.PageHandler = &PageHandler{
Template: template.Must(template.New("_page_").Parse(DefaultPageTemplateSource)),
TemplateDataFunc: DefaultTemplateDataFunc,
}
ret.StaticHandler = FilteredFileServer(
regexp.MustCompile(`[.](css|js|html|map|jpg|jpeg|png|gif|svg|eot|ttf|otf|woff|woff2|wasm)$`),
http.Dir(dir))
if dev {
ret.EnableBuildAndServe = true
ret.ParserGoPkgOpts = &gen.ParserGoPkgOpts{}
ret.MainWasmPath = "/main.wasm"
ret.WasmExecJsPath = "/wasm_exec.js"
}
return ret
}
// ServeHTTP implements http.Handler.
func (h *SimpleHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// by default we tell browsers to always check back with us for content, even in production;
// we allow disabling by the caller just setting another value first; otherwise too much
// headache caused by pages that won't reload and we still reduce a lot of bandwidth usage with
// 304 responses, seems like a sensible trade off for now
if w.Header().Get("Cache-Control") == "" {
w.Header().Set("Cache-Control", "max-age=0, no-cache")
}
p := path.Clean("/" + r.URL.Path)
if h.EnableBuildAndServe && h.MainWasmPath == p {
h.buildAndServe(w, r)
return
}
if h.WasmExecJsPath == p {
h.serveGoEnvWasmExecJs(w, r)
return
}
if h.IsPage(r) {
h.PageHandler.ServeHTTP(w, r)
return
}
h.StaticHandler.ServeHTTP(w, r)
}
func (h *SimpleHandler) buildAndServe(w http.ResponseWriter, r *http.Request) {
// EnableGenerate bool // if true calls `go generate` (requires EnableBuildAndServe)
// main.wasm and build process, first check if it's needed
h.mu.RLock()
lastBuildTime := h.lastBuildTime
lastBuildContentGZ := h.lastBuildContentGZ
h.mu.RUnlock()
var buildDirTs time.Time
var err error
if !h.DisableTimestampPreservation {
buildDirTs, err = dirTimestamp(h.Dir)
if err != nil {
log.Printf("error in dirTimestamp(%q): %v", h.Dir, err)
goto doBuild
}
}
if len(lastBuildContentGZ) == 0 {
// log.Printf("2")
goto doBuild
}
if h.DisableBuildCache {
goto doBuild
}
// skip build process if timestamp from build dir exists and is equal or older than our last build
if !buildDirTs.IsZero() && !buildDirTs.After(lastBuildTime) {
// log.Printf("3")
goto serveBuiltFile
}
// // a false return value means we should send a 304
// if !checkIfModifiedSince(r, buildDirTs) {
// w.WriteHeader(http.StatusNotModified)
// return
// }
// FIXME: might be useful to make it so only one thread rebuilds at a time and they both use the result
doBuild:
// log.Printf("GOT HERE")
{
if h.ParserGoPkgOpts != nil {
pg := gen.NewParserGoPkg(h.Dir, h.ParserGoPkgOpts)
err := pg.Run()
if err != nil {
msg := fmt.Sprintf("Error from ParserGoPkg: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
}
f, err := ioutil.TempFile("", "main_wasm_")
if err != nil {
panic(err)
}
fpath := f.Name()
f.Close()
os.Remove(f.Name())
defer os.Remove(f.Name())
startTime := time.Now()
if h.EnableGenerate {
cmd := exec.Command("go", "generate", ".")
cmd.Dir = h.Dir
cmd.Env = append(cmd.Env, os.Environ()...)
b, err := cmd.CombinedOutput()
w.Header().Set("X-Go-Generate-Duration", time.Since(startTime).String())
if err != nil {
msg := fmt.Sprintf("Error from generate: %v; Output:\n%s", err, b)
log.Print(msg)
http.Error(w, msg, 500)
return
}
}
// GOOS=js GOARCH=wasm go build -o main.wasm .
startTime = time.Now()
runCommand := func(args ...string) ([]byte, error) {
cmd := exec.Command(args[0], args[1:]...)
cmd.Dir = h.Dir
cmd.Env = append(cmd.Env, os.Environ()...)
cmd.Env = append(cmd.Env, "GOOS=js", "GOARCH=wasm")
b, err := cmd.CombinedOutput()
return b, err
}
b, err := runCommand("go", "mod", "tidy")
if err == nil {
b, err = runCommand("go", "build", "-o", fpath, ".")
}
w.Header().Set("X-Go-Build-Duration", time.Since(startTime).String())
if err != nil {
msg := fmt.Sprintf("Error from compile: %v (out path=%q); Output:\n%s", err, fpath, b)
log.Print(msg)
http.Error(w, msg, 500)
return
}
f, err = os.Open(fpath)
if err != nil {
msg := fmt.Sprintf("Error opening file after build: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
// gzip with max compression
var buf bytes.Buffer
gzw, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
n, err := io.Copy(gzw, f)
if err != nil {
msg := fmt.Sprintf("Error reading and compressing binary: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
gzw.Close()
w.Header().Set("X-Gunzipped-Size", fmt.Sprint(n))
// update cache
if buildDirTs.IsZero() {
lastBuildTime = time.Now()
} else {
lastBuildTime = buildDirTs
}
lastBuildContentGZ = buf.Bytes()
// log.Printf("GOT TO UPDATE")
h.mu.Lock()
h.lastBuildTime = lastBuildTime
h.lastBuildContentGZ = lastBuildContentGZ
h.mu.Unlock()
}
serveBuiltFile:
w.Header().Set("Content-Type", "application/wasm")
// w.Header().Set("Last-Modified", lastBuildTime.Format(http.TimeFormat)) // handled by http.ServeContent
// if client supports gzip response (the usual case), we just set the gzip header and send back
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
w.Header().Set("Content-Encoding", "gzip")
w.Header().Set("X-Gzipped-Size", fmt.Sprint(len(lastBuildContentGZ)))
http.ServeContent(w, r, h.MainWasmPath, lastBuildTime, bytes.NewReader(lastBuildContentGZ))
return
}
// no gzip, we decompress internally and send it back
gzr, _ := gzip.NewReader(bytes.NewReader(lastBuildContentGZ))
_, err = io.Copy(w, gzr)
if err != nil {
log.Print(err)
}
return
}
func (h *SimpleHandler) serveGoEnvWasmExecJs(w http.ResponseWriter, r *http.Request) {
b, err := exec.Command("go", "env", "GOROOT").CombinedOutput()
if err != nil {
http.Error(w, "failed to run `go env GOROOT`: "+err.Error(), 500)
return
}
h.wasmExecJsOnce.Do(func() {
h.wasmExecJsContent, err = ioutil.ReadFile(filepath.Join(strings.TrimSpace(string(b)), "misc/wasm/wasm_exec.js"))
if err != nil | {
http.Error(w, "failed to run `go env GOROOT`: "+err.Error(), 500)
return
} | conditional_block | |
simple-handler.go |
StaticHandler http.Handler // returns static assets from Dir with appropriate filtering or appropriate error
wasmExecJsOnce sync.Once
wasmExecJsContent []byte
wasmExecJsTs time.Time
lastBuildTime time.Time // time of last successful build
lastBuildContentGZ []byte // last successful build gzipped
mu sync.RWMutex
}
// New returns an SimpleHandler ready to serve using the specified directory.
// The dev flag indicates if development functionality is enabled.
// Settings on SimpleHandler may be tuned more specifically after creation, this function just
// returns sensible defaults for development or production according to if dev is true or false.
func New(dir string, dev bool) *SimpleHandler {
if !filepath.IsAbs(dir) {
panic(fmt.Errorf("dir %q is not an absolute path", dir))
}
ret := &SimpleHandler{
Dir: dir,
}
ret.IsPage = DefaultIsPageFunc
ret.PageHandler = &PageHandler{
Template: template.Must(template.New("_page_").Parse(DefaultPageTemplateSource)),
TemplateDataFunc: DefaultTemplateDataFunc,
}
ret.StaticHandler = FilteredFileServer(
regexp.MustCompile(`[.](css|js|html|map|jpg|jpeg|png|gif|svg|eot|ttf|otf|woff|woff2|wasm)$`),
http.Dir(dir))
if dev {
ret.EnableBuildAndServe = true
ret.ParserGoPkgOpts = &gen.ParserGoPkgOpts{}
ret.MainWasmPath = "/main.wasm"
ret.WasmExecJsPath = "/wasm_exec.js"
}
return ret
}
// ServeHTTP implements http.Handler.
func (h *SimpleHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// by default we tell browsers to always check back with us for content, even in production;
// we allow disabling by the caller just setting another value first; otherwise too much
// headache caused by pages that won't reload and we still reduce a lot of bandwidth usage with
// 304 responses, seems like a sensible trade off for now
if w.Header().Get("Cache-Control") == "" {
w.Header().Set("Cache-Control", "max-age=0, no-cache")
}
p := path.Clean("/" + r.URL.Path)
if h.EnableBuildAndServe && h.MainWasmPath == p {
h.buildAndServe(w, r)
return
}
if h.WasmExecJsPath == p {
h.serveGoEnvWasmExecJs(w, r)
return
}
if h.IsPage(r) {
h.PageHandler.ServeHTTP(w, r)
return
}
h.StaticHandler.ServeHTTP(w, r)
}
func (h *SimpleHandler) buildAndServe(w http.ResponseWriter, r *http.Request) {
// EnableGenerate bool // if true calls `go generate` (requires EnableBuildAndServe)
// main.wasm and build process, first check if it's needed
h.mu.RLock()
lastBuildTime := h.lastBuildTime
lastBuildContentGZ := h.lastBuildContentGZ
h.mu.RUnlock()
var buildDirTs time.Time
var err error
if !h.DisableTimestampPreservation {
buildDirTs, err = dirTimestamp(h.Dir)
if err != nil {
log.Printf("error in dirTimestamp(%q): %v", h.Dir, err)
goto doBuild
}
}
if len(lastBuildContentGZ) == 0 {
// log.Printf("2")
goto doBuild
}
if h.DisableBuildCache {
goto doBuild
}
// skip build process if timestamp from build dir exists and is equal or older than our last build
if !buildDirTs.IsZero() && !buildDirTs.After(lastBuildTime) {
// log.Printf("3")
goto serveBuiltFile
}
// // a false return value means we should send a 304
// if !checkIfModifiedSince(r, buildDirTs) {
// w.WriteHeader(http.StatusNotModified)
// return
// }
// FIXME: might be useful to make it so only one thread rebuilds at a time and they both use the result
doBuild:
// log.Printf("GOT HERE")
{
if h.ParserGoPkgOpts != nil {
pg := gen.NewParserGoPkg(h.Dir, h.ParserGoPkgOpts)
err := pg.Run()
if err != nil {
msg := fmt.Sprintf("Error from ParserGoPkg: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
}
f, err := ioutil.TempFile("", "main_wasm_")
if err != nil {
panic(err)
}
fpath := f.Name()
f.Close()
os.Remove(f.Name())
defer os.Remove(f.Name())
startTime := time.Now()
if h.EnableGenerate {
cmd := exec.Command("go", "generate", ".")
cmd.Dir = h.Dir
cmd.Env = append(cmd.Env, os.Environ()...)
b, err := cmd.CombinedOutput()
w.Header().Set("X-Go-Generate-Duration", time.Since(startTime).String())
if err != nil {
msg := fmt.Sprintf("Error from generate: %v; Output:\n%s", err, b)
log.Print(msg)
http.Error(w, msg, 500)
return
}
}
// GOOS=js GOARCH=wasm go build -o main.wasm .
startTime = time.Now()
runCommand := func(args ...string) ([]byte, error) {
cmd := exec.Command(args[0], args[1:]...)
cmd.Dir = h.Dir
cmd.Env = append(cmd.Env, os.Environ()...)
cmd.Env = append(cmd.Env, "GOOS=js", "GOARCH=wasm")
b, err := cmd.CombinedOutput()
return b, err
}
b, err := runCommand("go", "mod", "tidy")
if err == nil {
b, err = runCommand("go", "build", "-o", fpath, ".")
}
w.Header().Set("X-Go-Build-Duration", time.Since(startTime).String())
if err != nil {
msg := fmt.Sprintf("Error from compile: %v (out path=%q); Output:\n%s", err, fpath, b)
log.Print(msg)
http.Error(w, msg, 500)
return
}
f, err = os.Open(fpath)
if err != nil {
msg := fmt.Sprintf("Error opening file after build: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
// gzip with max compression
var buf bytes.Buffer
gzw, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
n, err := io.Copy(gzw, f)
if err != nil {
msg := fmt.Sprintf("Error reading and compressing binary: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
gzw.Close()
w.Header().Set("X-Gunzipped-Size", fmt.Sprint(n))
// update cache
if buildDirTs.IsZero() {
lastBuildTime = time.Now()
} else {
lastBuildTime = buildDirTs
}
lastBuildContentGZ = buf.Bytes()
// log.Printf("GOT TO UPDATE")
h.mu.Lock()
h.lastBuildTime = lastBuildTime
h.lastBuildContentGZ = lastBuildContentGZ
h.mu.Unlock()
}
serveBuiltFile:
w.Header().Set("Content-Type", "application/wasm") | if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
w.Header().Set("Content-Encoding", "gzip")
w.Header().Set("X-Gzipped-Size", fmt.Sprint(len(lastBuildContentGZ)))
http.ServeContent(w, r, h.MainWasmPath, lastBuildTime, bytes.NewReader(lastBuildContentGZ))
return
}
// no gzip, we decompress internally and send it back
gzr, _ := gzip.NewReader(bytes.NewReader(lastBuildContentGZ))
_, err = io.Copy(w, gzr)
if err != nil {
log.Print(err)
}
return
}
func (h *SimpleHandler) serveGoEnvWasmExecJs(w http.ResponseWriter, r *http.Request) {
b, err := exec.Command("go", "env", "GOROOT").CombinedOutput()
if err != nil {
http.Error(w, "failed to run `go env GOROOT`: "+err.Error(), 500)
return
}
h.wasmExecJsOnce.Do(func() {
h.wasmExecJsContent, err = ioutil.ReadFile(filepath.Join(strings.TrimSpace(string(b)), "misc/wasm/wasm_exec.js"))
if err != nil {
http.Error(w, "failed to run `go env G | // w.Header().Set("Last-Modified", lastBuildTime.Format(http.TimeFormat)) // handled by http.ServeContent
// if client supports gzip response (the usual case), we just set the gzip header and send back | random_line_split |
simple-handler.go | , "GOOS=js", "GOARCH=wasm")
b, err := cmd.CombinedOutput()
return b, err
}
b, err := runCommand("go", "mod", "tidy")
if err == nil {
b, err = runCommand("go", "build", "-o", fpath, ".")
}
w.Header().Set("X-Go-Build-Duration", time.Since(startTime).String())
if err != nil {
msg := fmt.Sprintf("Error from compile: %v (out path=%q); Output:\n%s", err, fpath, b)
log.Print(msg)
http.Error(w, msg, 500)
return
}
f, err = os.Open(fpath)
if err != nil {
msg := fmt.Sprintf("Error opening file after build: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
// gzip with max compression
var buf bytes.Buffer
gzw, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
n, err := io.Copy(gzw, f)
if err != nil {
msg := fmt.Sprintf("Error reading and compressing binary: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
gzw.Close()
w.Header().Set("X-Gunzipped-Size", fmt.Sprint(n))
// update cache
if buildDirTs.IsZero() {
lastBuildTime = time.Now()
} else {
lastBuildTime = buildDirTs
}
lastBuildContentGZ = buf.Bytes()
// log.Printf("GOT TO UPDATE")
h.mu.Lock()
h.lastBuildTime = lastBuildTime
h.lastBuildContentGZ = lastBuildContentGZ
h.mu.Unlock()
}
serveBuiltFile:
w.Header().Set("Content-Type", "application/wasm")
// w.Header().Set("Last-Modified", lastBuildTime.Format(http.TimeFormat)) // handled by http.ServeContent
// if client supports gzip response (the usual case), we just set the gzip header and send back
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
w.Header().Set("Content-Encoding", "gzip")
w.Header().Set("X-Gzipped-Size", fmt.Sprint(len(lastBuildContentGZ)))
http.ServeContent(w, r, h.MainWasmPath, lastBuildTime, bytes.NewReader(lastBuildContentGZ))
return
}
// no gzip, we decompress internally and send it back
gzr, _ := gzip.NewReader(bytes.NewReader(lastBuildContentGZ))
_, err = io.Copy(w, gzr)
if err != nil {
log.Print(err)
}
return
}
func (h *SimpleHandler) serveGoEnvWasmExecJs(w http.ResponseWriter, r *http.Request) {
b, err := exec.Command("go", "env", "GOROOT").CombinedOutput()
if err != nil {
http.Error(w, "failed to run `go env GOROOT`: "+err.Error(), 500)
return
}
h.wasmExecJsOnce.Do(func() {
h.wasmExecJsContent, err = ioutil.ReadFile(filepath.Join(strings.TrimSpace(string(b)), "misc/wasm/wasm_exec.js"))
if err != nil {
http.Error(w, "failed to run `go env GOROOT`: "+err.Error(), 500)
return
}
h.wasmExecJsTs = time.Now() // hack but whatever for now
})
if len(h.wasmExecJsContent) == 0 {
http.Error(w, "failed to read wasm_exec.js from local Go environment", 500)
return
}
w.Header().Set("Content-Type", "text/javascript")
http.ServeContent(w, r, "/wasm_exec.js", h.wasmExecJsTs, bytes.NewReader(h.wasmExecJsContent))
}
// FilteredFileServer is similar to the standard librarie's http.FileServer
// but the handler it returns will refuse to serve any files which don't
// match the specified regexp pattern after running through path.Clean().
// The idea is to make it easy to serve only specific kinds of
// static files from a directory. If pattern does not match a 404 will be returned.
// Be sure to include a trailing "$" if you are checking for file extensions, so it
// only matches the end of the path, e.g. "[.](css|js)$"
func FilteredFileServer(pattern *regexp.Regexp, fs http.FileSystem) http.Handler {
if pattern == nil {
panic(fmt.Errorf("pattern is nil"))
}
if fs == nil {
panic(fmt.Errorf("fs is nil"))
}
fserver := http.FileServer(fs)
ret := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
p := path.Clean("/" + r.URL.Path)
if !strings.HasPrefix(p, "/") { // should never happen after Clean above, but just being extra cautious
http.NotFound(w, r)
return
}
if !pattern.MatchString(p) {
http.NotFound(w, r)
return
}
// delegate to the regular file-serving behavior
fserver.ServeHTTP(w, r)
})
return ret
}
// DefaultIsPageFunc will return true for any request to a path with no file extension.
var DefaultIsPageFunc = func(r *http.Request) bool {
// anything without a file extension is a page
return path.Ext(path.Clean("/"+r.URL.Path)) == ""
}
// DefaultPageTemplateSource a useful default HTML template for serving pages.
var DefaultPageTemplateSource = `<!doctype html>
<html>
<head>
{{if .Title}}
<title>{{.Title}}</title>
{{else}}
<title>Vugu Dev - {{.Request.URL.Path}}</title>
{{end}}
<meta charset="utf-8"/>
{{if .MetaTags}}{{range $k, $v := .MetaTags}}
<meta name="{{$k}}" content="{{$v}}"/>
{{end}}{{end}}
{{if .CSSFiles}}{{range $f := .CSSFiles}}
<link rel="stylesheet" href="{{$f}}" />
{{end}}{{end}}
<script src="https://cdn.jsdelivr.net/npm/text-encoding@0.7.0/lib/encoding.min.js"></script> <!-- MS Edge polyfill -->
<script src="/wasm_exec.js"></script>
</head>
<body>
<div id="vugu_mount_point">
{{if .ServerRenderedOutput}}{{.ServerRenderedOutput}}{{else}}
<img style="position: absolute; top: 50%; left: 50%;" src="https://cdnjs.cloudflare.com/ajax/libs/galleriffic/2.0.1/css/loader.gif">
{{end}}
</div>
<script>
var wasmSupported = (typeof WebAssembly === "object");
if (wasmSupported) {
if (!WebAssembly.instantiateStreaming) { // polyfill
WebAssembly.instantiateStreaming = async (resp, importObject) => {
const source = await (await resp).arrayBuffer();
return await WebAssembly.instantiate(source, importObject);
};
}
const go = new Go();
WebAssembly.instantiateStreaming(fetch("/main.wasm"), go.importObject).then((result) => {
go.run(result.instance);
});
} else {
document.getElementById("vugu_mount_point").innerHTML = 'This application requires WebAssembly support. Please upgrade your browser.';
}
</script>
</body>
</html>
`
// PageHandler executes a Go template and responsds with the page.
type PageHandler struct {
Template *template.Template
TemplateDataFunc func(r *http.Request) interface{}
}
// DefaultStaticData is a map of static things added to the return value of DefaultTemplateDataFunc.
// Provides a quick and dirty way to do things like add CSS files to every page.
var DefaultStaticData = make(map[string]interface{}, 4)
// DefaultTemplateDataFunc is the default behavior for making template data. It
// returns a map with "Request" set to r and all elements of DefaultStaticData added to it.
var DefaultTemplateDataFunc = func(r *http.Request) interface{} {
ret := map[string]interface{}{
"Request": r,
}
for k, v := range DefaultStaticData {
ret[k] = v
}
return ret
}
// ServeHTTP implements http.Handler
func (h *PageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
tmplData := h.TemplateDataFunc(r)
if tmplData == nil {
http.NotFound(w, r)
return
}
err := h.Template.Execute(w, tmplData)
if err != nil {
log.Printf("Error during simplehttp.PageHandler.Template.Execute: %v", err)
}
}
// dirTimestamp finds the most recent time stamp associated with files in a folder
// TODO: we should look into file watcher stuff, better performance for large trees
func dirTimestamp(dir string) (ts time.Time, reterr error) | {
dirf, err := os.Open(dir)
if err != nil {
return ts, err
}
defer dirf.Close()
fis, err := dirf.Readdir(-1)
if err != nil {
return ts, err
}
for _, fi := range fis {
if fi.Name() == "." || fi.Name() == ".." {
continue
}
// for directories we recurse | identifier_body | |
simple-handler.go | (w, msg, 500)
return
}
}
// GOOS=js GOARCH=wasm go build -o main.wasm .
startTime = time.Now()
runCommand := func(args ...string) ([]byte, error) {
cmd := exec.Command(args[0], args[1:]...)
cmd.Dir = h.Dir
cmd.Env = append(cmd.Env, os.Environ()...)
cmd.Env = append(cmd.Env, "GOOS=js", "GOARCH=wasm")
b, err := cmd.CombinedOutput()
return b, err
}
b, err := runCommand("go", "mod", "tidy")
if err == nil {
b, err = runCommand("go", "build", "-o", fpath, ".")
}
w.Header().Set("X-Go-Build-Duration", time.Since(startTime).String())
if err != nil {
msg := fmt.Sprintf("Error from compile: %v (out path=%q); Output:\n%s", err, fpath, b)
log.Print(msg)
http.Error(w, msg, 500)
return
}
f, err = os.Open(fpath)
if err != nil {
msg := fmt.Sprintf("Error opening file after build: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
// gzip with max compression
var buf bytes.Buffer
gzw, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
n, err := io.Copy(gzw, f)
if err != nil {
msg := fmt.Sprintf("Error reading and compressing binary: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
gzw.Close()
w.Header().Set("X-Gunzipped-Size", fmt.Sprint(n))
// update cache
if buildDirTs.IsZero() {
lastBuildTime = time.Now()
} else {
lastBuildTime = buildDirTs
}
lastBuildContentGZ = buf.Bytes()
// log.Printf("GOT TO UPDATE")
h.mu.Lock()
h.lastBuildTime = lastBuildTime
h.lastBuildContentGZ = lastBuildContentGZ
h.mu.Unlock()
}
serveBuiltFile:
w.Header().Set("Content-Type", "application/wasm")
// w.Header().Set("Last-Modified", lastBuildTime.Format(http.TimeFormat)) // handled by http.ServeContent
// if client supports gzip response (the usual case), we just set the gzip header and send back
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
w.Header().Set("Content-Encoding", "gzip")
w.Header().Set("X-Gzipped-Size", fmt.Sprint(len(lastBuildContentGZ)))
http.ServeContent(w, r, h.MainWasmPath, lastBuildTime, bytes.NewReader(lastBuildContentGZ))
return
}
// no gzip, we decompress internally and send it back
gzr, _ := gzip.NewReader(bytes.NewReader(lastBuildContentGZ))
_, err = io.Copy(w, gzr)
if err != nil {
log.Print(err)
}
return
}
func (h *SimpleHandler) serveGoEnvWasmExecJs(w http.ResponseWriter, r *http.Request) {
b, err := exec.Command("go", "env", "GOROOT").CombinedOutput()
if err != nil {
http.Error(w, "failed to run `go env GOROOT`: "+err.Error(), 500)
return
}
h.wasmExecJsOnce.Do(func() {
h.wasmExecJsContent, err = ioutil.ReadFile(filepath.Join(strings.TrimSpace(string(b)), "misc/wasm/wasm_exec.js"))
if err != nil {
http.Error(w, "failed to run `go env GOROOT`: "+err.Error(), 500)
return
}
h.wasmExecJsTs = time.Now() // hack but whatever for now
})
if len(h.wasmExecJsContent) == 0 {
http.Error(w, "failed to read wasm_exec.js from local Go environment", 500)
return
}
w.Header().Set("Content-Type", "text/javascript")
http.ServeContent(w, r, "/wasm_exec.js", h.wasmExecJsTs, bytes.NewReader(h.wasmExecJsContent))
}
// FilteredFileServer is similar to the standard librarie's http.FileServer
// but the handler it returns will refuse to serve any files which don't
// match the specified regexp pattern after running through path.Clean().
// The idea is to make it easy to serve only specific kinds of
// static files from a directory. If pattern does not match a 404 will be returned.
// Be sure to include a trailing "$" if you are checking for file extensions, so it
// only matches the end of the path, e.g. "[.](css|js)$"
func FilteredFileServer(pattern *regexp.Regexp, fs http.FileSystem) http.Handler {
if pattern == nil {
panic(fmt.Errorf("pattern is nil"))
}
if fs == nil {
panic(fmt.Errorf("fs is nil"))
}
fserver := http.FileServer(fs)
ret := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
p := path.Clean("/" + r.URL.Path)
if !strings.HasPrefix(p, "/") { // should never happen after Clean above, but just being extra cautious
http.NotFound(w, r)
return
}
if !pattern.MatchString(p) {
http.NotFound(w, r)
return
}
// delegate to the regular file-serving behavior
fserver.ServeHTTP(w, r)
})
return ret
}
// DefaultIsPageFunc will return true for any request to a path with no file extension.
var DefaultIsPageFunc = func(r *http.Request) bool {
// anything without a file extension is a page
return path.Ext(path.Clean("/"+r.URL.Path)) == ""
}
// DefaultPageTemplateSource a useful default HTML template for serving pages.
var DefaultPageTemplateSource = `<!doctype html>
<html>
<head>
{{if .Title}}
<title>{{.Title}}</title>
{{else}}
<title>Vugu Dev - {{.Request.URL.Path}}</title>
{{end}}
<meta charset="utf-8"/>
{{if .MetaTags}}{{range $k, $v := .MetaTags}}
<meta name="{{$k}}" content="{{$v}}"/>
{{end}}{{end}}
{{if .CSSFiles}}{{range $f := .CSSFiles}}
<link rel="stylesheet" href="{{$f}}" />
{{end}}{{end}}
<script src="https://cdn.jsdelivr.net/npm/text-encoding@0.7.0/lib/encoding.min.js"></script> <!-- MS Edge polyfill -->
<script src="/wasm_exec.js"></script>
</head>
<body>
<div id="vugu_mount_point">
{{if .ServerRenderedOutput}}{{.ServerRenderedOutput}}{{else}}
<img style="position: absolute; top: 50%; left: 50%;" src="https://cdnjs.cloudflare.com/ajax/libs/galleriffic/2.0.1/css/loader.gif">
{{end}}
</div>
<script>
var wasmSupported = (typeof WebAssembly === "object");
if (wasmSupported) {
if (!WebAssembly.instantiateStreaming) { // polyfill
WebAssembly.instantiateStreaming = async (resp, importObject) => {
const source = await (await resp).arrayBuffer();
return await WebAssembly.instantiate(source, importObject);
};
}
const go = new Go();
WebAssembly.instantiateStreaming(fetch("/main.wasm"), go.importObject).then((result) => {
go.run(result.instance);
});
} else {
document.getElementById("vugu_mount_point").innerHTML = 'This application requires WebAssembly support. Please upgrade your browser.';
}
</script>
</body>
</html>
`
// PageHandler executes a Go template and responsds with the page.
type PageHandler struct {
Template *template.Template
TemplateDataFunc func(r *http.Request) interface{}
}
// DefaultStaticData is a map of static things added to the return value of DefaultTemplateDataFunc.
// Provides a quick and dirty way to do things like add CSS files to every page.
var DefaultStaticData = make(map[string]interface{}, 4)
// DefaultTemplateDataFunc is the default behavior for making template data. It
// returns a map with "Request" set to r and all elements of DefaultStaticData added to it.
var DefaultTemplateDataFunc = func(r *http.Request) interface{} {
ret := map[string]interface{}{
"Request": r,
}
for k, v := range DefaultStaticData {
ret[k] = v
}
return ret
}
// ServeHTTP implements http.Handler
func (h *PageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
tmplData := h.TemplateDataFunc(r)
if tmplData == nil {
http.NotFound(w, r)
return
}
err := h.Template.Execute(w, tmplData)
if err != nil {
log.Printf("Error during simplehttp.PageHandler.Template.Execute: %v", err)
}
}
// dirTimestamp finds the most recent time stamp associated with files in a folder
// TODO: we should look into file watcher stuff, better performance for large trees
func | dirTimestamp | identifier_name | |
parse_dwarf.py | <s>"
result = 0
shift = 0
i = 0
while 1:
byte = ord (s[i]); i += 1
result |= (byte & 0x7f) << shift
if byte & 0x80:
shift += 7
else:
break
return result
def read_string (f):
"read null-terminated string from <f>"
r = []
while 1:
b = f.read (1)
if b == '\x00':
break
else:
r.append (b)
return ''.join (r)
def read_struct (f, s, n):
"read a struct of size <n> with spec <s> from <f>"
r = struct.unpack (s, f.read (n))
if len(r) == 1:
return r[0]
else:
return r
def read_addr (f, psize):
"read an address of length <psize>"
if psize == 4:
return read_struct (f, '=l', 4)
elif psize == 8:
return read_struct (f, '=q', 8)
else:
raise ValueError, "unsupported pointer size"
def read_block1 (f):
return f.read (read_struct (f, '=b', 1))
def read_block2 (f):
return f.read (read_struct (f, '=h', 2))
def read_block4 (f):
return f.read (read_struct (f, '=l', 4))
def read_block (f):
return f.read (read_uleb128 (f))
def read_flag (f):
return not (f.read (1) == '\x00')
def read_data1 (f):
return read_struct (f, '=B', 1)
def read_data2 (f):
return read_struct (f, '=H', 2)
def read_data4 (f):
return read_struct (f, '=L', 4)
def read_data8 (f):
return read_struct (f, '=Q', 8)
def read_ref1 (f):
return read_struct (f, '=B', 1)
def read_ref2 (f):
return read_struct (f, '=H', 2)
def read_ref4 (f):
return read_struct (f, '=L', 4)
def read_ref_udata (f):
return read_uleb128 (f)
def read_udata (f):
return read_uleb128 (f)
def read_flag_present (f):
return True
def decode_location (x):
# interpret form
if x[0] == '#':
return decode_uleb128 (x[1:])
elif x[0] == '\x91':
# XXX signed, but we'll cheat
return decode_uleb128 (x[1:])
elif x[0] == '\x03':
# DW_OP_ADDR
x = x[1:]
if len(x) == 4:
return struct.unpack ('=l', x)[0]
elif len(x) == 8:
return struct.unpack ('=q', x)[0]
else:
return 'DW_OP_ADDR:%s' % (x.encode ('hex'))
else:
return x
form_readers = {
DW_FORM_string: read_string,
DW_FORM_data1: read_data1,
DW_FORM_data2: read_data2,
DW_FORM_data4: read_data4,
DW_FORM_data8: read_data8,
DW_FORM_ref1: read_ref1,
DW_FORM_ref2: read_ref2,
DW_FORM_ref4: read_ref4,
DW_FORM_ref4: read_ref4,
DW_FORM_ref_udata: read_ref_udata,
DW_FORM_block1: read_block1,
DW_FORM_block2: read_block2,
DW_FORM_block4: read_block4,
DW_FORM_flag: read_flag,
DW_FORM_udata: read_udata,
# XXX: HACK - I'm too lazy to figure out
# how to sign-extend these numbers.
DW_FORM_sdata: read_udata,
DW_FORM_flag_present: read_flag_present, # DWARF4
}
class section:
def __init__ (self, path, offset, size):
self.path = path
self.file = open (path, 'rb')
self.offset = offset
self.size = size
self.file.seek (offset)
def __repr__ (self):
return '<%s "%s" at 0x%x>' % (self.__class__.__name__, self.path, id(self))
class string_section (section):
def get (self, pos):
self.file.seek (self.offset + pos)
return read_string (self.file)
class abbrev_section (section):
def read_cu (self):
"read a compilation unit entry from an abbrev section"
tag = read_uleb128 (self.file)
child = ord (self.file.read (1))
attrs = []
while 1:
attr = read_uleb128 (self.file)
form = read_uleb128 (self.file)
if (attr, form) == (0, 0):
break
else:
attrs.append ((attr, form))
return tag, child, attrs
def read (self, offset):
"read abbrev table at <offset>"
self.file.seek (self.offset + offset)
abbrevs = {}
while 1:
index = read_uleb128 (self.file)
if index == 0:
break
else:
abbrevs[index] = self.read_cu()
return abbrevs
class info_section (section):
def read_all (self, abbrevs, strings):
"generate a list of compile_unit objects"
# Made this an iterator because collecting all the
# debug info from a typical python binary eats up
# about 100MB of memory! Iterating over it one
# compile_unit at a time is much more manageable
while 1:
where = self.file.tell()
if where >= self.offset + self.size:
break
else:
tree, by_pos = self.read (abbrevs, strings)
yield (compile_unit (tree, by_pos))
def read (self, abbrevs, strings):
base = self.file.tell()
self.header = struct.unpack (header_spec, self.file.read (header_size))
self.length, self.version, self.abbr_offset, self.psize = self.header
if self.version > 2:
raise ValueError
abbrev_table = abbrevs.read (self.abbr_offset)
by_pos = {}
tree = self.read_tree (abbrev_table, strings, by_pos, 0, base)
return tree, by_pos
def read_tree (self, abbrev_table, strings, by_pos, depth, base):
f = self.file
tree = []
while 1:
where = f.tell() - base
index = read_uleb128 (f)
if not index:
# null index indicates the end of a list of siblings
return tree
# NOTE: each item in a list of siblings has a 'DW_AT_sibling'
# telling you the location of the next record. This can be
# used to skip over types you don't know or care about.
attrs = {}
tag, child, attr_forms = abbrev_table[index]
for attr, form in attr_forms:
# strp & addr are special-cased because they
# need extra context...
if form == DW_FORM_strp:
x = strings.get (read_struct (f, '=l', 4))
elif form == DW_FORM_addr:
x = read_addr (f, self.psize)
else:
x = form_readers[form](f)
# special-case these, which technically require interpreters
# for the stack language. however, gcc seems to only output
# the uleb128 & sleb128 versions...
if attr in (DW_AT_data_member_location, DW_AT_location):
if isinstance (x, str):
x = decode_location (x)
elif isinstance (x, int):
pass
else:
raise ValueError ("unexpected type in DW_AT_data_member_location/DW_AT_location")
try:
attrs[ATS[attr]] = x
except KeyError:
# lots of vendor-specific extensions
attrs[hex(attr)] = x
if child:
# recursively read the list of children of this node
children = self.read_tree (abbrev_table, strings, by_pos, depth + 1, base)
else:
children = None
if TAGS.has_key (tag):
item = (TAGS[tag], where, attrs, children)
else:
item = (tag, where, attrs, children)
by_pos[where] = item
tree.append (item)
if depth == 0:
# only one element at the top level, special-case it
return item
return tree
class | compile_unit | identifier_name | |
parse_dwarf.py | read_udata (f):
return read_uleb128 (f)
def read_flag_present (f):
return True
def decode_location (x):
# interpret form
if x[0] == '#':
return decode_uleb128 (x[1:])
elif x[0] == '\x91':
# XXX signed, but we'll cheat
return decode_uleb128 (x[1:])
elif x[0] == '\x03':
# DW_OP_ADDR
x = x[1:]
if len(x) == 4:
return struct.unpack ('=l', x)[0]
elif len(x) == 8:
return struct.unpack ('=q', x)[0]
else:
return 'DW_OP_ADDR:%s' % (x.encode ('hex'))
else:
return x
form_readers = {
DW_FORM_string: read_string,
DW_FORM_data1: read_data1,
DW_FORM_data2: read_data2,
DW_FORM_data4: read_data4,
DW_FORM_data8: read_data8,
DW_FORM_ref1: read_ref1,
DW_FORM_ref2: read_ref2,
DW_FORM_ref4: read_ref4,
DW_FORM_ref4: read_ref4,
DW_FORM_ref_udata: read_ref_udata,
DW_FORM_block1: read_block1,
DW_FORM_block2: read_block2,
DW_FORM_block4: read_block4,
DW_FORM_flag: read_flag,
DW_FORM_udata: read_udata,
# XXX: HACK - I'm too lazy to figure out
# how to sign-extend these numbers.
DW_FORM_sdata: read_udata,
DW_FORM_flag_present: read_flag_present, # DWARF4
}
class section:
def __init__ (self, path, offset, size):
self.path = path
self.file = open (path, 'rb')
self.offset = offset
self.size = size
self.file.seek (offset)
def __repr__ (self):
return '<%s "%s" at 0x%x>' % (self.__class__.__name__, self.path, id(self))
class string_section (section):
def get (self, pos):
self.file.seek (self.offset + pos)
return read_string (self.file)
class abbrev_section (section):
def read_cu (self):
"read a compilation unit entry from an abbrev section"
tag = read_uleb128 (self.file)
child = ord (self.file.read (1))
attrs = []
while 1:
attr = read_uleb128 (self.file)
form = read_uleb128 (self.file)
if (attr, form) == (0, 0):
break
else:
attrs.append ((attr, form))
return tag, child, attrs
def read (self, offset):
"read abbrev table at <offset>"
self.file.seek (self.offset + offset)
abbrevs = {}
while 1:
index = read_uleb128 (self.file)
if index == 0:
break
else:
abbrevs[index] = self.read_cu()
return abbrevs
class info_section (section):
def read_all (self, abbrevs, strings):
"generate a list of compile_unit objects"
# Made this an iterator because collecting all the
# debug info from a typical python binary eats up
# about 100MB of memory! Iterating over it one
# compile_unit at a time is much more manageable
while 1:
where = self.file.tell()
if where >= self.offset + self.size:
break
else:
tree, by_pos = self.read (abbrevs, strings)
yield (compile_unit (tree, by_pos))
def read (self, abbrevs, strings):
base = self.file.tell()
self.header = struct.unpack (header_spec, self.file.read (header_size))
self.length, self.version, self.abbr_offset, self.psize = self.header
if self.version > 2:
raise ValueError
abbrev_table = abbrevs.read (self.abbr_offset)
by_pos = {}
tree = self.read_tree (abbrev_table, strings, by_pos, 0, base)
return tree, by_pos
def read_tree (self, abbrev_table, strings, by_pos, depth, base):
f = self.file
tree = []
while 1:
where = f.tell() - base
index = read_uleb128 (f)
if not index:
# null index indicates the end of a list of siblings
return tree
# NOTE: each item in a list of siblings has a 'DW_AT_sibling'
# telling you the location of the next record. This can be
# used to skip over types you don't know or care about.
attrs = {}
tag, child, attr_forms = abbrev_table[index]
for attr, form in attr_forms:
# strp & addr are special-cased because they
# need extra context...
if form == DW_FORM_strp:
x = strings.get (read_struct (f, '=l', 4))
elif form == DW_FORM_addr:
x = read_addr (f, self.psize)
else:
x = form_readers[form](f)
# special-case these, which technically require interpreters
# for the stack language. however, gcc seems to only output
# the uleb128 & sleb128 versions...
if attr in (DW_AT_data_member_location, DW_AT_location):
if isinstance (x, str):
x = decode_location (x)
elif isinstance (x, int):
pass
else:
raise ValueError ("unexpected type in DW_AT_data_member_location/DW_AT_location")
try:
attrs[ATS[attr]] = x
except KeyError:
# lots of vendor-specific extensions
attrs[hex(attr)] = x
if child:
# recursively read the list of children of this node
children = self.read_tree (abbrev_table, strings, by_pos, depth + 1, base)
else:
children = None
if TAGS.has_key (tag):
item = (TAGS[tag], where, attrs, children)
else:
item = (tag, where, attrs, children)
by_pos[where] = item
tree.append (item)
if depth == 0:
# only one element at the top level, special-case it
return item
return tree
class compile_unit:
def __init__ (self, tree, by_pos):
self.tree = tree
tag, where, self.attrs, self.children = tree
assert (tag == 'compile_unit')
self.by_pos = by_pos
def __repr__ (self):
return '<compile_unit %r at 0x%x>' % (self.attrs['name'], id(self))
def dump (self, file):
self.dump_tree (file, self.tree, 0)
def __getitem__ (self, pos):
return self.by_pos[pos]
def dump_tree (self, file, ob, depth):
tag, where, attrs, children = ob
print '%6d%s %s' % (where, ' ' * depth, tag),
for attr, data in attrs.iteritems():
print '%s:%r' % (attr, data),
print
if children:
for child in children:
self.dump_tree (file, child, depth + 1)
# http://dwarfstd.org/dwarf-2.0.0.pdf
# see pg 95 for a good example of the relationship between the different sections
# see pg 71/72 for descriptions of DW_FORMs
# location descriptions: start on page 72, hopefully we don't need to implement
# the whole stack machine thing. Most of the offsets appear to be simple
# DW_OP_plus_uconst ('#'/0x23), which encodes as a uleb128
def read (path, elf_info):
| """read (<path>, <elf_info>) => <iterator>
generate a list of <compile_unit> objects for file <path>"""
ehdr, phdrs, shdrs, syms, core_info = elf_info
info = abbrev = strings = None
for shdr in shdrs:
if shdr['name'] == '.debug_info':
info = shdr['offset'], shdr['size']
if shdr['name'] == '.debug_abbrev':
abbrev = shdr['offset'], shdr['size']
if shdr['name'] == '.debug_str':
strings = shdr['offset'], shdr['size']
if not info:
return []
else:
abbrevs = abbrev_section (path, abbrev[0], abbrev[1])
if strings:
strings = string_section (path, strings[0], strings[1])
info = info_section (path, info[0], info[1])
return info.read_all (abbrevs, strings) | identifier_body | |
parse_dwarf.py | 0
while 1:
byte = ord (f.read (1))
result |= (byte & 0x7f) << shift
if byte & 0x80:
shift += 7
else:
break
return result
def decode_uleb128 (s):
"parse an 'unsigned little-endian base 128' from string <s>"
result = 0
shift = 0
i = 0
while 1:
byte = ord (s[i]); i += 1
result |= (byte & 0x7f) << shift
if byte & 0x80:
shift += 7
else:
break
return result
def read_string (f):
"read null-terminated string from <f>"
r = []
while 1:
b = f.read (1)
if b == '\x00':
break
else:
r.append (b)
return ''.join (r)
def read_struct (f, s, n):
"read a struct of size <n> with spec <s> from <f>"
r = struct.unpack (s, f.read (n))
if len(r) == 1:
return r[0]
else:
return r
def read_addr (f, psize):
"read an address of length <psize>"
if psize == 4:
return read_struct (f, '=l', 4)
elif psize == 8:
return read_struct (f, '=q', 8)
else:
raise ValueError, "unsupported pointer size"
def read_block1 (f):
return f.read (read_struct (f, '=b', 1))
def read_block2 (f):
return f.read (read_struct (f, '=h', 2))
def read_block4 (f):
return f.read (read_struct (f, '=l', 4))
def read_block (f):
return f.read (read_uleb128 (f))
def read_flag (f):
return not (f.read (1) == '\x00')
def read_data1 (f):
return read_struct (f, '=B', 1)
def read_data2 (f):
return read_struct (f, '=H', 2)
def read_data4 (f):
return read_struct (f, '=L', 4)
def read_data8 (f):
return read_struct (f, '=Q', 8)
def read_ref1 (f):
return read_struct (f, '=B', 1)
def read_ref2 (f):
return read_struct (f, '=H', 2)
def read_ref4 (f):
return read_struct (f, '=L', 4)
def read_ref_udata (f):
return read_uleb128 (f)
def read_udata (f):
return read_uleb128 (f)
def read_flag_present (f):
return True
def decode_location (x):
# interpret form
if x[0] == '#':
return decode_uleb128 (x[1:])
elif x[0] == '\x91':
# XXX signed, but we'll cheat
return decode_uleb128 (x[1:])
elif x[0] == '\x03':
# DW_OP_ADDR
x = x[1:]
if len(x) == 4:
return struct.unpack ('=l', x)[0]
elif len(x) == 8:
return struct.unpack ('=q', x)[0]
else:
return 'DW_OP_ADDR:%s' % (x.encode ('hex'))
else:
return x
form_readers = {
DW_FORM_string: read_string,
DW_FORM_data1: read_data1,
DW_FORM_data2: read_data2,
DW_FORM_data4: read_data4,
DW_FORM_data8: read_data8,
DW_FORM_ref1: read_ref1,
DW_FORM_ref2: read_ref2,
DW_FORM_ref4: read_ref4,
DW_FORM_ref4: read_ref4,
DW_FORM_ref_udata: read_ref_udata,
DW_FORM_block1: read_block1,
DW_FORM_block2: read_block2,
DW_FORM_block4: read_block4,
DW_FORM_flag: read_flag,
DW_FORM_udata: read_udata,
# XXX: HACK - I'm too lazy to figure out
# how to sign-extend these numbers.
DW_FORM_sdata: read_udata,
DW_FORM_flag_present: read_flag_present, # DWARF4
}
class section:
def __init__ (self, path, offset, size):
self.path = path
self.file = open (path, 'rb')
self.offset = offset
self.size = size
self.file.seek (offset)
def __repr__ (self):
return '<%s "%s" at 0x%x>' % (self.__class__.__name__, self.path, id(self))
class string_section (section):
def get (self, pos):
self.file.seek (self.offset + pos)
return read_string (self.file)
class abbrev_section (section):
def read_cu (self):
"read a compilation unit entry from an abbrev section"
tag = read_uleb128 (self.file)
child = ord (self.file.read (1))
attrs = []
while 1:
attr = read_uleb128 (self.file)
form = read_uleb128 (self.file)
if (attr, form) == (0, 0):
break
else:
attrs.append ((attr, form))
return tag, child, attrs
def read (self, offset):
"read abbrev table at <offset>"
self.file.seek (self.offset + offset)
abbrevs = {}
while 1:
index = read_uleb128 (self.file)
if index == 0:
break
else:
abbrevs[index] = self.read_cu()
return abbrevs
class info_section (section):
def read_all (self, abbrevs, strings):
"generate a list of compile_unit objects"
# Made this an iterator because collecting all the
# debug info from a typical python binary eats up
# about 100MB of memory! Iterating over it one
# compile_unit at a time is much more manageable
while 1:
where = self.file.tell()
if where >= self.offset + self.size:
break
else:
tree, by_pos = self.read (abbrevs, strings)
yield (compile_unit (tree, by_pos))
def read (self, abbrevs, strings):
base = self.file.tell()
self.header = struct.unpack (header_spec, self.file.read (header_size))
self.length, self.version, self.abbr_offset, self.psize = self.header
if self.version > 2:
raise ValueError
abbrev_table = abbrevs.read (self.abbr_offset)
by_pos = {}
tree = self.read_tree (abbrev_table, strings, by_pos, 0, base)
return tree, by_pos
def read_tree (self, abbrev_table, strings, by_pos, depth, base):
f = self.file
tree = []
while 1:
where = f.tell() - base
index = read_uleb128 (f)
if not index:
# null index indicates the end of a list of siblings
return tree
# NOTE: each item in a list of siblings has a 'DW_AT_sibling'
# telling you the location of the next record. This can be
# used to skip over types you don't know or care about.
attrs = {}
tag, child, attr_forms = abbrev_table[index]
for attr, form in attr_forms:
# strp & addr are special-cased because they
# need extra context...
if form == DW_FORM_strp:
x = strings.get (read_struct (f, '=l', 4))
elif form == DW_FORM_addr:
x = read_addr (f, self.psize)
else:
x = form_readers[form](f)
# special-case these, which technically require interpreters
# for the stack language. however, gcc seems to only output
# the uleb128 & sleb128 versions...
if attr in (DW_AT_data_member_location, DW_AT_location):
if isinstance (x, str):
x = decode_location (x)
elif isinstance (x, int):
pass
else:
raise ValueError ("unexpected type in DW_AT_data_member_location/DW_AT_location")
try:
attrs[ATS[attr]] = x
except KeyError:
# lots of vendor-specific extensions
attrs[hex(attr)] = x
if child:
# recursively read the list of children of this node
children = self.read_tree (abbrev_table, strings, by_pos, depth + 1, base)
else:
| children = None | conditional_block | |
parse_dwarf.py | = 0x02
DW_AT_name = 0x03
DW_AT_ordering = 0x09
DW_AT_subscr_data = 0x0a
DW_AT_byte_size = 0x0b
DW_AT_bit_offset = 0x0c
DW_AT_bit_size = 0x0d
DW_AT_element_list = 0x0f
DW_AT_stmt_list = 0x10
DW_AT_low_pc = 0x11
DW_AT_high_pc = 0x12
DW_AT_language = 0x13
DW_AT_member = 0x14
DW_AT_discr = 0x15
DW_AT_discr_value = 0x16
DW_AT_visibility = 0x17
DW_AT_import = 0x18
DW_AT_string_length = 0x19
DW_AT_common_reference = 0x1a
DW_AT_comp_dir = 0x1b
DW_AT_const_value = 0x1c
DW_AT_containing_type = 0x1d
DW_AT_default_value = 0x1e
DW_AT_inline = 0x20
DW_AT_is_optional = 0x21
DW_AT_lower_bound = 0x22
DW_AT_producer = 0x25
DW_AT_prototyped = 0x27
DW_AT_return_addr = 0x2a
DW_AT_start_scope = 0x2c
DW_AT_stride_size = 0x2e
DW_AT_upper_bound = 0x2f
DW_AT_abstract_origin = 0x31
DW_AT_accessibility = 0x32
DW_AT_address_class = 0x33
DW_AT_artificial = 0x34
DW_AT_base_types = 0x35
DW_AT_calling_convention = 0x36
DW_AT_count = 0x37
DW_AT_data_member_location = 0x38
DW_AT_decl_column = 0x39
DW_AT_decl_file = 0x3a
DW_AT_decl_line = 0x3b
DW_AT_declaration = 0x3c
DW_AT_discr_list = 0x3d
DW_AT_encoding = 0x3e
DW_AT_external = 0x3f
DW_AT_frame_base = 0x40
DW_AT_friend = 0x41
DW_AT_identifier_case = 0x42
DW_AT_macro_info = 0x43
DW_AT_namelist_item = 0x44
DW_AT_priority = 0x45
DW_AT_segment = 0x46
DW_AT_specification = 0x47
DW_AT_static_link = 0x48
DW_AT_type = 0x49
DW_AT_use_location = 0x4a
DW_AT_variable_parameter = 0x4b
DW_AT_virtuality = 0x4c
DW_AT_vtable_elem_location = 0x4d
# these are supposed to be in DWARF3 only, but I'm seeing them
# in DWARF2 files?
DW_AT_allocated = 0x4e # DWARF3
DW_AT_associated = 0x4f # DWARF3
DW_AT_data_location = 0x50 # DWARF3
DW_AT_stride = 0x51 # DWARF3
DW_AT_entry_pc = 0x52 # DWARF3
DW_AT_use_UTF8 = 0x53 # DWARF3
DW_AT_extension = 0x54 # DWARF3
DW_AT_ranges = 0x55 # DWARF3
DW_AT_trampoline = 0x56 # DWARF3
DW_AT_call_column = 0x57 # DWARF3
DW_AT_call_file = 0x58 # DWARF3
DW_AT_call_line = 0x59 # DWARF3
DW_AT_description = 0x5a # DWARF3
# DWARF4
DW_AT_description = 0x5a
DW_AT_binary_scale = 0x5b
DW_AT_decimal_scale = 0x5c
DW_AT_small = 0x5d
DW_AT_decimal_sign = 0x5e
DW_AT_digit_count = 0x5f
DW_AT_picture_string = 0x60
DW_AT_mutable = 0x61
DW_AT_threads_scaled = 0x62
DW_AT_explicit = 0x63
DW_AT_object_pointer = 0x64
DW_AT_endianity = 0x65
DW_AT_elemental = 0x66
DW_AT_pure = 0x67
DW_AT_recursive = 0x68
DW_AT_signature = 0x69
DW_AT_main_subprogram = 0x6a
DW_AT_data_bit_offset = 0x6b
DW_AT_const_expr = 0x6c
DW_AT_enum_class = 0x6d
DW_AT_linkage_name = 0x6e
# gcc spits this one out at times
DW_AT_MIPS_linkage_name = 0x2007 # MIPS/SGI
ATS = {}
for name in dir():
if name.startswith ('DW_AT_'):
ATS[eval(name)] = name[6:]
DW_ATE_address = 0x01 | DW_ATE_boolean = 0x02
DW_ATE_complex_float = 0x03
DW_ATE_float = 0x04
DW_ATE_signed = 0x05
DW_ATE_signed_char = 0x06
DW_ATE_unsigned = 0x07
DW_ATE_unsigned_char = 0x08
DW_ATE_imaginary_float = 0x09
DW_ATE_packed_decimal = 0x0a
DW_ATE_numeric_string = 0x0b
DW_ATE_edited = 0x0c
DW_ATE_signed_fixed = 0x0d
DW_ATE_unsigned_fixed = 0x0e
ATES = {}
for name in dir():
if name.startswith ('DW_ATE_'):
ATES[eval(name)] = name[7:]
# DWARF forms
DW_FORM_addr = 0x01
DW_FORM_block2 = 0x03
DW_FORM_block4 = 0x04
DW_FORM_data2 = 0x05
DW_FORM_data4 = 0x06
DW_FORM_data8 = 0x07
DW_FORM_string = 0x08
DW_FORM_block = 0x09
DW_FORM_block1 = 0x0a
DW_FORM_data1 = 0x0b
DW_FORM_flag = 0x0c
DW_FORM_sdata = 0x0d
DW_FORM_strp = 0x0e
DW_FORM_udata = 0x0f
DW_FORM_ref_addr = 0x10
DW_FORM_ref1 = 0x11
DW_FORM_ref2 = 0x12
DW_FORM_ref4 = 0x13
DW_FORM_ref8 = 0x14
DW_FORM_ref_udata = 0x15
DW_FORM_indirect = 0x16
# DWARF 4
DW_FORM_sec_offset = 0x17
DW_FORM_exprloc = 0x18
DW_FORM_flag_present = 0x19
DW_FORM_ref_sig8 = 0x20
FORMS = {}
for name in dir():
if name.startswith ('DW_FORM_'):
FORMS[eval(name)] = name[8:]
header_spec = '=lhlb'
header_size = struct.calcsize (header_spec)
def read_uleb128 (f):
"read an 'unsigned little-endian base 128' from <f>"
result = 0
shift = 0
while 1:
byte = ord (f.read (1))
result |= (byte & 0x7f) << shift
if byte & 0x80:
shift += 7
else:
break
return result
def decode_uleb128 (s):
"parse an 'unsigned little-endian base 128' from string <s>"
result = 0
shift = 0
i = 0
while 1:
byte = ord (s[i]); i += 1
result |= (byte & 0x7f) << shift
if byte | random_line_split | |
report.go | FailReasonSummary `json:"failSummary"`
}
type NodeMap map[string]*framework.NodeInfo
type ClusterCapacityNodeResult struct {
NodeName string `json:"nodeName"`
Labels map[string]string `json:"labels"`
PodCount int `json:"podCount"`
Allocatable *framework.Resource `json:"allocatable"`
Requested *framework.Resource `json:"requested"`
Limits *framework.Resource `json:"limits"`
}
type FailReasonSummary struct {
Reason string `json:"reason"`
Count int `json:"count"`
}
type Resources struct {
PrimaryResources v1.ResourceList `json:"primaryResources"`
ScalarResources map[v1.ResourceName]int64 `json:"scalarResources"`
}
type Requirements struct {
PodName string `json:"podName"`
Resources *Resources `json:"resources"`
Limits *Resources `json:"limits"`
NodeSelectors map[string]string `json:"nodeSelectors"`
}
type ClusterCapacityReviewScheduleFailReason struct {
FailType string `json:"failType"`
FailMessage string `json:"failMessage"`
}
func getMainFailReason(message string) *ClusterCapacityReviewScheduleFailReason {
slicedMessage := strings.Split(message, "\n")
colon := strings.Index(slicedMessage[0], ":")
fail := &ClusterCapacityReviewScheduleFailReason{
FailType: slicedMessage[0][:colon],
FailMessage: strings.Trim(slicedMessage[0][colon+1:], " "),
}
return fail
}
func getResourceRequest(pod *v1.Pod) *Resources {
result := newResources()
for _, container := range pod.Spec.Containers {
appendResources(result, container.Resources.Requests)
}
return result
}
func getResourceLimit(pod *v1.Pod) *Resources {
result := newResources()
for _, container := range pod.Spec.Containers {
appendResources(result, container.Resources.Limits) | return result
}
func newResources() *Resources {
return &Resources{
PrimaryResources: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): *resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceName(v1.ResourceMemory): *resource.NewQuantity(0, resource.BinarySI),
v1.ResourceName(v1.ResourceEphemeralStorage): *resource.NewQuantity(0, resource.BinarySI),
v1.ResourceName(ResourceNvidiaGPU): *resource.NewMilliQuantity(0, resource.DecimalSI),
},
}
}
func appendResources(dest *Resources, src v1.ResourceList) {
for rName, rQuantity := range src {
switch rName {
case v1.ResourceMemory:
rQuantity.Add(*(dest.PrimaryResources.Memory()))
dest.PrimaryResources[v1.ResourceMemory] = rQuantity
case v1.ResourceCPU:
rQuantity.Add(*(dest.PrimaryResources.Cpu()))
dest.PrimaryResources[v1.ResourceCPU] = rQuantity
case v1.ResourceEphemeralStorage:
rQuantity.Add(*(dest.PrimaryResources.StorageEphemeral()))
dest.PrimaryResources[v1.ResourceEphemeralStorage] = rQuantity
case v1.ResourceStorage:
rQuantity.Add(*(dest.PrimaryResources.Storage()))
dest.PrimaryResources[v1.ResourceStorage] = rQuantity
//case v1.ResourceNvidiaGPU:
// rQuantity.Add(*(result.PrimaryResources.NvidiaGPU()))
// result.PrimaryResources[v1.ResourceNvidiaGPU] = rQuantity
default:
if schedutil.IsScalarResourceName(rName) {
// Lazily allocate this map only if required.
if dest.ScalarResources == nil {
dest.ScalarResources = map[v1.ResourceName]int64{}
}
dest.ScalarResources[rName] += rQuantity.Value()
}
}
}
}
func parseNodesReview(nodes NodeMap) []*ClusterCapacityNodeResult {
// sort nodes by name
nodeNames := make([]string, len(nodes), len(nodes))
nodeIdx := 0
for key, _ := range nodes {
nodeNames[nodeIdx] = key
nodeIdx++
}
sort.Strings(nodeNames)
result := make([]*ClusterCapacityNodeResult, len(nodes), len(nodes))
for i, key := range nodeNames {
node := nodes[key]
limits := newResources()
for _, pod := range node.Pods {
appendResources(limits, getResourceLimit(pod.Pod).PrimaryResources)
}
result[i] = &ClusterCapacityNodeResult{
NodeName: key,
Labels: node.Node().Labels,
PodCount: len(node.Pods),
Allocatable: node.Allocatable,
Requested: node.Requested,
Limits: &framework.Resource{
MilliCPU: limits.PrimaryResources.Cpu().MilliValue(),
Memory: limits.PrimaryResources.Memory().Value(),
EphemeralStorage: limits.PrimaryResources.StorageEphemeral().Value(),
ScalarResources: limits.ScalarResources,
},
}
}
return result
}
func parsePodsReview(templatePods []*v1.Pod, status Status) []*ClusterCapacityPodResult {
results := map[string]*ClusterCapacityPodResult{}
for _, tmpl := range templatePods {
results[tmpl.Name] = &ClusterCapacityPodResult{
ReplicasOnNodes: PodReplicaCount{},
PodName: tmpl.Name,
}
}
for _, pod := range status.Pods {
tmplName, tFound := pod.ObjectMeta.Annotations[podTemplate]
if !tFound {
log.Fatal(fmt.Errorf("pod template annotation missing"))
}
result, rFound := results[tmplName]
if !rFound {
log.Fatal(fmt.Errorf("unknown pod template: %s", tmplName))
}
result.ReplicasOnNodes[pod.Spec.NodeName]++
}
resultSlc := make([]*ClusterCapacityPodResult, 0)
for _, v := range results {
resultSlc = append(resultSlc, v)
}
return resultSlc
}
func getPodsRequirements(pods []*v1.Pod) []*Requirements {
result := make([]*Requirements, 0)
for _, pod := range pods {
podRequirements := &Requirements{
PodName: pod.Name,
Resources: getResourceRequest(pod),
Limits: getResourceLimit(pod),
NodeSelectors: pod.Spec.NodeSelector,
}
result = append(result, podRequirements)
}
return result
}
func deepCopyPods(in []*v1.Pod, out []v1.Pod) {
for i, pod := range in {
out[i] = *pod.DeepCopy()
}
}
func getReviewSpec(podTemplates []*v1.Pod) ClusterCapacityReviewSpec {
podCopies := make([]v1.Pod, len(podTemplates))
deepCopyPods(podTemplates, podCopies)
return ClusterCapacityReviewSpec{
Templates: podCopies,
PodRequirements: getPodsRequirements(podTemplates),
}
}
func getReviewStatus(pods []*v1.Pod, nodes NodeMap, status Status) ClusterCapacityReviewStatus {
return ClusterCapacityReviewStatus{
CreationTimestamp: time.Now(),
Replicas: int32(len(status.Pods)),
FailReason: getMainFailReason(status.StopReason),
Pods: parsePodsReview(pods, status),
Nodes: parseNodesReview(nodes),
}
}
func GetReport(pods []*v1.Pod, nodes NodeMap, status Status) *ClusterCapacityReview {
return &ClusterCapacityReview{
Spec: getReviewSpec(pods),
Status: getReviewStatus(pods, nodes, status),
}
}
func instancesSum(replicasOnNodes PodReplicaCount) int {
result := 0
for _, v := range replicasOnNodes {
result += v
}
return result
}
func clusterCapacityReviewPrettyPrint(r *ClusterCapacityReview, nodeLabels []string, verbose bool) {
if verbose {
fmt.Println("========== Simulation spec")
for _, req := range r.Spec.PodRequirements {
fmt.Printf("%v\n", req.PodName)
fmt.Printf("\trequests:\n")
printResources(req.Resources)
fmt.Printf("\tlimits:\n")
printResources(req.Limits)
if req.NodeSelectors != nil {
fmt.Printf("\t- NodeSelector: %v\n", labels.SelectorFromSet(labels.Set(req.NodeSelectors)).String())
}
fmt.Println("\n========== Simulation result")
}
}
for _, pod := range r.Status.Pods {
if verbose {
fmt.Printf("The cluster can schedule %v instance(s) of the pod %v.\n", instancesSum(pod.ReplicasOnNodes), pod.PodName)
} else {
fmt.Printf("%v\n", instancesSum(pod.ReplicasOnNodes))
}
}
if verbose {
fmt.Printf("\nTermination reason: %v: %v\n", r.Status.FailReason.FailType, r.Status.FailReason.FailMessage)
}
if verbose && r.Status.Replicas > 0 {
for _, pod := range r.Status.Pods {
if pod.FailSummary != nil {
fmt.Printf("fit failure summary on nodes: ")
for _, fs := range pod.FailSummary {
fmt.Printf("%v (%v), ", fs.Reason, fs.Count)
}
fmt.Printf("\n")
}
}
fmt.Printf("\nPod distribution among nodes:\n")
for _, pod := range r.Status.Pods {
fmt | } | random_line_split |
report.go | Resources = map[v1.ResourceName]int64{}
}
dest.ScalarResources[rName] += rQuantity.Value()
}
}
}
}
func parseNodesReview(nodes NodeMap) []*ClusterCapacityNodeResult {
// sort nodes by name
nodeNames := make([]string, len(nodes), len(nodes))
nodeIdx := 0
for key, _ := range nodes {
nodeNames[nodeIdx] = key
nodeIdx++
}
sort.Strings(nodeNames)
result := make([]*ClusterCapacityNodeResult, len(nodes), len(nodes))
for i, key := range nodeNames {
node := nodes[key]
limits := newResources()
for _, pod := range node.Pods {
appendResources(limits, getResourceLimit(pod.Pod).PrimaryResources)
}
result[i] = &ClusterCapacityNodeResult{
NodeName: key,
Labels: node.Node().Labels,
PodCount: len(node.Pods),
Allocatable: node.Allocatable,
Requested: node.Requested,
Limits: &framework.Resource{
MilliCPU: limits.PrimaryResources.Cpu().MilliValue(),
Memory: limits.PrimaryResources.Memory().Value(),
EphemeralStorage: limits.PrimaryResources.StorageEphemeral().Value(),
ScalarResources: limits.ScalarResources,
},
}
}
return result
}
func parsePodsReview(templatePods []*v1.Pod, status Status) []*ClusterCapacityPodResult {
results := map[string]*ClusterCapacityPodResult{}
for _, tmpl := range templatePods {
results[tmpl.Name] = &ClusterCapacityPodResult{
ReplicasOnNodes: PodReplicaCount{},
PodName: tmpl.Name,
}
}
for _, pod := range status.Pods {
tmplName, tFound := pod.ObjectMeta.Annotations[podTemplate]
if !tFound {
log.Fatal(fmt.Errorf("pod template annotation missing"))
}
result, rFound := results[tmplName]
if !rFound {
log.Fatal(fmt.Errorf("unknown pod template: %s", tmplName))
}
result.ReplicasOnNodes[pod.Spec.NodeName]++
}
resultSlc := make([]*ClusterCapacityPodResult, 0)
for _, v := range results {
resultSlc = append(resultSlc, v)
}
return resultSlc
}
func getPodsRequirements(pods []*v1.Pod) []*Requirements {
result := make([]*Requirements, 0)
for _, pod := range pods {
podRequirements := &Requirements{
PodName: pod.Name,
Resources: getResourceRequest(pod),
Limits: getResourceLimit(pod),
NodeSelectors: pod.Spec.NodeSelector,
}
result = append(result, podRequirements)
}
return result
}
func deepCopyPods(in []*v1.Pod, out []v1.Pod) {
for i, pod := range in {
out[i] = *pod.DeepCopy()
}
}
func getReviewSpec(podTemplates []*v1.Pod) ClusterCapacityReviewSpec {
podCopies := make([]v1.Pod, len(podTemplates))
deepCopyPods(podTemplates, podCopies)
return ClusterCapacityReviewSpec{
Templates: podCopies,
PodRequirements: getPodsRequirements(podTemplates),
}
}
func getReviewStatus(pods []*v1.Pod, nodes NodeMap, status Status) ClusterCapacityReviewStatus {
return ClusterCapacityReviewStatus{
CreationTimestamp: time.Now(),
Replicas: int32(len(status.Pods)),
FailReason: getMainFailReason(status.StopReason),
Pods: parsePodsReview(pods, status),
Nodes: parseNodesReview(nodes),
}
}
func GetReport(pods []*v1.Pod, nodes NodeMap, status Status) *ClusterCapacityReview {
return &ClusterCapacityReview{
Spec: getReviewSpec(pods),
Status: getReviewStatus(pods, nodes, status),
}
}
func instancesSum(replicasOnNodes PodReplicaCount) int {
result := 0
for _, v := range replicasOnNodes {
result += v
}
return result
}
func clusterCapacityReviewPrettyPrint(r *ClusterCapacityReview, nodeLabels []string, verbose bool) {
if verbose {
fmt.Println("========== Simulation spec")
for _, req := range r.Spec.PodRequirements {
fmt.Printf("%v\n", req.PodName)
fmt.Printf("\trequests:\n")
printResources(req.Resources)
fmt.Printf("\tlimits:\n")
printResources(req.Limits)
if req.NodeSelectors != nil {
fmt.Printf("\t- NodeSelector: %v\n", labels.SelectorFromSet(labels.Set(req.NodeSelectors)).String())
}
fmt.Println("\n========== Simulation result")
}
}
for _, pod := range r.Status.Pods {
if verbose {
fmt.Printf("The cluster can schedule %v instance(s) of the pod %v.\n", instancesSum(pod.ReplicasOnNodes), pod.PodName)
} else {
fmt.Printf("%v\n", instancesSum(pod.ReplicasOnNodes))
}
}
if verbose {
fmt.Printf("\nTermination reason: %v: %v\n", r.Status.FailReason.FailType, r.Status.FailReason.FailMessage)
}
if verbose && r.Status.Replicas > 0 {
for _, pod := range r.Status.Pods {
if pod.FailSummary != nil {
fmt.Printf("fit failure summary on nodes: ")
for _, fs := range pod.FailSummary {
fmt.Printf("%v (%v), ", fs.Reason, fs.Count)
}
fmt.Printf("\n")
}
}
fmt.Printf("\nPod distribution among nodes:\n")
for _, pod := range r.Status.Pods {
fmt.Printf("%v\n", pod.PodName)
for node, count := range pod.ReplicasOnNodes {
fmt.Printf("\t- %v: %v instance(s)\n", node, count)
}
}
printNodeCapacity(r.Status.Nodes)
printClusterCapacity("========== Cluster capacity", r.Status.Nodes)
printLabeledCapacity(nodeLabels, r.Status.Nodes)
}
}
func printLabeledCapacity(nodeLabels []string, nodes []*ClusterCapacityNodeResult) {
labeledResults := map[string][]*ClusterCapacityNodeResult{}
for _, node := range nodes {
for _, label := range nodeLabels {
value, ok := node.Labels[label]
if !ok {
continue
}
resultName := fmt.Sprintf("%s:%s", label, value)
labeledResults[resultName] = append(labeledResults[resultName], node)
}
}
for label, results := range labeledResults {
printClusterCapacity(label, results)
}
}
func printClusterCapacity(title string, nodes []*ClusterCapacityNodeResult) {
var (
clusterCPUAllocatable, clusterCPURequested, clusterCPULimit,
clusterMemoryAllocatable, clusterMemoryRequested, clusterMemoryLimit,
clusterStorageAllocatable, clusterStorageRequested, clusterStorageLimit int64
)
for _, node := range nodes {
clusterCPUAllocatable += node.Allocatable.MilliCPU
clusterCPURequested += node.Requested.MilliCPU
clusterCPULimit += node.Limits.MilliCPU
clusterMemoryAllocatable += node.Allocatable.Memory
clusterMemoryRequested += node.Requested.Memory
clusterMemoryLimit += node.Limits.Memory
clusterStorageAllocatable += node.Allocatable.EphemeralStorage
clusterStorageRequested += node.Requested.EphemeralStorage
clusterStorageLimit += node.Limits.EphemeralStorage
}
fmt.Printf("\n%s:\n", title)
printCapacity(clusterCPUAllocatable, clusterCPURequested, clusterCPULimit, "CPU", "m")
printCapacity(clusterMemoryAllocatable, clusterMemoryRequested, clusterMemoryLimit, "Memory", "bytes")
printCapacity(clusterStorageAllocatable, clusterStorageRequested, clusterStorageLimit, "EphemeralStorage", "bytes")
}
func printNodeCapacity(nodes []*ClusterCapacityNodeResult) {
fmt.Printf("\n========== Node capacity\n")
for _, node := range nodes {
fmt.Printf("%s\n", node.NodeName)
fmt.Printf("\t- pod count: %v\n", node.PodCount)
printCapacity(node.Allocatable.MilliCPU, node.Requested.MilliCPU, node.Limits.MilliCPU, "CPU", "m")
printCapacity(node.Allocatable.Memory, node.Requested.Memory, node.Limits.Memory, "Memory", "bytes")
printCapacity(node.Allocatable.EphemeralStorage, node.Requested.EphemeralStorage, node.Limits.EphemeralStorage, "EphemeralStorage", "bytes")
}
}
func printCapacity(allocatable, requested, limit int64, label, unit string) | {
cap := float64(requested) / float64(allocatable) * 100
fmt.Printf("\t- %s requested: %v%s/%v%s %.2f%% allocated\n",
label, requested, unit, allocatable, unit, cap)
commit := float64(limit) / float64(allocatable) * 100
fmt.Printf("\t- %s limited: %v%s/%v%s %.2f%% allocated\n",
label, limit, unit, allocatable, unit, commit)
} | identifier_body | |
report.go | FailReasonSummary `json:"failSummary"`
}
type NodeMap map[string]*framework.NodeInfo
type ClusterCapacityNodeResult struct {
NodeName string `json:"nodeName"`
Labels map[string]string `json:"labels"`
PodCount int `json:"podCount"`
Allocatable *framework.Resource `json:"allocatable"`
Requested *framework.Resource `json:"requested"`
Limits *framework.Resource `json:"limits"`
}
type FailReasonSummary struct {
Reason string `json:"reason"`
Count int `json:"count"`
}
type Resources struct {
PrimaryResources v1.ResourceList `json:"primaryResources"`
ScalarResources map[v1.ResourceName]int64 `json:"scalarResources"`
}
type Requirements struct {
PodName string `json:"podName"`
Resources *Resources `json:"resources"`
Limits *Resources `json:"limits"`
NodeSelectors map[string]string `json:"nodeSelectors"`
}
type ClusterCapacityReviewScheduleFailReason struct {
FailType string `json:"failType"`
FailMessage string `json:"failMessage"`
}
func getMainFailReason(message string) *ClusterCapacityReviewScheduleFailReason {
slicedMessage := strings.Split(message, "\n")
colon := strings.Index(slicedMessage[0], ":")
fail := &ClusterCapacityReviewScheduleFailReason{
FailType: slicedMessage[0][:colon],
FailMessage: strings.Trim(slicedMessage[0][colon+1:], " "),
}
return fail
}
func getResourceRequest(pod *v1.Pod) *Resources {
result := newResources()
for _, container := range pod.Spec.Containers {
appendResources(result, container.Resources.Requests)
}
return result
}
func getResourceLimit(pod *v1.Pod) *Resources {
result := newResources()
for _, container := range pod.Spec.Containers {
appendResources(result, container.Resources.Limits)
}
return result
}
func newResources() *Resources {
return &Resources{
PrimaryResources: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): *resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceName(v1.ResourceMemory): *resource.NewQuantity(0, resource.BinarySI),
v1.ResourceName(v1.ResourceEphemeralStorage): *resource.NewQuantity(0, resource.BinarySI),
v1.ResourceName(ResourceNvidiaGPU): *resource.NewMilliQuantity(0, resource.DecimalSI),
},
}
}
func appendResources(dest *Resources, src v1.ResourceList) {
for rName, rQuantity := range src {
switch rName {
case v1.ResourceMemory:
rQuantity.Add(*(dest.PrimaryResources.Memory()))
dest.PrimaryResources[v1.ResourceMemory] = rQuantity
case v1.ResourceCPU:
rQuantity.Add(*(dest.PrimaryResources.Cpu()))
dest.PrimaryResources[v1.ResourceCPU] = rQuantity
case v1.ResourceEphemeralStorage:
rQuantity.Add(*(dest.PrimaryResources.StorageEphemeral()))
dest.PrimaryResources[v1.ResourceEphemeralStorage] = rQuantity
case v1.ResourceStorage:
rQuantity.Add(*(dest.PrimaryResources.Storage()))
dest.PrimaryResources[v1.ResourceStorage] = rQuantity
//case v1.ResourceNvidiaGPU:
// rQuantity.Add(*(result.PrimaryResources.NvidiaGPU()))
// result.PrimaryResources[v1.ResourceNvidiaGPU] = rQuantity
default:
if schedutil.IsScalarResourceName(rName) {
// Lazily allocate this map only if required.
if dest.ScalarResources == nil {
dest.ScalarResources = map[v1.ResourceName]int64{}
}
dest.ScalarResources[rName] += rQuantity.Value()
}
}
}
}
func parseNodesReview(nodes NodeMap) []*ClusterCapacityNodeResult {
// sort nodes by name
nodeNames := make([]string, len(nodes), len(nodes))
nodeIdx := 0
for key, _ := range nodes {
nodeNames[nodeIdx] = key
nodeIdx++
}
sort.Strings(nodeNames)
result := make([]*ClusterCapacityNodeResult, len(nodes), len(nodes))
for i, key := range nodeNames {
node := nodes[key]
limits := newResources()
for _, pod := range node.Pods {
appendResources(limits, getResourceLimit(pod.Pod).PrimaryResources)
}
result[i] = &ClusterCapacityNodeResult{
NodeName: key,
Labels: node.Node().Labels,
PodCount: len(node.Pods),
Allocatable: node.Allocatable,
Requested: node.Requested,
Limits: &framework.Resource{
MilliCPU: limits.PrimaryResources.Cpu().MilliValue(),
Memory: limits.PrimaryResources.Memory().Value(),
EphemeralStorage: limits.PrimaryResources.StorageEphemeral().Value(),
ScalarResources: limits.ScalarResources,
},
}
}
return result
}
func parsePodsReview(templatePods []*v1.Pod, status Status) []*ClusterCapacityPodResult {
results := map[string]*ClusterCapacityPodResult{}
for _, tmpl := range templatePods {
results[tmpl.Name] = &ClusterCapacityPodResult{
ReplicasOnNodes: PodReplicaCount{},
PodName: tmpl.Name,
}
}
for _, pod := range status.Pods {
tmplName, tFound := pod.ObjectMeta.Annotations[podTemplate]
if !tFound {
log.Fatal(fmt.Errorf("pod template annotation missing"))
}
result, rFound := results[tmplName]
if !rFound {
log.Fatal(fmt.Errorf("unknown pod template: %s", tmplName))
}
result.ReplicasOnNodes[pod.Spec.NodeName]++
}
resultSlc := make([]*ClusterCapacityPodResult, 0)
for _, v := range results |
return resultSlc
}
func getPodsRequirements(pods []*v1.Pod) []*Requirements {
result := make([]*Requirements, 0)
for _, pod := range pods {
podRequirements := &Requirements{
PodName: pod.Name,
Resources: getResourceRequest(pod),
Limits: getResourceLimit(pod),
NodeSelectors: pod.Spec.NodeSelector,
}
result = append(result, podRequirements)
}
return result
}
func deepCopyPods(in []*v1.Pod, out []v1.Pod) {
for i, pod := range in {
out[i] = *pod.DeepCopy()
}
}
func getReviewSpec(podTemplates []*v1.Pod) ClusterCapacityReviewSpec {
podCopies := make([]v1.Pod, len(podTemplates))
deepCopyPods(podTemplates, podCopies)
return ClusterCapacityReviewSpec{
Templates: podCopies,
PodRequirements: getPodsRequirements(podTemplates),
}
}
func getReviewStatus(pods []*v1.Pod, nodes NodeMap, status Status) ClusterCapacityReviewStatus {
return ClusterCapacityReviewStatus{
CreationTimestamp: time.Now(),
Replicas: int32(len(status.Pods)),
FailReason: getMainFailReason(status.StopReason),
Pods: parsePodsReview(pods, status),
Nodes: parseNodesReview(nodes),
}
}
func GetReport(pods []*v1.Pod, nodes NodeMap, status Status) *ClusterCapacityReview {
return &ClusterCapacityReview{
Spec: getReviewSpec(pods),
Status: getReviewStatus(pods, nodes, status),
}
}
func instancesSum(replicasOnNodes PodReplicaCount) int {
result := 0
for _, v := range replicasOnNodes {
result += v
}
return result
}
func clusterCapacityReviewPrettyPrint(r *ClusterCapacityReview, nodeLabels []string, verbose bool) {
if verbose {
fmt.Println("========== Simulation spec")
for _, req := range r.Spec.PodRequirements {
fmt.Printf("%v\n", req.PodName)
fmt.Printf("\trequests:\n")
printResources(req.Resources)
fmt.Printf("\tlimits:\n")
printResources(req.Limits)
if req.NodeSelectors != nil {
fmt.Printf("\t- NodeSelector: %v\n", labels.SelectorFromSet(labels.Set(req.NodeSelectors)).String())
}
fmt.Println("\n========== Simulation result")
}
}
for _, pod := range r.Status.Pods {
if verbose {
fmt.Printf("The cluster can schedule %v instance(s) of the pod %v.\n", instancesSum(pod.ReplicasOnNodes), pod.PodName)
} else {
fmt.Printf("%v\n", instancesSum(pod.ReplicasOnNodes))
}
}
if verbose {
fmt.Printf("\nTermination reason: %v: %v\n", r.Status.FailReason.FailType, r.Status.FailReason.FailMessage)
}
if verbose && r.Status.Replicas > 0 {
for _, pod := range r.Status.Pods {
if pod.FailSummary != nil {
fmt.Printf("fit failure summary on nodes: ")
for _, fs := range pod.FailSummary {
fmt.Printf("%v (%v), ", fs.Reason, fs.Count)
}
fmt.Printf("\n")
}
}
fmt.Printf("\nPod distribution among nodes:\n")
for _, pod := range r.Status.Pods {
| {
resultSlc = append(resultSlc, v)
} | conditional_block |
report.go | FailReasonSummary `json:"failSummary"`
}
type NodeMap map[string]*framework.NodeInfo
type ClusterCapacityNodeResult struct {
NodeName string `json:"nodeName"`
Labels map[string]string `json:"labels"`
PodCount int `json:"podCount"`
Allocatable *framework.Resource `json:"allocatable"`
Requested *framework.Resource `json:"requested"`
Limits *framework.Resource `json:"limits"`
}
type FailReasonSummary struct {
Reason string `json:"reason"`
Count int `json:"count"`
}
type Resources struct {
PrimaryResources v1.ResourceList `json:"primaryResources"`
ScalarResources map[v1.ResourceName]int64 `json:"scalarResources"`
}
type Requirements struct {
PodName string `json:"podName"`
Resources *Resources `json:"resources"`
Limits *Resources `json:"limits"`
NodeSelectors map[string]string `json:"nodeSelectors"`
}
type ClusterCapacityReviewScheduleFailReason struct {
FailType string `json:"failType"`
FailMessage string `json:"failMessage"`
}
func getMainFailReason(message string) *ClusterCapacityReviewScheduleFailReason {
slicedMessage := strings.Split(message, "\n")
colon := strings.Index(slicedMessage[0], ":")
fail := &ClusterCapacityReviewScheduleFailReason{
FailType: slicedMessage[0][:colon],
FailMessage: strings.Trim(slicedMessage[0][colon+1:], " "),
}
return fail
}
func | (pod *v1.Pod) *Resources {
result := newResources()
for _, container := range pod.Spec.Containers {
appendResources(result, container.Resources.Requests)
}
return result
}
func getResourceLimit(pod *v1.Pod) *Resources {
result := newResources()
for _, container := range pod.Spec.Containers {
appendResources(result, container.Resources.Limits)
}
return result
}
func newResources() *Resources {
return &Resources{
PrimaryResources: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): *resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceName(v1.ResourceMemory): *resource.NewQuantity(0, resource.BinarySI),
v1.ResourceName(v1.ResourceEphemeralStorage): *resource.NewQuantity(0, resource.BinarySI),
v1.ResourceName(ResourceNvidiaGPU): *resource.NewMilliQuantity(0, resource.DecimalSI),
},
}
}
func appendResources(dest *Resources, src v1.ResourceList) {
for rName, rQuantity := range src {
switch rName {
case v1.ResourceMemory:
rQuantity.Add(*(dest.PrimaryResources.Memory()))
dest.PrimaryResources[v1.ResourceMemory] = rQuantity
case v1.ResourceCPU:
rQuantity.Add(*(dest.PrimaryResources.Cpu()))
dest.PrimaryResources[v1.ResourceCPU] = rQuantity
case v1.ResourceEphemeralStorage:
rQuantity.Add(*(dest.PrimaryResources.StorageEphemeral()))
dest.PrimaryResources[v1.ResourceEphemeralStorage] = rQuantity
case v1.ResourceStorage:
rQuantity.Add(*(dest.PrimaryResources.Storage()))
dest.PrimaryResources[v1.ResourceStorage] = rQuantity
//case v1.ResourceNvidiaGPU:
// rQuantity.Add(*(result.PrimaryResources.NvidiaGPU()))
// result.PrimaryResources[v1.ResourceNvidiaGPU] = rQuantity
default:
if schedutil.IsScalarResourceName(rName) {
// Lazily allocate this map only if required.
if dest.ScalarResources == nil {
dest.ScalarResources = map[v1.ResourceName]int64{}
}
dest.ScalarResources[rName] += rQuantity.Value()
}
}
}
}
func parseNodesReview(nodes NodeMap) []*ClusterCapacityNodeResult {
// sort nodes by name
nodeNames := make([]string, len(nodes), len(nodes))
nodeIdx := 0
for key, _ := range nodes {
nodeNames[nodeIdx] = key
nodeIdx++
}
sort.Strings(nodeNames)
result := make([]*ClusterCapacityNodeResult, len(nodes), len(nodes))
for i, key := range nodeNames {
node := nodes[key]
limits := newResources()
for _, pod := range node.Pods {
appendResources(limits, getResourceLimit(pod.Pod).PrimaryResources)
}
result[i] = &ClusterCapacityNodeResult{
NodeName: key,
Labels: node.Node().Labels,
PodCount: len(node.Pods),
Allocatable: node.Allocatable,
Requested: node.Requested,
Limits: &framework.Resource{
MilliCPU: limits.PrimaryResources.Cpu().MilliValue(),
Memory: limits.PrimaryResources.Memory().Value(),
EphemeralStorage: limits.PrimaryResources.StorageEphemeral().Value(),
ScalarResources: limits.ScalarResources,
},
}
}
return result
}
func parsePodsReview(templatePods []*v1.Pod, status Status) []*ClusterCapacityPodResult {
results := map[string]*ClusterCapacityPodResult{}
for _, tmpl := range templatePods {
results[tmpl.Name] = &ClusterCapacityPodResult{
ReplicasOnNodes: PodReplicaCount{},
PodName: tmpl.Name,
}
}
for _, pod := range status.Pods {
tmplName, tFound := pod.ObjectMeta.Annotations[podTemplate]
if !tFound {
log.Fatal(fmt.Errorf("pod template annotation missing"))
}
result, rFound := results[tmplName]
if !rFound {
log.Fatal(fmt.Errorf("unknown pod template: %s", tmplName))
}
result.ReplicasOnNodes[pod.Spec.NodeName]++
}
resultSlc := make([]*ClusterCapacityPodResult, 0)
for _, v := range results {
resultSlc = append(resultSlc, v)
}
return resultSlc
}
func getPodsRequirements(pods []*v1.Pod) []*Requirements {
result := make([]*Requirements, 0)
for _, pod := range pods {
podRequirements := &Requirements{
PodName: pod.Name,
Resources: getResourceRequest(pod),
Limits: getResourceLimit(pod),
NodeSelectors: pod.Spec.NodeSelector,
}
result = append(result, podRequirements)
}
return result
}
func deepCopyPods(in []*v1.Pod, out []v1.Pod) {
for i, pod := range in {
out[i] = *pod.DeepCopy()
}
}
func getReviewSpec(podTemplates []*v1.Pod) ClusterCapacityReviewSpec {
podCopies := make([]v1.Pod, len(podTemplates))
deepCopyPods(podTemplates, podCopies)
return ClusterCapacityReviewSpec{
Templates: podCopies,
PodRequirements: getPodsRequirements(podTemplates),
}
}
func getReviewStatus(pods []*v1.Pod, nodes NodeMap, status Status) ClusterCapacityReviewStatus {
return ClusterCapacityReviewStatus{
CreationTimestamp: time.Now(),
Replicas: int32(len(status.Pods)),
FailReason: getMainFailReason(status.StopReason),
Pods: parsePodsReview(pods, status),
Nodes: parseNodesReview(nodes),
}
}
func GetReport(pods []*v1.Pod, nodes NodeMap, status Status) *ClusterCapacityReview {
return &ClusterCapacityReview{
Spec: getReviewSpec(pods),
Status: getReviewStatus(pods, nodes, status),
}
}
func instancesSum(replicasOnNodes PodReplicaCount) int {
result := 0
for _, v := range replicasOnNodes {
result += v
}
return result
}
func clusterCapacityReviewPrettyPrint(r *ClusterCapacityReview, nodeLabels []string, verbose bool) {
if verbose {
fmt.Println("========== Simulation spec")
for _, req := range r.Spec.PodRequirements {
fmt.Printf("%v\n", req.PodName)
fmt.Printf("\trequests:\n")
printResources(req.Resources)
fmt.Printf("\tlimits:\n")
printResources(req.Limits)
if req.NodeSelectors != nil {
fmt.Printf("\t- NodeSelector: %v\n", labels.SelectorFromSet(labels.Set(req.NodeSelectors)).String())
}
fmt.Println("\n========== Simulation result")
}
}
for _, pod := range r.Status.Pods {
if verbose {
fmt.Printf("The cluster can schedule %v instance(s) of the pod %v.\n", instancesSum(pod.ReplicasOnNodes), pod.PodName)
} else {
fmt.Printf("%v\n", instancesSum(pod.ReplicasOnNodes))
}
}
if verbose {
fmt.Printf("\nTermination reason: %v: %v\n", r.Status.FailReason.FailType, r.Status.FailReason.FailMessage)
}
if verbose && r.Status.Replicas > 0 {
for _, pod := range r.Status.Pods {
if pod.FailSummary != nil {
fmt.Printf("fit failure summary on nodes: ")
for _, fs := range pod.FailSummary {
fmt.Printf("%v (%v), ", fs.Reason, fs.Count)
}
fmt.Printf("\n")
}
}
fmt.Printf("\nPod distribution among nodes:\n")
for _, pod := range r.Status.Pods {
| getResourceRequest | identifier_name |
lib.rs | conversion of Balance -> `u64`. This is used for
/// staking's election calculation.
pub struct CurrencyToVoteHandler;
impl CurrencyToVoteHandler {
fn factor() -> Balance |
}
impl Convert<Balance, u64> for CurrencyToVoteHandler {
fn convert(x: Balance) -> u64 {
(x / Self::factor()) as u64
}
}
impl Convert<u128, Balance> for CurrencyToVoteHandler {
fn convert(x: u128) -> Balance {
x * Self::factor()
}
}
parameter_types! {
pub const SessionsPerEra: sp_staking::SessionIndex = 3; // 3 hours
pub const BondingDuration: pallet_staking::EraIndex = 4; // 12 hours
pub const SlashDeferDuration: pallet_staking::EraIndex = 2; // 6 hours
pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE;
pub const MaxNominatorRewardedPerValidator: u32 = 64;
pub const ElectionLookahead: BlockNumber = EPOCH_DURATION_IN_BLOCKS / 4;
pub const StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2;
pub const MaxIterations: u32 = 5;
// 0.05%. The higher the value, the more strict solution acceptance becomes.
pub MinSolutionScoreBump: Perbill = Perbill::from_rational_approximation(5u32, 10_000);
}
/// pallet_staking::offchain by skyh 0927
impl pallet_staking::Trait for Runtime {
type Currency = Balances;
type UnixTime = Timestamp;
type CurrencyToVote = CurrencyToVoteHandler;
type RewardRemainder = Treasury;
type RewardCurve = RewardCurve;
type Slash = Treasury; // send the slashed funds to the pallet treasury.
type Reward = (); // rewards are minted from the void
type SessionInterface = Self;
type SessionsPerEra = SessionsPerEra;
type BondingDuration = BondingDuration;
type SlashDeferDuration = SlashDeferDuration;
/// A super-majority of the council can cancel the slash.
type SlashCancelOrigin = frame_system::EnsureRoot<Self::AccountId>; // TODO
type NextNewSession = Session;
type ElectionLookahead = ElectionLookahead;
type UnsignedPriority = StakingUnsignedPriority;
type MaxIterations = MaxIterations;
type MinSolutionScoreBump = MinSolutionScoreBump;
type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator;
type WeightInfo = ();
type Event = Event;
type Call = Call;
}
parameter_types! {
pub const CouncilMotionDuration: BlockNumber = 3 * DAYS;
pub const CouncilMaxProposals: u32 = 100;
pub const CouncilMaxMembers: u32 = 100;
}
type CouncilCollective = pallet_collective::Instance1;
pub type MoreThanHalfCouncil = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>
>;
pub type SlashCancelOrigin = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>
>;
/// pallet_collective by skyh 1020
impl pallet_collective::Trait<CouncilCollective> for Runtime {
type MotionDuration = CouncilMotionDuration;
type MaxProposals = CouncilMaxProposals;
type MaxMembers = CouncilMaxMembers;
type DefaultVote = pallet_collective::PrimeDefaultVote;
// type WeightInfo = weights::pallet_collective::WeightInfo<Runtime>;
type WeightInfo = ();
type Origin = Origin;
type Proposal = Call;
type Event = Event;
}
parameter_types! {
pub const TechnicalMotionDuration: BlockNumber = 3 * DAYS;
pub const TechnicalMaxProposals: u32 = 100;
pub const TechnicalMaxMembers: u32 = 100;
}
type TechnicalCollective = pallet_collective::Instance2;
impl pallet_collective::Trait<TechnicalCollective> for Runtime {
type Origin = Origin;
type Proposal = Call;
type Event = Event;
type MotionDuration = TechnicalMotionDuration;
type MaxProposals = TechnicalMaxProposals;
type MaxMembers = TechnicalMaxMembers;
type DefaultVote = pallet_collective::PrimeDefaultVote;
// type WeightInfo = weights::pallet_collective::WeightInfo<Runtime>;
type WeightInfo = ();
}
parameter_types! {
pub const LaunchPeriod: BlockNumber = 7 * DAYS;
pub const VotingPeriod: BlockNumber = 7 * DAYS;
pub const FastTrackVotingPeriod: BlockNumber = 3 * HOURS;
pub const MinimumDeposit: Balance = 1 * DOLLARS;
pub const EnactmentPeriod: BlockNumber = 8 * DAYS;
pub const CooloffPeriod: BlockNumber = 7 * DAYS;
// One cent: $10,000 / MB
pub const PreimageByteDeposit: Balance = 10 * MILLICENTS;
pub const InstantAllowed: bool = true;
pub const MaxVotes: u32 = 100;
pub const MaxProposals: u32 = 100;
}
/// pallet_democracy by skyh 1020
impl pallet_democracy::Trait for Runtime {
type Proposal = Call;
type Event = Event;
type Currency = Balances;
type EnactmentPeriod = EnactmentPeriod;
type LaunchPeriod = LaunchPeriod;
type VotingPeriod = VotingPeriod;
type CooloffPeriod = CooloffPeriod;
type PalletsOrigin = OriginCaller;
type VetoOrigin = pallet_collective::EnsureMember<AccountId, TechnicalCollective>;
type ExternalOrigin = pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>;
type ExternalMajorityOrigin = pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>;
type ExternalDefaultOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>;
/// Two thirds of the technical committee can have an ExternalMajority/ExternalDefault vote
/// be tabled immediately and with a shorter voting/enactment period.
type FastTrackOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>;
type InstantOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>;
type InstantAllowed = InstantAllowed;
type FastTrackVotingPeriod = FastTrackVotingPeriod;
// To cancel a proposal which has been passed, 2/3 of the council must agree to it.
type CancellationOrigin = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>,
>;
// To cancel a proposal before it has been passed, the technical committee must be unanimous or
// Root must agree.
// type CancelProposalOrigin = EnsureOneOf<
// AccountId,
// EnsureRoot<AccountId>,
// pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>,
// >;
// type BlacklistOrigin = EnsureRoot<AccountId>;
// Any single technical committee member may veto a coming council proposal, however they can
// only do it once and it lasts only for the cooloff period.
type Slash = Treasury;
type Scheduler = Scheduler;
type MaxVotes = MaxVotes;
type MinimumDeposit = MinimumDeposit;
type PreimageByteDeposit = PreimageByteDeposit;
type OperationalPreimageOrigin = pallet_collective::EnsureMember<AccountId, CouncilCollective>;
type WeightInfo = ();
// type MaxProposals = MaxProposals;
}
parameter_types! {
pub const CandidacyBond: Balance = 1 * DOLLARS;
pub const VotingBond: Balance = 5 * CENTS;
/// Daily council elections.
pub const TermDuration: BlockNumber = 24 * HOURS;
pub const DesiredMembers: u32 = 19;
pub const DesiredRunnersUp: u32 = 19;
pub const ElectionsPhragmenModuleId: LockIdentifier = *b"phrelect";
}
// Make sure that there are no more than MaxMembers members elected via phragmen.
const_assert!(DesiredMembers::get() <= CouncilMaxMembers::get());
/// pallet_elections_phragmen by skyh 1020
impl pallet_elections_phragmen::Trait for Runtime {
type Event = Event;
type Currency = Balances;
type ChangeMembers = Council;
type InitializeMembers = Council;
type CurrencyToVote = CurrencyToVoteHandler;
type CandidacyBond = CandidacyBond;
type VotingBond = VotingBond;
type LoserCandidate = Treasury;
type BadReport = Treasury;
type KickedMember = Treasury;
type DesiredMembers = DesiredMembers;
type DesiredRunnersUp = DesiredRunnersUp;
type TermDuration = TermDuration;
type ModuleId = ElectionsPhragmenModuleId;
// type WeightInfo = weights::pallet_elections_phragmen::WeightInfo<Runtime>;
type WeightInfo = ();
}
/// pallet_membership by skyh 1020
impl pallet_membership::Trait<pallet_membership::Instance1> for Runtime | {
(Balances::total_issuance() / u64::max_value() as Balance).max(1)
} | identifier_body |
lib.rs | conversion of Balance -> `u64`. This is used for
/// staking's election calculation.
pub struct CurrencyToVoteHandler;
impl CurrencyToVoteHandler {
fn factor() -> Balance {
(Balances::total_issuance() / u64::max_value() as Balance).max(1)
}
}
impl Convert<Balance, u64> for CurrencyToVoteHandler {
fn | (x: Balance) -> u64 {
(x / Self::factor()) as u64
}
}
impl Convert<u128, Balance> for CurrencyToVoteHandler {
fn convert(x: u128) -> Balance {
x * Self::factor()
}
}
parameter_types! {
pub const SessionsPerEra: sp_staking::SessionIndex = 3; // 3 hours
pub const BondingDuration: pallet_staking::EraIndex = 4; // 12 hours
pub const SlashDeferDuration: pallet_staking::EraIndex = 2; // 6 hours
pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE;
pub const MaxNominatorRewardedPerValidator: u32 = 64;
pub const ElectionLookahead: BlockNumber = EPOCH_DURATION_IN_BLOCKS / 4;
pub const StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2;
pub const MaxIterations: u32 = 5;
// 0.05%. The higher the value, the more strict solution acceptance becomes.
pub MinSolutionScoreBump: Perbill = Perbill::from_rational_approximation(5u32, 10_000);
}
/// pallet_staking::offchain by skyh 0927
impl pallet_staking::Trait for Runtime {
type Currency = Balances;
type UnixTime = Timestamp;
type CurrencyToVote = CurrencyToVoteHandler;
type RewardRemainder = Treasury;
type RewardCurve = RewardCurve;
type Slash = Treasury; // send the slashed funds to the pallet treasury.
type Reward = (); // rewards are minted from the void
type SessionInterface = Self;
type SessionsPerEra = SessionsPerEra;
type BondingDuration = BondingDuration;
type SlashDeferDuration = SlashDeferDuration;
/// A super-majority of the council can cancel the slash.
type SlashCancelOrigin = frame_system::EnsureRoot<Self::AccountId>; // TODO
type NextNewSession = Session;
type ElectionLookahead = ElectionLookahead;
type UnsignedPriority = StakingUnsignedPriority;
type MaxIterations = MaxIterations;
type MinSolutionScoreBump = MinSolutionScoreBump;
type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator;
type WeightInfo = ();
type Event = Event;
type Call = Call;
}
parameter_types! {
pub const CouncilMotionDuration: BlockNumber = 3 * DAYS;
pub const CouncilMaxProposals: u32 = 100;
pub const CouncilMaxMembers: u32 = 100;
}
type CouncilCollective = pallet_collective::Instance1;
pub type MoreThanHalfCouncil = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>
>;
pub type SlashCancelOrigin = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>
>;
/// pallet_collective by skyh 1020
impl pallet_collective::Trait<CouncilCollective> for Runtime {
type MotionDuration = CouncilMotionDuration;
type MaxProposals = CouncilMaxProposals;
type MaxMembers = CouncilMaxMembers;
type DefaultVote = pallet_collective::PrimeDefaultVote;
// type WeightInfo = weights::pallet_collective::WeightInfo<Runtime>;
type WeightInfo = ();
type Origin = Origin;
type Proposal = Call;
type Event = Event;
}
parameter_types! {
pub const TechnicalMotionDuration: BlockNumber = 3 * DAYS;
pub const TechnicalMaxProposals: u32 = 100;
pub const TechnicalMaxMembers: u32 = 100;
}
type TechnicalCollective = pallet_collective::Instance2;
impl pallet_collective::Trait<TechnicalCollective> for Runtime {
type Origin = Origin;
type Proposal = Call;
type Event = Event;
type MotionDuration = TechnicalMotionDuration;
type MaxProposals = TechnicalMaxProposals;
type MaxMembers = TechnicalMaxMembers;
type DefaultVote = pallet_collective::PrimeDefaultVote;
// type WeightInfo = weights::pallet_collective::WeightInfo<Runtime>;
type WeightInfo = ();
}
parameter_types! {
pub const LaunchPeriod: BlockNumber = 7 * DAYS;
pub const VotingPeriod: BlockNumber = 7 * DAYS;
pub const FastTrackVotingPeriod: BlockNumber = 3 * HOURS;
pub const MinimumDeposit: Balance = 1 * DOLLARS;
pub const EnactmentPeriod: BlockNumber = 8 * DAYS;
pub const CooloffPeriod: BlockNumber = 7 * DAYS;
// One cent: $10,000 / MB
pub const PreimageByteDeposit: Balance = 10 * MILLICENTS;
pub const InstantAllowed: bool = true;
pub const MaxVotes: u32 = 100;
pub const MaxProposals: u32 = 100;
}
/// pallet_democracy by skyh 1020
impl pallet_democracy::Trait for Runtime {
type Proposal = Call;
type Event = Event;
type Currency = Balances;
type EnactmentPeriod = EnactmentPeriod;
type LaunchPeriod = LaunchPeriod;
type VotingPeriod = VotingPeriod;
type CooloffPeriod = CooloffPeriod;
type PalletsOrigin = OriginCaller;
type VetoOrigin = pallet_collective::EnsureMember<AccountId, TechnicalCollective>;
type ExternalOrigin = pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>;
type ExternalMajorityOrigin = pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>;
type ExternalDefaultOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>;
/// Two thirds of the technical committee can have an ExternalMajority/ExternalDefault vote
/// be tabled immediately and with a shorter voting/enactment period.
type FastTrackOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>;
type InstantOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>;
type InstantAllowed = InstantAllowed;
type FastTrackVotingPeriod = FastTrackVotingPeriod;
// To cancel a proposal which has been passed, 2/3 of the council must agree to it.
type CancellationOrigin = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>,
>;
// To cancel a proposal before it has been passed, the technical committee must be unanimous or
// Root must agree.
// type CancelProposalOrigin = EnsureOneOf<
// AccountId,
// EnsureRoot<AccountId>,
// pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>,
// >;
// type BlacklistOrigin = EnsureRoot<AccountId>;
// Any single technical committee member may veto a coming council proposal, however they can
// only do it once and it lasts only for the cooloff period.
type Slash = Treasury;
type Scheduler = Scheduler;
type MaxVotes = MaxVotes;
type MinimumDeposit = MinimumDeposit;
type PreimageByteDeposit = PreimageByteDeposit;
type OperationalPreimageOrigin = pallet_collective::EnsureMember<AccountId, CouncilCollective>;
type WeightInfo = ();
// type MaxProposals = MaxProposals;
}
parameter_types! {
pub const CandidacyBond: Balance = 1 * DOLLARS;
pub const VotingBond: Balance = 5 * CENTS;
/// Daily council elections.
pub const TermDuration: BlockNumber = 24 * HOURS;
pub const DesiredMembers: u32 = 19;
pub const DesiredRunnersUp: u32 = 19;
pub const ElectionsPhragmenModuleId: LockIdentifier = *b"phrelect";
}
// Make sure that there are no more than MaxMembers members elected via phragmen.
const_assert!(DesiredMembers::get() <= CouncilMaxMembers::get());
/// pallet_elections_phragmen by skyh 1020
impl pallet_elections_phragmen::Trait for Runtime {
type Event = Event;
type Currency = Balances;
type ChangeMembers = Council;
type InitializeMembers = Council;
type CurrencyToVote = CurrencyToVoteHandler;
type CandidacyBond = CandidacyBond;
type VotingBond = VotingBond;
type LoserCandidate = Treasury;
type BadReport = Treasury;
type KickedMember = Treasury;
type DesiredMembers = DesiredMembers;
type DesiredRunnersUp = DesiredRunnersUp;
type TermDuration = TermDuration;
type ModuleId = ElectionsPhragmenModuleId;
// type WeightInfo = weights::pallet_elections_phragmen::WeightInfo<Runtime>;
type WeightInfo = ();
}
/// pallet_membership by skyh 1020
impl pallet_membership::Trait<pallet_membership::Instance1> for Runtime {
| convert | identifier_name |
lib.rs | EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>;
type ExternalDefaultOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>;
/// Two thirds of the technical committee can have an ExternalMajority/ExternalDefault vote
/// be tabled immediately and with a shorter voting/enactment period.
type FastTrackOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>;
type InstantOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>;
type InstantAllowed = InstantAllowed;
type FastTrackVotingPeriod = FastTrackVotingPeriod;
// To cancel a proposal which has been passed, 2/3 of the council must agree to it.
type CancellationOrigin = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>,
>;
// To cancel a proposal before it has been passed, the technical committee must be unanimous or
// Root must agree.
// type CancelProposalOrigin = EnsureOneOf<
// AccountId,
// EnsureRoot<AccountId>,
// pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>,
// >;
// type BlacklistOrigin = EnsureRoot<AccountId>;
// Any single technical committee member may veto a coming council proposal, however they can
// only do it once and it lasts only for the cooloff period.
type Slash = Treasury;
type Scheduler = Scheduler;
type MaxVotes = MaxVotes;
type MinimumDeposit = MinimumDeposit;
type PreimageByteDeposit = PreimageByteDeposit;
type OperationalPreimageOrigin = pallet_collective::EnsureMember<AccountId, CouncilCollective>;
type WeightInfo = ();
// type MaxProposals = MaxProposals;
}
parameter_types! {
pub const CandidacyBond: Balance = 1 * DOLLARS;
pub const VotingBond: Balance = 5 * CENTS;
/// Daily council elections.
pub const TermDuration: BlockNumber = 24 * HOURS;
pub const DesiredMembers: u32 = 19;
pub const DesiredRunnersUp: u32 = 19;
pub const ElectionsPhragmenModuleId: LockIdentifier = *b"phrelect";
}
// Make sure that there are no more than MaxMembers members elected via phragmen.
const_assert!(DesiredMembers::get() <= CouncilMaxMembers::get());
/// pallet_elections_phragmen by skyh 1020
impl pallet_elections_phragmen::Trait for Runtime {
type Event = Event;
type Currency = Balances;
type ChangeMembers = Council;
type InitializeMembers = Council;
type CurrencyToVote = CurrencyToVoteHandler;
type CandidacyBond = CandidacyBond;
type VotingBond = VotingBond;
type LoserCandidate = Treasury;
type BadReport = Treasury;
type KickedMember = Treasury;
type DesiredMembers = DesiredMembers;
type DesiredRunnersUp = DesiredRunnersUp;
type TermDuration = TermDuration;
type ModuleId = ElectionsPhragmenModuleId;
// type WeightInfo = weights::pallet_elections_phragmen::WeightInfo<Runtime>;
type WeightInfo = ();
}
/// pallet_membership by skyh 1020
impl pallet_membership::Trait<pallet_membership::Instance1> for Runtime {
type Event = Event;
type AddOrigin = MoreThanHalfCouncil;
type RemoveOrigin = MoreThanHalfCouncil;
type SwapOrigin = MoreThanHalfCouncil;
type ResetOrigin = MoreThanHalfCouncil;
type PrimeOrigin = MoreThanHalfCouncil;
type MembershipInitialized = TechnicalCommittee;
type MembershipChanged = TechnicalCommittee;
}
parameter_types! {
pub const ProposalBond: Permill = Permill::from_percent(5);
pub const ProposalBondMinimum: Balance = 20 * DOLLARS;
pub const SpendPeriod: BlockNumber = 6 * DAYS;
pub const Burn: Permill = Permill::from_perthousand(2);
pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry");
pub const TipCountdown: BlockNumber = 1 * DAYS;
pub const TipFindersFee: Percent = Percent::from_percent(20);
pub const TipReportDepositBase: Balance = 1 * DOLLARS;
pub const DataDepositPerByte: Balance = 1 * CENTS;
pub const BountyDepositBase: Balance = 1 * DOLLARS;
pub const BountyDepositPayoutDelay: BlockNumber = 4 * DAYS;
pub const BountyUpdatePeriod: BlockNumber = 90 * DAYS;
pub const MaximumReasonLength: u32 = 16384;
pub const BountyCuratorDeposit: Permill = Permill::from_percent(50);
pub const BountyValueMinimum: Balance = 2 * DOLLARS;
}
type ApproveOrigin = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective>
>;
/// pallet_treasury by skyh 1020
impl pallet_treasury::Trait for Runtime {
type ModuleId = TreasuryModuleId;
type Currency = Balances;
type ApproveOrigin = ApproveOrigin;
type RejectOrigin = MoreThanHalfCouncil;
type DataDepositPerByte = DataDepositPerByte;
type Tippers = ElectionsPhragmen;
type TipCountdown = TipCountdown;
type TipFindersFee = TipFindersFee;
type TipReportDepositBase = TipReportDepositBase;
type BountyDepositBase = BountyDepositBase;
type BountyDepositPayoutDelay = BountyDepositPayoutDelay;
type BountyUpdatePeriod = BountyUpdatePeriod;
type MaximumReasonLength = MaximumReasonLength;
type BountyCuratorDeposit = BountyCuratorDeposit;
type BountyValueMinimum = BountyValueMinimum;
// type BurnDestination = Society;
type ProposalBond = ProposalBond;
type ProposalBondMinimum = ProposalBondMinimum;
type SpendPeriod = SpendPeriod;
type OnSlash = Treasury;
type Burn = Burn;
type BurnDestination = ();
// type WeightInfo = weights::pallet_treasury::WeightInfo<Runtime>;
type WeightInfo = ();
type Event = Event;
}
parameter_types! {
pub const MaxScheduledPerBlock: u32 = 50;
}
/// pallet_scheduler by skyh 1020
impl pallet_scheduler::Trait for Runtime {
type PalletsOrigin = OriginCaller;
type MaximumWeight = MaximumBlockWeight;
type ScheduleOrigin = EnsureRoot<AccountId>;
type MaxScheduledPerBlock = MaxScheduledPerBlock;
type WeightInfo = ();
type Origin = Origin;
type Event = Event;
type Call = Call;
}
parameter_types! {
pub const SessionDuration: BlockNumber = EPOCH_DURATION_IN_BLOCKS as _;
pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value();
}
impl pallet_im_online::Trait for Runtime {
type AuthorityId = ImOnlineId;
type SessionDuration = SessionDuration;
type ReportUnresponsiveness = Offences;
type UnsignedPriority = ImOnlineUnsignedPriority;
type WeightInfo = ();
type Event = Event;
}
parameter_types! {
pub const IndexDeposit: Balance = DOLLARS;
}
/// pallet_indices by skyh 1020
impl pallet_indices::Trait for Runtime {
type AccountIndex = AccountIndex;
type Deposit = IndexDeposit;
type Currency = Balances;
type WeightInfo = ();
type Event = Event;
}
parameter_types! {
pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get();
}
/// pallet_offences by skyh 1020
impl pallet_offences::Trait for Runtime {
type IdentificationTuple = pallet_session::historical::IdentificationTuple<Self>;
type WeightSoftLimit = OffencesWeightSoftLimit;
type OnOffenceHandler = Staking;
type Event = Event;
}
impl pallet_utility::Trait for Runtime {
type WeightInfo = ();
type Event = Event;
type Call = Call;
}
parameter_types! {
pub const MinVestedTransfer: Balance = 100 * DOLLARS;
}
/// pallet_vesting by skyh 1020
impl pallet_vesting::Trait for Runtime {
type Event = Event;
type Currency = Balances;
type BlockNumberToBalance = ConvertInto;
type MinVestedTransfer = MinVestedTransfer;
type WeightInfo = ();
}
impl pallet_authority_discovery::Trait for Runtime {}
parameter_types! {
pub const UncleGenerations: BlockNumber = 5;
}
/// pallet_authorship origin
impl pallet_authorship::Trait for Runtime {
type FindAuthor = pallet_session::FindAccountFromAuthorIndex<Self, Babe>;
type UncleGenerations = UncleGenerations;
type FilterUncle = ();
type EventHandler = (Staking, ()); // ImOnline
}
parameter_types! {
pub const MinimumPeriod: u64 = SLOT_DURATION / 2;
}
/// pallet_timestamp origin
impl pallet_timestamp::Trait for Runtime {
/// A timestamp: milliseconds since the unix epoch.
type Moment = Moment;
type OnTimestampSet = Babe;
type MinimumPeriod = MinimumPeriod;
type WeightInfo = ();
}
parameter_types! {
pub const ExistentialDeposit: u128 = 500; // 0
pub const MaxLocks: u32 = 50;
}
/// pallet_balances origin
impl pallet_balances::Trait for Runtime {
type Balance = Balance;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type MaxLocks = MaxLocks; | type AccountStore = System; //
type Event = Event; | random_line_split | |
paxos.go | .ENOENT && err1.Err != syscall.ECONNREFUSED {
fmt.Printf("paxos Dial() failed: %v\n", err1)
}
return false
}
defer c.Close()
// fmt.Printf("Call srv:%s name:%s\n", srv, name)
err = c.Call(name, args, reply)
// fmt.Printf("After Call %s, err:%v, rpl:%v\n", srv, err, reply)
if err == nil {
return true
}
return false
}
/* clog(bool, func_name, format) */
func (px *Paxos) clog(dbg bool, funcname, format string, args ...interface{}) {
if dbg {
l1 := fmt.Sprintf("[%s] me:%d\n", funcname, px.me)
l2 := fmt.Sprintf("...."+format, args...)
fmt.Println(l1 + l2)
}
}
/* Proposer
* send prepare request for pNum in slot seq
* return
* - if OK to send accept request
* - highest-numbered proposal
*/
func (px *Paxos) send_prepare(seq int, pNum int) (bool, Proposal) {
ok_count := 0
max_reject_pnum := 0
p := Proposal{}
for idx, peer := range px.peers {
args := &PrepareArgs{}
reply := &PrepareReply{}
args.Seq = seq
args.PNum = pNum
// if DBG_PREPARE {
// fmt.Printf("[send_prepare] me:%d\n....to %s\n",
// px.me, peer)
// }
ok := false
if idx == px.me {
px.Prepare(args, reply)
ok = true
} else {
ok = call(peer, "Paxos.Prepare", args, reply)
}
// TODO: what if I got only one Reject?
if ok && reply.Err == OK {
ok_count++
if reply.Proposal.PNum > p.PNum {
p = reply.Proposal
}
}
if ok && reply.Err == Reject && reply.Proposal.PNum > max_reject_pnum {
// if rejected, record the highest PNum seen
max_reject_pnum = reply.Proposal.PNum
}
}
px.clog(DBG_PREPARE, "send_prepare", "seq=%d n=%d ok_count=%d/%d max_rej:%d", seq, pNum, ok_count, px.majority, max_reject_pnum)
if ok_count >= px.majority {
return true, p
} else {
return false, Proposal{max_reject_pnum, nil}
}
}
/* Acceptor
* handler for prepare request
*/
func (px *Paxos) Prepare(args *PrepareArgs, reply *PrepareReply) error {
if args.PNum > px.APp[args.Seq] {
// prepare request with higher Proposal Number
px.APp[args.Seq] = args.PNum
reply.Err = OK
reply.Proposal = px.APa[args.Seq]
} else {
// Already promised to Proposal with a higher Proposal Number
reply.Err = Reject
reply.Proposal = Proposal{px.APp[args.Seq], nil}
}
return nil
}
/* Proposer
* send accept requests
* return true if success
*/
func (px *Paxos) send_accept(seq int, p Proposal) bool {
ok_count := 0
for idx, peer := range px.peers {
args := &AcceptArgs{}
reply := &AcceptReply{}
args.Seq = seq
args.Proposal = p
ok := false
if idx == px.me {
px.Accept(args, reply)
ok = true
} else {
ok = call(peer, "Paxos.Accept", args, reply)
}
if ok && reply.Err == OK {
ok_count++
}
}
px.clog(DBG_PREPARE, "send_accept", "seq=%d p=%v ok_count=%d/%d", seq, p, ok_count, px.majority)
return (ok_count >= px.majority)
}
/* Acceptor
* handler for Accept request
*/
func (px *Paxos) Accept(args *AcceptArgs, reply *AcceptReply) error {
if args.Proposal.PNum >= px.APp[args.Seq] {
px.APp[args.Seq] = args.Proposal.PNum
px.APa[args.Seq] = args.Proposal
reply.Err = OK
} else {
reply.Err = Reject
}
return nil
}
/* Proposer
* send decided value to all
*/
func (px *Paxos) | (seq int, v interface{}) {
for idx, peer := range px.peers {
args := &DecdidedArgs{}
reply := &DecidedReply{}
args.Seq = seq
args.V = v
if idx == px.me {
px.Decided(args, reply)
} else {
call(peer, "Paxos.Decided", args, reply)
}
}
}
/* Learner
* handler for decide notification
*/
func (px *Paxos) Decided(args *DecdidedArgs, reply *DecidedReply) error {
px.clog(DBG_DECIDED, "Decided", "Seq=%d V=%v", args.Seq, args.V)
px.Lslots[args.Seq] = Slot_t{true, args.V}
if args.Seq > px.max_seq {
px.max_seq = args.Seq
}
reply.Err = OK
return nil
}
//
// the application wants paxos to start agreement on
// instance seq, with proposed value v.
// Start() returns right away; the application will
// call Status() to find out if/when agreement
// is reached.
//
func (px *Paxos) Start(seq int, v interface{}) {
// run Paxos algorithm in a new thread(run the Paxos protocol concurrently)
// play the role of proposer
// Your code here.
px.clog(DBG_PROPOSER, "Start", "Start seq=%d v=%v", seq, v)
// I'm Proposer
go func() {
n := 0
max_reject_pnum := -1
for {
if px.dead {
// I'm dead
break
}
if px.Lslots[seq].Decided {
// locally decided, wouldn't send prepare and accept anymore
// just propagate the decision
px.send_decided(seq, px.Lslots[seq].V)
break
}
if px.APp[seq]+1 > n {
n = px.APp[seq] + 1
} else {
n++
}
if n < max_reject_pnum {
n = max_reject_pnum + 1
}
px.clog(DBG_PROPOSER, "Start", "send prepare, seq=%d n=%d", seq, n)
prepare_ok, p := px.send_prepare(seq, n)
if !prepare_ok {
max_reject_pnum = p.PNum
continue
}
new_p := Proposal{}
// no proposal yet, use v
if p.PNum == 0 {
new_p.Value = v
} else {
new_p.Value = p.Value
}
new_p.PNum = n
px.clog(DBG_PROPOSER, "Start", "prepare OK, proposal=%v", new_p)
accept_ok := px.send_accept(seq, new_p)
if !accept_ok {
continue
}
px.clog(DBG_PROPOSER, "Start", "accept OK")
px.send_decided(seq, new_p.Value)
px.clog(DBG_PROPOSER, "Start", "decided")
break
}
}()
}
//
// the application on this machine is done with
// all instances <= seq.
//
// see the comments for Min() for more explanation.
//
func (px *Paxos) Done(seq int) {
// Your code here.
px.clog(DBG_DONE, "Done", "Done: %d", seq)
if seq <= px.local_done {
return
}
// update local_done if need
px.local_done = seq
for k, _ := range px.Lslots {
if k <= seq {
delete(px.Lslots, k)
delete(px.APa, k)
delete(px.APp, k)
px.clog(DBG_DONE, "Done", "delete %d", k)
}
}
px.clog(DBG_DONE, "Done", "local_done=%d", px.local_done)
// check to see if it is OK to update global_done
if px.send_IfDone(seq) {
px.global_done = seq
px.send_IsDone(seq)
}
}
func (px *Paxos) send_IfDone(seq int) bool {
ok_count := 0
px.clog(DBG_DONE, "send_IfDone", "check seq=%d ", seq)
for idx, peer := range px.peers {
args := &IfDoneArgs{}
reply := &IfDoneReply{}
args.Se | send_decided | identifier_name |
paxos.go | OENT && err1.Err != syscall.ECONNREFUSED {
fmt.Printf("paxos Dial() failed: %v\n", err1)
}
return false
}
defer c.Close()
// fmt.Printf("Call srv:%s name:%s\n", srv, name)
err = c.Call(name, args, reply)
// fmt.Printf("After Call %s, err:%v, rpl:%v\n", srv, err, reply)
if err == nil {
return true
}
return false
}
/* clog(bool, func_name, format) */
func (px *Paxos) clog(dbg bool, funcname, format string, args ...interface{}) {
if dbg {
l1 := fmt.Sprintf("[%s] me:%d\n", funcname, px.me)
l2 := fmt.Sprintf("...."+format, args...)
fmt.Println(l1 + l2)
}
}
/* Proposer
* send prepare request for pNum in slot seq
* return
* - if OK to send accept request
* - highest-numbered proposal
*/
func (px *Paxos) send_prepare(seq int, pNum int) (bool, Proposal) {
ok_count := 0
max_reject_pnum := 0
p := Proposal{}
for idx, peer := range px.peers {
args := &PrepareArgs{}
reply := &PrepareReply{}
args.Seq = seq
args.PNum = pNum
// if DBG_PREPARE {
// fmt.Printf("[send_prepare] me:%d\n....to %s\n",
// px.me, peer)
// }
ok := false
if idx == px.me {
px.Prepare(args, reply)
ok = true
} else {
ok = call(peer, "Paxos.Prepare", args, reply)
}
// TODO: what if I got only one Reject?
if ok && reply.Err == OK {
ok_count++
if reply.Proposal.PNum > p.PNum {
p = reply.Proposal
}
}
if ok && reply.Err == Reject && reply.Proposal.PNum > max_reject_pnum {
// if rejected, record the highest PNum seen
max_reject_pnum = reply.Proposal.PNum
}
}
px.clog(DBG_PREPARE, "send_prepare", "seq=%d n=%d ok_count=%d/%d max_rej:%d", seq, pNum, ok_count, px.majority, max_reject_pnum)
if ok_count >= px.majority {
return true, p
} else {
return false, Proposal{max_reject_pnum, nil}
}
}
/* Acceptor
* handler for prepare request
*/
func (px *Paxos) Prepare(args *PrepareArgs, reply *PrepareReply) error {
if args.PNum > px.APp[args.Seq] {
// prepare request with higher Proposal Number
px.APp[args.Seq] = args.PNum
reply.Err = OK
reply.Proposal = px.APa[args.Seq]
} else {
// Already promised to Proposal with a higher Proposal Number
reply.Err = Reject
reply.Proposal = Proposal{px.APp[args.Seq], nil}
}
return nil
}
/* Proposer
* send accept requests
* return true if success
*/
func (px *Paxos) send_accept(seq int, p Proposal) bool {
ok_count := 0
for idx, peer := range px.peers {
args := &AcceptArgs{}
reply := &AcceptReply{}
args.Seq = seq
args.Proposal = p
ok := false
if idx == px.me {
px.Accept(args, reply)
ok = true
} else {
ok = call(peer, "Paxos.Accept", args, reply)
}
if ok && reply.Err == OK {
ok_count++
}
}
px.clog(DBG_PREPARE, "send_accept", "seq=%d p=%v ok_count=%d/%d", seq, p, ok_count, px.majority)
return (ok_count >= px.majority)
}
/* Acceptor
* handler for Accept request
*/
func (px *Paxos) Accept(args *AcceptArgs, reply *AcceptReply) error |
/* Proposer
* send decided value to all
*/
func (px *Paxos) send_decided(seq int, v interface{}) {
for idx, peer := range px.peers {
args := &DecdidedArgs{}
reply := &DecidedReply{}
args.Seq = seq
args.V = v
if idx == px.me {
px.Decided(args, reply)
} else {
call(peer, "Paxos.Decided", args, reply)
}
}
}
/* Learner
* handler for decide notification
*/
func (px *Paxos) Decided(args *DecdidedArgs, reply *DecidedReply) error {
px.clog(DBG_DECIDED, "Decided", "Seq=%d V=%v", args.Seq, args.V)
px.Lslots[args.Seq] = Slot_t{true, args.V}
if args.Seq > px.max_seq {
px.max_seq = args.Seq
}
reply.Err = OK
return nil
}
//
// the application wants paxos to start agreement on
// instance seq, with proposed value v.
// Start() returns right away; the application will
// call Status() to find out if/when agreement
// is reached.
//
func (px *Paxos) Start(seq int, v interface{}) {
// run Paxos algorithm in a new thread(run the Paxos protocol concurrently)
// play the role of proposer
// Your code here.
px.clog(DBG_PROPOSER, "Start", "Start seq=%d v=%v", seq, v)
// I'm Proposer
go func() {
n := 0
max_reject_pnum := -1
for {
if px.dead {
// I'm dead
break
}
if px.Lslots[seq].Decided {
// locally decided, wouldn't send prepare and accept anymore
// just propagate the decision
px.send_decided(seq, px.Lslots[seq].V)
break
}
if px.APp[seq]+1 > n {
n = px.APp[seq] + 1
} else {
n++
}
if n < max_reject_pnum {
n = max_reject_pnum + 1
}
px.clog(DBG_PROPOSER, "Start", "send prepare, seq=%d n=%d", seq, n)
prepare_ok, p := px.send_prepare(seq, n)
if !prepare_ok {
max_reject_pnum = p.PNum
continue
}
new_p := Proposal{}
// no proposal yet, use v
if p.PNum == 0 {
new_p.Value = v
} else {
new_p.Value = p.Value
}
new_p.PNum = n
px.clog(DBG_PROPOSER, "Start", "prepare OK, proposal=%v", new_p)
accept_ok := px.send_accept(seq, new_p)
if !accept_ok {
continue
}
px.clog(DBG_PROPOSER, "Start", "accept OK")
px.send_decided(seq, new_p.Value)
px.clog(DBG_PROPOSER, "Start", "decided")
break
}
}()
}
//
// the application on this machine is done with
// all instances <= seq.
//
// see the comments for Min() for more explanation.
//
func (px *Paxos) Done(seq int) {
// Your code here.
px.clog(DBG_DONE, "Done", "Done: %d", seq)
if seq <= px.local_done {
return
}
// update local_done if need
px.local_done = seq
for k, _ := range px.Lslots {
if k <= seq {
delete(px.Lslots, k)
delete(px.APa, k)
delete(px.APp, k)
px.clog(DBG_DONE, "Done", "delete %d", k)
}
}
px.clog(DBG_DONE, "Done", "local_done=%d", px.local_done)
// check to see if it is OK to update global_done
if px.send_IfDone(seq) {
px.global_done = seq
px.send_IsDone(seq)
}
}
func (px *Paxos) send_IfDone(seq int) bool {
ok_count := 0
px.clog(DBG_DONE, "send_IfDone", "check seq=%d ", seq)
for idx, peer := range px.peers {
args := &IfDoneArgs{}
reply := &IfDoneReply{}
args.Se | {
if args.Proposal.PNum >= px.APp[args.Seq] {
px.APp[args.Seq] = args.Proposal.PNum
px.APa[args.Seq] = args.Proposal
reply.Err = OK
} else {
reply.Err = Reject
}
return nil
} | identifier_body |
paxos.go | OENT && err1.Err != syscall.ECONNREFUSED |
return false
}
defer c.Close()
// fmt.Printf("Call srv:%s name:%s\n", srv, name)
err = c.Call(name, args, reply)
// fmt.Printf("After Call %s, err:%v, rpl:%v\n", srv, err, reply)
if err == nil {
return true
}
return false
}
/* clog(bool, func_name, format) */
func (px *Paxos) clog(dbg bool, funcname, format string, args ...interface{}) {
if dbg {
l1 := fmt.Sprintf("[%s] me:%d\n", funcname, px.me)
l2 := fmt.Sprintf("...."+format, args...)
fmt.Println(l1 + l2)
}
}
/* Proposer
* send prepare request for pNum in slot seq
* return
* - if OK to send accept request
* - highest-numbered proposal
*/
func (px *Paxos) send_prepare(seq int, pNum int) (bool, Proposal) {
ok_count := 0
max_reject_pnum := 0
p := Proposal{}
for idx, peer := range px.peers {
args := &PrepareArgs{}
reply := &PrepareReply{}
args.Seq = seq
args.PNum = pNum
// if DBG_PREPARE {
// fmt.Printf("[send_prepare] me:%d\n....to %s\n",
// px.me, peer)
// }
ok := false
if idx == px.me {
px.Prepare(args, reply)
ok = true
} else {
ok = call(peer, "Paxos.Prepare", args, reply)
}
// TODO: what if I got only one Reject?
if ok && reply.Err == OK {
ok_count++
if reply.Proposal.PNum > p.PNum {
p = reply.Proposal
}
}
if ok && reply.Err == Reject && reply.Proposal.PNum > max_reject_pnum {
// if rejected, record the highest PNum seen
max_reject_pnum = reply.Proposal.PNum
}
}
px.clog(DBG_PREPARE, "send_prepare", "seq=%d n=%d ok_count=%d/%d max_rej:%d", seq, pNum, ok_count, px.majority, max_reject_pnum)
if ok_count >= px.majority {
return true, p
} else {
return false, Proposal{max_reject_pnum, nil}
}
}
/* Acceptor
* handler for prepare request
*/
func (px *Paxos) Prepare(args *PrepareArgs, reply *PrepareReply) error {
if args.PNum > px.APp[args.Seq] {
// prepare request with higher Proposal Number
px.APp[args.Seq] = args.PNum
reply.Err = OK
reply.Proposal = px.APa[args.Seq]
} else {
// Already promised to Proposal with a higher Proposal Number
reply.Err = Reject
reply.Proposal = Proposal{px.APp[args.Seq], nil}
}
return nil
}
/* Proposer
* send accept requests
* return true if success
*/
func (px *Paxos) send_accept(seq int, p Proposal) bool {
ok_count := 0
for idx, peer := range px.peers {
args := &AcceptArgs{}
reply := &AcceptReply{}
args.Seq = seq
args.Proposal = p
ok := false
if idx == px.me {
px.Accept(args, reply)
ok = true
} else {
ok = call(peer, "Paxos.Accept", args, reply)
}
if ok && reply.Err == OK {
ok_count++
}
}
px.clog(DBG_PREPARE, "send_accept", "seq=%d p=%v ok_count=%d/%d", seq, p, ok_count, px.majority)
return (ok_count >= px.majority)
}
/* Acceptor
* handler for Accept request
*/
func (px *Paxos) Accept(args *AcceptArgs, reply *AcceptReply) error {
if args.Proposal.PNum >= px.APp[args.Seq] {
px.APp[args.Seq] = args.Proposal.PNum
px.APa[args.Seq] = args.Proposal
reply.Err = OK
} else {
reply.Err = Reject
}
return nil
}
/* Proposer
* send decided value to all
*/
func (px *Paxos) send_decided(seq int, v interface{}) {
for idx, peer := range px.peers {
args := &DecdidedArgs{}
reply := &DecidedReply{}
args.Seq = seq
args.V = v
if idx == px.me {
px.Decided(args, reply)
} else {
call(peer, "Paxos.Decided", args, reply)
}
}
}
/* Learner
* handler for decide notification
*/
func (px *Paxos) Decided(args *DecdidedArgs, reply *DecidedReply) error {
px.clog(DBG_DECIDED, "Decided", "Seq=%d V=%v", args.Seq, args.V)
px.Lslots[args.Seq] = Slot_t{true, args.V}
if args.Seq > px.max_seq {
px.max_seq = args.Seq
}
reply.Err = OK
return nil
}
//
// the application wants paxos to start agreement on
// instance seq, with proposed value v.
// Start() returns right away; the application will
// call Status() to find out if/when agreement
// is reached.
//
func (px *Paxos) Start(seq int, v interface{}) {
// run Paxos algorithm in a new thread(run the Paxos protocol concurrently)
// play the role of proposer
// Your code here.
px.clog(DBG_PROPOSER, "Start", "Start seq=%d v=%v", seq, v)
// I'm Proposer
go func() {
n := 0
max_reject_pnum := -1
for {
if px.dead {
// I'm dead
break
}
if px.Lslots[seq].Decided {
// locally decided, wouldn't send prepare and accept anymore
// just propagate the decision
px.send_decided(seq, px.Lslots[seq].V)
break
}
if px.APp[seq]+1 > n {
n = px.APp[seq] + 1
} else {
n++
}
if n < max_reject_pnum {
n = max_reject_pnum + 1
}
px.clog(DBG_PROPOSER, "Start", "send prepare, seq=%d n=%d", seq, n)
prepare_ok, p := px.send_prepare(seq, n)
if !prepare_ok {
max_reject_pnum = p.PNum
continue
}
new_p := Proposal{}
// no proposal yet, use v
if p.PNum == 0 {
new_p.Value = v
} else {
new_p.Value = p.Value
}
new_p.PNum = n
px.clog(DBG_PROPOSER, "Start", "prepare OK, proposal=%v", new_p)
accept_ok := px.send_accept(seq, new_p)
if !accept_ok {
continue
}
px.clog(DBG_PROPOSER, "Start", "accept OK")
px.send_decided(seq, new_p.Value)
px.clog(DBG_PROPOSER, "Start", "decided")
break
}
}()
}
//
// the application on this machine is done with
// all instances <= seq.
//
// see the comments for Min() for more explanation.
//
func (px *Paxos) Done(seq int) {
// Your code here.
px.clog(DBG_DONE, "Done", "Done: %d", seq)
if seq <= px.local_done {
return
}
// update local_done if need
px.local_done = seq
for k, _ := range px.Lslots {
if k <= seq {
delete(px.Lslots, k)
delete(px.APa, k)
delete(px.APp, k)
px.clog(DBG_DONE, "Done", "delete %d", k)
}
}
px.clog(DBG_DONE, "Done", "local_done=%d", px.local_done)
// check to see if it is OK to update global_done
if px.send_IfDone(seq) {
px.global_done = seq
px.send_IsDone(seq)
}
}
func (px *Paxos) send_IfDone(seq int) bool {
ok_count := 0
px.clog(DBG_DONE, "send_IfDone", "check seq=%d ", seq)
for idx, peer := range px.peers {
args := &IfDoneArgs{}
reply := &IfDoneReply{}
args.Se | {
fmt.Printf("paxos Dial() failed: %v\n", err1)
} | conditional_block |
paxos.go | .ENOENT && err1.Err != syscall.ECONNREFUSED {
fmt.Printf("paxos Dial() failed: %v\n", err1)
}
return false
}
defer c.Close()
// fmt.Printf("Call srv:%s name:%s\n", srv, name)
err = c.Call(name, args, reply)
// fmt.Printf("After Call %s, err:%v, rpl:%v\n", srv, err, reply)
if err == nil {
return true
}
return false
}
/* clog(bool, func_name, format) */
func (px *Paxos) clog(dbg bool, funcname, format string, args ...interface{}) {
if dbg {
l1 := fmt.Sprintf("[%s] me:%d\n", funcname, px.me)
l2 := fmt.Sprintf("...."+format, args...)
fmt.Println(l1 + l2)
}
}
/* Proposer
* send prepare request for pNum in slot seq
* return
* - if OK to send accept request
* - highest-numbered proposal
*/
func (px *Paxos) send_prepare(seq int, pNum int) (bool, Proposal) {
ok_count := 0
max_reject_pnum := 0
p := Proposal{}
for idx, peer := range px.peers {
args := &PrepareArgs{}
reply := &PrepareReply{}
args.Seq = seq
args.PNum = pNum
// if DBG_PREPARE {
// fmt.Printf("[send_prepare] me:%d\n....to %s\n",
// px.me, peer)
// }
ok := false
if idx == px.me {
px.Prepare(args, reply)
ok = true
} else {
ok = call(peer, "Paxos.Prepare", args, reply)
}
// TODO: what if I got only one Reject?
if ok && reply.Err == OK {
ok_count++
if reply.Proposal.PNum > p.PNum {
p = reply.Proposal
}
}
if ok && reply.Err == Reject && reply.Proposal.PNum > max_reject_pnum {
// if rejected, record the highest PNum seen
max_reject_pnum = reply.Proposal.PNum
}
}
px.clog(DBG_PREPARE, "send_prepare", "seq=%d n=%d ok_count=%d/%d max_rej:%d", seq, pNum, ok_count, px.majority, max_reject_pnum)
if ok_count >= px.majority {
return true, p
} else {
return false, Proposal{max_reject_pnum, nil}
}
}
/* Acceptor
* handler for prepare request
*/
func (px *Paxos) Prepare(args *PrepareArgs, reply *PrepareReply) error {
if args.PNum > px.APp[args.Seq] {
// prepare request with higher Proposal Number
px.APp[args.Seq] = args.PNum
reply.Err = OK
reply.Proposal = px.APa[args.Seq]
} else {
// Already promised to Proposal with a higher Proposal Number
reply.Err = Reject
reply.Proposal = Proposal{px.APp[args.Seq], nil}
}
return nil
}
/* Proposer
* send accept requests
* return true if success
*/
func (px *Paxos) send_accept(seq int, p Proposal) bool {
ok_count := 0
for idx, peer := range px.peers {
args := &AcceptArgs{}
reply := &AcceptReply{}
args.Seq = seq
args.Proposal = p
ok := false
if idx == px.me {
px.Accept(args, reply)
ok = true
} else {
ok = call(peer, "Paxos.Accept", args, reply)
}
if ok && reply.Err == OK {
ok_count++
}
}
px.clog(DBG_PREPARE, "send_accept", "seq=%d p=%v ok_count=%d/%d", seq, p, ok_count, px.majority)
return (ok_count >= px.majority)
}
/* Acceptor
* handler for Accept request
*/
func (px *Paxos) Accept(args *AcceptArgs, reply *AcceptReply) error {
if args.Proposal.PNum >= px.APp[args.Seq] {
px.APp[args.Seq] = args.Proposal.PNum
px.APa[args.Seq] = args.Proposal
reply.Err = OK
} else {
reply.Err = Reject
}
return nil
}
/* Proposer
* send decided value to all
*/
func (px *Paxos) send_decided(seq int, v interface{}) {
for idx, peer := range px.peers {
args := &DecdidedArgs{}
reply := &DecidedReply{}
args.Seq = seq
args.V = v
if idx == px.me {
px.Decided(args, reply)
} else {
call(peer, "Paxos.Decided", args, reply)
}
}
}
/* Learner
* handler for decide notification
*/
func (px *Paxos) Decided(args *DecdidedArgs, reply *DecidedReply) error {
px.clog(DBG_DECIDED, "Decided", "Seq=%d V=%v", args.Seq, args.V)
px.Lslots[args.Seq] = Slot_t{true, args.V}
if args.Seq > px.max_seq {
px.max_seq = args.Seq
}
reply.Err = OK
return nil
}
//
// the application wants paxos to start agreement on
// instance seq, with proposed value v.
// Start() returns right away; the application will
// call Status() to find out if/when agreement
// is reached.
//
func (px *Paxos) Start(seq int, v interface{}) {
// run Paxos algorithm in a new thread(run the Paxos protocol concurrently)
// play the role of proposer
// Your code here.
px.clog(DBG_PROPOSER, "Start", "Start seq=%d v=%v", seq, v)
// I'm Proposer
go func() {
n := 0
max_reject_pnum := -1
for {
if px.dead {
// I'm dead
break
}
if px.Lslots[seq].Decided {
// locally decided, wouldn't send prepare and accept anymore
// just propagate the decision
px.send_decided(seq, px.Lslots[seq].V)
break
}
if px.APp[seq]+1 > n {
n = px.APp[seq] + 1
} else {
n++
}
if n < max_reject_pnum {
n = max_reject_pnum + 1
}
px.clog(DBG_PROPOSER, "Start", "send prepare, seq=%d n=%d", seq, n)
prepare_ok, p := px.send_prepare(seq, n)
if !prepare_ok {
max_reject_pnum = p.PNum
continue
}
new_p := Proposal{}
// no proposal yet, use v
if p.PNum == 0 {
new_p.Value = v
} else {
new_p.Value = p.Value
}
new_p.PNum = n
px.clog(DBG_PROPOSER, "Start", "prepare OK, proposal=%v", new_p)
accept_ok := px.send_accept(seq, new_p)
if !accept_ok {
continue
}
|
px.clog(DBG_PROPOSER, "Start", "decided")
break
}
}()
}
//
// the application on this machine is done with
// all instances <= seq.
//
// see the comments for Min() for more explanation.
//
func (px *Paxos) Done(seq int) {
// Your code here.
px.clog(DBG_DONE, "Done", "Done: %d", seq)
if seq <= px.local_done {
return
}
// update local_done if need
px.local_done = seq
for k, _ := range px.Lslots {
if k <= seq {
delete(px.Lslots, k)
delete(px.APa, k)
delete(px.APp, k)
px.clog(DBG_DONE, "Done", "delete %d", k)
}
}
px.clog(DBG_DONE, "Done", "local_done=%d", px.local_done)
// check to see if it is OK to update global_done
if px.send_IfDone(seq) {
px.global_done = seq
px.send_IsDone(seq)
}
}
func (px *Paxos) send_IfDone(seq int) bool {
ok_count := 0
px.clog(DBG_DONE, "send_IfDone", "check seq=%d ", seq)
for idx, peer := range px.peers {
args := &IfDoneArgs{}
reply := &IfDoneReply{}
args.Se | px.clog(DBG_PROPOSER, "Start", "accept OK")
px.send_decided(seq, new_p.Value) | random_line_split |
event.rs | , Eq, Default, Hash)]
pub struct EventId(pub usize);
impl From<usize> for EventId {
fn from(x: usize) -> Self {
Self(x)
}
}
lazy_static! {
pub static ref EVENT_ID_MAPPINGS: Mutex<Mappings<TypeId, EventId>> =
Mutex::new(Mappings::new());
}
/// Returns the event ID for the given type.
pub fn event_id_for<E>() -> EventId
where
E: Event,
{
EVENT_ID_MAPPINGS.lock().get_or_alloc(TypeId::of::<E>())
}
/// Marker trait for types which can be triggered as events.
pub trait Event: Send + Sync + 'static {}
impl<T> Event for T where T: Send + Sync + 'static {}
/// Strategy used to handle an event.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum HandleStrategy {
/*/// The handler will be invoked in the call to `trigger` so that
/// the system triggering it will observe any side effects from
/// handling the event.
///
/// This is the default strategy.
Immediate,*/
/// The handler will be run at the end of the system which triggered the event.
EndOfSystem,
/// The handle will be scheduled for running at the end of tick.
///
/// This is the default strategy.
EndOfTick,
}
impl Default for HandleStrategy {
fn default() -> Self {
HandleStrategy::EndOfTick
}
}
/// A raw event handler.
///
/// # Safety
/// * The event type returned by `event_id()` must be the exact
/// type which is handled by `handle_raw`. `handle_raw` must
/// interpret any events as the same type.
pub unsafe trait RawEventHandler: Send + Sync + 'static {
/// Returns the unique ID of this event handler, as allocated by `system_id_for::<T>()`.
fn id(&self) -> SystemId;
/// Returns the name of this event handler.
fn name(&self) -> &'static str;
/// Returns the ID of the event which is handled by this handler.
fn event_id(&self) -> EventId;
/// Returns the strategy that should be used to invoke this handler.
fn strategy(&self) -> HandleStrategy;
/// Returns the resources read by this event handler.
fn resource_reads(&self) -> &[ResourceId];
/// Returns the resources written by this event handler.
fn resource_writes(&self) -> &[ResourceId];
fn init(&mut self, resources: &mut Resources, ctx: SystemCtx, world: &World);
/// Handles a slice of events, accessing any needed resources.
///
/// # Safety
/// * The handler must not access any resources not indicated by `resource_reads()` and `resource_writes()`.
/// * The given slice __must__ be transmuted to a slice of the event type returned by `event_id`.
unsafe fn handle_raw_batch(
&mut self,
events: *const (),
events_len: usize,
resources: &Resources,
ctx: SystemCtx,
world: &World,
);
}
// High-level event handlers.
/// An event handler. This type should be used by users, not `RawEventHandler`.
pub trait EventHandler<E: Event>: Send + Sync + 'static {
/// The resources accessed by this event handler.
type HandlerData: for<'a> SystemData<'a>;
/// Handles a single event. Users may implement `handle_batch`
/// instead which handles multiple events at once.
fn handle(&mut self, event: &E, data: &mut <Self::HandlerData as SystemData>::Output);
/// Handles a slice of events. This function may be called instead of `handle`
/// when multiple events are concerned.
///
/// The default implementation for this function simply calls `handle` on each
/// event in the slice.
fn handle_batch(&mut self, events: &[E], mut data: <Self::HandlerData as SystemData>::Output) {
events
.iter()
.for_each(|event| self.handle(event, &mut data));
}
/// Returns the strategy that should be used to invoke this handler.
/// The default implementation of this function returns `HandleStrategy::default()`.
fn strategy(&self) -> HandleStrategy {
HandleStrategy::default()
}
}
pub struct CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
inner: H,
/// Cached system ID.
id: SystemId,
/// Cached event ID.
event_id: EventId,
/// Cached resource reads.
resource_reads: Vec<ResourceId>,
/// Cached resource writes.
resource_writes: Vec<ResourceId>,
/// Cached component reads.
component_reads: Vec<ComponentTypeId>,
/// Cached component writes.
component_writes: Vec<ComponentTypeId>,
/// Cached handler data, or `None` if it has not yet been accessed.
data: Option<H::HandlerData>,
name: &'static str,
}
impl<H, E> CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
/// Creates a new `CachedEventHandler` caching the given event handler.
pub fn new(inner: H, name: &'static str) -> Self {
let component_writes = H::HandlerData::component_writes()
.into_iter()
.collect::<HashSet<_>>();
let mut resource_reads = H::HandlerData::resource_reads();
resource_reads.extend(
H::HandlerData::component_reads()
.into_iter()
.filter(|comp| !component_writes.contains(comp))
.map(|comp| resource_id_for_component(comp)),
);
let mut resource_writes = H::HandlerData::resource_writes();
resource_writes.extend(
H::HandlerData::component_writes()
.into_iter()
.map(|comp| resource_id_for_component(comp)),
);
Self {
id: SYSTEM_ID_MAPPINGS.lock().alloc(),
event_id: event_id_for::<E>(),
resource_reads,
resource_writes,
component_reads: H::HandlerData::component_reads(),
component_writes: H::HandlerData::component_writes(),
data: None,
inner,
name,
}
}
}
unsafe impl<H, E> RawEventHandler for CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
fn id(&self) -> SystemId {
self.id
}
fn name(&self) -> &'static str {
self.name
}
fn event_id(&self) -> EventId {
self.event_id
}
fn strategy(&self) -> HandleStrategy {
self.inner.strategy()
}
fn resource_reads(&self) -> &[ResourceId] {
&self.resource_reads
}
fn resource_writes(&self) -> &[ResourceId] {
&self.resource_writes
}
fn init(&mut self, resources: &mut Resources, ctx: SystemCtx, world: &World) {
let mut data = unsafe { H::HandlerData::load_from_resources(resources, ctx, world) };
data.init(resources, &self.component_reads, &self.component_writes);
self.data = Some(data);
}
unsafe fn handle_raw_batch(
&mut self,
events: *const (),
events_len: usize,
_resources: &Resources,
_ctx: SystemCtx,
_world: &World,
) {
// https://github.com/nvzqz/static-assertions-rs/issues/21
/*assert_eq_size!(*const [()], *const [H::Event]);
assert_eq_align!(*const [()], *const [H::Event]);*/
let events = std::slice::from_raw_parts(events as *const E, events_len);
let data = self.data.as_mut().unwrap();
self.inner.handle_batch(events, data.before_execution());
data.after_execution();
}
}
/// System data which allows you to trigger events of a given type.
pub struct Trigger<E>
where
E: Event,
{
ctx: SystemCtx,
queued: Vec<E>,
id: EventId,
}
impl<'a, E> SystemData<'a> for Trigger<E>
where
E: Event,
{
type Output = &'a mut Self;
unsafe fn load_from_resources(
_resources: &mut Resources,
ctx: SystemCtx,
_world: &World,
) -> Self {
Self {
ctx,
queued: vec![],
id: event_id_for::<E>(),
}
}
fn resource_reads() -> Vec<ResourceId> {
vec![]
}
fn resource_writes() -> Vec<ResourceId> {
vec![]
}
fn component_reads() -> Vec<ComponentTypeId> {
vec![]
}
fn component_writes() -> Vec<ComponentTypeId> {
vec![]
}
fn before_execution(&'a mut self) -> Self::Output {
self
}
fn after_execution(&mut self) {
// TODO: end-of-system handlers
// Move events to bump-allocated slice and send to scheduler.
let len = self.queued.len();
if len == 0 |
let ptr: *mut E = self
.ctx
.bump
.get_or_default()
.alloc_layout(Layout::for_value(self.queued.as_slice()))
.cast::<E>()
.as_ptr();
| {
return; // Nothing to do
} | conditional_block |
event.rs | APPINGS.lock().get_or_alloc(TypeId::of::<E>())
}
/// Marker trait for types which can be triggered as events.
pub trait Event: Send + Sync + 'static {}
impl<T> Event for T where T: Send + Sync + 'static {}
/// Strategy used to handle an event.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum HandleStrategy {
/*/// The handler will be invoked in the call to `trigger` so that
/// the system triggering it will observe any side effects from
/// handling the event.
///
/// This is the default strategy.
Immediate,*/
/// The handler will be run at the end of the system which triggered the event.
EndOfSystem,
/// The handle will be scheduled for running at the end of tick.
///
/// This is the default strategy.
EndOfTick,
}
impl Default for HandleStrategy {
fn default() -> Self {
HandleStrategy::EndOfTick
}
}
/// A raw event handler.
///
/// # Safety
/// * The event type returned by `event_id()` must be the exact
/// type which is handled by `handle_raw`. `handle_raw` must
/// interpret any events as the same type.
pub unsafe trait RawEventHandler: Send + Sync + 'static {
/// Returns the unique ID of this event handler, as allocated by `system_id_for::<T>()`.
fn id(&self) -> SystemId;
/// Returns the name of this event handler.
fn name(&self) -> &'static str;
/// Returns the ID of the event which is handled by this handler.
fn event_id(&self) -> EventId;
/// Returns the strategy that should be used to invoke this handler.
fn strategy(&self) -> HandleStrategy;
/// Returns the resources read by this event handler.
fn resource_reads(&self) -> &[ResourceId];
/// Returns the resources written by this event handler.
fn resource_writes(&self) -> &[ResourceId];
fn init(&mut self, resources: &mut Resources, ctx: SystemCtx, world: &World);
/// Handles a slice of events, accessing any needed resources.
///
/// # Safety
/// * The handler must not access any resources not indicated by `resource_reads()` and `resource_writes()`.
/// * The given slice __must__ be transmuted to a slice of the event type returned by `event_id`.
unsafe fn handle_raw_batch(
&mut self,
events: *const (),
events_len: usize,
resources: &Resources,
ctx: SystemCtx,
world: &World,
);
}
// High-level event handlers.
/// An event handler. This type should be used by users, not `RawEventHandler`.
pub trait EventHandler<E: Event>: Send + Sync + 'static {
/// The resources accessed by this event handler.
type HandlerData: for<'a> SystemData<'a>;
/// Handles a single event. Users may implement `handle_batch`
/// instead which handles multiple events at once.
fn handle(&mut self, event: &E, data: &mut <Self::HandlerData as SystemData>::Output);
/// Handles a slice of events. This function may be called instead of `handle`
/// when multiple events are concerned.
///
/// The default implementation for this function simply calls `handle` on each
/// event in the slice.
fn handle_batch(&mut self, events: &[E], mut data: <Self::HandlerData as SystemData>::Output) {
events
.iter()
.for_each(|event| self.handle(event, &mut data));
}
/// Returns the strategy that should be used to invoke this handler.
/// The default implementation of this function returns `HandleStrategy::default()`.
fn strategy(&self) -> HandleStrategy {
HandleStrategy::default()
}
}
pub struct CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
inner: H,
/// Cached system ID.
id: SystemId,
/// Cached event ID.
event_id: EventId,
/// Cached resource reads.
resource_reads: Vec<ResourceId>,
/// Cached resource writes.
resource_writes: Vec<ResourceId>,
/// Cached component reads.
component_reads: Vec<ComponentTypeId>,
/// Cached component writes.
component_writes: Vec<ComponentTypeId>,
/// Cached handler data, or `None` if it has not yet been accessed.
data: Option<H::HandlerData>,
name: &'static str,
}
impl<H, E> CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
/// Creates a new `CachedEventHandler` caching the given event handler.
pub fn new(inner: H, name: &'static str) -> Self {
let component_writes = H::HandlerData::component_writes()
.into_iter()
.collect::<HashSet<_>>();
let mut resource_reads = H::HandlerData::resource_reads();
resource_reads.extend(
H::HandlerData::component_reads()
.into_iter()
.filter(|comp| !component_writes.contains(comp))
.map(|comp| resource_id_for_component(comp)),
);
let mut resource_writes = H::HandlerData::resource_writes();
resource_writes.extend(
H::HandlerData::component_writes()
.into_iter()
.map(|comp| resource_id_for_component(comp)),
);
Self {
id: SYSTEM_ID_MAPPINGS.lock().alloc(),
event_id: event_id_for::<E>(),
resource_reads,
resource_writes,
component_reads: H::HandlerData::component_reads(),
component_writes: H::HandlerData::component_writes(),
data: None,
inner,
name,
}
}
}
unsafe impl<H, E> RawEventHandler for CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
fn id(&self) -> SystemId {
self.id
}
fn name(&self) -> &'static str {
self.name
}
fn event_id(&self) -> EventId {
self.event_id
}
fn strategy(&self) -> HandleStrategy {
self.inner.strategy()
}
fn resource_reads(&self) -> &[ResourceId] {
&self.resource_reads
}
fn resource_writes(&self) -> &[ResourceId] {
&self.resource_writes
}
fn init(&mut self, resources: &mut Resources, ctx: SystemCtx, world: &World) {
let mut data = unsafe { H::HandlerData::load_from_resources(resources, ctx, world) };
data.init(resources, &self.component_reads, &self.component_writes);
self.data = Some(data);
}
unsafe fn handle_raw_batch(
&mut self,
events: *const (),
events_len: usize,
_resources: &Resources,
_ctx: SystemCtx,
_world: &World,
) {
// https://github.com/nvzqz/static-assertions-rs/issues/21
/*assert_eq_size!(*const [()], *const [H::Event]);
assert_eq_align!(*const [()], *const [H::Event]);*/
let events = std::slice::from_raw_parts(events as *const E, events_len);
let data = self.data.as_mut().unwrap();
self.inner.handle_batch(events, data.before_execution());
data.after_execution();
}
}
/// System data which allows you to trigger events of a given type.
pub struct Trigger<E>
where
E: Event,
{
ctx: SystemCtx,
queued: Vec<E>,
id: EventId,
}
impl<'a, E> SystemData<'a> for Trigger<E>
where
E: Event,
{
type Output = &'a mut Self;
unsafe fn load_from_resources(
_resources: &mut Resources,
ctx: SystemCtx,
_world: &World,
) -> Self {
Self {
ctx,
queued: vec![],
id: event_id_for::<E>(),
}
}
fn resource_reads() -> Vec<ResourceId> {
vec![]
}
fn resource_writes() -> Vec<ResourceId> {
vec![]
}
fn component_reads() -> Vec<ComponentTypeId> {
vec![]
}
fn component_writes() -> Vec<ComponentTypeId> {
vec![]
}
fn before_execution(&'a mut self) -> Self::Output {
self
}
fn after_execution(&mut self) {
// TODO: end-of-system handlers
// Move events to bump-allocated slice and send to scheduler.
let len = self.queued.len();
if len == 0 {
return; // Nothing to do
}
let ptr: *mut E = self
.ctx
.bump
.get_or_default()
.alloc_layout(Layout::for_value(self.queued.as_slice()))
.cast::<E>()
.as_ptr();
self.queued
.drain(..)
.enumerate()
.for_each(|(index, event)| unsafe {
ptr::write(ptr.offset(index as isize), event);
});
self.ctx
.sender
.send(TaskMessage::TriggerEvents {
id: self.id,
ptr: ptr as *const (),
len,
})
.unwrap();
}
}
impl<E> Trigger<E>
where
E: Send + Sync + 'static,
{
pub fn | trigger | identifier_name | |
event.rs | , Eq, Default, Hash)]
pub struct EventId(pub usize);
impl From<usize> for EventId {
fn from(x: usize) -> Self {
Self(x)
}
}
lazy_static! {
pub static ref EVENT_ID_MAPPINGS: Mutex<Mappings<TypeId, EventId>> =
Mutex::new(Mappings::new());
}
/// Returns the event ID for the given type.
pub fn event_id_for<E>() -> EventId
where
E: Event,
{
EVENT_ID_MAPPINGS.lock().get_or_alloc(TypeId::of::<E>())
}
/// Marker trait for types which can be triggered as events.
pub trait Event: Send + Sync + 'static {}
impl<T> Event for T where T: Send + Sync + 'static {}
/// Strategy used to handle an event.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum HandleStrategy {
/*/// The handler will be invoked in the call to `trigger` so that
/// the system triggering it will observe any side effects from
/// handling the event.
///
/// This is the default strategy.
Immediate,*/
/// The handler will be run at the end of the system which triggered the event.
EndOfSystem,
/// The handle will be scheduled for running at the end of tick.
///
/// This is the default strategy.
EndOfTick,
}
impl Default for HandleStrategy {
fn default() -> Self {
HandleStrategy::EndOfTick
}
}
/// A raw event handler.
///
/// # Safety
/// * The event type returned by `event_id()` must be the exact
/// type which is handled by `handle_raw`. `handle_raw` must
/// interpret any events as the same type. | /// Returns the unique ID of this event handler, as allocated by `system_id_for::<T>()`.
fn id(&self) -> SystemId;
/// Returns the name of this event handler.
fn name(&self) -> &'static str;
/// Returns the ID of the event which is handled by this handler.
fn event_id(&self) -> EventId;
/// Returns the strategy that should be used to invoke this handler.
fn strategy(&self) -> HandleStrategy;
/// Returns the resources read by this event handler.
fn resource_reads(&self) -> &[ResourceId];
/// Returns the resources written by this event handler.
fn resource_writes(&self) -> &[ResourceId];
fn init(&mut self, resources: &mut Resources, ctx: SystemCtx, world: &World);
/// Handles a slice of events, accessing any needed resources.
///
/// # Safety
/// * The handler must not access any resources not indicated by `resource_reads()` and `resource_writes()`.
/// * The given slice __must__ be transmuted to a slice of the event type returned by `event_id`.
unsafe fn handle_raw_batch(
&mut self,
events: *const (),
events_len: usize,
resources: &Resources,
ctx: SystemCtx,
world: &World,
);
}
// High-level event handlers.
/// An event handler. This type should be used by users, not `RawEventHandler`.
pub trait EventHandler<E: Event>: Send + Sync + 'static {
/// The resources accessed by this event handler.
type HandlerData: for<'a> SystemData<'a>;
/// Handles a single event. Users may implement `handle_batch`
/// instead which handles multiple events at once.
fn handle(&mut self, event: &E, data: &mut <Self::HandlerData as SystemData>::Output);
/// Handles a slice of events. This function may be called instead of `handle`
/// when multiple events are concerned.
///
/// The default implementation for this function simply calls `handle` on each
/// event in the slice.
fn handle_batch(&mut self, events: &[E], mut data: <Self::HandlerData as SystemData>::Output) {
events
.iter()
.for_each(|event| self.handle(event, &mut data));
}
/// Returns the strategy that should be used to invoke this handler.
/// The default implementation of this function returns `HandleStrategy::default()`.
fn strategy(&self) -> HandleStrategy {
HandleStrategy::default()
}
}
pub struct CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
inner: H,
/// Cached system ID.
id: SystemId,
/// Cached event ID.
event_id: EventId,
/// Cached resource reads.
resource_reads: Vec<ResourceId>,
/// Cached resource writes.
resource_writes: Vec<ResourceId>,
/// Cached component reads.
component_reads: Vec<ComponentTypeId>,
/// Cached component writes.
component_writes: Vec<ComponentTypeId>,
/// Cached handler data, or `None` if it has not yet been accessed.
data: Option<H::HandlerData>,
name: &'static str,
}
impl<H, E> CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
/// Creates a new `CachedEventHandler` caching the given event handler.
pub fn new(inner: H, name: &'static str) -> Self {
let component_writes = H::HandlerData::component_writes()
.into_iter()
.collect::<HashSet<_>>();
let mut resource_reads = H::HandlerData::resource_reads();
resource_reads.extend(
H::HandlerData::component_reads()
.into_iter()
.filter(|comp| !component_writes.contains(comp))
.map(|comp| resource_id_for_component(comp)),
);
let mut resource_writes = H::HandlerData::resource_writes();
resource_writes.extend(
H::HandlerData::component_writes()
.into_iter()
.map(|comp| resource_id_for_component(comp)),
);
Self {
id: SYSTEM_ID_MAPPINGS.lock().alloc(),
event_id: event_id_for::<E>(),
resource_reads,
resource_writes,
component_reads: H::HandlerData::component_reads(),
component_writes: H::HandlerData::component_writes(),
data: None,
inner,
name,
}
}
}
unsafe impl<H, E> RawEventHandler for CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
fn id(&self) -> SystemId {
self.id
}
fn name(&self) -> &'static str {
self.name
}
fn event_id(&self) -> EventId {
self.event_id
}
fn strategy(&self) -> HandleStrategy {
self.inner.strategy()
}
fn resource_reads(&self) -> &[ResourceId] {
&self.resource_reads
}
fn resource_writes(&self) -> &[ResourceId] {
&self.resource_writes
}
fn init(&mut self, resources: &mut Resources, ctx: SystemCtx, world: &World) {
let mut data = unsafe { H::HandlerData::load_from_resources(resources, ctx, world) };
data.init(resources, &self.component_reads, &self.component_writes);
self.data = Some(data);
}
unsafe fn handle_raw_batch(
&mut self,
events: *const (),
events_len: usize,
_resources: &Resources,
_ctx: SystemCtx,
_world: &World,
) {
// https://github.com/nvzqz/static-assertions-rs/issues/21
/*assert_eq_size!(*const [()], *const [H::Event]);
assert_eq_align!(*const [()], *const [H::Event]);*/
let events = std::slice::from_raw_parts(events as *const E, events_len);
let data = self.data.as_mut().unwrap();
self.inner.handle_batch(events, data.before_execution());
data.after_execution();
}
}
/// System data which allows you to trigger events of a given type.
pub struct Trigger<E>
where
E: Event,
{
ctx: SystemCtx,
queued: Vec<E>,
id: EventId,
}
impl<'a, E> SystemData<'a> for Trigger<E>
where
E: Event,
{
type Output = &'a mut Self;
unsafe fn load_from_resources(
_resources: &mut Resources,
ctx: SystemCtx,
_world: &World,
) -> Self {
Self {
ctx,
queued: vec![],
id: event_id_for::<E>(),
}
}
fn resource_reads() -> Vec<ResourceId> {
vec![]
}
fn resource_writes() -> Vec<ResourceId> {
vec![]
}
fn component_reads() -> Vec<ComponentTypeId> {
vec![]
}
fn component_writes() -> Vec<ComponentTypeId> {
vec![]
}
fn before_execution(&'a mut self) -> Self::Output {
self
}
fn after_execution(&mut self) {
// TODO: end-of-system handlers
// Move events to bump-allocated slice and send to scheduler.
let len = self.queued.len();
if len == 0 {
return; // Nothing to do
}
let ptr: *mut E = self
.ctx
.bump
.get_or_default()
.alloc_layout(Layout::for_value(self.queued.as_slice()))
.cast::<E>()
.as_ptr();
self | pub unsafe trait RawEventHandler: Send + Sync + 'static { | random_line_split |
tutorial.js | specifics of Gremlin, its good to know what you are getting yourself into. Moreover, its important to know if Gremlin can be of use to you. Below is a list of a few key benefits of Gremlin:") + BR() +
DIV(LIST(
[
'1. Gremlin is useful for manually working with your graph;',
'2. Gremlin allows you to query a graph;',
'3. Gremlin can express complex graph traversals succinctly;',
'4. Gremlin is useful for exploring and learning about graphs;',
'5. Gremlin allows you to explore the Semantic Web/Web of Data;',
'6. Gremlin allows for universal path-based computations.'
]));
}
this._a = function() {
this.prev = this._annotation;
this.next = this._b;
return P("Chapter 2a - Defining variables.") + BR() +
P(" Gremlin gives you possibility to work with variables.") + BR() +
P(" Variables in Gremlin must be proceeded by a $ character.") +
P(" The assignment operator is ':=' and it is used to assign a value to a variable or an element to a list or map:") + BR() +
PLIST("$foo := 'bar'") + PLIST("$i := 1 + 5");
}
this._b = function() {
this.prev = this._a;
this.next = this._c;
return P("Chapter 2b - Using ”$_” and ”$_g” and ”.” special variables.") + BR() +
P(" There are three special variables in Gremlin ”$_” and ”$_g” and ”.”:") + BR() +
PLIST("• ”$_” is a reserved variable that denotes the root list. In this way, the root list can be redefined.") +
PLIST("• ”$_g” denotes the graph object. It allows the user to assign a working graph that will be referenced by graph functions when no graph argument is provided.") +
PLIST("• ”.” denotes reference to the root list.");
}
this._c = function() {
this.prev = this._b;
this.next = this._d; | return P("Chapter 2c - Using Gremlin build-in functions and data structures (maps, lists).") + BR() +
P(" Gremlin provides build-in functions and data structures which will be very useful while working with graphs.") + BR() +
P("To execute a function you should call it using special format - ”<prefix>:<function_name>(<arg>, ...)”:") + BR() +
PLIST("g:print('hello world!') - will execute build-in print function.") + BR() +
P("or without arguments:") + BR() +
PLIST("g:print() - will print empty string.") + BR() +
P("There are functions which could be referenced without <prefix> - global functions - like: null(), false(), true()") + BR() +
PLIST("$foo := false() - value returned by false() will be assigned to $foo variable.") + BR() +
P(" Gremlin has own implementation of Map and List data structures (will be familiar to Java developers):") + BR() +
PLIST("g:map(<key>, <value>, ...) - function used to construct map objects:") +
PLIST("g:map('foo', 'bar') - will return {'foo'='bar'} map.") + BR() +
P("the same goes for List:") + BR() +
PLIST("g:list(<value>,...) - function used to construct list objecs:") +
PLIST("g:list(1,2,3,4) - will return [1.0, 2.0, 3.0, 4.0].") + BR() +
P("result of map or list function could be assigned to a variable:") + BR() +
PLIST("$foo := g:map('foo', 'bar')") +
PLIST("$foo := g:list('foo', 'bar')") + BR() +
P("to get value from map use g:get(element, string) function:") + BR() +
PLIST("g:get(g:map('foo', 'bar'), 'foo') - returns 'bar'") + BR() +
P("g:get(list, number) function used to get values from list:") + BR() +
PLIST("g:get(g:list(3, 4), 1) - returns '3.0'") + BR() +
P("to assign new elements to map use g:assign(map,object,object) function:") + BR() +
PLIST("$foo := g:map('foo', 'bar')") +
PLIST("g:assign($foo, 'foo2', 'bar2') - returns 'bar2'") +
PLIST("g:print($foo) - returns {foo2=bar2, foo=bar}") + BR() +
P("Gremlin Function Library Reference could be found " + LINK("http://wiki.github.com/tinkerpop/gremlin/gremlin-function-library", "here"));
}
this._d = function() {
this.prev = this._c;
this.next = this._e;
return P("Chapter 2d - Gremlin Loops (foreach, while).") + BR() +
P(" Gremlin also has build-in loop support - foreach and while:") + BR() +
P("1. Foreach") + BR() +
P(" The foreach statement will loop over its body the number of times as there are values in the provided loop list. Each item in the list is assigned to a variable and that variable can be referenced in the loop body. The generic structure and example of foreach is provided below.") + BR() +
PLIST("foreach variable in xpath_list<br/> statement*<br />end") + BR() +
P("Here is a little example how to use it:") + BR() +
PLIST("$i := 0") +
PLIST("foreach $j in 1 | 2 | 3") +
PLIST(" $i := $i + $j") +
PLIST("end") + BR() +
P("this will return - 6.0.") + BR() +
P("2. While") + BR() +
P(" The while statement will loop over its body until the provided condition is met. The generic structure and example of while is provided below.") + BR() +
PLIST("while xpath_boolean<br/> statement*<br/>end") + BR() +
P("Here is a little example how to use it:") + BR() +
PLIST("$i := 'g'") +
PLIST("while not(matches($i,'ggg'))") +
PLIST(" $i := concat($i,'g')") +
PLIST("end") + BR() +
P("this will return - 'ggg'");
}
this._e = function() {
this.prev = this._d;
this.next = this._f;
return P("Chapter 2e - Defining custom functions/paths.") + BR() +
P(" Gremlin gives you possibility to define custom functions and paths.") + BR() +
P("Function can be defined using following syntax:") + BR() +
PLIST("func <prefix>:<functiona-name>($var, ...)<br/> statement*<br/>end") +
P("Example") + BR() +
PLIST("func u:hello-name($name)<br/> g:print(concat('hello ', $name))<br/>end") + BR() +
P("and if you then run `u:hello-name('pavel')` result will be 'hello pavel'.") +
P("Please note - there are no return statement - function returns value of its last statement!") + BR() +
P("Path can be defined using following syntax:") + BR() +
PLIST("path string<br/> statement*<br/>end") + BR() +
P("Example") + BR() +
PLIST("path co-developer<br/> ./outE[@label='created']/inV/inE[@label='created']/outV[g:except($_)]<br/>end");
}
this._f = function() {
this.prev = this._e;
this.next = this._p3;
return P("Chapter 2f - Basic graph traversals.") + BR() +
P(" First of all we should learn how to open graph and how to load graph data from file.") +
P("As Gremlin has alot of backends graph could be opened using different functions, most common if then are:") + BR() +
PLIST("1. tg:open() - used to open TinkerGraph;") +
PLIST("2. neo4j:open(<database-name-as-string>) - used to open Neo4j database connection;") +
PLIST | random_line_split | |
main_classes.py | if random.randint(0, results[rarity]) == 1:
quantity += 1
countdown -= 1
return quantity
def drop_building(dictionary, p, limit=None):
limit = limit or len(adjectives)
drops_i = []
for k, v in dictionary.items():
quantity = dropper(v['rarity'])
quantity = quantity if quantity < limit else limit
limit -= quantity
if quantity:
if quantity > 1 and v['category'] != 'residence':
n = random.randint(0, quantity)
unique_names = find_unique_names(quantity - n, names, p.square.unique_building_names)
p.square.unique_building_names += unique_names
for i in range(0, quantity - n):
drops_i.append(Building(name=f"{unique_names[i]}'s {remove_little_words(k).capitalize()}", p=p, **v))
unique_adjectives = find_unique_names(n, adjectives, p.square.unique_building_names)
p.square.unique_building_names += unique_adjectives
for i in range(0, n):
drops_i.append(Building(name=f"the {unique_adjectives[i]} {remove_little_words(k).capitalize()}", p=p, **v))
elif quantity > 1 and v['category'] == 'residence':
unique_house_names = find_unique_names(quantity, names, p.square.unique_house_names)
p.square.unique_house_names += unique_house_names
for i in range(0, quantity):
drops_i.append(Building(name=f"{unique_house_names[i]}'s {remove_little_words(k)}", p=p, **v))
else:
drops_i.append(Building(name=k, p=p, **v))
return drops_i
def drop_mob(dictionary, p, limit=None, square=None):
square = square or p.square
limit = limit or len(names) - len(square.unique_mob_names)
drops_i = []
for k, v in dictionary.items():
quantity = dropper(v['rarity'])
quantity = quantity if quantity < limit else limit
limit -= quantity
if quantity:
if quantity > 1:
unique_names = find_unique_names(quantity, names, square.unique_mob_names)
p.square.unique_mob_names += unique_names
for i in range(0, len(unique_names)):
drops_i.append(Mob(name=f"{k} named {unique_names[i]}", p=p, **v))
else:
if k not in [n.name for n in p.square.mobs]:
drops_i.append(Mob(name=k, p=p, **v))
else:
name = find_unique_names(1, names, square.unique_mob_names)[0]
drops_i.append(Mob(name=f"{k} named {name}", p=p, **v))
return drops_i
def drop_item(dictionary):
""" Randomly generates objects based on rarity """
drops_i = []
for k, v in dictionary.items():
quantity = dropper(v['rarity'])
if quantity:
drops_i.append(Item(name=k, quantity=quantity, **v))
return drops_i
class MapSquare:
def __init__(self, name="", square_type=None):
square_types = ["forest", "mountains", "desert", "city", "swamp", "ocean"]
self.square_type = square_type or square_types[random.randint(0, len(square_types) - 1)]
self.name = name
self.unique_mob_names = []
self.unique_building_names = []
self.unique_house_names = []
mobs = []
items = []
buildings = []
def generate_items(self):
self.items = drop_item(add_dicts_together(items["master"], items[self.square_type]))
def generate_buildings(self, p):
self.buildings = drop_building(add_dicts_together(buildings["master"], buildings[self.square_type]), p)
def generate_mobs(self, p):
self.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square_type]), p)
def clean_up_map(self):
""" Remove items with quantity of zero from the map inventory"""
self.items = [i for i in self.items if i.quantity != 0]
@staticmethod
def map_picture(the_map, p):
"""With the player's location in the center, draw a 5 x 5 map with map square type
and coordinates in each square"""
xy = (p.location[0] - 2, p.location[1] + 2)
map_coords = []
for y in range(0, 5):
row = [(xy[0] + x, xy[1] - y) for x in range(0, 5)]
map_coords.append(row)
pretty_map = []
for r in map_coords:
row = []
for coordinates in r:
if coordinates in the_map.keys():
if p.quest and p.job and p.quest[1] == coordinates and p.job.location == coordinates:
star = '*$ '
elif p.quest and p.quest[1] == coordinates:
star = ' * '
elif p.job and p.job.location == coordinates:
star = ' $ '
else:
star = ' '
row.append("|{!s:9}{}|".format(the_map[coordinates].square_type, star))
else:
row.append("|{!s:12}|".format(' '))
pretty_map.append(row)
for row in pretty_map:
print(''.join(row))
class Player:
def __init__(self, name, location):
self.name = name
self.location = location
self.square = None
self.money = 0
self.quest = None
self.job = None
self.phase = "day"
self.equipped_weapon = None
self.major_armor = None
self.minor_armor = None
self.building_local = None
self.inventory = []
self.skills = {}
self.health = 100
self.greeting_count = 0
self.body_count = 0
self.assassination_count = 0
self.hit_list = []
self.death_count = 0
# TODO increase insurance cost every death?
self.food_count = 0
self.run_away_count = 0
self.speed_bonus = False
self.game_won = False
def game_over(self):
if self.game_won is False:
self.game_won = True
print(colored("You have won the game!", "green"))
print("You may continue playing to earn more achievements if you wish.")
if self.run_away_count == 0:
print("Congratulations, you have achieved the True Bravery achievement, having won the game without ever running away from a fight.")
if self.run_away_count > 100:
print("Congratulations, you have achieved the True Cowardice achievement, having won the game after running away from over 100 battles.")
def clean_up_inventory(self):
""" Remove items with quantity of zero from the map inventory"""
self.inventory = [i for i in self.inventory if i.quantity != 0]
def phase_change(self, the_map):
self.phase = 'day' if self.phase == 'night' else 'night'
for k, square in the_map.items():
if self.location != k:
square.generate_items()
for b in square.buildings:
if b.ware_list:
b.wares = drop_item(b.ware_list)
while not b.wares:
b.wares = drop_item(b.ware_list)
if b.name not in ('a castle', 'a volcanic base'):
jobs = {}
buiding_dict = add_dicts_together(buildings['master'], buildings[square.square_type])
for key, v in buiding_dict.items():
if key == b.name and v.get('jobs'):
for name, values in v['jobs'].items():
jobs[name] = values
b.jobs = b.drop_job(jobs)
if self.phase == 'day':
self.speed_bonus = False
for mob in square.mobs:
mob.health = 100
mob.irritation_level = 0
mob.quest = None if self.quest is None else mob.quest
if not square.mobs:
square.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square.square_type]),
self, limit=len(names), square=square)
def formatted_inventory(self):
formatted = []
for item in self.inventory:
if item.quantity > 1:
formatted.append(f"{int_to_words(item.quantity)} {item.plural}")
else:
formatted.append(item.name)
if formatted:
return comma_separated(formatted)
else:
return "nothing"
def pretty_inventory(self):
w = self.equipped_weapon
major = self.major_armor.defense if self.major_armor else 0
minor = self.minor_armor.defense if self.minor_armor else 0
armor_defense = (major + minor) * 5
armors = [self.major_armor.name if self.major_armor else None, self.minor_armor.name if self.minor_armor else None]
inventory = {'inventory_items': f"You have {self.formatted_inventory()} in your inventory.",
'weapon | 'super common': 2}
quantity = 0
countdown = random.randint(0, 10)
while countdown > 0: | random_line_split | |
main_classes.py | (formatted)
else:
return "nothing"
def pretty_inventory(self):
w = self.equipped_weapon
major = self.major_armor.defense if self.major_armor else 0
minor = self.minor_armor.defense if self.minor_armor else 0
armor_defense = (major + minor) * 5
armors = [self.major_armor.name if self.major_armor else None, self.minor_armor.name if self.minor_armor else None]
inventory = {'inventory_items': f"You have {self.formatted_inventory()} in your inventory.",
'weapon': f"You are wielding {int_to_words(w.quantity)} "
f"{remove_little_words(w.name) if w.quantity == 1 else w.plural}." if w else None,
'armor': f"You are wearing {' and '.join(x for x in armors if x)}, "
f"giving you a {armor_defense}% reduction in incoming damage." if self.minor_armor or self.major_armor else None}
return '\n'.join(v for v in inventory.values() if v)
def status(self):
skills = [f"{k}: {v}%." for k, v in self.skills.items()]
job = f"You have a job as a {self.job.name}." if self.job else None
quest = "You have a quest." if self.quest else None
if job and quest:
job_string = "\n".join([job, quest])
elif job or quest:
job_string = job if job else quest
else:
job_string = "You do not have a job, and you are not contributing to society."
status_string = {
'health': f'Currently, you have {self.health} health.',
'location': f'You are located on map coordinates {self.location}, '
f'which is {self.square.square_type}.',
'building_local': f'You are inside {self.building_local.name}.' if self.building_local else None,
'skills': '\n'.join(skills) if skills else "You don't have any skills.",
'money': f"You have ${self.money} in your wallet.",
'job': job_string}
return '\n'.join(v for v in status_string.values() if v)
def statistics(self):
print(f"You have killed {self.body_count} mobs.")
print(f"You have ran away from {self.run_away_count} battles.")
print(f"You have eaten {self.food_count} items.")
print(f"You have performed {self.assassination_count} assassinations.")
print(f"You have talked to mobs {self.greeting_count} times.")
def view_hit_list(self):
if self.hit_list:
print(f"If you ever run across these shady characters, be sure to take their names off your list: {comma_separated(self.hit_list)}")
else:
print("Looks like you don't know of anyone who needs to be dead.")
def increase_skill(self, skill, increase):
try:
self.skills[skill] += increase
except KeyError:
self.skills[skill] = increase
print(f"You have increased your mastery of {skill} by {increase}% for a total of {self.skills[skill]}%.")
class Item:
def __init__(self, name, quantity, plural, category=None, perishable=None,
flammable=None, rarity=None, price=None, weapon_rating=None, defense=None):
self.name = name
self.quantity = quantity
self.plural = plural
self.category = category or None
self.perishable = perishable or None
self.flammable = flammable or None
self.rarity = rarity or None
self.price = price or None
self.weapon_rating = weapon_rating or None
self.defense = defense or None
def copy(self):
return Item(name=self.name, quantity=self.quantity, plural=self.plural, category=self.category,
perishable=self.perishable, flammable=self.flammable, rarity=self.rarity,
weapon_rating=self.weapon_rating, defense=self.defense)
class Building(object):
def __init__(self, name, p, plural, category=None, rarity=None, ware_list=None, mobs=None, jobs=None):
self.name = name
self.p = p
self.quantity = 1
self.plural = plural
self.category = category or None
self.rarity = rarity or None
self.ware_list = ware_list
self.wares = self.drop_wares()
self.mobs = drop_mob(mobs, p) if mobs else None
self.jobs = self.drop_job(jobs) if jobs else None
if self.name in ('a castle', 'a volcanic base'):
self.boss_mobs_and_jobs()
def drop_wares(self):
if self.ware_list:
wares = drop_item(self.ware_list)
while not wares:
wares = drop_item(self.ware_list)
return wares
else:
return []
def drop_job(self, jobs):
drops_i = []
for k, v in jobs.items():
if odds(2):
drops_i.append(Job(name=k, location=self.p.location, **v))
return drops_i
def boss_mobs_and_jobs(self):
boss_major_armors = [Item('a coat of impervious dragon scales', plural='coats of dragon scales', quantity=1, category='major armor', rarity='super rare', defense=5),
Item('an enchanted leather duster', plural='enchanted leather dusters', quantity=1, category='major armor', defense=5, rarity='super rare'),
Item('a coat of actual live grizzly bears', plural='coats of actual live grizzly bears', quantity=1, category='major armor', defense=5, rarity='super rare')]
boss_minor_armors = [Item('wings of an angel', plural='wings of angels', quantity=1, rarity='super rare', category='minor armor', defense=5),
Item('an OSHA approved hard hat', plural='OSHA approved hard hats', quantity=1, rarity='super rare', category='minor armor', defense=5),
Item('a pair boots that were made for walkin', plural='pairs of boots that were made for walkin', quantity=1, rarity='super rare', category='minor armor', defense=5)]
boss_weapons = [Item('an apache helicopter', plural='apache helicopters', rarity='super rare', weapon_rating=6, quantity=1),
Item('a trebuchet', plural='trebuchets', weapon_rating=6, quantity=1, rarity='super rare'),
Item('an army of attacking wizards', plural='armies of attacking wizards', weapon_rating=6, quantity=1, rarity='super rare')]
boss_names = ["the Terrifying Dragon of Soul Slaying", "the Great Salamander of Darkness", "the Squirrel of Destiny", ]
random.shuffle(boss_names)
random.shuffle(boss_weapons)
random.shuffle(boss_major_armors)
random.shuffle(boss_minor_armors)
boss = Mob(boss_names[0], self.p, plural=boss_names[0], rarity='super rare')
boss.health = 500
boss.equipped_weapon = boss_weapons[0]
boss.major_armor = boss_major_armors[0]
boss.minor_armor = boss_minor_armors[0]
boss.irritation_level = 10
self.mobs = [boss]
if self.name == 'a castle':
self.jobs = [Job('king of the realm', location=self.p.location, salary=1100)]
if self.name == 'a volcanic base':
self.jobs = [Job('evil overlord', location=self.p.location, salary=1100)]
class Job:
def __init__(self, name, location, skills_needed=None, salary=0, skills_learned=None, inventory_needed=None):
self.name = name
self.location = location
self.skills_needed = skills_needed or None
self.salary = salary or 0
self.skills_learned = skills_learned or None
self.inventory_needed = inventory_needed or None
self.application_attempts = 0
class Mob:
def __init__(self, name, p, plural, rarity, inventory=None):
self.name = name
self.p = p
self.plural = plural
self.quantity = 1
self.rarity = rarity
self.skills = self.skills()
self.quest = None
self.inventory = inventory or drop_item(add_dicts_together(items['master'], items[p.square.square_type]))
self.health = 100
self.equipped_weapon = self.equip()
major = [x for x in self.inventory if x.category == 'major armor']
minor = [x for x in self.inventory if x.category == 'minor armor']
self.major_armor = major[0] if major else None
self.minor_armor = minor[0] if minor else None
self.irritation_level = 0
def equip(self):
nice_weapons = []
for i in self.inventory:
try:
if i.weapon_rating:
nice_weapons.append(i)
except AttributeError:
pass
nice_weapons.sort(key=lambda x: x.weapon_rating, reverse=True)
if nice_weapons:
| self.inventory.remove(nice_weapons[0])
return nice_weapons[0] | conditional_block | |
main_classes.py | self.name = name
self.unique_mob_names = []
self.unique_building_names = []
self.unique_house_names = []
mobs = []
items = []
buildings = []
def generate_items(self):
self.items = drop_item(add_dicts_together(items["master"], items[self.square_type]))
def generate_buildings(self, p):
self.buildings = drop_building(add_dicts_together(buildings["master"], buildings[self.square_type]), p)
def generate_mobs(self, p):
self.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square_type]), p)
def clean_up_map(self):
""" Remove items with quantity of zero from the map inventory"""
self.items = [i for i in self.items if i.quantity != 0]
@staticmethod
def map_picture(the_map, p):
"""With the player's location in the center, draw a 5 x 5 map with map square type
and coordinates in each square"""
xy = (p.location[0] - 2, p.location[1] + 2)
map_coords = []
for y in range(0, 5):
row = [(xy[0] + x, xy[1] - y) for x in range(0, 5)]
map_coords.append(row)
pretty_map = []
for r in map_coords:
row = []
for coordinates in r:
if coordinates in the_map.keys():
if p.quest and p.job and p.quest[1] == coordinates and p.job.location == coordinates:
star = '*$ '
elif p.quest and p.quest[1] == coordinates:
star = ' * '
elif p.job and p.job.location == coordinates:
star = ' $ '
else:
star = ' '
row.append("|{!s:9}{}|".format(the_map[coordinates].square_type, star))
else:
row.append("|{!s:12}|".format(' '))
pretty_map.append(row)
for row in pretty_map:
print(''.join(row))
class Player:
def __init__(self, name, location):
self.name = name
self.location = location
self.square = None
self.money = 0
self.quest = None
self.job = None
self.phase = "day"
self.equipped_weapon = None
self.major_armor = None
self.minor_armor = None
self.building_local = None
self.inventory = []
self.skills = {}
self.health = 100
self.greeting_count = 0
self.body_count = 0
self.assassination_count = 0
self.hit_list = []
self.death_count = 0
# TODO increase insurance cost every death?
self.food_count = 0
self.run_away_count = 0
self.speed_bonus = False
self.game_won = False
def game_over(self):
if self.game_won is False:
self.game_won = True
print(colored("You have won the game!", "green"))
print("You may continue playing to earn more achievements if you wish.")
if self.run_away_count == 0:
print("Congratulations, you have achieved the True Bravery achievement, having won the game without ever running away from a fight.")
if self.run_away_count > 100:
print("Congratulations, you have achieved the True Cowardice achievement, having won the game after running away from over 100 battles.")
def clean_up_inventory(self):
""" Remove items with quantity of zero from the map inventory"""
self.inventory = [i for i in self.inventory if i.quantity != 0]
def phase_change(self, the_map):
self.phase = 'day' if self.phase == 'night' else 'night'
for k, square in the_map.items():
if self.location != k:
square.generate_items()
for b in square.buildings:
if b.ware_list:
b.wares = drop_item(b.ware_list)
while not b.wares:
b.wares = drop_item(b.ware_list)
if b.name not in ('a castle', 'a volcanic base'):
jobs = {}
buiding_dict = add_dicts_together(buildings['master'], buildings[square.square_type])
for key, v in buiding_dict.items():
if key == b.name and v.get('jobs'):
for name, values in v['jobs'].items():
jobs[name] = values
b.jobs = b.drop_job(jobs)
if self.phase == 'day':
self.speed_bonus = False
for mob in square.mobs:
mob.health = 100
mob.irritation_level = 0
mob.quest = None if self.quest is None else mob.quest
if not square.mobs:
square.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square.square_type]),
self, limit=len(names), square=square)
def formatted_inventory(self):
formatted = []
for item in self.inventory:
if item.quantity > 1:
formatted.append(f"{int_to_words(item.quantity)} {item.plural}")
else:
formatted.append(item.name)
if formatted:
return comma_separated(formatted)
else:
return "nothing"
def pretty_inventory(self):
w = self.equipped_weapon
major = self.major_armor.defense if self.major_armor else 0
minor = self.minor_armor.defense if self.minor_armor else 0
armor_defense = (major + minor) * 5
armors = [self.major_armor.name if self.major_armor else None, self.minor_armor.name if self.minor_armor else None]
inventory = {'inventory_items': f"You have {self.formatted_inventory()} in your inventory.",
'weapon': f"You are wielding {int_to_words(w.quantity)} "
f"{remove_little_words(w.name) if w.quantity == 1 else w.plural}." if w else None,
'armor': f"You are wearing {' and '.join(x for x in armors if x)}, "
f"giving you a {armor_defense}% reduction in incoming damage." if self.minor_armor or self.major_armor else None}
return '\n'.join(v for v in inventory.values() if v)
def status(self):
skills = [f"{k}: {v}%." for k, v in self.skills.items()]
job = f"You have a job as a {self.job.name}." if self.job else None
quest = "You have a quest." if self.quest else None
if job and quest:
job_string = "\n".join([job, quest])
elif job or quest:
job_string = job if job else quest
else:
job_string = "You do not have a job, and you are not contributing to society."
status_string = {
'health': f'Currently, you have {self.health} health.',
'location': f'You are located on map coordinates {self.location}, '
f'which is {self.square.square_type}.',
'building_local': f'You are inside {self.building_local.name}.' if self.building_local else None,
'skills': '\n'.join(skills) if skills else "You don't have any skills.",
'money': f"You have ${self.money} in your wallet.",
'job': job_string}
return '\n'.join(v for v in status_string.values() if v)
def statistics(self):
print(f"You have killed {self.body_count} mobs.")
print(f"You have ran away from {self.run_away_count} battles.")
print(f"You have eaten {self.food_count} items.")
print(f"You have performed {self.assassination_count} assassinations.")
print(f"You have talked to mobs {self.greeting_count} times.")
def view_hit_list(self):
if self.hit_list:
print(f"If you ever run across these shady characters, be sure to take their names off your list: {comma_separated(self.hit_list)}")
else:
print("Looks like you don't know of anyone who needs to be dead.")
def increase_skill(self, skill, increase):
try:
self.skills[skill] += increase
except KeyError:
self.skills[skill] = increase
print(f"You have increased your mastery of {skill} by {increase}% for a total of {self.skills[skill]}%.")
class Item:
def __init__(self, name, quantity, plural, category=None, perishable=None,
flammable=None, rarity=None, price=None, weapon_rating=None, defense=None):
self.name = name
self.quantity = quantity
self.plural = plural
self.category = category or None
self.perishable = perishable or None
self.flammable = flammable or None
self.rarity = rarity or None
self.price = price or None
self.weapon_rating = weapon_rating or None
self.defense = defense or None
def copy(self):
return Item(name=self.name, quantity=self.quantity, plural=self.plural, category=self.category,
perishable=self.perishable, flammable=self.flammable, rarity=self.rarity,
weapon_rating=self.weapon_rating, defense=self.defense)
class Building(object):
def | __init__ | identifier_name | |
main_classes.py | 0]
drops_i.append(Mob(name=f"{k} named {name}", p=p, **v))
return drops_i
def drop_item(dictionary):
""" Randomly generates objects based on rarity """
drops_i = []
for k, v in dictionary.items():
quantity = dropper(v['rarity'])
if quantity:
drops_i.append(Item(name=k, quantity=quantity, **v))
return drops_i
class MapSquare:
def __init__(self, name="", square_type=None):
square_types = ["forest", "mountains", "desert", "city", "swamp", "ocean"]
self.square_type = square_type or square_types[random.randint(0, len(square_types) - 1)]
self.name = name
self.unique_mob_names = []
self.unique_building_names = []
self.unique_house_names = []
mobs = []
items = []
buildings = []
def generate_items(self):
self.items = drop_item(add_dicts_together(items["master"], items[self.square_type]))
def generate_buildings(self, p):
self.buildings = drop_building(add_dicts_together(buildings["master"], buildings[self.square_type]), p)
def generate_mobs(self, p):
self.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square_type]), p)
def clean_up_map(self):
""" Remove items with quantity of zero from the map inventory"""
self.items = [i for i in self.items if i.quantity != 0]
@staticmethod
def map_picture(the_map, p):
"""With the player's location in the center, draw a 5 x 5 map with map square type
and coordinates in each square"""
xy = (p.location[0] - 2, p.location[1] + 2)
map_coords = []
for y in range(0, 5):
row = [(xy[0] + x, xy[1] - y) for x in range(0, 5)]
map_coords.append(row)
pretty_map = []
for r in map_coords:
row = []
for coordinates in r:
if coordinates in the_map.keys():
if p.quest and p.job and p.quest[1] == coordinates and p.job.location == coordinates:
star = '*$ '
elif p.quest and p.quest[1] == coordinates:
star = ' * '
elif p.job and p.job.location == coordinates:
star = ' $ '
else:
star = ' '
row.append("|{!s:9}{}|".format(the_map[coordinates].square_type, star))
else:
row.append("|{!s:12}|".format(' '))
pretty_map.append(row)
for row in pretty_map:
print(''.join(row))
class Player:
def __init__(self, name, location):
self.name = name
self.location = location
self.square = None
self.money = 0
self.quest = None
self.job = None
self.phase = "day"
self.equipped_weapon = None
self.major_armor = None
self.minor_armor = None
self.building_local = None
self.inventory = []
self.skills = {}
self.health = 100
self.greeting_count = 0
self.body_count = 0
self.assassination_count = 0
self.hit_list = []
self.death_count = 0
# TODO increase insurance cost every death?
self.food_count = 0
self.run_away_count = 0
self.speed_bonus = False
self.game_won = False
def game_over(self):
if self.game_won is False:
self.game_won = True
print(colored("You have won the game!", "green"))
print("You may continue playing to earn more achievements if you wish.")
if self.run_away_count == 0:
print("Congratulations, you have achieved the True Bravery achievement, having won the game without ever running away from a fight.")
if self.run_away_count > 100:
print("Congratulations, you have achieved the True Cowardice achievement, having won the game after running away from over 100 battles.")
def clean_up_inventory(self):
""" Remove items with quantity of zero from the map inventory"""
self.inventory = [i for i in self.inventory if i.quantity != 0]
def phase_change(self, the_map):
self.phase = 'day' if self.phase == 'night' else 'night'
for k, square in the_map.items():
if self.location != k:
square.generate_items()
for b in square.buildings:
if b.ware_list:
b.wares = drop_item(b.ware_list)
while not b.wares:
b.wares = drop_item(b.ware_list)
if b.name not in ('a castle', 'a volcanic base'):
jobs = {}
buiding_dict = add_dicts_together(buildings['master'], buildings[square.square_type])
for key, v in buiding_dict.items():
if key == b.name and v.get('jobs'):
for name, values in v['jobs'].items():
jobs[name] = values
b.jobs = b.drop_job(jobs)
if self.phase == 'day':
self.speed_bonus = False
for mob in square.mobs:
mob.health = 100
mob.irritation_level = 0
mob.quest = None if self.quest is None else mob.quest
if not square.mobs:
square.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square.square_type]),
self, limit=len(names), square=square)
def formatted_inventory(self):
formatted = []
for item in self.inventory:
if item.quantity > 1:
formatted.append(f"{int_to_words(item.quantity)} {item.plural}")
else:
formatted.append(item.name)
if formatted:
return comma_separated(formatted)
else:
return "nothing"
def pretty_inventory(self):
w = self.equipped_weapon
major = self.major_armor.defense if self.major_armor else 0
minor = self.minor_armor.defense if self.minor_armor else 0
armor_defense = (major + minor) * 5
armors = [self.major_armor.name if self.major_armor else None, self.minor_armor.name if self.minor_armor else None]
inventory = {'inventory_items': f"You have {self.formatted_inventory()} in your inventory.",
'weapon': f"You are wielding {int_to_words(w.quantity)} "
f"{remove_little_words(w.name) if w.quantity == 1 else w.plural}." if w else None,
'armor': f"You are wearing {' and '.join(x for x in armors if x)}, "
f"giving you a {armor_defense}% reduction in incoming damage." if self.minor_armor or self.major_armor else None}
return '\n'.join(v for v in inventory.values() if v)
def status(self):
skills = [f"{k}: {v}%." for k, v in self.skills.items()]
job = f"You have a job as a {self.job.name}." if self.job else None
quest = "You have a quest." if self.quest else None
if job and quest:
job_string = "\n".join([job, quest])
elif job or quest:
job_string = job if job else quest
else:
job_string = "You do not have a job, and you are not contributing to society."
status_string = {
'health': f'Currently, you have {self.health} health.',
'location': f'You are located on map coordinates {self.location}, '
f'which is {self.square.square_type}.',
'building_local': f'You are inside {self.building_local.name}.' if self.building_local else None,
'skills': '\n'.join(skills) if skills else "You don't have any skills.",
'money': f"You have ${self.money} in your wallet.",
'job': job_string}
return '\n'.join(v for v in status_string.values() if v)
def statistics(self):
|
def view_hit_list(self):
if self.hit_list:
print(f"If you ever run across these shady characters, be sure to take their names off your list: {comma_separated(self.hit_list)}")
else:
print("Looks like you don't know of anyone who needs to be dead.")
def increase_skill(self, skill, increase):
try:
self.skills[skill] += increase
except KeyError:
self.skills[skill] = increase
print(f"You have increased your mastery of {skill} by {increase}% for a total of {self.skills[skill]}%.")
class Item:
def __init__(self, name, quantity, plural, category=None, perishable=None,
flammable=None, rarity=None, price=None | print(f"You have killed {self.body_count} mobs.")
print(f"You have ran away from {self.run_away_count} battles.")
print(f"You have eaten {self.food_count} items.")
print(f"You have performed {self.assassination_count} assassinations.")
print(f"You have talked to mobs {self.greeting_count} times.") | identifier_body |
tanzania-improved.py | ()
# In[9]:
#dropping the labels that you are supposed to predict and the excess from train_head
cols = ['ID','mobile_money', 'savings', 'borrowing','insurance']
train_data = train_data.drop(cols, axis=1)
x_test = test_data.drop(['ID'], axis=1)
# In[10]:
#looking at the unique classification classes
train_data['mobile_money_classification'].unique()
# In[11]:
#lets look at both the train and test data and see if they match after the drop
train_data.columns
# In[12]:
x_test.columns
# In[13]:
X = train_data.drop(['mobile_money_classification'], axis=1)
y = train_data['mobile_money_classification']
# In[14]:
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(random_state = 42,n_estimators=1400,criterion='gini')
rf.fit(X, y)
#using the base model to build the feature importance
import pandas as pd
feature_importances = pd.DataFrame(rf.feature_importances_,
index = X.columns,
columns=['importance']).sort_values('importance',ascending=False)
print(feature_importances)
# In[15]:
#lets drop the most irrelevant columns in both X and the x_test
#from the already done tests we find that dropping the last three is what works best
X1 = X.drop(['Q8_11','Q8_7','Q8_6'], axis=1)
x_test1 = x_test.drop(['Q8_11','Q8_7','Q8_6'], axis=1)
# In[16]:
#lets normalize the datasets
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
X2 = scaler.fit_transform(X1)
X_test1=scaler.fit_transform(x_test1)
# In[17]:
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier
scores = []
rf = RandomForestClassifier(random_state = 42,n_estimators=1400,criterion='gini')
cv = KFold(n_splits=10, random_state=42, shuffle=False)
for train_index, test_index in cv.split(X):
print("Train Index: ", train_index, "\n")
print("Test Index: ", test_index)
X_train, X_test, y_train, y_test = X2[train_index], X2[test_index], y[train_index], y[test_index]
rf.fit(X_train, y_train)
scores.append(rf.score(X_test, y_test))
# In[18]:
from pprint import pprint
# Look at parameters used by our current forest
print('Parameters currently in use:\n')
pprint(rf.get_params())
# In[19]:
from sklearn.model_selection import RandomizedSearchCV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
pprint(random_grid)
# In[20]:
# Use the random grid to search for best hyperparameters
# First create the base model to tune
rf1 = RandomForestClassifier()
# Random search of parameters, using 3 fold cross validation,
# search across 100 different combinations, and use all available cores
rf1_random = RandomizedSearchCV(estimator = rf1, param_distributions = random_grid, n_iter = 100, cv = 3, verbose=2, random_state=42, n_jobs = -1)
# In[21]:
# Fit the random search model
#rf1_random.fit(X_train, y_train)
# In[22]:
#rf1_random.best_params_
# In[23]:
#grid search using cross validation
#Random search allowed us to narrow down the range for each hyperparameter.
#Now that we know where to concentrate our search, we can explicitly specify every combination of settings to try.
#We do this with GridSearchCV, a method that, instead of sampling randomly from a distribution, evaluates all combinations we define.
#To use Grid Search, we make another grid based on the best values provided by random search:
from sklearn.model_selection import GridSearchCV
# Create the parameter grid based on the results of random search
param_grid = {
'bootstrap': [True],
'max_depth': [100, 100, 110, 120],
'max_features': [2, 3],
'min_samples_leaf': [3, 4, 5],
'min_samples_split': [8, 10, 12],
'n_estimators': [800, 1000, 1600, 2000]
}
# Create a based model
rf2 = RandomForestClassifier()
# Instantiate the grid search model
grid_search = GridSearchCV(estimator = rf2, param_grid = param_grid,
cv = 3, n_jobs = -1, verbose = 2)
# Fit the grid search to the data
#grid_search.fit(X_train,y_train)
#grid_search.best_params_
# In[24]:
rf.fit(X_train, y_train)
scores.append(rf.score(X_test, y_test))
# In[25]:
scores.append(rf.score(X_test, y_test))
# In[26]:
print(np.mean(scores))
# In[27]:
from sklearn.ensemble import RandomForestClassifier
rfc2 = RandomForestClassifier(n_estimators=2000, max_depth=120, min_samples_split=10,
min_samples_leaf=3,max_features='sqrt', bootstrap=True,random_state=42)
rfc2 .fit(X2, y)
# In[28]:
from sklearn.ensemble import RandomForestClassifier
rfc1 = RandomForestClassifier(n_estimators=1400, max_depth=100, min_samples_split=5,
min_samples_leaf=4,max_features='sqrt', bootstrap=True,random_state=42)
rfc1 .fit(X2, y)
# In[29]:
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=1400, max_depth=80, min_samples_split=10,
min_samples_leaf=4,max_features='sqrt', bootstrap=True,random_state=42)
rfc .fit(X2, y)
# In[30]:
from sklearn.preprocessing import LabelEncoder
labels = LabelEncoder()
y_train_labels_fit = labels.fit(y_train)
y_train_lables_trf = labels.transform(y_train)
test_pred = rfc.predict_proba(X_test1)
test_pred = pd.DataFrame(rfc.predict_proba(X_test1), columns=labels.classes_)
q = {'ID': test_data["ID"], 'no_financial_services': test_pred[0], 'other_only': test_pred[1],
'mm_only': test_pred[2], 'mm_plus': test_pred[3]}
df_pred = pd.DataFrame(data=q)
df_pred = df_pred[['ID','no_financial_services', 'other_only', 'mm_only', 'mm_plus' ]]
# In[31]:
from sklearn.preprocessing import LabelEncoder
labels = LabelEncoder()
y_train_labels_fit = labels.fit(y_train)
y_train_lables_trf = labels.transform(y_train)
test_pred = rfc2.predict_proba(X_test1)
test_pred = pd.DataFrame(rfc2.predict_proba(X_test1), columns=labels.classes_)
q = {'ID': test_data["ID"], 'no_financial_services': test_pred[0], 'other_only': test_pred[1],
'mm_only': test_pred[2], 'mm_plus': test_pred[3]}
df_pred2 = pd.DataFrame(data=q)
df_pred2 = df_pred[['ID','no_financial_services', 'other_only', 'mm_only', 'mm_plus' ]]
# In[32]:
from sklearn.preprocessing import LabelEncoder
labels = LabelEncoder()
y_train_labels_fit = labels.fit(y_train)
y_train_lables_trf = labels.transform(y_train)
test_pred = rfc1.predict_proba(X_test1)
test_pred = pd.DataFrame(rfc1.predict_proba(X_test1), columns=labels.classes_)
q = {'ID': test_data["ID"], 'no_financial_services': test_pred[0], 'other_only': test_pred[1],
'mm_only': test_pred[2], 'mm_plus': test_pred[3]}
df_pred1 = pd.DataFrame(data=q)
df_pred1 = df_pred[['ID','no_financial_services', 'other_only', 'mm_only', 'mm_plus' ]]
# In[33]:
| # In[ |
df_pred.head()
| random_line_split |
tanzania-improved.py | .head()
# In[9]:
#dropping the labels that you are supposed to predict and the excess from train_head
cols = ['ID','mobile_money', 'savings', 'borrowing','insurance']
train_data = train_data.drop(cols, axis=1)
x_test = test_data.drop(['ID'], axis=1)
# In[10]:
#looking at the unique classification classes
train_data['mobile_money_classification'].unique()
# In[11]:
#lets look at both the train and test data and see if they match after the drop
train_data.columns
# In[12]:
x_test.columns
# In[13]:
X = train_data.drop(['mobile_money_classification'], axis=1)
y = train_data['mobile_money_classification']
# In[14]:
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(random_state = 42,n_estimators=1400,criterion='gini')
rf.fit(X, y)
#using the base model to build the feature importance
import pandas as pd
feature_importances = pd.DataFrame(rf.feature_importances_,
index = X.columns,
columns=['importance']).sort_values('importance',ascending=False)
print(feature_importances)
# In[15]:
#lets drop the most irrelevant columns in both X and the x_test
#from the already done tests we find that dropping the last three is what works best
X1 = X.drop(['Q8_11','Q8_7','Q8_6'], axis=1)
x_test1 = x_test.drop(['Q8_11','Q8_7','Q8_6'], axis=1)
# In[16]:
#lets normalize the datasets
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
X2 = scaler.fit_transform(X1)
X_test1=scaler.fit_transform(x_test1)
# In[17]:
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier
scores = []
rf = RandomForestClassifier(random_state = 42,n_estimators=1400,criterion='gini')
cv = KFold(n_splits=10, random_state=42, shuffle=False)
for train_index, test_index in cv.split(X):
|
# In[18]:
from pprint import pprint
# Look at parameters used by our current forest
print('Parameters currently in use:\n')
pprint(rf.get_params())
# In[19]:
from sklearn.model_selection import RandomizedSearchCV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
pprint(random_grid)
# In[20]:
# Use the random grid to search for best hyperparameters
# First create the base model to tune
rf1 = RandomForestClassifier()
# Random search of parameters, using 3 fold cross validation,
# search across 100 different combinations, and use all available cores
rf1_random = RandomizedSearchCV(estimator = rf1, param_distributions = random_grid, n_iter = 100, cv = 3, verbose=2, random_state=42, n_jobs = -1)
# In[21]:
# Fit the random search model
#rf1_random.fit(X_train, y_train)
# In[22]:
#rf1_random.best_params_
# In[23]:
#grid search using cross validation
#Random search allowed us to narrow down the range for each hyperparameter.
#Now that we know where to concentrate our search, we can explicitly specify every combination of settings to try.
#We do this with GridSearchCV, a method that, instead of sampling randomly from a distribution, evaluates all combinations we define.
#To use Grid Search, we make another grid based on the best values provided by random search:
from sklearn.model_selection import GridSearchCV
# Create the parameter grid based on the results of random search
param_grid = {
'bootstrap': [True],
'max_depth': [100, 100, 110, 120],
'max_features': [2, 3],
'min_samples_leaf': [3, 4, 5],
'min_samples_split': [8, 10, 12],
'n_estimators': [800, 1000, 1600, 2000]
}
# Create a based model
rf2 = RandomForestClassifier()
# Instantiate the grid search model
grid_search = GridSearchCV(estimator = rf2, param_grid = param_grid,
cv = 3, n_jobs = -1, verbose = 2)
# Fit the grid search to the data
#grid_search.fit(X_train,y_train)
#grid_search.best_params_
# In[24]:
rf.fit(X_train, y_train)
scores.append(rf.score(X_test, y_test))
# In[25]:
scores.append(rf.score(X_test, y_test))
# In[26]:
print(np.mean(scores))
# In[27]:
from sklearn.ensemble import RandomForestClassifier
rfc2 = RandomForestClassifier(n_estimators=2000, max_depth=120, min_samples_split=10,
min_samples_leaf=3,max_features='sqrt', bootstrap=True,random_state=42)
rfc2 .fit(X2, y)
# In[28]:
from sklearn.ensemble import RandomForestClassifier
rfc1 = RandomForestClassifier(n_estimators=1400, max_depth=100, min_samples_split=5,
min_samples_leaf=4,max_features='sqrt', bootstrap=True,random_state=42)
rfc1 .fit(X2, y)
# In[29]:
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=1400, max_depth=80, min_samples_split=10,
min_samples_leaf=4,max_features='sqrt', bootstrap=True,random_state=42)
rfc .fit(X2, y)
# In[30]:
from sklearn.preprocessing import LabelEncoder
labels = LabelEncoder()
y_train_labels_fit = labels.fit(y_train)
y_train_lables_trf = labels.transform(y_train)
test_pred = rfc.predict_proba(X_test1)
test_pred = pd.DataFrame(rfc.predict_proba(X_test1), columns=labels.classes_)
q = {'ID': test_data["ID"], 'no_financial_services': test_pred[0], 'other_only': test_pred[1],
'mm_only': test_pred[2], 'mm_plus': test_pred[3]}
df_pred = pd.DataFrame(data=q)
df_pred = df_pred[['ID','no_financial_services', 'other_only', 'mm_only', 'mm_plus' ]]
# In[31]:
from sklearn.preprocessing import LabelEncoder
labels = LabelEncoder()
y_train_labels_fit = labels.fit(y_train)
y_train_lables_trf = labels.transform(y_train)
test_pred = rfc2.predict_proba(X_test1)
test_pred = pd.DataFrame(rfc2.predict_proba(X_test1), columns=labels.classes_)
q = {'ID': test_data["ID"], 'no_financial_services': test_pred[0], 'other_only': test_pred[1],
'mm_only': test_pred[2], 'mm_plus': test_pred[3]}
df_pred2 = pd.DataFrame(data=q)
df_pred2 = df_pred[['ID','no_financial_services', 'other_only', 'mm_only', 'mm_plus' ]]
# In[32]:
from sklearn.preprocessing import LabelEncoder
labels = LabelEncoder()
y_train_labels_fit = labels.fit(y_train)
y_train_lables_trf = labels.transform(y_train)
test_pred = rfc1.predict_proba(X_test1)
test_pred = pd.DataFrame(rfc1.predict_proba(X_test1), columns=labels.classes_)
q = {'ID': test_data["ID"], 'no_financial_services': test_pred[0], 'other_only': test_pred[1],
'mm_only': test_pred[2], 'mm_plus': test_pred[3]}
df_pred1 = pd.DataFrame(data=q)
df_pred1 = df_pred[['ID','no_financial_services', 'other_only', 'mm_only', 'mm_plus' ]]
# In[33]:
df_pred.head()
# In | print("Train Index: ", train_index, "\n")
print("Test Index: ", test_index)
X_train, X_test, y_train, y_test = X2[train_index], X2[test_index], y[train_index], y[test_index]
rf.fit(X_train, y_train)
scores.append(rf.score(X_test, y_test)) | conditional_block |
main.js | id;
this.nombre = nombre;
this.costo = costo;
this.tipo = tipo;
this.descripcion = descripcion;
this.selected = selected;
this.cantidadPersonas = cantidadPersonas;
}
}
const servicios = [];
servicios.push(
new Servicio(
1,
'Cabalgata',
600,
'Recreacion', | servicios.push(
new Servicio(
2,
'Tirolesa',
800,
'Recreacion',
'Deslizamiento por cable entre las copas de los arboles.',
false,
0
)
);
servicios.push(
new Servicio(
3,
'Trekking',
800,
'Recreacion',
'Caminata guiada por el bosque.',
false,
0
)
);
servicios.push(
new Servicio(
4,
'Cena gourmet',
1200,
'Gastronomia',
'Desgustación por pasos maridados con vino.',
false,
0
)
);
servicios.push(
new Servicio(
5,
'Desayuno buffet',
700,
'Gastronomia',
'Manjares artesanales acompañados de jugos naturales.',
false,
0
)
);
servicios.push(
new Servicio(
6,
'Tarde de spa',
1000,
'Relax',
'Sesión de Spa y masajes.',
false,
0
)
);
let pas = document.getElementById('pasajeros');
let cabanasimple = document.getElementById('simplePortada');
let cabanadoble = document.getElementById('doblePortada');
let cabanasuite = document.getElementById('suitePortada');
let fa, fb;
let estadiaTotal;
let pax;
pas.addEventListener('change', () => {
pax = pas.value;
sessionStorage.setItem('pax', pas.value);
localStorage.setItem('pasajeros', pas.value);
});
const fechaA = document.getElementById('checkIn');
fechaA.addEventListener('change', (event) => {
fa = event.target.value;
sessionStorage.setItem('ingreso', fa);
});
const fechaB = document.getElementById('checkOut');
fechaB.addEventListener('change', (event) => {
fb = event.target.value;
sessionStorage.setItem('egreso', fb);
});
let formRes = document.getElementById('formularioReserva');
formRes.onsubmit = (evt) => {
evt.preventDefault();
const checkIn = moment(fa, 'YYYY-MM-DD');
const checkOut = moment(fb, 'YYYY-MM-DD');
estadiaTotal = checkOut.diff(checkIn, 'days');
localStorage.setItem('Check In', fa);
localStorage.setItem('Check Out', fb);
sessionStorage.setItem('dias', estadiaTotal);
localStorage.setItem('estadia', estadiaTotal);
$('#ingreso').append(`${fa}`);
$('#egreso').append(`${fb}`);
$('#guests').append(`${pax}`);
$('#dias').append(`${estadiaTotal}`);
if (pas.value <= 3) {
cabanasimple.style.display = 'initial';
cabanadoble.style.display = 'initial';
cabanasuite.style.display = 'initial';
} else if (pas.value >= 3 && pas.value <= 6) {
cabanasimple.style.display = 'none';
cabanadoble.style.display = 'initial';
cabanasuite.style.display = 'initial';
aparecerSimple.style.display = 'none';
}
if (pas.value > 6) {
cabanasimple.style.display = 'none';
cabanadoble.style.display = 'none';
cabanasuite.style.display = 'initial';
aparecerDoble.style.display = 'none';
aparecerSimple.style.display = 'none';
}
};
function Cabaña(id, nombre, precio, selected) {
this.id = id;
this.nombre = nombre;
this.precio = precio;
this.selected = selected;
}
const cabins = [];
cabins.push(new Cabaña(1, 'Cabaña simple', simpleIVA, false));
cabins.push(new Cabaña(2, 'Cabaña doble', dobleIVA, false));
cabins.push(new Cabaña(3, 'Cabaña suite', suiteIVA, false));
let cabElegida;
$('.addBtn').on('click', function (e) {
e.preventDefault();
const cabanaId = e.target.getAttribute('data-cabaña-id');
cabins.forEach((cabin) => {
if (cabin.id.toString() === cabanaId) {
if (!cabin.selected) {
$('#quecabana').append(`<h3>${cabin.nombre}</h3>`);
cabin.selected = true;
cabElegida = cabin.precio;
swal({
title: `Hecho!`,
text: `¡Cabaña reservada!`,
icon: 'success'
});
} else {
swal({
title: 'Ya elegiste esta cabaña',
icon: 'warning'
});
}
}
});
});
const btnSimple = document.getElementById('btnSimple');
const btnDoble = document.getElementById('btnDoble');
const btnSuite = document.getElementById('btnSuite');
const aparecerSimple = document.getElementById('simple');
const aparecerDoble = document.getElementById('doble');
const aparecerSuite = document.getElementById('suite');
btnSimple.onclick = () => {
$('#simple').toggle(1000);
aparecerSimple.style.display = 'flex';
aparecerDoble.style.display = 'none';
aparecerSuite.style.display = 'none';
};
btnDoble.onclick = () => {
$('#doble').toggle(1000);
aparecerDoble.style.display = 'flex';
aparecerSimple.style.display = 'none';
aparecerSuite.style.display = 'none';
};
btnSuite.onclick = () => {
$('#suite').toggle(1000);
aparecerSuite.style.display = 'flex';
aparecerDoble.style.display = 'none';
aparecerSimple.style.display = 'none';
};
const cabalgata = document.getElementById('imgCabalgata');
const tirolesa = document.getElementById('imgTirolesa');
const trekking = document.getElementById('imgTrekking');
const cocina = document.getElementById('imgCocina');
const desayuno = document.getElementById('imgDesayuno');
const spa = document.getElementById('imgSpa');
$('.services').on('click', function (event) {
event.preventDefault();
const serviceId = event.target.getAttribute('data-service-id');
servicios.forEach((service) => {
if (service.id.toString() === serviceId) {
if (!service.selected) {
swal('Cantidad de personas: ', {
content: 'input'
}).then((value) => {
swal(`Servicio agregado para ${value} personas`);
service.cantidadPersonas = value;
$('#serviciosfinales').append(
`<p>${service.nombre} para ${value} personas = $${
service.costo * value
} </p>`
);
});
service.selected = true;
} else {
swal({
title: 'Ya contrataste este servicio',
text: '¡Conoce los otros que tenemos!',
icon: 'warning'
});
}
}
});
});
const formContacto = document.getElementById('formContacto');
const nombreContacto = document.getElementById('fullName');
const telefonoContacto = document.getElementById('phone');
const emailContacto = document.getElementById('email');
$('#showForm').on('click', function (event) {
event.preventDefault();
$('#staticBackdrop').fadeOut();
if ((pax, fa, fb !== undefined)) {
$('#reservaFinal').fadeIn('3000');
swal({
title: '¡Perfecto!',
text: 'Agrega tus datos y finaliza la reserva.'
});
} else {
swal({
title: 'Elige la fecha y cantidad de pasajeros para continuar',
icon: 'error'
});
}
});
const precio = (a, b) => {
return a * b;
};
let cabFinal;
let costoService;
$('.btnfinal').on('click', () => {
costoService = 0;
servicios.forEach((service) => {
if (service.selected && service.cantidadPersonas > 0) {
costoService += precio(service.costo, service.cantidadPersonas);
}
});
cabFinal = precio(cabElegida, estadiaTotal);
let costoTotal = estadia(cabFinal, costoService);
$('#costoTotal').html('');
$('#costoTotal').append(`Precio final: <strong>${costoTotal}</strong>`);
});
const lastForm = document.getElementById('reservaFinal');
lastForm.onsubmit = (event) => {
event.preventDefault();
swal({
title: '¡Tu reserva fue hecha con éxito!',
text: 'Gracias por elegirnos.',
icon: 'success'
});
};
let HTMLCard = '';
let HTMLError = '';
let contenidoJSON = '';
//AJAX
function Testimonios() {
$.ajax({
url: 'https://randomuser.me/api/?results=4&nat=us, | 'Tour a caballo guiado por el bosque.',
false,
0
)
); | random_line_split |
main.js | ;
this.nombre = nombre;
this.costo = costo;
this.tipo = tipo;
this.descripcion = descripcion;
this.selected = selected;
this.cantidadPersonas = cantidadPersonas;
}
}
const servicios = [];
servicios.push(
new Servicio(
1,
'Cabalgata',
600,
'Recreacion',
'Tour a caballo guiado por el bosque.',
false,
0
)
);
servicios.push(
new Servicio(
2,
'Tirolesa',
800,
'Recreacion',
'Deslizamiento por cable entre las copas de los arboles.',
false,
0
)
);
servicios.push(
new Servicio(
3,
'Trekking',
800,
'Recreacion',
'Caminata guiada por el bosque.',
false,
0
)
);
servicios.push(
new Servicio(
4,
'Cena gourmet',
1200,
'Gastronomia',
'Desgustación por pasos maridados con vino.',
false,
0
)
);
servicios.push(
new Servicio(
5,
'Desayuno buffet',
700,
'Gastronomia',
'Manjares artesanales acompañados de jugos naturales.',
false,
0
)
);
servicios.push(
new Servicio(
6,
'Tarde de spa',
1000,
'Relax',
'Sesión de Spa y masajes.',
false,
0
)
);
let pas = document.getElementById('pasajeros');
let cabanasimple = document.getElementById('simplePortada');
let cabanadoble = document.getElementById('doblePortada');
let cabanasuite = document.getElementById('suitePortada');
let fa, fb;
let estadiaTotal;
let pax;
pas.addEventListener('change', () => {
pax = pas.value;
sessionStorage.setItem('pax', pas.value);
localStorage.setItem('pasajeros', pas.value);
});
const fechaA = document.getElementById('checkIn');
fechaA.addEventListener('change', (event) => {
fa = event.target.value;
sessionStorage.setItem('ingreso', fa);
});
const fechaB = document.getElementById('checkOut');
fechaB.addEventListener('change', (event) => {
fb = event.target.value;
sessionStorage.setItem('egreso', fb);
});
let formRes = document.getElementById('formularioReserva');
formRes.onsubmit = (evt) => {
evt.preventDefault();
const checkIn = moment(fa, 'YYYY-MM-DD');
const checkOut = moment(fb, 'YYYY-MM-DD');
estadiaTotal = checkOut.diff(checkIn, 'days');
localStorage.setItem('Check In', fa);
localStorage.setItem('Check Out', fb);
sessionStorage.setItem('dias', estadiaTotal);
localStorage.setItem('estadia', estadiaTotal);
$('#ingreso').append(`${fa}`);
$('#egreso').append(`${fb}`);
$('#guests').append(`${pax}`);
$('#dias').append(`${estadiaTotal}`);
if (pas.value <= 3) {
cabanasimple.style.display = 'initial';
cabanadoble.style.display = 'initial';
cabanasuite.style.display = 'initial';
} else if (pas.value >= 3 && pas.value <= 6) {
cabanasimple.style.display = 'none';
cabanadoble.style.display = 'initial';
cabanasuite.style.display = 'initial';
aparecerSimple.style.display = 'none';
}
if (pas.value > 6) {
|
function Cabaña(id, nombre, precio, selected) {
this.id = id;
this.nombre = nombre;
this.precio = precio;
this.selected = selected;
}
const cabins = [];
cabins.push(new Cabaña(1, 'Cabaña simple', simpleIVA, false));
cabins.push(new Cabaña(2, 'Cabaña doble', dobleIVA, false));
cabins.push(new Cabaña(3, 'Cabaña suite', suiteIVA, false));
let cabElegida;
$('.addBtn').on('click', function (e) {
e.preventDefault();
const cabanaId = e.target.getAttribute('data-cabaña-id');
cabins.forEach((cabin) => {
if (cabin.id.toString() === cabanaId) {
if (!cabin.selected) {
$('#quecabana').append(`<h3>${cabin.nombre}</h3>`);
cabin.selected = true;
cabElegida = cabin.precio;
swal({
title: `Hecho!`,
text: `¡Cabaña reservada!`,
icon: 'success'
});
} else {
swal({
title: 'Ya elegiste esta cabaña',
icon: 'warning'
});
}
}
});
});
const btnSimple = document.getElementById('btnSimple');
const btnDoble = document.getElementById('btnDoble');
const btnSuite = document.getElementById('btnSuite');
const aparecerSimple = document.getElementById('simple');
const aparecerDoble = document.getElementById('doble');
const aparecerSuite = document.getElementById('suite');
btnSimple.onclick = () => {
$('#simple').toggle(1000);
aparecerSimple.style.display = 'flex';
aparecerDoble.style.display = 'none';
aparecerSuite.style.display = 'none';
};
btnDoble.onclick = () => {
$('#doble').toggle(1000);
aparecerDoble.style.display = 'flex';
aparecerSimple.style.display = 'none';
aparecerSuite.style.display = 'none';
};
btnSuite.onclick = () => {
$('#suite').toggle(1000);
aparecerSuite.style.display = 'flex';
aparecerDoble.style.display = 'none';
aparecerSimple.style.display = 'none';
};
const cabalgata = document.getElementById('imgCabalgata');
const tirolesa = document.getElementById('imgTirolesa');
const trekking = document.getElementById('imgTrekking');
const cocina = document.getElementById('imgCocina');
const desayuno = document.getElementById('imgDesayuno');
const spa = document.getElementById('imgSpa');
$('.services').on('click', function (event) {
event.preventDefault();
const serviceId = event.target.getAttribute('data-service-id');
servicios.forEach((service) => {
if (service.id.toString() === serviceId) {
if (!service.selected) {
swal('Cantidad de personas: ', {
content: 'input'
}).then((value) => {
swal(`Servicio agregado para ${value} personas`);
service.cantidadPersonas = value;
$('#serviciosfinales').append(
`<p>${service.nombre} para ${value} personas = $${
service.costo * value
} </p>`
);
});
service.selected = true;
} else {
swal({
title: 'Ya contrataste este servicio',
text: '¡Conoce los otros que tenemos!',
icon: 'warning'
});
}
}
});
});
const formContacto = document.getElementById('formContacto');
const nombreContacto = document.getElementById('fullName');
const telefonoContacto = document.getElementById('phone');
const emailContacto = document.getElementById('email');
$('#showForm').on('click', function (event) {
event.preventDefault();
$('#staticBackdrop').fadeOut();
if ((pax, fa, fb !== undefined)) {
$('#reservaFinal').fadeIn('3000');
swal({
title: '¡Perfecto!',
text: 'Agrega tus datos y finaliza la reserva.'
});
} else {
swal({
title: 'Elige la fecha y cantidad de pasajeros para continuar',
icon: 'error'
});
}
});
const precio = (a, b) => {
return a * b;
};
let cabFinal;
let costoService;
$('.btnfinal').on('click', () => {
costoService = 0;
servicios.forEach((service) => {
if (service.selected && service.cantidadPersonas > 0) {
costoService += precio(service.costo, service.cantidadPersonas);
}
});
cabFinal = precio(cabElegida, estadiaTotal);
let costoTotal = estadia(cabFinal, costoService);
$('#costoTotal').html('');
$('#costoTotal').append(`Precio final: <strong>${costoTotal}</strong>`);
});
const lastForm = document.getElementById('reservaFinal');
lastForm.onsubmit = (event) => {
event.preventDefault();
swal({
title: '¡Tu reserva fue hecha con éxito!',
text: 'Gracias por elegirnos.',
icon: 'success'
});
};
let HTMLCard = '';
let HTMLError = '';
let contenidoJSON = '';
//AJAX
function Testimonios() {
$.ajax({
url: 'https://randomuser.me/api/?results=4&nat=us | cabanasimple.style.display = 'none';
cabanadoble.style.display = 'none';
cabanasuite.style.display = 'initial';
aparecerDoble.style.display = 'none';
aparecerSimple.style.display = 'none';
}
}; | conditional_block |
main.js | id;
this.nombre = nombre;
this.costo = costo;
this.tipo = tipo;
this.descripcion = descripcion;
this.selected = selected;
this.cantidadPersonas = cantidadPersonas;
}
}
const servicios = [];
servicios.push(
new Servicio(
1,
'Cabalgata',
600,
'Recreacion',
'Tour a caballo guiado por el bosque.',
false,
0
)
);
servicios.push(
new Servicio(
2,
'Tirolesa',
800,
'Recreacion',
'Deslizamiento por cable entre las copas de los arboles.',
false,
0
)
);
servicios.push(
new Servicio(
3,
'Trekking',
800,
'Recreacion',
'Caminata guiada por el bosque.',
false,
0
)
);
servicios.push(
new Servicio(
4,
'Cena gourmet',
1200,
'Gastronomia',
'Desgustación por pasos maridados con vino.',
false,
0
)
);
servicios.push(
new Servicio(
5,
'Desayuno buffet',
700,
'Gastronomia',
'Manjares artesanales acompañados de jugos naturales.',
false,
0
)
);
servicios.push(
new Servicio(
6,
'Tarde de spa',
1000,
'Relax',
'Sesión de Spa y masajes.',
false,
0
)
);
let pas = document.getElementById('pasajeros');
let cabanasimple = document.getElementById('simplePortada');
let cabanadoble = document.getElementById('doblePortada');
let cabanasuite = document.getElementById('suitePortada');
let fa, fb;
let estadiaTotal;
let pax;
pas.addEventListener('change', () => {
pax = pas.value;
sessionStorage.setItem('pax', pas.value);
localStorage.setItem('pasajeros', pas.value);
});
const fechaA = document.getElementById('checkIn');
fechaA.addEventListener('change', (event) => {
fa = event.target.value;
sessionStorage.setItem('ingreso', fa);
});
const fechaB = document.getElementById('checkOut');
fechaB.addEventListener('change', (event) => {
fb = event.target.value;
sessionStorage.setItem('egreso', fb);
});
let formRes = document.getElementById('formularioReserva');
formRes.onsubmit = (evt) => {
evt.preventDefault();
const checkIn = moment(fa, 'YYYY-MM-DD');
const checkOut = moment(fb, 'YYYY-MM-DD');
estadiaTotal = checkOut.diff(checkIn, 'days');
localStorage.setItem('Check In', fa);
localStorage.setItem('Check Out', fb);
sessionStorage.setItem('dias', estadiaTotal);
localStorage.setItem('estadia', estadiaTotal);
$('#ingreso').append(`${fa}`);
$('#egreso').append(`${fb}`);
$('#guests').append(`${pax}`);
$('#dias').append(`${estadiaTotal}`);
if (pas.value <= 3) {
cabanasimple.style.display = 'initial';
cabanadoble.style.display = 'initial';
cabanasuite.style.display = 'initial';
} else if (pas.value >= 3 && pas.value <= 6) {
cabanasimple.style.display = 'none';
cabanadoble.style.display = 'initial';
cabanasuite.style.display = 'initial';
aparecerSimple.style.display = 'none';
}
if (pas.value > 6) {
cabanasimple.style.display = 'none';
cabanadoble.style.display = 'none';
cabanasuite.style.display = 'initial';
aparecerDoble.style.display = 'none';
aparecerSimple.style.display = 'none';
}
};
function Cabaña(id, nombre, precio, selected) {
this.id = id;
this.nombre = nombre;
this.precio = precio;
this.selected = selected;
}
const cabins = [];
cabins.push(new Cabaña(1, 'Cabaña simple', simpleIVA, false));
cabins.push(new Cabaña(2, 'Cabaña doble', dobleIVA, false));
cabins.push(new Cabaña(3, 'Cabaña suite', suiteIVA, false));
let cabElegida;
$('.addBtn').on('click', function (e) {
e.preventDefault();
const cabanaId = e.target.getAttribute('data-cabaña-id');
cabins.forEach((cabin) => {
if (cabin.id.toString() === cabanaId) {
if (!cabin.selected) {
$('#quecabana').append(`<h3>${cabin.nombre}</h3>`);
cabin.selected = true;
cabElegida = cabin.precio;
swal({
title: `Hecho!`,
text: `¡Cabaña reservada!`,
icon: 'success'
});
} else {
swal({
title: 'Ya elegiste esta cabaña',
icon: 'warning'
});
}
}
});
});
const btnSimple = document.getElementById('btnSimple');
const btnDoble = document.getElementById('btnDoble');
const btnSuite = document.getElementById('btnSuite');
const aparecerSimple = document.getElementById('simple');
const aparecerDoble = document.getElementById('doble');
const aparecerSuite = document.getElementById('suite');
btnSimple.onclick = () => {
$('#simple').toggle(1000);
aparecerSimple.style.display = 'flex';
aparecerDoble.style.display = 'none';
aparecerSuite.style.display = 'none';
};
btnDoble.onclick = () => {
$('#doble').toggle(1000);
aparecerDoble.style.display = 'flex';
aparecerSimple.style.display = 'none';
aparecerSuite.style.display = 'none';
};
btnSuite.onclick = () => {
$('#suite').toggle(1000);
aparecerSuite.style.display = 'flex';
aparecerDoble.style.display = 'none';
aparecerSimple.style.display = 'none';
};
const cabalgata = document.getElementById('imgCabalgata');
const tirolesa = document.getElementById('imgTirolesa');
const trekking = document.getElementById('imgTrekking');
const cocina = document.getElementById('imgCocina');
const desayuno = document.getElementById('imgDesayuno');
const spa = document.getElementById('imgSpa');
$('.services').on('click', function (event) {
event.preventDefault();
const serviceId = event.target.getAttribute('data-service-id');
servicios.forEach((service) => {
if (service.id.toString() === serviceId) {
if (!service.selected) {
swal('Cantidad de personas: ', {
content: 'input'
}).then((value) => {
swal(`Servicio agregado para ${value} personas`);
service.cantidadPersonas = value;
$('#serviciosfinales').append(
`<p>${service.nombre} para ${value} personas = $${
service.costo * value
} </p>`
);
});
service.selected = true;
} else {
swal({
title: 'Ya contrataste este servicio',
text: '¡Conoce los otros que tenemos!',
icon: 'warning'
});
}
}
});
});
const formContacto = document.getElementById('formContacto');
const nombreContacto = document.getElementById('fullName');
const telefonoContacto = document.getElementById('phone');
const emailContacto = document.getElementById('email');
$('#showForm').on('click', function (event) {
event.preventDefault();
$('#staticBackdrop').fadeOut();
if ((pax, fa, fb !== undefined)) {
$('#reservaFinal').fadeIn('3000');
swal({
title: '¡Perfecto!',
text: 'Agrega tus datos y finaliza la reserva.'
});
} else {
swal({
title: 'Elige la fecha y cantidad de pasajeros para continuar',
icon: 'error'
});
}
});
const precio = (a, b) => {
return a * b;
};
let cabFinal;
let costoService;
$('.btnfinal').on('click', () => {
costoService = 0;
servicios.forEach((service) => {
if (service.selected && service.cantidadPersonas > 0) {
costoService += precio(service.costo, service.cantidadPersonas);
}
});
cabFinal = precio(cabElegida, estadiaTotal);
let costoTotal = estadia(cabFinal, costoService);
$('#costoTotal').html('');
$('#costoTotal').append(`Precio final: <strong>${costoTotal}</strong>`);
});
const lastForm = document.getElementById('reservaFinal');
lastForm.onsubmit = (event) => {
event.preventDefault();
swal({
title: '¡Tu reserva fue hecha con éxito!',
text: 'Gracias por elegirnos.',
icon: 'success'
});
};
let HTMLCard = '';
let HTMLError = '';
let contenidoJSON = '';
//AJAX
function Testimonios() {
| url: 'https://randomuser.me/api/?results=4&nat=us | $.ajax({
| identifier_name |
main.js |
}
const servicios = [];
servicios.push(
new Servicio(
1,
'Cabalgata',
600,
'Recreacion',
'Tour a caballo guiado por el bosque.',
false,
0
)
);
servicios.push(
new Servicio(
2,
'Tirolesa',
800,
'Recreacion',
'Deslizamiento por cable entre las copas de los arboles.',
false,
0
)
);
servicios.push(
new Servicio(
3,
'Trekking',
800,
'Recreacion',
'Caminata guiada por el bosque.',
false,
0
)
);
servicios.push(
new Servicio(
4,
'Cena gourmet',
1200,
'Gastronomia',
'Desgustación por pasos maridados con vino.',
false,
0
)
);
servicios.push(
new Servicio(
5,
'Desayuno buffet',
700,
'Gastronomia',
'Manjares artesanales acompañados de jugos naturales.',
false,
0
)
);
servicios.push(
new Servicio(
6,
'Tarde de spa',
1000,
'Relax',
'Sesión de Spa y masajes.',
false,
0
)
);
let pas = document.getElementById('pasajeros');
let cabanasimple = document.getElementById('simplePortada');
let cabanadoble = document.getElementById('doblePortada');
let cabanasuite = document.getElementById('suitePortada');
let fa, fb;
let estadiaTotal;
let pax;
pas.addEventListener('change', () => {
pax = pas.value;
sessionStorage.setItem('pax', pas.value);
localStorage.setItem('pasajeros', pas.value);
});
const fechaA = document.getElementById('checkIn');
fechaA.addEventListener('change', (event) => {
fa = event.target.value;
sessionStorage.setItem('ingreso', fa);
});
const fechaB = document.getElementById('checkOut');
fechaB.addEventListener('change', (event) => {
fb = event.target.value;
sessionStorage.setItem('egreso', fb);
});
let formRes = document.getElementById('formularioReserva');
formRes.onsubmit = (evt) => {
evt.preventDefault();
const checkIn = moment(fa, 'YYYY-MM-DD');
const checkOut = moment(fb, 'YYYY-MM-DD');
estadiaTotal = checkOut.diff(checkIn, 'days');
localStorage.setItem('Check In', fa);
localStorage.setItem('Check Out', fb);
sessionStorage.setItem('dias', estadiaTotal);
localStorage.setItem('estadia', estadiaTotal);
$('#ingreso').append(`${fa}`);
$('#egreso').append(`${fb}`);
$('#guests').append(`${pax}`);
$('#dias').append(`${estadiaTotal}`);
if (pas.value <= 3) {
cabanasimple.style.display = 'initial';
cabanadoble.style.display = 'initial';
cabanasuite.style.display = 'initial';
} else if (pas.value >= 3 && pas.value <= 6) {
cabanasimple.style.display = 'none';
cabanadoble.style.display = 'initial';
cabanasuite.style.display = 'initial';
aparecerSimple.style.display = 'none';
}
if (pas.value > 6) {
cabanasimple.style.display = 'none';
cabanadoble.style.display = 'none';
cabanasuite.style.display = 'initial';
aparecerDoble.style.display = 'none';
aparecerSimple.style.display = 'none';
}
};
function Cabaña(id, nombre, precio, selected) {
this.id = id;
this.nombre = nombre;
this.precio = precio;
this.selected = selected;
}
const cabins = [];
cabins.push(new Cabaña(1, 'Cabaña simple', simpleIVA, false));
cabins.push(new Cabaña(2, 'Cabaña doble', dobleIVA, false));
cabins.push(new Cabaña(3, 'Cabaña suite', suiteIVA, false));
let cabElegida;
$('.addBtn').on('click', function (e) {
e.preventDefault();
const cabanaId = e.target.getAttribute('data-cabaña-id');
cabins.forEach((cabin) => {
if (cabin.id.toString() === cabanaId) {
if (!cabin.selected) {
$('#quecabana').append(`<h3>${cabin.nombre}</h3>`);
cabin.selected = true;
cabElegida = cabin.precio;
swal({
title: `Hecho!`,
text: `¡Cabaña reservada!`,
icon: 'success'
});
} else {
swal({
title: 'Ya elegiste esta cabaña',
icon: 'warning'
});
}
}
});
});
const btnSimple = document.getElementById('btnSimple');
const btnDoble = document.getElementById('btnDoble');
const btnSuite = document.getElementById('btnSuite');
const aparecerSimple = document.getElementById('simple');
const aparecerDoble = document.getElementById('doble');
const aparecerSuite = document.getElementById('suite');
btnSimple.onclick = () => {
$('#simple').toggle(1000);
aparecerSimple.style.display = 'flex';
aparecerDoble.style.display = 'none';
aparecerSuite.style.display = 'none';
};
btnDoble.onclick = () => {
$('#doble').toggle(1000);
aparecerDoble.style.display = 'flex';
aparecerSimple.style.display = 'none';
aparecerSuite.style.display = 'none';
};
btnSuite.onclick = () => {
$('#suite').toggle(1000);
aparecerSuite.style.display = 'flex';
aparecerDoble.style.display = 'none';
aparecerSimple.style.display = 'none';
};
const cabalgata = document.getElementById('imgCabalgata');
const tirolesa = document.getElementById('imgTirolesa');
const trekking = document.getElementById('imgTrekking');
const cocina = document.getElementById('imgCocina');
const desayuno = document.getElementById('imgDesayuno');
const spa = document.getElementById('imgSpa');
$('.services').on('click', function (event) {
event.preventDefault();
const serviceId = event.target.getAttribute('data-service-id');
servicios.forEach((service) => {
if (service.id.toString() === serviceId) {
if (!service.selected) {
swal('Cantidad de personas: ', {
content: 'input'
}).then((value) => {
swal(`Servicio agregado para ${value} personas`);
service.cantidadPersonas = value;
$('#serviciosfinales').append(
`<p>${service.nombre} para ${value} personas = $${
service.costo * value
} </p>`
);
});
service.selected = true;
} else {
swal({
title: 'Ya contrataste este servicio',
text: '¡Conoce los otros que tenemos!',
icon: 'warning'
});
}
}
});
});
const formContacto = document.getElementById('formContacto');
const nombreContacto = document.getElementById('fullName');
const telefonoContacto = document.getElementById('phone');
const emailContacto = document.getElementById('email');
$('#showForm').on('click', function (event) {
event.preventDefault();
$('#staticBackdrop').fadeOut();
if ((pax, fa, fb !== undefined)) {
$('#reservaFinal').fadeIn('3000');
swal({
title: '¡Perfecto!',
text: 'Agrega tus datos y finaliza la reserva.'
});
} else {
swal({
title: 'Elige la fecha y cantidad de pasajeros para continuar',
icon: 'error'
});
}
});
const precio = (a, b) => {
return a * b;
};
let cabFinal;
let costoService;
$('.btnfinal').on('click', () => {
costoService = 0;
servicios.forEach((service) => {
if (service.selected && service.cantidadPersonas > 0) {
costoService += precio(service.costo, service.cantidadPersonas);
}
});
cabFinal = precio(cabElegida, estadiaTotal);
let costoTotal = estadia(cabFinal, costoService);
$('#costoTotal').html('');
$('#costoTotal').append(`Precio final: <strong>${costoTotal}</strong>`);
});
const lastForm = document.getElementById('reservaFinal');
lastForm.onsubmit = (event) => {
event.preventDefault();
swal({
title: '¡Tu reserva fue hecha con éxito!',
text: 'Gracias por elegirnos.',
icon: 'success'
});
};
let HTMLCard = '';
let HTMLError = '';
let contenidoJSON = '';
//AJAX
function Testimonios() {
$.ajax({
url: 'https://randomuser.me/api/?results= | {
this.id = id;
this.nombre = nombre;
this.costo = costo;
this.tipo = tipo;
this.descripcion = descripcion;
this.selected = selected;
this.cantidadPersonas = cantidadPersonas;
} | identifier_body | |
nginx_controller.go | rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;delete
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups="",resources=events,verbs=create;update;patch
func (r *NginxReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&nginxv1alpha1.Nginx{}).
Owns(&appsv1.Deployment{}).
Complete(r)
}
func (r *NginxReconciler) | (ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := r.Log.WithValues("nginx", req.NamespacedName)
var instance nginxv1alpha1.Nginx
err := r.Client.Get(ctx, req.NamespacedName, &instance)
if err != nil {
if errors.IsNotFound(err) {
log.Info("Nginx resource not found, skipping reconcile")
return ctrl.Result{}, nil
}
log.Error(err, "Unable to get Nginx resource")
return ctrl.Result{}, err
}
if !r.shouldManageNginx(&instance) {
log.V(1).Info("Nginx resource doesn't match annotations filters, skipping it")
return ctrl.Result{Requeue: true, RequeueAfter: 5 * time.Minute}, nil
}
if err := r.reconcileNginx(ctx, &instance); err != nil {
log.Error(err, "Fail to reconcile")
return ctrl.Result{}, err
}
if err := r.refreshStatus(ctx, &instance); err != nil {
log.Error(err, "Fail to refresh status subresource")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (r *NginxReconciler) reconcileNginx(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
if err := r.reconcileDeployment(ctx, nginx); err != nil {
return err
}
if err := r.reconcileService(ctx, nginx); err != nil {
return err
}
if err := r.reconcileIngress(ctx, nginx); err != nil {
return err
}
return nil
}
func (r *NginxReconciler) reconcileDeployment(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
newDeploy, err := k8s.NewDeployment(nginx)
if err != nil {
return fmt.Errorf("failed to build Deployment from Nginx: %w", err)
}
var currentDeploy appsv1.Deployment
err = r.Client.Get(ctx, types.NamespacedName{Name: newDeploy.Name, Namespace: newDeploy.Namespace}, ¤tDeploy)
if errors.IsNotFound(err) {
return r.Client.Create(ctx, newDeploy)
}
if err != nil {
return fmt.Errorf("failed to retrieve Deployment: %w", err)
}
existingNginxSpec, err := k8s.ExtractNginxSpec(currentDeploy.ObjectMeta)
if err != nil {
return fmt.Errorf("failed to extract Nginx spec from Deployment annotations: %w", err)
}
if reflect.DeepEqual(nginx.Spec, existingNginxSpec) {
return nil
}
replicas := currentDeploy.Spec.Replicas
patch := client.StrategicMergeFrom(currentDeploy.DeepCopy())
currentDeploy.Spec = newDeploy.Spec
if newDeploy.Spec.Replicas == nil {
// NOTE: replicas field is set to nil whenever it's managed by some
// autoscaler controller e.g HPA.
currentDeploy.Spec.Replicas = replicas
}
err = k8s.SetNginxSpec(¤tDeploy.ObjectMeta, nginx.Spec)
if err != nil {
return fmt.Errorf("failed to set Nginx spec in Deployment annotations: %w", err)
}
err = r.Client.Patch(ctx, ¤tDeploy, patch)
if err != nil {
return fmt.Errorf("failed to patch Deployment: %w", err)
}
return nil
}
func (r *NginxReconciler) reconcileService(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
newService := k8s.NewService(nginx)
var currentService corev1.Service
err := r.Client.Get(ctx, types.NamespacedName{Name: newService.Name, Namespace: newService.Namespace}, ¤tService)
if errors.IsNotFound(err) {
err = r.Client.Create(ctx, newService)
if errors.IsForbidden(err) && strings.Contains(err.Error(), "exceeded quota") {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceQuotaExceeded", "failed to create Service: %s", err)
return err
}
if err != nil {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceCreationFailed", "failed to create Service: %s", err)
return err
}
r.EventRecorder.Eventf(nginx, corev1.EventTypeNormal, "ServiceCreated", "service created successfully")
return nil
}
if err != nil {
return fmt.Errorf("failed to retrieve Service resource: %v", err)
}
newService.ResourceVersion = currentService.ResourceVersion
newService.Spec.ClusterIP = currentService.Spec.ClusterIP
newService.Spec.HealthCheckNodePort = currentService.Spec.HealthCheckNodePort
newService.Finalizers = currentService.Finalizers
for annotation, value := range currentService.Annotations {
if newService.Annotations[annotation] == "" {
newService.Annotations[annotation] = value
}
}
if newService.Spec.Type == corev1.ServiceTypeNodePort || newService.Spec.Type == corev1.ServiceTypeLoadBalancer {
// avoid nodeport reallocation preserving the current ones
for _, currentPort := range currentService.Spec.Ports {
for index, newPort := range newService.Spec.Ports {
if currentPort.Port == newPort.Port {
newService.Spec.Ports[index].NodePort = currentPort.NodePort
}
}
}
}
err = r.Client.Update(ctx, newService)
if err != nil {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceUpdateFailed", "failed to update Service: %s", err)
return err
}
r.EventRecorder.Eventf(nginx, corev1.EventTypeNormal, "ServiceUpdated", "service updated successfully")
return nil
}
func (r *NginxReconciler) reconcileIngress(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
if nginx == nil {
return fmt.Errorf("nginx cannot be nil")
}
newIngress := k8s.NewIngress(nginx)
var currentIngress networkingv1.Ingress
err := r.Client.Get(ctx, types.NamespacedName{Name: newIngress.Name, Namespace: newIngress.Namespace}, ¤tIngress)
if errors.IsNotFound(err) {
if nginx.Spec.Ingress == nil {
return nil
}
return r.Client.Create(ctx, newIngress)
}
if err != nil {
return err
}
if nginx.Spec.Ingress == nil {
return r.Client.Delete(ctx, ¤tIngress)
}
if !shouldUpdateIngress(¤tIngress, newIngress) {
return nil
}
newIngress.ResourceVersion = currentIngress.ResourceVersion
newIngress.Finalizers = currentIngress.Finalizers
return r.Client.Update(ctx, newIngress)
}
func shouldUpdateIngress(currentIngress, newIngress *networkingv1.Ingress) bool {
if currentIngress == nil || newIngress == nil {
return false
}
return !reflect.DeepEqual(currentIngress.Annotations, newIngress.Annotations) ||
!reflect.DeepEqual(currentIngress.Labels, newIngress.Labels) ||
!reflect.DeepEqual(currentIngress.Spec, newIngress.Spec)
}
func (r *NginxReconciler) refreshStatus(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
deploys, err := listDeployments(ctx, r.Client, nginx)
if err != nil {
return err
}
var deployStatuses []v1alpha1.DeploymentStatus
var replicas int32
for _, d := range deploys {
replicas += d.Status.Replicas
deployStatuses = append(deployStatuses, v1alpha1.DeploymentStatus{Name: d.Name})
}
services, err := listServices(ctx, r.Client, nginx)
if err != nil {
return fmt.Errorf("failed to list services for nginx: %v", err)
}
ingresses, err := listIngresses(ctx, r.Client, nginx)
if err != nil {
return fmt.Errorf("failed to list ingresses for nginx: %w", err)
}
sort.Slice(nginx.Status.Services, func(i, j int) bool {
return nginx.Status.Services[i].Name < nginx.Status.Services[j].Name
})
sort.Slice(nginx.Status.Ingresses, func(i, j int) bool {
return nginx.Status.Ingresses[i].Name < nginx.Status.Ingresses[j].Name
})
status := v1alpha1 | Reconcile | identifier_name |
nginx_controller.go | {
return nil
}
replicas := currentDeploy.Spec.Replicas
patch := client.StrategicMergeFrom(currentDeploy.DeepCopy())
currentDeploy.Spec = newDeploy.Spec
if newDeploy.Spec.Replicas == nil {
// NOTE: replicas field is set to nil whenever it's managed by some
// autoscaler controller e.g HPA.
currentDeploy.Spec.Replicas = replicas
}
err = k8s.SetNginxSpec(¤tDeploy.ObjectMeta, nginx.Spec)
if err != nil {
return fmt.Errorf("failed to set Nginx spec in Deployment annotations: %w", err)
}
err = r.Client.Patch(ctx, ¤tDeploy, patch)
if err != nil {
return fmt.Errorf("failed to patch Deployment: %w", err)
}
return nil
}
func (r *NginxReconciler) reconcileService(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
newService := k8s.NewService(nginx)
var currentService corev1.Service
err := r.Client.Get(ctx, types.NamespacedName{Name: newService.Name, Namespace: newService.Namespace}, ¤tService)
if errors.IsNotFound(err) {
err = r.Client.Create(ctx, newService)
if errors.IsForbidden(err) && strings.Contains(err.Error(), "exceeded quota") {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceQuotaExceeded", "failed to create Service: %s", err)
return err
}
if err != nil {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceCreationFailed", "failed to create Service: %s", err)
return err
}
r.EventRecorder.Eventf(nginx, corev1.EventTypeNormal, "ServiceCreated", "service created successfully")
return nil
}
if err != nil {
return fmt.Errorf("failed to retrieve Service resource: %v", err)
}
newService.ResourceVersion = currentService.ResourceVersion
newService.Spec.ClusterIP = currentService.Spec.ClusterIP
newService.Spec.HealthCheckNodePort = currentService.Spec.HealthCheckNodePort
newService.Finalizers = currentService.Finalizers
for annotation, value := range currentService.Annotations {
if newService.Annotations[annotation] == "" {
newService.Annotations[annotation] = value
}
}
if newService.Spec.Type == corev1.ServiceTypeNodePort || newService.Spec.Type == corev1.ServiceTypeLoadBalancer {
// avoid nodeport reallocation preserving the current ones
for _, currentPort := range currentService.Spec.Ports {
for index, newPort := range newService.Spec.Ports {
if currentPort.Port == newPort.Port {
newService.Spec.Ports[index].NodePort = currentPort.NodePort
}
}
}
}
err = r.Client.Update(ctx, newService)
if err != nil {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceUpdateFailed", "failed to update Service: %s", err)
return err
}
r.EventRecorder.Eventf(nginx, corev1.EventTypeNormal, "ServiceUpdated", "service updated successfully")
return nil
}
func (r *NginxReconciler) reconcileIngress(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
if nginx == nil {
return fmt.Errorf("nginx cannot be nil")
}
newIngress := k8s.NewIngress(nginx)
var currentIngress networkingv1.Ingress
err := r.Client.Get(ctx, types.NamespacedName{Name: newIngress.Name, Namespace: newIngress.Namespace}, ¤tIngress)
if errors.IsNotFound(err) {
if nginx.Spec.Ingress == nil {
return nil
}
return r.Client.Create(ctx, newIngress)
}
if err != nil {
return err
}
if nginx.Spec.Ingress == nil {
return r.Client.Delete(ctx, ¤tIngress)
}
if !shouldUpdateIngress(¤tIngress, newIngress) {
return nil
}
newIngress.ResourceVersion = currentIngress.ResourceVersion
newIngress.Finalizers = currentIngress.Finalizers
return r.Client.Update(ctx, newIngress)
}
func shouldUpdateIngress(currentIngress, newIngress *networkingv1.Ingress) bool {
if currentIngress == nil || newIngress == nil {
return false
}
return !reflect.DeepEqual(currentIngress.Annotations, newIngress.Annotations) ||
!reflect.DeepEqual(currentIngress.Labels, newIngress.Labels) ||
!reflect.DeepEqual(currentIngress.Spec, newIngress.Spec)
}
func (r *NginxReconciler) refreshStatus(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
deploys, err := listDeployments(ctx, r.Client, nginx)
if err != nil {
return err
}
var deployStatuses []v1alpha1.DeploymentStatus
var replicas int32
for _, d := range deploys {
replicas += d.Status.Replicas
deployStatuses = append(deployStatuses, v1alpha1.DeploymentStatus{Name: d.Name})
}
services, err := listServices(ctx, r.Client, nginx)
if err != nil {
return fmt.Errorf("failed to list services for nginx: %v", err)
}
ingresses, err := listIngresses(ctx, r.Client, nginx)
if err != nil {
return fmt.Errorf("failed to list ingresses for nginx: %w", err)
}
sort.Slice(nginx.Status.Services, func(i, j int) bool {
return nginx.Status.Services[i].Name < nginx.Status.Services[j].Name
})
sort.Slice(nginx.Status.Ingresses, func(i, j int) bool {
return nginx.Status.Ingresses[i].Name < nginx.Status.Ingresses[j].Name
})
status := v1alpha1.NginxStatus{
CurrentReplicas: replicas,
PodSelector: k8s.LabelsForNginxString(nginx.Name),
Deployments: deployStatuses,
Services: services,
Ingresses: ingresses,
}
if reflect.DeepEqual(nginx.Status, status) {
return nil
}
nginx.Status = status
err = r.Client.Status().Update(ctx, nginx)
if err != nil {
return fmt.Errorf("failed to update nginx status: %v", err)
}
return nil
}
func listDeployments(ctx context.Context, c client.Client, nginx *nginxv1alpha1.Nginx) ([]appsv1.Deployment, error) {
var deployList appsv1.DeploymentList
err := c.List(ctx, &deployList, &client.ListOptions{
Namespace: nginx.Namespace,
LabelSelector: labels.SelectorFromSet(k8s.LabelsForNginx(nginx.Name)),
})
if err != nil {
return nil, err
}
deploys := deployList.Items
// NOTE: specific implementation for backward compatibility w/ Deployments
// that does not have Nginx labels yet.
if len(deploys) == 0 {
err = c.List(ctx, &deployList, &client.ListOptions{Namespace: nginx.Namespace})
if err != nil {
return nil, err
}
desired := *metav1.NewControllerRef(nginx, schema.GroupVersionKind{
Group: v1alpha1.GroupVersion.Group,
Version: v1alpha1.GroupVersion.Version,
Kind: "Nginx",
})
for _, deploy := range deployList.Items {
for _, owner := range deploy.OwnerReferences {
if reflect.DeepEqual(owner, desired) {
deploys = append(deploys, deploy)
}
}
}
}
sort.Slice(deploys, func(i, j int) bool { return deploys[i].Name < deploys[j].Name })
return deploys, nil
}
// listServices return all the services for the given nginx sorted by name
func listServices(ctx context.Context, c client.Client, nginx *nginxv1alpha1.Nginx) ([]nginxv1alpha1.ServiceStatus, error) {
serviceList := &corev1.ServiceList{}
labelSelector := labels.SelectorFromSet(k8s.LabelsForNginx(nginx.Name))
listOps := &client.ListOptions{Namespace: nginx.Namespace, LabelSelector: labelSelector}
err := c.List(ctx, serviceList, listOps)
if err != nil {
return nil, err
}
var services []nginxv1alpha1.ServiceStatus
for _, s := range serviceList.Items {
services = append(services, nginxv1alpha1.ServiceStatus{
Name: s.Name,
})
}
sort.Slice(services, func(i, j int) bool {
return services[i].Name < services[j].Name
})
return services, nil
}
func listIngresses(ctx context.Context, c client.Client, nginx *nginxv1alpha1.Nginx) ([]nginxv1alpha1.IngressStatus, error) {
var ingressList networkingv1.IngressList
options := &client.ListOptions{
LabelSelector: labels.SelectorFromSet(k8s.LabelsForNginx(nginx.Name)),
Namespace: nginx.Namespace,
}
if err := c.List(ctx, &ingressList, options); err != nil | {
return nil, err
} | conditional_block | |
nginx_controller.go | rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;delete
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups="",resources=events,verbs=create;update;patch
func (r *NginxReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&nginxv1alpha1.Nginx{}).
Owns(&appsv1.Deployment{}).
Complete(r)
}
func (r *NginxReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := r.Log.WithValues("nginx", req.NamespacedName)
var instance nginxv1alpha1.Nginx
err := r.Client.Get(ctx, req.NamespacedName, &instance)
if err != nil {
if errors.IsNotFound(err) {
log.Info("Nginx resource not found, skipping reconcile")
return ctrl.Result{}, nil
}
log.Error(err, "Unable to get Nginx resource")
return ctrl.Result{}, err
}
if !r.shouldManageNginx(&instance) {
log.V(1).Info("Nginx resource doesn't match annotations filters, skipping it")
return ctrl.Result{Requeue: true, RequeueAfter: 5 * time.Minute}, nil
}
if err := r.reconcileNginx(ctx, &instance); err != nil {
log.Error(err, "Fail to reconcile")
return ctrl.Result{}, err
}
if err := r.refreshStatus(ctx, &instance); err != nil {
log.Error(err, "Fail to refresh status subresource")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (r *NginxReconciler) reconcileNginx(ctx context.Context, nginx *nginxv1alpha1.Nginx) error |
func (r *NginxReconciler) reconcileDeployment(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
newDeploy, err := k8s.NewDeployment(nginx)
if err != nil {
return fmt.Errorf("failed to build Deployment from Nginx: %w", err)
}
var currentDeploy appsv1.Deployment
err = r.Client.Get(ctx, types.NamespacedName{Name: newDeploy.Name, Namespace: newDeploy.Namespace}, ¤tDeploy)
if errors.IsNotFound(err) {
return r.Client.Create(ctx, newDeploy)
}
if err != nil {
return fmt.Errorf("failed to retrieve Deployment: %w", err)
}
existingNginxSpec, err := k8s.ExtractNginxSpec(currentDeploy.ObjectMeta)
if err != nil {
return fmt.Errorf("failed to extract Nginx spec from Deployment annotations: %w", err)
}
if reflect.DeepEqual(nginx.Spec, existingNginxSpec) {
return nil
}
replicas := currentDeploy.Spec.Replicas
patch := client.StrategicMergeFrom(currentDeploy.DeepCopy())
currentDeploy.Spec = newDeploy.Spec
if newDeploy.Spec.Replicas == nil {
// NOTE: replicas field is set to nil whenever it's managed by some
// autoscaler controller e.g HPA.
currentDeploy.Spec.Replicas = replicas
}
err = k8s.SetNginxSpec(¤tDeploy.ObjectMeta, nginx.Spec)
if err != nil {
return fmt.Errorf("failed to set Nginx spec in Deployment annotations: %w", err)
}
err = r.Client.Patch(ctx, ¤tDeploy, patch)
if err != nil {
return fmt.Errorf("failed to patch Deployment: %w", err)
}
return nil
}
func (r *NginxReconciler) reconcileService(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
newService := k8s.NewService(nginx)
var currentService corev1.Service
err := r.Client.Get(ctx, types.NamespacedName{Name: newService.Name, Namespace: newService.Namespace}, ¤tService)
if errors.IsNotFound(err) {
err = r.Client.Create(ctx, newService)
if errors.IsForbidden(err) && strings.Contains(err.Error(), "exceeded quota") {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceQuotaExceeded", "failed to create Service: %s", err)
return err
}
if err != nil {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceCreationFailed", "failed to create Service: %s", err)
return err
}
r.EventRecorder.Eventf(nginx, corev1.EventTypeNormal, "ServiceCreated", "service created successfully")
return nil
}
if err != nil {
return fmt.Errorf("failed to retrieve Service resource: %v", err)
}
newService.ResourceVersion = currentService.ResourceVersion
newService.Spec.ClusterIP = currentService.Spec.ClusterIP
newService.Spec.HealthCheckNodePort = currentService.Spec.HealthCheckNodePort
newService.Finalizers = currentService.Finalizers
for annotation, value := range currentService.Annotations {
if newService.Annotations[annotation] == "" {
newService.Annotations[annotation] = value
}
}
if newService.Spec.Type == corev1.ServiceTypeNodePort || newService.Spec.Type == corev1.ServiceTypeLoadBalancer {
// avoid nodeport reallocation preserving the current ones
for _, currentPort := range currentService.Spec.Ports {
for index, newPort := range newService.Spec.Ports {
if currentPort.Port == newPort.Port {
newService.Spec.Ports[index].NodePort = currentPort.NodePort
}
}
}
}
err = r.Client.Update(ctx, newService)
if err != nil {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceUpdateFailed", "failed to update Service: %s", err)
return err
}
r.EventRecorder.Eventf(nginx, corev1.EventTypeNormal, "ServiceUpdated", "service updated successfully")
return nil
}
func (r *NginxReconciler) reconcileIngress(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
if nginx == nil {
return fmt.Errorf("nginx cannot be nil")
}
newIngress := k8s.NewIngress(nginx)
var currentIngress networkingv1.Ingress
err := r.Client.Get(ctx, types.NamespacedName{Name: newIngress.Name, Namespace: newIngress.Namespace}, ¤tIngress)
if errors.IsNotFound(err) {
if nginx.Spec.Ingress == nil {
return nil
}
return r.Client.Create(ctx, newIngress)
}
if err != nil {
return err
}
if nginx.Spec.Ingress == nil {
return r.Client.Delete(ctx, ¤tIngress)
}
if !shouldUpdateIngress(¤tIngress, newIngress) {
return nil
}
newIngress.ResourceVersion = currentIngress.ResourceVersion
newIngress.Finalizers = currentIngress.Finalizers
return r.Client.Update(ctx, newIngress)
}
// shouldUpdateIngress reports whether the desired Ingress differs from the
// observed one in any of the operator-managed fields (annotations, labels or
// spec). A nil argument on either side yields false: nothing to compare.
func shouldUpdateIngress(currentIngress, newIngress *networkingv1.Ingress) bool {
	if currentIngress == nil || newIngress == nil {
		return false
	}
	unchanged := reflect.DeepEqual(currentIngress.Annotations, newIngress.Annotations) &&
		reflect.DeepEqual(currentIngress.Labels, newIngress.Labels) &&
		reflect.DeepEqual(currentIngress.Spec, newIngress.Spec)
	return !unchanged
}
func (r *NginxReconciler) refreshStatus(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
deploys, err := listDeployments(ctx, r.Client, nginx)
if err != nil {
return err
}
var deployStatuses []v1alpha1.DeploymentStatus
var replicas int32
for _, d := range deploys {
replicas += d.Status.Replicas
deployStatuses = append(deployStatuses, v1alpha1.DeploymentStatus{Name: d.Name})
}
services, err := listServices(ctx, r.Client, nginx)
if err != nil {
return fmt.Errorf("failed to list services for nginx: %v", err)
}
ingresses, err := listIngresses(ctx, r.Client, nginx)
if err != nil {
return fmt.Errorf("failed to list ingresses for nginx: %w", err)
}
sort.Slice(nginx.Status.Services, func(i, j int) bool {
return nginx.Status.Services[i].Name < nginx.Status.Services[j].Name
})
sort.Slice(nginx.Status.Ingresses, func(i, j int) bool {
return nginx.Status.Ingresses[i].Name < nginx.Status.Ingresses[j].Name
})
status := v1alpha | {
if err := r.reconcileDeployment(ctx, nginx); err != nil {
return err
}
if err := r.reconcileService(ctx, nginx); err != nil {
return err
}
if err := r.reconcileIngress(ctx, nginx); err != nil {
return err
}
return nil
} | identifier_body |
nginx_controller.go | "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/tsuru/nginx-operator/api/v1alpha1"
nginxv1alpha1 "github.com/tsuru/nginx-operator/api/v1alpha1"
"github.com/tsuru/nginx-operator/pkg/k8s"
)
// NginxReconciler reconciles a Nginx object
type NginxReconciler struct {
	client.Client
	// EventRecorder emits Kubernetes Events describing reconcile outcomes
	// (e.g. Service creation/update success or failure).
	EventRecorder record.EventRecorder
	// Log is the base logger; Reconcile derives a per-request logger from it.
	Log logr.Logger
	// Scheme is the API type scheme; not referenced in the code visible here.
	Scheme *runtime.Scheme
	// AnnotationFilter is a label selector applied to annotations; presumably
	// consulted by shouldManageNginx to skip non-matching resources — confirm
	// against that method's implementation.
	AnnotationFilter labels.Selector
}
// +kubebuilder:rbac:groups=nginx.tsuru.io,resources=nginxes,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=nginx.tsuru.io,resources=nginxes/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;delete
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups="",resources=events,verbs=create;update;patch
// SetupWithManager registers this reconciler with the manager, watching Nginx
// resources as the primary type and reacting to changes in owned Deployments.
func (r *NginxReconciler) SetupWithManager(mgr ctrl.Manager) error {
	b := ctrl.NewControllerManagedBy(mgr)
	b = b.For(&nginxv1alpha1.Nginx{})
	b = b.Owns(&appsv1.Deployment{})
	return b.Complete(r)
}
// Reconcile drives a single Nginx resource towards its desired state: fetch
// the object, apply the annotation filter, reconcile owned resources, then
// refresh the status subresource.
func (r *NginxReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log := r.Log.WithValues("nginx", req.NamespacedName)

	var nginx nginxv1alpha1.Nginx
	switch err := r.Client.Get(ctx, req.NamespacedName, &nginx); {
	case errors.IsNotFound(err):
		// Deleted between enqueue and reconcile; nothing to do.
		log.Info("Nginx resource not found, skipping reconcile")
		return ctrl.Result{}, nil
	case err != nil:
		log.Error(err, "Unable to get Nginx resource")
		return ctrl.Result{}, err
	}

	if !r.shouldManageNginx(&nginx) {
		// Re-check periodically in case the annotations change.
		log.V(1).Info("Nginx resource doesn't match annotations filters, skipping it")
		return ctrl.Result{Requeue: true, RequeueAfter: 5 * time.Minute}, nil
	}

	if err := r.reconcileNginx(ctx, &nginx); err != nil {
		log.Error(err, "Fail to reconcile")
		return ctrl.Result{}, err
	}
	if err := r.refreshStatus(ctx, &nginx); err != nil {
		log.Error(err, "Fail to refresh status subresource")
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}
// reconcileNginx reconciles every resource owned by the Nginx object, in a
// fixed order (Deployment, Service, Ingress), stopping at the first failure.
func (r *NginxReconciler) reconcileNginx(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
	steps := []func(context.Context, *nginxv1alpha1.Nginx) error{
		r.reconcileDeployment,
		r.reconcileService,
		r.reconcileIngress,
	}
	for _, step := range steps {
		if err := step(ctx, nginx); err != nil {
			return err
		}
	}
	return nil
}
func (r *NginxReconciler) reconcileDeployment(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
newDeploy, err := k8s.NewDeployment(nginx)
if err != nil {
return fmt.Errorf("failed to build Deployment from Nginx: %w", err)
}
var currentDeploy appsv1.Deployment
err = r.Client.Get(ctx, types.NamespacedName{Name: newDeploy.Name, Namespace: newDeploy.Namespace}, ¤tDeploy)
if errors.IsNotFound(err) {
return r.Client.Create(ctx, newDeploy)
}
if err != nil {
return fmt.Errorf("failed to retrieve Deployment: %w", err)
}
existingNginxSpec, err := k8s.ExtractNginxSpec(currentDeploy.ObjectMeta)
if err != nil {
return fmt.Errorf("failed to extract Nginx spec from Deployment annotations: %w", err)
}
if reflect.DeepEqual(nginx.Spec, existingNginxSpec) {
return nil
}
replicas := currentDeploy.Spec.Replicas
patch := client.StrategicMergeFrom(currentDeploy.DeepCopy())
currentDeploy.Spec = newDeploy.Spec
if newDeploy.Spec.Replicas == nil {
// NOTE: replicas field is set to nil whenever it's managed by some
// autoscaler controller e.g HPA.
currentDeploy.Spec.Replicas = replicas
}
err = k8s.SetNginxSpec(¤tDeploy.ObjectMeta, nginx.Spec)
if err != nil {
return fmt.Errorf("failed to set Nginx spec in Deployment annotations: %w", err)
}
err = r.Client.Patch(ctx, ¤tDeploy, patch)
if err != nil {
return fmt.Errorf("failed to patch Deployment: %w", err)
}
return nil
}
func (r *NginxReconciler) reconcileService(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
newService := k8s.NewService(nginx)
var currentService corev1.Service
err := r.Client.Get(ctx, types.NamespacedName{Name: newService.Name, Namespace: newService.Namespace}, ¤tService)
if errors.IsNotFound(err) {
err = r.Client.Create(ctx, newService)
if errors.IsForbidden(err) && strings.Contains(err.Error(), "exceeded quota") {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceQuotaExceeded", "failed to create Service: %s", err)
return err
}
if err != nil {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceCreationFailed", "failed to create Service: %s", err)
return err
}
r.EventRecorder.Eventf(nginx, corev1.EventTypeNormal, "ServiceCreated", "service created successfully")
return nil
}
if err != nil {
return fmt.Errorf("failed to retrieve Service resource: %v", err)
}
newService.ResourceVersion = currentService.ResourceVersion
newService.Spec.ClusterIP = currentService.Spec.ClusterIP
newService.Spec.HealthCheckNodePort = currentService.Spec.HealthCheckNodePort
newService.Finalizers = currentService.Finalizers
for annotation, value := range currentService.Annotations {
if newService.Annotations[annotation] == "" {
newService.Annotations[annotation] = value
}
}
if newService.Spec.Type == corev1.ServiceTypeNodePort || newService.Spec.Type == corev1.ServiceTypeLoadBalancer {
// avoid nodeport reallocation preserving the current ones
for _, currentPort := range currentService.Spec.Ports {
for index, newPort := range newService.Spec.Ports {
if currentPort.Port == newPort.Port {
newService.Spec.Ports[index].NodePort = currentPort.NodePort
}
}
}
}
err = r.Client.Update(ctx, newService)
if err != nil {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceUpdateFailed", "failed to update Service: %s", err)
return err
}
r.EventRecorder.Eventf(nginx, corev1.EventTypeNormal, "ServiceUpdated", "service updated successfully")
return nil
}
func (r *NginxReconciler) reconcileIngress(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
if nginx == nil {
return fmt.Errorf("nginx cannot be nil")
}
newIngress := k8s.NewIngress(nginx)
var currentIngress networkingv1.Ingress
err := r.Client.Get(ctx, types.NamespacedName{Name: newIngress.Name, Namespace: newIngress.Namespace}, ¤tIngress)
if errors.IsNotFound(err) {
if nginx.Spec.Ingress == nil {
return nil
}
return r.Client.Create(ctx, newIngress)
}
if err != nil {
return err
}
if nginx.Spec.Ingress == nil {
return r.Client.Delete(ctx, ¤tIngress)
}
if !shouldUpdateIngress(¤tIngress, newIngress) {
return nil
}
newIngress.ResourceVersion = currentIngress.ResourceVersion
newIngress.Finalizers = currentIngress.Finalizers
return r.Client.Update(ctx, newIngress)
}
func shouldUpdateIngress(currentIngress, newIngress *networkingv1.Ingress) bool {
if currentIngress == nil || newIngress == nil {
return false
}
return !reflect.DeepEqual(currentIngress.Annotations, newIngress.Annotations) ||
!reflect.DeepEqual(currentIngress.Labels, newIngress.Labels) ||
!reflect.DeepEqual | networkingv1 "k8s.io/api/networking/v1" | random_line_split | |
provider.go | v.(*constructInput)
for i := 0; i < typ.NumField(); i++ {
fieldV := argsV.Field(i)
if !fieldV.CanSet() {
continue
}
field := typ.Field(i)
tag, has := field.Tag.Lookup("pulumi")
if !has || tag != k {
continue
}
handleField := func(typ reflect.Type, value resource.PropertyValue, deps []Resource) (reflect.Value, error) {
resultType := anyOutputType
if typ.Implements(outputType) {
resultType = typ
} else if typ.Implements(inputType) {
toOutputMethodName := "To" + strings.TrimSuffix(typ.Name(), "Input") + "Output"
if toOutputMethod, found := typ.MethodByName(toOutputMethodName); found {
mt := toOutputMethod.Type
if mt.NumIn() == 0 && mt.NumOut() == 1 && mt.Out(0).Implements(outputType) {
resultType = mt.Out(0)
}
}
}
output := ctx.newOutput(resultType, deps...)
dest := reflect.New(output.ElementType()).Elem()
known := !ci.value.ContainsUnknowns()
secret, err := unmarshalOutput(ctx, value, dest)
if err != nil {
return reflect.Value{}, err
}
output.getState().resolve(dest.Interface(), known, secret, nil)
return reflect.ValueOf(output), nil
}
isInputType := func(typ reflect.Type) bool {
return typ.Implements(outputType) || typ.Implements(inputType)
}
if isInputType(field.Type) {
val, err := handleField(field.Type, ci.value, ci.deps)
if err != nil {
return err
}
fieldV.Set(val)
continue
}
if field.Type.Kind() == reflect.Slice && isInputType(field.Type.Elem()) {
elemType := field.Type.Elem()
length := len(ci.value.ArrayValue())
dest := reflect.MakeSlice(field.Type, length, length)
for i := 0; i < length; i++ {
val, err := handleField(elemType, ci.value.ArrayValue()[i], ci.deps)
if err != nil {
return err
}
dest.Index(i).Set(val)
}
fieldV.Set(dest)
continue
}
if field.Type.Kind() == reflect.Map && isInputType(field.Type.Elem()) {
elemType := field.Type.Elem()
length := len(ci.value.ObjectValue())
dest := reflect.MakeMapWithSize(field.Type, length)
for k, v := range ci.value.ObjectValue() {
key := reflect.ValueOf(string(k))
val, err := handleField(elemType, v, ci.deps)
if err != nil {
return err
}
dest.SetMapIndex(key, val)
}
fieldV.Set(dest)
continue
}
if len(ci.deps) > 0 {
return errors.Errorf(
"%s.%s is typed as %v but must be typed as Input or Output for input %q with dependencies",
typ, field.Name, field.Type, k)
}
dest := reflect.New(field.Type).Elem()
secret, err := unmarshalOutput(ctx, ci.value, dest)
if err != nil {
return errors.Wrapf(err, "unmarshaling input %s", k)
}
if secret {
return errors.Errorf(
"%s.%s is typed as %v but must be typed as Input or Output for secret input %q",
typ, field.Name, field.Type, k)
}
fieldV.Set(reflect.ValueOf(dest.Interface()))
}
}
return nil
}
// newConstructResult converts a resource into its associated URN and state.
// The state map is built by reflecting over the resource struct: every
// exported field carrying a `pulumi:"..."` tag is recorded under its tag name.
func newConstructResult(resource ComponentResource) (URNInput, Input, error) {
	if resource == nil {
		return nil, nil, errors.New("resource must not be nil")
	}
	resourceV := reflect.ValueOf(resource)
	typ := resourceV.Type()
	if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
		return nil, nil, errors.New("resource must be a pointer to a struct")
	}
	// Dereference so field iteration sees the struct, not the pointer.
	resourceV, typ = resourceV.Elem(), typ.Elem()
	state := make(Map)
	for i := 0; i < typ.NumField(); i++ {
		fieldV := resourceV.Field(i)
		if !fieldV.CanInterface() {
			// Unexported fields cannot be read via reflection; skip them.
			continue
		}
		field := typ.Field(i)
		tag, has := field.Tag.Lookup("pulumi")
		if !has {
			// Only `pulumi`-tagged fields belong to the output state.
			continue
		}
		val := fieldV.Interface()
		if v, ok := val.(Input); ok {
			state[tag] = v
		} else {
			// Plain values are lifted into outputs so the state map is uniform.
			state[tag] = ToOutput(val)
		}
	}
	return resource.URN(), state, nil
}
// callFunc is the provider-supplied implementation that services a single gRPC
// Call request: it receives the deserialized arguments keyed by name and
// returns the result properties (or an error).
type callFunc func(ctx *Context, tok string, args map[string]interface{}) (Input, error)
// call adapts the gRPC CallRequest/CallResponse to/from the Pulumi Go SDK programming model.
// It builds a run context from the request, deserializes the arguments (with
// their dependency URNs), invokes callF, waits for outstanding async work, and
// marshals the result properties and their dependencies back over gRPC.
func call(ctx context.Context, req *pulumirpc.CallRequest, engineConn *grpc.ClientConn,
	callF callFunc) (*pulumirpc.CallResponse, error) {
	// Configure the RunInfo.
	runInfo := RunInfo{
		Project:     req.GetProject(),
		Stack:       req.GetStack(),
		Config:      req.GetConfig(),
		Parallel:    int(req.GetParallel()),
		DryRun:      req.GetDryRun(),
		MonitorAddr: req.GetMonitorEndpoint(),
		engineConn:  engineConn,
	}
	pulumiCtx, err := NewContext(ctx, runInfo)
	if err != nil {
		return nil, errors.Wrap(err, "constructing run context")
	}

	// Deserialize the inputs and apply appropriate dependencies.
	argDependencies := req.GetArgDependencies()
	deserializedArgs, err := plugin.UnmarshalProperties(
		req.GetArgs(),
		plugin.MarshalOptions{KeepSecrets: true, KeepResources: true, KeepUnknowns: req.GetDryRun()},
	)
	if err != nil {
		return nil, errors.Wrap(err, "unmarshaling inputs")
	}
	args := make(map[string]interface{}, len(deserializedArgs))
	for key, value := range deserializedArgs {
		k := string(key)
		var deps []Resource
		if inputDeps, ok := argDependencies[k]; ok {
			deps = make([]Resource, len(inputDeps.GetUrns()))
			for i, depURN := range inputDeps.GetUrns() {
				deps[i] = pulumiCtx.newDependencyResource(URN(depURN))
			}
		}
		args[k] = &constructInput{
			value: value,
			deps:  deps,
		}
	}

	result, err := callF(pulumiCtx, req.GetTok(), args)
	if err != nil {
		return nil, err
	}

	// Wait for async work to finish.
	if err = pulumiCtx.wait(); err != nil {
		return nil, err
	}

	// Serialize all result properties, first by awaiting them, and then marshaling them to the requisite gRPC values.
	resolvedProps, propertyDeps, _, err := marshalInputs(result)
	if err != nil {
		return nil, errors.Wrap(err, "marshaling properties")
	}

	// Marshal all properties for the RPC call.
	keepUnknowns := req.GetDryRun()
	rpcProps, err := plugin.MarshalProperties(
		resolvedProps,
		plugin.MarshalOptions{KeepSecrets: true, KeepUnknowns: keepUnknowns, KeepResources: pulumiCtx.keepResources})
	if err != nil {
		return nil, errors.Wrap(err, "marshaling properties")
	}

	// Convert the property dependencies map for RPC and remove duplicates.
	rpcPropertyDeps := make(map[string]*pulumirpc.CallResponse_ReturnDependencies)
	for k, deps := range propertyDeps {
		sort.Slice(deps, func(i, j int) bool { return deps[i] < deps[j] })

		urns := make([]string, 0, len(deps))
		for i, d := range deps {
			// BUG FIX: the original compared against urns[i-1], which indexes
			// past the end of urns once a duplicate has been skipped (sorted
			// deps [a, a, b] panicked at i == 2, since urns held only one
			// element). Comparing against the previous element of the sorted
			// deps slice performs the same dedupe and always stays in bounds.
			if i > 0 && deps[i-1] == d {
				continue
			}
			urns = append(urns, string(d))
		}
		rpcPropertyDeps[k] = &pulumirpc.CallResponse_ReturnDependencies{
			Urns: urns,
		}
	}

	return &pulumirpc.CallResponse{
		Return:             rpcProps,
		ReturnDependencies: rpcPropertyDeps,
	}, nil
}
// callArgsCopyTo sets the args on the given args struct. If there is a `__self__` argument, it will be
// returned, otherwise it will return nil.
func callArgsCopyTo(ctx *Context, source map[string]interface{}, args interface{}) (Resource, error) {
	// Population of the args struct shares the construct implementation.
	if err := constructInputsCopyTo(ctx, source, args); err != nil {
		return nil, err
	}
	// Extract and return the `__self__` receiver, if present.
	return callArgsSelf(ctx, source)
}
// callArgsSelf retrieves the `__self__` argument. If `__self__` is present the value is returned,
// otherwise the returned value will be nil.
func | callArgsSelf | identifier_name | |
provider.go |
// Deserialize the inputs and apply appropriate dependencies.
inputDependencies := req.GetInputDependencies()
deserializedInputs, err := plugin.UnmarshalProperties(
req.GetInputs(),
plugin.MarshalOptions{KeepSecrets: true, KeepResources: true, KeepUnknowns: req.GetDryRun()},
)
if err != nil {
return nil, errors.Wrap(err, "unmarshaling inputs")
}
inputs := make(map[string]interface{}, len(deserializedInputs))
for key, value := range deserializedInputs {
k := string(key)
var deps []Resource
if inputDeps, ok := inputDependencies[k]; ok {
deps = make([]Resource, len(inputDeps.GetUrns()))
for i, depURN := range inputDeps.GetUrns() {
deps[i] = pulumiCtx.newDependencyResource(URN(depURN))
}
}
inputs[k] = &constructInput{
value: value,
deps: deps,
}
}
// Rebuild the resource options.
aliases := make([]Alias, len(req.GetAliases()))
for i, urn := range req.GetAliases() {
aliases[i] = Alias{URN: URN(urn)}
}
dependencyURNs := urnSet{}
for _, urn := range req.GetDependencies() {
dependencyURNs.add(URN(urn))
}
providers := make(map[string]ProviderResource, len(req.GetProviders()))
for pkg, ref := range req.GetProviders() {
resource, err := createProviderResource(pulumiCtx, ref)
if err != nil {
return nil, err
}
providers[pkg] = resource
}
var parent Resource
if req.GetParent() != "" {
parent = pulumiCtx.newDependencyResource(URN(req.GetParent()))
}
opts := resourceOption(func(ro *resourceOptions) {
ro.Aliases = aliases
ro.DependsOn = []func(ctx context.Context) (urnSet, error){
func(ctx context.Context) (urnSet, error) {
return dependencyURNs, nil
},
}
ro.Protect = req.GetProtect()
ro.Providers = providers
ro.Parent = parent
})
urn, state, err := constructF(pulumiCtx, req.GetType(), req.GetName(), inputs, opts)
if err != nil {
return nil, err
}
// Wait for async work to finish.
if err = pulumiCtx.wait(); err != nil {
return nil, err
}
rpcURN, _, _, err := urn.ToURNOutput().awaitURN(ctx)
if err != nil {
return nil, err
}
// Serialize all state properties, first by awaiting them, and then marshaling them to the requisite gRPC values.
resolvedProps, propertyDeps, _, err := marshalInputs(state)
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Marshal all properties for the RPC call.
keepUnknowns := req.GetDryRun()
rpcProps, err := plugin.MarshalProperties(
resolvedProps,
plugin.MarshalOptions{KeepSecrets: true, KeepUnknowns: keepUnknowns, KeepResources: pulumiCtx.keepResources})
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Convert the property dependencies map for RPC and remove duplicates.
rpcPropertyDeps := make(map[string]*pulumirpc.ConstructResponse_PropertyDependencies)
for k, deps := range propertyDeps {
sort.Slice(deps, func(i, j int) bool { return deps[i] < deps[j] })
urns := make([]string, 0, len(deps))
for i, d := range deps {
if i > 0 && urns[i-1] == string(d) {
continue
}
urns = append(urns, string(d))
}
rpcPropertyDeps[k] = &pulumirpc.ConstructResponse_PropertyDependencies{
Urns: urns,
}
}
return &pulumirpc.ConstructResponse{
Urn: string(rpcURN),
State: rpcProps,
StateDependencies: rpcPropertyDeps,
}, nil
}
// createProviderResource rehydrates the provider reference into a registered ProviderResource,
// otherwise it returns an instance of DependencyProviderResource.
func createProviderResource(ctx *Context, ref string) (ProviderResource, error) {
	// A provider reference has the shape "<urn>::<id>"; split on the LAST "::"
	// so any "::" inside the URN portion stays with the URN.
	sep := strings.LastIndex(ref, "::")
	if sep < 0 {
		return nil, errors.Errorf("expected '::' in provider reference %s", ref)
	}
	providerURN, providerID := ref[:sep], ref[sep+2:]

	// Unmarshal the provider resource as a resource reference so we get back
	// the intended provider type with its state, if it's been registered.
	res, err := unmarshalResourceReference(ctx, resource.ResourceReference{
		URN: resource.URN(providerURN),
		ID:  resource.NewStringProperty(providerID),
	})
	if err != nil {
		return nil, err
	}
	return res.(ProviderResource), nil
}
// constructInput pairs a raw property value received over gRPC with the
// resources it depends on, prior to being converted into an Output.
type constructInput struct {
	value resource.PropertyValue // deserialized property value; may contain unknowns/secrets
	deps  []Resource             // resources this input depends on
}
// constructInputsMap returns the inputs as a Map.
// Each raw input is unmarshaled and wrapped in an Output carrying the input's
// dependencies, its known-ness (unknowns occur during previews) and whether it
// is secret.
func constructInputsMap(ctx *Context, inputs map[string]interface{}) (Map, error) {
	result := make(Map, len(inputs))
	for k, v := range inputs {
		ci := v.(*constructInput)
		known := !ci.value.ContainsUnknowns()
		value, secret, err := unmarshalPropertyValue(ctx, ci.value)
		if err != nil {
			return nil, errors.Wrapf(err, "unmarshaling input %s", k)
		}
		// Use the concrete output type registered for the value's Go type, if
		// any; otherwise fall back to AnyOutput.
		resultType := anyOutputType
		if ot, ok := concreteTypeToOutputType.Load(reflect.TypeOf(value)); ok {
			resultType = ot.(reflect.Type)
		}
		output := ctx.newOutput(resultType, ci.deps...)
		output.getState().resolve(value, known, secret, nil)
		result[k] = output
	}
	return result, nil
}
// constructInputsCopyTo sets the inputs on the given args struct.
func constructInputsCopyTo(ctx *Context, inputs map[string]interface{}, args interface{}) error {
if args == nil {
return errors.New("args must not be nil")
}
argsV := reflect.ValueOf(args)
typ := argsV.Type()
if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
return errors.New("args must be a pointer to a struct")
}
argsV, typ = argsV.Elem(), typ.Elem()
for k, v := range inputs {
ci := v.(*constructInput)
for i := 0; i < typ.NumField(); i++ {
fieldV := argsV.Field(i)
if !fieldV.CanSet() {
continue
}
field := typ.Field(i)
tag, has := field.Tag.Lookup("pulumi")
if !has || tag != k {
continue
}
handleField := func(typ reflect.Type, value resource.PropertyValue, deps []Resource) (reflect.Value, error) {
resultType := anyOutputType
if typ.Implements(outputType) {
resultType = typ
} else if typ.Implements(inputType) {
toOutputMethodName := "To" + strings.TrimSuffix(typ.Name(), "Input") + "Output"
if toOutputMethod, found := typ.MethodByName(toOutputMethodName); found {
mt := toOutputMethod.Type
if mt.NumIn() == 0 && mt.NumOut() == 1 && mt.Out(0).Implements(outputType) {
resultType = mt.Out(0)
}
}
}
output := ctx.newOutput(resultType, deps...)
dest := reflect.New(output.ElementType()).Elem()
known := !ci.value.ContainsUnknowns()
secret, err := unmarshalOutput(ctx, value, dest)
if err != nil {
return reflect.Value{}, err
}
output.getState().resolve(dest.Interface(), known, secret, nil)
return reflect.ValueOf(output), nil
}
isInputType := func(typ reflect.Type) bool {
return typ.Implements(outputType) || typ.Implements(inputType)
}
if isInputType(field.Type) {
val, err := handleField(field.Type, ci.value, ci.deps)
if err != nil {
return err
}
fieldV.Set(val)
continue
}
if field.Type.Kind() == reflect.Slice && isInputType(field.Type.Elem()) {
elemType := field.Type.Elem()
length := len(ci.value.ArrayValue())
dest := reflect.MakeSlice(field.Type, length, length)
for i := 0; i < length; i++ {
val, err := handleField(elemType, ci.value.ArrayValue()[i], ci.deps)
if err != nil {
return err
}
dest.Index(i).Set(val)
}
fieldV.Set(dest)
continue
}
if field.Type.Kind() == reflect.Map && isInputType(field.Type.Elem()) {
elemType := field.Type.Elem()
length := len(ci.value.ObjectValue())
dest := reflect.MakeMapWithSize(field.Type, length)
| {
return nil, errors.Wrap(err, "constructing run context")
} | conditional_block | |
provider.go | () {
aliases[i] = Alias{URN: URN(urn)}
}
dependencyURNs := urnSet{}
for _, urn := range req.GetDependencies() {
dependencyURNs.add(URN(urn))
}
providers := make(map[string]ProviderResource, len(req.GetProviders()))
for pkg, ref := range req.GetProviders() {
resource, err := createProviderResource(pulumiCtx, ref)
if err != nil {
return nil, err
}
providers[pkg] = resource
}
var parent Resource
if req.GetParent() != "" {
parent = pulumiCtx.newDependencyResource(URN(req.GetParent()))
}
opts := resourceOption(func(ro *resourceOptions) {
ro.Aliases = aliases
ro.DependsOn = []func(ctx context.Context) (urnSet, error){
func(ctx context.Context) (urnSet, error) {
return dependencyURNs, nil
},
}
ro.Protect = req.GetProtect()
ro.Providers = providers
ro.Parent = parent
})
urn, state, err := constructF(pulumiCtx, req.GetType(), req.GetName(), inputs, opts)
if err != nil {
return nil, err
}
// Wait for async work to finish.
if err = pulumiCtx.wait(); err != nil {
return nil, err
}
rpcURN, _, _, err := urn.ToURNOutput().awaitURN(ctx)
if err != nil {
return nil, err
}
// Serialize all state properties, first by awaiting them, and then marshaling them to the requisite gRPC values.
resolvedProps, propertyDeps, _, err := marshalInputs(state)
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Marshal all properties for the RPC call.
keepUnknowns := req.GetDryRun()
rpcProps, err := plugin.MarshalProperties(
resolvedProps,
plugin.MarshalOptions{KeepSecrets: true, KeepUnknowns: keepUnknowns, KeepResources: pulumiCtx.keepResources})
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Convert the property dependencies map for RPC and remove duplicates.
rpcPropertyDeps := make(map[string]*pulumirpc.ConstructResponse_PropertyDependencies)
for k, deps := range propertyDeps {
sort.Slice(deps, func(i, j int) bool { return deps[i] < deps[j] })
urns := make([]string, 0, len(deps))
for i, d := range deps {
if i > 0 && urns[i-1] == string(d) {
continue
}
urns = append(urns, string(d))
}
rpcPropertyDeps[k] = &pulumirpc.ConstructResponse_PropertyDependencies{
Urns: urns,
}
}
return &pulumirpc.ConstructResponse{
Urn: string(rpcURN),
State: rpcProps,
StateDependencies: rpcPropertyDeps,
}, nil
}
// createProviderResource rehydrates the provider reference into a registered ProviderResource,
// otherwise it returns an instance of DependencyProviderResource.
func createProviderResource(ctx *Context, ref string) (ProviderResource, error) {
	// Parse the URN and ID out of the provider reference.
	// The reference has the shape "<urn>::<id>"; the LAST "::" is used so any
	// "::" inside the URN portion stays with the URN.
	lastSep := strings.LastIndex(ref, "::")
	if lastSep == -1 {
		return nil, errors.Errorf("expected '::' in provider reference %s", ref)
	}
	urn := ref[0:lastSep]
	id := ref[lastSep+2:]
	// Unmarshal the provider resource as a resource reference so we get back
	// the intended provider type with its state, if it's been registered.
	resource, err := unmarshalResourceReference(ctx, resource.ResourceReference{
		URN: resource.URN(urn),
		ID:  resource.NewStringProperty(id),
	})
	if err != nil {
		return nil, err
	}
	return resource.(ProviderResource), nil
}
// constructInput pairs a raw property value received over gRPC with the
// resources it depends on, prior to being converted into an Output.
type constructInput struct {
	value resource.PropertyValue // deserialized property value; may contain unknowns/secrets
	deps  []Resource             // resources this input depends on
}
// constructInputsMap returns the inputs as a Map.
// Each raw input is unmarshaled and wrapped in an Output carrying the input's
// dependencies, its known-ness (unknowns occur during previews) and whether it
// is secret.
func constructInputsMap(ctx *Context, inputs map[string]interface{}) (Map, error) {
	result := make(Map, len(inputs))
	for k, v := range inputs {
		ci := v.(*constructInput)
		known := !ci.value.ContainsUnknowns()
		value, secret, err := unmarshalPropertyValue(ctx, ci.value)
		if err != nil {
			return nil, errors.Wrapf(err, "unmarshaling input %s", k)
		}
		// Use the concrete output type registered for the value's Go type, if
		// any; otherwise fall back to AnyOutput.
		resultType := anyOutputType
		if ot, ok := concreteTypeToOutputType.Load(reflect.TypeOf(value)); ok {
			resultType = ot.(reflect.Type)
		}
		output := ctx.newOutput(resultType, ci.deps...)
		output.getState().resolve(value, known, secret, nil)
		result[k] = output
	}
	return result, nil
}
// constructInputsCopyTo sets the inputs on the given args struct.
func constructInputsCopyTo(ctx *Context, inputs map[string]interface{}, args interface{}) error {
if args == nil {
return errors.New("args must not be nil") | if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
return errors.New("args must be a pointer to a struct")
}
argsV, typ = argsV.Elem(), typ.Elem()
for k, v := range inputs {
ci := v.(*constructInput)
for i := 0; i < typ.NumField(); i++ {
fieldV := argsV.Field(i)
if !fieldV.CanSet() {
continue
}
field := typ.Field(i)
tag, has := field.Tag.Lookup("pulumi")
if !has || tag != k {
continue
}
handleField := func(typ reflect.Type, value resource.PropertyValue, deps []Resource) (reflect.Value, error) {
resultType := anyOutputType
if typ.Implements(outputType) {
resultType = typ
} else if typ.Implements(inputType) {
toOutputMethodName := "To" + strings.TrimSuffix(typ.Name(), "Input") + "Output"
if toOutputMethod, found := typ.MethodByName(toOutputMethodName); found {
mt := toOutputMethod.Type
if mt.NumIn() == 0 && mt.NumOut() == 1 && mt.Out(0).Implements(outputType) {
resultType = mt.Out(0)
}
}
}
output := ctx.newOutput(resultType, deps...)
dest := reflect.New(output.ElementType()).Elem()
known := !ci.value.ContainsUnknowns()
secret, err := unmarshalOutput(ctx, value, dest)
if err != nil {
return reflect.Value{}, err
}
output.getState().resolve(dest.Interface(), known, secret, nil)
return reflect.ValueOf(output), nil
}
isInputType := func(typ reflect.Type) bool {
return typ.Implements(outputType) || typ.Implements(inputType)
}
if isInputType(field.Type) {
val, err := handleField(field.Type, ci.value, ci.deps)
if err != nil {
return err
}
fieldV.Set(val)
continue
}
if field.Type.Kind() == reflect.Slice && isInputType(field.Type.Elem()) {
elemType := field.Type.Elem()
length := len(ci.value.ArrayValue())
dest := reflect.MakeSlice(field.Type, length, length)
for i := 0; i < length; i++ {
val, err := handleField(elemType, ci.value.ArrayValue()[i], ci.deps)
if err != nil {
return err
}
dest.Index(i).Set(val)
}
fieldV.Set(dest)
continue
}
if field.Type.Kind() == reflect.Map && isInputType(field.Type.Elem()) {
elemType := field.Type.Elem()
length := len(ci.value.ObjectValue())
dest := reflect.MakeMapWithSize(field.Type, length)
for k, v := range ci.value.ObjectValue() {
key := reflect.ValueOf(string(k))
val, err := handleField(elemType, v, ci.deps)
if err != nil {
return err
}
dest.SetMapIndex(key, val)
}
fieldV.Set(dest)
continue
}
if len(ci.deps) > 0 {
return errors.Errorf(
"%s.%s is typed as %v but must be typed as Input or Output for input %q with dependencies",
typ, field.Name, field.Type, k)
}
dest := reflect.New(field.Type).Elem()
secret, err := unmarshalOutput(ctx, ci.value, dest)
if err != nil {
return errors.Wrapf(err, "unmarshaling input %s", k)
}
if secret {
return errors.Errorf(
"%s.%s is typed as %v but must be typed as Input or Output for secret input %q",
typ, field.Name, field.Type, k)
}
fieldV.Set(reflect.ValueOf(dest.Interface()))
}
}
return nil
}
// newConstructResult converts a resource into its | }
argsV := reflect.ValueOf(args)
typ := argsV.Type() | random_line_split |
provider.go | .Context) (urnSet, error){
func(ctx context.Context) (urnSet, error) {
return dependencyURNs, nil
},
}
ro.Protect = req.GetProtect()
ro.Providers = providers
ro.Parent = parent
})
urn, state, err := constructF(pulumiCtx, req.GetType(), req.GetName(), inputs, opts)
if err != nil {
return nil, err
}
// Wait for async work to finish.
if err = pulumiCtx.wait(); err != nil {
return nil, err
}
rpcURN, _, _, err := urn.ToURNOutput().awaitURN(ctx)
if err != nil {
return nil, err
}
// Serialize all state properties, first by awaiting them, and then marshaling them to the requisite gRPC values.
resolvedProps, propertyDeps, _, err := marshalInputs(state)
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Marshal all properties for the RPC call.
keepUnknowns := req.GetDryRun()
rpcProps, err := plugin.MarshalProperties(
resolvedProps,
plugin.MarshalOptions{KeepSecrets: true, KeepUnknowns: keepUnknowns, KeepResources: pulumiCtx.keepResources})
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Convert the property dependencies map for RPC and remove duplicates.
rpcPropertyDeps := make(map[string]*pulumirpc.ConstructResponse_PropertyDependencies)
for k, deps := range propertyDeps {
sort.Slice(deps, func(i, j int) bool { return deps[i] < deps[j] })
urns := make([]string, 0, len(deps))
for i, d := range deps {
if i > 0 && urns[i-1] == string(d) {
continue
}
urns = append(urns, string(d))
}
rpcPropertyDeps[k] = &pulumirpc.ConstructResponse_PropertyDependencies{
Urns: urns,
}
}
return &pulumirpc.ConstructResponse{
Urn: string(rpcURN),
State: rpcProps,
StateDependencies: rpcPropertyDeps,
}, nil
}
// createProviderResource rehydrates the provider reference into a registered
// ProviderResource, otherwise it returns an instance of
// DependencyProviderResource.
//
// The reference must have the form "<urn>::<id>"; the URN and ID are split on
// the last "::" so URNs that themselves contain "::" are handled correctly.
func createProviderResource(ctx *Context, ref string) (ProviderResource, error) {
	// Parse the URN and ID out of the provider reference.
	lastSep := strings.LastIndex(ref, "::")
	if lastSep == -1 {
		return nil, errors.Errorf("expected '::' in provider reference %s", ref)
	}
	urn := ref[0:lastSep]
	id := ref[lastSep+2:]
	// Unmarshal the provider resource as a resource reference so we get back
	// the intended provider type with its state, if it's been registered.
	// The local is named res (not resource) to avoid shadowing the resource
	// package used above.
	res, err := unmarshalResourceReference(ctx, resource.ResourceReference{
		URN: resource.URN(urn),
		ID:  resource.NewStringProperty(id),
	})
	if err != nil {
		return nil, err
	}
	// Guard the downcast: a blind type assertion would panic if the
	// rehydrated resource is not actually a provider resource.
	provider, ok := res.(ProviderResource)
	if !ok {
		return nil, errors.Errorf("resource %s is not a provider resource", urn)
	}
	return provider, nil
}
// constructInput represents a single raw input to a component construct call:
// the property value received over RPC together with the resources it depends
// on.
type constructInput struct {
// value is the raw property value for this input; it may contain unknowns
// (see ContainsUnknowns) and secrets.
value resource.PropertyValue
// deps are the resources this input depends on; callers pass them to
// ctx.newOutput when wrapping value in an Output.
deps []Resource
}
// constructInputsMap converts the raw construct inputs into a Map of Outputs,
// one entry per input key. Each Output is resolved immediately with the
// unmarshaled value, its known/secret status, and the input's dependencies.
func constructInputsMap(ctx *Context, inputs map[string]interface{}) (Map, error) {
	out := make(Map, len(inputs))
	for name, raw := range inputs {
		in := raw.(*constructInput)
		isKnown := !in.value.ContainsUnknowns()
		val, isSecret, err := unmarshalPropertyValue(ctx, in.value)
		if err != nil {
			return nil, errors.Wrapf(err, "unmarshaling input %s", name)
		}
		// Prefer the registered concrete Output type for the unmarshaled
		// value's dynamic type; fall back to the generic any Output type.
		outType := anyOutputType
		if t, ok := concreteTypeToOutputType.Load(reflect.TypeOf(val)); ok {
			outType = t.(reflect.Type)
		}
		o := ctx.newOutput(outType, in.deps...)
		o.getState().resolve(val, isKnown, isSecret, nil)
		out[name] = o
	}
	return out, nil
}
// constructInputsCopyTo sets the inputs on the given args struct.
//
// args must be a non-nil pointer to a struct. For each input key k, the
// struct's settable fields are scanned for one whose `pulumi:"..."` tag
// equals k, and that field is populated as follows:
//   - An Input/Output-typed field receives an Output resolved with the
//     unmarshaled value, its known/secret status, and the input's deps.
//   - A slice or map field whose element type is Input/Output receives a new
//     slice/map whose elements are such Outputs (map keys become strings).
//   - Any other ("plain") field type is only legal when the input has no
//     dependencies and is not secret; the value is unmarshaled directly into
//     the field, otherwise an error tells the author to use Input/Output.
func constructInputsCopyTo(ctx *Context, inputs map[string]interface{}, args interface{}) error {
if args == nil {
return errors.New("args must not be nil")
}
argsV := reflect.ValueOf(args)
typ := argsV.Type()
if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
return errors.New("args must be a pointer to a struct")
}
// Work with the pointed-to struct value and type from here on.
argsV, typ = argsV.Elem(), typ.Elem()
for k, v := range inputs {
ci := v.(*constructInput)
for i := 0; i < typ.NumField(); i++ {
fieldV := argsV.Field(i)
// Unexported fields cannot be set via reflection; skip them.
if !fieldV.CanSet() {
continue
}
field := typ.Field(i)
tag, has := field.Tag.Lookup("pulumi")
// Only the field whose `pulumi` tag matches this input key is populated.
if !has || tag != k {
continue
}
// handleField wraps a single property value in an Output of the most
// specific Output type derivable from typ and resolves it with deps.
handleField := func(typ reflect.Type, value resource.PropertyValue, deps []Resource) (reflect.Value, error) {
resultType := anyOutputType
if typ.Implements(outputType) {
resultType = typ
} else if typ.Implements(inputType) {
// For an Input type, find the conventional ToXxxOutput method
// (e.g. StringInput -> ToStringOutput) and use its return type
// as the concrete Output type.
toOutputMethodName := "To" + strings.TrimSuffix(typ.Name(), "Input") + "Output"
if toOutputMethod, found := typ.MethodByName(toOutputMethodName); found {
mt := toOutputMethod.Type
if mt.NumIn() == 0 && mt.NumOut() == 1 && mt.Out(0).Implements(outputType) {
resultType = mt.Out(0)
}
}
}
output := ctx.newOutput(resultType, deps...)
dest := reflect.New(output.ElementType()).Elem()
// NOTE(review): known is computed from ci.value (the entire input),
// not from the value parameter, so a slice/map element is treated
// as unknown whenever any part of the containing value is unknown.
// Presumably a conservative choice — confirm upstream.
known := !ci.value.ContainsUnknowns()
secret, err := unmarshalOutput(ctx, value, dest)
if err != nil {
return reflect.Value{}, err
}
output.getState().resolve(dest.Interface(), known, secret, nil)
return reflect.ValueOf(output), nil
}
isInputType := func(typ reflect.Type) bool {
return typ.Implements(outputType) || typ.Implements(inputType)
}
// Case 1: the field itself is an Input/Output.
if isInputType(field.Type) {
val, err := handleField(field.Type, ci.value, ci.deps)
if err != nil {
return err
}
fieldV.Set(val)
continue
}
// Case 2: a slice whose elements are Input/Output.
if field.Type.Kind() == reflect.Slice && isInputType(field.Type.Elem()) {
elemType := field.Type.Elem()
length := len(ci.value.ArrayValue())
dest := reflect.MakeSlice(field.Type, length, length)
for i := 0; i < length; i++ {
val, err := handleField(elemType, ci.value.ArrayValue()[i], ci.deps)
if err != nil {
return err
}
dest.Index(i).Set(val)
}
fieldV.Set(dest)
continue
}
// Case 3: a map whose values are Input/Output.
if field.Type.Kind() == reflect.Map && isInputType(field.Type.Elem()) {
elemType := field.Type.Elem()
length := len(ci.value.ObjectValue())
dest := reflect.MakeMapWithSize(field.Type, length)
for k, v := range ci.value.ObjectValue() {
key := reflect.ValueOf(string(k))
val, err := handleField(elemType, v, ci.deps)
if err != nil {
return err
}
dest.SetMapIndex(key, val)
}
fieldV.Set(dest)
continue
}
// Case 4: a plain field. Dependencies cannot be represented on a plain
// value, so their presence is an error.
if len(ci.deps) > 0 {
return errors.Errorf(
"%s.%s is typed as %v but must be typed as Input or Output for input %q with dependencies",
typ, field.Name, field.Type, k)
}
dest := reflect.New(field.Type).Elem()
secret, err := unmarshalOutput(ctx, ci.value, dest)
if err != nil {
return errors.Wrapf(err, "unmarshaling input %s", k)
}
// Secretness likewise cannot be tracked on a plain field.
if secret {
return errors.Errorf(
"%s.%s is typed as %v but must be typed as Input or Output for secret input %q",
typ, field.Name, field.Type, k)
}
fieldV.Set(reflect.ValueOf(dest.Interface()))
}
}
return nil
}
// newConstructResult converts a resource into its associated URN and state.
func newConstructResult(resource ComponentResource) (URNInput, Input, error) | {
if resource == nil {
return nil, nil, errors.New("resource must not be nil")
}
resourceV := reflect.ValueOf(resource)
typ := resourceV.Type()
if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
return nil, nil, errors.New("resource must be a pointer to a struct")
}
resourceV, typ = resourceV.Elem(), typ.Elem()
state := make(Map)
for i := 0; i < typ.NumField(); i++ {
fieldV := resourceV.Field(i)
if !fieldV.CanInterface() {
continue
}
field := typ.Field(i)
tag, has := field.Tag.Lookup("pulumi") | identifier_body | |
build_server.go | -> "net/http"
}
}
wsparams.Query = q.String()
b, err := json.Marshal(wsparams)
if err != nil {
return nil, err
}
req.Params = (*json.RawMessage)(&b)
}
if req.Method == "workspace/xreferences" {
// Parse the parameters and if a dirs hint is present, rewrite the
// URIs.
var p lspext.WorkspaceReferencesParams
if err := json.Unmarshal(*req.Params, &p); err != nil {
return nil, err
}
dirsHint, haveDirsHint := p.Hints["dirs"]
if haveDirsHint {
dirs := dirsHint.([]interface{})
for i, dir := range dirs {
dirs[i] = rewriteURIFromClient(lsp.DocumentURI(dir.(string)))
}
// Arbitrarily chosen limit on the number of directories that
// may be searched by workspace/xreferences. Large repositories
// like kubernetes would simply take too long (>15s) to fetch
// their dependencies and typecheck them otherwise. This number
// was chosen as a 'sweet-spot' based on kubernetes solely.
if len(dirs) > 15 {
dirs = dirs[:15]
}
dirsHint = dirs
p.Hints["dirs"] = dirs
b, err := json.Marshal(p)
if err != nil {
return nil, err
}
req.Params = (*json.RawMessage)(&b)
}
}
var result interface{}
if err := h.callLangServer(ctx, conn, req.Method, req.ID, req.Params, &result); err != nil {
return nil, err
}
// (Un-)rewrite URI fields in the result. E.g.:
//
// file:///src/github.com/user/repo/dir/file.go -> file:///dir/file.go
var walkErr error
lspext.WalkURIFields(result, nil, func(uri lsp.DocumentURI) lsp.DocumentURI {
// HACK: Work around https://github.com/sourcegraph/sourcegraph/issues/10541 by
// converting uri == "file://" (which is actually an empty URI in the langserver result)
// to "file:///" instead of emitting an error. This will likely cause the result to be displayed
// with an error on the client, but it's better than the whole
// textDocument/implementation request failing.
if req.Method == "textDocument/implementation" && (uri == "" || uri == "file://") {
return "file:///"
}
newURI, err := h.rewriteURIFromLangServer(uri)
if err != nil {
walkErr = err
}
return newURI
})
if walkErr != nil {
return nil, fmt.Errorf("%s (in Go language server response)", walkErr)
}
return result, nil
}
}
// rewriteURIFromLangServer translates a file URI produced by the wrapped
// language server (which sees files laid out inside the build server's
// virtual GOROOT/GOPATH) back into a URI meaningful to the client. Depending
// on where the path points, the result is a workspace-relative file:/// URI,
// a URI derived from the client's original root URI, a VCS URI (e.g. git://)
// for an external dependency, or an "unresolved:" URI when the path cannot
// be attributed to any known location. Non-file schemes are an error.
func (h *BuildHandler) rewriteURIFromLangServer(uri lsp.DocumentURI) (lsp.DocumentURI, error) {
u, err := url.Parse(string(uri))
if err != nil {
return "", err
}
if !u.IsAbs() {
return "", fmt.Errorf("invalid relative URI %q", u)
}
switch u.Scheme {
case "file":
if !filepath.IsAbs(u.Path) {
return "", fmt.Errorf("invalid relative file path in URI %q", uri)
}
// Refers to a file in the Go stdlib?
if util.PathHasPrefix(u.Path, goroot) {
fileInGoStdlib := util.PathTrimPrefix(u.Path, goroot)
// An empty root import path means the workspace itself is the Go
// stdlib repository.
if h.rootImportPath == "" {
if h.clientUsesFileSchemeWithinWorkspace {
// The workspace is the Go stdlib and this refers to
// something in the Go stdlib, so let's use file:///
// so that the client adds our current rev, instead
// of using runtime.Version() (which is not
// necessarily the commit of the Go stdlib we're
// analyzing).
return lsp.DocumentURI("file:///" + fileInGoStdlib), nil
}
if h.originalRootURI == nil {
return uri, nil
}
// NOTE(review): the url.Parse error is ignored here;
// h.originalRootURI presumably round-trips through String()
// without error — confirm.
newURI, _ := url.Parse(h.originalRootURI.String())
newURI.Fragment = fileInGoStdlib
return lsp.DocumentURI(newURI.String()), nil
}
// Otherwise point at the upstream Go repository at the runtime's
// stdlib revision.
return lsp.DocumentURI("git://github.com/golang/go?" + gosrc.RuntimeVersion + "#" + fileInGoStdlib), nil
}
// Refers to a file in the same workspace?
if util.PathHasPrefix(u.Path, h.RootFSPath) {
if h.clientUsesFileSchemeWithinWorkspace {
pathInThisWorkspace := util.PathTrimPrefix(u.Path, h.RootFSPath)
return lsp.DocumentURI("file:///" + pathInThisWorkspace), nil
}
if h.originalRootURI == nil {
return uri, nil
}
newURI, _ := url.Parse(h.originalRootURI.String())
newURI.Fragment = util.PathTrimPrefix(u.Path, h.RootFSPath)
return lsp.DocumentURI(newURI.String()), nil
}
// Refers to a file in the GOPATH (that's from another repo)?
if gopathSrcDir := path.Join(gopath, "src"); util.PathHasPrefix(u.Path, gopathSrcDir) {
p := util.PathTrimPrefix(u.Path, gopathSrcDir) // "github.com/foo/bar/baz/qux.go"
// Go through the list of directories we have
// mounted. We make a copy instead of holding the lock
// in the for loop to avoid holding the lock for
// longer than necessary.
h.HandlerShared.Mu.Lock()
deps := make([]*gosrc.Directory, len(h.gopathDeps))
copy(deps, h.gopathDeps)
h.HandlerShared.Mu.Unlock()
// Find the dependency whose ProjectRoot components prefix p; the
// loop does not break, so the last matching dependency wins.
// NOTE(review): if a dep.ProjectRoot has more components than p,
// the slice below would panic — presumably prevented by how deps
// are mounted; confirm.
var d *gosrc.Directory
for _, dep := range deps {
pathComponents := strings.Split(p, "/")
depComponents := strings.Split(dep.ProjectRoot, "/")
if reflect.DeepEqual(pathComponents[:len(depComponents)], depComponents) {
d = dep
}
}
if d != nil {
rev := d.Rev
if rev == "" {
rev = "HEAD"
}
i := strings.Index(d.CloneURL, "://")
if i >= 0 {
repo := d.CloneURL[i+len("://"):]
path := strings.TrimPrefix(strings.TrimPrefix(p, d.ProjectRoot), "/")
// HACK
// In some cases, we see import paths of the form "blah/blah.git" or "blah/blah.git/blah/blah".
// The name for the repository containing such a package is "blah/blah", so we strip the ".git"
// from the location URI here. In addition, we strip any leading ".git/" from the path that
// might get added as a side-effect of stripping the suffix.
repo = strings.TrimSuffix(repo, ".git")
path = strings.TrimPrefix(path, ".git/")
return lsp.DocumentURI(fmt.Sprintf("%s://%s?%s#%s", d.VCS, repo, rev, path)), nil
}
}
}
// The path is not in the stdlib, the workspace, or a mounted dependency.
return lsp.DocumentURI("unresolved:" + u.Path), nil
default:
return "", fmt.Errorf("invalid non-file URI %q", uri)
}
}
// callLangServer forwards the (usually modified) request to the wrapped Go
// language server and unmarshals its response into result when result is
// non-nil. Do not send notifications via this interface! Rather just
// directly pass on the jsonrpc2.Request via h.lang.Handle.
//
// Although this bypasses the JSON-RPC wire protocol (the request is handed
// over in-memory for simplicity/speed), it behaves in the same way as though
// the peer language server were remote: the response is round-tripped
// through JSON so the build and language servers cannot share memory and
// break the abstraction that they are in separate memory spaces.
func (h *BuildHandler) callLangServer(ctx context.Context, conn *jsonrpc2.Conn, method string, id jsonrpc2.ID, params, result interface{}) error {
	langReq := jsonrpc2.Request{
		ID:     id,
		Method: method,
	}
	if err := langReq.SetParams(params); err != nil {
		return err
	}
	// Wrap the connection so URIs in any callbacks are rewritten for the client.
	peer := &jsonrpc2ConnImpl{rewriteURI: h.rewriteURIFromLangServer, conn: conn}
	raw, err := h.lang.Handle(ctx, peer, &langReq)
	if err != nil {
		return err
	}
	// Round-trip the result through JSON to keep the memory spaces separate.
	encoded, err := json.Marshal(raw)
	if err != nil {
		return err
	}
	if result == nil {
		return nil
	}
	return json.Unmarshal(encoded, result)
}
// Close implements io.Closer
func (h *BuildHandler) Close() error | {
var result error
for _, closer := range h.closers {
err := closer.Close()
if err != nil {
result = multierror.Append(result, err)
}
}
return result
} | identifier_body | |
build_server.go | po").
span.SetTag("originalRootPath", params.OriginalRootURI)
fs, closer, err := RemoteFS(ctx, params)
if err != nil {
return nil, err
}
h.closers = append(h.closers, closer)
langInitParams, err := determineEnvironment(ctx, fs, params)
if err != nil {
return nil, err
}
log.Printf("Detected root import path %q for %q", langInitParams.RootImportPath, params.OriginalRootURI)
h.rootImportPath = langInitParams.RootImportPath
if err := h.reset(¶ms, conn, langInitParams.Root()); err != nil {
return nil, err
}
rootPath := strings.TrimPrefix(string(langInitParams.Root()), "file://")
h.FS.Bind(rootPath, fs, "/", ctxvfs.BindAfter)
var langInitResp lsp.InitializeResult
if err := h.callLangServer(ctx, conn, req.Method, req.ID, langInitParams, &langInitResp); err != nil {
return nil, err
}
return langInitResp, nil
case req.Method == "shutdown":
h.ShutDown()
return nil, nil
case req.Method == "exit":
conn.Close()
return nil, nil
case req.Method == "$/cancelRequest":
// Our caching layer is pretty bad, and can easily be poisened
// if we cancel something. So we do not pass on cancellation
// requests.
return nil, nil
case req.Method == "workspace/xpackages":
return h.handleWorkspacePackages(ctx, conn, req)
case req.Method == "workspace/xdependencies":
// The same as h.fetchAndSendDepsOnce except it operates locally to the
// request.
fetchAndSendDepsOnces := make(map[string]*sync.Once) // key is file URI
localFetchAndSendDepsOnce := func(fileURI string) *sync.Once {
once, ok := fetchAndSendDepsOnces[fileURI]
if !ok {
once = new(sync.Once)
fetchAndSendDepsOnces[fileURI] = once
}
return once
}
var (
mu sync.Mutex
finalReferences []*lspext.DependencyReference
references = make(map[string]*lspext.DependencyReference)
)
emitRef := func(path string, r goDependencyReference) {
// If the _reference_ to a definition is made from inside a
// vendored package, or from outside of the repository itself,
// exclude it.
if util.IsVendorDir(path) || !util.PathHasPrefix(path, h.RootFSPath) {
return
}
// If the package being referenced is defined in the repo, and
// it is NOT a vendor package, then exclude it.
if !r.vendor && util.PathHasPrefix(filepath.Join(gopath, "src", r.absolute), h.RootFSPath) {
return
}
newURI, err := h.rewriteURIFromLangServer(lsp.DocumentURI("file://" + path))
if err != nil {
log.Printf("error rewriting URI from language server: %s", err)
return
}
mu.Lock()
defer mu.Unlock()
existing, ok := references[r.absolute]
if !ok {
// Create a new dependency reference.
ref := &lspext.DependencyReference{
Attributes: r.attributes(),
Hints: map[string]interface{}{
"dirs": []string{string(newURI)},
},
}
finalReferences = append(finalReferences, ref)
references[r.absolute] = ref
return
}
// Append to the existing dependency reference's dirs list.
dirs := existing.Hints["dirs"].([]string)
dirs = append(dirs, string(newURI))
existing.Hints["dirs"] = dirs
}
// We need every transitive dependency, for every Go package in the
// repository.
var (
w = ctxvfs.Walk(ctx, h.RootFSPath, h.FS)
dc = newDepCache()
)
dc.collectReferences = true
for w.Step() {
if path.Ext(w.Path()) == ".go" {
d := path.Dir(w.Path())
localFetchAndSendDepsOnce(d).Do(func() {
if err := h.fetchTransitiveDepsOfFile(ctx, lsp.DocumentURI("file://"+d), dc); err != nil {
log.Printf("Warning: fetching deps for dir %s: %s.", d, err)
}
})
}
}
dc.references(emitRef, 1)
return finalReferences, nil
default:
// Pass the request onto the lang server.
// Rewrite URI fields in params to refer to file paths inside
// the GOPATH at the appropriate import path directory. E.g.:
//
// file:///dir/file.go -> file:///src/github.com/user/repo/dir/file.go
var urisInRequest []lsp.DocumentURI // rewritten
var params interface{}
if req.Params != nil {
if err := json.Unmarshal(*req.Params, ¶ms); err != nil {
return nil, err
}
}
rewriteURIFromClient := func(uri lsp.DocumentURI) lsp.DocumentURI {
var path string
if h.clientUsesFileSchemeWithinWorkspace {
if !strings.HasPrefix(string(uri), "file:///") {
return uri // refers to a resource outside of this workspace
}
path = strings.TrimPrefix(string(uri), "file://")
} else {
currentURL, err := url.Parse(string(uri))
if err != nil {
return uri
}
if h.originalRootURI == nil {
return uri
}
path = currentURL.Fragment
currentURL.Fragment = ""
if *currentURL != *h.originalRootURI {
return uri // refers to a resource outside of this workspace
}
}
path = pathpkg.Join(h.RootFSPath, path)
if !util.PathHasPrefix(path, h.RootFSPath) {
panic(fmt.Sprintf("file path %q must have prefix %q (file URI is %q, root URI is %q)", path, h.RootFSPath, uri, h.init.RootPath))
}
newURI := lsp.DocumentURI("file://" + path)
urisInRequest = append(urisInRequest, newURI) // collect
return newURI
}
lspext.WalkURIFields(params, nil, rewriteURIFromClient)
// Store back to req.Params to avoid 2 different versions of the data.
if req.Params != nil {
b, err := json.Marshal(params)
if err != nil {
return nil, err
}
req.Params = (*json.RawMessage)(&b)
}
// Immediately handle notifications. We do not have a response
// to rewrite, so we can pass it on directly and avoid the
// cost of marshalling again. NOTE: FS operations are frequent
// and are notifications.
if req.Notif {
wrappedConn := &jsonrpc2ConnImpl{rewriteURI: h.rewriteURIFromLangServer, conn: conn}
// Avoid extracting the tracer again, it is already attached to ctx.
req.Meta = nil
return h.lang.Handle(ctx, wrappedConn, req)
}
// workspace/symbol queries must have their `dir:` query filter
// rewritten for github.com/golang/go due to its specialized directory
// structure. e.g. `dir:src/net/http` should work, but the language
// server will expect `dir:net/http` as any real/valid Go project will
// have package paths align with the directory structure.
if req.Method == "workspace/symbol" && strings.HasPrefix(string(h.init.OriginalRootURI), "git://github.com/golang/go") {
var wsparams lspext.WorkspaceSymbolParams
if err := json.Unmarshal(*req.Params, &wsparams); err != nil {
return nil, err
}
q := langserver.ParseQuery(wsparams.Query)
if q.Filter == langserver.FilterDir {
// If the query does not start with `src/` and it is a request
// for a stdlib dir, it should return no results (the filter is
// dir, not package path).
if gosrc.IsStdlibPkg(q.Dir) && !strings.HasPrefix(q.Dir, "src") {
q.Dir = "sginvalid"
} else {
q.Dir = util.PathTrimPrefix(q.Dir, "src") // "src/net/http" -> "net/http"
}
}
wsparams.Query = q.String()
b, err := json.Marshal(wsparams)
if err != nil {
return nil, err
}
req.Params = (*json.RawMessage)(&b)
}
if req.Method == "workspace/xreferences" {
// Parse the parameters and if a dirs hint is present, rewrite the
// URIs.
var p lspext.WorkspaceReferencesParams | random_line_split | ||
build_server.go | (ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) (result interface{}, err error) {
// Prevent any uncaught panics from taking the entire server down.
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("unexpected panic: %v", r)
// Same as net/http
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("panic serving %v: %v\n%s", req.Method, r, buf)
return
}
}()
h.mu.Lock()
if req.Method != "initialize" && h.init == nil {
h.mu.Unlock()
return nil, errors.New("server must be initialized")
}
h.mu.Unlock()
if err := h.CheckReady(); err != nil {
if req.Method == "exit" {
err = nil
}
return nil, err
}
h.InitTracer(conn)
span, ctx, err := h.SpanForRequest(ctx, "build", req, opentracing.Tags{"mode": "go"})
if err != nil {
return nil, err
}
defer func() {
if err != nil {
ext.Error.Set(span, true)
span.LogFields(otlog.Error(err))
}
span.Finish()
}()
if Debug && h.init != nil {
var b []byte
if req.Params != nil && !req.Notif {
b = []byte(*req.Params)
}
log.Printf(">>> %s %s %s %s", h.init.OriginalRootURI, req.ID, req.Method, string(b))
defer func(t time.Time) {
resultJSON, err := json.Marshal(result)
var resultOrError string
if err == nil {
resultOrError = string(resultJSON)
} else {
resultOrError = err.Error()
}
log.Printf("<<< %s %s %s %dms %s", h.init.OriginalRootURI, req.ID, req.Method, time.Since(t).Nanoseconds()/int64(time.Millisecond), resultOrError)
}(time.Now())
}
switch {
case req.Method == "initialize":
if h.init != nil {
return nil, errors.New("build server is already initialized")
}
if req.Params == nil {
return nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}
}
var params lspext.InitializeParams
if err := json.Unmarshal(*req.Params, ¶ms); err != nil {
return nil, err
}
// In the `rootUri`, clients can send either:
//
// - A `file://` URI, which indicates that:
// - Same-workspace file paths will also be `file://` URIs
// - Out-of-workspace file paths will be `git://` URIs
// - `originalRootUri` is present
// - A `git://` URI, which indicates that:
// - Both same-workspace and out-of-workspace file paths will be non-`file://` URIs
// - `originalRootUri` is absent and `rootUri` contains the original root URI
if strings.HasPrefix(string(params.RootURI), "file://") {
h.clientUsesFileSchemeWithinWorkspace = true
} else {
params.OriginalRootURI = params.RootURI
params.RootURI = "file:///"
h.clientUsesFileSchemeWithinWorkspace = false
}
if Debug {
var b []byte
if req.Params != nil {
b = []byte(*req.Params)
}
log.Printf(">>> %s %s %s %s", params.OriginalRootURI, req.ID, req.Method, string(b))
defer func(t time.Time) {
log.Printf("<<< %s %s %s %dms", params.OriginalRootURI, req.ID, req.Method, time.Since(t).Nanoseconds()/int64(time.Millisecond))
}(time.Now())
}
// Determine the root import path of this workspace (e.g., "github.com/user/repo").
span.SetTag("originalRootPath", params.OriginalRootURI)
fs, closer, err := RemoteFS(ctx, params)
if err != nil {
return nil, err
}
h.closers = append(h.closers, closer)
langInitParams, err := determineEnvironment(ctx, fs, params)
if err != nil {
return nil, err
}
log.Printf("Detected root import path %q for %q", langInitParams.RootImportPath, params.OriginalRootURI)
h.rootImportPath = langInitParams.RootImportPath
if err := h.reset(¶ms, conn, langInitParams.Root()); err != nil {
return nil, err
}
rootPath := strings.TrimPrefix(string(langInitParams.Root()), "file://")
h.FS.Bind(rootPath, fs, "/", ctxvfs.BindAfter)
var langInitResp lsp.InitializeResult
if err := h.callLangServer(ctx, conn, req.Method, req.ID, langInitParams, &langInitResp); err != nil {
return nil, err
}
return langInitResp, nil
case req.Method == "shutdown":
h.ShutDown()
return nil, nil
case req.Method == "exit":
conn.Close()
return nil, nil
case req.Method == "$/cancelRequest":
// Our caching layer is pretty bad, and can easily be poisened
// if we cancel something. So we do not pass on cancellation
// requests.
return nil, nil
case req.Method == "workspace/xpackages":
return h.handleWorkspacePackages(ctx, conn, req)
case req.Method == "workspace/xdependencies":
// The same as h.fetchAndSendDepsOnce except it operates locally to the
// request.
fetchAndSendDepsOnces := make(map[string]*sync.Once) // key is file URI
localFetchAndSendDepsOnce := func(fileURI string) *sync.Once {
once, ok := fetchAndSendDepsOnces[fileURI]
if !ok {
once = new(sync.Once)
fetchAndSendDepsOnces[fileURI] = once
}
return once
}
var (
mu sync.Mutex
finalReferences []*lspext.DependencyReference
references = make(map[string]*lspext.DependencyReference)
)
emitRef := func(path string, r goDependencyReference) {
// If the _reference_ to a definition is made from inside a
// vendored package, or from outside of the repository itself,
// exclude it.
if util.IsVendorDir(path) || !util.PathHasPrefix(path, h.RootFSPath) {
return
}
// If the package being referenced is defined in the repo, and
// it is NOT a vendor package, then exclude it.
if !r.vendor && util.PathHasPrefix(filepath.Join(gopath, "src", r.absolute), h.RootFSPath) {
return
}
newURI, err := h.rewriteURIFromLangServer(lsp.DocumentURI("file://" + path))
if err != nil {
log.Printf("error rewriting URI from language server: %s", err)
return
}
mu.Lock()
defer mu.Unlock()
existing, ok := references[r.absolute]
if !ok {
// Create a new dependency reference.
ref := &lspext.DependencyReference{
Attributes: r.attributes(),
Hints: map[string]interface{}{
"dirs": []string{string(newURI)},
},
}
finalReferences = append(finalReferences, ref)
references[r.absolute] = ref
return
}
// Append to the existing dependency reference's dirs list.
dirs := existing.Hints["dirs"].([]string)
dirs = append(dirs, string(newURI))
existing.Hints["dirs"] = dirs
}
// We need every transitive dependency, for every Go package in the
// repository.
var (
w = ctxvfs.Walk(ctx, h.RootFSPath, h.FS)
dc = newDepCache()
)
dc.collectReferences = true
for w.Step() {
if path.Ext(w.Path()) == ".go" {
d := path.Dir(w.Path())
localFetchAndSendDepsOnce(d).Do(func() {
if err := h.fetchTransitiveDepsOfFile(ctx, lsp.DocumentURI("file://"+d), dc); err != nil {
log.Printf("Warning: fetching deps for dir %s: %s.", d, err)
}
})
}
}
dc.references(emitRef, 1)
return finalReferences, nil
default:
// Pass the request onto the lang server.
// Rewrite URI fields in params to refer to file paths inside
// the GOPATH at the appropriate import path directory. E.g.:
//
// file:///dir/file.go -> file:///src/github.com/user/repo | Handle | identifier_name | |
build_server.go | == nil {
h.mu.Unlock()
return nil, errors.New("server must be initialized")
}
h.mu.Unlock()
if err := h.CheckReady(); err != nil {
if req.Method == "exit" |
return nil, err
}
h.InitTracer(conn)
span, ctx, err := h.SpanForRequest(ctx, "build", req, opentracing.Tags{"mode": "go"})
if err != nil {
return nil, err
}
defer func() {
if err != nil {
ext.Error.Set(span, true)
span.LogFields(otlog.Error(err))
}
span.Finish()
}()
if Debug && h.init != nil {
var b []byte
if req.Params != nil && !req.Notif {
b = []byte(*req.Params)
}
log.Printf(">>> %s %s %s %s", h.init.OriginalRootURI, req.ID, req.Method, string(b))
defer func(t time.Time) {
resultJSON, err := json.Marshal(result)
var resultOrError string
if err == nil {
resultOrError = string(resultJSON)
} else {
resultOrError = err.Error()
}
log.Printf("<<< %s %s %s %dms %s", h.init.OriginalRootURI, req.ID, req.Method, time.Since(t).Nanoseconds()/int64(time.Millisecond), resultOrError)
}(time.Now())
}
switch {
case req.Method == "initialize":
if h.init != nil {
return nil, errors.New("build server is already initialized")
}
if req.Params == nil {
return nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}
}
var params lspext.InitializeParams
if err := json.Unmarshal(*req.Params, ¶ms); err != nil {
return nil, err
}
// In the `rootUri`, clients can send either:
//
// - A `file://` URI, which indicates that:
// - Same-workspace file paths will also be `file://` URIs
// - Out-of-workspace file paths will be `git://` URIs
// - `originalRootUri` is present
// - A `git://` URI, which indicates that:
// - Both same-workspace and out-of-workspace file paths will be non-`file://` URIs
// - `originalRootUri` is absent and `rootUri` contains the original root URI
if strings.HasPrefix(string(params.RootURI), "file://") {
h.clientUsesFileSchemeWithinWorkspace = true
} else {
params.OriginalRootURI = params.RootURI
params.RootURI = "file:///"
h.clientUsesFileSchemeWithinWorkspace = false
}
if Debug {
var b []byte
if req.Params != nil {
b = []byte(*req.Params)
}
log.Printf(">>> %s %s %s %s", params.OriginalRootURI, req.ID, req.Method, string(b))
defer func(t time.Time) {
log.Printf("<<< %s %s %s %dms", params.OriginalRootURI, req.ID, req.Method, time.Since(t).Nanoseconds()/int64(time.Millisecond))
}(time.Now())
}
// Determine the root import path of this workspace (e.g., "github.com/user/repo").
span.SetTag("originalRootPath", params.OriginalRootURI)
fs, closer, err := RemoteFS(ctx, params)
if err != nil {
return nil, err
}
h.closers = append(h.closers, closer)
langInitParams, err := determineEnvironment(ctx, fs, params)
if err != nil {
return nil, err
}
log.Printf("Detected root import path %q for %q", langInitParams.RootImportPath, params.OriginalRootURI)
h.rootImportPath = langInitParams.RootImportPath
if err := h.reset(¶ms, conn, langInitParams.Root()); err != nil {
return nil, err
}
rootPath := strings.TrimPrefix(string(langInitParams.Root()), "file://")
h.FS.Bind(rootPath, fs, "/", ctxvfs.BindAfter)
var langInitResp lsp.InitializeResult
if err := h.callLangServer(ctx, conn, req.Method, req.ID, langInitParams, &langInitResp); err != nil {
return nil, err
}
return langInitResp, nil
case req.Method == "shutdown":
h.ShutDown()
return nil, nil
case req.Method == "exit":
conn.Close()
return nil, nil
case req.Method == "$/cancelRequest":
// Our caching layer is pretty bad, and can easily be poisened
// if we cancel something. So we do not pass on cancellation
// requests.
return nil, nil
case req.Method == "workspace/xpackages":
return h.handleWorkspacePackages(ctx, conn, req)
case req.Method == "workspace/xdependencies":
// The same as h.fetchAndSendDepsOnce except it operates locally to the
// request.
fetchAndSendDepsOnces := make(map[string]*sync.Once) // key is file URI
localFetchAndSendDepsOnce := func(fileURI string) *sync.Once {
once, ok := fetchAndSendDepsOnces[fileURI]
if !ok {
once = new(sync.Once)
fetchAndSendDepsOnces[fileURI] = once
}
return once
}
var (
mu sync.Mutex
finalReferences []*lspext.DependencyReference
references = make(map[string]*lspext.DependencyReference)
)
emitRef := func(path string, r goDependencyReference) {
// If the _reference_ to a definition is made from inside a
// vendored package, or from outside of the repository itself,
// exclude it.
if util.IsVendorDir(path) || !util.PathHasPrefix(path, h.RootFSPath) {
return
}
// If the package being referenced is defined in the repo, and
// it is NOT a vendor package, then exclude it.
if !r.vendor && util.PathHasPrefix(filepath.Join(gopath, "src", r.absolute), h.RootFSPath) {
return
}
newURI, err := h.rewriteURIFromLangServer(lsp.DocumentURI("file://" + path))
if err != nil {
log.Printf("error rewriting URI from language server: %s", err)
return
}
mu.Lock()
defer mu.Unlock()
existing, ok := references[r.absolute]
if !ok {
// Create a new dependency reference.
ref := &lspext.DependencyReference{
Attributes: r.attributes(),
Hints: map[string]interface{}{
"dirs": []string{string(newURI)},
},
}
finalReferences = append(finalReferences, ref)
references[r.absolute] = ref
return
}
// Append to the existing dependency reference's dirs list.
dirs := existing.Hints["dirs"].([]string)
dirs = append(dirs, string(newURI))
existing.Hints["dirs"] = dirs
}
// We need every transitive dependency, for every Go package in the
// repository.
var (
w = ctxvfs.Walk(ctx, h.RootFSPath, h.FS)
dc = newDepCache()
)
dc.collectReferences = true
for w.Step() {
if path.Ext(w.Path()) == ".go" {
d := path.Dir(w.Path())
localFetchAndSendDepsOnce(d).Do(func() {
if err := h.fetchTransitiveDepsOfFile(ctx, lsp.DocumentURI("file://"+d), dc); err != nil {
log.Printf("Warning: fetching deps for dir %s: %s.", d, err)
}
})
}
}
dc.references(emitRef, 1)
return finalReferences, nil
default:
// Pass the request onto the lang server.
// Rewrite URI fields in params to refer to file paths inside
// the GOPATH at the appropriate import path directory. E.g.:
//
// file:///dir/file.go -> file:///src/github.com/user/repo/dir/file.go
var urisInRequest []lsp.DocumentURI // rewritten
var params interface{}
if req.Params != nil {
if err := json.Unmarshal(*req.Params, ¶ms); err != nil {
return nil, err
}
}
rewriteURIFromClient := func(uri lsp.DocumentURI) lsp.DocumentURI {
var path string
if h.clientUsesFileSchemeWithinWorkspace {
if !strings.HasPrefix(string(uri), "file:///") {
return uri // refers to a resource outside of this workspace
}
path = strings.TrimPrefix(string(uri), "file://")
} else {
currentURL, err := url.Parse(string(uri))
| {
err = nil
} | conditional_block |
cdn_log.go | ) {
s.waitingJobs(ctx)
})
go func() {
for {
conn, err := listener.Accept()
if err != nil {
telemetry.Record(ctx, Errors, 1)
log.Error(ctx, "unable to accept connection: %v", err)
return
}
sdk.GoRoutine(ctx, "cdn-logServer", func(ctx context.Context) {
telemetry.Record(ctx, Hits, 1)
s.handleConnection(ctx, conn)
})
}
}()
}
func (s *Service) handleConnection(ctx context.Context, conn net.Conn) {
defer func() {
_ = conn.Close()
}()
bufReader := bufio.NewReader(conn)
for {
bytes, err := bufReader.ReadBytes(byte(0))
if err != nil {
log.Info(ctx, "client left")
return
}
// remove byte(0)
bytes = bytes[:len(bytes)-1]
if err := s.handleLogMessage(ctx, bytes); err != nil {
telemetry.Record(ctx, Errors, 1)
log.Error(ctx, "cdn.log> %v", err)
continue
}
}
}
func (s *Service) handleLogMessage(ctx context.Context, messageReceived []byte) error {
m := hook.Message{}
if err := m.UnmarshalJSON(messageReceived); err != nil {
return sdk.WrapError(err, "unable to unmarshall gelf message: %s", string(messageReceived))
}
sig, ok := m.Extra["_"+log.ExtraFieldSignature]
if !ok || sig == "" {
return sdk.WithStack(fmt.Errorf("signature not found on log message: %+v", m))
}
// Get worker datas
var signature log.Signature
if err := jws.UnsafeParse(sig.(string), &signature); err != nil {
return err
}
switch {
case signature.Worker != nil:
telemetry.Record(ctx, WorkerLogReceived, 1)
return s.handleWorkerLog(ctx, signature.Worker.WorkerID, sig, m)
case signature.Service != nil:
telemetry.Record(ctx, ServiceLogReceived, 1)
return s.handleServiceLog(ctx, signature.Service.HatcheryID, signature.Service.HatcheryName, signature.Service.WorkerName, sig, m)
default:
return sdk.WithStack(sdk.ErrWrongRequest)
}
}
func (s *Service) handleWorkerLog(ctx context.Context, workerID string, sig interface{}, m hook.Message) error {
var signature log.Signature
var workerData sdk.Worker
cacheData, ok := logCache.Get(fmt.Sprintf("worker-%s", workerID))
if !ok {
var err error
workerData, err = s.getWorker(ctx, workerID)
if err != nil {
return err
}
} else {
workerData = cacheData.(sdk.Worker)
}
if err := jws.Verify(workerData.PrivateKey, sig.(string), &signature); err != nil {
return err
}
if workerData.JobRunID == nil || *workerData.JobRunID != signature.JobID {
return sdk.WithStack(sdk.ErrForbidden)
}
hm := handledMessage{
Signature: signature,
Msg: m,
}
cacheKey := cache.Key(keyJobLogQueue, strconv.Itoa(int(signature.JobID)))
if err := s.Cache.Enqueue(cacheKey, hm); err != nil {
return err
}
return nil
}
type handledMessage struct {
Signature log.Signature
Msg hook.Message
}
func buildMessage(signature log.Signature, m hook.Message) string {
logDate := time.Unix(0, int64(m.Time*1e9))
logs := sdk.Log{
JobID: signature.JobID,
LastModified: &logDate,
NodeRunID: signature.NodeRunID,
Start: &logDate,
StepOrder: signature.Worker.StepOrder,
Val: m.Full,
}
if !strings.HasSuffix(logs.Val, "\n") {
logs.Val += "\n"
}
var lvl string
switch m.Level {
case int32(hook.LOG_DEBUG):
lvl = "DEBUG"
case int32(hook.LOG_INFO):
lvl = "INFO"
case int32(hook.LOG_NOTICE):
lvl = "NOTICE"
case int32(hook.LOG_WARNING):
lvl = "WARN"
case int32(hook.LOG_ERR):
lvl = "ERROR"
case int32(hook.LOG_CRIT):
lvl = "CRITICAL"
case int32(hook.LOG_ALERT):
lvl = "ALERT"
case int32(hook.LOG_EMERG):
lvl = "EMERGENCY"
}
logs.Val = fmt.Sprintf("[%s] %s", lvl, logs.Val)
return logs.Val
}
func (s *Service) handleServiceLog(ctx context.Context, hatcheryID int64, hatcheryName string, workerName string, sig interface{}, m hook.Message) error {
var signature log.Signature
var pk *rsa.PublicKey
cacheData, ok := logCache.Get(fmt.Sprintf("hatchery-key-%d", hatcheryID))
if !ok {
var err error
pk, err = s.getHatchery(ctx, hatcheryID, hatcheryName)
if err != nil {
return err
}
} else {
pk = cacheData.(*rsa.PublicKey)
}
if err := jws.Verify(pk, sig.(string), &signature); err != nil {
return err
}
// Verified that worker has been spawn by this hatchery
workerCacheKey := fmt.Sprintf("service-worker-%s", workerName)
_, ok = logCache.Get(workerCacheKey)
if !ok {
// Verify that the worker has been spawn by this hatchery
wk, err := worker.LoadWorkerByName(ctx, s.Db, workerName)
if err != nil {
return err
}
if wk.HatcheryID == nil {
return sdk.WrapError(sdk.ErrWrongRequest, "hatchery %d cannot send service log for worker %s started by %s that is no more linked to an hatchery", signature.Service.HatcheryID, wk.ID, wk.HatcheryName)
}
if *wk.HatcheryID != signature.Service.HatcheryID {
return sdk.WrapError(sdk.ErrWrongRequest, "cannot send service log for worker %s from hatchery (expected: %d/actual: %d)", wk.ID, *wk.HatcheryID, signature.Service.HatcheryID)
}
logCache.Set(workerCacheKey, true, gocache.DefaultExpiration)
}
nodeRunJob, err := workflow.LoadNodeJobRun(ctx, s.Db, s.Cache, signature.JobID)
if err != nil {
return err
}
logs := sdk.ServiceLog{
ServiceRequirementName: signature.Service.RequirementName,
ServiceRequirementID: signature.Service.RequirementID,
WorkflowNodeJobRunID: signature.JobID,
WorkflowNodeRunID: nodeRunJob.WorkflowNodeRunID,
Val: m.Full,
}
if !strings.HasSuffix(logs.Val, "\n") {
logs.Val += "\n"
}
if err := workflow.AddServiceLog(s.Db, nodeRunJob, &logs, s.Cfg.Log.ServiceMaxSize); err != nil {
return err
}
return nil
}
func (s *Service) getWorker(ctx context.Context, workerID string) (sdk.Worker, error) {
w, err := worker.LoadWorkerByIDWithDecryptKey(ctx, s.Db, workerID)
if err != nil {
return sdk.Worker{}, err
}
logCache.Set(fmt.Sprintf("worker-%s", w.ID), *w, gocache.DefaultExpiration)
return *w, nil
}
func (s *Service) getHatchery(ctx context.Context, hatcheryID int64, hatcheryName string) (*rsa.PublicKey, error) {
h, err := services.LoadByNameAndType(ctx, s.Db, hatcheryName, services.TypeHatchery)
if err != nil {
return nil, err
}
if h.ID != hatcheryID {
return nil, sdk.WithStack(sdk.ErrWrongRequest)
}
// Verify signature
pk, err := jws.NewPublicKeyFromPEM(h.PublicKey)
if err != nil {
return nil, sdk.WithStack(err)
}
logCache.Set(fmt.Sprintf("hatchery-key-%d", hatcheryID), pk, gocache.DefaultExpiration)
return pk, nil
}
func (s *Service) waitingJobs(ctx context.Context) | if err != nil {
log.Error(ctx, "unable to check canDequeue %s: %v", jobQueue | {
for {
select {
case <-ctx.Done():
return
default:
// List all queues
keyListQueue := cache.Key(keyJobLogQueue, "*")
listKeys, err := s.Cache.Keys(keyListQueue)
if err != nil {
log.Error(ctx, "unable to list jobs queues %s", keyListQueue)
continue
}
// For each key, check if heartbeat key exist
for _, k := range listKeys {
keyParts := strings.Split(k, ":")
jobID := keyParts[len(keyParts)-1]
jobQueueKey, err := s.canDequeue(jobID) | identifier_body |
cdn_log.go | .Accept()
if err != nil {
telemetry.Record(ctx, Errors, 1)
log.Error(ctx, "unable to accept connection: %v", err)
return
}
sdk.GoRoutine(ctx, "cdn-logServer", func(ctx context.Context) {
telemetry.Record(ctx, Hits, 1)
s.handleConnection(ctx, conn)
})
}
}()
}
func (s *Service) handleConnection(ctx context.Context, conn net.Conn) {
defer func() {
_ = conn.Close()
}()
bufReader := bufio.NewReader(conn)
for {
bytes, err := bufReader.ReadBytes(byte(0))
if err != nil {
log.Info(ctx, "client left")
return
}
// remove byte(0)
bytes = bytes[:len(bytes)-1]
if err := s.handleLogMessage(ctx, bytes); err != nil {
telemetry.Record(ctx, Errors, 1)
log.Error(ctx, "cdn.log> %v", err)
continue
}
}
}
func (s *Service) handleLogMessage(ctx context.Context, messageReceived []byte) error {
m := hook.Message{}
if err := m.UnmarshalJSON(messageReceived); err != nil {
return sdk.WrapError(err, "unable to unmarshall gelf message: %s", string(messageReceived))
}
sig, ok := m.Extra["_"+log.ExtraFieldSignature]
if !ok || sig == "" {
return sdk.WithStack(fmt.Errorf("signature not found on log message: %+v", m))
}
// Get worker datas
var signature log.Signature
if err := jws.UnsafeParse(sig.(string), &signature); err != nil {
return err
}
switch {
case signature.Worker != nil:
telemetry.Record(ctx, WorkerLogReceived, 1)
return s.handleWorkerLog(ctx, signature.Worker.WorkerID, sig, m)
case signature.Service != nil:
telemetry.Record(ctx, ServiceLogReceived, 1)
return s.handleServiceLog(ctx, signature.Service.HatcheryID, signature.Service.HatcheryName, signature.Service.WorkerName, sig, m)
default:
return sdk.WithStack(sdk.ErrWrongRequest)
}
}
func (s *Service) handleWorkerLog(ctx context.Context, workerID string, sig interface{}, m hook.Message) error {
var signature log.Signature
var workerData sdk.Worker
cacheData, ok := logCache.Get(fmt.Sprintf("worker-%s", workerID))
if !ok {
var err error
workerData, err = s.getWorker(ctx, workerID)
if err != nil {
return err
}
} else {
workerData = cacheData.(sdk.Worker)
}
if err := jws.Verify(workerData.PrivateKey, sig.(string), &signature); err != nil {
return err
}
if workerData.JobRunID == nil || *workerData.JobRunID != signature.JobID {
return sdk.WithStack(sdk.ErrForbidden)
}
hm := handledMessage{
Signature: signature,
Msg: m,
}
cacheKey := cache.Key(keyJobLogQueue, strconv.Itoa(int(signature.JobID)))
if err := s.Cache.Enqueue(cacheKey, hm); err != nil {
return err
}
return nil
}
type handledMessage struct {
Signature log.Signature
Msg hook.Message
}
func buildMessage(signature log.Signature, m hook.Message) string {
logDate := time.Unix(0, int64(m.Time*1e9))
logs := sdk.Log{
JobID: signature.JobID,
LastModified: &logDate,
NodeRunID: signature.NodeRunID,
Start: &logDate,
StepOrder: signature.Worker.StepOrder,
Val: m.Full,
}
if !strings.HasSuffix(logs.Val, "\n") {
logs.Val += "\n"
}
var lvl string
switch m.Level {
case int32(hook.LOG_DEBUG):
lvl = "DEBUG"
case int32(hook.LOG_INFO):
lvl = "INFO"
case int32(hook.LOG_NOTICE):
lvl = "NOTICE"
case int32(hook.LOG_WARNING):
lvl = "WARN"
case int32(hook.LOG_ERR):
lvl = "ERROR"
case int32(hook.LOG_CRIT):
lvl = "CRITICAL"
case int32(hook.LOG_ALERT):
lvl = "ALERT"
case int32(hook.LOG_EMERG):
lvl = "EMERGENCY"
}
logs.Val = fmt.Sprintf("[%s] %s", lvl, logs.Val)
return logs.Val
}
func (s *Service) handleServiceLog(ctx context.Context, hatcheryID int64, hatcheryName string, workerName string, sig interface{}, m hook.Message) error {
var signature log.Signature
var pk *rsa.PublicKey
cacheData, ok := logCache.Get(fmt.Sprintf("hatchery-key-%d", hatcheryID))
if !ok {
var err error
pk, err = s.getHatchery(ctx, hatcheryID, hatcheryName)
if err != nil {
return err
}
} else {
pk = cacheData.(*rsa.PublicKey)
}
if err := jws.Verify(pk, sig.(string), &signature); err != nil {
return err
}
// Verified that worker has been spawn by this hatchery
workerCacheKey := fmt.Sprintf("service-worker-%s", workerName)
_, ok = logCache.Get(workerCacheKey)
if !ok {
// Verify that the worker has been spawn by this hatchery
wk, err := worker.LoadWorkerByName(ctx, s.Db, workerName)
if err != nil {
return err
}
if wk.HatcheryID == nil {
return sdk.WrapError(sdk.ErrWrongRequest, "hatchery %d cannot send service log for worker %s started by %s that is no more linked to an hatchery", signature.Service.HatcheryID, wk.ID, wk.HatcheryName)
}
if *wk.HatcheryID != signature.Service.HatcheryID {
return sdk.WrapError(sdk.ErrWrongRequest, "cannot send service log for worker %s from hatchery (expected: %d/actual: %d)", wk.ID, *wk.HatcheryID, signature.Service.HatcheryID)
}
logCache.Set(workerCacheKey, true, gocache.DefaultExpiration)
}
nodeRunJob, err := workflow.LoadNodeJobRun(ctx, s.Db, s.Cache, signature.JobID)
if err != nil {
return err
}
logs := sdk.ServiceLog{
ServiceRequirementName: signature.Service.RequirementName,
ServiceRequirementID: signature.Service.RequirementID,
WorkflowNodeJobRunID: signature.JobID,
WorkflowNodeRunID: nodeRunJob.WorkflowNodeRunID,
Val: m.Full,
}
if !strings.HasSuffix(logs.Val, "\n") {
logs.Val += "\n"
}
if err := workflow.AddServiceLog(s.Db, nodeRunJob, &logs, s.Cfg.Log.ServiceMaxSize); err != nil {
return err
}
return nil
}
func (s *Service) getWorker(ctx context.Context, workerID string) (sdk.Worker, error) {
w, err := worker.LoadWorkerByIDWithDecryptKey(ctx, s.Db, workerID)
if err != nil {
return sdk.Worker{}, err
}
logCache.Set(fmt.Sprintf("worker-%s", w.ID), *w, gocache.DefaultExpiration)
return *w, nil
}
func (s *Service) getHatchery(ctx context.Context, hatcheryID int64, hatcheryName string) (*rsa.PublicKey, error) {
h, err := services.LoadByNameAndType(ctx, s.Db, hatcheryName, services.TypeHatchery)
if err != nil {
return nil, err
}
if h.ID != hatcheryID {
return nil, sdk.WithStack(sdk.ErrWrongRequest)
}
// Verify signature
pk, err := jws.NewPublicKeyFromPEM(h.PublicKey)
if err != nil {
return nil, sdk.WithStack(err)
}
logCache.Set(fmt.Sprintf("hatchery-key-%d", hatcheryID), pk, gocache.DefaultExpiration)
return pk, nil
}
func (s *Service) waitingJobs(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
default:
// List all queues
keyListQueue := cache.Key(keyJobLogQueue, "*")
listKeys, err := s.Cache.Keys(keyListQueue)
if err != nil {
log.Error(ctx, "unable to list jobs queues %s", keyListQueue)
continue
}
// For each key, check if heartbeat key exist
for _, k := range listKeys {
keyParts := strings.Split(k, ":")
jobID := keyParts[len(keyParts)-1]
jobQueueKey, err := s.canDequeue(jobID)
if err != nil {
log.Error(ctx, "unable to check canDequeue %s: %v", jobQueueKey, err)
continue
}
if jobQueueKey == "" | {
continue
} | conditional_block | |
cdn_log.go | StepOrder: signature.Worker.StepOrder,
Val: m.Full,
}
if !strings.HasSuffix(logs.Val, "\n") {
logs.Val += "\n"
}
var lvl string
switch m.Level {
case int32(hook.LOG_DEBUG):
lvl = "DEBUG"
case int32(hook.LOG_INFO):
lvl = "INFO"
case int32(hook.LOG_NOTICE):
lvl = "NOTICE"
case int32(hook.LOG_WARNING):
lvl = "WARN"
case int32(hook.LOG_ERR):
lvl = "ERROR"
case int32(hook.LOG_CRIT):
lvl = "CRITICAL"
case int32(hook.LOG_ALERT):
lvl = "ALERT"
case int32(hook.LOG_EMERG):
lvl = "EMERGENCY"
}
logs.Val = fmt.Sprintf("[%s] %s", lvl, logs.Val)
return logs.Val
}
func (s *Service) handleServiceLog(ctx context.Context, hatcheryID int64, hatcheryName string, workerName string, sig interface{}, m hook.Message) error {
var signature log.Signature
var pk *rsa.PublicKey
cacheData, ok := logCache.Get(fmt.Sprintf("hatchery-key-%d", hatcheryID))
if !ok {
var err error
pk, err = s.getHatchery(ctx, hatcheryID, hatcheryName)
if err != nil {
return err
}
} else {
pk = cacheData.(*rsa.PublicKey)
}
if err := jws.Verify(pk, sig.(string), &signature); err != nil {
return err
}
// Verified that worker has been spawn by this hatchery
workerCacheKey := fmt.Sprintf("service-worker-%s", workerName)
_, ok = logCache.Get(workerCacheKey)
if !ok {
// Verify that the worker has been spawn by this hatchery
wk, err := worker.LoadWorkerByName(ctx, s.Db, workerName)
if err != nil {
return err
}
if wk.HatcheryID == nil {
return sdk.WrapError(sdk.ErrWrongRequest, "hatchery %d cannot send service log for worker %s started by %s that is no more linked to an hatchery", signature.Service.HatcheryID, wk.ID, wk.HatcheryName)
}
if *wk.HatcheryID != signature.Service.HatcheryID {
return sdk.WrapError(sdk.ErrWrongRequest, "cannot send service log for worker %s from hatchery (expected: %d/actual: %d)", wk.ID, *wk.HatcheryID, signature.Service.HatcheryID)
}
logCache.Set(workerCacheKey, true, gocache.DefaultExpiration)
}
nodeRunJob, err := workflow.LoadNodeJobRun(ctx, s.Db, s.Cache, signature.JobID)
if err != nil {
return err
}
logs := sdk.ServiceLog{
ServiceRequirementName: signature.Service.RequirementName,
ServiceRequirementID: signature.Service.RequirementID,
WorkflowNodeJobRunID: signature.JobID,
WorkflowNodeRunID: nodeRunJob.WorkflowNodeRunID,
Val: m.Full,
}
if !strings.HasSuffix(logs.Val, "\n") {
logs.Val += "\n"
}
if err := workflow.AddServiceLog(s.Db, nodeRunJob, &logs, s.Cfg.Log.ServiceMaxSize); err != nil {
return err
}
return nil
}
func (s *Service) getWorker(ctx context.Context, workerID string) (sdk.Worker, error) {
w, err := worker.LoadWorkerByIDWithDecryptKey(ctx, s.Db, workerID)
if err != nil {
return sdk.Worker{}, err
}
logCache.Set(fmt.Sprintf("worker-%s", w.ID), *w, gocache.DefaultExpiration)
return *w, nil
}
func (s *Service) getHatchery(ctx context.Context, hatcheryID int64, hatcheryName string) (*rsa.PublicKey, error) {
h, err := services.LoadByNameAndType(ctx, s.Db, hatcheryName, services.TypeHatchery)
if err != nil {
return nil, err
}
if h.ID != hatcheryID {
return nil, sdk.WithStack(sdk.ErrWrongRequest)
}
// Verify signature
pk, err := jws.NewPublicKeyFromPEM(h.PublicKey)
if err != nil {
return nil, sdk.WithStack(err)
}
logCache.Set(fmt.Sprintf("hatchery-key-%d", hatcheryID), pk, gocache.DefaultExpiration)
return pk, nil
}
func (s *Service) waitingJobs(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
default:
// List all queues
keyListQueue := cache.Key(keyJobLogQueue, "*")
listKeys, err := s.Cache.Keys(keyListQueue)
if err != nil {
log.Error(ctx, "unable to list jobs queues %s", keyListQueue)
continue
}
// For each key, check if heartbeat key exist
for _, k := range listKeys {
keyParts := strings.Split(k, ":")
jobID := keyParts[len(keyParts)-1]
jobQueueKey, err := s.canDequeue(jobID)
if err != nil {
log.Error(ctx, "unable to check canDequeue %s: %v", jobQueueKey, err)
continue
}
if jobQueueKey == "" {
continue
}
sdk.GoRoutine(ctx, "cdn-dequeue-job-message", func(ctx context.Context) {
if err := s.dequeueJobMessages(ctx, jobQueueKey, jobID); err != nil {
log.Error(ctx, "unable to dequeue redis incoming job queue: %v", err)
}
})
}
time.Sleep(250 * time.Millisecond)
}
}
}
func (s *Service) dequeueJobMessages(ctx context.Context, jobLogsQueueKey string, jobID string) error {
log.Info(ctx, "Dequeue %s", jobLogsQueueKey)
var t0 = time.Now()
var t1 = time.Now()
var nbMessages int
defer func() {
delta := t1.Sub(t0)
log.Info(ctx, "processLogs[%s] - %d messages received in %v", jobLogsQueueKey, nbMessages, delta)
}()
defer func() {
// Remove heartbeat
_ = s.Cache.Delete(cache.Key(keyJobHearbeat, jobID))
}()
tick := time.NewTicker(5 * time.Second)
defer tick.Stop()
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-tick.C:
b, err := s.Cache.Exist(jobLogsQueueKey)
if err != nil {
log.Error(ctx, "unable to check if queue still exist: %v", err)
continue
} else if !b {
// leave dequeue if queue does not exist anymore
log.Info(ctx, "leaving job queue %s (queue no more exists)", jobLogsQueueKey)
return nil
}
// heartbeat
heartbeatKey := cache.Key(keyJobHearbeat, jobID)
if err := s.Cache.SetWithTTL(heartbeatKey, true, 30); err != nil {
log.Error(ctx, "unable to hearbeat %s: %v", heartbeatKey, err)
continue
}
default:
dequeuCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
var hm handledMessage
if err := s.Cache.DequeueWithContext(dequeuCtx, jobLogsQueueKey, 30*time.Millisecond, &hm); err != nil {
cancel()
if strings.Contains(err.Error(), "context deadline exceeded") {
return nil
}
log.Error(ctx, "unable to dequeue job logs queue %s: %v", jobLogsQueueKey, err)
continue
}
cancel()
if hm.Signature.Worker == nil {
continue
}
nbMessages++
t1 = time.Now()
currentLog := buildMessage(hm.Signature, hm.Msg)
if err := workflow.AppendLog(s.Db, hm.Signature.JobID, hm.Signature.NodeRunID, hm.Signature.Worker.StepOrder, currentLog, s.Cfg.Log.StepMaxSize); err != nil {
log.Error(ctx, "unable to process log: %+v", err)
}
}
}
}
func (s *Service) canDequeue(jobID string) (string, error) {
jobQueueKey := cache.Key(keyJobLogQueue, jobID)
heatbeatKey := cache.Key(keyJobHearbeat, jobID)
// Take a lock
lockKey := cache.Key(keyJobLock, jobID)
b, err := s.Cache.Lock(lockKey, 5*time.Second, 0, 1)
if err != nil {
return "", err
}
defer func() {
_ = s.Cache.Unlock(lockKey)
}()
if !b {
return "", nil
}
exist, err := s.Cache.Exist(heatbeatKey)
if err != nil {
return "", err
} | // if key exist, that mean that someone is already dequeuing | random_line_split | |
cdn_log.go | ) {
s.waitingJobs(ctx)
})
go func() {
for {
conn, err := listener.Accept()
if err != nil {
telemetry.Record(ctx, Errors, 1)
log.Error(ctx, "unable to accept connection: %v", err)
return
}
sdk.GoRoutine(ctx, "cdn-logServer", func(ctx context.Context) {
telemetry.Record(ctx, Hits, 1)
s.handleConnection(ctx, conn)
})
}
}()
}
func (s *Service) handleConnection(ctx context.Context, conn net.Conn) {
defer func() {
_ = conn.Close()
}()
bufReader := bufio.NewReader(conn)
for {
bytes, err := bufReader.ReadBytes(byte(0))
if err != nil {
log.Info(ctx, "client left")
return
}
// remove byte(0)
bytes = bytes[:len(bytes)-1]
if err := s.handleLogMessage(ctx, bytes); err != nil {
telemetry.Record(ctx, Errors, 1)
log.Error(ctx, "cdn.log> %v", err)
continue
}
}
}
func (s *Service) handleLogMessage(ctx context.Context, messageReceived []byte) error {
m := hook.Message{}
if err := m.UnmarshalJSON(messageReceived); err != nil {
return sdk.WrapError(err, "unable to unmarshall gelf message: %s", string(messageReceived))
}
sig, ok := m.Extra["_"+log.ExtraFieldSignature]
if !ok || sig == "" {
return sdk.WithStack(fmt.Errorf("signature not found on log message: %+v", m))
}
// Get worker datas
var signature log.Signature
if err := jws.UnsafeParse(sig.(string), &signature); err != nil {
return err
}
switch {
case signature.Worker != nil:
telemetry.Record(ctx, WorkerLogReceived, 1)
return s.handleWorkerLog(ctx, signature.Worker.WorkerID, sig, m)
case signature.Service != nil:
telemetry.Record(ctx, ServiceLogReceived, 1)
return s.handleServiceLog(ctx, signature.Service.HatcheryID, signature.Service.HatcheryName, signature.Service.WorkerName, sig, m)
default:
return sdk.WithStack(sdk.ErrWrongRequest)
}
}
func (s *Service) handleWorkerLog(ctx context.Context, workerID string, sig interface{}, m hook.Message) error {
var signature log.Signature
var workerData sdk.Worker
cacheData, ok := logCache.Get(fmt.Sprintf("worker-%s", workerID))
if !ok {
var err error
workerData, err = s.getWorker(ctx, workerID)
if err != nil {
return err
}
} else {
workerData = cacheData.(sdk.Worker)
}
if err := jws.Verify(workerData.PrivateKey, sig.(string), &signature); err != nil {
return err
}
if workerData.JobRunID == nil || *workerData.JobRunID != signature.JobID {
return sdk.WithStack(sdk.ErrForbidden)
}
hm := handledMessage{
Signature: signature,
Msg: m,
}
cacheKey := cache.Key(keyJobLogQueue, strconv.Itoa(int(signature.JobID)))
if err := s.Cache.Enqueue(cacheKey, hm); err != nil {
return err
}
return nil
}
type handledMessage struct {
Signature log.Signature
Msg hook.Message
}
func buildMessage(signature log.Signature, m hook.Message) string {
logDate := time.Unix(0, int64(m.Time*1e9))
logs := sdk.Log{
JobID: signature.JobID,
LastModified: &logDate,
NodeRunID: signature.NodeRunID,
Start: &logDate,
StepOrder: signature.Worker.StepOrder,
Val: m.Full,
}
if !strings.HasSuffix(logs.Val, "\n") {
logs.Val += "\n"
}
var lvl string
switch m.Level {
case int32(hook.LOG_DEBUG):
lvl = "DEBUG"
case int32(hook.LOG_INFO):
lvl = "INFO"
case int32(hook.LOG_NOTICE):
lvl = "NOTICE"
case int32(hook.LOG_WARNING):
lvl = "WARN"
case int32(hook.LOG_ERR):
lvl = "ERROR"
case int32(hook.LOG_CRIT):
lvl = "CRITICAL"
case int32(hook.LOG_ALERT):
lvl = "ALERT"
case int32(hook.LOG_EMERG):
lvl = "EMERGENCY"
}
logs.Val = fmt.Sprintf("[%s] %s", lvl, logs.Val)
return logs.Val
}
func (s *Service) handleServiceLog(ctx context.Context, hatcheryID int64, hatcheryName string, workerName string, sig interface{}, m hook.Message) error {
var signature log.Signature
var pk *rsa.PublicKey
cacheData, ok := logCache.Get(fmt.Sprintf("hatchery-key-%d", hatcheryID))
if !ok {
var err error
pk, err = s.getHatchery(ctx, hatcheryID, hatcheryName)
if err != nil {
return err
}
} else {
pk = cacheData.(*rsa.PublicKey)
}
if err := jws.Verify(pk, sig.(string), &signature); err != nil {
return err
}
// Verified that worker has been spawn by this hatchery
workerCacheKey := fmt.Sprintf("service-worker-%s", workerName)
_, ok = logCache.Get(workerCacheKey)
if !ok {
// Verify that the worker has been spawn by this hatchery
wk, err := worker.LoadWorkerByName(ctx, s.Db, workerName)
if err != nil {
return err
}
if wk.HatcheryID == nil {
return sdk.WrapError(sdk.ErrWrongRequest, "hatchery %d cannot send service log for worker %s started by %s that is no more linked to an hatchery", signature.Service.HatcheryID, wk.ID, wk.HatcheryName)
}
if *wk.HatcheryID != signature.Service.HatcheryID {
return sdk.WrapError(sdk.ErrWrongRequest, "cannot send service log for worker %s from hatchery (expected: %d/actual: %d)", wk.ID, *wk.HatcheryID, signature.Service.HatcheryID)
}
logCache.Set(workerCacheKey, true, gocache.DefaultExpiration)
}
nodeRunJob, err := workflow.LoadNodeJobRun(ctx, s.Db, s.Cache, signature.JobID)
if err != nil {
return err
}
logs := sdk.ServiceLog{
ServiceRequirementName: signature.Service.RequirementName,
ServiceRequirementID: signature.Service.RequirementID,
WorkflowNodeJobRunID: signature.JobID,
WorkflowNodeRunID: nodeRunJob.WorkflowNodeRunID,
Val: m.Full,
}
if !strings.HasSuffix(logs.Val, "\n") {
logs.Val += "\n"
}
if err := workflow.AddServiceLog(s.Db, nodeRunJob, &logs, s.Cfg.Log.ServiceMaxSize); err != nil {
return err
}
return nil
}
func (s *Service) getWorker(ctx context.Context, workerID string) (sdk.Worker, error) {
w, err := worker.LoadWorkerByIDWithDecryptKey(ctx, s.Db, workerID)
if err != nil {
return sdk.Worker{}, err
}
logCache.Set(fmt.Sprintf("worker-%s", w.ID), *w, gocache.DefaultExpiration)
return *w, nil
}
func (s *Service) | (ctx context.Context, hatcheryID int64, hatcheryName string) (*rsa.PublicKey, error) {
h, err := services.LoadByNameAndType(ctx, s.Db, hatcheryName, services.TypeHatchery)
if err != nil {
return nil, err
}
if h.ID != hatcheryID {
return nil, sdk.WithStack(sdk.ErrWrongRequest)
}
// Verify signature
pk, err := jws.NewPublicKeyFromPEM(h.PublicKey)
if err != nil {
return nil, sdk.WithStack(err)
}
logCache.Set(fmt.Sprintf("hatchery-key-%d", hatcheryID), pk, gocache.DefaultExpiration)
return pk, nil
}
func (s *Service) waitingJobs(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
default:
// List all queues
keyListQueue := cache.Key(keyJobLogQueue, "*")
listKeys, err := s.Cache.Keys(keyListQueue)
if err != nil {
log.Error(ctx, "unable to list jobs queues %s", keyListQueue)
continue
}
// For each key, check if heartbeat key exist
for _, k := range listKeys {
keyParts := strings.Split(k, ":")
jobID := keyParts[len(keyParts)-1]
jobQueueKey, err := s.canDequeue(jobID)
if err != nil {
log.Error(ctx, "unable to check canDequeue %s: %v", jobQueue | getHatchery | identifier_name |
test_runner.go | t *testing.T
imsis map[string]bool
activePCRFs []string
activeOCSs []string
startTime time.Time
}
// imsi -> ruleID -> record
type RecordByIMSI map[string]map[string]*lteprotos.RuleRecord
// NewTestRunner initializes a new TestRunner by making a UESim client and
// and setting the next IMSI.
func NewTestRunner(t *testing.T) *TestRunner {
startTime := time.Now()
fmt.Println("************* TestRunner setup")
fmt.Printf("Adding Mock HSS service at %s:%d\n", CwagIP, HSSPort)
registry.AddService(MockHSSRemote, CwagIP, HSSPort)
fmt.Printf("Adding Mock PCRF service at %s:%d\n", CwagIP, PCRFPort)
registry.AddService(MockPCRFRemote, CwagIP, PCRFPort)
fmt.Printf("Adding Mock OCS service at %s:%d\n", CwagIP, OCSPort)
registry.AddService(MockOCSRemote, CwagIP, OCSPort)
fmt.Printf("Adding Pipelined service at %s:%d\n", CwagIP, PipelinedPort)
registry.AddService(PipelinedRemote, CwagIP, PipelinedPort)
fmt.Printf("Adding Redis service at %s:%d\n", CwagIP, RedisPort)
registry.AddService(RedisRemote, CwagIP, RedisPort)
fmt.Printf("Adding Directoryd service at %s:%d\n", CwagIP, DirectorydPort)
registry.AddService(DirectorydRemote, CwagIP, DirectorydPort)
testRunner := &TestRunner{t: t,
activePCRFs: []string{MockPCRFRemote},
activeOCSs: []string{MockOCSRemote},
startTime: startTime,
}
testRunner.imsis = make(map[string]bool)
return testRunner
}
// NewTestRunnerWithTwoPCRFandOCS does the same as NewTestRunner but it inclides 2 PCRF and 2 OCS
// Used in scenarios that run 2 PCRFs and 2 OCSs
func NewTestRunnerWithTwoPCRFandOCS(t *testing.T) *TestRunner {
tr := NewTestRunner(t)
fmt.Printf("Adding Mock PCRF #2 service at %s:%d\n", CwagIP, PCRFPort2)
registry.AddService(MockPCRFRemote2, CwagIP, PCRFPort2)
fmt.Printf("Adding Mock OCS #2 service at %s:%d\n", CwagIP, OCSPort2)
registry.AddService(MockOCSRemote2, CwagIP, OCSPort2)
// add the extra two servers for clean up
tr.activePCRFs = append(tr.activePCRFs, MockPCRFRemote2)
tr.activeOCSs = append(tr.activeOCSs, MockOCSRemote2)
return tr
}
// ConfigUEs creates and adds the specified number of UEs and Subscribers
// to the UE Simulator and the HSS.
func (tr *TestRunner) ConfigUEs(numUEs int) ([]*cwfprotos.UEConfig, error) {
IMSIs := make([]string, 0, numUEs)
for i := 0; i < numUEs; i++ {
imsi := ""
for {
imsi = getRandomIMSI()
_, present := tr.imsis[imsi]
if !present {
break
}
}
IMSIs = append(IMSIs, imsi)
}
return tr.ConfigUEsPerInstance(IMSIs, MockPCRFRemote, MockOCSRemote)
}
// ConfigUEsPerInstance same as ConfigUEs but per specific PCRF and OCS instance
func (tr *TestRunner) ConfigUEsPerInstance(IMSIs []string, pcrfInstance, ocsInstance string) ([]*cwfprotos.UEConfig, error) {
fmt.Printf("************* Configuring %d UE(s), PCRF instance: %s\n", len(IMSIs), pcrfInstance)
ues := make([]*cwfprotos.UEConfig, 0)
for _, imsi := range IMSIs {
// If IMSIs were generated properly they should never give an error here
if _, present := tr.imsis[imsi]; present {
return nil, errors.Errorf("IMSI %s already exist in database, use generateRandomIMSIS(num, tr.imsis) to create unique list", imsi)
}
key, opc, err := getRandKeyOpcFromOp([]byte(Op))
if err != nil |
seq := getRandSeq()
ue := makeUE(imsi, key, opc, seq)
sub := makeSubscriber(imsi, key, opc, seq+1)
err = uesim.AddUE(ue)
if err != nil {
return nil, errors.Wrap(err, "Error adding UE to UESimServer")
}
err = addSubscriberToHSS(sub)
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to HSS")
}
err = addSubscriberToPCRFPerInstance(pcrfInstance, sub.GetSid())
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to PCRF")
}
err = addSubscriberToOCSPerInstance(ocsInstance, sub.GetSid())
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to OCS")
}
ues = append(ues, ue)
fmt.Printf("Added UE to Simulator, %s, %s, and %s:\n"+
"\tIMSI: %s\tKey: %x\tOpc: %x\tSeq: %d\n", MockHSSRemote, pcrfInstance, ocsInstance, imsi, key, opc, seq)
tr.imsis[imsi] = true
}
fmt.Println("Successfully configured UE(s)")
return ues, nil
}
// Authenticate simulates an authentication between the UE and the HSS with the specified
// IMSI and CalledStationID, and returns the resulting Radius packet.
func (tr *TestRunner) Authenticate(imsi, calledStationID string) (*radius.Packet, error) {
fmt.Printf("************* Authenticating UE with IMSI: %s\n", imsi)
res, err := uesim.Authenticate(&cwfprotos.AuthenticateRequest{Imsi: imsi, CalledStationID: calledStationID})
if err != nil {
fmt.Println(err)
return &radius.Packet{}, err
}
encoded := res.GetRadiusPacket()
radiusP, err := radius.Parse(encoded, []byte(Secret))
if err != nil {
err = errors.Wrap(err, "Error while parsing encoded Radius packet")
fmt.Println(err)
return &radius.Packet{}, err
}
fmt.Println("Finished Authenticating UE")
return radiusP, nil
}
// Authenticate simulates an authentication between the UE and the HSS with the specified
// IMSI and CalledStationID, and returns the resulting Radius packet.
func (tr *TestRunner) Disconnect(imsi, calledStationID string) (*radius.Packet, error) {
fmt.Printf("************* Sending a disconnect request UE with IMSI: %s\n", imsi)
res, err := uesim.Disconnect(&cwfprotos.DisconnectRequest{Imsi: imsi, CalledStationID: calledStationID})
if err != nil {
return &radius.Packet{}, err
}
encoded := res.GetRadiusPacket()
radiusP, err := radius.Parse(encoded, []byte(Secret))
if err != nil {
err = errors.Wrap(err, "Error while parsing encoded Radius packet")
fmt.Println(err)
return &radius.Packet{}, err
}
fmt.Println("Finished Disconnecting UE")
return radiusP, nil
}
// GenULTraffic simulates the UE sending traffic through the CWAG to the Internet
// by running an iperf3 client on the UE simulator and an iperf3 server on the
// Magma traffic server.
func (tr *TestRunner) GenULTraffic(req *cwfprotos.GenTrafficRequest) (*cwfprotos.GenTrafficResponse, error) {
fmt.Printf("************* Generating Traffic for UE with Req: %v\n", req)
res, err := uesim.GenTraffic(req)
fmt.Printf("============> Total Sent: %d bytes\n", res.GetEndOutput().GetSumSent().GetBytes())
return res, err
}
// Remove subscribers, rules, flows, and monitors to clean up the state for
// consecutive test runs
func (tr *TestRunner) CleanUp() error {
for imsi := range tr.imsis {
err := deleteSubscribersFromHSS(imsi)
if err != nil {
return err
}
}
for _, instance := range tr.activePCRFs {
err := clearSubscribersFromPCRFPerInstance(instance)
if err != nil {
return err
}
}
for _, instance := range tr.activeOCSs {
err := clearSubscribersFromOCSPerInstance(instance)
if err != nil {
return err
}
}
return nil
}
// | {
return nil, err
} | conditional_block |
test_runner.go | Error adding Subscriber to PCRF")
}
err = addSubscriberToOCSPerInstance(ocsInstance, sub.GetSid())
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to OCS")
}
ues = append(ues, ue)
fmt.Printf("Added UE to Simulator, %s, %s, and %s:\n"+
"\tIMSI: %s\tKey: %x\tOpc: %x\tSeq: %d\n", MockHSSRemote, pcrfInstance, ocsInstance, imsi, key, opc, seq)
tr.imsis[imsi] = true
}
fmt.Println("Successfully configured UE(s)")
return ues, nil
}
// Authenticate simulates an authentication between the UE and the HSS with the specified
// IMSI and CalledStationID, and returns the resulting Radius packet.
func (tr *TestRunner) Authenticate(imsi, calledStationID string) (*radius.Packet, error) {
fmt.Printf("************* Authenticating UE with IMSI: %s\n", imsi)
res, err := uesim.Authenticate(&cwfprotos.AuthenticateRequest{Imsi: imsi, CalledStationID: calledStationID})
if err != nil {
fmt.Println(err)
return &radius.Packet{}, err
}
encoded := res.GetRadiusPacket()
radiusP, err := radius.Parse(encoded, []byte(Secret))
if err != nil {
err = errors.Wrap(err, "Error while parsing encoded Radius packet")
fmt.Println(err)
return &radius.Packet{}, err
}
fmt.Println("Finished Authenticating UE")
return radiusP, nil
}
// Authenticate simulates an authentication between the UE and the HSS with the specified
// IMSI and CalledStationID, and returns the resulting Radius packet.
func (tr *TestRunner) Disconnect(imsi, calledStationID string) (*radius.Packet, error) {
fmt.Printf("************* Sending a disconnect request UE with IMSI: %s\n", imsi)
res, err := uesim.Disconnect(&cwfprotos.DisconnectRequest{Imsi: imsi, CalledStationID: calledStationID})
if err != nil {
return &radius.Packet{}, err
}
encoded := res.GetRadiusPacket()
radiusP, err := radius.Parse(encoded, []byte(Secret))
if err != nil {
err = errors.Wrap(err, "Error while parsing encoded Radius packet")
fmt.Println(err)
return &radius.Packet{}, err
}
fmt.Println("Finished Disconnecting UE")
return radiusP, nil
}
// GenULTraffic simulates the UE sending traffic through the CWAG to the Internet
// by running an iperf3 client on the UE simulator and an iperf3 server on the
// Magma traffic server.
func (tr *TestRunner) GenULTraffic(req *cwfprotos.GenTrafficRequest) (*cwfprotos.GenTrafficResponse, error) {
fmt.Printf("************* Generating Traffic for UE with Req: %v\n", req)
res, err := uesim.GenTraffic(req)
fmt.Printf("============> Total Sent: %d bytes\n", res.GetEndOutput().GetSumSent().GetBytes())
return res, err
}
// Remove subscribers, rules, flows, and monitors to clean up the state for
// consecutive test runs
func (tr *TestRunner) CleanUp() error {
for imsi := range tr.imsis {
err := deleteSubscribersFromHSS(imsi)
if err != nil {
return err
}
}
for _, instance := range tr.activePCRFs {
err := clearSubscribersFromPCRFPerInstance(instance)
if err != nil {
return err
}
}
for _, instance := range tr.activeOCSs {
err := clearSubscribersFromOCSPerInstance(instance)
if err != nil {
return err
}
}
return nil
}
// GetPolicyUsage is a wrapper around pipelined's GetPolicyUsage and returns
// the policy usage keyed by subscriber ID
func (tr *TestRunner) GetPolicyUsage() (RecordByIMSI, error) {
recordsBySubID := RecordByIMSI{}
table, err := getPolicyUsage()
if err != nil {
return recordsBySubID, err
}
for _, record := range table.Records {
fmt.Printf("Record %v\n", record)
_, exists := recordsBySubID[record.Sid]
if !exists {
recordsBySubID[record.Sid] = map[string]*lteprotos.RuleRecord{}
}
recordsBySubID[record.Sid][record.RuleId] = record
}
return recordsBySubID, nil
}
func (tr *TestRunner) WaitForEnforcementStatsToSync() {
// TODO load this value from pipelined.yml
enforcementPollPeriod := 1 * time.Second
time.Sleep(4 * enforcementPollPeriod)
}
func (tr *TestRunner) WaitForPoliciesToSync() {
// TODO load this value from sessiond.yml (rule_update_interval_sec)
ruleUpdatePeriod := 1 * time.Second
time.Sleep(4 * ruleUpdatePeriod)
}
func (tr *TestRunner) WaitForEnforcementStatsForRule(imsi string, ruleIDs ...string) func() bool {
// Wait until the ruleIDs show up for the IMSI
return func() bool {
fmt.Printf("Waiting until %s, %v shows up in enforcement stats...\n", imsi, ruleIDs)
records, err := tr.GetPolicyUsage()
if err != nil {
return false
}
if records[prependIMSIPrefix(imsi)] == nil {
return false
}
for _, ruleID := range ruleIDs {
if records[prependIMSIPrefix(imsi)][ruleID] == nil {
return false
}
}
fmt.Printf("%s, %v are now in enforcement stats!\n", imsi, ruleIDs)
return true
}
}
func (tr *TestRunner) WaitForNoEnforcementStatsForRule(imsi string, ruleIDs ...string) func() bool {
// Wait until the ruleIDs disappear for the IMSI
return func() bool {
fmt.Printf("Waiting until %s, %v disappear from enforcement stats...\n", imsi, ruleIDs)
records, err := tr.GetPolicyUsage()
if err != nil {
return false
}
if records[prependIMSIPrefix(imsi)] == nil {
fmt.Printf("%s are no longer in enforcement stats!\n", imsi)
return true
}
for _, ruleID := range ruleIDs {
if records[prependIMSIPrefix(imsi)][ruleID] != nil {
return false
}
}
fmt.Printf("%s, %v are no longer in enforcement stats!\n", imsi, ruleIDs)
return true
}
}
func (tr *TestRunner) WaitForEnforcementStatsForRuleGreaterThan(imsi, ruleID string, min uint64) func() bool {
// Todo figure out the best way to figure out when RAR is processed
return func() bool {
fmt.Printf("Waiting until %s, %s has more than %d bytes in enforcement stats...\n", imsi, ruleID, min)
records, err := tr.GetPolicyUsage()
imsi = prependIMSIPrefix(imsi)
if err != nil {
return false
}
if records[imsi] == nil {
return false
}
record := records[imsi][ruleID]
if record == nil {
return false
}
txBytes := record.BytesTx
if record.BytesTx <= min {
return false
}
fmt.Printf("%s, %s now passed %d > %d in enforcement stats!\n", imsi, ruleID, txBytes, min)
return true
}
}
//WaitForPolicyReAuthToProcess returns a method which checks for reauth answer and
// if it has sessionID which contains the IMSI
func (tr *TestRunner) WaitForPolicyReAuthToProcess(raa *fegprotos.PolicyReAuthAnswer, imsi string) func() bool {
// Todo figure out the best way to figure out when RAR is processed
return func() bool {
if raa != nil && strings.Contains(raa.SessionId, "IMSI"+imsi) {
return true
}
return false
}
}
//WaitForChargingReAuthToProcess returns a method which checks for reauth answer and
// if it has sessionID which contains the IMSI
func (tr *TestRunner) WaitForChargingReAuthToProcess(raa *fegprotos.ChargingReAuthAnswer, imsi string) func() bool {
// Todo figure out the best way to figure out when RAR is processed
return func() bool {
if raa != nil && strings.Contains(raa.SessionId, "IMSI"+imsi) {
return true
}
return false
}
}
func (tr *TestRunner) PrintElapsedTime() {
now := time.Now()
fmt.Printf("Elapsed Time: %s\n", now.Sub(tr.startTime))
}
// generateRandomIMSIS creates a slice of unique Random IMSIs taking into consideration a previous list with IMSIS
func | generateRandomIMSIS | identifier_name | |
test_runner.go | Service(PipelinedRemote, CwagIP, PipelinedPort)
fmt.Printf("Adding Redis service at %s:%d\n", CwagIP, RedisPort)
registry.AddService(RedisRemote, CwagIP, RedisPort)
fmt.Printf("Adding Directoryd service at %s:%d\n", CwagIP, DirectorydPort)
registry.AddService(DirectorydRemote, CwagIP, DirectorydPort)
testRunner := &TestRunner{t: t,
activePCRFs: []string{MockPCRFRemote},
activeOCSs: []string{MockOCSRemote},
startTime: startTime,
}
testRunner.imsis = make(map[string]bool)
return testRunner
}
// NewTestRunnerWithTwoPCRFandOCS does the same as NewTestRunner but it inclides 2 PCRF and 2 OCS
// Used in scenarios that run 2 PCRFs and 2 OCSs
func NewTestRunnerWithTwoPCRFandOCS(t *testing.T) *TestRunner {
tr := NewTestRunner(t)
fmt.Printf("Adding Mock PCRF #2 service at %s:%d\n", CwagIP, PCRFPort2)
registry.AddService(MockPCRFRemote2, CwagIP, PCRFPort2)
fmt.Printf("Adding Mock OCS #2 service at %s:%d\n", CwagIP, OCSPort2)
registry.AddService(MockOCSRemote2, CwagIP, OCSPort2)
// add the extra two servers for clean up
tr.activePCRFs = append(tr.activePCRFs, MockPCRFRemote2)
tr.activeOCSs = append(tr.activeOCSs, MockOCSRemote2)
return tr
}
// ConfigUEs creates and adds the specified number of UEs and Subscribers
// to the UE Simulator and the HSS.
func (tr *TestRunner) ConfigUEs(numUEs int) ([]*cwfprotos.UEConfig, error) {
IMSIs := make([]string, 0, numUEs)
for i := 0; i < numUEs; i++ {
imsi := ""
for {
imsi = getRandomIMSI()
_, present := tr.imsis[imsi]
if !present {
break
}
}
IMSIs = append(IMSIs, imsi)
}
return tr.ConfigUEsPerInstance(IMSIs, MockPCRFRemote, MockOCSRemote)
}
// ConfigUEsPerInstance same as ConfigUEs but per specific PCRF and OCS instance
func (tr *TestRunner) ConfigUEsPerInstance(IMSIs []string, pcrfInstance, ocsInstance string) ([]*cwfprotos.UEConfig, error) {
fmt.Printf("************* Configuring %d UE(s), PCRF instance: %s\n", len(IMSIs), pcrfInstance)
ues := make([]*cwfprotos.UEConfig, 0)
for _, imsi := range IMSIs {
// If IMSIs were generated properly they should never give an error here
if _, present := tr.imsis[imsi]; present {
return nil, errors.Errorf("IMSI %s already exist in database, use generateRandomIMSIS(num, tr.imsis) to create unique list", imsi)
}
key, opc, err := getRandKeyOpcFromOp([]byte(Op))
if err != nil {
return nil, err
}
seq := getRandSeq()
ue := makeUE(imsi, key, opc, seq)
sub := makeSubscriber(imsi, key, opc, seq+1)
err = uesim.AddUE(ue)
if err != nil {
return nil, errors.Wrap(err, "Error adding UE to UESimServer")
}
err = addSubscriberToHSS(sub)
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to HSS")
}
err = addSubscriberToPCRFPerInstance(pcrfInstance, sub.GetSid())
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to PCRF")
}
err = addSubscriberToOCSPerInstance(ocsInstance, sub.GetSid())
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to OCS")
}
ues = append(ues, ue)
fmt.Printf("Added UE to Simulator, %s, %s, and %s:\n"+
"\tIMSI: %s\tKey: %x\tOpc: %x\tSeq: %d\n", MockHSSRemote, pcrfInstance, ocsInstance, imsi, key, opc, seq)
tr.imsis[imsi] = true
}
fmt.Println("Successfully configured UE(s)")
return ues, nil
}
// Authenticate simulates an authentication between the UE and the HSS with the specified
// IMSI and CalledStationID, and returns the resulting Radius packet.
func (tr *TestRunner) Authenticate(imsi, calledStationID string) (*radius.Packet, error) {
fmt.Printf("************* Authenticating UE with IMSI: %s\n", imsi)
res, err := uesim.Authenticate(&cwfprotos.AuthenticateRequest{Imsi: imsi, CalledStationID: calledStationID})
if err != nil {
fmt.Println(err)
return &radius.Packet{}, err
}
encoded := res.GetRadiusPacket()
radiusP, err := radius.Parse(encoded, []byte(Secret))
if err != nil {
err = errors.Wrap(err, "Error while parsing encoded Radius packet")
fmt.Println(err)
return &radius.Packet{}, err
}
fmt.Println("Finished Authenticating UE")
return radiusP, nil
}
// Authenticate simulates an authentication between the UE and the HSS with the specified
// IMSI and CalledStationID, and returns the resulting Radius packet.
func (tr *TestRunner) Disconnect(imsi, calledStationID string) (*radius.Packet, error) {
fmt.Printf("************* Sending a disconnect request UE with IMSI: %s\n", imsi)
res, err := uesim.Disconnect(&cwfprotos.DisconnectRequest{Imsi: imsi, CalledStationID: calledStationID})
if err != nil {
return &radius.Packet{}, err
}
encoded := res.GetRadiusPacket()
radiusP, err := radius.Parse(encoded, []byte(Secret))
if err != nil {
err = errors.Wrap(err, "Error while parsing encoded Radius packet")
fmt.Println(err)
return &radius.Packet{}, err
}
fmt.Println("Finished Disconnecting UE")
return radiusP, nil
}
// GenULTraffic simulates the UE sending traffic through the CWAG to the Internet
// by running an iperf3 client on the UE simulator and an iperf3 server on the
// Magma traffic server.
func (tr *TestRunner) GenULTraffic(req *cwfprotos.GenTrafficRequest) (*cwfprotos.GenTrafficResponse, error) {
fmt.Printf("************* Generating Traffic for UE with Req: %v\n", req)
res, err := uesim.GenTraffic(req)
fmt.Printf("============> Total Sent: %d bytes\n", res.GetEndOutput().GetSumSent().GetBytes())
return res, err
}
// Remove subscribers, rules, flows, and monitors to clean up the state for
// consecutive test runs
func (tr *TestRunner) CleanUp() error {
for imsi := range tr.imsis {
err := deleteSubscribersFromHSS(imsi)
if err != nil {
return err
}
}
for _, instance := range tr.activePCRFs {
err := clearSubscribersFromPCRFPerInstance(instance)
if err != nil {
return err
}
}
for _, instance := range tr.activeOCSs {
err := clearSubscribersFromOCSPerInstance(instance)
if err != nil {
return err
}
}
return nil
}
// GetPolicyUsage is a wrapper around pipelined's GetPolicyUsage and returns
// the policy usage keyed by subscriber ID
func (tr *TestRunner) GetPolicyUsage() (RecordByIMSI, error) {
recordsBySubID := RecordByIMSI{}
table, err := getPolicyUsage()
if err != nil {
return recordsBySubID, err
}
for _, record := range table.Records {
fmt.Printf("Record %v\n", record)
_, exists := recordsBySubID[record.Sid]
if !exists {
recordsBySubID[record.Sid] = map[string]*lteprotos.RuleRecord{}
}
recordsBySubID[record.Sid][record.RuleId] = record
}
return recordsBySubID, nil
}
func (tr *TestRunner) WaitForEnforcementStatsToSync() {
// TODO load this value from pipelined.yml
enforcementPollPeriod := 1 * time.Second
time.Sleep(4 * enforcementPollPeriod)
}
| func (tr *TestRunner) WaitForPoliciesToSync() {
// TODO load this value from sessiond.yml (rule_update_interval_sec)
ruleUpdatePeriod := 1 * time.Second
time.Sleep(4 * ruleUpdatePeriod)
} | random_line_split | |
test_runner.go | and 2 OCS
// Used in scenarios that run 2 PCRFs and 2 OCSs
func NewTestRunnerWithTwoPCRFandOCS(t *testing.T) *TestRunner {
tr := NewTestRunner(t)
fmt.Printf("Adding Mock PCRF #2 service at %s:%d\n", CwagIP, PCRFPort2)
registry.AddService(MockPCRFRemote2, CwagIP, PCRFPort2)
fmt.Printf("Adding Mock OCS #2 service at %s:%d\n", CwagIP, OCSPort2)
registry.AddService(MockOCSRemote2, CwagIP, OCSPort2)
// add the extra two servers for clean up
tr.activePCRFs = append(tr.activePCRFs, MockPCRFRemote2)
tr.activeOCSs = append(tr.activeOCSs, MockOCSRemote2)
return tr
}
// ConfigUEs creates and adds the specified number of UEs and Subscribers
// to the UE Simulator and the HSS.
func (tr *TestRunner) ConfigUEs(numUEs int) ([]*cwfprotos.UEConfig, error) {
IMSIs := make([]string, 0, numUEs)
for i := 0; i < numUEs; i++ {
imsi := ""
for {
imsi = getRandomIMSI()
_, present := tr.imsis[imsi]
if !present {
break
}
}
IMSIs = append(IMSIs, imsi)
}
return tr.ConfigUEsPerInstance(IMSIs, MockPCRFRemote, MockOCSRemote)
}
// ConfigUEsPerInstance same as ConfigUEs but per specific PCRF and OCS instance
func (tr *TestRunner) ConfigUEsPerInstance(IMSIs []string, pcrfInstance, ocsInstance string) ([]*cwfprotos.UEConfig, error) {
fmt.Printf("************* Configuring %d UE(s), PCRF instance: %s\n", len(IMSIs), pcrfInstance)
ues := make([]*cwfprotos.UEConfig, 0)
for _, imsi := range IMSIs {
// If IMSIs were generated properly they should never give an error here
if _, present := tr.imsis[imsi]; present {
return nil, errors.Errorf("IMSI %s already exist in database, use generateRandomIMSIS(num, tr.imsis) to create unique list", imsi)
}
key, opc, err := getRandKeyOpcFromOp([]byte(Op))
if err != nil {
return nil, err
}
seq := getRandSeq()
ue := makeUE(imsi, key, opc, seq)
sub := makeSubscriber(imsi, key, opc, seq+1)
err = uesim.AddUE(ue)
if err != nil {
return nil, errors.Wrap(err, "Error adding UE to UESimServer")
}
err = addSubscriberToHSS(sub)
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to HSS")
}
err = addSubscriberToPCRFPerInstance(pcrfInstance, sub.GetSid())
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to PCRF")
}
err = addSubscriberToOCSPerInstance(ocsInstance, sub.GetSid())
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to OCS")
}
ues = append(ues, ue)
fmt.Printf("Added UE to Simulator, %s, %s, and %s:\n"+
"\tIMSI: %s\tKey: %x\tOpc: %x\tSeq: %d\n", MockHSSRemote, pcrfInstance, ocsInstance, imsi, key, opc, seq)
tr.imsis[imsi] = true
}
fmt.Println("Successfully configured UE(s)")
return ues, nil
}
// Authenticate simulates an authentication between the UE and the HSS with the specified
// IMSI and CalledStationID, and returns the resulting Radius packet.
func (tr *TestRunner) Authenticate(imsi, calledStationID string) (*radius.Packet, error) {
fmt.Printf("************* Authenticating UE with IMSI: %s\n", imsi)
res, err := uesim.Authenticate(&cwfprotos.AuthenticateRequest{Imsi: imsi, CalledStationID: calledStationID})
if err != nil {
fmt.Println(err)
return &radius.Packet{}, err
}
encoded := res.GetRadiusPacket()
radiusP, err := radius.Parse(encoded, []byte(Secret))
if err != nil {
err = errors.Wrap(err, "Error while parsing encoded Radius packet")
fmt.Println(err)
return &radius.Packet{}, err
}
fmt.Println("Finished Authenticating UE")
return radiusP, nil
}
// Authenticate simulates an authentication between the UE and the HSS with the specified
// IMSI and CalledStationID, and returns the resulting Radius packet.
func (tr *TestRunner) Disconnect(imsi, calledStationID string) (*radius.Packet, error) {
fmt.Printf("************* Sending a disconnect request UE with IMSI: %s\n", imsi)
res, err := uesim.Disconnect(&cwfprotos.DisconnectRequest{Imsi: imsi, CalledStationID: calledStationID})
if err != nil {
return &radius.Packet{}, err
}
encoded := res.GetRadiusPacket()
radiusP, err := radius.Parse(encoded, []byte(Secret))
if err != nil {
err = errors.Wrap(err, "Error while parsing encoded Radius packet")
fmt.Println(err)
return &radius.Packet{}, err
}
fmt.Println("Finished Disconnecting UE")
return radiusP, nil
}
// GenULTraffic simulates the UE sending traffic through the CWAG to the Internet
// by running an iperf3 client on the UE simulator and an iperf3 server on the
// Magma traffic server.
func (tr *TestRunner) GenULTraffic(req *cwfprotos.GenTrafficRequest) (*cwfprotos.GenTrafficResponse, error) {
fmt.Printf("************* Generating Traffic for UE with Req: %v\n", req)
res, err := uesim.GenTraffic(req)
fmt.Printf("============> Total Sent: %d bytes\n", res.GetEndOutput().GetSumSent().GetBytes())
return res, err
}
// Remove subscribers, rules, flows, and monitors to clean up the state for
// consecutive test runs
func (tr *TestRunner) CleanUp() error {
for imsi := range tr.imsis {
err := deleteSubscribersFromHSS(imsi)
if err != nil {
return err
}
}
for _, instance := range tr.activePCRFs {
err := clearSubscribersFromPCRFPerInstance(instance)
if err != nil {
return err
}
}
for _, instance := range tr.activeOCSs {
err := clearSubscribersFromOCSPerInstance(instance)
if err != nil {
return err
}
}
return nil
}
// GetPolicyUsage is a wrapper around pipelined's GetPolicyUsage and returns
// the policy usage keyed by subscriber ID
func (tr *TestRunner) GetPolicyUsage() (RecordByIMSI, error) {
recordsBySubID := RecordByIMSI{}
table, err := getPolicyUsage()
if err != nil {
return recordsBySubID, err
}
for _, record := range table.Records {
fmt.Printf("Record %v\n", record)
_, exists := recordsBySubID[record.Sid]
if !exists {
recordsBySubID[record.Sid] = map[string]*lteprotos.RuleRecord{}
}
recordsBySubID[record.Sid][record.RuleId] = record
}
return recordsBySubID, nil
}
func (tr *TestRunner) WaitForEnforcementStatsToSync() {
// TODO load this value from pipelined.yml
enforcementPollPeriod := 1 * time.Second
time.Sleep(4 * enforcementPollPeriod)
}
func (tr *TestRunner) WaitForPoliciesToSync() {
// TODO load this value from sessiond.yml (rule_update_interval_sec)
ruleUpdatePeriod := 1 * time.Second
time.Sleep(4 * ruleUpdatePeriod)
}
func (tr *TestRunner) WaitForEnforcementStatsForRule(imsi string, ruleIDs ...string) func() bool | {
// Wait until the ruleIDs show up for the IMSI
return func() bool {
fmt.Printf("Waiting until %s, %v shows up in enforcement stats...\n", imsi, ruleIDs)
records, err := tr.GetPolicyUsage()
if err != nil {
return false
}
if records[prependIMSIPrefix(imsi)] == nil {
return false
}
for _, ruleID := range ruleIDs {
if records[prependIMSIPrefix(imsi)][ruleID] == nil {
return false
}
}
fmt.Printf("%s, %v are now in enforcement stats!\n", imsi, ruleIDs)
return true
}
} | identifier_body | |
dt.py | ia/arrhythmia.data"
df = pd.read_csv(url, header = None, na_values="?")
dsmall = df.iloc[0:10, list(range(3)) + [279]]
class Node(object):
def __init__(self, name, node_type, data, label=None, split=None):
self.name = name
self.node_type = node_type
self.label = label
self.data = data
self.split = split
self.children = []
def __repr__(self):
|
class Split(object):
def __init__(self, data, class_column, split_column, point=None):
self.data = data
self.class_column = class_column
self.split_column = split_column
self.info_gain = None
self.point = point
self.partition_list = None # stores the data points on each side of the split
self.find_split_point()
self.partitions()
def compute_entropy(self, data):
data = data.astype(int)
#unique, count = np.unique(data, return_counts=True)
count = np.bincount(data)
count = count[count != 0]
p = count / np.sum(count)
return -np.sum(p * np.log2(p))
def compute_info_gain(self, neg, pos):
data = self.data[self.class_column].values.astype(int)
H0 = self.compute_entropy(data)
p_neg = len(neg) / len(data)
p_pos = len(pos) / len(data)
H_n = p_neg * self.compute_entropy(neg)
H_p = p_pos * self.compute_entropy(pos)
Ha = H_p + H_n
return H0 - Ha
def find_split_point(self):
data = self.data[[self.split_column, self.class_column]].values
attr_value = data[data[:,0].argsort()][:,0]
idx = data[data[:,0].argsort()][:,-1]
max_IG = -np.inf
for i in range(len(attr_value) - 1):
if attr_value[i] != attr_value[i + 1] and idx[i] != idx[i + 1]:
split_point = (attr_value[i] + attr_value[i + 1]) / 2
neg = idx[:i+1]
pos = idx[i+1:]
if self.compute_info_gain(neg, pos) > max_IG:
max_IG = self.compute_info_gain(neg, pos)
self.point = split_point
self.info_gain = max_IG
def partitions(self):
'''Get the two partitions (child nodes) for this split.'''
if self.partition_list:
# This check ensures that the list is computed at most once. Once computed
# it is stored
return self.partition_list
data = self.data
split_column = self.split_column
partition_list = []
partition_list.append(data[data[split_column] <= self.point])
partition_list.append(data[data[split_column] > self.point])
self.partition_list = partition_list
class DecisionTree(object):
def __init__(self, max_depth=None):
if (max_depth is not None and (max_depth != int(max_depth) or max_depth < 0)):
raise Exception("Invalid max depth value.")
self.max_depth = max_depth
def fit(self, data, class_column):
'''Fit a tree on data, in which class_column is the target.'''
if (not isinstance(data, pd.DataFrame) or class_column not in data.columns):
raise Exception("Invalid input")
self.data = data
self.class_column = class_column
self.non_class_columns = [c for c in data.columns if c != class_column]
self.root = self.recursive_build_tree(data, data, depth=0, attributes=self.non_class_columns, name='0')
# Node __init__(self, name, node_type, data, label=None, split=None)
def recursive_build_tree(self, data, parent_data, depth, attributes, name):
if len(data) == 0: # data set is empty
return Node(name=name, node_type='leaf', label=self.plurality_value(parent_data),
data=data)
elif depth == self.max_depth : # reach the max depth of the tree, can not split
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
elif np.all(data[self.class_column].values == data[self.class_column].values[0]):
return Node(name=name, node_type='leaf', label= list(set(data[self.class_column].values))[0], data=data)
elif len(attributes) == 0: # only has the class column, no attribute
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
#data[attributes].drop_duplicates()
elif len(data[attributes].drop_duplicates()) == 1: # noise data
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
else:
split = None
for attribute in attributes:
temp_split = Split(data, self.class_column, attribute)
# set the split with higher info gain as the true split
if not split or temp_split.info_gain > split.info_gain:
split = temp_split
root = Node(name=name, node_type='interval', data=data, split=split)
non_class_columns = attributes
if len(set(data[self.class_column].values)) == 2: # the attribute is discrete
attributes = [c for c in attributes if c != root.split.split_column]
# recursive_build_tree(self, data, parent_data, depth, attributes, name)
root.children.append(self.recursive_build_tree(root.split.partition_list[0][attributes +[self.class_column]], data, depth + 1, attributes, name + '.0'))
root.children.append(self.recursive_build_tree(root.split.partition_list[1][attributes +[self.class_column]], data, depth + 1, attributes, name + '.1'))
return root
def predict(self, test):
# WRITE YOUR CODE HERE
res = []
test = test.values
for i in range(len(test)):
node = self.root
while node.node_type != 'leaf':
split = node.split
#if test[split.split_column][i] <= split.point:
if test[i, split.split_column] <= split.point:
node = node.children[0]
else:
node = node.children[1]
res.append(node.label)
return res
def plurality_value(self, data):
#return data[self.class_column].value_counts().idxmax()
return np.argmax(np.bincount(data[self.class_column].astype(int).values))
def print(self):
self.recursive_print(self.root)
def recursive_print(self, node):
print(node)
for u in node.children:
self.recursive_print(u)
tree = tree = DecisionTree(3)
tree.fit(dsmall, 279)
tree.print()
def validation_curve():
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/arrhythmia/arrhythmia.data"
df = pd.read_csv(url, header = None, na_values="?")
MAX_DEPTH = 20
NUMBER_OF_COLUMNS = 278
NUMBER_OF_ROWS = len(df)
CLASS_COLUMN = 279
# fill the empty value
for i in range(280):
if df[i].isnull().sum() > 0:
df.iloc[:,i].fillna(df[i].mode()[0], inplace=True)
df = df.iloc[:NUMBER_OF_ROWS,list(range(NUMBER_OF_COLUMNS)) + [CLASS_COLUMN]]
# shuffle the data
df = df.sample(frac=1).reset_index(drop=True)
# split the data into 3 parts
datasets = np.array_split(df, 3)
# initilize the correct ratio of training data and test data
training_error_ratio = []
test_error_ratio = []
for depth in range(MAX_DEPTH + 1)[2::2]:
# initialize the tree
dt = DecisionTree(depth)
training_error_ratio_sum = 0
test_error_ratio_sum = 0
for sets in [[0,1,2],[1,2,0],[0,2,1]]:
# get the training data and test data
training_data = pd.concat([datasets[sets[0]],datasets[sets[1]]])
test_data = datasets[sets[2]]
# train the model
dt.fit(training_data, CLASS_COLUMN)
# get the prediction result of the training data and test data | data = self.data
if self.node_type != 'leaf':
s = (f"{self.name} Internal node with {data[data.columns[0]].count()} rows; split"
f" {self.split.split_column} at {self.split.point:.2f} for children with"
f" {[p[p.columns[0]].count() for p in self.split.partitions()]} rows"
f" and infomation gain {self.split.info_gain:.5f}")
else:
s = (f"{self.name} Leaf with {data[data.columns[0]].count()} rows, and label"
f" {self.label}")
return s | identifier_body |
dt.py | ia/arrhythmia.data"
df = pd.read_csv(url, header = None, na_values="?")
dsmall = df.iloc[0:10, list(range(3)) + [279]]
class Node(object):
def __init__(self, name, node_type, data, label=None, split=None):
self.name = name
self.node_type = node_type
self.label = label
self.data = data
self.split = split
self.children = []
def __repr__(self):
data = self.data
if self.node_type != 'leaf':
s = (f"{self.name} Internal node with {data[data.columns[0]].count()} rows; split"
f" {self.split.split_column} at {self.split.point:.2f} for children with"
f" {[p[p.columns[0]].count() for p in self.split.partitions()]} rows"
f" and infomation gain {self.split.info_gain:.5f}")
else:
s = (f"{self.name} Leaf with {data[data.columns[0]].count()} rows, and label"
f" {self.label}")
return s
class Split(object):
def __init__(self, data, class_column, split_column, point=None):
self.data = data
self.class_column = class_column
self.split_column = split_column
self.info_gain = None
self.point = point
self.partition_list = None # stores the data points on each side of the split
self.find_split_point()
self.partitions()
def compute_entropy(self, data):
data = data.astype(int)
#unique, count = np.unique(data, return_counts=True)
count = np.bincount(data)
count = count[count != 0]
p = count / np.sum(count)
return -np.sum(p * np.log2(p))
def compute_info_gain(self, neg, pos):
data = self.data[self.class_column].values.astype(int)
H0 = self.compute_entropy(data)
p_neg = len(neg) / len(data)
p_pos = len(pos) / len(data)
H_n = p_neg * self.compute_entropy(neg)
H_p = p_pos * self.compute_entropy(pos)
Ha = H_p + H_n
return H0 - Ha
def find_split_point(self):
data = self.data[[self.split_column, self.class_column]].values
attr_value = data[data[:,0].argsort()][:,0]
idx = data[data[:,0].argsort()][:,-1]
max_IG = -np.inf
for i in range(len(attr_value) - 1):
if attr_value[i] != attr_value[i + 1] and idx[i] != idx[i + 1]:
split_point = (attr_value[i] + attr_value[i + 1]) / 2
neg = idx[:i+1]
pos = idx[i+1:]
if self.compute_info_gain(neg, pos) > max_IG:
max_IG = self.compute_info_gain(neg, pos)
self.point = split_point
self.info_gain = max_IG
def partitions(self):
'''Get the two partitions (child nodes) for this split.'''
if self.partition_list:
# This check ensures that the list is computed at most once. Once computed
# it is stored
return self.partition_list
data = self.data
split_column = self.split_column
partition_list = []
partition_list.append(data[data[split_column] <= self.point])
partition_list.append(data[data[split_column] > self.point])
self.partition_list = partition_list
class DecisionTree(object):
def __init__(self, max_depth=None):
if (max_depth is not None and (max_depth != int(max_depth) or max_depth < 0)):
raise Exception("Invalid max depth value.")
self.max_depth = max_depth
def fit(self, data, class_column):
'''Fit a tree on data, in which class_column is the target.'''
if (not isinstance(data, pd.DataFrame) or class_column not in data.columns):
raise Exception("Invalid input")
self.data = data
self.class_column = class_column
self.non_class_columns = [c for c in data.columns if c != class_column]
self.root = self.recursive_build_tree(data, data, depth=0, attributes=self.non_class_columns, name='0')
# Node __init__(self, name, node_type, data, label=None, split=None)
def recursive_build_tree(self, data, parent_data, depth, attributes, name):
if len(data) == 0: # data set is empty
return Node(name=name, node_type='leaf', label=self.plurality_value(parent_data),
data=data)
elif depth == self.max_depth : # reach the max depth of the tree, can not split
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
elif np.all(data[self.class_column].values == data[self.class_column].values[0]):
return Node(name=name, node_type='leaf', label= list(set(data[self.class_column].values))[0], data=data)
elif len(attributes) == 0: # only has the class column, no attribute
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
#data[attributes].drop_duplicates()
elif len(data[attributes].drop_duplicates()) == 1: # noise data
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
else:
split = None
for attribute in attributes:
temp_split = Split(data, self.class_column, attribute)
# set the split with higher info gain as the true split
if not split or temp_split.info_gain > split.info_gain:
split = temp_split
root = Node(name=name, node_type='interval', data=data, split=split)
non_class_columns = attributes
if len(set(data[self.class_column].values)) == 2: # the attribute is discrete
attributes = [c for c in attributes if c != root.split.split_column]
# recursive_build_tree(self, data, parent_data, depth, attributes, name)
root.children.append(self.recursive_build_tree(root.split.partition_list[0][attributes +[self.class_column]], data, depth + 1, attributes, name + '.0'))
root.children.append(self.recursive_build_tree(root.split.partition_list[1][attributes +[self.class_column]], data, depth + 1, attributes, name + '.1'))
return root
def predict(self, test):
# WRITE YOUR CODE HERE
res = []
test = test.values
for i in range(len(test)):
node = self.root
while node.node_type != 'leaf':
split = node.split
#if test[split.split_column][i] <= split.point:
if test[i, split.split_column] <= split.point:
node = node.children[0]
else:
|
res.append(node.label)
return res
def plurality_value(self, data):
#return data[self.class_column].value_counts().idxmax()
return np.argmax(np.bincount(data[self.class_column].astype(int).values))
def print(self):
self.recursive_print(self.root)
def recursive_print(self, node):
print(node)
for u in node.children:
self.recursive_print(u)
tree = tree = DecisionTree(3)
tree.fit(dsmall, 279)
tree.print()
def validation_curve():
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/arrhythmia/arrhythmia.data"
df = pd.read_csv(url, header = None, na_values="?")
MAX_DEPTH = 20
NUMBER_OF_COLUMNS = 278
NUMBER_OF_ROWS = len(df)
CLASS_COLUMN = 279
# fill the empty value
for i in range(280):
if df[i].isnull().sum() > 0:
df.iloc[:,i].fillna(df[i].mode()[0], inplace=True)
df = df.iloc[:NUMBER_OF_ROWS,list(range(NUMBER_OF_COLUMNS)) + [CLASS_COLUMN]]
# shuffle the data
df = df.sample(frac=1).reset_index(drop=True)
# split the data into 3 parts
datasets = np.array_split(df, 3)
# initilize the correct ratio of training data and test data
training_error_ratio = []
test_error_ratio = []
for depth in range(MAX_DEPTH + 1)[2::2]:
# initialize the tree
dt = DecisionTree(depth)
training_error_ratio_sum = 0
test_error_ratio_sum = 0
for sets in [[0,1,2],[1,2,0],[0,2,1]]:
# get the training data and test data
training_data = pd.concat([datasets[sets[0]],datasets[sets[1]]])
test_data = datasets[sets[2]]
# train the model
dt.fit(training_data, CLASS_COLUMN)
# get the prediction result of the training data and test | node = node.children[1] | conditional_block |
dt.py | ia/arrhythmia.data"
df = pd.read_csv(url, header = None, na_values="?")
dsmall = df.iloc[0:10, list(range(3)) + [279]]
class Node(object):
def __init__(self, name, node_type, data, label=None, split=None):
self.name = name
self.node_type = node_type
self.label = label
self.data = data
self.split = split
self.children = []
def __repr__(self):
data = self.data
if self.node_type != 'leaf':
s = (f"{self.name} Internal node with {data[data.columns[0]].count()} rows; split"
f" {self.split.split_column} at {self.split.point:.2f} for children with"
f" {[p[p.columns[0]].count() for p in self.split.partitions()]} rows"
f" and infomation gain {self.split.info_gain:.5f}")
else:
s = (f"{self.name} Leaf with {data[data.columns[0]].count()} rows, and label"
f" {self.label}")
return s
class Split(object):
def __init__(self, data, class_column, split_column, point=None):
self.data = data
self.class_column = class_column
self.split_column = split_column
self.info_gain = None
self.point = point
self.partition_list = None # stores the data points on each side of the split
self.find_split_point()
self.partitions()
def compute_entropy(self, data):
data = data.astype(int)
#unique, count = np.unique(data, return_counts=True)
count = np.bincount(data)
count = count[count != 0]
p = count / np.sum(count)
return -np.sum(p * np.log2(p))
def compute_info_gain(self, neg, pos):
data = self.data[self.class_column].values.astype(int)
H0 = self.compute_entropy(data)
p_neg = len(neg) / len(data)
p_pos = len(pos) / len(data)
H_n = p_neg * self.compute_entropy(neg)
H_p = p_pos * self.compute_entropy(pos)
Ha = H_p + H_n
return H0 - Ha
def find_split_point(self):
data = self.data[[self.split_column, self.class_column]].values
attr_value = data[data[:,0].argsort()][:,0]
idx = data[data[:,0].argsort()][:,-1]
max_IG = -np.inf
for i in range(len(attr_value) - 1):
if attr_value[i] != attr_value[i + 1] and idx[i] != idx[i + 1]:
split_point = (attr_value[i] + attr_value[i + 1]) / 2
neg = idx[:i+1]
pos = idx[i+1:]
if self.compute_info_gain(neg, pos) > max_IG:
max_IG = self.compute_info_gain(neg, pos)
self.point = split_point
self.info_gain = max_IG
def partitions(self):
'''Get the two partitions (child nodes) for this split.'''
if self.partition_list:
# This check ensures that the list is computed at most once. Once computed
# it is stored
return self.partition_list
data = self.data
split_column = self.split_column
partition_list = []
partition_list.append(data[data[split_column] <= self.point])
partition_list.append(data[data[split_column] > self.point])
self.partition_list = partition_list
class DecisionTree(object):
def __init__(self, max_depth=None):
if (max_depth is not None and (max_depth != int(max_depth) or max_depth < 0)):
raise Exception("Invalid max depth value.")
self.max_depth = max_depth
def fit(self, data, class_column):
'''Fit a tree on data, in which class_column is the target.'''
if (not isinstance(data, pd.DataFrame) or class_column not in data.columns):
raise Exception("Invalid input")
self.data = data
self.class_column = class_column
self.non_class_columns = [c for c in data.columns if c != class_column]
self.root = self.recursive_build_tree(data, data, depth=0, attributes=self.non_class_columns, name='0')
# Node __init__(self, name, node_type, data, label=None, split=None)
def recursive_build_tree(self, data, parent_data, depth, attributes, name):
if len(data) == 0: # data set is empty
return Node(name=name, node_type='leaf', label=self.plurality_value(parent_data),
data=data)
elif depth == self.max_depth : # reach the max depth of the tree, can not split
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
elif np.all(data[self.class_column].values == data[self.class_column].values[0]):
return Node(name=name, node_type='leaf', label= list(set(data[self.class_column].values))[0], data=data)
elif len(attributes) == 0: # only has the class column, no attribute
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
#data[attributes].drop_duplicates()
elif len(data[attributes].drop_duplicates()) == 1: # noise data
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
else:
split = None
for attribute in attributes:
temp_split = Split(data, self.class_column, attribute)
# set the split with higher info gain as the true split
if not split or temp_split.info_gain > split.info_gain:
split = temp_split
root = Node(name=name, node_type='interval', data=data, split=split)
non_class_columns = attributes
if len(set(data[self.class_column].values)) == 2: # the attribute is discrete
attributes = [c for c in attributes if c != root.split.split_column]
# recursive_build_tree(self, data, parent_data, depth, attributes, name)
root.children.append(self.recursive_build_tree(root.split.partition_list[0][attributes +[self.class_column]], data, depth + 1, attributes, name + '.0'))
root.children.append(self.recursive_build_tree(root.split.partition_list[1][attributes +[self.class_column]], data, depth + 1, attributes, name + '.1'))
return root
def predict(self, test):
# WRITE YOUR CODE HERE
res = []
test = test.values
for i in range(len(test)):
node = self.root
while node.node_type != 'leaf':
split = node.split
#if test[split.split_column][i] <= split.point:
if test[i, split.split_column] <= split.point:
node = node.children[0]
else:
node = node.children[1]
res.append(node.label)
return res
def plurality_value(self, data):
#return data[self.class_column].value_counts().idxmax()
return np.argmax(np.bincount(data[self.class_column].astype(int).values))
def print(self):
self.recursive_print(self.root)
def recursive_print(self, node):
print(node)
for u in node.children:
self.recursive_print(u)
tree = tree = DecisionTree(3)
tree.fit(dsmall, 279)
tree.print()
def validation_curve():
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/arrhythmia/arrhythmia.data"
df = pd.read_csv(url, header = None, na_values="?")
MAX_DEPTH = 20
NUMBER_OF_COLUMNS = 278
NUMBER_OF_ROWS = len(df)
CLASS_COLUMN = 279
# fill the empty value
for i in range(280):
if df[i].isnull().sum() > 0:
df.iloc[:,i].fillna(df[i].mode()[0], inplace=True)
df = df.iloc[:NUMBER_OF_ROWS,list(range(NUMBER_OF_COLUMNS)) + [CLASS_COLUMN]]
# shuffle the data
df = df.sample(frac=1).reset_index(drop=True)
# split the data into 3 parts
datasets = np.array_split(df, 3)
| for depth in range(MAX_DEPTH + 1)[2::2]:
# initialize the tree
dt = DecisionTree(depth)
training_error_ratio_sum = 0
test_error_ratio_sum = 0
for sets in [[0,1,2],[1,2,0],[0,2,1]]:
# get the training data and test data
training_data = pd.concat([datasets[sets[0]],datasets[sets[1]]])
test_data = datasets[sets[2]]
# train the model
dt.fit(training_data, CLASS_COLUMN)
# get the prediction result of the training data and test data | # initilize the correct ratio of training data and test data
training_error_ratio = []
test_error_ratio = [] | random_line_split |
dt.py | ia/arrhythmia.data"
df = pd.read_csv(url, header = None, na_values="?")
dsmall = df.iloc[0:10, list(range(3)) + [279]]
class Node(object):
def __init__(self, name, node_type, data, label=None, split=None):
self.name = name
self.node_type = node_type
self.label = label
self.data = data
self.split = split
self.children = []
def __repr__(self):
data = self.data
if self.node_type != 'leaf':
s = (f"{self.name} Internal node with {data[data.columns[0]].count()} rows; split"
f" {self.split.split_column} at {self.split.point:.2f} for children with"
f" {[p[p.columns[0]].count() for p in self.split.partitions()]} rows"
f" and infomation gain {self.split.info_gain:.5f}")
else:
s = (f"{self.name} Leaf with {data[data.columns[0]].count()} rows, and label"
f" {self.label}")
return s
class Split(object):
def __init__(self, data, class_column, split_column, point=None):
self.data = data
self.class_column = class_column
self.split_column = split_column
self.info_gain = None
self.point = point
self.partition_list = None # stores the data points on each side of the split
self.find_split_point()
self.partitions()
def compute_entropy(self, data):
data = data.astype(int)
#unique, count = np.unique(data, return_counts=True)
count = np.bincount(data)
count = count[count != 0]
p = count / np.sum(count)
return -np.sum(p * np.log2(p))
def | (self, neg, pos):
data = self.data[self.class_column].values.astype(int)
H0 = self.compute_entropy(data)
p_neg = len(neg) / len(data)
p_pos = len(pos) / len(data)
H_n = p_neg * self.compute_entropy(neg)
H_p = p_pos * self.compute_entropy(pos)
Ha = H_p + H_n
return H0 - Ha
def find_split_point(self):
data = self.data[[self.split_column, self.class_column]].values
attr_value = data[data[:,0].argsort()][:,0]
idx = data[data[:,0].argsort()][:,-1]
max_IG = -np.inf
for i in range(len(attr_value) - 1):
if attr_value[i] != attr_value[i + 1] and idx[i] != idx[i + 1]:
split_point = (attr_value[i] + attr_value[i + 1]) / 2
neg = idx[:i+1]
pos = idx[i+1:]
if self.compute_info_gain(neg, pos) > max_IG:
max_IG = self.compute_info_gain(neg, pos)
self.point = split_point
self.info_gain = max_IG
def partitions(self):
'''Get the two partitions (child nodes) for this split.'''
if self.partition_list:
# This check ensures that the list is computed at most once. Once computed
# it is stored
return self.partition_list
data = self.data
split_column = self.split_column
partition_list = []
partition_list.append(data[data[split_column] <= self.point])
partition_list.append(data[data[split_column] > self.point])
self.partition_list = partition_list
class DecisionTree(object):
def __init__(self, max_depth=None):
if (max_depth is not None and (max_depth != int(max_depth) or max_depth < 0)):
raise Exception("Invalid max depth value.")
self.max_depth = max_depth
def fit(self, data, class_column):
'''Fit a tree on data, in which class_column is the target.'''
if (not isinstance(data, pd.DataFrame) or class_column not in data.columns):
raise Exception("Invalid input")
self.data = data
self.class_column = class_column
self.non_class_columns = [c for c in data.columns if c != class_column]
self.root = self.recursive_build_tree(data, data, depth=0, attributes=self.non_class_columns, name='0')
# Node __init__(self, name, node_type, data, label=None, split=None)
def recursive_build_tree(self, data, parent_data, depth, attributes, name):
if len(data) == 0: # data set is empty
return Node(name=name, node_type='leaf', label=self.plurality_value(parent_data),
data=data)
elif depth == self.max_depth : # reach the max depth of the tree, can not split
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
elif np.all(data[self.class_column].values == data[self.class_column].values[0]):
return Node(name=name, node_type='leaf', label= list(set(data[self.class_column].values))[0], data=data)
elif len(attributes) == 0: # only has the class column, no attribute
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
#data[attributes].drop_duplicates()
elif len(data[attributes].drop_duplicates()) == 1: # noise data
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
else:
split = None
for attribute in attributes:
temp_split = Split(data, self.class_column, attribute)
# set the split with higher info gain as the true split
if not split or temp_split.info_gain > split.info_gain:
split = temp_split
root = Node(name=name, node_type='interval', data=data, split=split)
non_class_columns = attributes
if len(set(data[self.class_column].values)) == 2: # the attribute is discrete
attributes = [c for c in attributes if c != root.split.split_column]
# recursive_build_tree(self, data, parent_data, depth, attributes, name)
root.children.append(self.recursive_build_tree(root.split.partition_list[0][attributes +[self.class_column]], data, depth + 1, attributes, name + '.0'))
root.children.append(self.recursive_build_tree(root.split.partition_list[1][attributes +[self.class_column]], data, depth + 1, attributes, name + '.1'))
return root
def predict(self, test):
# WRITE YOUR CODE HERE
res = []
test = test.values
for i in range(len(test)):
node = self.root
while node.node_type != 'leaf':
split = node.split
#if test[split.split_column][i] <= split.point:
if test[i, split.split_column] <= split.point:
node = node.children[0]
else:
node = node.children[1]
res.append(node.label)
return res
def plurality_value(self, data):
#return data[self.class_column].value_counts().idxmax()
return np.argmax(np.bincount(data[self.class_column].astype(int).values))
def print(self):
self.recursive_print(self.root)
def recursive_print(self, node):
print(node)
for u in node.children:
self.recursive_print(u)
tree = tree = DecisionTree(3)
tree.fit(dsmall, 279)
tree.print()
def validation_curve():
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/arrhythmia/arrhythmia.data"
df = pd.read_csv(url, header = None, na_values="?")
MAX_DEPTH = 20
NUMBER_OF_COLUMNS = 278
NUMBER_OF_ROWS = len(df)
CLASS_COLUMN = 279
# fill the empty value
for i in range(280):
if df[i].isnull().sum() > 0:
df.iloc[:,i].fillna(df[i].mode()[0], inplace=True)
df = df.iloc[:NUMBER_OF_ROWS,list(range(NUMBER_OF_COLUMNS)) + [CLASS_COLUMN]]
# shuffle the data
df = df.sample(frac=1).reset_index(drop=True)
# split the data into 3 parts
datasets = np.array_split(df, 3)
# initilize the correct ratio of training data and test data
training_error_ratio = []
test_error_ratio = []
for depth in range(MAX_DEPTH + 1)[2::2]:
# initialize the tree
dt = DecisionTree(depth)
training_error_ratio_sum = 0
test_error_ratio_sum = 0
for sets in [[0,1,2],[1,2,0],[0,2,1]]:
# get the training data and test data
training_data = pd.concat([datasets[sets[0]],datasets[sets[1]]])
test_data = datasets[sets[2]]
# train the model
dt.fit(training_data, CLASS_COLUMN)
# get the prediction result of the training data and test | compute_info_gain | identifier_name |
main.go | m.InitMain()
m.InitMysqlDB()
m.InitMysqlTable()
m.InitConnPooling()
if isUpdated {
// save mysql proxy.
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
CheckError(err)
}
// panic(fmt.Sprintf("OK: %#v", m))
}
// get the table status.
func (m *MysqlProxy) GetStatus() (map[string]interface{}, error) {
result := map[string]interface{}{}
result["main"] = redis.EncodeData(m)
tables := []string{}
shardDB := []string{}
for _, table := range m.Tables {
tables = append(tables, redis.EncodeData(table))
}
for _, db := range m.ShardDBs {
shardDB = append(shardDB, redis.EncodeData(db))
}
result["tables"] = tables
result["sharddbs"] = shardDB
return result, nil
}
// restore the main proxy data.
func (m *MysqlProxy) InitMain() {
pr, err := redis.ReadDB("MysqlProxy", "main")
CheckError(err)
if len(pr) == 0 { return }
for _, proxy := range pr {
proxy = proxy["main"].(map[string]interface {})
m.TableTotal = uint64(proxy["TableTotal"].(float64))
m.SizeTotal = uint64(proxy["SizeTotal"].(float64))
m.CurGId = uint64(proxy["CurGId"].(float64))
if ttableIds, isOk := proxy["TableIds"].([]interface{}); isOk && len(ttableIds) > 0 {
m.TableIds = redis.RestorePrimaryId(ttableIds)
} else {
m.TableIds = []string{}
}
if dbIds, isOk := proxy["ShardDBIds"].([]interface{}); isOk && len(dbIds) > 0 {
m.ShardDBIds = redis.RestorePrimaryId(dbIds)
} else {
m.ShardDBIds = []string{}
}
m.ShardDBCnt = int(proxy["ShardDBCnt"].(float64))
schema.ShardDBCnt = m.ShardDBCnt
}
// panic(fmt.Sprintf("%#v", m))
}
// get the current db cluster data infomations
func (m *MysqlProxy) InitMysqlDB() {
// panic(fmt.Sprintf("%#v, %#v", m.ShardDBIds, len(m.ShardDBIds)))
if len(m.ShardDBIds) == 0 {
// init the shard DB
shardDBs := []*schema.MysqlShardDB{}
shardDBIds := []string{}
m.ShardDBCnt = 0
for _, group := range host.Groups {
m.ShardDBCnt++
shardDb, err := m.BuildNewShardDB(&group, "shard" + strconv.Itoa(m.ShardDBCnt))
CheckError(err)
shardDBs = append(shardDBs, shardDb)
shardDBIds = append(shardDBIds, shardDb.Id)
}
m.ShardDBs = shardDBs
m.ShardDBIds = shardDBIds
// to prepare save new data.
isUpdated = true
// add shard dbs map.
schema.Sdbs = shardDBs
} else {
// 分析数据,并恢复至MysqlProxy结构体中.
shardDBs := []*schema.MysqlShardDB{}
for _, sid := range m.ShardDBIds {
dbs, err := redis.ReadDB("MysqlShardDB", sid)
CheckError(err)
if len(dbs) != 1 { panic("no found relation shard db for id:" + sid) }
sdb := dbs[0][sid].(map[string]interface {})
groupId := sdb["HostGroupId"].(string)
curGroup, err := host.GetHostGroupById(groupId)
CheckError(err)
shardDB := &schema.MysqlShardDB{
Id: sdb["Id"].(string),
Name: sdb["Name"].(string),
TableTotal: uint64(sdb["TableTotal"].(float64)),
SizeTotal: uint64(sdb["SizeTotal"].(float64)),
HostGroupId:groupId,
Created: int64(sdb["Created"].(float64)),
HostGroup: curGroup,
}
shardDBs = append(shardDBs, shardDB)
}
m.ShardDBs = shardDBs
// add shard dbs map.
schema.Sdbs = shardDBs
}
// listen the sharddb change status.
locker := &sync.Mutex{}
go func() {
for {
newShardDB := <-schema.NewShardDBCh
locker.Lock()
defer locker.Unlock()
m.ShardDBIds = append(m.ShardDBIds, newShardDB.Id)
m.ShardDBs = append(m.ShardDBs, newShardDB)
schema.Sdbs = m.ShardDBs
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
if err != nil {
log.Printf("new shard db listener error:%s", err)
}
m.ShardDBCnt++
schema.ShardDBCnt = m.ShardDBCnt
fmt.Printf("current shard total: %d\n", schema.ShardDBCnt)
}
}()
// listen the table drop action.
go func() {
for {
dropedTable := <-schema.DropedTableCh
m.DeleteTable(dropedTable)
}
}()
// panic(fmt.Sprintf("in init shard db: %#v, %#v", m))
}
func (m *MysqlProxy) DeleteTable(table *schema.MysqlTable) {
curTables := []*schema.MysqlTable{}
curTableIds := []string{}
for _, one := range m.Tables {
if one.Name != table.Name {
curTables = append(curTables, one)
}
}
for _, one := range m.TableIds {
if one != table.Id {
curTableIds = append(curTableIds, one)
}
}
// delete the relations.
m.TableIds = curTableIds
m.Tables = curTables
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
if err != nil { fmt.Printf("Delete table error when write redis: %s\n", err); return }
schema.Tables = curTables
// delete selfs.
table.Destroy()
}
// to init or restore the table infomation.
func (m *MysqlProxy) InitMysqlTable() {
if len(m.TableIds) == 0 { return }
// 分析数据,并恢复至MysqlProxy结构体中.
tables := []*schema.MysqlTable{}
for _, tid := range m.TableIds {
tbs, err := redis.ReadDB("MysqlTable", tid)
CheckError(err)
if len(tbs) != 1 { panic("no found relation table for id: " + tid) }
tb := tbs[0][tid].(map[string]interface {})
// panic(fmt.Sprintf("%#v", tbs))
shardTbIds := []string{}
if std, isOk := tb["ShardIds"].([]interface{}); isOk && len(std) > 0 {
shardTbIds = redis.RestorePrimaryId(std)
}
shardTb := []*schema.MysqlShardTable{}
table := &schema.MysqlTable{
Id: tb["Id"].(string),
Name: tb["Name"].(string),
CurGId: uint64(tb["CurGId"].(float64)),
RowTotal: uint64(tb["RowTotal"].(float64)),
ShardIds: shardTbIds,
Created: int64(tb["Created"].(float64)),
Shards: shardTb,
}
if len(shardTbIds) > 0 {
// create new shard table
shardTb, err = m.GetShardTableByIds(shardTbIds)
CheckError(err)
table.Shards = shardTb
err = table.RestoreColumnsByDB()
CheckError(err)
}
// fmt.Printf("Init table `%s` done\n", table.Name)
tables = append(tables, table)
}
m.Tables = tables
schema.Tables = m.Tables
}
// to get shard table info.
func (m *MysqlProxy) GetShardTableByIds(ids []string) ([]*schema.MysqlShardTable, error) {
if len(ids) == 0 { return nil, nil }
tables := []*schema.MysqlShardTable{}
for _, id := range ids {
tbs, err := redis.ReadDB("MysqlShardTable", | dLogHostStatus()
return proxy
}
// To init the necessary data.
func (m *MysqlProxy) Init() {
| identifier_body | |
main.go | ", redis.EncodeData(m), "MysqlProxy")
CheckError(err)
}
// panic(fmt.Sprintf("OK: %#v", m))
}
// get the table status.
func (m *MysqlProxy) GetStatus() (map[string]interface{}, error) {
result := map[string]interface{}{}
result["main"] = redis.EncodeData(m)
tables := []string{}
shardDB := []string{}
for _, table := range m.Tables {
tables = append(tables, redis.EncodeData(table))
}
for _, db := range m.ShardDBs {
shardDB = append(shardDB, redis.EncodeData(db))
}
result["tables"] = tables
result["sharddbs"] = shardDB
return result, nil
}
// restore the main proxy data.
func (m *MysqlProxy) InitMain() {
pr, err := redis.ReadDB("MysqlProxy", "main")
CheckError(err)
if len(pr) == 0 { return }
for _, proxy := range pr {
proxy = proxy["main"].(map[string]interface {})
m.TableTotal = uint64(proxy["TableTotal"].(float64))
m.SizeTotal = uint64(proxy["SizeTotal"].(float64))
m.CurGId = uint64(proxy["CurGId"].(float64))
if ttableIds, isOk := proxy["TableIds"].([]interface{}); isOk && len(ttableIds) > 0 {
m.TableIds = redis.RestorePrimaryId(ttableIds)
} else {
m.TableIds = []string{}
}
if dbIds, isOk := proxy["ShardDBIds"].([]interface{}); isOk && len(dbIds) > 0 {
m.ShardDBIds = redis.RestorePrimaryId(dbIds)
} else {
m.ShardDBIds = []string{}
}
m.ShardDBCnt = int(proxy["ShardDBCnt"].(float64))
schema.ShardDBCnt = m.ShardDBCnt
}
// panic(fmt.Sprintf("%#v", m))
}
// get the current db cluster data infomations
func (m *MysqlProxy) InitMysqlDB() {
// panic(fmt.Sprintf("%#v, %#v", m.ShardDBIds, len(m.ShardDBIds)))
if len(m.ShardDBIds) == 0 {
// init the shard DB
shardDBs := []*schema.MysqlShardDB{}
shardDBIds := []string{}
m.ShardDBCnt = 0
for _, group := range host.Groups {
m.ShardDBCnt++
shardDb, err := m.BuildNewShardDB(&group, "shard" + strconv.Itoa(m.ShardDBCnt))
CheckError(err)
shardDBs = append(shardDBs, shardDb)
shardDBIds = append(shardDBIds, shardDb.Id)
}
m.ShardDBs = shardDBs
m.ShardDBIds = shardDBIds
// to prepare save new data.
isUpdated = true
// add shard dbs map.
schema.Sdbs = shardDBs
} else {
// 分析数据,并恢复至MysqlProxy结构体中.
shardDBs := []*schema.MysqlShardDB{}
for _, sid := range m.ShardDBIds {
dbs, err := redis.ReadDB("MysqlShardDB", sid)
CheckError(err)
if len(dbs) != 1 { panic("no found relation shard db for id:" + sid) }
sdb := dbs[0][sid].(map[string]interface {})
groupId := sdb["HostGroupId"].(string)
curGroup, err := host.GetHostGroupById(groupId)
CheckError(err)
shardDB := &schema.MysqlShardDB{
Id: sdb["Id"].(string),
Name: sdb["Name"].(string),
TableTotal: uint64(sdb["TableTotal"].(float64)),
SizeTotal: uint64(sdb["SizeTotal"].(float64)),
HostGroupId:groupId,
Created: int64(sdb["Created"].(float64)),
HostGroup: curGroup,
}
shardDBs = append(shardDBs, shardDB)
}
m.ShardDBs = shardDBs
// add shard dbs map.
schema.Sdbs = shardDBs
}
// listen the sharddb change status.
locker := &sync.Mutex{}
go func() {
for {
newShardDB := <-schema.NewShardDBCh
locker.Lock()
defer locker.Unlock()
m.ShardDBIds = append(m.ShardDBIds, newShardDB.Id)
m.ShardDBs = append(m.ShardDBs, newShardDB)
schema.Sdbs = m.ShardDBs
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
if err != nil {
log.Printf("new shard db listener error:%s", err)
}
m.ShardDBCnt++
schema.ShardDBCnt = m.ShardDBCnt
fmt.Printf("current shard total: %d\n", schema.ShardDBCnt)
}
}()
// listen the table drop action.
go func() {
for {
dropedTable := <-schema.DropedTableCh
m.DeleteTable(dropedTable)
}
}()
// panic(fmt.Sprintf("in init shard db: %#v, %#v", m))
}
func (m *MysqlProxy) DeleteTable(table *schema.MysqlTable) {
curTables := []*schema.MysqlTable{}
curTableIds := []string{}
for _, one := range m.Tables {
if one.Name != table.Name {
curTables = append(curTables, one)
}
}
for _, one := range m.TableIds {
if one != table.Id {
curTableIds = append(curTableIds, one)
}
}
// delete the relations.
m.TableIds = curTableIds
m.Tables = curTables
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
if err != nil { fmt.Printf("Delete table error when write redis: %s\n", err); return }
schema.Tables = curTables
// delete selfs.
table.Destroy()
}
// to init or restore the table infomation.
func (m *MysqlProxy) InitMysqlTable() {
if len(m.TableIds) == 0 { return }
// 分析数据,并恢复至MysqlProxy结构体中.
tables := []*schema.MysqlTable{}
for _, tid := range m.TableIds {
tbs, err := redis.ReadDB("MysqlTable", tid)
CheckError(err)
if len(tbs) != 1 { panic("no found relation table for id: " + tid) }
tb := tbs[0][tid].(map[string]interface {})
// panic(fmt.Sprintf("%#v", tbs))
shardTbIds := []string{}
if std, isOk := tb["ShardIds"].([]interface{}); isOk && len(std) > 0 {
shardTbIds = redis.RestorePrimaryId(std)
}
shardTb := []*schema.MysqlShardTable{}
table := &schema.MysqlTable{
Id: tb["Id"].(string),
Name: tb["Name"].(string),
CurGId: uint64(tb["CurGId"].(float64)),
RowTotal: uint64(tb["RowTotal"].(float64)),
ShardIds: shardTbIds,
Created: int64(tb["Created"].(float64)),
Shards: shardTb,
}
if len(shardTbIds) > 0 {
// create new shard table
shardTb, err = m.GetShardTableByIds(shardTbIds)
CheckError(err)
table.Shards = shardTb
err = table.RestoreColumnsByDB()
CheckError(err)
}
// fmt.Printf("Init table `%s` done\n", table.Name)
tables = append(tables, table)
}
m.Tables = tables
schema.Tables = m.Tables
}
// to get shard table info.
func (m *MysqlProxy) GetShardTableByIds(ids []string) ([]*schema.MysqlShardTable, error) {
if len(ids) == 0 { return nil, nil }
| schema.MysqlShardTable{}
for _, id := range ids {
tbs, err := redis.ReadDB("MysqlShardTable", id)
if err != nil { return nil, err }
if len(tbs) != 1 { return nil, errors.New("no found the shard table for id: " + id) }
tb := tbs[0][id].(map[string]interface {})
shardDbId := tb["ShardDBId"].(string)
| tables := []* | identifier_name |
main.go | redis.EncodeData(m), "MysqlProxy")
CheckError(err)
}
// panic(fmt.Sprintf("OK: %#v", m))
}
// get the table status.
func (m *MysqlProxy) GetStatus() (map[string]interface{}, error) {
result := map[string]interface{}{}
result["main"] = redis.EncodeData(m)
tables := []string{}
shardDB := []string{}
for _, table := range m.Tables {
tables = append(tables, redis.EncodeData(table))
}
for _, db := range m.ShardDBs {
shardDB = append(shardDB, redis.EncodeData(db))
}
result["tables"] = tables
result["sharddbs"] = shardDB
return result, nil
}
// restore the main proxy data.
func (m *MysqlProxy) InitMain() {
pr, err := redis.ReadDB("MysqlProxy", "main")
CheckError(err)
if len(pr) == 0 { return }
for _, proxy := range pr {
proxy = proxy["main"].(map[string]interface {})
m.TableTotal = uint64(proxy["TableTotal"].(float64))
m.SizeTotal = uint64(proxy["SizeTotal"].(float64))
m.CurGId = uint64(proxy["CurGId"].(float64))
if ttableIds, isOk := proxy["TableIds"].([]interface{}); isOk && len(ttableIds) > 0 {
m.TableIds = redis.RestorePrimaryId(ttableIds)
} else {
m.TableIds = []string{}
}
if dbIds, isOk := proxy["ShardDBIds"].([]interface{}); isOk && len(dbIds) > 0 {
m.ShardDBIds = redis.RestorePrimaryId(dbIds)
} else {
m.ShardDBIds = []string{}
}
m.ShardDBCnt = int(proxy["ShardDBCnt"].(float64))
schema.ShardDBCnt = m.ShardDBCnt
}
// panic(fmt.Sprintf("%#v", m))
}
// get the current db cluster data infomations
func (m *MysqlProxy) InitMysqlDB() {
// panic(fmt.Sprintf("%#v, %#v", m.ShardDBIds, len(m.ShardDBIds)))
if len(m.ShardDBIds) == 0 {
// init the shard DB
shardDBs := []*schema.MysqlShardDB{}
shardDBIds := []string{}
m.ShardDBCnt = 0
for _, group := range host.Groups {
m.ShardDBCnt++
shardDb, err := m.BuildNewShardDB(&group, "shard" + strconv.Itoa(m.ShardDBCnt))
CheckError(err)
shardDBs = append(shardDBs, shardDb)
shardDBIds = append(shardDBIds, shardDb.Id)
}
m.ShardDBs = shardDBs
m.ShardDBIds = shardDBIds
// to prepare save new data.
isUpdated = true
// add shard dbs map.
schema.Sdbs = shardDBs
} else {
// 分析数据,并恢复至MysqlProxy结构体中.
shardDBs := []*schema.MysqlShardDB{}
for _, sid := range m.ShardDBIds {
dbs, err := redis.ReadDB("MysqlShardDB", sid)
CheckError(err)
| m.ShardDBs = shardDBs
// add shard dbs map.
schema.Sdbs
= shardDBs
}
// listen the sharddb change status.
locker := &sync.Mutex{}
go func() {
for {
newShardDB := <-schema.NewShardDBCh
locker.Lock()
defer locker.Unlock()
m.ShardDBIds = append(m.ShardDBIds, newShardDB.Id)
m.ShardDBs = append(m.ShardDBs, newShardDB)
schema.Sdbs = m.ShardDBs
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
if err != nil {
log.Printf("new shard db listener error:%s", err)
}
m.ShardDBCnt++
schema.ShardDBCnt = m.ShardDBCnt
fmt.Printf("current shard total: %d\n", schema.ShardDBCnt)
}
}()
// listen the table drop action.
go func() {
for {
dropedTable := <-schema.DropedTableCh
m.DeleteTable(dropedTable)
}
}()
// panic(fmt.Sprintf("in init shard db: %#v, %#v", m))
}
func (m *MysqlProxy) DeleteTable(table *schema.MysqlTable) {
curTables := []*schema.MysqlTable{}
curTableIds := []string{}
for _, one := range m.Tables {
if one.Name != table.Name {
curTables = append(curTables, one)
}
}
for _, one := range m.TableIds {
if one != table.Id {
curTableIds = append(curTableIds, one)
}
}
// delete the relations.
m.TableIds = curTableIds
m.Tables = curTables
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
if err != nil { fmt.Printf("Delete table error when write redis: %s\n", err); return }
schema.Tables = curTables
// delete selfs.
table.Destroy()
}
// to init or restore the table infomation.
func (m *MysqlProxy) InitMysqlTable() {
if len(m.TableIds) == 0 { return }
// 分析数据,并恢复至MysqlProxy结构体中.
tables := []*schema.MysqlTable{}
for _, tid := range m.TableIds {
tbs, err := redis.ReadDB("MysqlTable", tid)
CheckError(err)
if len(tbs) != 1 { panic("no found relation table for id: " + tid) }
tb := tbs[0][tid].(map[string]interface {})
// panic(fmt.Sprintf("%#v", tbs))
shardTbIds := []string{}
if std, isOk := tb["ShardIds"].([]interface{}); isOk && len(std) > 0 {
shardTbIds = redis.RestorePrimaryId(std)
}
shardTb := []*schema.MysqlShardTable{}
table := &schema.MysqlTable{
Id: tb["Id"].(string),
Name: tb["Name"].(string),
CurGId: uint64(tb["CurGId"].(float64)),
RowTotal: uint64(tb["RowTotal"].(float64)),
ShardIds: shardTbIds,
Created: int64(tb["Created"].(float64)),
Shards: shardTb,
}
if len(shardTbIds) > 0 {
// create new shard table
shardTb, err = m.GetShardTableByIds(shardTbIds)
CheckError(err)
table.Shards = shardTb
err = table.RestoreColumnsByDB()
CheckError(err)
}
// fmt.Printf("Init table `%s` done\n", table.Name)
tables = append(tables, table)
}
m.Tables = tables
schema.Tables = m.Tables
}
// to get shard table info.
func (m *MysqlProxy) GetShardTableByIds(ids []string) ([]*schema.MysqlShardTable, error) {
if len(ids) == 0 { return nil, nil }
tables := []*schema.MysqlShardTable{}
for _, id := range ids {
tbs, err := redis.ReadDB("MysqlShardTable", id)
if err != nil { return nil, err }
if len(tbs) != 1 { return nil, errors.New("no found the shard table for id: " + id) }
tb := tbs[0][id].(map[string]interface {})
shardDbId := tb["ShardDBId"].(string)
| if len(dbs) != 1 { panic("no found relation shard db for id:" + sid) }
sdb := dbs[0][sid].(map[string]interface {})
groupId := sdb["HostGroupId"].(string)
curGroup, err := host.GetHostGroupById(groupId)
CheckError(err)
shardDB := &schema.MysqlShardDB{
Id: sdb["Id"].(string),
Name: sdb["Name"].(string),
TableTotal: uint64(sdb["TableTotal"].(float64)),
SizeTotal: uint64(sdb["SizeTotal"].(float64)),
HostGroupId:groupId,
Created: int64(sdb["Created"].(float64)),
HostGroup: curGroup,
}
shardDBs = append(shardDBs, shardDB)
}
| conditional_block |
main.go | Group: curGroup,
}
shardDBs = append(shardDBs, shardDB)
}
m.ShardDBs = shardDBs
// add shard dbs map.
schema.Sdbs = shardDBs
}
// listen the sharddb change status.
locker := &sync.Mutex{}
go func() {
for {
newShardDB := <-schema.NewShardDBCh
locker.Lock()
defer locker.Unlock()
m.ShardDBIds = append(m.ShardDBIds, newShardDB.Id)
m.ShardDBs = append(m.ShardDBs, newShardDB)
schema.Sdbs = m.ShardDBs
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
if err != nil {
log.Printf("new shard db listener error:%s", err)
}
m.ShardDBCnt++
schema.ShardDBCnt = m.ShardDBCnt
fmt.Printf("current shard total: %d\n", schema.ShardDBCnt)
}
}()
// listen the table drop action.
go func() {
for {
dropedTable := <-schema.DropedTableCh
m.DeleteTable(dropedTable)
}
}()
// panic(fmt.Sprintf("in init shard db: %#v, %#v", m))
}
func (m *MysqlProxy) DeleteTable(table *schema.MysqlTable) {
curTables := []*schema.MysqlTable{}
curTableIds := []string{}
for _, one := range m.Tables {
if one.Name != table.Name {
curTables = append(curTables, one)
}
}
for _, one := range m.TableIds {
if one != table.Id {
curTableIds = append(curTableIds, one)
}
}
// delete the relations.
m.TableIds = curTableIds
m.Tables = curTables
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
if err != nil { fmt.Printf("Delete table error when write redis: %s\n", err); return }
schema.Tables = curTables
// delete selfs.
table.Destroy()
}
// to init or restore the table infomation.
func (m *MysqlProxy) InitMysqlTable() {
if len(m.TableIds) == 0 { return }
// 分析数据,并恢复至MysqlProxy结构体中.
tables := []*schema.MysqlTable{}
for _, tid := range m.TableIds {
tbs, err := redis.ReadDB("MysqlTable", tid)
CheckError(err)
if len(tbs) != 1 { panic("no found relation table for id: " + tid) }
tb := tbs[0][tid].(map[string]interface {})
// panic(fmt.Sprintf("%#v", tbs))
shardTbIds := []string{}
if std, isOk := tb["ShardIds"].([]interface{}); isOk && len(std) > 0 {
shardTbIds = redis.RestorePrimaryId(std)
}
shardTb := []*schema.MysqlShardTable{}
table := &schema.MysqlTable{
Id: tb["Id"].(string),
Name: tb["Name"].(string),
CurGId: uint64(tb["CurGId"].(float64)),
RowTotal: uint64(tb["RowTotal"].(float64)),
ShardIds: shardTbIds,
Created: int64(tb["Created"].(float64)),
Shards: shardTb,
}
if len(shardTbIds) > 0 {
// create new shard table
shardTb, err = m.GetShardTableByIds(shardTbIds)
CheckError(err)
table.Shards = shardTb
err = table.RestoreColumnsByDB()
CheckError(err)
}
// fmt.Printf("Init table `%s` done\n", table.Name)
tables = append(tables, table)
}
m.Tables = tables
schema.Tables = m.Tables
}
// to get shard table info.
func (m *MysqlProxy) GetShardTableByIds(ids []string) ([]*schema.MysqlShardTable, error) {
if len(ids) == 0 { return nil, nil }
tables := []*schema.MysqlShardTable{}
for _, id := range ids {
tbs, err := redis.ReadDB("MysqlShardTable", id)
if err != nil { return nil, err }
if len(tbs) != 1 { return nil, errors.New("no found the shard table for id: " + id) }
tb := tbs[0][id].(map[string]interface {})
shardDbId := tb["ShardDBId"].(string)
shardDb,err := m.GetShardDbById(shardDbId)
if err != nil { return nil, err }
shardTable := &schema.MysqlShardTable{
Id: tb["Id"].(string),
Name: tb["Name"].(string),
RowTotal: uint64(tb["RowTotal"].(float64)),
ShardDBId: shardDbId,
Created: int64(tb["Created"].(float64)),
ShardDB: shardDb,
}
tables = append(tables, shardTable)
}
return tables, nil
}
func (m *MysqlProxy) UpdateToRedisDB() error {
return redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
}
// get the shard db by ids.
func (m *MysqlProxy) GetShardDbById(sid string) (*schema.MysqlShardDB, error) {
if sid == "" { return nil, errors.New("Sorry, the shard db id connot is empty") }
sdb, err := redis.ReadDB("MysqlShardDB", sid)
if err != nil { return nil, err }
if len(sdb) != 1 { return nil, errors.New("Load shard db wrong!") }
tsdb := sdb[0][sid].(map[string]interface {})
groupId := tsdb["HostGroupId"].(string)
curGroup, err := host.GetHostGroupById(groupId)
if err != nil { return nil, err }
shardDB := &schema.MysqlShardDB{
Id: tsdb["Id"].(string),
Name: tsdb["Name"].(string),
TableTotal: uint64(tsdb["TableTotal"].(float64)),
SizeTotal: uint64(tsdb["SizeTotal"].(float64)),
HostGroupId:groupId,
Created: int64(tsdb["Created"].(float64)),
HostGroup: curGroup,
}
schema.ShardDBCnt++
return shardDB, nil
}
// to init the connection pooling.
func (m *MysqlProxy) InitConnPooling() {
// because the database/sql support the connection pooling
// so just to use it.
// 这里决定不采用预先就将所有的链接生成,还是使用到时再初始化连接.
}
func (m *MysqlProxy) BuildNewShardDB(group *host.Group, name string) (*schema.MysqlShardDB, error) {
if name == "" { return nil, errors.New("Sorry, can not build the no name databases") }
// init the shard db to host.
master := group.Master[0]
db, err := (&master).ConnToDB("mysql")
if err != nil { return nil, err }
stmt, err := db.Prepare(fmt.Sprintf("CREATE DATABASE `%s` DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci", name))
if err != nil { return nil, err }
_, err = stmt.Exec()
if err != nil { return nil, err }
stmt.Close()
shardDbId := redis.BuildPrimaryKey(name, true)
shardDb := &schema.MysqlShardDB{
Id: shardDbId,
Name: name,
TableTotal: 0,
SizeTotal: 0,
HostGroupId: group.Id,
Created: redis.GetCurTime(),
HostGroup: group,
}
// save this new shard database to tracker.
err = redis.WriteDB(shardDbId, redis.EncodeData(shardDb), "MysqlShardDB")
if err != nil { return nil, err }
(&master).CloseDB()
schema.ShardDBCnt++
return shardDb, nil
}
// add a new table to mysql proxy
func (m *MysqlProxy) AddTable(tab *schema.MysqlTable) error {
tables := m.Tables
tableIds := m.TableIds
if tables == nil {
tables = []*schema.MysqlTable{ tab }
tableIds = []string{ tab.Id }
} else {
tables = append(tables, tab)
tableIds = append(tableIds, tab.Id)
}
m.Tables = tables | schema.Tables = tables
m.TableIds = tableIds
return m.UpdateToRedisDB() | random_line_split | |
test_framework.py | out"])):
vout = txraw["vout"][vout_idx]
if vout["value"] == MASTERNODE_COLLATERAL:
collateral_vout = vout_idx
self.nodes[0].lockunspent(False, [{'txid': txid, 'vout': collateral_vout}])
# send to same address to reserve some funds for fees
self.nodes[0].sendtoaddress(address, 0.001)
ownerAddr = self.nodes[0].getnewaddress()
votingAddr = self.nodes[0].getnewaddress()
rewardsAddr = self.nodes[0].getnewaddress()
port = p2p_port(len(self.nodes) + idx)
if (idx % 2) == 0:
self.nodes[0].lockunspent(True, [{'txid': txid, 'vout': collateral_vout}])
proTxHash = self.nodes[0].protx('register_fund', address, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)
else:
self.nodes[0].generate(1)
proTxHash = self.nodes[0].protx('register', txid, collateral_vout, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)
self.nodes[0].generate(1)
self.mninfo.append(MasternodeInfo(proTxHash, ownerAddr, votingAddr, bls['public'], bls['secret'], address, txid, collateral_vout))
self.sync_all()
def remove_mastermode(self, idx):
mn = self.mninfo[idx]
rawtx = self.nodes[0].createrawtransaction([{"txid": mn.collateral_txid, "vout": mn.collateral_vout}], {self.nodes[0].getnewaddress(): 999.9999})
rawtx = self.nodes[0].signrawtransaction(rawtx)
self.nodes[0].sendrawtransaction(rawtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
self.mninfo.remove(mn)
def prepare_datadirs(self):
# stop faucet node so that we can copy the datadir
self.stop_node(0)
start_idx = len(self.nodes)
for idx in range(0, self.mn_count):
copy_datadir(0, idx + start_idx, self.options.tmpdir)
# restart faucet node
self.nodes[0] = self.start_node(0, self.options.tmpdir, self.extra_args)
def start_masternodes(self):
start_idx = len(self.nodes)
for idx in range(0, self.mn_count):
self.nodes.append(None)
executor = ThreadPoolExecutor(max_workers=20)
def do_start(idx):
args = ['-masternode=1',
'-masternodeblsprivkey=%s' % self.mninfo[idx].keyOperator] + self.extra_args
node = self.start_node(idx + start_idx, self.options.tmpdir, args)
self.mninfo[idx].nodeIdx = idx + start_idx
self.mninfo[idx].node = node
self.nodes[idx + start_idx] = node
wait_to_sync(node, True)
def do_connect(idx):
for i in range(0, idx + 1):
connect_nodes(self.nodes[idx + start_idx], i)
jobs = []
# start up nodes in parallel
for idx in range(0, self.mn_count):
jobs.append(executor.submit(do_start, idx))
# wait for all nodes to start up
for job in jobs:
job.result()
jobs.clear()
# connect nodes in parallel
for idx in range(0, self.mn_count):
jobs.append(executor.submit(do_connect, idx))
# wait for all nodes to connect
for job in jobs:
job.result()
jobs.clear()
sync_masternodes(self.nodes, True)
executor.shutdown()
def setup_network(self):
self.nodes = []
# create faucet node for collateral and transactions
self.nodes.append(self.start_node(0, self.options.tmpdir, self.extra_args))
required_balance = MASTERNODE_COLLATERAL * self.mn_count + 1
while self.nodes[0].getbalance() < required_balance:
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(1)
# create connected simple nodes
for i in range(0, self.num_nodes - self.mn_count - 1):
self.create_simple_node()
sync_masternodes(self.nodes, True)
# activate DIP3
if not self.fast_dip3_enforcement:
while self.nodes[0].getblockcount() < 500:
self.nodes[0].generate(10)
self.sync_all()
# create masternodes
self.prepare_masternodes()
self.prepare_datadirs()
self.start_masternodes()
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(1)
# sync nodes
self.sync_all()
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
mn_info = self.nodes[0].masternodelist("status")
assert (len(mn_info) == self.mn_count)
for status in mn_info.values():
assert (status == 'ENABLED')
def create_raw_tx(self, node_from, node_to, amount, min_inputs, max_inputs):
assert (min_inputs <= max_inputs)
# fill inputs
inputs = []
balances = node_from.listunspent()
in_amount = 0.0
last_amount = 0.0
for tx in balances:
if len(inputs) < min_inputs:
input = {}
input["txid"] = tx['txid']
input['vout'] = tx['vout']
in_amount += float(tx['amount'])
inputs.append(input)
elif in_amount > amount:
break
elif len(inputs) < max_inputs:
input = {}
input["txid"] = tx['txid']
input['vout'] = tx['vout']
in_amount += float(tx['amount'])
inputs.append(input)
else:
input = {}
input["txid"] = tx['txid']
input['vout'] = tx['vout']
in_amount -= last_amount
in_amount += float(tx['amount'])
inputs[-1] = input
last_amount = float(tx['amount'])
assert (len(inputs) >= min_inputs)
assert (len(inputs) <= max_inputs)
assert (in_amount >= amount)
# fill outputs
receiver_address = node_to.getnewaddress()
change_address = node_from.getnewaddress()
fee = 0.001
outputs = {}
outputs[receiver_address] = satoshi_round(amount)
outputs[change_address] = satoshi_round(in_amount - amount - fee)
rawtx = node_from.createrawtransaction(inputs, outputs)
ret = node_from.signrawtransaction(rawtx)
decoded = node_from.decoderawtransaction(ret['hex'])
ret = {**decoded, **ret}
return ret
def wait_for_instantlock(self, txid, node):
# wait for instantsend locks
start = time.time()
locked = False
while True:
try:
is_tx = node.getrawtransaction(txid, True)
if is_tx['instantlock']:
locked = True
break
except:
# TX not received yet?
pass
if time.time() > start + 10:
break
time.sleep(0.5)
return locked
def wait_for_sporks_same(self, timeout=30):
st = time.time()
while time.time() < st + timeout:
if self.check_sporks_same():
return
time.sleep(0.5)
raise AssertionError("wait_for_sporks_same timed out")
def check_sporks_same(self):
sporks = self.nodes[0].spork('show')
for node in self.nodes[1:]:
sporks2 = node.spork('show')
if sporks != sporks2:
return False
return True
def wait_for_quorum_phase(self, phase, check_received_messages, check_received_messages_count, timeout=30):
t = time.time()
while time.time() - t < timeout:
all_ok = True
for mn in self.mninfo:
s = mn.node.quorum("dkgstatus")["session"]
if "llmq_5_60" not in s:
all_ok = False
break
s = s["llmq_5_60"]
if "phase" not in s:
all_ok = False
break
if s["phase"] != phase:
all_ok = False
break
if check_received_messages is not None:
| if s[check_received_messages] < check_received_messages_count:
all_ok = False
break | conditional_block | |
test_framework.py | for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 156
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename(cachedir, i, "debug.log"))
os.remove(log_filename(cachedir, i, "db.log"))
os.remove(log_filename(cachedir, i, "peers.dat"))
os.remove(log_filename(cachedir, i, "fee_estimates.dat"))
for i in range(num_nodes):
from_dir = os.path.join(cachedir, "node" + str(i))
to_dir = os.path.join(test_dir, "node" + str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in dsah.conf
def _initialize_chain_clean(self, test_dir, num_nodes):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(num_nodes):
initialize_datadir(test_dir, i)
MASTERNODE_COLLATERAL = 1000
class MasternodeInfo:
def __init__(self, proTxHash, ownerAddr, votingAddr, pubKeyOperator, keyOperator, collateral_address, collateral_txid, collateral_vout):
self.proTxHash = proTxHash
self.ownerAddr = ownerAddr
self.votingAddr = votingAddr
self.pubKeyOperator = pubKeyOperator
self.keyOperator = keyOperator
self.collateral_address = collateral_address
self.collateral_txid = collateral_txid
self.collateral_vout = collateral_vout
class DashTestFramework(BitcoinTestFramework):
def __init__(self, num_nodes, masterodes_count, extra_args, fast_dip3_enforcement=False):
super().__init__()
self.mn_count = masterodes_count
self.num_nodes = num_nodes
self.mninfo = []
self.setup_clean_chain = True
self.is_network_split = False
# additional args
self.extra_args = extra_args
self.extra_args += ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"]
self.fast_dip3_enforcement = fast_dip3_enforcement
if fast_dip3_enforcement:
self.extra_args += ["-dip3params=30:50"]
def create_simple_node(self):
idx = len(self.nodes)
args = self.extra_args
self.nodes.append(self.start_node(idx, self.options.tmpdir, args))
for i in range(0, idx):
connect_nodes(self.nodes[i], idx)
def prepare_masternodes(self):
for idx in range(0, self.mn_count):
self.prepare_masternode(idx)
def prepare_masternode(self, idx):
bls = self.nodes[0].bls('generate')
address = self.nodes[0].getnewaddress()
txid = self.nodes[0].sendtoaddress(address, MASTERNODE_COLLATERAL)
txraw = self.nodes[0].getrawtransaction(txid, True)
collateral_vout = 0
for vout_idx in range(0, len(txraw["vout"])):
vout = txraw["vout"][vout_idx]
if vout["value"] == MASTERNODE_COLLATERAL:
collateral_vout = vout_idx
self.nodes[0].lockunspent(False, [{'txid': txid, 'vout': collateral_vout}])
# send to same address to reserve some funds for fees
self.nodes[0].sendtoaddress(address, 0.001)
ownerAddr = self.nodes[0].getnewaddress()
votingAddr = self.nodes[0].getnewaddress()
rewardsAddr = self.nodes[0].getnewaddress()
port = p2p_port(len(self.nodes) + idx)
if (idx % 2) == 0:
self.nodes[0].lockunspent(True, [{'txid': txid, 'vout': collateral_vout}])
proTxHash = self.nodes[0].protx('register_fund', address, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)
else:
self.nodes[0].generate(1)
proTxHash = self.nodes[0].protx('register', txid, collateral_vout, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)
self.nodes[0].generate(1)
self.mninfo.append(MasternodeInfo(proTxHash, ownerAddr, votingAddr, bls['public'], bls['secret'], address, txid, collateral_vout))
self.sync_all()
def remove_mastermode(self, idx):
mn = self.mninfo[idx]
rawtx = self.nodes[0].createrawtransaction([{"txid": mn.collateral_txid, "vout": mn.collateral_vout}], {self.nodes[0].getnewaddress(): 999.9999})
rawtx = self.nodes[0].signrawtransaction(rawtx)
self.nodes[0].sendrawtransaction(rawtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
self.mninfo.remove(mn)
def prepare_datadirs(self):
# stop faucet node so that we can copy the datadir
self.stop_node(0)
start_idx = len(self.nodes)
for idx in range(0, self.mn_count):
copy_datadir(0, idx + start_idx, self.options.tmpdir)
# restart faucet node
self.nodes[0] = self.start_node(0, self.options.tmpdir, self.extra_args)
def start_masternodes(self):
start_idx = len(self.nodes)
for idx in range(0, self.mn_count):
self.nodes.append(None)
executor = ThreadPoolExecutor(max_workers=20)
def do_start(idx):
args = ['-masternode=1',
'-masternodeblsprivkey=%s' % self.mninfo[idx].keyOperator] + self.extra_args
node = self.start_node(idx + start_idx, self.options.tmpdir, args)
self.mninfo[idx].nodeIdx = idx + start_idx
self.mninfo[idx].node = node
self.nodes[idx + start_idx] = node
wait_to_sync(node, True)
def do_connect(idx):
for i in range(0, idx + 1):
connect_nodes(self.nodes[idx + start_idx], i)
jobs = []
# start up nodes in parallel
for idx in range(0, self.mn_count):
jobs.append(executor.submit(do_start, idx))
# wait for all nodes to start up
for job in jobs:
job.result()
jobs.clear()
# connect nodes in parallel
for idx in range(0, self.mn_count):
jobs.append(executor.submit(do_connect, idx))
# wait for all nodes to connect
for job in jobs:
job.result()
jobs.clear()
sync_masternodes(self.nodes, True)
executor.shutdown()
def setup_network(self):
self.nodes = []
# create faucet node for collateral and transactions
self.nodes.append(self.start_node(0, self.options.tmpdir, self.extra_args))
required_balance = MASTERNODE_COLLATERAL * self.mn_count + 1
while self.nodes[0].getbalance() < required_balance:
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(1)
# create connected simple nodes
for i in range(0, self.num_nodes - self.mn_count - 1):
self.create_simple_node()
sync_masternodes(self.nodes, True)
# activate DIP3
if not self.fast_dip3_enforcement:
while self.nodes[0].getblockcount() < 500:
self.nodes[0].generate(10)
self.sync_all()
# create masternodes
self.prepare_masternodes()
self.prepare_datadirs()
self.start_masternodes()
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(1)
# sync nodes
self.sync_all()
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
mn_info = self.nodes[0].masternodelist("status")
assert (len(mn_info) == self.mn_count)
for status in mn_info.values():
assert (status == 'ENABLED')
def | create_raw_tx | identifier_name | |
test_framework.py | oshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = [self.options.tmpdir + "/test_framework.log"]
filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for fn in filenames:
try:
with open(fn, 'r') as f:
print("From" , fn, ":")
print("".join(deque(f, MAX_LINES_TO_PRINT)))
except OSError:
print("Opening file %s failed." % fn)
traceback.print_exc()
if success == TestStatus.PASSED:
self.log.info("Tests successful")
sys.exit(TEST_EXIT_PASSED)
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
sys.exit(TEST_EXIT_SKIPPED)
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
logging.shutdown()
sys.exit(TEST_EXIT_FAILED)
# Public helper methods. These can be accessed by the subclass test scripts.
def start_node(self, i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
return _start_node(i, dirname, extra_args, rpchost, timewait, binary, stderr)
def start_nodes(self, num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
return _start_nodes(num_nodes, dirname, extra_args, rpchost, timewait, binary, stderr)
def stop_node(self, num_node):
_stop_node(self.nodes[num_node], num_node)
def stop_nodes(self):
_stop_nodes(self.nodes)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt = '%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self, test_dir, num_nodes, cachedir, extra_args=None, stderr=None):
"""Initialize a pre-mined blockchain for use by the test.
|
assert num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join(cachedir, 'node' + str(i))):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(os.path.join(cachedir, "node" + str(i))):
shutil.rmtree(os.path.join(cachedir, "node" + str(i)))
# Create cache directories, run dashds:
set_genesis_mocktime()
for i in range(MAX_NODES):
datadir = initialize_datadir(cachedir, i)
args = [os.getenv("DASHD", "dashd"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0", "-mocktime="+str(GENESISTIME)]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
if extra_args is not None:
args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args, stderr=stderr)
self.log.debug("initialize_chain: dashd started, waiting for RPC to come up")
wait_for_bitcoind_start(bitcoind_processes[i], datadir, i)
self.log.debug("initialize_chain: RPC successfully started")
self.nodes = []
for i in range(MAX_NODES):
try:
self.nodes.append(get_rpc_proxy(rpc_url(get_datadir_path(cachedir, i), i), i))
except:
self.log.exception("Error connecting to node %d" % i)
sys.exit(1)
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
block_time = GENESISTIME
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 156
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename(cachedir, i, "debug.log"))
os.remove(log_filename(cachedir, i, "db.log"))
os.remove(log_filename(cachedir, i, "peers.dat"))
os.remove(log_filename(cachedir, i, "fee_estimates.dat"))
for i in range(num_nodes):
from_dir = os.path.join(cachedir, "node" + str(i))
to_dir = os.path.join(test_dir, "node" + str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in dsah.conf
def _initialize_chain_clean(self, test_dir, num_nodes):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(num_nodes):
initialize_datadir(test_dir, i)
MASTERNODE_COLLATERAL = 1000
class MasternodeInfo:
def __init__(self, proTxHash, ownerAddr, votingAddr, pubKeyOperator, keyOperator, collateral_address, collateral_txid, collateral_vout):
self.proTxHash = proTxHash
self.ownerAddr = ownerAddr
self.votingAddr = votingAddr
self.pubKeyOperator = pubKeyOperator
self.keyOperator = keyOperator
self.collateral_address = collateral_address
self.collateral_txid = collateral_txid
self.collateral_vout = collateral_vout
class DashTestFramework(BitcoinTestFramework):
def __init__(self, num_nodes, masterodes_count, extra_args, fast_dip3_enforcement=False):
super().__init__()
self.mn_count = masterodes_count
self.num_nodes = num_nodes
self.mninfo = []
self.setup_clean_chain = True
self.is_network_split = False
# additional args
self.extra_args = extra_args
self.extra_args += ["-sporkkey=cP4EKFyJsHT39LDqg | Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache.""" | random_line_split |
test_framework.py |
def run_test(self):
raise NotImplementedError
# Main function. This should not be overridden by the subclass test scripts.
def main(self):
    """Drive the whole test run.

    Parses command-line options, prepares the temp directory and logging,
    sets up the chain and network, runs the subclass's run_test(), then
    shuts nodes down, optionally cleans up, and exits with the appropriate
    TEST_EXIT_* status code.
    """
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                      help="Leave dashds and test.* datadir on exit or error")
    parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                      help="Don't stop dashds after the test execution")
    parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
                      help="Source directory containing dashd/dash-cli (default: %default)")
    parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
                      help="Directory for caching pregenerated datadirs")
    parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
    parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
                      help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
    parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                      help="Print out all RPC calls as they are made")
    parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                      help="The seed to use for assigning port numbers (default: current process id)")
    parser.add_option("--coveragedir", dest="coveragedir",
                      help="Write tested RPC commands into this directory")
    parser.add_option("--configfile", dest="configfile",
                      help="Location of the test framework config file")
    # Let the subclass register any additional options before parsing.
    self.add_options(parser)
    (self.options, self.args) = parser.parse_args()

    if self.options.coveragedir:
        enable_coverage(self.options.coveragedir)

    # Seed port assignment so parallel test runs don't collide on ports.
    PortSeed.n = self.options.port_seed

    # Make dashd/dash-cli (and the qt binaries) resolvable via PATH.
    os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']

    check_json_precision()

    # Set up temp directory and start logging
    if self.options.tmpdir:
        # exist_ok=False: refuse to reuse an existing directory and risk
        # clobbering a previous run's data.
        os.makedirs(self.options.tmpdir, exist_ok=False)
    else:
        self.options.tmpdir = tempfile.mkdtemp(prefix="test")
    self._start_logging()

    success = TestStatus.FAILED

    try:
        self.setup_chain()
        self.setup_network()
        self.run_test()
        success = TestStatus.PASSED
    except JSONRPCException as e:
        self.log.exception("JSONRPC error")
    except SkipTest as e:
        self.log.warning("Test Skipped: %s" % e.message)
        success = TestStatus.SKIPPED
    except AssertionError as e:
        self.log.exception("Assertion failed")
    except KeyError as e:
        self.log.exception("Key error")
    except Exception as e:
        self.log.exception("Unexpected exception caught during testing")
    except KeyboardInterrupt as e:
        # KeyboardInterrupt does not inherit from Exception in Python 3, so
        # this clause is reachable despite following the generic handler.
        self.log.warning("Exiting after keyboard interrupt")

    if not self.options.noshutdown:
        self.log.info("Stopping nodes")
        try:
            if self.nodes:
                self.stop_nodes()
        except BaseException as e:
            # NOTE(review): success is a TestStatus everywhere else; assigning
            # False here still satisfies "success != TestStatus.FAILED" below,
            # so cleanup can run after a shutdown failure — confirm intended.
            success = False
            self.log.exception("Unexpected exception caught during shutdown")
    else:
        self.log.info("Note: dashds were not stopped and may still be running")

    # Only delete the temp dir when shutdown happened, cleanup wasn't
    # disabled, and the test did not fail (failed runs keep logs for triage).
    if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
        self.log.info("Cleaning up")
        shutil.rmtree(self.options.tmpdir)
    else:
        self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
        if os.getenv("PYTHON_DEBUG", ""):
            # Dump the end of the debug logs, to aid in debugging rare
            # travis failures.
            import glob
            filenames = [self.options.tmpdir + "/test_framework.log"]
            filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
            MAX_LINES_TO_PRINT = 1000
            for fn in filenames:
                try:
                    with open(fn, 'r') as f:
                        print("From" , fn, ":")
                        # deque(f, N) keeps only the last N lines of the file.
                        print("".join(deque(f, MAX_LINES_TO_PRINT)))
                except OSError:
                    print("Opening file %s failed." % fn)
                    traceback.print_exc()

    if success == TestStatus.PASSED:
        self.log.info("Tests successful")
        sys.exit(TEST_EXIT_PASSED)
    elif success == TestStatus.SKIPPED:
        self.log.info("Test skipped")
        sys.exit(TEST_EXIT_SKIPPED)
    else:
        self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
        # Flush handlers before exiting so the failure log line is persisted.
        logging.shutdown()
        sys.exit(TEST_EXIT_FAILED)
# Public helper methods. These can be accessed by the subclass test scripts.
def start_node(self, i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
    # Start node i; thin delegate to the module-level _start_node helper,
    # returning whatever it returns.
    return _start_node(i, dirname, extra_args, rpchost, timewait, binary, stderr)
def start_nodes(self, num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
    # Start num_nodes daemons; thin delegate to the module-level
    # _start_nodes helper, returning whatever it returns.
    return _start_nodes(num_nodes, dirname, extra_args, rpchost, timewait, binary, stderr)
def stop_node(self, num_node):
    # Stop the node at index num_node; delegates to the module-level
    # _stop_node helper.
    _stop_node(self.nodes[num_node], num_node)
def stop_nodes(self):
    # Stop every running node; delegates to the module-level _stop_nodes
    # helper.
    _stop_nodes(self.nodes)
def split_network(self):
    """Split the four-node network into two halves: nodes 0/1 and nodes 2/3."""
    # Sever the connection between node 1 and node 2 in both directions.
    for src, dst in ((1, 2), (2, 1)):
        disconnect_nodes(self.nodes[src], dst)
    # Sync each half independently — the halves can no longer agree with
    # each other.
    first_half, second_half = self.nodes[:2], self.nodes[2:]
    self.sync_all([first_half, second_half])
def join_network(self):
    """
    Join the (previously split) network halves together.
    """
    # Reconnect node 1 <-> node 2 in both directions, then wait for all
    # nodes to agree again.
    connect_nodes_bi(self.nodes, 1, 2)
    self.sync_all()
def sync_all(self, node_groups=None):
    """Wait until every group of nodes agrees on blocks and mempool.

    node_groups defaults to a single group containing all nodes; pass
    multiple groups while the network is split.
    """
    groups = node_groups if node_groups else [self.nodes]
    for nodes in groups:
        sync_blocks(nodes)
        sync_mempools(nodes)
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
    """Configure the 'TestFramework' logger with file and console handlers.

    The file handler records everything at DEBUG level to
    <tmpdir>/test_framework.log; the console handler writes to stdout at
    the level selected via --loglevel. With --tracerpc, the "BitcoinRPC"
    logger is additionally routed to stdout at DEBUG level.
    """
    self.log = logging.getLogger('TestFramework')
    self.log.setLevel(logging.DEBUG)
    # File handler: always capture all messages for post-mortem debugging.
    # Use os.path.join instead of string concatenation for portability.
    fh = logging.FileHandler(os.path.join(self.options.tmpdir, 'test_framework.log'))
    fh.setLevel(logging.DEBUG)
    # Console handler: writes to stdout (the old comment claiming stderr was
    # wrong). By default it shows only messages at the --loglevel threshold.
    ch = logging.StreamHandler(sys.stdout)
    # --loglevel may be either a numeric level or a name such as DEBUG;
    # optparse delivered it as a string, so convert digits to int.
    ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
    ch.setLevel(ll)
    # Match dashd's debug.log format (microsecond precision, UTC) so test
    # and node logs can be concatenated and sorted together.
    formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    formatter.converter = time.gmtime
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # Add the handlers to the logger.
    self.log.addHandler(fh)
    self.log.addHandler(ch)

    if self.options.trace_rpc:
        rpc_logger = logging.getLogger("BitcoinRPC")
        rpc_logger.setLevel(logging.DEBUG)
        rpc_handler = logging.StreamHandler(sys.stdout)
        rpc_handler.setLevel(logging.DEBUG)
        rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self, test_dir, num_nodes, cachedir, extra_args=None, stderr=None):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join(cachedir, 'node' + str(i))):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(os.path.join(cachedir, "node" + str(i))):
shutil.rmtree(os.path.join(cachedir, "node" + str(i)))
# Create cache directories, run dashds:
set_genesis_mocktime()
for i in range(MAX_NODES):
datadir = initialize_datadir(cachedir, i)
args = [os.getenv("DASHD", "dashd"), "-server", "-keypool=1", "-datadir=" + datadir, | extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.nodes = _start_nodes(self.num_nodes, self.options.tmpdir, extra_args, stderr=stderr) | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.