file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
scaledobject_controller.go | package controllers
import (
"context"
"fmt"
"sync"
"github.com/go-logr/logr"
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/scale"
"k8s.io/client-go/tools/cache"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
kedav1alpha1 "github.com/kedacore/keda/api/v1alpha1"
kedacontrollerutil "github.com/kedacore/keda/controllers/util"
"github.com/kedacore/keda/pkg/scaling"
kedautil "github.com/kedacore/keda/pkg/util"
)
// +kubebuilder:rbac:groups=keda.sh,resources=scaledobjects;scaledobjects/finalizers;scaledobjects/status,verbs="*"
// +kubebuilder:rbac:groups=keda.sh,resources=triggerauthentications;triggerauthentications/status,verbs="*"
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs="*"
// +kubebuilder:rbac:groups="",resources=configmaps;configmaps/status;events,verbs="*"
// +kubebuilder:rbac:groups="",resources=pods;services;services;secrets;external,verbs=get;list;watch
// +kubebuilder:rbac:groups="*",resources="*/scale",verbs="*"
// +kubebuilder:rbac:groups="*",resources="*",verbs=get
// ScaledObjectReconciler reconciles a ScaledObject object
type ScaledObjectReconciler struct {
Log logr.Logger
Client client.Client
Scheme *runtime.Scheme
scaleClient *scale.ScalesGetter
restMapper meta.RESTMapper
scaledObjectsGenerations *sync.Map
scaleHandler scaling.ScaleHandler
kubeVersion kedautil.K8sVersion
}
// SetupWithManager initializes the ScaledObjectReconciler instance and starts a new controller managed by the passed Manager instance.
func (r *ScaledObjectReconciler) SetupWithManager(mgr ctrl.Manager) error {
// create Discovery clientset
clientset, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
if err != nil {
r.Log.Error(err, "Not able to create Discovery clientset")
return err
}
// Find out Kubernetes version
version, err := clientset.ServerVersion()
if err == nil {
r.kubeVersion = kedautil.NewK8sVersion(version)
r.Log.Info("Running on Kubernetes "+r.kubeVersion.PrettyVersion, "version", version)
} else {
r.Log.Error(err, "Not able to get Kubernetes version")
}
// Create Scale Client
scaleClient := initScaleClient(mgr, clientset)
r.scaleClient = &scaleClient
// Init the rest of ScaledObjectReconciler
r.restMapper = mgr.GetRESTMapper()
r.scaledObjectsGenerations = &sync.Map{}
r.scaleHandler = scaling.NewScaleHandler(mgr.GetClient(), r.scaleClient, mgr.GetScheme())
// Start controller
return ctrl.NewControllerManagedBy(mgr).
// predicate.GenerationChangedPredicate{} ignore updates to ScaledObject Status
// (in this case metadata.Generation does not change)
// so reconcile loop is not started on Status updates
For(&kedav1alpha1.ScaledObject{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).
Owns(&autoscalingv2beta2.HorizontalPodAutoscaler{}).
Complete(r)
}
func initScaleClient(mgr manager.Manager, clientset *discovery.DiscoveryClient) scale.ScalesGetter {
scaleKindResolver := scale.NewDiscoveryScaleKindResolver(clientset)
return scale.New(
clientset.RESTClient(), mgr.GetRESTMapper(),
dynamic.LegacyAPIPathResolverFunc,
scaleKindResolver,
)
}
// Reconcile performs reconciliation on the identified ScaledObject resource based on the request information passed, returns the result and an error (if any).
func (r *ScaledObjectReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
reqLogger := r.Log.WithValues("ScaledObject.Namespace", req.Namespace, "ScaledObject.Name", req.Name)
// Fetch the ScaledObject instance
scaledObject := &kedav1alpha1.ScaledObject{}
err := r.Client.Get(context.TODO(), req.NamespacedName, scaledObject)
if err != nil {
if errors.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
return ctrl.Result{}, nil
}
// Error reading the object - requeue the request.
reqLogger.Error(err, "Failed to get ScaledObject")
return ctrl.Result{}, err
}
reqLogger.Info("Reconciling ScaledObject")
// Check if the ScaledObject instance is marked to be deleted, which is
// indicated by the deletion timestamp being set.
if scaledObject.GetDeletionTimestamp() != nil {
return ctrl.Result{}, r.finalizeScaledObject(reqLogger, scaledObject)
}
// ensure finalizer is set on this CR
if err := r.ensureFinalizer(reqLogger, scaledObject); err != nil {
return ctrl.Result{}, err
}
// ensure Status Conditions are initialized
if !scaledObject.Status.Conditions.AreInitialized() {
conditions := kedav1alpha1.GetInitializedConditions()
kedacontrollerutil.SetStatusConditions(r.Client, reqLogger, scaledObject, conditions)
}
// reconcile ScaledObject and set status appropriately
msg, err := r.reconcileScaledObject(reqLogger, scaledObject)
conditions := scaledObject.Status.Conditions.DeepCopy()
if err != nil {
reqLogger.Error(err, msg)
conditions.SetReadyCondition(metav1.ConditionFalse, "ScaledObjectCheckFailed", msg)
conditions.SetActiveCondition(metav1.ConditionUnknown, "UnkownState", "ScaledObject check failed")
} else {
reqLogger.V(1).Info(msg)
conditions.SetReadyCondition(metav1.ConditionTrue, "ScaledObjectReady", msg)
}
kedacontrollerutil.SetStatusConditions(r.Client, reqLogger, scaledObject, &conditions)
return ctrl.Result{}, err
}
// reconcileScaledObject implements reconciler logic for ScaleObject
func (r *ScaledObjectReconciler) reconcileScaledObject(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (string, error) |
// ensureScaledObjectLabel ensures that scaledObjectName=<scaledObject.Name> label exist in the ScaledObject
// This is how the MetricsAdapter will know which ScaledObject a metric is for when the HPA queries it.
func (r *ScaledObjectReconciler) ensureScaledObjectLabel(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error {
const labelScaledObjectName = "scaledObjectName"
if scaledObject.Labels == nil {
scaledObject.Labels = map[string]string{labelScaledObjectName: scaledObject.Name}
} else {
value, found := scaledObject.Labels[labelScaledObjectName]
if found && value == scaledObject.Name {
return nil
}
scaledObject.Labels[labelScaledObjectName] = scaledObject.Name
}
logger.V(1).Info("Adding scaledObjectName label on ScaledObject", "value", scaledObject.Name)
return r.Client.Update(context.TODO(), scaledObject)
}
// checkTargetResourceIsScalable checks if resource targeted for scaling exists and exposes /scale subresource
func (r *ScaledObjectReconciler) checkTargetResourceIsScalable(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (kedav1alpha1.GroupVersionKindResource, error) {
gvkr, err := kedautil.ParseGVKR(r.restMapper, scaledObject.Spec.ScaleTargetRef.APIVersion, scaledObject.Spec.ScaleTargetRef.Kind)
if err != nil {
logger.Error(err, "Failed to parse Group, Version, Kind, Resource", "apiVersion", scaledObject.Spec.ScaleTargetRef.APIVersion, "kind", scaledObject.Spec.ScaleTargetRef.Kind)
return gvkr, err
}
gvkString := gvkr.GVKString()
logger.V(1).Info("Parsed Group, Version, Kind, Resource", "GVK", gvkString, "Resource", gvkr.Resource)
// let's try to detect /scale subresource
scale, errScale := (*r.scaleClient).Scales(scaledObject.Namespace).Get(context.TODO(), gvkr.GroupResource(), scaledObject.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
if errScale != nil {
// not able to get /scale subresource -> let's check if the resource even exist in the cluster
unstruct := &unstructured.Unstructured{}
unstruct.SetGroupVersionKind(gvkr.GroupVersionKind())
if err := r.Client.Get(context.TODO(), client.ObjectKey{Namespace: scaledObject.Namespace, Name: scaledObject.Spec.ScaleTargetRef.Name}, unstruct); err != nil {
// resource doesn't exist
logger.Error(err, "Target resource doesn't exist", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
return gvkr, err
}
// resource exist but doesn't expose /scale subresource
logger.Error(errScale, "Target resource doesn't expose /scale subresource", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
return gvkr, errScale
}
// if it is not already present in ScaledObject Status:
// - store discovered GVK and GVKR
// - store original scaleTarget's replica count (before scaling with KEDA)
if scaledObject.Status.ScaleTargetKind != gvkString || scaledObject.Status.OriginalReplicaCount == nil {
status := scaledObject.Status.DeepCopy()
if scaledObject.Status.ScaleTargetKind != gvkString {
status.ScaleTargetKind = gvkString
status.ScaleTargetGVKR = &gvkr
}
if scaledObject.Status.OriginalReplicaCount == nil {
status.OriginalReplicaCount = &scale.Spec.Replicas
}
if err := kedacontrollerutil.UpdateScaledObjectStatus(r.Client, logger, scaledObject, status); err != nil {
return gvkr, err
}
logger.Info("Detected resource targeted for scaling", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
}
return gvkr, nil
}
// ensureHPAForScaledObjectExists ensures that in cluster exist up-to-date HPA for specified ScaledObject, returns true if a new HPA was created
func (r *ScaledObjectReconciler) ensureHPAForScaledObjectExists(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, gvkr *kedav1alpha1.GroupVersionKindResource) (bool, error) {
hpaName := getHPAName(scaledObject)
foundHpa := &autoscalingv2beta2.HorizontalPodAutoscaler{}
// Check if HPA for this ScaledObject already exists
err := r.Client.Get(context.TODO(), types.NamespacedName{Name: hpaName, Namespace: scaledObject.Namespace}, foundHpa)
if err != nil && errors.IsNotFound(err) {
// HPA wasn't found -> let's create a new one
err = r.createAndDeployNewHPA(logger, scaledObject, gvkr)
if err != nil {
return false, err
}
// check if scaledObject.spec.behavior was defined, because it is supported only on k8s >= 1.18
r.checkMinK8sVersionforHPABehavior(logger, scaledObject)
// new HPA created successfully -> notify Reconcile function so it could fire a new ScaleLoop
return true, nil
} else if err != nil {
logger.Error(err, "Failed to get HPA from cluster")
return false, err
}
// HPA was found -> let's check if we need to update it
err = r.updateHPAIfNeeded(logger, scaledObject, foundHpa, gvkr)
if err != nil {
logger.Error(err, "Failed to check HPA for possible update")
return false, err
}
return false, nil
}
// startScaleLoop starts ScaleLoop handler for the respective ScaledObject
func (r *ScaledObjectReconciler) requestScaleLoop(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error {
logger.V(1).Info("Notify scaleHandler of an update in scaledObject")
key, err := cache.MetaNamespaceKeyFunc(scaledObject)
if err != nil {
logger.Error(err, "Error getting key for scaledObject")
return err
}
if err = r.scaleHandler.HandleScalableObject(scaledObject); err != nil {
return err
}
// store ScaledObject's current Generation
r.scaledObjectsGenerations.Store(key, scaledObject.Generation)
return nil
}
// stopScaleLoop stops ScaleLoop handler for the respective ScaleObject
func (r *ScaledObjectReconciler) stopScaleLoop(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error {
key, err := cache.MetaNamespaceKeyFunc(scaledObject)
if err != nil {
logger.Error(err, "Error getting key for scaledObject")
return err
}
if err := r.scaleHandler.DeleteScalableObject(scaledObject); err != nil {
return err
}
// delete ScaledObject's current Generation
r.scaledObjectsGenerations.Delete(key)
return nil
}
// scaledObjectGenerationChanged returns true if ScaledObject's Generation was changed, ie. ScaledObject.Spec was changed
func (r *ScaledObjectReconciler) scaledObjectGenerationChanged(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (bool, error) {
key, err := cache.MetaNamespaceKeyFunc(scaledObject)
if err != nil {
logger.Error(err, "Error getting key for scaledObject")
return true, err
}
value, loaded := r.scaledObjectsGenerations.Load(key)
if loaded {
generation := value.(int64)
if generation == scaledObject.Generation {
return false, nil
}
}
return true, nil
}
| {
// Check scale target Name is specified
if scaledObject.Spec.ScaleTargetRef.Name == "" {
err := fmt.Errorf("ScaledObject.spec.scaleTargetRef.name is missing")
return "ScaledObject doesn't have correct scaleTargetRef specification", err
}
// Check the label needed for Metrics servers is present on ScaledObject
err := r.ensureScaledObjectLabel(logger, scaledObject)
if err != nil {
return "Failed to update ScaledObject with scaledObjectName label", err
}
// Check if resource targeted for scaling exists and exposes /scale subresource
gvkr, err := r.checkTargetResourceIsScalable(logger, scaledObject)
if err != nil {
return "ScaledObject doesn't have correct scaleTargetRef specification", err
}
// Create a new HPA or update existing one according to ScaledObject
newHPACreated, err := r.ensureHPAForScaledObjectExists(logger, scaledObject, &gvkr)
if err != nil {
return "Failed to ensure HPA is correctly created for ScaledObject", err
}
scaleObjectSpecChanged := false
if !newHPACreated {
// Lets Check whether ScaledObject generation was changed, ie. there were changes in ScaledObject.Spec
// if it was changed we should start a new ScaleLoop
// (we can omit this check if a new HPA was created, which fires new ScaleLoop anyway)
scaleObjectSpecChanged, err = r.scaledObjectGenerationChanged(logger, scaledObject)
if err != nil {
return "Failed to check whether ScaledObject's Generation was changed", err
}
}
// Notify ScaleHandler if a new HPA was created or if ScaledObject was updated
if newHPACreated || scaleObjectSpecChanged {
if r.requestScaleLoop(logger, scaledObject) != nil {
return "Failed to start a new scale loop with scaling logic", err
}
logger.Info("Initializing Scaling logic according to ScaledObject Specification")
}
return "ScaledObject is defined correctly and is ready for scaling", nil
} | identifier_body |
scaledobject_controller.go | package controllers
import (
"context"
"fmt"
"sync"
"github.com/go-logr/logr"
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/scale"
"k8s.io/client-go/tools/cache"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
kedav1alpha1 "github.com/kedacore/keda/api/v1alpha1"
kedacontrollerutil "github.com/kedacore/keda/controllers/util"
"github.com/kedacore/keda/pkg/scaling"
kedautil "github.com/kedacore/keda/pkg/util"
)
// +kubebuilder:rbac:groups=keda.sh,resources=scaledobjects;scaledobjects/finalizers;scaledobjects/status,verbs="*"
// +kubebuilder:rbac:groups=keda.sh,resources=triggerauthentications;triggerauthentications/status,verbs="*"
// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs="*"
// +kubebuilder:rbac:groups="",resources=configmaps;configmaps/status;events,verbs="*"
// +kubebuilder:rbac:groups="",resources=pods;services;services;secrets;external,verbs=get;list;watch
// +kubebuilder:rbac:groups="*",resources="*/scale",verbs="*"
// +kubebuilder:rbac:groups="*",resources="*",verbs=get
// ScaledObjectReconciler reconciles a ScaledObject object
type ScaledObjectReconciler struct {
Log logr.Logger
Client client.Client
Scheme *runtime.Scheme
scaleClient *scale.ScalesGetter
restMapper meta.RESTMapper
scaledObjectsGenerations *sync.Map
scaleHandler scaling.ScaleHandler
kubeVersion kedautil.K8sVersion
}
// SetupWithManager initializes the ScaledObjectReconciler instance and starts a new controller managed by the passed Manager instance.
func (r *ScaledObjectReconciler) SetupWithManager(mgr ctrl.Manager) error {
// create Discovery clientset
clientset, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
if err != nil {
r.Log.Error(err, "Not able to create Discovery clientset")
return err
}
// Find out Kubernetes version
version, err := clientset.ServerVersion()
if err == nil {
r.kubeVersion = kedautil.NewK8sVersion(version)
r.Log.Info("Running on Kubernetes "+r.kubeVersion.PrettyVersion, "version", version)
} else {
r.Log.Error(err, "Not able to get Kubernetes version")
}
// Create Scale Client
scaleClient := initScaleClient(mgr, clientset)
r.scaleClient = &scaleClient
// Init the rest of ScaledObjectReconciler
r.restMapper = mgr.GetRESTMapper()
r.scaledObjectsGenerations = &sync.Map{}
r.scaleHandler = scaling.NewScaleHandler(mgr.GetClient(), r.scaleClient, mgr.GetScheme())
// Start controller
return ctrl.NewControllerManagedBy(mgr).
// predicate.GenerationChangedPredicate{} ignore updates to ScaledObject Status
// (in this case metadata.Generation does not change)
// so reconcile loop is not started on Status updates
For(&kedav1alpha1.ScaledObject{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).
Owns(&autoscalingv2beta2.HorizontalPodAutoscaler{}).
Complete(r)
}
func initScaleClient(mgr manager.Manager, clientset *discovery.DiscoveryClient) scale.ScalesGetter {
scaleKindResolver := scale.NewDiscoveryScaleKindResolver(clientset)
return scale.New(
clientset.RESTClient(), mgr.GetRESTMapper(),
dynamic.LegacyAPIPathResolverFunc,
scaleKindResolver,
)
}
// Reconcile performs reconciliation on the identified ScaledObject resource based on the request information passed, returns the result and an error (if any).
func (r *ScaledObjectReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
reqLogger := r.Log.WithValues("ScaledObject.Namespace", req.Namespace, "ScaledObject.Name", req.Name)
// Fetch the ScaledObject instance
scaledObject := &kedav1alpha1.ScaledObject{}
err := r.Client.Get(context.TODO(), req.NamespacedName, scaledObject)
if err != nil {
if errors.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
return ctrl.Result{}, nil
}
// Error reading the object - requeue the request.
reqLogger.Error(err, "Failed to get ScaledObject")
return ctrl.Result{}, err
}
reqLogger.Info("Reconciling ScaledObject")
// Check if the ScaledObject instance is marked to be deleted, which is
// indicated by the deletion timestamp being set.
if scaledObject.GetDeletionTimestamp() != nil {
return ctrl.Result{}, r.finalizeScaledObject(reqLogger, scaledObject)
}
// ensure finalizer is set on this CR
if err := r.ensureFinalizer(reqLogger, scaledObject); err != nil {
return ctrl.Result{}, err
}
// ensure Status Conditions are initialized
if !scaledObject.Status.Conditions.AreInitialized() {
conditions := kedav1alpha1.GetInitializedConditions()
kedacontrollerutil.SetStatusConditions(r.Client, reqLogger, scaledObject, conditions)
}
// reconcile ScaledObject and set status appropriately
msg, err := r.reconcileScaledObject(reqLogger, scaledObject)
conditions := scaledObject.Status.Conditions.DeepCopy()
if err != nil {
reqLogger.Error(err, msg)
conditions.SetReadyCondition(metav1.ConditionFalse, "ScaledObjectCheckFailed", msg)
conditions.SetActiveCondition(metav1.ConditionUnknown, "UnkownState", "ScaledObject check failed")
} else {
reqLogger.V(1).Info(msg)
conditions.SetReadyCondition(metav1.ConditionTrue, "ScaledObjectReady", msg)
}
kedacontrollerutil.SetStatusConditions(r.Client, reqLogger, scaledObject, &conditions)
return ctrl.Result{}, err
}
// reconcileScaledObject implements reconciler logic for ScaleObject
func (r *ScaledObjectReconciler) reconcileScaledObject(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (string, error) {
// Check scale target Name is specified
if scaledObject.Spec.ScaleTargetRef.Name == "" {
err := fmt.Errorf("ScaledObject.spec.scaleTargetRef.name is missing")
return "ScaledObject doesn't have correct scaleTargetRef specification", err
}
// Check the label needed for Metrics servers is present on ScaledObject
err := r.ensureScaledObjectLabel(logger, scaledObject)
if err != nil {
return "Failed to update ScaledObject with scaledObjectName label", err
}
// Check if resource targeted for scaling exists and exposes /scale subresource
gvkr, err := r.checkTargetResourceIsScalable(logger, scaledObject)
if err != nil {
return "ScaledObject doesn't have correct scaleTargetRef specification", err
}
// Create a new HPA or update existing one according to ScaledObject
newHPACreated, err := r.ensureHPAForScaledObjectExists(logger, scaledObject, &gvkr)
if err != nil {
return "Failed to ensure HPA is correctly created for ScaledObject", err
}
scaleObjectSpecChanged := false
if !newHPACreated {
// Lets Check whether ScaledObject generation was changed, ie. there were changes in ScaledObject.Spec
// if it was changed we should start a new ScaleLoop
// (we can omit this check if a new HPA was created, which fires new ScaleLoop anyway)
scaleObjectSpecChanged, err = r.scaledObjectGenerationChanged(logger, scaledObject)
if err != nil {
return "Failed to check whether ScaledObject's Generation was changed", err
}
}
// Notify ScaleHandler if a new HPA was created or if ScaledObject was updated
if newHPACreated || scaleObjectSpecChanged {
if r.requestScaleLoop(logger, scaledObject) != nil {
return "Failed to start a new scale loop with scaling logic", err
}
logger.Info("Initializing Scaling logic according to ScaledObject Specification")
}
return "ScaledObject is defined correctly and is ready for scaling", nil
}
// ensureScaledObjectLabel ensures that scaledObjectName=<scaledObject.Name> label exist in the ScaledObject
// This is how the MetricsAdapter will know which ScaledObject a metric is for when the HPA queries it.
func (r *ScaledObjectReconciler) ensureScaledObjectLabel(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error {
const labelScaledObjectName = "scaledObjectName"
if scaledObject.Labels == nil {
scaledObject.Labels = map[string]string{labelScaledObjectName: scaledObject.Name}
} else {
value, found := scaledObject.Labels[labelScaledObjectName]
if found && value == scaledObject.Name {
return nil
}
scaledObject.Labels[labelScaledObjectName] = scaledObject.Name
}
logger.V(1).Info("Adding scaledObjectName label on ScaledObject", "value", scaledObject.Name)
return r.Client.Update(context.TODO(), scaledObject)
}
// checkTargetResourceIsScalable checks if resource targeted for scaling exists and exposes /scale subresource
func (r *ScaledObjectReconciler) checkTargetResourceIsScalable(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (kedav1alpha1.GroupVersionKindResource, error) {
gvkr, err := kedautil.ParseGVKR(r.restMapper, scaledObject.Spec.ScaleTargetRef.APIVersion, scaledObject.Spec.ScaleTargetRef.Kind)
if err != nil {
logger.Error(err, "Failed to parse Group, Version, Kind, Resource", "apiVersion", scaledObject.Spec.ScaleTargetRef.APIVersion, "kind", scaledObject.Spec.ScaleTargetRef.Kind)
return gvkr, err
}
gvkString := gvkr.GVKString()
logger.V(1).Info("Parsed Group, Version, Kind, Resource", "GVK", gvkString, "Resource", gvkr.Resource)
// let's try to detect /scale subresource
scale, errScale := (*r.scaleClient).Scales(scaledObject.Namespace).Get(context.TODO(), gvkr.GroupResource(), scaledObject.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
if errScale != nil {
// not able to get /scale subresource -> let's check if the resource even exist in the cluster
unstruct := &unstructured.Unstructured{}
unstruct.SetGroupVersionKind(gvkr.GroupVersionKind())
if err := r.Client.Get(context.TODO(), client.ObjectKey{Namespace: scaledObject.Namespace, Name: scaledObject.Spec.ScaleTargetRef.Name}, unstruct); err != nil {
// resource doesn't exist
logger.Error(err, "Target resource doesn't exist", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
return gvkr, err
}
// resource exist but doesn't expose /scale subresource
logger.Error(errScale, "Target resource doesn't expose /scale subresource", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
return gvkr, errScale
}
// if it is not already present in ScaledObject Status:
// - store discovered GVK and GVKR
// - store original scaleTarget's replica count (before scaling with KEDA)
if scaledObject.Status.ScaleTargetKind != gvkString || scaledObject.Status.OriginalReplicaCount == nil {
status := scaledObject.Status.DeepCopy()
if scaledObject.Status.ScaleTargetKind != gvkString {
status.ScaleTargetKind = gvkString
status.ScaleTargetGVKR = &gvkr
}
if scaledObject.Status.OriginalReplicaCount == nil {
status.OriginalReplicaCount = &scale.Spec.Replicas
}
if err := kedacontrollerutil.UpdateScaledObjectStatus(r.Client, logger, scaledObject, status); err != nil {
return gvkr, err
}
logger.Info("Detected resource targeted for scaling", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
}
return gvkr, nil
}
// ensureHPAForScaledObjectExists ensures that in cluster exist up-to-date HPA for specified ScaledObject, returns true if a new HPA was created
func (r *ScaledObjectReconciler) ensureHPAForScaledObjectExists(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, gvkr *kedav1alpha1.GroupVersionKindResource) (bool, error) {
hpaName := getHPAName(scaledObject)
foundHpa := &autoscalingv2beta2.HorizontalPodAutoscaler{}
// Check if HPA for this ScaledObject already exists
err := r.Client.Get(context.TODO(), types.NamespacedName{Name: hpaName, Namespace: scaledObject.Namespace}, foundHpa)
if err != nil && errors.IsNotFound(err) {
// HPA wasn't found -> let's create a new one
err = r.createAndDeployNewHPA(logger, scaledObject, gvkr)
if err != nil {
return false, err
}
// check if scaledObject.spec.behavior was defined, because it is supported only on k8s >= 1.18
r.checkMinK8sVersionforHPABehavior(logger, scaledObject)
// new HPA created successfully -> notify Reconcile function so it could fire a new ScaleLoop
return true, nil
} else if err != nil {
logger.Error(err, "Failed to get HPA from cluster")
return false, err
}
// HPA was found -> let's check if we need to update it
err = r.updateHPAIfNeeded(logger, scaledObject, foundHpa, gvkr)
if err != nil {
logger.Error(err, "Failed to check HPA for possible update")
return false, err
}
return false, nil
}
// startScaleLoop starts ScaleLoop handler for the respective ScaledObject
func (r *ScaledObjectReconciler) requestScaleLoop(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error {
logger.V(1).Info("Notify scaleHandler of an update in scaledObject")
key, err := cache.MetaNamespaceKeyFunc(scaledObject)
if err != nil {
logger.Error(err, "Error getting key for scaledObject")
return err
}
if err = r.scaleHandler.HandleScalableObject(scaledObject); err != nil {
return err
}
// store ScaledObject's current Generation
r.scaledObjectsGenerations.Store(key, scaledObject.Generation)
return nil
}
// stopScaleLoop stops ScaleLoop handler for the respective ScaleObject
func (r *ScaledObjectReconciler) stopScaleLoop(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error {
key, err := cache.MetaNamespaceKeyFunc(scaledObject)
if err != nil {
logger.Error(err, "Error getting key for scaledObject")
return err
}
if err := r.scaleHandler.DeleteScalableObject(scaledObject); err != nil {
return err
}
// delete ScaledObject's current Generation
r.scaledObjectsGenerations.Delete(key)
return nil
}
// scaledObjectGenerationChanged returns true if ScaledObject's Generation was changed, ie. ScaledObject.Spec was changed
func (r *ScaledObjectReconciler) | (logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (bool, error) {
key, err := cache.MetaNamespaceKeyFunc(scaledObject)
if err != nil {
logger.Error(err, "Error getting key for scaledObject")
return true, err
}
value, loaded := r.scaledObjectsGenerations.Load(key)
if loaded {
generation := value.(int64)
if generation == scaledObject.Generation {
return false, nil
}
}
return true, nil
}
| scaledObjectGenerationChanged | identifier_name |
create_secret.go | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"io"
"github.com/spf13/cobra"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/util/i18n"
)
// NewCmdCreateSecret groups subcommands to create various types of secrets
func NewCmdCreateSecret(f cmdutil.Factory, cmdOut, errOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "secret",
Short: i18n.T("Create a secret using specified subcommand"),
Long: "Create a secret using specified subcommand.",
Run: cmdutil.DefaultSubCommandRun(errOut),
}
cmd.AddCommand(NewCmdCreateSecretDockerRegistry(f, cmdOut))
cmd.AddCommand(NewCmdCreateSecretTLS(f, cmdOut))
cmd.AddCommand(NewCmdCreateSecretGeneric(f, cmdOut))
return cmd
}
var (
secretLong = templates.LongDesc(i18n.T(`
Create a secret based on a file, directory, or specified literal value.
A single secret may package one or more key/value pairs.
When creating a secret based on a file, the key will default to the basename of the file, and the value will
default to the file content. If the basename is an invalid key, you may specify an alternate key.
When creating a secret based on a directory, each file whose basename is a valid key in the directory will be
packaged into the secret. Any directory entries except regular files are ignored (e.g. subdirectories,
symlinks, devices, pipes, etc).`))
secretExample = templates.Examples(i18n.T(`
# Create a new secret named my-secret with keys for each file in folder bar
kubectl create secret generic my-secret --from-file=path/to/bar
# Create a new secret named my-secret with specified keys instead of names on disk
kubectl create secret generic my-secret --from-file=ssh-privatekey=~/.ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub
# Create a new secret named my-secret with key1=supersecret and key2=topsecret
kubectl create secret generic my-secret --from-literal=key1=supersecret --from-literal=key2=topsecret
# Create a new secret named my-secret from an env file
kubectl create secret generic my-secret --from-env-file=path/to/bar.env`))
)
// NewCmdCreateSecretGeneric is a command to create generic secrets from files, directories, or literal values
func NewCmdCreateSecretGeneric(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command |
// CreateSecretGeneric is the implementation of the create secret generic command
func CreateSecretGeneric(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretV1GeneratorName:
generator = &kubectl.SecretGeneratorV1{
Name: name,
Type: cmdutil.GetFlagString(cmd, "type"),
FileSources: cmdutil.GetFlagStringSlice(cmd, "from-file"),
LiteralSources: cmdutil.GetFlagStringArray(cmd, "from-literal"),
EnvFileSource: cmdutil.GetFlagString(cmd, "from-env-file"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetDryRunFlag(cmd),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
}
var (
secretForDockerRegistryLong = templates.LongDesc(i18n.T(`
Create a new secret for use with Docker registries.
Dockercfg secrets are used to authenticate against Docker registries.
When using the Docker command line to push images, you can authenticate to a given registry by running
$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.
That produces a ~/.dockercfg file that is used by subsequent 'docker push' and 'docker pull' commands to
authenticate to the registry. The email address is optional.
When creating applications, you may have a Docker registry that requires authentication. In order for the
nodes to pull images on your behalf, they have to have the credentials. You can provide this information
by creating a dockercfg secret and attaching it to your service account.`))
secretForDockerRegistryExample = templates.Examples(i18n.T(`
# If you don't already have a .dockercfg file, you can create a dockercfg secret directly by using:
kubectl create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL`))
)
// NewCmdCreateSecretDockerRegistry is a macro command for creating secrets to work with Docker registries
func NewCmdCreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "docker-registry NAME --docker-username=user --docker-password=password --docker-email=email [--docker-server=string] [--from-literal=key1=value1] [--dry-run]",
Short: i18n.T("Create a secret for use with a Docker registry"),
Long: secretForDockerRegistryLong,
Example: secretForDockerRegistryExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretDockerRegistry(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretForDockerRegistryV1GeneratorName)
cmd.Flags().String("docker-username", "", i18n.T("Username for Docker registry authentication"))
cmd.MarkFlagRequired("docker-username")
cmd.Flags().String("docker-password", "", i18n.T("Password for Docker registry authentication"))
cmd.MarkFlagRequired("docker-password")
cmd.Flags().String("docker-email", "", i18n.T("Email for Docker registry"))
cmd.Flags().String("docker-server", "https://index.docker.io/v1/", i18n.T("Server location for Docker registry"))
cmdutil.AddInclude3rdPartyFlags(cmd)
return cmd
}
// CreateSecretDockerRegistry is the implementation of the create secret docker-registry command
func CreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
requiredFlags := []string{"docker-username", "docker-password", "docker-email", "docker-server"}
for _, requiredFlag := range requiredFlags {
if value := cmdutil.GetFlagString(cmd, requiredFlag); len(value) == 0 {
return cmdutil.UsageError(cmd, "flag %s is required", requiredFlag)
}
}
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretForDockerRegistryV1GeneratorName:
generator = &kubectl.SecretForDockerRegistryGeneratorV1{
Name: name,
Username: cmdutil.GetFlagString(cmd, "docker-username"),
Email: cmdutil.GetFlagString(cmd, "docker-email"),
Password: cmdutil.GetFlagString(cmd, "docker-password"),
Server: cmdutil.GetFlagString(cmd, "docker-server"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetDryRunFlag(cmd),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
}
var (
secretForTLSLong = templates.LongDesc(i18n.T(`
Create a TLS secret from the given public/private key pair.
The public/private key pair must exist before hand. The public key certificate must be .PEM encoded and match the given private key.`))
secretForTLSExample = templates.Examples(i18n.T(`
# Create a new TLS secret named tls-secret with the given key pair:
kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/to/tls.key`))
)
// NewCmdCreateSecretTLS is a macro command for creating secrets to work with Docker registries
func NewCmdCreateSecretTLS(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "tls NAME --cert=path/to/cert/file --key=path/to/key/file [--dry-run]",
Short: i18n.T("Create a TLS secret"),
Long: secretForTLSLong,
Example: secretForTLSExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretTLS(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretForTLSV1GeneratorName)
cmd.Flags().String("cert", "", i18n.T("Path to PEM encoded public key certificate."))
cmd.Flags().String("key", "", i18n.T("Path to private key associated with given certificate."))
return cmd
}
// CreateSecretTLS is the implementation of the create secret tls command
func CreateSecretTLS(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
requiredFlags := []string{"cert", "key"}
for _, requiredFlag := range requiredFlags {
if value := cmdutil.GetFlagString(cmd, requiredFlag); len(value) == 0 {
return cmdutil.UsageError(cmd, "flag %s is required", requiredFlag)
}
}
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretForTLSV1GeneratorName:
generator = &kubectl.SecretForTLSGeneratorV1{
Name: name,
Key: cmdutil.GetFlagString(cmd, "key"),
Cert: cmdutil.GetFlagString(cmd, "cert"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetFlagBool(cmd, "dry-run"),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
}
| {
cmd := &cobra.Command{
Use: "generic NAME [--type=string] [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]",
Short: i18n.T("Create a secret from a local file, directory or literal value"),
Long: secretLong,
Example: secretExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretGeneric(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretV1GeneratorName)
cmd.Flags().StringSlice("from-file", []string{}, "Key files can be specified using their file path, in which case a default name will be given to them, or optionally with a name and file path, in which case the given name will be used. Specifying a directory will iterate each named file in the directory that is a valid secret key.")
cmd.Flags().StringArray("from-literal", []string{}, "Specify a key and literal value to insert in secret (i.e. mykey=somevalue)")
cmd.Flags().String("from-env-file", "", "Specify the path to a file to read lines of key=val pairs to create a secret (i.e. a Docker .env file).")
cmd.Flags().String("type", "", i18n.T("The type of secret to create"))
return cmd
} | identifier_body |
create_secret.go | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"io"
"github.com/spf13/cobra"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/util/i18n"
)
// NewCmdCreateSecret groups subcommands to create various types of secrets
func NewCmdCreateSecret(f cmdutil.Factory, cmdOut, errOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "secret",
Short: i18n.T("Create a secret using specified subcommand"),
Long: "Create a secret using specified subcommand.",
Run: cmdutil.DefaultSubCommandRun(errOut),
}
cmd.AddCommand(NewCmdCreateSecretDockerRegistry(f, cmdOut))
cmd.AddCommand(NewCmdCreateSecretTLS(f, cmdOut))
cmd.AddCommand(NewCmdCreateSecretGeneric(f, cmdOut))
return cmd
}
var (
secretLong = templates.LongDesc(i18n.T(`
Create a secret based on a file, directory, or specified literal value.
A single secret may package one or more key/value pairs.
When creating a secret based on a file, the key will default to the basename of the file, and the value will
default to the file content. If the basename is an invalid key, you may specify an alternate key.
When creating a secret based on a directory, each file whose basename is a valid key in the directory will be
packaged into the secret. Any directory entries except regular files are ignored (e.g. subdirectories,
symlinks, devices, pipes, etc).`))
secretExample = templates.Examples(i18n.T(`
# Create a new secret named my-secret with keys for each file in folder bar
kubectl create secret generic my-secret --from-file=path/to/bar
# Create a new secret named my-secret with specified keys instead of names on disk
kubectl create secret generic my-secret --from-file=ssh-privatekey=~/.ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub
# Create a new secret named my-secret with key1=supersecret and key2=topsecret
kubectl create secret generic my-secret --from-literal=key1=supersecret --from-literal=key2=topsecret
# Create a new secret named my-secret from an env file
kubectl create secret generic my-secret --from-env-file=path/to/bar.env`))
)
// NewCmdCreateSecretGeneric is a command to create generic secrets from files, directories, or literal values
func NewCmdCreateSecretGeneric(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "generic NAME [--type=string] [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]",
Short: i18n.T("Create a secret from a local file, directory or literal value"),
Long: secretLong,
Example: secretExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretGeneric(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretV1GeneratorName)
cmd.Flags().StringSlice("from-file", []string{}, "Key files can be specified using their file path, in which case a default name will be given to them, or optionally with a name and file path, in which case the given name will be used. Specifying a directory will iterate each named file in the directory that is a valid secret key.")
cmd.Flags().StringArray("from-literal", []string{}, "Specify a key and literal value to insert in secret (i.e. mykey=somevalue)")
cmd.Flags().String("from-env-file", "", "Specify the path to a file to read lines of key=val pairs to create a secret (i.e. a Docker .env file).")
cmd.Flags().String("type", "", i18n.T("The type of secret to create"))
return cmd
}
// CreateSecretGeneric is the implementation of the create secret generic command
func CreateSecretGeneric(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretV1GeneratorName:
generator = &kubectl.SecretGeneratorV1{
Name: name,
Type: cmdutil.GetFlagString(cmd, "type"),
FileSources: cmdutil.GetFlagStringSlice(cmd, "from-file"),
LiteralSources: cmdutil.GetFlagStringArray(cmd, "from-literal"),
EnvFileSource: cmdutil.GetFlagString(cmd, "from-env-file"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetDryRunFlag(cmd),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
}
var (
secretForDockerRegistryLong = templates.LongDesc(i18n.T(`
Create a new secret for use with Docker registries.
Dockercfg secrets are used to authenticate against Docker registries.
When using the Docker command line to push images, you can authenticate to a given registry by running
$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.
That produces a ~/.dockercfg file that is used by subsequent 'docker push' and 'docker pull' commands to
authenticate to the registry. The email address is optional.
When creating applications, you may have a Docker registry that requires authentication. In order for the
nodes to pull images on your behalf, they have to have the credentials. You can provide this information
by creating a dockercfg secret and attaching it to your service account.`))
secretForDockerRegistryExample = templates.Examples(i18n.T(`
# If you don't already have a .dockercfg file, you can create a dockercfg secret directly by using:
kubectl create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL`))
)
// NewCmdCreateSecretDockerRegistry is a macro command for creating secrets to work with Docker registries
func NewCmdCreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "docker-registry NAME --docker-username=user --docker-password=password --docker-email=email [--docker-server=string] [--from-literal=key1=value1] [--dry-run]",
Short: i18n.T("Create a secret for use with a Docker registry"),
Long: secretForDockerRegistryLong,
Example: secretForDockerRegistryExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretDockerRegistry(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretForDockerRegistryV1GeneratorName)
cmd.Flags().String("docker-username", "", i18n.T("Username for Docker registry authentication"))
cmd.MarkFlagRequired("docker-username")
cmd.Flags().String("docker-password", "", i18n.T("Password for Docker registry authentication"))
cmd.MarkFlagRequired("docker-password")
cmd.Flags().String("docker-email", "", i18n.T("Email for Docker registry"))
cmd.Flags().String("docker-server", "https://index.docker.io/v1/", i18n.T("Server location for Docker registry"))
cmdutil.AddInclude3rdPartyFlags(cmd)
return cmd
}
// CreateSecretDockerRegistry is the implementation of the create secret docker-registry command
func CreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
requiredFlags := []string{"docker-username", "docker-password", "docker-email", "docker-server"}
for _, requiredFlag := range requiredFlags {
if value := cmdutil.GetFlagString(cmd, requiredFlag); len(value) == 0 {
return cmdutil.UsageError(cmd, "flag %s is required", requiredFlag)
}
}
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretForDockerRegistryV1GeneratorName:
generator = &kubectl.SecretForDockerRegistryGeneratorV1{
Name: name,
Username: cmdutil.GetFlagString(cmd, "docker-username"),
Email: cmdutil.GetFlagString(cmd, "docker-email"),
Password: cmdutil.GetFlagString(cmd, "docker-password"),
Server: cmdutil.GetFlagString(cmd, "docker-server"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetDryRunFlag(cmd),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
}
var (
secretForTLSLong = templates.LongDesc(i18n.T(`
Create a TLS secret from the given public/private key pair.
The public/private key pair must exist before hand. The public key certificate must be .PEM encoded and match the given private key.`))
secretForTLSExample = templates.Examples(i18n.T(`
# Create a new TLS secret named tls-secret with the given key pair:
kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/to/tls.key`))
)
// NewCmdCreateSecretTLS is a macro command for creating secrets to work with Docker registries
func NewCmdCreateSecretTLS(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "tls NAME --cert=path/to/cert/file --key=path/to/key/file [--dry-run]",
Short: i18n.T("Create a TLS secret"),
Long: secretForTLSLong,
Example: secretForTLSExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretTLS(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretForTLSV1GeneratorName)
cmd.Flags().String("cert", "", i18n.T("Path to PEM encoded public key certificate."))
cmd.Flags().String("key", "", i18n.T("Path to private key associated with given certificate."))
return cmd
}
// CreateSecretTLS is the implementation of the create secret tls command
func CreateSecretTLS(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
requiredFlags := []string{"cert", "key"}
for _, requiredFlag := range requiredFlags |
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretForTLSV1GeneratorName:
generator = &kubectl.SecretForTLSGeneratorV1{
Name: name,
Key: cmdutil.GetFlagString(cmd, "key"),
Cert: cmdutil.GetFlagString(cmd, "cert"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetFlagBool(cmd, "dry-run"),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
}
| {
if value := cmdutil.GetFlagString(cmd, requiredFlag); len(value) == 0 {
return cmdutil.UsageError(cmd, "flag %s is required", requiredFlag)
}
} | conditional_block |
create_secret.go | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"io"
"github.com/spf13/cobra"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/util/i18n"
)
// NewCmdCreateSecret groups subcommands to create various types of secrets
func NewCmdCreateSecret(f cmdutil.Factory, cmdOut, errOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "secret",
Short: i18n.T("Create a secret using specified subcommand"),
Long: "Create a secret using specified subcommand.",
Run: cmdutil.DefaultSubCommandRun(errOut),
}
cmd.AddCommand(NewCmdCreateSecretDockerRegistry(f, cmdOut))
cmd.AddCommand(NewCmdCreateSecretTLS(f, cmdOut))
cmd.AddCommand(NewCmdCreateSecretGeneric(f, cmdOut))
return cmd
}
var (
secretLong = templates.LongDesc(i18n.T(`
Create a secret based on a file, directory, or specified literal value.
A single secret may package one or more key/value pairs.
When creating a secret based on a file, the key will default to the basename of the file, and the value will
default to the file content. If the basename is an invalid key, you may specify an alternate key.
When creating a secret based on a directory, each file whose basename is a valid key in the directory will be
packaged into the secret. Any directory entries except regular files are ignored (e.g. subdirectories,
symlinks, devices, pipes, etc).`))
secretExample = templates.Examples(i18n.T(`
# Create a new secret named my-secret with keys for each file in folder bar
kubectl create secret generic my-secret --from-file=path/to/bar
# Create a new secret named my-secret with specified keys instead of names on disk
kubectl create secret generic my-secret --from-file=ssh-privatekey=~/.ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub
# Create a new secret named my-secret with key1=supersecret and key2=topsecret
kubectl create secret generic my-secret --from-literal=key1=supersecret --from-literal=key2=topsecret
# Create a new secret named my-secret from an env file
kubectl create secret generic my-secret --from-env-file=path/to/bar.env`)) | Use: "generic NAME [--type=string] [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]",
Short: i18n.T("Create a secret from a local file, directory or literal value"),
Long: secretLong,
Example: secretExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretGeneric(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretV1GeneratorName)
cmd.Flags().StringSlice("from-file", []string{}, "Key files can be specified using their file path, in which case a default name will be given to them, or optionally with a name and file path, in which case the given name will be used. Specifying a directory will iterate each named file in the directory that is a valid secret key.")
cmd.Flags().StringArray("from-literal", []string{}, "Specify a key and literal value to insert in secret (i.e. mykey=somevalue)")
cmd.Flags().String("from-env-file", "", "Specify the path to a file to read lines of key=val pairs to create a secret (i.e. a Docker .env file).")
cmd.Flags().String("type", "", i18n.T("The type of secret to create"))
return cmd
}
// CreateSecretGeneric is the implementation of the create secret generic command
func CreateSecretGeneric(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretV1GeneratorName:
generator = &kubectl.SecretGeneratorV1{
Name: name,
Type: cmdutil.GetFlagString(cmd, "type"),
FileSources: cmdutil.GetFlagStringSlice(cmd, "from-file"),
LiteralSources: cmdutil.GetFlagStringArray(cmd, "from-literal"),
EnvFileSource: cmdutil.GetFlagString(cmd, "from-env-file"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetDryRunFlag(cmd),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
}
var (
secretForDockerRegistryLong = templates.LongDesc(i18n.T(`
Create a new secret for use with Docker registries.
Dockercfg secrets are used to authenticate against Docker registries.
When using the Docker command line to push images, you can authenticate to a given registry by running
$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.
That produces a ~/.dockercfg file that is used by subsequent 'docker push' and 'docker pull' commands to
authenticate to the registry. The email address is optional.
When creating applications, you may have a Docker registry that requires authentication. In order for the
nodes to pull images on your behalf, they have to have the credentials. You can provide this information
by creating a dockercfg secret and attaching it to your service account.`))
secretForDockerRegistryExample = templates.Examples(i18n.T(`
# If you don't already have a .dockercfg file, you can create a dockercfg secret directly by using:
kubectl create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL`))
)
// NewCmdCreateSecretDockerRegistry is a macro command for creating secrets to work with Docker registries
func NewCmdCreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "docker-registry NAME --docker-username=user --docker-password=password --docker-email=email [--docker-server=string] [--from-literal=key1=value1] [--dry-run]",
Short: i18n.T("Create a secret for use with a Docker registry"),
Long: secretForDockerRegistryLong,
Example: secretForDockerRegistryExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretDockerRegistry(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretForDockerRegistryV1GeneratorName)
cmd.Flags().String("docker-username", "", i18n.T("Username for Docker registry authentication"))
cmd.MarkFlagRequired("docker-username")
cmd.Flags().String("docker-password", "", i18n.T("Password for Docker registry authentication"))
cmd.MarkFlagRequired("docker-password")
cmd.Flags().String("docker-email", "", i18n.T("Email for Docker registry"))
cmd.Flags().String("docker-server", "https://index.docker.io/v1/", i18n.T("Server location for Docker registry"))
cmdutil.AddInclude3rdPartyFlags(cmd)
return cmd
}
// CreateSecretDockerRegistry is the implementation of the create secret docker-registry command
func CreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
requiredFlags := []string{"docker-username", "docker-password", "docker-email", "docker-server"}
for _, requiredFlag := range requiredFlags {
if value := cmdutil.GetFlagString(cmd, requiredFlag); len(value) == 0 {
return cmdutil.UsageError(cmd, "flag %s is required", requiredFlag)
}
}
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretForDockerRegistryV1GeneratorName:
generator = &kubectl.SecretForDockerRegistryGeneratorV1{
Name: name,
Username: cmdutil.GetFlagString(cmd, "docker-username"),
Email: cmdutil.GetFlagString(cmd, "docker-email"),
Password: cmdutil.GetFlagString(cmd, "docker-password"),
Server: cmdutil.GetFlagString(cmd, "docker-server"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetDryRunFlag(cmd),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
}
var (
secretForTLSLong = templates.LongDesc(i18n.T(`
Create a TLS secret from the given public/private key pair.
The public/private key pair must exist before hand. The public key certificate must be .PEM encoded and match the given private key.`))
secretForTLSExample = templates.Examples(i18n.T(`
# Create a new TLS secret named tls-secret with the given key pair:
kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/to/tls.key`))
)
// NewCmdCreateSecretTLS is a macro command for creating secrets to work with Docker registries
func NewCmdCreateSecretTLS(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "tls NAME --cert=path/to/cert/file --key=path/to/key/file [--dry-run]",
Short: i18n.T("Create a TLS secret"),
Long: secretForTLSLong,
Example: secretForTLSExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretTLS(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretForTLSV1GeneratorName)
cmd.Flags().String("cert", "", i18n.T("Path to PEM encoded public key certificate."))
cmd.Flags().String("key", "", i18n.T("Path to private key associated with given certificate."))
return cmd
}
// CreateSecretTLS is the implementation of the create secret tls command
func CreateSecretTLS(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
requiredFlags := []string{"cert", "key"}
for _, requiredFlag := range requiredFlags {
if value := cmdutil.GetFlagString(cmd, requiredFlag); len(value) == 0 {
return cmdutil.UsageError(cmd, "flag %s is required", requiredFlag)
}
}
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretForTLSV1GeneratorName:
generator = &kubectl.SecretForTLSGeneratorV1{
Name: name,
Key: cmdutil.GetFlagString(cmd, "key"),
Cert: cmdutil.GetFlagString(cmd, "cert"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetFlagBool(cmd, "dry-run"),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
} | )
// NewCmdCreateSecretGeneric is a command to create generic secrets from files, directories, or literal values
func NewCmdCreateSecretGeneric(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
cmd := &cobra.Command{ | random_line_split |
create_secret.go | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"io"
"github.com/spf13/cobra"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/util/i18n"
)
// NewCmdCreateSecret groups subcommands to create various types of secrets
func NewCmdCreateSecret(f cmdutil.Factory, cmdOut, errOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "secret",
Short: i18n.T("Create a secret using specified subcommand"),
Long: "Create a secret using specified subcommand.",
Run: cmdutil.DefaultSubCommandRun(errOut),
}
cmd.AddCommand(NewCmdCreateSecretDockerRegistry(f, cmdOut))
cmd.AddCommand(NewCmdCreateSecretTLS(f, cmdOut))
cmd.AddCommand(NewCmdCreateSecretGeneric(f, cmdOut))
return cmd
}
var (
secretLong = templates.LongDesc(i18n.T(`
Create a secret based on a file, directory, or specified literal value.
A single secret may package one or more key/value pairs.
When creating a secret based on a file, the key will default to the basename of the file, and the value will
default to the file content. If the basename is an invalid key, you may specify an alternate key.
When creating a secret based on a directory, each file whose basename is a valid key in the directory will be
packaged into the secret. Any directory entries except regular files are ignored (e.g. subdirectories,
symlinks, devices, pipes, etc).`))
secretExample = templates.Examples(i18n.T(`
# Create a new secret named my-secret with keys for each file in folder bar
kubectl create secret generic my-secret --from-file=path/to/bar
# Create a new secret named my-secret with specified keys instead of names on disk
kubectl create secret generic my-secret --from-file=ssh-privatekey=~/.ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub
# Create a new secret named my-secret with key1=supersecret and key2=topsecret
kubectl create secret generic my-secret --from-literal=key1=supersecret --from-literal=key2=topsecret
# Create a new secret named my-secret from an env file
kubectl create secret generic my-secret --from-env-file=path/to/bar.env`))
)
// NewCmdCreateSecretGeneric is a command to create generic secrets from files, directories, or literal values
func NewCmdCreateSecretGeneric(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "generic NAME [--type=string] [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]",
Short: i18n.T("Create a secret from a local file, directory or literal value"),
Long: secretLong,
Example: secretExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretGeneric(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretV1GeneratorName)
cmd.Flags().StringSlice("from-file", []string{}, "Key files can be specified using their file path, in which case a default name will be given to them, or optionally with a name and file path, in which case the given name will be used. Specifying a directory will iterate each named file in the directory that is a valid secret key.")
cmd.Flags().StringArray("from-literal", []string{}, "Specify a key and literal value to insert in secret (i.e. mykey=somevalue)")
cmd.Flags().String("from-env-file", "", "Specify the path to a file to read lines of key=val pairs to create a secret (i.e. a Docker .env file).")
cmd.Flags().String("type", "", i18n.T("The type of secret to create"))
return cmd
}
// CreateSecretGeneric is the implementation of the create secret generic command
func | (f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretV1GeneratorName:
generator = &kubectl.SecretGeneratorV1{
Name: name,
Type: cmdutil.GetFlagString(cmd, "type"),
FileSources: cmdutil.GetFlagStringSlice(cmd, "from-file"),
LiteralSources: cmdutil.GetFlagStringArray(cmd, "from-literal"),
EnvFileSource: cmdutil.GetFlagString(cmd, "from-env-file"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetDryRunFlag(cmd),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
}
var (
secretForDockerRegistryLong = templates.LongDesc(i18n.T(`
Create a new secret for use with Docker registries.
Dockercfg secrets are used to authenticate against Docker registries.
When using the Docker command line to push images, you can authenticate to a given registry by running
$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.
That produces a ~/.dockercfg file that is used by subsequent 'docker push' and 'docker pull' commands to
authenticate to the registry. The email address is optional.
When creating applications, you may have a Docker registry that requires authentication. In order for the
nodes to pull images on your behalf, they have to have the credentials. You can provide this information
by creating a dockercfg secret and attaching it to your service account.`))
secretForDockerRegistryExample = templates.Examples(i18n.T(`
# If you don't already have a .dockercfg file, you can create a dockercfg secret directly by using:
kubectl create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL`))
)
// NewCmdCreateSecretDockerRegistry is a macro command for creating secrets to work with Docker registries
func NewCmdCreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "docker-registry NAME --docker-username=user --docker-password=password --docker-email=email [--docker-server=string] [--from-literal=key1=value1] [--dry-run]",
Short: i18n.T("Create a secret for use with a Docker registry"),
Long: secretForDockerRegistryLong,
Example: secretForDockerRegistryExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretDockerRegistry(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretForDockerRegistryV1GeneratorName)
cmd.Flags().String("docker-username", "", i18n.T("Username for Docker registry authentication"))
cmd.MarkFlagRequired("docker-username")
cmd.Flags().String("docker-password", "", i18n.T("Password for Docker registry authentication"))
cmd.MarkFlagRequired("docker-password")
cmd.Flags().String("docker-email", "", i18n.T("Email for Docker registry"))
cmd.Flags().String("docker-server", "https://index.docker.io/v1/", i18n.T("Server location for Docker registry"))
cmdutil.AddInclude3rdPartyFlags(cmd)
return cmd
}
// CreateSecretDockerRegistry is the implementation of the create secret docker-registry command
func CreateSecretDockerRegistry(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
requiredFlags := []string{"docker-username", "docker-password", "docker-email", "docker-server"}
for _, requiredFlag := range requiredFlags {
if value := cmdutil.GetFlagString(cmd, requiredFlag); len(value) == 0 {
return cmdutil.UsageError(cmd, "flag %s is required", requiredFlag)
}
}
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretForDockerRegistryV1GeneratorName:
generator = &kubectl.SecretForDockerRegistryGeneratorV1{
Name: name,
Username: cmdutil.GetFlagString(cmd, "docker-username"),
Email: cmdutil.GetFlagString(cmd, "docker-email"),
Password: cmdutil.GetFlagString(cmd, "docker-password"),
Server: cmdutil.GetFlagString(cmd, "docker-server"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetDryRunFlag(cmd),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
}
var (
secretForTLSLong = templates.LongDesc(i18n.T(`
Create a TLS secret from the given public/private key pair.
The public/private key pair must exist before hand. The public key certificate must be .PEM encoded and match the given private key.`))
secretForTLSExample = templates.Examples(i18n.T(`
# Create a new TLS secret named tls-secret with the given key pair:
kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/to/tls.key`))
)
// NewCmdCreateSecretTLS is a macro command for creating secrets to work with Docker registries
func NewCmdCreateSecretTLS(f cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "tls NAME --cert=path/to/cert/file --key=path/to/key/file [--dry-run]",
Short: i18n.T("Create a TLS secret"),
Long: secretForTLSLong,
Example: secretForTLSExample,
Run: func(cmd *cobra.Command, args []string) {
err := CreateSecretTLS(f, cmdOut, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddPrinterFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretForTLSV1GeneratorName)
cmd.Flags().String("cert", "", i18n.T("Path to PEM encoded public key certificate."))
cmd.Flags().String("key", "", i18n.T("Path to private key associated with given certificate."))
return cmd
}
// CreateSecretTLS is the implementation of the create secret tls command
func CreateSecretTLS(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {
name, err := NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
requiredFlags := []string{"cert", "key"}
for _, requiredFlag := range requiredFlags {
if value := cmdutil.GetFlagString(cmd, requiredFlag); len(value) == 0 {
return cmdutil.UsageError(cmd, "flag %s is required", requiredFlag)
}
}
var generator kubectl.StructuredGenerator
switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName {
case cmdutil.SecretForTLSV1GeneratorName:
generator = &kubectl.SecretForTLSGeneratorV1{
Name: name,
Key: cmdutil.GetFlagString(cmd, "key"),
Cert: cmdutil.GetFlagString(cmd, "cert"),
}
default:
return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName))
}
return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{
Name: name,
StructuredGenerator: generator,
DryRun: cmdutil.GetFlagBool(cmd, "dry-run"),
OutputFormat: cmdutil.GetFlagString(cmd, "output"),
})
}
| CreateSecretGeneric | identifier_name |
utils.js | "use strict";
// TODO/FIXME: Load the bundled functions in this file from Alhadis/Utils
const {resolve, dirname, sep, isAbsolute, normalize} = require("path");
const {defineAssertions, defineAssertion, flattenList, formatList} = require("chinotto");
const {deindent} =
module.exports = {
/**
* Non-Atom related helpers moved to different repository.
* @see {@link https://github.com/Alhadis/Chinotto}
* @since v2.2.2
*/
defineAssertions,
defineAssertion,
flattenList,
formatList,
/**
* Save a screenshot of the entire desktop in PNG format (macOS/Windows only).
* @param {String} saveTo - Path to save screenshot to
* @return {void}
*/
captureScreen(saveTo){
if(!(saveTo = String(saveTo)).endsWith(".png"))
saveTo += ".png";
const {execFileSync} = require("child_process");
const {existsSync, mkdirSync} = require("fs");
const dir = dirname(saveTo);
existsSync(dir) || mkdirSync(dir, {recursive: true});
switch(process.platform){
case "darwin":
execFileSync("screencapture", ["-xmt", "png", saveTo]);
break;
case "win32":
const input = deindent `
Set-StrictMode -Version Latest
$ErrorActionPreference = "Stop"
Add-Type -AssemblyName System.Windows.Forms
[void] [Reflection.Assembly]::LoadWithPartialName("System.Drawing")
[void] [System.Reflection.Assembly]::LoadWithPartialName("System.Drawing")
[void] [System.Reflection.Assembly]::LoadWithPartialName("System.Windows.Forms")
$rect = ([System.Windows.Forms.Screen]::PrimaryScreen).bounds
$bitmap = New-Object Drawing.Bitmap -argumentList $rect.width, $rect.height
$context = [Drawing.Graphics]::FromImage($bitmap)
$context.copyFromScreen($rect.location, [Drawing.Point]::Empty, $rect.size)
$bitmap.save("${saveTo}")
$context.dispose()
$bitmap.dispose()
`.replace(/\r?\n|\r|\u2028|\u2029/g, "\r\n");
execFileSync("powershell.exe", [
"-NoLogo",
"-NoProfile",
"-NonInteractive",
"-WindowStyle", "Hidden",
"-Command", "-",
], {input, encoding: "utf8", windowsHide: true});
break;
default:
throw new Error("Desktop capture requires Windows or macOS");
}
},
/**
* Save a screenshot of the workspace window.
* @param {"png"|"jpg"|"pdf"} [format="png"] - Screenshot format
* @param {Number} [quality=75] - JPEG quality (0–100)
* @return {Promise<Uint8Array>}
*/
async captureWindow(format = "png", quality = 75){
const {remote} = require("electron");
const page = remote.getCurrentWebContents();
switch(format = String(format).toLowerCase()){
case "pdf":
const width = Math.ceil(CSS.px(window.innerWidth * 1000).to("mm").value);
const height = Math.ceil(CSS.px(window.innerHeight * 1000).to("mm").value);
const {buffer} = await page.printToPDF({
marginsType: 1,
printBackground: true,
pageSize: {width, height},
});
return new Uint8Array(buffer);
case "jpg":
case "jpeg":
quality = isNaN(quality) ? 75 : Math.max(Math.min(100, ~~quality), 0);
return (await page.capturePage()).toJPEG(quality);
case "png":
return (await page.capturePage()).toPNG();
default:
throw new TypeError(`Unsupported file-format: ${format}`);
}
},
/**
* Open a file to a specific line and column in the user's editor-pane.
* @param {String} path
* @param {Number} line
* @param {Number} columns
*/
jumpToFile(path, line, column){
const jumpURL = "atom://core/open/file?filename="
+ encodeURIComponent(path)
+ `&line=${line}&column=${column}`;
return require("electron").shell.openExternal(jumpURL);
},
/**
* Open a list of files relative to currently-running test.
*
* @param {...String} paths
* @return {Promise<TextEditor|TextEditor[]>}
*/
async open(...paths){
if(!paths.length) return atom.workspace.open();
const testPath = dirname((AtomMocha.runner.currentRunnable || {file: __filename}).file);
const editors = await Promise.all(paths.map(path => {
path = path.replace(/[\\/]/g, sep);
return atom.workspace.open(isAbsolute(path) ? path : resolve(testPath, normalize(path)));
}));
return (paths.length > 1) ? editors : editors[0];
},
/**
* Return a {@link Promise} which resolves once an event has been emitted.
*
* @param {EventEmitter} source - Something with an {@link Emitter} object
* @param {String} eventName - Name of event to listen for
* @return {Promise}
*/
async waitForEvent(source, eventName){
return new Promise(resolve => {
const disposable = source.emitter.on(eventName, result => {
disposable.dispose();
resolve(result);
});
});
},
/**
* Wrapper for creating a new DOM element, optionally assigning it a hash of properties upon construction.
*
* @param {String} nodeType - Element type to create.
* @param {Object} obj - An optional hash of properties to assign the newly-created object.
* @return {Element}
*/
New(nodeType, obj){
function absorb(a, b){
for(const i in b)
if(Object(a[i]) === a[i] && Object(b[i]) === b[i])
absorb(a[i], b[i]);
else a[i] = b[i];
}
const node = document.createElement(nodeType);
if(obj) absorb(node, obj);
return node;
},
/**
* Curried method to append multiple nodes at once.
*
* @example addTo(node)(el1, el2, …)
* @example node = addTo(node)(…)[0]
* @return {Function}
*/
addTo(parent){
let count = 0;
let target = parent;
const fn = (...nodes) => {
let lastElement;
for(let node of nodes){
if("string" === typeof node)
node = document.createTextNode(node);
else if(node)
lastElement =
fn[++count] = node;
node && target.appendChild(node);
}
target = lastElement || target;
return fn;
};
fn[count] = target;
return fn;
},
/**
* Return the containing element of a node that matches the given selector.
*
* If the node itself matches, it'll be returned unless ignoreSelf is set.
*
* @param {Node} node - A document node to inspect the hierarchy of
* @param {String} selector - A CSS selector string
* @param {Boolean} ignoreSelf - If given a truthy value, only the parents of a node will be queried
* @return {Element} The closest matching element, or NULL if none of the node's parents matched the selector.
*/
nearest(node, selector, ignoreSelf){
let match;
let parent = ignoreSelf ? node.parentNode : node;
const matches = document.querySelectorAll(selector);
const numMatches = matches.length;
if(numMatches) while(parent){
for(match = 0; match < numMatches; ++match)
if(matches[match] === parent) return parent;
parent = parent.parentNode;
}
return null;
},
/**
* Locate the root directory shared by multiple paths.
*
* @param {Array} paths - A list of filesystem paths
* @return {String}
*/
findBasePath(paths){
const POSIX = paths[0].indexOf("/") !== -1;
let matched = [];
// Spare ourselves the trouble if there's only one path
if(1 === paths.length){
matched = (paths[0].replace(/[\\/]+$/, "")).split(/[\\/]/g);
matched.pop();
}
// Otherwise, comb each array
else{
const rows = paths.map(d => d.split(/[\\/]/g));
const width = Math.max(...rows.map(d => d.length));
const height = rows.length;
let x;
X: for(x = 0; x < width; ++x){
const str = rows[0][x];
for(let y = 1; y < height; ++y)
if(str !== rows[y][x]) break X;
matched.push(str);
}
}
return matched.join(POSIX ? "/" : "\\");
},
/**
* Return the width of the scrollbars being displayed by this user's OS/device.
*
* @return {Number}
*/
getScrollbarWidth(){
const el = document.createElement("div");
const {style} = el;
const size = 120;
style.width =
style.height = size+"px";
style.overflow = "auto";
el.innerHTML = Array(size*5).join(" W ");
(document.body || document.documentElement).appendChild(el);
const result = el.offsetWidth - el.scrollWidth;
el.parentNode.removeChild(el);
return result;
},
/**
* Generate a RegEx from its string-based representation.
*
* Useful for "deserialising" a regex from JSON. Optional flags can be given
* to override trailing modifiers found in the source, if any.
*
* @example "/\\S+/i" -> /\S+/i
* @example "\\d+\\.\\d+$" -> /\d+\.\d+$/
* @param {String} src
* @param {String} flags
* @return {RegExp}
*/
regexF | flags){
src = (src || "").toString();
if(!src) return null;
const matchEnd = src.match(/\/([gimuy]*)$/);
// Input is a "complete" regular expression
if(matchEnd && /^\//.test(src))
return new RegExp(
src.replace(/^\/|\/([gimuy]*)$/gi, ""),
flags != null ? flags : matchEnd[1]
);
return new RegExp(src, flags);
},
/**
* Escape special regex characters within a string.
*
* @example "file.js" -> "file\\.js"
* @param {String} input
* @return {String}
*/
escapeRegExp(input){
return input.replace(/([/\\^$*+?{}[\]().|])/g, "\\$1");
},
/**
* Replace HTML metacharacters with numeric character references.
*
* Affected characters are: & < > "
*
* NOTE: Named entities are NOT checked, and will be double-escaped.
* Exceptions are made for `"`, `<` and `>`, due to their
* abundant use. Numeric entities, even with invalid codepoints, are
* also safe from double-encoding.
*
* @example "name"<email> -> "name"<email>
* @param {String} input
* @return {String}
*/
escapeHTML(input){
return input.replace(/["<>]|&(?!quot;|gt;|lt;|#x?[A-F\d]+;)/gi, s => "&#"+s.charCodeAt(0)+";");
},
/**
* Parse a list of keywords into an object of boolean "true" values.
*
* @example parseKeywords("top left") -> {top: true, left: true}
* @param {Mixed} keywords - A space-delimited string or an array of strings
* @return {Object}
*/
parseKeywords(keywords){
if(!Array.isArray(keywords)){
if(!keywords) return null;
keywords = [keywords];
}
const output = {};
for(const k of keywords)
k.split(/\s+/g).filter(i => i).forEach(k => output[k] = true);
return output;
},
/**
* Return a {@link Promise} which auto-resolves after a delay.
*
* @param {Number} [delay=100] - Delay in milliseconds
* @return {Promise<void>}
*/
wait(delay = 100){
return new Promise(resolve => {
setTimeout(() => resolve(), delay);
});
},
/**
* Keep calling a function until it returns a truthy value.
*
* @example poll(async () => (await fetch(url)).done);
* @param {Function} fn
* @param {Object} [opts={}]
* @param {Number} [opts.rate=100]
* @param {Number} [opts.timeout=0]
* @param {Boolean} [opts.negate=false]
* @return {Promise<void>}
*/
async poll(fn, opts = {}){
const {rate = 100, timeout = 0, negate = false} = opts;
const start = Date.now();
for(;;){
const result = await fn();
if(!negate === !!result) return result;
if(timeout && Date.now() - start > timeout)
throw new Error("Timed out");
await new Promise($ => setTimeout($, rate));
}
},
/**
* Strip excess whitespace from a multiline string.
*
* Intended to be used with tagged template literals,
* but will work on any multiline string value.
*
* @example
* const HTML = deindent;
* let output = HTML `
* <div>
* (Text)
* </div>
* `;
* output == "<div>\n\t(Text)\n</div>";
*
* @param {Object|String} input
* @param {...String} [args]
* @return {String}
*/
deindent(input, ...args){
// Avoid breaking on String.raw if called as an ordinary function
if("object" !== typeof input || "object" !== typeof input.raw)
return deindent `${input}`;
const depthTable = [];
let maxDepth = Number.NEGATIVE_INFINITY;
let minDepth = Number.POSITIVE_INFINITY;
// Normalise newlines and strip leading or trailing blank lines
const chunk = String.raw.call(null, input, ...args)
.replace(/\r(\n?)/g, "$1")
.replace(/^(?:[ \t]*\n)+|(?:\n[ \t]*)+$/g, "");
for(const line of chunk.split(/\n/)){
// Ignore whitespace-only lines
if(!/\S/.test(line)) continue;
const indentString = line.match(/^[ \t]*(?=\S|$)/)[0];
const indentLength = indentString.replace(/\t/g, " ".repeat(8)).length;
if(indentLength < 1) continue;
const depthStrings = depthTable[indentLength] || [];
depthStrings.push(indentString);
maxDepth = Math.max(maxDepth, indentLength);
minDepth = Math.min(minDepth, indentLength);
if(!depthTable[indentLength])
depthTable[indentLength] = depthStrings;
}
if(maxDepth < 1)
return chunk;
const depthStrings = new Set();
for(const column of depthTable.slice(0, minDepth + 1)){
if(!column) continue;
depthStrings.add(...column);
}
depthStrings.delete(undefined);
const stripPattern = [...depthStrings].reverse().join("|");
return chunk.replace(new RegExp(`^(?:${stripPattern})`, "gm"), "");
},
};
| romString(src, | identifier_name |
utils.js | "use strict";
// TODO/FIXME: Load the bundled functions in this file from Alhadis/Utils
const {resolve, dirname, sep, isAbsolute, normalize} = require("path");
const {defineAssertions, defineAssertion, flattenList, formatList} = require("chinotto");
const {deindent} =
module.exports = {
/**
* Non-Atom related helpers moved to different repository.
* @see {@link https://github.com/Alhadis/Chinotto}
* @since v2.2.2
*/
defineAssertions,
defineAssertion,
flattenList,
formatList,
/**
* Save a screenshot of the entire desktop in PNG format (macOS/Windows only).
* @param {String} saveTo - Path to save screenshot to
* @return {void}
*/
captureScreen(saveTo){
if(!(saveTo = String(saveTo)).endsWith(".png"))
saveTo += ".png";
const {execFileSync} = require("child_process");
const {existsSync, mkdirSync} = require("fs");
const dir = dirname(saveTo);
existsSync(dir) || mkdirSync(dir, {recursive: true});
switch(process.platform){
case "darwin":
execFileSync("screencapture", ["-xmt", "png", saveTo]);
break;
case "win32":
const input = deindent `
Set-StrictMode -Version Latest
$ErrorActionPreference = "Stop"
Add-Type -AssemblyName System.Windows.Forms
[void] [Reflection.Assembly]::LoadWithPartialName("System.Drawing")
[void] [System.Reflection.Assembly]::LoadWithPartialName("System.Drawing")
[void] [System.Reflection.Assembly]::LoadWithPartialName("System.Windows.Forms")
$rect = ([System.Windows.Forms.Screen]::PrimaryScreen).bounds
$bitmap = New-Object Drawing.Bitmap -argumentList $rect.width, $rect.height
$context = [Drawing.Graphics]::FromImage($bitmap)
$context.copyFromScreen($rect.location, [Drawing.Point]::Empty, $rect.size)
$bitmap.save("${saveTo}")
$context.dispose()
$bitmap.dispose()
`.replace(/\r?\n|\r|\u2028|\u2029/g, "\r\n");
execFileSync("powershell.exe", [
"-NoLogo",
"-NoProfile",
"-NonInteractive",
"-WindowStyle", "Hidden",
"-Command", "-",
], {input, encoding: "utf8", windowsHide: true});
break;
default:
throw new Error("Desktop capture requires Windows or macOS");
}
},
/**
* Save a screenshot of the workspace window.
* @param {"png"|"jpg"|"pdf"} [format="png"] - Screenshot format
* @param {Number} [quality=75] - JPEG quality (0–100)
* @return {Promise<Uint8Array>}
*/
async captureWindow(format = "png", quality = 75){
const {remote} = require("electron");
const page = remote.getCurrentWebContents();
switch(format = String(format).toLowerCase()){
case "pdf":
const width = Math.ceil(CSS.px(window.innerWidth * 1000).to("mm").value);
const height = Math.ceil(CSS.px(window.innerHeight * 1000).to("mm").value);
const {buffer} = await page.printToPDF({
marginsType: 1,
printBackground: true,
pageSize: {width, height},
});
return new Uint8Array(buffer);
case "jpg":
case "jpeg":
quality = isNaN(quality) ? 75 : Math.max(Math.min(100, ~~quality), 0);
return (await page.capturePage()).toJPEG(quality);
case "png":
return (await page.capturePage()).toPNG();
default:
throw new TypeError(`Unsupported file-format: ${format}`);
}
},
/**
* Open a file to a specific line and column in the user's editor-pane.
* @param {String} path
* @param {Number} line
* @param {Number} columns
*/
jumpToFile(path, line, column){
const jumpURL = "atom://core/open/file?filename="
+ encodeURIComponent(path)
+ `&line=${line}&column=${column}`;
return require("electron").shell.openExternal(jumpURL);
},
/**
* Open a list of files relative to currently-running test.
*
* @param {...String} paths
* @return {Promise<TextEditor|TextEditor[]>}
*/
async open(...paths){
if(!paths.length) return atom.workspace.open();
const testPath = dirname((AtomMocha.runner.currentRunnable || {file: __filename}).file);
const editors = await Promise.all(paths.map(path => {
path = path.replace(/[\\/]/g, sep);
return atom.workspace.open(isAbsolute(path) ? path : resolve(testPath, normalize(path)));
}));
return (paths.length > 1) ? editors : editors[0];
},
/**
* Return a {@link Promise} which resolves once an event has been emitted.
*
* @param {EventEmitter} source - Something with an {@link Emitter} object
* @param {String} eventName - Name of event to listen for
* @return {Promise}
*/
async waitForEvent(source, eventName){
return new Promise(resolve => {
const disposable = source.emitter.on(eventName, result => {
disposable.dispose();
resolve(result);
});
});
},
/**
* Wrapper for creating a new DOM element, optionally assigning it a hash of properties upon construction.
*
* @param {String} nodeType - Element type to create.
* @param {Object} obj - An optional hash of properties to assign the newly-created object.
* @return {Element}
*/
New(nodeType, obj){
function absorb(a, b){
for(const i in b)
if(Object(a[i]) === a[i] && Object(b[i]) === b[i])
absorb(a[i], b[i]);
else a[i] = b[i];
}
const node = document.createElement(nodeType);
if(obj) absorb(node, obj);
return node;
},
/**
* Curried method to append multiple nodes at once.
*
* @example addTo(node)(el1, el2, …)
* @example node = addTo(node)(…)[0]
* @return {Function}
*/
addTo(parent){
let count = 0;
let target = parent;
const fn = (...nodes) => {
let lastElement;
for(let node of nodes){
if("string" === typeof node)
node = document.createTextNode(node);
else if(node)
lastElement =
fn[++count] = node;
node && target.appendChild(node);
}
target = lastElement || target;
return fn;
};
fn[count] = target;
return fn;
},
/**
* Return the containing element of a node that matches the given selector.
*
* If the node itself matches, it'll be returned unless ignoreSelf is set.
*
* @param {Node} node - A document node to inspect the hierarchy of
* @param {String} selector - A CSS selector string
* @param {Boolean} ignoreSelf - If given a truthy value, only the parents of a node will be queried
* @return {Element} The closest matching element, or NULL if none of the node's parents matched the selector.
*/
nearest(node, selector, ignoreSelf){
let match;
let parent = ignoreSelf ? node.parentNode : node;
const matches = document.querySelectorAll(selector);
const numMatches = matches.length;
if(numMatches) while(parent){
for(match = 0; match < numMatches; ++match)
if(matches[match] === parent) return parent;
parent = parent.parentNode;
}
return null;
},
/**
* Locate the root directory shared by multiple paths.
*
* @param {Array} paths - A list of filesystem paths
* @return {String}
*/
findBasePath(paths){
co | /**
* Return the width of the scrollbars being displayed by this user's OS/device.
*
* @return {Number}
*/
getScrollbarWidth(){
const el = document.createElement("div");
const {style} = el;
const size = 120;
style.width =
style.height = size+"px";
style.overflow = "auto";
el.innerHTML = Array(size*5).join(" W ");
(document.body || document.documentElement).appendChild(el);
const result = el.offsetWidth - el.scrollWidth;
el.parentNode.removeChild(el);
return result;
},
/**
* Generate a RegEx from its string-based representation.
*
* Useful for "deserialising" a regex from JSON. Optional flags can be given
* to override trailing modifiers found in the source, if any.
*
* @example "/\\S+/i" -> /\S+/i
* @example "\\d+\\.\\d+$" -> /\d+\.\d+$/
* @param {String} src
* @param {String} flags
* @return {RegExp}
*/
regexFromString(src, flags){
src = (src || "").toString();
if(!src) return null;
const matchEnd = src.match(/\/([gimuy]*)$/);
// Input is a "complete" regular expression
if(matchEnd && /^\//.test(src))
return new RegExp(
src.replace(/^\/|\/([gimuy]*)$/gi, ""),
flags != null ? flags : matchEnd[1]
);
return new RegExp(src, flags);
},
/**
* Escape special regex characters within a string.
*
* @example "file.js" -> "file\\.js"
* @param {String} input
* @return {String}
*/
escapeRegExp(input){
return input.replace(/([/\\^$*+?{}[\]().|])/g, "\\$1");
},
/**
* Replace HTML metacharacters with numeric character references.
*
* Affected characters are: & < > "
*
* NOTE: Named entities are NOT checked, and will be double-escaped.
* Exceptions are made for `"`, `<` and `>`, due to their
* abundant use. Numeric entities, even with invalid codepoints, are
* also safe from double-encoding.
*
* @example "name"<email> -> "name"<email>
* @param {String} input
* @return {String}
*/
escapeHTML(input){
return input.replace(/["<>]|&(?!quot;|gt;|lt;|#x?[A-F\d]+;)/gi, s => "&#"+s.charCodeAt(0)+";");
},
/**
* Parse a list of keywords into an object of boolean "true" values.
*
* @example parseKeywords("top left") -> {top: true, left: true}
* @param {Mixed} keywords - A space-delimited string or an array of strings
* @return {Object}
*/
parseKeywords(keywords){
if(!Array.isArray(keywords)){
if(!keywords) return null;
keywords = [keywords];
}
const output = {};
for(const k of keywords)
k.split(/\s+/g).filter(i => i).forEach(k => output[k] = true);
return output;
},
/**
* Return a {@link Promise} which auto-resolves after a delay.
*
* @param {Number} [delay=100] - Delay in milliseconds
* @return {Promise<void>}
*/
wait(delay = 100){
return new Promise(resolve => {
setTimeout(() => resolve(), delay);
});
},
/**
* Keep calling a function until it returns a truthy value.
*
* @example poll(async () => (await fetch(url)).done);
* @param {Function} fn
* @param {Object} [opts={}]
* @param {Number} [opts.rate=100]
* @param {Number} [opts.timeout=0]
* @param {Boolean} [opts.negate=false]
* @return {Promise<void>}
*/
async poll(fn, opts = {}){
const {rate = 100, timeout = 0, negate = false} = opts;
const start = Date.now();
for(;;){
const result = await fn();
if(!negate === !!result) return result;
if(timeout && Date.now() - start > timeout)
throw new Error("Timed out");
await new Promise($ => setTimeout($, rate));
}
},
/**
* Strip excess whitespace from a multiline string.
*
* Intended to be used with tagged template literals,
* but will work on any multiline string value.
*
* @example
* const HTML = deindent;
* let output = HTML `
* <div>
* (Text)
* </div>
* `;
* output == "<div>\n\t(Text)\n</div>";
*
* @param {Object|String} input
* @param {...String} [args]
* @return {String}
*/
deindent(input, ...args){
// Avoid breaking on String.raw if called as an ordinary function
if("object" !== typeof input || "object" !== typeof input.raw)
return deindent `${input}`;
const depthTable = [];
let maxDepth = Number.NEGATIVE_INFINITY;
let minDepth = Number.POSITIVE_INFINITY;
// Normalise newlines and strip leading or trailing blank lines
const chunk = String.raw.call(null, input, ...args)
.replace(/\r(\n?)/g, "$1")
.replace(/^(?:[ \t]*\n)+|(?:\n[ \t]*)+$/g, "");
for(const line of chunk.split(/\n/)){
// Ignore whitespace-only lines
if(!/\S/.test(line)) continue;
const indentString = line.match(/^[ \t]*(?=\S|$)/)[0];
const indentLength = indentString.replace(/\t/g, " ".repeat(8)).length;
if(indentLength < 1) continue;
const depthStrings = depthTable[indentLength] || [];
depthStrings.push(indentString);
maxDepth = Math.max(maxDepth, indentLength);
minDepth = Math.min(minDepth, indentLength);
if(!depthTable[indentLength])
depthTable[indentLength] = depthStrings;
}
if(maxDepth < 1)
return chunk;
const depthStrings = new Set();
for(const column of depthTable.slice(0, minDepth + 1)){
if(!column) continue;
depthStrings.add(...column);
}
depthStrings.delete(undefined);
const stripPattern = [...depthStrings].reverse().join("|");
return chunk.replace(new RegExp(`^(?:${stripPattern})`, "gm"), "");
},
};
| nst POSIX = paths[0].indexOf("/") !== -1;
let matched = [];
// Spare ourselves the trouble if there's only one path
if(1 === paths.length){
matched = (paths[0].replace(/[\\/]+$/, "")).split(/[\\/]/g);
matched.pop();
}
// Otherwise, comb each array
else{
const rows = paths.map(d => d.split(/[\\/]/g));
const width = Math.max(...rows.map(d => d.length));
const height = rows.length;
let x;
X: for(x = 0; x < width; ++x){
const str = rows[0][x];
for(let y = 1; y < height; ++y)
if(str !== rows[y][x]) break X;
matched.push(str);
}
}
return matched.join(POSIX ? "/" : "\\");
},
| identifier_body |
utils.js | "use strict";
// TODO/FIXME: Load the bundled functions in this file from Alhadis/Utils
const {resolve, dirname, sep, isAbsolute, normalize} = require("path");
const {defineAssertions, defineAssertion, flattenList, formatList} = require("chinotto");
const {deindent} =
module.exports = {
/**
* Non-Atom related helpers moved to different repository.
* @see {@link https://github.com/Alhadis/Chinotto}
* @since v2.2.2
*/
defineAssertions,
defineAssertion,
flattenList,
formatList,
/**
* Save a screenshot of the entire desktop in PNG format (macOS/Windows only).
* @param {String} saveTo - Path to save screenshot to
* @return {void}
*/
captureScreen(saveTo){
if(!(saveTo = String(saveTo)).endsWith(".png"))
saveTo += ".png";
const {execFileSync} = require("child_process");
const {existsSync, mkdirSync} = require("fs");
const dir = dirname(saveTo);
existsSync(dir) || mkdirSync(dir, {recursive: true});
switch(process.platform){
case "darwin":
execFileSync("screencapture", ["-xmt", "png", saveTo]);
break;
case "win32":
const input = deindent `
Set-StrictMode -Version Latest
$ErrorActionPreference = "Stop"
Add-Type -AssemblyName System.Windows.Forms
[void] [Reflection.Assembly]::LoadWithPartialName("System.Drawing")
[void] [System.Reflection.Assembly]::LoadWithPartialName("System.Drawing")
[void] [System.Reflection.Assembly]::LoadWithPartialName("System.Windows.Forms")
$rect = ([System.Windows.Forms.Screen]::PrimaryScreen).bounds
$bitmap = New-Object Drawing.Bitmap -argumentList $rect.width, $rect.height
$context = [Drawing.Graphics]::FromImage($bitmap)
$context.copyFromScreen($rect.location, [Drawing.Point]::Empty, $rect.size)
$bitmap.save("${saveTo}")
$context.dispose()
$bitmap.dispose()
`.replace(/\r?\n|\r|\u2028|\u2029/g, "\r\n");
execFileSync("powershell.exe", [
"-NoLogo",
"-NoProfile",
"-NonInteractive",
"-WindowStyle", "Hidden",
"-Command", "-",
], {input, encoding: "utf8", windowsHide: true});
break;
default:
throw new Error("Desktop capture requires Windows or macOS");
}
},
/**
* Save a screenshot of the workspace window.
* @param {"png"|"jpg"|"pdf"} [format="png"] - Screenshot format
* @param {Number} [quality=75] - JPEG quality (0–100)
* @return {Promise<Uint8Array>}
*/
async captureWindow(format = "png", quality = 75){
const {remote} = require("electron");
const page = remote.getCurrentWebContents();
switch(format = String(format).toLowerCase()){
case "pdf":
const width = Math.ceil(CSS.px(window.innerWidth * 1000).to("mm").value);
const height = Math.ceil(CSS.px(window.innerHeight * 1000).to("mm").value);
const {buffer} = await page.printToPDF({
marginsType: 1,
printBackground: true,
pageSize: {width, height},
});
return new Uint8Array(buffer);
case "jpg":
case "jpeg":
quality = isNaN(quality) ? 75 : Math.max(Math.min(100, ~~quality), 0);
return (await page.capturePage()).toJPEG(quality);
case "png":
return (await page.capturePage()).toPNG();
default:
throw new TypeError(`Unsupported file-format: ${format}`);
}
},
/**
* Open a file to a specific line and column in the user's editor-pane.
* @param {String} path
* @param {Number} line
* @param {Number} columns
*/
jumpToFile(path, line, column){
const jumpURL = "atom://core/open/file?filename="
+ encodeURIComponent(path)
+ `&line=${line}&column=${column}`;
return require("electron").shell.openExternal(jumpURL);
},
/**
* Open a list of files relative to currently-running test.
*
* @param {...String} paths
* @return {Promise<TextEditor|TextEditor[]>}
*/
async open(...paths){
if(!paths.length) return atom.workspace.open();
const testPath = dirname((AtomMocha.runner.currentRunnable || {file: __filename}).file);
const editors = await Promise.all(paths.map(path => {
path = path.replace(/[\\/]/g, sep);
return atom.workspace.open(isAbsolute(path) ? path : resolve(testPath, normalize(path)));
}));
return (paths.length > 1) ? editors : editors[0];
},
/**
* Return a {@link Promise} which resolves once an event has been emitted.
*
* @param {EventEmitter} source - Something with an {@link Emitter} object
* @param {String} eventName - Name of event to listen for
* @return {Promise}
*/
async waitForEvent(source, eventName){
return new Promise(resolve => {
const disposable = source.emitter.on(eventName, result => {
disposable.dispose();
resolve(result);
});
});
},
/**
* Wrapper for creating a new DOM element, optionally assigning it a hash of properties upon construction.
*
* @param {String} nodeType - Element type to create.
* @param {Object} obj - An optional hash of properties to assign the newly-created object.
* @return {Element}
*/
New(nodeType, obj){
function absorb(a, b){
for(const i in b)
if(Object(a[i]) === a[i] && Object(b[i]) === b[i])
absorb(a[i], b[i]);
else a[i] = b[i];
}
const node = document.createElement(nodeType);
if(obj) absorb(node, obj);
return node;
},
/**
* Curried method to append multiple nodes at once.
*
* @example addTo(node)(el1, el2, …)
* @example node = addTo(node)(…)[0]
* @return {Function}
*/
addTo(parent){
let count = 0;
let target = parent;
const fn = (...nodes) => {
let lastElement;
for(let node of nodes){
if("string" === typeof node)
node = document.createTextNode(node);
else if(node)
lastElement =
fn[++count] = node;
node && target.appendChild(node);
}
target = lastElement || target;
return fn;
};
fn[count] = target;
return fn;
},
/**
* Return the containing element of a node that matches the given selector.
*
* If the node itself matches, it'll be returned unless ignoreSelf is set.
*
* @param {Node} node - A document node to inspect the hierarchy of
* @param {String} selector - A CSS selector string
* @param {Boolean} ignoreSelf - If given a truthy value, only the parents of a node will be queried
* @return {Element} The closest matching element, or NULL if none of the node's parents matched the selector.
*/
nearest(node, selector, ignoreSelf){
let match;
let parent = ignoreSelf ? node.parentNode : node;
const matches = document.querySelectorAll(selector);
const numMatches = matches.length;
if(numMatches) while(parent){
for(match = 0; match < numMatches; ++match)
if(matches[match] === parent) return parent;
parent = parent.parentNode;
}
return null;
},
/**
* Locate the root directory shared by multiple paths.
*
* @param {Array} paths - A list of filesystem paths
* @return {String}
*/
findBasePath(paths){
const POSIX = paths[0].indexOf("/") !== -1;
let matched = [];
// Spare ourselves the trouble if there's only one path
if(1 === paths.length){
matched = (paths[0].replace(/[\\/]+$/, "")).split(/[\\/]/g);
matched.pop();
}
// Otherwise, comb each array
else{
const rows = paths.map(d => d.split(/[\\/]/g));
const width = Math.max(...rows.map(d => d.length));
const height = rows.length;
let x;
X: for(x = 0; x < width; ++x){
const str = rows[0][x];
for(let y = 1; y < height; ++y)
if(str !== rows[y][x]) break X;
matched.push(str);
}
}
return matched.join(POSIX ? "/" : "\\");
},
/**
* Return the width of the scrollbars being displayed by this user's OS/device.
*
* @return {Number}
*/
getScrollbarWidth(){
const el = document.createElement("div");
const {style} = el;
const size = 120;
style.width =
style.height = size+"px";
style.overflow = "auto";
el.innerHTML = Array(size*5).join(" W ");
(document.body || document.documentElement).appendChild(el);
const result = el.offsetWidth - el.scrollWidth;
el.parentNode.removeChild(el);
return result;
},
/**
* Generate a RegEx from its string-based representation.
*
* Useful for "deserialising" a regex from JSON. Optional flags can be given
* to override trailing modifiers found in the source, if any.
*
* @example "/\\S+/i" -> /\S+/i
* @example "\\d+\\.\\d+$" -> /\d+\.\d+$/
* @param {String} src
* @param {String} flags
* @return {RegExp}
*/
regexFromString(src, flags){
src = (src || "").toString();
if(!src) return null;
const matchEnd = src.match(/\/([gimuy]*)$/);
// Input is a "complete" regular expression
if(matchEnd && /^\//.test(src))
return new RegExp(
src.replace(/^\/|\/([gimuy]*)$/gi, ""),
flags != null ? flags : matchEnd[1]
);
return new RegExp(src, flags);
},
/**
* Escape special regex characters within a string.
*
* @example "file.js" -> "file\\.js"
* @param {String} input
* @return {String}
*/
escapeRegExp(input){
return input.replace(/([/\\^$*+?{}[\]().|])/g, "\\$1");
},
/**
* Replace HTML metacharacters with numeric character references.
*
* Affected characters are: & < > "
*
* NOTE: Named entities are NOT checked, and will be double-escaped.
* Exceptions are made for `"`, `<` and `>`, due to their
* abundant use. Numeric entities, even with invalid codepoints, are
* also safe from double-encoding.
*
* @example "name"<email> -> "name"<email>
* @param {String} input
* @return {String}
*/
escapeHTML(input){
return input.replace(/["<>]|&(?!quot;|gt;|lt;|#x?[A-F\d]+;)/gi, s => "&#"+s.charCodeAt(0)+";");
},
/**
* Parse a list of keywords into an object of boolean "true" values.
*
* @example parseKeywords("top left") -> {top: true, left: true}
* @param {Mixed} keywords - A space-delimited string or an array of strings
* @return {Object}
*/
parseKeywords(keywords){
if(!Array.isArray(keywords)){
if(!keywords) return null;
keywords = [keywords];
}
const output = {};
for(const k of keywords)
k.split(/\s+/g).filter(i => i).forEach(k => output[k] = true);
return output;
},
/**
* Return a {@link Promise} which auto-resolves after a delay.
*
* @param {Number} [delay=100] - Delay in milliseconds
* @return {Promise<void>}
*/
wait(delay = 100){
return new Promise(resolve => {
setTimeout(() => resolve(), delay);
});
},
/**
* Keep calling a function until it returns a truthy value.
*
* @example poll(async () => (await fetch(url)).done);
* @param {Function} fn
* @param {Object} [opts={}]
* @param {Number} [opts.rate=100]
* @param {Number} [opts.timeout=0]
* @param {Boolean} [opts.negate=false]
* @return {Promise<void>}
*/
async poll(fn, opts = {}){
const {rate = 100, timeout = 0, negate = false} = opts;
const start = Date.now();
for(;;){
const result = await fn();
if(!negate === !!result) return result;
if(timeout && Date.now() - start > timeout)
throw new Error("Timed out");
await new Promise($ => setTimeout($, rate));
}
},
/**
* Strip excess whitespace from a multiline string.
*
* Intended to be used with tagged template literals,
* but will work on any multiline string value.
*
* @example
* const HTML = deindent;
* let output = HTML `
* <div>
* (Text)
* </div>
* `;
* output == "<div>\n\t(Text)\n</div>";
*
* @param {Object|String} input
* @param {...String} [args]
* @return {String}
*/
deindent(input, ...args){
// Avoid breaking on String.raw if called as an ordinary function
if("object" !== typeof input || "object" !== typeof input.raw)
return deindent `${input}`;
const depthTable = [];
let maxDepth = Number.NEGATIVE_INFINITY;
let minDepth = Number.POSITIVE_INFINITY;
| .replace(/^(?:[ \t]*\n)+|(?:\n[ \t]*)+$/g, "");
for(const line of chunk.split(/\n/)){
// Ignore whitespace-only lines
if(!/\S/.test(line)) continue;
const indentString = line.match(/^[ \t]*(?=\S|$)/)[0];
const indentLength = indentString.replace(/\t/g, " ".repeat(8)).length;
if(indentLength < 1) continue;
const depthStrings = depthTable[indentLength] || [];
depthStrings.push(indentString);
maxDepth = Math.max(maxDepth, indentLength);
minDepth = Math.min(minDepth, indentLength);
if(!depthTable[indentLength])
depthTable[indentLength] = depthStrings;
}
if(maxDepth < 1)
return chunk;
const depthStrings = new Set();
for(const column of depthTable.slice(0, minDepth + 1)){
if(!column) continue;
depthStrings.add(...column);
}
depthStrings.delete(undefined);
const stripPattern = [...depthStrings].reverse().join("|");
return chunk.replace(new RegExp(`^(?:${stripPattern})`, "gm"), "");
},
}; | // Normalise newlines and strip leading or trailing blank lines
const chunk = String.raw.call(null, input, ...args)
.replace(/\r(\n?)/g, "$1") | random_line_split |
HFIF_RunTime.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 16 16:26:07 2019
@author: WGP
"""
import xlrd, threading,datetime,os,pymssql,time
#os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from queue import Queue, Empty
from threading import Thread
import WindTDFAPI as w
from keras import models,backend
import numpy as np
import pandas as pd
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
def calPercentile(xValue,arrPercentile,st=0): #len(arrPercentile)=100,upscane
isfind=False
abv=abs(xValue)
for i in range(st,100):
if abv<(arrPercentile[i]+0.00001):
isfind=True
break
if isfind:
result=i/100
else:
result=1
return result*np.sign(xValue)
def getNormInduData(xData,pclMatrix):
xShape=len(xData)
normInduData=np.zeros(xShape)
for j in range(xShape):
arrPercentile=pclMatrix[:,j]
normInduData[j]=calPercentile(xData[j],arrPercentile)
return normInduData
def btstr(btpara):
return str(btpara,encoding='utf-8')
| def myLoss(y_true, y_pred):
#return backend.mean(backend.square((y_pred - y_true)*y_true), axis=-1)
return backend.mean(backend.abs((y_pred - y_true)*y_true), axis=-1)
def myMetric(y_true, y_pred):
return backend.mean(y_pred*y_true, axis=-1)*10
def getCfgFareFactor(ffPath):
cfgFile=os.path.join(ffPath,'cfgForeFactor.csv')
cfgData=tuple(map(btstr,np.loadtxt(cfgFile,dtype=bytes)))
return cfgData[:4],cfgData[4:]
def getTSAvgAmnt(pdAvgAmntFile):
global dictTSAvgAmnt,timeSpan
pdAvgAmnt=pd.read_csv(pdAvgAmntFile,header=0,index_col=0,engine='python')
for code in pdAvgAmnt.index:
dictTSAvgAmnt[code]=int(pdAvgAmnt.loc[code][0]/(14400/timeSpan))
def registerAllSymbol():
global dataVendor,listForeFactor
codelist={}
for ff in listForeFactor:
codelist=codelist|set(ff.dictCodeInfo.keys())
dataVendor.RegisterSymbol(codelist)
print('Register symbol, please wait...')
class EventManager:
def __init__(self):
self.__eventQueue = Queue()
self.__active = False
self.__thread = Thread(target = self.__Run)
self.__handlers = {}
def __Run(self):
while self.__active == True:
try:
event = self.__eventQueue.get(block = True, timeout = 1)
self.__EventProcess(event)
except Empty:
pass
def __EventProcess(self, event):
if event.type_ in self.__handlers:
for handler in self.__handlers[event.type_]:
handler(event)
def Start(self):
self.__active = True
self.__thread.start()
def AddEventListener(self, type_, handler):
try:
handlerList = self.__handlers[type_]
except KeyError:
handlerList = []
self.__handlers[type_] = handlerList
if handler not in handlerList:
handlerList.append(handler)
def SendEvent(self, event):
self.__eventQueue.put(event)
class MyEvent:
def __init__(self, Eventtype,Data):
self.type_ = Eventtype # 事件类型
self.data = Data # 字典用于保存具体的事件数据
class MSSQL:
def __init__(self,host,user,pwd,db):
self.host = host
self.user = user
self.pwd = pwd
self.db = db
def Connect(self):
try:
self.conn = pymssql.connect(host=self.host,user=self.user,password=self.pwd,database=self.db,charset="UTF-8")
self.conn.autocommit(True)
self.cur = self.conn.cursor()
if not self.cur:
return False
else:
return True
except:
return False
def UpdateFF(self,sname,pm):
sql="update tblFundPricingParam set ff_1m_v=("+str(pm[0])+"),ff_2m_v=("+str(pm[1])+"),ff_3m_v=("+str(pm[2])+") where strategyName='"+sname+"'"
self.cur.execute(sql)
def UpdateAllFF(self):
global listForeFactor
listUpdate=[]
lsn=[]
for i in range(3):
listUpdate.append('ff_'+str(i+1)+'m_v=case strategyName')
for ff in listForeFactor:
for strategyName in ff.listStrategyName:
lsn.append('\''+strategyName+'\'')
for i in range(3):
listUpdate[i]+=' when \''+strategyName+'\' then '+str(ff.pm[i])
for i in range(3):
listUpdate[i]+=' end'
sql='update tblFundPricingParam set '+','.join(listUpdate)+' where strategyName in ('+','.join(lsn)+')'
self.cur.execute(sql)
def TDFCallBack(pMarketdata):
eventManager.SendEvent(MyEvent("quote",pMarketdata))
def MyNormData(normEvent):
global listForeFactor,lock,sql
isPush=normEvent.data
listPm=[]
intNTime=int(datetime.datetime.now().strftime('%H%M%S'))
if intNTime<91000 or intNTime>150000:
print('not trading time.')
return
lock.acquire()
try:
for ff in listForeFactor:
ff.CalPM()
listPm.append(ff.pm)
print(intNTime,*tuple(listPm))
if isPush and ((intNTime>93100 and intNTime<113000) or (intNTime>130100 and intNTime<150000)):
sql.UpdateAllFF()
finally:
lock.release()
def ReceiveQuote(quoteEvent):
global dictQuote,lock
dt =quoteEvent.data
lock.acquire()
try:
code=bytes.decode(dt.szWindCode)
dictQuote[code]=(dt.nTime/1000,dt.nMatch/10000,dt.iTurnover)
finally:
lock.release()
class ForeFactor:
def __init__(self, workPath,cfgFile):
self.workPath = workPath
self.cfgFile = cfgFile
self.dictCodeInfo = {}
self.nIndu=0
self.listModel=[]
self.listStrategyName=[]
self.pclMatrix=np.array([])
#output
self.lastInduData=np.array([])
self.inputData=np.array([])
self.pm=np.zeros(3)
self._getCfg()
def _getCfg(self):
global nXData,timeSpan
data = xlrd.open_workbook(os.path.join(self.workPath,self.cfgFile))
sheetCodeInfo = data.sheets()[0]
arrShares = sheetCodeInfo.col_values(1)[1:]
arrCode = sheetCodeInfo.col_values(0)[1:]
arrIndustry = sheetCodeInfo.col_values(2)[1:]
self.nIndu=len(set(arrIndustry))
self.inputData=np.zeros((nXData,self.nIndu*2))
for i in range(len(arrCode)):
self.dictCodeInfo[arrCode[i]]=[arrShares[i],arrIndustry[i]]
arrCfg=data.sheets()[1].col_values(1)
self.listStrategyName=arrCfg[10].split(',')
(filepath,tempfilename) = os.path.split(self.cfgFile)
(filename,extension) = os.path.splitext(tempfilename)
modelPath=os.path.join(self.workPath,filename)
testP=np.zeros((1,nXData,self.nIndu*2))
for i in range(3):
modelfile=os.path.join(modelPath,'model_'+filename+'_1min_'+str(i+1)+'min.h5')
model=models.load_model(modelfile,custom_objects={'myLoss': myLoss,'myMetric':myMetric})
model.predict(testP)
self.listModel.append(model)
self.pclMatrix=np.loadtxt(os.path.join(modelPath,'pclMatrix_'+filename+'.csv'),delimiter=',')
getTSAvgAmnt(os.path.join(modelPath,'avgAmnt_'+filename+'.csv'))
def CalPM(self):
global dictQuote,dictTSAvgAmnt,nXData
crow=np.zeros(self.nIndu*2)
inputRow=np.zeros(self.nIndu*2)
npAveTSpanAmnt=np.zeros(self.nIndu)
for (symbol,weiIndu) in self.dictCodeInfo.items():
if (symbol not in dictQuote):# or (symbol not in dictTSAvgAmnt):
#print('np Symbol: '+ symbol)
#return
continue
wei=weiIndu[0]
intIndu=int(weiIndu[1]+0.1)
lpri=dictQuote[symbol][1]
lamt=dictQuote[symbol][2]
crow[2*intIndu-2]+=wei*lpri
crow[2*intIndu-1]+=lamt
npAveTSpanAmnt[intIndu-1]+=dictTSAvgAmnt[symbol]
#if lpri<0.01:
#print('price 0: '+symbol)
#continue
if crow[0]<1:
print('wait quote')
return
if self.lastInduData.size==0:
self.lastInduData=crow
for i in range(self.nIndu):
inputRow[2*i]=(crow[2*i]/self.lastInduData[2*i]-1)*10000
inputRow[2*i+1]=(crow[2*i+1]-self.lastInduData[2*i+1])/npAveTSpanAmnt[i]
inputRow=getNormInduData(inputRow,self.pclMatrix)
self.inputData=np.vstack((self.inputData[1:,:],inputRow))
self.lastInduData=crow
for i in range(3):
self.pm[i]=self.listModel[i].predict(self.inputData.reshape(1,nXData,self.nIndu*2))[0,0]
#backend.clear_session()
self.pm=np.round(self.pm,2)
if __name__ == '__main__':
#global
eventManager = EventManager()
lock = threading.Lock()
listForeFactor=[]
dictQuote={}
dictTSAvgAmnt={}
timeSpan=3
nXData=20
#config
cfgPath='D:\\CalForeFactor\\HFI_Model'
if not os.path.exists(cfgPath):
cfgPath='C:\\Users\\WAP\\Documents\\HFI_Model'
cfgSQL,listCfgForeFactor=getCfgFareFactor(cfgPath)
fPath=os.path.join(cfgPath,'cfg')
for cfgFF in listCfgForeFactor:
listForeFactor.append(ForeFactor(fPath,cfgFF))
#SQL
sql=MSSQL(*cfgSQL)
nConnect=0
while not sql.Connect():
print('SQL Connet Error: ',nConnect)
nConnect+=1
time.sleep(5)
#Event
eventManager.AddEventListener("quote",ReceiveQuote)
eventManager.AddEventListener("normData",MyNormData)
eventManager.Start()
w.SetMarketDataCallBack(TDFCallBack)
dataVendor = w.WindMarketVendor("TDFConfig.ini", "TDFAPI25.dll")
nConnect=0
while (dataVendor.Reconnect() is False):
print("Error nConnect: ",nConnect)
nConnect+=1
time.sleep(5)
dataVendor.RegisterSymbol(set(dictTSAvgAmnt.keys()))
time.sleep(10)
for i in range(30):
eventManager.SendEvent(MyEvent("normData",False))
time.sleep(timeSpan)
while True:
eventManager.SendEvent(MyEvent("normData",True))
time.sleep(timeSpan) | random_line_split | |
HFIF_RunTime.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 16 16:26:07 2019
@author: WGP
"""
import xlrd, threading,datetime,os,pymssql,time
#os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from queue import Queue, Empty
from threading import Thread
import WindTDFAPI as w
from keras import models,backend
import numpy as np
import pandas as pd
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
def calPercentile(xValue,arrPercentile,st=0): #len(arrPercentile)=100,upscane
isfind=False
abv=abs(xValue)
for i in range(st,100):
if abv<(arrPercentile[i]+0.00001):
isfind=True
break
if isfind:
result=i/100
else:
result=1
return result*np.sign(xValue)
def getNormInduData(xData,pclMatrix):
xShape=len(xData)
normInduData=np.zeros(xShape)
for j in range(xShape):
arrPercentile=pclMatrix[:,j]
normInduData[j]=calPercentile(xData[j],arrPercentile)
return normInduData
def btstr(btpara):
return str(btpara,encoding='utf-8')
def myLoss(y_true, y_pred):
#return backend.mean(backend.square((y_pred - y_true)*y_true), axis=-1)
return backend.mean(backend.abs((y_pred - y_true)*y_true), axis=-1)
def myMetric(y_true, y_pred):
return backend.mean(y_pred*y_true, axis=-1)*10
def getCfgFareFactor(ffPath):
cfgFile=os.path.join(ffPath,'cfgForeFactor.csv')
cfgData=tuple(map(btstr,np.loadtxt(cfgFile,dtype=bytes)))
return cfgData[:4],cfgData[4:]
def getTSAvgAmnt(pdAvgAmntFile):
global dictTSAvgAmnt,timeSpan
pdAvgAmnt=pd.read_csv(pdAvgAmntFile,header=0,index_col=0,engine='python')
for code in pdAvgAmnt.index:
dictTSAvgAmnt[code]=int(pdAvgAmnt.loc[code][0]/(14400/timeSpan))
def registerAllSymbol():
global dataVendor,listForeFactor
codelist={}
for ff in listForeFactor:
codelist=codelist|set(ff.dictCodeInfo.keys())
dataVendor.RegisterSymbol(codelist)
print('Register symbol, please wait...')
class EventManager:
def __init__(self):
self.__eventQueue = Queue()
self.__active = False
self.__thread = Thread(target = self.__Run)
self.__handlers = {}
def __Run(self):
while self.__active == True:
try:
event = self.__eventQueue.get(block = True, timeout = 1)
self.__EventProcess(event)
except Empty:
pass
def __EventProcess(self, event):
if event.type_ in self.__handlers:
for handler in self.__handlers[event.type_]:
handler(event)
def Start(self):
self.__active = True
self.__thread.start()
def AddEventListener(self, type_, handler):
try:
handlerList = self.__handlers[type_]
except KeyError:
handlerList = []
self.__handlers[type_] = handlerList
if handler not in handlerList:
handlerList.append(handler)
def SendEvent(self, event):
self.__eventQueue.put(event)
class MyEvent:
def __init__(self, Eventtype,Data):
self.type_ = Eventtype # 事件类型
self.data = Data # 字典用于保存具体的事件数据
class MSSQL:
def __init__(self,host,user,pwd,db):
self.host = host
self.user = user
self.pwd = pwd
self.db = db
def Connect(self):
try:
self.conn = pymssql.connect(host=self.host,user=self.user,password=self.pwd,database=self.db,charset="UTF-8")
self.conn.autocommit(True)
self.cur = self.conn.cursor()
if not self.cur:
return False
else:
return True
except:
return False
def UpdateFF(self,sname,pm):
sql="update tblFundPricingParam set ff_1m_v=("+str(pm[0])+"),ff_2m_v=("+str(pm[1])+"),ff_3m_v=("+str(pm[2])+") where strategyName='"+sname+"'"
self.cur.execute(sql)
def UpdateAllFF(self):
global listForeFactor
listUpdate=[]
lsn=[]
for i in range(3):
listUpdate.append('ff_'+str(i+1)+'m_v=case strategyName')
for ff in listForeFactor:
for strategyName in ff.listStrategyName:
lsn.append('\''+strategyName+'\'')
for i in range(3):
listUpdate[i]+=' when \''+strategyName+'\' then '+str(ff.pm[i])
for i in range(3):
listUpdate[i]+=' end'
sql='update tblFundPricingParam set '+','.join(listUpdate)+' where strategyName in ('+','.join(lsn)+')'
self.cur.execute(sql)
def TDFCallBack(pMarketdata):
eventManager.SendEvent(MyEvent("quote",pMarketdata))
def MyNormData(normEvent):
global listForeFactor,lock,sql
isPush=normEvent.data
listPm=[]
intNTime=int(datetime.datetime.now().strftime('%H%M%S'))
if intNTime<91000 or intNTime>150000:
print('not trading time.')
return
lock.acquire()
try:
for ff in listForeFactor:
ff.CalPM()
listPm.app | stPm))
if isPush and ((intNTime>93100 and intNTime<113000) or (intNTime>130100 and intNTime<150000)):
sql.UpdateAllFF()
finally:
lock.release()
def ReceiveQuote(quoteEvent):
global dictQuote,lock
dt =quoteEvent.data
lock.acquire()
try:
code=bytes.decode(dt.szWindCode)
dictQuote[code]=(dt.nTime/1000,dt.nMatch/10000,dt.iTurnover)
finally:
lock.release()
class ForeFactor:
def __init__(self, workPath,cfgFile):
self.workPath = workPath
self.cfgFile = cfgFile
self.dictCodeInfo = {}
self.nIndu=0
self.listModel=[]
self.listStrategyName=[]
self.pclMatrix=np.array([])
#output
self.lastInduData=np.array([])
self.inputData=np.array([])
self.pm=np.zeros(3)
self._getCfg()
def _getCfg(self):
global nXData,timeSpan
data = xlrd.open_workbook(os.path.join(self.workPath,self.cfgFile))
sheetCodeInfo = data.sheets()[0]
arrShares = sheetCodeInfo.col_values(1)[1:]
arrCode = sheetCodeInfo.col_values(0)[1:]
arrIndustry = sheetCodeInfo.col_values(2)[1:]
self.nIndu=len(set(arrIndustry))
self.inputData=np.zeros((nXData,self.nIndu*2))
for i in range(len(arrCode)):
self.dictCodeInfo[arrCode[i]]=[arrShares[i],arrIndustry[i]]
arrCfg=data.sheets()[1].col_values(1)
self.listStrategyName=arrCfg[10].split(',')
(filepath,tempfilename) = os.path.split(self.cfgFile)
(filename,extension) = os.path.splitext(tempfilename)
modelPath=os.path.join(self.workPath,filename)
testP=np.zeros((1,nXData,self.nIndu*2))
for i in range(3):
modelfile=os.path.join(modelPath,'model_'+filename+'_1min_'+str(i+1)+'min.h5')
model=models.load_model(modelfile,custom_objects={'myLoss': myLoss,'myMetric':myMetric})
model.predict(testP)
self.listModel.append(model)
self.pclMatrix=np.loadtxt(os.path.join(modelPath,'pclMatrix_'+filename+'.csv'),delimiter=',')
getTSAvgAmnt(os.path.join(modelPath,'avgAmnt_'+filename+'.csv'))
def CalPM(self):
global dictQuote,dictTSAvgAmnt,nXData
crow=np.zeros(self.nIndu*2)
inputRow=np.zeros(self.nIndu*2)
npAveTSpanAmnt=np.zeros(self.nIndu)
for (symbol,weiIndu) in self.dictCodeInfo.items():
if (symbol not in dictQuote):# or (symbol not in dictTSAvgAmnt):
#print('np Symbol: '+ symbol)
#return
continue
wei=weiIndu[0]
intIndu=int(weiIndu[1]+0.1)
lpri=dictQuote[symbol][1]
lamt=dictQuote[symbol][2]
crow[2*intIndu-2]+=wei*lpri
crow[2*intIndu-1]+=lamt
npAveTSpanAmnt[intIndu-1]+=dictTSAvgAmnt[symbol]
#if lpri<0.01:
#print('price 0: '+symbol)
#continue
if crow[0]<1:
print('wait quote')
return
if self.lastInduData.size==0:
self.lastInduData=crow
for i in range(self.nIndu):
inputRow[2*i]=(crow[2*i]/self.lastInduData[2*i]-1)*10000
inputRow[2*i+1]=(crow[2*i+1]-self.lastInduData[2*i+1])/npAveTSpanAmnt[i]
inputRow=getNormInduData(inputRow,self.pclMatrix)
self.inputData=np.vstack((self.inputData[1:,:],inputRow))
self.lastInduData=crow
for i in range(3):
self.pm[i]=self.listModel[i].predict(self.inputData.reshape(1,nXData,self.nIndu*2))[0,0]
#backend.clear_session()
self.pm=np.round(self.pm,2)
if __name__ == '__main__':
#global
eventManager = EventManager()
lock = threading.Lock()
listForeFactor=[]
dictQuote={}
dictTSAvgAmnt={}
timeSpan=3
nXData=20
#config
cfgPath='D:\\CalForeFactor\\HFI_Model'
if not os.path.exists(cfgPath):
cfgPath='C:\\Users\\WAP\\Documents\\HFI_Model'
cfgSQL,listCfgForeFactor=getCfgFareFactor(cfgPath)
fPath=os.path.join(cfgPath,'cfg')
for cfgFF in listCfgForeFactor:
listForeFactor.append(ForeFactor(fPath,cfgFF))
#SQL
sql=MSSQL(*cfgSQL)
nConnect=0
while not sql.Connect():
print('SQL Connet Error: ',nConnect)
nConnect+=1
time.sleep(5)
#Event
eventManager.AddEventListener("quote",ReceiveQuote)
eventManager.AddEventListener("normData",MyNormData)
eventManager.Start()
w.SetMarketDataCallBack(TDFCallBack)
dataVendor = w.WindMarketVendor("TDFConfig.ini", "TDFAPI25.dll")
nConnect=0
while (dataVendor.Reconnect() is False):
print("Error nConnect: ",nConnect)
nConnect+=1
time.sleep(5)
dataVendor.RegisterSymbol(set(dictTSAvgAmnt.keys()))
time.sleep(10)
for i in range(30):
eventManager.SendEvent(MyEvent("normData",False))
time.sleep(timeSpan)
while True:
eventManager.SendEvent(MyEvent("normData",True))
time.sleep(timeSpan)
| end(ff.pm)
print(intNTime,*tuple(li | conditional_block |
HFIF_RunTime.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 16 16:26:07 2019
@author: WGP
"""
import xlrd, threading,datetime,os,pymssql,time
#os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from queue import Queue, Empty
from threading import Thread
import WindTDFAPI as w
from keras import models,backend
import numpy as np
import pandas as pd
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
def calPercentile(xValue,arrPercentile,st=0): #len(arrPercentile)=100,upscane
isfind=False
abv=abs(xValue)
for i in range(st,100):
if abv<(arrPercentile[i]+0.00001):
isfind=True
break
if isfind:
result=i/100
else:
result=1
return result*np.sign(xValue)
def getNormInduData(xData,pclMatrix):
xShape=len(xData)
normInduData=np.zeros(xShape)
for j in range(xShape):
arrPercentile=pclMatrix[:,j]
normInduData[j]=calPercentile(xData[j],arrPercentile)
return normInduData
def btstr(btpara):
return str(btpara,encoding='utf-8')
def myLoss(y_true, y_pred):
#return backend.mean(backend.square((y_pred - y_true)*y_true), axis=-1)
return backend.mean(backend.abs((y_pred - y_true)*y_true), axis=-1)
def myMetric(y_true, y_pred):
return backend.mean(y_pred*y_true, axis=-1)*10
def getCfgFareFactor(ffPath):
cfgFile=os.path.join(ffPath,'cfgForeFactor.csv')
cfgData=tuple(map(btstr,np.loadtxt(cfgFile,dtype=bytes)))
return cfgData[:4],cfgData[4:]
def getTSAvgAmnt(pdAvgAmntFile):
global dictTSAvgAmnt,timeSpan
pdAvgAmnt=pd.read_csv(pdAvgAmntFile,header=0,index_col=0,engine='python')
for code in pdAvgAmnt.index:
dictTSAvgAmnt[code]=int(pdAvgAmnt.loc[code][0]/(14400/timeSpan))
def registerAllSymbol():
global dataVendor,listForeFactor
codelist={}
for ff in listForeFactor:
codelist=codelist|set(ff.dictCodeInfo.keys())
dataVendor.RegisterSymbol(codelist)
print('Register symbol, please wait...')
class EventManager:
|
class MyEvent:
def __init__(self, Eventtype,Data):
self.type_ = Eventtype # 事件类型
self.data = Data # 字典用于保存具体的事件数据
class MSSQL:
def __init__(self,host,user,pwd,db):
self.host = host
self.user = user
self.pwd = pwd
self.db = db
def Connect(self):
try:
self.conn = pymssql.connect(host=self.host,user=self.user,password=self.pwd,database=self.db,charset="UTF-8")
self.conn.autocommit(True)
self.cur = self.conn.cursor()
if not self.cur:
return False
else:
return True
except:
return False
def UpdateFF(self,sname,pm):
sql="update tblFundPricingParam set ff_1m_v=("+str(pm[0])+"),ff_2m_v=("+str(pm[1])+"),ff_3m_v=("+str(pm[2])+") where strategyName='"+sname+"'"
self.cur.execute(sql)
def UpdateAllFF(self):
global listForeFactor
listUpdate=[]
lsn=[]
for i in range(3):
listUpdate.append('ff_'+str(i+1)+'m_v=case strategyName')
for ff in listForeFactor:
for strategyName in ff.listStrategyName:
lsn.append('\''+strategyName+'\'')
for i in range(3):
listUpdate[i]+=' when \''+strategyName+'\' then '+str(ff.pm[i])
for i in range(3):
listUpdate[i]+=' end'
sql='update tblFundPricingParam set '+','.join(listUpdate)+' where strategyName in ('+','.join(lsn)+')'
self.cur.execute(sql)
def TDFCallBack(pMarketdata):
eventManager.SendEvent(MyEvent("quote",pMarketdata))
def MyNormData(normEvent):
global listForeFactor,lock,sql
isPush=normEvent.data
listPm=[]
intNTime=int(datetime.datetime.now().strftime('%H%M%S'))
if intNTime<91000 or intNTime>150000:
print('not trading time.')
return
lock.acquire()
try:
for ff in listForeFactor:
ff.CalPM()
listPm.append(ff.pm)
print(intNTime,*tuple(listPm))
if isPush and ((intNTime>93100 and intNTime<113000) or (intNTime>130100 and intNTime<150000)):
sql.UpdateAllFF()
finally:
lock.release()
def ReceiveQuote(quoteEvent):
global dictQuote,lock
dt =quoteEvent.data
lock.acquire()
try:
code=bytes.decode(dt.szWindCode)
dictQuote[code]=(dt.nTime/1000,dt.nMatch/10000,dt.iTurnover)
finally:
lock.release()
class ForeFactor:
def __init__(self, workPath,cfgFile):
self.workPath = workPath
self.cfgFile = cfgFile
self.dictCodeInfo = {}
self.nIndu=0
self.listModel=[]
self.listStrategyName=[]
self.pclMatrix=np.array([])
#output
self.lastInduData=np.array([])
self.inputData=np.array([])
self.pm=np.zeros(3)
self._getCfg()
def _getCfg(self):
global nXData,timeSpan
data = xlrd.open_workbook(os.path.join(self.workPath,self.cfgFile))
sheetCodeInfo = data.sheets()[0]
arrShares = sheetCodeInfo.col_values(1)[1:]
arrCode = sheetCodeInfo.col_values(0)[1:]
arrIndustry = sheetCodeInfo.col_values(2)[1:]
self.nIndu=len(set(arrIndustry))
self.inputData=np.zeros((nXData,self.nIndu*2))
for i in range(len(arrCode)):
self.dictCodeInfo[arrCode[i]]=[arrShares[i],arrIndustry[i]]
arrCfg=data.sheets()[1].col_values(1)
self.listStrategyName=arrCfg[10].split(',')
(filepath,tempfilename) = os.path.split(self.cfgFile)
(filename,extension) = os.path.splitext(tempfilename)
modelPath=os.path.join(self.workPath,filename)
testP=np.zeros((1,nXData,self.nIndu*2))
for i in range(3):
modelfile=os.path.join(modelPath,'model_'+filename+'_1min_'+str(i+1)+'min.h5')
model=models.load_model(modelfile,custom_objects={'myLoss': myLoss,'myMetric':myMetric})
model.predict(testP)
self.listModel.append(model)
self.pclMatrix=np.loadtxt(os.path.join(modelPath,'pclMatrix_'+filename+'.csv'),delimiter=',')
getTSAvgAmnt(os.path.join(modelPath,'avgAmnt_'+filename+'.csv'))
def CalPM(self):
global dictQuote,dictTSAvgAmnt,nXData
crow=np.zeros(self.nIndu*2)
inputRow=np.zeros(self.nIndu*2)
npAveTSpanAmnt=np.zeros(self.nIndu)
for (symbol,weiIndu) in self.dictCodeInfo.items():
if (symbol not in dictQuote):# or (symbol not in dictTSAvgAmnt):
#print('np Symbol: '+ symbol)
#return
continue
wei=weiIndu[0]
intIndu=int(weiIndu[1]+0.1)
lpri=dictQuote[symbol][1]
lamt=dictQuote[symbol][2]
crow[2*intIndu-2]+=wei*lpri
crow[2*intIndu-1]+=lamt
npAveTSpanAmnt[intIndu-1]+=dictTSAvgAmnt[symbol]
#if lpri<0.01:
#print('price 0: '+symbol)
#continue
if crow[0]<1:
print('wait quote')
return
if self.lastInduData.size==0:
self.lastInduData=crow
for i in range(self.nIndu):
inputRow[2*i]=(crow[2*i]/self.lastInduData[2*i]-1)*10000
inputRow[2*i+1]=(crow[2*i+1]-self.lastInduData[2*i+1])/npAveTSpanAmnt[i]
inputRow=getNormInduData(inputRow,self.pclMatrix)
self.inputData=np.vstack((self.inputData[1:,:],inputRow))
self.lastInduData=crow
for i in range(3):
self.pm[i]=self.listModel[i].predict(self.inputData.reshape(1,nXData,self.nIndu*2))[0,0]
#backend.clear_session()
self.pm=np.round(self.pm,2)
if __name__ == '__main__':
#global
eventManager = EventManager()
lock = threading.Lock()
listForeFactor=[]
dictQuote={}
dictTSAvgAmnt={}
timeSpan=3
nXData=20
#config
cfgPath='D:\\CalForeFactor\\HFI_Model'
if not os.path.exists(cfgPath):
cfgPath='C:\\Users\\WAP\\Documents\\HFI_Model'
cfgSQL,listCfgForeFactor=getCfgFareFactor(cfgPath)
fPath=os.path.join(cfgPath,'cfg')
for cfgFF in listCfgForeFactor:
listForeFactor.append(ForeFactor(fPath,cfgFF))
#SQL
sql=MSSQL(*cfgSQL)
nConnect=0
while not sql.Connect():
print('SQL Connet Error: ',nConnect)
nConnect+=1
time.sleep(5)
#Event
eventManager.AddEventListener("quote",ReceiveQuote)
eventManager.AddEventListener("normData",MyNormData)
eventManager.Start()
w.SetMarketDataCallBack(TDFCallBack)
dataVendor = w.WindMarketVendor("TDFConfig.ini", "TDFAPI25.dll")
nConnect=0
while (dataVendor.Reconnect() is False):
print("Error nConnect: ",nConnect)
nConnect+=1
time.sleep(5)
dataVendor.RegisterSymbol(set(dictTSAvgAmnt.keys()))
time.sleep(10)
for i in range(30):
eventManager.SendEvent(MyEvent("normData",False))
time.sleep(timeSpan)
while True:
eventManager.SendEvent(MyEvent("normData",True))
time.sleep(timeSpan)
| def __init__(self):
self.__eventQueue = Queue()
self.__active = False
self.__thread = Thread(target = self.__Run)
self.__handlers = {}
def __Run(self):
while self.__active == True:
try:
event = self.__eventQueue.get(block = True, timeout = 1)
self.__EventProcess(event)
except Empty:
pass
def __EventProcess(self, event):
if event.type_ in self.__handlers:
for handler in self.__handlers[event.type_]:
handler(event)
def Start(self):
self.__active = True
self.__thread.start()
def AddEventListener(self, type_, handler):
try:
handlerList = self.__handlers[type_]
except KeyError:
handlerList = []
self.__handlers[type_] = handlerList
if handler not in handlerList:
handlerList.append(handler)
def SendEvent(self, event):
self.__eventQueue.put(event) | identifier_body |
HFIF_RunTime.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 16 16:26:07 2019
@author: WGP
"""
import xlrd, threading,datetime,os,pymssql,time
#os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from queue import Queue, Empty
from threading import Thread
import WindTDFAPI as w
from keras import models,backend
import numpy as np
import pandas as pd
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
def calPercentile(xValue,arrPercentile,st=0): #len(arrPercentile)=100,upscane
isfind=False
abv=abs(xValue)
for i in range(st,100):
if abv<(arrPercentile[i]+0.00001):
isfind=True
break
if isfind:
result=i/100
else:
result=1
return result*np.sign(xValue)
def getNormInduData(xData,pclMatrix):
xShape=len(xData)
normInduData=np.zeros(xShape)
for j in range(xShape):
arrPercentile=pclMatrix[:,j]
normInduData[j]=calPercentile(xData[j],arrPercentile)
return normInduData
def btstr(btpara):
return str(btpara,encoding='utf-8')
def myLoss(y_true, y_pred):
#return backend.mean(backend.square((y_pred - y_true)*y_true), axis=-1)
return backend.mean(backend.abs((y_pred - y_true)*y_true), axis=-1)
def myMetric(y_true, y_pred):
return backend.mean(y_pred*y_true, axis=-1)*10
def getCfgFareFactor(ffPath):
cfgFile=os.path.join(ffPath,'cfgForeFactor.csv')
cfgData=tuple(map(btstr,np.loadtxt(cfgFile,dtype=bytes)))
return cfgData[:4],cfgData[4:]
def getTSAvgAmnt(pdAvgAmntFile):
global dictTSAvgAmnt,timeSpan
pdAvgAmnt=pd.read_csv(pdAvgAmntFile,header=0,index_col=0,engine='python')
for code in pdAvgAmnt.index:
dictTSAvgAmnt[code]=int(pdAvgAmnt.loc[code][0]/(14400/timeSpan))
def registerAllSymbol():
global dataVendor,listForeFactor
codelist={}
for ff in listForeFactor:
codelist=codelist|set(ff.dictCodeInfo.keys())
dataVendor.RegisterSymbol(codelist)
print('Register symbol, please wait...')
class EventManager:
def __init__(self):
self.__eventQueue = Queue()
self.__active = False
self.__thread = Thread(target = self.__Run)
self.__handlers = {}
def __Run(self):
while self.__active == True:
try:
event = self.__eventQueue.get(block = True, timeout = 1)
self.__EventProcess(event)
except Empty:
pass
def __EventProcess(self, event):
if event.type_ in self.__handlers:
for handler in self.__handlers[event.type_]:
handler(event)
def Start(self):
self.__active = True
self.__thread.start()
def AddEventListener(self, type_, handler):
try:
handlerList = self.__handlers[type_]
except KeyError:
handlerList = []
self.__handlers[type_] = handlerList
if handler not in handlerList:
handlerList.append(handler)
def SendEvent(self, event):
self.__eventQueue.put(event)
class MyEvent:
def __init__(self, Eventtype,Data):
self.type_ = Eventtype # 事件类型
self.data = Data # 字典用于保存具体的事件数据
class MSSQL:
def __init__(self,host,user,pwd,db):
self.host = host
self.user = user
self.pwd = pwd
self.db = db
def Connect(self):
try:
| self.conn = pymssql.connect(host=self.host,user=self.user,password=self.pwd,database=self.db,charset="UTF-8")
self.conn.autocommit(True)
self.cur = self.conn.cursor()
if not self.cur:
return False
else:
return True
except:
return False
def UpdateFF(self,sname,pm):
sql="update tblFundPricingParam set ff_1m_v=("+str(pm[0])+"),ff_2m_v=("+str(pm[1])+"),ff_3m_v=("+str(pm[2])+") where strategyName='"+sname+"'"
self.cur.execute(sql)
def UpdateAllFF(self):
global listForeFactor
listUpdate=[]
lsn=[]
for i in range(3):
listUpdate.append('ff_'+str(i+1)+'m_v=case strategyName')
for ff in listForeFactor:
for strategyName in ff.listStrategyName:
lsn.append('\''+strategyName+'\'')
for i in range(3):
listUpdate[i]+=' when \''+strategyName+'\' then '+str(ff.pm[i])
for i in range(3):
listUpdate[i]+=' end'
sql='update tblFundPricingParam set '+','.join(listUpdate)+' where strategyName in ('+','.join(lsn)+')'
self.cur.execute(sql)
def TDFCallBack(pMarketdata):
eventManager.SendEvent(MyEvent("quote",pMarketdata))
def MyNormData(normEvent):
global listForeFactor,lock,sql
isPush=normEvent.data
listPm=[]
intNTime=int(datetime.datetime.now().strftime('%H%M%S'))
if intNTime<91000 or intNTime>150000:
print('not trading time.')
return
lock.acquire()
try:
for ff in listForeFactor:
ff.CalPM()
listPm.append(ff.pm)
print(intNTime,*tuple(listPm))
if isPush and ((intNTime>93100 and intNTime<113000) or (intNTime>130100 and intNTime<150000)):
sql.UpdateAllFF()
finally:
lock.release()
def ReceiveQuote(quoteEvent):
global dictQuote,lock
dt =quoteEvent.data
lock.acquire()
try:
code=bytes.decode(dt.szWindCode)
dictQuote[code]=(dt.nTime/1000,dt.nMatch/10000,dt.iTurnover)
finally:
lock.release()
class ForeFactor:
def __init__(self, workPath,cfgFile):
self.workPath = workPath
self.cfgFile = cfgFile
self.dictCodeInfo = {}
self.nIndu=0
self.listModel=[]
self.listStrategyName=[]
self.pclMatrix=np.array([])
#output
self.lastInduData=np.array([])
self.inputData=np.array([])
self.pm=np.zeros(3)
self._getCfg()
def _getCfg(self):
global nXData,timeSpan
data = xlrd.open_workbook(os.path.join(self.workPath,self.cfgFile))
sheetCodeInfo = data.sheets()[0]
arrShares = sheetCodeInfo.col_values(1)[1:]
arrCode = sheetCodeInfo.col_values(0)[1:]
arrIndustry = sheetCodeInfo.col_values(2)[1:]
self.nIndu=len(set(arrIndustry))
self.inputData=np.zeros((nXData,self.nIndu*2))
for i in range(len(arrCode)):
self.dictCodeInfo[arrCode[i]]=[arrShares[i],arrIndustry[i]]
arrCfg=data.sheets()[1].col_values(1)
self.listStrategyName=arrCfg[10].split(',')
(filepath,tempfilename) = os.path.split(self.cfgFile)
(filename,extension) = os.path.splitext(tempfilename)
modelPath=os.path.join(self.workPath,filename)
testP=np.zeros((1,nXData,self.nIndu*2))
for i in range(3):
modelfile=os.path.join(modelPath,'model_'+filename+'_1min_'+str(i+1)+'min.h5')
model=models.load_model(modelfile,custom_objects={'myLoss': myLoss,'myMetric':myMetric})
model.predict(testP)
self.listModel.append(model)
self.pclMatrix=np.loadtxt(os.path.join(modelPath,'pclMatrix_'+filename+'.csv'),delimiter=',')
getTSAvgAmnt(os.path.join(modelPath,'avgAmnt_'+filename+'.csv'))
def CalPM(self):
global dictQuote,dictTSAvgAmnt,nXData
crow=np.zeros(self.nIndu*2)
inputRow=np.zeros(self.nIndu*2)
npAveTSpanAmnt=np.zeros(self.nIndu)
for (symbol,weiIndu) in self.dictCodeInfo.items():
if (symbol not in dictQuote):# or (symbol not in dictTSAvgAmnt):
#print('np Symbol: '+ symbol)
#return
continue
wei=weiIndu[0]
intIndu=int(weiIndu[1]+0.1)
lpri=dictQuote[symbol][1]
lamt=dictQuote[symbol][2]
crow[2*intIndu-2]+=wei*lpri
crow[2*intIndu-1]+=lamt
npAveTSpanAmnt[intIndu-1]+=dictTSAvgAmnt[symbol]
#if lpri<0.01:
#print('price 0: '+symbol)
#continue
if crow[0]<1:
print('wait quote')
return
if self.lastInduData.size==0:
self.lastInduData=crow
for i in range(self.nIndu):
inputRow[2*i]=(crow[2*i]/self.lastInduData[2*i]-1)*10000
inputRow[2*i+1]=(crow[2*i+1]-self.lastInduData[2*i+1])/npAveTSpanAmnt[i]
inputRow=getNormInduData(inputRow,self.pclMatrix)
self.inputData=np.vstack((self.inputData[1:,:],inputRow))
self.lastInduData=crow
for i in range(3):
self.pm[i]=self.listModel[i].predict(self.inputData.reshape(1,nXData,self.nIndu*2))[0,0]
#backend.clear_session()
self.pm=np.round(self.pm,2)
if __name__ == '__main__':
#global
eventManager = EventManager()
lock = threading.Lock()
listForeFactor=[]
dictQuote={}
dictTSAvgAmnt={}
timeSpan=3
nXData=20
#config
cfgPath='D:\\CalForeFactor\\HFI_Model'
if not os.path.exists(cfgPath):
cfgPath='C:\\Users\\WAP\\Documents\\HFI_Model'
cfgSQL,listCfgForeFactor=getCfgFareFactor(cfgPath)
fPath=os.path.join(cfgPath,'cfg')
for cfgFF in listCfgForeFactor:
listForeFactor.append(ForeFactor(fPath,cfgFF))
#SQL
sql=MSSQL(*cfgSQL)
nConnect=0
while not sql.Connect():
print('SQL Connet Error: ',nConnect)
nConnect+=1
time.sleep(5)
#Event
eventManager.AddEventListener("quote",ReceiveQuote)
eventManager.AddEventListener("normData",MyNormData)
eventManager.Start()
w.SetMarketDataCallBack(TDFCallBack)
dataVendor = w.WindMarketVendor("TDFConfig.ini", "TDFAPI25.dll")
nConnect=0
while (dataVendor.Reconnect() is False):
print("Error nConnect: ",nConnect)
nConnect+=1
time.sleep(5)
dataVendor.RegisterSymbol(set(dictTSAvgAmnt.keys()))
time.sleep(10)
for i in range(30):
eventManager.SendEvent(MyEvent("normData",False))
time.sleep(timeSpan)
while True:
eventManager.SendEvent(MyEvent("normData",True))
time.sleep(timeSpan)
| identifier_name | |
utils.py | import os
from os import path
from config import TIMEZONE, SPREADSHEETID
import config
from datetime import datetime, timedelta
import time
from pytz import timezone
import json
import calendar
import models
pj = path.join
CLIENTS_CACHE_VALID = 'CLIENTS_CACHE_VALID'
def p(pt):
return pj(path.dirname(__file__), pt)
def listGet(ls, index, default = None):
try:
return ls[index]
except IndexError:
return default
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def local2utc(dt):
utc_st = dt.replace(tzinfo=timezone(TIMEZONE)).astimezone(timezone('UTC'))
return utc_st
def utc2local(dt, tz = TIMEZONE):
return dt.replace(tzinfo=timezone('UTC')).astimezone(timezone(tz))
def getWeekDays(dt):
if not dt.tzinfo:
dt = dt.replace(tzinfo=timezone(TIMEZONE))
else:
dt = dt.astimezone(timezone(TIMEZONE))
# weekday of Monday is 0
monday = dt - timedelta(days=dt.weekday())
weekdays = [monday]
for i in range(1, 7):
weekdays.append(monday + timedelta(days=i))
return weekdays
def addMonths(dt,months):
month = dt.month - 1 + months
year = dt.year + month // 12
month = month % 12 + 1
day = min(dt.day,calendar.monthrange(year,month)[1])
return dt.replace(year=year, month = month,day=day)
def toTimestamp(dt):
return int(time.mktime(dt.timetuple()))
def toDatetime(ts):
return datetime.fromtimestamp(ts)
gcService = None
def getGoogleCalendarService():
global gcService
if not gcService:
import httplib2
from apiclient import discovery
from get_google_calendar_credentials import get_google_calendar_credentials
credentials = get_google_calendar_credentials()
http = credentials.authorize(httplib2.Http())
gcService = discovery.build('calendar', 'v3', http=http)
return gcService
gsService = None
def getGoogleSheetService():
global gsService
if not gsService:
import httplib2
from apiclient import discovery
from get_google_sheet_credentials import get_google_sheet_credentials
credentials = get_google_sheet_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
gsService = discovery.build('sheets', 'v4', http=http,
discoveryServiceUrl=discoveryUrl)
return gsService
def updateSheet(body, rangeName = 'Sheet1', valueInputOption='USER_ENTERED'):
spreadsheetId = SPREADSHEETID
service = getGoogleSheetService()
result = service.spreadsheets().values().update(
spreadsheetId=spreadsheetId, range=rangeName,
valueInputOption=valueInputOption, body=body).execute()
if rangeName.startswith('Sheet1'):
setting({CLIENTS_CACHE_VALID: False})
print('{0} cells updated.'.format(result.get('updatedCells')))
return result
# create google calendar event
def createEvent(name, time, lastHours=1):
service = getGoogleCalendarService()
endTime = time + timedelta(hours=lastHours)
event = {
'summary': name,
# 'location': '800 Howard St., San Francisco, CA 94103',
# 'description': 'A chance to hear more about Google\'s developer products.',
'start': {
# 'dateTime': '2015-05-28T09:00:00-07:00',
'dateTime': time.strftime('%Y-%m-%dT%H:%M:%S'),
'timeZone': TIMEZONE,
},
'end': {
'dateTime': endTime.strftime('%Y-%m-%dT%H:%M:%S'),
'timeZone': TIMEZONE,
},
# 'recurrence': [
# 'RRULE:FREQ=DAILY;COUNT=2'
# ],
# 'attendees': [
# {'email': 'lpage@example.com'},
# {'email': 'sbrin@example.com'},
# ],
# 'reminders': {
# 'useDefault': False,
# 'overrides': [
# {'method': 'email', 'minutes': 24 * 60},
# {'method': 'popup', 'minutes': 10},
# ],
# },
}
event = service.events().insert(calendarId='primary', body=event).execute()
print('Event created: %s' % (event.get('htmlLink')))
return event
def sendSms(phone, msg):
from twilio.rest import Client
from config import SPREADSHEETID, sms_account_sid, sms_auth_token, sms_from
client = Client(sms_account_sid, sms_auth_token)
client.api.account.messages.create(
to=phone,
from_=sms_from,
body=msg)
def getSheetValues(rangeName = 'Sheet1'):
service = getGoogleSheetService()
spreadsheetId = SPREADSHEETID
rangeName = 'Sheet1'
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheetId, range=rangeName).execute()
sheetRows = result.get('values', [])
return sheetRows
def getClientFilter():
if not setting(CLIENTS_CACHE_VALID):
from cassandra.cqlengine import connection
from cassandra.cqlengine.query import BatchQuery
session = connection.get_connection().session
session.execute('TRUNCATE %s.client;'%(config.db_keyspace))
values = getSheetValues()
with BatchQuery() as b:
for i, row in enumerate(values):
# 0 is head
if i == 0:
continue
lineNumber = i + 1
row2 = [listGet(row, j, '').strip() for j in range(4)]
notEmpty = False
for cell in row2:
if cell:
notEmpty = True
break
if notEmpty:
models.client.batch(b).create(id=lineNumber, phone=row2[0], name=row2[1], full_name=row2[2], facebook_id=row2[3])
values = [['Cached at ' + utc2local(datetime.utcnow()).strftime("%Y-%m-%d %H:%M:%S")], ['line number', 'phone', 'name', 'full name', 'facebook id']]
for row in models.client.all():
values.append([row.id, row.phone, row.name, row.full_name, row.facebook_id])
body = {
'values': values
}
updateSheet(body, 'Cached')
setting({CLIENTS_CACHE_VALID: True})
return models.client.objects()
def getGoogleStrTime(dt):
return dt.replace(tzinfo=None).isoformat() + 'Z' # 'Z' indicates UTC time
def getEventsByPhone(phone):
service = getGoogleCalendarService()
today = utc2local(datetime.utcnow())
today = today.replace(hour=0, minute=0, second=0, microsecond=0)
today = local2utc(today)
today = getGoogleStrTime(today)
eventsResult = service.events().list(
calendarId='primary', singleEvents=True, q =phone, timeMin=today,
orderBy='startTime').execute()
events = eventsResult.get('items')
if not events:
return None
events.reverse()
return events
def getEventById(evid):
service = getGoogleCalendarService()
return service.events().get(calendarId='primary', eventId=evid).execute() | user = models.user.objects.filter(id=id).first()
cache = {} if not user else json.loads(user.cache)
return cache.get(name, default)
def userCacheSet(id, name, value):
user = models.user.objects.filter(id=id).first()
if not user:
models.user.create(id=id, cache=json.dumps({}))
user = models.user.objects.filter(id=id).first()
cache = json.loads(user.cache)
if value == None:
if name in cache:
del cache[name]
else:
cache[name] = value
user.cache = json.dumps(cache)
user.save()
def setting(name, default=None):
item = models.key_value.objects.filter(key='setting').first()
dc = {} if not item else json.loads(item.value)
if isinstance(name, dict):
# set
toset = name
for k, v in toset.items():
if v == None:
# remove
if k in dc:
del dc[k]
else:
dc[k] = v
dcStr = json.dumps(dc)
if not item:
item = models.key_value(key='setting')
item.value = dcStr
item.save()
else:
# get
return dc.get(name, default)
# get: name, default(nullable)
# set: nameValues(dict), minutes(nullable)
def cache(*args):
if isinstance(args[0], dict):
# set
nameValues = args[0]
minutes = listGet(args, 1)
for k, v in nameValues.items():
item = models.cache.objects.filter(name=k).first()
if not item:
item = models.cache()
item.name = k
item.value = v
if minutes:
item.expired_at = datetime.now() + timedelta(minutes=minutes)
else:
item.expired_at = None
item.save()
else:
# get
name = args[0]
default = listGet(args, 1)
# remove expired
t = models.cache.objects.filter(expired_at__lt = datetime.now()).allow_filtering()[:]
for v in t:
v.delete()
#
item = models.cache.objects.filter(name=name).first()
if not item:
return default
return item.value
# deprecated
phoneEventFp = p('phone-event.json')
def addPhoneEventMapping(phone, eventId):
fp = phoneEventFp
if not os.path.exists(fp):
with open(fp, 'w') as target:
target.write('{}')
mapping = None
with open(fp) as f:
mapping = json.loads(f.read())
if phone not in mapping:
mapping[phone] = []
mapping[phone].append(eventId)
with open(fp, 'w') as target:
target.write(json.dumps(mapping))
def getEventIdByPhone(phone):
fp = phoneEventFp
if not os.path.exists(fp):
return None
with open(fp) as f:
mapping = json.loads(f.read())
return mapping.get(phone)
def getLogger(fp):
import logging
from logging.handlers import TimedRotatingFileHandler
# logger
LOG_FILE = fp
#logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',datefmt='%Y-%m-%d %I:%M:%S',filemode='w') #for term print
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
# one log file per day, keep 30 files at most; 日志文件(天), 最多30个
fh = TimedRotatingFileHandler(LOG_FILE,when='D',interval=1,backupCount=30)
datefmt = '%Y-%m-%d %H:%M:%S'
format_str = '%(asctime)s %(levelname)s %(message)s '
#formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
formatter = logging.Formatter(format_str, datefmt)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger | def getBookingDateFromEvent(event, fmt = '%Y-%m-%d %H:%M'):
start = datetime.strptime(event['start']['dateTime'][:19], "%Y-%m-%dT%H:%M:%S")
bookingDatetime = start.strftime(fmt)
return bookingDatetime
def userCacheGet(id, name, default=None): | random_line_split |
utils.py | import os
from os import path
from config import TIMEZONE, SPREADSHEETID
import config
from datetime import datetime, timedelta
import time
from pytz import timezone
import json
import calendar
import models
pj = path.join
CLIENTS_CACHE_VALID = 'CLIENTS_CACHE_VALID'
def p(pt):
return pj(path.dirname(__file__), pt)
def listGet(ls, index, default = None):
try:
return ls[index]
except IndexError:
return default
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def local2utc(dt):
utc_st = dt.replace(tzinfo=timezone(TIMEZONE)).astimezone(timezone('UTC'))
return utc_st
def utc2local(dt, tz = TIMEZONE):
return dt.replace(tzinfo=timezone('UTC')).astimezone(timezone(tz))
def getWeekDays(dt):
if not dt.tzinfo:
dt = dt.replace(tzinfo=timezone(TIMEZONE))
else:
dt = dt.astimezone(timezone(TIMEZONE))
# weekday of Monday is 0
monday = dt - timedelta(days=dt.weekday())
weekdays = [monday]
for i in range(1, 7):
weekdays.append(monday + timedelta(days=i))
return weekdays
def addMonths(dt,months):
month = dt.month - 1 + months
year = dt.year + month // 12
month = month % 12 + 1
day = min(dt.day,calendar.monthrange(year,month)[1])
return dt.replace(year=year, month = month,day=day)
def toTimestamp(dt):
return int(time.mktime(dt.timetuple()))
def toDatetime(ts):
return datetime.fromtimestamp(ts)
gcService = None
def getGoogleCalendarService():
global gcService
if not gcService:
import httplib2
from apiclient import discovery
from get_google_calendar_credentials import get_google_calendar_credentials
credentials = get_google_calendar_credentials()
http = credentials.authorize(httplib2.Http())
gcService = discovery.build('calendar', 'v3', http=http)
return gcService
gsService = None
def getGoogleSheetService():
global gsService
if not gsService:
import httplib2
from apiclient import discovery
from get_google_sheet_credentials import get_google_sheet_credentials
credentials = get_google_sheet_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
gsService = discovery.build('sheets', 'v4', http=http,
discoveryServiceUrl=discoveryUrl)
return gsService
def updateSheet(body, rangeName = 'Sheet1', valueInputOption='USER_ENTERED'):
spreadsheetId = SPREADSHEETID
service = getGoogleSheetService()
result = service.spreadsheets().values().update(
spreadsheetId=spreadsheetId, range=rangeName,
valueInputOption=valueInputOption, body=body).execute()
if rangeName.startswith('Sheet1'):
setting({CLIENTS_CACHE_VALID: False})
print('{0} cells updated.'.format(result.get('updatedCells')))
return result
# create google calendar event
def createEvent(name, time, lastHours=1):
service = getGoogleCalendarService()
endTime = time + timedelta(hours=lastHours)
event = {
'summary': name,
# 'location': '800 Howard St., San Francisco, CA 94103',
# 'description': 'A chance to hear more about Google\'s developer products.',
'start': {
# 'dateTime': '2015-05-28T09:00:00-07:00',
'dateTime': time.strftime('%Y-%m-%dT%H:%M:%S'),
'timeZone': TIMEZONE,
},
'end': {
'dateTime': endTime.strftime('%Y-%m-%dT%H:%M:%S'),
'timeZone': TIMEZONE,
},
# 'recurrence': [
# 'RRULE:FREQ=DAILY;COUNT=2'
# ],
# 'attendees': [
# {'email': 'lpage@example.com'},
# {'email': 'sbrin@example.com'},
# ],
# 'reminders': {
# 'useDefault': False,
# 'overrides': [
# {'method': 'email', 'minutes': 24 * 60},
# {'method': 'popup', 'minutes': 10},
# ],
# },
}
event = service.events().insert(calendarId='primary', body=event).execute()
print('Event created: %s' % (event.get('htmlLink')))
return event
def sendSms(phone, msg):
from twilio.rest import Client
from config import SPREADSHEETID, sms_account_sid, sms_auth_token, sms_from
client = Client(sms_account_sid, sms_auth_token)
client.api.account.messages.create(
to=phone,
from_=sms_from,
body=msg)
def getSheetValues(rangeName = 'Sheet1'):
service = getGoogleSheetService()
spreadsheetId = SPREADSHEETID
rangeName = 'Sheet1'
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheetId, range=rangeName).execute()
sheetRows = result.get('values', [])
return sheetRows
def getClientFilter():
if not setting(CLIENTS_CACHE_VALID):
from cassandra.cqlengine import connection
from cassandra.cqlengine.query import BatchQuery
session = connection.get_connection().session
session.execute('TRUNCATE %s.client;'%(config.db_keyspace))
values = getSheetValues()
with BatchQuery() as b:
for i, row in enumerate(values):
# 0 is head
if i == 0:
continue
lineNumber = i + 1
row2 = [listGet(row, j, '').strip() for j in range(4)]
notEmpty = False
for cell in row2:
if cell:
notEmpty = True
break
if notEmpty:
models.client.batch(b).create(id=lineNumber, phone=row2[0], name=row2[1], full_name=row2[2], facebook_id=row2[3])
values = [['Cached at ' + utc2local(datetime.utcnow()).strftime("%Y-%m-%d %H:%M:%S")], ['line number', 'phone', 'name', 'full name', 'facebook id']]
for row in models.client.all():
values.append([row.id, row.phone, row.name, row.full_name, row.facebook_id])
body = {
'values': values
}
updateSheet(body, 'Cached')
setting({CLIENTS_CACHE_VALID: True})
return models.client.objects()
def getGoogleStrTime(dt):
return dt.replace(tzinfo=None).isoformat() + 'Z' # 'Z' indicates UTC time
def getEventsByPhone(phone):
service = getGoogleCalendarService()
today = utc2local(datetime.utcnow())
today = today.replace(hour=0, minute=0, second=0, microsecond=0)
today = local2utc(today)
today = getGoogleStrTime(today)
eventsResult = service.events().list(
calendarId='primary', singleEvents=True, q =phone, timeMin=today,
orderBy='startTime').execute()
events = eventsResult.get('items')
if not events:
return None
events.reverse()
return events
def getEventById(evid):
service = getGoogleCalendarService()
return service.events().get(calendarId='primary', eventId=evid).execute()
def getBookingDateFromEvent(event, fmt = '%Y-%m-%d %H:%M'):
start = datetime.strptime(event['start']['dateTime'][:19], "%Y-%m-%dT%H:%M:%S")
bookingDatetime = start.strftime(fmt)
return bookingDatetime
def userCacheGet(id, name, default=None):
user = models.user.objects.filter(id=id).first()
cache = {} if not user else json.loads(user.cache)
return cache.get(name, default)
def userCacheSet(id, name, value):
user = models.user.objects.filter(id=id).first()
if not user:
models.user.create(id=id, cache=json.dumps({}))
user = models.user.objects.filter(id=id).first()
cache = json.loads(user.cache)
if value == None:
if name in cache:
del cache[name]
else:
cache[name] = value
user.cache = json.dumps(cache)
user.save()
def setting(name, default=None):
item = models.key_value.objects.filter(key='setting').first()
dc = {} if not item else json.loads(item.value)
if isinstance(name, dict):
# set
toset = name
for k, v in toset.items():
if v == None:
# remove
if k in dc:
del dc[k]
else:
dc[k] = v
dcStr = json.dumps(dc)
if not item:
item = models.key_value(key='setting')
item.value = dcStr
item.save()
else:
# get
|
# get: name, default(nullable)
# set: nameValues(dict), minutes(nullable)
def cache(*args):
if isinstance(args[0], dict):
# set
nameValues = args[0]
minutes = listGet(args, 1)
for k, v in nameValues.items():
item = models.cache.objects.filter(name=k).first()
if not item:
item = models.cache()
item.name = k
item.value = v
if minutes:
item.expired_at = datetime.now() + timedelta(minutes=minutes)
else:
item.expired_at = None
item.save()
else:
# get
name = args[0]
default = listGet(args, 1)
# remove expired
t = models.cache.objects.filter(expired_at__lt = datetime.now()).allow_filtering()[:]
for v in t:
v.delete()
#
item = models.cache.objects.filter(name=name).first()
if not item:
return default
return item.value
# deprecated
phoneEventFp = p('phone-event.json')
def addPhoneEventMapping(phone, eventId):
fp = phoneEventFp
if not os.path.exists(fp):
with open(fp, 'w') as target:
target.write('{}')
mapping = None
with open(fp) as f:
mapping = json.loads(f.read())
if phone not in mapping:
mapping[phone] = []
mapping[phone].append(eventId)
with open(fp, 'w') as target:
target.write(json.dumps(mapping))
def getEventIdByPhone(phone):
fp = phoneEventFp
if not os.path.exists(fp):
return None
with open(fp) as f:
mapping = json.loads(f.read())
return mapping.get(phone)
def getLogger(fp):
import logging
from logging.handlers import TimedRotatingFileHandler
# logger
LOG_FILE = fp
#logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',datefmt='%Y-%m-%d %I:%M:%S',filemode='w') #for term print
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
# one log file per day, keep 30 files at most; 日志文件(天), 最多30个
fh = TimedRotatingFileHandler(LOG_FILE,when='D',interval=1,backupCount=30)
datefmt = '%Y-%m-%d %H:%M:%S'
format_str = '%(asctime)s %(levelname)s %(message)s '
#formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
formatter = logging.Formatter(format_str, datefmt)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger | return dc.get(name, default) | conditional_block |
utils.py | import os
from os import path
from config import TIMEZONE, SPREADSHEETID
import config
from datetime import datetime, timedelta
import time
from pytz import timezone
import json
import calendar
import models
pj = path.join
CLIENTS_CACHE_VALID = 'CLIENTS_CACHE_VALID'
def p(pt):
return pj(path.dirname(__file__), pt)
def listGet(ls, index, default = None):
try:
return ls[index]
except IndexError:
return default
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def local2utc(dt):
utc_st = dt.replace(tzinfo=timezone(TIMEZONE)).astimezone(timezone('UTC'))
return utc_st
def utc2local(dt, tz = TIMEZONE):
return dt.replace(tzinfo=timezone('UTC')).astimezone(timezone(tz))
def getWeekDays(dt):
if not dt.tzinfo:
dt = dt.replace(tzinfo=timezone(TIMEZONE))
else:
dt = dt.astimezone(timezone(TIMEZONE))
# weekday of Monday is 0
monday = dt - timedelta(days=dt.weekday())
weekdays = [monday]
for i in range(1, 7):
weekdays.append(monday + timedelta(days=i))
return weekdays
def addMonths(dt,months):
month = dt.month - 1 + months
year = dt.year + month // 12
month = month % 12 + 1
day = min(dt.day,calendar.monthrange(year,month)[1])
return dt.replace(year=year, month = month,day=day)
def toTimestamp(dt):
return int(time.mktime(dt.timetuple()))
def | (ts):
return datetime.fromtimestamp(ts)
gcService = None
def getGoogleCalendarService():
global gcService
if not gcService:
import httplib2
from apiclient import discovery
from get_google_calendar_credentials import get_google_calendar_credentials
credentials = get_google_calendar_credentials()
http = credentials.authorize(httplib2.Http())
gcService = discovery.build('calendar', 'v3', http=http)
return gcService
gsService = None
def getGoogleSheetService():
global gsService
if not gsService:
import httplib2
from apiclient import discovery
from get_google_sheet_credentials import get_google_sheet_credentials
credentials = get_google_sheet_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
gsService = discovery.build('sheets', 'v4', http=http,
discoveryServiceUrl=discoveryUrl)
return gsService
def updateSheet(body, rangeName = 'Sheet1', valueInputOption='USER_ENTERED'):
spreadsheetId = SPREADSHEETID
service = getGoogleSheetService()
result = service.spreadsheets().values().update(
spreadsheetId=spreadsheetId, range=rangeName,
valueInputOption=valueInputOption, body=body).execute()
if rangeName.startswith('Sheet1'):
setting({CLIENTS_CACHE_VALID: False})
print('{0} cells updated.'.format(result.get('updatedCells')))
return result
# create google calendar event
def createEvent(name, time, lastHours=1):
service = getGoogleCalendarService()
endTime = time + timedelta(hours=lastHours)
event = {
'summary': name,
# 'location': '800 Howard St., San Francisco, CA 94103',
# 'description': 'A chance to hear more about Google\'s developer products.',
'start': {
# 'dateTime': '2015-05-28T09:00:00-07:00',
'dateTime': time.strftime('%Y-%m-%dT%H:%M:%S'),
'timeZone': TIMEZONE,
},
'end': {
'dateTime': endTime.strftime('%Y-%m-%dT%H:%M:%S'),
'timeZone': TIMEZONE,
},
# 'recurrence': [
# 'RRULE:FREQ=DAILY;COUNT=2'
# ],
# 'attendees': [
# {'email': 'lpage@example.com'},
# {'email': 'sbrin@example.com'},
# ],
# 'reminders': {
# 'useDefault': False,
# 'overrides': [
# {'method': 'email', 'minutes': 24 * 60},
# {'method': 'popup', 'minutes': 10},
# ],
# },
}
event = service.events().insert(calendarId='primary', body=event).execute()
print('Event created: %s' % (event.get('htmlLink')))
return event
def sendSms(phone, msg):
from twilio.rest import Client
from config import SPREADSHEETID, sms_account_sid, sms_auth_token, sms_from
client = Client(sms_account_sid, sms_auth_token)
client.api.account.messages.create(
to=phone,
from_=sms_from,
body=msg)
def getSheetValues(rangeName = 'Sheet1'):
service = getGoogleSheetService()
spreadsheetId = SPREADSHEETID
rangeName = 'Sheet1'
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheetId, range=rangeName).execute()
sheetRows = result.get('values', [])
return sheetRows
def getClientFilter():
if not setting(CLIENTS_CACHE_VALID):
from cassandra.cqlengine import connection
from cassandra.cqlengine.query import BatchQuery
session = connection.get_connection().session
session.execute('TRUNCATE %s.client;'%(config.db_keyspace))
values = getSheetValues()
with BatchQuery() as b:
for i, row in enumerate(values):
# 0 is head
if i == 0:
continue
lineNumber = i + 1
row2 = [listGet(row, j, '').strip() for j in range(4)]
notEmpty = False
for cell in row2:
if cell:
notEmpty = True
break
if notEmpty:
models.client.batch(b).create(id=lineNumber, phone=row2[0], name=row2[1], full_name=row2[2], facebook_id=row2[3])
values = [['Cached at ' + utc2local(datetime.utcnow()).strftime("%Y-%m-%d %H:%M:%S")], ['line number', 'phone', 'name', 'full name', 'facebook id']]
for row in models.client.all():
values.append([row.id, row.phone, row.name, row.full_name, row.facebook_id])
body = {
'values': values
}
updateSheet(body, 'Cached')
setting({CLIENTS_CACHE_VALID: True})
return models.client.objects()
def getGoogleStrTime(dt):
return dt.replace(tzinfo=None).isoformat() + 'Z' # 'Z' indicates UTC time
def getEventsByPhone(phone):
service = getGoogleCalendarService()
today = utc2local(datetime.utcnow())
today = today.replace(hour=0, minute=0, second=0, microsecond=0)
today = local2utc(today)
today = getGoogleStrTime(today)
eventsResult = service.events().list(
calendarId='primary', singleEvents=True, q =phone, timeMin=today,
orderBy='startTime').execute()
events = eventsResult.get('items')
if not events:
return None
events.reverse()
return events
def getEventById(evid):
service = getGoogleCalendarService()
return service.events().get(calendarId='primary', eventId=evid).execute()
def getBookingDateFromEvent(event, fmt = '%Y-%m-%d %H:%M'):
start = datetime.strptime(event['start']['dateTime'][:19], "%Y-%m-%dT%H:%M:%S")
bookingDatetime = start.strftime(fmt)
return bookingDatetime
def userCacheGet(id, name, default=None):
user = models.user.objects.filter(id=id).first()
cache = {} if not user else json.loads(user.cache)
return cache.get(name, default)
def userCacheSet(id, name, value):
user = models.user.objects.filter(id=id).first()
if not user:
models.user.create(id=id, cache=json.dumps({}))
user = models.user.objects.filter(id=id).first()
cache = json.loads(user.cache)
if value == None:
if name in cache:
del cache[name]
else:
cache[name] = value
user.cache = json.dumps(cache)
user.save()
def setting(name, default=None):
item = models.key_value.objects.filter(key='setting').first()
dc = {} if not item else json.loads(item.value)
if isinstance(name, dict):
# set
toset = name
for k, v in toset.items():
if v == None:
# remove
if k in dc:
del dc[k]
else:
dc[k] = v
dcStr = json.dumps(dc)
if not item:
item = models.key_value(key='setting')
item.value = dcStr
item.save()
else:
# get
return dc.get(name, default)
# get: name, default(nullable)
# set: nameValues(dict), minutes(nullable)
def cache(*args):
if isinstance(args[0], dict):
# set
nameValues = args[0]
minutes = listGet(args, 1)
for k, v in nameValues.items():
item = models.cache.objects.filter(name=k).first()
if not item:
item = models.cache()
item.name = k
item.value = v
if minutes:
item.expired_at = datetime.now() + timedelta(minutes=minutes)
else:
item.expired_at = None
item.save()
else:
# get
name = args[0]
default = listGet(args, 1)
# remove expired
t = models.cache.objects.filter(expired_at__lt = datetime.now()).allow_filtering()[:]
for v in t:
v.delete()
#
item = models.cache.objects.filter(name=name).first()
if not item:
return default
return item.value
# deprecated
phoneEventFp = p('phone-event.json')
def addPhoneEventMapping(phone, eventId):
fp = phoneEventFp
if not os.path.exists(fp):
with open(fp, 'w') as target:
target.write('{}')
mapping = None
with open(fp) as f:
mapping = json.loads(f.read())
if phone not in mapping:
mapping[phone] = []
mapping[phone].append(eventId)
with open(fp, 'w') as target:
target.write(json.dumps(mapping))
def getEventIdByPhone(phone):
fp = phoneEventFp
if not os.path.exists(fp):
return None
with open(fp) as f:
mapping = json.loads(f.read())
return mapping.get(phone)
def getLogger(fp):
import logging
from logging.handlers import TimedRotatingFileHandler
# logger
LOG_FILE = fp
#logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',datefmt='%Y-%m-%d %I:%M:%S',filemode='w') #for term print
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
# one log file per day, keep 30 files at most; 日志文件(天), 最多30个
fh = TimedRotatingFileHandler(LOG_FILE,when='D',interval=1,backupCount=30)
datefmt = '%Y-%m-%d %H:%M:%S'
format_str = '%(asctime)s %(levelname)s %(message)s '
#formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
formatter = logging.Formatter(format_str, datefmt)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger | toDatetime | identifier_name |
utils.py | import os
from os import path
from config import TIMEZONE, SPREADSHEETID
import config
from datetime import datetime, timedelta
import time
from pytz import timezone
import json
import calendar
import models
pj = path.join
CLIENTS_CACHE_VALID = 'CLIENTS_CACHE_VALID'
def p(pt):
return pj(path.dirname(__file__), pt)
def listGet(ls, index, default = None):
try:
return ls[index]
except IndexError:
return default
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def local2utc(dt):
utc_st = dt.replace(tzinfo=timezone(TIMEZONE)).astimezone(timezone('UTC'))
return utc_st
def utc2local(dt, tz = TIMEZONE):
return dt.replace(tzinfo=timezone('UTC')).astimezone(timezone(tz))
def getWeekDays(dt):
if not dt.tzinfo:
dt = dt.replace(tzinfo=timezone(TIMEZONE))
else:
dt = dt.astimezone(timezone(TIMEZONE))
# weekday of Monday is 0
monday = dt - timedelta(days=dt.weekday())
weekdays = [monday]
for i in range(1, 7):
weekdays.append(monday + timedelta(days=i))
return weekdays
def addMonths(dt,months):
|
def toTimestamp(dt):
return int(time.mktime(dt.timetuple()))
def toDatetime(ts):
return datetime.fromtimestamp(ts)
gcService = None
def getGoogleCalendarService():
global gcService
if not gcService:
import httplib2
from apiclient import discovery
from get_google_calendar_credentials import get_google_calendar_credentials
credentials = get_google_calendar_credentials()
http = credentials.authorize(httplib2.Http())
gcService = discovery.build('calendar', 'v3', http=http)
return gcService
gsService = None
def getGoogleSheetService():
global gsService
if not gsService:
import httplib2
from apiclient import discovery
from get_google_sheet_credentials import get_google_sheet_credentials
credentials = get_google_sheet_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
gsService = discovery.build('sheets', 'v4', http=http,
discoveryServiceUrl=discoveryUrl)
return gsService
def updateSheet(body, rangeName = 'Sheet1', valueInputOption='USER_ENTERED'):
spreadsheetId = SPREADSHEETID
service = getGoogleSheetService()
result = service.spreadsheets().values().update(
spreadsheetId=spreadsheetId, range=rangeName,
valueInputOption=valueInputOption, body=body).execute()
if rangeName.startswith('Sheet1'):
setting({CLIENTS_CACHE_VALID: False})
print('{0} cells updated.'.format(result.get('updatedCells')))
return result
# create google calendar event
def createEvent(name, time, lastHours=1):
service = getGoogleCalendarService()
endTime = time + timedelta(hours=lastHours)
event = {
'summary': name,
# 'location': '800 Howard St., San Francisco, CA 94103',
# 'description': 'A chance to hear more about Google\'s developer products.',
'start': {
# 'dateTime': '2015-05-28T09:00:00-07:00',
'dateTime': time.strftime('%Y-%m-%dT%H:%M:%S'),
'timeZone': TIMEZONE,
},
'end': {
'dateTime': endTime.strftime('%Y-%m-%dT%H:%M:%S'),
'timeZone': TIMEZONE,
},
# 'recurrence': [
# 'RRULE:FREQ=DAILY;COUNT=2'
# ],
# 'attendees': [
# {'email': 'lpage@example.com'},
# {'email': 'sbrin@example.com'},
# ],
# 'reminders': {
# 'useDefault': False,
# 'overrides': [
# {'method': 'email', 'minutes': 24 * 60},
# {'method': 'popup', 'minutes': 10},
# ],
# },
}
event = service.events().insert(calendarId='primary', body=event).execute()
print('Event created: %s' % (event.get('htmlLink')))
return event
def sendSms(phone, msg):
from twilio.rest import Client
from config import SPREADSHEETID, sms_account_sid, sms_auth_token, sms_from
client = Client(sms_account_sid, sms_auth_token)
client.api.account.messages.create(
to=phone,
from_=sms_from,
body=msg)
def getSheetValues(rangeName = 'Sheet1'):
service = getGoogleSheetService()
spreadsheetId = SPREADSHEETID
rangeName = 'Sheet1'
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheetId, range=rangeName).execute()
sheetRows = result.get('values', [])
return sheetRows
def getClientFilter():
if not setting(CLIENTS_CACHE_VALID):
from cassandra.cqlengine import connection
from cassandra.cqlengine.query import BatchQuery
session = connection.get_connection().session
session.execute('TRUNCATE %s.client;'%(config.db_keyspace))
values = getSheetValues()
with BatchQuery() as b:
for i, row in enumerate(values):
# 0 is head
if i == 0:
continue
lineNumber = i + 1
row2 = [listGet(row, j, '').strip() for j in range(4)]
notEmpty = False
for cell in row2:
if cell:
notEmpty = True
break
if notEmpty:
models.client.batch(b).create(id=lineNumber, phone=row2[0], name=row2[1], full_name=row2[2], facebook_id=row2[3])
values = [['Cached at ' + utc2local(datetime.utcnow()).strftime("%Y-%m-%d %H:%M:%S")], ['line number', 'phone', 'name', 'full name', 'facebook id']]
for row in models.client.all():
values.append([row.id, row.phone, row.name, row.full_name, row.facebook_id])
body = {
'values': values
}
updateSheet(body, 'Cached')
setting({CLIENTS_CACHE_VALID: True})
return models.client.objects()
def getGoogleStrTime(dt):
return dt.replace(tzinfo=None).isoformat() + 'Z' # 'Z' indicates UTC time
def getEventsByPhone(phone):
service = getGoogleCalendarService()
today = utc2local(datetime.utcnow())
today = today.replace(hour=0, minute=0, second=0, microsecond=0)
today = local2utc(today)
today = getGoogleStrTime(today)
eventsResult = service.events().list(
calendarId='primary', singleEvents=True, q =phone, timeMin=today,
orderBy='startTime').execute()
events = eventsResult.get('items')
if not events:
return None
events.reverse()
return events
def getEventById(evid):
service = getGoogleCalendarService()
return service.events().get(calendarId='primary', eventId=evid).execute()
def getBookingDateFromEvent(event, fmt = '%Y-%m-%d %H:%M'):
start = datetime.strptime(event['start']['dateTime'][:19], "%Y-%m-%dT%H:%M:%S")
bookingDatetime = start.strftime(fmt)
return bookingDatetime
def userCacheGet(id, name, default=None):
user = models.user.objects.filter(id=id).first()
cache = {} if not user else json.loads(user.cache)
return cache.get(name, default)
def userCacheSet(id, name, value):
user = models.user.objects.filter(id=id).first()
if not user:
models.user.create(id=id, cache=json.dumps({}))
user = models.user.objects.filter(id=id).first()
cache = json.loads(user.cache)
if value == None:
if name in cache:
del cache[name]
else:
cache[name] = value
user.cache = json.dumps(cache)
user.save()
def setting(name, default=None):
item = models.key_value.objects.filter(key='setting').first()
dc = {} if not item else json.loads(item.value)
if isinstance(name, dict):
# set
toset = name
for k, v in toset.items():
if v == None:
# remove
if k in dc:
del dc[k]
else:
dc[k] = v
dcStr = json.dumps(dc)
if not item:
item = models.key_value(key='setting')
item.value = dcStr
item.save()
else:
# get
return dc.get(name, default)
# get: name, default(nullable)
# set: nameValues(dict), minutes(nullable)
def cache(*args):
if isinstance(args[0], dict):
# set
nameValues = args[0]
minutes = listGet(args, 1)
for k, v in nameValues.items():
item = models.cache.objects.filter(name=k).first()
if not item:
item = models.cache()
item.name = k
item.value = v
if minutes:
item.expired_at = datetime.now() + timedelta(minutes=minutes)
else:
item.expired_at = None
item.save()
else:
# get
name = args[0]
default = listGet(args, 1)
# remove expired
t = models.cache.objects.filter(expired_at__lt = datetime.now()).allow_filtering()[:]
for v in t:
v.delete()
#
item = models.cache.objects.filter(name=name).first()
if not item:
return default
return item.value
# deprecated
phoneEventFp = p('phone-event.json')
def addPhoneEventMapping(phone, eventId):
fp = phoneEventFp
if not os.path.exists(fp):
with open(fp, 'w') as target:
target.write('{}')
mapping = None
with open(fp) as f:
mapping = json.loads(f.read())
if phone not in mapping:
mapping[phone] = []
mapping[phone].append(eventId)
with open(fp, 'w') as target:
target.write(json.dumps(mapping))
def getEventIdByPhone(phone):
fp = phoneEventFp
if not os.path.exists(fp):
return None
with open(fp) as f:
mapping = json.loads(f.read())
return mapping.get(phone)
def getLogger(fp):
import logging
from logging.handlers import TimedRotatingFileHandler
# logger
LOG_FILE = fp
#logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',datefmt='%Y-%m-%d %I:%M:%S',filemode='w') #for term print
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
# one log file per day, keep 30 files at most; 日志文件(天), 最多30个
fh = TimedRotatingFileHandler(LOG_FILE,when='D',interval=1,backupCount=30)
datefmt = '%Y-%m-%d %H:%M:%S'
format_str = '%(asctime)s %(levelname)s %(message)s '
#formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
formatter = logging.Formatter(format_str, datefmt)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger | month = dt.month - 1 + months
year = dt.year + month // 12
month = month % 12 + 1
day = min(dt.day,calendar.monthrange(year,month)[1])
return dt.replace(year=year, month = month,day=day) | identifier_body |
peers_tests.rs | #[cfg(not(feature = "native"))] use common::call_back;
use common::executor::Timer;
use common::for_tests::wait_for_log_re;
use common::mm_ctx::{MmArc, MmCtxBuilder};
use common::privkey::key_pair_from_seed;
#[cfg(feature = "native")] use common::wio::{drive, CORE};
use common::{block_on, now_float, small_rng};
use crdts::CmRDT;
use futures::future::{select, Either};
use futures01::Future;
use rand::{self, Rng, RngCore};
use serde_bytes::ByteBuf;
use serde_json::Value as Json;
use std::net::{Ipv4Addr, SocketAddr};
#[cfg(not(feature = "native"))] use std::os::raw::c_char;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::Duration;
#[cfg(feature = "native")]
use crate::http_fallback::UniqueActorId;
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn ulimit_n() -> Option<u32> {
use std::mem::zeroed;
let mut lim: libc::rlimit = unsafe { zeroed() };
let rc = unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut lim) };
if rc == 0 {
Some(lim.rlim_cur as u32)
} else {
None
}
}
#[cfg(not(any(target_os = "macos", target_os = "linux")))]
fn ulimit_n() -> Option<u32> { None }
async fn peer(conf: Json, port: u16) -> MmArc {
if let Some(n) = ulimit_n() {
assert!(n > 2000, "`ulimit -n` is too low: {}", n)
}
let ctx = MmCtxBuilder::new().with_conf(conf).into_mm_arc();
unwrap!(ctx.log.thread_gravity_on());
let seed = fomat!((small_rng().next_u64()));
unwrap!(ctx.secp256k1_key_pair.pin(unwrap!(key_pair_from_seed(&seed))));
if let Some(seednodes) = ctx.conf["seednodes"].as_array() {
let mut seeds = unwrap!(ctx.seeds.lock());
assert!(seeds.is_empty()); // `fn lp_initpeers` was not invoked.
assert!(!seednodes.is_empty());
seeds.push(unwrap!(unwrap!(seednodes[0].as_str()).parse()))
}
unwrap!(super::initialize(&ctx, 9999, port).await);
ctx
}
async fn destruction_check(mm: MmArc) {
mm.stop();
if let Err(err) = wait_for_log_re(&mm, 1., "delete_dugout finished!").await {
// NB: We want to know if/when the `peers` destruction doesn't happen, but we don't want to panic about it.
pintln!((err))
}
}
async fn peers_exchange(conf: Json) {
let fallback_on = conf["http-fallback"] == "on";
let fallback = if fallback_on { 1 } else { 255 };
let alice = peer(conf.clone(), 2111).await;
let bob = peer(conf, 2112).await;
if !fallback_on {
unwrap!(wait_for_log_re(&alice, 99., r"\[dht-boot] DHT bootstrap \.\.\. Done\.").await);
unwrap!(wait_for_log_re(&bob, 33., r"\[dht-boot] DHT bootstrap \.\.\. Done\.").await);
}
let tested_lengths: &[usize] = &[
2222, // Send multiple chunks.
1, // Reduce the number of chunks *in the same subject*.
// 992 /* (1000 - bencode overhead - checksum) */ * 253 /* Compatible with (1u8..) */ - 1 /* space for number_of_chunks */
];
let mut rng = small_rng();
for message_len in tested_lengths.iter() {
// Send a message to Bob.
let message: Vec<u8> = (0..*message_len).map(|_| rng.gen()).collect();
log! ("Sending " (message.len()) " bytes …");
let bob_id = unwrap!(bob.public_id());
let sending_f = unwrap!(
super::send(
alice.clone(),
bob_id,
Vec::from(&b"test_dht"[..]),
fallback,
message.clone()
)
.await
);
// Get that message from Alice.
let validator = super::FixedValidator::Exact(ByteBuf::from(&message[..]));
let rc = super::recv(bob.clone(), Vec::from(&b"test_dht"[..]), fallback, validator);
let rc = select(Box::pin(rc), Timer::sleep(99.)).await;
let received = match rc {
Either::Left((rc, _)) => unwrap!(rc),
Either::Right(_) => panic!("Out of time waiting for reply"),
};
assert_eq!(received, message);
if fallback_on {
// TODO: Refine the log test.
// TODO: Check that the HTTP fallback was NOT used if `!fallback_on`.
unwrap!(wait_for_log_re(&alice, 0.1, r"transmit] TBD, time to use the HTTP fallback\.\.\.").await)
// TODO: Check the time delta, with fallback 1 the delivery shouldn't take long.
}
let hn1 = crate::send_handlers_num();
drop(sending_f);
let hn2 = crate::send_handlers_num();
if cfg!(feature = "native") {
// Dropping SendHandlerRef results in the removal of the corresponding `Arc<SendHandler>`.
assert!(hn1 > 0 && hn2 == hn1 - 1, "hn1 {} hn2 {}", hn1, hn2)
} else {
// `SEND_HANDLERS` only tracks the arcs in the native helper.
assert!(hn1 == 0 && hn2 == 0, "hn1 {} hn2 {}", hn1, hn2)
}
}
destruction_check(alice).await;
destruction_check(bob).await;
}
/// Send and receive messages of various length and chunking via the DHT.
pub async fn peers_dht() { peers_exchange(json! ({"dht": "on"})).await }
#[cfg(not(feature = "native"))]
#[no_mangle]
pub extern "C" fn test_peers_dht(cb_id: i32) {
use std::ptr::null;
common::executor::spawn(async move {
peers_dht().await;
unsafe { call_back(cb_id, null(), 0) }
})
}
/// Using a minimal one second HTTP fallback which should happen before the DHT kicks in.
#[cfg(feature = "native")]
pub fn peers_http_fallback_recv() {
let ctx = MmCtxBuilder::new().into_mm_arc();
let addr = SocketAddr::new(unwrap!("127.0.0.1".parse()), 30204);
let server = unwrap!(super::http_fallback::new_http_fallback(ctx.weak(), addr));
unwrap!(CORE.lock()).spawn(server);
block_on(peers_exchange(json! ({
"http-fallback": "on",
"seednodes": ["127.0.0.1"],
"http-fallback-port": 30204
})))
}
#[cfg(not(feature = "native"))]
pub fn peers_http_fallback_recv() {}
#[cfg(feature = "native")]
pub fn peers_direct_send() {
use common::for_tests::wait_for_log;
// Unstable results on our MacOS CI server,
// which isn't a problem in general (direct UDP communication is a best effort optimization)
// but is bad for the CI tests.
// Might experiment more with MacOS in the future.
if cfg!(target_os = "macos") {
return;
}
// NB: Still need the DHT enabled in order for the pings to work.
let alice = block_on(peer(json! ({"dht": "on"}), 2121));
let bob = block_on(peer(json! ({"dht": "on"}), 2122));
let bob_id = unwrap!(bob.public_id());
// Bob isn't a friend yet.
let alice_pctx = unwrap!(super::PeersContext::from_ctx(&alice));
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(!alice_trans.friends.contains_key(&bob_id))
}
let mut rng = small_rng();
let message: Vec<u8> = (0..33).map(|_| rng.gen()).collect();
let _send_f = block_on(super::send(
alice.clone(),
bob_id,
Vec::from(&b"subj"[..]),
255,
message.clone(),
));
let recv_f = super::recv(
bob.clone(),
Vec::from(&b"subj"[..]),
255,
super::FixedValidator::AnythingGoes,
);
// Confirm that Bob was added into the friendlist and that we don't know its address yet.
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(alice_trans.friends.contains_key(&bob_id))
}
let bob_pctx = unwrap!(super::PeersContext::from_ctx(&bob));
assert_eq!(0, alice_pctx.direct_pings.load(Ordering::Relaxed));
assert_eq!(0, bob_pctx.direct_pings.load(Ordering::Relaxed));
// Hint at the Bob's endpoint.
unwrap!(super::investigate_peer(&alice, "127.0.0.1", 2122));
// Direct pings triggered by `investigate_peer`.
// NB: The sleep here is larger than expected because the actual pings start to fly only after the DHT initialization kicks in.
unwrap!(wait_for_log(&bob.log, 22., &|_| bob_pctx
.direct_pings
.load(Ordering::Relaxed)
> 0));
// Bob's reply.
unwrap!(wait_for_log(&alice.log, 22., &|_| alice_pctx
.direct_pings
.load(Ordering::Relaxed)
> 0));
// Confirm that Bob now has the address.
let bob_addr = SocketAddr::new(Ipv4Addr::new(127, 0, 0, 1).into(), 2122);
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(alice_trans.friends[&bob_id].endpoints.contains_key(&bob_addr))
}
// Finally see if Bob got the message.
unwrap!(wait_for_log(&bob.log, 1., &|_| bob_pctx | let received = unwrap!(block_on(recv_f));
assert_eq!(received, message);
assert!(now_float() - start < 0.1); // Double-check that we're not waiting for DHT chunks.
block_on(destruction_check(alice));
block_on(destruction_check(bob));
}
/// Check the primitives used to communicate with the HTTP fallback server.
/// These are useful in implementing NAT traversal in situations
/// where a truly distributed no-single-point-of failure operation is not necessary,
/// like when we're using the fallback server to drive a tested mm2 instance.
#[cfg(feature = "native")]
pub fn peers_http_fallback_kv() {
let ctx = MmCtxBuilder::new().into_mm_arc();
let addr = SocketAddr::new(unwrap!("127.0.0.1".parse()), 30205);
let server = unwrap!(super::http_fallback::new_http_fallback(ctx.weak(), addr));
unwrap!(CORE.lock()).spawn(server);
// Wait for the HTTP server to start.
thread::sleep(Duration::from_millis(20));
// Insert several entries in parallel, relying on CRDT to ensure that no entry is lost.
let entries = 9;
let mut handles = Vec::with_capacity(entries);
for en in 1..=entries {
let unique_actor_id = (99 + en) as UniqueActorId;
let key = fomat!((en));
let f = super::http_fallback::fetch_map(&addr, Vec::from(&b"test-id"[..]));
let f = f.and_then(move |mut map| {
let read_ctx = map.len();
map.apply(map.update(key, read_ctx.derive_add_ctx(unique_actor_id), |set, ctx| {
set.add("1".into(), ctx)
}));
super::http_fallback::merge_map(&addr, Vec::from(&b"test-id"[..]), &map)
});
handles.push((en, drive(f)))
}
for (en, f) in handles {
let map = unwrap!(unwrap!(f.wait()));
let _v = unwrap!(map.get(&fomat!((en))).val, "No such value: {}", en);
}
// See if all entries survived.
let map = unwrap!(super::http_fallback::fetch_map(&addr, Vec::from(&b"test-id"[..])).wait());
for en in 1..=entries {
let v = unwrap!(map.get(&fomat!((en))).val, "No such value: {}", en);
let members = v.read().val;
log! ("members of " (en) ": " [members]);
}
// TODO: Shut down the HTTP server as well.
drop(ctx)
}
#[cfg(not(feature = "native"))]
pub fn peers_http_fallback_kv() {} | .direct_chunks
.load(Ordering::Relaxed)
> 0));
let start = now_float(); | random_line_split |
peers_tests.rs | #[cfg(not(feature = "native"))] use common::call_back;
use common::executor::Timer;
use common::for_tests::wait_for_log_re;
use common::mm_ctx::{MmArc, MmCtxBuilder};
use common::privkey::key_pair_from_seed;
#[cfg(feature = "native")] use common::wio::{drive, CORE};
use common::{block_on, now_float, small_rng};
use crdts::CmRDT;
use futures::future::{select, Either};
use futures01::Future;
use rand::{self, Rng, RngCore};
use serde_bytes::ByteBuf;
use serde_json::Value as Json;
use std::net::{Ipv4Addr, SocketAddr};
#[cfg(not(feature = "native"))] use std::os::raw::c_char;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::Duration;
#[cfg(feature = "native")]
use crate::http_fallback::UniqueActorId;
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn ulimit_n() -> Option<u32> {
use std::mem::zeroed;
let mut lim: libc::rlimit = unsafe { zeroed() };
let rc = unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut lim) };
if rc == 0 {
Some(lim.rlim_cur as u32)
} else {
None
}
}
#[cfg(not(any(target_os = "macos", target_os = "linux")))]
fn ulimit_n() -> Option<u32> { None }
async fn peer(conf: Json, port: u16) -> MmArc {
if let Some(n) = ulimit_n() {
assert!(n > 2000, "`ulimit -n` is too low: {}", n)
}
let ctx = MmCtxBuilder::new().with_conf(conf).into_mm_arc();
unwrap!(ctx.log.thread_gravity_on());
let seed = fomat!((small_rng().next_u64()));
unwrap!(ctx.secp256k1_key_pair.pin(unwrap!(key_pair_from_seed(&seed))));
if let Some(seednodes) = ctx.conf["seednodes"].as_array() {
let mut seeds = unwrap!(ctx.seeds.lock());
assert!(seeds.is_empty()); // `fn lp_initpeers` was not invoked.
assert!(!seednodes.is_empty());
seeds.push(unwrap!(unwrap!(seednodes[0].as_str()).parse()))
}
unwrap!(super::initialize(&ctx, 9999, port).await);
ctx
}
async fn destruction_check(mm: MmArc) {
mm.stop();
if let Err(err) = wait_for_log_re(&mm, 1., "delete_dugout finished!").await {
// NB: We want to know if/when the `peers` destruction doesn't happen, but we don't want to panic about it.
pintln!((err))
}
}
async fn peers_exchange(conf: Json) {
let fallback_on = conf["http-fallback"] == "on";
let fallback = if fallback_on { 1 } else { 255 };
let alice = peer(conf.clone(), 2111).await;
let bob = peer(conf, 2112).await;
if !fallback_on {
unwrap!(wait_for_log_re(&alice, 99., r"\[dht-boot] DHT bootstrap \.\.\. Done\.").await);
unwrap!(wait_for_log_re(&bob, 33., r"\[dht-boot] DHT bootstrap \.\.\. Done\.").await);
}
let tested_lengths: &[usize] = &[
2222, // Send multiple chunks.
1, // Reduce the number of chunks *in the same subject*.
// 992 /* (1000 - bencode overhead - checksum) */ * 253 /* Compatible with (1u8..) */ - 1 /* space for number_of_chunks */
];
let mut rng = small_rng();
for message_len in tested_lengths.iter() {
// Send a message to Bob.
let message: Vec<u8> = (0..*message_len).map(|_| rng.gen()).collect();
log! ("Sending " (message.len()) " bytes …");
let bob_id = unwrap!(bob.public_id());
let sending_f = unwrap!(
super::send(
alice.clone(),
bob_id,
Vec::from(&b"test_dht"[..]),
fallback,
message.clone()
)
.await
);
// Get that message from Alice.
let validator = super::FixedValidator::Exact(ByteBuf::from(&message[..]));
let rc = super::recv(bob.clone(), Vec::from(&b"test_dht"[..]), fallback, validator);
let rc = select(Box::pin(rc), Timer::sleep(99.)).await;
let received = match rc {
Either::Left((rc, _)) => unwrap!(rc),
Either::Right(_) => panic!("Out of time waiting for reply"),
};
assert_eq!(received, message);
if fallback_on {
// TODO: Refine the log test.
// TODO: Check that the HTTP fallback was NOT used if `!fallback_on`.
unwrap!(wait_for_log_re(&alice, 0.1, r"transmit] TBD, time to use the HTTP fallback\.\.\.").await)
// TODO: Check the time delta, with fallback 1 the delivery shouldn't take long.
}
let hn1 = crate::send_handlers_num();
drop(sending_f);
let hn2 = crate::send_handlers_num();
if cfg!(feature = "native") {
// Dropping SendHandlerRef results in the removal of the corresponding `Arc<SendHandler>`.
assert!(hn1 > 0 && hn2 == hn1 - 1, "hn1 {} hn2 {}", hn1, hn2)
} else {
// `SEND_HANDLERS` only tracks the arcs in the native helper.
assert!(hn1 == 0 && hn2 == 0, "hn1 {} hn2 {}", hn1, hn2)
}
}
destruction_check(alice).await;
destruction_check(bob).await;
}
/// Send and receive messages of various length and chunking via the DHT.
pub async fn peers_dht() { peers_exchange(json! ({"dht": "on"})).await }
#[cfg(not(feature = "native"))]
#[no_mangle]
pub extern "C" fn test_peers_dht(cb_id: i32) {
| /// Using a minimal one second HTTP fallback which should happen before the DHT kicks in.
#[cfg(feature = "native")]
pub fn peers_http_fallback_recv() {
let ctx = MmCtxBuilder::new().into_mm_arc();
let addr = SocketAddr::new(unwrap!("127.0.0.1".parse()), 30204);
let server = unwrap!(super::http_fallback::new_http_fallback(ctx.weak(), addr));
unwrap!(CORE.lock()).spawn(server);
block_on(peers_exchange(json! ({
"http-fallback": "on",
"seednodes": ["127.0.0.1"],
"http-fallback-port": 30204
})))
}
#[cfg(not(feature = "native"))]
pub fn peers_http_fallback_recv() {}
#[cfg(feature = "native")]
pub fn peers_direct_send() {
use common::for_tests::wait_for_log;
// Unstable results on our MacOS CI server,
// which isn't a problem in general (direct UDP communication is a best effort optimization)
// but is bad for the CI tests.
// Might experiment more with MacOS in the future.
if cfg!(target_os = "macos") {
return;
}
// NB: Still need the DHT enabled in order for the pings to work.
let alice = block_on(peer(json! ({"dht": "on"}), 2121));
let bob = block_on(peer(json! ({"dht": "on"}), 2122));
let bob_id = unwrap!(bob.public_id());
// Bob isn't a friend yet.
let alice_pctx = unwrap!(super::PeersContext::from_ctx(&alice));
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(!alice_trans.friends.contains_key(&bob_id))
}
let mut rng = small_rng();
let message: Vec<u8> = (0..33).map(|_| rng.gen()).collect();
let _send_f = block_on(super::send(
alice.clone(),
bob_id,
Vec::from(&b"subj"[..]),
255,
message.clone(),
));
let recv_f = super::recv(
bob.clone(),
Vec::from(&b"subj"[..]),
255,
super::FixedValidator::AnythingGoes,
);
// Confirm that Bob was added into the friendlist and that we don't know its address yet.
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(alice_trans.friends.contains_key(&bob_id))
}
let bob_pctx = unwrap!(super::PeersContext::from_ctx(&bob));
assert_eq!(0, alice_pctx.direct_pings.load(Ordering::Relaxed));
assert_eq!(0, bob_pctx.direct_pings.load(Ordering::Relaxed));
// Hint at the Bob's endpoint.
unwrap!(super::investigate_peer(&alice, "127.0.0.1", 2122));
// Direct pings triggered by `investigate_peer`.
// NB: The sleep here is larger than expected because the actual pings start to fly only after the DHT initialization kicks in.
unwrap!(wait_for_log(&bob.log, 22., &|_| bob_pctx
.direct_pings
.load(Ordering::Relaxed)
> 0));
// Bob's reply.
unwrap!(wait_for_log(&alice.log, 22., &|_| alice_pctx
.direct_pings
.load(Ordering::Relaxed)
> 0));
// Confirm that Bob now has the address.
let bob_addr = SocketAddr::new(Ipv4Addr::new(127, 0, 0, 1).into(), 2122);
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(alice_trans.friends[&bob_id].endpoints.contains_key(&bob_addr))
}
// Finally see if Bob got the message.
unwrap!(wait_for_log(&bob.log, 1., &|_| bob_pctx
.direct_chunks
.load(Ordering::Relaxed)
> 0));
let start = now_float();
let received = unwrap!(block_on(recv_f));
assert_eq!(received, message);
assert!(now_float() - start < 0.1); // Double-check that we're not waiting for DHT chunks.
block_on(destruction_check(alice));
block_on(destruction_check(bob));
}
/// Check the primitives used to communicate with the HTTP fallback server.
/// These are useful in implementing NAT traversal in situations
/// where a truly distributed no-single-point-of failure operation is not necessary,
/// like when we're using the fallback server to drive a tested mm2 instance.
#[cfg(feature = "native")]
pub fn peers_http_fallback_kv() {
let ctx = MmCtxBuilder::new().into_mm_arc();
let addr = SocketAddr::new(unwrap!("127.0.0.1".parse()), 30205);
let server = unwrap!(super::http_fallback::new_http_fallback(ctx.weak(), addr));
unwrap!(CORE.lock()).spawn(server);
// Wait for the HTTP server to start.
thread::sleep(Duration::from_millis(20));
// Insert several entries in parallel, relying on CRDT to ensure that no entry is lost.
let entries = 9;
let mut handles = Vec::with_capacity(entries);
for en in 1..=entries {
let unique_actor_id = (99 + en) as UniqueActorId;
let key = fomat!((en));
let f = super::http_fallback::fetch_map(&addr, Vec::from(&b"test-id"[..]));
let f = f.and_then(move |mut map| {
let read_ctx = map.len();
map.apply(map.update(key, read_ctx.derive_add_ctx(unique_actor_id), |set, ctx| {
set.add("1".into(), ctx)
}));
super::http_fallback::merge_map(&addr, Vec::from(&b"test-id"[..]), &map)
});
handles.push((en, drive(f)))
}
for (en, f) in handles {
let map = unwrap!(unwrap!(f.wait()));
let _v = unwrap!(map.get(&fomat!((en))).val, "No such value: {}", en);
}
// See if all entries survived.
let map = unwrap!(super::http_fallback::fetch_map(&addr, Vec::from(&b"test-id"[..])).wait());
for en in 1..=entries {
let v = unwrap!(map.get(&fomat!((en))).val, "No such value: {}", en);
let members = v.read().val;
log! ("members of " (en) ": " [members]);
}
// TODO: Shut down the HTTP server as well.
drop(ctx)
}
#[cfg(not(feature = "native"))]
pub fn peers_http_fallback_kv() {}
| use std::ptr::null;
common::executor::spawn(async move {
peers_dht().await;
unsafe { call_back(cb_id, null(), 0) }
})
}
| identifier_body |
peers_tests.rs | #[cfg(not(feature = "native"))] use common::call_back;
use common::executor::Timer;
use common::for_tests::wait_for_log_re;
use common::mm_ctx::{MmArc, MmCtxBuilder};
use common::privkey::key_pair_from_seed;
#[cfg(feature = "native")] use common::wio::{drive, CORE};
use common::{block_on, now_float, small_rng};
use crdts::CmRDT;
use futures::future::{select, Either};
use futures01::Future;
use rand::{self, Rng, RngCore};
use serde_bytes::ByteBuf;
use serde_json::Value as Json;
use std::net::{Ipv4Addr, SocketAddr};
#[cfg(not(feature = "native"))] use std::os::raw::c_char;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::Duration;
#[cfg(feature = "native")]
use crate::http_fallback::UniqueActorId;
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn ulimit_n() -> Option<u32> {
use std::mem::zeroed;
let mut lim: libc::rlimit = unsafe { zeroed() };
let rc = unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut lim) };
if rc == 0 {
Some(lim.rlim_cur as u32)
} else {
None
}
}
#[cfg(not(any(target_os = "macos", target_os = "linux")))]
fn ulimit_n() -> Option<u32> { None }
async fn peer(conf: Json, port: u16) -> MmArc {
if let Some(n) = ulimit_n() {
assert!(n > 2000, "`ulimit -n` is too low: {}", n)
}
let ctx = MmCtxBuilder::new().with_conf(conf).into_mm_arc();
unwrap!(ctx.log.thread_gravity_on());
let seed = fomat!((small_rng().next_u64()));
unwrap!(ctx.secp256k1_key_pair.pin(unwrap!(key_pair_from_seed(&seed))));
if let Some(seednodes) = ctx.conf["seednodes"].as_array() {
let mut seeds = unwrap!(ctx.seeds.lock());
assert!(seeds.is_empty()); // `fn lp_initpeers` was not invoked.
assert!(!seednodes.is_empty());
seeds.push(unwrap!(unwrap!(seednodes[0].as_str()).parse()))
}
unwrap!(super::initialize(&ctx, 9999, port).await);
ctx
}
async fn destruction_check(mm: MmArc) {
mm.stop();
if let Err(err) = wait_for_log_re(&mm, 1., "delete_dugout finished!").await {
// NB: We want to know if/when the `peers` destruction doesn't happen, but we don't want to panic about it.
pintln!((err))
}
}
async fn peers_exchange(conf: Json) {
let fallback_on = conf["http-fallback"] == "on";
let fallback = if fallback_on { 1 } else { 255 };
let alice = peer(conf.clone(), 2111).await;
let bob = peer(conf, 2112).await;
if !fallback_on {
unwrap!(wait_for_log_re(&alice, 99., r"\[dht-boot] DHT bootstrap \.\.\. Done\.").await);
unwrap!(wait_for_log_re(&bob, 33., r"\[dht-boot] DHT bootstrap \.\.\. Done\.").await);
}
let tested_lengths: &[usize] = &[
2222, // Send multiple chunks.
1, // Reduce the number of chunks *in the same subject*.
// 992 /* (1000 - bencode overhead - checksum) */ * 253 /* Compatible with (1u8..) */ - 1 /* space for number_of_chunks */
];
let mut rng = small_rng();
for message_len in tested_lengths.iter() {
// Send a message to Bob.
let message: Vec<u8> = (0..*message_len).map(|_| rng.gen()).collect();
log! ("Sending " (message.len()) " bytes …");
let bob_id = unwrap!(bob.public_id());
let sending_f = unwrap!(
super::send(
alice.clone(),
bob_id,
Vec::from(&b"test_dht"[..]),
fallback,
message.clone()
)
.await
);
// Get that message from Alice.
let validator = super::FixedValidator::Exact(ByteBuf::from(&message[..]));
let rc = super::recv(bob.clone(), Vec::from(&b"test_dht"[..]), fallback, validator);
let rc = select(Box::pin(rc), Timer::sleep(99.)).await;
let received = match rc {
Either::Left((rc, _)) => unwrap!(rc),
Either::Right(_) => panic!("Out of time waiting for reply"),
};
assert_eq!(received, message);
if fallback_on {
// TODO: Refine the log test.
// TODO: Check that the HTTP fallback was NOT used if `!fallback_on`.
unwrap!(wait_for_log_re(&alice, 0.1, r"transmit] TBD, time to use the HTTP fallback\.\.\.").await)
// TODO: Check the time delta, with fallback 1 the delivery shouldn't take long.
}
let hn1 = crate::send_handlers_num();
drop(sending_f);
let hn2 = crate::send_handlers_num();
if cfg!(feature = "native") {
// Dropping SendHandlerRef results in the removal of the corresponding `Arc<SendHandler>`.
assert!(hn1 > 0 && hn2 == hn1 - 1, "hn1 {} hn2 {}", hn1, hn2)
} else {
// `SEND_HANDLERS` only tracks the arcs in the native helper.
assert!(hn1 == 0 && hn2 == 0, "hn1 {} hn2 {}", hn1, hn2)
}
}
destruction_check(alice).await;
destruction_check(bob).await;
}
/// Send and receive messages of various length and chunking via the DHT.
pub async fn peers_dht() { peers_exchange(json! ({"dht": "on"})).await }
#[cfg(not(feature = "native"))]
#[no_mangle]
pub extern "C" fn test_peers_dht(cb_id: i32) {
use std::ptr::null;
common::executor::spawn(async move {
peers_dht().await;
unsafe { call_back(cb_id, null(), 0) }
})
}
/// Using a minimal one second HTTP fallback which should happen before the DHT kicks in.
#[cfg(feature = "native")]
pub fn peers_http_fallback_recv() {
let ctx = MmCtxBuilder::new().into_mm_arc();
let addr = SocketAddr::new(unwrap!("127.0.0.1".parse()), 30204);
let server = unwrap!(super::http_fallback::new_http_fallback(ctx.weak(), addr));
unwrap!(CORE.lock()).spawn(server);
block_on(peers_exchange(json! ({
"http-fallback": "on",
"seednodes": ["127.0.0.1"],
"http-fallback-port": 30204
})))
}
#[cfg(not(feature = "native"))]
pub fn peers_http_fallback_recv() {}
#[cfg(feature = "native")]
pub fn peers_direct_send() {
use common::for_tests::wait_for_log;
// Unstable results on our MacOS CI server,
// which isn't a problem in general (direct UDP communication is a best effort optimization)
// but is bad for the CI tests.
// Might experiment more with MacOS in the future.
if cfg!(target_os = "macos") {
| // NB: Still need the DHT enabled in order for the pings to work.
let alice = block_on(peer(json! ({"dht": "on"}), 2121));
let bob = block_on(peer(json! ({"dht": "on"}), 2122));
let bob_id = unwrap!(bob.public_id());
// Bob isn't a friend yet.
let alice_pctx = unwrap!(super::PeersContext::from_ctx(&alice));
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(!alice_trans.friends.contains_key(&bob_id))
}
let mut rng = small_rng();
let message: Vec<u8> = (0..33).map(|_| rng.gen()).collect();
let _send_f = block_on(super::send(
alice.clone(),
bob_id,
Vec::from(&b"subj"[..]),
255,
message.clone(),
));
let recv_f = super::recv(
bob.clone(),
Vec::from(&b"subj"[..]),
255,
super::FixedValidator::AnythingGoes,
);
// Confirm that Bob was added into the friendlist and that we don't know its address yet.
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(alice_trans.friends.contains_key(&bob_id))
}
let bob_pctx = unwrap!(super::PeersContext::from_ctx(&bob));
assert_eq!(0, alice_pctx.direct_pings.load(Ordering::Relaxed));
assert_eq!(0, bob_pctx.direct_pings.load(Ordering::Relaxed));
// Hint at the Bob's endpoint.
unwrap!(super::investigate_peer(&alice, "127.0.0.1", 2122));
// Direct pings triggered by `investigate_peer`.
// NB: The sleep here is larger than expected because the actual pings start to fly only after the DHT initialization kicks in.
unwrap!(wait_for_log(&bob.log, 22., &|_| bob_pctx
.direct_pings
.load(Ordering::Relaxed)
> 0));
// Bob's reply.
unwrap!(wait_for_log(&alice.log, 22., &|_| alice_pctx
.direct_pings
.load(Ordering::Relaxed)
> 0));
// Confirm that Bob now has the address.
let bob_addr = SocketAddr::new(Ipv4Addr::new(127, 0, 0, 1).into(), 2122);
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(alice_trans.friends[&bob_id].endpoints.contains_key(&bob_addr))
}
// Finally see if Bob got the message.
unwrap!(wait_for_log(&bob.log, 1., &|_| bob_pctx
.direct_chunks
.load(Ordering::Relaxed)
> 0));
let start = now_float();
let received = unwrap!(block_on(recv_f));
assert_eq!(received, message);
assert!(now_float() - start < 0.1); // Double-check that we're not waiting for DHT chunks.
block_on(destruction_check(alice));
block_on(destruction_check(bob));
}
/// Check the primitives used to communicate with the HTTP fallback server.
/// These are useful in implementing NAT traversal in situations
/// where a truly distributed no-single-point-of failure operation is not necessary,
/// like when we're using the fallback server to drive a tested mm2 instance.
#[cfg(feature = "native")]
pub fn peers_http_fallback_kv() {
let ctx = MmCtxBuilder::new().into_mm_arc();
let addr = SocketAddr::new(unwrap!("127.0.0.1".parse()), 30205);
let server = unwrap!(super::http_fallback::new_http_fallback(ctx.weak(), addr));
unwrap!(CORE.lock()).spawn(server);
// Wait for the HTTP server to start.
thread::sleep(Duration::from_millis(20));
// Insert several entries in parallel, relying on CRDT to ensure that no entry is lost.
let entries = 9;
let mut handles = Vec::with_capacity(entries);
for en in 1..=entries {
let unique_actor_id = (99 + en) as UniqueActorId;
let key = fomat!((en));
let f = super::http_fallback::fetch_map(&addr, Vec::from(&b"test-id"[..]));
let f = f.and_then(move |mut map| {
let read_ctx = map.len();
map.apply(map.update(key, read_ctx.derive_add_ctx(unique_actor_id), |set, ctx| {
set.add("1".into(), ctx)
}));
super::http_fallback::merge_map(&addr, Vec::from(&b"test-id"[..]), &map)
});
handles.push((en, drive(f)))
}
for (en, f) in handles {
let map = unwrap!(unwrap!(f.wait()));
let _v = unwrap!(map.get(&fomat!((en))).val, "No such value: {}", en);
}
// See if all entries survived.
let map = unwrap!(super::http_fallback::fetch_map(&addr, Vec::from(&b"test-id"[..])).wait());
for en in 1..=entries {
let v = unwrap!(map.get(&fomat!((en))).val, "No such value: {}", en);
let members = v.read().val;
log! ("members of " (en) ": " [members]);
}
// TODO: Shut down the HTTP server as well.
drop(ctx)
}
#[cfg(not(feature = "native"))]
pub fn peers_http_fallback_kv() {}
| return;
}
| conditional_block |
peers_tests.rs | #[cfg(not(feature = "native"))] use common::call_back;
use common::executor::Timer;
use common::for_tests::wait_for_log_re;
use common::mm_ctx::{MmArc, MmCtxBuilder};
use common::privkey::key_pair_from_seed;
#[cfg(feature = "native")] use common::wio::{drive, CORE};
use common::{block_on, now_float, small_rng};
use crdts::CmRDT;
use futures::future::{select, Either};
use futures01::Future;
use rand::{self, Rng, RngCore};
use serde_bytes::ByteBuf;
use serde_json::Value as Json;
use std::net::{Ipv4Addr, SocketAddr};
#[cfg(not(feature = "native"))] use std::os::raw::c_char;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::Duration;
#[cfg(feature = "native")]
use crate::http_fallback::UniqueActorId;
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn ulimit_n() -> Option<u32> {
use std::mem::zeroed;
let mut lim: libc::rlimit = unsafe { zeroed() };
let rc = unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut lim) };
if rc == 0 {
Some(lim.rlim_cur as u32)
} else {
None
}
}
#[cfg(not(any(target_os = "macos", target_os = "linux")))]
fn ulimit_n() -> Option<u32> { None }
async fn peer(conf: Json, port: u16) -> MmArc {
if let Some(n) = ulimit_n() {
assert!(n > 2000, "`ulimit -n` is too low: {}", n)
}
let ctx = MmCtxBuilder::new().with_conf(conf).into_mm_arc();
unwrap!(ctx.log.thread_gravity_on());
let seed = fomat!((small_rng().next_u64()));
unwrap!(ctx.secp256k1_key_pair.pin(unwrap!(key_pair_from_seed(&seed))));
if let Some(seednodes) = ctx.conf["seednodes"].as_array() {
let mut seeds = unwrap!(ctx.seeds.lock());
assert!(seeds.is_empty()); // `fn lp_initpeers` was not invoked.
assert!(!seednodes.is_empty());
seeds.push(unwrap!(unwrap!(seednodes[0].as_str()).parse()))
}
unwrap!(super::initialize(&ctx, 9999, port).await);
ctx
}
async fn | (mm: MmArc) {
mm.stop();
if let Err(err) = wait_for_log_re(&mm, 1., "delete_dugout finished!").await {
// NB: We want to know if/when the `peers` destruction doesn't happen, but we don't want to panic about it.
pintln!((err))
}
}
async fn peers_exchange(conf: Json) {
let fallback_on = conf["http-fallback"] == "on";
let fallback = if fallback_on { 1 } else { 255 };
let alice = peer(conf.clone(), 2111).await;
let bob = peer(conf, 2112).await;
if !fallback_on {
unwrap!(wait_for_log_re(&alice, 99., r"\[dht-boot] DHT bootstrap \.\.\. Done\.").await);
unwrap!(wait_for_log_re(&bob, 33., r"\[dht-boot] DHT bootstrap \.\.\. Done\.").await);
}
let tested_lengths: &[usize] = &[
2222, // Send multiple chunks.
1, // Reduce the number of chunks *in the same subject*.
// 992 /* (1000 - bencode overhead - checksum) */ * 253 /* Compatible with (1u8..) */ - 1 /* space for number_of_chunks */
];
let mut rng = small_rng();
for message_len in tested_lengths.iter() {
// Send a message to Bob.
let message: Vec<u8> = (0..*message_len).map(|_| rng.gen()).collect();
log! ("Sending " (message.len()) " bytes …");
let bob_id = unwrap!(bob.public_id());
let sending_f = unwrap!(
super::send(
alice.clone(),
bob_id,
Vec::from(&b"test_dht"[..]),
fallback,
message.clone()
)
.await
);
// Get that message from Alice.
let validator = super::FixedValidator::Exact(ByteBuf::from(&message[..]));
let rc = super::recv(bob.clone(), Vec::from(&b"test_dht"[..]), fallback, validator);
let rc = select(Box::pin(rc), Timer::sleep(99.)).await;
let received = match rc {
Either::Left((rc, _)) => unwrap!(rc),
Either::Right(_) => panic!("Out of time waiting for reply"),
};
assert_eq!(received, message);
if fallback_on {
// TODO: Refine the log test.
// TODO: Check that the HTTP fallback was NOT used if `!fallback_on`.
unwrap!(wait_for_log_re(&alice, 0.1, r"transmit] TBD, time to use the HTTP fallback\.\.\.").await)
// TODO: Check the time delta, with fallback 1 the delivery shouldn't take long.
}
let hn1 = crate::send_handlers_num();
drop(sending_f);
let hn2 = crate::send_handlers_num();
if cfg!(feature = "native") {
// Dropping SendHandlerRef results in the removal of the corresponding `Arc<SendHandler>`.
assert!(hn1 > 0 && hn2 == hn1 - 1, "hn1 {} hn2 {}", hn1, hn2)
} else {
// `SEND_HANDLERS` only tracks the arcs in the native helper.
assert!(hn1 == 0 && hn2 == 0, "hn1 {} hn2 {}", hn1, hn2)
}
}
destruction_check(alice).await;
destruction_check(bob).await;
}
/// Send and receive messages of various length and chunking via the DHT.
pub async fn peers_dht() { peers_exchange(json! ({"dht": "on"})).await }
#[cfg(not(feature = "native"))]
#[no_mangle]
pub extern "C" fn test_peers_dht(cb_id: i32) {
use std::ptr::null;
common::executor::spawn(async move {
peers_dht().await;
unsafe { call_back(cb_id, null(), 0) }
})
}
/// Using a minimal one second HTTP fallback which should happen before the DHT kicks in.
#[cfg(feature = "native")]
pub fn peers_http_fallback_recv() {
let ctx = MmCtxBuilder::new().into_mm_arc();
let addr = SocketAddr::new(unwrap!("127.0.0.1".parse()), 30204);
let server = unwrap!(super::http_fallback::new_http_fallback(ctx.weak(), addr));
unwrap!(CORE.lock()).spawn(server);
block_on(peers_exchange(json! ({
"http-fallback": "on",
"seednodes": ["127.0.0.1"],
"http-fallback-port": 30204
})))
}
#[cfg(not(feature = "native"))]
pub fn peers_http_fallback_recv() {}
#[cfg(feature = "native")]
pub fn peers_direct_send() {
use common::for_tests::wait_for_log;
// Unstable results on our MacOS CI server,
// which isn't a problem in general (direct UDP communication is a best effort optimization)
// but is bad for the CI tests.
// Might experiment more with MacOS in the future.
if cfg!(target_os = "macos") {
return;
}
// NB: Still need the DHT enabled in order for the pings to work.
let alice = block_on(peer(json! ({"dht": "on"}), 2121));
let bob = block_on(peer(json! ({"dht": "on"}), 2122));
let bob_id = unwrap!(bob.public_id());
// Bob isn't a friend yet.
let alice_pctx = unwrap!(super::PeersContext::from_ctx(&alice));
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(!alice_trans.friends.contains_key(&bob_id))
}
let mut rng = small_rng();
let message: Vec<u8> = (0..33).map(|_| rng.gen()).collect();
let _send_f = block_on(super::send(
alice.clone(),
bob_id,
Vec::from(&b"subj"[..]),
255,
message.clone(),
));
let recv_f = super::recv(
bob.clone(),
Vec::from(&b"subj"[..]),
255,
super::FixedValidator::AnythingGoes,
);
// Confirm that Bob was added into the friendlist and that we don't know its address yet.
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(alice_trans.friends.contains_key(&bob_id))
}
let bob_pctx = unwrap!(super::PeersContext::from_ctx(&bob));
assert_eq!(0, alice_pctx.direct_pings.load(Ordering::Relaxed));
assert_eq!(0, bob_pctx.direct_pings.load(Ordering::Relaxed));
// Hint at the Bob's endpoint.
unwrap!(super::investigate_peer(&alice, "127.0.0.1", 2122));
// Direct pings triggered by `investigate_peer`.
// NB: The sleep here is larger than expected because the actual pings start to fly only after the DHT initialization kicks in.
unwrap!(wait_for_log(&bob.log, 22., &|_| bob_pctx
.direct_pings
.load(Ordering::Relaxed)
> 0));
// Bob's reply.
unwrap!(wait_for_log(&alice.log, 22., &|_| alice_pctx
.direct_pings
.load(Ordering::Relaxed)
> 0));
// Confirm that Bob now has the address.
let bob_addr = SocketAddr::new(Ipv4Addr::new(127, 0, 0, 1).into(), 2122);
{
let alice_trans = unwrap!(alice_pctx.trans_meta.lock());
assert!(alice_trans.friends[&bob_id].endpoints.contains_key(&bob_addr))
}
// Finally see if Bob got the message.
unwrap!(wait_for_log(&bob.log, 1., &|_| bob_pctx
.direct_chunks
.load(Ordering::Relaxed)
> 0));
let start = now_float();
let received = unwrap!(block_on(recv_f));
assert_eq!(received, message);
assert!(now_float() - start < 0.1); // Double-check that we're not waiting for DHT chunks.
block_on(destruction_check(alice));
block_on(destruction_check(bob));
}
/// Check the primitives used to communicate with the HTTP fallback server.
/// These are useful in implementing NAT traversal in situations
/// where a truly distributed no-single-point-of failure operation is not necessary,
/// like when we're using the fallback server to drive a tested mm2 instance.
#[cfg(feature = "native")]
pub fn peers_http_fallback_kv() {
let ctx = MmCtxBuilder::new().into_mm_arc();
let addr = SocketAddr::new(unwrap!("127.0.0.1".parse()), 30205);
let server = unwrap!(super::http_fallback::new_http_fallback(ctx.weak(), addr));
unwrap!(CORE.lock()).spawn(server);
// Wait for the HTTP server to start.
thread::sleep(Duration::from_millis(20));
// Insert several entries in parallel, relying on CRDT to ensure that no entry is lost.
let entries = 9;
let mut handles = Vec::with_capacity(entries);
for en in 1..=entries {
let unique_actor_id = (99 + en) as UniqueActorId;
let key = fomat!((en));
let f = super::http_fallback::fetch_map(&addr, Vec::from(&b"test-id"[..]));
let f = f.and_then(move |mut map| {
let read_ctx = map.len();
map.apply(map.update(key, read_ctx.derive_add_ctx(unique_actor_id), |set, ctx| {
set.add("1".into(), ctx)
}));
super::http_fallback::merge_map(&addr, Vec::from(&b"test-id"[..]), &map)
});
handles.push((en, drive(f)))
}
for (en, f) in handles {
let map = unwrap!(unwrap!(f.wait()));
let _v = unwrap!(map.get(&fomat!((en))).val, "No such value: {}", en);
}
// See if all entries survived.
let map = unwrap!(super::http_fallback::fetch_map(&addr, Vec::from(&b"test-id"[..])).wait());
for en in 1..=entries {
let v = unwrap!(map.get(&fomat!((en))).val, "No such value: {}", en);
let members = v.read().val;
log! ("members of " (en) ": " [members]);
}
// TODO: Shut down the HTTP server as well.
drop(ctx)
}
#[cfg(not(feature = "native"))]
pub fn peers_http_fallback_kv() {}
| destruction_check | identifier_name |
fbb_churn_eval_ensemb_alberto.py | # coding: utf-8
import sys
from common.src.main.python.utils.hdfs_generic import *
import argparse
import os
import sys
import time
from pyspark.sql.functions import (udf,
col,
decode,
when,
lit,
lower,
concat,
translate,
count,
sum as sql_sum,
max as sql_max,
min as sql_min,
avg as sql_avg,
greatest,
least,
isnull,
isnan,
struct,
substring,
size,
length,
year,
month,
dayofmonth,
unix_timestamp,
date_format,
from_unixtime,
datediff,
to_date,
desc,
asc,
countDistinct,
row_number)
from pyspark.sql import Row, DataFrame, Column, Window
from pyspark.sql.types import DoubleType, StringType, IntegerType, DateType, ArrayType, LongType
from pyspark.ml import Pipeline
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer, VectorAssembler, SQLTransformer, OneHotEncoder
from pyspark.ml.evaluation import MulticlassClassificationEvaluator, BinaryClassificationEvaluator
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from datetime import datetime
from itertools import chain
import numpy as np
from functools import reduce
from utils_general import *
from utils_model import *
from metadata_fbb_churn import *
from feature_selection_utils import *
import subprocess
#from date_functions import get_next_cycle
def set_paths():
'''
Deployment should be something like "dirs/dir1/use-cases"
This function adds to the path "dirs/dir1/use-cases" and "dirs/dir1/"
:return:
'''
import imp
from os.path import dirname
import os
USE_CASES = "/var/SP/data/home/asaezco/src/devel2/use-cases"#dirname(os.path.abspath(imp.find_module('churn')[1]))
if USE_CASES not in sys.path:
sys.path.append(USE_CASES)
print("Added '{}' to path".format(USE_CASES))
# if deployment is correct, this path should be the one that contains "use-cases", "pykhaos", ...
# FIXME another way of doing it more general?
DEVEL_SRC = os.path.dirname(USE_CASES) # dir before use-cases dir
if DEVEL_SRC not in sys.path:
sys.path.append(DEVEL_SRC)
print("Added '{}' to path".format(DEVEL_SRC))
####################################
### Creating Spark Session
###################################
def get_spark_session(app_name="default name", log_level='INFO', min_n_executors = 1, max_n_executors = 15, n_cores = 4, executor_memory = "32g", driver_memory="32g"):
HOME_SRC = os.path.join(os.environ.get('BDA_USER_HOME', ''), "src")
if HOME_SRC not in sys.path:
sys.path.append(HOME_SRC)
setting_bdp(app_name=app_name, min_n_executors = min_n_executors, max_n_executors = max_n_executors, n_cores = n_cores, executor_memory = executor_memory, driver_memory=driver_memory)
from common.src.main.python.utils.hdfs_generic import run_sc
sc, spark, sql_context = run_sc(log_level=log_level)
return sc, spark, sql_context
# set BDP parameters
def setting_bdp(min_n_executors = 1, max_n_executors = 15, n_cores = 8, executor_memory = "16g", driver_memory="8g",
app_name = "Python app", driver_overhead="1g", executor_overhead='3g'):
MAX_N_EXECUTORS = max_n_executors
MIN_N_EXECUTORS = min_n_executors
N_CORES_EXECUTOR = n_cores
EXECUTOR_IDLE_MAX_TIME = 120
EXECUTOR_MEMORY = executor_memory
DRIVER_MEMORY = driver_memory
N_CORES_DRIVER = 1
MEMORY_OVERHEAD = N_CORES_EXECUTOR * 2048
QUEUE = "root.BDPtenants.es.medium"
BDA_CORE_VERSION = "1.0.0"
SPARK_COMMON_OPTS = os.environ.get('SPARK_COMMON_OPTS', '')
SPARK_COMMON_OPTS += " --executor-memory %s --driver-memory %s" % (EXECUTOR_MEMORY, DRIVER_MEMORY)
SPARK_COMMON_OPTS += " --conf spark.shuffle.manager=tungsten-sort"
SPARK_COMMON_OPTS += " --queue %s" % QUEUE
# Dynamic allocation configuration
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.enabled=true"
SPARK_COMMON_OPTS += " --conf spark.shuffle.service.enabled=true"
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.maxExecutors=%s" % (MAX_N_EXECUTORS)
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.minExecutors=%s" % (MIN_N_EXECUTORS)
SPARK_COMMON_OPTS += " --conf spark.executor.cores=%s" % (N_CORES_EXECUTOR)
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.executorIdleTimeout=%s" % (EXECUTOR_IDLE_MAX_TIME)
# SPARK_COMMON_OPTS += " --conf spark.ui.port=58235"
SPARK_COMMON_OPTS += " --conf spark.port.maxRetries=100"
SPARK_COMMON_OPTS += " --conf spark.app.name='%s'" % (app_name)
SPARK_COMMON_OPTS += " --conf spark.submit.deployMode=client"
SPARK_COMMON_OPTS += " --conf spark.ui.showConsoleProgress=true"
SPARK_COMMON_OPTS += " --conf spark.sql.broadcastTimeout=1200"
SPARK_COMMON_OPTS += " --conf spark.yarn.executor.memoryOverhead={}".format(executor_overhead)
SPARK_COMMON_OPTS += " --conf spark.yarn.executor.driverOverhead={}".format(driver_overhead)
SPARK_COMMON_OPTS += " --conf spark.shuffle.service.enabled = true"
BDA_ENV = os.environ.get('BDA_USER_HOME', '')
# Attach bda-core-ra codebase
SPARK_COMMON_OPTS+=" --files {}/scripts/properties/red_agent/nodes.properties,{}/scripts/properties/red_agent/nodes-de.properties,{}/scripts/properties/red_agent/nodes-es.properties,{}/scripts/properties/red_agent/nodes-ie.properties,{}/scripts/properties/red_agent/nodes-it.properties,{}/scripts/properties/red_agent/nodes-pt.properties,{}/scripts/properties/red_agent/nodes-uk.properties".format(*[BDA_ENV]*7)
os.environ["SPARK_COMMON_OPTS"] = SPARK_COMMON_OPTS
os.environ["PYSPARK_SUBMIT_ARGS"] = "%s pyspark-shell " % SPARK_COMMON_OPTS
#os.environ["SPARK_EXTRA_CONF_PARAMETERS"] = '--conf spark.yarn.jars=hdfs:///data/raw/public/lib_spark_2_1_0_jars_SPARK-18971/*'
def initialize(app_name, min_n_executors = 1, max_n_executors = 15, n_cores = 4, executor_memory = "16g", driver_memory="8g"):
import time
start_time = time.time()
print("_initialize spark")
#import pykhaos.utils.pyspark_configuration as pyspark_config
sc, spark, sql_context = get_spark_session(app_name=app_name, log_level="OFF", min_n_executors = min_n_executors, max_n_executors = max_n_executors, n_cores = n_cores,
executor_memory = executor_memory, driver_memory=driver_memory)
print("Ended spark session: {} secs | default parallelism={}".format(time.time() - start_time,
sc.defaultParallelism))
return spark
if __name__ == "__main__":
set_paths()
from pykhaos.utils.date_functions import *
from utils_fbb_churn import *
# create Spark context with Spark configuration
print '[' + time.ctime() + ']', 'Process started'
global sqlContext
spark = initialize("VF_ES AMDOCS FBB Churn Prediction ", executor_memory="16g", min_n_executors=10)
print('Spark Configuration used', spark.sparkContext.getConf().getAll())
selcols = getIdFeats() + getCrmFeats() + getBillingFeats() + getMobSopoFeats() + getOrdersFeats()
now = datetime.now()
date_name = str(now.year) + str(now.month).rjust(2, '0') + str(now.day).rjust(2, '0')
origin = '/user/hive/warehouse/tests_es.db/jvmm_amdocs_ids_'
## ARGUMENTS
###############
parser = argparse.ArgumentParser(
description='Generate score table for fbb model',
epilog='Please report bugs and issues to Beatriz <beatriz.gonzalez2@vodafone.com>')
parser.add_argument('-s', '--training_day', metavar='<TRAINING_DAY>', type=str, required=True,
help='Training day YYYYMMDD. Date of the CAR taken to train the model.')
parser.add_argument('-p', '--prediction_day', metavar='<PREDICTION_DAY>', type=str, required=True,
help='Prediction day YYYYMMDD.')
parser.add_argument('-o', '--horizon', metavar='<horizon>', type=int, required=True,
help='Number of cycles used to gather the portability requests from the training day.')
args = parser.parse_args()
print(args)
# Cycle used for CAR and Extra Feats in the training set
trcycle_ini = args.training_day# '20181130' # Training data
# Number of cycles to gather dismiss requests
horizon = args.horizon #4
# Cycle used for CAR and Extra Feats in the test set
ttcycle_ini = args.prediction_day#'20181231' # Test data
tr_ttdates = trcycle_ini + '_' + ttcycle_ini
########################
### 1. TRAINING DATA ###
########################
# 1.1. Loading training data
inittrdf_ini = getFbbChurnLabeledCarCycles(spark, origin, trcycle_ini, selcols, horizon)
#inittrdf_ini.repartition(200).write.save(path, format='parquet', mode='overwrite')
## Reading the Extra Features
dfExtraFeat = spark.read.parquet('/data/udf/vf_es/churn/extra_feats_mod/extra_feats/year={}/month={}/day={}'
.format(int(trcycle_ini[0:4]), int(trcycle_ini[4:6]), int(trcycle_ini[6:8])))
# Taking only the clients with a fbb service
dfExtraFeatfbb = dfExtraFeat.join(inittrdf_ini, ["num_cliente"], "leftsemi")
dfExtraFeatfbb = dfExtraFeatfbb.cache()
print "[Info Main FbbChurn] " + time.ctime() + " Count of the ExtraFeats: ", dfExtraFeatfbb.count()
# Taking the Extra Features of interest and adding their values for num_client when necessary
dfExtraFeatSel, selColumnas = addExtraFeatsEvol(dfExtraFeatfbb)
print "[Info Main FbbChurn] " + time.ctime() + " Calculating the total value of the extra feats for each number client"
dfillNa = fillNa(spark)
for kkey in dfillNa.keys():
if kkey not in dfExtraFeatSel.columns:
dfillNa.pop(kkey, None)
inittrdf = inittrdf_ini.join(dfExtraFeatSel, ["msisdn", "num_cliente", 'rgu'], how="left").na.fill(dfillNa)
print "[Info Main FbbChurn] " + time.ctime() + " Saving inittrdf to HDFS " +str(inittrdf.count())
#inittrdf.repartition(200).write.save(path, format='parquet', mode='overwrite')
#path1 = '/data/udf/vf_es/churn/fbb_tmp/unbaltrdf_' + tr_ttdates
#path2 = '/data/udf/vf_es/churn/fbb_tmp/valdf_' + tr_ttdates
#if (pathExist(path1)) and (pathExist(path2)):
#print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path1) + " and " + str(path2) + " already exist. Reading them."
#unbaltrdf = spark.read.parquet(path1)
#valdf = spark.read.parquet(path2)
#else:
#print "[Info Main FbbChurn] " + time.ctime() + " Number of clients after joining the Extra Feats to the training set" + str(inittrdf.count())
[unbaltrdf, valdf] = inittrdf.randomSplit([0.7, 0.3], 1234)
#print "[Info Main FbbChurn] " + time.ctime() + " Stat description of the target variable printed above"
unbaltrdf = unbaltrdf.cache()
valdf = valdf.cache()
#print "[Info Main FbbChurn] " + time.ctime() + " Saving unbaltrdf to HDFS " + str(unbaltrdf.count())
#print "[Info Main FbbChurn] " + time.ctime() + " Saving valdf to HDFS " + str(valdf.count())
#unbaltrdf.repartition(300).write.save(path1,format='parquet', mode='overwrite')
#valdf.repartition(300).write.save(path2,format='parquet', mode='overwrite')
# 1.2. Balanced df for training
#path = "/data/udf/vf_es/churn/fbb_tmp/trdf_" + tr_ttdates
#if (pathExist(path)):
unbaltrdf.groupBy('label').agg(count('*')).show()
print "[Info Main FbbChurn]" + time.ctime() + " Count on label column for unbalanced tr set showed above"
trdf = balance_df2(unbaltrdf, 'label')
trdf.groupBy('label').agg(count('*')).show()
#print "[Info Main FbbChurn] " + time.ctime() + " Saving trdf to HDFS "
#trdf.repartition(300).write.save(path, format='parquet',mode='overwrite')
# 1.3. Feature selection
allFeats = trdf.columns
# Getting only the numeric variables
catCols = [item[0] for item in trdf.dtypes if item[1].startswith('string')]
numerical_feats = list(set(allFeats) - set(list(
set().union(getIdFeats(), getIdFeats_tr(), getNoInputFeats(), catCols, [c + "_enc" for c in getCatFeatsCrm()],
["label"]))))
noninf_feats = getNonInfFeats(trdf, numerical_feats)
for f in noninf_feats:
print "[Info Main FbbChurn] Non-informative feat: " + f
####################
### 2. TEST DATA ###
####################
#path = "/data/udf/vf_es/churn/fbb_tmp/ttdf_ini_" + tr_ttdates
#if (pathExist(path)):
#print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path) + " already exists. Reading it."
#ttdf_ini = spark.read.parquet(path)
#else:
ttdf_ini = getFbbChurnLabeledCarCycles(spark, origin, ttcycle_ini, selcols,horizon)
#print "[Info Main FbbChurn] " + time.ctime() + " Saving ttdf_ini to HDFS "
#ttdf_ini.repartition(200).write.save(path,format='parquet', mode='overwrite')
#ttdf_ini.describe('label').show()
#path = "/data/udf/vf_es/churn/fbb_tmp/ttdf_" + tr_ttdates
#if (pathExist(path)):
# print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path) + " already exists. Reading it."
# ttdf = spark.read.parquet(path)
#else:
dfExtraFeat_tt = spark.read.parquet('/data/udf/vf_es/churn/extra_feats_mod/extra_feats/year={}/month={}/day={}'
.format(int(ttcycle_ini[0:4]), int(ttcycle_ini[4:6]), int(ttcycle_ini[6:8])))
dfExtraFeatfbb_tt = dfExtraFeat_tt.join(ttdf_ini.select('num_cliente'), on='num_cliente', how='leftsemi')
print(dfExtraFeatfbb_tt.select('num_cliente').distinct().count(), ttdf_ini.select('num_cliente').distinct().count())
dfExtraFeatfbb_tt = dfExtraFeatfbb_tt.cache()
print("[Info Main FbbChurn] " + time.ctime() + " Count of the ExtraFeats ", dfExtraFeatfbb_tt.count())
dfExtraFeat_ttSel, selColumnas = addExtraFeatsEvol(dfExtraFeatfbb_tt)
#print "[Info Main FbbChurn] " + time.ctime() + " Calculating the total value of the extra feats for each number client in tt"
dfillNa = fillNa(spark)
for kkey in dfillNa.keys():
if kkey not in dfExtraFeat_ttSel.columns:
dfillNa.pop(kkey, None)
ttdf = ttdf_ini.join(dfExtraFeat_ttSel, ["msisdn", "num_cliente", 'rgu'], how="left").na.fill(dfillNa)
print "[Info Main FbbChurn] " + time.ctime() + " Number of clients after joining the Extra Feats to the test set " + str(ttdf.count())
print "[Info Main FbbChurn] " + time.ctime() + " Saving ttdf to HDFS "
#tdf = ttdf.repartition(300)
#ttdf.repartition(300).write.save(path, format='parquet', mode='overwrite')
####################
### 3. MODELLING ###
####################
featCols = list(set(numerical_feats) - set(noninf_feats))
for f in featCols:
print "[Info Main FbbChurn] Input feat: " + f
assembler = VectorAssembler(inputCols=featCols, outputCol="features")
classifier = RandomForestClassifier(featuresCol="features", \
labelCol="label", \
maxDepth=15, \
maxBins=32, \
minInstancesPerNode=200, \
impurity="gini", \
featureSubsetStrategy="sqrt", \
subsamplingRate=0.7, \
numTrees=800, \
seed=1234)
pipeline = Pipeline(stages=[assembler, classifier])
model = pipeline.fit(trdf)
feat_importance = getOrderedRelevantFeats(model, featCols, 'f', 'rf')
| ##################
### EVALUATION ###
##################
# Calibration
calibmodel = get_calibration_function2(spark, model, valdf, 'label', 10)
getScore = udf(lambda prob: float(prob[1]), DoubleType())
# Train
tr_preds_df = model.transform(trdf).withColumn("model_score", getScore(col("probability")).cast(DoubleType()))
tr_calib_preds_df = calibmodel[0].transform(tr_preds_df)
# Train evaluation
trPredictionAndLabels = tr_calib_preds_df.select(['calib_model_score', 'label']).rdd.map(lambda r: (r['calib_model_score'], r['label']))
trmetrics = BinaryClassificationMetrics(trPredictionAndLabels)
# Test eval
tt_preds_df = model.transform(ttdf).withColumn("model_score", getScore(col("probability")).cast(DoubleType()))
tt_calib_preds_df = calibmodel[0].transform(tt_preds_df)
# Evaluation
ttPredictionAndLabels = tt_calib_preds_df.select(['calib_model_score', 'label']).rdd.map(lambda r: (r['calib_model_score'], r['label']))
ttmetrics = BinaryClassificationMetrics(ttPredictionAndLabels)
print(" Area under ROC(tr) = " + str(trmetrics.areaUnderROC))
print(" Area under ROC(tt) = " + str(ttmetrics.areaUnderROC))
print("[Info Main FbbChurn] " + time.ctime() + " Area under ROC(tr) = " + str(trmetrics.areaUnderROC) + " - Area under ROC(tt) = " + str(ttmetrics.areaUnderROC))
lift = get_lift(tt_calib_preds_df, 'calib_model_score', 'label', 40)
for d, l in lift:
print str(d) + ": " + str(l)
print('Summary of Evaluations')
print('ttmetrics', ttmetrics.areaUnderROC)
print('trmetrics', trmetrics.areaUnderROC)
#########################
### ENSEMBLER ALBERTO ###
#########################
bad = tr_preds_df.where(col('label') != col('prediction')).drop(col('features'))
good = tr_preds_df.where(col('label') == col('prediction')).drop(col('features'))
bad_b = balance_df2(bad, 'label')
good_b = balance_df2(good, 'label')
#full_concat = tr_preds_df.union(tt_preds_df)
assembler2 = VectorAssembler(inputCols=featCols, outputCol="features")
classifier2 = RandomForestClassifier(featuresCol="features", \
labelCol="label", \
maxDepth=15, \
maxBins=32, \
minInstancesPerNode=200, \
impurity="gini", \
featureSubsetStrategy="sqrt", \
subsamplingRate=0.7, \
numTrees=800, \
seed=1234)
pipeline2 = Pipeline(stages=[assembler2, classifier2])
model_good = pipeline2.fit(good_b)
model_bad = pipeline2.fit(bad_b)
calibmodel_good = get_calibration_function2(spark, model_good, valdf, 'label', 10)
calibmodel_bad = get_calibration_function2(spark, model_bad, valdf, 'label', 10)
tr_good = model_good.transform(good_b).drop(col('features'))
tr_bad = model_bad.transform(bad_b).drop(col('features'))
tr_preds_good = tr_good.withColumn("model_score_good_calib", getScore(col("probability")).cast(DoubleType()))
tr_calib_preds_good = calibmodel_good[0].transform(tr_preds_good)
tr_preds_bad = tr_bad.withColumn("model_score_bad_calib", getScore(col("probability")).cast(DoubleType()))
tr_calib_preds_bad = calibmodel_bad[0].transform(tr_preds_bad)
final_df = tr_preds_good.select('msisdn', "model_score_good_calib").join(tr_preds_bad.select('msisdn', "model_score_bad_calib"), ['msisdn'], 'inner')
final_feats = ["model_score_good_calib", "model_score_bad_calib"]
assembler3 = VectorAssembler(inputCols=final_feats, outputCol="features")
classifier3 = RandomForestClassifier(featuresCol="features", \
labelCol="label", \
maxDepth=15, \
maxBins=32, \
minInstancesPerNode=200, \
impurity="gini", \
featureSubsetStrategy="sqrt", \
subsamplingRate=0.7, \
numTrees=800, \
seed=1234)
pipeline3 = Pipeline(stages=[assembler3, classifier3])
[tr_final, tt_final] = final_df.randomsplit([0.7, 0.3], 1234)
model_f = pipeline3.fit(tr_final)
calibmodel_final = get_calibration_function2(spark, model_f, valdf, 'label', 10)
final_tr_df_with_scores = model_f.transform(tr_final).withColumn("final_score", getScore(col("probability")).cast(DoubleType()))
final_tr_df_with_calib_scores = calibmodel_final[0].transform(final_tr_df_with_scores)
tr_PredictionAndLabels = final_tr_df_with_calib_scores.select(['final_score_calib', 'label']).rdd.map(lambda r: (r['final_score_calib'], r['label']))
final_tt_df_with_scores = model_f.transform(tt_final).withColumn("final_score", getScore(col("probability")).cast(DoubleType()))
final_tt_df_with_calib_scores = calibmodel_final[0].transform(final_tt_df_with_scores)
tt_PredictionAndLabels = final_tt_df_with_calib_scores.select(['final_score_calib', 'label']).rdd.map(lambda r: (r['final_score_calib'], r['label']))
tr_metrics_final = BinaryClassificationMetrics(tr_PredictionAndLabels)
tt_metrics_final = BinaryClassificationMetrics(tt_PredictionAndLabels)
print 'Final AUC (tr): {}'.format(tr_metrics_final.areaUnderROC)
print 'Final AUC (tt): {}'.format(tt_metrics_final.areaUnderROC)
#Calibrar los scores dándoles un nuevo nombre para que sean independientes DONE
#Combinarlos en un nuevo df DONE
#Entrenar el ultimo modelo
#Mucho ojo a que nombres espera la funcion de calibracion
print(" ")
print("[Info Main FbbChurn] Process completed")
print(" ")
spark.stop() | for fimp in feat_importance:
print "[Info Main FbbChurn] Imp feat " + str(fimp[0]) + ": " + str(fimp[1])
| random_line_split |
fbb_churn_eval_ensemb_alberto.py | # coding: utf-8
import sys
from common.src.main.python.utils.hdfs_generic import *
import argparse
import os
import sys
import time
from pyspark.sql.functions import (udf,
col,
decode,
when,
lit,
lower,
concat,
translate,
count,
sum as sql_sum,
max as sql_max,
min as sql_min,
avg as sql_avg,
greatest,
least,
isnull,
isnan,
struct,
substring,
size,
length,
year,
month,
dayofmonth,
unix_timestamp,
date_format,
from_unixtime,
datediff,
to_date,
desc,
asc,
countDistinct,
row_number)
from pyspark.sql import Row, DataFrame, Column, Window
from pyspark.sql.types import DoubleType, StringType, IntegerType, DateType, ArrayType, LongType
from pyspark.ml import Pipeline
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer, VectorAssembler, SQLTransformer, OneHotEncoder
from pyspark.ml.evaluation import MulticlassClassificationEvaluator, BinaryClassificationEvaluator
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from datetime import datetime
from itertools import chain
import numpy as np
from functools import reduce
from utils_general import *
from utils_model import *
from metadata_fbb_churn import *
from feature_selection_utils import *
import subprocess
#from date_functions import get_next_cycle
def set_paths():
'''
Deployment should be something like "dirs/dir1/use-cases"
This function adds to the path "dirs/dir1/use-cases" and "dirs/dir1/"
:return:
'''
import imp
from os.path import dirname
import os
USE_CASES = "/var/SP/data/home/asaezco/src/devel2/use-cases"#dirname(os.path.abspath(imp.find_module('churn')[1]))
if USE_CASES not in sys.path:
sys.path.append(USE_CASES)
print("Added '{}' to path".format(USE_CASES))
# if deployment is correct, this path should be the one that contains "use-cases", "pykhaos", ...
# FIXME another way of doing it more general?
DEVEL_SRC = os.path.dirname(USE_CASES) # dir before use-cases dir
if DEVEL_SRC not in sys.path:
sys.path.append(DEVEL_SRC)
print("Added '{}' to path".format(DEVEL_SRC))
####################################
### Creating Spark Session
###################################
def get_spark_session(app_name="default name", log_level='INFO', min_n_executors = 1, max_n_executors = 15, n_cores = 4, executor_memory = "32g", driver_memory="32g"):
HOME_SRC = os.path.join(os.environ.get('BDA_USER_HOME', ''), "src")
if HOME_SRC not in sys.path:
sys.path.append(HOME_SRC)
setting_bdp(app_name=app_name, min_n_executors = min_n_executors, max_n_executors = max_n_executors, n_cores = n_cores, executor_memory = executor_memory, driver_memory=driver_memory)
from common.src.main.python.utils.hdfs_generic import run_sc
sc, spark, sql_context = run_sc(log_level=log_level)
return sc, spark, sql_context
# set BDP parameters
def setting_bdp(min_n_executors = 1, max_n_executors = 15, n_cores = 8, executor_memory = "16g", driver_memory="8g",
app_name = "Python app", driver_overhead="1g", executor_overhead='3g'):
|
def initialize(app_name, min_n_executors = 1, max_n_executors = 15, n_cores = 4, executor_memory = "16g", driver_memory="8g"):
import time
start_time = time.time()
print("_initialize spark")
#import pykhaos.utils.pyspark_configuration as pyspark_config
sc, spark, sql_context = get_spark_session(app_name=app_name, log_level="OFF", min_n_executors = min_n_executors, max_n_executors = max_n_executors, n_cores = n_cores,
executor_memory = executor_memory, driver_memory=driver_memory)
print("Ended spark session: {} secs | default parallelism={}".format(time.time() - start_time,
sc.defaultParallelism))
return spark
if __name__ == "__main__":
set_paths()
from pykhaos.utils.date_functions import *
from utils_fbb_churn import *
# create Spark context with Spark configuration
print '[' + time.ctime() + ']', 'Process started'
global sqlContext
spark = initialize("VF_ES AMDOCS FBB Churn Prediction ", executor_memory="16g", min_n_executors=10)
print('Spark Configuration used', spark.sparkContext.getConf().getAll())
selcols = getIdFeats() + getCrmFeats() + getBillingFeats() + getMobSopoFeats() + getOrdersFeats()
now = datetime.now()
date_name = str(now.year) + str(now.month).rjust(2, '0') + str(now.day).rjust(2, '0')
origin = '/user/hive/warehouse/tests_es.db/jvmm_amdocs_ids_'
## ARGUMENTS
###############
parser = argparse.ArgumentParser(
description='Generate score table for fbb model',
epilog='Please report bugs and issues to Beatriz <beatriz.gonzalez2@vodafone.com>')
parser.add_argument('-s', '--training_day', metavar='<TRAINING_DAY>', type=str, required=True,
help='Training day YYYYMMDD. Date of the CAR taken to train the model.')
parser.add_argument('-p', '--prediction_day', metavar='<PREDICTION_DAY>', type=str, required=True,
help='Prediction day YYYYMMDD.')
parser.add_argument('-o', '--horizon', metavar='<horizon>', type=int, required=True,
help='Number of cycles used to gather the portability requests from the training day.')
args = parser.parse_args()
print(args)
# Cycle used for CAR and Extra Feats in the training set
trcycle_ini = args.training_day# '20181130' # Training data
# Number of cycles to gather dismiss requests
horizon = args.horizon #4
# Cycle used for CAR and Extra Feats in the test set
ttcycle_ini = args.prediction_day#'20181231' # Test data
tr_ttdates = trcycle_ini + '_' + ttcycle_ini
########################
### 1. TRAINING DATA ###
########################
# 1.1. Loading training data
inittrdf_ini = getFbbChurnLabeledCarCycles(spark, origin, trcycle_ini, selcols, horizon)
#inittrdf_ini.repartition(200).write.save(path, format='parquet', mode='overwrite')
## Reading the Extra Features
dfExtraFeat = spark.read.parquet('/data/udf/vf_es/churn/extra_feats_mod/extra_feats/year={}/month={}/day={}'
.format(int(trcycle_ini[0:4]), int(trcycle_ini[4:6]), int(trcycle_ini[6:8])))
# Taking only the clients with a fbb service
dfExtraFeatfbb = dfExtraFeat.join(inittrdf_ini, ["num_cliente"], "leftsemi")
dfExtraFeatfbb = dfExtraFeatfbb.cache()
print "[Info Main FbbChurn] " + time.ctime() + " Count of the ExtraFeats: ", dfExtraFeatfbb.count()
# Taking the Extra Features of interest and adding their values for num_client when necessary
dfExtraFeatSel, selColumnas = addExtraFeatsEvol(dfExtraFeatfbb)
print "[Info Main FbbChurn] " + time.ctime() + " Calculating the total value of the extra feats for each number client"
dfillNa = fillNa(spark)
for kkey in dfillNa.keys():
if kkey not in dfExtraFeatSel.columns:
dfillNa.pop(kkey, None)
inittrdf = inittrdf_ini.join(dfExtraFeatSel, ["msisdn", "num_cliente", 'rgu'], how="left").na.fill(dfillNa)
print "[Info Main FbbChurn] " + time.ctime() + " Saving inittrdf to HDFS " +str(inittrdf.count())
#inittrdf.repartition(200).write.save(path, format='parquet', mode='overwrite')
#path1 = '/data/udf/vf_es/churn/fbb_tmp/unbaltrdf_' + tr_ttdates
#path2 = '/data/udf/vf_es/churn/fbb_tmp/valdf_' + tr_ttdates
#if (pathExist(path1)) and (pathExist(path2)):
#print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path1) + " and " + str(path2) + " already exist. Reading them."
#unbaltrdf = spark.read.parquet(path1)
#valdf = spark.read.parquet(path2)
#else:
#print "[Info Main FbbChurn] " + time.ctime() + " Number of clients after joining the Extra Feats to the training set" + str(inittrdf.count())
[unbaltrdf, valdf] = inittrdf.randomSplit([0.7, 0.3], 1234)
#print "[Info Main FbbChurn] " + time.ctime() + " Stat description of the target variable printed above"
unbaltrdf = unbaltrdf.cache()
valdf = valdf.cache()
#print "[Info Main FbbChurn] " + time.ctime() + " Saving unbaltrdf to HDFS " + str(unbaltrdf.count())
#print "[Info Main FbbChurn] " + time.ctime() + " Saving valdf to HDFS " + str(valdf.count())
#unbaltrdf.repartition(300).write.save(path1,format='parquet', mode='overwrite')
#valdf.repartition(300).write.save(path2,format='parquet', mode='overwrite')
# 1.2. Balanced df for training
#path = "/data/udf/vf_es/churn/fbb_tmp/trdf_" + tr_ttdates
#if (pathExist(path)):
unbaltrdf.groupBy('label').agg(count('*')).show()
print "[Info Main FbbChurn]" + time.ctime() + " Count on label column for unbalanced tr set showed above"
trdf = balance_df2(unbaltrdf, 'label')
trdf.groupBy('label').agg(count('*')).show()
#print "[Info Main FbbChurn] " + time.ctime() + " Saving trdf to HDFS "
#trdf.repartition(300).write.save(path, format='parquet',mode='overwrite')
# 1.3. Feature selection
allFeats = trdf.columns
# Getting only the numeric variables
catCols = [item[0] for item in trdf.dtypes if item[1].startswith('string')]
numerical_feats = list(set(allFeats) - set(list(
set().union(getIdFeats(), getIdFeats_tr(), getNoInputFeats(), catCols, [c + "_enc" for c in getCatFeatsCrm()],
["label"]))))
noninf_feats = getNonInfFeats(trdf, numerical_feats)
for f in noninf_feats:
print "[Info Main FbbChurn] Non-informative feat: " + f
####################
### 2. TEST DATA ###
####################
#path = "/data/udf/vf_es/churn/fbb_tmp/ttdf_ini_" + tr_ttdates
#if (pathExist(path)):
#print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path) + " already exists. Reading it."
#ttdf_ini = spark.read.parquet(path)
#else:
ttdf_ini = getFbbChurnLabeledCarCycles(spark, origin, ttcycle_ini, selcols,horizon)
#print "[Info Main FbbChurn] " + time.ctime() + " Saving ttdf_ini to HDFS "
#ttdf_ini.repartition(200).write.save(path,format='parquet', mode='overwrite')
#ttdf_ini.describe('label').show()
#path = "/data/udf/vf_es/churn/fbb_tmp/ttdf_" + tr_ttdates
#if (pathExist(path)):
# print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path) + " already exists. Reading it."
# ttdf = spark.read.parquet(path)
#else:
dfExtraFeat_tt = spark.read.parquet('/data/udf/vf_es/churn/extra_feats_mod/extra_feats/year={}/month={}/day={}'
.format(int(ttcycle_ini[0:4]), int(ttcycle_ini[4:6]), int(ttcycle_ini[6:8])))
dfExtraFeatfbb_tt = dfExtraFeat_tt.join(ttdf_ini.select('num_cliente'), on='num_cliente', how='leftsemi')
print(dfExtraFeatfbb_tt.select('num_cliente').distinct().count(), ttdf_ini.select('num_cliente').distinct().count())
dfExtraFeatfbb_tt = dfExtraFeatfbb_tt.cache()
print("[Info Main FbbChurn] " + time.ctime() + " Count of the ExtraFeats ", dfExtraFeatfbb_tt.count())
dfExtraFeat_ttSel, selColumnas = addExtraFeatsEvol(dfExtraFeatfbb_tt)
#print "[Info Main FbbChurn] " + time.ctime() + " Calculating the total value of the extra feats for each number client in tt"
dfillNa = fillNa(spark)
for kkey in dfillNa.keys():
if kkey not in dfExtraFeat_ttSel.columns:
dfillNa.pop(kkey, None)
ttdf = ttdf_ini.join(dfExtraFeat_ttSel, ["msisdn", "num_cliente", 'rgu'], how="left").na.fill(dfillNa)
print "[Info Main FbbChurn] " + time.ctime() + " Number of clients after joining the Extra Feats to the test set " + str(ttdf.count())
print "[Info Main FbbChurn] " + time.ctime() + " Saving ttdf to HDFS "
#tdf = ttdf.repartition(300)
#ttdf.repartition(300).write.save(path, format='parquet', mode='overwrite')
####################
### 3. MODELLING ###
####################
featCols = list(set(numerical_feats) - set(noninf_feats))
for f in featCols:
print "[Info Main FbbChurn] Input feat: " + f
assembler = VectorAssembler(inputCols=featCols, outputCol="features")
classifier = RandomForestClassifier(featuresCol="features", \
labelCol="label", \
maxDepth=15, \
maxBins=32, \
minInstancesPerNode=200, \
impurity="gini", \
featureSubsetStrategy="sqrt", \
subsamplingRate=0.7, \
numTrees=800, \
seed=1234)
pipeline = Pipeline(stages=[assembler, classifier])
model = pipeline.fit(trdf)
feat_importance = getOrderedRelevantFeats(model, featCols, 'f', 'rf')
for fimp in feat_importance:
print "[Info Main FbbChurn] Imp feat " + str(fimp[0]) + ": " + str(fimp[1])
##################
### EVALUATION ###
##################
# Calibration
calibmodel = get_calibration_function2(spark, model, valdf, 'label', 10)
getScore = udf(lambda prob: float(prob[1]), DoubleType())
# Train
tr_preds_df = model.transform(trdf).withColumn("model_score", getScore(col("probability")).cast(DoubleType()))
tr_calib_preds_df = calibmodel[0].transform(tr_preds_df)
# Train evaluation
trPredictionAndLabels = tr_calib_preds_df.select(['calib_model_score', 'label']).rdd.map(lambda r: (r['calib_model_score'], r['label']))
trmetrics = BinaryClassificationMetrics(trPredictionAndLabels)
# Test eval
tt_preds_df = model.transform(ttdf).withColumn("model_score", getScore(col("probability")).cast(DoubleType()))
tt_calib_preds_df = calibmodel[0].transform(tt_preds_df)
# Evaluation
ttPredictionAndLabels = tt_calib_preds_df.select(['calib_model_score', 'label']).rdd.map(lambda r: (r['calib_model_score'], r['label']))
ttmetrics = BinaryClassificationMetrics(ttPredictionAndLabels)
print(" Area under ROC(tr) = " + str(trmetrics.areaUnderROC))
print(" Area under ROC(tt) = " + str(ttmetrics.areaUnderROC))
print("[Info Main FbbChurn] " + time.ctime() + " Area under ROC(tr) = " + str(trmetrics.areaUnderROC) + " - Area under ROC(tt) = " + str(ttmetrics.areaUnderROC))
lift = get_lift(tt_calib_preds_df, 'calib_model_score', 'label', 40)
for d, l in lift:
print str(d) + ": " + str(l)
print('Summary of Evaluations')
print('ttmetrics', ttmetrics.areaUnderROC)
print('trmetrics', trmetrics.areaUnderROC)
#########################
### ENSEMBLER ALBERTO ###
#########################
bad = tr_preds_df.where(col('label') != col('prediction')).drop(col('features'))
good = tr_preds_df.where(col('label') == col('prediction')).drop(col('features'))
bad_b = balance_df2(bad, 'label')
good_b = balance_df2(good, 'label')
#full_concat = tr_preds_df.union(tt_preds_df)
assembler2 = VectorAssembler(inputCols=featCols, outputCol="features")
classifier2 = RandomForestClassifier(featuresCol="features", \
labelCol="label", \
maxDepth=15, \
maxBins=32, \
minInstancesPerNode=200, \
impurity="gini", \
featureSubsetStrategy="sqrt", \
subsamplingRate=0.7, \
numTrees=800, \
seed=1234)
pipeline2 = Pipeline(stages=[assembler2, classifier2])
model_good = pipeline2.fit(good_b)
model_bad = pipeline2.fit(bad_b)
calibmodel_good = get_calibration_function2(spark, model_good, valdf, 'label', 10)
calibmodel_bad = get_calibration_function2(spark, model_bad, valdf, 'label', 10)
tr_good = model_good.transform(good_b).drop(col('features'))
tr_bad = model_bad.transform(bad_b).drop(col('features'))
tr_preds_good = tr_good.withColumn("model_score_good_calib", getScore(col("probability")).cast(DoubleType()))
tr_calib_preds_good = calibmodel_good[0].transform(tr_preds_good)
tr_preds_bad = tr_bad.withColumn("model_score_bad_calib", getScore(col("probability")).cast(DoubleType()))
tr_calib_preds_bad = calibmodel_bad[0].transform(tr_preds_bad)
final_df = tr_preds_good.select('msisdn', "model_score_good_calib").join(tr_preds_bad.select('msisdn', "model_score_bad_calib"), ['msisdn'], 'inner')
final_feats = ["model_score_good_calib", "model_score_bad_calib"]
assembler3 = VectorAssembler(inputCols=final_feats, outputCol="features")
classifier3 = RandomForestClassifier(featuresCol="features", \
labelCol="label", \
maxDepth=15, \
maxBins=32, \
minInstancesPerNode=200, \
impurity="gini", \
featureSubsetStrategy="sqrt", \
subsamplingRate=0.7, \
numTrees=800, \
seed=1234)
pipeline3 = Pipeline(stages=[assembler3, classifier3])
[tr_final, tt_final] = final_df.randomsplit([0.7, 0.3], 1234)
model_f = pipeline3.fit(tr_final)
calibmodel_final = get_calibration_function2(spark, model_f, valdf, 'label', 10)
final_tr_df_with_scores = model_f.transform(tr_final).withColumn("final_score", getScore(col("probability")).cast(DoubleType()))
final_tr_df_with_calib_scores = calibmodel_final[0].transform(final_tr_df_with_scores)
tr_PredictionAndLabels = final_tr_df_with_calib_scores.select(['final_score_calib', 'label']).rdd.map(lambda r: (r['final_score_calib'], r['label']))
final_tt_df_with_scores = model_f.transform(tt_final).withColumn("final_score", getScore(col("probability")).cast(DoubleType()))
final_tt_df_with_calib_scores = calibmodel_final[0].transform(final_tt_df_with_scores)
tt_PredictionAndLabels = final_tt_df_with_calib_scores.select(['final_score_calib', 'label']).rdd.map(lambda r: (r['final_score_calib'], r['label']))
tr_metrics_final = BinaryClassificationMetrics(tr_PredictionAndLabels)
tt_metrics_final = BinaryClassificationMetrics(tt_PredictionAndLabels)
print 'Final AUC (tr): {}'.format(tr_metrics_final.areaUnderROC)
print 'Final AUC (tt): {}'.format(tt_metrics_final.areaUnderROC)
#Calibrar los scores dándoles un nuevo nombre para que sean independientes DONE
#Combinarlos en un nuevo df DONE
#Entrenar el ultimo modelo
#Mucho ojo a que nombres espera la funcion de calibracion
print(" ")
print("[Info Main FbbChurn] Process completed")
print(" ")
spark.stop()
| MAX_N_EXECUTORS = max_n_executors
MIN_N_EXECUTORS = min_n_executors
N_CORES_EXECUTOR = n_cores
EXECUTOR_IDLE_MAX_TIME = 120
EXECUTOR_MEMORY = executor_memory
DRIVER_MEMORY = driver_memory
N_CORES_DRIVER = 1
MEMORY_OVERHEAD = N_CORES_EXECUTOR * 2048
QUEUE = "root.BDPtenants.es.medium"
BDA_CORE_VERSION = "1.0.0"
SPARK_COMMON_OPTS = os.environ.get('SPARK_COMMON_OPTS', '')
SPARK_COMMON_OPTS += " --executor-memory %s --driver-memory %s" % (EXECUTOR_MEMORY, DRIVER_MEMORY)
SPARK_COMMON_OPTS += " --conf spark.shuffle.manager=tungsten-sort"
SPARK_COMMON_OPTS += " --queue %s" % QUEUE
# Dynamic allocation configuration
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.enabled=true"
SPARK_COMMON_OPTS += " --conf spark.shuffle.service.enabled=true"
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.maxExecutors=%s" % (MAX_N_EXECUTORS)
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.minExecutors=%s" % (MIN_N_EXECUTORS)
SPARK_COMMON_OPTS += " --conf spark.executor.cores=%s" % (N_CORES_EXECUTOR)
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.executorIdleTimeout=%s" % (EXECUTOR_IDLE_MAX_TIME)
# SPARK_COMMON_OPTS += " --conf spark.ui.port=58235"
SPARK_COMMON_OPTS += " --conf spark.port.maxRetries=100"
SPARK_COMMON_OPTS += " --conf spark.app.name='%s'" % (app_name)
SPARK_COMMON_OPTS += " --conf spark.submit.deployMode=client"
SPARK_COMMON_OPTS += " --conf spark.ui.showConsoleProgress=true"
SPARK_COMMON_OPTS += " --conf spark.sql.broadcastTimeout=1200"
SPARK_COMMON_OPTS += " --conf spark.yarn.executor.memoryOverhead={}".format(executor_overhead)
SPARK_COMMON_OPTS += " --conf spark.yarn.executor.driverOverhead={}".format(driver_overhead)
SPARK_COMMON_OPTS += " --conf spark.shuffle.service.enabled = true"
BDA_ENV = os.environ.get('BDA_USER_HOME', '')
# Attach bda-core-ra codebase
SPARK_COMMON_OPTS+=" --files {}/scripts/properties/red_agent/nodes.properties,{}/scripts/properties/red_agent/nodes-de.properties,{}/scripts/properties/red_agent/nodes-es.properties,{}/scripts/properties/red_agent/nodes-ie.properties,{}/scripts/properties/red_agent/nodes-it.properties,{}/scripts/properties/red_agent/nodes-pt.properties,{}/scripts/properties/red_agent/nodes-uk.properties".format(*[BDA_ENV]*7)
os.environ["SPARK_COMMON_OPTS"] = SPARK_COMMON_OPTS
os.environ["PYSPARK_SUBMIT_ARGS"] = "%s pyspark-shell " % SPARK_COMMON_OPTS
#os.environ["SPARK_EXTRA_CONF_PARAMETERS"] = '--conf spark.yarn.jars=hdfs:///data/raw/public/lib_spark_2_1_0_jars_SPARK-18971/*' | identifier_body |
fbb_churn_eval_ensemb_alberto.py | # coding: utf-8
import sys
from common.src.main.python.utils.hdfs_generic import *
import argparse
import os
import sys
import time
from pyspark.sql.functions import (udf,
col,
decode,
when,
lit,
lower,
concat,
translate,
count,
sum as sql_sum,
max as sql_max,
min as sql_min,
avg as sql_avg,
greatest,
least,
isnull,
isnan,
struct,
substring,
size,
length,
year,
month,
dayofmonth,
unix_timestamp,
date_format,
from_unixtime,
datediff,
to_date,
desc,
asc,
countDistinct,
row_number)
from pyspark.sql import Row, DataFrame, Column, Window
from pyspark.sql.types import DoubleType, StringType, IntegerType, DateType, ArrayType, LongType
from pyspark.ml import Pipeline
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer, VectorAssembler, SQLTransformer, OneHotEncoder
from pyspark.ml.evaluation import MulticlassClassificationEvaluator, BinaryClassificationEvaluator
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from datetime import datetime
from itertools import chain
import numpy as np
from functools import reduce
from utils_general import *
from utils_model import *
from metadata_fbb_churn import *
from feature_selection_utils import *
import subprocess
#from date_functions import get_next_cycle
def set_paths():
'''
Deployment should be something like "dirs/dir1/use-cases"
This function adds to the path "dirs/dir1/use-cases" and "dirs/dir1/"
:return:
'''
import imp
from os.path import dirname
import os
USE_CASES = "/var/SP/data/home/asaezco/src/devel2/use-cases"#dirname(os.path.abspath(imp.find_module('churn')[1]))
if USE_CASES not in sys.path:
sys.path.append(USE_CASES)
print("Added '{}' to path".format(USE_CASES))
# if deployment is correct, this path should be the one that contains "use-cases", "pykhaos", ...
# FIXME another way of doing it more general?
DEVEL_SRC = os.path.dirname(USE_CASES) # dir before use-cases dir
if DEVEL_SRC not in sys.path:
sys.path.append(DEVEL_SRC)
print("Added '{}' to path".format(DEVEL_SRC))
####################################
### Creating Spark Session
###################################
def get_spark_session(app_name="default name", log_level='INFO', min_n_executors = 1, max_n_executors = 15, n_cores = 4, executor_memory = "32g", driver_memory="32g"):
HOME_SRC = os.path.join(os.environ.get('BDA_USER_HOME', ''), "src")
if HOME_SRC not in sys.path:
sys.path.append(HOME_SRC)
setting_bdp(app_name=app_name, min_n_executors = min_n_executors, max_n_executors = max_n_executors, n_cores = n_cores, executor_memory = executor_memory, driver_memory=driver_memory)
from common.src.main.python.utils.hdfs_generic import run_sc
sc, spark, sql_context = run_sc(log_level=log_level)
return sc, spark, sql_context
# set BDP parameters
def setting_bdp(min_n_executors = 1, max_n_executors = 15, n_cores = 8, executor_memory = "16g", driver_memory="8g",
app_name = "Python app", driver_overhead="1g", executor_overhead='3g'):
MAX_N_EXECUTORS = max_n_executors
MIN_N_EXECUTORS = min_n_executors
N_CORES_EXECUTOR = n_cores
EXECUTOR_IDLE_MAX_TIME = 120
EXECUTOR_MEMORY = executor_memory
DRIVER_MEMORY = driver_memory
N_CORES_DRIVER = 1
MEMORY_OVERHEAD = N_CORES_EXECUTOR * 2048
QUEUE = "root.BDPtenants.es.medium"
BDA_CORE_VERSION = "1.0.0"
SPARK_COMMON_OPTS = os.environ.get('SPARK_COMMON_OPTS', '')
SPARK_COMMON_OPTS += " --executor-memory %s --driver-memory %s" % (EXECUTOR_MEMORY, DRIVER_MEMORY)
SPARK_COMMON_OPTS += " --conf spark.shuffle.manager=tungsten-sort"
SPARK_COMMON_OPTS += " --queue %s" % QUEUE
# Dynamic allocation configuration
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.enabled=true"
SPARK_COMMON_OPTS += " --conf spark.shuffle.service.enabled=true"
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.maxExecutors=%s" % (MAX_N_EXECUTORS)
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.minExecutors=%s" % (MIN_N_EXECUTORS)
SPARK_COMMON_OPTS += " --conf spark.executor.cores=%s" % (N_CORES_EXECUTOR)
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.executorIdleTimeout=%s" % (EXECUTOR_IDLE_MAX_TIME)
# SPARK_COMMON_OPTS += " --conf spark.ui.port=58235"
SPARK_COMMON_OPTS += " --conf spark.port.maxRetries=100"
SPARK_COMMON_OPTS += " --conf spark.app.name='%s'" % (app_name)
SPARK_COMMON_OPTS += " --conf spark.submit.deployMode=client"
SPARK_COMMON_OPTS += " --conf spark.ui.showConsoleProgress=true"
SPARK_COMMON_OPTS += " --conf spark.sql.broadcastTimeout=1200"
SPARK_COMMON_OPTS += " --conf spark.yarn.executor.memoryOverhead={}".format(executor_overhead)
SPARK_COMMON_OPTS += " --conf spark.yarn.executor.driverOverhead={}".format(driver_overhead)
SPARK_COMMON_OPTS += " --conf spark.shuffle.service.enabled = true"
BDA_ENV = os.environ.get('BDA_USER_HOME', '')
# Attach bda-core-ra codebase
SPARK_COMMON_OPTS+=" --files {}/scripts/properties/red_agent/nodes.properties,{}/scripts/properties/red_agent/nodes-de.properties,{}/scripts/properties/red_agent/nodes-es.properties,{}/scripts/properties/red_agent/nodes-ie.properties,{}/scripts/properties/red_agent/nodes-it.properties,{}/scripts/properties/red_agent/nodes-pt.properties,{}/scripts/properties/red_agent/nodes-uk.properties".format(*[BDA_ENV]*7)
os.environ["SPARK_COMMON_OPTS"] = SPARK_COMMON_OPTS
os.environ["PYSPARK_SUBMIT_ARGS"] = "%s pyspark-shell " % SPARK_COMMON_OPTS
#os.environ["SPARK_EXTRA_CONF_PARAMETERS"] = '--conf spark.yarn.jars=hdfs:///data/raw/public/lib_spark_2_1_0_jars_SPARK-18971/*'
def initialize(app_name, min_n_executors = 1, max_n_executors = 15, n_cores = 4, executor_memory = "16g", driver_memory="8g"):
import time
start_time = time.time()
print("_initialize spark")
#import pykhaos.utils.pyspark_configuration as pyspark_config
sc, spark, sql_context = get_spark_session(app_name=app_name, log_level="OFF", min_n_executors = min_n_executors, max_n_executors = max_n_executors, n_cores = n_cores,
executor_memory = executor_memory, driver_memory=driver_memory)
print("Ended spark session: {} secs | default parallelism={}".format(time.time() - start_time,
sc.defaultParallelism))
return spark
if __name__ == "__main__":
set_paths()
from pykhaos.utils.date_functions import *
from utils_fbb_churn import *
# create Spark context with Spark configuration
print '[' + time.ctime() + ']', 'Process started'
global sqlContext
spark = initialize("VF_ES AMDOCS FBB Churn Prediction ", executor_memory="16g", min_n_executors=10)
print('Spark Configuration used', spark.sparkContext.getConf().getAll())
selcols = getIdFeats() + getCrmFeats() + getBillingFeats() + getMobSopoFeats() + getOrdersFeats()
now = datetime.now()
date_name = str(now.year) + str(now.month).rjust(2, '0') + str(now.day).rjust(2, '0')
origin = '/user/hive/warehouse/tests_es.db/jvmm_amdocs_ids_'
## ARGUMENTS
###############
parser = argparse.ArgumentParser(
description='Generate score table for fbb model',
epilog='Please report bugs and issues to Beatriz <beatriz.gonzalez2@vodafone.com>')
parser.add_argument('-s', '--training_day', metavar='<TRAINING_DAY>', type=str, required=True,
help='Training day YYYYMMDD. Date of the CAR taken to train the model.')
parser.add_argument('-p', '--prediction_day', metavar='<PREDICTION_DAY>', type=str, required=True,
help='Prediction day YYYYMMDD.')
parser.add_argument('-o', '--horizon', metavar='<horizon>', type=int, required=True,
help='Number of cycles used to gather the portability requests from the training day.')
args = parser.parse_args()
print(args)
# Cycle used for CAR and Extra Feats in the training set
trcycle_ini = args.training_day# '20181130' # Training data
# Number of cycles to gather dismiss requests
horizon = args.horizon #4
# Cycle used for CAR and Extra Feats in the test set
ttcycle_ini = args.prediction_day#'20181231' # Test data
tr_ttdates = trcycle_ini + '_' + ttcycle_ini
########################
### 1. TRAINING DATA ###
########################
# 1.1. Loading training data
inittrdf_ini = getFbbChurnLabeledCarCycles(spark, origin, trcycle_ini, selcols, horizon)
#inittrdf_ini.repartition(200).write.save(path, format='parquet', mode='overwrite')
## Reading the Extra Features
dfExtraFeat = spark.read.parquet('/data/udf/vf_es/churn/extra_feats_mod/extra_feats/year={}/month={}/day={}'
.format(int(trcycle_ini[0:4]), int(trcycle_ini[4:6]), int(trcycle_ini[6:8])))
# Taking only the clients with a fbb service
dfExtraFeatfbb = dfExtraFeat.join(inittrdf_ini, ["num_cliente"], "leftsemi")
dfExtraFeatfbb = dfExtraFeatfbb.cache()
print "[Info Main FbbChurn] " + time.ctime() + " Count of the ExtraFeats: ", dfExtraFeatfbb.count()
# Taking the Extra Features of interest and adding their values for num_client when necessary
dfExtraFeatSel, selColumnas = addExtraFeatsEvol(dfExtraFeatfbb)
print "[Info Main FbbChurn] " + time.ctime() + " Calculating the total value of the extra feats for each number client"
dfillNa = fillNa(spark)
for kkey in dfillNa.keys():
if kkey not in dfExtraFeatSel.columns:
dfillNa.pop(kkey, None)
inittrdf = inittrdf_ini.join(dfExtraFeatSel, ["msisdn", "num_cliente", 'rgu'], how="left").na.fill(dfillNa)
print "[Info Main FbbChurn] " + time.ctime() + " Saving inittrdf to HDFS " +str(inittrdf.count())
#inittrdf.repartition(200).write.save(path, format='parquet', mode='overwrite')
#path1 = '/data/udf/vf_es/churn/fbb_tmp/unbaltrdf_' + tr_ttdates
#path2 = '/data/udf/vf_es/churn/fbb_tmp/valdf_' + tr_ttdates
#if (pathExist(path1)) and (pathExist(path2)):
#print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path1) + " and " + str(path2) + " already exist. Reading them."
#unbaltrdf = spark.read.parquet(path1)
#valdf = spark.read.parquet(path2)
#else:
#print "[Info Main FbbChurn] " + time.ctime() + " Number of clients after joining the Extra Feats to the training set" + str(inittrdf.count())
[unbaltrdf, valdf] = inittrdf.randomSplit([0.7, 0.3], 1234)
#print "[Info Main FbbChurn] " + time.ctime() + " Stat description of the target variable printed above"
unbaltrdf = unbaltrdf.cache()
valdf = valdf.cache()
#print "[Info Main FbbChurn] " + time.ctime() + " Saving unbaltrdf to HDFS " + str(unbaltrdf.count())
#print "[Info Main FbbChurn] " + time.ctime() + " Saving valdf to HDFS " + str(valdf.count())
#unbaltrdf.repartition(300).write.save(path1,format='parquet', mode='overwrite')
#valdf.repartition(300).write.save(path2,format='parquet', mode='overwrite')
# 1.2. Balanced df for training
#path = "/data/udf/vf_es/churn/fbb_tmp/trdf_" + tr_ttdates
#if (pathExist(path)):
unbaltrdf.groupBy('label').agg(count('*')).show()
print "[Info Main FbbChurn]" + time.ctime() + " Count on label column for unbalanced tr set showed above"
trdf = balance_df2(unbaltrdf, 'label')
trdf.groupBy('label').agg(count('*')).show()
#print "[Info Main FbbChurn] " + time.ctime() + " Saving trdf to HDFS "
#trdf.repartition(300).write.save(path, format='parquet',mode='overwrite')
# 1.3. Feature selection
allFeats = trdf.columns
# Getting only the numeric variables
catCols = [item[0] for item in trdf.dtypes if item[1].startswith('string')]
numerical_feats = list(set(allFeats) - set(list(
set().union(getIdFeats(), getIdFeats_tr(), getNoInputFeats(), catCols, [c + "_enc" for c in getCatFeatsCrm()],
["label"]))))
noninf_feats = getNonInfFeats(trdf, numerical_feats)
for f in noninf_feats:
|
####################
### 2. TEST DATA ###
####################
#path = "/data/udf/vf_es/churn/fbb_tmp/ttdf_ini_" + tr_ttdates
#if (pathExist(path)):
#print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path) + " already exists. Reading it."
#ttdf_ini = spark.read.parquet(path)
#else:
ttdf_ini = getFbbChurnLabeledCarCycles(spark, origin, ttcycle_ini, selcols,horizon)
#print "[Info Main FbbChurn] " + time.ctime() + " Saving ttdf_ini to HDFS "
#ttdf_ini.repartition(200).write.save(path,format='parquet', mode='overwrite')
#ttdf_ini.describe('label').show()
#path = "/data/udf/vf_es/churn/fbb_tmp/ttdf_" + tr_ttdates
#if (pathExist(path)):
# print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path) + " already exists. Reading it."
# ttdf = spark.read.parquet(path)
#else:
dfExtraFeat_tt = spark.read.parquet('/data/udf/vf_es/churn/extra_feats_mod/extra_feats/year={}/month={}/day={}'
.format(int(ttcycle_ini[0:4]), int(ttcycle_ini[4:6]), int(ttcycle_ini[6:8])))
dfExtraFeatfbb_tt = dfExtraFeat_tt.join(ttdf_ini.select('num_cliente'), on='num_cliente', how='leftsemi')
print(dfExtraFeatfbb_tt.select('num_cliente').distinct().count(), ttdf_ini.select('num_cliente').distinct().count())
dfExtraFeatfbb_tt = dfExtraFeatfbb_tt.cache()
print("[Info Main FbbChurn] " + time.ctime() + " Count of the ExtraFeats ", dfExtraFeatfbb_tt.count())
dfExtraFeat_ttSel, selColumnas = addExtraFeatsEvol(dfExtraFeatfbb_tt)
#print "[Info Main FbbChurn] " + time.ctime() + " Calculating the total value of the extra feats for each number client in tt"
dfillNa = fillNa(spark)
for kkey in dfillNa.keys():
if kkey not in dfExtraFeat_ttSel.columns:
dfillNa.pop(kkey, None)
ttdf = ttdf_ini.join(dfExtraFeat_ttSel, ["msisdn", "num_cliente", 'rgu'], how="left").na.fill(dfillNa)
print "[Info Main FbbChurn] " + time.ctime() + " Number of clients after joining the Extra Feats to the test set " + str(ttdf.count())
print "[Info Main FbbChurn] " + time.ctime() + " Saving ttdf to HDFS "
#tdf = ttdf.repartition(300)
#ttdf.repartition(300).write.save(path, format='parquet', mode='overwrite')
####################
### 3. MODELLING ###
####################
featCols = list(set(numerical_feats) - set(noninf_feats))
for f in featCols:
print "[Info Main FbbChurn] Input feat: " + f
assembler = VectorAssembler(inputCols=featCols, outputCol="features")
classifier = RandomForestClassifier(featuresCol="features", \
labelCol="label", \
maxDepth=15, \
maxBins=32, \
minInstancesPerNode=200, \
impurity="gini", \
featureSubsetStrategy="sqrt", \
subsamplingRate=0.7, \
numTrees=800, \
seed=1234)
pipeline = Pipeline(stages=[assembler, classifier])
model = pipeline.fit(trdf)
feat_importance = getOrderedRelevantFeats(model, featCols, 'f', 'rf')
for fimp in feat_importance:
print "[Info Main FbbChurn] Imp feat " + str(fimp[0]) + ": " + str(fimp[1])
##################
### EVALUATION ###
##################
# Calibration
calibmodel = get_calibration_function2(spark, model, valdf, 'label', 10)
getScore = udf(lambda prob: float(prob[1]), DoubleType())
# Train
tr_preds_df = model.transform(trdf).withColumn("model_score", getScore(col("probability")).cast(DoubleType()))
tr_calib_preds_df = calibmodel[0].transform(tr_preds_df)
# Train evaluation
trPredictionAndLabels = tr_calib_preds_df.select(['calib_model_score', 'label']).rdd.map(lambda r: (r['calib_model_score'], r['label']))
trmetrics = BinaryClassificationMetrics(trPredictionAndLabels)
# Test eval
tt_preds_df = model.transform(ttdf).withColumn("model_score", getScore(col("probability")).cast(DoubleType()))
tt_calib_preds_df = calibmodel[0].transform(tt_preds_df)
# Evaluation
ttPredictionAndLabels = tt_calib_preds_df.select(['calib_model_score', 'label']).rdd.map(lambda r: (r['calib_model_score'], r['label']))
ttmetrics = BinaryClassificationMetrics(ttPredictionAndLabels)
print(" Area under ROC(tr) = " + str(trmetrics.areaUnderROC))
print(" Area under ROC(tt) = " + str(ttmetrics.areaUnderROC))
print("[Info Main FbbChurn] " + time.ctime() + " Area under ROC(tr) = " + str(trmetrics.areaUnderROC) + " - Area under ROC(tt) = " + str(ttmetrics.areaUnderROC))
lift = get_lift(tt_calib_preds_df, 'calib_model_score', 'label', 40)
for d, l in lift:
print str(d) + ": " + str(l)
print('Summary of Evaluations')
print('ttmetrics', ttmetrics.areaUnderROC)
print('trmetrics', trmetrics.areaUnderROC)
#########################
### ENSEMBLER ALBERTO ###
#########################
bad = tr_preds_df.where(col('label') != col('prediction')).drop(col('features'))
good = tr_preds_df.where(col('label') == col('prediction')).drop(col('features'))
bad_b = balance_df2(bad, 'label')
good_b = balance_df2(good, 'label')
#full_concat = tr_preds_df.union(tt_preds_df)
assembler2 = VectorAssembler(inputCols=featCols, outputCol="features")
classifier2 = RandomForestClassifier(featuresCol="features", \
labelCol="label", \
maxDepth=15, \
maxBins=32, \
minInstancesPerNode=200, \
impurity="gini", \
featureSubsetStrategy="sqrt", \
subsamplingRate=0.7, \
numTrees=800, \
seed=1234)
pipeline2 = Pipeline(stages=[assembler2, classifier2])
model_good = pipeline2.fit(good_b)
model_bad = pipeline2.fit(bad_b)
calibmodel_good = get_calibration_function2(spark, model_good, valdf, 'label', 10)
calibmodel_bad = get_calibration_function2(spark, model_bad, valdf, 'label', 10)
tr_good = model_good.transform(good_b).drop(col('features'))
tr_bad = model_bad.transform(bad_b).drop(col('features'))
tr_preds_good = tr_good.withColumn("model_score_good_calib", getScore(col("probability")).cast(DoubleType()))
tr_calib_preds_good = calibmodel_good[0].transform(tr_preds_good)
tr_preds_bad = tr_bad.withColumn("model_score_bad_calib", getScore(col("probability")).cast(DoubleType()))
tr_calib_preds_bad = calibmodel_bad[0].transform(tr_preds_bad)
final_df = tr_preds_good.select('msisdn', "model_score_good_calib").join(tr_preds_bad.select('msisdn', "model_score_bad_calib"), ['msisdn'], 'inner')
final_feats = ["model_score_good_calib", "model_score_bad_calib"]
assembler3 = VectorAssembler(inputCols=final_feats, outputCol="features")
classifier3 = RandomForestClassifier(featuresCol="features", \
labelCol="label", \
maxDepth=15, \
maxBins=32, \
minInstancesPerNode=200, \
impurity="gini", \
featureSubsetStrategy="sqrt", \
subsamplingRate=0.7, \
numTrees=800, \
seed=1234)
pipeline3 = Pipeline(stages=[assembler3, classifier3])
[tr_final, tt_final] = final_df.randomsplit([0.7, 0.3], 1234)
model_f = pipeline3.fit(tr_final)
calibmodel_final = get_calibration_function2(spark, model_f, valdf, 'label', 10)
final_tr_df_with_scores = model_f.transform(tr_final).withColumn("final_score", getScore(col("probability")).cast(DoubleType()))
final_tr_df_with_calib_scores = calibmodel_final[0].transform(final_tr_df_with_scores)
tr_PredictionAndLabels = final_tr_df_with_calib_scores.select(['final_score_calib', 'label']).rdd.map(lambda r: (r['final_score_calib'], r['label']))
final_tt_df_with_scores = model_f.transform(tt_final).withColumn("final_score", getScore(col("probability")).cast(DoubleType()))
final_tt_df_with_calib_scores = calibmodel_final[0].transform(final_tt_df_with_scores)
tt_PredictionAndLabels = final_tt_df_with_calib_scores.select(['final_score_calib', 'label']).rdd.map(lambda r: (r['final_score_calib'], r['label']))
tr_metrics_final = BinaryClassificationMetrics(tr_PredictionAndLabels)
tt_metrics_final = BinaryClassificationMetrics(tt_PredictionAndLabels)
print 'Final AUC (tr): {}'.format(tr_metrics_final.areaUnderROC)
print 'Final AUC (tt): {}'.format(tt_metrics_final.areaUnderROC)
#Calibrar los scores dándoles un nuevo nombre para que sean independientes DONE
#Combinarlos en un nuevo df DONE
#Entrenar el ultimo modelo
#Mucho ojo a que nombres espera la funcion de calibracion
print(" ")
print("[Info Main FbbChurn] Process completed")
print(" ")
spark.stop()
| print "[Info Main FbbChurn] Non-informative feat: " + f | conditional_block |
fbb_churn_eval_ensemb_alberto.py | # coding: utf-8
import sys
from common.src.main.python.utils.hdfs_generic import *
import argparse
import os
import sys
import time
from pyspark.sql.functions import (udf,
col,
decode,
when,
lit,
lower,
concat,
translate,
count,
sum as sql_sum,
max as sql_max,
min as sql_min,
avg as sql_avg,
greatest,
least,
isnull,
isnan,
struct,
substring,
size,
length,
year,
month,
dayofmonth,
unix_timestamp,
date_format,
from_unixtime,
datediff,
to_date,
desc,
asc,
countDistinct,
row_number)
from pyspark.sql import Row, DataFrame, Column, Window
from pyspark.sql.types import DoubleType, StringType, IntegerType, DateType, ArrayType, LongType
from pyspark.ml import Pipeline
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer, VectorAssembler, SQLTransformer, OneHotEncoder
from pyspark.ml.evaluation import MulticlassClassificationEvaluator, BinaryClassificationEvaluator
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from datetime import datetime
from itertools import chain
import numpy as np
from functools import reduce
from utils_general import *
from utils_model import *
from metadata_fbb_churn import *
from feature_selection_utils import *
import subprocess
#from date_functions import get_next_cycle
def set_paths():
'''
Deployment should be something like "dirs/dir1/use-cases"
This function adds to the path "dirs/dir1/use-cases" and "dirs/dir1/"
:return:
'''
import imp
from os.path import dirname
import os
USE_CASES = "/var/SP/data/home/asaezco/src/devel2/use-cases"#dirname(os.path.abspath(imp.find_module('churn')[1]))
if USE_CASES not in sys.path:
sys.path.append(USE_CASES)
print("Added '{}' to path".format(USE_CASES))
# if deployment is correct, this path should be the one that contains "use-cases", "pykhaos", ...
# FIXME another way of doing it more general?
DEVEL_SRC = os.path.dirname(USE_CASES) # dir before use-cases dir
if DEVEL_SRC not in sys.path:
sys.path.append(DEVEL_SRC)
print("Added '{}' to path".format(DEVEL_SRC))
####################################
### Creating Spark Session
###################################
def get_spark_session(app_name="default name", log_level='INFO', min_n_executors = 1, max_n_executors = 15, n_cores = 4, executor_memory = "32g", driver_memory="32g"):
HOME_SRC = os.path.join(os.environ.get('BDA_USER_HOME', ''), "src")
if HOME_SRC not in sys.path:
sys.path.append(HOME_SRC)
setting_bdp(app_name=app_name, min_n_executors = min_n_executors, max_n_executors = max_n_executors, n_cores = n_cores, executor_memory = executor_memory, driver_memory=driver_memory)
from common.src.main.python.utils.hdfs_generic import run_sc
sc, spark, sql_context = run_sc(log_level=log_level)
return sc, spark, sql_context
# set BDP parameters
def | (min_n_executors = 1, max_n_executors = 15, n_cores = 8, executor_memory = "16g", driver_memory="8g",
app_name = "Python app", driver_overhead="1g", executor_overhead='3g'):
MAX_N_EXECUTORS = max_n_executors
MIN_N_EXECUTORS = min_n_executors
N_CORES_EXECUTOR = n_cores
EXECUTOR_IDLE_MAX_TIME = 120
EXECUTOR_MEMORY = executor_memory
DRIVER_MEMORY = driver_memory
N_CORES_DRIVER = 1
MEMORY_OVERHEAD = N_CORES_EXECUTOR * 2048
QUEUE = "root.BDPtenants.es.medium"
BDA_CORE_VERSION = "1.0.0"
SPARK_COMMON_OPTS = os.environ.get('SPARK_COMMON_OPTS', '')
SPARK_COMMON_OPTS += " --executor-memory %s --driver-memory %s" % (EXECUTOR_MEMORY, DRIVER_MEMORY)
SPARK_COMMON_OPTS += " --conf spark.shuffle.manager=tungsten-sort"
SPARK_COMMON_OPTS += " --queue %s" % QUEUE
# Dynamic allocation configuration
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.enabled=true"
SPARK_COMMON_OPTS += " --conf spark.shuffle.service.enabled=true"
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.maxExecutors=%s" % (MAX_N_EXECUTORS)
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.minExecutors=%s" % (MIN_N_EXECUTORS)
SPARK_COMMON_OPTS += " --conf spark.executor.cores=%s" % (N_CORES_EXECUTOR)
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.executorIdleTimeout=%s" % (EXECUTOR_IDLE_MAX_TIME)
# SPARK_COMMON_OPTS += " --conf spark.ui.port=58235"
SPARK_COMMON_OPTS += " --conf spark.port.maxRetries=100"
SPARK_COMMON_OPTS += " --conf spark.app.name='%s'" % (app_name)
SPARK_COMMON_OPTS += " --conf spark.submit.deployMode=client"
SPARK_COMMON_OPTS += " --conf spark.ui.showConsoleProgress=true"
SPARK_COMMON_OPTS += " --conf spark.sql.broadcastTimeout=1200"
SPARK_COMMON_OPTS += " --conf spark.yarn.executor.memoryOverhead={}".format(executor_overhead)
SPARK_COMMON_OPTS += " --conf spark.yarn.executor.driverOverhead={}".format(driver_overhead)
SPARK_COMMON_OPTS += " --conf spark.shuffle.service.enabled = true"
BDA_ENV = os.environ.get('BDA_USER_HOME', '')
# Attach bda-core-ra codebase
SPARK_COMMON_OPTS+=" --files {}/scripts/properties/red_agent/nodes.properties,{}/scripts/properties/red_agent/nodes-de.properties,{}/scripts/properties/red_agent/nodes-es.properties,{}/scripts/properties/red_agent/nodes-ie.properties,{}/scripts/properties/red_agent/nodes-it.properties,{}/scripts/properties/red_agent/nodes-pt.properties,{}/scripts/properties/red_agent/nodes-uk.properties".format(*[BDA_ENV]*7)
os.environ["SPARK_COMMON_OPTS"] = SPARK_COMMON_OPTS
os.environ["PYSPARK_SUBMIT_ARGS"] = "%s pyspark-shell " % SPARK_COMMON_OPTS
#os.environ["SPARK_EXTRA_CONF_PARAMETERS"] = '--conf spark.yarn.jars=hdfs:///data/raw/public/lib_spark_2_1_0_jars_SPARK-18971/*'
def initialize(app_name, min_n_executors = 1, max_n_executors = 15, n_cores = 4, executor_memory = "16g", driver_memory="8g"):
import time
start_time = time.time()
print("_initialize spark")
#import pykhaos.utils.pyspark_configuration as pyspark_config
sc, spark, sql_context = get_spark_session(app_name=app_name, log_level="OFF", min_n_executors = min_n_executors, max_n_executors = max_n_executors, n_cores = n_cores,
executor_memory = executor_memory, driver_memory=driver_memory)
print("Ended spark session: {} secs | default parallelism={}".format(time.time() - start_time,
sc.defaultParallelism))
return spark
if __name__ == "__main__":
set_paths()
from pykhaos.utils.date_functions import *
from utils_fbb_churn import *
# create Spark context with Spark configuration
print '[' + time.ctime() + ']', 'Process started'
global sqlContext
spark = initialize("VF_ES AMDOCS FBB Churn Prediction ", executor_memory="16g", min_n_executors=10)
print('Spark Configuration used', spark.sparkContext.getConf().getAll())
selcols = getIdFeats() + getCrmFeats() + getBillingFeats() + getMobSopoFeats() + getOrdersFeats()
now = datetime.now()
date_name = str(now.year) + str(now.month).rjust(2, '0') + str(now.day).rjust(2, '0')
origin = '/user/hive/warehouse/tests_es.db/jvmm_amdocs_ids_'
## ARGUMENTS
###############
parser = argparse.ArgumentParser(
description='Generate score table for fbb model',
epilog='Please report bugs and issues to Beatriz <beatriz.gonzalez2@vodafone.com>')
parser.add_argument('-s', '--training_day', metavar='<TRAINING_DAY>', type=str, required=True,
help='Training day YYYYMMDD. Date of the CAR taken to train the model.')
parser.add_argument('-p', '--prediction_day', metavar='<PREDICTION_DAY>', type=str, required=True,
help='Prediction day YYYYMMDD.')
parser.add_argument('-o', '--horizon', metavar='<horizon>', type=int, required=True,
help='Number of cycles used to gather the portability requests from the training day.')
args = parser.parse_args()
print(args)
# Cycle used for CAR and Extra Feats in the training set
trcycle_ini = args.training_day# '20181130' # Training data
# Number of cycles to gather dismiss requests
horizon = args.horizon #4
# Cycle used for CAR and Extra Feats in the test set
ttcycle_ini = args.prediction_day#'20181231' # Test data
tr_ttdates = trcycle_ini + '_' + ttcycle_ini
########################
### 1. TRAINING DATA ###
########################
# 1.1. Loading training data
inittrdf_ini = getFbbChurnLabeledCarCycles(spark, origin, trcycle_ini, selcols, horizon)
#inittrdf_ini.repartition(200).write.save(path, format='parquet', mode='overwrite')
## Reading the Extra Features
dfExtraFeat = spark.read.parquet('/data/udf/vf_es/churn/extra_feats_mod/extra_feats/year={}/month={}/day={}'
.format(int(trcycle_ini[0:4]), int(trcycle_ini[4:6]), int(trcycle_ini[6:8])))
# Taking only the clients with a fbb service
dfExtraFeatfbb = dfExtraFeat.join(inittrdf_ini, ["num_cliente"], "leftsemi")
dfExtraFeatfbb = dfExtraFeatfbb.cache()
print "[Info Main FbbChurn] " + time.ctime() + " Count of the ExtraFeats: ", dfExtraFeatfbb.count()
# Taking the Extra Features of interest and adding their values for num_client when necessary
dfExtraFeatSel, selColumnas = addExtraFeatsEvol(dfExtraFeatfbb)
print "[Info Main FbbChurn] " + time.ctime() + " Calculating the total value of the extra feats for each number client"
dfillNa = fillNa(spark)
for kkey in dfillNa.keys():
if kkey not in dfExtraFeatSel.columns:
dfillNa.pop(kkey, None)
inittrdf = inittrdf_ini.join(dfExtraFeatSel, ["msisdn", "num_cliente", 'rgu'], how="left").na.fill(dfillNa)
print "[Info Main FbbChurn] " + time.ctime() + " Saving inittrdf to HDFS " +str(inittrdf.count())
#inittrdf.repartition(200).write.save(path, format='parquet', mode='overwrite')
#path1 = '/data/udf/vf_es/churn/fbb_tmp/unbaltrdf_' + tr_ttdates
#path2 = '/data/udf/vf_es/churn/fbb_tmp/valdf_' + tr_ttdates
#if (pathExist(path1)) and (pathExist(path2)):
#print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path1) + " and " + str(path2) + " already exist. Reading them."
#unbaltrdf = spark.read.parquet(path1)
#valdf = spark.read.parquet(path2)
#else:
#print "[Info Main FbbChurn] " + time.ctime() + " Number of clients after joining the Extra Feats to the training set" + str(inittrdf.count())
[unbaltrdf, valdf] = inittrdf.randomSplit([0.7, 0.3], 1234)
#print "[Info Main FbbChurn] " + time.ctime() + " Stat description of the target variable printed above"
unbaltrdf = unbaltrdf.cache()
valdf = valdf.cache()
#print "[Info Main FbbChurn] " + time.ctime() + " Saving unbaltrdf to HDFS " + str(unbaltrdf.count())
#print "[Info Main FbbChurn] " + time.ctime() + " Saving valdf to HDFS " + str(valdf.count())
#unbaltrdf.repartition(300).write.save(path1,format='parquet', mode='overwrite')
#valdf.repartition(300).write.save(path2,format='parquet', mode='overwrite')
# 1.2. Balanced df for training
#path = "/data/udf/vf_es/churn/fbb_tmp/trdf_" + tr_ttdates
#if (pathExist(path)):
unbaltrdf.groupBy('label').agg(count('*')).show()
print "[Info Main FbbChurn]" + time.ctime() + " Count on label column for unbalanced tr set showed above"
trdf = balance_df2(unbaltrdf, 'label')
trdf.groupBy('label').agg(count('*')).show()
#print "[Info Main FbbChurn] " + time.ctime() + " Saving trdf to HDFS "
#trdf.repartition(300).write.save(path, format='parquet',mode='overwrite')
# 1.3. Feature selection
allFeats = trdf.columns
# Getting only the numeric variables
catCols = [item[0] for item in trdf.dtypes if item[1].startswith('string')]
numerical_feats = list(set(allFeats) - set(list(
set().union(getIdFeats(), getIdFeats_tr(), getNoInputFeats(), catCols, [c + "_enc" for c in getCatFeatsCrm()],
["label"]))))
noninf_feats = getNonInfFeats(trdf, numerical_feats)
for f in noninf_feats:
print "[Info Main FbbChurn] Non-informative feat: " + f
####################
### 2. TEST DATA ###
####################
#path = "/data/udf/vf_es/churn/fbb_tmp/ttdf_ini_" + tr_ttdates
#if (pathExist(path)):
#print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path) + " already exists. Reading it."
#ttdf_ini = spark.read.parquet(path)
#else:
ttdf_ini = getFbbChurnLabeledCarCycles(spark, origin, ttcycle_ini, selcols,horizon)
#print "[Info Main FbbChurn] " + time.ctime() + " Saving ttdf_ini to HDFS "
#ttdf_ini.repartition(200).write.save(path,format='parquet', mode='overwrite')
#ttdf_ini.describe('label').show()
#path = "/data/udf/vf_es/churn/fbb_tmp/ttdf_" + tr_ttdates
#if (pathExist(path)):
# print "[Info Main FbbChurn] " + time.ctime() + " File " + str(path) + " already exists. Reading it."
# ttdf = spark.read.parquet(path)
#else:
dfExtraFeat_tt = spark.read.parquet('/data/udf/vf_es/churn/extra_feats_mod/extra_feats/year={}/month={}/day={}'
.format(int(ttcycle_ini[0:4]), int(ttcycle_ini[4:6]), int(ttcycle_ini[6:8])))
dfExtraFeatfbb_tt = dfExtraFeat_tt.join(ttdf_ini.select('num_cliente'), on='num_cliente', how='leftsemi')
print(dfExtraFeatfbb_tt.select('num_cliente').distinct().count(), ttdf_ini.select('num_cliente').distinct().count())
dfExtraFeatfbb_tt = dfExtraFeatfbb_tt.cache()
print("[Info Main FbbChurn] " + time.ctime() + " Count of the ExtraFeats ", dfExtraFeatfbb_tt.count())
dfExtraFeat_ttSel, selColumnas = addExtraFeatsEvol(dfExtraFeatfbb_tt)
#print "[Info Main FbbChurn] " + time.ctime() + " Calculating the total value of the extra feats for each number client in tt"
dfillNa = fillNa(spark)
for kkey in dfillNa.keys():
if kkey not in dfExtraFeat_ttSel.columns:
dfillNa.pop(kkey, None)
ttdf = ttdf_ini.join(dfExtraFeat_ttSel, ["msisdn", "num_cliente", 'rgu'], how="left").na.fill(dfillNa)
print "[Info Main FbbChurn] " + time.ctime() + " Number of clients after joining the Extra Feats to the test set " + str(ttdf.count())
print "[Info Main FbbChurn] " + time.ctime() + " Saving ttdf to HDFS "
#tdf = ttdf.repartition(300)
#ttdf.repartition(300).write.save(path, format='parquet', mode='overwrite')
####################
### 3. MODELLING ###
####################
featCols = list(set(numerical_feats) - set(noninf_feats))
for f in featCols:
print "[Info Main FbbChurn] Input feat: " + f
assembler = VectorAssembler(inputCols=featCols, outputCol="features")
classifier = RandomForestClassifier(featuresCol="features", \
labelCol="label", \
maxDepth=15, \
maxBins=32, \
minInstancesPerNode=200, \
impurity="gini", \
featureSubsetStrategy="sqrt", \
subsamplingRate=0.7, \
numTrees=800, \
seed=1234)
pipeline = Pipeline(stages=[assembler, classifier])
model = pipeline.fit(trdf)
feat_importance = getOrderedRelevantFeats(model, featCols, 'f', 'rf')
for fimp in feat_importance:
print "[Info Main FbbChurn] Imp feat " + str(fimp[0]) + ": " + str(fimp[1])
##################
### EVALUATION ###
##################
# Calibration
calibmodel = get_calibration_function2(spark, model, valdf, 'label', 10)
getScore = udf(lambda prob: float(prob[1]), DoubleType())
# Train
tr_preds_df = model.transform(trdf).withColumn("model_score", getScore(col("probability")).cast(DoubleType()))
tr_calib_preds_df = calibmodel[0].transform(tr_preds_df)
# Train evaluation
trPredictionAndLabels = tr_calib_preds_df.select(['calib_model_score', 'label']).rdd.map(lambda r: (r['calib_model_score'], r['label']))
trmetrics = BinaryClassificationMetrics(trPredictionAndLabels)
# Test eval
tt_preds_df = model.transform(ttdf).withColumn("model_score", getScore(col("probability")).cast(DoubleType()))
tt_calib_preds_df = calibmodel[0].transform(tt_preds_df)
# Evaluation
ttPredictionAndLabels = tt_calib_preds_df.select(['calib_model_score', 'label']).rdd.map(lambda r: (r['calib_model_score'], r['label']))
ttmetrics = BinaryClassificationMetrics(ttPredictionAndLabels)
print(" Area under ROC(tr) = " + str(trmetrics.areaUnderROC))
print(" Area under ROC(tt) = " + str(ttmetrics.areaUnderROC))
print("[Info Main FbbChurn] " + time.ctime() + " Area under ROC(tr) = " + str(trmetrics.areaUnderROC) + " - Area under ROC(tt) = " + str(ttmetrics.areaUnderROC))
lift = get_lift(tt_calib_preds_df, 'calib_model_score', 'label', 40)
for d, l in lift:
print str(d) + ": " + str(l)
print('Summary of Evaluations')
print('ttmetrics', ttmetrics.areaUnderROC)
print('trmetrics', trmetrics.areaUnderROC)
#########################
### ENSEMBLER ALBERTO ###
#########################
bad = tr_preds_df.where(col('label') != col('prediction')).drop(col('features'))
good = tr_preds_df.where(col('label') == col('prediction')).drop(col('features'))
bad_b = balance_df2(bad, 'label')
good_b = balance_df2(good, 'label')
#full_concat = tr_preds_df.union(tt_preds_df)
assembler2 = VectorAssembler(inputCols=featCols, outputCol="features")
classifier2 = RandomForestClassifier(featuresCol="features", \
labelCol="label", \
maxDepth=15, \
maxBins=32, \
minInstancesPerNode=200, \
impurity="gini", \
featureSubsetStrategy="sqrt", \
subsamplingRate=0.7, \
numTrees=800, \
seed=1234)
pipeline2 = Pipeline(stages=[assembler2, classifier2])
model_good = pipeline2.fit(good_b)
model_bad = pipeline2.fit(bad_b)
calibmodel_good = get_calibration_function2(spark, model_good, valdf, 'label', 10)
calibmodel_bad = get_calibration_function2(spark, model_bad, valdf, 'label', 10)
tr_good = model_good.transform(good_b).drop(col('features'))
tr_bad = model_bad.transform(bad_b).drop(col('features'))
tr_preds_good = tr_good.withColumn("model_score_good_calib", getScore(col("probability")).cast(DoubleType()))
tr_calib_preds_good = calibmodel_good[0].transform(tr_preds_good)
tr_preds_bad = tr_bad.withColumn("model_score_bad_calib", getScore(col("probability")).cast(DoubleType()))
tr_calib_preds_bad = calibmodel_bad[0].transform(tr_preds_bad)
final_df = tr_preds_good.select('msisdn', "model_score_good_calib").join(tr_preds_bad.select('msisdn', "model_score_bad_calib"), ['msisdn'], 'inner')
final_feats = ["model_score_good_calib", "model_score_bad_calib"]
assembler3 = VectorAssembler(inputCols=final_feats, outputCol="features")
classifier3 = RandomForestClassifier(featuresCol="features", \
labelCol="label", \
maxDepth=15, \
maxBins=32, \
minInstancesPerNode=200, \
impurity="gini", \
featureSubsetStrategy="sqrt", \
subsamplingRate=0.7, \
numTrees=800, \
seed=1234)
pipeline3 = Pipeline(stages=[assembler3, classifier3])
[tr_final, tt_final] = final_df.randomsplit([0.7, 0.3], 1234)
model_f = pipeline3.fit(tr_final)
calibmodel_final = get_calibration_function2(spark, model_f, valdf, 'label', 10)
final_tr_df_with_scores = model_f.transform(tr_final).withColumn("final_score", getScore(col("probability")).cast(DoubleType()))
final_tr_df_with_calib_scores = calibmodel_final[0].transform(final_tr_df_with_scores)
tr_PredictionAndLabels = final_tr_df_with_calib_scores.select(['final_score_calib', 'label']).rdd.map(lambda r: (r['final_score_calib'], r['label']))
final_tt_df_with_scores = model_f.transform(tt_final).withColumn("final_score", getScore(col("probability")).cast(DoubleType()))
final_tt_df_with_calib_scores = calibmodel_final[0].transform(final_tt_df_with_scores)
tt_PredictionAndLabels = final_tt_df_with_calib_scores.select(['final_score_calib', 'label']).rdd.map(lambda r: (r['final_score_calib'], r['label']))
tr_metrics_final = BinaryClassificationMetrics(tr_PredictionAndLabels)
tt_metrics_final = BinaryClassificationMetrics(tt_PredictionAndLabels)
print 'Final AUC (tr): {}'.format(tr_metrics_final.areaUnderROC)
print 'Final AUC (tt): {}'.format(tt_metrics_final.areaUnderROC)
#Calibrar los scores dándoles un nuevo nombre para que sean independientes DONE
#Combinarlos en un nuevo df DONE
#Entrenar el ultimo modelo
#Mucho ojo a que nombres espera la funcion de calibracion
print(" ")
print("[Info Main FbbChurn] Process completed")
print(" ")
spark.stop()
| setting_bdp | identifier_name |
feature_extraction.py | import numpy as np
import pandas as pd
from scipy.signal import welch
from scipy.fftpack import fft
import scipy.stats
from collections import Counter
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score as accuracy
import matplotlib.pyplot as plt
from data_handling import load_dataset
"""
Features:
- FFT (first n peaks coordinates in axis x and y)
- PSD (first n peaks coordinates in axis x and y)
- Auto-correlation (first n peaks coordinates in axis x and y)
- entropy
- statistics features (n5, n25, n75, n95, median, mean, std, var, rms)
- crossings
Activities description:
1: walking
2: walking upstairs
3: walking downstairs
4: sitting
5: standing
6: laying
"""
def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising',
kpsh=False, valley=False, show=False, ax=None):
"""Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
mph : {None, number}, optional (default = None)
detect peaks that are greater than minimum peak height.
mpd : positive integer, optional (default = 1)
detect peaks that are at least separated by minimum peak distance (in
number of data).
threshold : positive number, optional (default = 0)
detect peaks (valleys) that are greater (smaller) than `threshold`
in relation to their immediate neighbors.
edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
for a flat peak, keep only the rising edge ('rising'), only the
falling edge ('falling'), both edges ('both'), or don't detect a
flat peak (None).
kpsh : bool, optional (default = False)
keep peaks with same height even if they are closer than `mpd`.
valley : bool, optional (default = False)
if True (1), detect valleys (local minima) instead of peaks.
show : bool, optional (default = False)
if True (1), plot data in matplotlib figure.
ax : a matplotlib.axes.Axes instance, optional (default = None).
Returns
-------
ind : 1D array_like
indeces of the peaks in `x`.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
"""
x = np.atleast_1d(x).astype('float64')
if x.size < 3:
return np.array([], dtype=int)
if valley:
x = -x
# find indices of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
indnan = np.where(np.isnan(x))[0]
if indnan.size:
x[indnan] = np.inf
dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if not edge:
ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
# handle NaN's
if ind.size and indnan.size:
# NaN's and values close to NaN's cannot be peaks
ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan - 1, indnan + 1))), invert=True)]
# first and last values of x cannot be peaks
if ind.size and ind[0] == 0:
ind = ind[1:]
if ind.size and ind[-1] == x.size - 1:
ind = ind[:-1]
# remove peaks < minimum peak height
if ind.size and mph is not None:
ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
if ind.size and threshold > 0:
dx = np.min(np.vstack([x[ind] - x[ind - 1], x[ind] - x[ind + 1]]), axis=0)
ind = np.delete(ind, np.where(dx < threshold)[0])
# detect small peaks closer than minimum peak distance
if ind.size and mpd > 1:
ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height
idel = np.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
# keep peaks with the same height if kpsh is True
idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
& (x[ind[i]] > x[ind] if kpsh else True)
idel[i] = 0 # Keep current peak
# remove the small peaks and sort back the indices by their occurrence
ind = np.sort(ind[~idel])
if show:
if indnan.size:
x[indnan] = np.nan
if valley:
x = -x
_plot(x, mph, mpd, threshold, edge, valley, ax, ind)
return ind
def get_values(y_values, T, N, f_s):
y_values = y_values
x_values = [(1 / f_s) * kk for kk in range(0, len(y_values))]
return x_values, y_values
def get_fft_values(y_values, T, N, f_s):
f_values = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)
fft_values_ = fft(y_values)
fft_values = 2.0 / N * np.abs(fft_values_[0:N // 2])
return f_values, fft_values
def get_psd_values(y_values, T, N, f_s):
f_values, psd_values = welch(y_values, fs=f_s)
return f_values, psd_values
def autocorr(x):
result = np.correlate(x, x, mode='full')
return result[len(result) // 2:]
def get_autocorr_values(y_values, T, N, f_s):
autocorr_values = autocorr(y_values)
x_values = np.array([T * jj for jj in range(0, N)])
return x_values, autocorr_values
def get_first_n_peaks(x, y, no_peaks=5):
x_, y_ = list(x), list(y)
if len(x_) >= no_peaks:
return x_[:no_peaks], y_[:no_peaks]
else:
missing_no_peaks = no_peaks - len(x_)
return x_ + [0] * missing_no_peaks, y_ + [0] * missing_no_peaks
def get_features(x_values, y_values, mph):
indices_peaks = detect_peaks(y_values, mph=mph)
peaks_x, peaks_y = get_first_n_peaks(x_values[indices_peaks], y_values[indices_peaks])
return peaks_x + peaks_y
def calculate_entropy(list_values):
counter_values = Counter(list_values).most_common()
probabilities = [elem[1] / len(list_values) for elem in counter_values]
entropy = scipy.stats.entropy(probabilities)
return entropy
def calculate_statistics(list_values):
n5 = np.nanpercentile(list_values, 5)
n25 = np.nanpercentile(list_values, 25)
n75 = np.nanpercentile(list_values, 75)
n95 = np.nanpercentile(list_values, 95)
median = np.nanpercentile(list_values, 50)
mean = np.nanmean(list_values)
std = np.nanstd(list_values)
var = np.nanvar(list_values)
rms = np.nanmean(np.sqrt(list_values ** 2))
return [n5, n25, n75, n95, median, mean, std, var, rms]
def calculate_crossings(list_values):
zero_crossing_indices = np.nonzero(np.diff(np.array(list_values) > 0))[0]
no_zero_crossings = len(zero_crossing_indices)
mean_crossing_indices = np.nonzero(np.diff(np.array(list_values) > np.nanmean(list_values)))[0]
no_mean_crossings = len(mean_crossing_indices)
return [no_zero_crossings, no_mean_crossings]
def get_single_features(list_values):
entropy = calculate_entropy(list_values)
crossings = calculate_crossings(list_values)
statistics = calculate_statistics(list_values)
return [entropy] + crossings + statistics
def extract_features(dataset, labels, T, N, f_s, denominator):
percentile = 5
list_of_features = []
list_of_labels = []
for signal_no in range(0, len(dataset)):
features = []
list_of_labels.append(labels[signal_no])
for signal_comp in range(0, dataset.shape[2]):
signal = dataset[signal_no, :, signal_comp]
signal_min = np.nanpercentile(signal, percentile)
signal_max = np.nanpercentile(signal, 100 - percentile)
# ijk = (100 - 2*percentile)/10
mph = signal_min + (signal_max - signal_min) / denominator
# Peak features
features += get_features(*get_psd_values(signal, T, N, f_s), mph)
features += get_features(*get_fft_values(signal, T, N, f_s), mph)
features += get_features(*get_autocorr_values(signal, T, N, f_s), mph)
# Single features
features += get_single_features(signal)
list_of_features.append(features)
return np.array(list_of_features), np.array(list_of_labels)
def evaluate_model(clf, X_train, y_train, X_test, y_test):
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
acc = accuracy(y_test, y_pred)
score = acc * 100.0
return score
# Summarize scores
def summarize_results(scores, classifiers):
# summarize mean and standard deviation
for score, clf in zip(scores, classifiers):
m, s = np.mean(score), np.std(score)
print("-"*40)
print('Average score: %.3f%% (+/-%.3f)' % (m, s))
print(clf, "\n")
# Box-plot of scores
# plt.boxplot(scores, labels=[clf.__class__.__name__ if clf.__class__.__name__ != 'Pipeline'
# else clf[-1].__class__.__name__ for clf in classifiers])
# plt.title("Accuracy")
# plt.show()
# Run an experiment
def | (repeats=10):
# Load data
N = 128
fs = 50 # Hz
t_n = 2.56 # total duration seconds
T = t_n / N # sample_rate T
denominator = 10
# Load data
X_train, X_test, y_train, y_test = load_dataset(verbose=0)
# Feature extraction
X_train, Y_train = extract_features(X_train, y_train, T, N, fs, denominator)
X_test, Y_test = extract_features(X_test, y_test, T, N, fs, denominator)
# Models
classifiers = [
make_pipeline(MinMaxScaler(), LogisticRegression(max_iter=300)),
make_pipeline(StandardScaler(), LogisticRegression(C=30, max_iter=300)),
make_pipeline(MinMaxScaler(), SVC(kernel='rbf')),
make_pipeline(MinMaxScaler(), KNeighborsClassifier()),
RandomForestClassifier(n_estimators=800, min_samples_leaf=5, n_jobs=-1),
MLPClassifier(hidden_layer_sizes=(100, 100)),
GradientBoostingClassifier(n_estimators=100)
]
all_scores = list()
for clf in classifiers:
# Repeat experiment
scores = list()
for r in range(repeats):
score = evaluate_model(clf, X_train, y_train, X_test, y_test)
scores.append(score)
all_scores.append(scores)
# summarize results
summarize_results(all_scores, classifiers)
if __name__ == '__main__':
run_experiment(repeats=1)
| run_experiment | identifier_name |
feature_extraction.py | import numpy as np
import pandas as pd
from scipy.signal import welch
from scipy.fftpack import fft
import scipy.stats
from collections import Counter
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score as accuracy
import matplotlib.pyplot as plt
from data_handling import load_dataset
"""
Features:
- FFT (first n peaks coordinates in axis x and y)
- PSD (first n peaks coordinates in axis x and y)
- Auto-correlation (first n peaks coordinates in axis x and y)
- entropy
- statistics features (n5, n25, n75, n95, median, mean, std, var, rms)
- crossings
Activities description:
1: walking
2: walking upstairs
3: walking downstairs
4: sitting
5: standing
6: laying
"""
def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising',
kpsh=False, valley=False, show=False, ax=None):
"""Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
mph : {None, number}, optional (default = None)
detect peaks that are greater than minimum peak height.
mpd : positive integer, optional (default = 1)
detect peaks that are at least separated by minimum peak distance (in
number of data).
threshold : positive number, optional (default = 0)
detect peaks (valleys) that are greater (smaller) than `threshold`
in relation to their immediate neighbors.
edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
for a flat peak, keep only the rising edge ('rising'), only the
falling edge ('falling'), both edges ('both'), or don't detect a
flat peak (None).
kpsh : bool, optional (default = False)
keep peaks with same height even if they are closer than `mpd`.
valley : bool, optional (default = False)
if True (1), detect valleys (local minima) instead of peaks.
show : bool, optional (default = False)
if True (1), plot data in matplotlib figure.
ax : a matplotlib.axes.Axes instance, optional (default = None).
Returns
-------
ind : 1D array_like
indeces of the peaks in `x`.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
"""
x = np.atleast_1d(x).astype('float64')
if x.size < 3:
return np.array([], dtype=int)
if valley:
x = -x
# find indices of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
indnan = np.where(np.isnan(x))[0]
if indnan.size:
x[indnan] = np.inf
dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if not edge:
ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
# handle NaN's
if ind.size and indnan.size:
# NaN's and values close to NaN's cannot be peaks
ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan - 1, indnan + 1))), invert=True)]
# first and last values of x cannot be peaks
if ind.size and ind[0] == 0:
ind = ind[1:]
if ind.size and ind[-1] == x.size - 1:
ind = ind[:-1]
# remove peaks < minimum peak height
if ind.size and mph is not None:
ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
if ind.size and threshold > 0:
dx = np.min(np.vstack([x[ind] - x[ind - 1], x[ind] - x[ind + 1]]), axis=0)
ind = np.delete(ind, np.where(dx < threshold)[0])
# detect small peaks closer than minimum peak distance
if ind.size and mpd > 1:
ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height
idel = np.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
# keep peaks with the same height if kpsh is True
|
# remove the small peaks and sort back the indices by their occurrence
ind = np.sort(ind[~idel])
if show:
if indnan.size:
x[indnan] = np.nan
if valley:
x = -x
_plot(x, mph, mpd, threshold, edge, valley, ax, ind)
return ind
def get_values(y_values, T, N, f_s):
y_values = y_values
x_values = [(1 / f_s) * kk for kk in range(0, len(y_values))]
return x_values, y_values
def get_fft_values(y_values, T, N, f_s):
f_values = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)
fft_values_ = fft(y_values)
fft_values = 2.0 / N * np.abs(fft_values_[0:N // 2])
return f_values, fft_values
def get_psd_values(y_values, T, N, f_s):
f_values, psd_values = welch(y_values, fs=f_s)
return f_values, psd_values
def autocorr(x):
result = np.correlate(x, x, mode='full')
return result[len(result) // 2:]
def get_autocorr_values(y_values, T, N, f_s):
autocorr_values = autocorr(y_values)
x_values = np.array([T * jj for jj in range(0, N)])
return x_values, autocorr_values
def get_first_n_peaks(x, y, no_peaks=5):
x_, y_ = list(x), list(y)
if len(x_) >= no_peaks:
return x_[:no_peaks], y_[:no_peaks]
else:
missing_no_peaks = no_peaks - len(x_)
return x_ + [0] * missing_no_peaks, y_ + [0] * missing_no_peaks
def get_features(x_values, y_values, mph):
indices_peaks = detect_peaks(y_values, mph=mph)
peaks_x, peaks_y = get_first_n_peaks(x_values[indices_peaks], y_values[indices_peaks])
return peaks_x + peaks_y
def calculate_entropy(list_values):
counter_values = Counter(list_values).most_common()
probabilities = [elem[1] / len(list_values) for elem in counter_values]
entropy = scipy.stats.entropy(probabilities)
return entropy
def calculate_statistics(list_values):
n5 = np.nanpercentile(list_values, 5)
n25 = np.nanpercentile(list_values, 25)
n75 = np.nanpercentile(list_values, 75)
n95 = np.nanpercentile(list_values, 95)
median = np.nanpercentile(list_values, 50)
mean = np.nanmean(list_values)
std = np.nanstd(list_values)
var = np.nanvar(list_values)
rms = np.nanmean(np.sqrt(list_values ** 2))
return [n5, n25, n75, n95, median, mean, std, var, rms]
def calculate_crossings(list_values):
zero_crossing_indices = np.nonzero(np.diff(np.array(list_values) > 0))[0]
no_zero_crossings = len(zero_crossing_indices)
mean_crossing_indices = np.nonzero(np.diff(np.array(list_values) > np.nanmean(list_values)))[0]
no_mean_crossings = len(mean_crossing_indices)
return [no_zero_crossings, no_mean_crossings]
def get_single_features(list_values):
entropy = calculate_entropy(list_values)
crossings = calculate_crossings(list_values)
statistics = calculate_statistics(list_values)
return [entropy] + crossings + statistics
def extract_features(dataset, labels, T, N, f_s, denominator):
percentile = 5
list_of_features = []
list_of_labels = []
for signal_no in range(0, len(dataset)):
features = []
list_of_labels.append(labels[signal_no])
for signal_comp in range(0, dataset.shape[2]):
signal = dataset[signal_no, :, signal_comp]
signal_min = np.nanpercentile(signal, percentile)
signal_max = np.nanpercentile(signal, 100 - percentile)
# ijk = (100 - 2*percentile)/10
mph = signal_min + (signal_max - signal_min) / denominator
# Peak features
features += get_features(*get_psd_values(signal, T, N, f_s), mph)
features += get_features(*get_fft_values(signal, T, N, f_s), mph)
features += get_features(*get_autocorr_values(signal, T, N, f_s), mph)
# Single features
features += get_single_features(signal)
list_of_features.append(features)
return np.array(list_of_features), np.array(list_of_labels)
def evaluate_model(clf, X_train, y_train, X_test, y_test):
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
acc = accuracy(y_test, y_pred)
score = acc * 100.0
return score
# Summarize scores
def summarize_results(scores, classifiers):
# summarize mean and standard deviation
for score, clf in zip(scores, classifiers):
m, s = np.mean(score), np.std(score)
print("-"*40)
print('Average score: %.3f%% (+/-%.3f)' % (m, s))
print(clf, "\n")
# Box-plot of scores
# plt.boxplot(scores, labels=[clf.__class__.__name__ if clf.__class__.__name__ != 'Pipeline'
# else clf[-1].__class__.__name__ for clf in classifiers])
# plt.title("Accuracy")
# plt.show()
# Run an experiment
def run_experiment(repeats=10):
# Load data
N = 128
fs = 50 # Hz
t_n = 2.56 # total duration seconds
T = t_n / N # sample_rate T
denominator = 10
# Load data
X_train, X_test, y_train, y_test = load_dataset(verbose=0)
# Feature extraction
X_train, Y_train = extract_features(X_train, y_train, T, N, fs, denominator)
X_test, Y_test = extract_features(X_test, y_test, T, N, fs, denominator)
# Models
classifiers = [
make_pipeline(MinMaxScaler(), LogisticRegression(max_iter=300)),
make_pipeline(StandardScaler(), LogisticRegression(C=30, max_iter=300)),
make_pipeline(MinMaxScaler(), SVC(kernel='rbf')),
make_pipeline(MinMaxScaler(), KNeighborsClassifier()),
RandomForestClassifier(n_estimators=800, min_samples_leaf=5, n_jobs=-1),
MLPClassifier(hidden_layer_sizes=(100, 100)),
GradientBoostingClassifier(n_estimators=100)
]
all_scores = list()
for clf in classifiers:
# Repeat experiment
scores = list()
for r in range(repeats):
score = evaluate_model(clf, X_train, y_train, X_test, y_test)
scores.append(score)
all_scores.append(scores)
# summarize results
summarize_results(all_scores, classifiers)
if __name__ == '__main__':
run_experiment(repeats=1)
| idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
& (x[ind[i]] > x[ind] if kpsh else True)
idel[i] = 0 # Keep current peak | conditional_block |
feature_extraction.py | import numpy as np
import pandas as pd
from scipy.signal import welch
from scipy.fftpack import fft
import scipy.stats
from collections import Counter
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score as accuracy
import matplotlib.pyplot as plt
from data_handling import load_dataset
"""
Features:
- FFT (first n peaks coordinates in axis x and y)
- PSD (first n peaks coordinates in axis x and y)
- Auto-correlation (first n peaks coordinates in axis x and y)
- entropy
- statistics features (n5, n25, n75, n95, median, mean, std, var, rms)
- crossings
Activities description:
1: walking
2: walking upstairs
3: walking downstairs
4: sitting
5: standing
6: laying
"""
def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising',
kpsh=False, valley=False, show=False, ax=None):
"""Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
mph : {None, number}, optional (default = None)
detect peaks that are greater than minimum peak height.
mpd : positive integer, optional (default = 1)
detect peaks that are at least separated by minimum peak distance (in
number of data).
threshold : positive number, optional (default = 0)
detect peaks (valleys) that are greater (smaller) than `threshold`
in relation to their immediate neighbors.
edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
for a flat peak, keep only the rising edge ('rising'), only the
falling edge ('falling'), both edges ('both'), or don't detect a
flat peak (None).
kpsh : bool, optional (default = False)
keep peaks with same height even if they are closer than `mpd`.
valley : bool, optional (default = False)
if True (1), detect valleys (local minima) instead of peaks.
show : bool, optional (default = False)
if True (1), plot data in matplotlib figure.
ax : a matplotlib.axes.Axes instance, optional (default = None).
Returns
-------
ind : 1D array_like
indeces of the peaks in `x`.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
"""
x = np.atleast_1d(x).astype('float64')
if x.size < 3:
return np.array([], dtype=int)
if valley:
x = -x
# find indices of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
indnan = np.where(np.isnan(x))[0]
if indnan.size:
x[indnan] = np.inf
dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if not edge:
ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
# handle NaN's
if ind.size and indnan.size:
# NaN's and values close to NaN's cannot be peaks
ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan - 1, indnan + 1))), invert=True)]
# first and last values of x cannot be peaks | if ind.size and ind[-1] == x.size - 1:
ind = ind[:-1]
# remove peaks < minimum peak height
if ind.size and mph is not None:
ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
if ind.size and threshold > 0:
dx = np.min(np.vstack([x[ind] - x[ind - 1], x[ind] - x[ind + 1]]), axis=0)
ind = np.delete(ind, np.where(dx < threshold)[0])
# detect small peaks closer than minimum peak distance
if ind.size and mpd > 1:
ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height
idel = np.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
# keep peaks with the same height if kpsh is True
idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
& (x[ind[i]] > x[ind] if kpsh else True)
idel[i] = 0 # Keep current peak
# remove the small peaks and sort back the indices by their occurrence
ind = np.sort(ind[~idel])
if show:
if indnan.size:
x[indnan] = np.nan
if valley:
x = -x
_plot(x, mph, mpd, threshold, edge, valley, ax, ind)
return ind
def get_values(y_values, T, N, f_s):
y_values = y_values
x_values = [(1 / f_s) * kk for kk in range(0, len(y_values))]
return x_values, y_values
def get_fft_values(y_values, T, N, f_s):
f_values = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)
fft_values_ = fft(y_values)
fft_values = 2.0 / N * np.abs(fft_values_[0:N // 2])
return f_values, fft_values
def get_psd_values(y_values, T, N, f_s):
f_values, psd_values = welch(y_values, fs=f_s)
return f_values, psd_values
def autocorr(x):
result = np.correlate(x, x, mode='full')
return result[len(result) // 2:]
def get_autocorr_values(y_values, T, N, f_s):
autocorr_values = autocorr(y_values)
x_values = np.array([T * jj for jj in range(0, N)])
return x_values, autocorr_values
def get_first_n_peaks(x, y, no_peaks=5):
x_, y_ = list(x), list(y)
if len(x_) >= no_peaks:
return x_[:no_peaks], y_[:no_peaks]
else:
missing_no_peaks = no_peaks - len(x_)
return x_ + [0] * missing_no_peaks, y_ + [0] * missing_no_peaks
def get_features(x_values, y_values, mph):
indices_peaks = detect_peaks(y_values, mph=mph)
peaks_x, peaks_y = get_first_n_peaks(x_values[indices_peaks], y_values[indices_peaks])
return peaks_x + peaks_y
def calculate_entropy(list_values):
counter_values = Counter(list_values).most_common()
probabilities = [elem[1] / len(list_values) for elem in counter_values]
entropy = scipy.stats.entropy(probabilities)
return entropy
def calculate_statistics(list_values):
n5 = np.nanpercentile(list_values, 5)
n25 = np.nanpercentile(list_values, 25)
n75 = np.nanpercentile(list_values, 75)
n95 = np.nanpercentile(list_values, 95)
median = np.nanpercentile(list_values, 50)
mean = np.nanmean(list_values)
std = np.nanstd(list_values)
var = np.nanvar(list_values)
rms = np.nanmean(np.sqrt(list_values ** 2))
return [n5, n25, n75, n95, median, mean, std, var, rms]
def calculate_crossings(list_values):
zero_crossing_indices = np.nonzero(np.diff(np.array(list_values) > 0))[0]
no_zero_crossings = len(zero_crossing_indices)
mean_crossing_indices = np.nonzero(np.diff(np.array(list_values) > np.nanmean(list_values)))[0]
no_mean_crossings = len(mean_crossing_indices)
return [no_zero_crossings, no_mean_crossings]
def get_single_features(list_values):
entropy = calculate_entropy(list_values)
crossings = calculate_crossings(list_values)
statistics = calculate_statistics(list_values)
return [entropy] + crossings + statistics
def extract_features(dataset, labels, T, N, f_s, denominator):
percentile = 5
list_of_features = []
list_of_labels = []
for signal_no in range(0, len(dataset)):
features = []
list_of_labels.append(labels[signal_no])
for signal_comp in range(0, dataset.shape[2]):
signal = dataset[signal_no, :, signal_comp]
signal_min = np.nanpercentile(signal, percentile)
signal_max = np.nanpercentile(signal, 100 - percentile)
# ijk = (100 - 2*percentile)/10
mph = signal_min + (signal_max - signal_min) / denominator
# Peak features
features += get_features(*get_psd_values(signal, T, N, f_s), mph)
features += get_features(*get_fft_values(signal, T, N, f_s), mph)
features += get_features(*get_autocorr_values(signal, T, N, f_s), mph)
# Single features
features += get_single_features(signal)
list_of_features.append(features)
return np.array(list_of_features), np.array(list_of_labels)
def evaluate_model(clf, X_train, y_train, X_test, y_test):
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
acc = accuracy(y_test, y_pred)
score = acc * 100.0
return score
# Summarize scores
def summarize_results(scores, classifiers):
# summarize mean and standard deviation
for score, clf in zip(scores, classifiers):
m, s = np.mean(score), np.std(score)
print("-"*40)
print('Average score: %.3f%% (+/-%.3f)' % (m, s))
print(clf, "\n")
# Box-plot of scores
# plt.boxplot(scores, labels=[clf.__class__.__name__ if clf.__class__.__name__ != 'Pipeline'
# else clf[-1].__class__.__name__ for clf in classifiers])
# plt.title("Accuracy")
# plt.show()
# Run an experiment
def run_experiment(repeats=10):
# Load data
N = 128
fs = 50 # Hz
t_n = 2.56 # total duration seconds
T = t_n / N # sample_rate T
denominator = 10
# Load data
X_train, X_test, y_train, y_test = load_dataset(verbose=0)
# Feature extraction
X_train, Y_train = extract_features(X_train, y_train, T, N, fs, denominator)
X_test, Y_test = extract_features(X_test, y_test, T, N, fs, denominator)
# Models
classifiers = [
make_pipeline(MinMaxScaler(), LogisticRegression(max_iter=300)),
make_pipeline(StandardScaler(), LogisticRegression(C=30, max_iter=300)),
make_pipeline(MinMaxScaler(), SVC(kernel='rbf')),
make_pipeline(MinMaxScaler(), KNeighborsClassifier()),
RandomForestClassifier(n_estimators=800, min_samples_leaf=5, n_jobs=-1),
MLPClassifier(hidden_layer_sizes=(100, 100)),
GradientBoostingClassifier(n_estimators=100)
]
all_scores = list()
for clf in classifiers:
# Repeat experiment
scores = list()
for r in range(repeats):
score = evaluate_model(clf, X_train, y_train, X_test, y_test)
scores.append(score)
all_scores.append(scores)
# summarize results
summarize_results(all_scores, classifiers)
if __name__ == '__main__':
run_experiment(repeats=1) | if ind.size and ind[0] == 0:
ind = ind[1:] | random_line_split |
feature_extraction.py | import numpy as np
import pandas as pd
from scipy.signal import welch
from scipy.fftpack import fft
import scipy.stats
from collections import Counter
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score as accuracy
import matplotlib.pyplot as plt
from data_handling import load_dataset
"""
Features:
- FFT (first n peaks coordinates in axis x and y)
- PSD (first n peaks coordinates in axis x and y)
- Auto-correlation (first n peaks coordinates in axis x and y)
- entropy
- statistics features (n5, n25, n75, n95, median, mean, std, var, rms)
- crossings
Activities description:
1: walking
2: walking upstairs
3: walking downstairs
4: sitting
5: standing
6: laying
"""
def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising',
kpsh=False, valley=False, show=False, ax=None):
|
def get_values(y_values, T, N, f_s):
y_values = y_values
x_values = [(1 / f_s) * kk for kk in range(0, len(y_values))]
return x_values, y_values
def get_fft_values(y_values, T, N, f_s):
f_values = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)
fft_values_ = fft(y_values)
fft_values = 2.0 / N * np.abs(fft_values_[0:N // 2])
return f_values, fft_values
def get_psd_values(y_values, T, N, f_s):
f_values, psd_values = welch(y_values, fs=f_s)
return f_values, psd_values
def autocorr(x):
result = np.correlate(x, x, mode='full')
return result[len(result) // 2:]
def get_autocorr_values(y_values, T, N, f_s):
autocorr_values = autocorr(y_values)
x_values = np.array([T * jj for jj in range(0, N)])
return x_values, autocorr_values
def get_first_n_peaks(x, y, no_peaks=5):
x_, y_ = list(x), list(y)
if len(x_) >= no_peaks:
return x_[:no_peaks], y_[:no_peaks]
else:
missing_no_peaks = no_peaks - len(x_)
return x_ + [0] * missing_no_peaks, y_ + [0] * missing_no_peaks
def get_features(x_values, y_values, mph):
indices_peaks = detect_peaks(y_values, mph=mph)
peaks_x, peaks_y = get_first_n_peaks(x_values[indices_peaks], y_values[indices_peaks])
return peaks_x + peaks_y
def calculate_entropy(list_values):
counter_values = Counter(list_values).most_common()
probabilities = [elem[1] / len(list_values) for elem in counter_values]
entropy = scipy.stats.entropy(probabilities)
return entropy
def calculate_statistics(list_values):
n5 = np.nanpercentile(list_values, 5)
n25 = np.nanpercentile(list_values, 25)
n75 = np.nanpercentile(list_values, 75)
n95 = np.nanpercentile(list_values, 95)
median = np.nanpercentile(list_values, 50)
mean = np.nanmean(list_values)
std = np.nanstd(list_values)
var = np.nanvar(list_values)
rms = np.nanmean(np.sqrt(list_values ** 2))
return [n5, n25, n75, n95, median, mean, std, var, rms]
def calculate_crossings(list_values):
zero_crossing_indices = np.nonzero(np.diff(np.array(list_values) > 0))[0]
no_zero_crossings = len(zero_crossing_indices)
mean_crossing_indices = np.nonzero(np.diff(np.array(list_values) > np.nanmean(list_values)))[0]
no_mean_crossings = len(mean_crossing_indices)
return [no_zero_crossings, no_mean_crossings]
def get_single_features(list_values):
entropy = calculate_entropy(list_values)
crossings = calculate_crossings(list_values)
statistics = calculate_statistics(list_values)
return [entropy] + crossings + statistics
def extract_features(dataset, labels, T, N, f_s, denominator):
percentile = 5
list_of_features = []
list_of_labels = []
for signal_no in range(0, len(dataset)):
features = []
list_of_labels.append(labels[signal_no])
for signal_comp in range(0, dataset.shape[2]):
signal = dataset[signal_no, :, signal_comp]
signal_min = np.nanpercentile(signal, percentile)
signal_max = np.nanpercentile(signal, 100 - percentile)
# ijk = (100 - 2*percentile)/10
mph = signal_min + (signal_max - signal_min) / denominator
# Peak features
features += get_features(*get_psd_values(signal, T, N, f_s), mph)
features += get_features(*get_fft_values(signal, T, N, f_s), mph)
features += get_features(*get_autocorr_values(signal, T, N, f_s), mph)
# Single features
features += get_single_features(signal)
list_of_features.append(features)
return np.array(list_of_features), np.array(list_of_labels)
def evaluate_model(clf, X_train, y_train, X_test, y_test):
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
acc = accuracy(y_test, y_pred)
score = acc * 100.0
return score
# Summarize scores
def summarize_results(scores, classifiers):
# summarize mean and standard deviation
for score, clf in zip(scores, classifiers):
m, s = np.mean(score), np.std(score)
print("-"*40)
print('Average score: %.3f%% (+/-%.3f)' % (m, s))
print(clf, "\n")
# Box-plot of scores
# plt.boxplot(scores, labels=[clf.__class__.__name__ if clf.__class__.__name__ != 'Pipeline'
# else clf[-1].__class__.__name__ for clf in classifiers])
# plt.title("Accuracy")
# plt.show()
# Run an experiment
def run_experiment(repeats=10):
# Load data
N = 128
fs = 50 # Hz
t_n = 2.56 # total duration seconds
T = t_n / N # sample_rate T
denominator = 10
# Load data
X_train, X_test, y_train, y_test = load_dataset(verbose=0)
# Feature extraction
X_train, Y_train = extract_features(X_train, y_train, T, N, fs, denominator)
X_test, Y_test = extract_features(X_test, y_test, T, N, fs, denominator)
# Models
classifiers = [
make_pipeline(MinMaxScaler(), LogisticRegression(max_iter=300)),
make_pipeline(StandardScaler(), LogisticRegression(C=30, max_iter=300)),
make_pipeline(MinMaxScaler(), SVC(kernel='rbf')),
make_pipeline(MinMaxScaler(), KNeighborsClassifier()),
RandomForestClassifier(n_estimators=800, min_samples_leaf=5, n_jobs=-1),
MLPClassifier(hidden_layer_sizes=(100, 100)),
GradientBoostingClassifier(n_estimators=100)
]
all_scores = list()
for clf in classifiers:
# Repeat experiment
scores = list()
for r in range(repeats):
score = evaluate_model(clf, X_train, y_train, X_test, y_test)
scores.append(score)
all_scores.append(scores)
# summarize results
summarize_results(all_scores, classifiers)
if __name__ == '__main__':
run_experiment(repeats=1)
| """Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
mph : {None, number}, optional (default = None)
detect peaks that are greater than minimum peak height.
mpd : positive integer, optional (default = 1)
detect peaks that are at least separated by minimum peak distance (in
number of data).
threshold : positive number, optional (default = 0)
detect peaks (valleys) that are greater (smaller) than `threshold`
in relation to their immediate neighbors.
edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
for a flat peak, keep only the rising edge ('rising'), only the
falling edge ('falling'), both edges ('both'), or don't detect a
flat peak (None).
kpsh : bool, optional (default = False)
keep peaks with same height even if they are closer than `mpd`.
valley : bool, optional (default = False)
if True (1), detect valleys (local minima) instead of peaks.
show : bool, optional (default = False)
if True (1), plot data in matplotlib figure.
ax : a matplotlib.axes.Axes instance, optional (default = None).
Returns
-------
ind : 1D array_like
indeces of the peaks in `x`.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
"""
x = np.atleast_1d(x).astype('float64')
if x.size < 3:
return np.array([], dtype=int)
if valley:
x = -x
# find indices of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
indnan = np.where(np.isnan(x))[0]
if indnan.size:
x[indnan] = np.inf
dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if not edge:
ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
# handle NaN's
if ind.size and indnan.size:
# NaN's and values close to NaN's cannot be peaks
ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan - 1, indnan + 1))), invert=True)]
# first and last values of x cannot be peaks
if ind.size and ind[0] == 0:
ind = ind[1:]
if ind.size and ind[-1] == x.size - 1:
ind = ind[:-1]
# remove peaks < minimum peak height
if ind.size and mph is not None:
ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
if ind.size and threshold > 0:
dx = np.min(np.vstack([x[ind] - x[ind - 1], x[ind] - x[ind + 1]]), axis=0)
ind = np.delete(ind, np.where(dx < threshold)[0])
# detect small peaks closer than minimum peak distance
if ind.size and mpd > 1:
ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height
idel = np.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
# keep peaks with the same height if kpsh is True
idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
& (x[ind[i]] > x[ind] if kpsh else True)
idel[i] = 0 # Keep current peak
# remove the small peaks and sort back the indices by their occurrence
ind = np.sort(ind[~idel])
if show:
if indnan.size:
x[indnan] = np.nan
if valley:
x = -x
_plot(x, mph, mpd, threshold, edge, valley, ax, ind)
return ind | identifier_body |
main.js | // All colors are from https://www.google.com/design/spec/style/color.html#color-color-palette
var classToColorMap = {
'CS 61A': '#4DD0E1',
'CS 61B': '#F06292',
'CS 61C': '#BA68C8',
'CS 70': '#81C784',
'CS 170': '#E57373',
'CS 188': '#7986CB'
};
var defaultClassColor = '#616161';
var classToColor = function(course) {
return classToColorMap.hasOwnProperty(course) ? classToColorMap[course] : defaultClassColor;
}
var activeFilter = '';
var activeSearchHit = '';
var main = function(entries) {
var startTime = new Date();
var layout = $('#layout').value;
s = new sigma({
renderer: {
container: document.getElementById('graph'),
type: 'canvas'
},
settings: {
font: 'monospace',
minEdgeSize: 0,
maxEdgeSize: 0,
defaultLabelSize: 14,
labelThreshold: 5
}
});
var graph = s.graph;
// Add nodes
yearMap = {};
entries.forEach(function(entry) {
graph.addNode({
id: entry.name,
label: entry.name,
x: entry.name.charCodeAt(0), // Positions are refined below
y: entry.name.charCodeAt(1),
size: 5 + Math.pow(entry.students ? entry.students.length : 0, 0.8)
// TODO: Assign node colors in some meaningful way
});
if (Object.keys(entry).indexOf('year') !== -1) {
yearMap[entry.name] = "'" + ('' + entry.year).substring(2);
}
});
// Add edges
var inMap = {};
var outMap = {};
var edgesToColors = {};
var seenCourses = {};
entries.forEach(function(teacher) {
if (teacher.students) {
teacher.students.forEach(function(student) {
var edgeId = teacher.name + ':' + student.name + ':' + student.class;
var edgeColor = classToColor(student.class);
graph.addEdge({
id: edgeId,
source: teacher.name,
target: student.name,
type: 'arrow',
size: 1,
color: edgeColor
});
edgesToColors[edgeId] = edgeColor;
if (Object.keys(seenCourses).indexOf(student.class) === -1) {
seenCourses[student.class] = true;
}
// Save in/out info for detailed "info" view (on node hover)
if (!inMap[student.name]) {
inMap[student.name] = [];
}
if (!outMap[teacher.name]) {
outMap[teacher.name] = [];
}
inMap[student.name].push(teacher.name + ' (' + student.class + (student.semester ? ', ' + student.semester : '') + ')');
outMap[teacher.name].push(student.name + ' (' + student.class + (student.semester ? ', ' + student.semester : '') + ')');
// Approximate tree-forming: if student is above teacher, swap their y-coordinates
// TODO: Make this better
if (layout !== 'forceDirected') {
var teacherNode = graph.nodes(teacher.name);
var studentNode = graph.nodes(student.name);
if (studentNode.y < teacherNode.y) {
var tmp = studentNode.y;
studentNode.y = teacherNode.y;
teacherNode.y = tmp;
}
}
});
}
});
// Fill in filtering dropdown
var seenCoursesList = Object.keys(seenCourses);
seenCoursesList.sort();
seenCoursesList.forEach(function(course) {
$('#filter').innerHTML += '<option value="' + course + '">' + course + '</option>';
});
// Bind node hover handler
s.bind('overNode', function(e) {
if (activeSearchHit) {
return;
}
var node = e.data.node;
showPersonInfo(node, inMap, outMap);
var edges = s.graph.edges();
edges.forEach(function(edge) {
var idParts = edge.id.split(':');
var teacher = idParts[0];
var student = idParts[1];
if (teacher != node.id && student != node.id) {
edge.color = 'transparent';
} else {
edge.size = 3;
}
});
s.refresh();
});
// Bind node un-hover handler
s.bind('outNode', function(e) {
if (activeSearchHit) {
return;
}
$('#info').style.display = 'none';
if (activeFilter) { // Hack to reapply filter
var activeFilterCopy = activeFilter;
activeFilter = '';
filterByCourse(activeFilterCopy);
} else {
var edges = s.graph.edges();
edges.forEach(function(edge) {
edge.color = edgesToColors[edge.id];
edge.size = 1;
});
s.refresh();
}
});
// Bind search handler
$('#search').onkeydown = function(e) {
if (e.keyCode == 13) {
if (highlightSearchHit($('#search').value)) {
$('#layout-wrapper').style.display = 'none';
$('#filter-wrapper').style.display = 'none';
$('#search-wrapper').style.display = 'none';
$('#search-cancel').style.display = 'inline';
}
}
};
// $('body').onkeydown = function(e) {
// if (e.keyCode == 27) {
// cancelSearchHit();
// }
// };
// Set up autocomplete for search
var names = entries.map(function(e) { return e.name; });
names.sort();
new Awesomplete($('#search'), {
list: names,
minChars: 1,
autoFirst: true
});
$('body').addEventListener('awesomplete-selectcomplete', function(e) {
$('#search').onkeydown({ keyCode: 13 }); // trigger search handler
});
// Zoom out a tiny bit then render
var c = s.cameras[0];
c.ratio *= 1.2;
defaultCameraSettings = {
x: c.x,
y: c.y,
ratio: c.ratio,
angle: c.angle
};
s.refresh();
if (!layout || layout === 'forceDirected') {
s.startForceAtlas2({
gravity: 0.5,
linLogMode: true
});
window.setTimeout(function() { s.killForceAtlas2(); }, 5000);
} else {
// Make sure no nodes overlap
s.configNoverlap({
gridSize: 50,
nodeMargin: 20
});
s.startNoverlap();
}
var elapsedTime = ((new Date()) - startTime) / 1000;
console.log('main() finished in ' + elapsedTime + 's')
};
var showColorLegend = function() {
var newHTML = '';
$.each(classToColorMap, function(className, color) {
newHTML += '<span style="color: ' + color + '" onclick="filterByCourse(\'' + className + '\')"><br>' + className + '</span>';
});
newHTML += '<span style="color: ' + defaultClassColor + '" onclick="filterByCourse(\'\')"><br>Other</span>';
$('#legend').innerHTML = newHTML;
};
var highlightSearchHit = function(name) {
cancelSearchHit();
node = s.graph.nodes(name);
if (node) {
$('#search').value = '';
s.dispatchEvent('overNode', { node: node });
s.cameras[0].goTo(defaultCameraSettings);
activeSearchHit = node.id;
node.color = '#FFA726';
s.refresh();
return true;
}
return false;
};
var cancelSearchHit = function() {
if (activeSearchHit) {
activeSearchHit = '';
$('#layout-wrapper').style.display = 'inline';
$('#filter-wrapper').style.display = 'inline';
$('#search-wrapper').style.display = 'inline';
$('#search-cancel').style.display = 'none';
$('#search').focus();
s.dispatchEvent('outNode', { node: node });
s.graph.nodes().forEach(function(node) {
node.color = 'black';
});
s.refresh();
}
};
var showPersonInfo = function(node, inMap, outMap) {
var name = node.id;
var newHTML = '';
newHTML += '<b>' + name + (Object.keys(yearMap).indexOf(name) !== -1 ? ' (' + yearMap[name] + ')' : '') + '</b>';
if (inMap[name] && inMap[name].length) {
newHTML += '<p>Teachers:<ul>';
inMap[name].forEach(function(teacher) {
newHTML += '<li>' + teacher + '</li>';
| outMap[name].forEach(function(student) {
newHTML += '<li>' + student + '</li>';
});
newHTML += '</ul>';
}
$('#info').innerHTML = newHTML;
$('#info').style.display = 'block';
};
var filterByCourse = function(course) {
if ($('#filter').value !== course) {
$('#filter').value = course;
}
if (course) {
activeFilter = course;
s.graph.edges().forEach(function(edge) {
var idParts = edge.id.split(':');
if (idParts[2] !== course) {
edge.color = 'transparent';
edge.size = 1;
} else {
edge.color = classToColor(idParts[2]);
edge.size = 1;
}
});
s.refresh();
$('#layout-wrapper').style.display = 'none';
$('#search-wrapper').style.display = 'none';
} else {
activeFilter = '';
s.graph.edges().forEach(function(edge) {
var idParts = edge.id.split(':');
edge.color = classToColor(idParts[2]);
edge.size = 1;
});
s.refresh();
$('#layout-wrapper').style.display = 'inline';
$('#search-wrapper').style.display = 'inline';
}
}
var goToLayout = function(layoutName) {
window.location.href = window.location.href.split('?')[0] + '?layout=' + layoutName;
}
var computeLongest = function() {
entries.forEach(function(e) { longestPath(e, []); });
var sortedKeys = Object.keys(memo).sort(function(a, b){ return memo[a] - memo[b] });
sortedKeys.forEach(function(k) { console.log(k, memo[k], longest[k]); });
};
memo = {}
longest = {}
var longestPath = function(e, seen) {
if (Object.keys(memo).indexOf(e.name) !== -1) {
return memo[e.name];
}
if (!e.students || e.students.length === 0) {
memo[e.name] = 1;
longest[e.name] = [e.name];
} else {
var children = e.students.map(function(s) { return s.name; });
var childrenEntries = entries.filter(function(e) {
return children.indexOf(e.name) !== -1 && seen.indexOf(e.name) === -1;
});
var childrenLengths = childrenEntries.map(function(e) { return longestPath(e, [e.name].concat(seen)); });
var maxLength = -1;
var bestIndex = -1;
for (var i = 0; i < childrenEntries.length; i++) {
if (childrenLengths[i] > maxLength) {
maxLength = childrenLengths[i];
bestIndex = i;
}
}
memo[e.name] = maxLength + 1;
longest[e.name] = [e.name].concat(longest[childrenEntries[bestIndex].name]);
}
return memo[e.name];
};
var topClasses = function() {
var courses = s.graph.edges().map(function(e) { return e.id.split(':')[2]; });
var counter = {};
courses.forEach(function(c) {
if (Object.keys(counter).indexOf(c) !== -1) {
counter[c]++;
} else {
counter[c] = 1;
}
});
var sortedKeys = Object.keys(counter).sort(function(a, b){ return counter[a] - counter[b] });
sortedKeys.forEach(function(k) { console.log(k, counter[k]); });
};
// Modified from http://gomakethings.com/how-to-get-the-value-of-a-querystring-with-native-javascript/
var getQueryString = function(field) {
var href = window.location.href;
if (href[href.length - 1] === '/') {
href = href.substring(0, href.length - 1);
}
var reg = new RegExp('[?&]' + field + '=([^&#]*)', 'i');
var string = reg.exec(href);
return string ? string[1] : null;
};
var parseOptions = function() {
$('#layout').value = getQueryString('layout') || 'forceDirected';
$('#layout-wrapper').style.display = 'inline';
}
$.ready().then(function() {
if (/Mobi/.test(navigator.userAgent)) {
if (!confirm('Notice: This website is not optimized for mobile view, and may cause your browser to crash or become unresponsive.')) {
window.history.back();
return;
}
}
var time = new Date().getTime();
$.fetch('data/data.yaml?t=' + time).then(function(data) {
entries = jsyaml.load(data.responseText);
parseOptions();
main(entries);
});
showColorLegend();
$('#about')._.transition({ opacity: 0.9 });
}); | });
newHTML += '</ul>';
}
if (outMap[name] && outMap[name].length) {
newHTML += '<p>Students:<ul>';
| random_line_split |
main.js | // All colors are from https://www.google.com/design/spec/style/color.html#color-color-palette
var classToColorMap = {
'CS 61A': '#4DD0E1',
'CS 61B': '#F06292',
'CS 61C': '#BA68C8',
'CS 70': '#81C784',
'CS 170': '#E57373',
'CS 188': '#7986CB'
};
var defaultClassColor = '#616161';
var classToColor = function(course) {
return classToColorMap.hasOwnProperty(course) ? classToColorMap[course] : defaultClassColor;
}
var activeFilter = '';
var activeSearchHit = '';
var main = function(entries) {
var startTime = new Date();
var layout = $('#layout').value;
s = new sigma({
renderer: {
container: document.getElementById('graph'),
type: 'canvas'
},
settings: {
font: 'monospace',
minEdgeSize: 0,
maxEdgeSize: 0,
defaultLabelSize: 14,
labelThreshold: 5
}
});
var graph = s.graph;
// Add nodes
yearMap = {};
entries.forEach(function(entry) {
graph.addNode({
id: entry.name,
label: entry.name,
x: entry.name.charCodeAt(0), // Positions are refined below
y: entry.name.charCodeAt(1),
size: 5 + Math.pow(entry.students ? entry.students.length : 0, 0.8)
// TODO: Assign node colors in some meaningful way
});
if (Object.keys(entry).indexOf('year') !== -1) {
yearMap[entry.name] = "'" + ('' + entry.year).substring(2);
}
});
// Add edges
var inMap = {};
var outMap = {};
var edgesToColors = {};
var seenCourses = {};
entries.forEach(function(teacher) {
if (teacher.students) {
teacher.students.forEach(function(student) {
var edgeId = teacher.name + ':' + student.name + ':' + student.class;
var edgeColor = classToColor(student.class);
graph.addEdge({
id: edgeId,
source: teacher.name,
target: student.name,
type: 'arrow',
size: 1,
color: edgeColor
});
edgesToColors[edgeId] = edgeColor;
if (Object.keys(seenCourses).indexOf(student.class) === -1) {
seenCourses[student.class] = true;
}
// Save in/out info for detailed "info" view (on node hover)
if (!inMap[student.name]) {
inMap[student.name] = [];
}
if (!outMap[teacher.name]) {
outMap[teacher.name] = [];
}
inMap[student.name].push(teacher.name + ' (' + student.class + (student.semester ? ', ' + student.semester : '') + ')');
outMap[teacher.name].push(student.name + ' (' + student.class + (student.semester ? ', ' + student.semester : '') + ')');
// Approximate tree-forming: if student is above teacher, swap their y-coordinates
// TODO: Make this better
if (layout !== 'forceDirected') {
var teacherNode = graph.nodes(teacher.name);
var studentNode = graph.nodes(student.name);
if (studentNode.y < teacherNode.y) {
var tmp = studentNode.y;
studentNode.y = teacherNode.y;
teacherNode.y = tmp;
}
}
});
}
});
// Fill in filtering dropdown
var seenCoursesList = Object.keys(seenCourses);
seenCoursesList.sort();
seenCoursesList.forEach(function(course) {
$('#filter').innerHTML += '<option value="' + course + '">' + course + '</option>';
});
// Bind node hover handler
s.bind('overNode', function(e) {
if (activeSearchHit) {
return;
}
var node = e.data.node;
showPersonInfo(node, inMap, outMap);
var edges = s.graph.edges();
edges.forEach(function(edge) {
var idParts = edge.id.split(':');
var teacher = idParts[0];
var student = idParts[1];
if (teacher != node.id && student != node.id) {
edge.color = 'transparent';
} else {
edge.size = 3;
}
});
s.refresh();
});
// Bind node un-hover handler
s.bind('outNode', function(e) {
if (activeSearchHit) {
return;
}
$('#info').style.display = 'none';
if (activeFilter) { // Hack to reapply filter
var activeFilterCopy = activeFilter;
activeFilter = '';
filterByCourse(activeFilterCopy);
} else {
var edges = s.graph.edges();
edges.forEach(function(edge) {
edge.color = edgesToColors[edge.id];
edge.size = 1;
});
s.refresh();
}
});
// Bind search handler
$('#search').onkeydown = function(e) {
if (e.keyCode == 13) {
if (highlightSearchHit($('#search').value)) {
$('#layout-wrapper').style.display = 'none';
$('#filter-wrapper').style.display = 'none';
$('#search-wrapper').style.display = 'none';
$('#search-cancel').style.display = 'inline';
}
}
};
// $('body').onkeydown = function(e) {
// if (e.keyCode == 27) {
// cancelSearchHit();
// }
// };
// Set up autocomplete for search
var names = entries.map(function(e) { return e.name; });
names.sort();
new Awesomplete($('#search'), {
list: names,
minChars: 1,
autoFirst: true
});
$('body').addEventListener('awesomplete-selectcomplete', function(e) {
$('#search').onkeydown({ keyCode: 13 }); // trigger search handler
});
// Zoom out a tiny bit then render
var c = s.cameras[0];
c.ratio *= 1.2;
defaultCameraSettings = {
x: c.x,
y: c.y,
ratio: c.ratio,
angle: c.angle
};
s.refresh();
if (!layout || layout === 'forceDirected') {
s.startForceAtlas2({
gravity: 0.5,
linLogMode: true
});
window.setTimeout(function() { s.killForceAtlas2(); }, 5000);
} else {
// Make sure no nodes overlap
s.configNoverlap({
gridSize: 50,
nodeMargin: 20
});
s.startNoverlap();
}
var elapsedTime = ((new Date()) - startTime) / 1000;
console.log('main() finished in ' + elapsedTime + 's')
};
var showColorLegend = function() {
var newHTML = '';
$.each(classToColorMap, function(className, color) {
newHTML += '<span style="color: ' + color + '" onclick="filterByCourse(\'' + className + '\')"><br>' + className + '</span>';
});
newHTML += '<span style="color: ' + defaultClassColor + '" onclick="filterByCourse(\'\')"><br>Other</span>';
$('#legend').innerHTML = newHTML;
};
var highlightSearchHit = function(name) {
cancelSearchHit();
node = s.graph.nodes(name);
if (node) {
$('#search').value = '';
s.dispatchEvent('overNode', { node: node });
s.cameras[0].goTo(defaultCameraSettings);
activeSearchHit = node.id;
node.color = '#FFA726';
s.refresh();
return true;
}
return false;
};
var cancelSearchHit = function() {
if (activeSearchHit) {
activeSearchHit = '';
$('#layout-wrapper').style.display = 'inline';
$('#filter-wrapper').style.display = 'inline';
$('#search-wrapper').style.display = 'inline';
$('#search-cancel').style.display = 'none';
$('#search').focus();
s.dispatchEvent('outNode', { node: node });
s.graph.nodes().forEach(function(node) {
node.color = 'black';
});
s.refresh();
}
};
var showPersonInfo = function(node, inMap, outMap) {
var name = node.id;
var newHTML = '';
newHTML += '<b>' + name + (Object.keys(yearMap).indexOf(name) !== -1 ? ' (' + yearMap[name] + ')' : '') + '</b>';
if (inMap[name] && inMap[name].length) {
newHTML += '<p>Teachers:<ul>';
inMap[name].forEach(function(teacher) {
newHTML += '<li>' + teacher + '</li>';
});
newHTML += '</ul>';
}
if (outMap[name] && outMap[name].length) {
newHTML += '<p>Students:<ul>';
outMap[name].forEach(function(student) {
newHTML += '<li>' + student + '</li>';
});
newHTML += '</ul>';
}
$('#info').innerHTML = newHTML;
$('#info').style.display = 'block';
};
var filterByCourse = function(course) {
if ($('#filter').value !== course) {
$('#filter').value = course;
}
if (course) | else {
activeFilter = '';
s.graph.edges().forEach(function(edge) {
var idParts = edge.id.split(':');
edge.color = classToColor(idParts[2]);
edge.size = 1;
});
s.refresh();
$('#layout-wrapper').style.display = 'inline';
$('#search-wrapper').style.display = 'inline';
}
}
var goToLayout = function(layoutName) {
window.location.href = window.location.href.split('?')[0] + '?layout=' + layoutName;
}
var computeLongest = function() {
entries.forEach(function(e) { longestPath(e, []); });
var sortedKeys = Object.keys(memo).sort(function(a, b){ return memo[a] - memo[b] });
sortedKeys.forEach(function(k) { console.log(k, memo[k], longest[k]); });
};
memo = {}
longest = {}
var longestPath = function(e, seen) {
if (Object.keys(memo).indexOf(e.name) !== -1) {
return memo[e.name];
}
if (!e.students || e.students.length === 0) {
memo[e.name] = 1;
longest[e.name] = [e.name];
} else {
var children = e.students.map(function(s) { return s.name; });
var childrenEntries = entries.filter(function(e) {
return children.indexOf(e.name) !== -1 && seen.indexOf(e.name) === -1;
});
var childrenLengths = childrenEntries.map(function(e) { return longestPath(e, [e.name].concat(seen)); });
var maxLength = -1;
var bestIndex = -1;
for (var i = 0; i < childrenEntries.length; i++) {
if (childrenLengths[i] > maxLength) {
maxLength = childrenLengths[i];
bestIndex = i;
}
}
memo[e.name] = maxLength + 1;
longest[e.name] = [e.name].concat(longest[childrenEntries[bestIndex].name]);
}
return memo[e.name];
};
var topClasses = function() {
var courses = s.graph.edges().map(function(e) { return e.id.split(':')[2]; });
var counter = {};
courses.forEach(function(c) {
if (Object.keys(counter).indexOf(c) !== -1) {
counter[c]++;
} else {
counter[c] = 1;
}
});
var sortedKeys = Object.keys(counter).sort(function(a, b){ return counter[a] - counter[b] });
sortedKeys.forEach(function(k) { console.log(k, counter[k]); });
};
// Modified from http://gomakethings.com/how-to-get-the-value-of-a-querystring-with-native-javascript/
var getQueryString = function(field) {
var href = window.location.href;
if (href[href.length - 1] === '/') {
href = href.substring(0, href.length - 1);
}
var reg = new RegExp('[?&]' + field + '=([^&#]*)', 'i');
var string = reg.exec(href);
return string ? string[1] : null;
};
var parseOptions = function() {
$('#layout').value = getQueryString('layout') || 'forceDirected';
$('#layout-wrapper').style.display = 'inline';
}
$.ready().then(function() {
if (/Mobi/.test(navigator.userAgent)) {
if (!confirm('Notice: This website is not optimized for mobile view, and may cause your browser to crash or become unresponsive.')) {
window.history.back();
return;
}
}
var time = new Date().getTime();
$.fetch('data/data.yaml?t=' + time).then(function(data) {
entries = jsyaml.load(data.responseText);
parseOptions();
main(entries);
});
showColorLegend();
$('#about')._.transition({ opacity: 0.9 });
});
| {
activeFilter = course;
s.graph.edges().forEach(function(edge) {
var idParts = edge.id.split(':');
if (idParts[2] !== course) {
edge.color = 'transparent';
edge.size = 1;
} else {
edge.color = classToColor(idParts[2]);
edge.size = 1;
}
});
s.refresh();
$('#layout-wrapper').style.display = 'none';
$('#search-wrapper').style.display = 'none';
} | conditional_block |
Trade.go | package trade
// Trade
type Trade struct {
// 交易编号 (父订单的交易编号)
Tid string `json:"tid,omitempty" xml:"tid,omitempty"`
// 商品购买数量。取值范围:大于零的整数,对于一个trade对应多个order的时候(一笔主订单,对应多笔子订单),num=0,num是一个跟商品关联的属性,一笔订单对应多比子订单的时候,主订单上的num无意义。
Num int64 `json:"num,omitempty" xml:"num,omitempty"`
// 商品数字编号
NumIid int64 `json:"num_iid,omitempty" xml:"num_iid,omitempty"`
// 交易状态。可选值: * TRADE_NO_CREATE_PAY(没有创建支付宝交易) * WAIT_BUYER_PAY(等待买家付款) * SELLER_CONSIGNED_PART(卖家部分发货) * WAIT_SELLER_SEND_GOODS(等待卖家发货,即:买家已付款) * WAIT_BUYER_CONFIRM_GOODS(等待买家确认收货,即:卖家已发货) * TRADE_BUYER_SIGNED(买家已签收,货到付款专用) * TRADE_FINISHED(交易成功) * TRADE_CLOSED(付款以后用户退款成功,交易自动关闭) * TRADE_CLOSED_BY_TAOBAO(付款以前,卖家或买家主动关闭交易) * PAY_PENDING(国际信用卡支付付款确认中) * WAIT_PRE_AUTH_CONFIRM(0元购合约中) * PAID_FORBID_CONSIGN(拼团中订单或者发货强管控的订单,已付款但禁止发货)
Status string `json:"status,omitempty" xml:"status,omitempty"`
// 交易类型列表,同时查询多种交易类型可用逗号分隔。默认同时查询guarantee_trade, auto_delivery, ec, cod的4种交易类型的数据 可选值 fixed(一口价) auction(拍卖) guarantee_trade(一口价、拍卖) auto_delivery(自动发货) independent_simple_trade(旺店入门版交易) independent_shop_trade(旺店标准版交易) ec(直冲) cod(货到付款) fenxiao(分销) game_equipment(游戏装备) shopex_trade(ShopEX交易) netcn_trade(万网交易) external_trade(统一外部交易)o2o_offlinetrade(O2O交易)step (万人团)nopaid(无付款订单)pre_auth_type(预授权0元购机交易)
Type string `json:"type,omitempty" xml:"type,omitempty"`
// 商品价格。精确到2位小数;单位:元。如:200.07,表示:200元7分
Price string `json:"price,omitempty" xml:"price,omitempty"`
// 商品金额(商品价格乘以数量的总金额)。精确到2位小数;单位:元。如:200.07,表示:200元7分
TotalFee string `json:"total_fee,omitempty" xml:"total_fee,omitempty"`
// 交易创建时间。格式:yyyy-MM-dd HH:mm:ss
Created string `json:"created,omitempty" xml:"created,omitempty"`
// 订单列表
Orders []Order `json:"orders,omitempty" xml:"orders,omitempty"`
// 买家的openuid,入参fields中传入buyer_nick ,才能返回
BuyerOpenUid string `json:"buyer_open_uid,omitempty" xml:"buyer_open_uid,omitempty"`
// 付款时间。格式:yyyy-MM-dd HH:mm:ss。订单的付款时间即为物流订单的创建时间。
PayTime string `json:"pay_time,omitempty" xml:"pay_time,omitempty"`
// 卖家备注(与淘宝网上订单的卖家备注对应,只有卖家才能查看该字段)
SellerMemo string `json:"seller_memo,omitempty" xml:"seller_memo,omitempty"`
// 买家备注(与淘宝网上订单的买家备注对应,只有买家才能查看该字段)
BuyerMemo string `json:"buyer_memo,omitempty" xml:"buyer_memo,omitempty"`
// 邮费。精确到2位小数;单位:元。如:200.07,表示:200元7分
PostFee string `json:"post_fee,omitempty" xml:"post_fee,omitempty"`
// 是否包含邮费。与available_confirm_fee同时使用。可选值:true(包含),false(不包含)
HasPostFee bool `json:"has_post_fee,omitempty" xml:"has_post_fee,omitempty"`
// 优惠详情
PromotionDetails []PromotionDetail `json:"promotion_details,omitempty" xml:"promotion_details,omitempty"`
// 分阶段付款的订单状态(例如万人团订单等),目前有三返回状态FRONT_NOPAID_FINAL_NOPAID(定金未付尾款未付),FRONT_PAID_FINAL_NOPAID(定金已付尾款未付),FRONT_PAID_FINAL_PAID(定金和尾款都付)
StepTradeStatus string `json:"step_trade_status,omitempty" xml:"step_trade_status,omitempty"`
// 分阶段付款的已付金额(万人团订单已付金额)
StepPaidFee string `json:"step_paid_fee,omitempty" xml:"step_paid_fee,omitempty"`
// 卖家昵称
SellerNick string `json:"seller_nick,omitempty" xml:"seller_nick,omitempty"`
// 实付金额。精确到2位小数;单位:元。如:200.07,表示:200元7分
Payment string `json:"payment,omitempty" xml:"payment,omitempty"`
// 交易修改时间(用户对订单的任何修改都会更新此字段)。格式:yyyy-MM-dd HH:mm:ss
Modified string `json:"modified,omitempty" xml:"modified,omitempty"`
// 商品图片绝对途径
PicPath string `json:"pic_path,omitempty" xml:"pic_path,omitempty"`
// 卖家是否已评价。可选值:true(已评价),false(未评价)
SellerRate bool `json:"seller_rate,omitempty" xml:"seller_rate,omitempty"`
// 收货人的姓名
ReceiverName string `json:"receiver_name,omitempty" xml:"receiver_name,omitempty"`
// 收货人的所在省份
ReceiverState string `json:"receiver_state,omitempty" xml:"receiver_state,omitempty"`
// 收货人的详细地址
ReceiverAddress string `json:"receiver_address,omitempty" xml:"receiver_address,omitempty"`
// 收货人的邮编
ReceiverZip string `json:"receiver_zip,omitempty" xml:"receiver_zip,omitempty"`
// 收货人的手机号码
ReceiverMobile string `json:"receiver_mobile,omitempty" xml:"receiver_mobile,omitempty"`
// 收货人的电话号码
ReceiverPhone string `json:"receiver_phone,omitempty" xml:"receiver_phone,omitempty"`
// 卖家发货时间。格式:yyyy-MM-dd HH:mm:ss
ConsignTime string `json:"consign_time,omitempty" xml:"consign_time,omitempty"`
// 卖家实际收到的支付宝打款金额(由于子订单可以部分确认收货,这个金额会随着子订单的确认收货而不断增加,交易成功后等于买家实付款减去退款金额)。精确到2位小数;单位:元。如:200.07,表示:200元7分
ReceivedPayment string `json:"received_payment,omitempty" xml:"received_payment,omitempty"`
// 商家的预计发货时间
EstConTime string `json:"est_con_time,omitempty" xml:"est_con_time,omitempty"`
// 收货人国籍
ReceiverCountry string `json:"receiver_country,omitempty" xml:"receiver_country,omitempty"`
// 收货人街道地址
ReceiverTown string `json:"receiver_town,omitempty" xml:"receiver_town,omitempty"`
// 天猫国际官网直供主订单关税税费
OrderTaxFee string `json:"order_tax_fee,omitempty" xml:"order_tax_fee,omitempty"`
// 满返红包的金额;如果没有满返红包,则值为 0.00
PaidCouponFee string `json:"paid_coupon_fee,omitempty" xml:"paid_coupon_fee,omitempty"`
// 门店自提,总店发货,分店取货的门店自提订单标识
ShopPick string `json:"shop_pick,omitempty" xml:"shop_pick,omitempty"`
// 同tid
TidStr string `json:"tid_str,omitempty" xml:"tid_str,omitempty"`
// 为tmall.daogoubao.cloudstore时表示云店链路
BizCode string `json:"biz_code,omitempty" xml:"biz_code,omitempty"`
// 值为1,且bizCode不为tmall.daogoubao.cloudstore时,为旗舰店订单
CloudStore string `json:"cloud_store,omitempty" xml:"cloud_store,omitempty"`
// 预售单为true,否则false (云店订单专用)
NewPresell bool `json:"new_presell,omitempty" xml:"new_presell,omitempty"`
// 优享购为true,否则false(云店订单专用)
YouXiang bool `json:"you_xiang,omitempty" xml:"you_xiang,omitempty"`
// 默认为0,0 表示用户主动支付1 表示系统代扣2 表示保险赔付
PayChannel string `json:"pay_channel,omitempty" xml:"pay_channel,omitempty"`
// 交易标题,以店铺名作为此标题的值。注:taobao.trades.get接口返回的Trade中的title是商品名称
Title string `json:"title,omitempty" xml:"title,omitempty"`
// 可以使用trade.promotion_details查询系统优惠系统优惠金额(如打折,VIP,满就送等),精确到2位小数,单位:元。如:200.07,表示:200元7分
DiscountFee string `json:"discount_fee,omitempty" xml:"discount_fee,omitempty"`
// 交易结束时间。交易成功时间(更新交易状态为成功的同时更新)/确认收货时间或者交易关闭时间 。格式:yyyy-MM-dd HH:mm:ss
EndTime string `json:"end_time,omitempty" xml:"end_time,omitempty"`
// 买家留言
BuyerMessage string `json:"buyer_message,omitempty" xml:"buyer_message,omitempty"`
// 买家备注旗帜(与淘宝网上订单的买家备注旗帜对应,只有买家才能查看该字段)红、黄、绿、蓝、紫 分别对应 1、2、3、4、5
BuyerFlag int64 `json:"buyer_flag,omitempty" xml:"buyer_flag,omitempty"`
// 卖家备注旗帜(与淘宝网上订单的卖家备注旗帜对应,只有卖家才能查看该字段)红、黄、绿、蓝、紫 分别对应 1、2、3、4、5
SellerFlag int64 `json:"seller_flag,omitempty" xml:"seller_flag,omitempty"`
// 买家昵称
BuyerNick string `json:"buyer_nick,omitempty" xml:"buyer_nick,omitempty"`
// top动态字段
TradeAttr string `json:"trade_attr,omitempty" xml:"trade_attr,omitempty"`
// 订单中是否包含运费险订单,如果包含运费险订单返回true,不包含运费险订单,返回false
HasYfx bool `json:"has_yfx,omitempty" xml:"has_yfx,omitempty"`
// 订单的运费险,单位为元
YfxFee string `json:"yfx_fee,omitempty" xml:"yfx_fee,omitempty"`
// 使用信用卡支付金额数
CreditCardFee string `json:"credit_card_fee,omitempty" xml:"credit_card_fee,omitempty"`
// 订单出现异常问题的时候,给予用户的描述,没有异常的时候,此值为空
MarkDesc string `json:"mark_desc,omitempty" xml:"mark_desc,omitempty"`
// 创建交易时的物流方式(交易完成前,物流方式有可能改变,但系统里的这个字段一直不变)。可选值:free(卖家包邮),post(平邮),express(快递),ems(EMS),virtual(虚拟发货),25(次日必达),26(预约配送)。
ShippingType string `json:"shipping_type,omitempty" xml:"shipping_type,omitempty"`
// 买家货到付款服务费。精确到2位小数;单位:元。如:12.07,表示:12元7分
BuyerCodFee string `json:"buyer_cod_fee,omitempty" xml:"buyer_cod_fee,omitempty"`
// 卖家手工调整金额,精确到2位小数,单位:元。如:200.07,表示:200元7分。来源于订单价格修改,如果有多笔子订单的时候,这个为0,单笔的话则跟[order].adjust_fee一样
AdjustFee string `json:"adjust_fee,omitempty" xml:"adjust_fee,omitempty"`
// 交易内部来源。WAP(手机);HITAO(嗨淘);TOP(TOP平台);TAOBAO(普通淘宝);JHS(聚划算)一笔订单可能同时有以上多个标记,则以逗号分隔
TradeFrom string `json:"trade_from,omitempty" xml:"trade_from,omitempty"`
// 服务子订单列表
ServiceOrders []ServiceOrder `json:"service_orders,omitempty" xml:"service_orders,omitempty"`
// 买家是否已评价。可选值:true(已评价),false(未评价)。如买家只评价未打分,此字段仍返回false
BuyerRate bool `json:"buyer_rate,omitempty" xml:"buyer_rate,omitempty"`
// 收货人的所在城市<br/>注:因为国家对于城市和地区的划分的有:省直辖市和省直辖县级行政区(区级别的)划分的,淘宝这边根据这个差异保存在不同字段里面比如:广东广州:广州属于一个直辖市是放在的receiver_city的字段里面;而河南济源:济源属于省直辖县级行政区划分,是区级别的,放在了receiver_district里面<br/>建议:程序依赖于城市字段做物流等判断的操作,最好加一个判断逻辑:如果返回值里面只有receiver_district参数,该参数作为城市
ReceiverCity string `json:"receiver_city,omitempty" xml:"receiver_city,omitempty"`
// 收货人的所在地区<br/>注:因为国家对于城市和地区的划分的有:省直辖市和省直辖县级行政区(区级别的)划分的,淘宝这边根据这个差异保存在不同字段里面比如:广东广州:广州属于一个直辖市是放在的receiver_city的字段里面;而河南济源:济源属于省直辖县级行政区划分,是区级别的,放在了receiver_district里面<br/>建议:程序依赖于城市字段做物流等判断的操作,最好加一个判断逻辑:如果返回值里面只有receiver_district参数,该参数作为城市
ReceiverDistrict string `json:"receiver_district,omitempty" xml:"receiver_district,omitempty"`
// 物流标签
ServiceTags []LogisticsTag `json:"service_tags,omitempty" xml:"service_tags,omitempty"`
// 导购宝=crm
O2o string `json:"o2o,omitempty" xml:"o2o,omitempty"`
// 导购员id
O2oGuideId string `json:"o2o_guide_id,omitempty" xml:"o2o_guide_id,omitempty"`
// 导购员门店id
O2oShopId string `json:"o2o_shop_id,omitempty" xml:"o2o_shop_id,omitempty"`
// 导购员名称
O2oGuideName string `json:"o2o_guide_name,omitempty" xml:"o2o_guide_name,omitempty"`
// 导购门店名称
O2oShopName string `json:"o2o_shop_name,omitempty" xml:"o2o_shop_name,omitempty"`
// 导购宝提货方式,inshop:店内提货,online:线上发货
O2oDelivery string `json:"o2o_delivery,omitempty" xml:"o2o_delivery,omitempty"`
// 交易扩展表信息
TradeExt *TradeExt `json:"trade_ext,omitempty" xml:"trade_ext,omitempty"`
// 天猫电子凭证家装
EticketServiceAddr string `json:"eticket_service_addr,omitempty" xml:"eticket_service_addr,omitempty"`
// 处方药未审核状态
RxAuditStatus string `json:"rx_audit_status,omitempty" xml:"rx_audit_status,omitempty"`
// 时间段
EsRange string `json:"es_range,omitempty" xml:"es_range,omitempty"`
// 时间
EsDate string `json:"es_date,omitempty" xml:"es_date,omitempty"`
// 时间
OsDate string `json:"os_date,omitempty" xml:"os_date,omitempty"`
// 时间段
OsRange string `json:"os_range,omitempty" xml:"os_range,omitempty"`
// 订单中使用红包付款的金额
CouponFee int64 `json:"coupon_fee,omitempty" xml:"coupon_fee,omitempty"`
// 分阶段交易的特权定金订单ID
O2oEtOrderId string `json:"o2o_et_order_id,omitempty" xml:"o2o_et_order_id,omitempty"`
// 邮关订单
PostGateDeclare bool `json:"post_gate_declare,omitempty" xml:"post_gate_declare,omitempty"`
// 跨境订单
CrossBondedDeclare bool `json:"cross_bonded_declare,omitempty" xml:"cross_bonded_declare,omitempty"`
// 全渠道商品通相关字段
OmnichannelParam string `json:"omnichannel_param,omitempty" xml:"omnichannel_param,omitempty"`
// 组合商品
Assembly string `json:"assembly,omitempty" xml:"assembly,omitempty"`
// TOP拦截标识,0不拦截,1拦截
TopHold int64 `json:"top_hold,omitempty" xml:"top_hold,omitempty"`
// 星盘标识字段
OmniAttr string `json:"omni_attr,omitempty" xml:"omni_attr,omitempty"`
// 星盘业务字段
OmniParam string `json:"omni_param,omitempty" xml:"omni_param,omitempty"`
// 聚划算一起买字段
ForbidConsign int64 `json:"forbid_consign,omitempty" xml:"forbid_consign,omitempty"`
// 采购订单标识
Identity string `json:"identity,omitempty" xml:"identity,omitempty"`
// 天猫拼团拦截标示
TeamBuyHold int64 `json:"team_buy_hold,omitempty" xml:"team_buy_hold,omitempty"`
// shareGroupHold
ShareGroupHold int64 `json:"share_group_hold,omitempty" xml:"share_group_hold,omitempty"`
// 天猫国际拦截
OfpHold int64 `json:"ofp_hold,omitempty" xml:"ofp_hold,omitempty"`
// 组装O2O多阶段尾款订单的明细数据 总阶段数,当前阶数,阶段金额(单位:分),支付状态,例如 3_1_100_paid ; 3_2_2000_nopaid
O2oStepTradeDetail string `json:"o2o_step_trade_detail,omitempty" xml:"o2o_step_trade_detail,omitempty"`
// 特权定金订单的尾款订单ID
O2oStepOrderId string `json:"o2o_step_order_id,omitempty" xml:"o2o_step_order_id,omitempty"`
// 分阶段订单的特权定金抵扣金额,单位:分
O2oVoucherPrice string `json:"o2o_voucher_price,omitempty" xml:"o2o_voucher_price,omitempty"`
// 天猫国际计税优惠金额
OrderTaxPromotionFee string `json:"order_tax_promotion_fee,omitempty" xml:"order_tax_promotion_fee,omitempty"`
// 聚划算火拼标记
DelayCreateDelivery int64 `json:"delay_create_delivery,omitempty" xml:"delay_create_delivery,omitempty"`
// top定义订单类型
Toptype int64 `json:"toptype,omitempty" xml:"toptype,omitempty"`
// serviceType
ServiceType string `json:"service_type,omitempty" xml:"service_type,omitempty"`
// o2oServiceMobile
O2oServiceMobile string `json:"o2o_service_mobile,omitempty" xml:"o2o_service_mobile,omitempty"`
// o2oServiceName
O2oServiceName string `json:"o2o_service_name,omitempty" xml:"o2o_service_name,omitempty"`
// o2oServiceState
O2oServiceState string `json:"o2o_service_state,omitempty" xml:"o2o_service_state,omitempty"`
// o2oServiceCity
O2oServiceCity string `json:"o2o_service_city,omitempty" xml:"o2o_service_city,omitempty"`
// o2oServiceDistrict
O2oServiceDistrict string `json:"o2o_service_district,omitempty" xml:"o2o_service_district,omitempty"`
// o2oServiceTown
O2oServiceTown string `json:"o2o_service_town,omitempty" xml:"o2o_service_town,omitempty"`
// o2oServiceAddress
O2oServiceAddress string `json:"o2o_service_address,omitempty" xml:"o2o_service_address,omitempty"`
// o2oStepTradeDetailNew
O2oStepTradeDetailNew string `json:"o2o_step_trade_detail_new,omitempty" xml:"o2o_step_trade_detail_new,omitempty"`
// o2oXiaopiao
O2oXiaopiao string `json:"o2o_xiaopiao,omitempty" xml:"o2o_xiaopiao,omitempty"`
// o2oContract
O2oContract string `json:"o2o_contract,omitempty" xml:"o2o_contract,omitempty"`
// 新零售门店编码
RetailStoreCode string `json:"retail_store_code,omitempty" xml:"retail_store_code,omitempty"`
// 新零售线下订单id
RetailOutOrderId string `json:"retail_out_order_id,omitempty" xml:"retail_out_order_id,omitempty"`
// rechargeFee
RechargeFee string `json:"recharge_fee,omitempty" xml:"recharge_fee,omitempty"`
// platformSubsidyFee
PlatformSubsidyFee string `json:"platform_subsidy_fee,omitempty" xml:"platform_subsidy_fee,omitempty"`
// nrOffline
NrOffline string `json:"nr_offline,omitempty" xml:"nr_offline,omitempty"`
// 网厅订单垂直表信息
WttParam string `json:"wtt_param,omitempty" xml:"wtt_param,omitempty"`
// logisticsInfos
LogisticsInfos []LogisticsInfo `json:"logistics_infos,omitempty" xml:"logistics_infos,omitempty"`
// nrStoreOrderId
NrStoreOrderId string `json:"nr_store_order_id,omitempty" xml:"nr_store_order_id,omitempty"`
// 门店 ID
NrShopId string `json:"nr_shop_id,omitempty" xml:"nr_shop_id,omitempty"`
// 门店名称
NrShopName string `json:"nr_shop_name,omitempty" xml:"nr_shop_name,omitempty"`
// 导购员ID
NrShopGuideId string `json:"nr_shop_guide_id,omitempty" xml:"nr_shop_guide_id,omitempty"`
// 导购员名称
NrShopGuideName string `json:"nr_shop_guide_name,omitempty" xml:"nr_shop_guide_name,omitempty"`
// sortInfo
SortInfo string `json:"sort_info,omitempty" xml:"sort_info,omitempty"`
// 1已排序 2不排序
Sorted int64 `json:"sorted,omitempty" xml:"sorted,omitempty"`
// 一小时达不处理订单
NrNoHandle string `json:"nr_no_handle,omitempty" xml:"nr_no_handle,omitempty"`
// isGift
IsGift bool `json:"is_gift,omitempty" xml:"is_gift,omitempty"`
// doneeNick
DoneeNick string `json:"donee_nick,omitempty" xml:"donee_nick,omitempty"`
// doneeUid
DoneeOpenUid string `json:"donee_open_uid,omitempty" xml:"donee_open_uid,omitempty"`
// suningShopCode
SuningShopCode string `json:"suning_shop_code,omitempty" xml:"suning_shop_code,omitempty"`
// suningShopValid
SuningShopValid int64 `json:"suning_shop_valid,omitempty" xml:"suning_shop_valid,omitempty"`
// retailStoreId
RetailStoreId string `json:"retail_store_id,omitempty" xml:"retail_store_id,omitempty"`
// isIstore
IsIstore bool `json:"is_istore,omitempty" xml:"is_istore,omitempty"`
// ua
Ua string `json:"ua,omitempty" xml:"ua,omitempty"`
// 截单时间
CutoffMinutes string `json:"cutoff_minutes,omitempty" xml:"cutoff_minutes,omitempty"`
// 时效:天
EsTime string `json:"es_time,omitempty" xml:"es_time,omitempty"`
// 发货时间
DeliveryTime string `json:"delivery_time,omitempty" xml:"delivery_time,omitempty"`
// 揽收时间
CollectTime string `json:"collect_time,omitempty" xml:"collect_time,omitempty"`
// 派送时间
DispatchTime string `json:"dispatch_time,omitempty" xml:"dispatch_time,omitempty"`
// 签收时间
SignTime string `json:"sign_time,omitempty" xml:"sign_time,omitempty"`
// 派送CP
DeliveryCps string `json:"delivery_cps,omitempty" xml:"delivery_cps,omitempty"`
// linkedmall透传参数
LinkedmallExtInfo string `json:"linkedmall_ext_info,omitempty" xml:"linkedmall_ext_info,omitempty"`
// 新零售全渠道订单:订单类型,自提订单:pickUp,电商发货:tmall,门店发货(配送、骑手):storeSend
RtOmniSendType string `json:"rt_omni_send_type,omitempty" xml:"rt_omni_send_type,omitempty"`
// 新零售全渠道订单:发货门店ID
RtOmniStoreId string `json:"rt_omni_store_id,omitempty" xml:"rt_omni_store_id,omitempty"`
// 新零售全渠道订单:商家自有发货门店编码
RtOmniOuterStoreId string `json:"rt_omni_outer_store_id,omitempty" xml:"rt_omni_outer_store_id,omitempty"`
// 同城预约配送开始时间
TcpsStart string `json:"tcps_start,omitempty" xml:"tcps_start,omitempty"`
// 同城业务类型,com.tmall.dsd:定时送,storeDsd-fn-3-1:淘速达3公里蜂鸟配送
TcpsCode string `json:"tcps_code,omitempty" xml:"tcps_code,omitempty"`
// 同城预约配送结束时间
TcpsEnd string `json:"tcps_end,omitempty" xml:"tcps_end,omitempty"`
//
MTariffFee string `json:"m_tariff_fee,omitempty" xml:"m_tariff_fee,omitempty"`
// 时效服务身份,如tmallPromise代表天猫时效承诺
TimingPromise string `json:"timing_promise,omitempty" xml:"timing_promise,omitempty"`
// 时效服务字段,服务字段,会有多个服务值,以英文半角逗号","切割
PromiseService string `json:"promise_service,omitempty" xml:"promise_service,omitempty"`
// 苏宁预约安装,用户安装时间段
OiRange string `json:"oi_range,omitempty" xml:"oi_range,omitempty"`
// 苏宁预约安装,用户安装时间
OiDate string `json:"oi_date,omitempty" xml:"oi_date,omitempty"`
// 苏宁预约安装,暂不安装
HoldInstall string `json:"hold_install,omitempty" xml:"hold_install,omitempty"`
// 外部会员id
OuterPartnerMemberId string `json:"outer_partner_member_id,omitempty" xml:"outer_partner_member_id,omitempty"`
// 叶子分类
RootCat string `json:"root_cat,omitempty" xml:"root_cat,omitempty"`
// 1-gifting订单
Gifting string `json:"gifting,omitempty" xml:"gifting,omitempty"`
// 1-coffee gifting订单
GiftingTakeout string `json:"gifting_takeout,omitempty" xml:"gifting_takeout,omitempty"`
// 订单来源
AppName string `json:"app_name,omitempty" xml:"app_name,omitempty"`
// 居然之家同城站订单类型 deposit:预约到店,direct:直接购买,tail:尾款核销
EasyHomeCityType string `json:"easy_home_city_type,omitempty" xml:"easy_home_city_type,omitempty"`
// 同城站关联订单号
NrDepositOrderId string `json:"nr_deposit_order_id,omitempty" xml:"nr_deposit_order_id,omitempty"`
// 摊位id
NrStoreCode string `json:"nr_store_code,omitempty" xml:"nr_store_code,omitempty"`
// 使用淘金币的数量,以分为单位,和订单标propoint中间那一段一样,没有返回null
Propoint string `json:"propoint,omitempty" xml:"propoint,omitempty"`
// 1-周期送订单
ZqsOrderTag string `json:"zqs_order_tag,omitempty" xml:"zqs_order_tag,omitempty"`
// 天鲜配冰柜id
TxpFreezerId string `json:"txp_freezer_id,omitempty" xml:"txp_freezer_id,omitempty"`
// 天鲜配自提方式
TxpReceiveMethod string `json:"txp_receive_method,omitempty" xml:"txp_receive_method,omitempty"`
// 同城购门店ID
BrandLightShopStoreId string `json:"brand_light_shop_store_id,omitempty" xml:"brand_light_shop_store_id,omitempty"`
	// 同城购订单source
BrandLightShopSource string `json:"brand_light_shop_source,omitempty" xml:"brand_light_shop_source,omitempty"`
// 透出的额外信息
ExtendInfo string `json:"extend_info,omitempty" xml:"extend_info,omitempty"`
// 收货地址有变更,返回"1"
Lm string `json:"lm,omitempty" xml:"lm,omitempty"`
// 新康众定制数据
NczExtAttr string `json:"ncz_ext_attr,omitempty" xml:"ncz_ext_attr,omitempty"`
// 标识完美履约订单
IsWmly string `json:"is_wmly,omitempty" xml:"is_wmly,omitempty"`
// 全渠道包裹信息
OmniPackage string `json:"omni_package,omitempty" xml:"omni_package,omitempty"`
// 购物金信息输出
ExpandcardInfo *ExpandCardInfo `json:"expandcard_info,omitempty" xml:"expandcard_info,omitempty"`
// 苹果发票详情
InvoiceDetailAfterRefund string `json:"invoice_detail_after_refund,omitempty" xml:"invoice_detail_after_refund,omitempty"`
// 苹果发票详情
InvoiceDetailPay string `json:"invoice_detail_pay,omitempty" xml:"invoice_detail_pay,omitempty"`
// 苹果发票详情
InvoiceDetailMidRefund string `json:"invoice_detail_mid_refund,omitempty" xml:"invoice_detail_mid_refund,omitempty"`
// 买卡订单本金
ExpandCardBasicPrice string `json:"expand_card_basic_price,omitempty" xml:"expand_card_basic_price,omitempty"`
// 买卡订单权益金
ExpandCardExpandPrice string `json:"expand_card_expand_price,omitempty" xml:"expand_card_expand_price,omitempty"`
// 用卡订单所用的本金
ExpandCardBasicPriceUsed string `json:"expand_card_basic_price_used,omitempty" xml:"expand_card_basic_price_used,omitempty"`
// 用卡订单所用的权益金
ExpandCardExpandPriceUsed string `json:"expand_card_expand_price_used,omitempty" xml:"expand_card_expand_price_used,omitempty"`
// 是否是Openmall订单
IsOpenmall bool `json:"is_openmall,omitempty" xml:"is_openmall,omitempty"`
// asdp业务身份
AsdpBizType string `json:"asdp_biz_type,omitempty" xml:"asdp_biz_type,omitempty"`
// (收货人+手机号+收货地址+create)4字段返回值都都不能为空,否则生成不了oaid
Oaid string `json:"oaid,omitempty" xml:"oaid,omitempty"`
// 是否是码上收订单
VLogisticsCreate bool `json:"v_logistics_create,omitempty" xml:"v_logistics_create,omitempty"`
// 是否是非物流订单
QRPay bool `json:"q_r_pay,omitempty" xml:"q_r_pay,omitempty"`
// 关联下单订单
OrderFollowId string `json:"order_follow_id,omitempty" xml:"order_follow_id,omitempty"`
// 无物流信息返回true,平台属性,业务不要依赖
NoShipping bool `json:"no_shipping,omitempty" xml:"no_shipping,omitempty"`
// 送货上门标
AsdpAds string `json:"asdp_ads,omitempty" xml:"asdp_ads,omitempty"`
// 是否屏蔽发货
IsShShip bool `json:"is_sh_ship,omitempty" xml:"is_sh_ship,omitempty"`
// 抢单状态0,未处理待分发;1,抢单中;2,已抢单;3,已发货;-1,超时;-2,处理异常;-3,匹配失败;-4,取消抢单;-5,退款取消;-9,逻辑删除
O2oSnatchStatus string `json:"o2o_snatch_status,omitempty" xml:"o2o_snatch_status,omitempty"`
// 垂直市场
Market string `json:"market,omitempty" xml:"market,omitempty"`
// 电子凭证扫码购-扫码支付订单type
EtType string `json:"et_type,omitempty" xml:"et_type,omitempty"`
// 扫码购关联门店
EtShopId int64 `json:"et_shop_id,omitempty" xml:"et_shop_id,omitempty"`
// 门店预约自提订单标
Obs string `json:"obs,omitempty" xml:"obs,omitempty"`
// 透出前置营销工具
Pmtp string `json:"pmtp,omitempty" xml:"pmtp,omitempty"`
// 判断订单是否有买家留言,有买家留言返回true,否则返回false
HasBuyerMessage bool `json:"has_buyer_message,omitempty" xml:"has_buyer_message,omitempty"`
// threeplTiming
ThreeplTiming bool `json:"threepl_timing,omitempty" xml:"threepl_timing,omitempty"`
// 是否是智慧门店订单,只有true,或者 null 两种情况
IsO2oPassport bool `json:"is_o2o_passport,omitempty" xml:"is_o2o_passport,omitempty"`
// tmallDelivery
TmallDelivery bool `json:"tmall_delivery,omitempty" xml:"tmall_delivery,omitempty"`
// 天猫直送服务
CnService string `json:"cn_service,omitempty" xml:"cn_service,omitempty"`
} | // 同城购订单source | random_line_split |
asistencia.component.ts | import { Component, OnInit, ViewChild, ViewContainerRef, Input, ChangeDetectorRef } from '@angular/core';
import { PopoverModule } from 'ngx-popover';
import { DaterangepickerConfig, DaterangePickerComponent } from 'ng2-daterangepicker';
import { ToastrService } from 'ngx-toastr';
import { Router, ActivatedRoute } from '@angular/router';
import { Observable } from 'rxjs/Observable';
import { Subject } from 'rxjs/Subject';
import { AddAusentismoComponent } from '../formularios/add-ausentismo.component';
import { PyaExceptionComponent } from '../formularios/pya-exception.component';
import { ApiService, InitService, TokenCheckService } from '../../services/service.index';
import { saveAs } from 'file-saver';
import { utils, write, WorkBook, read } from 'xlsx';
import * as Globals from '../../globals';
declare var jQuery:any;
// import * as moment from 'moment';
import * as moment from 'moment-timezone';
@Component({
selector: 'app-asistencia',
templateUrl: './asistencia.component.html',
styles: ['input[type=checkbox]{ cursor: pointer}']
})
export class AsistenciaComponent implements OnInit {
@ViewChild( DaterangePickerComponent,{static:false} ) private picker: DaterangePickerComponent
@ViewChild( AddAusentismoComponent,{static:false} ) _aus: AddAusentismoComponent
@ViewChild( PyaExceptionComponent,{static:false} ) _pya:PyaExceptionComponent
currentUser: any
showContents:boolean = false
mainCredential:string = 'tablas_f'
filterExpanded:boolean = false
selectedAsesores:any = []
loading:boolean = false
showProgress:boolean = false
asistData:any
datesData:any
deps:any
searchBy:boolean = true
private depsSubject = new Subject<any>();
private asistSubject = new Subject<any>();
orederedKeys:any
shownDom:any = []
depLoaders:any = {}
depLoadFlag:boolean = false
showOpts:Object = {
ch_jornada: true,
ch_comida: false,
ch_excep: true,
ch_excep_p: false,
ch_ret: false,
ch_sa: false,
ch_x: false,
ch_x_p: false,
sh_p: false,
sh_d: false
}
today:any = moment()
searchCriteria:Object = {
start: this.today.subtract(15, 'days').format('YYYY-MM-DD'),
end: this.today.add(15, 'days').format('YYYY-MM-DD'),
value: '',
skill: ''
}
searchFilter:string = ''
searchFields:any = [
'Nombre', 'PuestoName', 'Departamento'
]
error:string = null
    // Resolves the signed-in user and whether they hold this view's
    // credential, keeps that flag in sync with token-status changes,
    // finalizes the default date-range filter and configures the
    // daterangepicker before preloading the department catalog.
    constructor(
        private _dateRangeOptions: DaterangepickerConfig,
        private _api:ApiService,
        private _init:InitService,
        private _tokenCheck:TokenCheckService,
        public toastr: ToastrService,
        private cd: ChangeDetectorRef
    ) {
        this.currentUser = this._init.getUserInfo()
        this.showContents = this._init.checkCredential( this.mainCredential, true )
        // Hide the view as soon as the session token becomes invalid.
        this._tokenCheck.getTokenStatus()
            .subscribe( res => {
                if( res.status ){
                    this.showContents = this._init.checkCredential( this.mainCredential, true )
                }else{
                    this.showContents = false
                }
            })
        // Display string for the picker, built from the ±15-day default
        // range computed in the field initializers.
        // NOTE(review): the searchCriteria initializer mutates the shared
        // `today` moment (subtract then add) — net zero, but fragile.
        this.searchCriteria['value']= `${this.searchCriteria['start']} - ${this.searchCriteria['end']}`
        this._dateRangeOptions.settings = {
            autoUpdateInput: true,
            locale: { format: 'YYYY-MM-DD' }
        }
        this.loadDeps()
        moment.locale('es-MX')
    }
    // Entry point for the search button. A PCRC id in `dep` loads one
    // department via getAsistencia; the site codes 'MX'/'CO' fan out
    // one request per PCRC of that site, each tracked by a
    // per-department loader flag.
    searchAsistencia( dep, inicio, fin ){
        if( dep != 'MX' && dep != 'CO' ){
            this.depLoadFlag = false
            this.getAsistencia( dep, inicio, fin )
        }else{
            // Site-wide search: reset the grid and request every PCRC
            // of the selected sede in parallel.
            this.depLoadFlag = true
            this.asistData = {}
            this.datesData = []
            this.depLoaders = {}
            for( let pcrc of this.deps ){
                // NOTE(review): PCRC ids 29 and 56 are excluded — magic
                // numbers; confirm which departments these are and
                // document them.
                if( pcrc.id != 29 && pcrc.id != 56 && pcrc.sede == dep ){
                    this.depLoaders[pcrc.Departamento] = true
                    let params = `${pcrc.id}/${inicio}/${fin}`
                    // Responses arrive out of order, so re-sort the
                    // grid after each one lands.
                    this.getAllDeps( pcrc, params, () => {
                        this.orderNames( this.asistData, 1)
                    })
                }
            }
            this.orderNames( this.asistData, 1)
        }
    }
    // Fetches attendance for one PCRC and merges it into the shared
    // asistData map. Clears that department's loader flag on success
    // or error, then invokes `callback` so the caller can re-sort.
    getAllDeps( pcrc, params, callback ){
        this._api.restfulGet( params, 'Asistencia/pya' )
        .subscribe( res =>{
            this.depLoaders[pcrc.Departamento] = false
            if( res['data'] != null ){
                Object.assign(this.asistData,res['data']);
                // NOTE(review): Fechas is overwritten by every response;
                // this assumes all PCRCs return the same date range —
                // confirm.
                this.datesData = (res['Fechas'])
            }
            callback()
        },
        (err) => {
            this.error = err
            this.depLoaders[pcrc.Departamento] = false
            this.toastr.error(`${ this.error }`, 'Error!');
            callback()
        });
    }
    // Requests attendance for a department/date range.
    // - `asesor` given and flag=false: show an inline loader only on
    //   that asesor's cell and patch the grid in place (singleUpdate).
    // - otherwise: full-page loader; the response is pushed through
    //   asistSubject and rebuilt by ngOnInit's subscriber.
    // Also collapses the filter panel and clears the text filter.
    getAsistencia( dep, inicio, fin, asesor?:any, flag=false ){
        this.filterExpanded = false
        this.searchFilter = ''
        let params = {
            dep : dep ,
            inicio : inicio ,
            fin : fin ,
            asesor : asesor ,
            noSup : null ,
            order : null ,
        }
        if( asesor ){
            if( !flag ){
                // Per-cell loader for the single refreshed date.
                this.asistData[asesor]['data'][inicio]['loading'] = true
            }else{
                this.loading = true
            }
        }else{
            this.loading = true
        }
        this._api.restfulPut( params, 'Asistencia/pya' )
        .subscribe( res =>{
            if( asesor && !flag){
                // console.log( res )
                this.singleUpdate( res )
            }else{
                this.asistSubject.next({ res })
            }
        },
        (err) => {
            this.error = err
            this.loading = false
            this.toastr.error(`${ this.error }`, 'Error!');
        });
    }
singleUpdate( data ){
for( let asesor in data.data ){
// tslint:disable-next-line:forin
for(let fecha in data.Fechas ){
this.asistData[ asesor ]['data'][ fecha ] = data.data[ asesor ][ 'data' ][ fecha ]
}
}
}
compareDates( date ){
let header = moment(date)
let td = moment( this.today.format('YYYY-MM-DD') )
if(header >= td){
return false
}else{
return true
}
}
loadDeps(){
this._api.restfulGet( '','Headcount/deps' )
.subscribe( res => {
this.depsSubject.next({ res })
})
}
    // Accessor for the attendance-payload stream consumed in ngOnInit.
    // NOTE(review): @Input() on an accessor method is unusual — confirm
    // it is intentional and not a leftover from a component-input.
    @Input() loadData(): Observable<any>{
        return this.asistSubject.asObservable();
    }
    // Accessor for the department-catalog stream consumed in ngOnInit.
    @Input() getDeps(): Observable<any>{
        return this.depsSubject.asObservable();
    }
setVal( inicio, fin ){
this.searchCriteria['start'] = inicio.format('YYYY-MM-DD')
this.searchCriteria['end'] = fin.format('YYYY-MM-DD')
}
pcrcChange( select ){
this.searchCriteria['skill']=event.target['value']
}
applyFilter( rac ){
if(this.searchFilter == ''){
return true
}
for(let item of this.searchFields){
if(rac[item].toLowerCase().includes(this.searchFilter.toLowerCase())){
return true
}
}
return false
}
    // Subscribes to the two data streams: the department catalog and
    // full attendance payloads. Each arrival updates local state and
    // asks Angular to re-check the view (markForCheck).
    ngOnInit() {
        this.getDeps()
        .subscribe( res => {
            this.deps = res.res
            // console.log( res.res )
            this.cd.markForCheck()
        })
        this.loadData()
        .subscribe( res => {
            this.asistData = res.res['data']
            this.datesData = res.res['Fechas']
            // Keep the row order alphabetical by asesor name.
            this.orderNames( this.asistData, 1)
            this.loading = false
            // console.log( res.res )
            // console.log( this.asistData )
            this.cd.markForCheck()
        })
    }
    // Renders "HH:mm - HH:mm" for a start/end pair on `date`, converted
    // to America/Bogota.
    // NOTE(review): 'this._zh.defaultZone' is a *string literal*, not
    // the expression this._zh.defaultZone — moment.tz receives an
    // unknown zone name here. Almost certainly a quoting bug, but `_zh`
    // is not declared in the visible class; confirm the intended source
    // zone before fixing.
    printTimeInterval(date, start, end){
        let inicio = moment.tz(`${date} ${start}`, 'this._zh.defaultZone')
        let fin = moment.tz(`${date} ${end}`, 'this._zh.defaultZone')
        let inicioCUN = inicio.clone().tz('America/Bogota')
        let finCUN = fin.clone().tz('America/Bogota')
        let result = `${inicioCUN.format('HH:mm')} - ${finCUN.format('HH:mm')}`
        return result
    }
    // Renders "HH:mm:ss" for a time on `date`, converted to
    // America/Bogota.
    // NOTE(review): same quoting bug as printTimeInterval —
    // 'this._zh.defaultZone' is a string literal, not an expression.
    printTime(date, time){
        let tiempo = moment.tz(`${date} ${time}`, 'this._zh.defaultZone')
        let tiempoCUN = tiempo.clone().tz('America/Bogota')
        let result = tiempoCUN.format('HH:mm:ss')
        return result
    }
    // Formats a datetime with the given moment format string, converted
    // to America/Bogota.
    // NOTE(review): same quoting bug as printTimeInterval —
    // 'this._zh.defaultZone' is a string literal, not an expression.
    formatDate(datetime, format){
        let time = moment.tz(datetime, 'this._zh.defaultZone')
        let cunTime = time.clone().tz('America/Bogota')
        return cunTime.format(format)
    }
orderNames( data, ord=1 ){
// console.log(data)
let sortArray:any = []
let tmpSlot:any = []
let flag:boolean
let pushFlag:boolean
let x:number
let lastInput:any
let compare:any = []
for(let id in data){
if(sortArray.length == 0){
sortArray[0] = id
}else{
flag = false
for(x=0; x<sortArray.length; x++){
if(!flag){
if(ord == 1){
compare[1] = data[id]['Nombre']
compare[2] = data[sortArray[x]]['Nombre']
}else{
compare[1] = data[sortArray[x]]['Nombre']
compare[2] = data[id]['Nombre']
}
if(compare[1] < compare[2]){
tmpSlot[0] = sortArray[x]
sortArray[x] = id
flag = true
if(x == (sortArray.length)-1){
pushFlag=true
lastInput = tmpSlot[0]
}
}else{
if(x == (sortArray.length)-1){
pushFlag=true
lastInput = id
}
}
}else{
tmpSlot[1] = sortArray[x]
sortArray[x] = tmpSlot[0]
tmpSlot[0] = tmpSlot[1]
}
}
if(pushFlag){
sortArray.push(lastInput)
}
}
}
this.orederedKeys = sortArray
}
ausentNotif( event ){
this.toastr.error(`${ event.msg }`, `${ event.title.toUpperCase() }!`);
}
perCumplimiento( rac, date, log ){
let inicio = this.asistData[rac].data[date][`${log}s`]
let fin = this.asistData[rac].data[date][`${log}e`]
let ji = this.asistData[rac].data[date][`${log}_login`]
let jf = this.asistData[rac].data[date][`${log}_logout`]
if( inicio == null ||
fin == null ||
ji == null ||
jf == null ){
return 0
}
let s = moment( inicio )
let e = moment( fin )
let js = moment( ji )
let je = moment( jf )
let total = e.diff(s, 'seconds')
let did = je.diff(js, 'seconds')
let result:number = did / total * 100
return (Math.floor(result))
}
| ( time ){
let td = moment(time)
if( td < moment(`${moment().format('YYYY-MM-DD')} 05:00:00`)){
return td.add(1, 'days')
}else{
return td
}
}
showDom(rac, date, block){
if(this.checkSet(rac, date, block)){
this.shownDom[`${rac}_${date}_${block}`] = undefined
}else{
this.shownDom[`${rac}_${date}_${block}`] = true
}
}
checkSet(rac, date, block){
if(this.isset(this.shownDom,`${rac}_${date}_${block}`)){
return true
}else{
return false
}
}
isset (a, name ) {
let is = true
if ( a[name] == undefined || a[name] == '' || a[name] == null ) {
is = false
}
return is;
}
progressProps( val, originalBg = 'primary' ){
let bar: string
let border: string
if(val<60){
bar = 'danger'
}else if(val<100){
bar = 'warning'
}else{
bar = 'success'
}
if(originalBg == bar){
border = 'light'
}else{
border = bar
}
return {bar: bar, border: border, val: val}
}
excStatus( event ){
if( !event.status ){
let error = event.error.json()
this.toastr.error( error.msg, `Error ${event.error.status} - ${event.error.statusText}` )
if( error.Existente ){
console.error('Ausentismo existente: ', error.Existente)
}
if( error.errores ){
console.error('Ausentismo existente: ', error.errores)
}
}else{
this.toastr.success( event.error.msg, `Guardado` )
this.getAsistencia( this.searchCriteria['skill'] ? this.searchCriteria['skill'] : 0, event.fecha, event.fecha, [event.asesor] )
}
}
    // Template-facing wrapper: exports the table with DOM id `id` to an
    // .xlsx workbook titled `title` (delegates to toXls).
    downloadXLS( id, title ){
        this.toXls( id, title )
    }
toXls( sheets, title ){
let wb = utils.table_to_book(document.getElementById(sheets), {raw: false});
let newSheets = {
jornada: JSON.parse(JSON.stringify(wb.Sheets['Sheet1'])),
extra: JSON.parse(JSON.stringify(wb.Sheets['Sheet1'])),
excepciones: JSON.parse(JSON.stringify(wb.Sheets['Sheet1'])),
retardos: JSON.parse(JSON.stringify(wb.Sheets['Sheet1']))
}
// tslint:disable-next-line:forin
for( let cell in wb.Sheets['Sheet1']){
// tslint:disable-next-line:max-line-length
let j = wb.Sheets['Sheet1'][cell].v, x2 = wb.Sheets['Sheet1'][cell].v, x1 = wb.Sheets['Sheet1'][cell].v, he = wb.Sheets['Sheet1'][cell].v, r = wb.Sheets['Sheet1'][cell].v, e = wb.Sheets['Sheet1'][cell].v
let compare = {}
if( cell.match(/^[A-D]{1}[0-9]*$/g) || cell.match(/^[A-Z]*[1]{1}$/g) ){
newSheets['jornada'][cell] = wb.Sheets['Sheet1'][cell]
newSheets['extra'][cell] = wb.Sheets['Sheet1'][cell]
newSheets['excepciones'][cell] = wb.Sheets['Sheet1'][cell]
newSheets['retardos'][cell] = wb.Sheets['Sheet1'][cell]
}else{
if( cell.match(/^[A-Z]*[0-9]*$/g)){
if( j.match(/[j]:/g) ){
let jornada = j.match(/(([j]:[ ]*[0-9]{2}:[0-9]{2} - [0-9]{2}:[0-9]{2})|([j]:[ ]*[a-zA-Z\-]*))/gm)
if( jornada ){
newSheets['jornada'][cell].v = jornada[0].replace('j:','').trim()
}else{
newSheets['jornada'][cell].v = '-'
}
}else{
newSheets['jornada'][cell].v = '*'
}
if( x1.match(/[x]:/g) ){
let extra1 = x1.match(/([x]:[ ]*[0-9]{2}:[0-9]{2} - [0-9]{2}:[0-9]{2})/gm)
if( extra1 ){
newSheets['extra'][cell].v = extra1[0].replace('x:','').trim()
}else{
newSheets['extra'][cell].v = '-'
}
if( x2.match(/((-->[ ]*\W[ ]*[0-9]{2}:[0-9]{2} - [0-9]{2}:[0-9]{2}))/g) ){
let extra2 = x2.match(/-->[ ]*\W[ ]*[0-9]{2}:[0-9]{2} - [0-9]{2}:[0-9]{2}/gm)
if( extra2 ){
newSheets['extra'][cell].v = newSheets['extra'][cell].v + '\r\n' + extra2[0].replace('-->','').trim()
newSheets['extra'][cell].t = 'h'
}
}
}else{
newSheets['extra'][cell].v = ''
}
if( e.match(/[e]:/g) ){
let excep = e.match(/[e]:[ ]*[a-zA-Z\-]*/gm)
if( excep ){
newSheets['excepciones'][cell].v = excep[0].replace('e:','').trim()
}else{
newSheets['excepciones'][cell].v = '-'
}
}else{
newSheets['excepciones'][cell].v = ''
}
if( r.match(/[r]:/g) ){
let rts = r.match(/[r]:[ ]*[a-zA-Z\-]*/gm)
if( rts ){
newSheets['retardos'][cell].v = rts[0].replace('r:','').trim()
}else{
newSheets['retardos'][cell].v = '-'
}
}else{
newSheets['retardos'][cell].v = ''
}
}
}
}
wb.SheetNames[0]='Jornadas'
delete wb.Sheets['Sheet1']
wb.SheetNames.push('Excepciones')
wb.SheetNames.push('Retardos')
wb.SheetNames.push('Extra')
wb.Sheets['Jornadas'] = newSheets['jornada']
wb.Sheets['Extra'] = newSheets['extra']
wb.Sheets['Excepciones']= newSheets['excepciones']
wb.Sheets['Retardos'] = newSheets['retardos']
let wbout = write(wb, { bookType: 'xlsx', bookSST: true, type:
'binary' });
saveAs(new Blob([this.s2ab(wbout)], { type: 'application/vnd.ms-excel' }), `${title}.xlsx`)
}
s2ab(s) {
let buf = new ArrayBuffer(s.length);
let view = new Uint8Array(buf);
// tslint:disable-next-line:no-bitwise
for (let i=0; i!=s.length; ++i) { view[i] = s.charCodeAt(i) & 0xFF; }
return buf;
}
tst(){
console.log(this.asistData)
}
sRep( input ){
return input.toLowerCase()
.replace(/á/gm,'a')
.replace(/é/gm,'e')
.replace(/í/gm,'i')
.replace(/ó/gm,'o')
.replace(/ú/gm,'u')
.replace(/ñ/gm,'n')
}
hxSave( event ){
if( event.status ){
this.toastr.success(`${ event.msg }`, 'Success!');
}else{
this.toastr.error(`${ event.msg }`, 'Error!');
}
}
}
| timeDateXform | identifier_name |
asistencia.component.ts | import { Component, OnInit, ViewChild, ViewContainerRef, Input, ChangeDetectorRef } from '@angular/core';
import { PopoverModule } from 'ngx-popover';
import { DaterangepickerConfig, DaterangePickerComponent } from 'ng2-daterangepicker';
import { ToastrService } from 'ngx-toastr';
import { Router, ActivatedRoute } from '@angular/router';
import { Observable } from 'rxjs/Observable';
import { Subject } from 'rxjs/Subject';
import { AddAusentismoComponent } from '../formularios/add-ausentismo.component';
import { PyaExceptionComponent } from '../formularios/pya-exception.component';
import { ApiService, InitService, TokenCheckService } from '../../services/service.index';
import { saveAs } from 'file-saver';
import { utils, write, WorkBook, read } from 'xlsx';
import * as Globals from '../../globals';
declare var jQuery:any;
// import * as moment from 'moment';
import * as moment from 'moment-timezone';
@Component({
selector: 'app-asistencia',
templateUrl: './asistencia.component.html',
styles: ['input[type=checkbox]{ cursor: pointer}']
})
export class AsistenciaComponent implements OnInit {
@ViewChild( DaterangePickerComponent,{static:false} ) private picker: DaterangePickerComponent
@ViewChild( AddAusentismoComponent,{static:false} ) _aus: AddAusentismoComponent
@ViewChild( PyaExceptionComponent,{static:false} ) _pya:PyaExceptionComponent
currentUser: any
showContents:boolean = false
mainCredential:string = 'tablas_f'
filterExpanded:boolean = false
selectedAsesores:any = []
loading:boolean = false
showProgress:boolean = false
asistData:any
datesData:any
deps:any
searchBy:boolean = true
private depsSubject = new Subject<any>();
private asistSubject = new Subject<any>();
orederedKeys:any
shownDom:any = []
depLoaders:any = {}
depLoadFlag:boolean = false
showOpts:Object = {
ch_jornada: true,
ch_comida: false,
ch_excep: true,
ch_excep_p: false,
ch_ret: false,
ch_sa: false,
ch_x: false,
ch_x_p: false,
sh_p: false,
sh_d: false
}
today:any = moment()
searchCriteria:Object = {
start: this.today.subtract(15, 'days').format('YYYY-MM-DD'),
end: this.today.add(15, 'days').format('YYYY-MM-DD'),
value: '',
skill: ''
}
searchFilter:string = ''
searchFields:any = [
'Nombre', 'PuestoName', 'Departamento'
]
error:string = null
constructor(
private _dateRangeOptions: DaterangepickerConfig,
private _api:ApiService,
private _init:InitService,
private _tokenCheck:TokenCheckService,
public toastr: ToastrService,
private cd: ChangeDetectorRef
) {
this.currentUser = this._init.getUserInfo()
this.showContents = this._init.checkCredential( this.mainCredential, true )
this._tokenCheck.getTokenStatus()
.subscribe( res => {
if( res.status ){
this.showContents = this._init.checkCredential( this.mainCredential, true )
}else{
this.showContents = false
}
})
this.searchCriteria['value']= `${this.searchCriteria['start']} - ${this.searchCriteria['end']}`
this._dateRangeOptions.settings = {
autoUpdateInput: true,
locale: { format: 'YYYY-MM-DD' }
}
this.loadDeps()
moment.locale('es-MX')
}
searchAsistencia( dep, inicio, fin ){
if( dep != 'MX' && dep != 'CO' ){
this.depLoadFlag = false
this.getAsistencia( dep, inicio, fin )
}else{
this.depLoadFlag = true
this.asistData = {}
this.datesData = []
this.depLoaders = {}
for( let pcrc of this.deps ){
if( pcrc.id != 29 && pcrc.id != 56 && pcrc.sede == dep ){
this.depLoaders[pcrc.Departamento] = true
let params = `${pcrc.id}/${inicio}/${fin}`
this.getAllDeps( pcrc, params, () => {
this.orderNames( this.asistData, 1)
})
}
}
this.orderNames( this.asistData, 1)
}
}
getAllDeps( pcrc, params, callback ){
this._api.restfulGet( params, 'Asistencia/pya' )
.subscribe( res =>{
this.depLoaders[pcrc.Departamento] = false
if( res['data'] != null ){
Object.assign(this.asistData,res['data']);
this.datesData = (res['Fechas'])
}
callback()
},
(err) => {
this.error = err
this.depLoaders[pcrc.Departamento] = false
this.toastr.error(`${ this.error }`, 'Error!');
callback()
});
}
getAsistencia( dep, inicio, fin, asesor?:any, flag=false ){
this.filterExpanded = false
this.searchFilter = ''
let params = {
dep : dep ,
inicio : inicio ,
fin : fin ,
asesor : asesor ,
noSup : null ,
order : null ,
}
if( asesor ){
if( !flag ) | else{
this.loading = true
}
}else{
this.loading = true
}
this._api.restfulPut( params, 'Asistencia/pya' )
.subscribe( res =>{
if( asesor && !flag){
// console.log( res )
this.singleUpdate( res )
}else{
this.asistSubject.next({ res })
}
},
(err) => {
this.error = err
this.loading = false
this.toastr.error(`${ this.error }`, 'Error!');
});
}
singleUpdate( data ){
for( let asesor in data.data ){
// tslint:disable-next-line:forin
for(let fecha in data.Fechas ){
this.asistData[ asesor ]['data'][ fecha ] = data.data[ asesor ][ 'data' ][ fecha ]
}
}
}
compareDates( date ){
let header = moment(date)
let td = moment( this.today.format('YYYY-MM-DD') )
if(header >= td){
return false
}else{
return true
}
}
loadDeps(){
this._api.restfulGet( '','Headcount/deps' )
.subscribe( res => {
this.depsSubject.next({ res })
})
}
@Input() loadData(): Observable<any>{
return this.asistSubject.asObservable();
}
@Input() getDeps(): Observable<any>{
return this.depsSubject.asObservable();
}
setVal( inicio, fin ){
this.searchCriteria['start'] = inicio.format('YYYY-MM-DD')
this.searchCriteria['end'] = fin.format('YYYY-MM-DD')
}
pcrcChange( select ){
this.searchCriteria['skill']=event.target['value']
}
applyFilter( rac ){
if(this.searchFilter == ''){
return true
}
for(let item of this.searchFields){
if(rac[item].toLowerCase().includes(this.searchFilter.toLowerCase())){
return true
}
}
return false
}
ngOnInit() {
this.getDeps()
.subscribe( res => {
this.deps = res.res
// console.log( res.res )
this.cd.markForCheck()
})
this.loadData()
.subscribe( res => {
this.asistData = res.res['data']
this.datesData = res.res['Fechas']
this.orderNames( this.asistData, 1)
this.loading = false
// console.log( res.res )
// console.log( this.asistData )
this.cd.markForCheck()
})
}
printTimeInterval(date, start, end){
let inicio = moment.tz(`${date} ${start}`, 'this._zh.defaultZone')
let fin = moment.tz(`${date} ${end}`, 'this._zh.defaultZone')
let inicioCUN = inicio.clone().tz('America/Bogota')
let finCUN = fin.clone().tz('America/Bogota')
let result = `${inicioCUN.format('HH:mm')} - ${finCUN.format('HH:mm')}`
return result
}
printTime(date, time){
let tiempo = moment.tz(`${date} ${time}`, 'this._zh.defaultZone')
let tiempoCUN = tiempo.clone().tz('America/Bogota')
let result = tiempoCUN.format('HH:mm:ss')
return result
}
formatDate(datetime, format){
let time = moment.tz(datetime, 'this._zh.defaultZone')
let cunTime = time.clone().tz('America/Bogota')
return cunTime.format(format)
}
orderNames( data, ord=1 ){
// console.log(data)
let sortArray:any = []
let tmpSlot:any = []
let flag:boolean
let pushFlag:boolean
let x:number
let lastInput:any
let compare:any = []
for(let id in data){
if(sortArray.length == 0){
sortArray[0] = id
}else{
flag = false
for(x=0; x<sortArray.length; x++){
if(!flag){
if(ord == 1){
compare[1] = data[id]['Nombre']
compare[2] = data[sortArray[x]]['Nombre']
}else{
compare[1] = data[sortArray[x]]['Nombre']
compare[2] = data[id]['Nombre']
}
if(compare[1] < compare[2]){
tmpSlot[0] = sortArray[x]
sortArray[x] = id
flag = true
if(x == (sortArray.length)-1){
pushFlag=true
lastInput = tmpSlot[0]
}
}else{
if(x == (sortArray.length)-1){
pushFlag=true
lastInput = id
}
}
}else{
tmpSlot[1] = sortArray[x]
sortArray[x] = tmpSlot[0]
tmpSlot[0] = tmpSlot[1]
}
}
if(pushFlag){
sortArray.push(lastInput)
}
}
}
this.orederedKeys = sortArray
}
ausentNotif( event ){
this.toastr.error(`${ event.msg }`, `${ event.title.toUpperCase() }!`);
}
perCumplimiento( rac, date, log ){
let inicio = this.asistData[rac].data[date][`${log}s`]
let fin = this.asistData[rac].data[date][`${log}e`]
let ji = this.asistData[rac].data[date][`${log}_login`]
let jf = this.asistData[rac].data[date][`${log}_logout`]
if( inicio == null ||
fin == null ||
ji == null ||
jf == null ){
return 0
}
let s = moment( inicio )
let e = moment( fin )
let js = moment( ji )
let je = moment( jf )
let total = e.diff(s, 'seconds')
let did = je.diff(js, 'seconds')
let result:number = did / total * 100
return (Math.floor(result))
}
timeDateXform( time ){
let td = moment(time)
if( td < moment(`${moment().format('YYYY-MM-DD')} 05:00:00`)){
return td.add(1, 'days')
}else{
return td
}
}
showDom(rac, date, block){
if(this.checkSet(rac, date, block)){
this.shownDom[`${rac}_${date}_${block}`] = undefined
}else{
this.shownDom[`${rac}_${date}_${block}`] = true
}
}
checkSet(rac, date, block){
if(this.isset(this.shownDom,`${rac}_${date}_${block}`)){
return true
}else{
return false
}
}
isset (a, name ) {
let is = true
if ( a[name] == undefined || a[name] == '' || a[name] == null ) {
is = false
}
return is;
}
progressProps( val, originalBg = 'primary' ){
let bar: string
let border: string
if(val<60){
bar = 'danger'
}else if(val<100){
bar = 'warning'
}else{
bar = 'success'
}
if(originalBg == bar){
border = 'light'
}else{
border = bar
}
return {bar: bar, border: border, val: val}
}
excStatus( event ){
if( !event.status ){
let error = event.error.json()
this.toastr.error( error.msg, `Error ${event.error.status} - ${event.error.statusText}` )
if( error.Existente ){
console.error('Ausentismo existente: ', error.Existente)
}
if( error.errores ){
console.error('Ausentismo existente: ', error.errores)
}
}else{
this.toastr.success( event.error.msg, `Guardado` )
this.getAsistencia( this.searchCriteria['skill'] ? this.searchCriteria['skill'] : 0, event.fecha, event.fecha, [event.asesor] )
}
}
downloadXLS( id, title ){
this.toXls( id, title )
}
toXls( sheets, title ){
let wb = utils.table_to_book(document.getElementById(sheets), {raw: false});
let newSheets = {
jornada: JSON.parse(JSON.stringify(wb.Sheets['Sheet1'])),
extra: JSON.parse(JSON.stringify(wb.Sheets['Sheet1'])),
excepciones: JSON.parse(JSON.stringify(wb.Sheets['Sheet1'])),
retardos: JSON.parse(JSON.stringify(wb.Sheets['Sheet1']))
}
// tslint:disable-next-line:forin
for( let cell in wb.Sheets['Sheet1']){
// tslint:disable-next-line:max-line-length
let j = wb.Sheets['Sheet1'][cell].v, x2 = wb.Sheets['Sheet1'][cell].v, x1 = wb.Sheets['Sheet1'][cell].v, he = wb.Sheets['Sheet1'][cell].v, r = wb.Sheets['Sheet1'][cell].v, e = wb.Sheets['Sheet1'][cell].v
let compare = {}
if( cell.match(/^[A-D]{1}[0-9]*$/g) || cell.match(/^[A-Z]*[1]{1}$/g) ){
newSheets['jornada'][cell] = wb.Sheets['Sheet1'][cell]
newSheets['extra'][cell] = wb.Sheets['Sheet1'][cell]
newSheets['excepciones'][cell] = wb.Sheets['Sheet1'][cell]
newSheets['retardos'][cell] = wb.Sheets['Sheet1'][cell]
}else{
if( cell.match(/^[A-Z]*[0-9]*$/g)){
if( j.match(/[j]:/g) ){
let jornada = j.match(/(([j]:[ ]*[0-9]{2}:[0-9]{2} - [0-9]{2}:[0-9]{2})|([j]:[ ]*[a-zA-Z\-]*))/gm)
if( jornada ){
newSheets['jornada'][cell].v = jornada[0].replace('j:','').trim()
}else{
newSheets['jornada'][cell].v = '-'
}
}else{
newSheets['jornada'][cell].v = '*'
}
if( x1.match(/[x]:/g) ){
let extra1 = x1.match(/([x]:[ ]*[0-9]{2}:[0-9]{2} - [0-9]{2}:[0-9]{2})/gm)
if( extra1 ){
newSheets['extra'][cell].v = extra1[0].replace('x:','').trim()
}else{
newSheets['extra'][cell].v = '-'
}
if( x2.match(/((-->[ ]*\W[ ]*[0-9]{2}:[0-9]{2} - [0-9]{2}:[0-9]{2}))/g) ){
let extra2 = x2.match(/-->[ ]*\W[ ]*[0-9]{2}:[0-9]{2} - [0-9]{2}:[0-9]{2}/gm)
if( extra2 ){
newSheets['extra'][cell].v = newSheets['extra'][cell].v + '\r\n' + extra2[0].replace('-->','').trim()
newSheets['extra'][cell].t = 'h'
}
}
}else{
newSheets['extra'][cell].v = ''
}
if( e.match(/[e]:/g) ){
let excep = e.match(/[e]:[ ]*[a-zA-Z\-]*/gm)
if( excep ){
newSheets['excepciones'][cell].v = excep[0].replace('e:','').trim()
}else{
newSheets['excepciones'][cell].v = '-'
}
}else{
newSheets['excepciones'][cell].v = ''
}
if( r.match(/[r]:/g) ){
let rts = r.match(/[r]:[ ]*[a-zA-Z\-]*/gm)
if( rts ){
newSheets['retardos'][cell].v = rts[0].replace('r:','').trim()
}else{
newSheets['retardos'][cell].v = '-'
}
}else{
newSheets['retardos'][cell].v = ''
}
}
}
}
wb.SheetNames[0]='Jornadas'
delete wb.Sheets['Sheet1']
wb.SheetNames.push('Excepciones')
wb.SheetNames.push('Retardos')
wb.SheetNames.push('Extra')
wb.Sheets['Jornadas'] = newSheets['jornada']
wb.Sheets['Extra'] = newSheets['extra']
wb.Sheets['Excepciones']= newSheets['excepciones']
wb.Sheets['Retardos'] = newSheets['retardos']
let wbout = write(wb, { bookType: 'xlsx', bookSST: true, type:
'binary' });
saveAs(new Blob([this.s2ab(wbout)], { type: 'application/vnd.ms-excel' }), `${title}.xlsx`)
}
s2ab(s) {
let buf = new ArrayBuffer(s.length);
let view = new Uint8Array(buf);
// tslint:disable-next-line:no-bitwise
for (let i=0; i!=s.length; ++i) { view[i] = s.charCodeAt(i) & 0xFF; }
return buf;
}
tst(){
console.log(this.asistData)
}
sRep( input ){
return input.toLowerCase()
.replace(/á/gm,'a')
.replace(/é/gm,'e')
.replace(/í/gm,'i')
.replace(/ó/gm,'o')
.replace(/ú/gm,'u')
.replace(/ñ/gm,'n')
}
hxSave( event ){
if( event.status ){
this.toastr.success(`${ event.msg }`, 'Success!');
}else{
this.toastr.error(`${ event.msg }`, 'Error!');
}
}
}
| {
this.asistData[asesor]['data'][inicio]['loading'] = true
} | conditional_block |
asistencia.component.ts | import { Component, OnInit, ViewChild, ViewContainerRef, Input, ChangeDetectorRef } from '@angular/core';
import { PopoverModule } from 'ngx-popover';
import { DaterangepickerConfig, DaterangePickerComponent } from 'ng2-daterangepicker';
import { ToastrService } from 'ngx-toastr';
import { Router, ActivatedRoute } from '@angular/router';
import { Observable } from 'rxjs/Observable';
import { Subject } from 'rxjs/Subject';
import { AddAusentismoComponent } from '../formularios/add-ausentismo.component';
import { PyaExceptionComponent } from '../formularios/pya-exception.component';
import { ApiService, InitService, TokenCheckService } from '../../services/service.index';
import { saveAs } from 'file-saver';
import { utils, write, WorkBook, read } from 'xlsx';
import * as Globals from '../../globals';
declare var jQuery:any;
// import * as moment from 'moment';
import * as moment from 'moment-timezone';
@Component({
selector: 'app-asistencia',
templateUrl: './asistencia.component.html',
styles: ['input[type=checkbox]{ cursor: pointer}']
})
export class AsistenciaComponent implements OnInit {
@ViewChild( DaterangePickerComponent,{static:false} ) private picker: DaterangePickerComponent
@ViewChild( AddAusentismoComponent,{static:false} ) _aus: AddAusentismoComponent
@ViewChild( PyaExceptionComponent,{static:false} ) _pya:PyaExceptionComponent
currentUser: any
showContents:boolean = false
mainCredential:string = 'tablas_f'
filterExpanded:boolean = false
selectedAsesores:any = []
loading:boolean = false
showProgress:boolean = false
asistData:any
datesData:any
deps:any
searchBy:boolean = true
private depsSubject = new Subject<any>();
private asistSubject = new Subject<any>();
orederedKeys:any
shownDom:any = []
depLoaders:any = {}
depLoadFlag:boolean = false
showOpts:Object = {
ch_jornada: true,
ch_comida: false,
ch_excep: true,
ch_excep_p: false,
ch_ret: false,
ch_sa: false,
ch_x: false,
ch_x_p: false,
sh_p: false,
sh_d: false
}
today:any = moment()
searchCriteria:Object = {
start: this.today.subtract(15, 'days').format('YYYY-MM-DD'),
end: this.today.add(15, 'days').format('YYYY-MM-DD'),
value: '',
skill: ''
}
searchFilter:string = ''
searchFields:any = [
'Nombre', 'PuestoName', 'Departamento'
]
error:string = null
constructor(
private _dateRangeOptions: DaterangepickerConfig,
private _api:ApiService,
private _init:InitService,
private _tokenCheck:TokenCheckService,
public toastr: ToastrService,
private cd: ChangeDetectorRef
) {
this.currentUser = this._init.getUserInfo()
this.showContents = this._init.checkCredential( this.mainCredential, true )
this._tokenCheck.getTokenStatus()
.subscribe( res => {
if( res.status ){
this.showContents = this._init.checkCredential( this.mainCredential, true )
}else{
this.showContents = false
}
})
this.searchCriteria['value']= `${this.searchCriteria['start']} - ${this.searchCriteria['end']}`
this._dateRangeOptions.settings = {
autoUpdateInput: true,
locale: { format: 'YYYY-MM-DD' }
}
this.loadDeps()
moment.locale('es-MX')
}
searchAsistencia( dep, inicio, fin ){
if( dep != 'MX' && dep != 'CO' ){
this.depLoadFlag = false
this.getAsistencia( dep, inicio, fin )
}else{
this.depLoadFlag = true
this.asistData = {}
this.datesData = []
this.depLoaders = {}
for( let pcrc of this.deps ){
if( pcrc.id != 29 && pcrc.id != 56 && pcrc.sede == dep ){
this.depLoaders[pcrc.Departamento] = true
let params = `${pcrc.id}/${inicio}/${fin}`
this.getAllDeps( pcrc, params, () => {
this.orderNames( this.asistData, 1)
})
}
}
this.orderNames( this.asistData, 1)
}
}
getAllDeps( pcrc, params, callback ){
this._api.restfulGet( params, 'Asistencia/pya' )
.subscribe( res =>{
this.depLoaders[pcrc.Departamento] = false
if( res['data'] != null ){
Object.assign(this.asistData,res['data']);
this.datesData = (res['Fechas'])
}
callback()
},
(err) => {
this.error = err
this.depLoaders[pcrc.Departamento] = false
this.toastr.error(`${ this.error }`, 'Error!');
callback()
});
}
getAsistencia( dep, inicio, fin, asesor?:any, flag=false ){
this.filterExpanded = false
this.searchFilter = ''
let params = {
dep : dep ,
inicio : inicio ,
fin : fin ,
asesor : asesor ,
noSup : null ,
order : null ,
}
if( asesor ){
if( !flag ){
this.asistData[asesor]['data'][inicio]['loading'] = true
}else{
this.loading = true
}
}else{
this.loading = true
}
this._api.restfulPut( params, 'Asistencia/pya' )
.subscribe( res =>{
if( asesor && !flag){
// console.log( res )
this.singleUpdate( res )
}else{
this.asistSubject.next({ res })
}
},
(err) => {
this.error = err
this.loading = false
this.toastr.error(`${ this.error }`, 'Error!');
});
}
singleUpdate( data ){
for( let asesor in data.data ){
// tslint:disable-next-line:forin
for(let fecha in data.Fechas ){
this.asistData[ asesor ]['data'][ fecha ] = data.data[ asesor ][ 'data' ][ fecha ]
}
}
}
compareDates( date ){
let header = moment(date)
let td = moment( this.today.format('YYYY-MM-DD') )
if(header >= td){
return false
}else{
return true
}
}
loadDeps(){
this._api.restfulGet( '','Headcount/deps' )
.subscribe( res => {
this.depsSubject.next({ res })
})
}
@Input() loadData(): Observable<any>{
return this.asistSubject.asObservable();
}
@Input() getDeps(): Observable<any>{
return this.depsSubject.asObservable();
}
setVal( inicio, fin ){
this.searchCriteria['start'] = inicio.format('YYYY-MM-DD')
this.searchCriteria['end'] = fin.format('YYYY-MM-DD')
}
pcrcChange( select ){
this.searchCriteria['skill']=event.target['value']
}
applyFilter( rac ){
if(this.searchFilter == ''){
return true
}
for(let item of this.searchFields){
if(rac[item].toLowerCase().includes(this.searchFilter.toLowerCase())){
return true
}
}
return false
}
ngOnInit() {
this.getDeps()
.subscribe( res => {
this.deps = res.res
// console.log( res.res )
this.cd.markForCheck()
})
this.loadData()
.subscribe( res => {
this.asistData = res.res['data']
this.datesData = res.res['Fechas']
this.orderNames( this.asistData, 1)
this.loading = false
// console.log( res.res )
// console.log( this.asistData )
this.cd.markForCheck()
})
}
printTimeInterval(date, start, end){
let inicio = moment.tz(`${date} ${start}`, 'this._zh.defaultZone')
let fin = moment.tz(`${date} ${end}`, 'this._zh.defaultZone')
let inicioCUN = inicio.clone().tz('America/Bogota')
let finCUN = fin.clone().tz('America/Bogota')
let result = `${inicioCUN.format('HH:mm')} - ${finCUN.format('HH:mm')}`
return result
}
printTime(date, time){
let tiempo = moment.tz(`${date} ${time}`, 'this._zh.defaultZone')
let tiempoCUN = tiempo.clone().tz('America/Bogota')
let result = tiempoCUN.format('HH:mm:ss')
return result
}
formatDate(datetime, format){
let time = moment.tz(datetime, 'this._zh.defaultZone')
let cunTime = time.clone().tz('America/Bogota')
return cunTime.format(format)
}
orderNames( data, ord=1 ){
// console.log(data)
let sortArray:any = []
let tmpSlot:any = []
let flag:boolean
let pushFlag:boolean
let x:number
let lastInput:any
let compare:any = []
for(let id in data){
if(sortArray.length == 0){
sortArray[0] = id
}else{
flag = false
for(x=0; x<sortArray.length; x++){
if(!flag){
if(ord == 1){
compare[1] = data[id]['Nombre']
compare[2] = data[sortArray[x]]['Nombre']
}else{
compare[1] = data[sortArray[x]]['Nombre']
compare[2] = data[id]['Nombre']
}
if(compare[1] < compare[2]){
tmpSlot[0] = sortArray[x]
sortArray[x] = id
flag = true
if(x == (sortArray.length)-1){
pushFlag=true
lastInput = tmpSlot[0]
}
}else{
if(x == (sortArray.length)-1){
pushFlag=true
lastInput = id
}
}
}else{
tmpSlot[1] = sortArray[x]
sortArray[x] = tmpSlot[0]
tmpSlot[0] = tmpSlot[1]
}
}
if(pushFlag){
sortArray.push(lastInput)
}
}
}
this.orederedKeys = sortArray
}
ausentNotif( event ){
this.toastr.error(`${ event.msg }`, `${ event.title.toUpperCase() }!`);
}
perCumplimiento( rac, date, log ){
let inicio = this.asistData[rac].data[date][`${log}s`]
let fin = this.asistData[rac].data[date][`${log}e`]
let ji = this.asistData[rac].data[date][`${log}_login`]
let jf = this.asistData[rac].data[date][`${log}_logout`]
if( inicio == null ||
fin == null ||
ji == null ||
jf == null ){
return 0
}
let s = moment( inicio )
let e = moment( fin )
let js = moment( ji )
let je = moment( jf )
let total = e.diff(s, 'seconds')
let did = je.diff(js, 'seconds')
let result:number = did / total * 100
return (Math.floor(result))
}
timeDateXform( time ){
let td = moment(time)
if( td < moment(`${moment().format('YYYY-MM-DD')} 05:00:00`)){
return td.add(1, 'days')
}else{
return td
}
}
showDom(rac, date, block) |
checkSet(rac, date, block){
if(this.isset(this.shownDom,`${rac}_${date}_${block}`)){
return true
}else{
return false
}
}
isset (a, name ) {
let is = true
if ( a[name] == undefined || a[name] == '' || a[name] == null ) {
is = false
}
return is;
}
progressProps( val, originalBg = 'primary' ){
let bar: string
let border: string
if(val<60){
bar = 'danger'
}else if(val<100){
bar = 'warning'
}else{
bar = 'success'
}
if(originalBg == bar){
border = 'light'
}else{
border = bar
}
return {bar: bar, border: border, val: val}
}
excStatus( event ){
if( !event.status ){
let error = event.error.json()
this.toastr.error( error.msg, `Error ${event.error.status} - ${event.error.statusText}` )
if( error.Existente ){
console.error('Ausentismo existente: ', error.Existente)
}
if( error.errores ){
console.error('Ausentismo existente: ', error.errores)
}
}else{
this.toastr.success( event.error.msg, `Guardado` )
this.getAsistencia( this.searchCriteria['skill'] ? this.searchCriteria['skill'] : 0, event.fecha, event.fecha, [event.asesor] )
}
}
downloadXLS( id, title ){
this.toXls( id, title )
}
toXls( sheets, title ){
let wb = utils.table_to_book(document.getElementById(sheets), {raw: false});
let newSheets = {
jornada: JSON.parse(JSON.stringify(wb.Sheets['Sheet1'])),
extra: JSON.parse(JSON.stringify(wb.Sheets['Sheet1'])),
excepciones: JSON.parse(JSON.stringify(wb.Sheets['Sheet1'])),
retardos: JSON.parse(JSON.stringify(wb.Sheets['Sheet1']))
}
// tslint:disable-next-line:forin
for( let cell in wb.Sheets['Sheet1']){
// tslint:disable-next-line:max-line-length
let j = wb.Sheets['Sheet1'][cell].v, x2 = wb.Sheets['Sheet1'][cell].v, x1 = wb.Sheets['Sheet1'][cell].v, he = wb.Sheets['Sheet1'][cell].v, r = wb.Sheets['Sheet1'][cell].v, e = wb.Sheets['Sheet1'][cell].v
let compare = {}
if( cell.match(/^[A-D]{1}[0-9]*$/g) || cell.match(/^[A-Z]*[1]{1}$/g) ){
newSheets['jornada'][cell] = wb.Sheets['Sheet1'][cell]
newSheets['extra'][cell] = wb.Sheets['Sheet1'][cell]
newSheets['excepciones'][cell] = wb.Sheets['Sheet1'][cell]
newSheets['retardos'][cell] = wb.Sheets['Sheet1'][cell]
}else{
if( cell.match(/^[A-Z]*[0-9]*$/g)){
if( j.match(/[j]:/g) ){
let jornada = j.match(/(([j]:[ ]*[0-9]{2}:[0-9]{2} - [0-9]{2}:[0-9]{2})|([j]:[ ]*[a-zA-Z\-]*))/gm)
if( jornada ){
newSheets['jornada'][cell].v = jornada[0].replace('j:','').trim()
}else{
newSheets['jornada'][cell].v = '-'
}
}else{
newSheets['jornada'][cell].v = '*'
}
if( x1.match(/[x]:/g) ){
let extra1 = x1.match(/([x]:[ ]*[0-9]{2}:[0-9]{2} - [0-9]{2}:[0-9]{2})/gm)
if( extra1 ){
newSheets['extra'][cell].v = extra1[0].replace('x:','').trim()
}else{
newSheets['extra'][cell].v = '-'
}
if( x2.match(/((-->[ ]*\W[ ]*[0-9]{2}:[0-9]{2} - [0-9]{2}:[0-9]{2}))/g) ){
let extra2 = x2.match(/-->[ ]*\W[ ]*[0-9]{2}:[0-9]{2} - [0-9]{2}:[0-9]{2}/gm)
if( extra2 ){
newSheets['extra'][cell].v = newSheets['extra'][cell].v + '\r\n' + extra2[0].replace('-->','').trim()
newSheets['extra'][cell].t = 'h'
}
}
}else{
newSheets['extra'][cell].v = ''
}
if( e.match(/[e]:/g) ){
let excep = e.match(/[e]:[ ]*[a-zA-Z\-]*/gm)
if( excep ){
newSheets['excepciones'][cell].v = excep[0].replace('e:','').trim()
}else{
newSheets['excepciones'][cell].v = '-'
}
}else{
newSheets['excepciones'][cell].v = ''
}
if( r.match(/[r]:/g) ){
let rts = r.match(/[r]:[ ]*[a-zA-Z\-]*/gm)
if( rts ){
newSheets['retardos'][cell].v = rts[0].replace('r:','').trim()
}else{
newSheets['retardos'][cell].v = '-'
}
}else{
newSheets['retardos'][cell].v = ''
}
}
}
}
wb.SheetNames[0]='Jornadas'
delete wb.Sheets['Sheet1']
wb.SheetNames.push('Excepciones')
wb.SheetNames.push('Retardos')
wb.SheetNames.push('Extra')
wb.Sheets['Jornadas'] = newSheets['jornada']
wb.Sheets['Extra'] = newSheets['extra']
wb.Sheets['Excepciones']= newSheets['excepciones']
wb.Sheets['Retardos'] = newSheets['retardos']
let wbout = write(wb, { bookType: 'xlsx', bookSST: true, type:
'binary' });
saveAs(new Blob([this.s2ab(wbout)], { type: 'application/vnd.ms-excel' }), `${title}.xlsx`)
}
s2ab(s) {
let buf = new ArrayBuffer(s.length);
let view = new Uint8Array(buf);
// tslint:disable-next-line:no-bitwise
for (let i=0; i!=s.length; ++i) { view[i] = s.charCodeAt(i) & 0xFF; }
return buf;
}
tst(){
console.log(this.asistData)
}
sRep( input ){
return input.toLowerCase()
.replace(/á/gm,'a')
.replace(/é/gm,'e')
.replace(/í/gm,'i')
.replace(/ó/gm,'o')
.replace(/ú/gm,'u')
.replace(/ñ/gm,'n')
}
hxSave( event ){
if( event.status ){
this.toastr.success(`${ event.msg }`, 'Success!');
}else{
this.toastr.error(`${ event.msg }`, 'Error!');
}
}
}
| {
if(this.checkSet(rac, date, block)){
this.shownDom[`${rac}_${date}_${block}`] = undefined
}else{
this.shownDom[`${rac}_${date}_${block}`] = true
}
} | identifier_body |
asistencia.component.ts | import { Component, OnInit, ViewChild, ViewContainerRef, Input, ChangeDetectorRef } from '@angular/core';
import { PopoverModule } from 'ngx-popover';
import { DaterangepickerConfig, DaterangePickerComponent } from 'ng2-daterangepicker';
import { ToastrService } from 'ngx-toastr';
import { Router, ActivatedRoute } from '@angular/router';
import { Observable } from 'rxjs/Observable';
import { Subject } from 'rxjs/Subject';
import { AddAusentismoComponent } from '../formularios/add-ausentismo.component';
import { PyaExceptionComponent } from '../formularios/pya-exception.component';
import { ApiService, InitService, TokenCheckService } from '../../services/service.index';
import { saveAs } from 'file-saver';
import { utils, write, WorkBook, read } from 'xlsx';
import * as Globals from '../../globals';
declare var jQuery:any;
// import * as moment from 'moment';
import * as moment from 'moment-timezone';
@Component({
selector: 'app-asistencia',
templateUrl: './asistencia.component.html',
styles: ['input[type=checkbox]{ cursor: pointer}']
})
export class AsistenciaComponent implements OnInit {
@ViewChild( DaterangePickerComponent,{static:false} ) private picker: DaterangePickerComponent
@ViewChild( AddAusentismoComponent,{static:false} ) _aus: AddAusentismoComponent
@ViewChild( PyaExceptionComponent,{static:false} ) _pya:PyaExceptionComponent
currentUser: any
showContents:boolean = false
mainCredential:string = 'tablas_f'
filterExpanded:boolean = false
selectedAsesores:any = []
loading:boolean = false
showProgress:boolean = false
asistData:any
datesData:any
deps:any
searchBy:boolean = true
private depsSubject = new Subject<any>();
private asistSubject = new Subject<any>();
orederedKeys:any
shownDom:any = []
depLoaders:any = {}
depLoadFlag:boolean = false
showOpts:Object = {
ch_jornada: true,
ch_comida: false,
ch_excep: true,
ch_excep_p: false,
ch_ret: false,
ch_sa: false,
ch_x: false,
ch_x_p: false,
sh_p: false,
sh_d: false
}
today:any = moment()
searchCriteria:Object = {
start: this.today.subtract(15, 'days').format('YYYY-MM-DD'),
end: this.today.add(15, 'days').format('YYYY-MM-DD'),
value: '',
skill: ''
}
searchFilter:string = ''
searchFields:any = [
'Nombre', 'PuestoName', 'Departamento'
]
error:string = null
constructor(
private _dateRangeOptions: DaterangepickerConfig,
private _api:ApiService,
private _init:InitService,
private _tokenCheck:TokenCheckService,
public toastr: ToastrService,
private cd: ChangeDetectorRef
) {
this.currentUser = this._init.getUserInfo()
this.showContents = this._init.checkCredential( this.mainCredential, true )
this._tokenCheck.getTokenStatus()
.subscribe( res => {
if( res.status ){
this.showContents = this._init.checkCredential( this.mainCredential, true )
}else{
this.showContents = false
}
})
this.searchCriteria['value']= `${this.searchCriteria['start']} - ${this.searchCriteria['end']}`
this._dateRangeOptions.settings = {
autoUpdateInput: true,
locale: { format: 'YYYY-MM-DD' }
}
this.loadDeps()
moment.locale('es-MX')
}
searchAsistencia( dep, inicio, fin ){
if( dep != 'MX' && dep != 'CO' ){
this.depLoadFlag = false
this.getAsistencia( dep, inicio, fin )
}else{
this.depLoadFlag = true
this.asistData = {}
this.datesData = []
this.depLoaders = {}
for( let pcrc of this.deps ){
if( pcrc.id != 29 && pcrc.id != 56 && pcrc.sede == dep ){
this.depLoaders[pcrc.Departamento] = true
let params = `${pcrc.id}/${inicio}/${fin}`
this.getAllDeps( pcrc, params, () => {
this.orderNames( this.asistData, 1)
})
}
}
this.orderNames( this.asistData, 1)
}
}
getAllDeps( pcrc, params, callback ){
this._api.restfulGet( params, 'Asistencia/pya' )
.subscribe( res =>{
this.depLoaders[pcrc.Departamento] = false
if( res['data'] != null ){
Object.assign(this.asistData,res['data']);
this.datesData = (res['Fechas'])
}
callback()
},
(err) => {
this.error = err
this.depLoaders[pcrc.Departamento] = false
this.toastr.error(`${ this.error }`, 'Error!');
callback()
});
}
getAsistencia( dep, inicio, fin, asesor?:any, flag=false ){
this.filterExpanded = false
this.searchFilter = ''
let params = {
dep : dep ,
inicio : inicio ,
fin : fin ,
asesor : asesor ,
noSup : null ,
order : null ,
}
if( asesor ){
if( !flag ){
this.asistData[asesor]['data'][inicio]['loading'] = true
}else{
this.loading = true
}
}else{
this.loading = true
}
this._api.restfulPut( params, 'Asistencia/pya' )
.subscribe( res =>{
if( asesor && !flag){
// console.log( res )
this.singleUpdate( res )
}else{
this.asistSubject.next({ res })
}
},
(err) => {
this.error = err
this.loading = false
this.toastr.error(`${ this.error }`, 'Error!');
});
}
singleUpdate( data ){
for( let asesor in data.data ){
// tslint:disable-next-line:forin
for(let fecha in data.Fechas ){
this.asistData[ asesor ]['data'][ fecha ] = data.data[ asesor ][ 'data' ][ fecha ]
}
}
}
compareDates( date ){
let header = moment(date)
let td = moment( this.today.format('YYYY-MM-DD') )
if(header >= td){
return false
}else{
return true
}
}
loadDeps(){
this._api.restfulGet( '','Headcount/deps' )
.subscribe( res => {
this.depsSubject.next({ res })
})
}
@Input() loadData(): Observable<any>{
return this.asistSubject.asObservable();
}
@Input() getDeps(): Observable<any>{
return this.depsSubject.asObservable();
}
setVal( inicio, fin ){
this.searchCriteria['start'] = inicio.format('YYYY-MM-DD')
this.searchCriteria['end'] = fin.format('YYYY-MM-DD')
}
pcrcChange( select ){
this.searchCriteria['skill']=event.target['value']
}
applyFilter( rac ){
if(this.searchFilter == ''){
return true
}
for(let item of this.searchFields){
if(rac[item].toLowerCase().includes(this.searchFilter.toLowerCase())){
return true
}
}
return false
}
ngOnInit() {
this.getDeps()
.subscribe( res => {
this.deps = res.res
// console.log( res.res )
this.cd.markForCheck()
})
this.loadData()
.subscribe( res => {
this.asistData = res.res['data']
this.datesData = res.res['Fechas']
this.orderNames( this.asistData, 1)
this.loading = false
// console.log( res.res )
// console.log( this.asistData )
this.cd.markForCheck()
})
}
printTimeInterval(date, start, end){
let inicio = moment.tz(`${date} ${start}`, 'this._zh.defaultZone')
let fin = moment.tz(`${date} ${end}`, 'this._zh.defaultZone')
let inicioCUN = inicio.clone().tz('America/Bogota')
let finCUN = fin.clone().tz('America/Bogota')
let result = `${inicioCUN.format('HH:mm')} - ${finCUN.format('HH:mm')}`
return result
}
printTime(date, time){
let tiempo = moment.tz(`${date} ${time}`, 'this._zh.defaultZone')
let tiempoCUN = tiempo.clone().tz('America/Bogota')
let result = tiempoCUN.format('HH:mm:ss')
return result
}
| let cunTime = time.clone().tz('America/Bogota')
return cunTime.format(format)
}
orderNames( data, ord=1 ){
// console.log(data)
let sortArray:any = []
let tmpSlot:any = []
let flag:boolean
let pushFlag:boolean
let x:number
let lastInput:any
let compare:any = []
for(let id in data){
if(sortArray.length == 0){
sortArray[0] = id
}else{
flag = false
for(x=0; x<sortArray.length; x++){
if(!flag){
if(ord == 1){
compare[1] = data[id]['Nombre']
compare[2] = data[sortArray[x]]['Nombre']
}else{
compare[1] = data[sortArray[x]]['Nombre']
compare[2] = data[id]['Nombre']
}
if(compare[1] < compare[2]){
tmpSlot[0] = sortArray[x]
sortArray[x] = id
flag = true
if(x == (sortArray.length)-1){
pushFlag=true
lastInput = tmpSlot[0]
}
}else{
if(x == (sortArray.length)-1){
pushFlag=true
lastInput = id
}
}
}else{
tmpSlot[1] = sortArray[x]
sortArray[x] = tmpSlot[0]
tmpSlot[0] = tmpSlot[1]
}
}
if(pushFlag){
sortArray.push(lastInput)
}
}
}
this.orederedKeys = sortArray
}
ausentNotif( event ){
this.toastr.error(`${ event.msg }`, `${ event.title.toUpperCase() }!`);
}
perCumplimiento( rac, date, log ){
let inicio = this.asistData[rac].data[date][`${log}s`]
let fin = this.asistData[rac].data[date][`${log}e`]
let ji = this.asistData[rac].data[date][`${log}_login`]
let jf = this.asistData[rac].data[date][`${log}_logout`]
if( inicio == null ||
fin == null ||
ji == null ||
jf == null ){
return 0
}
let s = moment( inicio )
let e = moment( fin )
let js = moment( ji )
let je = moment( jf )
let total = e.diff(s, 'seconds')
let did = je.diff(js, 'seconds')
let result:number = did / total * 100
return (Math.floor(result))
}
timeDateXform( time ){
let td = moment(time)
if( td < moment(`${moment().format('YYYY-MM-DD')} 05:00:00`)){
return td.add(1, 'days')
}else{
return td
}
}
showDom(rac, date, block){
if(this.checkSet(rac, date, block)){
this.shownDom[`${rac}_${date}_${block}`] = undefined
}else{
this.shownDom[`${rac}_${date}_${block}`] = true
}
}
checkSet(rac, date, block){
if(this.isset(this.shownDom,`${rac}_${date}_${block}`)){
return true
}else{
return false
}
}
isset (a, name ) {
let is = true
if ( a[name] == undefined || a[name] == '' || a[name] == null ) {
is = false
}
return is;
}
progressProps( val, originalBg = 'primary' ){
let bar: string
let border: string
if(val<60){
bar = 'danger'
}else if(val<100){
bar = 'warning'
}else{
bar = 'success'
}
if(originalBg == bar){
border = 'light'
}else{
border = bar
}
return {bar: bar, border: border, val: val}
}
excStatus( event ){
if( !event.status ){
let error = event.error.json()
this.toastr.error( error.msg, `Error ${event.error.status} - ${event.error.statusText}` )
if( error.Existente ){
console.error('Ausentismo existente: ', error.Existente)
}
if( error.errores ){
console.error('Ausentismo existente: ', error.errores)
}
}else{
this.toastr.success( event.error.msg, `Guardado` )
this.getAsistencia( this.searchCriteria['skill'] ? this.searchCriteria['skill'] : 0, event.fecha, event.fecha, [event.asesor] )
}
}
downloadXLS( id, title ){
this.toXls( id, title )
}
toXls( sheets, title ){
let wb = utils.table_to_book(document.getElementById(sheets), {raw: false});
let newSheets = {
jornada: JSON.parse(JSON.stringify(wb.Sheets['Sheet1'])),
extra: JSON.parse(JSON.stringify(wb.Sheets['Sheet1'])),
excepciones: JSON.parse(JSON.stringify(wb.Sheets['Sheet1'])),
retardos: JSON.parse(JSON.stringify(wb.Sheets['Sheet1']))
}
// tslint:disable-next-line:forin
for( let cell in wb.Sheets['Sheet1']){
// tslint:disable-next-line:max-line-length
let j = wb.Sheets['Sheet1'][cell].v, x2 = wb.Sheets['Sheet1'][cell].v, x1 = wb.Sheets['Sheet1'][cell].v, he = wb.Sheets['Sheet1'][cell].v, r = wb.Sheets['Sheet1'][cell].v, e = wb.Sheets['Sheet1'][cell].v
let compare = {}
if( cell.match(/^[A-D]{1}[0-9]*$/g) || cell.match(/^[A-Z]*[1]{1}$/g) ){
newSheets['jornada'][cell] = wb.Sheets['Sheet1'][cell]
newSheets['extra'][cell] = wb.Sheets['Sheet1'][cell]
newSheets['excepciones'][cell] = wb.Sheets['Sheet1'][cell]
newSheets['retardos'][cell] = wb.Sheets['Sheet1'][cell]
}else{
if( cell.match(/^[A-Z]*[0-9]*$/g)){
if( j.match(/[j]:/g) ){
let jornada = j.match(/(([j]:[ ]*[0-9]{2}:[0-9]{2} - [0-9]{2}:[0-9]{2})|([j]:[ ]*[a-zA-Z\-]*))/gm)
if( jornada ){
newSheets['jornada'][cell].v = jornada[0].replace('j:','').trim()
}else{
newSheets['jornada'][cell].v = '-'
}
}else{
newSheets['jornada'][cell].v = '*'
}
if( x1.match(/[x]:/g) ){
let extra1 = x1.match(/([x]:[ ]*[0-9]{2}:[0-9]{2} - [0-9]{2}:[0-9]{2})/gm)
if( extra1 ){
newSheets['extra'][cell].v = extra1[0].replace('x:','').trim()
}else{
newSheets['extra'][cell].v = '-'
}
if( x2.match(/((-->[ ]*\W[ ]*[0-9]{2}:[0-9]{2} - [0-9]{2}:[0-9]{2}))/g) ){
let extra2 = x2.match(/-->[ ]*\W[ ]*[0-9]{2}:[0-9]{2} - [0-9]{2}:[0-9]{2}/gm)
if( extra2 ){
newSheets['extra'][cell].v = newSheets['extra'][cell].v + '\r\n' + extra2[0].replace('-->','').trim()
newSheets['extra'][cell].t = 'h'
}
}
}else{
newSheets['extra'][cell].v = ''
}
if( e.match(/[e]:/g) ){
let excep = e.match(/[e]:[ ]*[a-zA-Z\-]*/gm)
if( excep ){
newSheets['excepciones'][cell].v = excep[0].replace('e:','').trim()
}else{
newSheets['excepciones'][cell].v = '-'
}
}else{
newSheets['excepciones'][cell].v = ''
}
if( r.match(/[r]:/g) ){
let rts = r.match(/[r]:[ ]*[a-zA-Z\-]*/gm)
if( rts ){
newSheets['retardos'][cell].v = rts[0].replace('r:','').trim()
}else{
newSheets['retardos'][cell].v = '-'
}
}else{
newSheets['retardos'][cell].v = ''
}
}
}
}
wb.SheetNames[0]='Jornadas'
delete wb.Sheets['Sheet1']
wb.SheetNames.push('Excepciones')
wb.SheetNames.push('Retardos')
wb.SheetNames.push('Extra')
wb.Sheets['Jornadas'] = newSheets['jornada']
wb.Sheets['Extra'] = newSheets['extra']
wb.Sheets['Excepciones']= newSheets['excepciones']
wb.Sheets['Retardos'] = newSheets['retardos']
let wbout = write(wb, { bookType: 'xlsx', bookSST: true, type:
'binary' });
saveAs(new Blob([this.s2ab(wbout)], { type: 'application/vnd.ms-excel' }), `${title}.xlsx`)
}
s2ab(s) {
let buf = new ArrayBuffer(s.length);
let view = new Uint8Array(buf);
// tslint:disable-next-line:no-bitwise
for (let i=0; i!=s.length; ++i) { view[i] = s.charCodeAt(i) & 0xFF; }
return buf;
}
tst(){
console.log(this.asistData)
}
sRep( input ){
return input.toLowerCase()
.replace(/á/gm,'a')
.replace(/é/gm,'e')
.replace(/í/gm,'i')
.replace(/ó/gm,'o')
.replace(/ú/gm,'u')
.replace(/ñ/gm,'n')
}
hxSave( event ){
if( event.status ){
this.toastr.success(`${ event.msg }`, 'Success!');
}else{
this.toastr.error(`${ event.msg }`, 'Error!');
}
}
} | formatDate(datetime, format){
let time = moment.tz(datetime, 'this._zh.defaultZone') | random_line_split |
simple-handler.go | /*
Package simplehttp provides an http.Handler that makes it easy to serve Vugu applications.
Useful for development and production.
The idea is that the common behaviors needed to serve a Vugu site are readily available
in one place. If you require more functionality than simplehttp provides, nearly everything
it does is available in the github.com/vugu/vugu package and you can construct what you
need from its parts. That said, simplehttp should make it easy to start:
// dev flag enables most common development features
// including rebuild your .wasm upon page reload
dev := true
h := simplehttp.New(dir, dev)
After creation, some flags are available for tuning, e.g.:
h.EnableGenerate = true // upon page reload run "go generate ."
h.DisableBuildCache = true // do not try to cache build results during development, just rebuild every time
h.ParserGoPkgOpts.SkipRegisterComponentTypes = true // do not generate component registration init() stuff
Since it's just a regular http.Handler, starting a webserver is as simple as:
log.Fatal(http.ListenAndServe("127.0.0.1:5678", h))
*/
package simplehttp
import (
"bytes"
"compress/gzip"
"fmt"
"html/template"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"strings"
"sync"
"time"
"github.com/vugu/vugu/gen"
)
// SimpleHandler provides common web serving functionality useful for building Vugu sites.
type SimpleHandler struct {
Dir string // project directory
EnableBuildAndServe bool // enables the build-and-serve sequence for your wasm binary - useful for dev, should be off in production
EnableGenerate bool // if true calls `go generate` (requires EnableBuildAndServe)
ParserGoPkgOpts *gen.ParserGoPkgOpts // if set enables running ParserGoPkg with these options (requires EnableBuildAndServe)
DisableBuildCache bool // if true then rebuild every time instead of trying to cache (requires EnableBuildAndServe)
DisableTimestampPreservation bool // if true don't try to keep timestamps the same for files that are byte for byte identical (requires EnableBuildAndServe)
MainWasmPath string // path to serve main wasm file from, in dev mod defaults to "/main.wasm" (requires EnableBuildAndServe)
WasmExecJsPath string // path to serve wasm_exec.js from after finding in the local Go installation, in dev mode defaults to "/wasm_exec.js"
IsPage func(r *http.Request) bool // func that returns true if PageHandler should serve the request
PageHandler http.Handler // returns the HTML page
StaticHandler http.Handler // returns static assets from Dir with appropriate filtering or appropriate error
wasmExecJsOnce sync.Once
wasmExecJsContent []byte
wasmExecJsTs time.Time
lastBuildTime time.Time // time of last successful build
lastBuildContentGZ []byte // last successful build gzipped
mu sync.RWMutex
}
// New returns an SimpleHandler ready to serve using the specified directory.
// The dev flag indicates if development functionality is enabled.
// Settings on SimpleHandler may be tuned more specifically after creation, this function just
// returns sensible defaults for development or production according to if dev is true or false.
func New(dir string, dev bool) *SimpleHandler {
if !filepath.IsAbs(dir) {
panic(fmt.Errorf("dir %q is not an absolute path", dir))
}
ret := &SimpleHandler{
Dir: dir,
}
ret.IsPage = DefaultIsPageFunc
ret.PageHandler = &PageHandler{
Template: template.Must(template.New("_page_").Parse(DefaultPageTemplateSource)),
TemplateDataFunc: DefaultTemplateDataFunc,
}
ret.StaticHandler = FilteredFileServer(
regexp.MustCompile(`[.](css|js|html|map|jpg|jpeg|png|gif|svg|eot|ttf|otf|woff|woff2|wasm)$`),
http.Dir(dir))
if dev {
ret.EnableBuildAndServe = true
ret.ParserGoPkgOpts = &gen.ParserGoPkgOpts{}
ret.MainWasmPath = "/main.wasm"
ret.WasmExecJsPath = "/wasm_exec.js"
}
return ret
}
// ServeHTTP implements http.Handler.
func (h *SimpleHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// by default we tell browsers to always check back with us for content, even in production;
// we allow disabling by the caller just setting another value first; otherwise too much
// headache caused by pages that won't reload and we still reduce a lot of bandwidth usage with
// 304 responses, seems like a sensible trade off for now
if w.Header().Get("Cache-Control") == "" {
w.Header().Set("Cache-Control", "max-age=0, no-cache")
}
p := path.Clean("/" + r.URL.Path)
if h.EnableBuildAndServe && h.MainWasmPath == p {
h.buildAndServe(w, r)
return
}
if h.WasmExecJsPath == p {
h.serveGoEnvWasmExecJs(w, r)
return
}
if h.IsPage(r) {
h.PageHandler.ServeHTTP(w, r)
return
}
h.StaticHandler.ServeHTTP(w, r)
}
func (h *SimpleHandler) buildAndServe(w http.ResponseWriter, r *http.Request) {
// EnableGenerate bool // if true calls `go generate` (requires EnableBuildAndServe)
// main.wasm and build process, first check if it's needed
h.mu.RLock()
lastBuildTime := h.lastBuildTime
lastBuildContentGZ := h.lastBuildContentGZ
h.mu.RUnlock()
var buildDirTs time.Time
var err error
if !h.DisableTimestampPreservation {
buildDirTs, err = dirTimestamp(h.Dir)
if err != nil {
log.Printf("error in dirTimestamp(%q): %v", h.Dir, err)
goto doBuild
}
}
if len(lastBuildContentGZ) == 0 {
// log.Printf("2")
goto doBuild
}
if h.DisableBuildCache {
goto doBuild
}
// skip build process if timestamp from build dir exists and is equal or older than our last build
if !buildDirTs.IsZero() && !buildDirTs.After(lastBuildTime) {
// log.Printf("3")
goto serveBuiltFile
}
// // a false return value means we should send a 304
// if !checkIfModifiedSince(r, buildDirTs) {
// w.WriteHeader(http.StatusNotModified)
// return
// }
// FIXME: might be useful to make it so only one thread rebuilds at a time and they both use the result
doBuild:
// log.Printf("GOT HERE")
{
if h.ParserGoPkgOpts != nil {
pg := gen.NewParserGoPkg(h.Dir, h.ParserGoPkgOpts)
err := pg.Run()
if err != nil {
msg := fmt.Sprintf("Error from ParserGoPkg: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
}
f, err := ioutil.TempFile("", "main_wasm_")
if err != nil {
panic(err)
}
fpath := f.Name()
f.Close()
os.Remove(f.Name())
defer os.Remove(f.Name())
startTime := time.Now()
if h.EnableGenerate {
cmd := exec.Command("go", "generate", ".")
cmd.Dir = h.Dir
cmd.Env = append(cmd.Env, os.Environ()...)
b, err := cmd.CombinedOutput()
w.Header().Set("X-Go-Generate-Duration", time.Since(startTime).String())
if err != nil {
msg := fmt.Sprintf("Error from generate: %v; Output:\n%s", err, b)
log.Print(msg)
http.Error(w, msg, 500)
return
}
}
// GOOS=js GOARCH=wasm go build -o main.wasm .
startTime = time.Now()
runCommand := func(args ...string) ([]byte, error) {
cmd := exec.Command(args[0], args[1:]...)
cmd.Dir = h.Dir
cmd.Env = append(cmd.Env, os.Environ()...)
cmd.Env = append(cmd.Env, "GOOS=js", "GOARCH=wasm")
b, err := cmd.CombinedOutput()
return b, err
}
b, err := runCommand("go", "mod", "tidy")
if err == nil {
b, err = runCommand("go", "build", "-o", fpath, ".")
}
w.Header().Set("X-Go-Build-Duration", time.Since(startTime).String())
if err != nil {
msg := fmt.Sprintf("Error from compile: %v (out path=%q); Output:\n%s", err, fpath, b)
log.Print(msg)
http.Error(w, msg, 500)
return
}
f, err = os.Open(fpath)
if err != nil {
msg := fmt.Sprintf("Error opening file after build: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
// gzip with max compression
var buf bytes.Buffer
gzw, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
n, err := io.Copy(gzw, f)
if err != nil {
msg := fmt.Sprintf("Error reading and compressing binary: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
gzw.Close()
w.Header().Set("X-Gunzipped-Size", fmt.Sprint(n))
// update cache
if buildDirTs.IsZero() {
lastBuildTime = time.Now()
} else {
lastBuildTime = buildDirTs
}
lastBuildContentGZ = buf.Bytes()
// log.Printf("GOT TO UPDATE")
h.mu.Lock()
h.lastBuildTime = lastBuildTime
h.lastBuildContentGZ = lastBuildContentGZ
h.mu.Unlock()
}
serveBuiltFile:
w.Header().Set("Content-Type", "application/wasm")
// w.Header().Set("Last-Modified", lastBuildTime.Format(http.TimeFormat)) // handled by http.ServeContent
// if client supports gzip response (the usual case), we just set the gzip header and send back
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
w.Header().Set("Content-Encoding", "gzip")
w.Header().Set("X-Gzipped-Size", fmt.Sprint(len(lastBuildContentGZ)))
http.ServeContent(w, r, h.MainWasmPath, lastBuildTime, bytes.NewReader(lastBuildContentGZ))
return
}
// no gzip, we decompress internally and send it back
gzr, _ := gzip.NewReader(bytes.NewReader(lastBuildContentGZ))
_, err = io.Copy(w, gzr)
if err != nil {
log.Print(err)
}
return
}
func (h *SimpleHandler) serveGoEnvWasmExecJs(w http.ResponseWriter, r *http.Request) {
b, err := exec.Command("go", "env", "GOROOT").CombinedOutput()
if err != nil {
http.Error(w, "failed to run `go env GOROOT`: "+err.Error(), 500)
return
}
h.wasmExecJsOnce.Do(func() {
h.wasmExecJsContent, err = ioutil.ReadFile(filepath.Join(strings.TrimSpace(string(b)), "misc/wasm/wasm_exec.js"))
if err != nil |
h.wasmExecJsTs = time.Now() // hack but whatever for now
})
if len(h.wasmExecJsContent) == 0 {
http.Error(w, "failed to read wasm_exec.js from local Go environment", 500)
return
}
w.Header().Set("Content-Type", "text/javascript")
http.ServeContent(w, r, "/wasm_exec.js", h.wasmExecJsTs, bytes.NewReader(h.wasmExecJsContent))
}
// FilteredFileServer is similar to the standard librarie's http.FileServer
// but the handler it returns will refuse to serve any files which don't
// match the specified regexp pattern after running through path.Clean().
// The idea is to make it easy to serve only specific kinds of
// static files from a directory. If pattern does not match a 404 will be returned.
// Be sure to include a trailing "$" if you are checking for file extensions, so it
// only matches the end of the path, e.g. "[.](css|js)$"
func FilteredFileServer(pattern *regexp.Regexp, fs http.FileSystem) http.Handler {
if pattern == nil {
panic(fmt.Errorf("pattern is nil"))
}
if fs == nil {
panic(fmt.Errorf("fs is nil"))
}
fserver := http.FileServer(fs)
ret := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
p := path.Clean("/" + r.URL.Path)
if !strings.HasPrefix(p, "/") { // should never happen after Clean above, but just being extra cautious
http.NotFound(w, r)
return
}
if !pattern.MatchString(p) {
http.NotFound(w, r)
return
}
// delegate to the regular file-serving behavior
fserver.ServeHTTP(w, r)
})
return ret
}
// DefaultIsPageFunc will return true for any request to a path with no file extension.
var DefaultIsPageFunc = func(r *http.Request) bool {
// anything without a file extension is a page
return path.Ext(path.Clean("/"+r.URL.Path)) == ""
}
// DefaultPageTemplateSource a useful default HTML template for serving pages.
var DefaultPageTemplateSource = `<!doctype html>
<html>
<head>
{{if .Title}}
<title>{{.Title}}</title>
{{else}}
<title>Vugu Dev - {{.Request.URL.Path}}</title>
{{end}}
<meta charset="utf-8"/>
{{if .MetaTags}}{{range $k, $v := .MetaTags}}
<meta name="{{$k}}" content="{{$v}}"/>
{{end}}{{end}}
{{if .CSSFiles}}{{range $f := .CSSFiles}}
<link rel="stylesheet" href="{{$f}}" />
{{end}}{{end}}
<script src="https://cdn.jsdelivr.net/npm/text-encoding@0.7.0/lib/encoding.min.js"></script> <!-- MS Edge polyfill -->
<script src="/wasm_exec.js"></script>
</head>
<body>
<div id="vugu_mount_point">
{{if .ServerRenderedOutput}}{{.ServerRenderedOutput}}{{else}}
<img style="position: absolute; top: 50%; left: 50%;" src="https://cdnjs.cloudflare.com/ajax/libs/galleriffic/2.0.1/css/loader.gif">
{{end}}
</div>
<script>
var wasmSupported = (typeof WebAssembly === "object");
if (wasmSupported) {
if (!WebAssembly.instantiateStreaming) { // polyfill
WebAssembly.instantiateStreaming = async (resp, importObject) => {
const source = await (await resp).arrayBuffer();
return await WebAssembly.instantiate(source, importObject);
};
}
const go = new Go();
WebAssembly.instantiateStreaming(fetch("/main.wasm"), go.importObject).then((result) => {
go.run(result.instance);
});
} else {
document.getElementById("vugu_mount_point").innerHTML = 'This application requires WebAssembly support. Please upgrade your browser.';
}
</script>
</body>
</html>
`
// PageHandler executes a Go template and responsds with the page.
type PageHandler struct {
Template *template.Template
TemplateDataFunc func(r *http.Request) interface{}
}
// DefaultStaticData is a map of static things added to the return value of DefaultTemplateDataFunc.
// Provides a quick and dirty way to do things like add CSS files to every page.
var DefaultStaticData = make(map[string]interface{}, 4)
// DefaultTemplateDataFunc is the default behavior for making template data. It
// returns a map with "Request" set to r and all elements of DefaultStaticData added to it.
var DefaultTemplateDataFunc = func(r *http.Request) interface{} {
ret := map[string]interface{}{
"Request": r,
}
for k, v := range DefaultStaticData {
ret[k] = v
}
return ret
}
// ServeHTTP implements http.Handler
func (h *PageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
tmplData := h.TemplateDataFunc(r)
if tmplData == nil {
http.NotFound(w, r)
return
}
err := h.Template.Execute(w, tmplData)
if err != nil {
log.Printf("Error during simplehttp.PageHandler.Template.Execute: %v", err)
}
}
// dirTimestamp finds the most recent time stamp associated with files in a folder
// TODO: we should look into file watcher stuff, better performance for large trees
func dirTimestamp(dir string) (ts time.Time, reterr error) {
dirf, err := os.Open(dir)
if err != nil {
return ts, err
}
defer dirf.Close()
fis, err := dirf.Readdir(-1)
if err != nil {
return ts, err
}
for _, fi := range fis {
if fi.Name() == "." || fi.Name() == ".." {
continue
}
// for directories we recurse
if fi.IsDir() {
dirTs, err := dirTimestamp(filepath.Join(dir, fi.Name()))
if err != nil {
return ts, err
}
if dirTs.After(ts) {
ts = dirTs
}
continue
}
// for files check timestamp
mt := fi.ModTime()
if mt.After(ts) {
ts = mt
}
}
return
}
| {
http.Error(w, "failed to run `go env GOROOT`: "+err.Error(), 500)
return
} | conditional_block |
simple-handler.go | /*
Package simplehttp provides an http.Handler that makes it easy to serve Vugu applications.
Useful for development and production.
The idea is that the common behaviors needed to serve a Vugu site are readily available
in one place. If you require more functionality than simplehttp provides, nearly everything
it does is available in the github.com/vugu/vugu package and you can construct what you
need from its parts. That said, simplehttp should make it easy to start:
// dev flag enables most common development features
// including rebuild your .wasm upon page reload
dev := true
h := simplehttp.New(dir, dev)
After creation, some flags are available for tuning, e.g.:
h.EnableGenerate = true // upon page reload run "go generate ."
h.DisableBuildCache = true // do not try to cache build results during development, just rebuild every time
h.ParserGoPkgOpts.SkipRegisterComponentTypes = true // do not generate component registration init() stuff
Since it's just a regular http.Handler, starting a webserver is as simple as:
log.Fatal(http.ListenAndServe("127.0.0.1:5678", h))
*/
package simplehttp
import (
"bytes"
"compress/gzip"
"fmt"
"html/template"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"strings"
"sync"
"time"
"github.com/vugu/vugu/gen"
)
// SimpleHandler provides common web serving functionality useful for building Vugu sites.
type SimpleHandler struct {
Dir string // project directory
EnableBuildAndServe bool // enables the build-and-serve sequence for your wasm binary - useful for dev, should be off in production
EnableGenerate bool // if true calls `go generate` (requires EnableBuildAndServe)
ParserGoPkgOpts *gen.ParserGoPkgOpts // if set enables running ParserGoPkg with these options (requires EnableBuildAndServe)
DisableBuildCache bool // if true then rebuild every time instead of trying to cache (requires EnableBuildAndServe)
DisableTimestampPreservation bool // if true don't try to keep timestamps the same for files that are byte for byte identical (requires EnableBuildAndServe)
MainWasmPath string // path to serve main wasm file from, in dev mod defaults to "/main.wasm" (requires EnableBuildAndServe)
WasmExecJsPath string // path to serve wasm_exec.js from after finding in the local Go installation, in dev mode defaults to "/wasm_exec.js"
IsPage func(r *http.Request) bool // func that returns true if PageHandler should serve the request
PageHandler http.Handler // returns the HTML page
StaticHandler http.Handler // returns static assets from Dir with appropriate filtering or appropriate error
wasmExecJsOnce sync.Once
wasmExecJsContent []byte
wasmExecJsTs time.Time
lastBuildTime time.Time // time of last successful build
lastBuildContentGZ []byte // last successful build gzipped
mu sync.RWMutex
}
// New returns an SimpleHandler ready to serve using the specified directory.
// The dev flag indicates if development functionality is enabled.
// Settings on SimpleHandler may be tuned more specifically after creation, this function just
// returns sensible defaults for development or production according to if dev is true or false.
func New(dir string, dev bool) *SimpleHandler {
if !filepath.IsAbs(dir) {
panic(fmt.Errorf("dir %q is not an absolute path", dir))
}
ret := &SimpleHandler{
Dir: dir,
}
ret.IsPage = DefaultIsPageFunc
ret.PageHandler = &PageHandler{
Template: template.Must(template.New("_page_").Parse(DefaultPageTemplateSource)),
TemplateDataFunc: DefaultTemplateDataFunc,
}
ret.StaticHandler = FilteredFileServer(
regexp.MustCompile(`[.](css|js|html|map|jpg|jpeg|png|gif|svg|eot|ttf|otf|woff|woff2|wasm)$`),
http.Dir(dir))
if dev {
ret.EnableBuildAndServe = true
ret.ParserGoPkgOpts = &gen.ParserGoPkgOpts{}
ret.MainWasmPath = "/main.wasm"
ret.WasmExecJsPath = "/wasm_exec.js"
}
return ret
}
// ServeHTTP implements http.Handler.
func (h *SimpleHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// by default we tell browsers to always check back with us for content, even in production;
// we allow disabling by the caller just setting another value first; otherwise too much
// headache caused by pages that won't reload and we still reduce a lot of bandwidth usage with
// 304 responses, seems like a sensible trade off for now
if w.Header().Get("Cache-Control") == "" {
w.Header().Set("Cache-Control", "max-age=0, no-cache")
}
p := path.Clean("/" + r.URL.Path)
if h.EnableBuildAndServe && h.MainWasmPath == p {
h.buildAndServe(w, r)
return
}
if h.WasmExecJsPath == p {
h.serveGoEnvWasmExecJs(w, r)
return
}
if h.IsPage(r) {
h.PageHandler.ServeHTTP(w, r)
return
}
h.StaticHandler.ServeHTTP(w, r)
}
func (h *SimpleHandler) buildAndServe(w http.ResponseWriter, r *http.Request) {
// EnableGenerate bool // if true calls `go generate` (requires EnableBuildAndServe)
// main.wasm and build process, first check if it's needed
h.mu.RLock()
lastBuildTime := h.lastBuildTime
lastBuildContentGZ := h.lastBuildContentGZ
h.mu.RUnlock()
var buildDirTs time.Time
var err error
if !h.DisableTimestampPreservation {
buildDirTs, err = dirTimestamp(h.Dir)
if err != nil {
log.Printf("error in dirTimestamp(%q): %v", h.Dir, err)
goto doBuild
}
}
if len(lastBuildContentGZ) == 0 {
// log.Printf("2")
goto doBuild
}
if h.DisableBuildCache {
goto doBuild
}
// skip build process if timestamp from build dir exists and is equal or older than our last build
if !buildDirTs.IsZero() && !buildDirTs.After(lastBuildTime) {
// log.Printf("3")
goto serveBuiltFile
}
// // a false return value means we should send a 304
// if !checkIfModifiedSince(r, buildDirTs) {
// w.WriteHeader(http.StatusNotModified)
// return
// }
// FIXME: might be useful to make it so only one thread rebuilds at a time and they both use the result
doBuild:
// log.Printf("GOT HERE")
{
if h.ParserGoPkgOpts != nil {
pg := gen.NewParserGoPkg(h.Dir, h.ParserGoPkgOpts)
err := pg.Run()
if err != nil {
msg := fmt.Sprintf("Error from ParserGoPkg: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
}
f, err := ioutil.TempFile("", "main_wasm_")
if err != nil {
panic(err)
}
fpath := f.Name()
f.Close()
os.Remove(f.Name())
defer os.Remove(f.Name())
startTime := time.Now()
if h.EnableGenerate {
cmd := exec.Command("go", "generate", ".")
cmd.Dir = h.Dir
cmd.Env = append(cmd.Env, os.Environ()...)
b, err := cmd.CombinedOutput()
w.Header().Set("X-Go-Generate-Duration", time.Since(startTime).String())
if err != nil {
msg := fmt.Sprintf("Error from generate: %v; Output:\n%s", err, b)
log.Print(msg)
http.Error(w, msg, 500)
return
}
}
// GOOS=js GOARCH=wasm go build -o main.wasm .
startTime = time.Now()
runCommand := func(args ...string) ([]byte, error) {
cmd := exec.Command(args[0], args[1:]...)
cmd.Dir = h.Dir
cmd.Env = append(cmd.Env, os.Environ()...)
cmd.Env = append(cmd.Env, "GOOS=js", "GOARCH=wasm")
b, err := cmd.CombinedOutput()
return b, err
}
b, err := runCommand("go", "mod", "tidy")
if err == nil {
b, err = runCommand("go", "build", "-o", fpath, ".")
}
w.Header().Set("X-Go-Build-Duration", time.Since(startTime).String())
if err != nil {
msg := fmt.Sprintf("Error from compile: %v (out path=%q); Output:\n%s", err, fpath, b)
log.Print(msg)
http.Error(w, msg, 500)
return
}
f, err = os.Open(fpath)
if err != nil {
msg := fmt.Sprintf("Error opening file after build: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
// gzip with max compression
var buf bytes.Buffer
gzw, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
n, err := io.Copy(gzw, f)
if err != nil {
msg := fmt.Sprintf("Error reading and compressing binary: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
gzw.Close()
w.Header().Set("X-Gunzipped-Size", fmt.Sprint(n))
// update cache
if buildDirTs.IsZero() {
lastBuildTime = time.Now()
} else {
lastBuildTime = buildDirTs
}
lastBuildContentGZ = buf.Bytes()
// log.Printf("GOT TO UPDATE")
h.mu.Lock()
h.lastBuildTime = lastBuildTime
h.lastBuildContentGZ = lastBuildContentGZ
h.mu.Unlock()
}
serveBuiltFile:
w.Header().Set("Content-Type", "application/wasm") | if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
w.Header().Set("Content-Encoding", "gzip")
w.Header().Set("X-Gzipped-Size", fmt.Sprint(len(lastBuildContentGZ)))
http.ServeContent(w, r, h.MainWasmPath, lastBuildTime, bytes.NewReader(lastBuildContentGZ))
return
}
// no gzip, we decompress internally and send it back
gzr, _ := gzip.NewReader(bytes.NewReader(lastBuildContentGZ))
_, err = io.Copy(w, gzr)
if err != nil {
log.Print(err)
}
return
}
func (h *SimpleHandler) serveGoEnvWasmExecJs(w http.ResponseWriter, r *http.Request) {
b, err := exec.Command("go", "env", "GOROOT").CombinedOutput()
if err != nil {
http.Error(w, "failed to run `go env GOROOT`: "+err.Error(), 500)
return
}
h.wasmExecJsOnce.Do(func() {
h.wasmExecJsContent, err = ioutil.ReadFile(filepath.Join(strings.TrimSpace(string(b)), "misc/wasm/wasm_exec.js"))
if err != nil {
http.Error(w, "failed to run `go env GOROOT`: "+err.Error(), 500)
return
}
h.wasmExecJsTs = time.Now() // hack but whatever for now
})
if len(h.wasmExecJsContent) == 0 {
http.Error(w, "failed to read wasm_exec.js from local Go environment", 500)
return
}
w.Header().Set("Content-Type", "text/javascript")
http.ServeContent(w, r, "/wasm_exec.js", h.wasmExecJsTs, bytes.NewReader(h.wasmExecJsContent))
}
// FilteredFileServer is similar to the standard librarie's http.FileServer
// but the handler it returns will refuse to serve any files which don't
// match the specified regexp pattern after running through path.Clean().
// The idea is to make it easy to serve only specific kinds of
// static files from a directory. If pattern does not match a 404 will be returned.
// Be sure to include a trailing "$" if you are checking for file extensions, so it
// only matches the end of the path, e.g. "[.](css|js)$"
func FilteredFileServer(pattern *regexp.Regexp, fs http.FileSystem) http.Handler {
if pattern == nil {
panic(fmt.Errorf("pattern is nil"))
}
if fs == nil {
panic(fmt.Errorf("fs is nil"))
}
fserver := http.FileServer(fs)
ret := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
p := path.Clean("/" + r.URL.Path)
if !strings.HasPrefix(p, "/") { // should never happen after Clean above, but just being extra cautious
http.NotFound(w, r)
return
}
if !pattern.MatchString(p) {
http.NotFound(w, r)
return
}
// delegate to the regular file-serving behavior
fserver.ServeHTTP(w, r)
})
return ret
}
// DefaultIsPageFunc will return true for any request to a path with no file extension.
var DefaultIsPageFunc = func(r *http.Request) bool {
// anything without a file extension is a page
return path.Ext(path.Clean("/"+r.URL.Path)) == ""
}
// DefaultPageTemplateSource a useful default HTML template for serving pages.
var DefaultPageTemplateSource = `<!doctype html>
<html>
<head>
{{if .Title}}
<title>{{.Title}}</title>
{{else}}
<title>Vugu Dev - {{.Request.URL.Path}}</title>
{{end}}
<meta charset="utf-8"/>
{{if .MetaTags}}{{range $k, $v := .MetaTags}}
<meta name="{{$k}}" content="{{$v}}"/>
{{end}}{{end}}
{{if .CSSFiles}}{{range $f := .CSSFiles}}
<link rel="stylesheet" href="{{$f}}" />
{{end}}{{end}}
<script src="https://cdn.jsdelivr.net/npm/text-encoding@0.7.0/lib/encoding.min.js"></script> <!-- MS Edge polyfill -->
<script src="/wasm_exec.js"></script>
</head>
<body>
<div id="vugu_mount_point">
{{if .ServerRenderedOutput}}{{.ServerRenderedOutput}}{{else}}
<img style="position: absolute; top: 50%; left: 50%;" src="https://cdnjs.cloudflare.com/ajax/libs/galleriffic/2.0.1/css/loader.gif">
{{end}}
</div>
<script>
var wasmSupported = (typeof WebAssembly === "object");
if (wasmSupported) {
if (!WebAssembly.instantiateStreaming) { // polyfill
WebAssembly.instantiateStreaming = async (resp, importObject) => {
const source = await (await resp).arrayBuffer();
return await WebAssembly.instantiate(source, importObject);
};
}
const go = new Go();
WebAssembly.instantiateStreaming(fetch("/main.wasm"), go.importObject).then((result) => {
go.run(result.instance);
});
} else {
document.getElementById("vugu_mount_point").innerHTML = 'This application requires WebAssembly support. Please upgrade your browser.';
}
</script>
</body>
</html>
`
// PageHandler executes a Go template and responsds with the page.
type PageHandler struct {
Template *template.Template
TemplateDataFunc func(r *http.Request) interface{}
}
// DefaultStaticData is a map of static things added to the return value of DefaultTemplateDataFunc.
// Provides a quick and dirty way to do things like add CSS files to every page.
var DefaultStaticData = make(map[string]interface{}, 4)
// DefaultTemplateDataFunc is the default behavior for making template data. It
// returns a map with "Request" set to r and all elements of DefaultStaticData added to it.
var DefaultTemplateDataFunc = func(r *http.Request) interface{} {
ret := map[string]interface{}{
"Request": r,
}
for k, v := range DefaultStaticData {
ret[k] = v
}
return ret
}
// ServeHTTP implements http.Handler
func (h *PageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
tmplData := h.TemplateDataFunc(r)
if tmplData == nil {
http.NotFound(w, r)
return
}
err := h.Template.Execute(w, tmplData)
if err != nil {
log.Printf("Error during simplehttp.PageHandler.Template.Execute: %v", err)
}
}
// dirTimestamp finds the most recent time stamp associated with files in a folder
// TODO: we should look into file watcher stuff, better performance for large trees
func dirTimestamp(dir string) (ts time.Time, reterr error) {
dirf, err := os.Open(dir)
if err != nil {
return ts, err
}
defer dirf.Close()
fis, err := dirf.Readdir(-1)
if err != nil {
return ts, err
}
for _, fi := range fis {
if fi.Name() == "." || fi.Name() == ".." {
continue
}
// for directories we recurse
if fi.IsDir() {
dirTs, err := dirTimestamp(filepath.Join(dir, fi.Name()))
if err != nil {
return ts, err
}
if dirTs.After(ts) {
ts = dirTs
}
continue
}
// for files check timestamp
mt := fi.ModTime()
if mt.After(ts) {
ts = mt
}
}
return
} | // w.Header().Set("Last-Modified", lastBuildTime.Format(http.TimeFormat)) // handled by http.ServeContent
// if client supports gzip response (the usual case), we just set the gzip header and send back | random_line_split |
simple-handler.go | /*
Package simplehttp provides an http.Handler that makes it easy to serve Vugu applications.
Useful for development and production.
The idea is that the common behaviors needed to serve a Vugu site are readily available
in one place. If you require more functionality than simplehttp provides, nearly everything
it does is available in the github.com/vugu/vugu package and you can construct what you
need from its parts. That said, simplehttp should make it easy to start:
// dev flag enables most common development features
// including rebuild your .wasm upon page reload
dev := true
h := simplehttp.New(dir, dev)
After creation, some flags are available for tuning, e.g.:
h.EnableGenerate = true // upon page reload run "go generate ."
h.DisableBuildCache = true // do not try to cache build results during development, just rebuild every time
h.ParserGoPkgOpts.SkipRegisterComponentTypes = true // do not generate component registration init() stuff
Since it's just a regular http.Handler, starting a webserver is as simple as:
log.Fatal(http.ListenAndServe("127.0.0.1:5678", h))
*/
package simplehttp
import (
"bytes"
"compress/gzip"
"fmt"
"html/template"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"strings"
"sync"
"time"
"github.com/vugu/vugu/gen"
)
// SimpleHandler provides common web serving functionality useful for building Vugu sites.
type SimpleHandler struct {
Dir string // project directory
EnableBuildAndServe bool // enables the build-and-serve sequence for your wasm binary - useful for dev, should be off in production
EnableGenerate bool // if true calls `go generate` (requires EnableBuildAndServe)
ParserGoPkgOpts *gen.ParserGoPkgOpts // if set enables running ParserGoPkg with these options (requires EnableBuildAndServe)
DisableBuildCache bool // if true then rebuild every time instead of trying to cache (requires EnableBuildAndServe)
DisableTimestampPreservation bool // if true don't try to keep timestamps the same for files that are byte for byte identical (requires EnableBuildAndServe)
MainWasmPath string // path to serve main wasm file from, in dev mod defaults to "/main.wasm" (requires EnableBuildAndServe)
WasmExecJsPath string // path to serve wasm_exec.js from after finding in the local Go installation, in dev mode defaults to "/wasm_exec.js"
IsPage func(r *http.Request) bool // func that returns true if PageHandler should serve the request
PageHandler http.Handler // returns the HTML page
StaticHandler http.Handler // returns static assets from Dir with appropriate filtering or appropriate error
wasmExecJsOnce sync.Once
wasmExecJsContent []byte
wasmExecJsTs time.Time
lastBuildTime time.Time // time of last successful build
lastBuildContentGZ []byte // last successful build gzipped
mu sync.RWMutex
}
// New returns an SimpleHandler ready to serve using the specified directory.
// The dev flag indicates if development functionality is enabled.
// Settings on SimpleHandler may be tuned more specifically after creation, this function just
// returns sensible defaults for development or production according to if dev is true or false.
func New(dir string, dev bool) *SimpleHandler {
if !filepath.IsAbs(dir) {
panic(fmt.Errorf("dir %q is not an absolute path", dir))
}
ret := &SimpleHandler{
Dir: dir,
}
ret.IsPage = DefaultIsPageFunc
ret.PageHandler = &PageHandler{
Template: template.Must(template.New("_page_").Parse(DefaultPageTemplateSource)),
TemplateDataFunc: DefaultTemplateDataFunc,
}
ret.StaticHandler = FilteredFileServer(
regexp.MustCompile(`[.](css|js|html|map|jpg|jpeg|png|gif|svg|eot|ttf|otf|woff|woff2|wasm)$`),
http.Dir(dir))
if dev {
ret.EnableBuildAndServe = true
ret.ParserGoPkgOpts = &gen.ParserGoPkgOpts{}
ret.MainWasmPath = "/main.wasm"
ret.WasmExecJsPath = "/wasm_exec.js"
}
return ret
}
// ServeHTTP implements http.Handler.
func (h *SimpleHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// by default we tell browsers to always check back with us for content, even in production;
// we allow disabling by the caller just setting another value first; otherwise too much
// headache caused by pages that won't reload and we still reduce a lot of bandwidth usage with
// 304 responses, seems like a sensible trade off for now
if w.Header().Get("Cache-Control") == "" {
w.Header().Set("Cache-Control", "max-age=0, no-cache")
}
p := path.Clean("/" + r.URL.Path)
if h.EnableBuildAndServe && h.MainWasmPath == p {
h.buildAndServe(w, r)
return
}
if h.WasmExecJsPath == p {
h.serveGoEnvWasmExecJs(w, r)
return
}
if h.IsPage(r) {
h.PageHandler.ServeHTTP(w, r)
return
}
h.StaticHandler.ServeHTTP(w, r)
}
func (h *SimpleHandler) buildAndServe(w http.ResponseWriter, r *http.Request) {
// EnableGenerate bool // if true calls `go generate` (requires EnableBuildAndServe)
// main.wasm and build process, first check if it's needed
h.mu.RLock()
lastBuildTime := h.lastBuildTime
lastBuildContentGZ := h.lastBuildContentGZ
h.mu.RUnlock()
var buildDirTs time.Time
var err error
if !h.DisableTimestampPreservation {
buildDirTs, err = dirTimestamp(h.Dir)
if err != nil {
log.Printf("error in dirTimestamp(%q): %v", h.Dir, err)
goto doBuild
}
}
if len(lastBuildContentGZ) == 0 {
// log.Printf("2")
goto doBuild
}
if h.DisableBuildCache {
goto doBuild
}
// skip build process if timestamp from build dir exists and is equal or older than our last build
if !buildDirTs.IsZero() && !buildDirTs.After(lastBuildTime) {
// log.Printf("3")
goto serveBuiltFile
}
// // a false return value means we should send a 304
// if !checkIfModifiedSince(r, buildDirTs) {
// w.WriteHeader(http.StatusNotModified)
// return
// }
// FIXME: might be useful to make it so only one thread rebuilds at a time and they both use the result
doBuild:
// log.Printf("GOT HERE")
{
if h.ParserGoPkgOpts != nil {
pg := gen.NewParserGoPkg(h.Dir, h.ParserGoPkgOpts)
err := pg.Run()
if err != nil {
msg := fmt.Sprintf("Error from ParserGoPkg: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
}
f, err := ioutil.TempFile("", "main_wasm_")
if err != nil {
panic(err)
}
fpath := f.Name()
f.Close()
os.Remove(f.Name())
defer os.Remove(f.Name())
startTime := time.Now()
if h.EnableGenerate {
cmd := exec.Command("go", "generate", ".")
cmd.Dir = h.Dir
cmd.Env = append(cmd.Env, os.Environ()...)
b, err := cmd.CombinedOutput()
w.Header().Set("X-Go-Generate-Duration", time.Since(startTime).String())
if err != nil {
msg := fmt.Sprintf("Error from generate: %v; Output:\n%s", err, b)
log.Print(msg)
http.Error(w, msg, 500)
return
}
}
// GOOS=js GOARCH=wasm go build -o main.wasm .
startTime = time.Now()
runCommand := func(args ...string) ([]byte, error) {
cmd := exec.Command(args[0], args[1:]...)
cmd.Dir = h.Dir
cmd.Env = append(cmd.Env, os.Environ()...)
cmd.Env = append(cmd.Env, "GOOS=js", "GOARCH=wasm")
b, err := cmd.CombinedOutput()
return b, err
}
b, err := runCommand("go", "mod", "tidy")
if err == nil {
b, err = runCommand("go", "build", "-o", fpath, ".")
}
w.Header().Set("X-Go-Build-Duration", time.Since(startTime).String())
if err != nil {
msg := fmt.Sprintf("Error from compile: %v (out path=%q); Output:\n%s", err, fpath, b)
log.Print(msg)
http.Error(w, msg, 500)
return
}
f, err = os.Open(fpath)
if err != nil {
msg := fmt.Sprintf("Error opening file after build: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
// gzip with max compression
var buf bytes.Buffer
gzw, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
n, err := io.Copy(gzw, f)
if err != nil {
msg := fmt.Sprintf("Error reading and compressing binary: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
gzw.Close()
w.Header().Set("X-Gunzipped-Size", fmt.Sprint(n))
// update cache
if buildDirTs.IsZero() {
lastBuildTime = time.Now()
} else {
lastBuildTime = buildDirTs
}
lastBuildContentGZ = buf.Bytes()
// log.Printf("GOT TO UPDATE")
h.mu.Lock()
h.lastBuildTime = lastBuildTime
h.lastBuildContentGZ = lastBuildContentGZ
h.mu.Unlock()
}
serveBuiltFile:
w.Header().Set("Content-Type", "application/wasm")
// w.Header().Set("Last-Modified", lastBuildTime.Format(http.TimeFormat)) // handled by http.ServeContent
// if client supports gzip response (the usual case), we just set the gzip header and send back
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
w.Header().Set("Content-Encoding", "gzip")
w.Header().Set("X-Gzipped-Size", fmt.Sprint(len(lastBuildContentGZ)))
http.ServeContent(w, r, h.MainWasmPath, lastBuildTime, bytes.NewReader(lastBuildContentGZ))
return
}
// no gzip, we decompress internally and send it back
gzr, _ := gzip.NewReader(bytes.NewReader(lastBuildContentGZ))
_, err = io.Copy(w, gzr)
if err != nil {
log.Print(err)
}
return
}
func (h *SimpleHandler) serveGoEnvWasmExecJs(w http.ResponseWriter, r *http.Request) {
b, err := exec.Command("go", "env", "GOROOT").CombinedOutput()
if err != nil {
http.Error(w, "failed to run `go env GOROOT`: "+err.Error(), 500)
return
}
h.wasmExecJsOnce.Do(func() {
h.wasmExecJsContent, err = ioutil.ReadFile(filepath.Join(strings.TrimSpace(string(b)), "misc/wasm/wasm_exec.js"))
if err != nil {
http.Error(w, "failed to run `go env GOROOT`: "+err.Error(), 500)
return
}
h.wasmExecJsTs = time.Now() // hack but whatever for now
})
if len(h.wasmExecJsContent) == 0 {
http.Error(w, "failed to read wasm_exec.js from local Go environment", 500)
return
}
w.Header().Set("Content-Type", "text/javascript")
http.ServeContent(w, r, "/wasm_exec.js", h.wasmExecJsTs, bytes.NewReader(h.wasmExecJsContent))
}
// FilteredFileServer is similar to the standard librarie's http.FileServer
// but the handler it returns will refuse to serve any files which don't
// match the specified regexp pattern after running through path.Clean().
// The idea is to make it easy to serve only specific kinds of
// static files from a directory. If pattern does not match a 404 will be returned.
// Be sure to include a trailing "$" if you are checking for file extensions, so it
// only matches the end of the path, e.g. "[.](css|js)$"
func FilteredFileServer(pattern *regexp.Regexp, fs http.FileSystem) http.Handler {
if pattern == nil {
panic(fmt.Errorf("pattern is nil"))
}
if fs == nil {
panic(fmt.Errorf("fs is nil"))
}
fserver := http.FileServer(fs)
ret := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
p := path.Clean("/" + r.URL.Path)
if !strings.HasPrefix(p, "/") { // should never happen after Clean above, but just being extra cautious
http.NotFound(w, r)
return
}
if !pattern.MatchString(p) {
http.NotFound(w, r)
return
}
// delegate to the regular file-serving behavior
fserver.ServeHTTP(w, r)
})
return ret
}
// DefaultIsPageFunc will return true for any request to a path with no file extension.
var DefaultIsPageFunc = func(r *http.Request) bool {
// anything without a file extension is a page
return path.Ext(path.Clean("/"+r.URL.Path)) == ""
}
// DefaultPageTemplateSource a useful default HTML template for serving pages.
var DefaultPageTemplateSource = `<!doctype html>
<html>
<head>
{{if .Title}}
<title>{{.Title}}</title>
{{else}}
<title>Vugu Dev - {{.Request.URL.Path}}</title>
{{end}}
<meta charset="utf-8"/>
{{if .MetaTags}}{{range $k, $v := .MetaTags}}
<meta name="{{$k}}" content="{{$v}}"/>
{{end}}{{end}}
{{if .CSSFiles}}{{range $f := .CSSFiles}}
<link rel="stylesheet" href="{{$f}}" />
{{end}}{{end}}
<script src="https://cdn.jsdelivr.net/npm/text-encoding@0.7.0/lib/encoding.min.js"></script> <!-- MS Edge polyfill -->
<script src="/wasm_exec.js"></script>
</head>
<body>
<div id="vugu_mount_point">
{{if .ServerRenderedOutput}}{{.ServerRenderedOutput}}{{else}}
<img style="position: absolute; top: 50%; left: 50%;" src="https://cdnjs.cloudflare.com/ajax/libs/galleriffic/2.0.1/css/loader.gif">
{{end}}
</div>
<script>
var wasmSupported = (typeof WebAssembly === "object");
if (wasmSupported) {
if (!WebAssembly.instantiateStreaming) { // polyfill
WebAssembly.instantiateStreaming = async (resp, importObject) => {
const source = await (await resp).arrayBuffer();
return await WebAssembly.instantiate(source, importObject);
};
}
const go = new Go();
WebAssembly.instantiateStreaming(fetch("/main.wasm"), go.importObject).then((result) => {
go.run(result.instance);
});
} else {
document.getElementById("vugu_mount_point").innerHTML = 'This application requires WebAssembly support. Please upgrade your browser.';
}
</script>
</body>
</html>
`
// PageHandler executes a Go template and responsds with the page.
type PageHandler struct {
Template *template.Template
TemplateDataFunc func(r *http.Request) interface{}
}
// DefaultStaticData is a map of static things added to the return value of DefaultTemplateDataFunc.
// Provides a quick and dirty way to do things like add CSS files to every page.
var DefaultStaticData = make(map[string]interface{}, 4)
// DefaultTemplateDataFunc is the default behavior for making template data. It
// returns a map with "Request" set to r and all elements of DefaultStaticData added to it.
var DefaultTemplateDataFunc = func(r *http.Request) interface{} {
ret := map[string]interface{}{
"Request": r,
}
for k, v := range DefaultStaticData {
ret[k] = v
}
return ret
}
// ServeHTTP implements http.Handler
func (h *PageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
tmplData := h.TemplateDataFunc(r)
if tmplData == nil {
http.NotFound(w, r)
return
}
err := h.Template.Execute(w, tmplData)
if err != nil {
log.Printf("Error during simplehttp.PageHandler.Template.Execute: %v", err)
}
}
// dirTimestamp finds the most recent time stamp associated with files in a folder
// TODO: we should look into file watcher stuff, better performance for large trees
func dirTimestamp(dir string) (ts time.Time, reterr error) | {
dirf, err := os.Open(dir)
if err != nil {
return ts, err
}
defer dirf.Close()
fis, err := dirf.Readdir(-1)
if err != nil {
return ts, err
}
for _, fi := range fis {
if fi.Name() == "." || fi.Name() == ".." {
continue
}
// for directories we recurse
if fi.IsDir() {
dirTs, err := dirTimestamp(filepath.Join(dir, fi.Name()))
if err != nil {
return ts, err
}
if dirTs.After(ts) {
ts = dirTs
}
continue
}
// for files check timestamp
mt := fi.ModTime()
if mt.After(ts) {
ts = mt
}
}
return
} | identifier_body | |
simple-handler.go | /*
Package simplehttp provides an http.Handler that makes it easy to serve Vugu applications.
Useful for development and production.
The idea is that the common behaviors needed to serve a Vugu site are readily available
in one place. If you require more functionality than simplehttp provides, nearly everything
it does is available in the github.com/vugu/vugu package and you can construct what you
need from its parts. That said, simplehttp should make it easy to start:
// dev flag enables most common development features
// including rebuild your .wasm upon page reload
dev := true
h := simplehttp.New(dir, dev)
After creation, some flags are available for tuning, e.g.:
h.EnableGenerate = true // upon page reload run "go generate ."
h.DisableBuildCache = true // do not try to cache build results during development, just rebuild every time
h.ParserGoPkgOpts.SkipRegisterComponentTypes = true // do not generate component registration init() stuff
Since it's just a regular http.Handler, starting a webserver is as simple as:
log.Fatal(http.ListenAndServe("127.0.0.1:5678", h))
*/
package simplehttp
import (
"bytes"
"compress/gzip"
"fmt"
"html/template"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"strings"
"sync"
"time"
"github.com/vugu/vugu/gen"
)
// SimpleHandler provides common web serving functionality useful for building Vugu sites.
type SimpleHandler struct {
Dir string // project directory
EnableBuildAndServe bool // enables the build-and-serve sequence for your wasm binary - useful for dev, should be off in production
EnableGenerate bool // if true calls `go generate` (requires EnableBuildAndServe)
ParserGoPkgOpts *gen.ParserGoPkgOpts // if set enables running ParserGoPkg with these options (requires EnableBuildAndServe)
DisableBuildCache bool // if true then rebuild every time instead of trying to cache (requires EnableBuildAndServe)
DisableTimestampPreservation bool // if true don't try to keep timestamps the same for files that are byte for byte identical (requires EnableBuildAndServe)
MainWasmPath string // path to serve main wasm file from, in dev mod defaults to "/main.wasm" (requires EnableBuildAndServe)
WasmExecJsPath string // path to serve wasm_exec.js from after finding in the local Go installation, in dev mode defaults to "/wasm_exec.js"
IsPage func(r *http.Request) bool // func that returns true if PageHandler should serve the request
PageHandler http.Handler // returns the HTML page
StaticHandler http.Handler // returns static assets from Dir with appropriate filtering or appropriate error
wasmExecJsOnce sync.Once
wasmExecJsContent []byte
wasmExecJsTs time.Time
lastBuildTime time.Time // time of last successful build
lastBuildContentGZ []byte // last successful build gzipped
mu sync.RWMutex
}
// New returns an SimpleHandler ready to serve using the specified directory.
// The dev flag indicates if development functionality is enabled.
// Settings on SimpleHandler may be tuned more specifically after creation, this function just
// returns sensible defaults for development or production according to if dev is true or false.
func New(dir string, dev bool) *SimpleHandler {
if !filepath.IsAbs(dir) {
panic(fmt.Errorf("dir %q is not an absolute path", dir))
}
ret := &SimpleHandler{
Dir: dir,
}
ret.IsPage = DefaultIsPageFunc
ret.PageHandler = &PageHandler{
Template: template.Must(template.New("_page_").Parse(DefaultPageTemplateSource)),
TemplateDataFunc: DefaultTemplateDataFunc,
}
ret.StaticHandler = FilteredFileServer(
regexp.MustCompile(`[.](css|js|html|map|jpg|jpeg|png|gif|svg|eot|ttf|otf|woff|woff2|wasm)$`),
http.Dir(dir))
if dev {
ret.EnableBuildAndServe = true
ret.ParserGoPkgOpts = &gen.ParserGoPkgOpts{}
ret.MainWasmPath = "/main.wasm"
ret.WasmExecJsPath = "/wasm_exec.js"
}
return ret
}
// ServeHTTP implements http.Handler.
func (h *SimpleHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// by default we tell browsers to always check back with us for content, even in production;
// we allow disabling by the caller just setting another value first; otherwise too much
// headache caused by pages that won't reload and we still reduce a lot of bandwidth usage with
// 304 responses, seems like a sensible trade off for now
if w.Header().Get("Cache-Control") == "" {
w.Header().Set("Cache-Control", "max-age=0, no-cache")
}
p := path.Clean("/" + r.URL.Path)
if h.EnableBuildAndServe && h.MainWasmPath == p {
h.buildAndServe(w, r)
return
}
if h.WasmExecJsPath == p {
h.serveGoEnvWasmExecJs(w, r)
return
}
if h.IsPage(r) {
h.PageHandler.ServeHTTP(w, r)
return
}
h.StaticHandler.ServeHTTP(w, r)
}
func (h *SimpleHandler) buildAndServe(w http.ResponseWriter, r *http.Request) {
// EnableGenerate bool // if true calls `go generate` (requires EnableBuildAndServe)
// main.wasm and build process, first check if it's needed
h.mu.RLock()
lastBuildTime := h.lastBuildTime
lastBuildContentGZ := h.lastBuildContentGZ
h.mu.RUnlock()
var buildDirTs time.Time
var err error
if !h.DisableTimestampPreservation {
buildDirTs, err = dirTimestamp(h.Dir)
if err != nil {
log.Printf("error in dirTimestamp(%q): %v", h.Dir, err)
goto doBuild
}
}
if len(lastBuildContentGZ) == 0 {
// log.Printf("2")
goto doBuild
}
if h.DisableBuildCache {
goto doBuild
}
// skip build process if timestamp from build dir exists and is equal or older than our last build
if !buildDirTs.IsZero() && !buildDirTs.After(lastBuildTime) {
// log.Printf("3")
goto serveBuiltFile
}
// // a false return value means we should send a 304
// if !checkIfModifiedSince(r, buildDirTs) {
// w.WriteHeader(http.StatusNotModified)
// return
// }
// FIXME: might be useful to make it so only one thread rebuilds at a time and they both use the result
doBuild:
// log.Printf("GOT HERE")
{
if h.ParserGoPkgOpts != nil {
pg := gen.NewParserGoPkg(h.Dir, h.ParserGoPkgOpts)
err := pg.Run()
if err != nil {
msg := fmt.Sprintf("Error from ParserGoPkg: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
}
f, err := ioutil.TempFile("", "main_wasm_")
if err != nil {
panic(err)
}
fpath := f.Name()
f.Close()
os.Remove(f.Name())
defer os.Remove(f.Name())
startTime := time.Now()
if h.EnableGenerate {
cmd := exec.Command("go", "generate", ".")
cmd.Dir = h.Dir
cmd.Env = append(cmd.Env, os.Environ()...)
b, err := cmd.CombinedOutput()
w.Header().Set("X-Go-Generate-Duration", time.Since(startTime).String())
if err != nil {
msg := fmt.Sprintf("Error from generate: %v; Output:\n%s", err, b)
log.Print(msg)
http.Error(w, msg, 500)
return
}
}
// GOOS=js GOARCH=wasm go build -o main.wasm .
startTime = time.Now()
runCommand := func(args ...string) ([]byte, error) {
cmd := exec.Command(args[0], args[1:]...)
cmd.Dir = h.Dir
cmd.Env = append(cmd.Env, os.Environ()...)
cmd.Env = append(cmd.Env, "GOOS=js", "GOARCH=wasm")
b, err := cmd.CombinedOutput()
return b, err
}
b, err := runCommand("go", "mod", "tidy")
if err == nil {
b, err = runCommand("go", "build", "-o", fpath, ".")
}
w.Header().Set("X-Go-Build-Duration", time.Since(startTime).String())
if err != nil {
msg := fmt.Sprintf("Error from compile: %v (out path=%q); Output:\n%s", err, fpath, b)
log.Print(msg)
http.Error(w, msg, 500)
return
}
f, err = os.Open(fpath)
if err != nil {
msg := fmt.Sprintf("Error opening file after build: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
// gzip with max compression
var buf bytes.Buffer
gzw, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
n, err := io.Copy(gzw, f)
if err != nil {
msg := fmt.Sprintf("Error reading and compressing binary: %v", err)
log.Print(msg)
http.Error(w, msg, 500)
return
}
gzw.Close()
w.Header().Set("X-Gunzipped-Size", fmt.Sprint(n))
// update cache
if buildDirTs.IsZero() {
lastBuildTime = time.Now()
} else {
lastBuildTime = buildDirTs
}
lastBuildContentGZ = buf.Bytes()
// log.Printf("GOT TO UPDATE")
h.mu.Lock()
h.lastBuildTime = lastBuildTime
h.lastBuildContentGZ = lastBuildContentGZ
h.mu.Unlock()
}
serveBuiltFile:
w.Header().Set("Content-Type", "application/wasm")
// w.Header().Set("Last-Modified", lastBuildTime.Format(http.TimeFormat)) // handled by http.ServeContent
// if client supports gzip response (the usual case), we just set the gzip header and send back
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
w.Header().Set("Content-Encoding", "gzip")
w.Header().Set("X-Gzipped-Size", fmt.Sprint(len(lastBuildContentGZ)))
http.ServeContent(w, r, h.MainWasmPath, lastBuildTime, bytes.NewReader(lastBuildContentGZ))
return
}
// no gzip, we decompress internally and send it back
gzr, _ := gzip.NewReader(bytes.NewReader(lastBuildContentGZ))
_, err = io.Copy(w, gzr)
if err != nil {
log.Print(err)
}
return
}
func (h *SimpleHandler) serveGoEnvWasmExecJs(w http.ResponseWriter, r *http.Request) {
b, err := exec.Command("go", "env", "GOROOT").CombinedOutput()
if err != nil {
http.Error(w, "failed to run `go env GOROOT`: "+err.Error(), 500)
return
}
h.wasmExecJsOnce.Do(func() {
h.wasmExecJsContent, err = ioutil.ReadFile(filepath.Join(strings.TrimSpace(string(b)), "misc/wasm/wasm_exec.js"))
if err != nil {
http.Error(w, "failed to run `go env GOROOT`: "+err.Error(), 500)
return
}
h.wasmExecJsTs = time.Now() // hack but whatever for now
})
if len(h.wasmExecJsContent) == 0 {
http.Error(w, "failed to read wasm_exec.js from local Go environment", 500)
return
}
w.Header().Set("Content-Type", "text/javascript")
http.ServeContent(w, r, "/wasm_exec.js", h.wasmExecJsTs, bytes.NewReader(h.wasmExecJsContent))
}
// FilteredFileServer is similar to the standard librarie's http.FileServer
// but the handler it returns will refuse to serve any files which don't
// match the specified regexp pattern after running through path.Clean().
// The idea is to make it easy to serve only specific kinds of
// static files from a directory. If pattern does not match a 404 will be returned.
// Be sure to include a trailing "$" if you are checking for file extensions, so it
// only matches the end of the path, e.g. "[.](css|js)$"
func FilteredFileServer(pattern *regexp.Regexp, fs http.FileSystem) http.Handler {
if pattern == nil {
panic(fmt.Errorf("pattern is nil"))
}
if fs == nil {
panic(fmt.Errorf("fs is nil"))
}
fserver := http.FileServer(fs)
ret := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
p := path.Clean("/" + r.URL.Path)
if !strings.HasPrefix(p, "/") { // should never happen after Clean above, but just being extra cautious
http.NotFound(w, r)
return
}
if !pattern.MatchString(p) {
http.NotFound(w, r)
return
}
// delegate to the regular file-serving behavior
fserver.ServeHTTP(w, r)
})
return ret
}
// DefaultIsPageFunc will return true for any request to a path with no file extension.
var DefaultIsPageFunc = func(r *http.Request) bool {
// anything without a file extension is a page
return path.Ext(path.Clean("/"+r.URL.Path)) == ""
}
// DefaultPageTemplateSource a useful default HTML template for serving pages.
var DefaultPageTemplateSource = `<!doctype html>
<html>
<head>
{{if .Title}}
<title>{{.Title}}</title>
{{else}}
<title>Vugu Dev - {{.Request.URL.Path}}</title>
{{end}}
<meta charset="utf-8"/>
{{if .MetaTags}}{{range $k, $v := .MetaTags}}
<meta name="{{$k}}" content="{{$v}}"/>
{{end}}{{end}}
{{if .CSSFiles}}{{range $f := .CSSFiles}}
<link rel="stylesheet" href="{{$f}}" />
{{end}}{{end}}
<script src="https://cdn.jsdelivr.net/npm/text-encoding@0.7.0/lib/encoding.min.js"></script> <!-- MS Edge polyfill -->
<script src="/wasm_exec.js"></script>
</head>
<body>
<div id="vugu_mount_point">
{{if .ServerRenderedOutput}}{{.ServerRenderedOutput}}{{else}}
<img style="position: absolute; top: 50%; left: 50%;" src="https://cdnjs.cloudflare.com/ajax/libs/galleriffic/2.0.1/css/loader.gif">
{{end}}
</div>
<script>
var wasmSupported = (typeof WebAssembly === "object");
if (wasmSupported) {
if (!WebAssembly.instantiateStreaming) { // polyfill
WebAssembly.instantiateStreaming = async (resp, importObject) => {
const source = await (await resp).arrayBuffer();
return await WebAssembly.instantiate(source, importObject);
};
}
const go = new Go();
WebAssembly.instantiateStreaming(fetch("/main.wasm"), go.importObject).then((result) => {
go.run(result.instance);
});
} else {
document.getElementById("vugu_mount_point").innerHTML = 'This application requires WebAssembly support. Please upgrade your browser.';
}
</script>
</body>
</html>
`
// PageHandler executes a Go template and responsds with the page.
type PageHandler struct {
Template *template.Template
TemplateDataFunc func(r *http.Request) interface{}
}
// DefaultStaticData is a map of static things added to the return value of DefaultTemplateDataFunc.
// Provides a quick and dirty way to do things like add CSS files to every page.
var DefaultStaticData = make(map[string]interface{}, 4)
// DefaultTemplateDataFunc is the default behavior for making template data. It
// returns a map with "Request" set to r and all elements of DefaultStaticData added to it.
var DefaultTemplateDataFunc = func(r *http.Request) interface{} {
ret := map[string]interface{}{
"Request": r,
}
for k, v := range DefaultStaticData {
ret[k] = v
}
return ret
}
// ServeHTTP implements http.Handler
func (h *PageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
tmplData := h.TemplateDataFunc(r)
if tmplData == nil {
http.NotFound(w, r)
return
}
err := h.Template.Execute(w, tmplData)
if err != nil {
log.Printf("Error during simplehttp.PageHandler.Template.Execute: %v", err)
}
}
// dirTimestamp finds the most recent time stamp associated with files in a folder
// TODO: we should look into file watcher stuff, better performance for large trees
func | (dir string) (ts time.Time, reterr error) {
dirf, err := os.Open(dir)
if err != nil {
return ts, err
}
defer dirf.Close()
fis, err := dirf.Readdir(-1)
if err != nil {
return ts, err
}
for _, fi := range fis {
if fi.Name() == "." || fi.Name() == ".." {
continue
}
// for directories we recurse
if fi.IsDir() {
dirTs, err := dirTimestamp(filepath.Join(dir, fi.Name()))
if err != nil {
return ts, err
}
if dirTs.After(ts) {
ts = dirTs
}
continue
}
// for files check timestamp
mt := fi.ModTime()
if mt.After(ts) {
ts = mt
}
}
return
}
| dirTimestamp | identifier_name |
parse_dwarf.py | # -*- Mode: Python -*-
# Copyright (c) 2002-2011 IronPort Systems and Cisco Systems
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: this code mistakenly assumes the file it's looking at has the same
# endianness as the host. Please fix.
import struct
from pprint import pprint as pp
# DWARF tags
DW_TAG_array_type = 0x01
DW_TAG_class_type = 0x02
DW_TAG_entry_point = 0x03
DW_TAG_enumeration_type = 0x04
DW_TAG_formal_parameter = 0x05
DW_TAG_imported_declaration = 0x08
DW_TAG_label = 0x0a
DW_TAG_lexical_block = 0x0b
DW_TAG_member = 0x0d
DW_TAG_pointer_type = 0x0f
DW_TAG_reference_type = 0x10
DW_TAG_compile_unit = 0x11
DW_TAG_string_type = 0x12
DW_TAG_structure_type = 0x13
DW_TAG_subroutine_type = 0x15
DW_TAG_typedef = 0x16
DW_TAG_union_type = 0x17
DW_TAG_unspecified_parameters = 0x18
DW_TAG_variant = 0x19
DW_TAG_common_block = 0x1a
DW_TAG_common_inclusion = 0x1b
DW_TAG_inheritance = 0x1c
DW_TAG_inlined_subroutine = 0x1d
DW_TAG_module = 0x1e
DW_TAG_ptr_to_member_type = 0x1f
DW_TAG_set_type = 0x20
DW_TAG_subrange_type = 0x21
DW_TAG_with_stmt = 0x22
DW_TAG_access_declaration = 0x23
DW_TAG_base_type = 0x24
DW_TAG_catch_block = 0x25
DW_TAG_const_type = 0x26
DW_TAG_constant = 0x27
DW_TAG_enumerator = 0x28
DW_TAG_file_type = 0x29
DW_TAG_friend = 0x2a
DW_TAG_namelist = 0x2b
DW_TAG_namelist_item = 0x2c # DWARF3/2 spelling
DW_TAG_packed_type = 0x2d
DW_TAG_subprogram = 0x2e
DW_TAG_template_type_parameter = 0x2f # DWARF3/2 spelling
DW_TAG_template_value_parameter = 0x30 # DWARF3/2 spelling
DW_TAG_thrown_type = 0x31
DW_TAG_try_block = 0x32
DW_TAG_variant_part = 0x33
DW_TAG_variable = 0x34
DW_TAG_volatile_type = 0x35
DW_TAG_dwarf_procedure = 0x36 # DWARF3
DW_TAG_restrict_type = 0x37 # DWARF3
DW_TAG_interface_type = 0x38 # DWARF3
DW_TAG_namespace = 0x39 # DWARF3
DW_TAG_imported_module = 0x3a # DWARF3
DW_TAG_unspecified_type = 0x3b # DWARF3
DW_TAG_partial_unit = 0x3c # DWARF3
DW_TAG_imported_unit = 0x3d # DWARF3
DW_TAG_mutable_type = 0x3e # DWARF3
TAGS = {}
for name in dir():
if name.startswith ('DW_TAG_'):
TAGS[eval(name)] = name[7:]
# DWARF attributes
DW_AT_sibling = 0x01
DW_AT_location = 0x02
DW_AT_name = 0x03
DW_AT_ordering = 0x09
DW_AT_subscr_data = 0x0a
DW_AT_byte_size = 0x0b
DW_AT_bit_offset = 0x0c
DW_AT_bit_size = 0x0d
DW_AT_element_list = 0x0f
DW_AT_stmt_list = 0x10
DW_AT_low_pc = 0x11
DW_AT_high_pc = 0x12
DW_AT_language = 0x13
DW_AT_member = 0x14
DW_AT_discr = 0x15
DW_AT_discr_value = 0x16
DW_AT_visibility = 0x17
DW_AT_import = 0x18
DW_AT_string_length = 0x19
DW_AT_common_reference = 0x1a
DW_AT_comp_dir = 0x1b
DW_AT_const_value = 0x1c
DW_AT_containing_type = 0x1d
DW_AT_default_value = 0x1e
DW_AT_inline = 0x20
DW_AT_is_optional = 0x21
DW_AT_lower_bound = 0x22
DW_AT_producer = 0x25
DW_AT_prototyped = 0x27
DW_AT_return_addr = 0x2a
DW_AT_start_scope = 0x2c
DW_AT_stride_size = 0x2e
DW_AT_upper_bound = 0x2f
DW_AT_abstract_origin = 0x31
DW_AT_accessibility = 0x32
DW_AT_address_class = 0x33
DW_AT_artificial = 0x34
DW_AT_base_types = 0x35
DW_AT_calling_convention = 0x36
DW_AT_count = 0x37
DW_AT_data_member_location = 0x38
DW_AT_decl_column = 0x39
DW_AT_decl_file = 0x3a
DW_AT_decl_line = 0x3b
DW_AT_declaration = 0x3c
DW_AT_discr_list = 0x3d
DW_AT_encoding = 0x3e
DW_AT_external = 0x3f
DW_AT_frame_base = 0x40
DW_AT_friend = 0x41
DW_AT_identifier_case = 0x42
DW_AT_macro_info = 0x43
DW_AT_namelist_item = 0x44
DW_AT_priority = 0x45
DW_AT_segment = 0x46
DW_AT_specification = 0x47
DW_AT_static_link = 0x48
DW_AT_type = 0x49
DW_AT_use_location = 0x4a
DW_AT_variable_parameter = 0x4b
DW_AT_virtuality = 0x4c
DW_AT_vtable_elem_location = 0x4d
# these are supposed to be in DWARF3 only, but I'm seeing them
# in DWARF2 files?
DW_AT_allocated = 0x4e # DWARF3
DW_AT_associated = 0x4f # DWARF3
DW_AT_data_location = 0x50 # DWARF3
DW_AT_stride = 0x51 # DWARF3
DW_AT_entry_pc = 0x52 # DWARF3
DW_AT_use_UTF8 = 0x53 # DWARF3
DW_AT_extension = 0x54 # DWARF3
DW_AT_ranges = 0x55 # DWARF3
DW_AT_trampoline = 0x56 # DWARF3
DW_AT_call_column = 0x57 # DWARF3
DW_AT_call_file = 0x58 # DWARF3
DW_AT_call_line = 0x59 # DWARF3
DW_AT_description = 0x5a # DWARF3
# DWARF4
DW_AT_description = 0x5a
DW_AT_binary_scale = 0x5b
DW_AT_decimal_scale = 0x5c
DW_AT_small = 0x5d
DW_AT_decimal_sign = 0x5e
DW_AT_digit_count = 0x5f
DW_AT_picture_string = 0x60
DW_AT_mutable = 0x61
DW_AT_threads_scaled = 0x62
DW_AT_explicit = 0x63
DW_AT_object_pointer = 0x64
DW_AT_endianity = 0x65
DW_AT_elemental = 0x66
DW_AT_pure = 0x67
DW_AT_recursive = 0x68
DW_AT_signature = 0x69
DW_AT_main_subprogram = 0x6a
DW_AT_data_bit_offset = 0x6b
DW_AT_const_expr = 0x6c
DW_AT_enum_class = 0x6d
DW_AT_linkage_name = 0x6e
# gcc spits this one out at times
DW_AT_MIPS_linkage_name = 0x2007 # MIPS/SGI
ATS = {}
for name in dir():
if name.startswith ('DW_AT_'):
ATS[eval(name)] = name[6:]
DW_ATE_address = 0x01
DW_ATE_boolean = 0x02
DW_ATE_complex_float = 0x03
DW_ATE_float = 0x04
DW_ATE_signed = 0x05
DW_ATE_signed_char = 0x06
DW_ATE_unsigned = 0x07
DW_ATE_unsigned_char = 0x08
DW_ATE_imaginary_float = 0x09
DW_ATE_packed_decimal = 0x0a
DW_ATE_numeric_string = 0x0b
DW_ATE_edited = 0x0c
DW_ATE_signed_fixed = 0x0d
DW_ATE_unsigned_fixed = 0x0e
ATES = {}
for name in dir():
if name.startswith ('DW_ATE_'):
ATES[eval(name)] = name[7:]
# DWARF forms
DW_FORM_addr = 0x01
DW_FORM_block2 = 0x03
DW_FORM_block4 = 0x04
DW_FORM_data2 = 0x05
DW_FORM_data4 = 0x06
DW_FORM_data8 = 0x07
DW_FORM_string = 0x08
DW_FORM_block = 0x09
DW_FORM_block1 = 0x0a
DW_FORM_data1 = 0x0b
DW_FORM_flag = 0x0c
DW_FORM_sdata = 0x0d
DW_FORM_strp = 0x0e
DW_FORM_udata = 0x0f
DW_FORM_ref_addr = 0x10
DW_FORM_ref1 = 0x11
DW_FORM_ref2 = 0x12
DW_FORM_ref4 = 0x13
DW_FORM_ref8 = 0x14
DW_FORM_ref_udata = 0x15
DW_FORM_indirect = 0x16
# DWARF 4
DW_FORM_sec_offset = 0x17
DW_FORM_exprloc = 0x18
DW_FORM_flag_present = 0x19
DW_FORM_ref_sig8 = 0x20
FORMS = {}
for name in dir():
if name.startswith ('DW_FORM_'):
FORMS[eval(name)] = name[8:]
header_spec = '=lhlb'
header_size = struct.calcsize (header_spec)
def read_uleb128 (f):
"read an 'unsigned little-endian base 128' from <f>"
result = 0
shift = 0
while 1:
byte = ord (f.read (1))
result |= (byte & 0x7f) << shift
if byte & 0x80:
shift += 7
else:
break
return result
def decode_uleb128 (s):
"parse an 'unsigned little-endian base 128' from string <s>"
result = 0
shift = 0
i = 0
while 1:
byte = ord (s[i]); i += 1
result |= (byte & 0x7f) << shift
if byte & 0x80:
shift += 7
else:
break
return result
def read_string (f):
"read null-terminated string from <f>"
r = []
while 1:
b = f.read (1)
if b == '\x00':
break
else:
r.append (b)
return ''.join (r)
def read_struct (f, s, n):
"read a struct of size <n> with spec <s> from <f>"
r = struct.unpack (s, f.read (n))
if len(r) == 1:
return r[0]
else:
return r
def read_addr (f, psize):
"read an address of length <psize>"
if psize == 4:
return read_struct (f, '=l', 4)
elif psize == 8:
return read_struct (f, '=q', 8)
else:
raise ValueError, "unsupported pointer size"
def read_block1 (f):
return f.read (read_struct (f, '=b', 1))
def read_block2 (f):
return f.read (read_struct (f, '=h', 2))
def read_block4 (f):
return f.read (read_struct (f, '=l', 4))
def read_block (f):
return f.read (read_uleb128 (f))
def read_flag (f):
return not (f.read (1) == '\x00')
def read_data1 (f):
return read_struct (f, '=B', 1)
def read_data2 (f):
return read_struct (f, '=H', 2)
def read_data4 (f):
return read_struct (f, '=L', 4)
def read_data8 (f):
return read_struct (f, '=Q', 8)
def read_ref1 (f):
return read_struct (f, '=B', 1)
def read_ref2 (f):
return read_struct (f, '=H', 2)
def read_ref4 (f):
return read_struct (f, '=L', 4)
def read_ref_udata (f):
return read_uleb128 (f)
def read_udata (f):
return read_uleb128 (f)
def read_flag_present (f):
return True
def decode_location (x):
# interpret form
if x[0] == '#':
return decode_uleb128 (x[1:])
elif x[0] == '\x91':
# XXX signed, but we'll cheat
return decode_uleb128 (x[1:])
elif x[0] == '\x03':
# DW_OP_ADDR
x = x[1:]
if len(x) == 4:
return struct.unpack ('=l', x)[0]
elif len(x) == 8:
return struct.unpack ('=q', x)[0]
else:
return 'DW_OP_ADDR:%s' % (x.encode ('hex'))
else:
return x
form_readers = {
DW_FORM_string: read_string,
DW_FORM_data1: read_data1,
DW_FORM_data2: read_data2,
DW_FORM_data4: read_data4,
DW_FORM_data8: read_data8,
DW_FORM_ref1: read_ref1,
DW_FORM_ref2: read_ref2,
DW_FORM_ref4: read_ref4,
DW_FORM_ref4: read_ref4,
DW_FORM_ref_udata: read_ref_udata,
DW_FORM_block1: read_block1,
DW_FORM_block2: read_block2,
DW_FORM_block4: read_block4,
DW_FORM_flag: read_flag,
DW_FORM_udata: read_udata,
# XXX: HACK - I'm too lazy to figure out
# how to sign-extend these numbers.
DW_FORM_sdata: read_udata,
DW_FORM_flag_present: read_flag_present, # DWARF4
}
class section:
def __init__ (self, path, offset, size):
self.path = path
self.file = open (path, 'rb')
self.offset = offset
self.size = size
self.file.seek (offset)
def __repr__ (self):
return '<%s "%s" at 0x%x>' % (self.__class__.__name__, self.path, id(self))
class string_section (section):
def get (self, pos):
self.file.seek (self.offset + pos)
return read_string (self.file)
class abbrev_section (section):
def read_cu (self):
"read a compilation unit entry from an abbrev section"
tag = read_uleb128 (self.file)
child = ord (self.file.read (1))
attrs = []
while 1:
attr = read_uleb128 (self.file)
form = read_uleb128 (self.file)
if (attr, form) == (0, 0):
break
else:
attrs.append ((attr, form))
return tag, child, attrs
def read (self, offset):
"read abbrev table at <offset>"
self.file.seek (self.offset + offset)
abbrevs = {}
while 1:
index = read_uleb128 (self.file)
if index == 0:
break
else:
abbrevs[index] = self.read_cu()
return abbrevs
class info_section (section):
def read_all (self, abbrevs, strings):
"generate a list of compile_unit objects"
# Made this an iterator because collecting all the
# debug info from a typical python binary eats up
# about 100MB of memory! Iterating over it one
# compile_unit at a time is much more manageable
while 1:
where = self.file.tell()
if where >= self.offset + self.size:
break
else:
tree, by_pos = self.read (abbrevs, strings)
yield (compile_unit (tree, by_pos))
def read (self, abbrevs, strings):
base = self.file.tell()
self.header = struct.unpack (header_spec, self.file.read (header_size))
self.length, self.version, self.abbr_offset, self.psize = self.header
if self.version > 2:
raise ValueError
abbrev_table = abbrevs.read (self.abbr_offset)
by_pos = {}
tree = self.read_tree (abbrev_table, strings, by_pos, 0, base)
return tree, by_pos
def read_tree (self, abbrev_table, strings, by_pos, depth, base):
f = self.file
tree = []
while 1:
where = f.tell() - base
index = read_uleb128 (f)
if not index:
# null index indicates the end of a list of siblings
return tree
# NOTE: each item in a list of siblings has a 'DW_AT_sibling'
# telling you the location of the next record. This can be
# used to skip over types you don't know or care about.
attrs = {}
tag, child, attr_forms = abbrev_table[index]
for attr, form in attr_forms:
# strp & addr are special-cased because they
# need extra context...
if form == DW_FORM_strp:
x = strings.get (read_struct (f, '=l', 4))
elif form == DW_FORM_addr:
x = read_addr (f, self.psize)
else:
x = form_readers[form](f)
# special-case these, which technically require interpreters
# for the stack language. however, gcc seems to only output
# the uleb128 & sleb128 versions...
if attr in (DW_AT_data_member_location, DW_AT_location):
if isinstance (x, str):
x = decode_location (x)
elif isinstance (x, int):
pass
else:
raise ValueError ("unexpected type in DW_AT_data_member_location/DW_AT_location")
try:
attrs[ATS[attr]] = x
except KeyError:
# lots of vendor-specific extensions
attrs[hex(attr)] = x
if child:
# recursively read the list of children of this node
children = self.read_tree (abbrev_table, strings, by_pos, depth + 1, base)
else:
children = None
if TAGS.has_key (tag):
item = (TAGS[tag], where, attrs, children)
else:
item = (tag, where, attrs, children)
by_pos[where] = item
tree.append (item)
if depth == 0:
# only one element at the top level, special-case it
return item
return tree
class | :
def __init__ (self, tree, by_pos):
self.tree = tree
tag, where, self.attrs, self.children = tree
assert (tag == 'compile_unit')
self.by_pos = by_pos
def __repr__ (self):
return '<compile_unit %r at 0x%x>' % (self.attrs['name'], id(self))
def dump (self, file):
self.dump_tree (file, self.tree, 0)
def __getitem__ (self, pos):
return self.by_pos[pos]
def dump_tree (self, file, ob, depth):
tag, where, attrs, children = ob
print '%6d%s %s' % (where, ' ' * depth, tag),
for attr, data in attrs.iteritems():
print '%s:%r' % (attr, data),
print
if children:
for child in children:
self.dump_tree (file, child, depth + 1)
# http://dwarfstd.org/dwarf-2.0.0.pdf
# see pg 95 for a good example of the relationship between the different sections
# see pg 71/72 for descriptions of DW_FORMs
# location descriptions: start on page 72, hopefully we don't need to implement
# the whole stack machine thing. Most of the offsets appear to be simple
# DW_OP_plus_uconst ('#'/0x23), which encodes as a uleb128
def read (path, elf_info):
"""read (<path>, <elf_info>) => <iterator>
generate a list of <compile_unit> objects for file <path>"""
ehdr, phdrs, shdrs, syms, core_info = elf_info
info = abbrev = strings = None
for shdr in shdrs:
if shdr['name'] == '.debug_info':
info = shdr['offset'], shdr['size']
if shdr['name'] == '.debug_abbrev':
abbrev = shdr['offset'], shdr['size']
if shdr['name'] == '.debug_str':
strings = shdr['offset'], shdr['size']
if not info:
return []
else:
abbrevs = abbrev_section (path, abbrev[0], abbrev[1])
if strings:
strings = string_section (path, strings[0], strings[1])
info = info_section (path, info[0], info[1])
return info.read_all (abbrevs, strings)
def test (path):
import parse_elf
import sys
global info
info_iter = read (path, parse_elf.go (path))
if not info_iter:
sys.stderr.write ('no debugging information present\n')
else:
for unit in info_iter:
print '-' * 75
unit.dump (sys.stdout)
if __name__ == '__main__':
import sys
test (sys.argv[1])
| compile_unit | identifier_name |
parse_dwarf.py | # -*- Mode: Python -*-
# Copyright (c) 2002-2011 IronPort Systems and Cisco Systems
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: this code mistakenly assumes the file it's looking at has the same
# endianness as the host. Please fix.
import struct
from pprint import pprint as pp
# DWARF tags
DW_TAG_array_type = 0x01
DW_TAG_class_type = 0x02
DW_TAG_entry_point = 0x03
DW_TAG_enumeration_type = 0x04
DW_TAG_formal_parameter = 0x05
DW_TAG_imported_declaration = 0x08
DW_TAG_label = 0x0a
DW_TAG_lexical_block = 0x0b
DW_TAG_member = 0x0d
DW_TAG_pointer_type = 0x0f
DW_TAG_reference_type = 0x10
DW_TAG_compile_unit = 0x11
DW_TAG_string_type = 0x12
DW_TAG_structure_type = 0x13
DW_TAG_subroutine_type = 0x15
DW_TAG_typedef = 0x16
DW_TAG_union_type = 0x17
DW_TAG_unspecified_parameters = 0x18
DW_TAG_variant = 0x19
DW_TAG_common_block = 0x1a
DW_TAG_common_inclusion = 0x1b
DW_TAG_inheritance = 0x1c
DW_TAG_inlined_subroutine = 0x1d
DW_TAG_module = 0x1e
DW_TAG_ptr_to_member_type = 0x1f
DW_TAG_set_type = 0x20
DW_TAG_subrange_type = 0x21
DW_TAG_with_stmt = 0x22
DW_TAG_access_declaration = 0x23
DW_TAG_base_type = 0x24
DW_TAG_catch_block = 0x25
DW_TAG_const_type = 0x26
DW_TAG_constant = 0x27
DW_TAG_enumerator = 0x28
DW_TAG_file_type = 0x29
DW_TAG_friend = 0x2a
DW_TAG_namelist = 0x2b
DW_TAG_namelist_item = 0x2c # DWARF3/2 spelling
DW_TAG_packed_type = 0x2d
DW_TAG_subprogram = 0x2e
DW_TAG_template_type_parameter = 0x2f # DWARF3/2 spelling
DW_TAG_template_value_parameter = 0x30 # DWARF3/2 spelling
DW_TAG_thrown_type = 0x31
DW_TAG_try_block = 0x32
DW_TAG_variant_part = 0x33
DW_TAG_variable = 0x34
DW_TAG_volatile_type = 0x35
DW_TAG_dwarf_procedure = 0x36 # DWARF3
DW_TAG_restrict_type = 0x37 # DWARF3
DW_TAG_interface_type = 0x38 # DWARF3
DW_TAG_namespace = 0x39 # DWARF3
DW_TAG_imported_module = 0x3a # DWARF3
DW_TAG_unspecified_type = 0x3b # DWARF3
DW_TAG_partial_unit = 0x3c # DWARF3
DW_TAG_imported_unit = 0x3d # DWARF3
DW_TAG_mutable_type = 0x3e # DWARF3
TAGS = {}
for name in dir():
if name.startswith ('DW_TAG_'):
TAGS[eval(name)] = name[7:]
# DWARF attributes
DW_AT_sibling = 0x01
DW_AT_location = 0x02
DW_AT_name = 0x03
DW_AT_ordering = 0x09
DW_AT_subscr_data = 0x0a
DW_AT_byte_size = 0x0b
DW_AT_bit_offset = 0x0c
DW_AT_bit_size = 0x0d
DW_AT_element_list = 0x0f
DW_AT_stmt_list = 0x10
DW_AT_low_pc = 0x11
DW_AT_high_pc = 0x12
DW_AT_language = 0x13
DW_AT_member = 0x14
DW_AT_discr = 0x15
DW_AT_discr_value = 0x16
DW_AT_visibility = 0x17
DW_AT_import = 0x18
DW_AT_string_length = 0x19
DW_AT_common_reference = 0x1a
DW_AT_comp_dir = 0x1b
DW_AT_const_value = 0x1c
DW_AT_containing_type = 0x1d
DW_AT_default_value = 0x1e
DW_AT_inline = 0x20
DW_AT_is_optional = 0x21
DW_AT_lower_bound = 0x22
DW_AT_producer = 0x25
DW_AT_prototyped = 0x27
DW_AT_return_addr = 0x2a
DW_AT_start_scope = 0x2c
DW_AT_stride_size = 0x2e
DW_AT_upper_bound = 0x2f
DW_AT_abstract_origin = 0x31
DW_AT_accessibility = 0x32
DW_AT_address_class = 0x33
DW_AT_artificial = 0x34
DW_AT_base_types = 0x35
DW_AT_calling_convention = 0x36
DW_AT_count = 0x37
DW_AT_data_member_location = 0x38
DW_AT_decl_column = 0x39
DW_AT_decl_file = 0x3a
DW_AT_decl_line = 0x3b
DW_AT_declaration = 0x3c
DW_AT_discr_list = 0x3d
DW_AT_encoding = 0x3e
DW_AT_external = 0x3f
DW_AT_frame_base = 0x40
DW_AT_friend = 0x41
DW_AT_identifier_case = 0x42
DW_AT_macro_info = 0x43
DW_AT_namelist_item = 0x44
DW_AT_priority = 0x45
DW_AT_segment = 0x46
DW_AT_specification = 0x47
DW_AT_static_link = 0x48
DW_AT_type = 0x49
DW_AT_use_location = 0x4a
DW_AT_variable_parameter = 0x4b
DW_AT_virtuality = 0x4c
DW_AT_vtable_elem_location = 0x4d
# these are supposed to be in DWARF3 only, but I'm seeing them
# in DWARF2 files?
DW_AT_allocated = 0x4e # DWARF3
DW_AT_associated = 0x4f # DWARF3
DW_AT_data_location = 0x50 # DWARF3
DW_AT_stride = 0x51 # DWARF3
DW_AT_entry_pc = 0x52 # DWARF3
DW_AT_use_UTF8 = 0x53 # DWARF3
DW_AT_extension = 0x54 # DWARF3
DW_AT_ranges = 0x55 # DWARF3
DW_AT_trampoline = 0x56 # DWARF3
DW_AT_call_column = 0x57 # DWARF3
DW_AT_call_file = 0x58 # DWARF3
DW_AT_call_line = 0x59 # DWARF3
DW_AT_description = 0x5a # DWARF3
# DWARF4
DW_AT_description = 0x5a
DW_AT_binary_scale = 0x5b
DW_AT_decimal_scale = 0x5c
DW_AT_small = 0x5d
DW_AT_decimal_sign = 0x5e
DW_AT_digit_count = 0x5f
DW_AT_picture_string = 0x60
DW_AT_mutable = 0x61
DW_AT_threads_scaled = 0x62
DW_AT_explicit = 0x63
DW_AT_object_pointer = 0x64
DW_AT_endianity = 0x65
DW_AT_elemental = 0x66
DW_AT_pure = 0x67
DW_AT_recursive = 0x68
DW_AT_signature = 0x69
DW_AT_main_subprogram = 0x6a
DW_AT_data_bit_offset = 0x6b
DW_AT_const_expr = 0x6c
DW_AT_enum_class = 0x6d
DW_AT_linkage_name = 0x6e
# gcc spits this one out at times
DW_AT_MIPS_linkage_name = 0x2007 # MIPS/SGI
ATS = {}
for name in dir():
if name.startswith ('DW_AT_'):
ATS[eval(name)] = name[6:]
DW_ATE_address = 0x01
DW_ATE_boolean = 0x02
DW_ATE_complex_float = 0x03
DW_ATE_float = 0x04
DW_ATE_signed = 0x05
DW_ATE_signed_char = 0x06
DW_ATE_unsigned = 0x07
DW_ATE_unsigned_char = 0x08
DW_ATE_imaginary_float = 0x09
DW_ATE_packed_decimal = 0x0a
DW_ATE_numeric_string = 0x0b
DW_ATE_edited = 0x0c
DW_ATE_signed_fixed = 0x0d
DW_ATE_unsigned_fixed = 0x0e
ATES = {}
for name in dir():
if name.startswith ('DW_ATE_'):
ATES[eval(name)] = name[7:]
# DWARF forms
DW_FORM_addr = 0x01
DW_FORM_block2 = 0x03
DW_FORM_block4 = 0x04
DW_FORM_data2 = 0x05
DW_FORM_data4 = 0x06
DW_FORM_data8 = 0x07
DW_FORM_string = 0x08
DW_FORM_block = 0x09
DW_FORM_block1 = 0x0a
DW_FORM_data1 = 0x0b
DW_FORM_flag = 0x0c
DW_FORM_sdata = 0x0d
DW_FORM_strp = 0x0e
DW_FORM_udata = 0x0f
DW_FORM_ref_addr = 0x10
DW_FORM_ref1 = 0x11
DW_FORM_ref2 = 0x12
DW_FORM_ref4 = 0x13
DW_FORM_ref8 = 0x14
DW_FORM_ref_udata = 0x15
DW_FORM_indirect = 0x16
# DWARF 4
DW_FORM_sec_offset = 0x17
DW_FORM_exprloc = 0x18
DW_FORM_flag_present = 0x19
DW_FORM_ref_sig8 = 0x20
FORMS = {}
for name in dir():
if name.startswith ('DW_FORM_'):
FORMS[eval(name)] = name[8:]
header_spec = '=lhlb'
header_size = struct.calcsize (header_spec)
def read_uleb128 (f):
"read an 'unsigned little-endian base 128' from <f>"
result = 0
shift = 0
while 1:
byte = ord (f.read (1))
result |= (byte & 0x7f) << shift
if byte & 0x80:
shift += 7
else:
break
return result
def decode_uleb128 (s):
"parse an 'unsigned little-endian base 128' from string <s>"
result = 0
shift = 0
i = 0
while 1:
byte = ord (s[i]); i += 1
result |= (byte & 0x7f) << shift
if byte & 0x80:
shift += 7
else:
break
return result
def read_string (f):
"read null-terminated string from <f>"
r = []
while 1:
b = f.read (1)
if b == '\x00':
break
else:
r.append (b)
return ''.join (r)
def read_struct (f, s, n):
"read a struct of size <n> with spec <s> from <f>"
r = struct.unpack (s, f.read (n))
if len(r) == 1:
return r[0]
else:
return r
def read_addr (f, psize):
"read an address of length <psize>"
if psize == 4:
return read_struct (f, '=l', 4)
elif psize == 8:
return read_struct (f, '=q', 8)
else:
raise ValueError, "unsupported pointer size"
def read_block1 (f):
return f.read (read_struct (f, '=b', 1))
def read_block2 (f):
return f.read (read_struct (f, '=h', 2))
def read_block4 (f):
return f.read (read_struct (f, '=l', 4))
def read_block (f):
return f.read (read_uleb128 (f))
def read_flag (f):
return not (f.read (1) == '\x00')
def read_data1 (f):
return read_struct (f, '=B', 1)
def read_data2 (f):
return read_struct (f, '=H', 2)
def read_data4 (f):
return read_struct (f, '=L', 4)
def read_data8 (f):
return read_struct (f, '=Q', 8)
def read_ref1 (f):
return read_struct (f, '=B', 1)
def read_ref2 (f):
return read_struct (f, '=H', 2)
def read_ref4 (f):
return read_struct (f, '=L', 4)
def read_ref_udata (f):
return read_uleb128 (f)
def read_udata (f):
return read_uleb128 (f)
def read_flag_present (f):
return True
def decode_location (x):
# interpret form
if x[0] == '#':
return decode_uleb128 (x[1:])
elif x[0] == '\x91':
# XXX signed, but we'll cheat
return decode_uleb128 (x[1:])
elif x[0] == '\x03':
# DW_OP_ADDR
x = x[1:]
if len(x) == 4:
return struct.unpack ('=l', x)[0]
elif len(x) == 8:
return struct.unpack ('=q', x)[0]
else:
return 'DW_OP_ADDR:%s' % (x.encode ('hex'))
else:
return x
form_readers = {
DW_FORM_string: read_string,
DW_FORM_data1: read_data1,
DW_FORM_data2: read_data2,
DW_FORM_data4: read_data4,
DW_FORM_data8: read_data8,
DW_FORM_ref1: read_ref1,
DW_FORM_ref2: read_ref2,
DW_FORM_ref4: read_ref4,
DW_FORM_ref4: read_ref4,
DW_FORM_ref_udata: read_ref_udata,
DW_FORM_block1: read_block1,
DW_FORM_block2: read_block2,
DW_FORM_block4: read_block4,
DW_FORM_flag: read_flag,
DW_FORM_udata: read_udata,
# XXX: HACK - I'm too lazy to figure out
# how to sign-extend these numbers.
DW_FORM_sdata: read_udata,
DW_FORM_flag_present: read_flag_present, # DWARF4
}
class section:
def __init__ (self, path, offset, size):
self.path = path
self.file = open (path, 'rb')
self.offset = offset
self.size = size
self.file.seek (offset)
def __repr__ (self):
return '<%s "%s" at 0x%x>' % (self.__class__.__name__, self.path, id(self))
class string_section (section):
def get (self, pos):
self.file.seek (self.offset + pos)
return read_string (self.file)
class abbrev_section (section):
def read_cu (self):
"read a compilation unit entry from an abbrev section"
tag = read_uleb128 (self.file)
child = ord (self.file.read (1))
attrs = []
while 1:
attr = read_uleb128 (self.file)
form = read_uleb128 (self.file)
if (attr, form) == (0, 0):
break
else:
attrs.append ((attr, form))
return tag, child, attrs
def read (self, offset):
"read abbrev table at <offset>"
self.file.seek (self.offset + offset)
abbrevs = {}
while 1:
index = read_uleb128 (self.file)
if index == 0:
break
else:
abbrevs[index] = self.read_cu()
return abbrevs
class info_section (section):
def read_all (self, abbrevs, strings):
"generate a list of compile_unit objects"
# Made this an iterator because collecting all the
# debug info from a typical python binary eats up
# about 100MB of memory! Iterating over it one
# compile_unit at a time is much more manageable
while 1:
where = self.file.tell()
if where >= self.offset + self.size:
break
else:
tree, by_pos = self.read (abbrevs, strings)
yield (compile_unit (tree, by_pos))
def read (self, abbrevs, strings):
base = self.file.tell()
self.header = struct.unpack (header_spec, self.file.read (header_size))
self.length, self.version, self.abbr_offset, self.psize = self.header
if self.version > 2:
raise ValueError
abbrev_table = abbrevs.read (self.abbr_offset)
by_pos = {}
tree = self.read_tree (abbrev_table, strings, by_pos, 0, base)
return tree, by_pos
def read_tree (self, abbrev_table, strings, by_pos, depth, base):
f = self.file
tree = []
while 1:
where = f.tell() - base
index = read_uleb128 (f)
if not index:
# null index indicates the end of a list of siblings
return tree
# NOTE: each item in a list of siblings has a 'DW_AT_sibling'
# telling you the location of the next record. This can be
# used to skip over types you don't know or care about.
attrs = {}
tag, child, attr_forms = abbrev_table[index]
for attr, form in attr_forms:
# strp & addr are special-cased because they
# need extra context...
if form == DW_FORM_strp:
x = strings.get (read_struct (f, '=l', 4))
elif form == DW_FORM_addr:
x = read_addr (f, self.psize)
else:
x = form_readers[form](f)
# special-case these, which technically require interpreters
# for the stack language. however, gcc seems to only output
# the uleb128 & sleb128 versions...
if attr in (DW_AT_data_member_location, DW_AT_location):
if isinstance (x, str):
x = decode_location (x)
elif isinstance (x, int):
pass
else:
raise ValueError ("unexpected type in DW_AT_data_member_location/DW_AT_location")
try:
attrs[ATS[attr]] = x
except KeyError:
# lots of vendor-specific extensions
attrs[hex(attr)] = x
if child:
# recursively read the list of children of this node
children = self.read_tree (abbrev_table, strings, by_pos, depth + 1, base)
else:
children = None
if TAGS.has_key (tag):
item = (TAGS[tag], where, attrs, children)
else:
item = (tag, where, attrs, children)
by_pos[where] = item
tree.append (item)
if depth == 0:
# only one element at the top level, special-case it
return item
return tree
class compile_unit:
def __init__ (self, tree, by_pos):
self.tree = tree
tag, where, self.attrs, self.children = tree
assert (tag == 'compile_unit')
self.by_pos = by_pos
def __repr__ (self):
return '<compile_unit %r at 0x%x>' % (self.attrs['name'], id(self))
def dump (self, file):
self.dump_tree (file, self.tree, 0)
def __getitem__ (self, pos):
return self.by_pos[pos]
def dump_tree (self, file, ob, depth):
tag, where, attrs, children = ob
print '%6d%s %s' % (where, ' ' * depth, tag),
for attr, data in attrs.iteritems():
print '%s:%r' % (attr, data),
print
if children:
for child in children:
self.dump_tree (file, child, depth + 1)
# http://dwarfstd.org/dwarf-2.0.0.pdf
# see pg 95 for a good example of the relationship between the different sections
# see pg 71/72 for descriptions of DW_FORMs
# location descriptions: start on page 72, hopefully we don't need to implement
# the whole stack machine thing. Most of the offsets appear to be simple
# DW_OP_plus_uconst ('#'/0x23), which encodes as a uleb128
def read (path, elf_info):
|
def test (path):
import parse_elf
import sys
global info
info_iter = read (path, parse_elf.go (path))
if not info_iter:
sys.stderr.write ('no debugging information present\n')
else:
for unit in info_iter:
print '-' * 75
unit.dump (sys.stdout)
if __name__ == '__main__':
import sys
test (sys.argv[1])
| """read (<path>, <elf_info>) => <iterator>
generate a list of <compile_unit> objects for file <path>"""
ehdr, phdrs, shdrs, syms, core_info = elf_info
info = abbrev = strings = None
for shdr in shdrs:
if shdr['name'] == '.debug_info':
info = shdr['offset'], shdr['size']
if shdr['name'] == '.debug_abbrev':
abbrev = shdr['offset'], shdr['size']
if shdr['name'] == '.debug_str':
strings = shdr['offset'], shdr['size']
if not info:
return []
else:
abbrevs = abbrev_section (path, abbrev[0], abbrev[1])
if strings:
strings = string_section (path, strings[0], strings[1])
info = info_section (path, info[0], info[1])
return info.read_all (abbrevs, strings) | identifier_body |
parse_dwarf.py | # -*- Mode: Python -*-
# Copyright (c) 2002-2011 IronPort Systems and Cisco Systems
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: this code mistakenly assumes the file it's looking at has the same
# endianness as the host. Please fix.
import struct
from pprint import pprint as pp
# DWARF tags
DW_TAG_array_type = 0x01
DW_TAG_class_type = 0x02
DW_TAG_entry_point = 0x03
DW_TAG_enumeration_type = 0x04
DW_TAG_formal_parameter = 0x05
DW_TAG_imported_declaration = 0x08
DW_TAG_label = 0x0a
DW_TAG_lexical_block = 0x0b
DW_TAG_member = 0x0d
DW_TAG_pointer_type = 0x0f
DW_TAG_reference_type = 0x10
DW_TAG_compile_unit = 0x11
DW_TAG_string_type = 0x12
DW_TAG_structure_type = 0x13
DW_TAG_subroutine_type = 0x15
DW_TAG_typedef = 0x16
DW_TAG_union_type = 0x17
DW_TAG_unspecified_parameters = 0x18
DW_TAG_variant = 0x19
DW_TAG_common_block = 0x1a
DW_TAG_common_inclusion = 0x1b
DW_TAG_inheritance = 0x1c
DW_TAG_inlined_subroutine = 0x1d
DW_TAG_module = 0x1e
DW_TAG_ptr_to_member_type = 0x1f
DW_TAG_set_type = 0x20
DW_TAG_subrange_type = 0x21
DW_TAG_with_stmt = 0x22
DW_TAG_access_declaration = 0x23
DW_TAG_base_type = 0x24
DW_TAG_catch_block = 0x25
DW_TAG_const_type = 0x26
DW_TAG_constant = 0x27
DW_TAG_enumerator = 0x28
DW_TAG_file_type = 0x29
DW_TAG_friend = 0x2a
DW_TAG_namelist = 0x2b
DW_TAG_namelist_item = 0x2c # DWARF3/2 spelling
DW_TAG_packed_type = 0x2d
DW_TAG_subprogram = 0x2e
DW_TAG_template_type_parameter = 0x2f # DWARF3/2 spelling
DW_TAG_template_value_parameter = 0x30 # DWARF3/2 spelling
DW_TAG_thrown_type = 0x31
DW_TAG_try_block = 0x32
DW_TAG_variant_part = 0x33
DW_TAG_variable = 0x34
DW_TAG_volatile_type = 0x35
DW_TAG_dwarf_procedure = 0x36 # DWARF3
DW_TAG_restrict_type = 0x37 # DWARF3
DW_TAG_interface_type = 0x38 # DWARF3
DW_TAG_namespace = 0x39 # DWARF3
DW_TAG_imported_module = 0x3a # DWARF3
DW_TAG_unspecified_type = 0x3b # DWARF3
DW_TAG_partial_unit = 0x3c # DWARF3
DW_TAG_imported_unit = 0x3d # DWARF3
DW_TAG_mutable_type = 0x3e # DWARF3
TAGS = {}
for name in dir():
if name.startswith ('DW_TAG_'):
TAGS[eval(name)] = name[7:]
# DWARF attributes
DW_AT_sibling = 0x01
DW_AT_location = 0x02
DW_AT_name = 0x03
DW_AT_ordering = 0x09
DW_AT_subscr_data = 0x0a
DW_AT_byte_size = 0x0b
DW_AT_bit_offset = 0x0c
DW_AT_bit_size = 0x0d
DW_AT_element_list = 0x0f
DW_AT_stmt_list = 0x10
DW_AT_low_pc = 0x11
DW_AT_high_pc = 0x12
DW_AT_language = 0x13
DW_AT_member = 0x14
DW_AT_discr = 0x15
DW_AT_discr_value = 0x16
DW_AT_visibility = 0x17
DW_AT_import = 0x18
DW_AT_string_length = 0x19
DW_AT_common_reference = 0x1a
DW_AT_comp_dir = 0x1b
DW_AT_const_value = 0x1c
DW_AT_containing_type = 0x1d
DW_AT_default_value = 0x1e
DW_AT_inline = 0x20
DW_AT_is_optional = 0x21
DW_AT_lower_bound = 0x22
DW_AT_producer = 0x25
DW_AT_prototyped = 0x27
DW_AT_return_addr = 0x2a
DW_AT_start_scope = 0x2c
DW_AT_stride_size = 0x2e
DW_AT_upper_bound = 0x2f
DW_AT_abstract_origin = 0x31
DW_AT_accessibility = 0x32
DW_AT_address_class = 0x33
DW_AT_artificial = 0x34
DW_AT_base_types = 0x35
DW_AT_calling_convention = 0x36
DW_AT_count = 0x37
DW_AT_data_member_location = 0x38
DW_AT_decl_column = 0x39
DW_AT_decl_file = 0x3a
DW_AT_decl_line = 0x3b
DW_AT_declaration = 0x3c
DW_AT_discr_list = 0x3d
DW_AT_encoding = 0x3e
DW_AT_external = 0x3f
DW_AT_frame_base = 0x40
DW_AT_friend = 0x41
DW_AT_identifier_case = 0x42
DW_AT_macro_info = 0x43
DW_AT_namelist_item = 0x44
DW_AT_priority = 0x45
DW_AT_segment = 0x46
DW_AT_specification = 0x47
DW_AT_static_link = 0x48
DW_AT_type = 0x49
DW_AT_use_location = 0x4a
DW_AT_variable_parameter = 0x4b
DW_AT_virtuality = 0x4c
DW_AT_vtable_elem_location = 0x4d
# these are supposed to be in DWARF3 only, but I'm seeing them
# in DWARF2 files?
DW_AT_allocated = 0x4e # DWARF3
DW_AT_associated = 0x4f # DWARF3
DW_AT_data_location = 0x50 # DWARF3
DW_AT_stride = 0x51 # DWARF3
DW_AT_entry_pc = 0x52 # DWARF3
DW_AT_use_UTF8 = 0x53 # DWARF3
DW_AT_extension = 0x54 # DWARF3
DW_AT_ranges = 0x55 # DWARF3
DW_AT_trampoline = 0x56 # DWARF3
DW_AT_call_column = 0x57 # DWARF3
DW_AT_call_file = 0x58 # DWARF3
DW_AT_call_line = 0x59 # DWARF3
DW_AT_description = 0x5a # DWARF3
# DWARF4
DW_AT_description = 0x5a
DW_AT_binary_scale = 0x5b
DW_AT_decimal_scale = 0x5c
DW_AT_small = 0x5d
DW_AT_decimal_sign = 0x5e
DW_AT_digit_count = 0x5f
DW_AT_picture_string = 0x60
DW_AT_mutable = 0x61
DW_AT_threads_scaled = 0x62
DW_AT_explicit = 0x63
DW_AT_object_pointer = 0x64
DW_AT_endianity = 0x65
DW_AT_elemental = 0x66
DW_AT_pure = 0x67
DW_AT_recursive = 0x68
DW_AT_signature = 0x69
DW_AT_main_subprogram = 0x6a
DW_AT_data_bit_offset = 0x6b
DW_AT_const_expr = 0x6c
DW_AT_enum_class = 0x6d
DW_AT_linkage_name = 0x6e
# gcc spits this one out at times
DW_AT_MIPS_linkage_name = 0x2007 # MIPS/SGI
ATS = {}
for name in dir():
if name.startswith ('DW_AT_'):
ATS[eval(name)] = name[6:]
DW_ATE_address = 0x01
DW_ATE_boolean = 0x02
DW_ATE_complex_float = 0x03
DW_ATE_float = 0x04
DW_ATE_signed = 0x05
DW_ATE_signed_char = 0x06
DW_ATE_unsigned = 0x07
DW_ATE_unsigned_char = 0x08
DW_ATE_imaginary_float = 0x09
DW_ATE_packed_decimal = 0x0a
DW_ATE_numeric_string = 0x0b
DW_ATE_edited = 0x0c
DW_ATE_signed_fixed = 0x0d
DW_ATE_unsigned_fixed = 0x0e
ATES = {}
for name in dir():
if name.startswith ('DW_ATE_'):
ATES[eval(name)] = name[7:]
# DWARF forms
DW_FORM_addr = 0x01
DW_FORM_block2 = 0x03
DW_FORM_block4 = 0x04
DW_FORM_data2 = 0x05
DW_FORM_data4 = 0x06
DW_FORM_data8 = 0x07
DW_FORM_string = 0x08
DW_FORM_block = 0x09
DW_FORM_block1 = 0x0a
DW_FORM_data1 = 0x0b
DW_FORM_flag = 0x0c
DW_FORM_sdata = 0x0d
DW_FORM_strp = 0x0e
DW_FORM_udata = 0x0f
DW_FORM_ref_addr = 0x10
DW_FORM_ref1 = 0x11
DW_FORM_ref2 = 0x12
DW_FORM_ref4 = 0x13
DW_FORM_ref8 = 0x14
DW_FORM_ref_udata = 0x15
DW_FORM_indirect = 0x16
# DWARF 4
DW_FORM_sec_offset = 0x17
DW_FORM_exprloc = 0x18
DW_FORM_flag_present = 0x19
DW_FORM_ref_sig8 = 0x20
FORMS = {}
for name in dir():
if name.startswith ('DW_FORM_'):
FORMS[eval(name)] = name[8:]
header_spec = '=lhlb'
header_size = struct.calcsize (header_spec)
def read_uleb128 (f):
"read an 'unsigned little-endian base 128' from <f>"
result = 0
shift = 0
while 1:
byte = ord (f.read (1))
result |= (byte & 0x7f) << shift
if byte & 0x80:
shift += 7
else:
break
return result
def decode_uleb128 (s):
"parse an 'unsigned little-endian base 128' from string <s>"
result = 0
shift = 0
i = 0
while 1:
byte = ord (s[i]); i += 1
result |= (byte & 0x7f) << shift
if byte & 0x80:
shift += 7
else:
break
return result
def read_string (f):
"read null-terminated string from <f>"
r = []
while 1:
b = f.read (1)
if b == '\x00':
break
else:
r.append (b)
return ''.join (r)
def read_struct (f, s, n):
"read a struct of size <n> with spec <s> from <f>"
r = struct.unpack (s, f.read (n))
if len(r) == 1:
return r[0]
else:
return r
def read_addr (f, psize):
"read an address of length <psize>"
if psize == 4:
return read_struct (f, '=l', 4)
elif psize == 8:
return read_struct (f, '=q', 8)
else:
raise ValueError, "unsupported pointer size"
def read_block1 (f):
return f.read (read_struct (f, '=b', 1))
def read_block2 (f):
return f.read (read_struct (f, '=h', 2))
def read_block4 (f):
return f.read (read_struct (f, '=l', 4))
def read_block (f):
return f.read (read_uleb128 (f))
def read_flag (f):
return not (f.read (1) == '\x00')
def read_data1 (f):
return read_struct (f, '=B', 1)
def read_data2 (f):
return read_struct (f, '=H', 2)
def read_data4 (f):
return read_struct (f, '=L', 4)
def read_data8 (f):
return read_struct (f, '=Q', 8)
def read_ref1 (f):
return read_struct (f, '=B', 1)
def read_ref2 (f):
return read_struct (f, '=H', 2)
def read_ref4 (f):
return read_struct (f, '=L', 4)
def read_ref_udata (f):
return read_uleb128 (f)
def read_udata (f):
return read_uleb128 (f)
def read_flag_present (f):
return True
def decode_location (x):
# interpret form
if x[0] == '#':
return decode_uleb128 (x[1:])
elif x[0] == '\x91':
# XXX signed, but we'll cheat
return decode_uleb128 (x[1:])
elif x[0] == '\x03':
# DW_OP_ADDR
x = x[1:]
if len(x) == 4:
return struct.unpack ('=l', x)[0]
elif len(x) == 8:
return struct.unpack ('=q', x)[0]
else:
return 'DW_OP_ADDR:%s' % (x.encode ('hex'))
else:
return x
form_readers = {
DW_FORM_string: read_string,
DW_FORM_data1: read_data1,
DW_FORM_data2: read_data2,
DW_FORM_data4: read_data4,
DW_FORM_data8: read_data8,
DW_FORM_ref1: read_ref1,
DW_FORM_ref2: read_ref2,
DW_FORM_ref4: read_ref4,
DW_FORM_ref4: read_ref4,
DW_FORM_ref_udata: read_ref_udata,
DW_FORM_block1: read_block1,
DW_FORM_block2: read_block2,
DW_FORM_block4: read_block4,
DW_FORM_flag: read_flag,
DW_FORM_udata: read_udata,
# XXX: HACK - I'm too lazy to figure out
# how to sign-extend these numbers.
DW_FORM_sdata: read_udata,
DW_FORM_flag_present: read_flag_present, # DWARF4
}
class section:
def __init__ (self, path, offset, size):
self.path = path
self.file = open (path, 'rb')
self.offset = offset
self.size = size
self.file.seek (offset)
def __repr__ (self):
return '<%s "%s" at 0x%x>' % (self.__class__.__name__, self.path, id(self))
class string_section (section):
def get (self, pos):
self.file.seek (self.offset + pos)
return read_string (self.file)
class abbrev_section (section):
def read_cu (self):
"read a compilation unit entry from an abbrev section"
tag = read_uleb128 (self.file)
child = ord (self.file.read (1))
attrs = []
while 1:
attr = read_uleb128 (self.file)
form = read_uleb128 (self.file)
if (attr, form) == (0, 0):
break
else:
attrs.append ((attr, form))
return tag, child, attrs
def read (self, offset):
"read abbrev table at <offset>"
self.file.seek (self.offset + offset)
abbrevs = {}
while 1:
index = read_uleb128 (self.file)
if index == 0:
break
else:
abbrevs[index] = self.read_cu()
return abbrevs
class info_section (section):
def read_all (self, abbrevs, strings):
"generate a list of compile_unit objects"
# Made this an iterator because collecting all the
# debug info from a typical python binary eats up
# about 100MB of memory! Iterating over it one
# compile_unit at a time is much more manageable
while 1:
where = self.file.tell()
if where >= self.offset + self.size:
break
else:
tree, by_pos = self.read (abbrevs, strings)
yield (compile_unit (tree, by_pos))
def read (self, abbrevs, strings):
base = self.file.tell()
self.header = struct.unpack (header_spec, self.file.read (header_size))
self.length, self.version, self.abbr_offset, self.psize = self.header
if self.version > 2:
raise ValueError
abbrev_table = abbrevs.read (self.abbr_offset)
by_pos = {}
tree = self.read_tree (abbrev_table, strings, by_pos, 0, base)
return tree, by_pos
def read_tree (self, abbrev_table, strings, by_pos, depth, base):
f = self.file
tree = []
while 1:
where = f.tell() - base
index = read_uleb128 (f)
if not index:
# null index indicates the end of a list of siblings
return tree
# NOTE: each item in a list of siblings has a 'DW_AT_sibling'
# telling you the location of the next record. This can be
# used to skip over types you don't know or care about.
attrs = {}
tag, child, attr_forms = abbrev_table[index]
for attr, form in attr_forms:
# strp & addr are special-cased because they
# need extra context...
if form == DW_FORM_strp:
x = strings.get (read_struct (f, '=l', 4))
elif form == DW_FORM_addr:
x = read_addr (f, self.psize)
else:
x = form_readers[form](f)
# special-case these, which technically require interpreters
# for the stack language. however, gcc seems to only output
# the uleb128 & sleb128 versions...
if attr in (DW_AT_data_member_location, DW_AT_location):
if isinstance (x, str):
x = decode_location (x)
elif isinstance (x, int):
pass
else:
raise ValueError ("unexpected type in DW_AT_data_member_location/DW_AT_location")
try:
attrs[ATS[attr]] = x
except KeyError:
# lots of vendor-specific extensions
attrs[hex(attr)] = x
if child:
# recursively read the list of children of this node
children = self.read_tree (abbrev_table, strings, by_pos, depth + 1, base)
else:
|
if TAGS.has_key (tag):
item = (TAGS[tag], where, attrs, children)
else:
item = (tag, where, attrs, children)
by_pos[where] = item
tree.append (item)
if depth == 0:
# only one element at the top level, special-case it
return item
return tree
class compile_unit:
def __init__ (self, tree, by_pos):
self.tree = tree
tag, where, self.attrs, self.children = tree
assert (tag == 'compile_unit')
self.by_pos = by_pos
def __repr__ (self):
return '<compile_unit %r at 0x%x>' % (self.attrs['name'], id(self))
def dump (self, file):
self.dump_tree (file, self.tree, 0)
def __getitem__ (self, pos):
return self.by_pos[pos]
def dump_tree (self, file, ob, depth):
tag, where, attrs, children = ob
print '%6d%s %s' % (where, ' ' * depth, tag),
for attr, data in attrs.iteritems():
print '%s:%r' % (attr, data),
print
if children:
for child in children:
self.dump_tree (file, child, depth + 1)
# http://dwarfstd.org/dwarf-2.0.0.pdf
# see pg 95 for a good example of the relationship between the different sections
# see pg 71/72 for descriptions of DW_FORMs
# location descriptions: start on page 72, hopefully we don't need to implement
# the whole stack machine thing. Most of the offsets appear to be simple
# DW_OP_plus_uconst ('#'/0x23), which encodes as a uleb128
def read (path, elf_info):
"""read (<path>, <elf_info>) => <iterator>
generate a list of <compile_unit> objects for file <path>"""
ehdr, phdrs, shdrs, syms, core_info = elf_info
info = abbrev = strings = None
for shdr in shdrs:
if shdr['name'] == '.debug_info':
info = shdr['offset'], shdr['size']
if shdr['name'] == '.debug_abbrev':
abbrev = shdr['offset'], shdr['size']
if shdr['name'] == '.debug_str':
strings = shdr['offset'], shdr['size']
if not info:
return []
else:
abbrevs = abbrev_section (path, abbrev[0], abbrev[1])
if strings:
strings = string_section (path, strings[0], strings[1])
info = info_section (path, info[0], info[1])
return info.read_all (abbrevs, strings)
def test (path):
import parse_elf
import sys
global info
info_iter = read (path, parse_elf.go (path))
if not info_iter:
sys.stderr.write ('no debugging information present\n')
else:
for unit in info_iter:
print '-' * 75
unit.dump (sys.stdout)
if __name__ == '__main__':
import sys
test (sys.argv[1])
| children = None | conditional_block |
parse_dwarf.py | # -*- Mode: Python -*-
# Copyright (c) 2002-2011 IronPort Systems and Cisco Systems
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: this code mistakenly assumes the file it's looking at has the same
# endianness as the host. Please fix.
import struct
from pprint import pprint as pp
# DWARF tags
DW_TAG_array_type = 0x01
DW_TAG_class_type = 0x02
DW_TAG_entry_point = 0x03
DW_TAG_enumeration_type = 0x04
DW_TAG_formal_parameter = 0x05
DW_TAG_imported_declaration = 0x08
DW_TAG_label = 0x0a
DW_TAG_lexical_block = 0x0b
DW_TAG_member = 0x0d
DW_TAG_pointer_type = 0x0f
DW_TAG_reference_type = 0x10
DW_TAG_compile_unit = 0x11
DW_TAG_string_type = 0x12
DW_TAG_structure_type = 0x13
DW_TAG_subroutine_type = 0x15
DW_TAG_typedef = 0x16
DW_TAG_union_type = 0x17
DW_TAG_unspecified_parameters = 0x18
DW_TAG_variant = 0x19
DW_TAG_common_block = 0x1a
DW_TAG_common_inclusion = 0x1b
DW_TAG_inheritance = 0x1c
DW_TAG_inlined_subroutine = 0x1d
DW_TAG_module = 0x1e
DW_TAG_ptr_to_member_type = 0x1f
DW_TAG_set_type = 0x20
DW_TAG_subrange_type = 0x21
DW_TAG_with_stmt = 0x22
DW_TAG_access_declaration = 0x23
DW_TAG_base_type = 0x24
DW_TAG_catch_block = 0x25
DW_TAG_const_type = 0x26
DW_TAG_constant = 0x27
DW_TAG_enumerator = 0x28
DW_TAG_file_type = 0x29
DW_TAG_friend = 0x2a
DW_TAG_namelist = 0x2b
DW_TAG_namelist_item = 0x2c # DWARF3/2 spelling
DW_TAG_packed_type = 0x2d
DW_TAG_subprogram = 0x2e
DW_TAG_template_type_parameter = 0x2f # DWARF3/2 spelling
DW_TAG_template_value_parameter = 0x30 # DWARF3/2 spelling
DW_TAG_thrown_type = 0x31
DW_TAG_try_block = 0x32
DW_TAG_variant_part = 0x33
DW_TAG_variable = 0x34
DW_TAG_volatile_type = 0x35
DW_TAG_dwarf_procedure = 0x36 # DWARF3
DW_TAG_restrict_type = 0x37 # DWARF3
DW_TAG_interface_type = 0x38 # DWARF3
DW_TAG_namespace = 0x39 # DWARF3
DW_TAG_imported_module = 0x3a # DWARF3
DW_TAG_unspecified_type = 0x3b # DWARF3
DW_TAG_partial_unit = 0x3c # DWARF3
DW_TAG_imported_unit = 0x3d # DWARF3
DW_TAG_mutable_type = 0x3e # DWARF3
TAGS = {}
for name in dir():
if name.startswith ('DW_TAG_'):
TAGS[eval(name)] = name[7:]
# DWARF attributes
DW_AT_sibling = 0x01
DW_AT_location = 0x02
DW_AT_name = 0x03
DW_AT_ordering = 0x09
DW_AT_subscr_data = 0x0a
DW_AT_byte_size = 0x0b
DW_AT_bit_offset = 0x0c
DW_AT_bit_size = 0x0d
DW_AT_element_list = 0x0f
DW_AT_stmt_list = 0x10
DW_AT_low_pc = 0x11
DW_AT_high_pc = 0x12
DW_AT_language = 0x13
DW_AT_member = 0x14
DW_AT_discr = 0x15
DW_AT_discr_value = 0x16
DW_AT_visibility = 0x17
DW_AT_import = 0x18
DW_AT_string_length = 0x19
DW_AT_common_reference = 0x1a
DW_AT_comp_dir = 0x1b
DW_AT_const_value = 0x1c
DW_AT_containing_type = 0x1d
DW_AT_default_value = 0x1e
DW_AT_inline = 0x20
DW_AT_is_optional = 0x21
DW_AT_lower_bound = 0x22
DW_AT_producer = 0x25
DW_AT_prototyped = 0x27
DW_AT_return_addr = 0x2a
DW_AT_start_scope = 0x2c
DW_AT_stride_size = 0x2e
DW_AT_upper_bound = 0x2f
DW_AT_abstract_origin = 0x31
DW_AT_accessibility = 0x32
DW_AT_address_class = 0x33
DW_AT_artificial = 0x34
DW_AT_base_types = 0x35
DW_AT_calling_convention = 0x36
DW_AT_count = 0x37
DW_AT_data_member_location = 0x38
DW_AT_decl_column = 0x39
DW_AT_decl_file = 0x3a
DW_AT_decl_line = 0x3b
DW_AT_declaration = 0x3c
DW_AT_discr_list = 0x3d
DW_AT_encoding = 0x3e
DW_AT_external = 0x3f
DW_AT_frame_base = 0x40
DW_AT_friend = 0x41
DW_AT_identifier_case = 0x42
DW_AT_macro_info = 0x43
DW_AT_namelist_item = 0x44
DW_AT_priority = 0x45
DW_AT_segment = 0x46
DW_AT_specification = 0x47
DW_AT_static_link = 0x48
DW_AT_type = 0x49
DW_AT_use_location = 0x4a
DW_AT_variable_parameter = 0x4b
DW_AT_virtuality = 0x4c
DW_AT_vtable_elem_location = 0x4d
# these are supposed to be in DWARF3 only, but I'm seeing them
# in DWARF2 files?
DW_AT_allocated = 0x4e # DWARF3
DW_AT_associated = 0x4f # DWARF3
DW_AT_data_location = 0x50 # DWARF3
DW_AT_stride = 0x51 # DWARF3
DW_AT_entry_pc = 0x52 # DWARF3
DW_AT_use_UTF8 = 0x53 # DWARF3
DW_AT_extension = 0x54 # DWARF3
DW_AT_ranges = 0x55 # DWARF3
DW_AT_trampoline = 0x56 # DWARF3
DW_AT_call_column = 0x57 # DWARF3
DW_AT_call_file = 0x58 # DWARF3
DW_AT_call_line = 0x59 # DWARF3
DW_AT_description = 0x5a # DWARF3
# DWARF4
DW_AT_description = 0x5a
DW_AT_binary_scale = 0x5b
DW_AT_decimal_scale = 0x5c
DW_AT_small = 0x5d
DW_AT_decimal_sign = 0x5e
DW_AT_digit_count = 0x5f
DW_AT_picture_string = 0x60
DW_AT_mutable = 0x61
DW_AT_threads_scaled = 0x62
DW_AT_explicit = 0x63
DW_AT_object_pointer = 0x64
DW_AT_endianity = 0x65
DW_AT_elemental = 0x66
DW_AT_pure = 0x67
DW_AT_recursive = 0x68
DW_AT_signature = 0x69
DW_AT_main_subprogram = 0x6a
DW_AT_data_bit_offset = 0x6b
DW_AT_const_expr = 0x6c
DW_AT_enum_class = 0x6d
DW_AT_linkage_name = 0x6e
# gcc spits this one out at times
DW_AT_MIPS_linkage_name = 0x2007 # MIPS/SGI
ATS = {}
for name in dir():
if name.startswith ('DW_AT_'):
ATS[eval(name)] = name[6:]
DW_ATE_address = 0x01 | DW_ATE_boolean = 0x02
DW_ATE_complex_float = 0x03
DW_ATE_float = 0x04
DW_ATE_signed = 0x05
DW_ATE_signed_char = 0x06
DW_ATE_unsigned = 0x07
DW_ATE_unsigned_char = 0x08
DW_ATE_imaginary_float = 0x09
DW_ATE_packed_decimal = 0x0a
DW_ATE_numeric_string = 0x0b
DW_ATE_edited = 0x0c
DW_ATE_signed_fixed = 0x0d
DW_ATE_unsigned_fixed = 0x0e
ATES = {}
for name in dir():
if name.startswith ('DW_ATE_'):
ATES[eval(name)] = name[7:]
# DWARF forms
DW_FORM_addr = 0x01
DW_FORM_block2 = 0x03
DW_FORM_block4 = 0x04
DW_FORM_data2 = 0x05
DW_FORM_data4 = 0x06
DW_FORM_data8 = 0x07
DW_FORM_string = 0x08
DW_FORM_block = 0x09
DW_FORM_block1 = 0x0a
DW_FORM_data1 = 0x0b
DW_FORM_flag = 0x0c
DW_FORM_sdata = 0x0d
DW_FORM_strp = 0x0e
DW_FORM_udata = 0x0f
DW_FORM_ref_addr = 0x10
DW_FORM_ref1 = 0x11
DW_FORM_ref2 = 0x12
DW_FORM_ref4 = 0x13
DW_FORM_ref8 = 0x14
DW_FORM_ref_udata = 0x15
DW_FORM_indirect = 0x16
# DWARF 4
DW_FORM_sec_offset = 0x17
DW_FORM_exprloc = 0x18
DW_FORM_flag_present = 0x19
DW_FORM_ref_sig8 = 0x20
FORMS = {}
for name in dir():
if name.startswith ('DW_FORM_'):
FORMS[eval(name)] = name[8:]
header_spec = '=lhlb'
header_size = struct.calcsize (header_spec)
def read_uleb128 (f):
"read an 'unsigned little-endian base 128' from <f>"
result = 0
shift = 0
while 1:
byte = ord (f.read (1))
result |= (byte & 0x7f) << shift
if byte & 0x80:
shift += 7
else:
break
return result
def decode_uleb128 (s):
"parse an 'unsigned little-endian base 128' from string <s>"
result = 0
shift = 0
i = 0
while 1:
byte = ord (s[i]); i += 1
result |= (byte & 0x7f) << shift
if byte & 0x80:
shift += 7
else:
break
return result
def read_string (f):
"read null-terminated string from <f>"
r = []
while 1:
b = f.read (1)
if b == '\x00':
break
else:
r.append (b)
return ''.join (r)
def read_struct (f, s, n):
"read a struct of size <n> with spec <s> from <f>"
r = struct.unpack (s, f.read (n))
if len(r) == 1:
return r[0]
else:
return r
def read_addr (f, psize):
"read an address of length <psize>"
if psize == 4:
return read_struct (f, '=l', 4)
elif psize == 8:
return read_struct (f, '=q', 8)
else:
raise ValueError, "unsupported pointer size"
def read_block1 (f):
return f.read (read_struct (f, '=b', 1))
def read_block2 (f):
return f.read (read_struct (f, '=h', 2))
def read_block4 (f):
return f.read (read_struct (f, '=l', 4))
def read_block (f):
return f.read (read_uleb128 (f))
def read_flag (f):
return not (f.read (1) == '\x00')
def read_data1 (f):
return read_struct (f, '=B', 1)
def read_data2 (f):
return read_struct (f, '=H', 2)
def read_data4 (f):
return read_struct (f, '=L', 4)
def read_data8 (f):
return read_struct (f, '=Q', 8)
def read_ref1 (f):
return read_struct (f, '=B', 1)
def read_ref2 (f):
return read_struct (f, '=H', 2)
def read_ref4 (f):
return read_struct (f, '=L', 4)
def read_ref_udata (f):
return read_uleb128 (f)
def read_udata (f):
return read_uleb128 (f)
def read_flag_present (f):
return True
def decode_location (x):
# interpret form
if x[0] == '#':
return decode_uleb128 (x[1:])
elif x[0] == '\x91':
# XXX signed, but we'll cheat
return decode_uleb128 (x[1:])
elif x[0] == '\x03':
# DW_OP_ADDR
x = x[1:]
if len(x) == 4:
return struct.unpack ('=l', x)[0]
elif len(x) == 8:
return struct.unpack ('=q', x)[0]
else:
return 'DW_OP_ADDR:%s' % (x.encode ('hex'))
else:
return x
form_readers = {
DW_FORM_string: read_string,
DW_FORM_data1: read_data1,
DW_FORM_data2: read_data2,
DW_FORM_data4: read_data4,
DW_FORM_data8: read_data8,
DW_FORM_ref1: read_ref1,
DW_FORM_ref2: read_ref2,
DW_FORM_ref4: read_ref4,
DW_FORM_ref4: read_ref4,
DW_FORM_ref_udata: read_ref_udata,
DW_FORM_block1: read_block1,
DW_FORM_block2: read_block2,
DW_FORM_block4: read_block4,
DW_FORM_flag: read_flag,
DW_FORM_udata: read_udata,
# XXX: HACK - I'm too lazy to figure out
# how to sign-extend these numbers.
DW_FORM_sdata: read_udata,
DW_FORM_flag_present: read_flag_present, # DWARF4
}
class section:
def __init__ (self, path, offset, size):
self.path = path
self.file = open (path, 'rb')
self.offset = offset
self.size = size
self.file.seek (offset)
def __repr__ (self):
return '<%s "%s" at 0x%x>' % (self.__class__.__name__, self.path, id(self))
class string_section (section):
def get (self, pos):
self.file.seek (self.offset + pos)
return read_string (self.file)
class abbrev_section (section):
def read_cu (self):
"read a compilation unit entry from an abbrev section"
tag = read_uleb128 (self.file)
child = ord (self.file.read (1))
attrs = []
while 1:
attr = read_uleb128 (self.file)
form = read_uleb128 (self.file)
if (attr, form) == (0, 0):
break
else:
attrs.append ((attr, form))
return tag, child, attrs
def read (self, offset):
"read abbrev table at <offset>"
self.file.seek (self.offset + offset)
abbrevs = {}
while 1:
index = read_uleb128 (self.file)
if index == 0:
break
else:
abbrevs[index] = self.read_cu()
return abbrevs
class info_section (section):
def read_all (self, abbrevs, strings):
"generate a list of compile_unit objects"
# Made this an iterator because collecting all the
# debug info from a typical python binary eats up
# about 100MB of memory! Iterating over it one
# compile_unit at a time is much more manageable
while 1:
where = self.file.tell()
if where >= self.offset + self.size:
break
else:
tree, by_pos = self.read (abbrevs, strings)
yield (compile_unit (tree, by_pos))
def read (self, abbrevs, strings):
base = self.file.tell()
self.header = struct.unpack (header_spec, self.file.read (header_size))
self.length, self.version, self.abbr_offset, self.psize = self.header
if self.version > 2:
raise ValueError
abbrev_table = abbrevs.read (self.abbr_offset)
by_pos = {}
tree = self.read_tree (abbrev_table, strings, by_pos, 0, base)
return tree, by_pos
def read_tree (self, abbrev_table, strings, by_pos, depth, base):
f = self.file
tree = []
while 1:
where = f.tell() - base
index = read_uleb128 (f)
if not index:
# null index indicates the end of a list of siblings
return tree
# NOTE: each item in a list of siblings has a 'DW_AT_sibling'
# telling you the location of the next record. This can be
# used to skip over types you don't know or care about.
attrs = {}
tag, child, attr_forms = abbrev_table[index]
for attr, form in attr_forms:
# strp & addr are special-cased because they
# need extra context...
if form == DW_FORM_strp:
x = strings.get (read_struct (f, '=l', 4))
elif form == DW_FORM_addr:
x = read_addr (f, self.psize)
else:
x = form_readers[form](f)
# special-case these, which technically require interpreters
# for the stack language. however, gcc seems to only output
# the uleb128 & sleb128 versions...
if attr in (DW_AT_data_member_location, DW_AT_location):
if isinstance (x, str):
x = decode_location (x)
elif isinstance (x, int):
pass
else:
raise ValueError ("unexpected type in DW_AT_data_member_location/DW_AT_location")
try:
attrs[ATS[attr]] = x
except KeyError:
# lots of vendor-specific extensions
attrs[hex(attr)] = x
if child:
# recursively read the list of children of this node
children = self.read_tree (abbrev_table, strings, by_pos, depth + 1, base)
else:
children = None
if TAGS.has_key (tag):
item = (TAGS[tag], where, attrs, children)
else:
item = (tag, where, attrs, children)
by_pos[where] = item
tree.append (item)
if depth == 0:
# only one element at the top level, special-case it
return item
return tree
class compile_unit:
def __init__ (self, tree, by_pos):
self.tree = tree
tag, where, self.attrs, self.children = tree
assert (tag == 'compile_unit')
self.by_pos = by_pos
def __repr__ (self):
return '<compile_unit %r at 0x%x>' % (self.attrs['name'], id(self))
def dump (self, file):
self.dump_tree (file, self.tree, 0)
def __getitem__ (self, pos):
return self.by_pos[pos]
def dump_tree (self, file, ob, depth):
tag, where, attrs, children = ob
print '%6d%s %s' % (where, ' ' * depth, tag),
for attr, data in attrs.iteritems():
print '%s:%r' % (attr, data),
print
if children:
for child in children:
self.dump_tree (file, child, depth + 1)
# http://dwarfstd.org/dwarf-2.0.0.pdf
# see pg 95 for a good example of the relationship between the different sections
# see pg 71/72 for descriptions of DW_FORMs
# location descriptions: start on page 72, hopefully we don't need to implement
# the whole stack machine thing. Most of the offsets appear to be simple
# DW_OP_plus_uconst ('#'/0x23), which encodes as a uleb128
def read (path, elf_info):
"""read (<path>, <elf_info>) => <iterator>
generate a list of <compile_unit> objects for file <path>"""
ehdr, phdrs, shdrs, syms, core_info = elf_info
info = abbrev = strings = None
for shdr in shdrs:
if shdr['name'] == '.debug_info':
info = shdr['offset'], shdr['size']
if shdr['name'] == '.debug_abbrev':
abbrev = shdr['offset'], shdr['size']
if shdr['name'] == '.debug_str':
strings = shdr['offset'], shdr['size']
if not info:
return []
else:
abbrevs = abbrev_section (path, abbrev[0], abbrev[1])
if strings:
strings = string_section (path, strings[0], strings[1])
info = info_section (path, info[0], info[1])
return info.read_all (abbrevs, strings)
def test (path):
import parse_elf
import sys
global info
info_iter = read (path, parse_elf.go (path))
if not info_iter:
sys.stderr.write ('no debugging information present\n')
else:
for unit in info_iter:
print '-' * 75
unit.dump (sys.stdout)
if __name__ == '__main__':
import sys
test (sys.argv[1]) | random_line_split | |
report.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"encoding/json"
"fmt"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/scheduler/framework"
"log"
"sort"
"strings"
"time"
"github.com/ghodss/yaml"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
const (
ResourceNvidiaGPU v1.ResourceName = "nvdia.com/gpu"
)
type ClusterCapacityReview struct {
metav1.TypeMeta
Spec ClusterCapacityReviewSpec `json:"spec"`
Status ClusterCapacityReviewStatus `json:"status"`
}
type ClusterCapacityReviewSpec struct {
// the pod desired for scheduling
Templates []v1.Pod `json:"templates"`
PodRequirements []*Requirements `json:"podRequirements"`
}
type ClusterCapacityReviewStatus struct {
CreationTimestamp time.Time `json:"creationTimestamp"`
// actual number of replicas that could schedule
Replicas int32 `json:"replicas"`
FailReason *ClusterCapacityReviewScheduleFailReason `json:"failReason"`
// per node information about the scheduling simulation
Pods []*ClusterCapacityPodResult `json:"pods"`
Nodes []*ClusterCapacityNodeResult `json:"nodes"`
}
type PodReplicaCount map[string]int
type ClusterCapacityPodResult struct {
PodName string `json:"podName"`
// numbers of replicas on nodes
ReplicasOnNodes PodReplicaCount `json:"replicasOnNodes"`
// reason why no more pods could schedule (if any on this node)
FailSummary []FailReasonSummary `json:"failSummary"`
}
type NodeMap map[string]*framework.NodeInfo
type ClusterCapacityNodeResult struct {
NodeName string `json:"nodeName"`
Labels map[string]string `json:"labels"`
PodCount int `json:"podCount"`
Allocatable *framework.Resource `json:"allocatable"`
Requested *framework.Resource `json:"requested"`
Limits *framework.Resource `json:"limits"`
}
type FailReasonSummary struct {
Reason string `json:"reason"`
Count int `json:"count"`
}
type Resources struct {
PrimaryResources v1.ResourceList `json:"primaryResources"`
ScalarResources map[v1.ResourceName]int64 `json:"scalarResources"`
}
type Requirements struct {
PodName string `json:"podName"`
Resources *Resources `json:"resources"`
Limits *Resources `json:"limits"`
NodeSelectors map[string]string `json:"nodeSelectors"`
}
type ClusterCapacityReviewScheduleFailReason struct {
FailType string `json:"failType"`
FailMessage string `json:"failMessage"`
}
func getMainFailReason(message string) *ClusterCapacityReviewScheduleFailReason {
slicedMessage := strings.Split(message, "\n")
colon := strings.Index(slicedMessage[0], ":")
fail := &ClusterCapacityReviewScheduleFailReason{
FailType: slicedMessage[0][:colon],
FailMessage: strings.Trim(slicedMessage[0][colon+1:], " "),
}
return fail
}
func getResourceRequest(pod *v1.Pod) *Resources {
result := newResources()
for _, container := range pod.Spec.Containers {
appendResources(result, container.Resources.Requests)
}
return result
}
func getResourceLimit(pod *v1.Pod) *Resources {
result := newResources()
for _, container := range pod.Spec.Containers {
appendResources(result, container.Resources.Limits) | return result
}
func newResources() *Resources {
return &Resources{
PrimaryResources: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): *resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceName(v1.ResourceMemory): *resource.NewQuantity(0, resource.BinarySI),
v1.ResourceName(v1.ResourceEphemeralStorage): *resource.NewQuantity(0, resource.BinarySI),
v1.ResourceName(ResourceNvidiaGPU): *resource.NewMilliQuantity(0, resource.DecimalSI),
},
}
}
func appendResources(dest *Resources, src v1.ResourceList) {
for rName, rQuantity := range src {
switch rName {
case v1.ResourceMemory:
rQuantity.Add(*(dest.PrimaryResources.Memory()))
dest.PrimaryResources[v1.ResourceMemory] = rQuantity
case v1.ResourceCPU:
rQuantity.Add(*(dest.PrimaryResources.Cpu()))
dest.PrimaryResources[v1.ResourceCPU] = rQuantity
case v1.ResourceEphemeralStorage:
rQuantity.Add(*(dest.PrimaryResources.StorageEphemeral()))
dest.PrimaryResources[v1.ResourceEphemeralStorage] = rQuantity
case v1.ResourceStorage:
rQuantity.Add(*(dest.PrimaryResources.Storage()))
dest.PrimaryResources[v1.ResourceStorage] = rQuantity
//case v1.ResourceNvidiaGPU:
// rQuantity.Add(*(result.PrimaryResources.NvidiaGPU()))
// result.PrimaryResources[v1.ResourceNvidiaGPU] = rQuantity
default:
if schedutil.IsScalarResourceName(rName) {
// Lazily allocate this map only if required.
if dest.ScalarResources == nil {
dest.ScalarResources = map[v1.ResourceName]int64{}
}
dest.ScalarResources[rName] += rQuantity.Value()
}
}
}
}
func parseNodesReview(nodes NodeMap) []*ClusterCapacityNodeResult {
// sort nodes by name
nodeNames := make([]string, len(nodes), len(nodes))
nodeIdx := 0
for key, _ := range nodes {
nodeNames[nodeIdx] = key
nodeIdx++
}
sort.Strings(nodeNames)
result := make([]*ClusterCapacityNodeResult, len(nodes), len(nodes))
for i, key := range nodeNames {
node := nodes[key]
limits := newResources()
for _, pod := range node.Pods {
appendResources(limits, getResourceLimit(pod.Pod).PrimaryResources)
}
result[i] = &ClusterCapacityNodeResult{
NodeName: key,
Labels: node.Node().Labels,
PodCount: len(node.Pods),
Allocatable: node.Allocatable,
Requested: node.Requested,
Limits: &framework.Resource{
MilliCPU: limits.PrimaryResources.Cpu().MilliValue(),
Memory: limits.PrimaryResources.Memory().Value(),
EphemeralStorage: limits.PrimaryResources.StorageEphemeral().Value(),
ScalarResources: limits.ScalarResources,
},
}
}
return result
}
func parsePodsReview(templatePods []*v1.Pod, status Status) []*ClusterCapacityPodResult {
results := map[string]*ClusterCapacityPodResult{}
for _, tmpl := range templatePods {
results[tmpl.Name] = &ClusterCapacityPodResult{
ReplicasOnNodes: PodReplicaCount{},
PodName: tmpl.Name,
}
}
for _, pod := range status.Pods {
tmplName, tFound := pod.ObjectMeta.Annotations[podTemplate]
if !tFound {
log.Fatal(fmt.Errorf("pod template annotation missing"))
}
result, rFound := results[tmplName]
if !rFound {
log.Fatal(fmt.Errorf("unknown pod template: %s", tmplName))
}
result.ReplicasOnNodes[pod.Spec.NodeName]++
}
resultSlc := make([]*ClusterCapacityPodResult, 0)
for _, v := range results {
resultSlc = append(resultSlc, v)
}
return resultSlc
}
func getPodsRequirements(pods []*v1.Pod) []*Requirements {
result := make([]*Requirements, 0)
for _, pod := range pods {
podRequirements := &Requirements{
PodName: pod.Name,
Resources: getResourceRequest(pod),
Limits: getResourceLimit(pod),
NodeSelectors: pod.Spec.NodeSelector,
}
result = append(result, podRequirements)
}
return result
}
func deepCopyPods(in []*v1.Pod, out []v1.Pod) {
for i, pod := range in {
out[i] = *pod.DeepCopy()
}
}
func getReviewSpec(podTemplates []*v1.Pod) ClusterCapacityReviewSpec {
podCopies := make([]v1.Pod, len(podTemplates))
deepCopyPods(podTemplates, podCopies)
return ClusterCapacityReviewSpec{
Templates: podCopies,
PodRequirements: getPodsRequirements(podTemplates),
}
}
func getReviewStatus(pods []*v1.Pod, nodes NodeMap, status Status) ClusterCapacityReviewStatus {
return ClusterCapacityReviewStatus{
CreationTimestamp: time.Now(),
Replicas: int32(len(status.Pods)),
FailReason: getMainFailReason(status.StopReason),
Pods: parsePodsReview(pods, status),
Nodes: parseNodesReview(nodes),
}
}
func GetReport(pods []*v1.Pod, nodes NodeMap, status Status) *ClusterCapacityReview {
return &ClusterCapacityReview{
Spec: getReviewSpec(pods),
Status: getReviewStatus(pods, nodes, status),
}
}
func instancesSum(replicasOnNodes PodReplicaCount) int {
result := 0
for _, v := range replicasOnNodes {
result += v
}
return result
}
func clusterCapacityReviewPrettyPrint(r *ClusterCapacityReview, nodeLabels []string, verbose bool) {
if verbose {
fmt.Println("========== Simulation spec")
for _, req := range r.Spec.PodRequirements {
fmt.Printf("%v\n", req.PodName)
fmt.Printf("\trequests:\n")
printResources(req.Resources)
fmt.Printf("\tlimits:\n")
printResources(req.Limits)
if req.NodeSelectors != nil {
fmt.Printf("\t- NodeSelector: %v\n", labels.SelectorFromSet(labels.Set(req.NodeSelectors)).String())
}
fmt.Println("\n========== Simulation result")
}
}
for _, pod := range r.Status.Pods {
if verbose {
fmt.Printf("The cluster can schedule %v instance(s) of the pod %v.\n", instancesSum(pod.ReplicasOnNodes), pod.PodName)
} else {
fmt.Printf("%v\n", instancesSum(pod.ReplicasOnNodes))
}
}
if verbose {
fmt.Printf("\nTermination reason: %v: %v\n", r.Status.FailReason.FailType, r.Status.FailReason.FailMessage)
}
if verbose && r.Status.Replicas > 0 {
for _, pod := range r.Status.Pods {
if pod.FailSummary != nil {
fmt.Printf("fit failure summary on nodes: ")
for _, fs := range pod.FailSummary {
fmt.Printf("%v (%v), ", fs.Reason, fs.Count)
}
fmt.Printf("\n")
}
}
fmt.Printf("\nPod distribution among nodes:\n")
for _, pod := range r.Status.Pods {
fmt.Printf("%v\n", pod.PodName)
for node, count := range pod.ReplicasOnNodes {
fmt.Printf("\t- %v: %v instance(s)\n", node, count)
}
}
printNodeCapacity(r.Status.Nodes)
printClusterCapacity("========== Cluster capacity", r.Status.Nodes)
printLabeledCapacity(nodeLabels, r.Status.Nodes)
}
}
func printLabeledCapacity(nodeLabels []string, nodes []*ClusterCapacityNodeResult) {
labeledResults := map[string][]*ClusterCapacityNodeResult{}
for _, node := range nodes {
for _, label := range nodeLabels {
value, ok := node.Labels[label]
if !ok {
continue
}
resultName := fmt.Sprintf("%s:%s", label, value)
labeledResults[resultName] = append(labeledResults[resultName], node)
}
}
for label, results := range labeledResults {
printClusterCapacity(label, results)
}
}
func printClusterCapacity(title string, nodes []*ClusterCapacityNodeResult) {
var (
clusterCPUAllocatable, clusterCPURequested, clusterCPULimit,
clusterMemoryAllocatable, clusterMemoryRequested, clusterMemoryLimit,
clusterStorageAllocatable, clusterStorageRequested, clusterStorageLimit int64
)
for _, node := range nodes {
clusterCPUAllocatable += node.Allocatable.MilliCPU
clusterCPURequested += node.Requested.MilliCPU
clusterCPULimit += node.Limits.MilliCPU
clusterMemoryAllocatable += node.Allocatable.Memory
clusterMemoryRequested += node.Requested.Memory
clusterMemoryLimit += node.Limits.Memory
clusterStorageAllocatable += node.Allocatable.EphemeralStorage
clusterStorageRequested += node.Requested.EphemeralStorage
clusterStorageLimit += node.Limits.EphemeralStorage
}
fmt.Printf("\n%s:\n", title)
printCapacity(clusterCPUAllocatable, clusterCPURequested, clusterCPULimit, "CPU", "m")
printCapacity(clusterMemoryAllocatable, clusterMemoryRequested, clusterMemoryLimit, "Memory", "bytes")
printCapacity(clusterStorageAllocatable, clusterStorageRequested, clusterStorageLimit, "EphemeralStorage", "bytes")
}
func printNodeCapacity(nodes []*ClusterCapacityNodeResult) {
fmt.Printf("\n========== Node capacity\n")
for _, node := range nodes {
fmt.Printf("%s\n", node.NodeName)
fmt.Printf("\t- pod count: %v\n", node.PodCount)
printCapacity(node.Allocatable.MilliCPU, node.Requested.MilliCPU, node.Limits.MilliCPU, "CPU", "m")
printCapacity(node.Allocatable.Memory, node.Requested.Memory, node.Limits.Memory, "Memory", "bytes")
printCapacity(node.Allocatable.EphemeralStorage, node.Requested.EphemeralStorage, node.Limits.EphemeralStorage, "EphemeralStorage", "bytes")
}
}
func printCapacity(allocatable, requested, limit int64, label, unit string) {
cap := float64(requested) / float64(allocatable) * 100
fmt.Printf("\t- %s requested: %v%s/%v%s %.2f%% allocated\n",
label, requested, unit, allocatable, unit, cap)
commit := float64(limit) / float64(allocatable) * 100
fmt.Printf("\t- %s limited: %v%s/%v%s %.2f%% allocated\n",
label, limit, unit, allocatable, unit, commit)
}
func printResources(resources *Resources) {
fmt.Printf("\t\t- CPU: %v\n", resources.PrimaryResources.Cpu().String())
fmt.Printf("\t\t- Memory: %v\n", resources.PrimaryResources.Memory().String())
if resources.PrimaryResources.StorageEphemeral() != nil {
fmt.Printf("\t\t- Ephemeral Storage: %v\n", resources.PrimaryResources.StorageEphemeral().String())
}
//if !req.Resources.PrimaryResources.NvidiaGPU().IsZero() {
// fmt.Printf("\t- NvidiaGPU: %v\n", req.Resources.PrimaryResources.NvidiaGPU().String())
//}
if resources.ScalarResources != nil {
fmt.Printf("\t\t- ScalarResources: %v\n", resources.ScalarResources)
}
}
func clusterCapacityReviewPrintJson(r *ClusterCapacityReview) error {
jsoned, err := json.Marshal(r)
if err != nil {
return fmt.Errorf("Failed to create json: %v", err)
}
fmt.Println(string(jsoned))
return nil
}
func clusterCapacityReviewPrintYaml(r *ClusterCapacityReview) error {
yamled, err := yaml.Marshal(r)
if err != nil {
return fmt.Errorf("Failed to create yaml: %v", err)
}
fmt.Print(string(yamled))
return nil
}
func ClusterCapacityReviewPrint(r *ClusterCapacityReview, nodeLabels []string, verbose bool, format string) error {
switch format {
case "json":
return clusterCapacityReviewPrintJson(r)
case "yaml":
return clusterCapacityReviewPrintYaml(r)
case "":
clusterCapacityReviewPrettyPrint(r, nodeLabels, verbose)
return nil
default:
return fmt.Errorf("output format %q not recognized", format)
}
} | } | random_line_split |
report.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"encoding/json"
"fmt"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/scheduler/framework"
"log"
"sort"
"strings"
"time"
"github.com/ghodss/yaml"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
const (
ResourceNvidiaGPU v1.ResourceName = "nvdia.com/gpu"
)
type ClusterCapacityReview struct {
metav1.TypeMeta
Spec ClusterCapacityReviewSpec `json:"spec"`
Status ClusterCapacityReviewStatus `json:"status"`
}
type ClusterCapacityReviewSpec struct {
// the pod desired for scheduling
Templates []v1.Pod `json:"templates"`
PodRequirements []*Requirements `json:"podRequirements"`
}
type ClusterCapacityReviewStatus struct {
CreationTimestamp time.Time `json:"creationTimestamp"`
// actual number of replicas that could schedule
Replicas int32 `json:"replicas"`
FailReason *ClusterCapacityReviewScheduleFailReason `json:"failReason"`
// per node information about the scheduling simulation
Pods []*ClusterCapacityPodResult `json:"pods"`
Nodes []*ClusterCapacityNodeResult `json:"nodes"`
}
type PodReplicaCount map[string]int
type ClusterCapacityPodResult struct {
PodName string `json:"podName"`
// numbers of replicas on nodes
ReplicasOnNodes PodReplicaCount `json:"replicasOnNodes"`
// reason why no more pods could schedule (if any on this node)
FailSummary []FailReasonSummary `json:"failSummary"`
}
type NodeMap map[string]*framework.NodeInfo
type ClusterCapacityNodeResult struct {
NodeName string `json:"nodeName"`
Labels map[string]string `json:"labels"`
PodCount int `json:"podCount"`
Allocatable *framework.Resource `json:"allocatable"`
Requested *framework.Resource `json:"requested"`
Limits *framework.Resource `json:"limits"`
}
type FailReasonSummary struct {
Reason string `json:"reason"`
Count int `json:"count"`
}
type Resources struct {
PrimaryResources v1.ResourceList `json:"primaryResources"`
ScalarResources map[v1.ResourceName]int64 `json:"scalarResources"`
}
type Requirements struct {
PodName string `json:"podName"`
Resources *Resources `json:"resources"`
Limits *Resources `json:"limits"`
NodeSelectors map[string]string `json:"nodeSelectors"`
}
type ClusterCapacityReviewScheduleFailReason struct {
FailType string `json:"failType"`
FailMessage string `json:"failMessage"`
}
func getMainFailReason(message string) *ClusterCapacityReviewScheduleFailReason {
slicedMessage := strings.Split(message, "\n")
colon := strings.Index(slicedMessage[0], ":")
fail := &ClusterCapacityReviewScheduleFailReason{
FailType: slicedMessage[0][:colon],
FailMessage: strings.Trim(slicedMessage[0][colon+1:], " "),
}
return fail
}
func getResourceRequest(pod *v1.Pod) *Resources {
result := newResources()
for _, container := range pod.Spec.Containers {
appendResources(result, container.Resources.Requests)
}
return result
}
func getResourceLimit(pod *v1.Pod) *Resources {
result := newResources()
for _, container := range pod.Spec.Containers {
appendResources(result, container.Resources.Limits)
}
return result
}
func newResources() *Resources {
return &Resources{
PrimaryResources: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): *resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceName(v1.ResourceMemory): *resource.NewQuantity(0, resource.BinarySI),
v1.ResourceName(v1.ResourceEphemeralStorage): *resource.NewQuantity(0, resource.BinarySI),
v1.ResourceName(ResourceNvidiaGPU): *resource.NewMilliQuantity(0, resource.DecimalSI),
},
}
}
func appendResources(dest *Resources, src v1.ResourceList) {
for rName, rQuantity := range src {
switch rName {
case v1.ResourceMemory:
rQuantity.Add(*(dest.PrimaryResources.Memory()))
dest.PrimaryResources[v1.ResourceMemory] = rQuantity
case v1.ResourceCPU:
rQuantity.Add(*(dest.PrimaryResources.Cpu()))
dest.PrimaryResources[v1.ResourceCPU] = rQuantity
case v1.ResourceEphemeralStorage:
rQuantity.Add(*(dest.PrimaryResources.StorageEphemeral()))
dest.PrimaryResources[v1.ResourceEphemeralStorage] = rQuantity
case v1.ResourceStorage:
rQuantity.Add(*(dest.PrimaryResources.Storage()))
dest.PrimaryResources[v1.ResourceStorage] = rQuantity
//case v1.ResourceNvidiaGPU:
// rQuantity.Add(*(result.PrimaryResources.NvidiaGPU()))
// result.PrimaryResources[v1.ResourceNvidiaGPU] = rQuantity
default:
if schedutil.IsScalarResourceName(rName) {
// Lazily allocate this map only if required.
if dest.ScalarResources == nil {
dest.ScalarResources = map[v1.ResourceName]int64{}
}
dest.ScalarResources[rName] += rQuantity.Value()
}
}
}
}
func parseNodesReview(nodes NodeMap) []*ClusterCapacityNodeResult {
// sort nodes by name
nodeNames := make([]string, len(nodes), len(nodes))
nodeIdx := 0
for key, _ := range nodes {
nodeNames[nodeIdx] = key
nodeIdx++
}
sort.Strings(nodeNames)
result := make([]*ClusterCapacityNodeResult, len(nodes), len(nodes))
for i, key := range nodeNames {
node := nodes[key]
limits := newResources()
for _, pod := range node.Pods {
appendResources(limits, getResourceLimit(pod.Pod).PrimaryResources)
}
result[i] = &ClusterCapacityNodeResult{
NodeName: key,
Labels: node.Node().Labels,
PodCount: len(node.Pods),
Allocatable: node.Allocatable,
Requested: node.Requested,
Limits: &framework.Resource{
MilliCPU: limits.PrimaryResources.Cpu().MilliValue(),
Memory: limits.PrimaryResources.Memory().Value(),
EphemeralStorage: limits.PrimaryResources.StorageEphemeral().Value(),
ScalarResources: limits.ScalarResources,
},
}
}
return result
}
func parsePodsReview(templatePods []*v1.Pod, status Status) []*ClusterCapacityPodResult {
results := map[string]*ClusterCapacityPodResult{}
for _, tmpl := range templatePods {
results[tmpl.Name] = &ClusterCapacityPodResult{
ReplicasOnNodes: PodReplicaCount{},
PodName: tmpl.Name,
}
}
for _, pod := range status.Pods {
tmplName, tFound := pod.ObjectMeta.Annotations[podTemplate]
if !tFound {
log.Fatal(fmt.Errorf("pod template annotation missing"))
}
result, rFound := results[tmplName]
if !rFound {
log.Fatal(fmt.Errorf("unknown pod template: %s", tmplName))
}
result.ReplicasOnNodes[pod.Spec.NodeName]++
}
resultSlc := make([]*ClusterCapacityPodResult, 0)
for _, v := range results {
resultSlc = append(resultSlc, v)
}
return resultSlc
}
func getPodsRequirements(pods []*v1.Pod) []*Requirements {
result := make([]*Requirements, 0)
for _, pod := range pods {
podRequirements := &Requirements{
PodName: pod.Name,
Resources: getResourceRequest(pod),
Limits: getResourceLimit(pod),
NodeSelectors: pod.Spec.NodeSelector,
}
result = append(result, podRequirements)
}
return result
}
func deepCopyPods(in []*v1.Pod, out []v1.Pod) {
for i, pod := range in {
out[i] = *pod.DeepCopy()
}
}
func getReviewSpec(podTemplates []*v1.Pod) ClusterCapacityReviewSpec {
podCopies := make([]v1.Pod, len(podTemplates))
deepCopyPods(podTemplates, podCopies)
return ClusterCapacityReviewSpec{
Templates: podCopies,
PodRequirements: getPodsRequirements(podTemplates),
}
}
func getReviewStatus(pods []*v1.Pod, nodes NodeMap, status Status) ClusterCapacityReviewStatus {
return ClusterCapacityReviewStatus{
CreationTimestamp: time.Now(),
Replicas: int32(len(status.Pods)),
FailReason: getMainFailReason(status.StopReason),
Pods: parsePodsReview(pods, status),
Nodes: parseNodesReview(nodes),
}
}
func GetReport(pods []*v1.Pod, nodes NodeMap, status Status) *ClusterCapacityReview {
return &ClusterCapacityReview{
Spec: getReviewSpec(pods),
Status: getReviewStatus(pods, nodes, status),
}
}
func instancesSum(replicasOnNodes PodReplicaCount) int {
result := 0
for _, v := range replicasOnNodes {
result += v
}
return result
}
func clusterCapacityReviewPrettyPrint(r *ClusterCapacityReview, nodeLabels []string, verbose bool) {
if verbose {
fmt.Println("========== Simulation spec")
for _, req := range r.Spec.PodRequirements {
fmt.Printf("%v\n", req.PodName)
fmt.Printf("\trequests:\n")
printResources(req.Resources)
fmt.Printf("\tlimits:\n")
printResources(req.Limits)
if req.NodeSelectors != nil {
fmt.Printf("\t- NodeSelector: %v\n", labels.SelectorFromSet(labels.Set(req.NodeSelectors)).String())
}
fmt.Println("\n========== Simulation result")
}
}
for _, pod := range r.Status.Pods {
if verbose {
fmt.Printf("The cluster can schedule %v instance(s) of the pod %v.\n", instancesSum(pod.ReplicasOnNodes), pod.PodName)
} else {
fmt.Printf("%v\n", instancesSum(pod.ReplicasOnNodes))
}
}
if verbose {
fmt.Printf("\nTermination reason: %v: %v\n", r.Status.FailReason.FailType, r.Status.FailReason.FailMessage)
}
if verbose && r.Status.Replicas > 0 {
for _, pod := range r.Status.Pods {
if pod.FailSummary != nil {
fmt.Printf("fit failure summary on nodes: ")
for _, fs := range pod.FailSummary {
fmt.Printf("%v (%v), ", fs.Reason, fs.Count)
}
fmt.Printf("\n")
}
}
fmt.Printf("\nPod distribution among nodes:\n")
for _, pod := range r.Status.Pods {
fmt.Printf("%v\n", pod.PodName)
for node, count := range pod.ReplicasOnNodes {
fmt.Printf("\t- %v: %v instance(s)\n", node, count)
}
}
printNodeCapacity(r.Status.Nodes)
printClusterCapacity("========== Cluster capacity", r.Status.Nodes)
printLabeledCapacity(nodeLabels, r.Status.Nodes)
}
}
func printLabeledCapacity(nodeLabels []string, nodes []*ClusterCapacityNodeResult) {
labeledResults := map[string][]*ClusterCapacityNodeResult{}
for _, node := range nodes {
for _, label := range nodeLabels {
value, ok := node.Labels[label]
if !ok {
continue
}
resultName := fmt.Sprintf("%s:%s", label, value)
labeledResults[resultName] = append(labeledResults[resultName], node)
}
}
for label, results := range labeledResults {
printClusterCapacity(label, results)
}
}
func printClusterCapacity(title string, nodes []*ClusterCapacityNodeResult) {
var (
clusterCPUAllocatable, clusterCPURequested, clusterCPULimit,
clusterMemoryAllocatable, clusterMemoryRequested, clusterMemoryLimit,
clusterStorageAllocatable, clusterStorageRequested, clusterStorageLimit int64
)
for _, node := range nodes {
clusterCPUAllocatable += node.Allocatable.MilliCPU
clusterCPURequested += node.Requested.MilliCPU
clusterCPULimit += node.Limits.MilliCPU
clusterMemoryAllocatable += node.Allocatable.Memory
clusterMemoryRequested += node.Requested.Memory
clusterMemoryLimit += node.Limits.Memory
clusterStorageAllocatable += node.Allocatable.EphemeralStorage
clusterStorageRequested += node.Requested.EphemeralStorage
clusterStorageLimit += node.Limits.EphemeralStorage
}
fmt.Printf("\n%s:\n", title)
printCapacity(clusterCPUAllocatable, clusterCPURequested, clusterCPULimit, "CPU", "m")
printCapacity(clusterMemoryAllocatable, clusterMemoryRequested, clusterMemoryLimit, "Memory", "bytes")
printCapacity(clusterStorageAllocatable, clusterStorageRequested, clusterStorageLimit, "EphemeralStorage", "bytes")
}
func printNodeCapacity(nodes []*ClusterCapacityNodeResult) {
fmt.Printf("\n========== Node capacity\n")
for _, node := range nodes {
fmt.Printf("%s\n", node.NodeName)
fmt.Printf("\t- pod count: %v\n", node.PodCount)
printCapacity(node.Allocatable.MilliCPU, node.Requested.MilliCPU, node.Limits.MilliCPU, "CPU", "m")
printCapacity(node.Allocatable.Memory, node.Requested.Memory, node.Limits.Memory, "Memory", "bytes")
printCapacity(node.Allocatable.EphemeralStorage, node.Requested.EphemeralStorage, node.Limits.EphemeralStorage, "EphemeralStorage", "bytes")
}
}
func printCapacity(allocatable, requested, limit int64, label, unit string) |
func printResources(resources *Resources) {
fmt.Printf("\t\t- CPU: %v\n", resources.PrimaryResources.Cpu().String())
fmt.Printf("\t\t- Memory: %v\n", resources.PrimaryResources.Memory().String())
if resources.PrimaryResources.StorageEphemeral() != nil {
fmt.Printf("\t\t- Ephemeral Storage: %v\n", resources.PrimaryResources.StorageEphemeral().String())
}
//if !req.Resources.PrimaryResources.NvidiaGPU().IsZero() {
// fmt.Printf("\t- NvidiaGPU: %v\n", req.Resources.PrimaryResources.NvidiaGPU().String())
//}
if resources.ScalarResources != nil {
fmt.Printf("\t\t- ScalarResources: %v\n", resources.ScalarResources)
}
}
func clusterCapacityReviewPrintJson(r *ClusterCapacityReview) error {
jsoned, err := json.Marshal(r)
if err != nil {
return fmt.Errorf("Failed to create json: %v", err)
}
fmt.Println(string(jsoned))
return nil
}
func clusterCapacityReviewPrintYaml(r *ClusterCapacityReview) error {
yamled, err := yaml.Marshal(r)
if err != nil {
return fmt.Errorf("Failed to create yaml: %v", err)
}
fmt.Print(string(yamled))
return nil
}
func ClusterCapacityReviewPrint(r *ClusterCapacityReview, nodeLabels []string, verbose bool, format string) error {
switch format {
case "json":
return clusterCapacityReviewPrintJson(r)
case "yaml":
return clusterCapacityReviewPrintYaml(r)
case "":
clusterCapacityReviewPrettyPrint(r, nodeLabels, verbose)
return nil
default:
return fmt.Errorf("output format %q not recognized", format)
}
}
| {
cap := float64(requested) / float64(allocatable) * 100
fmt.Printf("\t- %s requested: %v%s/%v%s %.2f%% allocated\n",
label, requested, unit, allocatable, unit, cap)
commit := float64(limit) / float64(allocatable) * 100
fmt.Printf("\t- %s limited: %v%s/%v%s %.2f%% allocated\n",
label, limit, unit, allocatable, unit, commit)
} | identifier_body |
report.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"encoding/json"
"fmt"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/scheduler/framework"
"log"
"sort"
"strings"
"time"
"github.com/ghodss/yaml"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
const (
ResourceNvidiaGPU v1.ResourceName = "nvdia.com/gpu"
)
type ClusterCapacityReview struct {
metav1.TypeMeta
Spec ClusterCapacityReviewSpec `json:"spec"`
Status ClusterCapacityReviewStatus `json:"status"`
}
type ClusterCapacityReviewSpec struct {
// the pod desired for scheduling
Templates []v1.Pod `json:"templates"`
PodRequirements []*Requirements `json:"podRequirements"`
}
type ClusterCapacityReviewStatus struct {
CreationTimestamp time.Time `json:"creationTimestamp"`
// actual number of replicas that could schedule
Replicas int32 `json:"replicas"`
FailReason *ClusterCapacityReviewScheduleFailReason `json:"failReason"`
// per node information about the scheduling simulation
Pods []*ClusterCapacityPodResult `json:"pods"`
Nodes []*ClusterCapacityNodeResult `json:"nodes"`
}
type PodReplicaCount map[string]int
type ClusterCapacityPodResult struct {
PodName string `json:"podName"`
// numbers of replicas on nodes
ReplicasOnNodes PodReplicaCount `json:"replicasOnNodes"`
// reason why no more pods could schedule (if any on this node)
FailSummary []FailReasonSummary `json:"failSummary"`
}
type NodeMap map[string]*framework.NodeInfo
type ClusterCapacityNodeResult struct {
NodeName string `json:"nodeName"`
Labels map[string]string `json:"labels"`
PodCount int `json:"podCount"`
Allocatable *framework.Resource `json:"allocatable"`
Requested *framework.Resource `json:"requested"`
Limits *framework.Resource `json:"limits"`
}
type FailReasonSummary struct {
Reason string `json:"reason"`
Count int `json:"count"`
}
type Resources struct {
PrimaryResources v1.ResourceList `json:"primaryResources"`
ScalarResources map[v1.ResourceName]int64 `json:"scalarResources"`
}
type Requirements struct {
PodName string `json:"podName"`
Resources *Resources `json:"resources"`
Limits *Resources `json:"limits"`
NodeSelectors map[string]string `json:"nodeSelectors"`
}
type ClusterCapacityReviewScheduleFailReason struct {
FailType string `json:"failType"`
FailMessage string `json:"failMessage"`
}
func getMainFailReason(message string) *ClusterCapacityReviewScheduleFailReason {
slicedMessage := strings.Split(message, "\n")
colon := strings.Index(slicedMessage[0], ":")
fail := &ClusterCapacityReviewScheduleFailReason{
FailType: slicedMessage[0][:colon],
FailMessage: strings.Trim(slicedMessage[0][colon+1:], " "),
}
return fail
}
func getResourceRequest(pod *v1.Pod) *Resources {
result := newResources()
for _, container := range pod.Spec.Containers {
appendResources(result, container.Resources.Requests)
}
return result
}
func getResourceLimit(pod *v1.Pod) *Resources {
result := newResources()
for _, container := range pod.Spec.Containers {
appendResources(result, container.Resources.Limits)
}
return result
}
func newResources() *Resources {
return &Resources{
PrimaryResources: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): *resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceName(v1.ResourceMemory): *resource.NewQuantity(0, resource.BinarySI),
v1.ResourceName(v1.ResourceEphemeralStorage): *resource.NewQuantity(0, resource.BinarySI),
v1.ResourceName(ResourceNvidiaGPU): *resource.NewMilliQuantity(0, resource.DecimalSI),
},
}
}
func appendResources(dest *Resources, src v1.ResourceList) {
for rName, rQuantity := range src {
switch rName {
case v1.ResourceMemory:
rQuantity.Add(*(dest.PrimaryResources.Memory()))
dest.PrimaryResources[v1.ResourceMemory] = rQuantity
case v1.ResourceCPU:
rQuantity.Add(*(dest.PrimaryResources.Cpu()))
dest.PrimaryResources[v1.ResourceCPU] = rQuantity
case v1.ResourceEphemeralStorage:
rQuantity.Add(*(dest.PrimaryResources.StorageEphemeral()))
dest.PrimaryResources[v1.ResourceEphemeralStorage] = rQuantity
case v1.ResourceStorage:
rQuantity.Add(*(dest.PrimaryResources.Storage()))
dest.PrimaryResources[v1.ResourceStorage] = rQuantity
//case v1.ResourceNvidiaGPU:
// rQuantity.Add(*(result.PrimaryResources.NvidiaGPU()))
// result.PrimaryResources[v1.ResourceNvidiaGPU] = rQuantity
default:
if schedutil.IsScalarResourceName(rName) {
// Lazily allocate this map only if required.
if dest.ScalarResources == nil {
dest.ScalarResources = map[v1.ResourceName]int64{}
}
dest.ScalarResources[rName] += rQuantity.Value()
}
}
}
}
func parseNodesReview(nodes NodeMap) []*ClusterCapacityNodeResult {
// sort nodes by name
nodeNames := make([]string, len(nodes), len(nodes))
nodeIdx := 0
for key, _ := range nodes {
nodeNames[nodeIdx] = key
nodeIdx++
}
sort.Strings(nodeNames)
result := make([]*ClusterCapacityNodeResult, len(nodes), len(nodes))
for i, key := range nodeNames {
node := nodes[key]
limits := newResources()
for _, pod := range node.Pods {
appendResources(limits, getResourceLimit(pod.Pod).PrimaryResources)
}
result[i] = &ClusterCapacityNodeResult{
NodeName: key,
Labels: node.Node().Labels,
PodCount: len(node.Pods),
Allocatable: node.Allocatable,
Requested: node.Requested,
Limits: &framework.Resource{
MilliCPU: limits.PrimaryResources.Cpu().MilliValue(),
Memory: limits.PrimaryResources.Memory().Value(),
EphemeralStorage: limits.PrimaryResources.StorageEphemeral().Value(),
ScalarResources: limits.ScalarResources,
},
}
}
return result
}
func parsePodsReview(templatePods []*v1.Pod, status Status) []*ClusterCapacityPodResult {
results := map[string]*ClusterCapacityPodResult{}
for _, tmpl := range templatePods {
results[tmpl.Name] = &ClusterCapacityPodResult{
ReplicasOnNodes: PodReplicaCount{},
PodName: tmpl.Name,
}
}
for _, pod := range status.Pods {
tmplName, tFound := pod.ObjectMeta.Annotations[podTemplate]
if !tFound {
log.Fatal(fmt.Errorf("pod template annotation missing"))
}
result, rFound := results[tmplName]
if !rFound {
log.Fatal(fmt.Errorf("unknown pod template: %s", tmplName))
}
result.ReplicasOnNodes[pod.Spec.NodeName]++
}
resultSlc := make([]*ClusterCapacityPodResult, 0)
for _, v := range results |
return resultSlc
}
func getPodsRequirements(pods []*v1.Pod) []*Requirements {
result := make([]*Requirements, 0)
for _, pod := range pods {
podRequirements := &Requirements{
PodName: pod.Name,
Resources: getResourceRequest(pod),
Limits: getResourceLimit(pod),
NodeSelectors: pod.Spec.NodeSelector,
}
result = append(result, podRequirements)
}
return result
}
func deepCopyPods(in []*v1.Pod, out []v1.Pod) {
for i, pod := range in {
out[i] = *pod.DeepCopy()
}
}
func getReviewSpec(podTemplates []*v1.Pod) ClusterCapacityReviewSpec {
podCopies := make([]v1.Pod, len(podTemplates))
deepCopyPods(podTemplates, podCopies)
return ClusterCapacityReviewSpec{
Templates: podCopies,
PodRequirements: getPodsRequirements(podTemplates),
}
}
func getReviewStatus(pods []*v1.Pod, nodes NodeMap, status Status) ClusterCapacityReviewStatus {
return ClusterCapacityReviewStatus{
CreationTimestamp: time.Now(),
Replicas: int32(len(status.Pods)),
FailReason: getMainFailReason(status.StopReason),
Pods: parsePodsReview(pods, status),
Nodes: parseNodesReview(nodes),
}
}
func GetReport(pods []*v1.Pod, nodes NodeMap, status Status) *ClusterCapacityReview {
return &ClusterCapacityReview{
Spec: getReviewSpec(pods),
Status: getReviewStatus(pods, nodes, status),
}
}
func instancesSum(replicasOnNodes PodReplicaCount) int {
result := 0
for _, v := range replicasOnNodes {
result += v
}
return result
}
func clusterCapacityReviewPrettyPrint(r *ClusterCapacityReview, nodeLabels []string, verbose bool) {
if verbose {
fmt.Println("========== Simulation spec")
for _, req := range r.Spec.PodRequirements {
fmt.Printf("%v\n", req.PodName)
fmt.Printf("\trequests:\n")
printResources(req.Resources)
fmt.Printf("\tlimits:\n")
printResources(req.Limits)
if req.NodeSelectors != nil {
fmt.Printf("\t- NodeSelector: %v\n", labels.SelectorFromSet(labels.Set(req.NodeSelectors)).String())
}
fmt.Println("\n========== Simulation result")
}
}
for _, pod := range r.Status.Pods {
if verbose {
fmt.Printf("The cluster can schedule %v instance(s) of the pod %v.\n", instancesSum(pod.ReplicasOnNodes), pod.PodName)
} else {
fmt.Printf("%v\n", instancesSum(pod.ReplicasOnNodes))
}
}
if verbose {
fmt.Printf("\nTermination reason: %v: %v\n", r.Status.FailReason.FailType, r.Status.FailReason.FailMessage)
}
if verbose && r.Status.Replicas > 0 {
for _, pod := range r.Status.Pods {
if pod.FailSummary != nil {
fmt.Printf("fit failure summary on nodes: ")
for _, fs := range pod.FailSummary {
fmt.Printf("%v (%v), ", fs.Reason, fs.Count)
}
fmt.Printf("\n")
}
}
fmt.Printf("\nPod distribution among nodes:\n")
for _, pod := range r.Status.Pods {
fmt.Printf("%v\n", pod.PodName)
for node, count := range pod.ReplicasOnNodes {
fmt.Printf("\t- %v: %v instance(s)\n", node, count)
}
}
printNodeCapacity(r.Status.Nodes)
printClusterCapacity("========== Cluster capacity", r.Status.Nodes)
printLabeledCapacity(nodeLabels, r.Status.Nodes)
}
}
func printLabeledCapacity(nodeLabels []string, nodes []*ClusterCapacityNodeResult) {
labeledResults := map[string][]*ClusterCapacityNodeResult{}
for _, node := range nodes {
for _, label := range nodeLabels {
value, ok := node.Labels[label]
if !ok {
continue
}
resultName := fmt.Sprintf("%s:%s", label, value)
labeledResults[resultName] = append(labeledResults[resultName], node)
}
}
for label, results := range labeledResults {
printClusterCapacity(label, results)
}
}
func printClusterCapacity(title string, nodes []*ClusterCapacityNodeResult) {
var (
clusterCPUAllocatable, clusterCPURequested, clusterCPULimit,
clusterMemoryAllocatable, clusterMemoryRequested, clusterMemoryLimit,
clusterStorageAllocatable, clusterStorageRequested, clusterStorageLimit int64
)
for _, node := range nodes {
clusterCPUAllocatable += node.Allocatable.MilliCPU
clusterCPURequested += node.Requested.MilliCPU
clusterCPULimit += node.Limits.MilliCPU
clusterMemoryAllocatable += node.Allocatable.Memory
clusterMemoryRequested += node.Requested.Memory
clusterMemoryLimit += node.Limits.Memory
clusterStorageAllocatable += node.Allocatable.EphemeralStorage
clusterStorageRequested += node.Requested.EphemeralStorage
clusterStorageLimit += node.Limits.EphemeralStorage
}
fmt.Printf("\n%s:\n", title)
printCapacity(clusterCPUAllocatable, clusterCPURequested, clusterCPULimit, "CPU", "m")
printCapacity(clusterMemoryAllocatable, clusterMemoryRequested, clusterMemoryLimit, "Memory", "bytes")
printCapacity(clusterStorageAllocatable, clusterStorageRequested, clusterStorageLimit, "EphemeralStorage", "bytes")
}
func printNodeCapacity(nodes []*ClusterCapacityNodeResult) {
fmt.Printf("\n========== Node capacity\n")
for _, node := range nodes {
fmt.Printf("%s\n", node.NodeName)
fmt.Printf("\t- pod count: %v\n", node.PodCount)
printCapacity(node.Allocatable.MilliCPU, node.Requested.MilliCPU, node.Limits.MilliCPU, "CPU", "m")
printCapacity(node.Allocatable.Memory, node.Requested.Memory, node.Limits.Memory, "Memory", "bytes")
printCapacity(node.Allocatable.EphemeralStorage, node.Requested.EphemeralStorage, node.Limits.EphemeralStorage, "EphemeralStorage", "bytes")
}
}
func printCapacity(allocatable, requested, limit int64, label, unit string) {
cap := float64(requested) / float64(allocatable) * 100
fmt.Printf("\t- %s requested: %v%s/%v%s %.2f%% allocated\n",
label, requested, unit, allocatable, unit, cap)
commit := float64(limit) / float64(allocatable) * 100
fmt.Printf("\t- %s limited: %v%s/%v%s %.2f%% allocated\n",
label, limit, unit, allocatable, unit, commit)
}
func printResources(resources *Resources) {
fmt.Printf("\t\t- CPU: %v\n", resources.PrimaryResources.Cpu().String())
fmt.Printf("\t\t- Memory: %v\n", resources.PrimaryResources.Memory().String())
if resources.PrimaryResources.StorageEphemeral() != nil {
fmt.Printf("\t\t- Ephemeral Storage: %v\n", resources.PrimaryResources.StorageEphemeral().String())
}
//if !req.Resources.PrimaryResources.NvidiaGPU().IsZero() {
// fmt.Printf("\t- NvidiaGPU: %v\n", req.Resources.PrimaryResources.NvidiaGPU().String())
//}
if resources.ScalarResources != nil {
fmt.Printf("\t\t- ScalarResources: %v\n", resources.ScalarResources)
}
}
func clusterCapacityReviewPrintJson(r *ClusterCapacityReview) error {
jsoned, err := json.Marshal(r)
if err != nil {
return fmt.Errorf("Failed to create json: %v", err)
}
fmt.Println(string(jsoned))
return nil
}
func clusterCapacityReviewPrintYaml(r *ClusterCapacityReview) error {
yamled, err := yaml.Marshal(r)
if err != nil {
return fmt.Errorf("Failed to create yaml: %v", err)
}
fmt.Print(string(yamled))
return nil
}
func ClusterCapacityReviewPrint(r *ClusterCapacityReview, nodeLabels []string, verbose bool, format string) error {
switch format {
case "json":
return clusterCapacityReviewPrintJson(r)
case "yaml":
return clusterCapacityReviewPrintYaml(r)
case "":
clusterCapacityReviewPrettyPrint(r, nodeLabels, verbose)
return nil
default:
return fmt.Errorf("output format %q not recognized", format)
}
}
| {
resultSlc = append(resultSlc, v)
} | conditional_block |
report.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"encoding/json"
"fmt"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/scheduler/framework"
"log"
"sort"
"strings"
"time"
"github.com/ghodss/yaml"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
const (
ResourceNvidiaGPU v1.ResourceName = "nvdia.com/gpu"
)
type ClusterCapacityReview struct {
metav1.TypeMeta
Spec ClusterCapacityReviewSpec `json:"spec"`
Status ClusterCapacityReviewStatus `json:"status"`
}
type ClusterCapacityReviewSpec struct {
// the pod desired for scheduling
Templates []v1.Pod `json:"templates"`
PodRequirements []*Requirements `json:"podRequirements"`
}
type ClusterCapacityReviewStatus struct {
CreationTimestamp time.Time `json:"creationTimestamp"`
// actual number of replicas that could schedule
Replicas int32 `json:"replicas"`
FailReason *ClusterCapacityReviewScheduleFailReason `json:"failReason"`
// per node information about the scheduling simulation
Pods []*ClusterCapacityPodResult `json:"pods"`
Nodes []*ClusterCapacityNodeResult `json:"nodes"`
}
type PodReplicaCount map[string]int
type ClusterCapacityPodResult struct {
PodName string `json:"podName"`
// numbers of replicas on nodes
ReplicasOnNodes PodReplicaCount `json:"replicasOnNodes"`
// reason why no more pods could schedule (if any on this node)
FailSummary []FailReasonSummary `json:"failSummary"`
}
type NodeMap map[string]*framework.NodeInfo
type ClusterCapacityNodeResult struct {
NodeName string `json:"nodeName"`
Labels map[string]string `json:"labels"`
PodCount int `json:"podCount"`
Allocatable *framework.Resource `json:"allocatable"`
Requested *framework.Resource `json:"requested"`
Limits *framework.Resource `json:"limits"`
}
type FailReasonSummary struct {
Reason string `json:"reason"`
Count int `json:"count"`
}
type Resources struct {
PrimaryResources v1.ResourceList `json:"primaryResources"`
ScalarResources map[v1.ResourceName]int64 `json:"scalarResources"`
}
type Requirements struct {
PodName string `json:"podName"`
Resources *Resources `json:"resources"`
Limits *Resources `json:"limits"`
NodeSelectors map[string]string `json:"nodeSelectors"`
}
type ClusterCapacityReviewScheduleFailReason struct {
FailType string `json:"failType"`
FailMessage string `json:"failMessage"`
}
func getMainFailReason(message string) *ClusterCapacityReviewScheduleFailReason {
slicedMessage := strings.Split(message, "\n")
colon := strings.Index(slicedMessage[0], ":")
fail := &ClusterCapacityReviewScheduleFailReason{
FailType: slicedMessage[0][:colon],
FailMessage: strings.Trim(slicedMessage[0][colon+1:], " "),
}
return fail
}
func | (pod *v1.Pod) *Resources {
result := newResources()
for _, container := range pod.Spec.Containers {
appendResources(result, container.Resources.Requests)
}
return result
}
func getResourceLimit(pod *v1.Pod) *Resources {
result := newResources()
for _, container := range pod.Spec.Containers {
appendResources(result, container.Resources.Limits)
}
return result
}
func newResources() *Resources {
return &Resources{
PrimaryResources: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): *resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceName(v1.ResourceMemory): *resource.NewQuantity(0, resource.BinarySI),
v1.ResourceName(v1.ResourceEphemeralStorage): *resource.NewQuantity(0, resource.BinarySI),
v1.ResourceName(ResourceNvidiaGPU): *resource.NewMilliQuantity(0, resource.DecimalSI),
},
}
}
func appendResources(dest *Resources, src v1.ResourceList) {
for rName, rQuantity := range src {
switch rName {
case v1.ResourceMemory:
rQuantity.Add(*(dest.PrimaryResources.Memory()))
dest.PrimaryResources[v1.ResourceMemory] = rQuantity
case v1.ResourceCPU:
rQuantity.Add(*(dest.PrimaryResources.Cpu()))
dest.PrimaryResources[v1.ResourceCPU] = rQuantity
case v1.ResourceEphemeralStorage:
rQuantity.Add(*(dest.PrimaryResources.StorageEphemeral()))
dest.PrimaryResources[v1.ResourceEphemeralStorage] = rQuantity
case v1.ResourceStorage:
rQuantity.Add(*(dest.PrimaryResources.Storage()))
dest.PrimaryResources[v1.ResourceStorage] = rQuantity
//case v1.ResourceNvidiaGPU:
// rQuantity.Add(*(result.PrimaryResources.NvidiaGPU()))
// result.PrimaryResources[v1.ResourceNvidiaGPU] = rQuantity
default:
if schedutil.IsScalarResourceName(rName) {
// Lazily allocate this map only if required.
if dest.ScalarResources == nil {
dest.ScalarResources = map[v1.ResourceName]int64{}
}
dest.ScalarResources[rName] += rQuantity.Value()
}
}
}
}
func parseNodesReview(nodes NodeMap) []*ClusterCapacityNodeResult {
// sort nodes by name
nodeNames := make([]string, len(nodes), len(nodes))
nodeIdx := 0
for key, _ := range nodes {
nodeNames[nodeIdx] = key
nodeIdx++
}
sort.Strings(nodeNames)
result := make([]*ClusterCapacityNodeResult, len(nodes), len(nodes))
for i, key := range nodeNames {
node := nodes[key]
limits := newResources()
for _, pod := range node.Pods {
appendResources(limits, getResourceLimit(pod.Pod).PrimaryResources)
}
result[i] = &ClusterCapacityNodeResult{
NodeName: key,
Labels: node.Node().Labels,
PodCount: len(node.Pods),
Allocatable: node.Allocatable,
Requested: node.Requested,
Limits: &framework.Resource{
MilliCPU: limits.PrimaryResources.Cpu().MilliValue(),
Memory: limits.PrimaryResources.Memory().Value(),
EphemeralStorage: limits.PrimaryResources.StorageEphemeral().Value(),
ScalarResources: limits.ScalarResources,
},
}
}
return result
}
func parsePodsReview(templatePods []*v1.Pod, status Status) []*ClusterCapacityPodResult {
results := map[string]*ClusterCapacityPodResult{}
for _, tmpl := range templatePods {
results[tmpl.Name] = &ClusterCapacityPodResult{
ReplicasOnNodes: PodReplicaCount{},
PodName: tmpl.Name,
}
}
for _, pod := range status.Pods {
tmplName, tFound := pod.ObjectMeta.Annotations[podTemplate]
if !tFound {
log.Fatal(fmt.Errorf("pod template annotation missing"))
}
result, rFound := results[tmplName]
if !rFound {
log.Fatal(fmt.Errorf("unknown pod template: %s", tmplName))
}
result.ReplicasOnNodes[pod.Spec.NodeName]++
}
resultSlc := make([]*ClusterCapacityPodResult, 0)
for _, v := range results {
resultSlc = append(resultSlc, v)
}
return resultSlc
}
func getPodsRequirements(pods []*v1.Pod) []*Requirements {
result := make([]*Requirements, 0)
for _, pod := range pods {
podRequirements := &Requirements{
PodName: pod.Name,
Resources: getResourceRequest(pod),
Limits: getResourceLimit(pod),
NodeSelectors: pod.Spec.NodeSelector,
}
result = append(result, podRequirements)
}
return result
}
func deepCopyPods(in []*v1.Pod, out []v1.Pod) {
for i, pod := range in {
out[i] = *pod.DeepCopy()
}
}
func getReviewSpec(podTemplates []*v1.Pod) ClusterCapacityReviewSpec {
podCopies := make([]v1.Pod, len(podTemplates))
deepCopyPods(podTemplates, podCopies)
return ClusterCapacityReviewSpec{
Templates: podCopies,
PodRequirements: getPodsRequirements(podTemplates),
}
}
func getReviewStatus(pods []*v1.Pod, nodes NodeMap, status Status) ClusterCapacityReviewStatus {
return ClusterCapacityReviewStatus{
CreationTimestamp: time.Now(),
Replicas: int32(len(status.Pods)),
FailReason: getMainFailReason(status.StopReason),
Pods: parsePodsReview(pods, status),
Nodes: parseNodesReview(nodes),
}
}
func GetReport(pods []*v1.Pod, nodes NodeMap, status Status) *ClusterCapacityReview {
return &ClusterCapacityReview{
Spec: getReviewSpec(pods),
Status: getReviewStatus(pods, nodes, status),
}
}
func instancesSum(replicasOnNodes PodReplicaCount) int {
result := 0
for _, v := range replicasOnNodes {
result += v
}
return result
}
func clusterCapacityReviewPrettyPrint(r *ClusterCapacityReview, nodeLabels []string, verbose bool) {
if verbose {
fmt.Println("========== Simulation spec")
for _, req := range r.Spec.PodRequirements {
fmt.Printf("%v\n", req.PodName)
fmt.Printf("\trequests:\n")
printResources(req.Resources)
fmt.Printf("\tlimits:\n")
printResources(req.Limits)
if req.NodeSelectors != nil {
fmt.Printf("\t- NodeSelector: %v\n", labels.SelectorFromSet(labels.Set(req.NodeSelectors)).String())
}
fmt.Println("\n========== Simulation result")
}
}
for _, pod := range r.Status.Pods {
if verbose {
fmt.Printf("The cluster can schedule %v instance(s) of the pod %v.\n", instancesSum(pod.ReplicasOnNodes), pod.PodName)
} else {
fmt.Printf("%v\n", instancesSum(pod.ReplicasOnNodes))
}
}
if verbose {
fmt.Printf("\nTermination reason: %v: %v\n", r.Status.FailReason.FailType, r.Status.FailReason.FailMessage)
}
if verbose && r.Status.Replicas > 0 {
for _, pod := range r.Status.Pods {
if pod.FailSummary != nil {
fmt.Printf("fit failure summary on nodes: ")
for _, fs := range pod.FailSummary {
fmt.Printf("%v (%v), ", fs.Reason, fs.Count)
}
fmt.Printf("\n")
}
}
fmt.Printf("\nPod distribution among nodes:\n")
for _, pod := range r.Status.Pods {
fmt.Printf("%v\n", pod.PodName)
for node, count := range pod.ReplicasOnNodes {
fmt.Printf("\t- %v: %v instance(s)\n", node, count)
}
}
printNodeCapacity(r.Status.Nodes)
printClusterCapacity("========== Cluster capacity", r.Status.Nodes)
printLabeledCapacity(nodeLabels, r.Status.Nodes)
}
}
func printLabeledCapacity(nodeLabels []string, nodes []*ClusterCapacityNodeResult) {
labeledResults := map[string][]*ClusterCapacityNodeResult{}
for _, node := range nodes {
for _, label := range nodeLabels {
value, ok := node.Labels[label]
if !ok {
continue
}
resultName := fmt.Sprintf("%s:%s", label, value)
labeledResults[resultName] = append(labeledResults[resultName], node)
}
}
for label, results := range labeledResults {
printClusterCapacity(label, results)
}
}
func printClusterCapacity(title string, nodes []*ClusterCapacityNodeResult) {
var (
clusterCPUAllocatable, clusterCPURequested, clusterCPULimit,
clusterMemoryAllocatable, clusterMemoryRequested, clusterMemoryLimit,
clusterStorageAllocatable, clusterStorageRequested, clusterStorageLimit int64
)
for _, node := range nodes {
clusterCPUAllocatable += node.Allocatable.MilliCPU
clusterCPURequested += node.Requested.MilliCPU
clusterCPULimit += node.Limits.MilliCPU
clusterMemoryAllocatable += node.Allocatable.Memory
clusterMemoryRequested += node.Requested.Memory
clusterMemoryLimit += node.Limits.Memory
clusterStorageAllocatable += node.Allocatable.EphemeralStorage
clusterStorageRequested += node.Requested.EphemeralStorage
clusterStorageLimit += node.Limits.EphemeralStorage
}
fmt.Printf("\n%s:\n", title)
printCapacity(clusterCPUAllocatable, clusterCPURequested, clusterCPULimit, "CPU", "m")
printCapacity(clusterMemoryAllocatable, clusterMemoryRequested, clusterMemoryLimit, "Memory", "bytes")
printCapacity(clusterStorageAllocatable, clusterStorageRequested, clusterStorageLimit, "EphemeralStorage", "bytes")
}
func printNodeCapacity(nodes []*ClusterCapacityNodeResult) {
fmt.Printf("\n========== Node capacity\n")
for _, node := range nodes {
fmt.Printf("%s\n", node.NodeName)
fmt.Printf("\t- pod count: %v\n", node.PodCount)
printCapacity(node.Allocatable.MilliCPU, node.Requested.MilliCPU, node.Limits.MilliCPU, "CPU", "m")
printCapacity(node.Allocatable.Memory, node.Requested.Memory, node.Limits.Memory, "Memory", "bytes")
printCapacity(node.Allocatable.EphemeralStorage, node.Requested.EphemeralStorage, node.Limits.EphemeralStorage, "EphemeralStorage", "bytes")
}
}
func printCapacity(allocatable, requested, limit int64, label, unit string) {
cap := float64(requested) / float64(allocatable) * 100
fmt.Printf("\t- %s requested: %v%s/%v%s %.2f%% allocated\n",
label, requested, unit, allocatable, unit, cap)
commit := float64(limit) / float64(allocatable) * 100
fmt.Printf("\t- %s limited: %v%s/%v%s %.2f%% allocated\n",
label, limit, unit, allocatable, unit, commit)
}
func printResources(resources *Resources) {
fmt.Printf("\t\t- CPU: %v\n", resources.PrimaryResources.Cpu().String())
fmt.Printf("\t\t- Memory: %v\n", resources.PrimaryResources.Memory().String())
if resources.PrimaryResources.StorageEphemeral() != nil {
fmt.Printf("\t\t- Ephemeral Storage: %v\n", resources.PrimaryResources.StorageEphemeral().String())
}
//if !req.Resources.PrimaryResources.NvidiaGPU().IsZero() {
// fmt.Printf("\t- NvidiaGPU: %v\n", req.Resources.PrimaryResources.NvidiaGPU().String())
//}
if resources.ScalarResources != nil {
fmt.Printf("\t\t- ScalarResources: %v\n", resources.ScalarResources)
}
}
func clusterCapacityReviewPrintJson(r *ClusterCapacityReview) error {
jsoned, err := json.Marshal(r)
if err != nil {
return fmt.Errorf("Failed to create json: %v", err)
}
fmt.Println(string(jsoned))
return nil
}
func clusterCapacityReviewPrintYaml(r *ClusterCapacityReview) error {
yamled, err := yaml.Marshal(r)
if err != nil {
return fmt.Errorf("Failed to create yaml: %v", err)
}
fmt.Print(string(yamled))
return nil
}
func ClusterCapacityReviewPrint(r *ClusterCapacityReview, nodeLabels []string, verbose bool, format string) error {
switch format {
case "json":
return clusterCapacityReviewPrintJson(r)
case "yaml":
return clusterCapacityReviewPrintYaml(r)
case "":
clusterCapacityReviewPrettyPrint(r, nodeLabels, verbose)
return nil
default:
return fmt.Errorf("output format %q not recognized", format)
}
}
| getResourceRequest | identifier_name |
lib.rs | #![cfg_attr(not(feature = "std"), no_std)]
// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256.
#![recursion_limit="256"]
// Make the WASM binary available.
#[cfg(feature = "std")]
include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
use sp_std::prelude::*;
use sp_core::{
crypto::KeyTypeId, OpaqueMetadata,
u32_trait::{_1, _2, _3, _4, _5}
};
use sp_runtime::{
create_runtime_str, impl_opaque_keys,
Percent, ModuleId,
generic, ApplyExtrinsicResult,
curve::PiecewiseLinear,
traits::ConvertInto,
transaction_validity::{TransactionPriority, TransactionValidity, TransactionSource},
};
use sp_runtime::traits::{
IdentityLookup, BlakeTwo256, Block as BlockT,
Convert, OpaqueKeys, Verify, IdentifyAccount,
NumberFor, Saturating,
};
use static_assertions::const_assert;
use sp_api::impl_runtime_apis;
use pallet_grandpa::fg_primitives;
use pallet_grandpa::{AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList};
use pallet_session::historical as pallet_session_historical;
use pallet_im_online::sr25519::AuthorityId as ImOnlineId;
#[cfg(feature = "std")]
use sp_version::NativeVersion;
use sp_version::RuntimeVersion;
use frame_system::{EnsureRoot, EnsureOneOf};
use frame_support::{
construct_runtime, parameter_types, StorageValue,
traits::{KeyOwnerProofSystem, Randomness, LockIdentifier, Filter},
weights::{
Weight, IdentityFee,
constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND},
},
};
// A few exports that help ease life for downstream crates.
#[cfg(any(feature = "std", test))]
pub use sp_runtime::BuildStorage;
pub use sp_runtime::{Permill, Perbill};
pub use pallet_timestamp::Call as TimestampCall;
pub use pallet_balances::Call as BalancesCall;
pub use pallet_staking::StakerStatus;
/// Import the template pallet.
pub use pallet_template;
pub mod common;
pub use common::*;
pub use common::opaque;
pub mod constants;
pub use constants::{time::*, currency::*, fee::*};
pub const VERSION: RuntimeVersion = RuntimeVersion {
spec_name: create_runtime_str!("bandot"),
impl_name: create_runtime_str!("bandot"),
authoring_version: 1,
spec_version: 1,
impl_version: 1,
apis: RUNTIME_API_VERSIONS,
transaction_version: 1,
};
/// The version information used to identify this runtime when compiled natively.
#[cfg(feature = "std")]
pub fn native_version() -> NativeVersion {
NativeVersion {
runtime_version: VERSION,
can_author_with: Default::default(),
}
}
impl_opaque_keys! {
pub struct SessionKeys {
pub babe: Babe,
pub grandpa: Grandpa,
pub im_online: ImOnline,
pub authority_discovery: AuthorityDiscovery,
//pub parachain_validator: ParachainSessionKeyPlaceholder<Runtime>,
}
}
/// Avoid processing transactions from slots and parachain registrar.
pub struct BaseFilter;
impl Filter<Call> for BaseFilter {
fn filter(_: &Call) -> bool {
true
}
}
parameter_types! {
pub const BlockHashCount: BlockNumber = 2400;
/// We allow for 2 seconds of compute with a 6 second average block time.
pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
/// Assume 10% of weight for average on_initialize calls.
pub MaximumExtrinsicWeight: Weight = AvailableBlockRatio::get()
.saturating_sub(Perbill::from_percent(10)) * MaximumBlockWeight::get();
pub const MaximumBlockLength: u32 = 5 * 1024 * 1024;
pub const Version: RuntimeVersion = VERSION;
}
/// frame_system by skyh 0927
impl frame_system::Trait for Runtime {
type BaseCallFilter = BaseFilter; /// filter 0927
type AccountId = AccountId;
type AccountData = pallet_balances::AccountData<Balance>;
type Header = generic::Header<BlockNumber, BlakeTwo256>;
type Lookup = IdentityLookup<AccountId>;
type Index = Index;
type Hash = Hash;
type Hashing = BlakeTwo256;
type BlockNumber = BlockNumber;
type BlockHashCount = BlockHashCount;
type BlockExecutionWeight = BlockExecutionWeight;
type ExtrinsicBaseWeight = ExtrinsicBaseWeight;
type MaximumBlockLength = MaximumBlockLength;
type MaximumBlockWeight = MaximumBlockWeight;
type MaximumExtrinsicWeight = MaximumExtrinsicWeight;
type AvailableBlockRatio = AvailableBlockRatio;
type DbWeight = RocksDbWeight;
type PalletInfo = PalletInfo;
type Version = Version;
type Call = Call;
type Event = Event;
type Origin = Origin;
type OnNewAccount = ();
type OnKilledAccount = (); ///
type SystemWeightInfo = ();
}
parameter_types! {
pub const EpochDuration: u64 = EPOCH_DURATION_IN_SLOTS;
pub const ExpectedBlockTime: u64 = MILLISECS_PER_BLOCK;
}
/// pallet_babe by skyh 0927
impl pallet_babe::Trait for Runtime {
type EpochDuration = EpochDuration;
type ExpectedBlockTime = ExpectedBlockTime;
type EpochChangeTrigger = pallet_babe::ExternalTrigger;
type KeyOwnerProofSystem = Historical;
type KeyOwnerProof =
<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, pallet_babe::AuthorityId)>>::Proof;
type KeyOwnerIdentification =
<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, pallet_babe::AuthorityId)>>::IdentificationTuple;
type HandleEquivocation = pallet_babe::EquivocationHandler<Self::KeyOwnerIdentification, ()>; // Offences
type WeightInfo = ();
}
/// pallet_grandpa by skyh 0927
impl pallet_grandpa::Trait for Runtime {
type KeyOwnerProofSystem = Historical;
type KeyOwnerProof =
<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::Proof;
type KeyOwnerIdentification =
<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::IdentificationTuple;
type HandleEquivocation = pallet_grandpa::EquivocationHandler<Self::KeyOwnerIdentification, ()>; // Offences
type WeightInfo = ();
type Event = Event;
type Call = Call;
}
parameter_types! {
pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17);
}
/// pallet_session by skyh 0927
impl pallet_session::Trait for Runtime {
type Keys = SessionKeys;
type ValidatorId = <Self as frame_system::Trait>::AccountId;
type ValidatorIdOf = pallet_staking::StashOf<Self>;
type ShouldEndSession = Babe;
type NextSessionRotation = Babe;
type SessionManager = pallet_session::historical::NoteHistoricalRoot<Self, Staking>;
type SessionHandler = <SessionKeys as OpaqueKeys>::KeyTypeIdProviders;
type DisabledValidatorsThreshold = DisabledValidatorsThreshold;
type WeightInfo = ();
type Event = Event;
}
impl pallet_session::historical::Trait for Runtime {
type FullIdentification = pallet_staking::Exposure<AccountId, Balance>;
type FullIdentificationOf = pallet_staking::ExposureOf<Runtime>;
}
/// frame_system::offchain by skyh 0927
impl<C> frame_system::offchain::SendTransactionTypes<C> for Runtime
where Call: From<C>,
{
type Extrinsic = UncheckedExtrinsic;
type OverarchingCall = Call;
}
pallet_staking_reward_curve::build! {
const REWARD_CURVE: PiecewiseLinear<'static> = curve!(
min_inflation: 0_025_000,
max_inflation: 0_100_000,
ideal_stake: 0_500_000,
falloff: 0_050_000,
max_piece_count: 40,
test_precision: 0_005_000,
);
}
/// Struct that handles the conversion of Balance -> `u64`. This is used for
/// staking's election calculation.
pub struct CurrencyToVoteHandler;
impl CurrencyToVoteHandler {
fn factor() -> Balance |
}
impl Convert<Balance, u64> for CurrencyToVoteHandler {
fn convert(x: Balance) -> u64 {
(x / Self::factor()) as u64
}
}
impl Convert<u128, Balance> for CurrencyToVoteHandler {
fn convert(x: u128) -> Balance {
x * Self::factor()
}
}
parameter_types! {
pub const SessionsPerEra: sp_staking::SessionIndex = 3; // 3 hours
pub const BondingDuration: pallet_staking::EraIndex = 4; // 12 hours
pub const SlashDeferDuration: pallet_staking::EraIndex = 2; // 6 hours
pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE;
pub const MaxNominatorRewardedPerValidator: u32 = 64;
pub const ElectionLookahead: BlockNumber = EPOCH_DURATION_IN_BLOCKS / 4;
pub const StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2;
pub const MaxIterations: u32 = 5;
// 0.05%. The higher the value, the more strict solution acceptance becomes.
pub MinSolutionScoreBump: Perbill = Perbill::from_rational_approximation(5u32, 10_000);
}
/// pallet_staking::offchain by skyh 0927
impl pallet_staking::Trait for Runtime {
type Currency = Balances;
type UnixTime = Timestamp;
type CurrencyToVote = CurrencyToVoteHandler;
type RewardRemainder = Treasury;
type RewardCurve = RewardCurve;
type Slash = Treasury; // send the slashed funds to the pallet treasury.
type Reward = (); // rewards are minted from the void
type SessionInterface = Self;
type SessionsPerEra = SessionsPerEra;
type BondingDuration = BondingDuration;
type SlashDeferDuration = SlashDeferDuration;
/// A super-majority of the council can cancel the slash.
type SlashCancelOrigin = frame_system::EnsureRoot<Self::AccountId>; // TODO
type NextNewSession = Session;
type ElectionLookahead = ElectionLookahead;
type UnsignedPriority = StakingUnsignedPriority;
type MaxIterations = MaxIterations;
type MinSolutionScoreBump = MinSolutionScoreBump;
type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator;
type WeightInfo = ();
type Event = Event;
type Call = Call;
}
// Council (collective Instance1) limits and motion lifetime.
parameter_types! {
pub const CouncilMotionDuration: BlockNumber = 3 * DAYS;
pub const CouncilMaxProposals: u32 = 100;
pub const CouncilMaxMembers: u32 = 100;
}
type CouncilCollective = pallet_collective::Instance1;
/// Origin satisfied by Root OR more than half of the council.
pub type MoreThanHalfCouncil = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>
>;
/// Origin satisfied by Root OR at least half of the council.
/// NOTE(review): not referenced by the staking config above, which uses
/// plain `EnsureRoot` instead — confirm which is intended.
pub type SlashCancelOrigin = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>
>;
/// Council collective wiring (absent members default-vote with the prime).
/// pallet_collective by skyh 1020
impl pallet_collective::Trait<CouncilCollective> for Runtime {
type MotionDuration = CouncilMotionDuration;
type MaxProposals = CouncilMaxProposals;
type MaxMembers = CouncilMaxMembers;
type DefaultVote = pallet_collective::PrimeDefaultVote;
// type WeightInfo = weights::pallet_collective::WeightInfo<Runtime>;
type WeightInfo = ();
type Origin = Origin;
type Proposal = Call;
type Event = Event;
}
// Technical committee (collective Instance2) — same limits as the council.
parameter_types! {
pub const TechnicalMotionDuration: BlockNumber = 3 * DAYS;
pub const TechnicalMaxProposals: u32 = 100;
pub const TechnicalMaxMembers: u32 = 100;
}
type TechnicalCollective = pallet_collective::Instance2;
/// Technical committee collective wiring; membership is managed by the
/// `pallet_membership` instance below.
impl pallet_collective::Trait<TechnicalCollective> for Runtime {
type Origin = Origin;
type Proposal = Call;
type Event = Event;
type MotionDuration = TechnicalMotionDuration;
type MaxProposals = TechnicalMaxProposals;
type MaxMembers = TechnicalMaxMembers;
type DefaultVote = pallet_collective::PrimeDefaultVote;
// type WeightInfo = weights::pallet_collective::WeightInfo<Runtime>;
type WeightInfo = ();
}
// Democracy (referendum) timing, deposits, and vote limits.
parameter_types! {
pub const LaunchPeriod: BlockNumber = 7 * DAYS;
pub const VotingPeriod: BlockNumber = 7 * DAYS;
pub const FastTrackVotingPeriod: BlockNumber = 3 * HOURS;
pub const MinimumDeposit: Balance = 1 * DOLLARS;
pub const EnactmentPeriod: BlockNumber = 8 * DAYS;
pub const CooloffPeriod: BlockNumber = 7 * DAYS;
// One cent: $10,000 / MB
pub const PreimageByteDeposit: Balance = 10 * MILLICENTS;
pub const InstantAllowed: bool = true;
pub const MaxVotes: u32 = 100;
pub const MaxProposals: u32 = 100;
}
/// Democracy wiring. Council origins gate external proposals; technical
/// committee origins gate fast-tracking and vetoes; forfeited deposits are
/// slashed to the Treasury.
/// pallet_democracy by skyh 1020
impl pallet_democracy::Trait for Runtime {
type Proposal = Call;
type Event = Event;
type Currency = Balances;
type EnactmentPeriod = EnactmentPeriod;
type LaunchPeriod = LaunchPeriod;
type VotingPeriod = VotingPeriod;
type CooloffPeriod = CooloffPeriod;
type PalletsOrigin = OriginCaller;
// Any single technical committee member may veto an external proposal.
type VetoOrigin = pallet_collective::EnsureMember<AccountId, TechnicalCollective>;
type ExternalOrigin = pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>;
type ExternalMajorityOrigin = pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>;
// Unanimous council required for an external "default" (negative-turnout-bias) proposal.
type ExternalDefaultOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>;
/// Two thirds of the technical committee can have an ExternalMajority/ExternalDefault vote
/// be tabled immediately and with a shorter voting/enactment period.
type FastTrackOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>;
type InstantOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>;
type InstantAllowed = InstantAllowed;
type FastTrackVotingPeriod = FastTrackVotingPeriod;
// To cancel a proposal which has been passed, 2/3 of the council must agree to it.
type CancellationOrigin = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>,
>;
// To cancel a proposal before it has been passed, the technical committee must be unanimous or
// Root must agree.
// type CancelProposalOrigin = EnsureOneOf<
// AccountId,
// EnsureRoot<AccountId>,
// pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>,
// >;
// type BlacklistOrigin = EnsureRoot<AccountId>;
// Any single technical committee member may veto a coming council proposal, however they can
// only do it once and it lasts only for the cooloff period.
type Slash = Treasury;
type Scheduler = Scheduler;
type MaxVotes = MaxVotes;
type MinimumDeposit = MinimumDeposit;
type PreimageByteDeposit = PreimageByteDeposit;
// Council members may submit operational (priority-fee) preimages.
type OperationalPreimageOrigin = pallet_collective::EnsureMember<AccountId, CouncilCollective>;
type WeightInfo = ();
// type MaxProposals = MaxProposals;
}
// Phragmen elections: bonds, term length and seat counts for electing the council.
parameter_types! {
pub const CandidacyBond: Balance = 1 * DOLLARS;
pub const VotingBond: Balance = 5 * CENTS;
/// Daily council elections.
pub const TermDuration: BlockNumber = 24 * HOURS;
pub const DesiredMembers: u32 = 19;
pub const DesiredRunnersUp: u32 = 19;
pub const ElectionsPhragmenModuleId: LockIdentifier = *b"phrelect";
}
// Make sure that there are no more than MaxMembers members elected via phragmen.
const_assert!(DesiredMembers::get() <= CouncilMaxMembers::get());
/// Elections wiring: results feed the Council collective; forfeited bonds
/// (losers, bad reports, kicked members) go to the Treasury.
/// pallet_elections_phragmen by skyh 1020
impl pallet_elections_phragmen::Trait for Runtime {
type Event = Event;
type Currency = Balances;
type ChangeMembers = Council;
type InitializeMembers = Council;
type CurrencyToVote = CurrencyToVoteHandler;
type CandidacyBond = CandidacyBond;
type VotingBond = VotingBond;
type LoserCandidate = Treasury;
type BadReport = Treasury;
type KickedMember = Treasury;
type DesiredMembers = DesiredMembers;
type DesiredRunnersUp = DesiredRunnersUp;
type TermDuration = TermDuration;
type ModuleId = ElectionsPhragmenModuleId;
// type WeightInfo = weights::pallet_elections_phragmen::WeightInfo<Runtime>;
type WeightInfo = ();
}
/// Technical-committee membership management: all mutations (add/remove/
/// swap/reset/prime) require Root or more than half of the council, and
/// changes are pushed into the TechnicalCommittee collective.
/// pallet_membership by skyh 1020
impl pallet_membership::Trait<pallet_membership::Instance1> for Runtime {
type Event = Event;
type AddOrigin = MoreThanHalfCouncil;
type RemoveOrigin = MoreThanHalfCouncil;
type SwapOrigin = MoreThanHalfCouncil;
type ResetOrigin = MoreThanHalfCouncil;
type PrimeOrigin = MoreThanHalfCouncil;
type MembershipInitialized = TechnicalCommittee;
type MembershipChanged = TechnicalCommittee;
}
// Treasury, tips and bounties parameters.
parameter_types! {
pub const ProposalBond: Permill = Permill::from_percent(5);
pub const ProposalBondMinimum: Balance = 20 * DOLLARS;
pub const SpendPeriod: BlockNumber = 6 * DAYS;
// 0.2% of the pot is burned per spend period.
pub const Burn: Permill = Permill::from_perthousand(2);
pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry");
pub const TipCountdown: BlockNumber = 1 * DAYS;
pub const TipFindersFee: Percent = Percent::from_percent(20);
pub const TipReportDepositBase: Balance = 1 * DOLLARS;
pub const DataDepositPerByte: Balance = 1 * CENTS;
pub const BountyDepositBase: Balance = 1 * DOLLARS;
pub const BountyDepositPayoutDelay: BlockNumber = 4 * DAYS;
pub const BountyUpdatePeriod: BlockNumber = 90 * DAYS;
pub const MaximumReasonLength: u32 = 16384;
pub const BountyCuratorDeposit: Permill = Permill::from_percent(50);
pub const BountyValueMinimum: Balance = 2 * DOLLARS;
}
/// Origin allowed to approve treasury proposals: Root or >= 3/5 of the council.
type ApproveOrigin = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective>
>;
/// Treasury wiring: council-gated approve/reject, phragmen members act as
/// tippers, and the periodic burn destination is `()` (burned outright).
/// pallet_treasury by skyh 1020
impl pallet_treasury::Trait for Runtime {
type ModuleId = TreasuryModuleId;
type Currency = Balances;
type ApproveOrigin = ApproveOrigin;
type RejectOrigin = MoreThanHalfCouncil;
type DataDepositPerByte = DataDepositPerByte;
type Tippers = ElectionsPhragmen;
type TipCountdown = TipCountdown;
type TipFindersFee = TipFindersFee;
type TipReportDepositBase = TipReportDepositBase;
type BountyDepositBase = BountyDepositBase;
type BountyDepositPayoutDelay = BountyDepositPayoutDelay;
type BountyUpdatePeriod = BountyUpdatePeriod;
type MaximumReasonLength = MaximumReasonLength;
type BountyCuratorDeposit = BountyCuratorDeposit;
type BountyValueMinimum = BountyValueMinimum;
// type BurnDestination = Society;
type ProposalBond = ProposalBond;
type ProposalBondMinimum = ProposalBondMinimum;
type SpendPeriod = SpendPeriod;
type OnSlash = Treasury;
type Burn = Burn;
type BurnDestination = ();
// type WeightInfo = weights::pallet_treasury::WeightInfo<Runtime>;
type WeightInfo = ();
type Event = Event;
}
parameter_types! {
pub const MaxScheduledPerBlock: u32 = 50;
}
/// Scheduler wiring: only Root may schedule calls directly; scheduled calls
/// may consume up to the full block weight. Used by democracy for enactment.
/// pallet_scheduler by skyh 1020
impl pallet_scheduler::Trait for Runtime {
type PalletsOrigin = OriginCaller;
type MaximumWeight = MaximumBlockWeight;
type ScheduleOrigin = EnsureRoot<AccountId>;
type MaxScheduledPerBlock = MaxScheduledPerBlock;
type WeightInfo = ();
type Origin = Origin;
type Event = Event;
type Call = Call;
}
parameter_types! {
pub const SessionDuration: BlockNumber = EPOCH_DURATION_IN_BLOCKS as _;
// Heartbeats get maximum priority since they are time-critical.
pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value();
}
/// I'm-online wiring: unresponsive validators are reported to Offences.
impl pallet_im_online::Trait for Runtime {
type AuthorityId = ImOnlineId;
type SessionDuration = SessionDuration;
type ReportUnresponsiveness = Offences;
type UnsignedPriority = ImOnlineUnsignedPriority;
type WeightInfo = ();
type Event = Event;
}
parameter_types! {
pub const IndexDeposit: Balance = DOLLARS;
}
/// Account-index (short address) wiring; claiming an index reserves a deposit.
/// pallet_indices by skyh 1020
impl pallet_indices::Trait for Runtime {
type AccountIndex = AccountIndex;
type Deposit = IndexDeposit;
type Currency = Balances;
type WeightInfo = ();
type Event = Event;
}
parameter_types! {
// Cap offence processing at 60% of the maximum block weight per block.
pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get();
}
/// Offences wiring: reported offences are handled (slashed) by Staking,
/// with offenders identified via historical session data.
/// pallet_offences by skyh 1020
impl pallet_offences::Trait for Runtime {
type IdentificationTuple = pallet_session::historical::IdentificationTuple<Self>;
type WeightSoftLimit = OffencesWeightSoftLimit;
type OnOffenceHandler = Staking;
type Event = Event;
}
/// Utility pallet (batch calls, derivative accounts).
impl pallet_utility::Trait for Runtime {
type WeightInfo = ();
type Event = Event;
type Call = Call;
}
parameter_types! {
pub const MinVestedTransfer: Balance = 100 * DOLLARS;
}
/// Vesting wiring: block numbers convert 1:1 into balance units for the
/// linear unlock schedule.
/// pallet_vesting by skyh 1020
impl pallet_vesting::Trait for Runtime {
type Event = Event;
type Currency = Balances;
type BlockNumberToBalance = ConvertInto;
type MinVestedTransfer = MinVestedTransfer;
type WeightInfo = ();
}
/// Authority discovery has no configurable associated types.
impl pallet_authority_discovery::Trait for Runtime {}
parameter_types! {
pub const UncleGenerations: BlockNumber = 5;
}
/// Authorship wiring: the block author is derived from the BABE authority
/// index via the session pallet.
/// pallet_authorship origin
impl pallet_authorship::Trait for Runtime {
type FindAuthor = pallet_session::FindAccountFromAuthorIndex<Self, Babe>;
type UncleGenerations = UncleGenerations;
type FilterUncle = ();
// NOTE(review): ImOnline note-authorship hook is disabled (replaced by `()`).
type EventHandler = (Staking, ()); // ImOnline
}
parameter_types! {
pub const MinimumPeriod: u64 = SLOT_DURATION / 2;
}
/// Timestamp wiring: BABE is notified on each timestamp set so it can
/// validate slot times.
/// pallet_timestamp origin
impl pallet_timestamp::Trait for Runtime {
/// A timestamp: milliseconds since the unix epoch.
type Moment = Moment;
type OnTimestampSet = Babe;
type MinimumPeriod = MinimumPeriod;
type WeightInfo = ();
}
parameter_types! {
pub const ExistentialDeposit: u128 = 500; // 0
pub const MaxLocks: u32 = 50;
}
/// Balances wiring: account data is stored in the System pallet; dust from
/// accounts dropping below the existential deposit is discarded (`()`).
/// pallet_balances origin
impl pallet_balances::Trait for Runtime {
type Balance = Balance;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type MaxLocks = MaxLocks;
type AccountStore = System; //
type Event = Event;
type WeightInfo = ();
}
parameter_types! {
pub const TransactionByteFee: Balance = 1;
}
/// Transaction-payment wiring: weight maps 1:1 to fee, collected fees are
/// dropped (`()` — not routed to treasury), and the fee multiplier is fixed.
/// pallet_transaction_payment origin
impl pallet_transaction_payment::Trait for Runtime {
type Currency = Balances;
type TransactionByteFee = TransactionByteFee;
type WeightToFee = IdentityFee<Balance>;
type OnTransactionPayment = (); // treasury
type FeeMultiplierUpdate = (); //
}
/// Sudo: a single privileged key may dispatch any call as Root.
impl pallet_sudo::Trait for Runtime {
type Event = Event;
type Call = Call;
}
/// Configure the template pallet in pallets/template.
impl pallet_template::Trait for Runtime {
type Event = Event;
}
// TODO
// Create the runtime by composing the FRAME pallets that were previously configured.
// NOTE: pallet order here determines module indices and on_initialize order —
// do not reorder without a migration plan.
construct_runtime!(
pub enum Runtime where
Block = Block,
NodeBlock = opaque::Block,
UncheckedExtrinsic = UncheckedExtrinsic
{
System: frame_system::{Module, Call, Config, Storage, Event<T>},
RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Call, Storage},
Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent},
Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>},
TransactionPayment: pallet_transaction_payment::{Module, Storage},
Sudo: pallet_sudo::{Module, Call, Config<T>, Storage, Event<T>},
Authorship: pallet_authorship::{Module, Call, Storage, Inherent},
Babe: pallet_babe::{Module, Call, Storage, Config, Inherent},
Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event},
Staking: pallet_staking::{Module, Call, Config<T>, Storage, Event<T>},
Session: pallet_session::{Module, Call, Storage, Event, Config<T>},
AuthorityDiscovery: pallet_authority_discovery::{Module, Call, Config},
Council: pallet_collective::<Instance1>::{Module, Call, Storage, Origin<T>, Event<T>, Config<T>},
TechnicalCommittee: pallet_collective::<Instance2>::{Module, Call, Storage, Origin<T>, Event<T>, Config<T>},
Democracy: pallet_democracy::{Module, Call, Storage, Config, Event<T>},
ElectionsPhragmen: pallet_elections_phragmen::{Module, Call, Storage, Event<T>, Config<T>},
TechnicalMembership: pallet_membership::<Instance1>::{Module, Call, Storage, Event<T>, Config<T>},
ImOnline: pallet_im_online::{Module, Call, Storage, Event<T>, ValidateUnsigned, Config<T>},
Indices: pallet_indices::{Module, Call, Storage, Config<T>, Event<T>},
Scheduler: pallet_scheduler::{Module, Call, Storage, Event<T>},
Treasury: pallet_treasury::{Module, Call, Storage, Event<T>},
Offences: pallet_offences::{Module, Call, Storage, Event},
Historical: pallet_session_historical::{Module},
Vesting: pallet_vesting::{Module, Call, Storage, Event<T>, Config<T>},
Utility: pallet_utility::{Module, Call, Event},
// Include the custom logic from the template pallet in the runtime.
TemplateModule: pallet_template::{Module, Call, Storage, Event<T>},
}
);
/// The address format for describing accounts.
// NOTE(review): raw `AccountId` addresses — index-based lookup (Indices) is
// not used for address resolution, matching `Lookup = IdentityLookup`.
pub type Address = AccountId;
/// Block header type as expected by this runtime.
pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
/// Block type as expected by this runtime.
pub type Block = generic::Block<Header, UncheckedExtrinsic>;
/// A Block signed with a Justification
pub type SignedBlock = generic::SignedBlock<Block>;
/// BlockId type as expected by this runtime.
pub type BlockId = generic::BlockId<Block>;
/// The SignedExtension to the basic transaction logic.
/// Order matters: checks run in the sequence listed here.
pub type SignedExtra = (
frame_system::CheckSpecVersion<Runtime>,
frame_system::CheckTxVersion<Runtime>,
frame_system::CheckGenesis<Runtime>,
frame_system::CheckEra<Runtime>,
frame_system::CheckNonce<Runtime>,
frame_system::CheckWeight<Runtime>,
pallet_transaction_payment::ChargeTransactionPayment<Runtime>
);
/// Unchecked extrinsic type as expected by this runtime.
pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signature, SignedExtra>;
/// Extrinsic type that has already been checked.
pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, Call, SignedExtra>;
/// Executive: handles dispatch to the various modules.
pub type Executive = frame_executive::Executive<
Runtime,
Block,
frame_system::ChainContext<Runtime>,
Runtime,
AllModules,
>;
// Runtime API surface exposed to the client (node) side. Each inner impl
// forwards into `Executive` or the relevant pallet; no logic lives here.
impl_runtime_apis! {
impl sp_api::Core<Block> for Runtime {
fn version() -> RuntimeVersion {
VERSION
}
fn execute_block(block: Block) {
Executive::execute_block(block)
}
fn initialize_block(header: &<Block as BlockT>::Header) {
Executive::initialize_block(header)
}
}
impl sp_api::Metadata<Block> for Runtime {
fn metadata() -> OpaqueMetadata {
Runtime::metadata().into()
}
}
impl sp_block_builder::BlockBuilder<Block> for Runtime {
fn apply_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> ApplyExtrinsicResult {
Executive::apply_extrinsic(extrinsic)
}
fn finalize_block() -> <Block as BlockT>::Header {
Executive::finalize_block()
}
fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<<Block as BlockT>::Extrinsic> {
data.create_extrinsics()
}
fn check_inherents(
block: Block,
data: sp_inherents::InherentData,
) -> sp_inherents::CheckInherentsResult {
data.check_extrinsics(&block)
}
fn random_seed() -> <Block as BlockT>::Hash {
RandomnessCollectiveFlip::random_seed()
}
}
impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime {
fn validate_transaction(
source: TransactionSource,
tx: <Block as BlockT>::Extrinsic,
) -> TransactionValidity {
Executive::validate_transaction(source, tx)
}
}
impl sp_offchain::OffchainWorkerApi<Block> for Runtime {
fn offchain_worker(header: &<Block as BlockT>::Header) {
Executive::offchain_worker(header)
}
}
// TODO ### sp_consensus_babe by Skyh, 0927 ###
/// configuration
/// current_epoch_start
/// generate_key_ownership_proof
/// submit_report_equivocation_unsigned_extrinsic
impl sp_consensus_babe::BabeApi<Block> for Runtime {
fn configuration() -> sp_consensus_babe::BabeGenesisConfiguration {
// The choice of `c` parameter (where `1 - c` represents the
// probability of a slot being empty), is done in accordance to the
// slot duration and expected target block time, for safely
// resisting network delays of maximum two seconds.
// <https://research.web3.foundation/en/latest/polkadot/BABE/Babe/#6-practical-results>
sp_consensus_babe::BabeGenesisConfiguration {
slot_duration: Babe::slot_duration(),
epoch_length: EpochDuration::get(),
c: PRIMARY_PROBABILITY,
genesis_authorities: Babe::authorities(),
randomness: Babe::randomness(),
allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryPlainSlots,
}
}
fn current_epoch_start() -> sp_consensus_babe::SlotNumber {
Babe::current_epoch_start()
}
fn generate_key_ownership_proof(
_slot_number: sp_consensus_babe::SlotNumber,
authority_id: sp_consensus_babe::AuthorityId,
) -> Option<sp_consensus_babe::OpaqueKeyOwnershipProof> {
use codec::Encode;
// Membership proof against the historical session trie, opaque-encoded.
Historical::prove((sp_consensus_babe::KEY_TYPE, authority_id))
.map(|p| p.encode())
.map(sp_consensus_babe::OpaqueKeyOwnershipProof::new)
}
fn submit_report_equivocation_unsigned_extrinsic(
equivocation_proof: sp_consensus_babe::EquivocationProof<<Block as BlockT>::Header>,
key_owner_proof: sp_consensus_babe::OpaqueKeyOwnershipProof,
) -> Option<()> {
let key_owner_proof = key_owner_proof.decode()?;
Babe::submit_unsigned_equivocation_report(
equivocation_proof,
key_owner_proof,
)
}
}
impl sp_session::SessionKeys<Block> for Runtime {
fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> {
SessionKeys::generate(seed)
}
fn decode_session_keys(
encoded: Vec<u8>,
) -> Option<Vec<(Vec<u8>, KeyTypeId)>> {
SessionKeys::decode_into_raw_public_keys(&encoded)
}
}
impl fg_primitives::GrandpaApi<Block> for Runtime {
fn grandpa_authorities() -> GrandpaAuthorityList {
Grandpa::grandpa_authorities()
}
// NOTE(review): GRANDPA equivocation reporting is stubbed out (returns
// None), unlike the BABE path above — confirm this is intentional.
fn submit_report_equivocation_unsigned_extrinsic(
_equivocation_proof: fg_primitives::EquivocationProof<
<Block as BlockT>::Hash,
NumberFor<Block>,
>,
_key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof,
) -> Option<()> {
None
}
fn generate_key_ownership_proof(
_set_id: fg_primitives::SetId,
_authority_id: GrandpaId,
) -> Option<fg_primitives::OpaqueKeyOwnershipProof> {
// NOTE: this is the only implementation possible since we've
// defined our key owner proof type as a bottom type (i.e. a type
// with no values).
None
}
}
impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Index> for Runtime {
fn account_nonce(account: AccountId) -> Index {
System::account_nonce(account)
}
}
impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance> for Runtime {
fn query_info(
uxt: <Block as BlockT>::Extrinsic,
len: u32,
) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo<Balance> {
TransactionPayment::query_info(uxt, len)
}
}
#[cfg(feature = "runtime-benchmarks")]
impl frame_benchmarking::Benchmark<Block> for Runtime {
fn dispatch_benchmark(
config: frame_benchmarking::BenchmarkConfig
) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> {
use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey};
use frame_system_benchmarking::Module as SystemBench;
impl frame_system_benchmarking::Trait for Runtime {}
// Storage keys whose accesses are excluded from benchmark DB tracking.
let whitelist: Vec<TrackedStorageKey> = vec![
// Block Number
hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
// Total Issuance
hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
// Execution Phase
hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
// Event Count
hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
// System Events
hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
];
let mut batches = Vec::<BenchmarkBatch>::new();
let params = (&config, &whitelist);
add_benchmark!(params, batches, frame_system, SystemBench::<Runtime>);
add_benchmark!(params, batches, pallet_balances, Balances);
add_benchmark!(params, batches, pallet_timestamp, Timestamp);
if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) }
Ok(batches)
}
}
}
| {
(Balances::total_issuance() / u64::max_value() as Balance).max(1)
} | identifier_body |
lib.rs | #![cfg_attr(not(feature = "std"), no_std)]
// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256.
#![recursion_limit="256"]
// Make the WASM binary available.
#[cfg(feature = "std")]
include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
use sp_std::prelude::*;
use sp_core::{
crypto::KeyTypeId, OpaqueMetadata,
u32_trait::{_1, _2, _3, _4, _5}
};
use sp_runtime::{
create_runtime_str, impl_opaque_keys,
Percent, ModuleId,
generic, ApplyExtrinsicResult,
curve::PiecewiseLinear,
traits::ConvertInto,
transaction_validity::{TransactionPriority, TransactionValidity, TransactionSource},
};
use sp_runtime::traits::{
IdentityLookup, BlakeTwo256, Block as BlockT,
Convert, OpaqueKeys, Verify, IdentifyAccount,
NumberFor, Saturating,
};
use static_assertions::const_assert;
use sp_api::impl_runtime_apis;
use pallet_grandpa::fg_primitives;
use pallet_grandpa::{AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList};
use pallet_session::historical as pallet_session_historical;
use pallet_im_online::sr25519::AuthorityId as ImOnlineId;
#[cfg(feature = "std")]
use sp_version::NativeVersion;
use sp_version::RuntimeVersion;
use frame_system::{EnsureRoot, EnsureOneOf};
use frame_support::{
construct_runtime, parameter_types, StorageValue,
traits::{KeyOwnerProofSystem, Randomness, LockIdentifier, Filter},
weights::{
Weight, IdentityFee,
constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND},
},
};
// A few exports that help ease life for downstream crates.
#[cfg(any(feature = "std", test))]
pub use sp_runtime::BuildStorage;
pub use sp_runtime::{Permill, Perbill};
pub use pallet_timestamp::Call as TimestampCall;
pub use pallet_balances::Call as BalancesCall;
pub use pallet_staking::StakerStatus;
/// Import the template pallet.
pub use pallet_template;
pub mod common;
pub use common::*;
pub use common::opaque;
pub mod constants;
pub use constants::{time::*, currency::*, fee::*};
/// Runtime version identity. `spec_version` must be bumped on any
/// logic/storage change; `transaction_version` on any extrinsic
/// index/signature change.
pub const VERSION: RuntimeVersion = RuntimeVersion {
spec_name: create_runtime_str!("bandot"),
impl_name: create_runtime_str!("bandot"),
authoring_version: 1,
spec_version: 1,
impl_version: 1,
apis: RUNTIME_API_VERSIONS,
transaction_version: 1,
};
/// The version information used to identify this runtime when compiled natively.
#[cfg(feature = "std")]
pub fn native_version() -> NativeVersion {
NativeVersion {
runtime_version: VERSION,
// Default: the native runtime can author with any runtime version.
can_author_with: Default::default(),
}
}
// Session key bundle: one key per consensus/authority subsystem. The field
// order defines the key order expected by `session.setKeys`.
impl_opaque_keys! {
pub struct SessionKeys {
pub babe: Babe,
pub grandpa: Grandpa,
pub im_online: ImOnline,
pub authority_discovery: AuthorityDiscovery,
//pub parachain_validator: ParachainSessionKeyPlaceholder<Runtime>,
}
}
/// Avoid processing transactions from slots and parachain registrar.
// NOTE(review): currently a no-op filter — every call is allowed.
pub struct BaseFilter;
impl Filter<Call> for BaseFilter {
fn filter(_: &Call) -> bool {
true
}
}
// Core system limits: block weight/length budgets and hash history depth.
parameter_types! {
pub const BlockHashCount: BlockNumber = 2400;
/// We allow for 2 seconds of compute with a 6 second average block time.
pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
/// Assume 10% of weight for average on_initialize calls.
pub MaximumExtrinsicWeight: Weight = AvailableBlockRatio::get()
.saturating_sub(Perbill::from_percent(10)) * MaximumBlockWeight::get();
pub const MaximumBlockLength: u32 = 5 * 1024 * 1024;
pub const Version: RuntimeVersion = VERSION;
}
/// frame_system by skyh 0927
impl frame_system::Trait for Runtime {
type BaseCallFilter = BaseFilter; // filter 0927 (currently allows all calls)
type AccountId = AccountId;
type AccountData = pallet_balances::AccountData<Balance>;
type Header = generic::Header<BlockNumber, BlakeTwo256>;
// No index-based address lookup: addresses ARE account ids.
type Lookup = IdentityLookup<AccountId>;
type Index = Index;
type Hash = Hash;
type Hashing = BlakeTwo256;
type BlockNumber = BlockNumber;
type BlockHashCount = BlockHashCount;
type BlockExecutionWeight = BlockExecutionWeight;
type ExtrinsicBaseWeight = ExtrinsicBaseWeight;
type MaximumBlockLength = MaximumBlockLength;
type MaximumBlockWeight = MaximumBlockWeight;
type MaximumExtrinsicWeight = MaximumExtrinsicWeight;
type AvailableBlockRatio = AvailableBlockRatio;
type DbWeight = RocksDbWeight;
type PalletInfo = PalletInfo;
type Version = Version;
type Call = Call;
type Event = Event;
type Origin = Origin;
type OnNewAccount = ();
type OnKilledAccount = (); // no cleanup hook on account reaping
type SystemWeightInfo = ();
}
parameter_types! {
pub const EpochDuration: u64 = EPOCH_DURATION_IN_SLOTS;
pub const ExpectedBlockTime: u64 = MILLISECS_PER_BLOCK;
}
/// BABE wiring: epoch changes come from the session pallet (ExternalTrigger);
/// key-ownership proofs come from historical session data. The equivocation
/// handler's offence sink is `()` (Offences reporting disabled).
/// pallet_babe by skyh 0927
impl pallet_babe::Trait for Runtime {
type EpochDuration = EpochDuration;
type ExpectedBlockTime = ExpectedBlockTime;
type EpochChangeTrigger = pallet_babe::ExternalTrigger;
type KeyOwnerProofSystem = Historical;
type KeyOwnerProof =
<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, pallet_babe::AuthorityId)>>::Proof;
type KeyOwnerIdentification =
<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, pallet_babe::AuthorityId)>>::IdentificationTuple;
type HandleEquivocation = pallet_babe::EquivocationHandler<Self::KeyOwnerIdentification, ()>; // Offences
type WeightInfo = ();
}
/// GRANDPA wiring: mirrors the BABE key-ownership setup; equivocation
/// offence reporting likewise disabled via `()`.
/// pallet_grandpa by skyh 0927
impl pallet_grandpa::Trait for Runtime {
type KeyOwnerProofSystem = Historical;
type KeyOwnerProof =
<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::Proof;
type KeyOwnerIdentification =
<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::IdentificationTuple;
type HandleEquivocation = pallet_grandpa::EquivocationHandler<Self::KeyOwnerIdentification, ()>; // Offences
type WeightInfo = ();
type Event = Event;
type Call = Call;
}
parameter_types! {
pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17);
}
/// Session wiring: BABE drives session rotation; Staking (via the historical
/// note-keeper) manages the validator set; validator ids resolve to stash
/// accounts.
/// pallet_session by skyh 0927
impl pallet_session::Trait for Runtime {
type Keys = SessionKeys;
type ValidatorId = <Self as frame_system::Trait>::AccountId;
type ValidatorIdOf = pallet_staking::StashOf<Self>;
type ShouldEndSession = Babe;
type NextSessionRotation = Babe;
type SessionManager = pallet_session::historical::NoteHistoricalRoot<Self, Staking>;
type SessionHandler = <SessionKeys as OpaqueKeys>::KeyTypeIdProviders;
type DisabledValidatorsThreshold = DisabledValidatorsThreshold;
type WeightInfo = ();
type Event = Event;
}
/// Historical session data used for key-ownership (equivocation) proofs:
/// validators are identified by their staking exposure.
impl pallet_session::historical::Trait for Runtime {
type FullIdentification = pallet_staking::Exposure<AccountId, Balance>;
type FullIdentificationOf = pallet_staking::ExposureOf<Runtime>;
}
/// Lets pallets (staking, im-online) submit unsigned transactions from
/// offchain workers, for any call type convertible into `Call`.
/// frame_system::offchain by skyh 0927
impl<C> frame_system::offchain::SendTransactionTypes<C> for Runtime
where Call: From<C>,
{
type Extrinsic = UncheckedExtrinsic;
type OverarchingCall = Call;
}
// Inflation curve for staking rewards: 2.5% min, 10% max inflation,
// ideal stake ratio 50%, approximated as a 40-piece linear curve.
pallet_staking_reward_curve::build! {
const REWARD_CURVE: PiecewiseLinear<'static> = curve!(
min_inflation: 0_025_000,
max_inflation: 0_100_000,
ideal_stake: 0_500_000,
falloff: 0_050_000,
max_piece_count: 40,
test_precision: 0_005_000,
);
}
/// Struct that handles the conversion of Balance -> `u64`. This is used for
/// staking's election calculation.
pub struct CurrencyToVoteHandler;
impl CurrencyToVoteHandler {
// Scaling divisor so that total issuance divided by `factor()` fits in
// `u64`; clamped to at least 1 to avoid division by zero below issuance
// of u64::MAX.
fn factor() -> Balance {
(Balances::total_issuance() / u64::max_value() as Balance).max(1)
}
}
/// Converts a `Balance` into the `u64` vote weight used by staking's
/// election calculation, scaling down by `factor()` so the result fits in
/// `u64` even when total issuance exceeds `u64::MAX`.
impl Convert<Balance, u64> for CurrencyToVoteHandler {
    // BUG FIX: the method name was missing (`fn | (x: Balance)`), which is
    // not valid Rust; the `Convert` trait requires a method named `convert`.
    fn convert(x: Balance) -> u64 {
        (x / Self::factor()) as u64
    }
}
/// Inverse direction: vote weight (`u128`) back to `Balance` by
/// re-applying the scaling factor.
/// NOTE(review): plain `*` can overflow `u128` for extreme inputs —
/// confirm vote weights are bounded upstream.
impl Convert<u128, Balance> for CurrencyToVoteHandler {
fn convert(x: u128) -> Balance {
x * Self::factor()
}
}
// Staking constants; wall-clock equivalents noted inline.
parameter_types! {
pub const SessionsPerEra: sp_staking::SessionIndex = 3; // 3 hours
pub const BondingDuration: pallet_staking::EraIndex = 4; // 12 hours
pub const SlashDeferDuration: pallet_staking::EraIndex = 2; // 6 hours
pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE;
pub const MaxNominatorRewardedPerValidator: u32 = 64;
pub const ElectionLookahead: BlockNumber = EPOCH_DURATION_IN_BLOCKS / 4;
pub const StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2;
pub const MaxIterations: u32 = 5;
// 0.05%. The higher the value, the more strict solution acceptance becomes.
pub MinSolutionScoreBump: Perbill = Perbill::from_rational_approximation(5u32, 10_000);
}
/// Staking wiring: slashes/reward remainder to Treasury, rewards minted
/// from the void, Root-only slash cancellation (see TODO).
/// pallet_staking::offchain by skyh 0927
impl pallet_staking::Trait for Runtime {
type Currency = Balances;
type UnixTime = Timestamp;
type CurrencyToVote = CurrencyToVoteHandler;
type RewardRemainder = Treasury;
type RewardCurve = RewardCurve;
type Slash = Treasury; // send the slashed funds to the pallet treasury.
type Reward = (); // rewards are minted from the void
type SessionInterface = Self;
type SessionsPerEra = SessionsPerEra;
type BondingDuration = BondingDuration;
type SlashDeferDuration = SlashDeferDuration;
/// A super-majority of the council can cancel the slash.
type SlashCancelOrigin = frame_system::EnsureRoot<Self::AccountId>; // TODO
type NextNewSession = Session;
type ElectionLookahead = ElectionLookahead;
type UnsignedPriority = StakingUnsignedPriority;
type MaxIterations = MaxIterations;
type MinSolutionScoreBump = MinSolutionScoreBump;
type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator;
type WeightInfo = ();
type Event = Event;
type Call = Call;
}
// Council and technical-committee collectives plus council-based origins.
parameter_types! {
pub const CouncilMotionDuration: BlockNumber = 3 * DAYS;
pub const CouncilMaxProposals: u32 = 100;
pub const CouncilMaxMembers: u32 = 100;
}
type CouncilCollective = pallet_collective::Instance1;
/// Root OR more than half of the council.
pub type MoreThanHalfCouncil = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>
>;
/// Root OR at least half of the council.
/// NOTE(review): unused by the staking config above — confirm intent.
pub type SlashCancelOrigin = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>
>;
/// pallet_collective by skyh 1020
impl pallet_collective::Trait<CouncilCollective> for Runtime {
type MotionDuration = CouncilMotionDuration;
type MaxProposals = CouncilMaxProposals;
type MaxMembers = CouncilMaxMembers;
type DefaultVote = pallet_collective::PrimeDefaultVote;
// type WeightInfo = weights::pallet_collective::WeightInfo<Runtime>;
type WeightInfo = ();
type Origin = Origin;
type Proposal = Call;
type Event = Event;
}
parameter_types! {
pub const TechnicalMotionDuration: BlockNumber = 3 * DAYS;
pub const TechnicalMaxProposals: u32 = 100;
pub const TechnicalMaxMembers: u32 = 100;
}
type TechnicalCollective = pallet_collective::Instance2;
/// Technical committee collective; membership managed by pallet_membership.
impl pallet_collective::Trait<TechnicalCollective> for Runtime {
type Origin = Origin;
type Proposal = Call;
type Event = Event;
type MotionDuration = TechnicalMotionDuration;
type MaxProposals = TechnicalMaxProposals;
type MaxMembers = TechnicalMaxMembers;
type DefaultVote = pallet_collective::PrimeDefaultVote;
// type WeightInfo = weights::pallet_collective::WeightInfo<Runtime>;
type WeightInfo = ();
}
// Democracy (referendum) timing, deposits, and vote limits.
parameter_types! {
pub const LaunchPeriod: BlockNumber = 7 * DAYS;
pub const VotingPeriod: BlockNumber = 7 * DAYS;
pub const FastTrackVotingPeriod: BlockNumber = 3 * HOURS;
pub const MinimumDeposit: Balance = 1 * DOLLARS;
pub const EnactmentPeriod: BlockNumber = 8 * DAYS;
pub const CooloffPeriod: BlockNumber = 7 * DAYS;
// One cent: $10,000 / MB
pub const PreimageByteDeposit: Balance = 10 * MILLICENTS;
pub const InstantAllowed: bool = true;
pub const MaxVotes: u32 = 100;
pub const MaxProposals: u32 = 100;
}
/// pallet_democracy by skyh 1020
impl pallet_democracy::Trait for Runtime {
type Proposal = Call;
type Event = Event;
type Currency = Balances;
type EnactmentPeriod = EnactmentPeriod;
type LaunchPeriod = LaunchPeriod;
type VotingPeriod = VotingPeriod;
type CooloffPeriod = CooloffPeriod;
type PalletsOrigin = OriginCaller;
type VetoOrigin = pallet_collective::EnsureMember<AccountId, TechnicalCollective>;
type ExternalOrigin = pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>;
type ExternalMajorityOrigin = pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>;
type ExternalDefaultOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>;
/// Two thirds of the technical committee can have an ExternalMajority/ExternalDefault vote
/// be tabled immediately and with a shorter voting/enactment period.
type FastTrackOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>;
type InstantOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>;
type InstantAllowed = InstantAllowed;
type FastTrackVotingPeriod = FastTrackVotingPeriod;
// To cancel a proposal which has been passed, 2/3 of the council must agree to it.
type CancellationOrigin = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>,
>;
// To cancel a proposal before it has been passed, the technical committee must be unanimous or
// Root must agree.
// type CancelProposalOrigin = EnsureOneOf<
// AccountId,
// EnsureRoot<AccountId>,
// pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>,
// >;
// type BlacklistOrigin = EnsureRoot<AccountId>;
// Any single technical committee member may veto a coming council proposal, however they can
// only do it once and it lasts only for the cooloff period.
type Slash = Treasury;
type Scheduler = Scheduler;
type MaxVotes = MaxVotes;
type MinimumDeposit = MinimumDeposit;
type PreimageByteDeposit = PreimageByteDeposit;
type OperationalPreimageOrigin = pallet_collective::EnsureMember<AccountId, CouncilCollective>;
type WeightInfo = ();
// type MaxProposals = MaxProposals;
}
parameter_types! {
pub const CandidacyBond: Balance = 1 * DOLLARS;
pub const VotingBond: Balance = 5 * CENTS;
/// Daily council elections.
pub const TermDuration: BlockNumber = 24 * HOURS;
pub const DesiredMembers: u32 = 19;
pub const DesiredRunnersUp: u32 = 19;
pub const ElectionsPhragmenModuleId: LockIdentifier = *b"phrelect";
}
// Make sure that there are no more than MaxMembers members elected via phragmen.
const_assert!(DesiredMembers::get() <= CouncilMaxMembers::get());
/// pallet_elections_phragmen by skyh 1020
impl pallet_elections_phragmen::Trait for Runtime {
type Event = Event;
type Currency = Balances;
type ChangeMembers = Council;
type InitializeMembers = Council;
type CurrencyToVote = CurrencyToVoteHandler;
type CandidacyBond = CandidacyBond;
type VotingBond = VotingBond;
type LoserCandidate = Treasury;
type BadReport = Treasury;
type KickedMember = Treasury;
type DesiredMembers = DesiredMembers;
type DesiredRunnersUp = DesiredRunnersUp;
type TermDuration = TermDuration;
type ModuleId = ElectionsPhragmenModuleId;
// type WeightInfo = weights::pallet_elections_phragmen::WeightInfo<Runtime>;
type WeightInfo = ();
}
/// pallet_membership by skyh 1020
impl pallet_membership::Trait<pallet_membership::Instance1> for Runtime {
type Event = Event;
type AddOrigin = MoreThanHalfCouncil;
type RemoveOrigin = MoreThanHalfCouncil;
type SwapOrigin = MoreThanHalfCouncil;
type ResetOrigin = MoreThanHalfCouncil;
type PrimeOrigin = MoreThanHalfCouncil;
type MembershipInitialized = TechnicalCommittee;
type MembershipChanged = TechnicalCommittee;
}
parameter_types! {
pub const ProposalBond: Permill = Permill::from_percent(5);
pub const ProposalBondMinimum: Balance = 20 * DOLLARS;
pub const SpendPeriod: BlockNumber = 6 * DAYS;
pub const Burn: Permill = Permill::from_perthousand(2);
pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry");
pub const TipCountdown: BlockNumber = 1 * DAYS;
pub const TipFindersFee: Percent = Percent::from_percent(20);
pub const TipReportDepositBase: Balance = 1 * DOLLARS;
pub const DataDepositPerByte: Balance = 1 * CENTS;
pub const BountyDepositBase: Balance = 1 * DOLLARS;
pub const BountyDepositPayoutDelay: BlockNumber = 4 * DAYS;
pub const BountyUpdatePeriod: BlockNumber = 90 * DAYS;
pub const MaximumReasonLength: u32 = 16384;
pub const BountyCuratorDeposit: Permill = Permill::from_percent(50);
pub const BountyValueMinimum: Balance = 2 * DOLLARS;
}
type ApproveOrigin = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective>
>;
/// pallet_treasury by skyh 1020
impl pallet_treasury::Trait for Runtime {
type ModuleId = TreasuryModuleId;
type Currency = Balances;
type ApproveOrigin = ApproveOrigin;
type RejectOrigin = MoreThanHalfCouncil;
type DataDepositPerByte = DataDepositPerByte;
type Tippers = ElectionsPhragmen;
type TipCountdown = TipCountdown;
type TipFindersFee = TipFindersFee;
type TipReportDepositBase = TipReportDepositBase;
type BountyDepositBase = BountyDepositBase;
type BountyDepositPayoutDelay = BountyDepositPayoutDelay;
type BountyUpdatePeriod = BountyUpdatePeriod;
type MaximumReasonLength = MaximumReasonLength;
type BountyCuratorDeposit = BountyCuratorDeposit;
type BountyValueMinimum = BountyValueMinimum;
// type BurnDestination = Society;
type ProposalBond = ProposalBond;
type ProposalBondMinimum = ProposalBondMinimum;
type SpendPeriod = SpendPeriod;
type OnSlash = Treasury;
type Burn = Burn;
type BurnDestination = ();
// type WeightInfo = weights::pallet_treasury::WeightInfo<Runtime>;
type WeightInfo = ();
type Event = Event;
}
parameter_types! {
pub const MaxScheduledPerBlock: u32 = 50;
}
/// pallet_scheduler by skyh 1020
impl pallet_scheduler::Trait for Runtime {
type PalletsOrigin = OriginCaller;
type MaximumWeight = MaximumBlockWeight;
type ScheduleOrigin = EnsureRoot<AccountId>;
type MaxScheduledPerBlock = MaxScheduledPerBlock;
type WeightInfo = ();
type Origin = Origin;
type Event = Event;
type Call = Call;
}
parameter_types! {
pub const SessionDuration: BlockNumber = EPOCH_DURATION_IN_BLOCKS as _;
pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value();
}
impl pallet_im_online::Trait for Runtime {
type AuthorityId = ImOnlineId;
type SessionDuration = SessionDuration;
type ReportUnresponsiveness = Offences;
type UnsignedPriority = ImOnlineUnsignedPriority;
type WeightInfo = ();
type Event = Event;
}
parameter_types! {
pub const IndexDeposit: Balance = DOLLARS;
}
/// pallet_indices by skyh 1020
impl pallet_indices::Trait for Runtime {
type AccountIndex = AccountIndex;
type Deposit = IndexDeposit;
type Currency = Balances;
type WeightInfo = ();
type Event = Event;
}
parameter_types! {
pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get();
}
/// pallet_offences by skyh 1020
impl pallet_offences::Trait for Runtime {
type IdentificationTuple = pallet_session::historical::IdentificationTuple<Self>;
type WeightSoftLimit = OffencesWeightSoftLimit;
type OnOffenceHandler = Staking;
type Event = Event;
}
impl pallet_utility::Trait for Runtime {
type WeightInfo = ();
type Event = Event;
type Call = Call;
}
parameter_types! {
pub const MinVestedTransfer: Balance = 100 * DOLLARS;
}
/// pallet_vesting by skyh 1020
impl pallet_vesting::Trait for Runtime {
type Event = Event;
type Currency = Balances;
type BlockNumberToBalance = ConvertInto;
type MinVestedTransfer = MinVestedTransfer;
type WeightInfo = ();
}
impl pallet_authority_discovery::Trait for Runtime {}
parameter_types! {
pub const UncleGenerations: BlockNumber = 5;
}
/// pallet_authorship origin
impl pallet_authorship::Trait for Runtime {
type FindAuthor = pallet_session::FindAccountFromAuthorIndex<Self, Babe>;
type UncleGenerations = UncleGenerations;
type FilterUncle = ();
type EventHandler = (Staking, ()); // ImOnline
}
parameter_types! {
pub const MinimumPeriod: u64 = SLOT_DURATION / 2;
}
/// pallet_timestamp origin
impl pallet_timestamp::Trait for Runtime {
/// A timestamp: milliseconds since the unix epoch.
type Moment = Moment;
type OnTimestampSet = Babe;
type MinimumPeriod = MinimumPeriod;
type WeightInfo = ();
}
parameter_types! {
pub const ExistentialDeposit: u128 = 500; // 0
pub const MaxLocks: u32 = 50;
}
/// pallet_balances origin
impl pallet_balances::Trait for Runtime {
type Balance = Balance;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type MaxLocks = MaxLocks;
type AccountStore = System; //
type Event = Event;
type WeightInfo = ();
}
parameter_types! {
pub const TransactionByteFee: Balance = 1;
}
/// pallet_transaction_payment origin
impl pallet_transaction_payment::Trait for Runtime {
type Currency = Balances;
type TransactionByteFee = TransactionByteFee;
type WeightToFee = IdentityFee<Balance>;
type OnTransactionPayment = (); // treasury
type FeeMultiplierUpdate = (); //
}
impl pallet_sudo::Trait for Runtime {
type Event = Event;
type Call = Call;
}
/// Configure the template pallet in pallets/template.
impl pallet_template::Trait for Runtime {
type Event = Event;
}
// TODO
// Create the runtime by composing the FRAME pallets that were previously configured.
construct_runtime!(
pub enum Runtime where
Block = Block,
NodeBlock = opaque::Block,
UncheckedExtrinsic = UncheckedExtrinsic
{
System: frame_system::{Module, Call, Config, Storage, Event<T>},
RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Call, Storage},
Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent},
Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>},
TransactionPayment: pallet_transaction_payment::{Module, Storage},
Sudo: pallet_sudo::{Module, Call, Config<T>, Storage, Event<T>},
Authorship: pallet_authorship::{Module, Call, Storage, Inherent},
Babe: pallet_babe::{Module, Call, Storage, Config, Inherent},
Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event},
Staking: pallet_staking::{Module, Call, Config<T>, Storage, Event<T>},
Session: pallet_session::{Module, Call, Storage, Event, Config<T>},
AuthorityDiscovery: pallet_authority_discovery::{Module, Call, Config},
Council: pallet_collective::<Instance1>::{Module, Call, Storage, Origin<T>, Event<T>, Config<T>},
TechnicalCommittee: pallet_collective::<Instance2>::{Module, Call, Storage, Origin<T>, Event<T>, Config<T>},
Democracy: pallet_democracy::{Module, Call, Storage, Config, Event<T>},
ElectionsPhragmen: pallet_elections_phragmen::{Module, Call, Storage, Event<T>, Config<T>},
TechnicalMembership: pallet_membership::<Instance1>::{Module, Call, Storage, Event<T>, Config<T>},
ImOnline: pallet_im_online::{Module, Call, Storage, Event<T>, ValidateUnsigned, Config<T>},
Indices: pallet_indices::{Module, Call, Storage, Config<T>, Event<T>},
Scheduler: pallet_scheduler::{Module, Call, Storage, Event<T>},
Treasury: pallet_treasury::{Module, Call, Storage, Event<T>},
Offences: pallet_offences::{Module, Call, Storage, Event},
Historical: pallet_session_historical::{Module},
Vesting: pallet_vesting::{Module, Call, Storage, Event<T>, Config<T>},
Utility: pallet_utility::{Module, Call, Event},
// Include the custom logic from the template pallet in the runtime.
TemplateModule: pallet_template::{Module, Call, Storage, Event<T>},
}
);
/// The address format for describing accounts.
pub type Address = AccountId;
/// Block header type as expected by this runtime.
pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
/// Block type as expected by this runtime.
pub type Block = generic::Block<Header, UncheckedExtrinsic>;
/// A Block signed with a Justification
pub type SignedBlock = generic::SignedBlock<Block>;
/// BlockId type as expected by this runtime.
pub type BlockId = generic::BlockId<Block>;
/// The SignedExtension to the basic transaction logic.
pub type SignedExtra = (
frame_system::CheckSpecVersion<Runtime>,
frame_system::CheckTxVersion<Runtime>,
frame_system::CheckGenesis<Runtime>,
frame_system::CheckEra<Runtime>,
frame_system::CheckNonce<Runtime>,
frame_system::CheckWeight<Runtime>,
pallet_transaction_payment::ChargeTransactionPayment<Runtime>
);
/// Unchecked extrinsic type as expected by this runtime.
pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signature, SignedExtra>;
/// Extrinsic type that has already been checked.
pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, Call, SignedExtra>;
/// Executive: handles dispatch to the various modules.
pub type Executive = frame_executive::Executive<
Runtime,
Block,
frame_system::ChainContext<Runtime>,
Runtime,
AllModules,
>;
impl_runtime_apis! {
impl sp_api::Core<Block> for Runtime {
fn version() -> RuntimeVersion {
VERSION
}
fn execute_block(block: Block) {
Executive::execute_block(block)
}
fn initialize_block(header: &<Block as BlockT>::Header) {
Executive::initialize_block(header)
}
}
impl sp_api::Metadata<Block> for Runtime {
fn metadata() -> OpaqueMetadata {
Runtime::metadata().into()
}
}
impl sp_block_builder::BlockBuilder<Block> for Runtime {
fn apply_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> ApplyExtrinsicResult {
Executive::apply_extrinsic(extrinsic)
}
fn finalize_block() -> <Block as BlockT>::Header {
Executive::finalize_block()
}
fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<<Block as BlockT>::Extrinsic> {
data.create_extrinsics()
}
fn check_inherents(
block: Block,
data: sp_inherents::InherentData,
) -> sp_inherents::CheckInherentsResult {
data.check_extrinsics(&block)
}
fn random_seed() -> <Block as BlockT>::Hash {
RandomnessCollectiveFlip::random_seed()
}
}
impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime {
fn validate_transaction(
source: TransactionSource,
tx: <Block as BlockT>::Extrinsic,
) -> TransactionValidity {
Executive::validate_transaction(source, tx)
}
}
impl sp_offchain::OffchainWorkerApi<Block> for Runtime {
fn offchain_worker(header: &<Block as BlockT>::Header) {
Executive::offchain_worker(header)
}
}
// TODO ### sp_consensus_babe by Skyh, 0927 ###
/// configuration
/// current_epoch_start
/// generate_key_ownership_proof
/// submit_report_equivocation_unsigned_extrinsic
impl sp_consensus_babe::BabeApi<Block> for Runtime {
fn configuration() -> sp_consensus_babe::BabeGenesisConfiguration {
// The choice of `c` parameter (where `1 - c` represents the
// probability of a slot being empty), is done in accordance to the
// slot duration and expected target block time, for safely
// resisting network delays of maximum two seconds.
// <https://research.web3.foundation/en/latest/polkadot/BABE/Babe/#6-practical-results>
sp_consensus_babe::BabeGenesisConfiguration {
slot_duration: Babe::slot_duration(),
epoch_length: EpochDuration::get(),
c: PRIMARY_PROBABILITY,
genesis_authorities: Babe::authorities(),
randomness: Babe::randomness(),
allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryPlainSlots,
}
}
fn current_epoch_start() -> sp_consensus_babe::SlotNumber {
Babe::current_epoch_start()
}
fn generate_key_ownership_proof(
_slot_number: sp_consensus_babe::SlotNumber,
authority_id: sp_consensus_babe::AuthorityId,
) -> Option<sp_consensus_babe::OpaqueKeyOwnershipProof> {
use codec::Encode;
Historical::prove((sp_consensus_babe::KEY_TYPE, authority_id))
.map(|p| p.encode())
.map(sp_consensus_babe::OpaqueKeyOwnershipProof::new)
}
fn submit_report_equivocation_unsigned_extrinsic(
equivocation_proof: sp_consensus_babe::EquivocationProof<<Block as BlockT>::Header>,
key_owner_proof: sp_consensus_babe::OpaqueKeyOwnershipProof,
) -> Option<()> {
let key_owner_proof = key_owner_proof.decode()?;
Babe::submit_unsigned_equivocation_report(
equivocation_proof,
key_owner_proof,
)
}
}
impl sp_session::SessionKeys<Block> for Runtime {
fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> {
SessionKeys::generate(seed)
}
fn decode_session_keys(
encoded: Vec<u8>,
) -> Option<Vec<(Vec<u8>, KeyTypeId)>> {
SessionKeys::decode_into_raw_public_keys(&encoded)
}
}
impl fg_primitives::GrandpaApi<Block> for Runtime {
fn grandpa_authorities() -> GrandpaAuthorityList {
Grandpa::grandpa_authorities()
}
fn submit_report_equivocation_unsigned_extrinsic(
_equivocation_proof: fg_primitives::EquivocationProof<
<Block as BlockT>::Hash,
NumberFor<Block>,
>,
_key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof,
) -> Option<()> {
None
}
fn generate_key_ownership_proof(
_set_id: fg_primitives::SetId,
_authority_id: GrandpaId,
) -> Option<fg_primitives::OpaqueKeyOwnershipProof> {
// NOTE: this is the only implementation possible since we've
// defined our key owner proof type as a bottom type (i.e. a type
// with no values).
None
}
}
impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Index> for Runtime {
fn account_nonce(account: AccountId) -> Index {
System::account_nonce(account)
}
}
impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance> for Runtime {
fn query_info(
uxt: <Block as BlockT>::Extrinsic,
len: u32,
) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo<Balance> {
TransactionPayment::query_info(uxt, len)
}
}
#[cfg(feature = "runtime-benchmarks")]
impl frame_benchmarking::Benchmark<Block> for Runtime {
fn dispatch_benchmark(
config: frame_benchmarking::BenchmarkConfig
) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> {
use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey};
use frame_system_benchmarking::Module as SystemBench;
impl frame_system_benchmarking::Trait for Runtime {}
let whitelist: Vec<TrackedStorageKey> = vec![
// Block Number
hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
// Total Issuance
hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
// Execution Phase
hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
// Event Count
hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
// System Events
hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
];
let mut batches = Vec::<BenchmarkBatch>::new();
let params = (&config, &whitelist);
add_benchmark!(params, batches, frame_system, SystemBench::<Runtime>);
add_benchmark!(params, batches, pallet_balances, Balances);
add_benchmark!(params, batches, pallet_timestamp, Timestamp);
if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) }
Ok(batches)
}
}
}
| convert | identifier_name |
lib.rs | #![cfg_attr(not(feature = "std"), no_std)]
// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256.
#![recursion_limit="256"]
// Make the WASM binary available.
#[cfg(feature = "std")]
include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
use sp_std::prelude::*;
use sp_core::{
crypto::KeyTypeId, OpaqueMetadata,
u32_trait::{_1, _2, _3, _4, _5}
};
use sp_runtime::{
create_runtime_str, impl_opaque_keys,
Percent, ModuleId,
generic, ApplyExtrinsicResult,
curve::PiecewiseLinear,
traits::ConvertInto,
transaction_validity::{TransactionPriority, TransactionValidity, TransactionSource},
};
use sp_runtime::traits::{
IdentityLookup, BlakeTwo256, Block as BlockT,
Convert, OpaqueKeys, Verify, IdentifyAccount,
NumberFor, Saturating,
};
use static_assertions::const_assert;
use sp_api::impl_runtime_apis;
use pallet_grandpa::fg_primitives;
use pallet_grandpa::{AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList};
use pallet_session::historical as pallet_session_historical;
use pallet_im_online::sr25519::AuthorityId as ImOnlineId;
#[cfg(feature = "std")]
use sp_version::NativeVersion;
use sp_version::RuntimeVersion;
use frame_system::{EnsureRoot, EnsureOneOf};
use frame_support::{
construct_runtime, parameter_types, StorageValue,
traits::{KeyOwnerProofSystem, Randomness, LockIdentifier, Filter},
weights::{
Weight, IdentityFee,
constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND},
},
};
// A few exports that help ease life for downstream crates.
#[cfg(any(feature = "std", test))]
pub use sp_runtime::BuildStorage;
pub use sp_runtime::{Permill, Perbill};
pub use pallet_timestamp::Call as TimestampCall;
pub use pallet_balances::Call as BalancesCall;
pub use pallet_staking::StakerStatus;
/// Import the template pallet.
pub use pallet_template;
pub mod common;
pub use common::*;
pub use common::opaque;
pub mod constants;
pub use constants::{time::*, currency::*, fee::*};
pub const VERSION: RuntimeVersion = RuntimeVersion {
spec_name: create_runtime_str!("bandot"),
impl_name: create_runtime_str!("bandot"),
authoring_version: 1,
spec_version: 1,
impl_version: 1,
apis: RUNTIME_API_VERSIONS,
transaction_version: 1,
};
/// The version information used to identify this runtime when compiled natively.
#[cfg(feature = "std")]
pub fn native_version() -> NativeVersion {
NativeVersion {
runtime_version: VERSION,
can_author_with: Default::default(),
}
}
impl_opaque_keys! {
pub struct SessionKeys {
pub babe: Babe,
pub grandpa: Grandpa,
pub im_online: ImOnline,
pub authority_discovery: AuthorityDiscovery,
//pub parachain_validator: ParachainSessionKeyPlaceholder<Runtime>,
}
}
/// Avoid processing transactions from slots and parachain registrar.
pub struct BaseFilter;
impl Filter<Call> for BaseFilter {
fn filter(_: &Call) -> bool {
true
}
}
parameter_types! {
pub const BlockHashCount: BlockNumber = 2400;
/// We allow for 2 seconds of compute with a 6 second average block time.
pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
/// Assume 10% of weight for average on_initialize calls.
pub MaximumExtrinsicWeight: Weight = AvailableBlockRatio::get()
.saturating_sub(Perbill::from_percent(10)) * MaximumBlockWeight::get();
pub const MaximumBlockLength: u32 = 5 * 1024 * 1024;
pub const Version: RuntimeVersion = VERSION;
}
/// frame_system by skyh 0927
impl frame_system::Trait for Runtime {
type BaseCallFilter = BaseFilter; /// filter 0927
type AccountId = AccountId;
type AccountData = pallet_balances::AccountData<Balance>;
type Header = generic::Header<BlockNumber, BlakeTwo256>;
type Lookup = IdentityLookup<AccountId>;
type Index = Index;
type Hash = Hash;
type Hashing = BlakeTwo256;
type BlockNumber = BlockNumber;
type BlockHashCount = BlockHashCount;
type BlockExecutionWeight = BlockExecutionWeight;
type ExtrinsicBaseWeight = ExtrinsicBaseWeight;
type MaximumBlockLength = MaximumBlockLength;
type MaximumBlockWeight = MaximumBlockWeight;
type MaximumExtrinsicWeight = MaximumExtrinsicWeight;
type AvailableBlockRatio = AvailableBlockRatio;
type DbWeight = RocksDbWeight;
type PalletInfo = PalletInfo;
type Version = Version;
type Call = Call;
type Event = Event;
type Origin = Origin;
type OnNewAccount = ();
type OnKilledAccount = (); ///
type SystemWeightInfo = ();
}
parameter_types! {
pub const EpochDuration: u64 = EPOCH_DURATION_IN_SLOTS;
pub const ExpectedBlockTime: u64 = MILLISECS_PER_BLOCK;
}
/// pallet_babe by skyh 0927
impl pallet_babe::Trait for Runtime {
type EpochDuration = EpochDuration;
type ExpectedBlockTime = ExpectedBlockTime;
type EpochChangeTrigger = pallet_babe::ExternalTrigger;
type KeyOwnerProofSystem = Historical;
type KeyOwnerProof =
<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, pallet_babe::AuthorityId)>>::Proof;
type KeyOwnerIdentification =
<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, pallet_babe::AuthorityId)>>::IdentificationTuple;
type HandleEquivocation = pallet_babe::EquivocationHandler<Self::KeyOwnerIdentification, ()>; // Offences
type WeightInfo = ();
}
/// pallet_grandpa by skyh 0927
impl pallet_grandpa::Trait for Runtime {
type KeyOwnerProofSystem = Historical;
type KeyOwnerProof =
<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::Proof;
type KeyOwnerIdentification =
<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::IdentificationTuple;
type HandleEquivocation = pallet_grandpa::EquivocationHandler<Self::KeyOwnerIdentification, ()>; // Offences
type WeightInfo = ();
type Event = Event;
type Call = Call;
}
parameter_types! {
pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17);
}
/// pallet_session by skyh 0927
impl pallet_session::Trait for Runtime {
type Keys = SessionKeys;
type ValidatorId = <Self as frame_system::Trait>::AccountId;
type ValidatorIdOf = pallet_staking::StashOf<Self>;
type ShouldEndSession = Babe;
type NextSessionRotation = Babe;
type SessionManager = pallet_session::historical::NoteHistoricalRoot<Self, Staking>;
type SessionHandler = <SessionKeys as OpaqueKeys>::KeyTypeIdProviders;
type DisabledValidatorsThreshold = DisabledValidatorsThreshold;
type WeightInfo = ();
type Event = Event;
}
impl pallet_session::historical::Trait for Runtime {
type FullIdentification = pallet_staking::Exposure<AccountId, Balance>;
type FullIdentificationOf = pallet_staking::ExposureOf<Runtime>;
}
/// frame_system::offchain by skyh 0927
impl<C> frame_system::offchain::SendTransactionTypes<C> for Runtime
where Call: From<C>,
{
type Extrinsic = UncheckedExtrinsic;
type OverarchingCall = Call;
}
pallet_staking_reward_curve::build! {
const REWARD_CURVE: PiecewiseLinear<'static> = curve!(
min_inflation: 0_025_000,
max_inflation: 0_100_000,
ideal_stake: 0_500_000,
falloff: 0_050_000,
max_piece_count: 40,
test_precision: 0_005_000,
);
}
/// Struct that handles the conversion of Balance -> `u64`. This is used for
/// staking's election calculation.
pub struct CurrencyToVoteHandler;
impl CurrencyToVoteHandler {
fn factor() -> Balance {
(Balances::total_issuance() / u64::max_value() as Balance).max(1)
}
}
impl Convert<Balance, u64> for CurrencyToVoteHandler {
fn convert(x: Balance) -> u64 {
(x / Self::factor()) as u64
}
}
impl Convert<u128, Balance> for CurrencyToVoteHandler {
fn convert(x: u128) -> Balance {
x * Self::factor()
}
}
parameter_types! {
pub const SessionsPerEra: sp_staking::SessionIndex = 3; // 3 hours
pub const BondingDuration: pallet_staking::EraIndex = 4; // 12 hours
pub const SlashDeferDuration: pallet_staking::EraIndex = 2; // 6 hours
pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE;
pub const MaxNominatorRewardedPerValidator: u32 = 64;
pub const ElectionLookahead: BlockNumber = EPOCH_DURATION_IN_BLOCKS / 4;
pub const StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2;
pub const MaxIterations: u32 = 5;
// 0.05%. The higher the value, the more strict solution acceptance becomes.
pub MinSolutionScoreBump: Perbill = Perbill::from_rational_approximation(5u32, 10_000);
}
/// pallet_staking::offchain by skyh 0927
impl pallet_staking::Trait for Runtime {
type Currency = Balances;
type UnixTime = Timestamp;
type CurrencyToVote = CurrencyToVoteHandler;
type RewardRemainder = Treasury;
type RewardCurve = RewardCurve;
type Slash = Treasury; // send the slashed funds to the pallet treasury.
type Reward = (); // rewards are minted from the void
type SessionInterface = Self;
type SessionsPerEra = SessionsPerEra;
type BondingDuration = BondingDuration;
type SlashDeferDuration = SlashDeferDuration;
/// A super-majority of the council can cancel the slash.
type SlashCancelOrigin = frame_system::EnsureRoot<Self::AccountId>; // TODO
type NextNewSession = Session;
type ElectionLookahead = ElectionLookahead;
type UnsignedPriority = StakingUnsignedPriority;
type MaxIterations = MaxIterations;
type MinSolutionScoreBump = MinSolutionScoreBump;
type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator;
type WeightInfo = ();
type Event = Event;
type Call = Call;
}
parameter_types! {
pub const CouncilMotionDuration: BlockNumber = 3 * DAYS;
pub const CouncilMaxProposals: u32 = 100;
pub const CouncilMaxMembers: u32 = 100;
}
type CouncilCollective = pallet_collective::Instance1;
pub type MoreThanHalfCouncil = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>
>;
pub type SlashCancelOrigin = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>
>;
/// pallet_collective by skyh 1020
impl pallet_collective::Trait<CouncilCollective> for Runtime {
type MotionDuration = CouncilMotionDuration;
type MaxProposals = CouncilMaxProposals;
type MaxMembers = CouncilMaxMembers;
type DefaultVote = pallet_collective::PrimeDefaultVote;
// type WeightInfo = weights::pallet_collective::WeightInfo<Runtime>;
type WeightInfo = ();
type Origin = Origin;
type Proposal = Call;
type Event = Event;
}
parameter_types! {
pub const TechnicalMotionDuration: BlockNumber = 3 * DAYS;
pub const TechnicalMaxProposals: u32 = 100;
pub const TechnicalMaxMembers: u32 = 100;
}
type TechnicalCollective = pallet_collective::Instance2;
impl pallet_collective::Trait<TechnicalCollective> for Runtime {
type Origin = Origin;
type Proposal = Call;
type Event = Event;
type MotionDuration = TechnicalMotionDuration;
type MaxProposals = TechnicalMaxProposals;
type MaxMembers = TechnicalMaxMembers;
type DefaultVote = pallet_collective::PrimeDefaultVote;
// type WeightInfo = weights::pallet_collective::WeightInfo<Runtime>;
type WeightInfo = ();
}
parameter_types! {
pub const LaunchPeriod: BlockNumber = 7 * DAYS;
pub const VotingPeriod: BlockNumber = 7 * DAYS;
pub const FastTrackVotingPeriod: BlockNumber = 3 * HOURS;
pub const MinimumDeposit: Balance = 1 * DOLLARS;
pub const EnactmentPeriod: BlockNumber = 8 * DAYS;
pub const CooloffPeriod: BlockNumber = 7 * DAYS;
// One cent: $10,000 / MB
pub const PreimageByteDeposit: Balance = 10 * MILLICENTS;
pub const InstantAllowed: bool = true;
pub const MaxVotes: u32 = 100;
pub const MaxProposals: u32 = 100;
}
/// pallet_democracy by skyh 1020
impl pallet_democracy::Trait for Runtime {
type Proposal = Call;
type Event = Event;
type Currency = Balances;
type EnactmentPeriod = EnactmentPeriod;
type LaunchPeriod = LaunchPeriod;
type VotingPeriod = VotingPeriod;
type CooloffPeriod = CooloffPeriod;
type PalletsOrigin = OriginCaller;
type VetoOrigin = pallet_collective::EnsureMember<AccountId, TechnicalCollective>;
type ExternalOrigin = pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>;
type ExternalMajorityOrigin = pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>;
type ExternalDefaultOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>;
/// Two thirds of the technical committee can have an ExternalMajority/ExternalDefault vote
/// be tabled immediately and with a shorter voting/enactment period.
type FastTrackOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>;
type InstantOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>;
type InstantAllowed = InstantAllowed;
type FastTrackVotingPeriod = FastTrackVotingPeriod;
// To cancel a proposal which has been passed, 2/3 of the council must agree to it.
type CancellationOrigin = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>,
>;
// To cancel a proposal before it has been passed, the technical committee must be unanimous or
// Root must agree.
// type CancelProposalOrigin = EnsureOneOf<
// AccountId,
// EnsureRoot<AccountId>,
// pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>,
// >;
// type BlacklistOrigin = EnsureRoot<AccountId>;
// Any single technical committee member may veto a coming council proposal, however they can
// only do it once and it lasts only for the cooloff period.
type Slash = Treasury;
type Scheduler = Scheduler;
type MaxVotes = MaxVotes;
type MinimumDeposit = MinimumDeposit;
type PreimageByteDeposit = PreimageByteDeposit;
type OperationalPreimageOrigin = pallet_collective::EnsureMember<AccountId, CouncilCollective>;
type WeightInfo = ();
// type MaxProposals = MaxProposals;
}
parameter_types! {
pub const CandidacyBond: Balance = 1 * DOLLARS;
pub const VotingBond: Balance = 5 * CENTS;
/// Daily council elections.
pub const TermDuration: BlockNumber = 24 * HOURS;
pub const DesiredMembers: u32 = 19;
pub const DesiredRunnersUp: u32 = 19;
pub const ElectionsPhragmenModuleId: LockIdentifier = *b"phrelect";
}
// Make sure that there are no more than MaxMembers members elected via phragmen.
const_assert!(DesiredMembers::get() <= CouncilMaxMembers::get());
/// pallet_elections_phragmen by skyh 1020
impl pallet_elections_phragmen::Trait for Runtime {
type Event = Event;
type Currency = Balances;
type ChangeMembers = Council;
type InitializeMembers = Council;
type CurrencyToVote = CurrencyToVoteHandler;
type CandidacyBond = CandidacyBond;
type VotingBond = VotingBond;
type LoserCandidate = Treasury;
type BadReport = Treasury;
type KickedMember = Treasury;
type DesiredMembers = DesiredMembers;
type DesiredRunnersUp = DesiredRunnersUp;
type TermDuration = TermDuration;
type ModuleId = ElectionsPhragmenModuleId;
// type WeightInfo = weights::pallet_elections_phragmen::WeightInfo<Runtime>;
type WeightInfo = ();
}
/// pallet_membership by skyh 1020
impl pallet_membership::Trait<pallet_membership::Instance1> for Runtime {
type Event = Event;
type AddOrigin = MoreThanHalfCouncil;
type RemoveOrigin = MoreThanHalfCouncil;
type SwapOrigin = MoreThanHalfCouncil;
type ResetOrigin = MoreThanHalfCouncil;
type PrimeOrigin = MoreThanHalfCouncil;
type MembershipInitialized = TechnicalCommittee;
type MembershipChanged = TechnicalCommittee;
}
parameter_types! {
pub const ProposalBond: Permill = Permill::from_percent(5);
pub const ProposalBondMinimum: Balance = 20 * DOLLARS;
pub const SpendPeriod: BlockNumber = 6 * DAYS;
pub const Burn: Permill = Permill::from_perthousand(2);
pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry");
pub const TipCountdown: BlockNumber = 1 * DAYS;
pub const TipFindersFee: Percent = Percent::from_percent(20);
pub const TipReportDepositBase: Balance = 1 * DOLLARS;
pub const DataDepositPerByte: Balance = 1 * CENTS;
pub const BountyDepositBase: Balance = 1 * DOLLARS;
pub const BountyDepositPayoutDelay: BlockNumber = 4 * DAYS;
pub const BountyUpdatePeriod: BlockNumber = 90 * DAYS;
pub const MaximumReasonLength: u32 = 16384;
pub const BountyCuratorDeposit: Permill = Permill::from_percent(50);
pub const BountyValueMinimum: Balance = 2 * DOLLARS;
}
type ApproveOrigin = EnsureOneOf<
AccountId,
EnsureRoot<AccountId>,
pallet_collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective>
>;
/// pallet_treasury by skyh 1020
impl pallet_treasury::Trait for Runtime {
type ModuleId = TreasuryModuleId;
type Currency = Balances;
type ApproveOrigin = ApproveOrigin;
type RejectOrigin = MoreThanHalfCouncil;
type DataDepositPerByte = DataDepositPerByte;
type Tippers = ElectionsPhragmen;
type TipCountdown = TipCountdown;
type TipFindersFee = TipFindersFee;
type TipReportDepositBase = TipReportDepositBase;
type BountyDepositBase = BountyDepositBase;
type BountyDepositPayoutDelay = BountyDepositPayoutDelay;
type BountyUpdatePeriod = BountyUpdatePeriod;
type MaximumReasonLength = MaximumReasonLength;
type BountyCuratorDeposit = BountyCuratorDeposit;
type BountyValueMinimum = BountyValueMinimum;
// type BurnDestination = Society;
type ProposalBond = ProposalBond;
type ProposalBondMinimum = ProposalBondMinimum;
type SpendPeriod = SpendPeriod;
type OnSlash = Treasury;
type Burn = Burn;
type BurnDestination = ();
// type WeightInfo = weights::pallet_treasury::WeightInfo<Runtime>;
type WeightInfo = ();
type Event = Event;
}
parameter_types! {
pub const MaxScheduledPerBlock: u32 = 50;
}
/// pallet_scheduler by skyh 1020
impl pallet_scheduler::Trait for Runtime {
type PalletsOrigin = OriginCaller;
type MaximumWeight = MaximumBlockWeight;
type ScheduleOrigin = EnsureRoot<AccountId>;
type MaxScheduledPerBlock = MaxScheduledPerBlock;
type WeightInfo = ();
type Origin = Origin;
type Event = Event;
type Call = Call;
}
parameter_types! {
pub const SessionDuration: BlockNumber = EPOCH_DURATION_IN_BLOCKS as _;
pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value();
}
impl pallet_im_online::Trait for Runtime {
type AuthorityId = ImOnlineId;
type SessionDuration = SessionDuration;
type ReportUnresponsiveness = Offences;
type UnsignedPriority = ImOnlineUnsignedPriority;
type WeightInfo = ();
type Event = Event;
}
parameter_types! {
pub const IndexDeposit: Balance = DOLLARS;
}
/// pallet_indices by skyh 1020
impl pallet_indices::Trait for Runtime {
type AccountIndex = AccountIndex;
type Deposit = IndexDeposit;
type Currency = Balances;
type WeightInfo = ();
type Event = Event;
}
parameter_types! {
pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get();
}
/// pallet_offences by skyh 1020
impl pallet_offences::Trait for Runtime {
type IdentificationTuple = pallet_session::historical::IdentificationTuple<Self>;
type WeightSoftLimit = OffencesWeightSoftLimit;
type OnOffenceHandler = Staking;
type Event = Event;
}
impl pallet_utility::Trait for Runtime {
type WeightInfo = ();
type Event = Event;
type Call = Call;
}
parameter_types! {
pub const MinVestedTransfer: Balance = 100 * DOLLARS;
}
/// pallet_vesting by skyh 1020
impl pallet_vesting::Trait for Runtime {
type Event = Event;
type Currency = Balances;
type BlockNumberToBalance = ConvertInto;
type MinVestedTransfer = MinVestedTransfer;
type WeightInfo = ();
}
impl pallet_authority_discovery::Trait for Runtime {}
parameter_types! {
pub const UncleGenerations: BlockNumber = 5;
}
/// pallet_authorship origin
impl pallet_authorship::Trait for Runtime {
type FindAuthor = pallet_session::FindAccountFromAuthorIndex<Self, Babe>;
type UncleGenerations = UncleGenerations;
type FilterUncle = ();
type EventHandler = (Staking, ()); // ImOnline
}
parameter_types! {
pub const MinimumPeriod: u64 = SLOT_DURATION / 2;
}
/// pallet_timestamp origin
impl pallet_timestamp::Trait for Runtime {
/// A timestamp: milliseconds since the unix epoch.
type Moment = Moment;
type OnTimestampSet = Babe;
type MinimumPeriod = MinimumPeriod;
type WeightInfo = ();
}
parameter_types! {
pub const ExistentialDeposit: u128 = 500; // 0
pub const MaxLocks: u32 = 50;
}
/// pallet_balances origin
impl pallet_balances::Trait for Runtime {
type Balance = Balance;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type MaxLocks = MaxLocks; | type WeightInfo = ();
}
parameter_types! {
pub const TransactionByteFee: Balance = 1;
}
/// pallet_transaction_payment origin
impl pallet_transaction_payment::Trait for Runtime {
type Currency = Balances;
type TransactionByteFee = TransactionByteFee;
type WeightToFee = IdentityFee<Balance>;
type OnTransactionPayment = (); // treasury
type FeeMultiplierUpdate = (); //
}
impl pallet_sudo::Trait for Runtime {
type Event = Event;
type Call = Call;
}
/// Configure the template pallet in pallets/template.
impl pallet_template::Trait for Runtime {
type Event = Event;
}
// TODO
// Create the runtime by composing the FRAME pallets that were previously configured.
construct_runtime!(
pub enum Runtime where
Block = Block,
NodeBlock = opaque::Block,
UncheckedExtrinsic = UncheckedExtrinsic
{
System: frame_system::{Module, Call, Config, Storage, Event<T>},
RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Call, Storage},
Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent},
Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>},
TransactionPayment: pallet_transaction_payment::{Module, Storage},
Sudo: pallet_sudo::{Module, Call, Config<T>, Storage, Event<T>},
Authorship: pallet_authorship::{Module, Call, Storage, Inherent},
Babe: pallet_babe::{Module, Call, Storage, Config, Inherent},
Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event},
Staking: pallet_staking::{Module, Call, Config<T>, Storage, Event<T>},
Session: pallet_session::{Module, Call, Storage, Event, Config<T>},
AuthorityDiscovery: pallet_authority_discovery::{Module, Call, Config},
Council: pallet_collective::<Instance1>::{Module, Call, Storage, Origin<T>, Event<T>, Config<T>},
TechnicalCommittee: pallet_collective::<Instance2>::{Module, Call, Storage, Origin<T>, Event<T>, Config<T>},
Democracy: pallet_democracy::{Module, Call, Storage, Config, Event<T>},
ElectionsPhragmen: pallet_elections_phragmen::{Module, Call, Storage, Event<T>, Config<T>},
TechnicalMembership: pallet_membership::<Instance1>::{Module, Call, Storage, Event<T>, Config<T>},
ImOnline: pallet_im_online::{Module, Call, Storage, Event<T>, ValidateUnsigned, Config<T>},
Indices: pallet_indices::{Module, Call, Storage, Config<T>, Event<T>},
Scheduler: pallet_scheduler::{Module, Call, Storage, Event<T>},
Treasury: pallet_treasury::{Module, Call, Storage, Event<T>},
Offences: pallet_offences::{Module, Call, Storage, Event},
Historical: pallet_session_historical::{Module},
Vesting: pallet_vesting::{Module, Call, Storage, Event<T>, Config<T>},
Utility: pallet_utility::{Module, Call, Event},
// Include the custom logic from the template pallet in the runtime.
TemplateModule: pallet_template::{Module, Call, Storage, Event<T>},
}
);
/// The address format for describing accounts.
pub type Address = AccountId;
/// Block header type as expected by this runtime.
pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
/// Block type as expected by this runtime.
pub type Block = generic::Block<Header, UncheckedExtrinsic>;
/// A Block signed with a Justification
pub type SignedBlock = generic::SignedBlock<Block>;
/// BlockId type as expected by this runtime.
pub type BlockId = generic::BlockId<Block>;
/// The SignedExtension to the basic transaction logic.
pub type SignedExtra = (
frame_system::CheckSpecVersion<Runtime>,
frame_system::CheckTxVersion<Runtime>,
frame_system::CheckGenesis<Runtime>,
frame_system::CheckEra<Runtime>,
frame_system::CheckNonce<Runtime>,
frame_system::CheckWeight<Runtime>,
pallet_transaction_payment::ChargeTransactionPayment<Runtime>
);
/// Unchecked extrinsic type as expected by this runtime.
pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signature, SignedExtra>;
/// Extrinsic type that has already been checked.
pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, Call, SignedExtra>;
/// Executive: handles dispatch to the various modules.
pub type Executive = frame_executive::Executive<
Runtime,
Block,
frame_system::ChainContext<Runtime>,
Runtime,
AllModules,
>;
impl_runtime_apis! {
impl sp_api::Core<Block> for Runtime {
fn version() -> RuntimeVersion {
VERSION
}
fn execute_block(block: Block) {
Executive::execute_block(block)
}
fn initialize_block(header: &<Block as BlockT>::Header) {
Executive::initialize_block(header)
}
}
impl sp_api::Metadata<Block> for Runtime {
fn metadata() -> OpaqueMetadata {
Runtime::metadata().into()
}
}
impl sp_block_builder::BlockBuilder<Block> for Runtime {
fn apply_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> ApplyExtrinsicResult {
Executive::apply_extrinsic(extrinsic)
}
fn finalize_block() -> <Block as BlockT>::Header {
Executive::finalize_block()
}
fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<<Block as BlockT>::Extrinsic> {
data.create_extrinsics()
}
fn check_inherents(
block: Block,
data: sp_inherents::InherentData,
) -> sp_inherents::CheckInherentsResult {
data.check_extrinsics(&block)
}
fn random_seed() -> <Block as BlockT>::Hash {
RandomnessCollectiveFlip::random_seed()
}
}
impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime {
fn validate_transaction(
source: TransactionSource,
tx: <Block as BlockT>::Extrinsic,
) -> TransactionValidity {
Executive::validate_transaction(source, tx)
}
}
impl sp_offchain::OffchainWorkerApi<Block> for Runtime {
fn offchain_worker(header: &<Block as BlockT>::Header) {
Executive::offchain_worker(header)
}
}
// TODO ### sp_consensus_babe by Skyh, 0927 ###
/// configuration
/// current_epoch_start
/// generate_key_ownership_proof
/// submit_report_equivocation_unsigned_extrinsic
impl sp_consensus_babe::BabeApi<Block> for Runtime {
fn configuration() -> sp_consensus_babe::BabeGenesisConfiguration {
// The choice of `c` parameter (where `1 - c` represents the
// probability of a slot being empty), is done in accordance to the
// slot duration and expected target block time, for safely
// resisting network delays of maximum two seconds.
// <https://research.web3.foundation/en/latest/polkadot/BABE/Babe/#6-practical-results>
sp_consensus_babe::BabeGenesisConfiguration {
slot_duration: Babe::slot_duration(),
epoch_length: EpochDuration::get(),
c: PRIMARY_PROBABILITY,
genesis_authorities: Babe::authorities(),
randomness: Babe::randomness(),
allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryPlainSlots,
}
}
fn current_epoch_start() -> sp_consensus_babe::SlotNumber {
Babe::current_epoch_start()
}
fn generate_key_ownership_proof(
_slot_number: sp_consensus_babe::SlotNumber,
authority_id: sp_consensus_babe::AuthorityId,
) -> Option<sp_consensus_babe::OpaqueKeyOwnershipProof> {
use codec::Encode;
Historical::prove((sp_consensus_babe::KEY_TYPE, authority_id))
.map(|p| p.encode())
.map(sp_consensus_babe::OpaqueKeyOwnershipProof::new)
}
fn submit_report_equivocation_unsigned_extrinsic(
equivocation_proof: sp_consensus_babe::EquivocationProof<<Block as BlockT>::Header>,
key_owner_proof: sp_consensus_babe::OpaqueKeyOwnershipProof,
) -> Option<()> {
let key_owner_proof = key_owner_proof.decode()?;
Babe::submit_unsigned_equivocation_report(
equivocation_proof,
key_owner_proof,
)
}
}
impl sp_session::SessionKeys<Block> for Runtime {
fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> {
SessionKeys::generate(seed)
}
fn decode_session_keys(
encoded: Vec<u8>,
) -> Option<Vec<(Vec<u8>, KeyTypeId)>> {
SessionKeys::decode_into_raw_public_keys(&encoded)
}
}
impl fg_primitives::GrandpaApi<Block> for Runtime {
fn grandpa_authorities() -> GrandpaAuthorityList {
Grandpa::grandpa_authorities()
}
fn submit_report_equivocation_unsigned_extrinsic(
_equivocation_proof: fg_primitives::EquivocationProof<
<Block as BlockT>::Hash,
NumberFor<Block>,
>,
_key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof,
) -> Option<()> {
None
}
fn generate_key_ownership_proof(
_set_id: fg_primitives::SetId,
_authority_id: GrandpaId,
) -> Option<fg_primitives::OpaqueKeyOwnershipProof> {
// NOTE: this is the only implementation possible since we've
// defined our key owner proof type as a bottom type (i.e. a type
// with no values).
None
}
}
impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Index> for Runtime {
fn account_nonce(account: AccountId) -> Index {
System::account_nonce(account)
}
}
impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance> for Runtime {
fn query_info(
uxt: <Block as BlockT>::Extrinsic,
len: u32,
) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo<Balance> {
TransactionPayment::query_info(uxt, len)
}
}
#[cfg(feature = "runtime-benchmarks")]
impl frame_benchmarking::Benchmark<Block> for Runtime {
fn dispatch_benchmark(
config: frame_benchmarking::BenchmarkConfig
) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> {
use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey};
use frame_system_benchmarking::Module as SystemBench;
impl frame_system_benchmarking::Trait for Runtime {}
let whitelist: Vec<TrackedStorageKey> = vec![
// Block Number
hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
// Total Issuance
hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
// Execution Phase
hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
// Event Count
hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
// System Events
hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
];
let mut batches = Vec::<BenchmarkBatch>::new();
let params = (&config, &whitelist);
add_benchmark!(params, batches, frame_system, SystemBench::<Runtime>);
add_benchmark!(params, batches, pallet_balances, Balances);
add_benchmark!(params, batches, pallet_timestamp, Timestamp);
if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) }
Ok(batches)
}
}
} | type AccountStore = System; //
type Event = Event; | random_line_split |
paxos.go | package paxos
//
// Paxos library, to be included in an application.
// Multiple applications will run, each including
// a Paxos peer.
//
// Manages a sequence of agreed-on values.
// The set of peers is fixed.
// Copes with network failures (partition, msg loss, &c).
// Does not store anything persistently, so cannot handle crash+restart.
//
// The application interface:
//
// px = paxos.Make(peers []string, me string)
// px.Start(seq int, v interface{}) -- start agreement on new instance
// px.Status(seq int) (decided bool, v interface{}) -- get info about an instance
// px.Done(seq int) -- ok to forget all instances <= seq
// px.Max() int -- highest instance seq known, or -1
// px.Min() int -- instances before this seq have been forgotten
//
import "net"
import "net/rpc"
import "log"
import "os"
import "syscall"
import "sync"
import "fmt"
import "math/rand"
const (
DBG_PREPARE = false
DBG_ACCEPT = false
DBG_PROPOSER = false
DBG_DECIDED = false
DBG_DONE = false
DBG_RPCCOUNT = false
)
type Slot_t struct {
Decided bool
V interface{}
}
type Proposal struct {
// should not use PNum 0
PNum int
Value interface{}
}
type Paxos struct {
mu sync.Mutex
l net.Listener
dead bool
unreliable bool
rpcCount int
peers []string
me int // index into peers[]
// Your data here.
peers_count int
majority int
max_seq int
// highest number ever passed to (all) Done
global_done int
local_done int
// [A] highest accept seen
APa map[int]Proposal
// [A] highest prepare seen
APp map[int]int
Lslots map[int]Slot_t
}
//
// call() sends an RPC to the rpcname handler on server srv
// with arguments args, waits for the reply, and leaves the
// reply in reply. the reply argument should be a pointer
// to a reply structure.
//
// the return value is true if the server responded, and false
// if call() was not able to contact the server. in particular,
// the replys contents are only valid if call() returned true.
//
// you should assume that call() will time out and return an
// error after a while if it does not get a reply from the server.
//
// please use call() to send all RPCs, in client.go and server.go.
// please do not change this function.
//
func call(srv string, name string, args interface{}, reply interface{}) bool {
c, err := rpc.Dial("unix", srv)
if err != nil {
err1 := err.(*net.OpError)
if err1.Err != syscall.ENOENT && err1.Err != syscall.ECONNREFUSED {
fmt.Printf("paxos Dial() failed: %v\n", err1)
}
return false
}
defer c.Close()
// fmt.Printf("Call srv:%s name:%s\n", srv, name)
err = c.Call(name, args, reply)
// fmt.Printf("After Call %s, err:%v, rpl:%v\n", srv, err, reply)
if err == nil {
return true
}
return false
}
/* clog(bool, func_name, format) */
func (px *Paxos) clog(dbg bool, funcname, format string, args ...interface{}) {
if dbg {
l1 := fmt.Sprintf("[%s] me:%d\n", funcname, px.me)
l2 := fmt.Sprintf("...."+format, args...)
fmt.Println(l1 + l2)
}
}
/* Proposer
* send prepare request for pNum in slot seq
* return
* - if OK to send accept request
* - highest-numbered proposal
*/
func (px *Paxos) send_prepare(seq int, pNum int) (bool, Proposal) {
ok_count := 0
max_reject_pnum := 0
p := Proposal{}
for idx, peer := range px.peers {
args := &PrepareArgs{}
reply := &PrepareReply{}
args.Seq = seq
args.PNum = pNum
// if DBG_PREPARE {
// fmt.Printf("[send_prepare] me:%d\n....to %s\n",
// px.me, peer)
// }
ok := false
if idx == px.me {
px.Prepare(args, reply)
ok = true
} else {
ok = call(peer, "Paxos.Prepare", args, reply)
}
// TODO: what if I got only one Reject?
if ok && reply.Err == OK {
ok_count++
if reply.Proposal.PNum > p.PNum {
p = reply.Proposal
}
}
if ok && reply.Err == Reject && reply.Proposal.PNum > max_reject_pnum {
// if rejected, record the highest PNum seen
max_reject_pnum = reply.Proposal.PNum
}
}
px.clog(DBG_PREPARE, "send_prepare", "seq=%d n=%d ok_count=%d/%d max_rej:%d", seq, pNum, ok_count, px.majority, max_reject_pnum)
if ok_count >= px.majority {
return true, p
} else {
return false, Proposal{max_reject_pnum, nil}
}
}
/* Acceptor
* handler for prepare request
*/
func (px *Paxos) Prepare(args *PrepareArgs, reply *PrepareReply) error {
if args.PNum > px.APp[args.Seq] {
// prepare request with higher Proposal Number
px.APp[args.Seq] = args.PNum
reply.Err = OK
reply.Proposal = px.APa[args.Seq]
} else {
// Already promised to Proposal with a higher Proposal Number
reply.Err = Reject
reply.Proposal = Proposal{px.APp[args.Seq], nil}
}
return nil
}
/* Proposer
* send accept requests
* return true if success
*/
func (px *Paxos) send_accept(seq int, p Proposal) bool {
ok_count := 0
for idx, peer := range px.peers {
args := &AcceptArgs{}
reply := &AcceptReply{}
args.Seq = seq
args.Proposal = p
ok := false
if idx == px.me {
px.Accept(args, reply)
ok = true
} else {
ok = call(peer, "Paxos.Accept", args, reply)
}
if ok && reply.Err == OK {
ok_count++
}
}
px.clog(DBG_PREPARE, "send_accept", "seq=%d p=%v ok_count=%d/%d", seq, p, ok_count, px.majority)
return (ok_count >= px.majority)
}
/* Acceptor
* handler for Accept request
*/
func (px *Paxos) Accept(args *AcceptArgs, reply *AcceptReply) error {
if args.Proposal.PNum >= px.APp[args.Seq] {
px.APp[args.Seq] = args.Proposal.PNum
px.APa[args.Seq] = args.Proposal
reply.Err = OK
} else {
reply.Err = Reject
}
return nil
}
/* Proposer
* send decided value to all
*/
func (px *Paxos) | (seq int, v interface{}) {
for idx, peer := range px.peers {
args := &DecdidedArgs{}
reply := &DecidedReply{}
args.Seq = seq
args.V = v
if idx == px.me {
px.Decided(args, reply)
} else {
call(peer, "Paxos.Decided", args, reply)
}
}
}
/* Learner
* handler for decide notification
*/
func (px *Paxos) Decided(args *DecdidedArgs, reply *DecidedReply) error {
px.clog(DBG_DECIDED, "Decided", "Seq=%d V=%v", args.Seq, args.V)
px.Lslots[args.Seq] = Slot_t{true, args.V}
if args.Seq > px.max_seq {
px.max_seq = args.Seq
}
reply.Err = OK
return nil
}
//
// the application wants paxos to start agreement on
// instance seq, with proposed value v.
// Start() returns right away; the application will
// call Status() to find out if/when agreement
// is reached.
//
func (px *Paxos) Start(seq int, v interface{}) {
// run Paxos algorithm in a new thread(run the Paxos protocol concurrently)
// play the role of proposer
// Your code here.
px.clog(DBG_PROPOSER, "Start", "Start seq=%d v=%v", seq, v)
// I'm Proposer
go func() {
n := 0
max_reject_pnum := -1
for {
if px.dead {
// I'm dead
break
}
if px.Lslots[seq].Decided {
// locally decided, wouldn't send prepare and accept anymore
// just propagate the decision
px.send_decided(seq, px.Lslots[seq].V)
break
}
if px.APp[seq]+1 > n {
n = px.APp[seq] + 1
} else {
n++
}
if n < max_reject_pnum {
n = max_reject_pnum + 1
}
px.clog(DBG_PROPOSER, "Start", "send prepare, seq=%d n=%d", seq, n)
prepare_ok, p := px.send_prepare(seq, n)
if !prepare_ok {
max_reject_pnum = p.PNum
continue
}
new_p := Proposal{}
// no proposal yet, use v
if p.PNum == 0 {
new_p.Value = v
} else {
new_p.Value = p.Value
}
new_p.PNum = n
px.clog(DBG_PROPOSER, "Start", "prepare OK, proposal=%v", new_p)
accept_ok := px.send_accept(seq, new_p)
if !accept_ok {
continue
}
px.clog(DBG_PROPOSER, "Start", "accept OK")
px.send_decided(seq, new_p.Value)
px.clog(DBG_PROPOSER, "Start", "decided")
break
}
}()
}
//
// the application on this machine is done with
// all instances <= seq.
//
// see the comments for Min() for more explanation.
//
func (px *Paxos) Done(seq int) {
// Your code here.
px.clog(DBG_DONE, "Done", "Done: %d", seq)
if seq <= px.local_done {
return
}
// update local_done if need
px.local_done = seq
for k, _ := range px.Lslots {
if k <= seq {
delete(px.Lslots, k)
delete(px.APa, k)
delete(px.APp, k)
px.clog(DBG_DONE, "Done", "delete %d", k)
}
}
px.clog(DBG_DONE, "Done", "local_done=%d", px.local_done)
// check to see if it is OK to update global_done
if px.send_IfDone(seq) {
px.global_done = seq
px.send_IsDone(seq)
}
}
func (px *Paxos) send_IfDone(seq int) bool {
ok_count := 0
px.clog(DBG_DONE, "send_IfDone", "check seq=%d ", seq)
for idx, peer := range px.peers {
args := &IfDoneArgs{}
reply := &IfDoneReply{}
args.Seq = seq
ok := call(peer, "Paxos.IfDone", args, reply)
px.clog(DBG_DONE, "send_IfDone", "peer %d retuen %t, %v", idx, ok, reply.Err)
if ok && reply.Err == OK {
ok_count++
}
}
px.clog(DBG_DONE, "send_IfDone", "ok_count:%d", ok_count)
return (ok_count == px.peers_count)
}
//
// other peer ask that if args.Seq is done at this peer
//
func (px *Paxos) IfDone(args *IfDoneArgs, reply *IfDoneReply) error {
px.clog(DBG_DONE, "IfDone", "check %d, local_done=%d", args.Seq, px.local_done)
if args.Seq <= px.local_done {
reply.Err = OK
} else {
// make other peers wait for me
reply.Err = Reject
}
return nil
}
func (px *Paxos) send_IsDone(seq int) {
for _, peer := range px.peers {
args := &IsDoneArgs{}
reply := &IsDoneReply{}
args.Seq = seq
call(peer, "Paxos.IsDone", args, reply)
}
}
func (px *Paxos) IsDone(args *IsDoneArgs, reply *IsDoneReply) error {
px.global_done = args.Seq
px.Done(px.local_done)
return nil
}
//
// the application wants to know the
// highest instance sequence known to
// this peer.
//
func (px *Paxos) Max() int {
// Your code here.
return px.max_seq
}
//
// Min() should return one more than the minimum among z_i,
// where z_i is the highest number ever passed
// to Done() on peer i. A peers z_i is -1 if it has
// never called Done().
//
// Paxos is required to have forgotten all information
// about any instances it knows that are < Min().
// The point is to free up memory in long-running
// Paxos-based servers.
//
// It is illegal to call Done(i) on a peer and
// then call Start(j) on that peer for any j <= i.
//
// Paxos peers need to exchange their highest Done()
// arguments in order to implement Min(). These
// exchanges can be piggybacked on ordinary Paxos
// agreement protocol messages, so it is OK if one
// peers Min does not reflect another Peers Done()
// until after the next instance is agreed to.
//
// The fact that Min() is defined as a minimum over
// *all* Paxos peers means that Min() cannot increase until
// all peers have been heard from. So if a peer is dead
// or unreachable, other peers Min()s will not increase
// even if all reachable peers call Done. The reason for
// this is that when the unreachable peer comes back to
// life, it will need to catch up on instances that it
// missed -- the other peers therefor cannot forget these
// instances.
//
func (px *Paxos) Min() int {
// You code here.
return px.global_done + 1
}
//
// the application wants to know whether this
// peer thinks an instance has been decided,
// and if so what the agreed value is. Status()
// should just inspect the local peer state;
// it should not contact other Paxos peers.
//
func (px *Paxos) Status(seq int) (bool, interface{}) {
// Your code here.
return px.Lslots[seq].Decided, px.Lslots[seq].V
}
//
// tell the peer to shut itself down.
// for testing.
// please do not change this function.
//
func (px *Paxos) Kill() {
px.dead = true
if px.l != nil {
px.l.Close()
}
}
//
// the application wants to create a paxos peer.
// the ports of all the paxos peers (including this one)
// are in peers[]. this servers port is peers[me].
//
func Make(peers []string, me int, rpcs *rpc.Server) *Paxos {
px := &Paxos{}
px.peers = peers
px.me = me
//fmt.Printf("#### Make %d/%d ####\n", me, len(peers))
// Your initialization code here.
px.peers_count = len(peers)
px.majority = (px.peers_count + 1) / 2
px.max_seq = -1
px.global_done = -1
px.local_done = -1
px.APp = map[int]int{}
px.APa = map[int]Proposal{}
px.Lslots = map[int]Slot_t{}
if rpcs != nil {
// caller will create socket &c
rpcs.Register(px)
} else {
rpcs = rpc.NewServer()
rpcs.Register(px)
// prepare to receive connections from clients.
// change "unix" to "tcp" to use over a network.
os.Remove(peers[me]) // only needed for "unix"
l, e := net.Listen("unix", peers[me])
if e != nil {
log.Fatal("listen error: ", e)
}
px.l = l
// please do not change any of the following code,
// or do anything to subvert it.
// create a thread to accept RPC connections
go func() {
for px.dead == false {
conn, err := px.l.Accept()
if err == nil && px.dead == false {
if px.unreliable && (rand.Int63()%1000) < 100 {
// discard the request.
conn.Close()
} else if px.unreliable && (rand.Int63()%1000) < 200 {
// process the request but force discard of reply.
c1 := conn.(*net.UnixConn)
f, _ := c1.File()
err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
if err != nil {
fmt.Printf("shutdown: %v\n", err)
}
px.rpcCount++
if DBG_RPCCOUNT {
fmt.Println("*** RPC++ ***")
}
go rpcs.ServeConn(conn)
} else {
px.rpcCount++
if DBG_RPCCOUNT {
fmt.Println("*** RPC++ ***")
}
go rpcs.ServeConn(conn)
}
} else if err == nil {
conn.Close()
}
if err != nil && px.dead == false {
fmt.Printf("Paxos(%v) accept: %v\n", me, err.Error())
}
}
}()
}
return px
}
| send_decided | identifier_name |
paxos.go | package paxos
//
// Paxos library, to be included in an application.
// Multiple applications will run, each including
// a Paxos peer.
//
// Manages a sequence of agreed-on values.
// The set of peers is fixed.
// Copes with network failures (partition, msg loss, &c).
// Does not store anything persistently, so cannot handle crash+restart.
//
// The application interface:
//
// px = paxos.Make(peers []string, me string)
// px.Start(seq int, v interface{}) -- start agreement on new instance
// px.Status(seq int) (decided bool, v interface{}) -- get info about an instance
// px.Done(seq int) -- ok to forget all instances <= seq
// px.Max() int -- highest instance seq known, or -1
// px.Min() int -- instances before this seq have been forgotten
//
import "net"
import "net/rpc"
import "log"
import "os"
import "syscall"
import "sync"
import "fmt"
import "math/rand"
const (
DBG_PREPARE = false
DBG_ACCEPT = false
DBG_PROPOSER = false
DBG_DECIDED = false
DBG_DONE = false
DBG_RPCCOUNT = false
)
type Slot_t struct {
Decided bool
V interface{}
}
type Proposal struct {
// should not use PNum 0
PNum int
Value interface{}
}
type Paxos struct {
mu sync.Mutex
l net.Listener
dead bool
unreliable bool
rpcCount int
peers []string
me int // index into peers[]
// Your data here.
peers_count int
majority int
max_seq int
// highest number ever passed to (all) Done
global_done int
local_done int
// [A] highest accept seen
APa map[int]Proposal
// [A] highest prepare seen
APp map[int]int
Lslots map[int]Slot_t
}
//
// call() sends an RPC to the rpcname handler on server srv
// with arguments args, waits for the reply, and leaves the
// reply in reply. the reply argument should be a pointer
// to a reply structure.
//
// the return value is true if the server responded, and false
// if call() was not able to contact the server. in particular,
// the replys contents are only valid if call() returned true.
//
// you should assume that call() will time out and return an
// error after a while if it does not get a reply from the server.
//
// please use call() to send all RPCs, in client.go and server.go.
// please do not change this function.
//
func call(srv string, name string, args interface{}, reply interface{}) bool {
c, err := rpc.Dial("unix", srv)
if err != nil {
// NOTE(review): assumes every Dial error is a *net.OpError; a
// different concrete error type would panic this assertion --
// presumably acceptable for the lab harness, confirm.
err1 := err.(*net.OpError)
if err1.Err != syscall.ENOENT && err1.Err != syscall.ECONNREFUSED {
// only report unexpected dial failures; a missing socket or a
// refused connection just means the peer is down.
fmt.Printf("paxos Dial() failed: %v\n", err1)
}
return false
}
// release the connection whether or not the call succeeds
defer c.Close()
// fmt.Printf("Call srv:%s name:%s\n", srv, name)
err = c.Call(name, args, reply)
// fmt.Printf("After Call %s, err:%v, rpl:%v\n", srv, err, reply)
if err == nil {
return true
}
return false
}
/* clog(bool, func_name, format) */
// clog prints a two-line debug trace ("[funcname] me:N" followed by
// "....<formatted message>") when the given debug switch is on.
func (px *Paxos) clog(dbg bool, funcname, format string, args ...interface{}) {
	if !dbg {
		return
	}
	header := fmt.Sprintf("[%s] me:%d", funcname, px.me)
	message := fmt.Sprintf("...."+format, args...)
	fmt.Println(header + "\n" + message)
}
/* Proposer: phase 1.
 * Send a prepare request for proposal number pNum in instance seq to
 * every peer (ourselves via a direct method call, others over RPC).
 * Returns:
 *   - true iff a majority promised, i.e. it is OK to send accepts;
 *   - on success, the highest-numbered proposal any promiser has
 *     already accepted (zero Proposal if none), whose value the
 *     caller must re-propose;
 *   - on failure, Proposal{max_reject_pnum, nil} so the caller can
 *     choose a larger number for the next round.
 */
func (px *Paxos) send_prepare(seq int, pNum int) (bool, Proposal) {
ok_count := 0
max_reject_pnum := 0
p := Proposal{}
for idx, peer := range px.peers {
args := &PrepareArgs{}
reply := &PrepareReply{}
args.Seq = seq
args.PNum = pNum
// if DBG_PREPARE {
// fmt.Printf("[send_prepare] me:%d\n....to %s\n",
// px.me, peer)
// }
ok := false
if idx == px.me {
// local acceptor: bypass the RPC machinery
px.Prepare(args, reply)
ok = true
} else {
ok = call(peer, "Paxos.Prepare", args, reply)
}
// TODO: what if I got only one Reject?
if ok && reply.Err == OK {
ok_count++
if reply.Proposal.PNum > p.PNum {
// track the highest-numbered already-accepted proposal
p = reply.Proposal
}
}
if ok && reply.Err == Reject && reply.Proposal.PNum > max_reject_pnum {
// if rejected, record the highest PNum seen
max_reject_pnum = reply.Proposal.PNum
}
}
px.clog(DBG_PREPARE, "send_prepare", "seq=%d n=%d ok_count=%d/%d max_rej:%d", seq, pNum, ok_count, px.majority, max_reject_pnum)
if ok_count >= px.majority {
return true, p
} else {
return false, Proposal{max_reject_pnum, nil}
}
}
/* Acceptor
* handler for prepare request
*/
// Prepare is the acceptor's phase-1 handler.
// If args.PNum beats the highest prepare promised for this instance,
// it promises args.PNum and reports the highest-numbered proposal it
// has already accepted (zero Proposal if none); otherwise it rejects
// and reports the promised number so the proposer can pick a bigger
// one.
func (px *Paxos) Prepare(args *PrepareArgs, reply *PrepareReply) error {
	// Fix: RPC handlers run concurrently (one goroutine per connection
	// in Make), so the acceptor maps must not be mutated without the
	// mutex -- unguarded concurrent map writes panic at runtime.
	px.mu.Lock()
	defer px.mu.Unlock()
	if args.PNum > px.APp[args.Seq] {
		// prepare request with higher Proposal Number
		px.APp[args.Seq] = args.PNum
		reply.Err = OK
		reply.Proposal = px.APa[args.Seq]
	} else {
		// Already promised to Proposal with an equal or higher Number
		reply.Err = Reject
		reply.Proposal = Proposal{px.APp[args.Seq], nil}
	}
	return nil
}
/* Proposer
* send accept requests
* return true if success
*/
// send_accept is the proposer's phase 2: send accept requests carrying
// proposal p for instance seq to every peer (ourselves by direct
// call). Returns true iff a majority accepted.
func (px *Paxos) send_accept(seq int, p Proposal) bool {
	ok_count := 0
	for idx, peer := range px.peers {
		args := &AcceptArgs{}
		reply := &AcceptReply{}
		args.Seq = seq
		args.Proposal = p
		ok := false
		if idx == px.me {
			// local acceptor: bypass the RPC machinery
			px.Accept(args, reply)
			ok = true
		} else {
			ok = call(peer, "Paxos.Accept", args, reply)
		}
		if ok && reply.Err == OK {
			ok_count++
		}
	}
	// Fix: this trace belongs to the accept phase, so gate it on
	// DBG_ACCEPT (it was mistakenly gated on DBG_PREPARE).
	px.clog(DBG_ACCEPT, "send_accept", "seq=%d p=%v ok_count=%d/%d", seq, p, ok_count, px.majority)
	return (ok_count >= px.majority)
}
/* Acceptor
* handler for Accept request
*/
func (px *Paxos) Accept(args *AcceptArgs, reply *AcceptReply) error |
/* Proposer
* send decided value to all
*/
// send_decided broadcasts the decided value v for instance seq to
// every peer -- ourselves by direct method call, everyone else over
// RPC. Delivery is best-effort: errors and replies are ignored (the
// proposer loop in Start re-broadcasts for already-decided slots).
func (px *Paxos) send_decided(seq int, v interface{}) {
	for i, peer := range px.peers {
		a := &DecdidedArgs{Seq: seq, V: v}
		r := &DecidedReply{}
		if i == px.me {
			px.Decided(a, r)
			continue
		}
		call(peer, "Paxos.Decided", a, r)
	}
}
/* Learner
* handler for decide notification
*/
// Decided is the learner's handler: record that instance args.Seq was
// decided with value args.V and bump max_seq if this is the highest
// instance seen so far.
func (px *Paxos) Decided(args *DecdidedArgs, reply *DecidedReply) error {
	px.clog(DBG_DECIDED, "Decided", "Seq=%d V=%v", args.Seq, args.V)
	// Fix: RPC handlers run concurrently (one goroutine per connection
	// in Make); writing the Lslots map unguarded is a concurrent map
	// write, which panics at runtime. Guard Lslots/max_seq with mu.
	px.mu.Lock()
	defer px.mu.Unlock()
	px.Lslots[args.Seq] = Slot_t{true, args.V}
	if args.Seq > px.max_seq {
		px.max_seq = args.Seq
	}
	reply.Err = OK
	return nil
}
//
// the application wants paxos to start agreement on
// instance seq, with proposed value v.
// Start() returns right away; the application will
// call Status() to find out if/when agreement
// is reached.
//
// Start kicks off agreement on instance seq with proposed value v and
// returns immediately; the proposer protocol runs in its own
// goroutine and the caller polls Status() for the outcome.
func (px *Paxos) Start(seq int, v interface{}) {
// run Paxos algorithm in a new thread(run the Paxos protocol concurrently)
// play the role of proposer
// Your code here.
px.clog(DBG_PROPOSER, "Start", "Start seq=%d v=%v", seq, v)
// I'm Proposer
// NOTE(review): this goroutine reads px.dead, px.Lslots and px.APp
// without holding px.mu while RPC handlers mutate them concurrently
// -- a data race; confirm against the lab's tolerance for this.
go func() {
// n is our current proposal number; it must keep growing across
// rounds so we eventually beat any competing proposer.
n := 0
// highest proposal number a rejecting acceptor reported; forces
// the next n above it.
max_reject_pnum := -1
for {
if px.dead {
// I'm dead
break
}
if px.Lslots[seq].Decided {
// locally decided, wouldn't send prepare and accept anymore
// just propagate the decision
px.send_decided(seq, px.Lslots[seq].V)
break
}
// pick n above our own acceptor's highest promise, else bump
if px.APp[seq]+1 > n {
n = px.APp[seq] + 1
} else {
n++
}
// and also above anything a rejection told us about
if n < max_reject_pnum {
n = max_reject_pnum + 1
}
px.clog(DBG_PROPOSER, "Start", "send prepare, seq=%d n=%d", seq, n)
prepare_ok, p := px.send_prepare(seq, n)
if !prepare_ok {
max_reject_pnum = p.PNum
continue
}
new_p := Proposal{}
// no proposal yet, use v
if p.PNum == 0 {
new_p.Value = v
} else {
// Paxos rule: must re-propose the highest accepted value
new_p.Value = p.Value
}
new_p.PNum = n
px.clog(DBG_PROPOSER, "Start", "prepare OK, proposal=%v", new_p)
accept_ok := px.send_accept(seq, new_p)
if !accept_ok {
// lost phase 2; retry the whole round with a bigger n
continue
}
px.clog(DBG_PROPOSER, "Start", "accept OK")
px.send_decided(seq, new_p.Value)
px.clog(DBG_PROPOSER, "Start", "decided")
break
}
}()
}
//
// the application on this machine is done with
// all instances <= seq.
//
// see the comments for Min() for more explanation.
//
// Done records that the application has finished with all instances
// <= seq: it frees local state for them and, if every peer agrees,
// advances the global watermark (which drives Min()).
func (px *Paxos) Done(seq int) {
// Your code here.
px.clog(DBG_DONE, "Done", "Done: %d", seq)
if seq <= px.local_done {
// already done at least this far; nothing to do
return
}
// update local_done if need
px.local_done = seq
// forget learner and acceptor state for finished instances.
// NOTE(review): only keys present in Lslots are cleaned -- APa/APp
// entries for instances <= seq that never reached Decided() are
// never deleted; confirm this leak is acceptable.
for k, _ := range px.Lslots {
if k <= seq {
delete(px.Lslots, k)
delete(px.APa, k)
delete(px.APp, k)
px.clog(DBG_DONE, "Done", "delete %d", k)
}
}
px.clog(DBG_DONE, "Done", "local_done=%d", px.local_done)
// check to see if it is OK to update global_done
// (requires unanimity from send_IfDone; then broadcast the new
// watermark to everyone via send_IsDone)
if px.send_IfDone(seq) {
px.global_done = seq
px.send_IsDone(seq)
}
}
// send_IfDone asks every peer (including ourselves, through our own
// RPC endpoint) whether it has locally finished all instances <= seq.
// It returns true only on a unanimous OK, which is the precondition
// for advancing global_done -- Min() may not move until every peer
// has been heard from.
func (px *Paxos) send_IfDone(seq int) bool {
	ok_count := 0
	px.clog(DBG_DONE, "send_IfDone", "check seq=%d ", seq)
	for idx, peer := range px.peers {
		args := &IfDoneArgs{}
		reply := &IfDoneReply{}
		args.Seq = seq
		// unlike the prepare/accept paths, we go through RPC even for
		// ourselves; our listener serves the request.
		ok := call(peer, "Paxos.IfDone", args, reply)
		// Fix: debug message misspelled "return" as "retuen".
		px.clog(DBG_DONE, "send_IfDone", "peer %d return %t, %v", idx, ok, reply.Err)
		if ok && reply.Err == OK {
			ok_count++
		}
	}
	px.clog(DBG_DONE, "send_IfDone", "ok_count:%d", ok_count)
	// unanimity, not majority
	return (ok_count == px.peers_count)
}
//
// other peer ask that if args.Seq is done at this peer
//
// IfDone answers another peer's query about whether this peer has
// passed args.Seq (or more) to Done(). Answering Reject makes the
// asking peer hold back its global watermark and wait for us.
func (px *Paxos) IfDone(args *IfDoneArgs, reply *IfDoneReply) error {
	px.clog(DBG_DONE, "IfDone", "check %d, local_done=%d", args.Seq, px.local_done)
	reply.Err = Reject // default: make other peers wait for me
	if args.Seq <= px.local_done {
		reply.Err = OK
	}
	return nil
}
// send_IsDone announces to every peer (ourselves included, via our
// RPC endpoint) that instances <= seq are globally done. Best-effort:
// call errors and replies are ignored.
func (px *Paxos) send_IsDone(seq int) {
	for _, peer := range px.peers {
		call(peer, "Paxos.IsDone", &IsDoneArgs{Seq: seq}, &IsDoneReply{})
	}
}
// IsDone is the RPC a peer broadcasts after all peers agreed that
// instances <= args.Seq are done; we record the new global watermark.
func (px *Paxos) IsDone(args *IsDoneArgs, reply *IsDoneReply) error {
px.global_done = args.Seq
// NOTE(review): Done(px.local_done) returns immediately because
// seq <= px.local_done always holds there -- this call looks like a
// no-op; confirm the intent.
px.Done(px.local_done)
return nil
}
//
// the application wants to know the
// highest instance sequence known to
// this peer.
//
func (px *Paxos) Max() int {
// Your code here.
// NOTE(review): max_seq is only updated in Decided(), so a seq that
// was Start()ed but not yet decided is not reflected here -- confirm
// this satisfies the "highest instance seq known" contract.
return px.max_seq
}
//
// Min() should return one more than the minimum among z_i,
// where z_i is the highest number ever passed
// to Done() on peer i. A peers z_i is -1 if it has
// never called Done().
//
// Paxos is required to have forgotten all information
// about any instances it knows that are < Min().
// The point is to free up memory in long-running
// Paxos-based servers.
//
// It is illegal to call Done(i) on a peer and
// then call Start(j) on that peer for any j <= i.
//
// Paxos peers need to exchange their highest Done()
// arguments in order to implement Min(). These
// exchanges can be piggybacked on ordinary Paxos
// agreement protocol messages, so it is OK if one
// peers Min does not reflect another Peers Done()
// until after the next instance is agreed to.
//
// The fact that Min() is defined as a minimum over
// *all* Paxos peers means that Min() cannot increase until
// all peers have been heard from. So if a peer is dead
// or unreachable, other peers Min()s will not increase
// even if all reachable peers call Done. The reason for
// this is that when the unreachable peer comes back to
// life, it will need to catch up on instances that it
// missed -- the other peers therefor cannot forget these
// instances.
//
func (px *Paxos) Min() int {
// You code here.
// global_done only advances after send_IfDone gets a unanimous OK
// (see Done), so every instance <= global_done has been forgotten by
// all peers; the first live instance is the next one.
return px.global_done + 1
}
//
// the application wants to know whether this
// peer thinks an instance has been decided,
// and if so what the agreed value is. Status()
// should just inspect the local peer state;
// it should not contact other Paxos peers.
//
// Status reports, from purely local learner state, whether instance
// seq has been decided and the decided value (false and nil when the
// instance is unknown or forgotten). It contacts no other peers.
func (px *Paxos) Status(seq int) (bool, interface{}) {
	slot := px.Lslots[seq]
	return slot.Decided, slot.V
}
//
// tell the peer to shut itself down.
// for testing.
// please do not change this function.
//
func (px *Paxos) Kill() {
// mark dead so the proposer goroutines and the accept loop in Make
// stop on their next iteration
px.dead = true
if px.l != nil {
// closing the listener unblocks the pending Accept() in Make
px.l.Close()
}
}
//
// the application wants to create a paxos peer.
// the ports of all the paxos peers (including this one)
// are in peers[]. this servers port is peers[me].
//
func Make(peers []string, me int, rpcs *rpc.Server) *Paxos {
px := &Paxos{}
px.peers = peers
px.me = me
//fmt.Printf("#### Make %d/%d ####\n", me, len(peers))
// Your initialization code here.
px.peers_count = len(peers)
// smallest strict majority of peers_count
px.majority = (px.peers_count + 1) / 2
// -1 sentinels: no instance seen / nothing Done yet
px.max_seq = -1
px.global_done = -1
px.local_done = -1
px.APp = map[int]int{}
px.APa = map[int]Proposal{}
px.Lslots = map[int]Slot_t{}
if rpcs != nil {
// caller will create socket &c
rpcs.Register(px)
} else {
rpcs = rpc.NewServer()
rpcs.Register(px)
// prepare to receive connections from clients.
// change "unix" to "tcp" to use over a network.
os.Remove(peers[me]) // only needed for "unix"
l, e := net.Listen("unix", peers[me])
if e != nil {
log.Fatal("listen error: ", e)
}
px.l = l
// please do not change any of the following code,
// or do anything to subvert it.
// create a thread to accept RPC connections
// (each connection is served in its own goroutine below, so RPC
// handlers on this Paxos run concurrently)
go func() {
for px.dead == false {
conn, err := px.l.Accept()
if err == nil && px.dead == false {
if px.unreliable && (rand.Int63()%1000) < 100 {
// discard the request.
conn.Close()
} else if px.unreliable && (rand.Int63()%1000) < 200 {
// process the request but force discard of reply.
c1 := conn.(*net.UnixConn)
f, _ := c1.File()
err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
if err != nil {
fmt.Printf("shutdown: %v\n", err)
}
px.rpcCount++
if DBG_RPCCOUNT {
fmt.Println("*** RPC++ ***")
}
go rpcs.ServeConn(conn)
} else {
px.rpcCount++
if DBG_RPCCOUNT {
fmt.Println("*** RPC++ ***")
}
go rpcs.ServeConn(conn)
}
} else if err == nil {
conn.Close()
}
if err != nil && px.dead == false {
fmt.Printf("Paxos(%v) accept: %v\n", me, err.Error())
}
}
}()
}
return px
}
| {
if args.Proposal.PNum >= px.APp[args.Seq] {
px.APp[args.Seq] = args.Proposal.PNum
px.APa[args.Seq] = args.Proposal
reply.Err = OK
} else {
reply.Err = Reject
}
return nil
} | identifier_body |
paxos.go | package paxos
//
// Paxos library, to be included in an application.
// Multiple applications will run, each including
// a Paxos peer.
//
// Manages a sequence of agreed-on values.
// The set of peers is fixed.
// Copes with network failures (partition, msg loss, &c).
// Does not store anything persistently, so cannot handle crash+restart.
//
// The application interface:
//
// px = paxos.Make(peers []string, me string)
// px.Start(seq int, v interface{}) -- start agreement on new instance
// px.Status(seq int) (decided bool, v interface{}) -- get info about an instance
// px.Done(seq int) -- ok to forget all instances <= seq
// px.Max() int -- highest instance seq known, or -1
// px.Min() int -- instances before this seq have been forgotten
//
import "net"
import "net/rpc"
import "log"
import "os"
import "syscall"
import "sync"
import "fmt"
import "math/rand"
const (
DBG_PREPARE = false
DBG_ACCEPT = false
DBG_PROPOSER = false
DBG_DECIDED = false
DBG_DONE = false
DBG_RPCCOUNT = false
)
type Slot_t struct {
Decided bool
V interface{}
}
type Proposal struct {
// should not use PNum 0
PNum int
Value interface{}
}
type Paxos struct {
mu sync.Mutex
l net.Listener
dead bool
unreliable bool
rpcCount int
peers []string
me int // index into peers[]
// Your data here.
peers_count int
majority int
max_seq int
// highest number ever passed to (all) Done
global_done int
local_done int
// [A] highest accept seen
APa map[int]Proposal
// [A] highest prepare seen
APp map[int]int
Lslots map[int]Slot_t
}
//
// call() sends an RPC to the rpcname handler on server srv
// with arguments args, waits for the reply, and leaves the
// reply in reply. the reply argument should be a pointer
// to a reply structure.
//
// the return value is true if the server responded, and false
// if call() was not able to contact the server. in particular,
// the replys contents are only valid if call() returned true.
//
// you should assume that call() will time out and return an
// error after a while if it does not get a reply from the server.
//
// please use call() to send all RPCs, in client.go and server.go.
// please do not change this function.
//
func call(srv string, name string, args interface{}, reply interface{}) bool {
c, err := rpc.Dial("unix", srv)
if err != nil {
err1 := err.(*net.OpError)
if err1.Err != syscall.ENOENT && err1.Err != syscall.ECONNREFUSED |
return false
}
defer c.Close()
// fmt.Printf("Call srv:%s name:%s\n", srv, name)
err = c.Call(name, args, reply)
// fmt.Printf("After Call %s, err:%v, rpl:%v\n", srv, err, reply)
if err == nil {
return true
}
return false
}
/* clog(bool, func_name, format) */
func (px *Paxos) clog(dbg bool, funcname, format string, args ...interface{}) {
if dbg {
l1 := fmt.Sprintf("[%s] me:%d\n", funcname, px.me)
l2 := fmt.Sprintf("...."+format, args...)
fmt.Println(l1 + l2)
}
}
/* Proposer
* send prepare request for pNum in slot seq
* return
* - if OK to send accept request
* - highest-numbered proposal
*/
func (px *Paxos) send_prepare(seq int, pNum int) (bool, Proposal) {
ok_count := 0
max_reject_pnum := 0
p := Proposal{}
for idx, peer := range px.peers {
args := &PrepareArgs{}
reply := &PrepareReply{}
args.Seq = seq
args.PNum = pNum
// if DBG_PREPARE {
// fmt.Printf("[send_prepare] me:%d\n....to %s\n",
// px.me, peer)
// }
ok := false
if idx == px.me {
px.Prepare(args, reply)
ok = true
} else {
ok = call(peer, "Paxos.Prepare", args, reply)
}
// TODO: what if I got only one Reject?
if ok && reply.Err == OK {
ok_count++
if reply.Proposal.PNum > p.PNum {
p = reply.Proposal
}
}
if ok && reply.Err == Reject && reply.Proposal.PNum > max_reject_pnum {
// if rejected, record the highest PNum seen
max_reject_pnum = reply.Proposal.PNum
}
}
px.clog(DBG_PREPARE, "send_prepare", "seq=%d n=%d ok_count=%d/%d max_rej:%d", seq, pNum, ok_count, px.majority, max_reject_pnum)
if ok_count >= px.majority {
return true, p
} else {
return false, Proposal{max_reject_pnum, nil}
}
}
/* Acceptor
* handler for prepare request
*/
func (px *Paxos) Prepare(args *PrepareArgs, reply *PrepareReply) error {
if args.PNum > px.APp[args.Seq] {
// prepare request with higher Proposal Number
px.APp[args.Seq] = args.PNum
reply.Err = OK
reply.Proposal = px.APa[args.Seq]
} else {
// Already promised to Proposal with a higher Proposal Number
reply.Err = Reject
reply.Proposal = Proposal{px.APp[args.Seq], nil}
}
return nil
}
/* Proposer
* send accept requests
* return true if success
*/
func (px *Paxos) send_accept(seq int, p Proposal) bool {
ok_count := 0
for idx, peer := range px.peers {
args := &AcceptArgs{}
reply := &AcceptReply{}
args.Seq = seq
args.Proposal = p
ok := false
if idx == px.me {
px.Accept(args, reply)
ok = true
} else {
ok = call(peer, "Paxos.Accept", args, reply)
}
if ok && reply.Err == OK {
ok_count++
}
}
px.clog(DBG_PREPARE, "send_accept", "seq=%d p=%v ok_count=%d/%d", seq, p, ok_count, px.majority)
return (ok_count >= px.majority)
}
/* Acceptor
* handler for Accept request
*/
// Accept is the acceptor's phase-2 handler: accept the proposal if
// its number is at least the highest prepare promised for this
// instance (recording it as both the promise and the accepted
// proposal), otherwise reject.
func (px *Paxos) Accept(args *AcceptArgs, reply *AcceptReply) error {
	// Fix: RPC handlers run concurrently (one goroutine per connection
	// in Make); mutating the APp/APa maps unguarded is a concurrent
	// map write, which panics at runtime. Guard with mu.
	px.mu.Lock()
	defer px.mu.Unlock()
	if args.Proposal.PNum >= px.APp[args.Seq] {
		px.APp[args.Seq] = args.Proposal.PNum
		px.APa[args.Seq] = args.Proposal
		reply.Err = OK
	} else {
		reply.Err = Reject
	}
	return nil
}
/* Proposer
* send decided value to all
*/
func (px *Paxos) send_decided(seq int, v interface{}) {
for idx, peer := range px.peers {
args := &DecdidedArgs{}
reply := &DecidedReply{}
args.Seq = seq
args.V = v
if idx == px.me {
px.Decided(args, reply)
} else {
call(peer, "Paxos.Decided", args, reply)
}
}
}
/* Learner
* handler for decide notification
*/
func (px *Paxos) Decided(args *DecdidedArgs, reply *DecidedReply) error {
px.clog(DBG_DECIDED, "Decided", "Seq=%d V=%v", args.Seq, args.V)
px.Lslots[args.Seq] = Slot_t{true, args.V}
if args.Seq > px.max_seq {
px.max_seq = args.Seq
}
reply.Err = OK
return nil
}
//
// the application wants paxos to start agreement on
// instance seq, with proposed value v.
// Start() returns right away; the application will
// call Status() to find out if/when agreement
// is reached.
//
func (px *Paxos) Start(seq int, v interface{}) {
// run Paxos algorithm in a new thread(run the Paxos protocol concurrently)
// play the role of proposer
// Your code here.
px.clog(DBG_PROPOSER, "Start", "Start seq=%d v=%v", seq, v)
// I'm Proposer
go func() {
n := 0
max_reject_pnum := -1
for {
if px.dead {
// I'm dead
break
}
if px.Lslots[seq].Decided {
// locally decided, wouldn't send prepare and accept anymore
// just propagate the decision
px.send_decided(seq, px.Lslots[seq].V)
break
}
if px.APp[seq]+1 > n {
n = px.APp[seq] + 1
} else {
n++
}
if n < max_reject_pnum {
n = max_reject_pnum + 1
}
px.clog(DBG_PROPOSER, "Start", "send prepare, seq=%d n=%d", seq, n)
prepare_ok, p := px.send_prepare(seq, n)
if !prepare_ok {
max_reject_pnum = p.PNum
continue
}
new_p := Proposal{}
// no proposal yet, use v
if p.PNum == 0 {
new_p.Value = v
} else {
new_p.Value = p.Value
}
new_p.PNum = n
px.clog(DBG_PROPOSER, "Start", "prepare OK, proposal=%v", new_p)
accept_ok := px.send_accept(seq, new_p)
if !accept_ok {
continue
}
px.clog(DBG_PROPOSER, "Start", "accept OK")
px.send_decided(seq, new_p.Value)
px.clog(DBG_PROPOSER, "Start", "decided")
break
}
}()
}
//
// the application on this machine is done with
// all instances <= seq.
//
// see the comments for Min() for more explanation.
//
func (px *Paxos) Done(seq int) {
// Your code here.
px.clog(DBG_DONE, "Done", "Done: %d", seq)
if seq <= px.local_done {
return
}
// update local_done if need
px.local_done = seq
for k, _ := range px.Lslots {
if k <= seq {
delete(px.Lslots, k)
delete(px.APa, k)
delete(px.APp, k)
px.clog(DBG_DONE, "Done", "delete %d", k)
}
}
px.clog(DBG_DONE, "Done", "local_done=%d", px.local_done)
// check to see if it is OK to update global_done
if px.send_IfDone(seq) {
px.global_done = seq
px.send_IsDone(seq)
}
}
func (px *Paxos) send_IfDone(seq int) bool {
ok_count := 0
px.clog(DBG_DONE, "send_IfDone", "check seq=%d ", seq)
for idx, peer := range px.peers {
args := &IfDoneArgs{}
reply := &IfDoneReply{}
args.Seq = seq
ok := call(peer, "Paxos.IfDone", args, reply)
px.clog(DBG_DONE, "send_IfDone", "peer %d retuen %t, %v", idx, ok, reply.Err)
if ok && reply.Err == OK {
ok_count++
}
}
px.clog(DBG_DONE, "send_IfDone", "ok_count:%d", ok_count)
return (ok_count == px.peers_count)
}
//
// other peer ask that if args.Seq is done at this peer
//
func (px *Paxos) IfDone(args *IfDoneArgs, reply *IfDoneReply) error {
px.clog(DBG_DONE, "IfDone", "check %d, local_done=%d", args.Seq, px.local_done)
if args.Seq <= px.local_done {
reply.Err = OK
} else {
// make other peers wait for me
reply.Err = Reject
}
return nil
}
func (px *Paxos) send_IsDone(seq int) {
for _, peer := range px.peers {
args := &IsDoneArgs{}
reply := &IsDoneReply{}
args.Seq = seq
call(peer, "Paxos.IsDone", args, reply)
}
}
func (px *Paxos) IsDone(args *IsDoneArgs, reply *IsDoneReply) error {
px.global_done = args.Seq
px.Done(px.local_done)
return nil
}
//
// the application wants to know the
// highest instance sequence known to
// this peer.
//
func (px *Paxos) Max() int {
// Your code here.
return px.max_seq
}
//
// Min() should return one more than the minimum among z_i,
// where z_i is the highest number ever passed
// to Done() on peer i. A peers z_i is -1 if it has
// never called Done().
//
// Paxos is required to have forgotten all information
// about any instances it knows that are < Min().
// The point is to free up memory in long-running
// Paxos-based servers.
//
// It is illegal to call Done(i) on a peer and
// then call Start(j) on that peer for any j <= i.
//
// Paxos peers need to exchange their highest Done()
// arguments in order to implement Min(). These
// exchanges can be piggybacked on ordinary Paxos
// agreement protocol messages, so it is OK if one
// peers Min does not reflect another Peers Done()
// until after the next instance is agreed to.
//
// The fact that Min() is defined as a minimum over
// *all* Paxos peers means that Min() cannot increase until
// all peers have been heard from. So if a peer is dead
// or unreachable, other peers Min()s will not increase
// even if all reachable peers call Done. The reason for
// this is that when the unreachable peer comes back to
// life, it will need to catch up on instances that it
// missed -- the other peers therefor cannot forget these
// instances.
//
func (px *Paxos) Min() int {
// You code here.
return px.global_done + 1
}
//
// the application wants to know whether this
// peer thinks an instance has been decided,
// and if so what the agreed value is. Status()
// should just inspect the local peer state;
// it should not contact other Paxos peers.
//
func (px *Paxos) Status(seq int) (bool, interface{}) {
// Your code here.
return px.Lslots[seq].Decided, px.Lslots[seq].V
}
//
// tell the peer to shut itself down.
// for testing.
// please do not change this function.
//
func (px *Paxos) Kill() {
px.dead = true
if px.l != nil {
px.l.Close()
}
}
//
// the application wants to create a paxos peer.
// the ports of all the paxos peers (including this one)
// are in peers[]. this servers port is peers[me].
//
func Make(peers []string, me int, rpcs *rpc.Server) *Paxos {
px := &Paxos{}
px.peers = peers
px.me = me
//fmt.Printf("#### Make %d/%d ####\n", me, len(peers))
// Your initialization code here.
px.peers_count = len(peers)
px.majority = (px.peers_count + 1) / 2
px.max_seq = -1
px.global_done = -1
px.local_done = -1
px.APp = map[int]int{}
px.APa = map[int]Proposal{}
px.Lslots = map[int]Slot_t{}
if rpcs != nil {
// caller will create socket &c
rpcs.Register(px)
} else {
rpcs = rpc.NewServer()
rpcs.Register(px)
// prepare to receive connections from clients.
// change "unix" to "tcp" to use over a network.
os.Remove(peers[me]) // only needed for "unix"
l, e := net.Listen("unix", peers[me])
if e != nil {
log.Fatal("listen error: ", e)
}
px.l = l
// please do not change any of the following code,
// or do anything to subvert it.
// create a thread to accept RPC connections
go func() {
for px.dead == false {
conn, err := px.l.Accept()
if err == nil && px.dead == false {
if px.unreliable && (rand.Int63()%1000) < 100 {
// discard the request.
conn.Close()
} else if px.unreliable && (rand.Int63()%1000) < 200 {
// process the request but force discard of reply.
c1 := conn.(*net.UnixConn)
f, _ := c1.File()
err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
if err != nil {
fmt.Printf("shutdown: %v\n", err)
}
px.rpcCount++
if DBG_RPCCOUNT {
fmt.Println("*** RPC++ ***")
}
go rpcs.ServeConn(conn)
} else {
px.rpcCount++
if DBG_RPCCOUNT {
fmt.Println("*** RPC++ ***")
}
go rpcs.ServeConn(conn)
}
} else if err == nil {
conn.Close()
}
if err != nil && px.dead == false {
fmt.Printf("Paxos(%v) accept: %v\n", me, err.Error())
}
}
}()
}
return px
}
| {
fmt.Printf("paxos Dial() failed: %v\n", err1)
} | conditional_block |
paxos.go | package paxos
//
// Paxos library, to be included in an application.
// Multiple applications will run, each including
// a Paxos peer.
//
// Manages a sequence of agreed-on values.
// The set of peers is fixed.
// Copes with network failures (partition, msg loss, &c).
// Does not store anything persistently, so cannot handle crash+restart.
//
// The application interface:
//
// px = paxos.Make(peers []string, me string)
// px.Start(seq int, v interface{}) -- start agreement on new instance
// px.Status(seq int) (decided bool, v interface{}) -- get info about an instance
// px.Done(seq int) -- ok to forget all instances <= seq
// px.Max() int -- highest instance seq known, or -1
// px.Min() int -- instances before this seq have been forgotten
//
import "net"
import "net/rpc"
import "log"
import "os"
import "syscall"
import "sync"
import "fmt"
import "math/rand"
const (
DBG_PREPARE = false
DBG_ACCEPT = false
DBG_PROPOSER = false
DBG_DECIDED = false
DBG_DONE = false
DBG_RPCCOUNT = false
)
type Slot_t struct {
Decided bool
V interface{}
}
type Proposal struct {
// should not use PNum 0
PNum int
Value interface{}
}
type Paxos struct {
mu sync.Mutex
l net.Listener
dead bool
unreliable bool
rpcCount int
peers []string
me int // index into peers[]
// Your data here.
peers_count int
majority int
max_seq int
// highest number ever passed to (all) Done
global_done int
local_done int
// [A] highest accept seen
APa map[int]Proposal
// [A] highest prepare seen
APp map[int]int
Lslots map[int]Slot_t
}
//
// call() sends an RPC to the rpcname handler on server srv
// with arguments args, waits for the reply, and leaves the
// reply in reply. the reply argument should be a pointer
// to a reply structure.
//
// the return value is true if the server responded, and false
// if call() was not able to contact the server. in particular,
// the replys contents are only valid if call() returned true.
//
// you should assume that call() will time out and return an
// error after a while if it does not get a reply from the server.
//
// please use call() to send all RPCs, in client.go and server.go.
// please do not change this function.
//
func call(srv string, name string, args interface{}, reply interface{}) bool {
c, err := rpc.Dial("unix", srv)
if err != nil {
err1 := err.(*net.OpError)
if err1.Err != syscall.ENOENT && err1.Err != syscall.ECONNREFUSED {
fmt.Printf("paxos Dial() failed: %v\n", err1)
}
return false
}
defer c.Close()
// fmt.Printf("Call srv:%s name:%s\n", srv, name)
err = c.Call(name, args, reply)
// fmt.Printf("After Call %s, err:%v, rpl:%v\n", srv, err, reply)
if err == nil {
return true
}
return false
}
/* clog(bool, func_name, format) */
func (px *Paxos) clog(dbg bool, funcname, format string, args ...interface{}) {
if dbg {
l1 := fmt.Sprintf("[%s] me:%d\n", funcname, px.me)
l2 := fmt.Sprintf("...."+format, args...)
fmt.Println(l1 + l2)
}
}
/* Proposer
* send prepare request for pNum in slot seq
* return
* - if OK to send accept request
* - highest-numbered proposal
*/
func (px *Paxos) send_prepare(seq int, pNum int) (bool, Proposal) {
ok_count := 0
max_reject_pnum := 0
p := Proposal{}
for idx, peer := range px.peers {
args := &PrepareArgs{}
reply := &PrepareReply{}
args.Seq = seq
args.PNum = pNum
// if DBG_PREPARE {
// fmt.Printf("[send_prepare] me:%d\n....to %s\n",
// px.me, peer)
// }
ok := false
if idx == px.me {
px.Prepare(args, reply)
ok = true
} else {
ok = call(peer, "Paxos.Prepare", args, reply)
}
// TODO: what if I got only one Reject?
if ok && reply.Err == OK {
ok_count++
if reply.Proposal.PNum > p.PNum {
p = reply.Proposal
}
}
if ok && reply.Err == Reject && reply.Proposal.PNum > max_reject_pnum {
// if rejected, record the highest PNum seen
max_reject_pnum = reply.Proposal.PNum
}
}
px.clog(DBG_PREPARE, "send_prepare", "seq=%d n=%d ok_count=%d/%d max_rej:%d", seq, pNum, ok_count, px.majority, max_reject_pnum)
if ok_count >= px.majority {
return true, p
} else {
return false, Proposal{max_reject_pnum, nil}
}
}
/* Acceptor
* handler for prepare request
*/
func (px *Paxos) Prepare(args *PrepareArgs, reply *PrepareReply) error {
if args.PNum > px.APp[args.Seq] {
// prepare request with higher Proposal Number
px.APp[args.Seq] = args.PNum
reply.Err = OK
reply.Proposal = px.APa[args.Seq]
} else {
// Already promised to Proposal with a higher Proposal Number
reply.Err = Reject
reply.Proposal = Proposal{px.APp[args.Seq], nil}
}
return nil
}
/* Proposer
* send accept requests
* return true if success
*/
func (px *Paxos) send_accept(seq int, p Proposal) bool {
ok_count := 0
for idx, peer := range px.peers {
args := &AcceptArgs{}
reply := &AcceptReply{}
args.Seq = seq
args.Proposal = p
ok := false
if idx == px.me {
px.Accept(args, reply)
ok = true
} else {
ok = call(peer, "Paxos.Accept", args, reply)
}
if ok && reply.Err == OK {
ok_count++
}
}
px.clog(DBG_PREPARE, "send_accept", "seq=%d p=%v ok_count=%d/%d", seq, p, ok_count, px.majority)
return (ok_count >= px.majority)
}
/* Acceptor
* handler for Accept request
*/
func (px *Paxos) Accept(args *AcceptArgs, reply *AcceptReply) error {
if args.Proposal.PNum >= px.APp[args.Seq] {
px.APp[args.Seq] = args.Proposal.PNum
px.APa[args.Seq] = args.Proposal
reply.Err = OK
} else {
reply.Err = Reject
}
return nil
}
/* Proposer
* send decided value to all
*/
func (px *Paxos) send_decided(seq int, v interface{}) {
for idx, peer := range px.peers {
args := &DecdidedArgs{}
reply := &DecidedReply{}
args.Seq = seq
args.V = v
if idx == px.me {
px.Decided(args, reply)
} else {
call(peer, "Paxos.Decided", args, reply)
}
}
}
/* Learner
* handler for decide notification
*/
func (px *Paxos) Decided(args *DecdidedArgs, reply *DecidedReply) error {
px.clog(DBG_DECIDED, "Decided", "Seq=%d V=%v", args.Seq, args.V)
px.Lslots[args.Seq] = Slot_t{true, args.V}
if args.Seq > px.max_seq {
px.max_seq = args.Seq
}
reply.Err = OK
return nil
}
//
// the application wants paxos to start agreement on
// instance seq, with proposed value v.
// Start() returns right away; the application will
// call Status() to find out if/when agreement
// is reached.
//
func (px *Paxos) Start(seq int, v interface{}) {
// run Paxos algorithm in a new thread(run the Paxos protocol concurrently)
// play the role of proposer
// Your code here.
px.clog(DBG_PROPOSER, "Start", "Start seq=%d v=%v", seq, v)
// I'm Proposer
go func() {
n := 0
max_reject_pnum := -1
for {
if px.dead {
// I'm dead
break
}
if px.Lslots[seq].Decided {
// locally decided, wouldn't send prepare and accept anymore
// just propagate the decision
px.send_decided(seq, px.Lslots[seq].V)
break
}
if px.APp[seq]+1 > n {
n = px.APp[seq] + 1
} else {
n++
}
if n < max_reject_pnum {
n = max_reject_pnum + 1
}
px.clog(DBG_PROPOSER, "Start", "send prepare, seq=%d n=%d", seq, n)
prepare_ok, p := px.send_prepare(seq, n)
if !prepare_ok {
max_reject_pnum = p.PNum
continue
}
new_p := Proposal{}
// no proposal yet, use v
if p.PNum == 0 {
new_p.Value = v
} else {
new_p.Value = p.Value
}
new_p.PNum = n
px.clog(DBG_PROPOSER, "Start", "prepare OK, proposal=%v", new_p)
accept_ok := px.send_accept(seq, new_p)
if !accept_ok {
continue
}
|
px.clog(DBG_PROPOSER, "Start", "decided")
break
}
}()
}
//
// the application on this machine is done with
// all instances <= seq.
//
// see the comments for Min() for more explanation.
//
func (px *Paxos) Done(seq int) {
// Your code here.
px.clog(DBG_DONE, "Done", "Done: %d", seq)
if seq <= px.local_done {
return
}
// update local_done if need
px.local_done = seq
for k, _ := range px.Lslots {
if k <= seq {
delete(px.Lslots, k)
delete(px.APa, k)
delete(px.APp, k)
px.clog(DBG_DONE, "Done", "delete %d", k)
}
}
px.clog(DBG_DONE, "Done", "local_done=%d", px.local_done)
// check to see if it is OK to update global_done
if px.send_IfDone(seq) {
px.global_done = seq
px.send_IsDone(seq)
}
}
func (px *Paxos) send_IfDone(seq int) bool {
ok_count := 0
px.clog(DBG_DONE, "send_IfDone", "check seq=%d ", seq)
for idx, peer := range px.peers {
args := &IfDoneArgs{}
reply := &IfDoneReply{}
args.Seq = seq
ok := call(peer, "Paxos.IfDone", args, reply)
px.clog(DBG_DONE, "send_IfDone", "peer %d retuen %t, %v", idx, ok, reply.Err)
if ok && reply.Err == OK {
ok_count++
}
}
px.clog(DBG_DONE, "send_IfDone", "ok_count:%d", ok_count)
return (ok_count == px.peers_count)
}
//
// other peer ask that if args.Seq is done at this peer
//
func (px *Paxos) IfDone(args *IfDoneArgs, reply *IfDoneReply) error {
px.clog(DBG_DONE, "IfDone", "check %d, local_done=%d", args.Seq, px.local_done)
if args.Seq <= px.local_done {
reply.Err = OK
} else {
// make other peers wait for me
reply.Err = Reject
}
return nil
}
func (px *Paxos) send_IsDone(seq int) {
for _, peer := range px.peers {
args := &IsDoneArgs{}
reply := &IsDoneReply{}
args.Seq = seq
call(peer, "Paxos.IsDone", args, reply)
}
}
func (px *Paxos) IsDone(args *IsDoneArgs, reply *IsDoneReply) error {
px.global_done = args.Seq
px.Done(px.local_done)
return nil
}
//
// the application wants to know the
// highest instance sequence known to
// this peer.
//
func (px *Paxos) Max() int {
// Your code here.
return px.max_seq
}
//
// Min() should return one more than the minimum among z_i,
// where z_i is the highest number ever passed
// to Done() on peer i. A peers z_i is -1 if it has
// never called Done().
//
// Paxos is required to have forgotten all information
// about any instances it knows that are < Min().
// The point is to free up memory in long-running
// Paxos-based servers.
//
// It is illegal to call Done(i) on a peer and
// then call Start(j) on that peer for any j <= i.
//
// Paxos peers need to exchange their highest Done()
// arguments in order to implement Min(). These
// exchanges can be piggybacked on ordinary Paxos
// agreement protocol messages, so it is OK if one
// peers Min does not reflect another Peers Done()
// until after the next instance is agreed to.
//
// The fact that Min() is defined as a minimum over
// *all* Paxos peers means that Min() cannot increase until
// all peers have been heard from. So if a peer is dead
// or unreachable, other peers Min()s will not increase
// even if all reachable peers call Done. The reason for
// this is that when the unreachable peer comes back to
// life, it will need to catch up on instances that it
// missed -- the other peers therefor cannot forget these
// instances.
//
func (px *Paxos) Min() int {
// You code here.
return px.global_done + 1
}
//
// the application wants to know whether this
// peer thinks an instance has been decided,
// and if so what the agreed value is. Status()
// should just inspect the local peer state;
// it should not contact other Paxos peers.
//
func (px *Paxos) Status(seq int) (bool, interface{}) {
// Your code here.
return px.Lslots[seq].Decided, px.Lslots[seq].V
}
//
// tell the peer to shut itself down.
// for testing.
// please do not change this function.
//
func (px *Paxos) Kill() {
px.dead = true
if px.l != nil {
px.l.Close()
}
}
//
// the application wants to create a paxos peer.
// the ports of all the paxos peers (including this one)
// are in peers[]. this servers port is peers[me].
//
func Make(peers []string, me int, rpcs *rpc.Server) *Paxos {
px := &Paxos{}
px.peers = peers
px.me = me
//fmt.Printf("#### Make %d/%d ####\n", me, len(peers))
// Your initialization code here.
px.peers_count = len(peers)
px.majority = (px.peers_count + 1) / 2
px.max_seq = -1
px.global_done = -1
px.local_done = -1
px.APp = map[int]int{}
px.APa = map[int]Proposal{}
px.Lslots = map[int]Slot_t{}
if rpcs != nil {
// caller will create socket &c
rpcs.Register(px)
} else {
rpcs = rpc.NewServer()
rpcs.Register(px)
// prepare to receive connections from clients.
// change "unix" to "tcp" to use over a network.
os.Remove(peers[me]) // only needed for "unix"
l, e := net.Listen("unix", peers[me])
if e != nil {
log.Fatal("listen error: ", e)
}
px.l = l
// please do not change any of the following code,
// or do anything to subvert it.
// create a thread to accept RPC connections
go func() {
for px.dead == false {
conn, err := px.l.Accept()
if err == nil && px.dead == false {
if px.unreliable && (rand.Int63()%1000) < 100 {
// discard the request.
conn.Close()
} else if px.unreliable && (rand.Int63()%1000) < 200 {
// process the request but force discard of reply.
c1 := conn.(*net.UnixConn)
f, _ := c1.File()
err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
if err != nil {
fmt.Printf("shutdown: %v\n", err)
}
px.rpcCount++
if DBG_RPCCOUNT {
fmt.Println("*** RPC++ ***")
}
go rpcs.ServeConn(conn)
} else {
px.rpcCount++
if DBG_RPCCOUNT {
fmt.Println("*** RPC++ ***")
}
go rpcs.ServeConn(conn)
}
} else if err == nil {
conn.Close()
}
if err != nil && px.dead == false {
fmt.Printf("Paxos(%v) accept: %v\n", me, err.Error())
}
}
}()
}
return px
} | px.clog(DBG_PROPOSER, "Start", "accept OK")
px.send_decided(seq, new_p.Value) | random_line_split |
event.rs | use crate::mappings::Mappings;
use crate::scheduler::TaskMessage;
use crate::system::{SystemCtx, SystemDataOutput, SYSTEM_ID_MAPPINGS};
use crate::{resource_id_for_component, MacroData, ResourceId, Resources, SystemData, SystemId};
use hashbrown::HashSet;
use lazy_static::lazy_static;
use legion::storage::ComponentTypeId;
use legion::world::World;
use parking_lot::Mutex;
use std::alloc::Layout;
use std::any::TypeId;
use std::ptr;
/// ID of an event type, allocated consecutively.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Hash)]
pub struct EventId(pub usize);
impl From<usize> for EventId {
fn from(x: usize) -> Self {
Self(x)
}
}
lazy_static! {
pub static ref EVENT_ID_MAPPINGS: Mutex<Mappings<TypeId, EventId>> =
Mutex::new(Mappings::new());
}
/// Returns the event ID for the given type.
pub fn event_id_for<E>() -> EventId
where
E: Event,
{
EVENT_ID_MAPPINGS.lock().get_or_alloc(TypeId::of::<E>())
}
/// Marker trait for types which can be triggered as events.
pub trait Event: Send + Sync + 'static {}
impl<T> Event for T where T: Send + Sync + 'static {}
/// Strategy used to handle an event.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum HandleStrategy {
/*/// The handler will be invoked in the call to `trigger` so that
/// the system triggering it will observe any side effects from
/// handling the event.
///
/// This is the default strategy.
Immediate,*/
/// The handler will be run at the end of the system which triggered the event.
EndOfSystem,
/// The handle will be scheduled for running at the end of tick.
///
/// This is the default strategy.
EndOfTick,
}
impl Default for HandleStrategy {
fn default() -> Self {
HandleStrategy::EndOfTick
}
}
/// A raw event handler.
///
/// # Safety
/// * The event type returned by `event_id()` must be the exact
/// type which is handled by `handle_raw`. `handle_raw` must
/// interpret any events as the same type.
pub unsafe trait RawEventHandler: Send + Sync + 'static {
/// Returns the unique ID of this event handler, as allocated by `system_id_for::<T>()`.
fn id(&self) -> SystemId;
/// Returns the name of this event handler.
fn name(&self) -> &'static str;
/// Returns the ID of the event which is handled by this handler.
fn event_id(&self) -> EventId;
/// Returns the strategy that should be used to invoke this handler.
fn strategy(&self) -> HandleStrategy;
/// Returns the resources read by this event handler.
fn resource_reads(&self) -> &[ResourceId];
/// Returns the resources written by this event handler.
fn resource_writes(&self) -> &[ResourceId];
fn init(&mut self, resources: &mut Resources, ctx: SystemCtx, world: &World);
/// Handles a slice of events, accessing any needed resources.
///
/// # Safety
/// * The handler must not access any resources not indicated by `resource_reads()` and `resource_writes()`.
/// * The given slice __must__ be transmuted to a slice of the event type returned by `event_id`.
unsafe fn handle_raw_batch(
&mut self,
events: *const (),
events_len: usize,
resources: &Resources,
ctx: SystemCtx,
world: &World,
);
}
// High-level event handlers.
/// An event handler. This type should be used by users, not `RawEventHandler`.
pub trait EventHandler<E: Event>: Send + Sync + 'static {
/// The resources accessed by this event handler.
type HandlerData: for<'a> SystemData<'a>;
/// Handles a single event. Users may implement `handle_batch`
/// instead which handles multiple events at once.
fn handle(&mut self, event: &E, data: &mut <Self::HandlerData as SystemData>::Output);
/// Handles a slice of events. This function may be called instead of `handle`
/// when multiple events are concerned.
///
/// The default implementation for this function simply calls `handle` on each
/// event in the slice.
fn handle_batch(&mut self, events: &[E], mut data: <Self::HandlerData as SystemData>::Output) {
events
.iter()
.for_each(|event| self.handle(event, &mut data));
}
/// Returns the strategy that should be used to invoke this handler.
/// The default implementation of this function returns `HandleStrategy::default()`.
fn strategy(&self) -> HandleStrategy {
HandleStrategy::default()
}
}
pub struct CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
inner: H,
/// Cached system ID.
id: SystemId,
/// Cached event ID.
event_id: EventId,
/// Cached resource reads.
resource_reads: Vec<ResourceId>,
/// Cached resource writes.
resource_writes: Vec<ResourceId>,
/// Cached component reads.
component_reads: Vec<ComponentTypeId>,
/// Cached component writes.
component_writes: Vec<ComponentTypeId>,
/// Cached handler data, or `None` if it has not yet been accessed.
data: Option<H::HandlerData>,
name: &'static str,
}
impl<H, E> CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
/// Creates a new `CachedEventHandler` caching the given event handler.
pub fn new(inner: H, name: &'static str) -> Self {
let component_writes = H::HandlerData::component_writes()
.into_iter()
.collect::<HashSet<_>>();
let mut resource_reads = H::HandlerData::resource_reads();
resource_reads.extend(
H::HandlerData::component_reads()
.into_iter()
.filter(|comp| !component_writes.contains(comp))
.map(|comp| resource_id_for_component(comp)),
);
let mut resource_writes = H::HandlerData::resource_writes();
resource_writes.extend(
H::HandlerData::component_writes()
.into_iter()
.map(|comp| resource_id_for_component(comp)),
);
Self {
id: SYSTEM_ID_MAPPINGS.lock().alloc(),
event_id: event_id_for::<E>(),
resource_reads,
resource_writes,
component_reads: H::HandlerData::component_reads(),
component_writes: H::HandlerData::component_writes(),
data: None,
inner,
name,
}
}
}
unsafe impl<H, E> RawEventHandler for CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
fn id(&self) -> SystemId {
self.id
}
fn name(&self) -> &'static str {
self.name
}
fn event_id(&self) -> EventId {
self.event_id
}
fn strategy(&self) -> HandleStrategy {
self.inner.strategy()
}
fn resource_reads(&self) -> &[ResourceId] {
&self.resource_reads
}
fn resource_writes(&self) -> &[ResourceId] {
&self.resource_writes
}
fn init(&mut self, resources: &mut Resources, ctx: SystemCtx, world: &World) {
let mut data = unsafe { H::HandlerData::load_from_resources(resources, ctx, world) };
data.init(resources, &self.component_reads, &self.component_writes);
self.data = Some(data);
}
unsafe fn handle_raw_batch(
&mut self,
events: *const (),
events_len: usize,
_resources: &Resources,
_ctx: SystemCtx,
_world: &World,
) {
// https://github.com/nvzqz/static-assertions-rs/issues/21
/*assert_eq_size!(*const [()], *const [H::Event]);
assert_eq_align!(*const [()], *const [H::Event]);*/
let events = std::slice::from_raw_parts(events as *const E, events_len);
let data = self.data.as_mut().unwrap();
self.inner.handle_batch(events, data.before_execution());
data.after_execution();
}
}
/// System data which allows you to trigger events of a given type.
pub struct Trigger<E>
where
E: Event,
{
ctx: SystemCtx,
queued: Vec<E>,
id: EventId,
}
impl<'a, E> SystemData<'a> for Trigger<E>
where
E: Event,
{
type Output = &'a mut Self;
unsafe fn load_from_resources(
_resources: &mut Resources,
ctx: SystemCtx,
_world: &World,
) -> Self {
Self {
ctx,
queued: vec![],
id: event_id_for::<E>(),
}
}
fn resource_reads() -> Vec<ResourceId> {
vec![]
}
fn resource_writes() -> Vec<ResourceId> {
vec![]
}
fn component_reads() -> Vec<ComponentTypeId> {
vec![]
}
fn component_writes() -> Vec<ComponentTypeId> {
vec![]
}
fn before_execution(&'a mut self) -> Self::Output {
self
}
fn after_execution(&mut self) {
// TODO: end-of-system handlers
// Move events to bump-allocated slice and send to scheduler.
let len = self.queued.len();
if len == 0 |
let ptr: *mut E = self
.ctx
.bump
.get_or_default()
.alloc_layout(Layout::for_value(self.queued.as_slice()))
.cast::<E>()
.as_ptr();
self.queued
.drain(..)
.enumerate()
.for_each(|(index, event)| unsafe {
ptr::write(ptr.offset(index as isize), event);
});
self.ctx
.sender
.send(TaskMessage::TriggerEvents {
id: self.id,
ptr: ptr as *const (),
len,
})
.unwrap();
}
}
impl<E> Trigger<E>
where
E: Send + Sync + 'static,
{
pub fn trigger(&mut self, event: E) {
self.queued.push(event);
}
pub fn trigger_batched(&mut self, events: impl IntoIterator<Item = E>) {
self.queued.extend(events);
}
}
impl<'a, E> SystemDataOutput<'a> for &'a mut Trigger<E>
where
E: Send + Sync + 'static,
{
type SystemData = Trigger<E>;
}
impl<E> MacroData for &'static mut Trigger<E>
where
E: Event,
{
type SystemData = Trigger<E>;
}
#[cfg(test)]
mod tests {
#[test]
fn check_event_slice_size_and_align() {
// temp fix for https://github.com/nvzqz/static-assertions-rs/issues/21
assert_eq_size!(*const [()], *const [i32]);
assert_eq_align!(*const [()], *const [i32]);
}
}
| {
return; // Nothing to do
} | conditional_block |
event.rs | use crate::mappings::Mappings;
use crate::scheduler::TaskMessage;
use crate::system::{SystemCtx, SystemDataOutput, SYSTEM_ID_MAPPINGS};
use crate::{resource_id_for_component, MacroData, ResourceId, Resources, SystemData, SystemId};
use hashbrown::HashSet;
use lazy_static::lazy_static;
use legion::storage::ComponentTypeId;
use legion::world::World;
use parking_lot::Mutex;
use std::alloc::Layout;
use std::any::TypeId;
use std::ptr;
/// ID of an event type, allocated consecutively.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Hash)]
pub struct EventId(pub usize);
impl From<usize> for EventId {
fn from(x: usize) -> Self {
Self(x)
}
}
lazy_static! {
pub static ref EVENT_ID_MAPPINGS: Mutex<Mappings<TypeId, EventId>> =
Mutex::new(Mappings::new());
}
/// Returns the event ID for the given type.
pub fn event_id_for<E>() -> EventId
where
E: Event,
{
EVENT_ID_MAPPINGS.lock().get_or_alloc(TypeId::of::<E>())
}
/// Marker trait for types which can be triggered as events.
pub trait Event: Send + Sync + 'static {}
impl<T> Event for T where T: Send + Sync + 'static {}
/// Strategy used to handle an event.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum HandleStrategy {
/*/// The handler will be invoked in the call to `trigger` so that
/// the system triggering it will observe any side effects from
/// handling the event.
///
/// This is the default strategy.
Immediate,*/
/// The handler will be run at the end of the system which triggered the event.
EndOfSystem,
/// The handle will be scheduled for running at the end of tick.
///
/// This is the default strategy.
EndOfTick,
}
impl Default for HandleStrategy {
fn default() -> Self {
HandleStrategy::EndOfTick
}
}
/// A raw event handler.
///
/// # Safety
/// * The event type returned by `event_id()` must be the exact
/// type which is handled by `handle_raw`. `handle_raw` must
/// interpret any events as the same type.
pub unsafe trait RawEventHandler: Send + Sync + 'static {
/// Returns the unique ID of this event handler, as allocated by `system_id_for::<T>()`.
fn id(&self) -> SystemId;
/// Returns the name of this event handler.
fn name(&self) -> &'static str;
/// Returns the ID of the event which is handled by this handler.
fn event_id(&self) -> EventId;
/// Returns the strategy that should be used to invoke this handler.
fn strategy(&self) -> HandleStrategy;
/// Returns the resources read by this event handler.
fn resource_reads(&self) -> &[ResourceId];
/// Returns the resources written by this event handler.
fn resource_writes(&self) -> &[ResourceId];
fn init(&mut self, resources: &mut Resources, ctx: SystemCtx, world: &World);
/// Handles a slice of events, accessing any needed resources.
///
/// # Safety
/// * The handler must not access any resources not indicated by `resource_reads()` and `resource_writes()`.
/// * The given slice __must__ be transmuted to a slice of the event type returned by `event_id`.
unsafe fn handle_raw_batch(
&mut self,
events: *const (),
events_len: usize,
resources: &Resources,
ctx: SystemCtx,
world: &World,
);
}
// High-level event handlers.
/// An event handler. This type should be used by users, not `RawEventHandler`.
pub trait EventHandler<E: Event>: Send + Sync + 'static {
/// The resources accessed by this event handler.
type HandlerData: for<'a> SystemData<'a>;
/// Handles a single event. Users may implement `handle_batch`
/// instead which handles multiple events at once.
fn handle(&mut self, event: &E, data: &mut <Self::HandlerData as SystemData>::Output);
/// Handles a slice of events. This function may be called instead of `handle`
/// when multiple events are concerned.
///
/// The default implementation for this function simply calls `handle` on each
/// event in the slice.
fn handle_batch(&mut self, events: &[E], mut data: <Self::HandlerData as SystemData>::Output) {
events
.iter()
.for_each(|event| self.handle(event, &mut data));
}
/// Returns the strategy that should be used to invoke this handler.
/// The default implementation of this function returns `HandleStrategy::default()`.
fn strategy(&self) -> HandleStrategy {
HandleStrategy::default()
}
}
pub struct CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
inner: H,
/// Cached system ID.
id: SystemId,
/// Cached event ID.
event_id: EventId,
/// Cached resource reads.
resource_reads: Vec<ResourceId>,
/// Cached resource writes.
resource_writes: Vec<ResourceId>,
/// Cached component reads.
component_reads: Vec<ComponentTypeId>,
/// Cached component writes.
component_writes: Vec<ComponentTypeId>,
/// Cached handler data, or `None` if it has not yet been accessed.
data: Option<H::HandlerData>,
name: &'static str,
}
impl<H, E> CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
/// Creates a new `CachedEventHandler` caching the given event handler.
pub fn new(inner: H, name: &'static str) -> Self {
let component_writes = H::HandlerData::component_writes()
.into_iter()
.collect::<HashSet<_>>();
let mut resource_reads = H::HandlerData::resource_reads();
resource_reads.extend(
H::HandlerData::component_reads()
.into_iter()
.filter(|comp| !component_writes.contains(comp))
.map(|comp| resource_id_for_component(comp)),
);
let mut resource_writes = H::HandlerData::resource_writes();
resource_writes.extend(
H::HandlerData::component_writes()
.into_iter()
.map(|comp| resource_id_for_component(comp)),
);
Self {
id: SYSTEM_ID_MAPPINGS.lock().alloc(),
event_id: event_id_for::<E>(),
resource_reads,
resource_writes,
component_reads: H::HandlerData::component_reads(),
component_writes: H::HandlerData::component_writes(),
data: None,
inner,
name,
}
}
}
unsafe impl<H, E> RawEventHandler for CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
fn id(&self) -> SystemId {
self.id
}
fn name(&self) -> &'static str {
self.name
}
fn event_id(&self) -> EventId {
self.event_id
}
fn strategy(&self) -> HandleStrategy {
self.inner.strategy()
}
fn resource_reads(&self) -> &[ResourceId] {
&self.resource_reads
}
fn resource_writes(&self) -> &[ResourceId] {
&self.resource_writes
}
fn init(&mut self, resources: &mut Resources, ctx: SystemCtx, world: &World) {
let mut data = unsafe { H::HandlerData::load_from_resources(resources, ctx, world) };
data.init(resources, &self.component_reads, &self.component_writes);
self.data = Some(data);
}
unsafe fn handle_raw_batch(
&mut self,
events: *const (),
events_len: usize,
_resources: &Resources,
_ctx: SystemCtx,
_world: &World,
) {
// https://github.com/nvzqz/static-assertions-rs/issues/21
/*assert_eq_size!(*const [()], *const [H::Event]);
assert_eq_align!(*const [()], *const [H::Event]);*/
let events = std::slice::from_raw_parts(events as *const E, events_len);
let data = self.data.as_mut().unwrap();
self.inner.handle_batch(events, data.before_execution());
data.after_execution();
}
}
/// System data which allows you to trigger events of a given type.
pub struct Trigger<E>
where
E: Event,
{
ctx: SystemCtx,
queued: Vec<E>,
id: EventId,
}
impl<'a, E> SystemData<'a> for Trigger<E>
where
E: Event,
{
type Output = &'a mut Self;
unsafe fn load_from_resources(
_resources: &mut Resources,
ctx: SystemCtx,
_world: &World,
) -> Self {
Self {
ctx,
queued: vec![],
id: event_id_for::<E>(),
}
}
fn resource_reads() -> Vec<ResourceId> {
vec![]
}
fn resource_writes() -> Vec<ResourceId> {
vec![]
}
fn component_reads() -> Vec<ComponentTypeId> {
vec![]
}
fn component_writes() -> Vec<ComponentTypeId> {
vec![]
}
fn before_execution(&'a mut self) -> Self::Output {
self
}
fn after_execution(&mut self) {
// TODO: end-of-system handlers
// Move events to bump-allocated slice and send to scheduler.
let len = self.queued.len();
if len == 0 {
return; // Nothing to do
}
let ptr: *mut E = self
.ctx
.bump
.get_or_default()
.alloc_layout(Layout::for_value(self.queued.as_slice()))
.cast::<E>()
.as_ptr();
self.queued
.drain(..)
.enumerate()
.for_each(|(index, event)| unsafe {
ptr::write(ptr.offset(index as isize), event);
});
self.ctx
.sender
.send(TaskMessage::TriggerEvents {
id: self.id,
ptr: ptr as *const (),
len,
})
.unwrap();
}
}
impl<E> Trigger<E>
where
E: Send + Sync + 'static,
{
pub fn | (&mut self, event: E) {
self.queued.push(event);
}
pub fn trigger_batched(&mut self, events: impl IntoIterator<Item = E>) {
self.queued.extend(events);
}
}
impl<'a, E> SystemDataOutput<'a> for &'a mut Trigger<E>
where
E: Send + Sync + 'static,
{
type SystemData = Trigger<E>;
}
impl<E> MacroData for &'static mut Trigger<E>
where
E: Event,
{
type SystemData = Trigger<E>;
}
#[cfg(test)]
mod tests {
#[test]
fn check_event_slice_size_and_align() {
// temp fix for https://github.com/nvzqz/static-assertions-rs/issues/21
assert_eq_size!(*const [()], *const [i32]);
assert_eq_align!(*const [()], *const [i32]);
}
}
| trigger | identifier_name |
event.rs | use crate::mappings::Mappings;
use crate::scheduler::TaskMessage;
use crate::system::{SystemCtx, SystemDataOutput, SYSTEM_ID_MAPPINGS};
use crate::{resource_id_for_component, MacroData, ResourceId, Resources, SystemData, SystemId};
use hashbrown::HashSet;
use lazy_static::lazy_static;
use legion::storage::ComponentTypeId;
use legion::world::World;
use parking_lot::Mutex;
use std::alloc::Layout;
use std::any::TypeId;
use std::ptr;
/// ID of an event type, allocated consecutively.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Hash)]
pub struct EventId(pub usize);
impl From<usize> for EventId {
fn from(x: usize) -> Self {
Self(x)
}
}
lazy_static! {
pub static ref EVENT_ID_MAPPINGS: Mutex<Mappings<TypeId, EventId>> =
Mutex::new(Mappings::new());
}
/// Returns the event ID for the given type.
pub fn event_id_for<E>() -> EventId
where
E: Event,
{
EVENT_ID_MAPPINGS.lock().get_or_alloc(TypeId::of::<E>())
}
/// Marker trait for types which can be triggered as events.
pub trait Event: Send + Sync + 'static {}
impl<T> Event for T where T: Send + Sync + 'static {}
/// Strategy used to handle an event.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum HandleStrategy {
/*/// The handler will be invoked in the call to `trigger` so that
/// the system triggering it will observe any side effects from
/// handling the event.
///
/// This is the default strategy.
Immediate,*/
/// The handler will be run at the end of the system which triggered the event.
EndOfSystem,
/// The handle will be scheduled for running at the end of tick.
///
/// This is the default strategy.
EndOfTick,
}
impl Default for HandleStrategy {
fn default() -> Self {
HandleStrategy::EndOfTick
}
}
/// A raw event handler.
///
/// # Safety
/// * The event type returned by `event_id()` must be the exact
/// type which is handled by `handle_raw`. `handle_raw` must
/// interpret any events as the same type. | /// Returns the unique ID of this event handler, as allocated by `system_id_for::<T>()`.
fn id(&self) -> SystemId;
/// Returns the name of this event handler.
fn name(&self) -> &'static str;
/// Returns the ID of the event which is handled by this handler.
fn event_id(&self) -> EventId;
/// Returns the strategy that should be used to invoke this handler.
fn strategy(&self) -> HandleStrategy;
/// Returns the resources read by this event handler.
fn resource_reads(&self) -> &[ResourceId];
/// Returns the resources written by this event handler.
fn resource_writes(&self) -> &[ResourceId];
fn init(&mut self, resources: &mut Resources, ctx: SystemCtx, world: &World);
/// Handles a slice of events, accessing any needed resources.
///
/// # Safety
/// * The handler must not access any resources not indicated by `resource_reads()` and `resource_writes()`.
/// * The given slice __must__ be transmuted to a slice of the event type returned by `event_id`.
unsafe fn handle_raw_batch(
&mut self,
events: *const (),
events_len: usize,
resources: &Resources,
ctx: SystemCtx,
world: &World,
);
}
// High-level event handlers.
/// An event handler. This type should be used by users, not `RawEventHandler`.
pub trait EventHandler<E: Event>: Send + Sync + 'static {
/// The resources accessed by this event handler.
type HandlerData: for<'a> SystemData<'a>;
/// Handles a single event. Users may implement `handle_batch`
/// instead which handles multiple events at once.
fn handle(&mut self, event: &E, data: &mut <Self::HandlerData as SystemData>::Output);
/// Handles a slice of events. This function may be called instead of `handle`
/// when multiple events are concerned.
///
/// The default implementation for this function simply calls `handle` on each
/// event in the slice.
fn handle_batch(&mut self, events: &[E], mut data: <Self::HandlerData as SystemData>::Output) {
events
.iter()
.for_each(|event| self.handle(event, &mut data));
}
/// Returns the strategy that should be used to invoke this handler.
/// The default implementation of this function returns `HandleStrategy::default()`.
fn strategy(&self) -> HandleStrategy {
HandleStrategy::default()
}
}
pub struct CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
inner: H,
/// Cached system ID.
id: SystemId,
/// Cached event ID.
event_id: EventId,
/// Cached resource reads.
resource_reads: Vec<ResourceId>,
/// Cached resource writes.
resource_writes: Vec<ResourceId>,
/// Cached component reads.
component_reads: Vec<ComponentTypeId>,
/// Cached component writes.
component_writes: Vec<ComponentTypeId>,
/// Cached handler data, or `None` if it has not yet been accessed.
data: Option<H::HandlerData>,
name: &'static str,
}
impl<H, E> CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
/// Creates a new `CachedEventHandler` caching the given event handler.
pub fn new(inner: H, name: &'static str) -> Self {
let component_writes = H::HandlerData::component_writes()
.into_iter()
.collect::<HashSet<_>>();
let mut resource_reads = H::HandlerData::resource_reads();
resource_reads.extend(
H::HandlerData::component_reads()
.into_iter()
.filter(|comp| !component_writes.contains(comp))
.map(|comp| resource_id_for_component(comp)),
);
let mut resource_writes = H::HandlerData::resource_writes();
resource_writes.extend(
H::HandlerData::component_writes()
.into_iter()
.map(|comp| resource_id_for_component(comp)),
);
Self {
id: SYSTEM_ID_MAPPINGS.lock().alloc(),
event_id: event_id_for::<E>(),
resource_reads,
resource_writes,
component_reads: H::HandlerData::component_reads(),
component_writes: H::HandlerData::component_writes(),
data: None,
inner,
name,
}
}
}
unsafe impl<H, E> RawEventHandler for CachedEventHandler<H, E>
where
H: EventHandler<E>,
E: Event,
{
fn id(&self) -> SystemId {
self.id
}
fn name(&self) -> &'static str {
self.name
}
fn event_id(&self) -> EventId {
self.event_id
}
fn strategy(&self) -> HandleStrategy {
self.inner.strategy()
}
fn resource_reads(&self) -> &[ResourceId] {
&self.resource_reads
}
fn resource_writes(&self) -> &[ResourceId] {
&self.resource_writes
}
fn init(&mut self, resources: &mut Resources, ctx: SystemCtx, world: &World) {
let mut data = unsafe { H::HandlerData::load_from_resources(resources, ctx, world) };
data.init(resources, &self.component_reads, &self.component_writes);
self.data = Some(data);
}
unsafe fn handle_raw_batch(
&mut self,
events: *const (),
events_len: usize,
_resources: &Resources,
_ctx: SystemCtx,
_world: &World,
) {
// https://github.com/nvzqz/static-assertions-rs/issues/21
/*assert_eq_size!(*const [()], *const [H::Event]);
assert_eq_align!(*const [()], *const [H::Event]);*/
let events = std::slice::from_raw_parts(events as *const E, events_len);
let data = self.data.as_mut().unwrap();
self.inner.handle_batch(events, data.before_execution());
data.after_execution();
}
}
/// System data which allows you to trigger events of a given type.
pub struct Trigger<E>
where
E: Event,
{
ctx: SystemCtx,
queued: Vec<E>,
id: EventId,
}
impl<'a, E> SystemData<'a> for Trigger<E>
where
E: Event,
{
type Output = &'a mut Self;
unsafe fn load_from_resources(
_resources: &mut Resources,
ctx: SystemCtx,
_world: &World,
) -> Self {
Self {
ctx,
queued: vec![],
id: event_id_for::<E>(),
}
}
fn resource_reads() -> Vec<ResourceId> {
vec![]
}
fn resource_writes() -> Vec<ResourceId> {
vec![]
}
fn component_reads() -> Vec<ComponentTypeId> {
vec![]
}
fn component_writes() -> Vec<ComponentTypeId> {
vec![]
}
fn before_execution(&'a mut self) -> Self::Output {
self
}
fn after_execution(&mut self) {
// TODO: end-of-system handlers
// Move events to bump-allocated slice and send to scheduler.
let len = self.queued.len();
if len == 0 {
return; // Nothing to do
}
let ptr: *mut E = self
.ctx
.bump
.get_or_default()
.alloc_layout(Layout::for_value(self.queued.as_slice()))
.cast::<E>()
.as_ptr();
self.queued
.drain(..)
.enumerate()
.for_each(|(index, event)| unsafe {
ptr::write(ptr.offset(index as isize), event);
});
self.ctx
.sender
.send(TaskMessage::TriggerEvents {
id: self.id,
ptr: ptr as *const (),
len,
})
.unwrap();
}
}
impl<E> Trigger<E>
where
E: Send + Sync + 'static,
{
pub fn trigger(&mut self, event: E) {
self.queued.push(event);
}
pub fn trigger_batched(&mut self, events: impl IntoIterator<Item = E>) {
self.queued.extend(events);
}
}
impl<'a, E> SystemDataOutput<'a> for &'a mut Trigger<E>
where
E: Send + Sync + 'static,
{
type SystemData = Trigger<E>;
}
impl<E> MacroData for &'static mut Trigger<E>
where
E: Event,
{
type SystemData = Trigger<E>;
}
#[cfg(test)]
mod tests {
#[test]
fn check_event_slice_size_and_align() {
// temp fix for https://github.com/nvzqz/static-assertions-rs/issues/21
assert_eq_size!(*const [()], *const [i32]);
assert_eq_align!(*const [()], *const [i32]);
}
} | pub unsafe trait RawEventHandler: Send + Sync + 'static { | random_line_split |
tutorial.js | var Tutorial = function() {
this.prev = this.index;
this.next = this._p1;
}
Tutorial.prototype = {
index: function() {
this.next = this._p1;
return P('Use `tutorial <chapter> <section>` or `next` `prev` commands to navigate.') + BR() +
P('Available sections are:') +
DIV(LIST([
'1. What is Gremlin?',
'2. What can I do with Gremlin?' +
LIST([
'a. Defining variables.',
'b. Using ”$_” and ”$_g” and ”.” special variables.',
'c. Using Gremlin build-in functions and data structures (maps, lists).',
'd. Gremlin Loops (foreach, while).',
'e. Defining custom functions/paths.',
'f. Basic graph traversals.'
]),
'3. Acknowledgements'
]));
},
_p1: function() {
this.prev = this.index;
this.next = this._p2;
return P("Chapter 1 - What is Gremlin?") + BR() +
P(' Gremlin is a domain specific programming language for ' + LINK("http://en.wikipedia.org/wiki/Graph_%28mathematics%29", "graphs") + '. Graphs are data structures where there exists vertices (i.e. dots, nodes) and edges (i.e. lines, arcs). Gremlin was designed to work with a type of graph called a property graph. Property graphs are defined, in detail, in the ' + LINK("http://wiki.github.com/tinkerpop/gremlin/defining-a-property-graph", "Defining a Property Graph") + ' section of ' + LINK("http://wiki.github.com/tinkerpop/gremlin/", "complete documentation") + '. Gremlin makes extensive use of ' + LINK("http://www.w3.org/TR/xpath", "XPath 1.0") + ' to define abstract path descriptions (path expressions) through a graph. It is important to learn and understand XPath as this will make it easier to understand Gremlin.');
},
_p2: function(sectionId) {
this._annotation = function() {
this.prev = this._p1;
this.next = this._a;
return P("Chapter 2 - What can I do with Gremlin?") + BR() +
P(" Before diving into the specifics of Gremlin, its good to know what you are getting yourself into. Moreover, its important to know if Gremlin can be of use to you. Below is a list of a few key benefits of Gremlin:") + BR() +
DIV(LIST(
[
'1. Gremlin is useful for manually working with your graph;',
'2. Gremlin allows you to query a graph;',
'3. Gremlin can express complex graph traversals succinctly;',
'4. Gremlin is useful for exploring and learning about graphs;',
'5. Gremlin allows you to explore the Semantic Web/Web of Data;',
'6. Gremlin allows for universal path-based computations.'
]));
}
this._a = function() {
this.prev = this._annotation;
this.next = this._b;
return P("Chapter 2a - Defining variables.") + BR() +
P(" Gremlin gives you possibility to work with variables.") + BR() +
P(" Variables in Gremlin must be proceeded by a $ character.") +
P(" The assignment operator is ':=' and it is used to assign a value to a variable or an element to a list or map:") + BR() +
PLIST("$foo := 'bar'") + PLIST("$i := 1 + 5");
}
this._b = function() {
this.prev = this._a;
this.next = this._c;
return P("Chapter 2b - Using ”$_” and ”$_g” and ”.” special variables.") + BR() +
P(" There are three special variables in Gremlin ”$_” and ”$_g” and ”.”:") + BR() +
PLIST("• ”$_” is a reserved variable that denotes the root list. In this way, the root list can be redefined.") +
PLIST("• ”$_g” denotes the graph object. It allows the user to assign a working graph that will be referenced by graph functions when no graph argument is provided.") +
PLIST("• ”.” denotes reference to the root list.");
}
this._c = function() {
this.prev = this._b;
this.next = this._d; | return P("Chapter 2c - Using Gremlin build-in functions and data structures (maps, lists).") + BR() +
P(" Gremlin provides build-in functions and data structures which will be very useful while working with graphs.") + BR() +
P("To execute a function you should call it using special format - ”<prefix>:<function_name>(<arg>, ...)”:") + BR() +
PLIST("g:print('hello world!') - will execute build-in print function.") + BR() +
P("or without arguments:") + BR() +
PLIST("g:print() - will print empty string.") + BR() +
P("There are functions which could be referenced without <prefix> - global functions - like: null(), false(), true()") + BR() +
PLIST("$foo := false() - value returned by false() will be assigned to $foo variable.") + BR() +
P(" Gremlin has own implementation of Map and List data structures (will be familiar to Java developers):") + BR() +
PLIST("g:map(<key>, <value>, ...) - function used to construct map objects:") +
PLIST("g:map('foo', 'bar') - will return {'foo'='bar'} map.") + BR() +
P("the same goes for List:") + BR() +
PLIST("g:list(<value>,...) - function used to construct list objecs:") +
PLIST("g:list(1,2,3,4) - will return [1.0, 2.0, 3.0, 4.0].") + BR() +
P("result of map or list function could be assigned to a variable:") + BR() +
PLIST("$foo := g:map('foo', 'bar')") +
PLIST("$foo := g:list('foo', 'bar')") + BR() +
P("to get value from map use g:get(element, string) function:") + BR() +
PLIST("g:get(g:map('foo', 'bar'), 'foo') - returns 'bar'") + BR() +
P("g:get(list, number) function used to get values from list:") + BR() +
PLIST("g:get(g:list(3, 4), 1) - returns '3.0'") + BR() +
P("to assign new elements to map use g:assign(map,object,object) function:") + BR() +
PLIST("$foo := g:map('foo', 'bar')") +
PLIST("g:assign($foo, 'foo2', 'bar2') - returns 'bar2'") +
PLIST("g:print($foo) - returns {foo2=bar2, foo=bar}") + BR() +
P("Gremlin Function Library Reference could be found " + LINK("http://wiki.github.com/tinkerpop/gremlin/gremlin-function-library", "here"));
}
this._d = function() {
this.prev = this._c;
this.next = this._e;
return P("Chapter 2d - Gremlin Loops (foreach, while).") + BR() +
P(" Gremlin also has build-in loop support - foreach and while:") + BR() +
P("1. Foreach") + BR() +
P(" The foreach statement will loop over its body the number of times as there are values in the provided loop list. Each item in the list is assigned to a variable and that variable can be referenced in the loop body. The generic structure and example of foreach is provided below.") + BR() +
PLIST("foreach variable in xpath_list<br/> statement*<br />end") + BR() +
P("Here is a little example how to use it:") + BR() +
PLIST("$i := 0") +
PLIST("foreach $j in 1 | 2 | 3") +
PLIST(" $i := $i + $j") +
PLIST("end") + BR() +
P("this will return - 6.0.") + BR() +
P("2. While") + BR() +
P(" The while statement will loop over its body until the provided condition is met. The generic structure and example of while is provided below.") + BR() +
PLIST("while xpath_boolean<br/> statement*<br/>end") + BR() +
P("Here is a little example how to use it:") + BR() +
PLIST("$i := 'g'") +
PLIST("while not(matches($i,'ggg'))") +
PLIST(" $i := concat($i,'g')") +
PLIST("end") + BR() +
P("this will return - 'ggg'");
}
this._e = function() {
this.prev = this._d;
this.next = this._f;
return P("Chapter 2e - Defining custom functions/paths.") + BR() +
P(" Gremlin gives you possibility to define custom functions and paths.") + BR() +
P("Function can be defined using following syntax:") + BR() +
PLIST("func <prefix>:<functiona-name>($var, ...)<br/> statement*<br/>end") +
P("Example") + BR() +
PLIST("func u:hello-name($name)<br/> g:print(concat('hello ', $name))<br/>end") + BR() +
P("and if you then run `u:hello-name('pavel')` result will be 'hello pavel'.") +
P("Please note - there are no return statement - function returns value of its last statement!") + BR() +
P("Path can be defined using following syntax:") + BR() +
PLIST("path string<br/> statement*<br/>end") + BR() +
P("Example") + BR() +
PLIST("path co-developer<br/> ./outE[@label='created']/inV/inE[@label='created']/outV[g:except($_)]<br/>end");
}
this._f = function() {
this.prev = this._e;
this.next = this._p3;
return P("Chapter 2f - Basic graph traversals.") + BR() +
P(" First of all we should learn how to open graph and how to load graph data from file.") +
P("As Gremlin has alot of backends graph could be opened using different functions, most common if then are:") + BR() +
PLIST("1. tg:open() - used to open TinkerGraph;") +
PLIST("2. neo4j:open(<database-name-as-string>) - used to open Neo4j database connection;") +
PLIST("3. sail:open() - used to open SAIL memorystore.") + BR() +
P("Each of those functions return unified graph object which could be assigned to a variable. Gremlin has special variable for this aim - '$_g'. When we have your graph opened next thing to do is to set root list to '$_' variable (if there are any existing vertices in the graph):") + BR() +
PLIST("$_ := g:id('1') - function g:id(graph?, id) used to get vertex from graph object by its id. If you have $_g variable assigned then you don't have to provide graph? argument to the g:id function.") +
("After $_ is set you can access your root list by using '.' statement") + BR() +
P("Here is the list of most used functions to work with graphs:") + BR() +
PLIST("1. vertex g:id(graph?, id) - get vertex by id.") +
PLIST("2. vertex g:add-v(graph?, object?, object?) - add new vertex with given attributes, e.g. g:add-v($g, g:map('name', 'pavel')).") +
PLIST("3. edge g:add-e(graph?, object?, vertex, string, vertex) - add new edge connecting to vertexes, e.g. g:add-e($g, $v1, 'knows', $v2).") +
PLIST("4. boolean g:save(string) - save graph data into XML file with given name, e.g. g:save('my-graph').") +
PLIST("5. boolean g:load(graph?, string) - load graph data from XML file with given name to graph object variable, e.g. g:load($g, 'my-graph').") +
PLIST("6. boolean g:clear(graph?) - clear contents of the graph, e.g. g:clear($g).") +
PLIST("7. boolean g:close(graph?) - close given graph.") + BR() +
P("Lets now make a simple graph traversal:") + BR() +
PLIST("# open a graph object") +
PLIST("$_g := tg:open()") + BR() +
PLIST("# assign root list to a new vertex") +
PLIST("$_ := g:add-v()") + BR() +
PLIST("# assign @name attribute to the root vertex") +
PLIST("./@name := 'pavel'") + BR() +
PLIST("# create new vertex and assign it to $marko variable") +
PLIST("$marko := g:add-v()") + BR() +
PLIST("# add @name attribute as we did before") +
PLIST("$marko/@name := 'marko'") + BR() +
PLIST("# add edge to connect pavel and marko") +
PLIST("g:add-e($_, 'knows', $marko)") + BR() +
PLIST("# lets traverse our graph from root vertex to find out outgoing edges") +
PLIST("./outE") + BR() +
PLIST("# lets get ingoing vertexs to your outgoing edge") +
PLIST("./outE/inV") + BR() +
PLIST("# let get a @name attribute of found vertex") +
PLIST("./outE/inV/@name") + BR() +
PLIST("# lets add few more vertices and search by criteria") +
PLIST("$max := g:add-v()") +
PLIST("$max/@name := 'max'") +
PLIST("$max/@age := 23") +
PLIST("$liza := g:add-v()") +
PLIST("$liza/@name := 'liza'") +
PLIST("$liza/@age := 15") +
PLIST("g:add-e($_, 'knows', $max'") +
PLIST("g:add-e($_, 'has syster', $liza)") +
PLIST("# filter by 'knows' edges") +
PLIST("./outE[@label = 'knows']/inV/@name") +
PLIST("# all outgoing edges with label 'knows' and age more then 20 years") +
PLIST("./outE[@label = 'knows']/inV[@age > 20]/@name") + BR() +
PLIST("# lets save and close the graph and then load data from file") +
PLIST("g:save('my-graph')") +
PLIST("g:close()") +
PLIST("g:load('my-graph')") + BR() +
P("For bigger example please visit " + LINK("http://wiki.github.com/tinkerpop/gremlin/basic-graph-traversals", "wiki"));
}
var section = this['_' + sectionId];
return ((section) ? section() : this._annotation());
},
_p3: function() {
this.prev = this._p2;
this.next = this._p1;
return P("Chapter 3 - Acknowledgements.") + BR() +
P("Special thanks to:") + BR() +
PLIST(LINK("http://www.linkedin.com/in/neubauer", "Peter Neubauer")) +
PLIST(LINK("http://markorodriguez.com/", "Marko A. Rodriguez"));
},
handle: function(req) {
if($.trim(req) == 'prev') return this.prev();
if($.trim(req) == 'next') return this.next();
// format is help <paragraph> <section>
var currentReqParts = req.split(" ");
var chapter = this['_p' + currentReqParts[1]];
return ((chapter) ? chapter(currentReqParts[2]) : this.index());
}
}; | random_line_split | |
main_classes.py | import random
import colorama
from termcolor import colored
from reusables.string_manipulation import int_to_words
from app.common_functions import comma_separated, add_dicts_together, remove_little_words, odds
from app.load_data import items, buildings, wild_mobs, names, adjectives
colorama.init()
def find_unique_names(quantity, name_list, taken_names):
free_names = [x for x in name_list if x not in taken_names]
random.shuffle(free_names)
return free_names[:quantity]
def dropper(rarity):
results = {'super rare': 100,
'rare': 50,
'uncommon': 25,
'common': 5, | if random.randint(0, results[rarity]) == 1:
quantity += 1
countdown -= 1
return quantity
def drop_building(dictionary, p, limit=None):
limit = limit or len(adjectives)
drops_i = []
for k, v in dictionary.items():
quantity = dropper(v['rarity'])
quantity = quantity if quantity < limit else limit
limit -= quantity
if quantity:
if quantity > 1 and v['category'] != 'residence':
n = random.randint(0, quantity)
unique_names = find_unique_names(quantity - n, names, p.square.unique_building_names)
p.square.unique_building_names += unique_names
for i in range(0, quantity - n):
drops_i.append(Building(name=f"{unique_names[i]}'s {remove_little_words(k).capitalize()}", p=p, **v))
unique_adjectives = find_unique_names(n, adjectives, p.square.unique_building_names)
p.square.unique_building_names += unique_adjectives
for i in range(0, n):
drops_i.append(Building(name=f"the {unique_adjectives[i]} {remove_little_words(k).capitalize()}", p=p, **v))
elif quantity > 1 and v['category'] == 'residence':
unique_house_names = find_unique_names(quantity, names, p.square.unique_house_names)
p.square.unique_house_names += unique_house_names
for i in range(0, quantity):
drops_i.append(Building(name=f"{unique_house_names[i]}'s {remove_little_words(k)}", p=p, **v))
else:
drops_i.append(Building(name=k, p=p, **v))
return drops_i
def drop_mob(dictionary, p, limit=None, square=None):
square = square or p.square
limit = limit or len(names) - len(square.unique_mob_names)
drops_i = []
for k, v in dictionary.items():
quantity = dropper(v['rarity'])
quantity = quantity if quantity < limit else limit
limit -= quantity
if quantity:
if quantity > 1:
unique_names = find_unique_names(quantity, names, square.unique_mob_names)
p.square.unique_mob_names += unique_names
for i in range(0, len(unique_names)):
drops_i.append(Mob(name=f"{k} named {unique_names[i]}", p=p, **v))
else:
if k not in [n.name for n in p.square.mobs]:
drops_i.append(Mob(name=k, p=p, **v))
else:
name = find_unique_names(1, names, square.unique_mob_names)[0]
drops_i.append(Mob(name=f"{k} named {name}", p=p, **v))
return drops_i
def drop_item(dictionary):
""" Randomly generates objects based on rarity """
drops_i = []
for k, v in dictionary.items():
quantity = dropper(v['rarity'])
if quantity:
drops_i.append(Item(name=k, quantity=quantity, **v))
return drops_i
class MapSquare:
def __init__(self, name="", square_type=None):
square_types = ["forest", "mountains", "desert", "city", "swamp", "ocean"]
self.square_type = square_type or square_types[random.randint(0, len(square_types) - 1)]
self.name = name
self.unique_mob_names = []
self.unique_building_names = []
self.unique_house_names = []
mobs = []
items = []
buildings = []
def generate_items(self):
self.items = drop_item(add_dicts_together(items["master"], items[self.square_type]))
def generate_buildings(self, p):
self.buildings = drop_building(add_dicts_together(buildings["master"], buildings[self.square_type]), p)
def generate_mobs(self, p):
self.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square_type]), p)
def clean_up_map(self):
""" Remove items with quantity of zero from the map inventory"""
self.items = [i for i in self.items if i.quantity != 0]
@staticmethod
def map_picture(the_map, p):
"""With the player's location in the center, draw a 5 x 5 map with map square type
and coordinates in each square"""
xy = (p.location[0] - 2, p.location[1] + 2)
map_coords = []
for y in range(0, 5):
row = [(xy[0] + x, xy[1] - y) for x in range(0, 5)]
map_coords.append(row)
pretty_map = []
for r in map_coords:
row = []
for coordinates in r:
if coordinates in the_map.keys():
if p.quest and p.job and p.quest[1] == coordinates and p.job.location == coordinates:
star = '*$ '
elif p.quest and p.quest[1] == coordinates:
star = ' * '
elif p.job and p.job.location == coordinates:
star = ' $ '
else:
star = ' '
row.append("|{!s:9}{}|".format(the_map[coordinates].square_type, star))
else:
row.append("|{!s:12}|".format(' '))
pretty_map.append(row)
for row in pretty_map:
print(''.join(row))
class Player:
def __init__(self, name, location):
self.name = name
self.location = location
self.square = None
self.money = 0
self.quest = None
self.job = None
self.phase = "day"
self.equipped_weapon = None
self.major_armor = None
self.minor_armor = None
self.building_local = None
self.inventory = []
self.skills = {}
self.health = 100
self.greeting_count = 0
self.body_count = 0
self.assassination_count = 0
self.hit_list = []
self.death_count = 0
# TODO increase insurance cost every death?
self.food_count = 0
self.run_away_count = 0
self.speed_bonus = False
self.game_won = False
def game_over(self):
if self.game_won is False:
self.game_won = True
print(colored("You have won the game!", "green"))
print("You may continue playing to earn more achievements if you wish.")
if self.run_away_count == 0:
print("Congratulations, you have achieved the True Bravery achievement, having won the game without ever running away from a fight.")
if self.run_away_count > 100:
print("Congratulations, you have achieved the True Cowardice achievement, having won the game after running away from over 100 battles.")
def clean_up_inventory(self):
""" Remove items with quantity of zero from the map inventory"""
self.inventory = [i for i in self.inventory if i.quantity != 0]
def phase_change(self, the_map):
self.phase = 'day' if self.phase == 'night' else 'night'
for k, square in the_map.items():
if self.location != k:
square.generate_items()
for b in square.buildings:
if b.ware_list:
b.wares = drop_item(b.ware_list)
while not b.wares:
b.wares = drop_item(b.ware_list)
if b.name not in ('a castle', 'a volcanic base'):
jobs = {}
buiding_dict = add_dicts_together(buildings['master'], buildings[square.square_type])
for key, v in buiding_dict.items():
if key == b.name and v.get('jobs'):
for name, values in v['jobs'].items():
jobs[name] = values
b.jobs = b.drop_job(jobs)
if self.phase == 'day':
self.speed_bonus = False
for mob in square.mobs:
mob.health = 100
mob.irritation_level = 0
mob.quest = None if self.quest is None else mob.quest
if not square.mobs:
square.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square.square_type]),
self, limit=len(names), square=square)
def formatted_inventory(self):
formatted = []
for item in self.inventory:
if item.quantity > 1:
formatted.append(f"{int_to_words(item.quantity)} {item.plural}")
else:
formatted.append(item.name)
if formatted:
return comma_separated(formatted)
else:
return "nothing"
def pretty_inventory(self):
w = self.equipped_weapon
major = self.major_armor.defense if self.major_armor else 0
minor = self.minor_armor.defense if self.minor_armor else 0
armor_defense = (major + minor) * 5
armors = [self.major_armor.name if self.major_armor else None, self.minor_armor.name if self.minor_armor else None]
inventory = {'inventory_items': f"You have {self.formatted_inventory()} in your inventory.",
'weapon': f"You are wielding {int_to_words(w.quantity)} "
f"{remove_little_words(w.name) if w.quantity == 1 else w.plural}." if w else None,
'armor': f"You are wearing {' and '.join(x for x in armors if x)}, "
f"giving you a {armor_defense}% reduction in incoming damage." if self.minor_armor or self.major_armor else None}
return '\n'.join(v for v in inventory.values() if v)
def status(self):
skills = [f"{k}: {v}%." for k, v in self.skills.items()]
job = f"You have a job as a {self.job.name}." if self.job else None
quest = "You have a quest." if self.quest else None
if job and quest:
job_string = "\n".join([job, quest])
elif job or quest:
job_string = job if job else quest
else:
job_string = "You do not have a job, and you are not contributing to society."
status_string = {
'health': f'Currently, you have {self.health} health.',
'location': f'You are located on map coordinates {self.location}, '
f'which is {self.square.square_type}.',
'building_local': f'You are inside {self.building_local.name}.' if self.building_local else None,
'skills': '\n'.join(skills) if skills else "You don't have any skills.",
'money': f"You have ${self.money} in your wallet.",
'job': job_string}
return '\n'.join(v for v in status_string.values() if v)
def statistics(self):
print(f"You have killed {self.body_count} mobs.")
print(f"You have ran away from {self.run_away_count} battles.")
print(f"You have eaten {self.food_count} items.")
print(f"You have performed {self.assassination_count} assassinations.")
print(f"You have talked to mobs {self.greeting_count} times.")
def view_hit_list(self):
if self.hit_list:
print(f"If you ever run across these shady characters, be sure to take their names off your list: {comma_separated(self.hit_list)}")
else:
print("Looks like you don't know of anyone who needs to be dead.")
def increase_skill(self, skill, increase):
try:
self.skills[skill] += increase
except KeyError:
self.skills[skill] = increase
print(f"You have increased your mastery of {skill} by {increase}% for a total of {self.skills[skill]}%.")
class Item:
def __init__(self, name, quantity, plural, category=None, perishable=None,
flammable=None, rarity=None, price=None, weapon_rating=None, defense=None):
self.name = name
self.quantity = quantity
self.plural = plural
self.category = category or None
self.perishable = perishable or None
self.flammable = flammable or None
self.rarity = rarity or None
self.price = price or None
self.weapon_rating = weapon_rating or None
self.defense = defense or None
def copy(self):
return Item(name=self.name, quantity=self.quantity, plural=self.plural, category=self.category,
perishable=self.perishable, flammable=self.flammable, rarity=self.rarity,
weapon_rating=self.weapon_rating, defense=self.defense)
class Building(object):
def __init__(self, name, p, plural, category=None, rarity=None, ware_list=None, mobs=None, jobs=None):
self.name = name
self.p = p
self.quantity = 1
self.plural = plural
self.category = category or None
self.rarity = rarity or None
self.ware_list = ware_list
self.wares = self.drop_wares()
self.mobs = drop_mob(mobs, p) if mobs else None
self.jobs = self.drop_job(jobs) if jobs else None
if self.name in ('a castle', 'a volcanic base'):
self.boss_mobs_and_jobs()
def drop_wares(self):
if self.ware_list:
wares = drop_item(self.ware_list)
while not wares:
wares = drop_item(self.ware_list)
return wares
else:
return []
def drop_job(self, jobs):
drops_i = []
for k, v in jobs.items():
if odds(2):
drops_i.append(Job(name=k, location=self.p.location, **v))
return drops_i
def boss_mobs_and_jobs(self):
boss_major_armors = [Item('a coat of impervious dragon scales', plural='coats of dragon scales', quantity=1, category='major armor', rarity='super rare', defense=5),
Item('an enchanted leather duster', plural='enchanted leather dusters', quantity=1, category='major armor', defense=5, rarity='super rare'),
Item('a coat of actual live grizzly bears', plural='coats of actual live grizzly bears', quantity=1, category='major armor', defense=5, rarity='super rare')]
boss_minor_armors = [Item('wings of an angel', plural='wings of angels', quantity=1, rarity='super rare', category='minor armor', defense=5),
Item('an OSHA approved hard hat', plural='OSHA approved hard hats', quantity=1, rarity='super rare', category='minor armor', defense=5),
Item('a pair boots that were made for walkin', plural='pairs of boots that were made for walkin', quantity=1, rarity='super rare', category='minor armor', defense=5)]
boss_weapons = [Item('an apache helicopter', plural='apache helicopters', rarity='super rare', weapon_rating=6, quantity=1),
Item('a trebuchet', plural='trebuchets', weapon_rating=6, quantity=1, rarity='super rare'),
Item('an army of attacking wizards', plural='armies of attacking wizards', weapon_rating=6, quantity=1, rarity='super rare')]
boss_names = ["the Terrifying Dragon of Soul Slaying", "the Great Salamander of Darkness", "the Squirrel of Destiny", ]
random.shuffle(boss_names)
random.shuffle(boss_weapons)
random.shuffle(boss_major_armors)
random.shuffle(boss_minor_armors)
boss = Mob(boss_names[0], self.p, plural=boss_names[0], rarity='super rare')
boss.health = 500
boss.equipped_weapon = boss_weapons[0]
boss.major_armor = boss_major_armors[0]
boss.minor_armor = boss_minor_armors[0]
boss.irritation_level = 10
self.mobs = [boss]
if self.name == 'a castle':
self.jobs = [Job('king of the realm', location=self.p.location, salary=1100)]
if self.name == 'a volcanic base':
self.jobs = [Job('evil overlord', location=self.p.location, salary=1100)]
class Job:
def __init__(self, name, location, skills_needed=None, salary=0, skills_learned=None, inventory_needed=None):
self.name = name
self.location = location
self.skills_needed = skills_needed or None
self.salary = salary or 0
self.skills_learned = skills_learned or None
self.inventory_needed = inventory_needed or None
self.application_attempts = 0
class Mob:
def __init__(self, name, p, plural, rarity, inventory=None):
self.name = name
self.p = p
self.plural = plural
self.quantity = 1
self.rarity = rarity
self.skills = self.skills()
self.quest = None
self.inventory = inventory or drop_item(add_dicts_together(items['master'], items[p.square.square_type]))
self.health = 100
self.equipped_weapon = self.equip()
major = [x for x in self.inventory if x.category == 'major armor']
minor = [x for x in self.inventory if x.category == 'minor armor']
self.major_armor = major[0] if major else None
self.minor_armor = minor[0] if minor else None
self.irritation_level = 0
def equip(self):
nice_weapons = []
for i in self.inventory:
try:
if i.weapon_rating:
nice_weapons.append(i)
except AttributeError:
pass
nice_weapons.sort(key=lambda x: x.weapon_rating, reverse=True)
if nice_weapons:
self.inventory.remove(nice_weapons[0])
return nice_weapons[0]
else:
return None
@staticmethod
def skills():
""" Pick the skills for a mob, these determine what a player can get from completing a quest """
all_skills = ["strength", "patience", "cleanliness", "leadership", "communication",
"science", "math", "engineering", "intelligence", "driving"]
random.shuffle(all_skills)
return all_skills[0:2]
def generate_quest(self):
"""
inventory based
bring me x of an object to learn a skill
"""
if odds(3):
quest_items = add_dicts_together(items["master"], items[self.p.square.square_type])
quest_item = random.choice(list(quest_items.keys()))
i = Item(quest_item, 0, **quest_items[quest_item])
self.inventory.append(i)
quantity = {'super rare': '1',
'rare': '2',
'uncommon': '3',
'common': '6',
'super common': '15'}
q = quantity[i.rarity]
self.quest = i, int(q), f"{self.p.name}, if you bring " \
f"me {q} {i.plural if int(q) > 1 else remove_little_words(i.name)}, " \
f"I will teach you a valuable skill."
return
elif odds(5):
mobs = []
for biome, building in buildings.items():
for b, attributes in building.items():
if attributes.get('mobs'):
for k in attributes['mobs'].keys():
mobs.append(k)
for biome, mob in wild_mobs.items():
for k in mob.keys():
mobs.append(k)
target = f"{mobs[random.randint(0, len(mobs)-1)]} named {names[random.randint(0, len(names)-1)]}"
print(f"Well, we'll keep this off the record, but I can arrange for some money to find its way "
f"into your account if you make {colored(target, 'yellow')} disappear, if you know what I mean...")
self.p.hit_list.append(target)
return False
else:
return None | 'super common': 2}
quantity = 0
countdown = random.randint(0, 10)
while countdown > 0: | random_line_split |
main_classes.py | import random
import colorama
from termcolor import colored
from reusables.string_manipulation import int_to_words
from app.common_functions import comma_separated, add_dicts_together, remove_little_words, odds
from app.load_data import items, buildings, wild_mobs, names, adjectives
colorama.init()
def find_unique_names(quantity, name_list, taken_names):
free_names = [x for x in name_list if x not in taken_names]
random.shuffle(free_names)
return free_names[:quantity]
def dropper(rarity):
results = {'super rare': 100,
'rare': 50,
'uncommon': 25,
'common': 5,
'super common': 2}
quantity = 0
countdown = random.randint(0, 10)
while countdown > 0:
if random.randint(0, results[rarity]) == 1:
quantity += 1
countdown -= 1
return quantity
def drop_building(dictionary, p, limit=None):
limit = limit or len(adjectives)
drops_i = []
for k, v in dictionary.items():
quantity = dropper(v['rarity'])
quantity = quantity if quantity < limit else limit
limit -= quantity
if quantity:
if quantity > 1 and v['category'] != 'residence':
n = random.randint(0, quantity)
unique_names = find_unique_names(quantity - n, names, p.square.unique_building_names)
p.square.unique_building_names += unique_names
for i in range(0, quantity - n):
drops_i.append(Building(name=f"{unique_names[i]}'s {remove_little_words(k).capitalize()}", p=p, **v))
unique_adjectives = find_unique_names(n, adjectives, p.square.unique_building_names)
p.square.unique_building_names += unique_adjectives
for i in range(0, n):
drops_i.append(Building(name=f"the {unique_adjectives[i]} {remove_little_words(k).capitalize()}", p=p, **v))
elif quantity > 1 and v['category'] == 'residence':
unique_house_names = find_unique_names(quantity, names, p.square.unique_house_names)
p.square.unique_house_names += unique_house_names
for i in range(0, quantity):
drops_i.append(Building(name=f"{unique_house_names[i]}'s {remove_little_words(k)}", p=p, **v))
else:
drops_i.append(Building(name=k, p=p, **v))
return drops_i
def drop_mob(dictionary, p, limit=None, square=None):
square = square or p.square
limit = limit or len(names) - len(square.unique_mob_names)
drops_i = []
for k, v in dictionary.items():
quantity = dropper(v['rarity'])
quantity = quantity if quantity < limit else limit
limit -= quantity
if quantity:
if quantity > 1:
unique_names = find_unique_names(quantity, names, square.unique_mob_names)
p.square.unique_mob_names += unique_names
for i in range(0, len(unique_names)):
drops_i.append(Mob(name=f"{k} named {unique_names[i]}", p=p, **v))
else:
if k not in [n.name for n in p.square.mobs]:
drops_i.append(Mob(name=k, p=p, **v))
else:
name = find_unique_names(1, names, square.unique_mob_names)[0]
drops_i.append(Mob(name=f"{k} named {name}", p=p, **v))
return drops_i
def drop_item(dictionary):
""" Randomly generates objects based on rarity """
drops_i = []
for k, v in dictionary.items():
quantity = dropper(v['rarity'])
if quantity:
drops_i.append(Item(name=k, quantity=quantity, **v))
return drops_i
class MapSquare:
    """One square of the world map: a terrain type plus its items, mobs and buildings."""

    def __init__(self, name="", square_type=None):
        square_types = ["forest", "mountains", "desert", "city", "swamp", "ocean"]
        self.square_type = square_type or square_types[random.randint(0, len(square_types) - 1)]
        self.name = name
        # Names already used on this square, keeping generated names unique.
        self.unique_mob_names = []
        self.unique_building_names = []
        self.unique_house_names = []

    # NOTE(review): the flattened source is ambiguous here; these read most
    # plausibly as class-level defaults that the generate_* methods shadow
    # with per-instance values — confirm against the original layout.
    mobs = []
    items = []
    buildings = []

    def generate_items(self):
        # ``items`` is the module-level data dict from load_data, not the
        # class attribute above.
        self.items = drop_item(add_dicts_together(items["master"], items[self.square_type]))

    def generate_buildings(self, p):
        self.buildings = drop_building(add_dicts_together(buildings["master"], buildings[self.square_type]), p)

    def generate_mobs(self, p):
        self.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square_type]), p)

    def clean_up_map(self):
        """ Remove items with quantity of zero from the map inventory"""
        self.items = [i for i in self.items if i.quantity != 0]

    @staticmethod
    def map_picture(the_map, p):
        """With the player's location in the center, draw a 5 x 5 map with map square type
        and coordinates in each square"""
        # Top-left corner of the 5x5 window centred on the player.
        xy = (p.location[0] - 2, p.location[1] + 2)
        map_coords = []
        for y in range(0, 5):
            row = [(xy[0] + x, xy[1] - y) for x in range(0, 5)]
            map_coords.append(row)
        pretty_map = []
        for r in map_coords:
            row = []
            for coordinates in r:
                if coordinates in the_map.keys():
                    # '*' marks the quest square, '$' marks the job square.
                    if p.quest and p.job and p.quest[1] == coordinates and p.job.location == coordinates:
                        star = '*$ '
                    elif p.quest and p.quest[1] == coordinates:
                        star = ' * '
                    elif p.job and p.job.location == coordinates:
                        star = ' $ '
                    else:
                        star = ' '
                    row.append("|{!s:9}{}|".format(the_map[coordinates].square_type, star))
                else:
                    row.append("|{!s:12}|".format(' '))
            pretty_map.append(row)
        for row in pretty_map:
            print(''.join(row))
class Player:
    """The human player: location, inventory, equipment and progress counters."""

    def __init__(self, name, location):
        self.name = name
        self.location = location            # (x, y) coordinates on the map
        self.square = None                  # MapSquare currently occupied
        self.money = 0
        self.quest = None
        self.job = None
        self.phase = "day"                  # toggles between 'day' and 'night'
        self.equipped_weapon = None
        self.major_armor = None
        self.minor_armor = None
        self.building_local = None          # building the player is inside, if any
        self.inventory = []
        self.skills = {}                    # skill name -> mastery percent
        self.health = 100
        # Achievement / statistics counters.
        self.greeting_count = 0
        self.body_count = 0
        self.assassination_count = 0
        self.hit_list = []
        self.death_count = 0
        # TODO increase insurance cost every death?
        self.food_count = 0
        self.run_away_count = 0
        self.speed_bonus = False
        self.game_won = False

    def game_over(self):
        """Announce victory (only once) plus bravery/cowardice achievements."""
        if self.game_won is False:
            self.game_won = True
            print(colored("You have won the game!", "green"))
            print("You may continue playing to earn more achievements if you wish.")
            if self.run_away_count == 0:
                print("Congratulations, you have achieved the True Bravery achievement, having won the game without ever running away from a fight.")
            if self.run_away_count > 100:
                print("Congratulations, you have achieved the True Cowardice achievement, having won the game after running away from over 100 battles.")

    def clean_up_inventory(self):
        """ Remove items with quantity of zero from the map inventory"""
        self.inventory = [i for i in self.inventory if i.quantity != 0]

    def phase_change(self, the_map):
        """Flip day/night and refresh every square the player is NOT on:
        regenerate loose items, restock building wares and job openings,
        heal and calm mobs (at daybreak), and repopulate empty squares."""
        self.phase = 'day' if self.phase == 'night' else 'night'
        for k, square in the_map.items():
            if self.location != k:
                square.generate_items()
                for b in square.buildings:
                    if b.ware_list:
                        # Restock; retry until at least one ware drops.
                        b.wares = drop_item(b.ware_list)
                        while not b.wares:
                            b.wares = drop_item(b.ware_list)
                    if b.name not in ('a castle', 'a volcanic base'):
                        # Re-roll job openings from this building type's template.
                        jobs = {}
                        buiding_dict = add_dicts_together(buildings['master'], buildings[square.square_type])
                        for key, v in buiding_dict.items():
                            if key == b.name and v.get('jobs'):
                                for name, values in v['jobs'].items():
                                    jobs[name] = values
                        b.jobs = b.drop_job(jobs)
                if self.phase == 'day':
                    self.speed_bonus = False
                    for mob in square.mobs:
                        mob.health = 100
                        mob.irritation_level = 0
                        # Mobs keep their quest only while the player holds one.
                        mob.quest = None if self.quest is None else mob.quest
                if not square.mobs:
                    square.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square.square_type]),
                                           self, limit=len(names), square=square)

    def formatted_inventory(self):
        """Return the inventory as a human-readable comma-separated string."""
        formatted = []
        for item in self.inventory:
            if item.quantity > 1:
                formatted.append(f"{int_to_words(item.quantity)} {item.plural}")
            else:
                formatted.append(item.name)
        if formatted:
            return comma_separated(formatted)
        else:
            return "nothing"

    def pretty_inventory(self):
        """Describe the inventory, wielded weapon and worn armor."""
        w = self.equipped_weapon
        major = self.major_armor.defense if self.major_armor else 0
        minor = self.minor_armor.defense if self.minor_armor else 0
        # Each point of armor defense grants a 5% damage reduction.
        armor_defense = (major + minor) * 5
        armors = [self.major_armor.name if self.major_armor else None, self.minor_armor.name if self.minor_armor else None]
        inventory = {'inventory_items': f"You have {self.formatted_inventory()} in your inventory.",
                     'weapon': f"You are wielding {int_to_words(w.quantity)} "
                               f"{remove_little_words(w.name) if w.quantity == 1 else w.plural}." if w else None,
                     'armor': f"You are wearing {' and '.join(x for x in armors if x)}, "
                              f"giving you a {armor_defense}% reduction in incoming damage." if self.minor_armor or self.major_armor else None}
        return '\n'.join(v for v in inventory.values() if v)

    def status(self):
        """Summarize health, location, skills, money and employment."""
        skills = [f"{k}: {v}%." for k, v in self.skills.items()]
        job = f"You have a job as a {self.job.name}." if self.job else None
        quest = "You have a quest." if self.quest else None
        if job and quest:
            job_string = "\n".join([job, quest])
        elif job or quest:
            job_string = job if job else quest
        else:
            job_string = "You do not have a job, and you are not contributing to society."
        status_string = {
            'health': f'Currently, you have {self.health} health.',
            'location': f'You are located on map coordinates {self.location}, '
                        f'which is {self.square.square_type}.',
            'building_local': f'You are inside {self.building_local.name}.' if self.building_local else None,
            'skills': '\n'.join(skills) if skills else "You don't have any skills.",
            'money': f"You have ${self.money} in your wallet.",
            'job': job_string}
        return '\n'.join(v for v in status_string.values() if v)

    def statistics(self):
        """Print lifetime gameplay statistics."""
        print(f"You have killed {self.body_count} mobs.")
        print(f"You have ran away from {self.run_away_count} battles.")
        print(f"You have eaten {self.food_count} items.")
        print(f"You have performed {self.assassination_count} assassinations.")
        print(f"You have talked to mobs {self.greeting_count} times.")

    def view_hit_list(self):
        """Print outstanding assassination targets, if any."""
        if self.hit_list:
            print(f"If you ever run across these shady characters, be sure to take their names off your list: {comma_separated(self.hit_list)}")
        else:
            print("Looks like you don't know of anyone who needs to be dead.")

    def increase_skill(self, skill, increase):
        """Raise mastery of ``skill`` by ``increase`` percent, creating the
        skill if it is not yet known, and report the new total."""
        try:
            self.skills[skill] += increase
        except KeyError:
            self.skills[skill] = increase
        print(f"You have increased your mastery of {skill} by {increase}% for a total of {self.skills[skill]}%.")
class Item:
    """A game item: a stackable inventory object with optional combat stats.

    Falsy constructor arguments (0, '', None) are normalized to None for the
    optional fields, including a price of 0.
    """

    def __init__(self, name, quantity, plural, category=None, perishable=None,
                 flammable=None, rarity=None, price=None, weapon_rating=None, defense=None):
        self.name = name
        self.quantity = quantity
        self.plural = plural
        self.category = category or None
        self.perishable = perishable or None
        self.flammable = flammable or None
        self.rarity = rarity or None
        self.price = price or None
        self.weapon_rating = weapon_rating or None
        self.defense = defense or None

    def copy(self):
        """Return a new Item with identical attributes.

        Bug fix: ``price`` was previously omitted from the copy, so copies
        of priced items silently lost their price.
        """
        return Item(name=self.name, quantity=self.quantity, plural=self.plural, category=self.category,
                    perishable=self.perishable, flammable=self.flammable, rarity=self.rarity,
                    price=self.price, weapon_rating=self.weapon_rating, defense=self.defense)
class Building(object):
    """A building on a map square, holding wares for sale, jobs and mobs."""

    def __init__(self, name, p, plural, category=None, rarity=None, ware_list=None, mobs=None, jobs=None):
        self.name = name
        self.p = p                      # the Player (supplies location/square context)
        self.quantity = 1
        self.plural = plural
        self.category = category or None
        self.rarity = rarity or None
        self.ware_list = ware_list      # template dict of possible wares
        self.wares = self.drop_wares()
        self.mobs = drop_mob(mobs, p) if mobs else None
        self.jobs = self.drop_job(jobs) if jobs else None
        # Boss lairs get a fixed boss mob and a unique top-tier job instead.
        if self.name in ('a castle', 'a volcanic base'):
            self.boss_mobs_and_jobs()

    def drop_wares(self):
        """Generate the stock for sale; retry until at least one item drops."""
        if self.ware_list:
            wares = drop_item(self.ware_list)
            while not wares:
                wares = drop_item(self.ware_list)
            return wares
        else:
            return []

    def drop_job(self, jobs):
        """Offer each possible job with odds(2), at the player's location."""
        drops_i = []
        for k, v in jobs.items():
            if odds(2):
                drops_i.append(Job(name=k, location=self.p.location, **v))
        return drops_i

    def boss_mobs_and_jobs(self):
        """Install a randomly chosen, fully equipped boss and the boss job."""
        boss_major_armors = [Item('a coat of impervious dragon scales', plural='coats of dragon scales', quantity=1, category='major armor', rarity='super rare', defense=5),
                             Item('an enchanted leather duster', plural='enchanted leather dusters', quantity=1, category='major armor', defense=5, rarity='super rare'),
                             Item('a coat of actual live grizzly bears', plural='coats of actual live grizzly bears', quantity=1, category='major armor', defense=5, rarity='super rare')]
        boss_minor_armors = [Item('wings of an angel', plural='wings of angels', quantity=1, rarity='super rare', category='minor armor', defense=5),
                             Item('an OSHA approved hard hat', plural='OSHA approved hard hats', quantity=1, rarity='super rare', category='minor armor', defense=5),
                             Item('a pair boots that were made for walkin', plural='pairs of boots that were made for walkin', quantity=1, rarity='super rare', category='minor armor', defense=5)]
        boss_weapons = [Item('an apache helicopter', plural='apache helicopters', rarity='super rare', weapon_rating=6, quantity=1),
                        Item('a trebuchet', plural='trebuchets', weapon_rating=6, quantity=1, rarity='super rare'),
                        Item('an army of attacking wizards', plural='armies of attacking wizards', weapon_rating=6, quantity=1, rarity='super rare')]
        boss_names = ["the Terrifying Dragon of Soul Slaying", "the Great Salamander of Darkness", "the Squirrel of Destiny", ]
        # Shuffle each pool and take the first element as the random pick.
        random.shuffle(boss_names)
        random.shuffle(boss_weapons)
        random.shuffle(boss_major_armors)
        random.shuffle(boss_minor_armors)
        boss = Mob(boss_names[0], self.p, plural=boss_names[0], rarity='super rare')
        boss.health = 500
        boss.equipped_weapon = boss_weapons[0]
        boss.major_armor = boss_major_armors[0]
        boss.minor_armor = boss_minor_armors[0]
        boss.irritation_level = 10
        self.mobs = [boss]
        if self.name == 'a castle':
            self.jobs = [Job('king of the realm', location=self.p.location, salary=1100)]
        if self.name == 'a volcanic base':
            self.jobs = [Job('evil overlord', location=self.p.location, salary=1100)]
class Job:
    """Employment a player can hold, tied to a specific map location."""

    def __init__(self, name, location, skills_needed=None, salary=0, skills_learned=None, inventory_needed=None):
        self.name = name
        self.location = location
        # Falsy arguments collapse to their documented defaults.
        self.salary = salary or 0
        self.skills_needed = skills_needed or None
        self.skills_learned = skills_learned or None
        self.inventory_needed = inventory_needed or None
        # How many times the player has applied for this job so far.
        self.application_attempts = 0
class Mob:
def __init__(self, name, p, plural, rarity, inventory=None):
self.name = name
self.p = p
self.plural = plural
self.quantity = 1
self.rarity = rarity
self.skills = self.skills()
self.quest = None
self.inventory = inventory or drop_item(add_dicts_together(items['master'], items[p.square.square_type]))
self.health = 100
self.equipped_weapon = self.equip()
major = [x for x in self.inventory if x.category == 'major armor']
minor = [x for x in self.inventory if x.category == 'minor armor']
self.major_armor = major[0] if major else None
self.minor_armor = minor[0] if minor else None
self.irritation_level = 0
def equip(self):
nice_weapons = []
for i in self.inventory:
try:
if i.weapon_rating:
nice_weapons.append(i)
except AttributeError:
pass
nice_weapons.sort(key=lambda x: x.weapon_rating, reverse=True)
if nice_weapons:
|
else:
return None
@staticmethod
def skills():
""" Pick the skills for a mob, these determine what a player can get from completing a quest """
all_skills = ["strength", "patience", "cleanliness", "leadership", "communication",
"science", "math", "engineering", "intelligence", "driving"]
random.shuffle(all_skills)
return all_skills[0:2]
def generate_quest(self):
"""
inventory based
bring me x of an object to learn a skill
"""
if odds(3):
quest_items = add_dicts_together(items["master"], items[self.p.square.square_type])
quest_item = random.choice(list(quest_items.keys()))
i = Item(quest_item, 0, **quest_items[quest_item])
self.inventory.append(i)
quantity = {'super rare': '1',
'rare': '2',
'uncommon': '3',
'common': '6',
'super common': '15'}
q = quantity[i.rarity]
self.quest = i, int(q), f"{self.p.name}, if you bring " \
f"me {q} {i.plural if int(q) > 1 else remove_little_words(i.name)}, " \
f"I will teach you a valuable skill."
return
elif odds(5):
mobs = []
for biome, building in buildings.items():
for b, attributes in building.items():
if attributes.get('mobs'):
for k in attributes['mobs'].keys():
mobs.append(k)
for biome, mob in wild_mobs.items():
for k in mob.keys():
mobs.append(k)
target = f"{mobs[random.randint(0, len(mobs)-1)]} named {names[random.randint(0, len(names)-1)]}"
print(f"Well, we'll keep this off the record, but I can arrange for some money to find its way "
f"into your account if you make {colored(target, 'yellow')} disappear, if you know what I mean...")
self.p.hit_list.append(target)
return False
else:
return None
| self.inventory.remove(nice_weapons[0])
return nice_weapons[0] | conditional_block |
main_classes.py | import random
import colorama
from termcolor import colored
from reusables.string_manipulation import int_to_words
from app.common_functions import comma_separated, add_dicts_together, remove_little_words, odds
from app.load_data import items, buildings, wild_mobs, names, adjectives
colorama.init()
def find_unique_names(quantity, name_list, taken_names):
    """Pick up to ``quantity`` random names from ``name_list`` that do not
    already appear in ``taken_names``."""
    pool = []
    for candidate in name_list:
        if candidate not in taken_names:
            pool.append(candidate)
    random.shuffle(pool)
    return pool[:quantity]
def dropper(rarity):
results = {'super rare': 100,
'rare': 50,
'uncommon': 25,
'common': 5,
'super common': 2}
quantity = 0
countdown = random.randint(0, 10)
while countdown > 0:
if random.randint(0, results[rarity]) == 1:
quantity += 1
countdown -= 1
return quantity
def drop_building(dictionary, p, limit=None):
    """Randomly generate Building objects for player ``p``'s square.

    ``limit`` caps the total number generated; it defaults to the number of
    available adjectives so unique names cannot run out.
    """
    limit = limit or len(adjectives)
    drops_i = []
    for k, v in dictionary.items():
        quantity = dropper(v['rarity'])
        # Never exceed the remaining naming budget.
        quantity = quantity if quantity < limit else limit
        limit -= quantity
        if quantity:
            if quantity > 1 and v['category'] != 'residence':
                # Split the batch: quantity-n get "<Name>'s Shop" style names,
                # the remaining n get "the <adjective> Shop" style names.
                n = random.randint(0, quantity)
                unique_names = find_unique_names(quantity - n, names, p.square.unique_building_names)
                p.square.unique_building_names += unique_names
                for i in range(0, quantity - n):
                    drops_i.append(Building(name=f"{unique_names[i]}'s {remove_little_words(k).capitalize()}", p=p, **v))
                unique_adjectives = find_unique_names(n, adjectives, p.square.unique_building_names)
                p.square.unique_building_names += unique_adjectives
                for i in range(0, n):
                    drops_i.append(Building(name=f"the {unique_adjectives[i]} {remove_little_words(k).capitalize()}", p=p, **v))
            elif quantity > 1 and v['category'] == 'residence':
                # Residences are always named after their owner.
                unique_house_names = find_unique_names(quantity, names, p.square.unique_house_names)
                p.square.unique_house_names += unique_house_names
                for i in range(0, quantity):
                    drops_i.append(Building(name=f"{unique_house_names[i]}'s {remove_little_words(k)}", p=p, **v))
            else:
                drops_i.append(Building(name=k, p=p, **v))
    return drops_i
def drop_mob(dictionary, p, limit=None, square=None):
    """Randomly generate Mob objects for ``square`` (defaults to p's square).

    ``limit`` caps the total number of mobs so the pool of unique personal
    names cannot be exhausted.
    """
    square = square or p.square
    limit = limit or len(names) - len(square.unique_mob_names)
    drops_i = []
    for k, v in dictionary.items():
        quantity = dropper(v['rarity'])
        # Never exceed the remaining naming budget.
        quantity = quantity if quantity < limit else limit
        limit -= quantity
        if quantity:
            if quantity > 1:
                # Several mobs of the same kind: give each a personal name.
                unique_names = find_unique_names(quantity, names, square.unique_mob_names)
                # NOTE(review): names are recorded on p.square even when a
                # different ``square`` argument was passed — confirm intended.
                p.square.unique_mob_names += unique_names
                for i in range(0, len(unique_names)):
                    drops_i.append(Mob(name=f"{k} named {unique_names[i]}", p=p, **v))
            else:
                if k not in [n.name for n in p.square.mobs]:
                    drops_i.append(Mob(name=k, p=p, **v))
                else:
                    # A mob of this kind already exists here; disambiguate it.
                    name = find_unique_names(1, names, square.unique_mob_names)[0]
                    drops_i.append(Mob(name=f"{k} named {name}", p=p, **v))
    return drops_i
def drop_item(dictionary):
    """Randomly generate Item objects based on each entry's rarity."""
    generated = []
    for item_name, attributes in dictionary.items():
        how_many = dropper(attributes['rarity'])
        if how_many > 0:
            generated.append(Item(name=item_name, quantity=how_many, **attributes))
    return generated
class MapSquare:
    """One square of the world map: a terrain type plus its items, mobs and buildings."""

    def __init__(self, name="", square_type=None):
        square_types = ["forest", "mountains", "desert", "city", "swamp", "ocean"]
        self.square_type = square_type or square_types[random.randint(0, len(square_types) - 1)]
        self.name = name
        # Names already used on this square, keeping generated names unique.
        self.unique_mob_names = []
        self.unique_building_names = []
        self.unique_house_names = []

    # NOTE(review): the flattened source is ambiguous here; these read most
    # plausibly as class-level defaults that the generate_* methods shadow
    # with per-instance values — confirm against the original layout.
    mobs = []
    items = []
    buildings = []

    def generate_items(self):
        # ``items`` is the module-level data dict from load_data, not the
        # class attribute above.
        self.items = drop_item(add_dicts_together(items["master"], items[self.square_type]))

    def generate_buildings(self, p):
        self.buildings = drop_building(add_dicts_together(buildings["master"], buildings[self.square_type]), p)

    def generate_mobs(self, p):
        self.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square_type]), p)

    def clean_up_map(self):
        """ Remove items with quantity of zero from the map inventory"""
        self.items = [i for i in self.items if i.quantity != 0]

    @staticmethod
    def map_picture(the_map, p):
        """With the player's location in the center, draw a 5 x 5 map with map square type
        and coordinates in each square"""
        # Top-left corner of the 5x5 window centred on the player.
        xy = (p.location[0] - 2, p.location[1] + 2)
        map_coords = []
        for y in range(0, 5):
            row = [(xy[0] + x, xy[1] - y) for x in range(0, 5)]
            map_coords.append(row)
        pretty_map = []
        for r in map_coords:
            row = []
            for coordinates in r:
                if coordinates in the_map.keys():
                    # '*' marks the quest square, '$' marks the job square.
                    if p.quest and p.job and p.quest[1] == coordinates and p.job.location == coordinates:
                        star = '*$ '
                    elif p.quest and p.quest[1] == coordinates:
                        star = ' * '
                    elif p.job and p.job.location == coordinates:
                        star = ' $ '
                    else:
                        star = ' '
                    row.append("|{!s:9}{}|".format(the_map[coordinates].square_type, star))
                else:
                    row.append("|{!s:12}|".format(' '))
            pretty_map.append(row)
        for row in pretty_map:
            print(''.join(row))
class Player:
    """The human player: location, inventory, equipment and progress counters."""

    def __init__(self, name, location):
        self.name = name
        self.location = location            # (x, y) coordinates on the map
        self.square = None                  # MapSquare currently occupied
        self.money = 0
        self.quest = None
        self.job = None
        self.phase = "day"                  # toggles between 'day' and 'night'
        self.equipped_weapon = None
        self.major_armor = None
        self.minor_armor = None
        self.building_local = None          # building the player is inside, if any
        self.inventory = []
        self.skills = {}                    # skill name -> mastery percent
        self.health = 100
        # Achievement / statistics counters.
        self.greeting_count = 0
        self.body_count = 0
        self.assassination_count = 0
        self.hit_list = []
        self.death_count = 0
        # TODO increase insurance cost every death?
        self.food_count = 0
        self.run_away_count = 0
        self.speed_bonus = False
        self.game_won = False

    def game_over(self):
        """Announce victory (only once) plus bravery/cowardice achievements."""
        if self.game_won is False:
            self.game_won = True
            print(colored("You have won the game!", "green"))
            print("You may continue playing to earn more achievements if you wish.")
            if self.run_away_count == 0:
                print("Congratulations, you have achieved the True Bravery achievement, having won the game without ever running away from a fight.")
            if self.run_away_count > 100:
                print("Congratulations, you have achieved the True Cowardice achievement, having won the game after running away from over 100 battles.")

    def clean_up_inventory(self):
        """ Remove items with quantity of zero from the map inventory"""
        self.inventory = [i for i in self.inventory if i.quantity != 0]

    def phase_change(self, the_map):
        """Flip day/night and refresh every square the player is NOT on:
        regenerate loose items, restock building wares and job openings,
        heal and calm mobs (at daybreak), and repopulate empty squares."""
        self.phase = 'day' if self.phase == 'night' else 'night'
        for k, square in the_map.items():
            if self.location != k:
                square.generate_items()
                for b in square.buildings:
                    if b.ware_list:
                        # Restock; retry until at least one ware drops.
                        b.wares = drop_item(b.ware_list)
                        while not b.wares:
                            b.wares = drop_item(b.ware_list)
                    if b.name not in ('a castle', 'a volcanic base'):
                        # Re-roll job openings from this building type's template.
                        jobs = {}
                        buiding_dict = add_dicts_together(buildings['master'], buildings[square.square_type])
                        for key, v in buiding_dict.items():
                            if key == b.name and v.get('jobs'):
                                for name, values in v['jobs'].items():
                                    jobs[name] = values
                        b.jobs = b.drop_job(jobs)
                if self.phase == 'day':
                    self.speed_bonus = False
                    for mob in square.mobs:
                        mob.health = 100
                        mob.irritation_level = 0
                        # Mobs keep their quest only while the player holds one.
                        mob.quest = None if self.quest is None else mob.quest
                if not square.mobs:
                    square.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square.square_type]),
                                           self, limit=len(names), square=square)

    def formatted_inventory(self):
        """Return the inventory as a human-readable comma-separated string."""
        formatted = []
        for item in self.inventory:
            if item.quantity > 1:
                formatted.append(f"{int_to_words(item.quantity)} {item.plural}")
            else:
                formatted.append(item.name)
        if formatted:
            return comma_separated(formatted)
        else:
            return "nothing"

    def pretty_inventory(self):
        """Describe the inventory, wielded weapon and worn armor."""
        w = self.equipped_weapon
        major = self.major_armor.defense if self.major_armor else 0
        minor = self.minor_armor.defense if self.minor_armor else 0
        # Each point of armor defense grants a 5% damage reduction.
        armor_defense = (major + minor) * 5
        armors = [self.major_armor.name if self.major_armor else None, self.minor_armor.name if self.minor_armor else None]
        inventory = {'inventory_items': f"You have {self.formatted_inventory()} in your inventory.",
                     'weapon': f"You are wielding {int_to_words(w.quantity)} "
                               f"{remove_little_words(w.name) if w.quantity == 1 else w.plural}." if w else None,
                     'armor': f"You are wearing {' and '.join(x for x in armors if x)}, "
                              f"giving you a {armor_defense}% reduction in incoming damage." if self.minor_armor or self.major_armor else None}
        return '\n'.join(v for v in inventory.values() if v)

    def status(self):
        """Summarize health, location, skills, money and employment."""
        skills = [f"{k}: {v}%." for k, v in self.skills.items()]
        job = f"You have a job as a {self.job.name}." if self.job else None
        quest = "You have a quest." if self.quest else None
        if job and quest:
            job_string = "\n".join([job, quest])
        elif job or quest:
            job_string = job if job else quest
        else:
            job_string = "You do not have a job, and you are not contributing to society."
        status_string = {
            'health': f'Currently, you have {self.health} health.',
            'location': f'You are located on map coordinates {self.location}, '
                        f'which is {self.square.square_type}.',
            'building_local': f'You are inside {self.building_local.name}.' if self.building_local else None,
            'skills': '\n'.join(skills) if skills else "You don't have any skills.",
            'money': f"You have ${self.money} in your wallet.",
            'job': job_string}
        return '\n'.join(v for v in status_string.values() if v)

    def statistics(self):
        """Print lifetime gameplay statistics."""
        print(f"You have killed {self.body_count} mobs.")
        print(f"You have ran away from {self.run_away_count} battles.")
        print(f"You have eaten {self.food_count} items.")
        print(f"You have performed {self.assassination_count} assassinations.")
        print(f"You have talked to mobs {self.greeting_count} times.")

    def view_hit_list(self):
        """Print outstanding assassination targets, if any."""
        if self.hit_list:
            print(f"If you ever run across these shady characters, be sure to take their names off your list: {comma_separated(self.hit_list)}")
        else:
            print("Looks like you don't know of anyone who needs to be dead.")

    def increase_skill(self, skill, increase):
        """Raise mastery of ``skill`` by ``increase`` percent, creating the
        skill if it is not yet known, and report the new total."""
        try:
            self.skills[skill] += increase
        except KeyError:
            self.skills[skill] = increase
        print(f"You have increased your mastery of {skill} by {increase}% for a total of {self.skills[skill]}%.")
class Item:
    """A game item: a stackable inventory object with optional combat stats.

    Falsy constructor arguments (0, '', None) are normalized to None for the
    optional fields, including a price of 0.
    """

    def __init__(self, name, quantity, plural, category=None, perishable=None,
                 flammable=None, rarity=None, price=None, weapon_rating=None, defense=None):
        self.name = name
        self.quantity = quantity
        self.plural = plural
        self.category = category or None
        self.perishable = perishable or None
        self.flammable = flammable or None
        self.rarity = rarity or None
        self.price = price or None
        self.weapon_rating = weapon_rating or None
        self.defense = defense or None

    def copy(self):
        """Return a new Item with identical attributes.

        Bug fix: ``price`` was previously omitted from the copy, so copies
        of priced items silently lost their price.
        """
        return Item(name=self.name, quantity=self.quantity, plural=self.plural, category=self.category,
                    perishable=self.perishable, flammable=self.flammable, rarity=self.rarity,
                    price=self.price, weapon_rating=self.weapon_rating, defense=self.defense)
class Building(object):
def | (self, name, p, plural, category=None, rarity=None, ware_list=None, mobs=None, jobs=None):
self.name = name
self.p = p
self.quantity = 1
self.plural = plural
self.category = category or None
self.rarity = rarity or None
self.ware_list = ware_list
self.wares = self.drop_wares()
self.mobs = drop_mob(mobs, p) if mobs else None
self.jobs = self.drop_job(jobs) if jobs else None
if self.name in ('a castle', 'a volcanic base'):
self.boss_mobs_and_jobs()
def drop_wares(self):
if self.ware_list:
wares = drop_item(self.ware_list)
while not wares:
wares = drop_item(self.ware_list)
return wares
else:
return []
def drop_job(self, jobs):
drops_i = []
for k, v in jobs.items():
if odds(2):
drops_i.append(Job(name=k, location=self.p.location, **v))
return drops_i
def boss_mobs_and_jobs(self):
boss_major_armors = [Item('a coat of impervious dragon scales', plural='coats of dragon scales', quantity=1, category='major armor', rarity='super rare', defense=5),
Item('an enchanted leather duster', plural='enchanted leather dusters', quantity=1, category='major armor', defense=5, rarity='super rare'),
Item('a coat of actual live grizzly bears', plural='coats of actual live grizzly bears', quantity=1, category='major armor', defense=5, rarity='super rare')]
boss_minor_armors = [Item('wings of an angel', plural='wings of angels', quantity=1, rarity='super rare', category='minor armor', defense=5),
Item('an OSHA approved hard hat', plural='OSHA approved hard hats', quantity=1, rarity='super rare', category='minor armor', defense=5),
Item('a pair boots that were made for walkin', plural='pairs of boots that were made for walkin', quantity=1, rarity='super rare', category='minor armor', defense=5)]
boss_weapons = [Item('an apache helicopter', plural='apache helicopters', rarity='super rare', weapon_rating=6, quantity=1),
Item('a trebuchet', plural='trebuchets', weapon_rating=6, quantity=1, rarity='super rare'),
Item('an army of attacking wizards', plural='armies of attacking wizards', weapon_rating=6, quantity=1, rarity='super rare')]
boss_names = ["the Terrifying Dragon of Soul Slaying", "the Great Salamander of Darkness", "the Squirrel of Destiny", ]
random.shuffle(boss_names)
random.shuffle(boss_weapons)
random.shuffle(boss_major_armors)
random.shuffle(boss_minor_armors)
boss = Mob(boss_names[0], self.p, plural=boss_names[0], rarity='super rare')
boss.health = 500
boss.equipped_weapon = boss_weapons[0]
boss.major_armor = boss_major_armors[0]
boss.minor_armor = boss_minor_armors[0]
boss.irritation_level = 10
self.mobs = [boss]
if self.name == 'a castle':
self.jobs = [Job('king of the realm', location=self.p.location, salary=1100)]
if self.name == 'a volcanic base':
self.jobs = [Job('evil overlord', location=self.p.location, salary=1100)]
class Job:
    """Employment a player can hold, tied to a specific map location."""

    def __init__(self, name, location, skills_needed=None, salary=0, skills_learned=None, inventory_needed=None):
        self.name = name
        self.location = location
        # Falsy arguments collapse to their documented defaults.
        self.salary = salary or 0
        self.skills_needed = skills_needed or None
        self.skills_learned = skills_learned or None
        self.inventory_needed = inventory_needed or None
        # How many times the player has applied for this job so far.
        self.application_attempts = 0
class Mob:
    """A non-player creature with inventory, equipment and optional quest."""

    def __init__(self, name, p, plural, rarity, inventory=None):
        self.name = name
        self.p = p                      # the Player this mob interacts with
        self.plural = plural
        self.quantity = 1
        self.rarity = rarity
        # NOTE: rebinds the instance attribute over the ``skills`` method.
        self.skills = self.skills()
        self.quest = None
        self.inventory = inventory or drop_item(add_dicts_together(items['master'], items[p.square.square_type]))
        self.health = 100
        self.equipped_weapon = self.equip()
        major = [x for x in self.inventory if x.category == 'major armor']
        minor = [x for x in self.inventory if x.category == 'minor armor']
        self.major_armor = major[0] if major else None
        self.minor_armor = minor[0] if minor else None
        self.irritation_level = 0

    def equip(self):
        """Wield the highest-rated weapon in inventory, removing it from inventory."""
        nice_weapons = []
        for i in self.inventory:
            try:
                if i.weapon_rating:
                    nice_weapons.append(i)
            except AttributeError:
                # Inventory entries without a weapon_rating attribute are skipped.
                pass
        nice_weapons.sort(key=lambda x: x.weapon_rating, reverse=True)
        if nice_weapons:
            self.inventory.remove(nice_weapons[0])
            return nice_weapons[0]
        else:
            return None

    @staticmethod
    def skills():
        """ Pick the skills for a mob, these determine what a player can get from completing a quest """
        all_skills = ["strength", "patience", "cleanliness", "leadership", "communication",
                      "science", "math", "engineering", "intelligence", "driving"]
        random.shuffle(all_skills)
        return all_skills[0:2]

    def generate_quest(self):
        """
        inventory based
        bring me x of an object to learn a skill
        """
        if odds(3):
            # Fetch quest: ask for N of a biome-appropriate item; the mob
            # keeps a zero-quantity copy as the collection target.
            quest_items = add_dicts_together(items["master"], items[self.p.square.square_type])
            quest_item = random.choice(list(quest_items.keys()))
            i = Item(quest_item, 0, **quest_items[quest_item])
            self.inventory.append(i)
            # Rarer items are requested in smaller amounts.
            quantity = {'super rare': '1',
                        'rare': '2',
                        'uncommon': '3',
                        'common': '6',
                        'super common': '15'}
            q = quantity[i.rarity]
            self.quest = i, int(q), f"{self.p.name}, if you bring " \
                                    f"me {q} {i.plural if int(q) > 1 else remove_little_words(i.name)}, " \
                                    f"I will teach you a valuable skill."
            return
        elif odds(5):
            # Assassination offer: pick a random mob kind plus a random name
            # and add the target to the player's hit list.
            mobs = []
            for biome, building in buildings.items():
                for b, attributes in building.items():
                    if attributes.get('mobs'):
                        for k in attributes['mobs'].keys():
                            mobs.append(k)
            for biome, mob in wild_mobs.items():
                for k in mob.keys():
                    mobs.append(k)
            target = f"{mobs[random.randint(0, len(mobs)-1)]} named {names[random.randint(0, len(names)-1)]}"
            print(f"Well, we'll keep this off the record, but I can arrange for some money to find its way "
                  f"into your account if you make {colored(target, 'yellow')} disappear, if you know what I mean...")
            self.p.hit_list.append(target)
            # NOTE(review): returns False here but a bare return above and
            # None below — callers should treat all falsy results alike; confirm.
            return False
        else:
            return None
| __init__ | identifier_name |
main_classes.py | import random
import colorama
from termcolor import colored
from reusables.string_manipulation import int_to_words
from app.common_functions import comma_separated, add_dicts_together, remove_little_words, odds
from app.load_data import items, buildings, wild_mobs, names, adjectives
colorama.init()
def find_unique_names(quantity, name_list, taken_names):
    """Pick up to ``quantity`` random names from ``name_list`` that do not
    already appear in ``taken_names``."""
    pool = []
    for candidate in name_list:
        if candidate not in taken_names:
            pool.append(candidate)
    random.shuffle(pool)
    return pool[:quantity]
def dropper(rarity):
results = {'super rare': 100,
'rare': 50,
'uncommon': 25,
'common': 5,
'super common': 2}
quantity = 0
countdown = random.randint(0, 10)
while countdown > 0:
if random.randint(0, results[rarity]) == 1:
quantity += 1
countdown -= 1
return quantity
def drop_building(dictionary, p, limit=None):
    """Randomly generate Building objects for player ``p``'s square.

    ``limit`` caps the total number generated; it defaults to the number of
    available adjectives so unique names cannot run out.
    """
    limit = limit or len(adjectives)
    drops_i = []
    for k, v in dictionary.items():
        quantity = dropper(v['rarity'])
        # Never exceed the remaining naming budget.
        quantity = quantity if quantity < limit else limit
        limit -= quantity
        if quantity:
            if quantity > 1 and v['category'] != 'residence':
                # Split the batch: quantity-n get "<Name>'s Shop" style names,
                # the remaining n get "the <adjective> Shop" style names.
                n = random.randint(0, quantity)
                unique_names = find_unique_names(quantity - n, names, p.square.unique_building_names)
                p.square.unique_building_names += unique_names
                for i in range(0, quantity - n):
                    drops_i.append(Building(name=f"{unique_names[i]}'s {remove_little_words(k).capitalize()}", p=p, **v))
                unique_adjectives = find_unique_names(n, adjectives, p.square.unique_building_names)
                p.square.unique_building_names += unique_adjectives
                for i in range(0, n):
                    drops_i.append(Building(name=f"the {unique_adjectives[i]} {remove_little_words(k).capitalize()}", p=p, **v))
            elif quantity > 1 and v['category'] == 'residence':
                # Residences are always named after their owner.
                unique_house_names = find_unique_names(quantity, names, p.square.unique_house_names)
                p.square.unique_house_names += unique_house_names
                for i in range(0, quantity):
                    drops_i.append(Building(name=f"{unique_house_names[i]}'s {remove_little_words(k)}", p=p, **v))
            else:
                drops_i.append(Building(name=k, p=p, **v))
    return drops_i
def drop_mob(dictionary, p, limit=None, square=None):
    """Randomly generate Mob objects for ``square`` (defaults to p's square).

    ``limit`` caps the total number of mobs so the pool of unique personal
    names cannot be exhausted.
    """
    square = square or p.square
    limit = limit or len(names) - len(square.unique_mob_names)
    drops_i = []
    for k, v in dictionary.items():
        quantity = dropper(v['rarity'])
        # Never exceed the remaining naming budget.
        quantity = quantity if quantity < limit else limit
        limit -= quantity
        if quantity:
            if quantity > 1:
                # Several mobs of the same kind: give each a personal name.
                unique_names = find_unique_names(quantity, names, square.unique_mob_names)
                # NOTE(review): names are recorded on p.square even when a
                # different ``square`` argument was passed — confirm intended.
                p.square.unique_mob_names += unique_names
                for i in range(0, len(unique_names)):
                    drops_i.append(Mob(name=f"{k} named {unique_names[i]}", p=p, **v))
            else:
                if k not in [n.name for n in p.square.mobs]:
                    drops_i.append(Mob(name=k, p=p, **v))
                else:
                    # A mob of this kind already exists here; disambiguate it.
                    name = find_unique_names(1, names, square.unique_mob_names)[0]
                    drops_i.append(Mob(name=f"{k} named {name}", p=p, **v))
    return drops_i
def drop_item(dictionary):
    """Randomly generate Item objects based on each entry's rarity."""
    generated = []
    for item_name, attributes in dictionary.items():
        how_many = dropper(attributes['rarity'])
        if how_many > 0:
            generated.append(Item(name=item_name, quantity=how_many, **attributes))
    return generated
class MapSquare:
    """One square of the world map: a terrain type plus its items, mobs and buildings."""

    def __init__(self, name="", square_type=None):
        square_types = ["forest", "mountains", "desert", "city", "swamp", "ocean"]
        self.square_type = square_type or square_types[random.randint(0, len(square_types) - 1)]
        self.name = name
        # Names already used on this square, keeping generated names unique.
        self.unique_mob_names = []
        self.unique_building_names = []
        self.unique_house_names = []

    # NOTE(review): the flattened source is ambiguous here; these read most
    # plausibly as class-level defaults that the generate_* methods shadow
    # with per-instance values — confirm against the original layout.
    mobs = []
    items = []
    buildings = []

    def generate_items(self):
        # ``items`` is the module-level data dict from load_data, not the
        # class attribute above.
        self.items = drop_item(add_dicts_together(items["master"], items[self.square_type]))

    def generate_buildings(self, p):
        self.buildings = drop_building(add_dicts_together(buildings["master"], buildings[self.square_type]), p)

    def generate_mobs(self, p):
        self.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square_type]), p)

    def clean_up_map(self):
        """ Remove items with quantity of zero from the map inventory"""
        self.items = [i for i in self.items if i.quantity != 0]

    @staticmethod
    def map_picture(the_map, p):
        """With the player's location in the center, draw a 5 x 5 map with map square type
        and coordinates in each square"""
        # Top-left corner of the 5x5 window centred on the player.
        xy = (p.location[0] - 2, p.location[1] + 2)
        map_coords = []
        for y in range(0, 5):
            row = [(xy[0] + x, xy[1] - y) for x in range(0, 5)]
            map_coords.append(row)
        pretty_map = []
        for r in map_coords:
            row = []
            for coordinates in r:
                if coordinates in the_map.keys():
                    # '*' marks the quest square, '$' marks the job square.
                    if p.quest and p.job and p.quest[1] == coordinates and p.job.location == coordinates:
                        star = '*$ '
                    elif p.quest and p.quest[1] == coordinates:
                        star = ' * '
                    elif p.job and p.job.location == coordinates:
                        star = ' $ '
                    else:
                        star = ' '
                    row.append("|{!s:9}{}|".format(the_map[coordinates].square_type, star))
                else:
                    row.append("|{!s:12}|".format(' '))
            pretty_map.append(row)
        for row in pretty_map:
            print(''.join(row))
class Player:
def __init__(self, name, location):
self.name = name
self.location = location
self.square = None
self.money = 0
self.quest = None
self.job = None
self.phase = "day"
self.equipped_weapon = None
self.major_armor = None
self.minor_armor = None
self.building_local = None
self.inventory = []
self.skills = {}
self.health = 100
self.greeting_count = 0
self.body_count = 0
self.assassination_count = 0
self.hit_list = []
self.death_count = 0
# TODO increase insurance cost every death?
self.food_count = 0
self.run_away_count = 0
self.speed_bonus = False
self.game_won = False
def game_over(self):
if self.game_won is False:
self.game_won = True
print(colored("You have won the game!", "green"))
print("You may continue playing to earn more achievements if you wish.")
if self.run_away_count == 0:
print("Congratulations, you have achieved the True Bravery achievement, having won the game without ever running away from a fight.")
if self.run_away_count > 100:
print("Congratulations, you have achieved the True Cowardice achievement, having won the game after running away from over 100 battles.")
def clean_up_inventory(self):
""" Remove items with quantity of zero from the map inventory"""
self.inventory = [i for i in self.inventory if i.quantity != 0]
def phase_change(self, the_map):
self.phase = 'day' if self.phase == 'night' else 'night'
for k, square in the_map.items():
if self.location != k:
square.generate_items()
for b in square.buildings:
if b.ware_list:
b.wares = drop_item(b.ware_list)
while not b.wares:
b.wares = drop_item(b.ware_list)
if b.name not in ('a castle', 'a volcanic base'):
jobs = {}
buiding_dict = add_dicts_together(buildings['master'], buildings[square.square_type])
for key, v in buiding_dict.items():
if key == b.name and v.get('jobs'):
for name, values in v['jobs'].items():
jobs[name] = values
b.jobs = b.drop_job(jobs)
if self.phase == 'day':
self.speed_bonus = False
for mob in square.mobs:
mob.health = 100
mob.irritation_level = 0
mob.quest = None if self.quest is None else mob.quest
if not square.mobs:
square.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square.square_type]),
self, limit=len(names), square=square)
def formatted_inventory(self):
formatted = []
for item in self.inventory:
if item.quantity > 1:
formatted.append(f"{int_to_words(item.quantity)} {item.plural}")
else:
formatted.append(item.name)
if formatted:
return comma_separated(formatted)
else:
return "nothing"
def pretty_inventory(self):
w = self.equipped_weapon
major = self.major_armor.defense if self.major_armor else 0
minor = self.minor_armor.defense if self.minor_armor else 0
armor_defense = (major + minor) * 5
armors = [self.major_armor.name if self.major_armor else None, self.minor_armor.name if self.minor_armor else None]
inventory = {'inventory_items': f"You have {self.formatted_inventory()} in your inventory.",
'weapon': f"You are wielding {int_to_words(w.quantity)} "
f"{remove_little_words(w.name) if w.quantity == 1 else w.plural}." if w else None,
'armor': f"You are wearing {' and '.join(x for x in armors if x)}, "
f"giving you a {armor_defense}% reduction in incoming damage." if self.minor_armor or self.major_armor else None}
return '\n'.join(v for v in inventory.values() if v)
def status(self):
skills = [f"{k}: {v}%." for k, v in self.skills.items()]
job = f"You have a job as a {self.job.name}." if self.job else None
quest = "You have a quest." if self.quest else None
if job and quest:
job_string = "\n".join([job, quest])
elif job or quest:
job_string = job if job else quest
else:
job_string = "You do not have a job, and you are not contributing to society."
status_string = {
'health': f'Currently, you have {self.health} health.',
'location': f'You are located on map coordinates {self.location}, '
f'which is {self.square.square_type}.',
'building_local': f'You are inside {self.building_local.name}.' if self.building_local else None,
'skills': '\n'.join(skills) if skills else "You don't have any skills.",
'money': f"You have ${self.money} in your wallet.",
'job': job_string}
return '\n'.join(v for v in status_string.values() if v)
def statistics(self):
|
def view_hit_list(self):
if self.hit_list:
print(f"If you ever run across these shady characters, be sure to take their names off your list: {comma_separated(self.hit_list)}")
else:
print("Looks like you don't know of anyone who needs to be dead.")
def increase_skill(self, skill, increase):
try:
self.skills[skill] += increase
except KeyError:
self.skills[skill] = increase
print(f"You have increased your mastery of {skill} by {increase}% for a total of {self.skills[skill]}%.")
class Item:
def __init__(self, name, quantity, plural, category=None, perishable=None,
flammable=None, rarity=None, price=None, weapon_rating=None, defense=None):
self.name = name
self.quantity = quantity
self.plural = plural
self.category = category or None
self.perishable = perishable or None
self.flammable = flammable or None
self.rarity = rarity or None
self.price = price or None
self.weapon_rating = weapon_rating or None
self.defense = defense or None
def copy(self):
return Item(name=self.name, quantity=self.quantity, plural=self.plural, category=self.category,
perishable=self.perishable, flammable=self.flammable, rarity=self.rarity,
weapon_rating=self.weapon_rating, defense=self.defense)
class Building(object):
def __init__(self, name, p, plural, category=None, rarity=None, ware_list=None, mobs=None, jobs=None):
self.name = name
self.p = p
self.quantity = 1
self.plural = plural
self.category = category or None
self.rarity = rarity or None
self.ware_list = ware_list
self.wares = self.drop_wares()
self.mobs = drop_mob(mobs, p) if mobs else None
self.jobs = self.drop_job(jobs) if jobs else None
if self.name in ('a castle', 'a volcanic base'):
self.boss_mobs_and_jobs()
def drop_wares(self):
if self.ware_list:
wares = drop_item(self.ware_list)
while not wares:
wares = drop_item(self.ware_list)
return wares
else:
return []
def drop_job(self, jobs):
drops_i = []
for k, v in jobs.items():
if odds(2):
drops_i.append(Job(name=k, location=self.p.location, **v))
return drops_i
def boss_mobs_and_jobs(self):
boss_major_armors = [Item('a coat of impervious dragon scales', plural='coats of dragon scales', quantity=1, category='major armor', rarity='super rare', defense=5),
Item('an enchanted leather duster', plural='enchanted leather dusters', quantity=1, category='major armor', defense=5, rarity='super rare'),
Item('a coat of actual live grizzly bears', plural='coats of actual live grizzly bears', quantity=1, category='major armor', defense=5, rarity='super rare')]
boss_minor_armors = [Item('wings of an angel', plural='wings of angels', quantity=1, rarity='super rare', category='minor armor', defense=5),
Item('an OSHA approved hard hat', plural='OSHA approved hard hats', quantity=1, rarity='super rare', category='minor armor', defense=5),
Item('a pair boots that were made for walkin', plural='pairs of boots that were made for walkin', quantity=1, rarity='super rare', category='minor armor', defense=5)]
boss_weapons = [Item('an apache helicopter', plural='apache helicopters', rarity='super rare', weapon_rating=6, quantity=1),
Item('a trebuchet', plural='trebuchets', weapon_rating=6, quantity=1, rarity='super rare'),
Item('an army of attacking wizards', plural='armies of attacking wizards', weapon_rating=6, quantity=1, rarity='super rare')]
boss_names = ["the Terrifying Dragon of Soul Slaying", "the Great Salamander of Darkness", "the Squirrel of Destiny", ]
random.shuffle(boss_names)
random.shuffle(boss_weapons)
random.shuffle(boss_major_armors)
random.shuffle(boss_minor_armors)
boss = Mob(boss_names[0], self.p, plural=boss_names[0], rarity='super rare')
boss.health = 500
boss.equipped_weapon = boss_weapons[0]
boss.major_armor = boss_major_armors[0]
boss.minor_armor = boss_minor_armors[0]
boss.irritation_level = 10
self.mobs = [boss]
if self.name == 'a castle':
self.jobs = [Job('king of the realm', location=self.p.location, salary=1100)]
if self.name == 'a volcanic base':
self.jobs = [Job('evil overlord', location=self.p.location, salary=1100)]
class Job:
def __init__(self, name, location, skills_needed=None, salary=0, skills_learned=None, inventory_needed=None):
self.name = name
self.location = location
self.skills_needed = skills_needed or None
self.salary = salary or 0
self.skills_learned = skills_learned or None
self.inventory_needed = inventory_needed or None
self.application_attempts = 0
class Mob:
def __init__(self, name, p, plural, rarity, inventory=None):
self.name = name
self.p = p
self.plural = plural
self.quantity = 1
self.rarity = rarity
self.skills = self.skills()
self.quest = None
self.inventory = inventory or drop_item(add_dicts_together(items['master'], items[p.square.square_type]))
self.health = 100
self.equipped_weapon = self.equip()
major = [x for x in self.inventory if x.category == 'major armor']
minor = [x for x in self.inventory if x.category == 'minor armor']
self.major_armor = major[0] if major else None
self.minor_armor = minor[0] if minor else None
self.irritation_level = 0
def equip(self):
nice_weapons = []
for i in self.inventory:
try:
if i.weapon_rating:
nice_weapons.append(i)
except AttributeError:
pass
nice_weapons.sort(key=lambda x: x.weapon_rating, reverse=True)
if nice_weapons:
self.inventory.remove(nice_weapons[0])
return nice_weapons[0]
else:
return None
@staticmethod
def skills():
""" Pick the skills for a mob, these determine what a player can get from completing a quest """
all_skills = ["strength", "patience", "cleanliness", "leadership", "communication",
"science", "math", "engineering", "intelligence", "driving"]
random.shuffle(all_skills)
return all_skills[0:2]
def generate_quest(self):
"""
inventory based
bring me x of an object to learn a skill
"""
if odds(3):
quest_items = add_dicts_together(items["master"], items[self.p.square.square_type])
quest_item = random.choice(list(quest_items.keys()))
i = Item(quest_item, 0, **quest_items[quest_item])
self.inventory.append(i)
quantity = {'super rare': '1',
'rare': '2',
'uncommon': '3',
'common': '6',
'super common': '15'}
q = quantity[i.rarity]
self.quest = i, int(q), f"{self.p.name}, if you bring " \
f"me {q} {i.plural if int(q) > 1 else remove_little_words(i.name)}, " \
f"I will teach you a valuable skill."
return
elif odds(5):
mobs = []
for biome, building in buildings.items():
for b, attributes in building.items():
if attributes.get('mobs'):
for k in attributes['mobs'].keys():
mobs.append(k)
for biome, mob in wild_mobs.items():
for k in mob.keys():
mobs.append(k)
target = f"{mobs[random.randint(0, len(mobs)-1)]} named {names[random.randint(0, len(names)-1)]}"
print(f"Well, we'll keep this off the record, but I can arrange for some money to find its way "
f"into your account if you make {colored(target, 'yellow')} disappear, if you know what I mean...")
self.p.hit_list.append(target)
return False
else:
return None
| print(f"You have killed {self.body_count} mobs.")
print(f"You have ran away from {self.run_away_count} battles.")
print(f"You have eaten {self.food_count} items.")
print(f"You have performed {self.assassination_count} assassinations.")
print(f"You have talked to mobs {self.greeting_count} times.") | identifier_body |
tanzania-improved.py | # coding: utf-8
# In[1]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
# In[2]:
#loading the two datasets
#loading the train data
train_data = pd.read_csv('../input/training.csv')
print("The train data")
print(train_data.head())
#loading the test data
test_data = pd.read_csv('../input/test (1).csv')
print("The test data")
print(test_data.head())
# In[3]:
# check if the data has missing points with seaborn heatmap
import seaborn as sns
sns.heatmap(train_data.isnull(),yticklabels=False, cmap='viridis')
# In[4]:
# view the columns
train_data.columns
# In[5]:
# view the columns in the test
x_test = test_data
x_test.columns
# In[6]:
#looking at the data above the X array has some features not in the x_test
# In[7]:
train_data.head(5)
# In[8]:
x_test.head()
# In[9]:
#dropping the labels that you are supposed to predict and the excess from train_head
cols = ['ID','mobile_money', 'savings', 'borrowing','insurance']
train_data = train_data.drop(cols, axis=1)
x_test = test_data.drop(['ID'], axis=1)
# In[10]:
#looking at the unique classification classes
train_data['mobile_money_classification'].unique()
# In[11]:
#lets look at both the train and test data and see if they match after the drop
train_data.columns
# In[12]:
x_test.columns
# In[13]:
X = train_data.drop(['mobile_money_classification'], axis=1)
y = train_data['mobile_money_classification']
# In[14]:
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(random_state = 42,n_estimators=1400,criterion='gini')
rf.fit(X, y)
#using the base model to build the feature importance
import pandas as pd
feature_importances = pd.DataFrame(rf.feature_importances_,
index = X.columns,
columns=['importance']).sort_values('importance',ascending=False)
print(feature_importances)
# In[15]:
#lets drop the most irrelevant columns in both X and the x_test
#from the already done tests we find that dropping the last three is what works best
X1 = X.drop(['Q8_11','Q8_7','Q8_6'], axis=1)
x_test1 = x_test.drop(['Q8_11','Q8_7','Q8_6'], axis=1)
# In[16]:
#lets normalize the datasets
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
X2 = scaler.fit_transform(X1)
X_test1=scaler.fit_transform(x_test1)
# In[17]:
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier
scores = []
rf = RandomForestClassifier(random_state = 42,n_estimators=1400,criterion='gini')
cv = KFold(n_splits=10, random_state=42, shuffle=False)
for train_index, test_index in cv.split(X):
print("Train Index: ", train_index, "\n")
print("Test Index: ", test_index)
X_train, X_test, y_train, y_test = X2[train_index], X2[test_index], y[train_index], y[test_index]
rf.fit(X_train, y_train)
scores.append(rf.score(X_test, y_test))
# In[18]:
from pprint import pprint
# Look at parameters used by our current forest
print('Parameters currently in use:\n')
pprint(rf.get_params())
# In[19]:
from sklearn.model_selection import RandomizedSearchCV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
pprint(random_grid)
# In[20]:
# Use the random grid to search for best hyperparameters
# First create the base model to tune
rf1 = RandomForestClassifier()
# Random search of parameters, using 3 fold cross validation,
# search across 100 different combinations, and use all available cores
rf1_random = RandomizedSearchCV(estimator = rf1, param_distributions = random_grid, n_iter = 100, cv = 3, verbose=2, random_state=42, n_jobs = -1)
# In[21]:
# Fit the random search model
#rf1_random.fit(X_train, y_train)
# In[22]:
#rf1_random.best_params_
# In[23]:
#grid search using cross validation
#Random search allowed us to narrow down the range for each hyperparameter.
#Now that we know where to concentrate our search, we can explicitly specify every combination of settings to try.
#We do this with GridSearchCV, a method that, instead of sampling randomly from a distribution, evaluates all combinations we define.
#To use Grid Search, we make another grid based on the best values provided by random search:
from sklearn.model_selection import GridSearchCV
# Create the parameter grid based on the results of random search
param_grid = {
'bootstrap': [True],
'max_depth': [100, 100, 110, 120],
'max_features': [2, 3],
'min_samples_leaf': [3, 4, 5],
'min_samples_split': [8, 10, 12],
'n_estimators': [800, 1000, 1600, 2000]
}
# Create a based model
rf2 = RandomForestClassifier()
# Instantiate the grid search model
grid_search = GridSearchCV(estimator = rf2, param_grid = param_grid,
cv = 3, n_jobs = -1, verbose = 2)
# Fit the grid search to the data
#grid_search.fit(X_train,y_train)
#grid_search.best_params_
# In[24]:
rf.fit(X_train, y_train)
scores.append(rf.score(X_test, y_test))
# In[25]:
scores.append(rf.score(X_test, y_test))
# In[26]:
print(np.mean(scores))
# In[27]:
from sklearn.ensemble import RandomForestClassifier
rfc2 = RandomForestClassifier(n_estimators=2000, max_depth=120, min_samples_split=10,
min_samples_leaf=3,max_features='sqrt', bootstrap=True,random_state=42)
rfc2 .fit(X2, y)
# In[28]:
from sklearn.ensemble import RandomForestClassifier
rfc1 = RandomForestClassifier(n_estimators=1400, max_depth=100, min_samples_split=5,
min_samples_leaf=4,max_features='sqrt', bootstrap=True,random_state=42)
rfc1 .fit(X2, y)
# In[29]:
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=1400, max_depth=80, min_samples_split=10,
min_samples_leaf=4,max_features='sqrt', bootstrap=True,random_state=42)
rfc .fit(X2, y)
# In[30]:
from sklearn.preprocessing import LabelEncoder
labels = LabelEncoder()
y_train_labels_fit = labels.fit(y_train)
y_train_lables_trf = labels.transform(y_train)
test_pred = rfc.predict_proba(X_test1)
test_pred = pd.DataFrame(rfc.predict_proba(X_test1), columns=labels.classes_)
q = {'ID': test_data["ID"], 'no_financial_services': test_pred[0], 'other_only': test_pred[1],
'mm_only': test_pred[2], 'mm_plus': test_pred[3]}
df_pred = pd.DataFrame(data=q)
df_pred = df_pred[['ID','no_financial_services', 'other_only', 'mm_only', 'mm_plus' ]]
# In[31]:
from sklearn.preprocessing import LabelEncoder
labels = LabelEncoder()
y_train_labels_fit = labels.fit(y_train)
y_train_lables_trf = labels.transform(y_train)
test_pred = rfc2.predict_proba(X_test1)
test_pred = pd.DataFrame(rfc2.predict_proba(X_test1), columns=labels.classes_)
q = {'ID': test_data["ID"], 'no_financial_services': test_pred[0], 'other_only': test_pred[1],
'mm_only': test_pred[2], 'mm_plus': test_pred[3]}
df_pred2 = pd.DataFrame(data=q)
df_pred2 = df_pred[['ID','no_financial_services', 'other_only', 'mm_only', 'mm_plus' ]]
# In[32]:
from sklearn.preprocessing import LabelEncoder
labels = LabelEncoder()
y_train_labels_fit = labels.fit(y_train)
y_train_lables_trf = labels.transform(y_train)
test_pred = rfc1.predict_proba(X_test1)
test_pred = pd.DataFrame(rfc1.predict_proba(X_test1), columns=labels.classes_)
q = {'ID': test_data["ID"], 'no_financial_services': test_pred[0], 'other_only': test_pred[1],
'mm_only': test_pred[2], 'mm_plus': test_pred[3]}
df_pred1 = pd.DataFrame(data=q)
df_pred1 = df_pred[['ID','no_financial_services', 'other_only', 'mm_only', 'mm_plus' ]]
# In[33]:
| # In[34]:
df_pred1.head()
# In[35]:
#df_pred2.head()
df_pred1.round({"no_financial_services":4, "other_only":4, "mm_only":4, "mm_plus":4})
# In[36]:
df_pred1.to_csv('pred_set.csv', index=False) #save to csv file#
# FROM HERE THE CODE IS SPECIFIC TO XGBOOST MULTICLASS CLASSIFICATION
# In[37]:
#now since we have done some few possible things of trying to improve the model using
#random forest lets try using other algorithms (bring out the big guns)
# using xgboost
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42,test_size=0.33)
X_train.shape, y_train.shape, X_test.shape, y_test.shape,X_test1.shape
# In[38]:
'''dtrain = xgb.DMatrix(data=X_train, label=y_train)
dtest = xgb.DMatrix(data=X_test)
dtest1 = xgb.DMatrix(data=X_test1)
params = {
'max_depth': 4,
'objective': 'multi:softmax', # error evaluation for multiclass training
'num_class': 4,
'n_gpus': 0
}
#bst=xgb.train(params,X_train, y_train)
bst = xgb.train(params, dtrain)
pred = bst.predict(dtest)
print(pred)
#test_pred = bst.predict(dtest1)
print(test_pred)
print(labels.classes_)
params = {
'max_depth': 4,
'objective': 'multi:softmax', # error evaluation for multiclass training
'num_class': 4,
'n_gpus': 0
}
#training_start = time.perf_counter()
#training_end = time.perf_counter()
#prediction_start = time.perf_counter()
preds = xgb.predict(X_test1)
prediction_end = time.perf_counter()
acc_xgb = (preds == y_test).sum().astype(float) / len(preds)*100
xgb_train_time = training_end-training_start
xgb_prediction_time = prediction_end-prediction_start
print("XGBoost's prediction accuracy is: %3.2f" % (acc_xgb))
print("Time consumed for training: %4.3f" % (xgb_train_time))
print("Time consumed for prediction: %6.5f seconds" % (xgb_prediction_time))
'''
# In[39]:
import xgboost as xgb
from xgboost import XGBClassifier
xgb = XGBClassifier(n_estimators=100,max_depth= 4,
objective= 'multi:softmax',
num_class= 4,
n_gpus= 0)
# error evaluation for multiclass training
xgb.fit(X_train, y_train)
from sklearn.preprocessing import LabelEncoder
labels = LabelEncoder()
y_train_labels_fit = labels.fit(y_train)
y_train_lables_trf = labels.transform(y_train)
test_pred = pd.DataFrame(xgb.predict(X_test1), columns=labels.classes_)
#test_pred = pd.DataFrame(bst.predict(X_test1), columns=labels.classes_)
q = {'ID': test_data["ID"], 'no_financial_services': test_pred[0], 'other_only': test_pred[1],
'mm_only': test_pred[2], 'mm_plus': test_pred[3]}
df_pred1 = pd.DataFrame(data=q)
df_pred1 = df_pred[['ID','no_financial_services', 'other_only', 'mm_only', 'mm_plus' ]]
# In[40]:
df_pred1.to_csv('pred_set.csv', index=False) #save to csv fil# |
df_pred.head()
| random_line_split |
tanzania-improved.py |
# coding: utf-8
# In[1]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
# In[2]:
#loading the two datasets
#loading the train data
train_data = pd.read_csv('../input/training.csv')
print("The train data")
print(train_data.head())
#loading the test data
test_data = pd.read_csv('../input/test (1).csv')
print("The test data")
print(test_data.head())
# In[3]:
# check if the data has missing points with seaborn heatmap
import seaborn as sns
sns.heatmap(train_data.isnull(),yticklabels=False, cmap='viridis')
# In[4]:
# view the columns
train_data.columns
# In[5]:
# view the columns in the test
x_test = test_data
x_test.columns
# In[6]:
#looking at the data above the X array has some features not in the x_test
# In[7]:
train_data.head(5)
# In[8]:
x_test.head()
# In[9]:
#dropping the labels that you are supposed to predict and the excess from train_head
cols = ['ID','mobile_money', 'savings', 'borrowing','insurance']
train_data = train_data.drop(cols, axis=1)
x_test = test_data.drop(['ID'], axis=1)
# In[10]:
#looking at the unique classification classes
train_data['mobile_money_classification'].unique()
# In[11]:
#lets look at both the train and test data and see if they match after the drop
train_data.columns
# In[12]:
x_test.columns
# In[13]:
X = train_data.drop(['mobile_money_classification'], axis=1)
y = train_data['mobile_money_classification']
# In[14]:
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(random_state = 42,n_estimators=1400,criterion='gini')
rf.fit(X, y)
#using the base model to build the feature importance
import pandas as pd
feature_importances = pd.DataFrame(rf.feature_importances_,
index = X.columns,
columns=['importance']).sort_values('importance',ascending=False)
print(feature_importances)
# In[15]:
#lets drop the most irrelevant columns in both X and the x_test
#from the already done tests we find that dropping the last three is what works best
X1 = X.drop(['Q8_11','Q8_7','Q8_6'], axis=1)
x_test1 = x_test.drop(['Q8_11','Q8_7','Q8_6'], axis=1)
# In[16]:
#lets normalize the datasets
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
X2 = scaler.fit_transform(X1)
X_test1=scaler.fit_transform(x_test1)
# In[17]:
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier
scores = []
rf = RandomForestClassifier(random_state = 42,n_estimators=1400,criterion='gini')
cv = KFold(n_splits=10, random_state=42, shuffle=False)
for train_index, test_index in cv.split(X):
|
# In[18]:
from pprint import pprint
# Look at parameters used by our current forest
print('Parameters currently in use:\n')
pprint(rf.get_params())
# In[19]:
from sklearn.model_selection import RandomizedSearchCV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
pprint(random_grid)
# In[20]:
# Use the random grid to search for best hyperparameters
# First create the base model to tune
rf1 = RandomForestClassifier()
# Random search of parameters, using 3 fold cross validation,
# search across 100 different combinations, and use all available cores
rf1_random = RandomizedSearchCV(estimator = rf1, param_distributions = random_grid, n_iter = 100, cv = 3, verbose=2, random_state=42, n_jobs = -1)
# In[21]:
# Fit the random search model
#rf1_random.fit(X_train, y_train)
# In[22]:
#rf1_random.best_params_
# In[23]:
#grid search using cross validation
#Random search allowed us to narrow down the range for each hyperparameter.
#Now that we know where to concentrate our search, we can explicitly specify every combination of settings to try.
#We do this with GridSearchCV, a method that, instead of sampling randomly from a distribution, evaluates all combinations we define.
#To use Grid Search, we make another grid based on the best values provided by random search:
from sklearn.model_selection import GridSearchCV
# Create the parameter grid based on the results of random search
param_grid = {
'bootstrap': [True],
'max_depth': [100, 100, 110, 120],
'max_features': [2, 3],
'min_samples_leaf': [3, 4, 5],
'min_samples_split': [8, 10, 12],
'n_estimators': [800, 1000, 1600, 2000]
}
# Create a based model
rf2 = RandomForestClassifier()
# Instantiate the grid search model
grid_search = GridSearchCV(estimator = rf2, param_grid = param_grid,
cv = 3, n_jobs = -1, verbose = 2)
# Fit the grid search to the data
#grid_search.fit(X_train,y_train)
#grid_search.best_params_
# In[24]:
rf.fit(X_train, y_train)
scores.append(rf.score(X_test, y_test))
# In[25]:
scores.append(rf.score(X_test, y_test))
# In[26]:
print(np.mean(scores))
# In[27]:
from sklearn.ensemble import RandomForestClassifier
rfc2 = RandomForestClassifier(n_estimators=2000, max_depth=120, min_samples_split=10,
min_samples_leaf=3,max_features='sqrt', bootstrap=True,random_state=42)
rfc2 .fit(X2, y)
# In[28]:
from sklearn.ensemble import RandomForestClassifier
rfc1 = RandomForestClassifier(n_estimators=1400, max_depth=100, min_samples_split=5,
min_samples_leaf=4,max_features='sqrt', bootstrap=True,random_state=42)
rfc1 .fit(X2, y)
# In[29]:
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=1400, max_depth=80, min_samples_split=10,
min_samples_leaf=4,max_features='sqrt', bootstrap=True,random_state=42)
rfc .fit(X2, y)
# In[30]:
from sklearn.preprocessing import LabelEncoder
labels = LabelEncoder()
y_train_labels_fit = labels.fit(y_train)
y_train_lables_trf = labels.transform(y_train)
test_pred = rfc.predict_proba(X_test1)
test_pred = pd.DataFrame(rfc.predict_proba(X_test1), columns=labels.classes_)
q = {'ID': test_data["ID"], 'no_financial_services': test_pred[0], 'other_only': test_pred[1],
'mm_only': test_pred[2], 'mm_plus': test_pred[3]}
df_pred = pd.DataFrame(data=q)
df_pred = df_pred[['ID','no_financial_services', 'other_only', 'mm_only', 'mm_plus' ]]
# In[31]:
from sklearn.preprocessing import LabelEncoder
labels = LabelEncoder()
y_train_labels_fit = labels.fit(y_train)
y_train_lables_trf = labels.transform(y_train)
test_pred = rfc2.predict_proba(X_test1)
test_pred = pd.DataFrame(rfc2.predict_proba(X_test1), columns=labels.classes_)
q = {'ID': test_data["ID"], 'no_financial_services': test_pred[0], 'other_only': test_pred[1],
'mm_only': test_pred[2], 'mm_plus': test_pred[3]}
df_pred2 = pd.DataFrame(data=q)
df_pred2 = df_pred[['ID','no_financial_services', 'other_only', 'mm_only', 'mm_plus' ]]
# In[32]:
from sklearn.preprocessing import LabelEncoder
labels = LabelEncoder()
y_train_labels_fit = labels.fit(y_train)
y_train_lables_trf = labels.transform(y_train)
test_pred = rfc1.predict_proba(X_test1)
test_pred = pd.DataFrame(rfc1.predict_proba(X_test1), columns=labels.classes_)
q = {'ID': test_data["ID"], 'no_financial_services': test_pred[0], 'other_only': test_pred[1],
'mm_only': test_pred[2], 'mm_plus': test_pred[3]}
df_pred1 = pd.DataFrame(data=q)
df_pred1 = df_pred[['ID','no_financial_services', 'other_only', 'mm_only', 'mm_plus' ]]
# In[33]:
df_pred.head()
# In[34]:
df_pred1.head()
# In[35]:
#df_pred2.head()
df_pred1.round({"no_financial_services":4, "other_only":4, "mm_only":4, "mm_plus":4})
# In[36]:
df_pred1.to_csv('pred_set.csv', index=False) #save to csv file#
# FROM HERE THE CODE IS SPECIFIC TO XGBOOST MULTICLASS CLASSIFICATION
# In[37]:
#now since we have done some few possible things of trying to improve the model using
#random forest lets try using other algorithms (bring out the big guns)
# using xgboost
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42,test_size=0.33)
X_train.shape, y_train.shape, X_test.shape, y_test.shape,X_test1.shape
# In[38]:
'''dtrain = xgb.DMatrix(data=X_train, label=y_train)
dtest = xgb.DMatrix(data=X_test)
dtest1 = xgb.DMatrix(data=X_test1)
params = {
'max_depth': 4,
'objective': 'multi:softmax', # error evaluation for multiclass training
'num_class': 4,
'n_gpus': 0
}
#bst=xgb.train(params,X_train, y_train)
bst = xgb.train(params, dtrain)
pred = bst.predict(dtest)
print(pred)
#test_pred = bst.predict(dtest1)
print(test_pred)
print(labels.classes_)
params = {
'max_depth': 4,
'objective': 'multi:softmax', # error evaluation for multiclass training
'num_class': 4,
'n_gpus': 0
}
#training_start = time.perf_counter()
#training_end = time.perf_counter()
#prediction_start = time.perf_counter()
preds = xgb.predict(X_test1)
prediction_end = time.perf_counter()
acc_xgb = (preds == y_test).sum().astype(float) / len(preds)*100
xgb_train_time = training_end-training_start
xgb_prediction_time = prediction_end-prediction_start
print("XGBoost's prediction accuracy is: %3.2f" % (acc_xgb))
print("Time consumed for training: %4.3f" % (xgb_train_time))
print("Time consumed for prediction: %6.5f seconds" % (xgb_prediction_time))
'''
# In[39]:
import xgboost as xgb
from xgboost import XGBClassifier
xgb = XGBClassifier(n_estimators=100,max_depth= 4,
objective= 'multi:softmax',
num_class= 4,
n_gpus= 0)
# error evaluation for multiclass training
xgb.fit(X_train, y_train)
from sklearn.preprocessing import LabelEncoder
labels = LabelEncoder()
y_train_labels_fit = labels.fit(y_train)
y_train_lables_trf = labels.transform(y_train)
test_pred = pd.DataFrame(xgb.predict(X_test1), columns=labels.classes_)
#test_pred = pd.DataFrame(bst.predict(X_test1), columns=labels.classes_)
q = {'ID': test_data["ID"], 'no_financial_services': test_pred[0], 'other_only': test_pred[1],
'mm_only': test_pred[2], 'mm_plus': test_pred[3]}
df_pred1 = pd.DataFrame(data=q)
df_pred1 = df_pred[['ID','no_financial_services', 'other_only', 'mm_only', 'mm_plus' ]]
# In[40]:
df_pred1.to_csv('pred_set.csv', index=False) #save to csv fil#
| print("Train Index: ", train_index, "\n")
print("Test Index: ", test_index)
X_train, X_test, y_train, y_test = X2[train_index], X2[test_index], y[train_index], y[test_index]
rf.fit(X_train, y_train)
scores.append(rf.score(X_test, y_test)) | conditional_block |
main.js | moment().format();
let CabSimple = 800;
let CabDoble = 1300;
let CabSuite = 2000;
const iva = (x) => x * 0.21;
let sumaPorDia = (a, b) => a + b;
let simpleIVA = sumaPorDia(CabSimple, iva(CabSimple));
let dobleIVA = sumaPorDia(CabDoble, iva(CabDoble));
let suiteIVA = sumaPorDia(CabSuite, iva(CabSuite));
let estadia = function (a, b) {
return a + b;
};
class Servicio {
constructor(
id,
nombre,
costo,
tipo,
descripcion,
selected,
cantidadPersonas
) {
this.id = id;
this.nombre = nombre;
this.costo = costo;
this.tipo = tipo;
this.descripcion = descripcion;
this.selected = selected;
this.cantidadPersonas = cantidadPersonas;
}
}
const servicios = [];
servicios.push(
new Servicio(
1,
'Cabalgata',
600,
'Recreacion', | servicios.push(
new Servicio(
2,
'Tirolesa',
800,
'Recreacion',
'Deslizamiento por cable entre las copas de los arboles.',
false,
0
)
);
servicios.push(
new Servicio(
3,
'Trekking',
800,
'Recreacion',
'Caminata guiada por el bosque.',
false,
0
)
);
servicios.push(
new Servicio(
4,
'Cena gourmet',
1200,
'Gastronomia',
'Desgustación por pasos maridados con vino.',
false,
0
)
);
servicios.push(
new Servicio(
5,
'Desayuno buffet',
700,
'Gastronomia',
'Manjares artesanales acompañados de jugos naturales.',
false,
0
)
);
servicios.push(
new Servicio(
6,
'Tarde de spa',
1000,
'Relax',
'Sesión de Spa y masajes.',
false,
0
)
);
let pas = document.getElementById('pasajeros');
let cabanasimple = document.getElementById('simplePortada');
let cabanadoble = document.getElementById('doblePortada');
let cabanasuite = document.getElementById('suitePortada');
let fa, fb;
let estadiaTotal;
let pax;
pas.addEventListener('change', () => {
pax = pas.value;
sessionStorage.setItem('pax', pas.value);
localStorage.setItem('pasajeros', pas.value);
});
const fechaA = document.getElementById('checkIn');
fechaA.addEventListener('change', (event) => {
fa = event.target.value;
sessionStorage.setItem('ingreso', fa);
});
const fechaB = document.getElementById('checkOut');
fechaB.addEventListener('change', (event) => {
fb = event.target.value;
sessionStorage.setItem('egreso', fb);
});
let formRes = document.getElementById('formularioReserva');
formRes.onsubmit = (evt) => {
evt.preventDefault();
const checkIn = moment(fa, 'YYYY-MM-DD');
const checkOut = moment(fb, 'YYYY-MM-DD');
estadiaTotal = checkOut.diff(checkIn, 'days');
localStorage.setItem('Check In', fa);
localStorage.setItem('Check Out', fb);
sessionStorage.setItem('dias', estadiaTotal);
localStorage.setItem('estadia', estadiaTotal);
$('#ingreso').append(`${fa}`);
$('#egreso').append(`${fb}`);
$('#guests').append(`${pax}`);
$('#dias').append(`${estadiaTotal}`);
if (pas.value <= 3) {
cabanasimple.style.display = 'initial';
cabanadoble.style.display = 'initial';
cabanasuite.style.display = 'initial';
} else if (pas.value >= 3 && pas.value <= 6) {
cabanasimple.style.display = 'none';
cabanadoble.style.display = 'initial';
cabanasuite.style.display = 'initial';
aparecerSimple.style.display = 'none';
}
if (pas.value > 6) {
cabanasimple.style.display = 'none';
cabanadoble.style.display = 'none';
cabanasuite.style.display = 'initial';
aparecerDoble.style.display = 'none';
aparecerSimple.style.display = 'none';
}
};
function Cabaña(id, nombre, precio, selected) {
this.id = id;
this.nombre = nombre;
this.precio = precio;
this.selected = selected;
}
const cabins = [];
cabins.push(new Cabaña(1, 'Cabaña simple', simpleIVA, false));
cabins.push(new Cabaña(2, 'Cabaña doble', dobleIVA, false));
cabins.push(new Cabaña(3, 'Cabaña suite', suiteIVA, false));
let cabElegida;
$('.addBtn').on('click', function (e) {
e.preventDefault();
const cabanaId = e.target.getAttribute('data-cabaña-id');
cabins.forEach((cabin) => {
if (cabin.id.toString() === cabanaId) {
if (!cabin.selected) {
$('#quecabana').append(`<h3>${cabin.nombre}</h3>`);
cabin.selected = true;
cabElegida = cabin.precio;
swal({
title: `Hecho!`,
text: `¡Cabaña reservada!`,
icon: 'success'
});
} else {
swal({
title: 'Ya elegiste esta cabaña',
icon: 'warning'
});
}
}
});
});
const btnSimple = document.getElementById('btnSimple');
const btnDoble = document.getElementById('btnDoble');
const btnSuite = document.getElementById('btnSuite');
const aparecerSimple = document.getElementById('simple');
const aparecerDoble = document.getElementById('doble');
const aparecerSuite = document.getElementById('suite');
btnSimple.onclick = () => {
$('#simple').toggle(1000);
aparecerSimple.style.display = 'flex';
aparecerDoble.style.display = 'none';
aparecerSuite.style.display = 'none';
};
btnDoble.onclick = () => {
$('#doble').toggle(1000);
aparecerDoble.style.display = 'flex';
aparecerSimple.style.display = 'none';
aparecerSuite.style.display = 'none';
};
btnSuite.onclick = () => {
$('#suite').toggle(1000);
aparecerSuite.style.display = 'flex';
aparecerDoble.style.display = 'none';
aparecerSimple.style.display = 'none';
};
const cabalgata = document.getElementById('imgCabalgata');
const tirolesa = document.getElementById('imgTirolesa');
const trekking = document.getElementById('imgTrekking');
const cocina = document.getElementById('imgCocina');
const desayuno = document.getElementById('imgDesayuno');
const spa = document.getElementById('imgSpa');
$('.services').on('click', function (event) {
event.preventDefault();
const serviceId = event.target.getAttribute('data-service-id');
servicios.forEach((service) => {
if (service.id.toString() === serviceId) {
if (!service.selected) {
swal('Cantidad de personas: ', {
content: 'input'
}).then((value) => {
swal(`Servicio agregado para ${value} personas`);
service.cantidadPersonas = value;
$('#serviciosfinales').append(
`<p>${service.nombre} para ${value} personas = $${
service.costo * value
} </p>`
);
});
service.selected = true;
} else {
swal({
title: 'Ya contrataste este servicio',
text: '¡Conoce los otros que tenemos!',
icon: 'warning'
});
}
}
});
});
const formContacto = document.getElementById('formContacto');
const nombreContacto = document.getElementById('fullName');
const telefonoContacto = document.getElementById('phone');
const emailContacto = document.getElementById('email');
$('#showForm').on('click', function (event) {
event.preventDefault();
$('#staticBackdrop').fadeOut();
if ((pax, fa, fb !== undefined)) {
$('#reservaFinal').fadeIn('3000');
swal({
title: '¡Perfecto!',
text: 'Agrega tus datos y finaliza la reserva.'
});
} else {
swal({
title: 'Elige la fecha y cantidad de pasajeros para continuar',
icon: 'error'
});
}
});
const precio = (a, b) => {
return a * b;
};
let cabFinal;
let costoService;
$('.btnfinal').on('click', () => {
costoService = 0;
servicios.forEach((service) => {
if (service.selected && service.cantidadPersonas > 0) {
costoService += precio(service.costo, service.cantidadPersonas);
}
});
cabFinal = precio(cabElegida, estadiaTotal);
let costoTotal = estadia(cabFinal, costoService);
$('#costoTotal').html('');
$('#costoTotal').append(`Precio final: <strong>${costoTotal}</strong>`);
});
const lastForm = document.getElementById('reservaFinal');
lastForm.onsubmit = (event) => {
event.preventDefault();
swal({
title: '¡Tu reserva fue hecha con éxito!',
text: 'Gracias por elegirnos.',
icon: 'success'
});
};
let HTMLCard = '';
let HTMLError = '';
let contenidoJSON = '';
//AJAX
function Testimonios() {
$.ajax({
url: 'https://randomuser.me/api/?results=4&nat=us,fr,br',
dataType: 'json',
success: function (data) {
contenidoJSON = data.results;
for (let i in contenidoJSON) {
HTMLCard += ` <div class="card m-4 rounded-3">
<img class="card-img-top img-testimonial" src="${contenidoJSON[i].picture.large}">
<div class="card-body text-center">
<p class="card-text-testimonial">"Lorem ipsum dolor sit, amet consectetur adipisicing elit."</p>
<h5 class="card-title-testimonial fw-bolder">${contenidoJSON[i].name.first} ${contenidoJSON[i].name.last}</h5>
<h6 class="card-city-testimonial">${contenidoJSON[i].location.city}, ${contenidoJSON[i].location.country}</h6>
</div>
</div>`;
$('#testimonios').html(HTMLCard);
}
},
error: function () {
HTMLError =
"<div class='center-text'>" +
'<h4>El contenido parece no estar disponible. Intente nuevamente en unos minutos.</h4>' +
'</div>';
$('#testimonial-item').html(HTMLError);
}
});
}
Testimonios();
$('document').ready(function () {
testFadeIn.hide();
});
const testimoniosClientes = $('#testimoniosClientes');
const testFadeIn = $('#testimonios');
testimoniosClientes.mouseover(function () {
testFadeIn.fadeIn(3000);
});
formContacto.onsubmit = (evt) => {
evt.preventDefault();
localStorage.setItem('Nombre', nombreContacto.value);
localStorage.setItem('Telefono', telefonoContacto.value);
localStorage.setItem('Email', emailContacto.value);
swal({
title: `Gracias por contactarte con nosotros.`,
text: 'En breve nos comunicaremos para despejar tus dudas y continuar con el proceso de Reserva.'
});
}; | 'Tour a caballo guiado por el bosque.',
false,
0
)
); | random_line_split |
main.js | moment().format();
let CabSimple = 800;
let CabDoble = 1300;
let CabSuite = 2000;
const iva = (x) => x * 0.21;
let sumaPorDia = (a, b) => a + b;
let simpleIVA = sumaPorDia(CabSimple, iva(CabSimple));
let dobleIVA = sumaPorDia(CabDoble, iva(CabDoble));
let suiteIVA = sumaPorDia(CabSuite, iva(CabSuite));
let estadia = function (a, b) {
return a + b;
};
class Servicio {
constructor(
id,
nombre,
costo,
tipo,
descripcion,
selected,
cantidadPersonas
) {
this.id = id;
this.nombre = nombre;
this.costo = costo;
this.tipo = tipo;
this.descripcion = descripcion;
this.selected = selected;
this.cantidadPersonas = cantidadPersonas;
}
}
const servicios = [];
servicios.push(
new Servicio(
1,
'Cabalgata',
600,
'Recreacion',
'Tour a caballo guiado por el bosque.',
false,
0
)
);
servicios.push(
new Servicio(
2,
'Tirolesa',
800,
'Recreacion',
'Deslizamiento por cable entre las copas de los arboles.',
false,
0
)
);
servicios.push(
new Servicio(
3,
'Trekking',
800,
'Recreacion',
'Caminata guiada por el bosque.',
false,
0
)
);
servicios.push(
new Servicio(
4,
'Cena gourmet',
1200,
'Gastronomia',
'Desgustación por pasos maridados con vino.',
false,
0
)
);
servicios.push(
new Servicio(
5,
'Desayuno buffet',
700,
'Gastronomia',
'Manjares artesanales acompañados de jugos naturales.',
false,
0
)
);
servicios.push(
new Servicio(
6,
'Tarde de spa',
1000,
'Relax',
'Sesión de Spa y masajes.',
false,
0
)
);
let pas = document.getElementById('pasajeros');
let cabanasimple = document.getElementById('simplePortada');
let cabanadoble = document.getElementById('doblePortada');
let cabanasuite = document.getElementById('suitePortada');
let fa, fb;
let estadiaTotal;
let pax;
pas.addEventListener('change', () => {
pax = pas.value;
sessionStorage.setItem('pax', pas.value);
localStorage.setItem('pasajeros', pas.value);
});
const fechaA = document.getElementById('checkIn');
fechaA.addEventListener('change', (event) => {
fa = event.target.value;
sessionStorage.setItem('ingreso', fa);
});
const fechaB = document.getElementById('checkOut');
fechaB.addEventListener('change', (event) => {
fb = event.target.value;
sessionStorage.setItem('egreso', fb);
});
let formRes = document.getElementById('formularioReserva');
formRes.onsubmit = (evt) => {
evt.preventDefault();
const checkIn = moment(fa, 'YYYY-MM-DD');
const checkOut = moment(fb, 'YYYY-MM-DD');
estadiaTotal = checkOut.diff(checkIn, 'days');
localStorage.setItem('Check In', fa);
localStorage.setItem('Check Out', fb);
sessionStorage.setItem('dias', estadiaTotal);
localStorage.setItem('estadia', estadiaTotal);
$('#ingreso').append(`${fa}`);
$('#egreso').append(`${fb}`);
$('#guests').append(`${pax}`);
$('#dias').append(`${estadiaTotal}`);
if (pas.value <= 3) {
cabanasimple.style.display = 'initial';
cabanadoble.style.display = 'initial';
cabanasuite.style.display = 'initial';
} else if (pas.value >= 3 && pas.value <= 6) {
cabanasimple.style.display = 'none';
cabanadoble.style.display = 'initial';
cabanasuite.style.display = 'initial';
aparecerSimple.style.display = 'none';
}
if (pas.value > 6) {
|
function Cabaña(id, nombre, precio, selected) {
this.id = id;
this.nombre = nombre;
this.precio = precio;
this.selected = selected;
}
const cabins = [];
cabins.push(new Cabaña(1, 'Cabaña simple', simpleIVA, false));
cabins.push(new Cabaña(2, 'Cabaña doble', dobleIVA, false));
cabins.push(new Cabaña(3, 'Cabaña suite', suiteIVA, false));
let cabElegida;
$('.addBtn').on('click', function (e) {
e.preventDefault();
const cabanaId = e.target.getAttribute('data-cabaña-id');
cabins.forEach((cabin) => {
if (cabin.id.toString() === cabanaId) {
if (!cabin.selected) {
$('#quecabana').append(`<h3>${cabin.nombre}</h3>`);
cabin.selected = true;
cabElegida = cabin.precio;
swal({
title: `Hecho!`,
text: `¡Cabaña reservada!`,
icon: 'success'
});
} else {
swal({
title: 'Ya elegiste esta cabaña',
icon: 'warning'
});
}
}
});
});
const btnSimple = document.getElementById('btnSimple');
const btnDoble = document.getElementById('btnDoble');
const btnSuite = document.getElementById('btnSuite');
const aparecerSimple = document.getElementById('simple');
const aparecerDoble = document.getElementById('doble');
const aparecerSuite = document.getElementById('suite');
btnSimple.onclick = () => {
$('#simple').toggle(1000);
aparecerSimple.style.display = 'flex';
aparecerDoble.style.display = 'none';
aparecerSuite.style.display = 'none';
};
btnDoble.onclick = () => {
$('#doble').toggle(1000);
aparecerDoble.style.display = 'flex';
aparecerSimple.style.display = 'none';
aparecerSuite.style.display = 'none';
};
btnSuite.onclick = () => {
$('#suite').toggle(1000);
aparecerSuite.style.display = 'flex';
aparecerDoble.style.display = 'none';
aparecerSimple.style.display = 'none';
};
const cabalgata = document.getElementById('imgCabalgata');
const tirolesa = document.getElementById('imgTirolesa');
const trekking = document.getElementById('imgTrekking');
const cocina = document.getElementById('imgCocina');
const desayuno = document.getElementById('imgDesayuno');
const spa = document.getElementById('imgSpa');
$('.services').on('click', function (event) {
event.preventDefault();
const serviceId = event.target.getAttribute('data-service-id');
servicios.forEach((service) => {
if (service.id.toString() === serviceId) {
if (!service.selected) {
swal('Cantidad de personas: ', {
content: 'input'
}).then((value) => {
swal(`Servicio agregado para ${value} personas`);
service.cantidadPersonas = value;
$('#serviciosfinales').append(
`<p>${service.nombre} para ${value} personas = $${
service.costo * value
} </p>`
);
});
service.selected = true;
} else {
swal({
title: 'Ya contrataste este servicio',
text: '¡Conoce los otros que tenemos!',
icon: 'warning'
});
}
}
});
});
const formContacto = document.getElementById('formContacto');
const nombreContacto = document.getElementById('fullName');
const telefonoContacto = document.getElementById('phone');
const emailContacto = document.getElementById('email');
$('#showForm').on('click', function (event) {
event.preventDefault();
$('#staticBackdrop').fadeOut();
if ((pax, fa, fb !== undefined)) {
$('#reservaFinal').fadeIn('3000');
swal({
title: '¡Perfecto!',
text: 'Agrega tus datos y finaliza la reserva.'
});
} else {
swal({
title: 'Elige la fecha y cantidad de pasajeros para continuar',
icon: 'error'
});
}
});
const precio = (a, b) => {
return a * b;
};
let cabFinal;
let costoService;
$('.btnfinal').on('click', () => {
costoService = 0;
servicios.forEach((service) => {
if (service.selected && service.cantidadPersonas > 0) {
costoService += precio(service.costo, service.cantidadPersonas);
}
});
cabFinal = precio(cabElegida, estadiaTotal);
let costoTotal = estadia(cabFinal, costoService);
$('#costoTotal').html('');
$('#costoTotal').append(`Precio final: <strong>${costoTotal}</strong>`);
});
const lastForm = document.getElementById('reservaFinal');
lastForm.onsubmit = (event) => {
event.preventDefault();
swal({
title: '¡Tu reserva fue hecha con éxito!',
text: 'Gracias por elegirnos.',
icon: 'success'
});
};
let HTMLCard = '';
let HTMLError = '';
let contenidoJSON = '';
//AJAX
function Testimonios() {
$.ajax({
url: 'https://randomuser.me/api/?results=4&nat=us,fr,br',
dataType: 'json',
success: function (data) {
contenidoJSON = data.results;
for (let i in contenidoJSON) {
HTMLCard += ` <div class="card m-4 rounded-3">
<img class="card-img-top img-testimonial" src="${contenidoJSON[i].picture.large}">
<div class="card-body text-center">
<p class="card-text-testimonial">"Lorem ipsum dolor sit, amet consectetur adipisicing elit."</p>
<h5 class="card-title-testimonial fw-bolder">${contenidoJSON[i].name.first} ${contenidoJSON[i].name.last}</h5>
<h6 class="card-city-testimonial">${contenidoJSON[i].location.city}, ${contenidoJSON[i].location.country}</h6>
</div>
</div>`;
$('#testimonios').html(HTMLCard);
}
},
error: function () {
HTMLError =
"<div class='center-text'>" +
'<h4>El contenido parece no estar disponible. Intente nuevamente en unos minutos.</h4>' +
'</div>';
$('#testimonial-item').html(HTMLError);
}
});
}
Testimonios();
$('document').ready(function () {
testFadeIn.hide();
});
const testimoniosClientes = $('#testimoniosClientes');
const testFadeIn = $('#testimonios');
testimoniosClientes.mouseover(function () {
testFadeIn.fadeIn(3000);
});
formContacto.onsubmit = (evt) => {
evt.preventDefault();
localStorage.setItem('Nombre', nombreContacto.value);
localStorage.setItem('Telefono', telefonoContacto.value);
localStorage.setItem('Email', emailContacto.value);
swal({
title: `Gracias por contactarte con nosotros.`,
text: 'En breve nos comunicaremos para despejar tus dudas y continuar con el proceso de Reserva.'
});
};
| cabanasimple.style.display = 'none';
cabanadoble.style.display = 'none';
cabanasuite.style.display = 'initial';
aparecerDoble.style.display = 'none';
aparecerSimple.style.display = 'none';
}
}; | conditional_block |
main.js | moment().format();
let CabSimple = 800;
let CabDoble = 1300;
let CabSuite = 2000;
const iva = (x) => x * 0.21;
let sumaPorDia = (a, b) => a + b;
let simpleIVA = sumaPorDia(CabSimple, iva(CabSimple));
let dobleIVA = sumaPorDia(CabDoble, iva(CabDoble));
let suiteIVA = sumaPorDia(CabSuite, iva(CabSuite));
let estadia = function (a, b) {
return a + b;
};
class Servicio {
constructor(
id,
nombre,
costo,
tipo,
descripcion,
selected,
cantidadPersonas
) {
this.id = id;
this.nombre = nombre;
this.costo = costo;
this.tipo = tipo;
this.descripcion = descripcion;
this.selected = selected;
this.cantidadPersonas = cantidadPersonas;
}
}
const servicios = [];
servicios.push(
new Servicio(
1,
'Cabalgata',
600,
'Recreacion',
'Tour a caballo guiado por el bosque.',
false,
0
)
);
servicios.push(
new Servicio(
2,
'Tirolesa',
800,
'Recreacion',
'Deslizamiento por cable entre las copas de los arboles.',
false,
0
)
);
servicios.push(
new Servicio(
3,
'Trekking',
800,
'Recreacion',
'Caminata guiada por el bosque.',
false,
0
)
);
servicios.push(
new Servicio(
4,
'Cena gourmet',
1200,
'Gastronomia',
'Desgustación por pasos maridados con vino.',
false,
0
)
);
servicios.push(
new Servicio(
5,
'Desayuno buffet',
700,
'Gastronomia',
'Manjares artesanales acompañados de jugos naturales.',
false,
0
)
);
servicios.push(
new Servicio(
6,
'Tarde de spa',
1000,
'Relax',
'Sesión de Spa y masajes.',
false,
0
)
);
let pas = document.getElementById('pasajeros');
let cabanasimple = document.getElementById('simplePortada');
let cabanadoble = document.getElementById('doblePortada');
let cabanasuite = document.getElementById('suitePortada');
let fa, fb;
let estadiaTotal;
let pax;
pas.addEventListener('change', () => {
pax = pas.value;
sessionStorage.setItem('pax', pas.value);
localStorage.setItem('pasajeros', pas.value);
});
const fechaA = document.getElementById('checkIn');
fechaA.addEventListener('change', (event) => {
fa = event.target.value;
sessionStorage.setItem('ingreso', fa);
});
const fechaB = document.getElementById('checkOut');
fechaB.addEventListener('change', (event) => {
fb = event.target.value;
sessionStorage.setItem('egreso', fb);
});
let formRes = document.getElementById('formularioReserva');
formRes.onsubmit = (evt) => {
evt.preventDefault();
const checkIn = moment(fa, 'YYYY-MM-DD');
const checkOut = moment(fb, 'YYYY-MM-DD');
estadiaTotal = checkOut.diff(checkIn, 'days');
localStorage.setItem('Check In', fa);
localStorage.setItem('Check Out', fb);
sessionStorage.setItem('dias', estadiaTotal);
localStorage.setItem('estadia', estadiaTotal);
$('#ingreso').append(`${fa}`);
$('#egreso').append(`${fb}`);
$('#guests').append(`${pax}`);
$('#dias').append(`${estadiaTotal}`);
if (pas.value <= 3) {
cabanasimple.style.display = 'initial';
cabanadoble.style.display = 'initial';
cabanasuite.style.display = 'initial';
} else if (pas.value >= 3 && pas.value <= 6) {
cabanasimple.style.display = 'none';
cabanadoble.style.display = 'initial';
cabanasuite.style.display = 'initial';
aparecerSimple.style.display = 'none';
}
if (pas.value > 6) {
cabanasimple.style.display = 'none';
cabanadoble.style.display = 'none';
cabanasuite.style.display = 'initial';
aparecerDoble.style.display = 'none';
aparecerSimple.style.display = 'none';
}
};
function Cabaña(id, nombre, precio, selected) {
this.id = id;
this.nombre = nombre;
this.precio = precio;
this.selected = selected;
}
const cabins = [];
cabins.push(new Cabaña(1, 'Cabaña simple', simpleIVA, false));
cabins.push(new Cabaña(2, 'Cabaña doble', dobleIVA, false));
cabins.push(new Cabaña(3, 'Cabaña suite', suiteIVA, false));
let cabElegida;
$('.addBtn').on('click', function (e) {
e.preventDefault();
const cabanaId = e.target.getAttribute('data-cabaña-id');
cabins.forEach((cabin) => {
if (cabin.id.toString() === cabanaId) {
if (!cabin.selected) {
$('#quecabana').append(`<h3>${cabin.nombre}</h3>`);
cabin.selected = true;
cabElegida = cabin.precio;
swal({
title: `Hecho!`,
text: `¡Cabaña reservada!`,
icon: 'success'
});
} else {
swal({
title: 'Ya elegiste esta cabaña',
icon: 'warning'
});
}
}
});
});
const btnSimple = document.getElementById('btnSimple');
const btnDoble = document.getElementById('btnDoble');
const btnSuite = document.getElementById('btnSuite');
const aparecerSimple = document.getElementById('simple');
const aparecerDoble = document.getElementById('doble');
const aparecerSuite = document.getElementById('suite');
btnSimple.onclick = () => {
$('#simple').toggle(1000);
aparecerSimple.style.display = 'flex';
aparecerDoble.style.display = 'none';
aparecerSuite.style.display = 'none';
};
btnDoble.onclick = () => {
$('#doble').toggle(1000);
aparecerDoble.style.display = 'flex';
aparecerSimple.style.display = 'none';
aparecerSuite.style.display = 'none';
};
btnSuite.onclick = () => {
$('#suite').toggle(1000);
aparecerSuite.style.display = 'flex';
aparecerDoble.style.display = 'none';
aparecerSimple.style.display = 'none';
};
const cabalgata = document.getElementById('imgCabalgata');
const tirolesa = document.getElementById('imgTirolesa');
const trekking = document.getElementById('imgTrekking');
const cocina = document.getElementById('imgCocina');
const desayuno = document.getElementById('imgDesayuno');
const spa = document.getElementById('imgSpa');
$('.services').on('click', function (event) {
event.preventDefault();
const serviceId = event.target.getAttribute('data-service-id');
servicios.forEach((service) => {
if (service.id.toString() === serviceId) {
if (!service.selected) {
swal('Cantidad de personas: ', {
content: 'input'
}).then((value) => {
swal(`Servicio agregado para ${value} personas`);
service.cantidadPersonas = value;
$('#serviciosfinales').append(
`<p>${service.nombre} para ${value} personas = $${
service.costo * value
} </p>`
);
});
service.selected = true;
} else {
swal({
title: 'Ya contrataste este servicio',
text: '¡Conoce los otros que tenemos!',
icon: 'warning'
});
}
}
});
});
const formContacto = document.getElementById('formContacto');
const nombreContacto = document.getElementById('fullName');
const telefonoContacto = document.getElementById('phone');
const emailContacto = document.getElementById('email');
$('#showForm').on('click', function (event) {
event.preventDefault();
$('#staticBackdrop').fadeOut();
if ((pax, fa, fb !== undefined)) {
$('#reservaFinal').fadeIn('3000');
swal({
title: '¡Perfecto!',
text: 'Agrega tus datos y finaliza la reserva.'
});
} else {
swal({
title: 'Elige la fecha y cantidad de pasajeros para continuar',
icon: 'error'
});
}
});
const precio = (a, b) => {
return a * b;
};
let cabFinal;
let costoService;
$('.btnfinal').on('click', () => {
costoService = 0;
servicios.forEach((service) => {
if (service.selected && service.cantidadPersonas > 0) {
costoService += precio(service.costo, service.cantidadPersonas);
}
});
cabFinal = precio(cabElegida, estadiaTotal);
let costoTotal = estadia(cabFinal, costoService);
$('#costoTotal').html('');
$('#costoTotal').append(`Precio final: <strong>${costoTotal}</strong>`);
});
const lastForm = document.getElementById('reservaFinal');
lastForm.onsubmit = (event) => {
event.preventDefault();
swal({
title: '¡Tu reserva fue hecha con éxito!',
text: 'Gracias por elegirnos.',
icon: 'success'
});
};
let HTMLCard = '';
let HTMLError = '';
let contenidoJSON = '';
//AJAX
function Testimonios() {
| url: 'https://randomuser.me/api/?results=4&nat=us,fr,br',
dataType: 'json',
success: function (data) {
contenidoJSON = data.results;
for (let i in contenidoJSON) {
HTMLCard += ` <div class="card m-4 rounded-3">
<img class="card-img-top img-testimonial" src="${contenidoJSON[i].picture.large}">
<div class="card-body text-center">
<p class="card-text-testimonial">"Lorem ipsum dolor sit, amet consectetur adipisicing elit."</p>
<h5 class="card-title-testimonial fw-bolder">${contenidoJSON[i].name.first} ${contenidoJSON[i].name.last}</h5>
<h6 class="card-city-testimonial">${contenidoJSON[i].location.city}, ${contenidoJSON[i].location.country}</h6>
</div>
</div>`;
$('#testimonios').html(HTMLCard);
}
},
error: function () {
HTMLError =
"<div class='center-text'>" +
'<h4>El contenido parece no estar disponible. Intente nuevamente en unos minutos.</h4>' +
'</div>';
$('#testimonial-item').html(HTMLError);
}
});
}
Testimonios();
$('document').ready(function () {
testFadeIn.hide();
});
const testimoniosClientes = $('#testimoniosClientes');
const testFadeIn = $('#testimonios');
testimoniosClientes.mouseover(function () {
testFadeIn.fadeIn(3000);
});
formContacto.onsubmit = (evt) => {
evt.preventDefault();
localStorage.setItem('Nombre', nombreContacto.value);
localStorage.setItem('Telefono', telefonoContacto.value);
localStorage.setItem('Email', emailContacto.value);
swal({
title: `Gracias por contactarte con nosotros.`,
text: 'En breve nos comunicaremos para despejar tus dudas y continuar con el proceso de Reserva.'
});
};
| $.ajax({
| identifier_name |
main.js | moment().format();
let CabSimple = 800;
let CabDoble = 1300;
let CabSuite = 2000;
const iva = (x) => x * 0.21;
let sumaPorDia = (a, b) => a + b;
let simpleIVA = sumaPorDia(CabSimple, iva(CabSimple));
let dobleIVA = sumaPorDia(CabDoble, iva(CabDoble));
let suiteIVA = sumaPorDia(CabSuite, iva(CabSuite));
let estadia = function (a, b) {
return a + b;
};
class Servicio {
constructor(
id,
nombre,
costo,
tipo,
descripcion,
selected,
cantidadPersonas
) |
}
const servicios = [];
servicios.push(
new Servicio(
1,
'Cabalgata',
600,
'Recreacion',
'Tour a caballo guiado por el bosque.',
false,
0
)
);
servicios.push(
new Servicio(
2,
'Tirolesa',
800,
'Recreacion',
'Deslizamiento por cable entre las copas de los arboles.',
false,
0
)
);
servicios.push(
new Servicio(
3,
'Trekking',
800,
'Recreacion',
'Caminata guiada por el bosque.',
false,
0
)
);
servicios.push(
new Servicio(
4,
'Cena gourmet',
1200,
'Gastronomia',
'Desgustación por pasos maridados con vino.',
false,
0
)
);
servicios.push(
new Servicio(
5,
'Desayuno buffet',
700,
'Gastronomia',
'Manjares artesanales acompañados de jugos naturales.',
false,
0
)
);
servicios.push(
new Servicio(
6,
'Tarde de spa',
1000,
'Relax',
'Sesión de Spa y masajes.',
false,
0
)
);
let pas = document.getElementById('pasajeros');
let cabanasimple = document.getElementById('simplePortada');
let cabanadoble = document.getElementById('doblePortada');
let cabanasuite = document.getElementById('suitePortada');
let fa, fb;
let estadiaTotal;
let pax;
pas.addEventListener('change', () => {
pax = pas.value;
sessionStorage.setItem('pax', pas.value);
localStorage.setItem('pasajeros', pas.value);
});
const fechaA = document.getElementById('checkIn');
fechaA.addEventListener('change', (event) => {
fa = event.target.value;
sessionStorage.setItem('ingreso', fa);
});
const fechaB = document.getElementById('checkOut');
fechaB.addEventListener('change', (event) => {
fb = event.target.value;
sessionStorage.setItem('egreso', fb);
});
let formRes = document.getElementById('formularioReserva');
formRes.onsubmit = (evt) => {
evt.preventDefault();
const checkIn = moment(fa, 'YYYY-MM-DD');
const checkOut = moment(fb, 'YYYY-MM-DD');
estadiaTotal = checkOut.diff(checkIn, 'days');
localStorage.setItem('Check In', fa);
localStorage.setItem('Check Out', fb);
sessionStorage.setItem('dias', estadiaTotal);
localStorage.setItem('estadia', estadiaTotal);
$('#ingreso').append(`${fa}`);
$('#egreso').append(`${fb}`);
$('#guests').append(`${pax}`);
$('#dias').append(`${estadiaTotal}`);
if (pas.value <= 3) {
cabanasimple.style.display = 'initial';
cabanadoble.style.display = 'initial';
cabanasuite.style.display = 'initial';
} else if (pas.value >= 3 && pas.value <= 6) {
cabanasimple.style.display = 'none';
cabanadoble.style.display = 'initial';
cabanasuite.style.display = 'initial';
aparecerSimple.style.display = 'none';
}
if (pas.value > 6) {
cabanasimple.style.display = 'none';
cabanadoble.style.display = 'none';
cabanasuite.style.display = 'initial';
aparecerDoble.style.display = 'none';
aparecerSimple.style.display = 'none';
}
};
function Cabaña(id, nombre, precio, selected) {
this.id = id;
this.nombre = nombre;
this.precio = precio;
this.selected = selected;
}
const cabins = [];
cabins.push(new Cabaña(1, 'Cabaña simple', simpleIVA, false));
cabins.push(new Cabaña(2, 'Cabaña doble', dobleIVA, false));
cabins.push(new Cabaña(3, 'Cabaña suite', suiteIVA, false));
let cabElegida;
$('.addBtn').on('click', function (e) {
e.preventDefault();
const cabanaId = e.target.getAttribute('data-cabaña-id');
cabins.forEach((cabin) => {
if (cabin.id.toString() === cabanaId) {
if (!cabin.selected) {
$('#quecabana').append(`<h3>${cabin.nombre}</h3>`);
cabin.selected = true;
cabElegida = cabin.precio;
swal({
title: `Hecho!`,
text: `¡Cabaña reservada!`,
icon: 'success'
});
} else {
swal({
title: 'Ya elegiste esta cabaña',
icon: 'warning'
});
}
}
});
});
const btnSimple = document.getElementById('btnSimple');
const btnDoble = document.getElementById('btnDoble');
const btnSuite = document.getElementById('btnSuite');
const aparecerSimple = document.getElementById('simple');
const aparecerDoble = document.getElementById('doble');
const aparecerSuite = document.getElementById('suite');
btnSimple.onclick = () => {
$('#simple').toggle(1000);
aparecerSimple.style.display = 'flex';
aparecerDoble.style.display = 'none';
aparecerSuite.style.display = 'none';
};
btnDoble.onclick = () => {
$('#doble').toggle(1000);
aparecerDoble.style.display = 'flex';
aparecerSimple.style.display = 'none';
aparecerSuite.style.display = 'none';
};
btnSuite.onclick = () => {
$('#suite').toggle(1000);
aparecerSuite.style.display = 'flex';
aparecerDoble.style.display = 'none';
aparecerSimple.style.display = 'none';
};
const cabalgata = document.getElementById('imgCabalgata');
const tirolesa = document.getElementById('imgTirolesa');
const trekking = document.getElementById('imgTrekking');
const cocina = document.getElementById('imgCocina');
const desayuno = document.getElementById('imgDesayuno');
const spa = document.getElementById('imgSpa');
$('.services').on('click', function (event) {
event.preventDefault();
const serviceId = event.target.getAttribute('data-service-id');
servicios.forEach((service) => {
if (service.id.toString() === serviceId) {
if (!service.selected) {
swal('Cantidad de personas: ', {
content: 'input'
}).then((value) => {
swal(`Servicio agregado para ${value} personas`);
service.cantidadPersonas = value;
$('#serviciosfinales').append(
`<p>${service.nombre} para ${value} personas = $${
service.costo * value
} </p>`
);
});
service.selected = true;
} else {
swal({
title: 'Ya contrataste este servicio',
text: '¡Conoce los otros que tenemos!',
icon: 'warning'
});
}
}
});
});
const formContacto = document.getElementById('formContacto');
const nombreContacto = document.getElementById('fullName');
const telefonoContacto = document.getElementById('phone');
const emailContacto = document.getElementById('email');
$('#showForm').on('click', function (event) {
event.preventDefault();
$('#staticBackdrop').fadeOut();
if ((pax, fa, fb !== undefined)) {
$('#reservaFinal').fadeIn('3000');
swal({
title: '¡Perfecto!',
text: 'Agrega tus datos y finaliza la reserva.'
});
} else {
swal({
title: 'Elige la fecha y cantidad de pasajeros para continuar',
icon: 'error'
});
}
});
const precio = (a, b) => {
return a * b;
};
let cabFinal;
let costoService;
$('.btnfinal').on('click', () => {
costoService = 0;
servicios.forEach((service) => {
if (service.selected && service.cantidadPersonas > 0) {
costoService += precio(service.costo, service.cantidadPersonas);
}
});
cabFinal = precio(cabElegida, estadiaTotal);
let costoTotal = estadia(cabFinal, costoService);
$('#costoTotal').html('');
$('#costoTotal').append(`Precio final: <strong>${costoTotal}</strong>`);
});
const lastForm = document.getElementById('reservaFinal');
lastForm.onsubmit = (event) => {
event.preventDefault();
swal({
title: '¡Tu reserva fue hecha con éxito!',
text: 'Gracias por elegirnos.',
icon: 'success'
});
};
let HTMLCard = '';
let HTMLError = '';
let contenidoJSON = '';
//AJAX
function Testimonios() {
$.ajax({
url: 'https://randomuser.me/api/?results=4&nat=us,fr,br',
dataType: 'json',
success: function (data) {
contenidoJSON = data.results;
for (let i in contenidoJSON) {
HTMLCard += ` <div class="card m-4 rounded-3">
<img class="card-img-top img-testimonial" src="${contenidoJSON[i].picture.large}">
<div class="card-body text-center">
<p class="card-text-testimonial">"Lorem ipsum dolor sit, amet consectetur adipisicing elit."</p>
<h5 class="card-title-testimonial fw-bolder">${contenidoJSON[i].name.first} ${contenidoJSON[i].name.last}</h5>
<h6 class="card-city-testimonial">${contenidoJSON[i].location.city}, ${contenidoJSON[i].location.country}</h6>
</div>
</div>`;
$('#testimonios').html(HTMLCard);
}
},
error: function () {
HTMLError =
"<div class='center-text'>" +
'<h4>El contenido parece no estar disponible. Intente nuevamente en unos minutos.</h4>' +
'</div>';
$('#testimonial-item').html(HTMLError);
}
});
}
Testimonios();
$('document').ready(function () {
testFadeIn.hide();
});
const testimoniosClientes = $('#testimoniosClientes');
const testFadeIn = $('#testimonios');
testimoniosClientes.mouseover(function () {
testFadeIn.fadeIn(3000);
});
formContacto.onsubmit = (evt) => {
evt.preventDefault();
localStorage.setItem('Nombre', nombreContacto.value);
localStorage.setItem('Telefono', telefonoContacto.value);
localStorage.setItem('Email', emailContacto.value);
swal({
title: `Gracias por contactarte con nosotros.`,
text: 'En breve nos comunicaremos para despejar tus dudas y continuar con el proceso de Reserva.'
});
};
| {
this.id = id;
this.nombre = nombre;
this.costo = costo;
this.tipo = tipo;
this.descripcion = descripcion;
this.selected = selected;
this.cantidadPersonas = cantidadPersonas;
} | identifier_body |
nginx_controller.go | // Copyright 2020 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package controllers
import (
"context"
"fmt"
"reflect"
"sort"
"strings"
"time"
"github.com/go-logr/logr"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/tsuru/nginx-operator/api/v1alpha1"
nginxv1alpha1 "github.com/tsuru/nginx-operator/api/v1alpha1"
"github.com/tsuru/nginx-operator/pkg/k8s"
)
// NginxReconciler reconciles a Nginx object
type NginxReconciler struct {
client.Client
EventRecorder record.EventRecorder
Log logr.Logger
Scheme *runtime.Scheme
AnnotationFilter labels.Selector
}
// +kubebuilder:rbac:groups=nginx.tsuru.io,resources=nginxes,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=nginx.tsuru.io,resources=nginxes/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;delete
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups="",resources=events,verbs=create;update;patch
func (r *NginxReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&nginxv1alpha1.Nginx{}).
Owns(&appsv1.Deployment{}).
Complete(r)
}
func (r *NginxReconciler) | (ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := r.Log.WithValues("nginx", req.NamespacedName)
var instance nginxv1alpha1.Nginx
err := r.Client.Get(ctx, req.NamespacedName, &instance)
if err != nil {
if errors.IsNotFound(err) {
log.Info("Nginx resource not found, skipping reconcile")
return ctrl.Result{}, nil
}
log.Error(err, "Unable to get Nginx resource")
return ctrl.Result{}, err
}
if !r.shouldManageNginx(&instance) {
log.V(1).Info("Nginx resource doesn't match annotations filters, skipping it")
return ctrl.Result{Requeue: true, RequeueAfter: 5 * time.Minute}, nil
}
if err := r.reconcileNginx(ctx, &instance); err != nil {
log.Error(err, "Fail to reconcile")
return ctrl.Result{}, err
}
if err := r.refreshStatus(ctx, &instance); err != nil {
log.Error(err, "Fail to refresh status subresource")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (r *NginxReconciler) reconcileNginx(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
if err := r.reconcileDeployment(ctx, nginx); err != nil {
return err
}
if err := r.reconcileService(ctx, nginx); err != nil {
return err
}
if err := r.reconcileIngress(ctx, nginx); err != nil {
return err
}
return nil
}
func (r *NginxReconciler) reconcileDeployment(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
newDeploy, err := k8s.NewDeployment(nginx)
if err != nil {
return fmt.Errorf("failed to build Deployment from Nginx: %w", err)
}
var currentDeploy appsv1.Deployment
err = r.Client.Get(ctx, types.NamespacedName{Name: newDeploy.Name, Namespace: newDeploy.Namespace}, ¤tDeploy)
if errors.IsNotFound(err) {
return r.Client.Create(ctx, newDeploy)
}
if err != nil {
return fmt.Errorf("failed to retrieve Deployment: %w", err)
}
existingNginxSpec, err := k8s.ExtractNginxSpec(currentDeploy.ObjectMeta)
if err != nil {
return fmt.Errorf("failed to extract Nginx spec from Deployment annotations: %w", err)
}
if reflect.DeepEqual(nginx.Spec, existingNginxSpec) {
return nil
}
replicas := currentDeploy.Spec.Replicas
patch := client.StrategicMergeFrom(currentDeploy.DeepCopy())
currentDeploy.Spec = newDeploy.Spec
if newDeploy.Spec.Replicas == nil {
// NOTE: replicas field is set to nil whenever it's managed by some
// autoscaler controller e.g HPA.
currentDeploy.Spec.Replicas = replicas
}
err = k8s.SetNginxSpec(¤tDeploy.ObjectMeta, nginx.Spec)
if err != nil {
return fmt.Errorf("failed to set Nginx spec in Deployment annotations: %w", err)
}
err = r.Client.Patch(ctx, ¤tDeploy, patch)
if err != nil {
return fmt.Errorf("failed to patch Deployment: %w", err)
}
return nil
}
func (r *NginxReconciler) reconcileService(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
newService := k8s.NewService(nginx)
var currentService corev1.Service
err := r.Client.Get(ctx, types.NamespacedName{Name: newService.Name, Namespace: newService.Namespace}, ¤tService)
if errors.IsNotFound(err) {
err = r.Client.Create(ctx, newService)
if errors.IsForbidden(err) && strings.Contains(err.Error(), "exceeded quota") {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceQuotaExceeded", "failed to create Service: %s", err)
return err
}
if err != nil {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceCreationFailed", "failed to create Service: %s", err)
return err
}
r.EventRecorder.Eventf(nginx, corev1.EventTypeNormal, "ServiceCreated", "service created successfully")
return nil
}
if err != nil {
return fmt.Errorf("failed to retrieve Service resource: %v", err)
}
newService.ResourceVersion = currentService.ResourceVersion
newService.Spec.ClusterIP = currentService.Spec.ClusterIP
newService.Spec.HealthCheckNodePort = currentService.Spec.HealthCheckNodePort
newService.Finalizers = currentService.Finalizers
for annotation, value := range currentService.Annotations {
if newService.Annotations[annotation] == "" {
newService.Annotations[annotation] = value
}
}
if newService.Spec.Type == corev1.ServiceTypeNodePort || newService.Spec.Type == corev1.ServiceTypeLoadBalancer {
// avoid nodeport reallocation preserving the current ones
for _, currentPort := range currentService.Spec.Ports {
for index, newPort := range newService.Spec.Ports {
if currentPort.Port == newPort.Port {
newService.Spec.Ports[index].NodePort = currentPort.NodePort
}
}
}
}
err = r.Client.Update(ctx, newService)
if err != nil {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceUpdateFailed", "failed to update Service: %s", err)
return err
}
r.EventRecorder.Eventf(nginx, corev1.EventTypeNormal, "ServiceUpdated", "service updated successfully")
return nil
}
func (r *NginxReconciler) reconcileIngress(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
if nginx == nil {
return fmt.Errorf("nginx cannot be nil")
}
newIngress := k8s.NewIngress(nginx)
var currentIngress networkingv1.Ingress
err := r.Client.Get(ctx, types.NamespacedName{Name: newIngress.Name, Namespace: newIngress.Namespace}, ¤tIngress)
if errors.IsNotFound(err) {
if nginx.Spec.Ingress == nil {
return nil
}
return r.Client.Create(ctx, newIngress)
}
if err != nil {
return err
}
if nginx.Spec.Ingress == nil {
return r.Client.Delete(ctx, ¤tIngress)
}
if !shouldUpdateIngress(¤tIngress, newIngress) {
return nil
}
newIngress.ResourceVersion = currentIngress.ResourceVersion
newIngress.Finalizers = currentIngress.Finalizers
return r.Client.Update(ctx, newIngress)
}
func shouldUpdateIngress(currentIngress, newIngress *networkingv1.Ingress) bool {
if currentIngress == nil || newIngress == nil {
return false
}
return !reflect.DeepEqual(currentIngress.Annotations, newIngress.Annotations) ||
!reflect.DeepEqual(currentIngress.Labels, newIngress.Labels) ||
!reflect.DeepEqual(currentIngress.Spec, newIngress.Spec)
}
func (r *NginxReconciler) refreshStatus(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
deploys, err := listDeployments(ctx, r.Client, nginx)
if err != nil {
return err
}
var deployStatuses []v1alpha1.DeploymentStatus
var replicas int32
for _, d := range deploys {
replicas += d.Status.Replicas
deployStatuses = append(deployStatuses, v1alpha1.DeploymentStatus{Name: d.Name})
}
services, err := listServices(ctx, r.Client, nginx)
if err != nil {
return fmt.Errorf("failed to list services for nginx: %v", err)
}
ingresses, err := listIngresses(ctx, r.Client, nginx)
if err != nil {
return fmt.Errorf("failed to list ingresses for nginx: %w", err)
}
sort.Slice(nginx.Status.Services, func(i, j int) bool {
return nginx.Status.Services[i].Name < nginx.Status.Services[j].Name
})
sort.Slice(nginx.Status.Ingresses, func(i, j int) bool {
return nginx.Status.Ingresses[i].Name < nginx.Status.Ingresses[j].Name
})
status := v1alpha1.NginxStatus{
CurrentReplicas: replicas,
PodSelector: k8s.LabelsForNginxString(nginx.Name),
Deployments: deployStatuses,
Services: services,
Ingresses: ingresses,
}
if reflect.DeepEqual(nginx.Status, status) {
return nil
}
nginx.Status = status
err = r.Client.Status().Update(ctx, nginx)
if err != nil {
return fmt.Errorf("failed to update nginx status: %v", err)
}
return nil
}
func listDeployments(ctx context.Context, c client.Client, nginx *nginxv1alpha1.Nginx) ([]appsv1.Deployment, error) {
var deployList appsv1.DeploymentList
err := c.List(ctx, &deployList, &client.ListOptions{
Namespace: nginx.Namespace,
LabelSelector: labels.SelectorFromSet(k8s.LabelsForNginx(nginx.Name)),
})
if err != nil {
return nil, err
}
deploys := deployList.Items
// NOTE: specific implementation for backward compatibility w/ Deployments
// that does not have Nginx labels yet.
if len(deploys) == 0 {
err = c.List(ctx, &deployList, &client.ListOptions{Namespace: nginx.Namespace})
if err != nil {
return nil, err
}
desired := *metav1.NewControllerRef(nginx, schema.GroupVersionKind{
Group: v1alpha1.GroupVersion.Group,
Version: v1alpha1.GroupVersion.Version,
Kind: "Nginx",
})
for _, deploy := range deployList.Items {
for _, owner := range deploy.OwnerReferences {
if reflect.DeepEqual(owner, desired) {
deploys = append(deploys, deploy)
}
}
}
}
sort.Slice(deploys, func(i, j int) bool { return deploys[i].Name < deploys[j].Name })
return deploys, nil
}
// listServices return all the services for the given nginx sorted by name
func listServices(ctx context.Context, c client.Client, nginx *nginxv1alpha1.Nginx) ([]nginxv1alpha1.ServiceStatus, error) {
serviceList := &corev1.ServiceList{}
labelSelector := labels.SelectorFromSet(k8s.LabelsForNginx(nginx.Name))
listOps := &client.ListOptions{Namespace: nginx.Namespace, LabelSelector: labelSelector}
err := c.List(ctx, serviceList, listOps)
if err != nil {
return nil, err
}
var services []nginxv1alpha1.ServiceStatus
for _, s := range serviceList.Items {
services = append(services, nginxv1alpha1.ServiceStatus{
Name: s.Name,
})
}
sort.Slice(services, func(i, j int) bool {
return services[i].Name < services[j].Name
})
return services, nil
}
func listIngresses(ctx context.Context, c client.Client, nginx *nginxv1alpha1.Nginx) ([]nginxv1alpha1.IngressStatus, error) {
var ingressList networkingv1.IngressList
options := &client.ListOptions{
LabelSelector: labels.SelectorFromSet(k8s.LabelsForNginx(nginx.Name)),
Namespace: nginx.Namespace,
}
if err := c.List(ctx, &ingressList, options); err != nil {
return nil, err
}
var ingresses []nginxv1alpha1.IngressStatus
for _, i := range ingressList.Items {
ingresses = append(ingresses, nginxv1alpha1.IngressStatus{Name: i.Name})
}
sort.Slice(ingresses, func(i, j int) bool {
return ingresses[i].Name < ingresses[j].Name
})
return ingresses, nil
}
func (r *NginxReconciler) shouldManageNginx(nginx *v1alpha1.Nginx) bool {
// empty filter matches all resources
if r.AnnotationFilter == nil || r.AnnotationFilter.Empty() {
return true
}
return r.AnnotationFilter.Matches(labels.Set(nginx.Annotations))
}
| Reconcile | identifier_name |
nginx_controller.go | // Copyright 2020 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package controllers
import (
"context"
"fmt"
"reflect"
"sort"
"strings"
"time"
"github.com/go-logr/logr"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/tsuru/nginx-operator/api/v1alpha1"
nginxv1alpha1 "github.com/tsuru/nginx-operator/api/v1alpha1"
"github.com/tsuru/nginx-operator/pkg/k8s"
)
// NginxReconciler reconciles a Nginx object
type NginxReconciler struct {
client.Client
EventRecorder record.EventRecorder
Log logr.Logger
Scheme *runtime.Scheme
AnnotationFilter labels.Selector
}
// +kubebuilder:rbac:groups=nginx.tsuru.io,resources=nginxes,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=nginx.tsuru.io,resources=nginxes/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;delete
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups="",resources=events,verbs=create;update;patch
func (r *NginxReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&nginxv1alpha1.Nginx{}).
Owns(&appsv1.Deployment{}).
Complete(r)
}
func (r *NginxReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := r.Log.WithValues("nginx", req.NamespacedName)
var instance nginxv1alpha1.Nginx
err := r.Client.Get(ctx, req.NamespacedName, &instance)
if err != nil {
if errors.IsNotFound(err) {
log.Info("Nginx resource not found, skipping reconcile")
return ctrl.Result{}, nil
}
log.Error(err, "Unable to get Nginx resource")
return ctrl.Result{}, err
}
if !r.shouldManageNginx(&instance) {
log.V(1).Info("Nginx resource doesn't match annotations filters, skipping it")
return ctrl.Result{Requeue: true, RequeueAfter: 5 * time.Minute}, nil
}
if err := r.reconcileNginx(ctx, &instance); err != nil {
log.Error(err, "Fail to reconcile")
return ctrl.Result{}, err
}
if err := r.refreshStatus(ctx, &instance); err != nil {
log.Error(err, "Fail to refresh status subresource")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (r *NginxReconciler) reconcileNginx(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
if err := r.reconcileDeployment(ctx, nginx); err != nil {
return err
}
if err := r.reconcileService(ctx, nginx); err != nil {
return err
}
if err := r.reconcileIngress(ctx, nginx); err != nil {
return err
}
return nil
}
func (r *NginxReconciler) reconcileDeployment(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
newDeploy, err := k8s.NewDeployment(nginx)
if err != nil {
return fmt.Errorf("failed to build Deployment from Nginx: %w", err)
}
var currentDeploy appsv1.Deployment
err = r.Client.Get(ctx, types.NamespacedName{Name: newDeploy.Name, Namespace: newDeploy.Namespace}, ¤tDeploy)
if errors.IsNotFound(err) {
return r.Client.Create(ctx, newDeploy)
}
if err != nil {
return fmt.Errorf("failed to retrieve Deployment: %w", err)
}
existingNginxSpec, err := k8s.ExtractNginxSpec(currentDeploy.ObjectMeta)
if err != nil {
return fmt.Errorf("failed to extract Nginx spec from Deployment annotations: %w", err)
}
if reflect.DeepEqual(nginx.Spec, existingNginxSpec) {
return nil
}
replicas := currentDeploy.Spec.Replicas
patch := client.StrategicMergeFrom(currentDeploy.DeepCopy())
currentDeploy.Spec = newDeploy.Spec
if newDeploy.Spec.Replicas == nil {
// NOTE: replicas field is set to nil whenever it's managed by some
// autoscaler controller e.g HPA.
currentDeploy.Spec.Replicas = replicas
}
err = k8s.SetNginxSpec(¤tDeploy.ObjectMeta, nginx.Spec)
if err != nil {
return fmt.Errorf("failed to set Nginx spec in Deployment annotations: %w", err)
}
err = r.Client.Patch(ctx, ¤tDeploy, patch)
if err != nil {
return fmt.Errorf("failed to patch Deployment: %w", err)
}
return nil
}
func (r *NginxReconciler) reconcileService(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
newService := k8s.NewService(nginx)
var currentService corev1.Service
err := r.Client.Get(ctx, types.NamespacedName{Name: newService.Name, Namespace: newService.Namespace}, ¤tService)
if errors.IsNotFound(err) {
err = r.Client.Create(ctx, newService)
if errors.IsForbidden(err) && strings.Contains(err.Error(), "exceeded quota") {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceQuotaExceeded", "failed to create Service: %s", err)
return err
}
if err != nil {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceCreationFailed", "failed to create Service: %s", err)
return err
}
r.EventRecorder.Eventf(nginx, corev1.EventTypeNormal, "ServiceCreated", "service created successfully")
return nil
}
if err != nil {
return fmt.Errorf("failed to retrieve Service resource: %v", err)
}
newService.ResourceVersion = currentService.ResourceVersion
newService.Spec.ClusterIP = currentService.Spec.ClusterIP
newService.Spec.HealthCheckNodePort = currentService.Spec.HealthCheckNodePort
newService.Finalizers = currentService.Finalizers
for annotation, value := range currentService.Annotations {
if newService.Annotations[annotation] == "" {
newService.Annotations[annotation] = value
}
}
if newService.Spec.Type == corev1.ServiceTypeNodePort || newService.Spec.Type == corev1.ServiceTypeLoadBalancer {
// avoid nodeport reallocation preserving the current ones
for _, currentPort := range currentService.Spec.Ports {
for index, newPort := range newService.Spec.Ports {
if currentPort.Port == newPort.Port {
newService.Spec.Ports[index].NodePort = currentPort.NodePort
}
}
}
}
err = r.Client.Update(ctx, newService)
if err != nil {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceUpdateFailed", "failed to update Service: %s", err)
return err
}
r.EventRecorder.Eventf(nginx, corev1.EventTypeNormal, "ServiceUpdated", "service updated successfully")
return nil
}
func (r *NginxReconciler) reconcileIngress(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
if nginx == nil {
return fmt.Errorf("nginx cannot be nil")
}
newIngress := k8s.NewIngress(nginx)
var currentIngress networkingv1.Ingress
err := r.Client.Get(ctx, types.NamespacedName{Name: newIngress.Name, Namespace: newIngress.Namespace}, ¤tIngress)
if errors.IsNotFound(err) {
if nginx.Spec.Ingress == nil {
return nil
}
return r.Client.Create(ctx, newIngress)
}
if err != nil {
return err
}
if nginx.Spec.Ingress == nil {
return r.Client.Delete(ctx, ¤tIngress)
}
if !shouldUpdateIngress(¤tIngress, newIngress) {
return nil
}
newIngress.ResourceVersion = currentIngress.ResourceVersion
newIngress.Finalizers = currentIngress.Finalizers
return r.Client.Update(ctx, newIngress)
}
func shouldUpdateIngress(currentIngress, newIngress *networkingv1.Ingress) bool {
if currentIngress == nil || newIngress == nil {
return false
}
return !reflect.DeepEqual(currentIngress.Annotations, newIngress.Annotations) ||
!reflect.DeepEqual(currentIngress.Labels, newIngress.Labels) ||
!reflect.DeepEqual(currentIngress.Spec, newIngress.Spec)
}
func (r *NginxReconciler) refreshStatus(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
deploys, err := listDeployments(ctx, r.Client, nginx)
if err != nil {
return err
}
var deployStatuses []v1alpha1.DeploymentStatus
var replicas int32
for _, d := range deploys {
replicas += d.Status.Replicas
deployStatuses = append(deployStatuses, v1alpha1.DeploymentStatus{Name: d.Name})
}
services, err := listServices(ctx, r.Client, nginx)
if err != nil {
return fmt.Errorf("failed to list services for nginx: %v", err)
}
ingresses, err := listIngresses(ctx, r.Client, nginx)
if err != nil {
return fmt.Errorf("failed to list ingresses for nginx: %w", err)
}
sort.Slice(nginx.Status.Services, func(i, j int) bool {
return nginx.Status.Services[i].Name < nginx.Status.Services[j].Name
})
sort.Slice(nginx.Status.Ingresses, func(i, j int) bool {
return nginx.Status.Ingresses[i].Name < nginx.Status.Ingresses[j].Name
})
status := v1alpha1.NginxStatus{
CurrentReplicas: replicas,
PodSelector: k8s.LabelsForNginxString(nginx.Name),
Deployments: deployStatuses,
Services: services,
Ingresses: ingresses,
}
if reflect.DeepEqual(nginx.Status, status) {
return nil
}
nginx.Status = status
err = r.Client.Status().Update(ctx, nginx)
if err != nil {
return fmt.Errorf("failed to update nginx status: %v", err)
}
return nil
}
func listDeployments(ctx context.Context, c client.Client, nginx *nginxv1alpha1.Nginx) ([]appsv1.Deployment, error) {
var deployList appsv1.DeploymentList
err := c.List(ctx, &deployList, &client.ListOptions{
Namespace: nginx.Namespace,
LabelSelector: labels.SelectorFromSet(k8s.LabelsForNginx(nginx.Name)),
})
if err != nil {
return nil, err
}
deploys := deployList.Items
// NOTE: specific implementation for backward compatibility w/ Deployments
// that does not have Nginx labels yet.
if len(deploys) == 0 {
err = c.List(ctx, &deployList, &client.ListOptions{Namespace: nginx.Namespace})
if err != nil {
return nil, err
}
desired := *metav1.NewControllerRef(nginx, schema.GroupVersionKind{
Group: v1alpha1.GroupVersion.Group,
Version: v1alpha1.GroupVersion.Version,
Kind: "Nginx",
})
for _, deploy := range deployList.Items {
for _, owner := range deploy.OwnerReferences {
if reflect.DeepEqual(owner, desired) {
deploys = append(deploys, deploy)
}
}
}
}
sort.Slice(deploys, func(i, j int) bool { return deploys[i].Name < deploys[j].Name })
return deploys, nil
}
// listServices return all the services for the given nginx sorted by name
func listServices(ctx context.Context, c client.Client, nginx *nginxv1alpha1.Nginx) ([]nginxv1alpha1.ServiceStatus, error) {
serviceList := &corev1.ServiceList{}
labelSelector := labels.SelectorFromSet(k8s.LabelsForNginx(nginx.Name))
listOps := &client.ListOptions{Namespace: nginx.Namespace, LabelSelector: labelSelector}
err := c.List(ctx, serviceList, listOps)
if err != nil {
return nil, err
}
var services []nginxv1alpha1.ServiceStatus
for _, s := range serviceList.Items {
services = append(services, nginxv1alpha1.ServiceStatus{
Name: s.Name,
})
}
sort.Slice(services, func(i, j int) bool {
return services[i].Name < services[j].Name
})
return services, nil
}
func listIngresses(ctx context.Context, c client.Client, nginx *nginxv1alpha1.Nginx) ([]nginxv1alpha1.IngressStatus, error) {
var ingressList networkingv1.IngressList
options := &client.ListOptions{
LabelSelector: labels.SelectorFromSet(k8s.LabelsForNginx(nginx.Name)),
Namespace: nginx.Namespace,
}
if err := c.List(ctx, &ingressList, options); err != nil |
var ingresses []nginxv1alpha1.IngressStatus
for _, i := range ingressList.Items {
ingresses = append(ingresses, nginxv1alpha1.IngressStatus{Name: i.Name})
}
sort.Slice(ingresses, func(i, j int) bool {
return ingresses[i].Name < ingresses[j].Name
})
return ingresses, nil
}
func (r *NginxReconciler) shouldManageNginx(nginx *v1alpha1.Nginx) bool {
// empty filter matches all resources
if r.AnnotationFilter == nil || r.AnnotationFilter.Empty() {
return true
}
return r.AnnotationFilter.Matches(labels.Set(nginx.Annotations))
}
| {
return nil, err
} | conditional_block |
nginx_controller.go | // Copyright 2020 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package controllers
import (
"context"
"fmt"
"reflect"
"sort"
"strings"
"time"
"github.com/go-logr/logr"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/tsuru/nginx-operator/api/v1alpha1"
nginxv1alpha1 "github.com/tsuru/nginx-operator/api/v1alpha1"
"github.com/tsuru/nginx-operator/pkg/k8s"
)
// NginxReconciler reconciles a Nginx object
type NginxReconciler struct {
client.Client
EventRecorder record.EventRecorder
Log logr.Logger
Scheme *runtime.Scheme
AnnotationFilter labels.Selector
}
// +kubebuilder:rbac:groups=nginx.tsuru.io,resources=nginxes,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=nginx.tsuru.io,resources=nginxes/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;delete
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups="",resources=events,verbs=create;update;patch
func (r *NginxReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&nginxv1alpha1.Nginx{}).
Owns(&appsv1.Deployment{}).
Complete(r)
}
func (r *NginxReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := r.Log.WithValues("nginx", req.NamespacedName)
var instance nginxv1alpha1.Nginx
err := r.Client.Get(ctx, req.NamespacedName, &instance)
if err != nil {
if errors.IsNotFound(err) {
log.Info("Nginx resource not found, skipping reconcile")
return ctrl.Result{}, nil
}
log.Error(err, "Unable to get Nginx resource")
return ctrl.Result{}, err
}
if !r.shouldManageNginx(&instance) {
log.V(1).Info("Nginx resource doesn't match annotations filters, skipping it")
return ctrl.Result{Requeue: true, RequeueAfter: 5 * time.Minute}, nil
}
if err := r.reconcileNginx(ctx, &instance); err != nil {
log.Error(err, "Fail to reconcile")
return ctrl.Result{}, err
}
if err := r.refreshStatus(ctx, &instance); err != nil {
log.Error(err, "Fail to refresh status subresource")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (r *NginxReconciler) reconcileNginx(ctx context.Context, nginx *nginxv1alpha1.Nginx) error |
func (r *NginxReconciler) reconcileDeployment(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
newDeploy, err := k8s.NewDeployment(nginx)
if err != nil {
return fmt.Errorf("failed to build Deployment from Nginx: %w", err)
}
var currentDeploy appsv1.Deployment
err = r.Client.Get(ctx, types.NamespacedName{Name: newDeploy.Name, Namespace: newDeploy.Namespace}, ¤tDeploy)
if errors.IsNotFound(err) {
return r.Client.Create(ctx, newDeploy)
}
if err != nil {
return fmt.Errorf("failed to retrieve Deployment: %w", err)
}
existingNginxSpec, err := k8s.ExtractNginxSpec(currentDeploy.ObjectMeta)
if err != nil {
return fmt.Errorf("failed to extract Nginx spec from Deployment annotations: %w", err)
}
if reflect.DeepEqual(nginx.Spec, existingNginxSpec) {
return nil
}
replicas := currentDeploy.Spec.Replicas
patch := client.StrategicMergeFrom(currentDeploy.DeepCopy())
currentDeploy.Spec = newDeploy.Spec
if newDeploy.Spec.Replicas == nil {
// NOTE: replicas field is set to nil whenever it's managed by some
// autoscaler controller e.g HPA.
currentDeploy.Spec.Replicas = replicas
}
err = k8s.SetNginxSpec(¤tDeploy.ObjectMeta, nginx.Spec)
if err != nil {
return fmt.Errorf("failed to set Nginx spec in Deployment annotations: %w", err)
}
err = r.Client.Patch(ctx, ¤tDeploy, patch)
if err != nil {
return fmt.Errorf("failed to patch Deployment: %w", err)
}
return nil
}
func (r *NginxReconciler) reconcileService(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
newService := k8s.NewService(nginx)
var currentService corev1.Service
err := r.Client.Get(ctx, types.NamespacedName{Name: newService.Name, Namespace: newService.Namespace}, ¤tService)
if errors.IsNotFound(err) {
err = r.Client.Create(ctx, newService)
if errors.IsForbidden(err) && strings.Contains(err.Error(), "exceeded quota") {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceQuotaExceeded", "failed to create Service: %s", err)
return err
}
if err != nil {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceCreationFailed", "failed to create Service: %s", err)
return err
}
r.EventRecorder.Eventf(nginx, corev1.EventTypeNormal, "ServiceCreated", "service created successfully")
return nil
}
if err != nil {
return fmt.Errorf("failed to retrieve Service resource: %v", err)
}
newService.ResourceVersion = currentService.ResourceVersion
newService.Spec.ClusterIP = currentService.Spec.ClusterIP
newService.Spec.HealthCheckNodePort = currentService.Spec.HealthCheckNodePort
newService.Finalizers = currentService.Finalizers
for annotation, value := range currentService.Annotations {
if newService.Annotations[annotation] == "" {
newService.Annotations[annotation] = value
}
}
if newService.Spec.Type == corev1.ServiceTypeNodePort || newService.Spec.Type == corev1.ServiceTypeLoadBalancer {
// avoid nodeport reallocation preserving the current ones
for _, currentPort := range currentService.Spec.Ports {
for index, newPort := range newService.Spec.Ports {
if currentPort.Port == newPort.Port {
newService.Spec.Ports[index].NodePort = currentPort.NodePort
}
}
}
}
err = r.Client.Update(ctx, newService)
if err != nil {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceUpdateFailed", "failed to update Service: %s", err)
return err
}
r.EventRecorder.Eventf(nginx, corev1.EventTypeNormal, "ServiceUpdated", "service updated successfully")
return nil
}
func (r *NginxReconciler) reconcileIngress(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
if nginx == nil {
return fmt.Errorf("nginx cannot be nil")
}
newIngress := k8s.NewIngress(nginx)
var currentIngress networkingv1.Ingress
err := r.Client.Get(ctx, types.NamespacedName{Name: newIngress.Name, Namespace: newIngress.Namespace}, ¤tIngress)
if errors.IsNotFound(err) {
if nginx.Spec.Ingress == nil {
return nil
}
return r.Client.Create(ctx, newIngress)
}
if err != nil {
return err
}
if nginx.Spec.Ingress == nil {
return r.Client.Delete(ctx, ¤tIngress)
}
if !shouldUpdateIngress(¤tIngress, newIngress) {
return nil
}
newIngress.ResourceVersion = currentIngress.ResourceVersion
newIngress.Finalizers = currentIngress.Finalizers
return r.Client.Update(ctx, newIngress)
}
func shouldUpdateIngress(currentIngress, newIngress *networkingv1.Ingress) bool {
if currentIngress == nil || newIngress == nil {
return false
}
return !reflect.DeepEqual(currentIngress.Annotations, newIngress.Annotations) ||
!reflect.DeepEqual(currentIngress.Labels, newIngress.Labels) ||
!reflect.DeepEqual(currentIngress.Spec, newIngress.Spec)
}
func (r *NginxReconciler) refreshStatus(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
deploys, err := listDeployments(ctx, r.Client, nginx)
if err != nil {
return err
}
var deployStatuses []v1alpha1.DeploymentStatus
var replicas int32
for _, d := range deploys {
replicas += d.Status.Replicas
deployStatuses = append(deployStatuses, v1alpha1.DeploymentStatus{Name: d.Name})
}
services, err := listServices(ctx, r.Client, nginx)
if err != nil {
return fmt.Errorf("failed to list services for nginx: %v", err)
}
ingresses, err := listIngresses(ctx, r.Client, nginx)
if err != nil {
return fmt.Errorf("failed to list ingresses for nginx: %w", err)
}
sort.Slice(nginx.Status.Services, func(i, j int) bool {
return nginx.Status.Services[i].Name < nginx.Status.Services[j].Name
})
sort.Slice(nginx.Status.Ingresses, func(i, j int) bool {
return nginx.Status.Ingresses[i].Name < nginx.Status.Ingresses[j].Name
})
status := v1alpha1.NginxStatus{
CurrentReplicas: replicas,
PodSelector: k8s.LabelsForNginxString(nginx.Name),
Deployments: deployStatuses,
Services: services,
Ingresses: ingresses,
}
if reflect.DeepEqual(nginx.Status, status) {
return nil
}
nginx.Status = status
err = r.Client.Status().Update(ctx, nginx)
if err != nil {
return fmt.Errorf("failed to update nginx status: %v", err)
}
return nil
}
func listDeployments(ctx context.Context, c client.Client, nginx *nginxv1alpha1.Nginx) ([]appsv1.Deployment, error) {
var deployList appsv1.DeploymentList
err := c.List(ctx, &deployList, &client.ListOptions{
Namespace: nginx.Namespace,
LabelSelector: labels.SelectorFromSet(k8s.LabelsForNginx(nginx.Name)),
})
if err != nil {
return nil, err
}
deploys := deployList.Items
// NOTE: specific implementation for backward compatibility w/ Deployments
// that does not have Nginx labels yet.
if len(deploys) == 0 {
err = c.List(ctx, &deployList, &client.ListOptions{Namespace: nginx.Namespace})
if err != nil {
return nil, err
}
desired := *metav1.NewControllerRef(nginx, schema.GroupVersionKind{
Group: v1alpha1.GroupVersion.Group,
Version: v1alpha1.GroupVersion.Version,
Kind: "Nginx",
})
for _, deploy := range deployList.Items {
for _, owner := range deploy.OwnerReferences {
if reflect.DeepEqual(owner, desired) {
deploys = append(deploys, deploy)
}
}
}
}
sort.Slice(deploys, func(i, j int) bool { return deploys[i].Name < deploys[j].Name })
return deploys, nil
}
// listServices return all the services for the given nginx sorted by name
func listServices(ctx context.Context, c client.Client, nginx *nginxv1alpha1.Nginx) ([]nginxv1alpha1.ServiceStatus, error) {
serviceList := &corev1.ServiceList{}
labelSelector := labels.SelectorFromSet(k8s.LabelsForNginx(nginx.Name))
listOps := &client.ListOptions{Namespace: nginx.Namespace, LabelSelector: labelSelector}
err := c.List(ctx, serviceList, listOps)
if err != nil {
return nil, err
}
var services []nginxv1alpha1.ServiceStatus
for _, s := range serviceList.Items {
services = append(services, nginxv1alpha1.ServiceStatus{
Name: s.Name,
})
}
sort.Slice(services, func(i, j int) bool {
return services[i].Name < services[j].Name
})
return services, nil
}
func listIngresses(ctx context.Context, c client.Client, nginx *nginxv1alpha1.Nginx) ([]nginxv1alpha1.IngressStatus, error) {
var ingressList networkingv1.IngressList
options := &client.ListOptions{
LabelSelector: labels.SelectorFromSet(k8s.LabelsForNginx(nginx.Name)),
Namespace: nginx.Namespace,
}
if err := c.List(ctx, &ingressList, options); err != nil {
return nil, err
}
var ingresses []nginxv1alpha1.IngressStatus
for _, i := range ingressList.Items {
ingresses = append(ingresses, nginxv1alpha1.IngressStatus{Name: i.Name})
}
sort.Slice(ingresses, func(i, j int) bool {
return ingresses[i].Name < ingresses[j].Name
})
return ingresses, nil
}
func (r *NginxReconciler) shouldManageNginx(nginx *v1alpha1.Nginx) bool {
// empty filter matches all resources
if r.AnnotationFilter == nil || r.AnnotationFilter.Empty() {
return true
}
return r.AnnotationFilter.Matches(labels.Set(nginx.Annotations))
}
| {
if err := r.reconcileDeployment(ctx, nginx); err != nil {
return err
}
if err := r.reconcileService(ctx, nginx); err != nil {
return err
}
if err := r.reconcileIngress(ctx, nginx); err != nil {
return err
}
return nil
} | identifier_body |
nginx_controller.go | // Copyright 2020 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package controllers
import (
"context"
"fmt"
"reflect"
"sort"
"strings"
"time"
"github.com/go-logr/logr"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1" | "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/tsuru/nginx-operator/api/v1alpha1"
nginxv1alpha1 "github.com/tsuru/nginx-operator/api/v1alpha1"
"github.com/tsuru/nginx-operator/pkg/k8s"
)
// NginxReconciler reconciles a Nginx object
type NginxReconciler struct {
client.Client
EventRecorder record.EventRecorder
Log logr.Logger
Scheme *runtime.Scheme
AnnotationFilter labels.Selector
}
// +kubebuilder:rbac:groups=nginx.tsuru.io,resources=nginxes,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=nginx.tsuru.io,resources=nginxes/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;delete
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups="",resources=events,verbs=create;update;patch
func (r *NginxReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&nginxv1alpha1.Nginx{}).
Owns(&appsv1.Deployment{}).
Complete(r)
}
func (r *NginxReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := r.Log.WithValues("nginx", req.NamespacedName)
var instance nginxv1alpha1.Nginx
err := r.Client.Get(ctx, req.NamespacedName, &instance)
if err != nil {
if errors.IsNotFound(err) {
log.Info("Nginx resource not found, skipping reconcile")
return ctrl.Result{}, nil
}
log.Error(err, "Unable to get Nginx resource")
return ctrl.Result{}, err
}
if !r.shouldManageNginx(&instance) {
log.V(1).Info("Nginx resource doesn't match annotations filters, skipping it")
return ctrl.Result{Requeue: true, RequeueAfter: 5 * time.Minute}, nil
}
if err := r.reconcileNginx(ctx, &instance); err != nil {
log.Error(err, "Fail to reconcile")
return ctrl.Result{}, err
}
if err := r.refreshStatus(ctx, &instance); err != nil {
log.Error(err, "Fail to refresh status subresource")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (r *NginxReconciler) reconcileNginx(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
if err := r.reconcileDeployment(ctx, nginx); err != nil {
return err
}
if err := r.reconcileService(ctx, nginx); err != nil {
return err
}
if err := r.reconcileIngress(ctx, nginx); err != nil {
return err
}
return nil
}
func (r *NginxReconciler) reconcileDeployment(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
newDeploy, err := k8s.NewDeployment(nginx)
if err != nil {
return fmt.Errorf("failed to build Deployment from Nginx: %w", err)
}
var currentDeploy appsv1.Deployment
err = r.Client.Get(ctx, types.NamespacedName{Name: newDeploy.Name, Namespace: newDeploy.Namespace}, ¤tDeploy)
if errors.IsNotFound(err) {
return r.Client.Create(ctx, newDeploy)
}
if err != nil {
return fmt.Errorf("failed to retrieve Deployment: %w", err)
}
existingNginxSpec, err := k8s.ExtractNginxSpec(currentDeploy.ObjectMeta)
if err != nil {
return fmt.Errorf("failed to extract Nginx spec from Deployment annotations: %w", err)
}
if reflect.DeepEqual(nginx.Spec, existingNginxSpec) {
return nil
}
replicas := currentDeploy.Spec.Replicas
patch := client.StrategicMergeFrom(currentDeploy.DeepCopy())
currentDeploy.Spec = newDeploy.Spec
if newDeploy.Spec.Replicas == nil {
// NOTE: replicas field is set to nil whenever it's managed by some
// autoscaler controller e.g HPA.
currentDeploy.Spec.Replicas = replicas
}
err = k8s.SetNginxSpec(¤tDeploy.ObjectMeta, nginx.Spec)
if err != nil {
return fmt.Errorf("failed to set Nginx spec in Deployment annotations: %w", err)
}
err = r.Client.Patch(ctx, ¤tDeploy, patch)
if err != nil {
return fmt.Errorf("failed to patch Deployment: %w", err)
}
return nil
}
func (r *NginxReconciler) reconcileService(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
newService := k8s.NewService(nginx)
var currentService corev1.Service
err := r.Client.Get(ctx, types.NamespacedName{Name: newService.Name, Namespace: newService.Namespace}, ¤tService)
if errors.IsNotFound(err) {
err = r.Client.Create(ctx, newService)
if errors.IsForbidden(err) && strings.Contains(err.Error(), "exceeded quota") {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceQuotaExceeded", "failed to create Service: %s", err)
return err
}
if err != nil {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceCreationFailed", "failed to create Service: %s", err)
return err
}
r.EventRecorder.Eventf(nginx, corev1.EventTypeNormal, "ServiceCreated", "service created successfully")
return nil
}
if err != nil {
return fmt.Errorf("failed to retrieve Service resource: %v", err)
}
newService.ResourceVersion = currentService.ResourceVersion
newService.Spec.ClusterIP = currentService.Spec.ClusterIP
newService.Spec.HealthCheckNodePort = currentService.Spec.HealthCheckNodePort
newService.Finalizers = currentService.Finalizers
for annotation, value := range currentService.Annotations {
if newService.Annotations[annotation] == "" {
newService.Annotations[annotation] = value
}
}
if newService.Spec.Type == corev1.ServiceTypeNodePort || newService.Spec.Type == corev1.ServiceTypeLoadBalancer {
// avoid nodeport reallocation preserving the current ones
for _, currentPort := range currentService.Spec.Ports {
for index, newPort := range newService.Spec.Ports {
if currentPort.Port == newPort.Port {
newService.Spec.Ports[index].NodePort = currentPort.NodePort
}
}
}
}
err = r.Client.Update(ctx, newService)
if err != nil {
r.EventRecorder.Eventf(nginx, corev1.EventTypeWarning, "ServiceUpdateFailed", "failed to update Service: %s", err)
return err
}
r.EventRecorder.Eventf(nginx, corev1.EventTypeNormal, "ServiceUpdated", "service updated successfully")
return nil
}
func (r *NginxReconciler) reconcileIngress(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
if nginx == nil {
return fmt.Errorf("nginx cannot be nil")
}
newIngress := k8s.NewIngress(nginx)
var currentIngress networkingv1.Ingress
err := r.Client.Get(ctx, types.NamespacedName{Name: newIngress.Name, Namespace: newIngress.Namespace}, ¤tIngress)
if errors.IsNotFound(err) {
if nginx.Spec.Ingress == nil {
return nil
}
return r.Client.Create(ctx, newIngress)
}
if err != nil {
return err
}
if nginx.Spec.Ingress == nil {
return r.Client.Delete(ctx, ¤tIngress)
}
if !shouldUpdateIngress(¤tIngress, newIngress) {
return nil
}
newIngress.ResourceVersion = currentIngress.ResourceVersion
newIngress.Finalizers = currentIngress.Finalizers
return r.Client.Update(ctx, newIngress)
}
func shouldUpdateIngress(currentIngress, newIngress *networkingv1.Ingress) bool {
if currentIngress == nil || newIngress == nil {
return false
}
return !reflect.DeepEqual(currentIngress.Annotations, newIngress.Annotations) ||
!reflect.DeepEqual(currentIngress.Labels, newIngress.Labels) ||
!reflect.DeepEqual(currentIngress.Spec, newIngress.Spec)
}
func (r *NginxReconciler) refreshStatus(ctx context.Context, nginx *nginxv1alpha1.Nginx) error {
deploys, err := listDeployments(ctx, r.Client, nginx)
if err != nil {
return err
}
var deployStatuses []v1alpha1.DeploymentStatus
var replicas int32
for _, d := range deploys {
replicas += d.Status.Replicas
deployStatuses = append(deployStatuses, v1alpha1.DeploymentStatus{Name: d.Name})
}
services, err := listServices(ctx, r.Client, nginx)
if err != nil {
return fmt.Errorf("failed to list services for nginx: %v", err)
}
ingresses, err := listIngresses(ctx, r.Client, nginx)
if err != nil {
return fmt.Errorf("failed to list ingresses for nginx: %w", err)
}
sort.Slice(nginx.Status.Services, func(i, j int) bool {
return nginx.Status.Services[i].Name < nginx.Status.Services[j].Name
})
sort.Slice(nginx.Status.Ingresses, func(i, j int) bool {
return nginx.Status.Ingresses[i].Name < nginx.Status.Ingresses[j].Name
})
status := v1alpha1.NginxStatus{
CurrentReplicas: replicas,
PodSelector: k8s.LabelsForNginxString(nginx.Name),
Deployments: deployStatuses,
Services: services,
Ingresses: ingresses,
}
if reflect.DeepEqual(nginx.Status, status) {
return nil
}
nginx.Status = status
err = r.Client.Status().Update(ctx, nginx)
if err != nil {
return fmt.Errorf("failed to update nginx status: %v", err)
}
return nil
}
func listDeployments(ctx context.Context, c client.Client, nginx *nginxv1alpha1.Nginx) ([]appsv1.Deployment, error) {
var deployList appsv1.DeploymentList
err := c.List(ctx, &deployList, &client.ListOptions{
Namespace: nginx.Namespace,
LabelSelector: labels.SelectorFromSet(k8s.LabelsForNginx(nginx.Name)),
})
if err != nil {
return nil, err
}
deploys := deployList.Items
// NOTE: specific implementation for backward compatibility w/ Deployments
// that does not have Nginx labels yet.
if len(deploys) == 0 {
err = c.List(ctx, &deployList, &client.ListOptions{Namespace: nginx.Namespace})
if err != nil {
return nil, err
}
desired := *metav1.NewControllerRef(nginx, schema.GroupVersionKind{
Group: v1alpha1.GroupVersion.Group,
Version: v1alpha1.GroupVersion.Version,
Kind: "Nginx",
})
for _, deploy := range deployList.Items {
for _, owner := range deploy.OwnerReferences {
if reflect.DeepEqual(owner, desired) {
deploys = append(deploys, deploy)
}
}
}
}
sort.Slice(deploys, func(i, j int) bool { return deploys[i].Name < deploys[j].Name })
return deploys, nil
}
// listServices return all the services for the given nginx sorted by name
func listServices(ctx context.Context, c client.Client, nginx *nginxv1alpha1.Nginx) ([]nginxv1alpha1.ServiceStatus, error) {
serviceList := &corev1.ServiceList{}
labelSelector := labels.SelectorFromSet(k8s.LabelsForNginx(nginx.Name))
listOps := &client.ListOptions{Namespace: nginx.Namespace, LabelSelector: labelSelector}
err := c.List(ctx, serviceList, listOps)
if err != nil {
return nil, err
}
var services []nginxv1alpha1.ServiceStatus
for _, s := range serviceList.Items {
services = append(services, nginxv1alpha1.ServiceStatus{
Name: s.Name,
})
}
sort.Slice(services, func(i, j int) bool {
return services[i].Name < services[j].Name
})
return services, nil
}
func listIngresses(ctx context.Context, c client.Client, nginx *nginxv1alpha1.Nginx) ([]nginxv1alpha1.IngressStatus, error) {
var ingressList networkingv1.IngressList
options := &client.ListOptions{
LabelSelector: labels.SelectorFromSet(k8s.LabelsForNginx(nginx.Name)),
Namespace: nginx.Namespace,
}
if err := c.List(ctx, &ingressList, options); err != nil {
return nil, err
}
var ingresses []nginxv1alpha1.IngressStatus
for _, i := range ingressList.Items {
ingresses = append(ingresses, nginxv1alpha1.IngressStatus{Name: i.Name})
}
sort.Slice(ingresses, func(i, j int) bool {
return ingresses[i].Name < ingresses[j].Name
})
return ingresses, nil
}
func (r *NginxReconciler) shouldManageNginx(nginx *v1alpha1.Nginx) bool {
// empty filter matches all resources
if r.AnnotationFilter == nil || r.AnnotationFilter.Empty() {
return true
}
return r.AnnotationFilter.Matches(labels.Set(nginx.Annotations))
} | networkingv1 "k8s.io/api/networking/v1" | random_line_split |
provider.go | // Copyright 2016-2021, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pulumi
import (
"context"
"reflect"
"sort"
"strings"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin"
pulumirpc "github.com/pulumi/pulumi/sdk/v3/proto/go"
"google.golang.org/grpc"
)
type constructFunc func(ctx *Context, typ, name string, inputs map[string]interface{},
options ResourceOption) (URNInput, Input, error)
// construct adapts the gRPC ConstructRequest/ConstructResponse to/from the Pulumi Go SDK programming model.
func construct(ctx context.Context, req *pulumirpc.ConstructRequest, engineConn *grpc.ClientConn,
constructF constructFunc) (*pulumirpc.ConstructResponse, error) {
// Configure the RunInfo.
runInfo := RunInfo{
Project: req.GetProject(),
Stack: req.GetStack(),
Config: req.GetConfig(),
ConfigSecretKeys: req.GetConfigSecretKeys(),
Parallel: int(req.GetParallel()),
DryRun: req.GetDryRun(),
MonitorAddr: req.GetMonitorEndpoint(),
engineConn: engineConn,
}
pulumiCtx, err := NewContext(ctx, runInfo)
if err != nil {
return nil, errors.Wrap(err, "constructing run context")
}
// Deserialize the inputs and apply appropriate dependencies.
inputDependencies := req.GetInputDependencies()
deserializedInputs, err := plugin.UnmarshalProperties(
req.GetInputs(),
plugin.MarshalOptions{KeepSecrets: true, KeepResources: true, KeepUnknowns: req.GetDryRun()},
)
if err != nil {
return nil, errors.Wrap(err, "unmarshaling inputs")
}
inputs := make(map[string]interface{}, len(deserializedInputs))
for key, value := range deserializedInputs {
k := string(key)
var deps []Resource
if inputDeps, ok := inputDependencies[k]; ok {
deps = make([]Resource, len(inputDeps.GetUrns()))
for i, depURN := range inputDeps.GetUrns() {
deps[i] = pulumiCtx.newDependencyResource(URN(depURN))
}
}
inputs[k] = &constructInput{
value: value,
deps: deps,
}
}
// Rebuild the resource options.
aliases := make([]Alias, len(req.GetAliases()))
for i, urn := range req.GetAliases() {
aliases[i] = Alias{URN: URN(urn)}
}
dependencyURNs := urnSet{}
for _, urn := range req.GetDependencies() {
dependencyURNs.add(URN(urn))
}
providers := make(map[string]ProviderResource, len(req.GetProviders()))
for pkg, ref := range req.GetProviders() {
resource, err := createProviderResource(pulumiCtx, ref)
if err != nil {
return nil, err
}
providers[pkg] = resource
}
var parent Resource
if req.GetParent() != "" {
parent = pulumiCtx.newDependencyResource(URN(req.GetParent()))
}
opts := resourceOption(func(ro *resourceOptions) {
ro.Aliases = aliases
ro.DependsOn = []func(ctx context.Context) (urnSet, error){
func(ctx context.Context) (urnSet, error) {
return dependencyURNs, nil
},
}
ro.Protect = req.GetProtect()
ro.Providers = providers
ro.Parent = parent
})
urn, state, err := constructF(pulumiCtx, req.GetType(), req.GetName(), inputs, opts)
if err != nil {
return nil, err
}
// Wait for async work to finish.
if err = pulumiCtx.wait(); err != nil {
return nil, err
}
rpcURN, _, _, err := urn.ToURNOutput().awaitURN(ctx)
if err != nil {
return nil, err
}
// Serialize all state properties, first by awaiting them, and then marshaling them to the requisite gRPC values.
resolvedProps, propertyDeps, _, err := marshalInputs(state)
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Marshal all properties for the RPC call.
keepUnknowns := req.GetDryRun()
rpcProps, err := plugin.MarshalProperties(
resolvedProps,
plugin.MarshalOptions{KeepSecrets: true, KeepUnknowns: keepUnknowns, KeepResources: pulumiCtx.keepResources})
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Convert the property dependencies map for RPC and remove duplicates.
rpcPropertyDeps := make(map[string]*pulumirpc.ConstructResponse_PropertyDependencies)
for k, deps := range propertyDeps {
sort.Slice(deps, func(i, j int) bool { return deps[i] < deps[j] })
urns := make([]string, 0, len(deps))
for i, d := range deps {
if i > 0 && urns[i-1] == string(d) {
continue
}
urns = append(urns, string(d))
}
rpcPropertyDeps[k] = &pulumirpc.ConstructResponse_PropertyDependencies{
Urns: urns,
}
}
return &pulumirpc.ConstructResponse{
Urn: string(rpcURN),
State: rpcProps,
StateDependencies: rpcPropertyDeps,
}, nil
}
// createProviderResource rehydrates the provider reference into a registered ProviderResource,
// otherwise it returns an instance of DependencyProviderResource.
func createProviderResource(ctx *Context, ref string) (ProviderResource, error) {
// Parse the URN and ID out of the provider reference.
lastSep := strings.LastIndex(ref, "::")
if lastSep == -1 {
return nil, errors.Errorf("expected '::' in provider reference %s", ref)
}
urn := ref[0:lastSep]
id := ref[lastSep+2:]
// Unmarshal the provider resource as a resource reference so we get back
// the intended provider type with its state, if it's been registered.
resource, err := unmarshalResourceReference(ctx, resource.ResourceReference{
URN: resource.URN(urn),
ID: resource.NewStringProperty(id),
})
if err != nil {
return nil, err
}
return resource.(ProviderResource), nil
}
type constructInput struct {
value resource.PropertyValue
deps []Resource
}
// constructInputsMap returns the inputs as a Map.
func constructInputsMap(ctx *Context, inputs map[string]interface{}) (Map, error) {
result := make(Map, len(inputs))
for k, v := range inputs {
ci := v.(*constructInput)
known := !ci.value.ContainsUnknowns()
value, secret, err := unmarshalPropertyValue(ctx, ci.value)
if err != nil {
return nil, errors.Wrapf(err, "unmarshaling input %s", k)
}
resultType := anyOutputType
if ot, ok := concreteTypeToOutputType.Load(reflect.TypeOf(value)); ok {
resultType = ot.(reflect.Type)
}
output := ctx.newOutput(resultType, ci.deps...)
output.getState().resolve(value, known, secret, nil)
result[k] = output
}
return result, nil
}
// constructInputsCopyTo sets the inputs on the given args struct.
func constructInputsCopyTo(ctx *Context, inputs map[string]interface{}, args interface{}) error {
if args == nil {
return errors.New("args must not be nil")
}
argsV := reflect.ValueOf(args)
typ := argsV.Type()
if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
return errors.New("args must be a pointer to a struct")
}
argsV, typ = argsV.Elem(), typ.Elem()
for k, v := range inputs {
ci := v.(*constructInput)
for i := 0; i < typ.NumField(); i++ {
fieldV := argsV.Field(i)
if !fieldV.CanSet() {
continue
}
field := typ.Field(i)
tag, has := field.Tag.Lookup("pulumi")
if !has || tag != k {
continue
}
handleField := func(typ reflect.Type, value resource.PropertyValue, deps []Resource) (reflect.Value, error) {
resultType := anyOutputType
if typ.Implements(outputType) {
resultType = typ
} else if typ.Implements(inputType) {
toOutputMethodName := "To" + strings.TrimSuffix(typ.Name(), "Input") + "Output"
if toOutputMethod, found := typ.MethodByName(toOutputMethodName); found {
mt := toOutputMethod.Type
if mt.NumIn() == 0 && mt.NumOut() == 1 && mt.Out(0).Implements(outputType) {
resultType = mt.Out(0)
}
}
}
output := ctx.newOutput(resultType, deps...)
dest := reflect.New(output.ElementType()).Elem()
known := !ci.value.ContainsUnknowns()
secret, err := unmarshalOutput(ctx, value, dest)
if err != nil {
return reflect.Value{}, err
}
output.getState().resolve(dest.Interface(), known, secret, nil)
return reflect.ValueOf(output), nil
}
isInputType := func(typ reflect.Type) bool {
return typ.Implements(outputType) || typ.Implements(inputType)
}
if isInputType(field.Type) {
val, err := handleField(field.Type, ci.value, ci.deps)
if err != nil {
return err
}
fieldV.Set(val)
continue
}
if field.Type.Kind() == reflect.Slice && isInputType(field.Type.Elem()) {
elemType := field.Type.Elem()
length := len(ci.value.ArrayValue())
dest := reflect.MakeSlice(field.Type, length, length)
for i := 0; i < length; i++ {
val, err := handleField(elemType, ci.value.ArrayValue()[i], ci.deps)
if err != nil {
return err
}
dest.Index(i).Set(val)
}
fieldV.Set(dest)
continue
}
if field.Type.Kind() == reflect.Map && isInputType(field.Type.Elem()) {
elemType := field.Type.Elem()
length := len(ci.value.ObjectValue())
dest := reflect.MakeMapWithSize(field.Type, length)
for k, v := range ci.value.ObjectValue() {
key := reflect.ValueOf(string(k))
val, err := handleField(elemType, v, ci.deps)
if err != nil {
return err
}
dest.SetMapIndex(key, val)
}
fieldV.Set(dest)
continue
}
if len(ci.deps) > 0 {
return errors.Errorf(
"%s.%s is typed as %v but must be typed as Input or Output for input %q with dependencies",
typ, field.Name, field.Type, k)
}
dest := reflect.New(field.Type).Elem()
secret, err := unmarshalOutput(ctx, ci.value, dest)
if err != nil {
return errors.Wrapf(err, "unmarshaling input %s", k)
}
if secret {
return errors.Errorf(
"%s.%s is typed as %v but must be typed as Input or Output for secret input %q",
typ, field.Name, field.Type, k)
}
fieldV.Set(reflect.ValueOf(dest.Interface()))
}
}
return nil
}
// newConstructResult converts a resource into its associated URN and state.
func newConstructResult(resource ComponentResource) (URNInput, Input, error) {
if resource == nil {
return nil, nil, errors.New("resource must not be nil")
}
resourceV := reflect.ValueOf(resource)
typ := resourceV.Type()
if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
return nil, nil, errors.New("resource must be a pointer to a struct")
}
resourceV, typ = resourceV.Elem(), typ.Elem()
state := make(Map)
for i := 0; i < typ.NumField(); i++ {
fieldV := resourceV.Field(i)
if !fieldV.CanInterface() {
continue
}
field := typ.Field(i)
tag, has := field.Tag.Lookup("pulumi")
if !has {
continue
}
val := fieldV.Interface()
if v, ok := val.(Input); ok {
state[tag] = v
} else {
state[tag] = ToOutput(val)
}
}
return resource.URN(), state, nil
}
type callFunc func(ctx *Context, tok string, args map[string]interface{}) (Input, error)
// call adapts the gRPC CallRequest/CallResponse to/from the Pulumi Go SDK programming model.
func call(ctx context.Context, req *pulumirpc.CallRequest, engineConn *grpc.ClientConn,
callF callFunc) (*pulumirpc.CallResponse, error) {
// Configure the RunInfo.
runInfo := RunInfo{
Project: req.GetProject(),
Stack: req.GetStack(),
Config: req.GetConfig(),
Parallel: int(req.GetParallel()),
DryRun: req.GetDryRun(),
MonitorAddr: req.GetMonitorEndpoint(),
engineConn: engineConn,
}
pulumiCtx, err := NewContext(ctx, runInfo)
if err != nil {
return nil, errors.Wrap(err, "constructing run context")
}
// Deserialize the inputs and apply appropriate dependencies.
argDependencies := req.GetArgDependencies()
deserializedArgs, err := plugin.UnmarshalProperties(
req.GetArgs(),
plugin.MarshalOptions{KeepSecrets: true, KeepResources: true, KeepUnknowns: req.GetDryRun()},
)
if err != nil {
return nil, errors.Wrap(err, "unmarshaling inputs")
}
args := make(map[string]interface{}, len(deserializedArgs))
for key, value := range deserializedArgs {
k := string(key)
var deps []Resource
if inputDeps, ok := argDependencies[k]; ok {
deps = make([]Resource, len(inputDeps.GetUrns()))
for i, depURN := range inputDeps.GetUrns() {
deps[i] = pulumiCtx.newDependencyResource(URN(depURN))
}
}
args[k] = &constructInput{
value: value,
deps: deps,
}
}
result, err := callF(pulumiCtx, req.GetTok(), args)
if err != nil {
return nil, err
}
// Wait for async work to finish.
if err = pulumiCtx.wait(); err != nil {
return nil, err
}
// Serialize all result properties, first by awaiting them, and then marshaling them to the requisite gRPC values.
resolvedProps, propertyDeps, _, err := marshalInputs(result)
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Marshal all properties for the RPC call.
keepUnknowns := req.GetDryRun()
rpcProps, err := plugin.MarshalProperties(
resolvedProps,
plugin.MarshalOptions{KeepSecrets: true, KeepUnknowns: keepUnknowns, KeepResources: pulumiCtx.keepResources})
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Convert the property dependencies map for RPC and remove duplicates.
rpcPropertyDeps := make(map[string]*pulumirpc.CallResponse_ReturnDependencies)
for k, deps := range propertyDeps {
sort.Slice(deps, func(i, j int) bool { return deps[i] < deps[j] })
urns := make([]string, 0, len(deps))
for i, d := range deps {
if i > 0 && urns[i-1] == string(d) {
continue
}
urns = append(urns, string(d))
}
rpcPropertyDeps[k] = &pulumirpc.CallResponse_ReturnDependencies{
Urns: urns,
}
}
return &pulumirpc.CallResponse{
Return: rpcProps,
ReturnDependencies: rpcPropertyDeps,
}, nil
}
// callArgsCopyTo sets the args on the given args struct. If there is a `__self__` argument, it will be
// returned, otherwise it will return nil.
func callArgsCopyTo(ctx *Context, source map[string]interface{}, args interface{}) (Resource, error) {
// Use the same implementation as construct.
if err := constructInputsCopyTo(ctx, source, args); err != nil {
return nil, err
}
// Retrieve the `__self__` arg.
self, err := callArgsSelf(ctx, source)
if err != nil {
return nil, err
}
return self, nil
}
// callArgsSelf retrieves the `__self__` argument. If `__self__` is present the value is returned,
// otherwise the returned value will be nil.
func | (ctx *Context, source map[string]interface{}) (Resource, error) {
v, ok := source["__self__"]
if !ok {
return nil, nil
}
ci := v.(*constructInput)
if ci.value.ContainsUnknowns() {
return nil, errors.New("__self__ is unknown")
}
value, secret, err := unmarshalPropertyValue(ctx, ci.value)
if err != nil {
return nil, errors.Wrap(err, "unmarshaling __self__")
}
if secret {
return nil, errors.New("__self__ is a secret")
}
return value.(Resource), nil
}
// newCallResult converts a result struct into an input Map that can be marshalled.
func newCallResult(result interface{}) (Input, error) {
if result == nil {
return nil, errors.New("result must not be nil")
}
resultV := reflect.ValueOf(result)
typ := resultV.Type()
if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
return nil, errors.New("result must be a pointer to a struct")
}
resultV, typ = resultV.Elem(), typ.Elem()
ret := make(Map)
for i := 0; i < typ.NumField(); i++ {
fieldV := resultV.Field(i)
if !fieldV.CanInterface() {
continue
}
field := typ.Field(i)
tag, has := field.Tag.Lookup("pulumi")
if !has {
continue
}
val := fieldV.Interface()
if v, ok := val.(Input); ok {
ret[tag] = v
} else {
ret[tag] = ToOutput(val)
}
}
return ret, nil
}
| callArgsSelf | identifier_name |
provider.go | // Copyright 2016-2021, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pulumi
import (
"context"
"reflect"
"sort"
"strings"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin"
pulumirpc "github.com/pulumi/pulumi/sdk/v3/proto/go"
"google.golang.org/grpc"
)
type constructFunc func(ctx *Context, typ, name string, inputs map[string]interface{},
options ResourceOption) (URNInput, Input, error)
// construct adapts the gRPC ConstructRequest/ConstructResponse to/from the Pulumi Go SDK programming model.
func construct(ctx context.Context, req *pulumirpc.ConstructRequest, engineConn *grpc.ClientConn,
constructF constructFunc) (*pulumirpc.ConstructResponse, error) {
// Configure the RunInfo.
runInfo := RunInfo{
Project: req.GetProject(),
Stack: req.GetStack(),
Config: req.GetConfig(),
ConfigSecretKeys: req.GetConfigSecretKeys(),
Parallel: int(req.GetParallel()),
DryRun: req.GetDryRun(),
MonitorAddr: req.GetMonitorEndpoint(),
engineConn: engineConn,
}
pulumiCtx, err := NewContext(ctx, runInfo)
if err != nil |
// Deserialize the inputs and apply appropriate dependencies.
inputDependencies := req.GetInputDependencies()
deserializedInputs, err := plugin.UnmarshalProperties(
req.GetInputs(),
plugin.MarshalOptions{KeepSecrets: true, KeepResources: true, KeepUnknowns: req.GetDryRun()},
)
if err != nil {
return nil, errors.Wrap(err, "unmarshaling inputs")
}
inputs := make(map[string]interface{}, len(deserializedInputs))
for key, value := range deserializedInputs {
k := string(key)
var deps []Resource
if inputDeps, ok := inputDependencies[k]; ok {
deps = make([]Resource, len(inputDeps.GetUrns()))
for i, depURN := range inputDeps.GetUrns() {
deps[i] = pulumiCtx.newDependencyResource(URN(depURN))
}
}
inputs[k] = &constructInput{
value: value,
deps: deps,
}
}
// Rebuild the resource options.
aliases := make([]Alias, len(req.GetAliases()))
for i, urn := range req.GetAliases() {
aliases[i] = Alias{URN: URN(urn)}
}
dependencyURNs := urnSet{}
for _, urn := range req.GetDependencies() {
dependencyURNs.add(URN(urn))
}
providers := make(map[string]ProviderResource, len(req.GetProviders()))
for pkg, ref := range req.GetProviders() {
resource, err := createProviderResource(pulumiCtx, ref)
if err != nil {
return nil, err
}
providers[pkg] = resource
}
var parent Resource
if req.GetParent() != "" {
parent = pulumiCtx.newDependencyResource(URN(req.GetParent()))
}
opts := resourceOption(func(ro *resourceOptions) {
ro.Aliases = aliases
ro.DependsOn = []func(ctx context.Context) (urnSet, error){
func(ctx context.Context) (urnSet, error) {
return dependencyURNs, nil
},
}
ro.Protect = req.GetProtect()
ro.Providers = providers
ro.Parent = parent
})
urn, state, err := constructF(pulumiCtx, req.GetType(), req.GetName(), inputs, opts)
if err != nil {
return nil, err
}
// Wait for async work to finish.
if err = pulumiCtx.wait(); err != nil {
return nil, err
}
rpcURN, _, _, err := urn.ToURNOutput().awaitURN(ctx)
if err != nil {
return nil, err
}
// Serialize all state properties, first by awaiting them, and then marshaling them to the requisite gRPC values.
resolvedProps, propertyDeps, _, err := marshalInputs(state)
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Marshal all properties for the RPC call.
keepUnknowns := req.GetDryRun()
rpcProps, err := plugin.MarshalProperties(
resolvedProps,
plugin.MarshalOptions{KeepSecrets: true, KeepUnknowns: keepUnknowns, KeepResources: pulumiCtx.keepResources})
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Convert the property dependencies map for RPC and remove duplicates.
rpcPropertyDeps := make(map[string]*pulumirpc.ConstructResponse_PropertyDependencies)
for k, deps := range propertyDeps {
sort.Slice(deps, func(i, j int) bool { return deps[i] < deps[j] })
urns := make([]string, 0, len(deps))
for i, d := range deps {
if i > 0 && urns[i-1] == string(d) {
continue
}
urns = append(urns, string(d))
}
rpcPropertyDeps[k] = &pulumirpc.ConstructResponse_PropertyDependencies{
Urns: urns,
}
}
return &pulumirpc.ConstructResponse{
Urn: string(rpcURN),
State: rpcProps,
StateDependencies: rpcPropertyDeps,
}, nil
}
// createProviderResource rehydrates the provider reference into a registered ProviderResource,
// otherwise it returns an instance of DependencyProviderResource.
func createProviderResource(ctx *Context, ref string) (ProviderResource, error) {
// Parse the URN and ID out of the provider reference.
lastSep := strings.LastIndex(ref, "::")
if lastSep == -1 {
return nil, errors.Errorf("expected '::' in provider reference %s", ref)
}
urn := ref[0:lastSep]
id := ref[lastSep+2:]
// Unmarshal the provider resource as a resource reference so we get back
// the intended provider type with its state, if it's been registered.
resource, err := unmarshalResourceReference(ctx, resource.ResourceReference{
URN: resource.URN(urn),
ID: resource.NewStringProperty(id),
})
if err != nil {
return nil, err
}
return resource.(ProviderResource), nil
}
type constructInput struct {
value resource.PropertyValue
deps []Resource
}
// constructInputsMap returns the inputs as a Map.
func constructInputsMap(ctx *Context, inputs map[string]interface{}) (Map, error) {
result := make(Map, len(inputs))
for k, v := range inputs {
ci := v.(*constructInput)
known := !ci.value.ContainsUnknowns()
value, secret, err := unmarshalPropertyValue(ctx, ci.value)
if err != nil {
return nil, errors.Wrapf(err, "unmarshaling input %s", k)
}
resultType := anyOutputType
if ot, ok := concreteTypeToOutputType.Load(reflect.TypeOf(value)); ok {
resultType = ot.(reflect.Type)
}
output := ctx.newOutput(resultType, ci.deps...)
output.getState().resolve(value, known, secret, nil)
result[k] = output
}
return result, nil
}
// constructInputsCopyTo sets the inputs on the given args struct.
func constructInputsCopyTo(ctx *Context, inputs map[string]interface{}, args interface{}) error {
if args == nil {
return errors.New("args must not be nil")
}
argsV := reflect.ValueOf(args)
typ := argsV.Type()
if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
return errors.New("args must be a pointer to a struct")
}
argsV, typ = argsV.Elem(), typ.Elem()
for k, v := range inputs {
ci := v.(*constructInput)
for i := 0; i < typ.NumField(); i++ {
fieldV := argsV.Field(i)
if !fieldV.CanSet() {
continue
}
field := typ.Field(i)
tag, has := field.Tag.Lookup("pulumi")
if !has || tag != k {
continue
}
handleField := func(typ reflect.Type, value resource.PropertyValue, deps []Resource) (reflect.Value, error) {
resultType := anyOutputType
if typ.Implements(outputType) {
resultType = typ
} else if typ.Implements(inputType) {
toOutputMethodName := "To" + strings.TrimSuffix(typ.Name(), "Input") + "Output"
if toOutputMethod, found := typ.MethodByName(toOutputMethodName); found {
mt := toOutputMethod.Type
if mt.NumIn() == 0 && mt.NumOut() == 1 && mt.Out(0).Implements(outputType) {
resultType = mt.Out(0)
}
}
}
output := ctx.newOutput(resultType, deps...)
dest := reflect.New(output.ElementType()).Elem()
known := !ci.value.ContainsUnknowns()
secret, err := unmarshalOutput(ctx, value, dest)
if err != nil {
return reflect.Value{}, err
}
output.getState().resolve(dest.Interface(), known, secret, nil)
return reflect.ValueOf(output), nil
}
isInputType := func(typ reflect.Type) bool {
return typ.Implements(outputType) || typ.Implements(inputType)
}
if isInputType(field.Type) {
val, err := handleField(field.Type, ci.value, ci.deps)
if err != nil {
return err
}
fieldV.Set(val)
continue
}
if field.Type.Kind() == reflect.Slice && isInputType(field.Type.Elem()) {
elemType := field.Type.Elem()
length := len(ci.value.ArrayValue())
dest := reflect.MakeSlice(field.Type, length, length)
for i := 0; i < length; i++ {
val, err := handleField(elemType, ci.value.ArrayValue()[i], ci.deps)
if err != nil {
return err
}
dest.Index(i).Set(val)
}
fieldV.Set(dest)
continue
}
if field.Type.Kind() == reflect.Map && isInputType(field.Type.Elem()) {
elemType := field.Type.Elem()
length := len(ci.value.ObjectValue())
dest := reflect.MakeMapWithSize(field.Type, length)
for k, v := range ci.value.ObjectValue() {
key := reflect.ValueOf(string(k))
val, err := handleField(elemType, v, ci.deps)
if err != nil {
return err
}
dest.SetMapIndex(key, val)
}
fieldV.Set(dest)
continue
}
if len(ci.deps) > 0 {
return errors.Errorf(
"%s.%s is typed as %v but must be typed as Input or Output for input %q with dependencies",
typ, field.Name, field.Type, k)
}
dest := reflect.New(field.Type).Elem()
secret, err := unmarshalOutput(ctx, ci.value, dest)
if err != nil {
return errors.Wrapf(err, "unmarshaling input %s", k)
}
if secret {
return errors.Errorf(
"%s.%s is typed as %v but must be typed as Input or Output for secret input %q",
typ, field.Name, field.Type, k)
}
fieldV.Set(reflect.ValueOf(dest.Interface()))
}
}
return nil
}
// newConstructResult converts a resource into its associated URN and state.
func newConstructResult(resource ComponentResource) (URNInput, Input, error) {
if resource == nil {
return nil, nil, errors.New("resource must not be nil")
}
resourceV := reflect.ValueOf(resource)
typ := resourceV.Type()
if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
return nil, nil, errors.New("resource must be a pointer to a struct")
}
resourceV, typ = resourceV.Elem(), typ.Elem()
state := make(Map)
for i := 0; i < typ.NumField(); i++ {
fieldV := resourceV.Field(i)
if !fieldV.CanInterface() {
continue
}
field := typ.Field(i)
tag, has := field.Tag.Lookup("pulumi")
if !has {
continue
}
val := fieldV.Interface()
if v, ok := val.(Input); ok {
state[tag] = v
} else {
state[tag] = ToOutput(val)
}
}
return resource.URN(), state, nil
}
type callFunc func(ctx *Context, tok string, args map[string]interface{}) (Input, error)
// call adapts the gRPC CallRequest/CallResponse to/from the Pulumi Go SDK programming model.
func call(ctx context.Context, req *pulumirpc.CallRequest, engineConn *grpc.ClientConn,
callF callFunc) (*pulumirpc.CallResponse, error) {
// Configure the RunInfo.
runInfo := RunInfo{
Project: req.GetProject(),
Stack: req.GetStack(),
Config: req.GetConfig(),
Parallel: int(req.GetParallel()),
DryRun: req.GetDryRun(),
MonitorAddr: req.GetMonitorEndpoint(),
engineConn: engineConn,
}
pulumiCtx, err := NewContext(ctx, runInfo)
if err != nil {
return nil, errors.Wrap(err, "constructing run context")
}
// Deserialize the inputs and apply appropriate dependencies.
argDependencies := req.GetArgDependencies()
deserializedArgs, err := plugin.UnmarshalProperties(
req.GetArgs(),
plugin.MarshalOptions{KeepSecrets: true, KeepResources: true, KeepUnknowns: req.GetDryRun()},
)
if err != nil {
return nil, errors.Wrap(err, "unmarshaling inputs")
}
args := make(map[string]interface{}, len(deserializedArgs))
for key, value := range deserializedArgs {
k := string(key)
var deps []Resource
if inputDeps, ok := argDependencies[k]; ok {
deps = make([]Resource, len(inputDeps.GetUrns()))
for i, depURN := range inputDeps.GetUrns() {
deps[i] = pulumiCtx.newDependencyResource(URN(depURN))
}
}
args[k] = &constructInput{
value: value,
deps: deps,
}
}
result, err := callF(pulumiCtx, req.GetTok(), args)
if err != nil {
return nil, err
}
// Wait for async work to finish.
if err = pulumiCtx.wait(); err != nil {
return nil, err
}
// Serialize all result properties, first by awaiting them, and then marshaling them to the requisite gRPC values.
resolvedProps, propertyDeps, _, err := marshalInputs(result)
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Marshal all properties for the RPC call.
keepUnknowns := req.GetDryRun()
rpcProps, err := plugin.MarshalProperties(
resolvedProps,
plugin.MarshalOptions{KeepSecrets: true, KeepUnknowns: keepUnknowns, KeepResources: pulumiCtx.keepResources})
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Convert the property dependencies map for RPC and remove duplicates.
rpcPropertyDeps := make(map[string]*pulumirpc.CallResponse_ReturnDependencies)
for k, deps := range propertyDeps {
sort.Slice(deps, func(i, j int) bool { return deps[i] < deps[j] })
urns := make([]string, 0, len(deps))
for i, d := range deps {
if i > 0 && urns[i-1] == string(d) {
continue
}
urns = append(urns, string(d))
}
rpcPropertyDeps[k] = &pulumirpc.CallResponse_ReturnDependencies{
Urns: urns,
}
}
return &pulumirpc.CallResponse{
Return: rpcProps,
ReturnDependencies: rpcPropertyDeps,
}, nil
}
// callArgsCopyTo sets the args on the given args struct. If there is a `__self__` argument, it will be
// returned, otherwise it will return nil.
func callArgsCopyTo(ctx *Context, source map[string]interface{}, args interface{}) (Resource, error) {
// Use the same implementation as construct.
if err := constructInputsCopyTo(ctx, source, args); err != nil {
return nil, err
}
// Retrieve the `__self__` arg.
self, err := callArgsSelf(ctx, source)
if err != nil {
return nil, err
}
return self, nil
}
// callArgsSelf retrieves the `__self__` argument. If `__self__` is present the value is returned,
// otherwise the returned value will be nil.
func callArgsSelf(ctx *Context, source map[string]interface{}) (Resource, error) {
v, ok := source["__self__"]
if !ok {
return nil, nil
}
ci := v.(*constructInput)
if ci.value.ContainsUnknowns() {
return nil, errors.New("__self__ is unknown")
}
value, secret, err := unmarshalPropertyValue(ctx, ci.value)
if err != nil {
return nil, errors.Wrap(err, "unmarshaling __self__")
}
if secret {
return nil, errors.New("__self__ is a secret")
}
return value.(Resource), nil
}
// newCallResult converts a result struct into an input Map that can be marshalled.
func newCallResult(result interface{}) (Input, error) {
if result == nil {
return nil, errors.New("result must not be nil")
}
resultV := reflect.ValueOf(result)
typ := resultV.Type()
if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
return nil, errors.New("result must be a pointer to a struct")
}
resultV, typ = resultV.Elem(), typ.Elem()
ret := make(Map)
for i := 0; i < typ.NumField(); i++ {
fieldV := resultV.Field(i)
if !fieldV.CanInterface() {
continue
}
field := typ.Field(i)
tag, has := field.Tag.Lookup("pulumi")
if !has {
continue
}
val := fieldV.Interface()
if v, ok := val.(Input); ok {
ret[tag] = v
} else {
ret[tag] = ToOutput(val)
}
}
return ret, nil
}
| {
return nil, errors.Wrap(err, "constructing run context")
} | conditional_block |
provider.go | // Copyright 2016-2021, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pulumi
import (
"context"
"reflect"
"sort"
"strings"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin"
pulumirpc "github.com/pulumi/pulumi/sdk/v3/proto/go"
"google.golang.org/grpc"
)
type constructFunc func(ctx *Context, typ, name string, inputs map[string]interface{},
options ResourceOption) (URNInput, Input, error)
// construct adapts the gRPC ConstructRequest/ConstructResponse to/from the Pulumi Go SDK programming model.
func construct(ctx context.Context, req *pulumirpc.ConstructRequest, engineConn *grpc.ClientConn,
constructF constructFunc) (*pulumirpc.ConstructResponse, error) {
// Configure the RunInfo.
runInfo := RunInfo{
Project: req.GetProject(),
Stack: req.GetStack(),
Config: req.GetConfig(),
ConfigSecretKeys: req.GetConfigSecretKeys(),
Parallel: int(req.GetParallel()),
DryRun: req.GetDryRun(),
MonitorAddr: req.GetMonitorEndpoint(),
engineConn: engineConn,
}
pulumiCtx, err := NewContext(ctx, runInfo)
if err != nil {
return nil, errors.Wrap(err, "constructing run context")
}
// Deserialize the inputs and apply appropriate dependencies.
inputDependencies := req.GetInputDependencies()
deserializedInputs, err := plugin.UnmarshalProperties(
req.GetInputs(),
plugin.MarshalOptions{KeepSecrets: true, KeepResources: true, KeepUnknowns: req.GetDryRun()},
)
if err != nil {
return nil, errors.Wrap(err, "unmarshaling inputs")
}
inputs := make(map[string]interface{}, len(deserializedInputs))
for key, value := range deserializedInputs {
k := string(key)
var deps []Resource
if inputDeps, ok := inputDependencies[k]; ok {
deps = make([]Resource, len(inputDeps.GetUrns()))
for i, depURN := range inputDeps.GetUrns() {
deps[i] = pulumiCtx.newDependencyResource(URN(depURN))
}
}
inputs[k] = &constructInput{
value: value,
deps: deps,
}
}
// Rebuild the resource options.
aliases := make([]Alias, len(req.GetAliases()))
for i, urn := range req.GetAliases() {
aliases[i] = Alias{URN: URN(urn)}
}
dependencyURNs := urnSet{}
for _, urn := range req.GetDependencies() {
dependencyURNs.add(URN(urn))
}
providers := make(map[string]ProviderResource, len(req.GetProviders()))
for pkg, ref := range req.GetProviders() {
resource, err := createProviderResource(pulumiCtx, ref)
if err != nil {
return nil, err
}
providers[pkg] = resource
}
var parent Resource
if req.GetParent() != "" {
parent = pulumiCtx.newDependencyResource(URN(req.GetParent()))
}
opts := resourceOption(func(ro *resourceOptions) {
ro.Aliases = aliases
ro.DependsOn = []func(ctx context.Context) (urnSet, error){
func(ctx context.Context) (urnSet, error) {
return dependencyURNs, nil
},
}
ro.Protect = req.GetProtect()
ro.Providers = providers
ro.Parent = parent
})
urn, state, err := constructF(pulumiCtx, req.GetType(), req.GetName(), inputs, opts)
if err != nil {
return nil, err
}
// Wait for async work to finish.
if err = pulumiCtx.wait(); err != nil {
return nil, err
}
rpcURN, _, _, err := urn.ToURNOutput().awaitURN(ctx)
if err != nil {
return nil, err
}
// Serialize all state properties, first by awaiting them, and then marshaling them to the requisite gRPC values.
resolvedProps, propertyDeps, _, err := marshalInputs(state)
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Marshal all properties for the RPC call.
keepUnknowns := req.GetDryRun()
rpcProps, err := plugin.MarshalProperties(
resolvedProps,
plugin.MarshalOptions{KeepSecrets: true, KeepUnknowns: keepUnknowns, KeepResources: pulumiCtx.keepResources})
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Convert the property dependencies map for RPC and remove duplicates.
rpcPropertyDeps := make(map[string]*pulumirpc.ConstructResponse_PropertyDependencies)
for k, deps := range propertyDeps {
sort.Slice(deps, func(i, j int) bool { return deps[i] < deps[j] })
urns := make([]string, 0, len(deps))
for i, d := range deps {
if i > 0 && urns[i-1] == string(d) {
continue
}
urns = append(urns, string(d))
}
rpcPropertyDeps[k] = &pulumirpc.ConstructResponse_PropertyDependencies{
Urns: urns,
}
}
return &pulumirpc.ConstructResponse{
Urn: string(rpcURN),
State: rpcProps,
StateDependencies: rpcPropertyDeps,
}, nil
}
// createProviderResource rehydrates the provider reference into a registered ProviderResource,
// otherwise it returns an instance of DependencyProviderResource.
func createProviderResource(ctx *Context, ref string) (ProviderResource, error) {
// Parse the URN and ID out of the provider reference.
lastSep := strings.LastIndex(ref, "::")
if lastSep == -1 {
return nil, errors.Errorf("expected '::' in provider reference %s", ref)
}
urn := ref[0:lastSep]
id := ref[lastSep+2:]
// Unmarshal the provider resource as a resource reference so we get back
// the intended provider type with its state, if it's been registered.
resource, err := unmarshalResourceReference(ctx, resource.ResourceReference{
URN: resource.URN(urn),
ID: resource.NewStringProperty(id),
})
if err != nil {
return nil, err
}
return resource.(ProviderResource), nil
}
type constructInput struct {
value resource.PropertyValue
deps []Resource
}
// constructInputsMap returns the inputs as a Map.
func constructInputsMap(ctx *Context, inputs map[string]interface{}) (Map, error) {
result := make(Map, len(inputs))
for k, v := range inputs {
ci := v.(*constructInput)
known := !ci.value.ContainsUnknowns()
value, secret, err := unmarshalPropertyValue(ctx, ci.value)
if err != nil {
return nil, errors.Wrapf(err, "unmarshaling input %s", k)
}
resultType := anyOutputType
if ot, ok := concreteTypeToOutputType.Load(reflect.TypeOf(value)); ok {
resultType = ot.(reflect.Type)
}
output := ctx.newOutput(resultType, ci.deps...)
output.getState().resolve(value, known, secret, nil)
result[k] = output
}
return result, nil
}
// constructInputsCopyTo sets the inputs on the given args struct.
func constructInputsCopyTo(ctx *Context, inputs map[string]interface{}, args interface{}) error {
if args == nil {
return errors.New("args must not be nil") | if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
return errors.New("args must be a pointer to a struct")
}
argsV, typ = argsV.Elem(), typ.Elem()
for k, v := range inputs {
ci := v.(*constructInput)
for i := 0; i < typ.NumField(); i++ {
fieldV := argsV.Field(i)
if !fieldV.CanSet() {
continue
}
field := typ.Field(i)
tag, has := field.Tag.Lookup("pulumi")
if !has || tag != k {
continue
}
handleField := func(typ reflect.Type, value resource.PropertyValue, deps []Resource) (reflect.Value, error) {
resultType := anyOutputType
if typ.Implements(outputType) {
resultType = typ
} else if typ.Implements(inputType) {
toOutputMethodName := "To" + strings.TrimSuffix(typ.Name(), "Input") + "Output"
if toOutputMethod, found := typ.MethodByName(toOutputMethodName); found {
mt := toOutputMethod.Type
if mt.NumIn() == 0 && mt.NumOut() == 1 && mt.Out(0).Implements(outputType) {
resultType = mt.Out(0)
}
}
}
output := ctx.newOutput(resultType, deps...)
dest := reflect.New(output.ElementType()).Elem()
known := !ci.value.ContainsUnknowns()
secret, err := unmarshalOutput(ctx, value, dest)
if err != nil {
return reflect.Value{}, err
}
output.getState().resolve(dest.Interface(), known, secret, nil)
return reflect.ValueOf(output), nil
}
isInputType := func(typ reflect.Type) bool {
return typ.Implements(outputType) || typ.Implements(inputType)
}
if isInputType(field.Type) {
val, err := handleField(field.Type, ci.value, ci.deps)
if err != nil {
return err
}
fieldV.Set(val)
continue
}
if field.Type.Kind() == reflect.Slice && isInputType(field.Type.Elem()) {
elemType := field.Type.Elem()
length := len(ci.value.ArrayValue())
dest := reflect.MakeSlice(field.Type, length, length)
for i := 0; i < length; i++ {
val, err := handleField(elemType, ci.value.ArrayValue()[i], ci.deps)
if err != nil {
return err
}
dest.Index(i).Set(val)
}
fieldV.Set(dest)
continue
}
if field.Type.Kind() == reflect.Map && isInputType(field.Type.Elem()) {
elemType := field.Type.Elem()
length := len(ci.value.ObjectValue())
dest := reflect.MakeMapWithSize(field.Type, length)
for k, v := range ci.value.ObjectValue() {
key := reflect.ValueOf(string(k))
val, err := handleField(elemType, v, ci.deps)
if err != nil {
return err
}
dest.SetMapIndex(key, val)
}
fieldV.Set(dest)
continue
}
if len(ci.deps) > 0 {
return errors.Errorf(
"%s.%s is typed as %v but must be typed as Input or Output for input %q with dependencies",
typ, field.Name, field.Type, k)
}
dest := reflect.New(field.Type).Elem()
secret, err := unmarshalOutput(ctx, ci.value, dest)
if err != nil {
return errors.Wrapf(err, "unmarshaling input %s", k)
}
if secret {
return errors.Errorf(
"%s.%s is typed as %v but must be typed as Input or Output for secret input %q",
typ, field.Name, field.Type, k)
}
fieldV.Set(reflect.ValueOf(dest.Interface()))
}
}
return nil
}
// newConstructResult converts a resource into its associated URN and state.
func newConstructResult(resource ComponentResource) (URNInput, Input, error) {
if resource == nil {
return nil, nil, errors.New("resource must not be nil")
}
resourceV := reflect.ValueOf(resource)
typ := resourceV.Type()
if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
return nil, nil, errors.New("resource must be a pointer to a struct")
}
resourceV, typ = resourceV.Elem(), typ.Elem()
state := make(Map)
for i := 0; i < typ.NumField(); i++ {
fieldV := resourceV.Field(i)
if !fieldV.CanInterface() {
continue
}
field := typ.Field(i)
tag, has := field.Tag.Lookup("pulumi")
if !has {
continue
}
val := fieldV.Interface()
if v, ok := val.(Input); ok {
state[tag] = v
} else {
state[tag] = ToOutput(val)
}
}
return resource.URN(), state, nil
}
type callFunc func(ctx *Context, tok string, args map[string]interface{}) (Input, error)
// call adapts the gRPC CallRequest/CallResponse to/from the Pulumi Go SDK programming model.
func call(ctx context.Context, req *pulumirpc.CallRequest, engineConn *grpc.ClientConn,
callF callFunc) (*pulumirpc.CallResponse, error) {
// Configure the RunInfo.
runInfo := RunInfo{
Project: req.GetProject(),
Stack: req.GetStack(),
Config: req.GetConfig(),
Parallel: int(req.GetParallel()),
DryRun: req.GetDryRun(),
MonitorAddr: req.GetMonitorEndpoint(),
engineConn: engineConn,
}
pulumiCtx, err := NewContext(ctx, runInfo)
if err != nil {
return nil, errors.Wrap(err, "constructing run context")
}
// Deserialize the inputs and apply appropriate dependencies.
argDependencies := req.GetArgDependencies()
deserializedArgs, err := plugin.UnmarshalProperties(
req.GetArgs(),
plugin.MarshalOptions{KeepSecrets: true, KeepResources: true, KeepUnknowns: req.GetDryRun()},
)
if err != nil {
return nil, errors.Wrap(err, "unmarshaling inputs")
}
args := make(map[string]interface{}, len(deserializedArgs))
for key, value := range deserializedArgs {
k := string(key)
var deps []Resource
if inputDeps, ok := argDependencies[k]; ok {
deps = make([]Resource, len(inputDeps.GetUrns()))
for i, depURN := range inputDeps.GetUrns() {
deps[i] = pulumiCtx.newDependencyResource(URN(depURN))
}
}
args[k] = &constructInput{
value: value,
deps: deps,
}
}
result, err := callF(pulumiCtx, req.GetTok(), args)
if err != nil {
return nil, err
}
// Wait for async work to finish.
if err = pulumiCtx.wait(); err != nil {
return nil, err
}
// Serialize all result properties, first by awaiting them, and then marshaling them to the requisite gRPC values.
resolvedProps, propertyDeps, _, err := marshalInputs(result)
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Marshal all properties for the RPC call.
keepUnknowns := req.GetDryRun()
rpcProps, err := plugin.MarshalProperties(
resolvedProps,
plugin.MarshalOptions{KeepSecrets: true, KeepUnknowns: keepUnknowns, KeepResources: pulumiCtx.keepResources})
if err != nil {
return nil, errors.Wrap(err, "marshaling properties")
}
// Convert the property dependencies map for RPC and remove duplicates.
rpcPropertyDeps := make(map[string]*pulumirpc.CallResponse_ReturnDependencies)
for k, deps := range propertyDeps {
sort.Slice(deps, func(i, j int) bool { return deps[i] < deps[j] })
urns := make([]string, 0, len(deps))
for i, d := range deps {
if i > 0 && urns[i-1] == string(d) {
continue
}
urns = append(urns, string(d))
}
rpcPropertyDeps[k] = &pulumirpc.CallResponse_ReturnDependencies{
Urns: urns,
}
}
return &pulumirpc.CallResponse{
Return: rpcProps,
ReturnDependencies: rpcPropertyDeps,
}, nil
}
// callArgsCopyTo sets the args on the given args struct. If there is a `__self__` argument, it will be
// returned, otherwise it will return nil.
func callArgsCopyTo(ctx *Context, source map[string]interface{}, args interface{}) (Resource, error) {
// Use the same implementation as construct.
if err := constructInputsCopyTo(ctx, source, args); err != nil {
return nil, err
}
// Retrieve the `__self__` arg.
self, err := callArgsSelf(ctx, source)
if err != nil {
return nil, err
}
return self, nil
}
// callArgsSelf retrieves the `__self__` argument. If `__self__` is present the value is returned,
// otherwise the returned value will be nil.
func callArgsSelf(ctx *Context, source map[string]interface{}) (Resource, error) {
v, ok := source["__self__"]
if !ok {
return nil, nil
}
ci := v.(*constructInput)
if ci.value.ContainsUnknowns() {
return nil, errors.New("__self__ is unknown")
}
value, secret, err := unmarshalPropertyValue(ctx, ci.value)
if err != nil {
return nil, errors.Wrap(err, "unmarshaling __self__")
}
if secret {
return nil, errors.New("__self__ is a secret")
}
return value.(Resource), nil
}
// newCallResult converts a result struct into an input Map that can be marshalled.
func newCallResult(result interface{}) (Input, error) {
if result == nil {
return nil, errors.New("result must not be nil")
}
resultV := reflect.ValueOf(result)
typ := resultV.Type()
if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
return nil, errors.New("result must be a pointer to a struct")
}
resultV, typ = resultV.Elem(), typ.Elem()
ret := make(Map)
for i := 0; i < typ.NumField(); i++ {
fieldV := resultV.Field(i)
if !fieldV.CanInterface() {
continue
}
field := typ.Field(i)
tag, has := field.Tag.Lookup("pulumi")
if !has {
continue
}
val := fieldV.Interface()
if v, ok := val.(Input); ok {
ret[tag] = v
} else {
ret[tag] = ToOutput(val)
}
}
return ret, nil
} | }
argsV := reflect.ValueOf(args)
typ := argsV.Type() | random_line_split |
provider.go | // Copyright 2016-2021, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pulumi
import (
"context"
"reflect"
"sort"
"strings"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin"
pulumirpc "github.com/pulumi/pulumi/sdk/v3/proto/go"
"google.golang.org/grpc"
)
type constructFunc func(ctx *Context, typ, name string, inputs map[string]interface{},
options ResourceOption) (URNInput, Input, error)
// construct adapts the gRPC ConstructRequest/ConstructResponse to/from the Pulumi Go SDK programming model.
//
// It builds a run context from the request, deserializes the inputs into
// constructInput values (carrying both the property value and its dependency
// resources), rebuilds the resource options (aliases, dependencies, providers,
// parent, protect), invokes constructF, waits for outstanding async work, and
// finally marshals the resulting URN and state into the RPC response.
func construct(ctx context.Context, req *pulumirpc.ConstructRequest, engineConn *grpc.ClientConn,
	constructF constructFunc) (*pulumirpc.ConstructResponse, error) {
	// Configure the RunInfo.
	runInfo := RunInfo{
		Project:          req.GetProject(),
		Stack:            req.GetStack(),
		Config:           req.GetConfig(),
		ConfigSecretKeys: req.GetConfigSecretKeys(),
		Parallel:         int(req.GetParallel()),
		DryRun:           req.GetDryRun(),
		MonitorAddr:      req.GetMonitorEndpoint(),
		engineConn:       engineConn,
	}
	pulumiCtx, err := NewContext(ctx, runInfo)
	if err != nil {
		return nil, errors.Wrap(err, "constructing run context")
	}
	// Deserialize the inputs and apply appropriate dependencies.
	inputDependencies := req.GetInputDependencies()
	deserializedInputs, err := plugin.UnmarshalProperties(
		req.GetInputs(),
		plugin.MarshalOptions{KeepSecrets: true, KeepResources: true, KeepUnknowns: req.GetDryRun()},
	)
	if err != nil {
		return nil, errors.Wrap(err, "unmarshaling inputs")
	}
	inputs := make(map[string]interface{}, len(deserializedInputs))
	for key, value := range deserializedInputs {
		k := string(key)
		var deps []Resource
		if inputDeps, ok := inputDependencies[k]; ok {
			deps = make([]Resource, len(inputDeps.GetUrns()))
			for i, depURN := range inputDeps.GetUrns() {
				deps[i] = pulumiCtx.newDependencyResource(URN(depURN))
			}
		}
		inputs[k] = &constructInput{
			value: value,
			deps:  deps,
		}
	}
	// Rebuild the resource options.
	aliases := make([]Alias, len(req.GetAliases()))
	for i, urn := range req.GetAliases() {
		aliases[i] = Alias{URN: URN(urn)}
	}
	dependencyURNs := urnSet{}
	for _, urn := range req.GetDependencies() {
		dependencyURNs.add(URN(urn))
	}
	providers := make(map[string]ProviderResource, len(req.GetProviders()))
	for pkg, ref := range req.GetProviders() {
		resource, err := createProviderResource(pulumiCtx, ref)
		if err != nil {
			return nil, err
		}
		providers[pkg] = resource
	}
	var parent Resource
	if req.GetParent() != "" {
		parent = pulumiCtx.newDependencyResource(URN(req.GetParent()))
	}
	opts := resourceOption(func(ro *resourceOptions) {
		ro.Aliases = aliases
		ro.DependsOn = []func(ctx context.Context) (urnSet, error){
			func(ctx context.Context) (urnSet, error) {
				return dependencyURNs, nil
			},
		}
		ro.Protect = req.GetProtect()
		ro.Providers = providers
		ro.Parent = parent
	})
	urn, state, err := constructF(pulumiCtx, req.GetType(), req.GetName(), inputs, opts)
	if err != nil {
		return nil, err
	}
	// Wait for async work to finish.
	if err = pulumiCtx.wait(); err != nil {
		return nil, err
	}
	rpcURN, _, _, err := urn.ToURNOutput().awaitURN(ctx)
	if err != nil {
		return nil, err
	}
	// Serialize all state properties, first by awaiting them, and then marshaling them to the requisite gRPC values.
	resolvedProps, propertyDeps, _, err := marshalInputs(state)
	if err != nil {
		return nil, errors.Wrap(err, "marshaling properties")
	}
	// Marshal all properties for the RPC call.
	keepUnknowns := req.GetDryRun()
	rpcProps, err := plugin.MarshalProperties(
		resolvedProps,
		plugin.MarshalOptions{KeepSecrets: true, KeepUnknowns: keepUnknowns, KeepResources: pulumiCtx.keepResources})
	if err != nil {
		return nil, errors.Wrap(err, "marshaling properties")
	}
	// Convert the property dependencies map for RPC and remove duplicates.
	rpcPropertyDeps := make(map[string]*pulumirpc.ConstructResponse_PropertyDependencies)
	for k, deps := range propertyDeps {
		sort.Slice(deps, func(i, j int) bool { return deps[i] < deps[j] })
		urns := make([]string, 0, len(deps))
		for i, d := range deps {
			// deps is sorted, so duplicates are adjacent. Compare against the
			// previous dep, NOT urns[i-1]: once any element has been skipped,
			// urns is shorter than i and indexing urns by i-1 would panic with
			// an index-out-of-range error (e.g. deps == [a, a, b]).
			if i > 0 && deps[i-1] == d {
				continue
			}
			urns = append(urns, string(d))
		}
		rpcPropertyDeps[k] = &pulumirpc.ConstructResponse_PropertyDependencies{
			Urns: urns,
		}
	}
	return &pulumirpc.ConstructResponse{
		Urn:               string(rpcURN),
		State:             rpcProps,
		StateDependencies: rpcPropertyDeps,
	}, nil
}
// createProviderResource rehydrates the provider reference into a registered
// ProviderResource, otherwise it returns an instance of DependencyProviderResource.
//
// A provider reference has the form "<URN>::<ID>".
func createProviderResource(ctx *Context, ref string) (ProviderResource, error) {
	// Parse the URN and ID out of the provider reference.
	lastSep := strings.LastIndex(ref, "::")
	if lastSep == -1 {
		return nil, errors.Errorf("expected '::' in provider reference %s", ref)
	}
	urn := ref[0:lastSep]
	id := ref[lastSep+2:]
	// Unmarshal the provider resource as a resource reference so we get back
	// the intended provider type with its state, if it's been registered.
	// The local is named "res" so it does not shadow the imported "resource"
	// package used for the reference types below.
	res, err := unmarshalResourceReference(ctx, resource.ResourceReference{
		URN: resource.URN(urn),
		ID:  resource.NewStringProperty(id),
	})
	if err != nil {
		return nil, err
	}
	// Guard the type assertion: return an error instead of panicking if the
	// reference unexpectedly resolves to a non-provider resource.
	provider, ok := res.(ProviderResource)
	if !ok {
		return nil, errors.Errorf("expected %s to resolve to a ProviderResource", ref)
	}
	return provider, nil
}
// constructInput pairs a deserialized property value with the resources it
// depends on; the pair is later turned into an Output so that dependency
// information survives the gRPC boundary.
type constructInput struct {
	value resource.PropertyValue // raw (possibly secret or unknown) property value
	deps  []Resource             // resources this input depends on
}
// constructInputsMap returns the inputs as a Map.
//
// Each input is unmarshaled to its plain value, wrapped in an Output of the
// most specific registered output type, and resolved immediately with the
// value's known/secret flags and its dependency resources attached.
func constructInputsMap(ctx *Context, inputs map[string]interface{}) (Map, error) {
	out := make(Map, len(inputs))
	for name, raw := range inputs {
		in := raw.(*constructInput)
		isKnown := !in.value.ContainsUnknowns()
		plain, isSecret, err := unmarshalPropertyValue(ctx, in.value)
		if err != nil {
			return nil, errors.Wrapf(err, "unmarshaling input %s", name)
		}
		// Prefer the output type registered for this concrete value type;
		// fall back to AnyOutput when none has been registered.
		outputType := anyOutputType
		if t, ok := concreteTypeToOutputType.Load(reflect.TypeOf(plain)); ok {
			outputType = t.(reflect.Type)
		}
		o := ctx.newOutput(outputType, in.deps...)
		o.getState().resolve(plain, isKnown, isSecret, nil)
		out[name] = o
	}
	return out, nil
}
// constructInputsCopyTo sets the inputs on the given args struct.
//
// args must be a non-nil pointer to a struct. For every input key, the
// settable field whose `pulumi` tag matches the key is populated:
//   - Input/Output-typed fields (and slices/maps with Input/Output elements)
//     are wrapped in Outputs that carry the input's dependencies and
//     secret/known flags.
//   - Plain-typed fields are unmarshaled directly, which is only legal when
//     the input has no dependencies and is not secret.
func constructInputsCopyTo(ctx *Context, inputs map[string]interface{}, args interface{}) error {
	if args == nil {
		return errors.New("args must not be nil")
	}
	argsV := reflect.ValueOf(args)
	typ := argsV.Type()
	if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
		return errors.New("args must be a pointer to a struct")
	}
	argsV, typ = argsV.Elem(), typ.Elem()
	for k, v := range inputs {
		ci := v.(*constructInput)
		for i := 0; i < typ.NumField(); i++ {
			fieldV := argsV.Field(i)
			if !fieldV.CanSet() {
				continue
			}
			field := typ.Field(i)
			tag, has := field.Tag.Lookup("pulumi")
			if !has || tag != k {
				continue
			}
			// handleField wraps one property value in an Output of the most
			// specific type derivable from typ, attaching deps and resolving
			// it immediately.
			handleField := func(typ reflect.Type, value resource.PropertyValue, deps []Resource) (reflect.Value, error) {
				resultType := anyOutputType
				if typ.Implements(outputType) {
					resultType = typ
				} else if typ.Implements(inputType) {
					// For an input type named FooInput, look for a
					// ToFooOutput() method to discover the output type.
					toOutputMethodName := "To" + strings.TrimSuffix(typ.Name(), "Input") + "Output"
					if toOutputMethod, found := typ.MethodByName(toOutputMethodName); found {
						mt := toOutputMethod.Type
						if mt.NumIn() == 0 && mt.NumOut() == 1 && mt.Out(0).Implements(outputType) {
							resultType = mt.Out(0)
						}
					}
				}
				output := ctx.newOutput(resultType, deps...)
				dest := reflect.New(output.ElementType()).Elem()
				// NOTE(review): known-ness is computed from the whole input
				// (ci.value), not from the element `value` being handled —
				// presumably intentional for nested unknowns; confirm.
				known := !ci.value.ContainsUnknowns()
				secret, err := unmarshalOutput(ctx, value, dest)
				if err != nil {
					return reflect.Value{}, err
				}
				output.getState().resolve(dest.Interface(), known, secret, nil)
				return reflect.ValueOf(output), nil
			}
			isInputType := func(typ reflect.Type) bool {
				return typ.Implements(outputType) || typ.Implements(inputType)
			}
			// Case 1: the field itself is an Input/Output.
			if isInputType(field.Type) {
				val, err := handleField(field.Type, ci.value, ci.deps)
				if err != nil {
					return err
				}
				fieldV.Set(val)
				continue
			}
			// Case 2: slice of Inputs/Outputs — wrap each element.
			if field.Type.Kind() == reflect.Slice && isInputType(field.Type.Elem()) {
				elemType := field.Type.Elem()
				length := len(ci.value.ArrayValue())
				dest := reflect.MakeSlice(field.Type, length, length)
				for i := 0; i < length; i++ {
					val, err := handleField(elemType, ci.value.ArrayValue()[i], ci.deps)
					if err != nil {
						return err
					}
					dest.Index(i).Set(val)
				}
				fieldV.Set(dest)
				continue
			}
			// Case 3: map with Input/Output values — wrap each value.
			if field.Type.Kind() == reflect.Map && isInputType(field.Type.Elem()) {
				elemType := field.Type.Elem()
				length := len(ci.value.ObjectValue())
				dest := reflect.MakeMapWithSize(field.Type, length)
				for k, v := range ci.value.ObjectValue() {
					key := reflect.ValueOf(string(k))
					val, err := handleField(elemType, v, ci.deps)
					if err != nil {
						return err
					}
					dest.SetMapIndex(key, val)
				}
				fieldV.Set(dest)
				continue
			}
			// Case 4: plain-typed field. Dependencies cannot be represented
			// on a plain value, so their presence is an error.
			if len(ci.deps) > 0 {
				return errors.Errorf(
					"%s.%s is typed as %v but must be typed as Input or Output for input %q with dependencies",
					typ, field.Name, field.Type, k)
			}
			dest := reflect.New(field.Type).Elem()
			secret, err := unmarshalOutput(ctx, ci.value, dest)
			if err != nil {
				return errors.Wrapf(err, "unmarshaling input %s", k)
			}
			// Secret values must flow through Outputs so secret-ness is preserved.
			if secret {
				return errors.Errorf(
					"%s.%s is typed as %v but must be typed as Input or Output for secret input %q",
					typ, field.Name, field.Type, k)
			}
			fieldV.Set(reflect.ValueOf(dest.Interface()))
		}
	}
	return nil
}
// newConstructResult converts a resource into its associated URN and state.
func newConstructResult(resource ComponentResource) (URNInput, Input, error) |
type callFunc func(ctx *Context, tok string, args map[string]interface{}) (Input, error)
// call adapts the gRPC CallRequest/CallResponse to/from the Pulumi Go SDK programming model.
//
// It mirrors construct: the args are deserialized into constructInput values
// (value + dependencies), callF is invoked, async work is awaited, and the
// result properties plus their per-property dependencies are marshaled back
// into the RPC response.
func call(ctx context.Context, req *pulumirpc.CallRequest, engineConn *grpc.ClientConn,
	callF callFunc) (*pulumirpc.CallResponse, error) {
	// Configure the RunInfo.
	runInfo := RunInfo{
		Project:     req.GetProject(),
		Stack:       req.GetStack(),
		Config:      req.GetConfig(),
		Parallel:    int(req.GetParallel()),
		DryRun:      req.GetDryRun(),
		MonitorAddr: req.GetMonitorEndpoint(),
		engineConn:  engineConn,
	}
	pulumiCtx, err := NewContext(ctx, runInfo)
	if err != nil {
		return nil, errors.Wrap(err, "constructing run context")
	}
	// Deserialize the inputs and apply appropriate dependencies.
	argDependencies := req.GetArgDependencies()
	deserializedArgs, err := plugin.UnmarshalProperties(
		req.GetArgs(),
		plugin.MarshalOptions{KeepSecrets: true, KeepResources: true, KeepUnknowns: req.GetDryRun()},
	)
	if err != nil {
		return nil, errors.Wrap(err, "unmarshaling inputs")
	}
	args := make(map[string]interface{}, len(deserializedArgs))
	for key, value := range deserializedArgs {
		k := string(key)
		var deps []Resource
		if inputDeps, ok := argDependencies[k]; ok {
			deps = make([]Resource, len(inputDeps.GetUrns()))
			for i, depURN := range inputDeps.GetUrns() {
				deps[i] = pulumiCtx.newDependencyResource(URN(depURN))
			}
		}
		args[k] = &constructInput{
			value: value,
			deps:  deps,
		}
	}
	result, err := callF(pulumiCtx, req.GetTok(), args)
	if err != nil {
		return nil, err
	}
	// Wait for async work to finish.
	if err = pulumiCtx.wait(); err != nil {
		return nil, err
	}
	// Serialize all result properties, first by awaiting them, and then marshaling them to the requisite gRPC values.
	resolvedProps, propertyDeps, _, err := marshalInputs(result)
	if err != nil {
		return nil, errors.Wrap(err, "marshaling properties")
	}
	// Marshal all properties for the RPC call.
	keepUnknowns := req.GetDryRun()
	rpcProps, err := plugin.MarshalProperties(
		resolvedProps,
		plugin.MarshalOptions{KeepSecrets: true, KeepUnknowns: keepUnknowns, KeepResources: pulumiCtx.keepResources})
	if err != nil {
		return nil, errors.Wrap(err, "marshaling properties")
	}
	// Convert the property dependencies map for RPC and remove duplicates.
	rpcPropertyDeps := make(map[string]*pulumirpc.CallResponse_ReturnDependencies)
	for k, deps := range propertyDeps {
		sort.Slice(deps, func(i, j int) bool { return deps[i] < deps[j] })
		urns := make([]string, 0, len(deps))
		for i, d := range deps {
			// deps is sorted, so duplicates are adjacent. Compare against the
			// previous dep, NOT urns[i-1]: once any element has been skipped,
			// urns is shorter than i and indexing urns by i-1 would panic with
			// an index-out-of-range error (e.g. deps == [a, a, b]).
			if i > 0 && deps[i-1] == d {
				continue
			}
			urns = append(urns, string(d))
		}
		rpcPropertyDeps[k] = &pulumirpc.CallResponse_ReturnDependencies{
			Urns: urns,
		}
	}
	return &pulumirpc.CallResponse{
		Return:             rpcProps,
		ReturnDependencies: rpcPropertyDeps,
	}, nil
}
// callArgsCopyTo sets the args on the given args struct. If there is a `__self__` argument, it will be
// returned, otherwise it will return nil.
func callArgsCopyTo(ctx *Context, source map[string]interface{}, args interface{}) (Resource, error) {
	// Copying call args works exactly like copying construct inputs.
	if err := constructInputsCopyTo(ctx, source, args); err != nil {
		return nil, err
	}
	// Extract the receiver, if one was supplied.
	return callArgsSelf(ctx, source)
}
// callArgsSelf retrieves the `__self__` argument. If `__self__` is present the value is returned,
// otherwise the returned value will be nil.
//
// An error is returned if `__self__` is unknown, secret, or not a Resource.
func callArgsSelf(ctx *Context, source map[string]interface{}) (Resource, error) {
	v, ok := source["__self__"]
	if !ok {
		return nil, nil
	}
	ci := v.(*constructInput)
	if ci.value.ContainsUnknowns() {
		return nil, errors.New("__self__ is unknown")
	}
	value, secret, err := unmarshalPropertyValue(ctx, ci.value)
	if err != nil {
		return nil, errors.Wrap(err, "unmarshaling __self__")
	}
	if secret {
		return nil, errors.New("__self__ is a secret")
	}
	// Guard the type assertion: return an error rather than panicking if the
	// unmarshaled value is unexpectedly not a Resource.
	self, ok := value.(Resource)
	if !ok {
		return nil, errors.New("__self__ is not a Resource")
	}
	return self, nil
}
// newCallResult converts a result struct into an input Map that can be marshalled.
//
// result must be a non-nil pointer to a struct; every exported field carrying
// a `pulumi` tag contributes one entry keyed by the tag.
func newCallResult(result interface{}) (Input, error) {
	if result == nil {
		return nil, errors.New("result must not be nil")
	}
	rv := reflect.ValueOf(result)
	rt := rv.Type()
	if rt.Kind() != reflect.Ptr || rt.Elem().Kind() != reflect.Struct {
		return nil, errors.New("result must be a pointer to a struct")
	}
	rv, rt = rv.Elem(), rt.Elem()
	out := make(Map)
	for i := 0; i < rt.NumField(); i++ {
		fv := rv.Field(i)
		// Unexported fields cannot be read via reflection; skip them.
		if !fv.CanInterface() {
			continue
		}
		// Only fields with a `pulumi` tag participate in the result.
		name, tagged := rt.Field(i).Tag.Lookup("pulumi")
		if !tagged {
			continue
		}
		switch val := fv.Interface().(type) {
		case Input:
			out[name] = val
		default:
			out[name] = ToOutput(val)
		}
	}
	return out, nil
}
| {
if resource == nil {
return nil, nil, errors.New("resource must not be nil")
}
resourceV := reflect.ValueOf(resource)
typ := resourceV.Type()
if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
return nil, nil, errors.New("resource must be a pointer to a struct")
}
resourceV, typ = resourceV.Elem(), typ.Elem()
state := make(Map)
for i := 0; i < typ.NumField(); i++ {
fieldV := resourceV.Field(i)
if !fieldV.CanInterface() {
continue
}
field := typ.Field(i)
tag, has := field.Tag.Lookup("pulumi")
if !has {
continue
}
val := fieldV.Interface()
if v, ok := val.(Input); ok {
state[tag] = v
} else {
state[tag] = ToOutput(val)
}
}
return resource.URN(), state, nil
} | identifier_body |
build_server.go | package buildserver
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net/http"
"net/url"
"path"
pathpkg "path"
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"time"
multierror "github.com/hashicorp/go-multierror"
"github.com/die-net/lrucache"
"github.com/gregjones/httpcache"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/ctxvfs"
"github.com/sourcegraph/go-langserver/gosrc"
"github.com/sourcegraph/go-langserver/langserver"
"github.com/sourcegraph/go-langserver/langserver/util"
"github.com/sourcegraph/go-langserver/pkg/lsp"
lspext "github.com/sourcegraph/go-lsp/lspext"
"github.com/sourcegraph/jsonrpc2"
)
// Debug, if true, causes extra logging information to be printed
// (per-request ">>>"/"<<<" tracing of every JSON-RPC message in Handle).
var Debug = true
// NewHandler creates a new build server wrapping a (also newly
// created) Go language server. I.e., it creates a BuildHandler
// wrapping a LangHandler. The two handlers share a file system (in
// memory).
//
// The build server is responsible for things such as fetching
// dependencies, setting up the right file system structure and paths,
// and mapping local file system paths to logical URIs (e.g.,
// /goroot/src/fmt/print.go ->
// git://github.com/golang/go?go1.7.1#src/fmt/print.go).
func NewHandler(defaultCfg langserver.Config) *BuildHandler {
	// A non-positive MaxParallelism is a programming error; fail loudly.
	if defaultCfg.MaxParallelism <= 0 {
		panic(fmt.Sprintf("langserver.Config.MaxParallelism must be at least 1 (got %d)", defaultCfg.MaxParallelism))
	}
	// The build handler and the wrapped language handler share state (notably
	// the in-memory file system) via the same HandlerShared instance.
	shared := &langserver.HandlerShared{Shared: true}
	h := &BuildHandler{
		HandlerShared: shared,
		lang: &langserver.LangHandler{
			HandlerShared: shared,
			DefaultConfig: defaultCfg,
		},
	}
	// Route package lookups through the build handler's cached finder.
	shared.FindPackage = h.findPackageCached
	return h
}
// BuildHandler is a Go build server LSP/JSON-RPC handler that wraps a
// Go language server handler.
type BuildHandler struct {
	lang *langserver.LangHandler // the wrapped Go language server handler
	mu   sync.Mutex              // guards init and the per-initialize state reset in reset()
	depURLMutex *keyMutex // per-key mutexes (presumably keyed by dependency URL; usage not shown here)
	gopathDeps     []*gosrc.Directory // dependency directories mounted into the GOPATH
	pinnedDepsOnce sync.Once
	pinnedDeps     pinnedPkgs
	findPkgMu sync.Mutex // guards findPkg
	findPkg   map[findPkgKey]*findPkgValue
	langserver.HandlerCommon
	*langserver.HandlerShared
	init            *lspext.InitializeParams // set by "initialize" request
	originalRootURI *url.URL                 // derived from InitializeParams.OriginalRootURI
	rootImportPath  string                   // root import path of the workspace (e.g., "github.com/foo/bar")
	cachingClient   *http.Client             // http.Client with a cache backed by an in-memory LRU cache
	closers         []io.Closer              // values to dispose of when Close() is called
	// Whether URIs in the same workspace begin with:
	// - `file://` (true)
	// - `git://` (false)
	// This affects URI rewriting between the client and server.
	clientUsesFileSchemeWithinWorkspace bool
}
// reset clears all internal state in h.
//
// Both h.mu and h.findPkgMu are held for the duration so no request can
// observe a partially reset handler.
func (h *BuildHandler) reset(init *lspext.InitializeParams, conn *jsonrpc2.Conn, rootURI lsp.DocumentURI) error {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.findPkgMu.Lock()
	defer h.findPkgMu.Unlock()
	// Reset the embedded common/shared handler state first.
	if err := h.HandlerCommon.Reset(rootURI); err != nil {
		return err
	}
	if err := h.HandlerShared.Reset(false); err != nil {
		return err
	}
	h.init = init
	var err error
	h.originalRootURI, err = url.Parse(string(h.init.OriginalRootURI))
	// An absent or unparseable OriginalRootURI is treated as "none".
	if h.init.OriginalRootURI == "" || err != nil {
		h.originalRootURI = nil
	}
	// 100 MiB cache, no age-based eviction
	h.cachingClient = &http.Client{Transport: httpcache.NewTransport(lrucache.New(100*1024*1024, 0))}
	h.depURLMutex = newKeyMutex()
	h.gopathDeps = nil
	h.pinnedDepsOnce = sync.Once{}
	h.pinnedDeps = nil
	h.findPkg = nil
	return nil
}
// Handle dispatches a single LSP/JSON-RPC request: it handles lifecycle and
// workspace/x* methods itself and forwards everything else to the wrapped
// language server, rewriting URIs between the client's view and the GOPATH
// layout in both directions.
func (h *BuildHandler) Handle(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) (result interface{}, err error) {
	// Prevent any uncaught panics from taking the entire server down.
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("unexpected panic: %v", r)
			// Same as net/http
			const size = 64 << 10
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			log.Printf("panic serving %v: %v\n%s", req.Method, r, buf)
			return
		}
	}()
	// Only "initialize" is allowed before initialization has completed.
	h.mu.Lock()
	if req.Method != "initialize" && h.init == nil {
		h.mu.Unlock()
		return nil, errors.New("server must be initialized")
	}
	h.mu.Unlock()
	if err := h.CheckReady(); err != nil {
		if req.Method == "exit" {
			err = nil
		}
		return nil, err
	}
	h.InitTracer(conn)
	span, ctx, err := h.SpanForRequest(ctx, "build", req, opentracing.Tags{"mode": "go"})
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			ext.Error.Set(span, true)
			span.LogFields(otlog.Error(err))
		}
		span.Finish()
	}()
	// Request/response tracing (the deferred func reads the named result).
	if Debug && h.init != nil {
		var b []byte
		if req.Params != nil && !req.Notif {
			b = []byte(*req.Params)
		}
		log.Printf(">>> %s %s %s %s", h.init.OriginalRootURI, req.ID, req.Method, string(b))
		defer func(t time.Time) {
			resultJSON, err := json.Marshal(result)
			var resultOrError string
			if err == nil {
				resultOrError = string(resultJSON)
			} else {
				resultOrError = err.Error()
			}
			log.Printf("<<< %s %s %s %dms %s", h.init.OriginalRootURI, req.ID, req.Method, time.Since(t).Nanoseconds()/int64(time.Millisecond), resultOrError)
		}(time.Now())
	}
	switch {
	case req.Method == "initialize":
		if h.init != nil {
			return nil, errors.New("build server is already initialized")
		}
		if req.Params == nil {
			return nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}
		}
		var params lspext.InitializeParams
		if err := json.Unmarshal(*req.Params, &params); err != nil {
			return nil, err
		}
		// In the `rootUri`, clients can send either:
		//
		// - A `file://` URI, which indicates that:
		//   - Same-workspace file paths will also be `file://` URIs
		//   - Out-of-workspace file paths will be `git://` URIs
		//   - `originalRootUri` is present
		// - A `git://` URI, which indicates that:
		//   - Both same-workspace and out-of-workspace file paths will be non-`file://` URIs
		//   - `originalRootUri` is absent and `rootUri` contains the original root URI
		if strings.HasPrefix(string(params.RootURI), "file://") {
			h.clientUsesFileSchemeWithinWorkspace = true
		} else {
			params.OriginalRootURI = params.RootURI
			params.RootURI = "file:///"
			h.clientUsesFileSchemeWithinWorkspace = false
		}
		if Debug {
			var b []byte
			if req.Params != nil {
				b = []byte(*req.Params)
			}
			log.Printf(">>> %s %s %s %s", params.OriginalRootURI, req.ID, req.Method, string(b))
			defer func(t time.Time) {
				log.Printf("<<< %s %s %s %dms", params.OriginalRootURI, req.ID, req.Method, time.Since(t).Nanoseconds()/int64(time.Millisecond))
			}(time.Now())
		}
		// Determine the root import path of this workspace (e.g., "github.com/user/repo").
		span.SetTag("originalRootPath", params.OriginalRootURI)
		fs, closer, err := RemoteFS(ctx, params)
		if err != nil {
			return nil, err
		}
		h.closers = append(h.closers, closer)
		langInitParams, err := determineEnvironment(ctx, fs, params)
		if err != nil {
			return nil, err
		}
		log.Printf("Detected root import path %q for %q", langInitParams.RootImportPath, params.OriginalRootURI)
		h.rootImportPath = langInitParams.RootImportPath
		if err := h.reset(&params, conn, langInitParams.Root()); err != nil {
			return nil, err
		}
		// Mount the workspace file system at its GOPATH location.
		rootPath := strings.TrimPrefix(string(langInitParams.Root()), "file://")
		h.FS.Bind(rootPath, fs, "/", ctxvfs.BindAfter)
		var langInitResp lsp.InitializeResult
		if err := h.callLangServer(ctx, conn, req.Method, req.ID, langInitParams, &langInitResp); err != nil {
			return nil, err
		}
		return langInitResp, nil
	case req.Method == "shutdown":
		h.ShutDown()
		return nil, nil
	case req.Method == "exit":
		conn.Close()
		return nil, nil
	case req.Method == "$/cancelRequest":
		// Our caching layer is pretty bad, and can easily be poisoned
		// if we cancel something. So we do not pass on cancellation
		// requests.
		return nil, nil
	case req.Method == "workspace/xpackages":
		return h.handleWorkspacePackages(ctx, conn, req)
	case req.Method == "workspace/xdependencies":
		// The same as h.fetchAndSendDepsOnce except it operates locally to the
		// request.
		fetchAndSendDepsOnces := make(map[string]*sync.Once) // key is file URI
		localFetchAndSendDepsOnce := func(fileURI string) *sync.Once {
			once, ok := fetchAndSendDepsOnces[fileURI]
			if !ok {
				once = new(sync.Once)
				fetchAndSendDepsOnces[fileURI] = once
			}
			return once
		}
		var (
			mu              sync.Mutex // protects finalReferences and references below
			finalReferences []*lspext.DependencyReference
			references      = make(map[string]*lspext.DependencyReference)
		)
		emitRef := func(path string, r goDependencyReference) {
			// If the _reference_ to a definition is made from inside a
			// vendored package, or from outside of the repository itself,
			// exclude it.
			if util.IsVendorDir(path) || !util.PathHasPrefix(path, h.RootFSPath) {
				return
			}
			// If the package being referenced is defined in the repo, and
			// it is NOT a vendor package, then exclude it.
			if !r.vendor && util.PathHasPrefix(filepath.Join(gopath, "src", r.absolute), h.RootFSPath) {
				return
			}
			newURI, err := h.rewriteURIFromLangServer(lsp.DocumentURI("file://" + path))
			if err != nil {
				log.Printf("error rewriting URI from language server: %s", err)
				return
			}
			mu.Lock()
			defer mu.Unlock()
			existing, ok := references[r.absolute]
			if !ok {
				// Create a new dependency reference.
				ref := &lspext.DependencyReference{
					Attributes: r.attributes(),
					Hints: map[string]interface{}{
						"dirs": []string{string(newURI)},
					},
				}
				finalReferences = append(finalReferences, ref)
				references[r.absolute] = ref
				return
			}
			// Append to the existing dependency reference's dirs list.
			dirs := existing.Hints["dirs"].([]string)
			dirs = append(dirs, string(newURI))
			existing.Hints["dirs"] = dirs
		}
		// We need every transitive dependency, for every Go package in the
		// repository.
		var (
			w  = ctxvfs.Walk(ctx, h.RootFSPath, h.FS)
			dc = newDepCache()
		)
		dc.collectReferences = true
		for w.Step() {
			if path.Ext(w.Path()) == ".go" {
				d := path.Dir(w.Path())
				// Fetch each directory's deps at most once per request.
				localFetchAndSendDepsOnce(d).Do(func() {
					if err := h.fetchTransitiveDepsOfFile(ctx, lsp.DocumentURI("file://"+d), dc); err != nil {
						log.Printf("Warning: fetching deps for dir %s: %s.", d, err)
					}
				})
			}
		}
		dc.references(emitRef, 1)
		return finalReferences, nil
	default:
		// Pass the request onto the lang server.
		// Rewrite URI fields in params to refer to file paths inside
		// the GOPATH at the appropriate import path directory. E.g.:
		//
		//   file:///dir/file.go -> file:///src/github.com/user/repo/dir/file.go
		var urisInRequest []lsp.DocumentURI // rewritten
		var params interface{}
		if req.Params != nil {
			if err := json.Unmarshal(*req.Params, &params); err != nil {
				return nil, err
			}
		}
		rewriteURIFromClient := func(uri lsp.DocumentURI) lsp.DocumentURI {
			var path string
			if h.clientUsesFileSchemeWithinWorkspace {
				if !strings.HasPrefix(string(uri), "file:///") {
					return uri // refers to a resource outside of this workspace
				}
				path = strings.TrimPrefix(string(uri), "file://")
			} else {
				currentURL, err := url.Parse(string(uri))
				if err != nil {
					return uri
				}
				if h.originalRootURI == nil {
					return uri
				}
				// The in-workspace path is carried in the URI fragment.
				path = currentURL.Fragment
				currentURL.Fragment = ""
				if *currentURL != *h.originalRootURI {
					return uri // refers to a resource outside of this workspace
				}
			}
			path = pathpkg.Join(h.RootFSPath, path)
			if !util.PathHasPrefix(path, h.RootFSPath) {
				panic(fmt.Sprintf("file path %q must have prefix %q (file URI is %q, root URI is %q)", path, h.RootFSPath, uri, h.init.RootPath))
			}
			newURI := lsp.DocumentURI("file://" + path)
			urisInRequest = append(urisInRequest, newURI) // collect
			return newURI
		}
		lspext.WalkURIFields(params, nil, rewriteURIFromClient)
		// Store back to req.Params to avoid 2 different versions of the data.
		if req.Params != nil {
			b, err := json.Marshal(params)
			if err != nil {
				return nil, err
			}
			req.Params = (*json.RawMessage)(&b)
		}
		// Immediately handle notifications. We do not have a response
		// to rewrite, so we can pass it on directly and avoid the
		// cost of marshalling again. NOTE: FS operations are frequent
		// and are notifications.
		if req.Notif {
			wrappedConn := &jsonrpc2ConnImpl{rewriteURI: h.rewriteURIFromLangServer, conn: conn}
			// Avoid extracting the tracer again, it is already attached to ctx.
			req.Meta = nil
			return h.lang.Handle(ctx, wrappedConn, req)
		}
		// workspace/symbol queries must have their `dir:` query filter
		// rewritten for github.com/golang/go due to its specialized directory
		// structure. e.g. `dir:src/net/http` should work, but the language
		// server will expect `dir:net/http` as any real/valid Go project will
		// have package paths align with the directory structure.
		if req.Method == "workspace/symbol" && strings.HasPrefix(string(h.init.OriginalRootURI), "git://github.com/golang/go") {
			var wsparams lspext.WorkspaceSymbolParams
			if err := json.Unmarshal(*req.Params, &wsparams); err != nil {
				return nil, err
			}
			q := langserver.ParseQuery(wsparams.Query)
			if q.Filter == langserver.FilterDir {
				// If the query does not start with `src/` and it is a request
				// for a stdlib dir, it should return no results (the filter is
				// dir, not package path).
				if gosrc.IsStdlibPkg(q.Dir) && !strings.HasPrefix(q.Dir, "src") {
					q.Dir = "sginvalid"
				} else {
					q.Dir = util.PathTrimPrefix(q.Dir, "src") // "src/net/http" -> "net/http"
				}
			}
			wsparams.Query = q.String()
			b, err := json.Marshal(wsparams)
			if err != nil {
				return nil, err
			}
			req.Params = (*json.RawMessage)(&b)
		}
		if req.Method == "workspace/xreferences" {
			// Parse the parameters and if a dirs hint is present, rewrite the
			// URIs.
			var p lspext.WorkspaceReferencesParams
			if err := json.Unmarshal(*req.Params, &p); err != nil {
				return nil, err
			}
			dirsHint, haveDirsHint := p.Hints["dirs"]
			if haveDirsHint {
				dirs := dirsHint.([]interface{})
				for i, dir := range dirs {
					dirs[i] = rewriteURIFromClient(lsp.DocumentURI(dir.(string)))
				}
				// Arbitrarily chosen limit on the number of directories that
				// may be searched by workspace/xreferences. Large repositories
				// like kubernetes would simply take too long (>15s) to fetch
				// their dependencies and typecheck them otherwise. This number
				// was chosen as a 'sweet-spot' based on kubernetes solely.
				if len(dirs) > 15 {
					dirs = dirs[:15]
				}
				// NOTE(review): this assignment to dirsHint is never read
				// again; p.Hints["dirs"] below is what takes effect.
				dirsHint = dirs
				p.Hints["dirs"] = dirs
				b, err := json.Marshal(p)
				if err != nil {
					return nil, err
				}
				req.Params = (*json.RawMessage)(&b)
			}
		}
		// This local `result` intentionally shadows the named return; the
		// deferred Debug logger observes the named return set below.
		var result interface{}
		if err := h.callLangServer(ctx, conn, req.Method, req.ID, req.Params, &result); err != nil {
			return nil, err
		}
		// (Un-)rewrite URI fields in the result. E.g.:
		//
		//   file:///src/github.com/user/repo/dir/file.go -> file:///dir/file.go
		var walkErr error
		lspext.WalkURIFields(result, nil, func(uri lsp.DocumentURI) lsp.DocumentURI {
			// HACK: Work around https://github.com/sourcegraph/sourcegraph/issues/10541 by
			// converting uri == "file://" (which is actually an empty URI in the langserver result)
			// to "file:///" instead of emitting an error. This will likely cause the result to be displayed
			// with an error on the client, but it's better than the whole
			// textDocument/implementation request failing.
			if req.Method == "textDocument/implementation" && (uri == "" || uri == "file://") {
				return "file:///"
			}
			newURI, err := h.rewriteURIFromLangServer(uri)
			if err != nil {
				walkErr = err
			}
			return newURI
		})
		if walkErr != nil {
			return nil, fmt.Errorf("%s (in Go language server response)", walkErr)
		}
		return result, nil
	}
}
// rewriteURIFromLangServer converts a URI produced by the wrapped Go
// language server (which refers to paths on the shared in-memory file
// system, e.g. file:///src/github.com/user/repo/dir/file.go) back into
// the form the client expects: a file:/// URI relative to the workspace,
// the client's original root URI with a fragment, or a vcs://repo?rev#path
// URI for files belonging to other repositories.
//
// Relative and non-file URIs yield an error; file URIs under the GOPATH
// that match no known dependency are returned with an "unresolved:"
// scheme instead of an error.
func (h *BuildHandler) rewriteURIFromLangServer(uri lsp.DocumentURI) (lsp.DocumentURI, error) {
	u, err := url.Parse(string(uri))
	if err != nil {
		return "", err
	}
	if !u.IsAbs() {
		return "", fmt.Errorf("invalid relative URI %q", u)
	}
	switch u.Scheme {
	case "file":
		if !filepath.IsAbs(u.Path) {
			return "", fmt.Errorf("invalid relative file path in URI %q", uri)
		}

		// Refers to a file in the Go stdlib?
		if util.PathHasPrefix(u.Path, goroot) {
			fileInGoStdlib := util.PathTrimPrefix(u.Path, goroot)
			if h.rootImportPath == "" {
				if h.clientUsesFileSchemeWithinWorkspace {
					// The workspace is the Go stdlib and this refers to
					// something in the Go stdlib, so let's use file:///
					// so that the client adds our current rev, instead
					// of using runtime.Version() (which is not
					// necessarily the commit of the Go stdlib we're
					// analyzing).
					return lsp.DocumentURI("file:///" + fileInGoStdlib), nil
				}
				if h.originalRootURI == nil {
					return uri, nil
				}
				newURI, _ := url.Parse(h.originalRootURI.String())
				newURI.Fragment = fileInGoStdlib
				return lsp.DocumentURI(newURI.String()), nil
			}
			return lsp.DocumentURI("git://github.com/golang/go?" + gosrc.RuntimeVersion + "#" + fileInGoStdlib), nil
		}

		// Refers to a file in the same workspace?
		if util.PathHasPrefix(u.Path, h.RootFSPath) {
			if h.clientUsesFileSchemeWithinWorkspace {
				pathInThisWorkspace := util.PathTrimPrefix(u.Path, h.RootFSPath)
				return lsp.DocumentURI("file:///" + pathInThisWorkspace), nil
			}
			if h.originalRootURI == nil {
				return uri, nil
			}
			newURI, _ := url.Parse(h.originalRootURI.String())
			newURI.Fragment = util.PathTrimPrefix(u.Path, h.RootFSPath)
			return lsp.DocumentURI(newURI.String()), nil
		}

		// Refers to a file in the GOPATH (that's from another repo)?
		if gopathSrcDir := path.Join(gopath, "src"); util.PathHasPrefix(u.Path, gopathSrcDir) {
			p := util.PathTrimPrefix(u.Path, gopathSrcDir) // "github.com/foo/bar/baz/qux.go"

			// Go through the list of directories we have mounted. We make
			// a copy instead of holding the lock in the for loop to avoid
			// holding the lock for longer than necessary.
			h.HandlerShared.Mu.Lock()
			deps := make([]*gosrc.Directory, len(h.gopathDeps))
			copy(deps, h.gopathDeps)
			h.HandlerShared.Mu.Unlock()

			// p is loop-invariant, so split it once up front.
			pathComponents := strings.Split(p, "/")
			var d *gosrc.Directory
			for _, dep := range deps {
				depComponents := strings.Split(dep.ProjectRoot, "/")
				// BUGFIX: guard the slice expression below — a dependency
				// whose project root has more path components than p used
				// to cause a slice-out-of-range panic.
				if len(depComponents) <= len(pathComponents) && reflect.DeepEqual(pathComponents[:len(depComponents)], depComponents) {
					d = dep
				}
			}
			if d != nil {
				rev := d.Rev
				if rev == "" {
					rev = "HEAD"
				}
				i := strings.Index(d.CloneURL, "://")
				if i >= 0 {
					repo := d.CloneURL[i+len("://"):]
					path := strings.TrimPrefix(strings.TrimPrefix(p, d.ProjectRoot), "/")

					// HACK
					// In some cases, we see import paths of the form "blah/blah.git" or "blah/blah.git/blah/blah".
					// The name for the repository containing such a package is "blah/blah", so we strip the ".git"
					// from the location URI here. In addition, we strip any leading ".git/" from the path that
					// might get added as a side-effect of stripping the suffix.
					repo = strings.TrimSuffix(repo, ".git")
					path = strings.TrimPrefix(path, ".git/")
					return lsp.DocumentURI(fmt.Sprintf("%s://%s?%s#%s", d.VCS, repo, rev, path)), nil
				}
			}
		}
		return lsp.DocumentURI("unresolved:" + u.Path), nil
	default:
		return "", fmt.Errorf("invalid non-file URI %q", uri)
	}
}
// callLangServer forwards the (usually modified) request to the wrapped Go
// language server and decodes the response into result. Do not send
// notifications via this interface — pass the jsonrpc2.Request directly to
// h.lang.Handle instead.
//
// The call bypasses the JSON-RPC wire protocol (it is dispatched in-memory
// for simplicity and speed) but otherwise behaves exactly as if the peer
// language server were remote.
func (h *BuildHandler) callLangServer(ctx context.Context, conn *jsonrpc2.Conn, method string, id jsonrpc2.ID, params, result interface{}) error {
	// Build the in-memory JSON-RPC request for the wrapped server.
	wireReq := jsonrpc2.Request{ID: id, Method: method}
	if err := wireReq.SetParams(params); err != nil {
		return err
	}

	// The proxy connection un-rewrites URIs in anything the language
	// server sends back over it.
	proxyConn := &jsonrpc2ConnImpl{rewriteURI: h.rewriteURIFromLangServer, conn: conn}
	raw, err := h.lang.Handle(ctx, proxyConn, &wireReq)
	if err != nil {
		return err
	}

	// Round-trip the result through JSON rather than handing over the
	// interface{} value directly, so the build and language servers keep
	// behaving as if they lived in separate memory spaces.
	encoded, err := json.Marshal(raw)
	if err != nil {
		return err
	}
	if result == nil {
		return nil
	}
	return json.Unmarshal(encoded, result)
}
// Close implements io.Closer. It disposes of every resource registered in
// h.closers during initialization, attempting all of them even when some fail.
// NOTE(review): the stray "|" tokens on the signature and closing lines look
// like extraction residue, not Go — restore before compiling.
func (h *BuildHandler) Close() error | {
	var result error
	for _, closer := range h.closers {
		err := closer.Close()
		if err != nil {
			// Accumulate every failure with go-multierror instead of
			// returning on the first one.
			result = multierror.Append(result, err)
		}
	}
	return result
} | identifier_body | |
build_server.go | package buildserver
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net/http"
"net/url"
"path"
pathpkg "path"
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"time"
multierror "github.com/hashicorp/go-multierror"
"github.com/die-net/lrucache"
"github.com/gregjones/httpcache"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/ctxvfs"
"github.com/sourcegraph/go-langserver/gosrc"
"github.com/sourcegraph/go-langserver/langserver"
"github.com/sourcegraph/go-langserver/langserver/util"
"github.com/sourcegraph/go-langserver/pkg/lsp"
lspext "github.com/sourcegraph/go-lsp/lspext"
"github.com/sourcegraph/jsonrpc2"
)
// Debug if true will cause extra logging information to be printed
// (Handle traces every request and response while it is set).
var Debug = true
// NewHandler creates a new build server wrapping a (also newly created) Go
// language server — i.e. a BuildHandler around a LangHandler — with the two
// handlers sharing one in-memory file system.
//
// The build server is responsible for things such as fetching dependencies,
// setting up the right file system structure and paths, and mapping local
// file system paths to logical URIs (e.g., /goroot/src/fmt/print.go ->
// git://github.com/golang/go?go1.7.1#src/fmt/print.go).
func NewHandler(defaultCfg langserver.Config) *BuildHandler {
	// A parallelism below 1 can never make progress, so treat it as a
	// programmer error up front.
	if defaultCfg.MaxParallelism <= 0 {
		panic(fmt.Sprintf("langserver.Config.MaxParallelism must be at least 1 (got %d)", defaultCfg.MaxParallelism))
	}

	shared := &langserver.HandlerShared{Shared: true}
	wrapped := &langserver.LangHandler{
		HandlerShared: shared,
		DefaultConfig: defaultCfg,
	}
	bh := &BuildHandler{
		HandlerShared: shared,
		lang:          wrapped,
	}
	shared.FindPackage = bh.findPackageCached
	return bh
}
// BuildHandler is a Go build server LSP/JSON-RPC handler that wraps a
// Go language server handler.
type BuildHandler struct {
	lang *langserver.LangHandler // the wrapped Go language server handler

	mu             sync.Mutex         // guards init and the state cleared by reset()
	depURLMutex    *keyMutex          // per-key locks; presumably serializes dependency fetches by URL — TODO confirm
	gopathDeps     []*gosrc.Directory // dependency dirs mounted under GOPATH/src (read under HandlerShared.Mu)
	pinnedDepsOnce sync.Once          // presumably guards one-time loading of pinnedDeps — TODO confirm
	pinnedDeps     pinnedPkgs
	findPkgMu      sync.Mutex // guards findPkg
	findPkg        map[findPkgKey]*findPkgValue // presumably a cache for findPackageCached — TODO confirm
	langserver.HandlerCommon
	*langserver.HandlerShared
	init            *lspext.InitializeParams // set by "initialize" request
	originalRootURI *url.URL                 // derived from InitializeParams.OriginalRootURI
	rootImportPath  string                   // root import path of the workspace (e.g., "github.com/foo/bar")
	cachingClient   *http.Client             // http.Client with a cache backed by an in-memory LRU cache
	closers         []io.Closer              // values to dispose of when Close() is called

	// Whether URIs in the same workspace begin with:
	// - `file://` (true)
	// - `git://` (false)
	// This affects URI rewriting between the client and server.
	clientUsesFileSchemeWithinWorkspace bool
}
// reset clears all internal state in h and installs the given initialize
// params and root URI. It is called while servicing "initialize".
// The conn parameter is currently unused.
func (h *BuildHandler) reset(init *lspext.InitializeParams, conn *jsonrpc2.Conn, rootURI lsp.DocumentURI) error {
	// Hold both locks for the duration: reset touches state guarded by each.
	h.mu.Lock()
	defer h.mu.Unlock()
	h.findPkgMu.Lock()
	defer h.findPkgMu.Unlock()
	if err := h.HandlerCommon.Reset(rootURI); err != nil {
		return err
	}
	if err := h.HandlerShared.Reset(false); err != nil {
		return err
	}
	h.init = init
	var err error
	h.originalRootURI, err = url.Parse(string(h.init.OriginalRootURI))
	// An absent or unparseable originalRootUri is treated as "none".
	if h.init.OriginalRootURI == "" || err != nil {
		h.originalRootURI = nil
	}
	// 100 MiB cache, no age-based eviction
	h.cachingClient = &http.Client{Transport: httpcache.NewTransport(lrucache.New(100*1024*1024, 0))}
	h.depURLMutex = newKeyMutex()
	h.gopathDeps = nil
	h.pinnedDepsOnce = sync.Once{}
	h.pinnedDeps = nil
	h.findPkg = nil
	return nil
}
// Handle implements the jsonrpc2 handler for the build server. It services
// "initialize" itself (mounting the remote FS and detecting the workspace),
// answers shutdown/exit/cancel, workspace/xpackages and
// workspace/xdependencies directly, and proxies all other requests to the
// wrapped Go language server after rewriting URIs between the client's form
// and the in-memory GOPATH layout.
func (h *BuildHandler) Handle(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) (result interface{}, err error) {
	// Prevent any uncaught panics from taking the entire server down.
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("unexpected panic: %v", r)

			// Same as net/http
			const size = 64 << 10
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			log.Printf("panic serving %v: %v\n%s", req.Method, r, buf)
			return
		}
	}()

	// Reject everything except "initialize" until we have been initialized.
	h.mu.Lock()
	if req.Method != "initialize" && h.init == nil {
		h.mu.Unlock()
		return nil, errors.New("server must be initialized")
	}
	h.mu.Unlock()
	if err := h.CheckReady(); err != nil {
		// "exit" is always permitted, even after shutdown.
		if req.Method == "exit" {
			err = nil
		}
		return nil, err
	}

	// Tracing: one span per request; mark it failed if we return an error.
	h.InitTracer(conn)
	span, ctx, err := h.SpanForRequest(ctx, "build", req, opentracing.Tags{"mode": "go"})
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			ext.Error.Set(span, true)
			span.LogFields(otlog.Error(err))
		}
		span.Finish()
	}()

	// Debug tracing of request and (deferred) response, with timing.
	if Debug && h.init != nil {
		var b []byte
		if req.Params != nil && !req.Notif {
			b = []byte(*req.Params)
		}
		log.Printf(">>> %s %s %s %s", h.init.OriginalRootURI, req.ID, req.Method, string(b))
		defer func(t time.Time) {
			resultJSON, err := json.Marshal(result)
			var resultOrError string
			if err == nil {
				resultOrError = string(resultJSON)
			} else {
				resultOrError = err.Error()
			}
			log.Printf("<<< %s %s %s %dms %s", h.init.OriginalRootURI, req.ID, req.Method, time.Since(t).Nanoseconds()/int64(time.Millisecond), resultOrError)
		}(time.Now())
	}

	switch {
	case req.Method == "initialize":
		if h.init != nil {
			return nil, errors.New("build server is already initialized")
		}
		if req.Params == nil {
			return nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}
		}
		var params lspext.InitializeParams
		if err := json.Unmarshal(*req.Params, &params); err != nil {
			return nil, err
		}

		// In the `rootUri`, clients can send either:
		//
		// - A `file://` URI, which indicates that:
		//   - Same-workspace file paths will also be `file://` URIs
		//   - Out-of-workspace file paths will be `git://` URIs
		//   - `originalRootUri` is present
		// - A `git://` URI, which indicates that:
		//   - Both same-workspace and out-of-workspace file paths will be non-`file://` URIs
		//   - `originalRootUri` is absent and `rootUri` contains the original root URI
		if strings.HasPrefix(string(params.RootURI), "file://") {
			h.clientUsesFileSchemeWithinWorkspace = true
		} else {
			params.OriginalRootURI = params.RootURI
			params.RootURI = "file:///"
			h.clientUsesFileSchemeWithinWorkspace = false
		}
		// h.init is still nil here, so the generic Debug block above did not
		// log this request; do it now that the root URI is known.
		if Debug {
			var b []byte
			if req.Params != nil {
				b = []byte(*req.Params)
			}
			log.Printf(">>> %s %s %s %s", params.OriginalRootURI, req.ID, req.Method, string(b))
			defer func(t time.Time) {
				log.Printf("<<< %s %s %s %dms", params.OriginalRootURI, req.ID, req.Method, time.Since(t).Nanoseconds()/int64(time.Millisecond))
			}(time.Now())
		}

		// Determine the root import path of this workspace (e.g., "github.com/user/repo").
		span.SetTag("originalRootPath", params.OriginalRootURI)
		fs, closer, err := RemoteFS(ctx, params)
		if err != nil {
			return nil, err
		}
		h.closers = append(h.closers, closer)
		langInitParams, err := determineEnvironment(ctx, fs, params)
		if err != nil {
			return nil, err
		}
		log.Printf("Detected root import path %q for %q", langInitParams.RootImportPath, params.OriginalRootURI)
		h.rootImportPath = langInitParams.RootImportPath
		if err := h.reset(&params, conn, langInitParams.Root()); err != nil {
			return nil, err
		}
		// Mount the remote FS at the workspace root of the shared file system.
		rootPath := strings.TrimPrefix(string(langInitParams.Root()), "file://")
		h.FS.Bind(rootPath, fs, "/", ctxvfs.BindAfter)

		// Initialize the wrapped language server as well.
		var langInitResp lsp.InitializeResult
		if err := h.callLangServer(ctx, conn, req.Method, req.ID, langInitParams, &langInitResp); err != nil {
			return nil, err
		}
		return langInitResp, nil

	case req.Method == "shutdown":
		h.ShutDown()
		return nil, nil

	case req.Method == "exit":
		conn.Close()
		return nil, nil

	case req.Method == "$/cancelRequest":
		// Our caching layer is pretty bad, and can easily be poisoned
		// if we cancel something. So we do not pass on cancellation
		// requests.
		return nil, nil

	case req.Method == "workspace/xpackages":
		return h.handleWorkspacePackages(ctx, conn, req)

	case req.Method == "workspace/xdependencies":
		// The same as h.fetchAndSendDepsOnce except it operates locally to the
		// request.
		fetchAndSendDepsOnces := make(map[string]*sync.Once) // key is file URI
		localFetchAndSendDepsOnce := func(fileURI string) *sync.Once {
			once, ok := fetchAndSendDepsOnces[fileURI]
			if !ok {
				once = new(sync.Once)
				fetchAndSendDepsOnces[fileURI] = once
			}
			return once
		}

		var (
			mu              sync.Mutex
			finalReferences []*lspext.DependencyReference
			references      = make(map[string]*lspext.DependencyReference)
		)
		// emitRef folds one raw dependency reference into the (deduplicated
		// by absolute package path) references map / finalReferences slice.
		emitRef := func(path string, r goDependencyReference) {
			// If the _reference_ to a definition is made from inside a
			// vendored package, or from outside of the repository itself,
			// exclude it.
			if util.IsVendorDir(path) || !util.PathHasPrefix(path, h.RootFSPath) {
				return
			}
			// If the package being referenced is defined in the repo, and
			// it is NOT a vendor package, then exclude it.
			if !r.vendor && util.PathHasPrefix(filepath.Join(gopath, "src", r.absolute), h.RootFSPath) {
				return
			}
			newURI, err := h.rewriteURIFromLangServer(lsp.DocumentURI("file://" + path))
			if err != nil {
				log.Printf("error rewriting URI from language server: %s", err)
				return
			}
			mu.Lock()
			defer mu.Unlock()
			existing, ok := references[r.absolute]
			if !ok {
				// Create a new dependency reference.
				ref := &lspext.DependencyReference{
					Attributes: r.attributes(),
					Hints: map[string]interface{}{
						"dirs": []string{string(newURI)},
					},
				}
				finalReferences = append(finalReferences, ref)
				references[r.absolute] = ref
				return
			}
			// Append to the existing dependency reference's dirs list.
			dirs := existing.Hints["dirs"].([]string)
			dirs = append(dirs, string(newURI))
			existing.Hints["dirs"] = dirs
		}

		// We need every transitive dependency, for every Go package in the
		// repository.
		var (
			w  = ctxvfs.Walk(ctx, h.RootFSPath, h.FS)
			dc = newDepCache()
		)
		dc.collectReferences = true
		for w.Step() {
			if path.Ext(w.Path()) == ".go" {
				d := path.Dir(w.Path())
				// Fetch each directory's transitive deps at most once.
				localFetchAndSendDepsOnce(d).Do(func() {
					if err := h.fetchTransitiveDepsOfFile(ctx, lsp.DocumentURI("file://"+d), dc); err != nil {
						log.Printf("Warning: fetching deps for dir %s: %s.", d, err)
					}
				})
			}
		}
		dc.references(emitRef, 1)
		return finalReferences, nil

	default:
		// Pass the request onto the lang server.

		// Rewrite URI fields in params to refer to file paths inside
		// the GOPATH at the appropriate import path directory. E.g.:
		//
		//   file:///dir/file.go -> file:///src/github.com/user/repo/dir/file.go
		var urisInRequest []lsp.DocumentURI // rewritten
		var params interface{}
		if req.Params != nil {
			if err := json.Unmarshal(*req.Params, &params); err != nil {
				return nil, err
			}
		}
		rewriteURIFromClient := func(uri lsp.DocumentURI) lsp.DocumentURI {
			var path string
			if h.clientUsesFileSchemeWithinWorkspace {
				if !strings.HasPrefix(string(uri), "file:///") {
					return uri // refers to a resource outside of this workspace
				}
				path = strings.TrimPrefix(string(uri), "file://")
			} else {
				currentURL, err := url.Parse(string(uri))
				if err != nil {
					return uri
				}
				if h.originalRootURI == nil {
					return uri
				}
				// The workspace-relative path travels in the URI fragment.
				path = currentURL.Fragment
				currentURL.Fragment = ""
				if *currentURL != *h.originalRootURI {
					return uri // refers to a resource outside of this workspace
				}
			}
			path = pathpkg.Join(h.RootFSPath, path)
			if !util.PathHasPrefix(path, h.RootFSPath) {
				panic(fmt.Sprintf("file path %q must have prefix %q (file URI is %q, root URI is %q)", path, h.RootFSPath, uri, h.init.RootPath))
			}
			newURI := lsp.DocumentURI("file://" + path)
			urisInRequest = append(urisInRequest, newURI) // collect
			return newURI
		}
		lspext.WalkURIFields(params, nil, rewriteURIFromClient)

		// Store back to req.Params to avoid 2 different versions of the data.
		if req.Params != nil {
			b, err := json.Marshal(params)
			if err != nil {
				return nil, err
			}
			req.Params = (*json.RawMessage)(&b)
		}

		// Immediately handle notifications. We do not have a response
		// to rewrite, so we can pass it on directly and avoid the
		// cost of marshalling again. NOTE: FS operations are frequent
		// and are notifications.
		if req.Notif {
			wrappedConn := &jsonrpc2ConnImpl{rewriteURI: h.rewriteURIFromLangServer, conn: conn}
			// Avoid extracting the tracer again, it is already attached to ctx.
			req.Meta = nil
			return h.lang.Handle(ctx, wrappedConn, req)
		}

		// workspace/symbol queries must have their `dir:` query filter
		// rewritten for github.com/golang/go due to its specialized directory
		// structure. e.g. `dir:src/net/http` should work, but the language
		// server will expect `dir:net/http` as any real/valid Go project will
		// have package paths align with the directory structure.
		if req.Method == "workspace/symbol" && strings.HasPrefix(string(h.init.OriginalRootURI), "git://github.com/golang/go") {
			var wsparams lspext.WorkspaceSymbolParams
			if err := json.Unmarshal(*req.Params, &wsparams); err != nil {
				return nil, err
			}
			q := langserver.ParseQuery(wsparams.Query)
			if q.Filter == langserver.FilterDir {
				// If the query does not start with `src/` and it is a request
				// for a stdlib dir, it should return no results (the filter is
				// dir, not package path).
				if gosrc.IsStdlibPkg(q.Dir) && !strings.HasPrefix(q.Dir, "src") {
					q.Dir = "sginvalid"
				} else {
					q.Dir = util.PathTrimPrefix(q.Dir, "src") // "src/net/http" -> "net/http"
				}
			}
			wsparams.Query = q.String()
			b, err := json.Marshal(wsparams)
			if err != nil {
				return nil, err
			}
			req.Params = (*json.RawMessage)(&b)
		}

		if req.Method == "workspace/xreferences" {
			// Parse the parameters and if a dirs hint is present, rewrite the
			// URIs.
			// NOTE(review): the stray "|" on the next line looks like
			// extraction residue (two statements fused onto one line) —
			// restore before compiling.
			var p lspext.WorkspaceReferencesParams | if err := json.Unmarshal(*req.Params, &p); err != nil {
				return nil, err
			}
			dirsHint, haveDirsHint := p.Hints["dirs"]
			if haveDirsHint {
				dirs := dirsHint.([]interface{})
				for i, dir := range dirs {
					dirs[i] = rewriteURIFromClient(lsp.DocumentURI(dir.(string)))
				}
				// Arbitrarily chosen limit on the number of directories that
				// may be searched by workspace/xreferences. Large repositories
				// like kubernetes would simply take too long (>15s) to fetch
				// their dependencies and typecheck them otherwise. This number
				// was chosen as a 'sweet-spot' based on kubernetes solely.
				if len(dirs) > 15 {
					dirs = dirs[:15]
				}
				dirsHint = dirs
				p.Hints["dirs"] = dirs
				b, err := json.Marshal(p)
				if err != nil {
					return nil, err
				}
				req.Params = (*json.RawMessage)(&b)
			}
		}

		var result interface{}
		if err := h.callLangServer(ctx, conn, req.Method, req.ID, req.Params, &result); err != nil {
			return nil, err
		}

		// (Un-)rewrite URI fields in the result. E.g.:
		//
		//   file:///src/github.com/user/repo/dir/file.go -> file:///dir/file.go
		var walkErr error
		lspext.WalkURIFields(result, nil, func(uri lsp.DocumentURI) lsp.DocumentURI {
			// HACK: Work around https://github.com/sourcegraph/sourcegraph/issues/10541 by
			// converting uri == "file://" (which is actually an empty URI in the langserver result)
			// to "file:///" instead of emitting an error. This will likely cause the result to be displayed
			// with an error on the client, but it's better than the whole
			// textDocument/implementation request failing.
			if req.Method == "textDocument/implementation" && (uri == "" || uri == "file://") {
				return "file:///"
			}
			newURI, err := h.rewriteURIFromLangServer(uri)
			if err != nil {
				walkErr = err
			}
			return newURI
		})
		if walkErr != nil {
			return nil, fmt.Errorf("%s (in Go language server response)", walkErr)
		}
		return result, nil
	}
}
// rewriteURIFromLangServer converts a URI produced by the wrapped Go
// language server (which refers to paths on the shared in-memory file
// system, e.g. file:///src/github.com/user/repo/dir/file.go) back into
// the form the client expects: a file:/// URI relative to the workspace,
// the client's original root URI with a fragment, or a vcs://repo?rev#path
// URI for files belonging to other repositories.
//
// Relative and non-file URIs yield an error; file URIs under the GOPATH
// that match no known dependency are returned with an "unresolved:"
// scheme instead of an error.
func (h *BuildHandler) rewriteURIFromLangServer(uri lsp.DocumentURI) (lsp.DocumentURI, error) {
	u, err := url.Parse(string(uri))
	if err != nil {
		return "", err
	}
	if !u.IsAbs() {
		return "", fmt.Errorf("invalid relative URI %q", u)
	}
	switch u.Scheme {
	case "file":
		if !filepath.IsAbs(u.Path) {
			return "", fmt.Errorf("invalid relative file path in URI %q", uri)
		}

		// Refers to a file in the Go stdlib?
		if util.PathHasPrefix(u.Path, goroot) {
			fileInGoStdlib := util.PathTrimPrefix(u.Path, goroot)
			if h.rootImportPath == "" {
				if h.clientUsesFileSchemeWithinWorkspace {
					// The workspace is the Go stdlib and this refers to
					// something in the Go stdlib, so let's use file:///
					// so that the client adds our current rev, instead
					// of using runtime.Version() (which is not
					// necessarily the commit of the Go stdlib we're
					// analyzing).
					return lsp.DocumentURI("file:///" + fileInGoStdlib), nil
				}
				if h.originalRootURI == nil {
					return uri, nil
				}
				newURI, _ := url.Parse(h.originalRootURI.String())
				newURI.Fragment = fileInGoStdlib
				return lsp.DocumentURI(newURI.String()), nil
			}
			return lsp.DocumentURI("git://github.com/golang/go?" + gosrc.RuntimeVersion + "#" + fileInGoStdlib), nil
		}

		// Refers to a file in the same workspace?
		if util.PathHasPrefix(u.Path, h.RootFSPath) {
			if h.clientUsesFileSchemeWithinWorkspace {
				pathInThisWorkspace := util.PathTrimPrefix(u.Path, h.RootFSPath)
				return lsp.DocumentURI("file:///" + pathInThisWorkspace), nil
			}
			if h.originalRootURI == nil {
				return uri, nil
			}
			newURI, _ := url.Parse(h.originalRootURI.String())
			newURI.Fragment = util.PathTrimPrefix(u.Path, h.RootFSPath)
			return lsp.DocumentURI(newURI.String()), nil
		}

		// Refers to a file in the GOPATH (that's from another repo)?
		if gopathSrcDir := path.Join(gopath, "src"); util.PathHasPrefix(u.Path, gopathSrcDir) {
			p := util.PathTrimPrefix(u.Path, gopathSrcDir) // "github.com/foo/bar/baz/qux.go"

			// Go through the list of directories we have mounted. We make
			// a copy instead of holding the lock in the for loop to avoid
			// holding the lock for longer than necessary.
			h.HandlerShared.Mu.Lock()
			deps := make([]*gosrc.Directory, len(h.gopathDeps))
			copy(deps, h.gopathDeps)
			h.HandlerShared.Mu.Unlock()

			// p is loop-invariant, so split it once up front.
			pathComponents := strings.Split(p, "/")
			var d *gosrc.Directory
			for _, dep := range deps {
				depComponents := strings.Split(dep.ProjectRoot, "/")
				// BUGFIX: guard the slice expression below — a dependency
				// whose project root has more path components than p used
				// to cause a slice-out-of-range panic.
				if len(depComponents) <= len(pathComponents) && reflect.DeepEqual(pathComponents[:len(depComponents)], depComponents) {
					d = dep
				}
			}
			if d != nil {
				rev := d.Rev
				if rev == "" {
					rev = "HEAD"
				}
				i := strings.Index(d.CloneURL, "://")
				if i >= 0 {
					repo := d.CloneURL[i+len("://"):]
					path := strings.TrimPrefix(strings.TrimPrefix(p, d.ProjectRoot), "/")

					// HACK
					// In some cases, we see import paths of the form "blah/blah.git" or "blah/blah.git/blah/blah".
					// The name for the repository containing such a package is "blah/blah", so we strip the ".git"
					// from the location URI here. In addition, we strip any leading ".git/" from the path that
					// might get added as a side-effect of stripping the suffix.
					repo = strings.TrimSuffix(repo, ".git")
					path = strings.TrimPrefix(path, ".git/")
					return lsp.DocumentURI(fmt.Sprintf("%s://%s?%s#%s", d.VCS, repo, rev, path)), nil
				}
			}
		}
		return lsp.DocumentURI("unresolved:" + u.Path), nil
	default:
		return "", fmt.Errorf("invalid non-file URI %q", uri)
	}
}
// callLangServer forwards the (usually modified) request to the wrapped Go
// language server and decodes the response into result. Do not send
// notifications via this interface — pass the jsonrpc2.Request directly to
// h.lang.Handle instead.
//
// The call bypasses the JSON-RPC wire protocol (it is dispatched in-memory
// for simplicity and speed) but otherwise behaves exactly as if the peer
// language server were remote.
func (h *BuildHandler) callLangServer(ctx context.Context, conn *jsonrpc2.Conn, method string, id jsonrpc2.ID, params, result interface{}) error {
	// Build the in-memory JSON-RPC request for the wrapped server.
	wireReq := jsonrpc2.Request{ID: id, Method: method}
	if err := wireReq.SetParams(params); err != nil {
		return err
	}

	// The proxy connection un-rewrites URIs in anything the language
	// server sends back over it.
	proxyConn := &jsonrpc2ConnImpl{rewriteURI: h.rewriteURIFromLangServer, conn: conn}
	raw, err := h.lang.Handle(ctx, proxyConn, &wireReq)
	if err != nil {
		return err
	}

	// Round-trip the result through JSON rather than handing over the
	// interface{} value directly, so the build and language servers keep
	// behaving as if they lived in separate memory spaces.
	encoded, err := json.Marshal(raw)
	if err != nil {
		return err
	}
	if result == nil {
		return nil
	}
	return json.Unmarshal(encoded, result)
}
// Close implements io.Closer. It disposes of every resource registered in
// h.closers during initialization, attempting all of them even when some fail.
// NOTE(review): the trailing "| random_line_split |" on the closing line looks
// like extraction residue, not Go — restore before compiling.
func (h *BuildHandler) Close() error {
	var result error
	for _, closer := range h.closers {
		err := closer.Close()
		if err != nil {
			// Accumulate every failure with go-multierror instead of
			// returning on the first one.
			result = multierror.Append(result, err)
		}
	}
	return result
} | random_line_split | |
build_server.go | package buildserver
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net/http"
"net/url"
"path"
pathpkg "path"
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"time"
multierror "github.com/hashicorp/go-multierror"
"github.com/die-net/lrucache"
"github.com/gregjones/httpcache"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/ctxvfs"
"github.com/sourcegraph/go-langserver/gosrc"
"github.com/sourcegraph/go-langserver/langserver"
"github.com/sourcegraph/go-langserver/langserver/util"
"github.com/sourcegraph/go-langserver/pkg/lsp"
lspext "github.com/sourcegraph/go-lsp/lspext"
"github.com/sourcegraph/jsonrpc2"
)
// Debug if true will cause extra logging information to be printed
// (Handle traces every request and response while it is set).
var Debug = true
// NewHandler creates a new build server wrapping a (also newly created) Go
// language server — i.e. a BuildHandler around a LangHandler — with the two
// handlers sharing one in-memory file system.
//
// The build server is responsible for things such as fetching dependencies,
// setting up the right file system structure and paths, and mapping local
// file system paths to logical URIs (e.g., /goroot/src/fmt/print.go ->
// git://github.com/golang/go?go1.7.1#src/fmt/print.go).
func NewHandler(defaultCfg langserver.Config) *BuildHandler {
	// A parallelism below 1 can never make progress, so treat it as a
	// programmer error up front.
	if defaultCfg.MaxParallelism <= 0 {
		panic(fmt.Sprintf("langserver.Config.MaxParallelism must be at least 1 (got %d)", defaultCfg.MaxParallelism))
	}

	shared := &langserver.HandlerShared{Shared: true}
	wrapped := &langserver.LangHandler{
		HandlerShared: shared,
		DefaultConfig: defaultCfg,
	}
	bh := &BuildHandler{
		HandlerShared: shared,
		lang:          wrapped,
	}
	shared.FindPackage = bh.findPackageCached
	return bh
}
// BuildHandler is a Go build server LSP/JSON-RPC handler that wraps a
// Go language server handler.
type BuildHandler struct {
	lang *langserver.LangHandler // the wrapped Go language server handler

	mu             sync.Mutex         // guards init and the state cleared by reset()
	depURLMutex    *keyMutex          // per-key locks; presumably serializes dependency fetches by URL — TODO confirm
	gopathDeps     []*gosrc.Directory // dependency dirs mounted under GOPATH/src (read under HandlerShared.Mu)
	pinnedDepsOnce sync.Once          // presumably guards one-time loading of pinnedDeps — TODO confirm
	pinnedDeps     pinnedPkgs
	findPkgMu      sync.Mutex // guards findPkg
	findPkg        map[findPkgKey]*findPkgValue // presumably a cache for findPackageCached — TODO confirm
	langserver.HandlerCommon
	*langserver.HandlerShared
	init            *lspext.InitializeParams // set by "initialize" request
	originalRootURI *url.URL                 // derived from InitializeParams.OriginalRootURI
	rootImportPath  string                   // root import path of the workspace (e.g., "github.com/foo/bar")
	cachingClient   *http.Client             // http.Client with a cache backed by an in-memory LRU cache
	closers         []io.Closer              // values to dispose of when Close() is called

	// Whether URIs in the same workspace begin with:
	// - `file://` (true)
	// - `git://` (false)
	// This affects URI rewriting between the client and server.
	clientUsesFileSchemeWithinWorkspace bool
}
// reset clears all internal state in h and installs the given initialize
// params and root URI. It is called while servicing "initialize".
// The conn parameter is currently unused.
func (h *BuildHandler) reset(init *lspext.InitializeParams, conn *jsonrpc2.Conn, rootURI lsp.DocumentURI) error {
	// Hold both locks for the duration: reset touches state guarded by each.
	h.mu.Lock()
	defer h.mu.Unlock()
	h.findPkgMu.Lock()
	defer h.findPkgMu.Unlock()
	if err := h.HandlerCommon.Reset(rootURI); err != nil {
		return err
	}
	if err := h.HandlerShared.Reset(false); err != nil {
		return err
	}
	h.init = init
	var err error
	h.originalRootURI, err = url.Parse(string(h.init.OriginalRootURI))
	// An absent or unparseable originalRootUri is treated as "none".
	if h.init.OriginalRootURI == "" || err != nil {
		h.originalRootURI = nil
	}
	// 100 MiB cache, no age-based eviction
	h.cachingClient = &http.Client{Transport: httpcache.NewTransport(lrucache.New(100*1024*1024, 0))}
	h.depURLMutex = newKeyMutex()
	h.gopathDeps = nil
	h.pinnedDepsOnce = sync.Once{}
	h.pinnedDeps = nil
	h.findPkg = nil
	return nil
}
func (h *BuildHandler) | (ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) (result interface{}, err error) {
// Prevent any uncaught panics from taking the entire server down.
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("unexpected panic: %v", r)
// Same as net/http
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("panic serving %v: %v\n%s", req.Method, r, buf)
return
}
}()
h.mu.Lock()
if req.Method != "initialize" && h.init == nil {
h.mu.Unlock()
return nil, errors.New("server must be initialized")
}
h.mu.Unlock()
if err := h.CheckReady(); err != nil {
if req.Method == "exit" {
err = nil
}
return nil, err
}
h.InitTracer(conn)
span, ctx, err := h.SpanForRequest(ctx, "build", req, opentracing.Tags{"mode": "go"})
if err != nil {
return nil, err
}
defer func() {
if err != nil {
ext.Error.Set(span, true)
span.LogFields(otlog.Error(err))
}
span.Finish()
}()
if Debug && h.init != nil {
var b []byte
if req.Params != nil && !req.Notif {
b = []byte(*req.Params)
}
log.Printf(">>> %s %s %s %s", h.init.OriginalRootURI, req.ID, req.Method, string(b))
defer func(t time.Time) {
resultJSON, err := json.Marshal(result)
var resultOrError string
if err == nil {
resultOrError = string(resultJSON)
} else {
resultOrError = err.Error()
}
log.Printf("<<< %s %s %s %dms %s", h.init.OriginalRootURI, req.ID, req.Method, time.Since(t).Nanoseconds()/int64(time.Millisecond), resultOrError)
}(time.Now())
}
switch {
case req.Method == "initialize":
if h.init != nil {
return nil, errors.New("build server is already initialized")
}
if req.Params == nil {
return nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}
}
var params lspext.InitializeParams
if err := json.Unmarshal(*req.Params, ¶ms); err != nil {
return nil, err
}
// In the `rootUri`, clients can send either:
//
// - A `file://` URI, which indicates that:
// - Same-workspace file paths will also be `file://` URIs
// - Out-of-workspace file paths will be `git://` URIs
// - `originalRootUri` is present
// - A `git://` URI, which indicates that:
// - Both same-workspace and out-of-workspace file paths will be non-`file://` URIs
// - `originalRootUri` is absent and `rootUri` contains the original root URI
if strings.HasPrefix(string(params.RootURI), "file://") {
h.clientUsesFileSchemeWithinWorkspace = true
} else {
params.OriginalRootURI = params.RootURI
params.RootURI = "file:///"
h.clientUsesFileSchemeWithinWorkspace = false
}
if Debug {
var b []byte
if req.Params != nil {
b = []byte(*req.Params)
}
log.Printf(">>> %s %s %s %s", params.OriginalRootURI, req.ID, req.Method, string(b))
defer func(t time.Time) {
log.Printf("<<< %s %s %s %dms", params.OriginalRootURI, req.ID, req.Method, time.Since(t).Nanoseconds()/int64(time.Millisecond))
}(time.Now())
}
// Determine the root import path of this workspace (e.g., "github.com/user/repo").
span.SetTag("originalRootPath", params.OriginalRootURI)
fs, closer, err := RemoteFS(ctx, params)
if err != nil {
return nil, err
}
h.closers = append(h.closers, closer)
langInitParams, err := determineEnvironment(ctx, fs, params)
if err != nil {
return nil, err
}
log.Printf("Detected root import path %q for %q", langInitParams.RootImportPath, params.OriginalRootURI)
h.rootImportPath = langInitParams.RootImportPath
if err := h.reset(¶ms, conn, langInitParams.Root()); err != nil {
return nil, err
}
rootPath := strings.TrimPrefix(string(langInitParams.Root()), "file://")
h.FS.Bind(rootPath, fs, "/", ctxvfs.BindAfter)
var langInitResp lsp.InitializeResult
if err := h.callLangServer(ctx, conn, req.Method, req.ID, langInitParams, &langInitResp); err != nil {
return nil, err
}
return langInitResp, nil
case req.Method == "shutdown":
h.ShutDown()
return nil, nil
case req.Method == "exit":
conn.Close()
return nil, nil
case req.Method == "$/cancelRequest":
// Our caching layer is pretty bad, and can easily be poisened
// if we cancel something. So we do not pass on cancellation
// requests.
return nil, nil
case req.Method == "workspace/xpackages":
return h.handleWorkspacePackages(ctx, conn, req)
case req.Method == "workspace/xdependencies":
// The same as h.fetchAndSendDepsOnce except it operates locally to the
// request.
fetchAndSendDepsOnces := make(map[string]*sync.Once) // key is file URI
localFetchAndSendDepsOnce := func(fileURI string) *sync.Once {
once, ok := fetchAndSendDepsOnces[fileURI]
if !ok {
once = new(sync.Once)
fetchAndSendDepsOnces[fileURI] = once
}
return once
}
var (
mu sync.Mutex
finalReferences []*lspext.DependencyReference
references = make(map[string]*lspext.DependencyReference)
)
emitRef := func(path string, r goDependencyReference) {
// If the _reference_ to a definition is made from inside a
// vendored package, or from outside of the repository itself,
// exclude it.
if util.IsVendorDir(path) || !util.PathHasPrefix(path, h.RootFSPath) {
return
}
// If the package being referenced is defined in the repo, and
// it is NOT a vendor package, then exclude it.
if !r.vendor && util.PathHasPrefix(filepath.Join(gopath, "src", r.absolute), h.RootFSPath) {
return
}
newURI, err := h.rewriteURIFromLangServer(lsp.DocumentURI("file://" + path))
if err != nil {
log.Printf("error rewriting URI from language server: %s", err)
return
}
mu.Lock()
defer mu.Unlock()
existing, ok := references[r.absolute]
if !ok {
// Create a new dependency reference.
ref := &lspext.DependencyReference{
Attributes: r.attributes(),
Hints: map[string]interface{}{
"dirs": []string{string(newURI)},
},
}
finalReferences = append(finalReferences, ref)
references[r.absolute] = ref
return
}
// Append to the existing dependency reference's dirs list.
dirs := existing.Hints["dirs"].([]string)
dirs = append(dirs, string(newURI))
existing.Hints["dirs"] = dirs
}
// We need every transitive dependency, for every Go package in the
// repository.
var (
w = ctxvfs.Walk(ctx, h.RootFSPath, h.FS)
dc = newDepCache()
)
dc.collectReferences = true
for w.Step() {
if path.Ext(w.Path()) == ".go" {
d := path.Dir(w.Path())
localFetchAndSendDepsOnce(d).Do(func() {
if err := h.fetchTransitiveDepsOfFile(ctx, lsp.DocumentURI("file://"+d), dc); err != nil {
log.Printf("Warning: fetching deps for dir %s: %s.", d, err)
}
})
}
}
dc.references(emitRef, 1)
return finalReferences, nil
default:
// Pass the request onto the lang server.
// Rewrite URI fields in params to refer to file paths inside
// the GOPATH at the appropriate import path directory. E.g.:
//
// file:///dir/file.go -> file:///src/github.com/user/repo/dir/file.go
var urisInRequest []lsp.DocumentURI // rewritten
var params interface{}
if req.Params != nil {
if err := json.Unmarshal(*req.Params, ¶ms); err != nil {
return nil, err
}
}
rewriteURIFromClient := func(uri lsp.DocumentURI) lsp.DocumentURI {
var path string
if h.clientUsesFileSchemeWithinWorkspace {
if !strings.HasPrefix(string(uri), "file:///") {
return uri // refers to a resource outside of this workspace
}
path = strings.TrimPrefix(string(uri), "file://")
} else {
currentURL, err := url.Parse(string(uri))
if err != nil {
return uri
}
if h.originalRootURI == nil {
return uri
}
path = currentURL.Fragment
currentURL.Fragment = ""
if *currentURL != *h.originalRootURI {
return uri // refers to a resource outside of this workspace
}
}
path = pathpkg.Join(h.RootFSPath, path)
if !util.PathHasPrefix(path, h.RootFSPath) {
panic(fmt.Sprintf("file path %q must have prefix %q (file URI is %q, root URI is %q)", path, h.RootFSPath, uri, h.init.RootPath))
}
newURI := lsp.DocumentURI("file://" + path)
urisInRequest = append(urisInRequest, newURI) // collect
return newURI
}
lspext.WalkURIFields(params, nil, rewriteURIFromClient)
// Store back to req.Params to avoid 2 different versions of the data.
if req.Params != nil {
b, err := json.Marshal(params)
if err != nil {
return nil, err
}
req.Params = (*json.RawMessage)(&b)
}
// Immediately handle notifications. We do not have a response
// to rewrite, so we can pass it on directly and avoid the
// cost of marshalling again. NOTE: FS operations are frequent
// and are notifications.
if req.Notif {
wrappedConn := &jsonrpc2ConnImpl{rewriteURI: h.rewriteURIFromLangServer, conn: conn}
// Avoid extracting the tracer again, it is already attached to ctx.
req.Meta = nil
return h.lang.Handle(ctx, wrappedConn, req)
}
// workspace/symbol queries must have their `dir:` query filter
// rewritten for github.com/golang/go due to its specialized directory
// structure. e.g. `dir:src/net/http` should work, but the language
// server will expect `dir:net/http` as any real/valid Go project will
// have package paths align with the directory structure.
if req.Method == "workspace/symbol" && strings.HasPrefix(string(h.init.OriginalRootURI), "git://github.com/golang/go") {
var wsparams lspext.WorkspaceSymbolParams
if err := json.Unmarshal(*req.Params, &wsparams); err != nil {
return nil, err
}
q := langserver.ParseQuery(wsparams.Query)
if q.Filter == langserver.FilterDir {
// If the query does not start with `src/` and it is a request
// for a stdlib dir, it should return no results (the filter is
// dir, not package path).
if gosrc.IsStdlibPkg(q.Dir) && !strings.HasPrefix(q.Dir, "src") {
q.Dir = "sginvalid"
} else {
q.Dir = util.PathTrimPrefix(q.Dir, "src") // "src/net/http" -> "net/http"
}
}
wsparams.Query = q.String()
b, err := json.Marshal(wsparams)
if err != nil {
return nil, err
}
req.Params = (*json.RawMessage)(&b)
}
if req.Method == "workspace/xreferences" {
// Parse the parameters and if a dirs hint is present, rewrite the
// URIs.
var p lspext.WorkspaceReferencesParams
if err := json.Unmarshal(*req.Params, &p); err != nil {
return nil, err
}
dirsHint, haveDirsHint := p.Hints["dirs"]
if haveDirsHint {
dirs := dirsHint.([]interface{})
for i, dir := range dirs {
dirs[i] = rewriteURIFromClient(lsp.DocumentURI(dir.(string)))
}
// Arbitrarily chosen limit on the number of directories that
// may be searched by workspace/xreferences. Large repositories
// like kubernetes would simply take too long (>15s) to fetch
// their dependencies and typecheck them otherwise. This number
// was chosen as a 'sweet-spot' based on kubernetes solely.
if len(dirs) > 15 {
dirs = dirs[:15]
}
dirsHint = dirs
p.Hints["dirs"] = dirs
b, err := json.Marshal(p)
if err != nil {
return nil, err
}
req.Params = (*json.RawMessage)(&b)
}
}
var result interface{}
if err := h.callLangServer(ctx, conn, req.Method, req.ID, req.Params, &result); err != nil {
return nil, err
}
// (Un-)rewrite URI fields in the result. E.g.:
//
// file:///src/github.com/user/repo/dir/file.go -> file:///dir/file.go
var walkErr error
lspext.WalkURIFields(result, nil, func(uri lsp.DocumentURI) lsp.DocumentURI {
// HACK: Work around https://github.com/sourcegraph/sourcegraph/issues/10541 by
// converting uri == "file://" (which is actually an empty URI in the langserver result)
// to "file:///" instead of emitting an error. This will likely cause the result to be displayed
// with an error on the client, but it's better than the whole
// textDocument/implementation request failing.
if req.Method == "textDocument/implementation" && (uri == "" || uri == "file://") {
return "file:///"
}
newURI, err := h.rewriteURIFromLangServer(uri)
if err != nil {
walkErr = err
}
return newURI
})
if walkErr != nil {
return nil, fmt.Errorf("%s (in Go language server response)", walkErr)
}
return result, nil
}
}
// rewriteURIFromLangServer maps a file: URI produced by the wrapped Go
// language server (which points into the build server's virtual
// GOROOT/GOPATH file system) back to a client-facing URI: a file:///
// URI within the workspace, the original root URI plus a fragment, a VCS
// URI (e.g. git://...) for stdlib/dependency files, or an "unresolved:"
// URI when no mapping is known.
func (h *BuildHandler) rewriteURIFromLangServer(uri lsp.DocumentURI) (lsp.DocumentURI, error) {
	u, err := url.Parse(string(uri))
	if err != nil {
		return "", err
	}
	if !u.IsAbs() {
		return "", fmt.Errorf("invalid relative URI %q", u)
	}
	switch u.Scheme {
	case "file":
		if !filepath.IsAbs(u.Path) {
			return "", fmt.Errorf("invalid relative file path in URI %q", uri)
		}
		// Refers to a file in the Go stdlib?
		if util.PathHasPrefix(u.Path, goroot) {
			fileInGoStdlib := util.PathTrimPrefix(u.Path, goroot)
			if h.rootImportPath == "" {
				if h.clientUsesFileSchemeWithinWorkspace {
					// The workspace is the Go stdlib and this refers to
					// something in the Go stdlib, so let's use file:///
					// so that the client adds our current rev, instead
					// of using runtime.Version() (which is not
					// necessarily the commit of the Go stdlib we're
					// analyzing).
					return lsp.DocumentURI("file:///" + fileInGoStdlib), nil
				}
				if h.originalRootURI == nil {
					return uri, nil
				}
				// Re-anchor the stdlib file on the original root URI via the fragment.
				newURI, _ := url.Parse(h.originalRootURI.String())
				newURI.Fragment = fileInGoStdlib
				return lsp.DocumentURI(newURI.String()), nil
			}
			return lsp.DocumentURI("git://github.com/golang/go?" + gosrc.RuntimeVersion + "#" + fileInGoStdlib), nil
		}
		// Refers to a file in the same workspace?
		if util.PathHasPrefix(u.Path, h.RootFSPath) {
			if h.clientUsesFileSchemeWithinWorkspace {
				pathInThisWorkspace := util.PathTrimPrefix(u.Path, h.RootFSPath)
				return lsp.DocumentURI("file:///" + pathInThisWorkspace), nil
			}
			if h.originalRootURI == nil {
				return uri, nil
			}
			newURI, _ := url.Parse(h.originalRootURI.String())
			newURI.Fragment = util.PathTrimPrefix(u.Path, h.RootFSPath)
			return lsp.DocumentURI(newURI.String()), nil
		}
		// Refers to a file in the GOPATH (that's from another repo)?
		if gopathSrcDir := path.Join(gopath, "src"); util.PathHasPrefix(u.Path, gopathSrcDir) {
			p := util.PathTrimPrefix(u.Path, gopathSrcDir) // "github.com/foo/bar/baz/qux.go"
			// Go through the list of directories we have
			// mounted. We make a copy instead of holding the lock
			// in the for loop to avoid holding the lock for
			// longer than necessary.
			h.HandlerShared.Mu.Lock()
			deps := make([]*gosrc.Directory, len(h.gopathDeps))
			copy(deps, h.gopathDeps)
			h.HandlerShared.Mu.Unlock()
			// Select the dependency whose ProjectRoot is a component-wise
			// prefix of p. NOTE(review): when several match, the last one in
			// gopathDeps wins — confirm this matches how gopathDeps is ordered.
			var d *gosrc.Directory
			for _, dep := range deps {
				pathComponents := strings.Split(p, "/")
				depComponents := strings.Split(dep.ProjectRoot, "/")
				if reflect.DeepEqual(pathComponents[:len(depComponents)], depComponents) {
					d = dep
				}
			}
			if d != nil {
				rev := d.Rev
				if rev == "" {
					rev = "HEAD"
				}
				i := strings.Index(d.CloneURL, "://")
				if i >= 0 {
					repo := d.CloneURL[i+len("://"):]
					path := strings.TrimPrefix(strings.TrimPrefix(p, d.ProjectRoot), "/")
					// HACK
					// In some cases, we see import paths of the form "blah/blah.git" or "blah/blah.git/blah/blah".
					// The name for the repository containing such a package is "blah/blah", so we strip the ".git"
					// from the location URI here. In addition, we strip any leading ".git/" from the path that
					// might get added as a side-effect of stripping the suffix.
					repo = strings.TrimSuffix(repo, ".git")
					path = strings.TrimPrefix(path, ".git/")
					return lsp.DocumentURI(fmt.Sprintf("%s://%s?%s#%s", d.VCS, repo, rev, path)), nil
				}
			}
		}
		// No mapping found; surface the raw path so the failure is visible.
		return lsp.DocumentURI("unresolved:" + u.Path), nil
	default:
		return "", fmt.Errorf("invalid non-file URI %q", uri)
	}
}
// callLangServer forwards the (usually modified) request to the wrapped Go
// language server and decodes its response into result. Do not send
// notifications through this helper; pass the jsonrpc2.Request directly to
// h.lang.Handle instead.
//
// Although this bypasses the JSON-RPC wire protocol (the request is handed
// over in-memory for simplicity/speed), it behaves in the same way as though
// the peer language server were remote.
func (h *BuildHandler) callLangServer(ctx context.Context, conn *jsonrpc2.Conn, method string, id jsonrpc2.ID, params, result interface{}) error {
	req := jsonrpc2.Request{ID: id, Method: method}
	if err := req.SetParams(params); err != nil {
		return err
	}
	wrapped := &jsonrpc2ConnImpl{rewriteURI: h.rewriteURIFromLangServer, conn: conn}
	resp, err := h.lang.Handle(ctx, wrapped, &req)
	if err != nil {
		return err
	}
	// Round-trip through JSON instead of handing over the raw interface{}
	// value, so the build and language servers keep the abstraction that
	// they live in separate memory spaces.
	raw, err := json.Marshal(resp)
	if err != nil {
		return err
	}
	if result == nil {
		return nil
	}
	return json.Unmarshal(raw, result)
}
// Close implements io.Closer. It closes every registered closer and
// returns the accumulated errors, if any.
func (h *BuildHandler) Close() error {
	var combined error
	for _, c := range h.closers {
		if err := c.Close(); err != nil {
			combined = multierror.Append(combined, err)
		}
	}
	return combined
}
| Handle | identifier_name |
build_server.go | package buildserver
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net/http"
"net/url"
"path"
pathpkg "path"
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"time"
multierror "github.com/hashicorp/go-multierror"
"github.com/die-net/lrucache"
"github.com/gregjones/httpcache"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/ctxvfs"
"github.com/sourcegraph/go-langserver/gosrc"
"github.com/sourcegraph/go-langserver/langserver"
"github.com/sourcegraph/go-langserver/langserver/util"
"github.com/sourcegraph/go-langserver/pkg/lsp"
lspext "github.com/sourcegraph/go-lsp/lspext"
"github.com/sourcegraph/jsonrpc2"
)
// Debug, if true, will cause extra logging information to be printed
// (request/response payloads for each LSP request the build server handles).
var Debug = true
// NewHandler creates a new build server wrapping a (also newly
// created) Go language server. I.e., it creates a BuildHandler
// wrapping a LangHandler. The two handlers share a file system (in
// memory).
//
// The build server is responsible for things such as fetching
// dependencies, setting up the right file system structure and paths,
// and mapping local file system paths to logical URIs (e.g.,
// /goroot/src/fmt/print.go ->
// git://github.com/golang/go?go1.7.1#src/fmt/print.go).
func NewHandler(defaultCfg langserver.Config) *BuildHandler {
	if defaultCfg.MaxParallelism <= 0 {
		panic(fmt.Sprintf("langserver.Config.MaxParallelism must be at least 1 (got %d)", defaultCfg.MaxParallelism))
	}
	shared := &langserver.HandlerShared{Shared: true}
	handler := &BuildHandler{
		HandlerShared: shared,
		lang: &langserver.LangHandler{
			HandlerShared: shared,
			DefaultConfig: defaultCfg,
		},
	}
	// Route package lookups through the build handler's cache.
	shared.FindPackage = handler.findPackageCached
	return handler
}
// BuildHandler is a Go build server LSP/JSON-RPC handler that wraps a
// Go language server handler.
type BuildHandler struct {
	lang *langserver.LangHandler // the wrapped Go language server handler
	mu sync.Mutex // guards init and handler lifecycle state
	depURLMutex *keyMutex // per-key locks, presumably serializing dependency fetches by URL — confirm at call sites
	gopathDeps []*gosrc.Directory // directories mounted into the virtual GOPATH; consulted when un-rewriting URIs
	pinnedDepsOnce sync.Once // ensures pinnedDeps is loaded at most once per workspace
	pinnedDeps pinnedPkgs // presumably dependency revisions pinned for this workspace — confirm at call sites
	findPkgMu sync.Mutex // guards findPkg
	findPkg map[findPkgKey]*findPkgValue // cache backing findPackageCached
	langserver.HandlerCommon
	*langserver.HandlerShared
	init *lspext.InitializeParams // set by "initialize" request
	originalRootURI *url.URL // derived from InitializeParams.OriginalRootURI
	rootImportPath string // root import path of the workspace (e.g., "github.com/foo/bar")
	cachingClient *http.Client // http.Client with a cache backed by an in-memory LRU cache
	closers []io.Closer // values to dispose of when Close() is called
	// Whether URIs in the same workspace begin with:
	// - `file://` (true)
	// - `git://` (false)
	// This affects URI rewriting between the client and server.
	clientUsesFileSchemeWithinWorkspace bool
}
// reset clears all internal state in h and re-initializes it for a new
// workspace rooted at rootURI. Both h.mu and h.findPkgMu are held for the
// whole operation so concurrent requests observe either the old or the new
// state, never a mix. conn is currently unused.
func (h *BuildHandler) reset(init *lspext.InitializeParams, conn *jsonrpc2.Conn, rootURI lsp.DocumentURI) error {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.findPkgMu.Lock()
	defer h.findPkgMu.Unlock()
	if err := h.HandlerCommon.Reset(rootURI); err != nil {
		return err
	}
	if err := h.HandlerShared.Reset(false); err != nil {
		return err
	}
	h.init = init
	var err error
	h.originalRootURI, err = url.Parse(string(h.init.OriginalRootURI))
	// Treat an absent or unparseable OriginalRootURI as "none".
	if h.init.OriginalRootURI == "" || err != nil {
		h.originalRootURI = nil
	}
	// 100 MiB cache, no age-based eviction
	h.cachingClient = &http.Client{Transport: httpcache.NewTransport(lrucache.New(100*1024*1024, 0))}
	h.depURLMutex = newKeyMutex()
	h.gopathDeps = nil
	h.pinnedDepsOnce = sync.Once{}
	h.pinnedDeps = nil
	h.findPkg = nil
	return nil
}
func (h *BuildHandler) Handle(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) (result interface{}, err error) {
// Prevent any uncaught panics from taking the entire server down.
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("unexpected panic: %v", r)
// Same as net/http
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("panic serving %v: %v\n%s", req.Method, r, buf)
return
}
}()
h.mu.Lock()
if req.Method != "initialize" && h.init == nil {
h.mu.Unlock()
return nil, errors.New("server must be initialized")
}
h.mu.Unlock()
if err := h.CheckReady(); err != nil {
if req.Method == "exit" |
return nil, err
}
h.InitTracer(conn)
span, ctx, err := h.SpanForRequest(ctx, "build", req, opentracing.Tags{"mode": "go"})
if err != nil {
return nil, err
}
defer func() {
if err != nil {
ext.Error.Set(span, true)
span.LogFields(otlog.Error(err))
}
span.Finish()
}()
if Debug && h.init != nil {
var b []byte
if req.Params != nil && !req.Notif {
b = []byte(*req.Params)
}
log.Printf(">>> %s %s %s %s", h.init.OriginalRootURI, req.ID, req.Method, string(b))
defer func(t time.Time) {
resultJSON, err := json.Marshal(result)
var resultOrError string
if err == nil {
resultOrError = string(resultJSON)
} else {
resultOrError = err.Error()
}
log.Printf("<<< %s %s %s %dms %s", h.init.OriginalRootURI, req.ID, req.Method, time.Since(t).Nanoseconds()/int64(time.Millisecond), resultOrError)
}(time.Now())
}
switch {
case req.Method == "initialize":
if h.init != nil {
return nil, errors.New("build server is already initialized")
}
if req.Params == nil {
return nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams}
}
var params lspext.InitializeParams
if err := json.Unmarshal(*req.Params, ¶ms); err != nil {
return nil, err
}
// In the `rootUri`, clients can send either:
//
// - A `file://` URI, which indicates that:
// - Same-workspace file paths will also be `file://` URIs
// - Out-of-workspace file paths will be `git://` URIs
// - `originalRootUri` is present
// - A `git://` URI, which indicates that:
// - Both same-workspace and out-of-workspace file paths will be non-`file://` URIs
// - `originalRootUri` is absent and `rootUri` contains the original root URI
if strings.HasPrefix(string(params.RootURI), "file://") {
h.clientUsesFileSchemeWithinWorkspace = true
} else {
params.OriginalRootURI = params.RootURI
params.RootURI = "file:///"
h.clientUsesFileSchemeWithinWorkspace = false
}
if Debug {
var b []byte
if req.Params != nil {
b = []byte(*req.Params)
}
log.Printf(">>> %s %s %s %s", params.OriginalRootURI, req.ID, req.Method, string(b))
defer func(t time.Time) {
log.Printf("<<< %s %s %s %dms", params.OriginalRootURI, req.ID, req.Method, time.Since(t).Nanoseconds()/int64(time.Millisecond))
}(time.Now())
}
// Determine the root import path of this workspace (e.g., "github.com/user/repo").
span.SetTag("originalRootPath", params.OriginalRootURI)
fs, closer, err := RemoteFS(ctx, params)
if err != nil {
return nil, err
}
h.closers = append(h.closers, closer)
langInitParams, err := determineEnvironment(ctx, fs, params)
if err != nil {
return nil, err
}
log.Printf("Detected root import path %q for %q", langInitParams.RootImportPath, params.OriginalRootURI)
h.rootImportPath = langInitParams.RootImportPath
if err := h.reset(¶ms, conn, langInitParams.Root()); err != nil {
return nil, err
}
rootPath := strings.TrimPrefix(string(langInitParams.Root()), "file://")
h.FS.Bind(rootPath, fs, "/", ctxvfs.BindAfter)
var langInitResp lsp.InitializeResult
if err := h.callLangServer(ctx, conn, req.Method, req.ID, langInitParams, &langInitResp); err != nil {
return nil, err
}
return langInitResp, nil
case req.Method == "shutdown":
h.ShutDown()
return nil, nil
case req.Method == "exit":
conn.Close()
return nil, nil
case req.Method == "$/cancelRequest":
// Our caching layer is pretty bad, and can easily be poisened
// if we cancel something. So we do not pass on cancellation
// requests.
return nil, nil
case req.Method == "workspace/xpackages":
return h.handleWorkspacePackages(ctx, conn, req)
case req.Method == "workspace/xdependencies":
// The same as h.fetchAndSendDepsOnce except it operates locally to the
// request.
fetchAndSendDepsOnces := make(map[string]*sync.Once) // key is file URI
localFetchAndSendDepsOnce := func(fileURI string) *sync.Once {
once, ok := fetchAndSendDepsOnces[fileURI]
if !ok {
once = new(sync.Once)
fetchAndSendDepsOnces[fileURI] = once
}
return once
}
var (
mu sync.Mutex
finalReferences []*lspext.DependencyReference
references = make(map[string]*lspext.DependencyReference)
)
emitRef := func(path string, r goDependencyReference) {
// If the _reference_ to a definition is made from inside a
// vendored package, or from outside of the repository itself,
// exclude it.
if util.IsVendorDir(path) || !util.PathHasPrefix(path, h.RootFSPath) {
return
}
// If the package being referenced is defined in the repo, and
// it is NOT a vendor package, then exclude it.
if !r.vendor && util.PathHasPrefix(filepath.Join(gopath, "src", r.absolute), h.RootFSPath) {
return
}
newURI, err := h.rewriteURIFromLangServer(lsp.DocumentURI("file://" + path))
if err != nil {
log.Printf("error rewriting URI from language server: %s", err)
return
}
mu.Lock()
defer mu.Unlock()
existing, ok := references[r.absolute]
if !ok {
// Create a new dependency reference.
ref := &lspext.DependencyReference{
Attributes: r.attributes(),
Hints: map[string]interface{}{
"dirs": []string{string(newURI)},
},
}
finalReferences = append(finalReferences, ref)
references[r.absolute] = ref
return
}
// Append to the existing dependency reference's dirs list.
dirs := existing.Hints["dirs"].([]string)
dirs = append(dirs, string(newURI))
existing.Hints["dirs"] = dirs
}
// We need every transitive dependency, for every Go package in the
// repository.
var (
w = ctxvfs.Walk(ctx, h.RootFSPath, h.FS)
dc = newDepCache()
)
dc.collectReferences = true
for w.Step() {
if path.Ext(w.Path()) == ".go" {
d := path.Dir(w.Path())
localFetchAndSendDepsOnce(d).Do(func() {
if err := h.fetchTransitiveDepsOfFile(ctx, lsp.DocumentURI("file://"+d), dc); err != nil {
log.Printf("Warning: fetching deps for dir %s: %s.", d, err)
}
})
}
}
dc.references(emitRef, 1)
return finalReferences, nil
default:
// Pass the request onto the lang server.
// Rewrite URI fields in params to refer to file paths inside
// the GOPATH at the appropriate import path directory. E.g.:
//
// file:///dir/file.go -> file:///src/github.com/user/repo/dir/file.go
var urisInRequest []lsp.DocumentURI // rewritten
var params interface{}
if req.Params != nil {
if err := json.Unmarshal(*req.Params, ¶ms); err != nil {
return nil, err
}
}
rewriteURIFromClient := func(uri lsp.DocumentURI) lsp.DocumentURI {
var path string
if h.clientUsesFileSchemeWithinWorkspace {
if !strings.HasPrefix(string(uri), "file:///") {
return uri // refers to a resource outside of this workspace
}
path = strings.TrimPrefix(string(uri), "file://")
} else {
currentURL, err := url.Parse(string(uri))
if err != nil {
return uri
}
if h.originalRootURI == nil {
return uri
}
path = currentURL.Fragment
currentURL.Fragment = ""
if *currentURL != *h.originalRootURI {
return uri // refers to a resource outside of this workspace
}
}
path = pathpkg.Join(h.RootFSPath, path)
if !util.PathHasPrefix(path, h.RootFSPath) {
panic(fmt.Sprintf("file path %q must have prefix %q (file URI is %q, root URI is %q)", path, h.RootFSPath, uri, h.init.RootPath))
}
newURI := lsp.DocumentURI("file://" + path)
urisInRequest = append(urisInRequest, newURI) // collect
return newURI
}
lspext.WalkURIFields(params, nil, rewriteURIFromClient)
// Store back to req.Params to avoid 2 different versions of the data.
if req.Params != nil {
b, err := json.Marshal(params)
if err != nil {
return nil, err
}
req.Params = (*json.RawMessage)(&b)
}
// Immediately handle notifications. We do not have a response
// to rewrite, so we can pass it on directly and avoid the
// cost of marshalling again. NOTE: FS operations are frequent
// and are notifications.
if req.Notif {
wrappedConn := &jsonrpc2ConnImpl{rewriteURI: h.rewriteURIFromLangServer, conn: conn}
// Avoid extracting the tracer again, it is already attached to ctx.
req.Meta = nil
return h.lang.Handle(ctx, wrappedConn, req)
}
// workspace/symbol queries must have their `dir:` query filter
// rewritten for github.com/golang/go due to its specialized directory
// structure. e.g. `dir:src/net/http` should work, but the language
// server will expect `dir:net/http` as any real/valid Go project will
// have package paths align with the directory structure.
if req.Method == "workspace/symbol" && strings.HasPrefix(string(h.init.OriginalRootURI), "git://github.com/golang/go") {
var wsparams lspext.WorkspaceSymbolParams
if err := json.Unmarshal(*req.Params, &wsparams); err != nil {
return nil, err
}
q := langserver.ParseQuery(wsparams.Query)
if q.Filter == langserver.FilterDir {
// If the query does not start with `src/` and it is a request
// for a stdlib dir, it should return no results (the filter is
// dir, not package path).
if gosrc.IsStdlibPkg(q.Dir) && !strings.HasPrefix(q.Dir, "src") {
q.Dir = "sginvalid"
} else {
q.Dir = util.PathTrimPrefix(q.Dir, "src") // "src/net/http" -> "net/http"
}
}
wsparams.Query = q.String()
b, err := json.Marshal(wsparams)
if err != nil {
return nil, err
}
req.Params = (*json.RawMessage)(&b)
}
if req.Method == "workspace/xreferences" {
// Parse the parameters and if a dirs hint is present, rewrite the
// URIs.
var p lspext.WorkspaceReferencesParams
if err := json.Unmarshal(*req.Params, &p); err != nil {
return nil, err
}
dirsHint, haveDirsHint := p.Hints["dirs"]
if haveDirsHint {
dirs := dirsHint.([]interface{})
for i, dir := range dirs {
dirs[i] = rewriteURIFromClient(lsp.DocumentURI(dir.(string)))
}
// Arbitrarily chosen limit on the number of directories that
// may be searched by workspace/xreferences. Large repositories
// like kubernetes would simply take too long (>15s) to fetch
// their dependencies and typecheck them otherwise. This number
// was chosen as a 'sweet-spot' based on kubernetes solely.
if len(dirs) > 15 {
dirs = dirs[:15]
}
dirsHint = dirs
p.Hints["dirs"] = dirs
b, err := json.Marshal(p)
if err != nil {
return nil, err
}
req.Params = (*json.RawMessage)(&b)
}
}
var result interface{}
if err := h.callLangServer(ctx, conn, req.Method, req.ID, req.Params, &result); err != nil {
return nil, err
}
// (Un-)rewrite URI fields in the result. E.g.:
//
// file:///src/github.com/user/repo/dir/file.go -> file:///dir/file.go
var walkErr error
lspext.WalkURIFields(result, nil, func(uri lsp.DocumentURI) lsp.DocumentURI {
// HACK: Work around https://github.com/sourcegraph/sourcegraph/issues/10541 by
// converting uri == "file://" (which is actually an empty URI in the langserver result)
// to "file:///" instead of emitting an error. This will likely cause the result to be displayed
// with an error on the client, but it's better than the whole
// textDocument/implementation request failing.
if req.Method == "textDocument/implementation" && (uri == "" || uri == "file://") {
return "file:///"
}
newURI, err := h.rewriteURIFromLangServer(uri)
if err != nil {
walkErr = err
}
return newURI
})
if walkErr != nil {
return nil, fmt.Errorf("%s (in Go language server response)", walkErr)
}
return result, nil
}
}
// rewriteURIFromLangServer maps a file: URI produced by the wrapped Go
// language server (which points into the build server's virtual
// GOROOT/GOPATH file system) back to a client-facing URI: a file:///
// URI within the workspace, the original root URI plus a fragment, a VCS
// URI (e.g. git://...) for stdlib/dependency files, or an "unresolved:"
// URI when no mapping is known.
func (h *BuildHandler) rewriteURIFromLangServer(uri lsp.DocumentURI) (lsp.DocumentURI, error) {
	u, err := url.Parse(string(uri))
	if err != nil {
		return "", err
	}
	if !u.IsAbs() {
		return "", fmt.Errorf("invalid relative URI %q", u)
	}
	switch u.Scheme {
	case "file":
		if !filepath.IsAbs(u.Path) {
			return "", fmt.Errorf("invalid relative file path in URI %q", uri)
		}
		// Refers to a file in the Go stdlib?
		if util.PathHasPrefix(u.Path, goroot) {
			fileInGoStdlib := util.PathTrimPrefix(u.Path, goroot)
			if h.rootImportPath == "" {
				if h.clientUsesFileSchemeWithinWorkspace {
					// The workspace is the Go stdlib and this refers to
					// something in the Go stdlib, so let's use file:///
					// so that the client adds our current rev, instead
					// of using runtime.Version() (which is not
					// necessarily the commit of the Go stdlib we're
					// analyzing).
					return lsp.DocumentURI("file:///" + fileInGoStdlib), nil
				}
				if h.originalRootURI == nil {
					return uri, nil
				}
				// Re-anchor the stdlib file on the original root URI via the fragment.
				newURI, _ := url.Parse(h.originalRootURI.String())
				newURI.Fragment = fileInGoStdlib
				return lsp.DocumentURI(newURI.String()), nil
			}
			return lsp.DocumentURI("git://github.com/golang/go?" + gosrc.RuntimeVersion + "#" + fileInGoStdlib), nil
		}
		// Refers to a file in the same workspace?
		if util.PathHasPrefix(u.Path, h.RootFSPath) {
			if h.clientUsesFileSchemeWithinWorkspace {
				pathInThisWorkspace := util.PathTrimPrefix(u.Path, h.RootFSPath)
				return lsp.DocumentURI("file:///" + pathInThisWorkspace), nil
			}
			if h.originalRootURI == nil {
				return uri, nil
			}
			newURI, _ := url.Parse(h.originalRootURI.String())
			newURI.Fragment = util.PathTrimPrefix(u.Path, h.RootFSPath)
			return lsp.DocumentURI(newURI.String()), nil
		}
		// Refers to a file in the GOPATH (that's from another repo)?
		if gopathSrcDir := path.Join(gopath, "src"); util.PathHasPrefix(u.Path, gopathSrcDir) {
			p := util.PathTrimPrefix(u.Path, gopathSrcDir) // "github.com/foo/bar/baz/qux.go"
			// Go through the list of directories we have
			// mounted. We make a copy instead of holding the lock
			// in the for loop to avoid holding the lock for
			// longer than necessary.
			h.HandlerShared.Mu.Lock()
			deps := make([]*gosrc.Directory, len(h.gopathDeps))
			copy(deps, h.gopathDeps)
			h.HandlerShared.Mu.Unlock()
			// Select the dependency whose ProjectRoot is a component-wise
			// prefix of p. NOTE(review): when several match, the last one in
			// gopathDeps wins — confirm this matches how gopathDeps is ordered.
			var d *gosrc.Directory
			for _, dep := range deps {
				pathComponents := strings.Split(p, "/")
				depComponents := strings.Split(dep.ProjectRoot, "/")
				if reflect.DeepEqual(pathComponents[:len(depComponents)], depComponents) {
					d = dep
				}
			}
			if d != nil {
				rev := d.Rev
				if rev == "" {
					rev = "HEAD"
				}
				i := strings.Index(d.CloneURL, "://")
				if i >= 0 {
					repo := d.CloneURL[i+len("://"):]
					path := strings.TrimPrefix(strings.TrimPrefix(p, d.ProjectRoot), "/")
					// HACK
					// In some cases, we see import paths of the form "blah/blah.git" or "blah/blah.git/blah/blah".
					// The name for the repository containing such a package is "blah/blah", so we strip the ".git"
					// from the location URI here. In addition, we strip any leading ".git/" from the path that
					// might get added as a side-effect of stripping the suffix.
					repo = strings.TrimSuffix(repo, ".git")
					path = strings.TrimPrefix(path, ".git/")
					return lsp.DocumentURI(fmt.Sprintf("%s://%s?%s#%s", d.VCS, repo, rev, path)), nil
				}
			}
		}
		// No mapping found; surface the raw path so the failure is visible.
		return lsp.DocumentURI("unresolved:" + u.Path), nil
	default:
		return "", fmt.Errorf("invalid non-file URI %q", uri)
	}
}
// callLangServer forwards the (usually modified) request to the wrapped Go
// language server and decodes its response into result. Do not send
// notifications through this helper; pass the jsonrpc2.Request directly to
// h.lang.Handle instead.
//
// Although this bypasses the JSON-RPC wire protocol (the request is handed
// over in-memory for simplicity/speed), it behaves in the same way as though
// the peer language server were remote.
func (h *BuildHandler) callLangServer(ctx context.Context, conn *jsonrpc2.Conn, method string, id jsonrpc2.ID, params, result interface{}) error {
	req := jsonrpc2.Request{ID: id, Method: method}
	if err := req.SetParams(params); err != nil {
		return err
	}
	wrapped := &jsonrpc2ConnImpl{rewriteURI: h.rewriteURIFromLangServer, conn: conn}
	resp, err := h.lang.Handle(ctx, wrapped, &req)
	if err != nil {
		return err
	}
	// Round-trip through JSON instead of handing over the raw interface{}
	// value, so the build and language servers keep the abstraction that
	// they live in separate memory spaces.
	raw, err := json.Marshal(resp)
	if err != nil {
		return err
	}
	if result == nil {
		return nil
	}
	return json.Unmarshal(raw, result)
}
// Close implements io.Closer. It closes every registered closer and
// returns the accumulated errors, if any.
func (h *BuildHandler) Close() error {
	var combined error
	for _, c := range h.closers {
		if err := c.Close(); err != nil {
			combined = multierror.Append(combined, err)
		}
	}
	return combined
}
| {
err = nil
} | conditional_block |
cdn_log.go | package cdn
import (
"bufio"
"context"
"crypto/rsa"
"fmt"
"net"
"strconv"
"strings"
"time"
gocache "github.com/patrickmn/go-cache"
"github.com/ovh/cds/engine/api/cache"
"github.com/ovh/cds/engine/api/services"
"github.com/ovh/cds/engine/api/worker"
"github.com/ovh/cds/engine/api/workflow"
"github.com/ovh/cds/sdk"
"github.com/ovh/cds/sdk/jws"
"github.com/ovh/cds/sdk/log"
"github.com/ovh/cds/sdk/log/hook"
"github.com/ovh/cds/sdk/telemetry"
)
var (
	// logCache is an in-memory cache (20m default TTL, 30m cleanup interval)
	// for worker/service records used during log handling.
	logCache = gocache.New(20*time.Minute, 30*time.Minute)
	// keyJobLogQueue is the "cdn/log/job" cache key prefix.
	keyJobLogQueue = cache.Key("cdn", "log", "job")
	// keyJobHearbeat is the "cdn/log/heartbeat" cache key. (sic: "Hearbeat")
	keyJobHearbeat = cache.Key("cdn", "log", "heartbeat")
	// keyJobLock is the "cdn/log/lock" cache key prefix.
	keyJobLock = cache.Key("cdn", "log", "lock")
)
// RunTcpLogServer starts the TCP log server on s.Cfg.TCP.Addr:Port and
// returns immediately; the accept loop, the shutdown watcher, and the
// job-dequeuing goroutine all run in the background until ctx is canceled.
func (s *Service) RunTcpLogServer(ctx context.Context) {
	log.Info(ctx, "Starting tcp server %s:%d", s.Cfg.TCP.Addr, s.Cfg.TCP.Port)
	listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", s.Cfg.TCP.Addr, s.Cfg.TCP.Port))
	if err != nil {
		log.Fatalf("unable to start tcp log server: %v", err)
	}
	//Gracefully shutdown the tcp server
	go func() {
		<-ctx.Done()
		log.Info(ctx, "CDN> Shutdown tcp log Server")
		_ = listener.Close()
	}()
	// Looking for something to dequeue
	sdk.GoRoutine(ctx, "cdn-waiting-job", func(ctx context.Context) {
		s.waitingJobs(ctx)
	})
	go func() {
		for {
			conn, err := listener.Accept()
			if err != nil {
				// Accept fails once the listener is closed (the shutdown
				// path above), which terminates this loop.
				telemetry.Record(ctx, Errors, 1)
				log.Error(ctx, "unable to accept connection: %v", err)
				return
			}
			// Serve each client connection in its own goroutine.
			sdk.GoRoutine(ctx, "cdn-logServer", func(ctx context.Context) {
				telemetry.Record(ctx, Hits, 1)
				s.handleConnection(ctx, conn)
			})
		}
	}()
}
func (s *Service) handleConnection(ctx context.Context, conn net.Conn) {
defer func() {
_ = conn.Close()
}()
bufReader := bufio.NewReader(conn)
for {
bytes, err := bufReader.ReadBytes(byte(0))
if err != nil {
log.Info(ctx, "client left")
return
}
// remove byte(0)
bytes = bytes[:len(bytes)-1]
if err := s.handleLogMessage(ctx, bytes); err != nil {
telemetry.Record(ctx, Errors, 1)
log.Error(ctx, "cdn.log> %v", err)
continue
}
}
}
func (s *Service) handleLogMessage(ctx context.Context, messageReceived []byte) error {
m := hook.Message{}
if err := m.UnmarshalJSON(messageReceived); err != nil {
return sdk.WrapError(err, "unable to unmarshall gelf message: %s", string(messageReceived))
}
sig, ok := m.Extra["_"+log.ExtraFieldSignature]
if !ok || sig == "" {
return sdk.WithStack(fmt.Errorf("signature not found on log message: %+v", m))
}
// Get worker datas
var signature log.Signature
if err := jws.UnsafeParse(sig.(string), &signature); err != nil {
return err
}
switch {
case signature.Worker != nil:
telemetry.Record(ctx, WorkerLogReceived, 1)
return s.handleWorkerLog(ctx, signature.Worker.WorkerID, sig, m)
case signature.Service != nil:
telemetry.Record(ctx, ServiceLogReceived, 1)
return s.handleServiceLog(ctx, signature.Service.HatcheryID, signature.Service.HatcheryName, signature.Service.WorkerName, sig, m)
default:
return sdk.WithStack(sdk.ErrWrongRequest)
}
}
func (s *Service) handleWorkerLog(ctx context.Context, workerID string, sig interface{}, m hook.Message) error {
var signature log.Signature
var workerData sdk.Worker
cacheData, ok := logCache.Get(fmt.Sprintf("worker-%s", workerID))
if !ok {
var err error
workerData, err = s.getWorker(ctx, workerID)
if err != nil {
return err
}
} else {
workerData = cacheData.(sdk.Worker)
}
if err := jws.Verify(workerData.PrivateKey, sig.(string), &signature); err != nil {
return err
}
if workerData.JobRunID == nil || *workerData.JobRunID != signature.JobID {
return sdk.WithStack(sdk.ErrForbidden)
}
hm := handledMessage{
Signature: signature,
Msg: m,
}
cacheKey := cache.Key(keyJobLogQueue, strconv.Itoa(int(signature.JobID)))
if err := s.Cache.Enqueue(cacheKey, hm); err != nil {
return err
}
return nil
}
type handledMessage struct {
Signature log.Signature
Msg hook.Message
}
func buildMessage(signature log.Signature, m hook.Message) string {
logDate := time.Unix(0, int64(m.Time*1e9))
logs := sdk.Log{
JobID: signature.JobID,
LastModified: &logDate,
NodeRunID: signature.NodeRunID,
Start: &logDate,
StepOrder: signature.Worker.StepOrder,
Val: m.Full,
}
if !strings.HasSuffix(logs.Val, "\n") {
logs.Val += "\n"
}
var lvl string
switch m.Level {
case int32(hook.LOG_DEBUG):
lvl = "DEBUG"
case int32(hook.LOG_INFO):
lvl = "INFO"
case int32(hook.LOG_NOTICE):
lvl = "NOTICE"
case int32(hook.LOG_WARNING):
lvl = "WARN"
case int32(hook.LOG_ERR):
lvl = "ERROR"
case int32(hook.LOG_CRIT):
lvl = "CRITICAL"
case int32(hook.LOG_ALERT):
lvl = "ALERT"
case int32(hook.LOG_EMERG):
lvl = "EMERGENCY"
}
logs.Val = fmt.Sprintf("[%s] %s", lvl, logs.Val)
return logs.Val
}
func (s *Service) handleServiceLog(ctx context.Context, hatcheryID int64, hatcheryName string, workerName string, sig interface{}, m hook.Message) error {
var signature log.Signature
var pk *rsa.PublicKey
cacheData, ok := logCache.Get(fmt.Sprintf("hatchery-key-%d", hatcheryID))
if !ok {
var err error
pk, err = s.getHatchery(ctx, hatcheryID, hatcheryName)
if err != nil {
return err
}
} else {
pk = cacheData.(*rsa.PublicKey)
}
if err := jws.Verify(pk, sig.(string), &signature); err != nil {
return err
}
// Verified that worker has been spawn by this hatchery
workerCacheKey := fmt.Sprintf("service-worker-%s", workerName)
_, ok = logCache.Get(workerCacheKey)
if !ok {
// Verify that the worker has been spawn by this hatchery
wk, err := worker.LoadWorkerByName(ctx, s.Db, workerName)
if err != nil {
return err
}
if wk.HatcheryID == nil {
return sdk.WrapError(sdk.ErrWrongRequest, "hatchery %d cannot send service log for worker %s started by %s that is no more linked to an hatchery", signature.Service.HatcheryID, wk.ID, wk.HatcheryName)
}
if *wk.HatcheryID != signature.Service.HatcheryID {
return sdk.WrapError(sdk.ErrWrongRequest, "cannot send service log for worker %s from hatchery (expected: %d/actual: %d)", wk.ID, *wk.HatcheryID, signature.Service.HatcheryID)
}
logCache.Set(workerCacheKey, true, gocache.DefaultExpiration)
}
nodeRunJob, err := workflow.LoadNodeJobRun(ctx, s.Db, s.Cache, signature.JobID)
if err != nil {
return err
}
logs := sdk.ServiceLog{
ServiceRequirementName: signature.Service.RequirementName,
ServiceRequirementID: signature.Service.RequirementID,
WorkflowNodeJobRunID: signature.JobID,
WorkflowNodeRunID: nodeRunJob.WorkflowNodeRunID,
Val: m.Full,
}
if !strings.HasSuffix(logs.Val, "\n") {
logs.Val += "\n"
}
if err := workflow.AddServiceLog(s.Db, nodeRunJob, &logs, s.Cfg.Log.ServiceMaxSize); err != nil {
return err
}
return nil
}
func (s *Service) getWorker(ctx context.Context, workerID string) (sdk.Worker, error) {
w, err := worker.LoadWorkerByIDWithDecryptKey(ctx, s.Db, workerID)
if err != nil {
return sdk.Worker{}, err
}
logCache.Set(fmt.Sprintf("worker-%s", w.ID), *w, gocache.DefaultExpiration)
return *w, nil
}
func (s *Service) getHatchery(ctx context.Context, hatcheryID int64, hatcheryName string) (*rsa.PublicKey, error) {
h, err := services.LoadByNameAndType(ctx, s.Db, hatcheryName, services.TypeHatchery)
if err != nil {
return nil, err
}
if h.ID != hatcheryID {
return nil, sdk.WithStack(sdk.ErrWrongRequest)
}
// Verify signature
pk, err := jws.NewPublicKeyFromPEM(h.PublicKey)
if err != nil {
return nil, sdk.WithStack(err)
}
logCache.Set(fmt.Sprintf("hatchery-key-%d", hatcheryID), pk, gocache.DefaultExpiration)
return pk, nil
}
func (s *Service) waitingJobs(ctx context.Context) |
func (s *Service) dequeueJobMessages(ctx context.Context, jobLogsQueueKey string, jobID string) error {
log.Info(ctx, "Dequeue %s", jobLogsQueueKey)
var t0 = time.Now()
var t1 = time.Now()
var nbMessages int
defer func() {
delta := t1.Sub(t0)
log.Info(ctx, "processLogs[%s] - %d messages received in %v", jobLogsQueueKey, nbMessages, delta)
}()
defer func() {
// Remove heartbeat
_ = s.Cache.Delete(cache.Key(keyJobHearbeat, jobID))
}()
tick := time.NewTicker(5 * time.Second)
defer tick.Stop()
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-tick.C:
b, err := s.Cache.Exist(jobLogsQueueKey)
if err != nil {
log.Error(ctx, "unable to check if queue still exist: %v", err)
continue
} else if !b {
// leave dequeue if queue does not exist anymore
log.Info(ctx, "leaving job queue %s (queue no more exists)", jobLogsQueueKey)
return nil
}
// heartbeat
heartbeatKey := cache.Key(keyJobHearbeat, jobID)
if err := s.Cache.SetWithTTL(heartbeatKey, true, 30); err != nil {
log.Error(ctx, "unable to hearbeat %s: %v", heartbeatKey, err)
continue
}
default:
dequeuCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
var hm handledMessage
if err := s.Cache.DequeueWithContext(dequeuCtx, jobLogsQueueKey, 30*time.Millisecond, &hm); err != nil {
cancel()
if strings.Contains(err.Error(), "context deadline exceeded") {
return nil
}
log.Error(ctx, "unable to dequeue job logs queue %s: %v", jobLogsQueueKey, err)
continue
}
cancel()
if hm.Signature.Worker == nil {
continue
}
nbMessages++
t1 = time.Now()
currentLog := buildMessage(hm.Signature, hm.Msg)
if err := workflow.AppendLog(s.Db, hm.Signature.JobID, hm.Signature.NodeRunID, hm.Signature.Worker.StepOrder, currentLog, s.Cfg.Log.StepMaxSize); err != nil {
log.Error(ctx, "unable to process log: %+v", err)
}
}
}
}
func (s *Service) canDequeue(jobID string) (string, error) {
jobQueueKey := cache.Key(keyJobLogQueue, jobID)
heatbeatKey := cache.Key(keyJobHearbeat, jobID)
// Take a lock
lockKey := cache.Key(keyJobLock, jobID)
b, err := s.Cache.Lock(lockKey, 5*time.Second, 0, 1)
if err != nil {
return "", err
}
defer func() {
_ = s.Cache.Unlock(lockKey)
}()
if !b {
return "", nil
}
exist, err := s.Cache.Exist(heatbeatKey)
if err != nil {
return "", err
}
// if key exist, that mean that someone is already dequeuing
if exist {
return "", nil
}
//hearbeat
heartbeatKey := cache.Key(keyJobHearbeat, jobID)
if err := s.Cache.SetWithTTL(heartbeatKey, true, 30); err != nil {
return "", err
}
return jobQueueKey, nil
}
| {
for {
select {
case <-ctx.Done():
return
default:
// List all queues
keyListQueue := cache.Key(keyJobLogQueue, "*")
listKeys, err := s.Cache.Keys(keyListQueue)
if err != nil {
log.Error(ctx, "unable to list jobs queues %s", keyListQueue)
continue
}
// For each key, check if heartbeat key exist
for _, k := range listKeys {
keyParts := strings.Split(k, ":")
jobID := keyParts[len(keyParts)-1]
jobQueueKey, err := s.canDequeue(jobID)
if err != nil {
log.Error(ctx, "unable to check canDequeue %s: %v", jobQueueKey, err)
continue
}
if jobQueueKey == "" {
continue
}
sdk.GoRoutine(ctx, "cdn-dequeue-job-message", func(ctx context.Context) {
if err := s.dequeueJobMessages(ctx, jobQueueKey, jobID); err != nil {
log.Error(ctx, "unable to dequeue redis incoming job queue: %v", err)
}
})
}
time.Sleep(250 * time.Millisecond)
}
}
} | identifier_body |
cdn_log.go | package cdn
import (
"bufio"
"context"
"crypto/rsa"
"fmt"
"net"
"strconv"
"strings"
"time"
gocache "github.com/patrickmn/go-cache"
"github.com/ovh/cds/engine/api/cache"
"github.com/ovh/cds/engine/api/services"
"github.com/ovh/cds/engine/api/worker"
"github.com/ovh/cds/engine/api/workflow"
"github.com/ovh/cds/sdk"
"github.com/ovh/cds/sdk/jws"
"github.com/ovh/cds/sdk/log"
"github.com/ovh/cds/sdk/log/hook"
"github.com/ovh/cds/sdk/telemetry"
)
var (
logCache = gocache.New(20*time.Minute, 30*time.Minute)
keyJobLogQueue = cache.Key("cdn", "log", "job")
keyJobHearbeat = cache.Key("cdn", "log", "heartbeat")
keyJobLock = cache.Key("cdn", "log", "lock")
)
func (s *Service) RunTcpLogServer(ctx context.Context) {
log.Info(ctx, "Starting tcp server %s:%d", s.Cfg.TCP.Addr, s.Cfg.TCP.Port)
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", s.Cfg.TCP.Addr, s.Cfg.TCP.Port))
if err != nil {
log.Fatalf("unable to start tcp log server: %v", err)
}
//Gracefully shutdown the tcp server
go func() {
<-ctx.Done()
log.Info(ctx, "CDN> Shutdown tcp log Server")
_ = listener.Close()
}()
// Looking for something to dequeue
sdk.GoRoutine(ctx, "cdn-waiting-job", func(ctx context.Context) {
s.waitingJobs(ctx)
})
go func() {
for {
conn, err := listener.Accept()
if err != nil {
telemetry.Record(ctx, Errors, 1)
log.Error(ctx, "unable to accept connection: %v", err)
return
}
sdk.GoRoutine(ctx, "cdn-logServer", func(ctx context.Context) {
telemetry.Record(ctx, Hits, 1)
s.handleConnection(ctx, conn)
})
}
}()
}
func (s *Service) handleConnection(ctx context.Context, conn net.Conn) {
defer func() {
_ = conn.Close()
}()
bufReader := bufio.NewReader(conn)
for {
bytes, err := bufReader.ReadBytes(byte(0))
if err != nil {
log.Info(ctx, "client left")
return
}
// remove byte(0)
bytes = bytes[:len(bytes)-1]
if err := s.handleLogMessage(ctx, bytes); err != nil {
telemetry.Record(ctx, Errors, 1)
log.Error(ctx, "cdn.log> %v", err)
continue
}
}
}
func (s *Service) handleLogMessage(ctx context.Context, messageReceived []byte) error {
m := hook.Message{}
if err := m.UnmarshalJSON(messageReceived); err != nil {
return sdk.WrapError(err, "unable to unmarshall gelf message: %s", string(messageReceived))
}
sig, ok := m.Extra["_"+log.ExtraFieldSignature]
if !ok || sig == "" {
return sdk.WithStack(fmt.Errorf("signature not found on log message: %+v", m))
}
// Get worker datas
var signature log.Signature
if err := jws.UnsafeParse(sig.(string), &signature); err != nil {
return err
}
switch {
case signature.Worker != nil:
telemetry.Record(ctx, WorkerLogReceived, 1)
return s.handleWorkerLog(ctx, signature.Worker.WorkerID, sig, m)
case signature.Service != nil:
telemetry.Record(ctx, ServiceLogReceived, 1)
return s.handleServiceLog(ctx, signature.Service.HatcheryID, signature.Service.HatcheryName, signature.Service.WorkerName, sig, m)
default:
return sdk.WithStack(sdk.ErrWrongRequest)
}
}
func (s *Service) handleWorkerLog(ctx context.Context, workerID string, sig interface{}, m hook.Message) error {
var signature log.Signature
var workerData sdk.Worker
cacheData, ok := logCache.Get(fmt.Sprintf("worker-%s", workerID))
if !ok {
var err error
workerData, err = s.getWorker(ctx, workerID)
if err != nil {
return err
}
} else {
workerData = cacheData.(sdk.Worker)
}
if err := jws.Verify(workerData.PrivateKey, sig.(string), &signature); err != nil {
return err
}
if workerData.JobRunID == nil || *workerData.JobRunID != signature.JobID {
return sdk.WithStack(sdk.ErrForbidden)
}
hm := handledMessage{
Signature: signature,
Msg: m,
}
cacheKey := cache.Key(keyJobLogQueue, strconv.Itoa(int(signature.JobID)))
if err := s.Cache.Enqueue(cacheKey, hm); err != nil {
return err
}
return nil
}
type handledMessage struct {
Signature log.Signature
Msg hook.Message
}
func buildMessage(signature log.Signature, m hook.Message) string {
logDate := time.Unix(0, int64(m.Time*1e9))
logs := sdk.Log{
JobID: signature.JobID,
LastModified: &logDate,
NodeRunID: signature.NodeRunID,
Start: &logDate,
StepOrder: signature.Worker.StepOrder,
Val: m.Full,
}
if !strings.HasSuffix(logs.Val, "\n") {
logs.Val += "\n"
}
var lvl string
switch m.Level {
case int32(hook.LOG_DEBUG):
lvl = "DEBUG"
case int32(hook.LOG_INFO):
lvl = "INFO"
case int32(hook.LOG_NOTICE):
lvl = "NOTICE"
case int32(hook.LOG_WARNING):
lvl = "WARN"
case int32(hook.LOG_ERR):
lvl = "ERROR"
case int32(hook.LOG_CRIT):
lvl = "CRITICAL"
case int32(hook.LOG_ALERT):
lvl = "ALERT"
case int32(hook.LOG_EMERG):
lvl = "EMERGENCY"
}
logs.Val = fmt.Sprintf("[%s] %s", lvl, logs.Val)
return logs.Val
}
func (s *Service) handleServiceLog(ctx context.Context, hatcheryID int64, hatcheryName string, workerName string, sig interface{}, m hook.Message) error {
var signature log.Signature
var pk *rsa.PublicKey
cacheData, ok := logCache.Get(fmt.Sprintf("hatchery-key-%d", hatcheryID))
if !ok {
var err error
pk, err = s.getHatchery(ctx, hatcheryID, hatcheryName)
if err != nil {
return err
}
} else {
pk = cacheData.(*rsa.PublicKey)
}
if err := jws.Verify(pk, sig.(string), &signature); err != nil {
return err
}
// Verified that worker has been spawn by this hatchery
workerCacheKey := fmt.Sprintf("service-worker-%s", workerName)
_, ok = logCache.Get(workerCacheKey)
if !ok {
// Verify that the worker has been spawn by this hatchery
wk, err := worker.LoadWorkerByName(ctx, s.Db, workerName)
if err != nil {
return err
}
if wk.HatcheryID == nil {
return sdk.WrapError(sdk.ErrWrongRequest, "hatchery %d cannot send service log for worker %s started by %s that is no more linked to an hatchery", signature.Service.HatcheryID, wk.ID, wk.HatcheryName)
}
if *wk.HatcheryID != signature.Service.HatcheryID {
return sdk.WrapError(sdk.ErrWrongRequest, "cannot send service log for worker %s from hatchery (expected: %d/actual: %d)", wk.ID, *wk.HatcheryID, signature.Service.HatcheryID)
}
logCache.Set(workerCacheKey, true, gocache.DefaultExpiration)
}
nodeRunJob, err := workflow.LoadNodeJobRun(ctx, s.Db, s.Cache, signature.JobID)
if err != nil {
return err
}
logs := sdk.ServiceLog{
ServiceRequirementName: signature.Service.RequirementName,
ServiceRequirementID: signature.Service.RequirementID,
WorkflowNodeJobRunID: signature.JobID,
WorkflowNodeRunID: nodeRunJob.WorkflowNodeRunID,
Val: m.Full,
}
if !strings.HasSuffix(logs.Val, "\n") {
logs.Val += "\n"
}
if err := workflow.AddServiceLog(s.Db, nodeRunJob, &logs, s.Cfg.Log.ServiceMaxSize); err != nil {
return err
}
return nil
}
func (s *Service) getWorker(ctx context.Context, workerID string) (sdk.Worker, error) {
w, err := worker.LoadWorkerByIDWithDecryptKey(ctx, s.Db, workerID)
if err != nil {
return sdk.Worker{}, err
}
logCache.Set(fmt.Sprintf("worker-%s", w.ID), *w, gocache.DefaultExpiration)
return *w, nil
}
func (s *Service) getHatchery(ctx context.Context, hatcheryID int64, hatcheryName string) (*rsa.PublicKey, error) {
h, err := services.LoadByNameAndType(ctx, s.Db, hatcheryName, services.TypeHatchery)
if err != nil {
return nil, err
}
if h.ID != hatcheryID {
return nil, sdk.WithStack(sdk.ErrWrongRequest)
}
// Verify signature
pk, err := jws.NewPublicKeyFromPEM(h.PublicKey)
if err != nil {
return nil, sdk.WithStack(err)
}
logCache.Set(fmt.Sprintf("hatchery-key-%d", hatcheryID), pk, gocache.DefaultExpiration)
return pk, nil
}
func (s *Service) waitingJobs(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
default:
// List all queues
keyListQueue := cache.Key(keyJobLogQueue, "*")
listKeys, err := s.Cache.Keys(keyListQueue)
if err != nil {
log.Error(ctx, "unable to list jobs queues %s", keyListQueue)
continue
}
// For each key, check if heartbeat key exist
for _, k := range listKeys {
keyParts := strings.Split(k, ":")
jobID := keyParts[len(keyParts)-1]
jobQueueKey, err := s.canDequeue(jobID)
if err != nil {
log.Error(ctx, "unable to check canDequeue %s: %v", jobQueueKey, err)
continue
}
if jobQueueKey == "" |
sdk.GoRoutine(ctx, "cdn-dequeue-job-message", func(ctx context.Context) {
if err := s.dequeueJobMessages(ctx, jobQueueKey, jobID); err != nil {
log.Error(ctx, "unable to dequeue redis incoming job queue: %v", err)
}
})
}
time.Sleep(250 * time.Millisecond)
}
}
}
func (s *Service) dequeueJobMessages(ctx context.Context, jobLogsQueueKey string, jobID string) error {
log.Info(ctx, "Dequeue %s", jobLogsQueueKey)
var t0 = time.Now()
var t1 = time.Now()
var nbMessages int
defer func() {
delta := t1.Sub(t0)
log.Info(ctx, "processLogs[%s] - %d messages received in %v", jobLogsQueueKey, nbMessages, delta)
}()
defer func() {
// Remove heartbeat
_ = s.Cache.Delete(cache.Key(keyJobHearbeat, jobID))
}()
tick := time.NewTicker(5 * time.Second)
defer tick.Stop()
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-tick.C:
b, err := s.Cache.Exist(jobLogsQueueKey)
if err != nil {
log.Error(ctx, "unable to check if queue still exist: %v", err)
continue
} else if !b {
// leave dequeue if queue does not exist anymore
log.Info(ctx, "leaving job queue %s (queue no more exists)", jobLogsQueueKey)
return nil
}
// heartbeat
heartbeatKey := cache.Key(keyJobHearbeat, jobID)
if err := s.Cache.SetWithTTL(heartbeatKey, true, 30); err != nil {
log.Error(ctx, "unable to hearbeat %s: %v", heartbeatKey, err)
continue
}
default:
dequeuCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
var hm handledMessage
if err := s.Cache.DequeueWithContext(dequeuCtx, jobLogsQueueKey, 30*time.Millisecond, &hm); err != nil {
cancel()
if strings.Contains(err.Error(), "context deadline exceeded") {
return nil
}
log.Error(ctx, "unable to dequeue job logs queue %s: %v", jobLogsQueueKey, err)
continue
}
cancel()
if hm.Signature.Worker == nil {
continue
}
nbMessages++
t1 = time.Now()
currentLog := buildMessage(hm.Signature, hm.Msg)
if err := workflow.AppendLog(s.Db, hm.Signature.JobID, hm.Signature.NodeRunID, hm.Signature.Worker.StepOrder, currentLog, s.Cfg.Log.StepMaxSize); err != nil {
log.Error(ctx, "unable to process log: %+v", err)
}
}
}
}
func (s *Service) canDequeue(jobID string) (string, error) {
jobQueueKey := cache.Key(keyJobLogQueue, jobID)
heatbeatKey := cache.Key(keyJobHearbeat, jobID)
// Take a lock
lockKey := cache.Key(keyJobLock, jobID)
b, err := s.Cache.Lock(lockKey, 5*time.Second, 0, 1)
if err != nil {
return "", err
}
defer func() {
_ = s.Cache.Unlock(lockKey)
}()
if !b {
return "", nil
}
exist, err := s.Cache.Exist(heatbeatKey)
if err != nil {
return "", err
}
// if key exist, that mean that someone is already dequeuing
if exist {
return "", nil
}
//hearbeat
heartbeatKey := cache.Key(keyJobHearbeat, jobID)
if err := s.Cache.SetWithTTL(heartbeatKey, true, 30); err != nil {
return "", err
}
return jobQueueKey, nil
}
| {
continue
} | conditional_block |
cdn_log.go | package cdn
import (
"bufio"
"context"
"crypto/rsa"
"fmt"
"net"
"strconv"
"strings"
"time"
gocache "github.com/patrickmn/go-cache"
"github.com/ovh/cds/engine/api/cache"
"github.com/ovh/cds/engine/api/services"
"github.com/ovh/cds/engine/api/worker"
"github.com/ovh/cds/engine/api/workflow"
"github.com/ovh/cds/sdk"
"github.com/ovh/cds/sdk/jws"
"github.com/ovh/cds/sdk/log"
"github.com/ovh/cds/sdk/log/hook"
"github.com/ovh/cds/sdk/telemetry"
)
var (
logCache = gocache.New(20*time.Minute, 30*time.Minute)
keyJobLogQueue = cache.Key("cdn", "log", "job")
keyJobHearbeat = cache.Key("cdn", "log", "heartbeat")
keyJobLock = cache.Key("cdn", "log", "lock")
)
func (s *Service) RunTcpLogServer(ctx context.Context) {
log.Info(ctx, "Starting tcp server %s:%d", s.Cfg.TCP.Addr, s.Cfg.TCP.Port)
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", s.Cfg.TCP.Addr, s.Cfg.TCP.Port))
if err != nil {
log.Fatalf("unable to start tcp log server: %v", err)
}
//Gracefully shutdown the tcp server
go func() {
<-ctx.Done()
log.Info(ctx, "CDN> Shutdown tcp log Server")
_ = listener.Close()
}()
// Looking for something to dequeue
sdk.GoRoutine(ctx, "cdn-waiting-job", func(ctx context.Context) {
s.waitingJobs(ctx)
})
go func() {
for {
conn, err := listener.Accept()
if err != nil {
telemetry.Record(ctx, Errors, 1)
log.Error(ctx, "unable to accept connection: %v", err)
return
}
sdk.GoRoutine(ctx, "cdn-logServer", func(ctx context.Context) {
telemetry.Record(ctx, Hits, 1)
s.handleConnection(ctx, conn)
})
}
}()
}
func (s *Service) handleConnection(ctx context.Context, conn net.Conn) {
defer func() {
_ = conn.Close()
}()
bufReader := bufio.NewReader(conn)
for {
bytes, err := bufReader.ReadBytes(byte(0))
if err != nil {
log.Info(ctx, "client left")
return
}
// remove byte(0)
bytes = bytes[:len(bytes)-1]
if err := s.handleLogMessage(ctx, bytes); err != nil {
telemetry.Record(ctx, Errors, 1)
log.Error(ctx, "cdn.log> %v", err)
continue
}
}
}
func (s *Service) handleLogMessage(ctx context.Context, messageReceived []byte) error {
m := hook.Message{}
if err := m.UnmarshalJSON(messageReceived); err != nil {
return sdk.WrapError(err, "unable to unmarshall gelf message: %s", string(messageReceived))
}
sig, ok := m.Extra["_"+log.ExtraFieldSignature]
if !ok || sig == "" {
return sdk.WithStack(fmt.Errorf("signature not found on log message: %+v", m))
}
// Get worker datas
var signature log.Signature
if err := jws.UnsafeParse(sig.(string), &signature); err != nil {
return err
}
switch {
case signature.Worker != nil:
telemetry.Record(ctx, WorkerLogReceived, 1)
return s.handleWorkerLog(ctx, signature.Worker.WorkerID, sig, m)
case signature.Service != nil:
telemetry.Record(ctx, ServiceLogReceived, 1)
return s.handleServiceLog(ctx, signature.Service.HatcheryID, signature.Service.HatcheryName, signature.Service.WorkerName, sig, m)
default:
return sdk.WithStack(sdk.ErrWrongRequest)
}
}
func (s *Service) handleWorkerLog(ctx context.Context, workerID string, sig interface{}, m hook.Message) error {
var signature log.Signature
var workerData sdk.Worker
cacheData, ok := logCache.Get(fmt.Sprintf("worker-%s", workerID))
if !ok {
var err error
workerData, err = s.getWorker(ctx, workerID)
if err != nil {
return err
}
} else {
workerData = cacheData.(sdk.Worker)
}
if err := jws.Verify(workerData.PrivateKey, sig.(string), &signature); err != nil {
return err
}
if workerData.JobRunID == nil || *workerData.JobRunID != signature.JobID {
return sdk.WithStack(sdk.ErrForbidden)
}
hm := handledMessage{
Signature: signature,
Msg: m,
}
cacheKey := cache.Key(keyJobLogQueue, strconv.Itoa(int(signature.JobID)))
if err := s.Cache.Enqueue(cacheKey, hm); err != nil {
return err
}
return nil
}
type handledMessage struct {
Signature log.Signature
Msg hook.Message
}
func buildMessage(signature log.Signature, m hook.Message) string {
logDate := time.Unix(0, int64(m.Time*1e9))
logs := sdk.Log{
JobID: signature.JobID,
LastModified: &logDate,
NodeRunID: signature.NodeRunID,
Start: &logDate,
StepOrder: signature.Worker.StepOrder,
Val: m.Full,
}
if !strings.HasSuffix(logs.Val, "\n") {
logs.Val += "\n"
}
var lvl string
switch m.Level {
case int32(hook.LOG_DEBUG):
lvl = "DEBUG"
case int32(hook.LOG_INFO):
lvl = "INFO"
case int32(hook.LOG_NOTICE):
lvl = "NOTICE"
case int32(hook.LOG_WARNING):
lvl = "WARN"
case int32(hook.LOG_ERR):
lvl = "ERROR"
case int32(hook.LOG_CRIT):
lvl = "CRITICAL"
case int32(hook.LOG_ALERT):
lvl = "ALERT"
case int32(hook.LOG_EMERG):
lvl = "EMERGENCY"
}
logs.Val = fmt.Sprintf("[%s] %s", lvl, logs.Val)
return logs.Val
}
func (s *Service) handleServiceLog(ctx context.Context, hatcheryID int64, hatcheryName string, workerName string, sig interface{}, m hook.Message) error {
var signature log.Signature
var pk *rsa.PublicKey
cacheData, ok := logCache.Get(fmt.Sprintf("hatchery-key-%d", hatcheryID))
if !ok {
var err error
pk, err = s.getHatchery(ctx, hatcheryID, hatcheryName)
if err != nil {
return err
}
} else {
pk = cacheData.(*rsa.PublicKey)
}
if err := jws.Verify(pk, sig.(string), &signature); err != nil {
return err
}
// Verified that worker has been spawn by this hatchery
workerCacheKey := fmt.Sprintf("service-worker-%s", workerName)
_, ok = logCache.Get(workerCacheKey)
if !ok {
// Verify that the worker has been spawn by this hatchery
wk, err := worker.LoadWorkerByName(ctx, s.Db, workerName)
if err != nil {
return err
}
if wk.HatcheryID == nil {
return sdk.WrapError(sdk.ErrWrongRequest, "hatchery %d cannot send service log for worker %s started by %s that is no more linked to an hatchery", signature.Service.HatcheryID, wk.ID, wk.HatcheryName)
}
if *wk.HatcheryID != signature.Service.HatcheryID {
return sdk.WrapError(sdk.ErrWrongRequest, "cannot send service log for worker %s from hatchery (expected: %d/actual: %d)", wk.ID, *wk.HatcheryID, signature.Service.HatcheryID)
}
logCache.Set(workerCacheKey, true, gocache.DefaultExpiration)
}
nodeRunJob, err := workflow.LoadNodeJobRun(ctx, s.Db, s.Cache, signature.JobID)
if err != nil {
return err
}
logs := sdk.ServiceLog{
ServiceRequirementName: signature.Service.RequirementName,
ServiceRequirementID: signature.Service.RequirementID,
WorkflowNodeJobRunID: signature.JobID,
WorkflowNodeRunID: nodeRunJob.WorkflowNodeRunID,
Val: m.Full,
}
if !strings.HasSuffix(logs.Val, "\n") {
logs.Val += "\n"
}
if err := workflow.AddServiceLog(s.Db, nodeRunJob, &logs, s.Cfg.Log.ServiceMaxSize); err != nil {
return err
}
return nil
}
func (s *Service) getWorker(ctx context.Context, workerID string) (sdk.Worker, error) {
w, err := worker.LoadWorkerByIDWithDecryptKey(ctx, s.Db, workerID)
if err != nil {
return sdk.Worker{}, err
}
logCache.Set(fmt.Sprintf("worker-%s", w.ID), *w, gocache.DefaultExpiration)
return *w, nil
}
func (s *Service) getHatchery(ctx context.Context, hatcheryID int64, hatcheryName string) (*rsa.PublicKey, error) {
h, err := services.LoadByNameAndType(ctx, s.Db, hatcheryName, services.TypeHatchery)
if err != nil {
return nil, err
}
if h.ID != hatcheryID {
return nil, sdk.WithStack(sdk.ErrWrongRequest)
}
// Verify signature
pk, err := jws.NewPublicKeyFromPEM(h.PublicKey)
if err != nil {
return nil, sdk.WithStack(err)
}
logCache.Set(fmt.Sprintf("hatchery-key-%d", hatcheryID), pk, gocache.DefaultExpiration)
return pk, nil
}
func (s *Service) waitingJobs(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
default:
// List all queues
keyListQueue := cache.Key(keyJobLogQueue, "*")
listKeys, err := s.Cache.Keys(keyListQueue)
if err != nil {
log.Error(ctx, "unable to list jobs queues %s", keyListQueue)
continue
}
// For each key, check if heartbeat key exist
for _, k := range listKeys {
keyParts := strings.Split(k, ":")
jobID := keyParts[len(keyParts)-1]
jobQueueKey, err := s.canDequeue(jobID)
if err != nil {
log.Error(ctx, "unable to check canDequeue %s: %v", jobQueueKey, err)
continue
}
if jobQueueKey == "" {
continue
}
sdk.GoRoutine(ctx, "cdn-dequeue-job-message", func(ctx context.Context) {
if err := s.dequeueJobMessages(ctx, jobQueueKey, jobID); err != nil {
log.Error(ctx, "unable to dequeue redis incoming job queue: %v", err)
}
})
}
time.Sleep(250 * time.Millisecond)
}
}
}
func (s *Service) dequeueJobMessages(ctx context.Context, jobLogsQueueKey string, jobID string) error {
log.Info(ctx, "Dequeue %s", jobLogsQueueKey)
var t0 = time.Now()
var t1 = time.Now()
var nbMessages int
defer func() {
delta := t1.Sub(t0)
log.Info(ctx, "processLogs[%s] - %d messages received in %v", jobLogsQueueKey, nbMessages, delta)
}()
defer func() {
// Remove heartbeat
_ = s.Cache.Delete(cache.Key(keyJobHearbeat, jobID))
}()
tick := time.NewTicker(5 * time.Second)
defer tick.Stop()
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-tick.C:
b, err := s.Cache.Exist(jobLogsQueueKey)
if err != nil {
log.Error(ctx, "unable to check if queue still exist: %v", err)
continue
} else if !b {
// leave dequeue if queue does not exist anymore
log.Info(ctx, "leaving job queue %s (queue no more exists)", jobLogsQueueKey)
return nil
}
// heartbeat
heartbeatKey := cache.Key(keyJobHearbeat, jobID)
if err := s.Cache.SetWithTTL(heartbeatKey, true, 30); err != nil {
log.Error(ctx, "unable to hearbeat %s: %v", heartbeatKey, err)
continue
}
default:
dequeuCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
var hm handledMessage
if err := s.Cache.DequeueWithContext(dequeuCtx, jobLogsQueueKey, 30*time.Millisecond, &hm); err != nil {
cancel()
if strings.Contains(err.Error(), "context deadline exceeded") {
return nil
}
log.Error(ctx, "unable to dequeue job logs queue %s: %v", jobLogsQueueKey, err)
continue
}
cancel()
if hm.Signature.Worker == nil {
continue
}
nbMessages++
t1 = time.Now()
currentLog := buildMessage(hm.Signature, hm.Msg)
if err := workflow.AppendLog(s.Db, hm.Signature.JobID, hm.Signature.NodeRunID, hm.Signature.Worker.StepOrder, currentLog, s.Cfg.Log.StepMaxSize); err != nil {
log.Error(ctx, "unable to process log: %+v", err)
}
}
}
}
func (s *Service) canDequeue(jobID string) (string, error) {
jobQueueKey := cache.Key(keyJobLogQueue, jobID)
heatbeatKey := cache.Key(keyJobHearbeat, jobID)
// Take a lock
lockKey := cache.Key(keyJobLock, jobID)
b, err := s.Cache.Lock(lockKey, 5*time.Second, 0, 1)
if err != nil {
return "", err
}
defer func() {
_ = s.Cache.Unlock(lockKey)
}()
if !b {
return "", nil
}
exist, err := s.Cache.Exist(heatbeatKey)
if err != nil {
return "", err
} | if exist {
return "", nil
}
//hearbeat
heartbeatKey := cache.Key(keyJobHearbeat, jobID)
if err := s.Cache.SetWithTTL(heartbeatKey, true, 30); err != nil {
return "", err
}
return jobQueueKey, nil
} | // if key exist, that mean that someone is already dequeuing | random_line_split |
cdn_log.go | package cdn
import (
"bufio"
"context"
"crypto/rsa"
"fmt"
"net"
"strconv"
"strings"
"time"
gocache "github.com/patrickmn/go-cache"
"github.com/ovh/cds/engine/api/cache"
"github.com/ovh/cds/engine/api/services"
"github.com/ovh/cds/engine/api/worker"
"github.com/ovh/cds/engine/api/workflow"
"github.com/ovh/cds/sdk"
"github.com/ovh/cds/sdk/jws"
"github.com/ovh/cds/sdk/log"
"github.com/ovh/cds/sdk/log/hook"
"github.com/ovh/cds/sdk/telemetry"
)
var (
logCache = gocache.New(20*time.Minute, 30*time.Minute)
keyJobLogQueue = cache.Key("cdn", "log", "job")
keyJobHearbeat = cache.Key("cdn", "log", "heartbeat")
keyJobLock = cache.Key("cdn", "log", "lock")
)
func (s *Service) RunTcpLogServer(ctx context.Context) {
log.Info(ctx, "Starting tcp server %s:%d", s.Cfg.TCP.Addr, s.Cfg.TCP.Port)
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", s.Cfg.TCP.Addr, s.Cfg.TCP.Port))
if err != nil {
log.Fatalf("unable to start tcp log server: %v", err)
}
//Gracefully shutdown the tcp server
go func() {
<-ctx.Done()
log.Info(ctx, "CDN> Shutdown tcp log Server")
_ = listener.Close()
}()
// Looking for something to dequeue
sdk.GoRoutine(ctx, "cdn-waiting-job", func(ctx context.Context) {
s.waitingJobs(ctx)
})
go func() {
for {
conn, err := listener.Accept()
if err != nil {
telemetry.Record(ctx, Errors, 1)
log.Error(ctx, "unable to accept connection: %v", err)
return
}
sdk.GoRoutine(ctx, "cdn-logServer", func(ctx context.Context) {
telemetry.Record(ctx, Hits, 1)
s.handleConnection(ctx, conn)
})
}
}()
}
// handleConnection consumes one client connection: it reads NUL-delimited
// frames until the peer disconnects and forwards each frame to
// handleLogMessage. Processing errors are logged but never terminate the
// connection.
func (s *Service) handleConnection(ctx context.Context, conn net.Conn) {
	defer func() {
		_ = conn.Close()
	}()
	reader := bufio.NewReader(conn)
	for {
		frame, err := reader.ReadBytes(byte(0))
		if err != nil {
			log.Info(ctx, "client left")
			return
		}
		// strip the trailing NUL delimiter
		payload := frame[:len(frame)-1]
		if err := s.handleLogMessage(ctx, payload); err != nil {
			telemetry.Record(ctx, Errors, 1)
			log.Error(ctx, "cdn.log> %v", err)
		}
	}
}
// handleLogMessage parses a raw gelf payload, extracts the jws signature
// carried in the signature extra field, and routes the message to either the
// worker or the service handler depending on which part of the signature is
// set.
func (s *Service) handleLogMessage(ctx context.Context, messageReceived []byte) error {
	m := hook.Message{}
	if err := m.UnmarshalJSON(messageReceived); err != nil {
		return sdk.WrapError(err, "unable to unmarshall gelf message: %s", string(messageReceived))
	}
	// the signature is mandatory: without it the sender cannot be authenticated
	sig, ok := m.Extra["_"+log.ExtraFieldSignature]
	if !ok || sig == "" {
		return sdk.WithStack(fmt.Errorf("signature not found on log message: %+v", m))
	}
	// Get worker datas
	// NOTE(review): UnsafeParse does not verify the signature here; actual
	// verification happens in handleWorkerLog/handleServiceLog once the
	// corresponding key has been loaded. sig.(string) would panic on a
	// non-string extra — presumably gelf extras are always strings; confirm.
	var signature log.Signature
	if err := jws.UnsafeParse(sig.(string), &signature); err != nil {
		return err
	}
	switch {
	case signature.Worker != nil:
		telemetry.Record(ctx, WorkerLogReceived, 1)
		return s.handleWorkerLog(ctx, signature.Worker.WorkerID, sig, m)
	case signature.Service != nil:
		telemetry.Record(ctx, ServiceLogReceived, 1)
		return s.handleServiceLog(ctx, signature.Service.HatcheryID, signature.Service.HatcheryName, signature.Service.WorkerName, sig, m)
	default:
		return sdk.WithStack(sdk.ErrWrongRequest)
	}
}
// handleWorkerLog authenticates a worker log message (jws signature checked
// against the worker's key) and enqueues it in the per-job redis queue that
// dequeueJobMessages consumes.
func (s *Service) handleWorkerLog(ctx context.Context, workerID string, sig interface{}, m hook.Message) error {
	var signature log.Signature
	var workerData sdk.Worker
	// worker data (including its key) is cached to avoid a DB hit per message
	cacheData, ok := logCache.Get(fmt.Sprintf("worker-%s", workerID))
	if !ok {
		var err error
		workerData, err = s.getWorker(ctx, workerID)
		if err != nil {
			return err
		}
	} else {
		workerData = cacheData.(sdk.Worker)
	}
	if err := jws.Verify(workerData.PrivateKey, sig.(string), &signature); err != nil {
		return err
	}
	// a worker may only push logs for the job it is currently running
	if workerData.JobRunID == nil || *workerData.JobRunID != signature.JobID {
		return sdk.WithStack(sdk.ErrForbidden)
	}
	hm := handledMessage{
		Signature: signature,
		Msg:       m,
	}
	// one redis queue per job ID
	cacheKey := cache.Key(keyJobLogQueue, strconv.Itoa(int(signature.JobID)))
	if err := s.Cache.Enqueue(cacheKey, hm); err != nil {
		return err
	}
	return nil
}
// handledMessage is the unit stored in the per-job redis queue: the
// authenticated signature plus the raw gelf message it signs.
type handledMessage struct {
	Signature log.Signature
	Msg       hook.Message
}
// buildMessage formats a queued gelf message into the final log line: the raw
// content, newline-terminated, prefixed with its severity label (an unknown
// gelf level yields an empty label, as before).
func buildMessage(signature log.Signature, m hook.Message) string {
	logDate := time.Unix(0, int64(m.Time*1e9))
	logs := sdk.Log{
		JobID:        signature.JobID,
		LastModified: &logDate,
		NodeRunID:    signature.NodeRunID,
		Start:        &logDate,
		StepOrder:    signature.Worker.StepOrder,
		Val:          m.Full,
	}
	if !strings.HasSuffix(logs.Val, "\n") {
		logs.Val += "\n"
	}
	// map the syslog-like gelf level onto its human-readable label
	levels := map[int32]string{
		int32(hook.LOG_DEBUG):   "DEBUG",
		int32(hook.LOG_INFO):    "INFO",
		int32(hook.LOG_NOTICE):  "NOTICE",
		int32(hook.LOG_WARNING): "WARN",
		int32(hook.LOG_ERR):     "ERROR",
		int32(hook.LOG_CRIT):    "CRITICAL",
		int32(hook.LOG_ALERT):   "ALERT",
		int32(hook.LOG_EMERG):   "EMERGENCY",
	}
	logs.Val = fmt.Sprintf("[%s] %s", levels[m.Level], logs.Val)
	return logs.Val
}
// handleServiceLog authenticates a service-requirement log message sent by a
// hatchery, checks that the worker it concerns was indeed spawned by that
// hatchery, then stores the log line synchronously in the workflow database
// (service logs are not queued, unlike worker logs).
func (s *Service) handleServiceLog(ctx context.Context, hatcheryID int64, hatcheryName string, workerName string, sig interface{}, m hook.Message) error {
	var signature log.Signature
	var pk *rsa.PublicKey
	// the hatchery public key is cached to avoid reloading the service row
	cacheData, ok := logCache.Get(fmt.Sprintf("hatchery-key-%d", hatcheryID))
	if !ok {
		var err error
		pk, err = s.getHatchery(ctx, hatcheryID, hatcheryName)
		if err != nil {
			return err
		}
	} else {
		pk = cacheData.(*rsa.PublicKey)
	}
	if err := jws.Verify(pk, sig.(string), &signature); err != nil {
		return err
	}
	// Verified that worker has been spawn by this hatchery
	workerCacheKey := fmt.Sprintf("service-worker-%s", workerName)
	_, ok = logCache.Get(workerCacheKey)
	if !ok {
		// Verify that the worker has been spawn by this hatchery
		wk, err := worker.LoadWorkerByName(ctx, s.Db, workerName)
		if err != nil {
			return err
		}
		if wk.HatcheryID == nil {
			return sdk.WrapError(sdk.ErrWrongRequest, "hatchery %d cannot send service log for worker %s started by %s that is no more linked to an hatchery", signature.Service.HatcheryID, wk.ID, wk.HatcheryName)
		}
		if *wk.HatcheryID != signature.Service.HatcheryID {
			return sdk.WrapError(sdk.ErrWrongRequest, "cannot send service log for worker %s from hatchery (expected: %d/actual: %d)", wk.ID, *wk.HatcheryID, signature.Service.HatcheryID)
		}
		// remember the verified worker/hatchery association
		logCache.Set(workerCacheKey, true, gocache.DefaultExpiration)
	}
	nodeRunJob, err := workflow.LoadNodeJobRun(ctx, s.Db, s.Cache, signature.JobID)
	if err != nil {
		return err
	}
	logs := sdk.ServiceLog{
		ServiceRequirementName: signature.Service.RequirementName,
		ServiceRequirementID:   signature.Service.RequirementID,
		WorkflowNodeJobRunID:   signature.JobID,
		WorkflowNodeRunID:      nodeRunJob.WorkflowNodeRunID,
		Val:                    m.Full,
	}
	if !strings.HasSuffix(logs.Val, "\n") {
		logs.Val += "\n"
	}
	if err := workflow.AddServiceLog(s.Db, nodeRunJob, &logs, s.Cfg.Log.ServiceMaxSize); err != nil {
		return err
	}
	return nil
}
// getWorker loads a worker (with its decrypted key) from the database and
// stores a copy in the in-memory cache for subsequent log messages.
func (s *Service) getWorker(ctx context.Context, workerID string) (sdk.Worker, error) {
	loaded, err := worker.LoadWorkerByIDWithDecryptKey(ctx, s.Db, workerID)
	if err != nil {
		return sdk.Worker{}, err
	}
	wk := *loaded
	logCache.Set(fmt.Sprintf("worker-%s", wk.ID), wk, gocache.DefaultExpiration)
	return wk, nil
}
func (s *Service) | (ctx context.Context, hatcheryID int64, hatcheryName string) (*rsa.PublicKey, error) {
h, err := services.LoadByNameAndType(ctx, s.Db, hatcheryName, services.TypeHatchery)
if err != nil {
return nil, err
}
if h.ID != hatcheryID {
return nil, sdk.WithStack(sdk.ErrWrongRequest)
}
// Verify signature
pk, err := jws.NewPublicKeyFromPEM(h.PublicKey)
if err != nil {
return nil, sdk.WithStack(err)
}
logCache.Set(fmt.Sprintf("hatchery-key-%d", hatcheryID), pk, gocache.DefaultExpiration)
return pk, nil
}
// waitingJobs polls the redis job-log queues every 250ms and starts one
// dequeuing goroutine per queue that is not already claimed by another cdn
// instance (an existing heartbeat key means someone else is dequeuing).
func (s *Service) waitingJobs(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		default:
			// List all queues
			keyListQueue := cache.Key(keyJobLogQueue, "*")
			listKeys, err := s.Cache.Keys(keyListQueue)
			if err != nil {
				log.Error(ctx, "unable to list jobs queues %s", keyListQueue)
				// sleep before retrying: the previous bare `continue` skipped
				// the pacing sleep below, turning a persistent redis error
				// into a busy spin.
				time.Sleep(250 * time.Millisecond)
				continue
			}
			// For each key, check if heartbeat key exist
			for _, k := range listKeys {
				keyParts := strings.Split(k, ":")
				jobID := keyParts[len(keyParts)-1]
				jobQueueKey, err := s.canDequeue(jobID)
				if err != nil {
					log.Error(ctx, "unable to check canDequeue %s: %v", jobQueueKey, err)
					continue
				}
				if jobQueueKey == "" {
					// another instance already owns this job's queue
					continue
				}
				sdk.GoRoutine(ctx, "cdn-dequeue-job-message", func(ctx context.Context) {
					if err := s.dequeueJobMessages(ctx, jobQueueKey, jobID); err != nil {
						log.Error(ctx, "unable to dequeue redis incoming job queue: %v", err)
					}
				})
			}
			time.Sleep(250 * time.Millisecond)
		}
	}
}
// dequeueJobMessages drains one job's log queue: each message is formatted by
// buildMessage and appended to the step logs. Every 5 seconds it refreshes
// the heartbeat key (so other cdn instances skip this job) and leaves once
// the queue key no longer exists or the queue stays empty for 15 seconds.
// The heartbeat is removed on exit so another instance may take over.
func (s *Service) dequeueJobMessages(ctx context.Context, jobLogsQueueKey string, jobID string) error {
	log.Info(ctx, "Dequeue %s", jobLogsQueueKey)
	var t0 = time.Now()
	var t1 = time.Now() // updated on each processed message; t1-t0 is reported below
	var nbMessages int
	defer func() {
		delta := t1.Sub(t0)
		log.Info(ctx, "processLogs[%s] - %d messages received in %v", jobLogsQueueKey, nbMessages, delta)
	}()
	defer func() {
		// Remove heartbeat
		_ = s.Cache.Delete(cache.Key(keyJobHearbeat, jobID))
	}()
	tick := time.NewTicker(5 * time.Second)
	defer tick.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-tick.C:
			b, err := s.Cache.Exist(jobLogsQueueKey)
			if err != nil {
				log.Error(ctx, "unable to check if queue still exist: %v", err)
				continue
			} else if !b {
				// leave dequeue if queue does not exist anymore
				log.Info(ctx, "leaving job queue %s (queue no more exists)", jobLogsQueueKey)
				return nil
			}
			// heartbeat: re-claim the job for another 30s
			heartbeatKey := cache.Key(keyJobHearbeat, jobID)
			if err := s.Cache.SetWithTTL(heartbeatKey, true, 30); err != nil {
				log.Error(ctx, "unable to hearbeat %s: %v", heartbeatKey, err)
				continue
			}
		default:
			// block at most 15s waiting for a message; a deadline here means
			// the queue stayed empty and this worker can release it
			dequeuCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
			var hm handledMessage
			if err := s.Cache.DequeueWithContext(dequeuCtx, jobLogsQueueKey, 30*time.Millisecond, &hm); err != nil {
				cancel()
				if strings.Contains(err.Error(), "context deadline exceeded") {
					return nil
				}
				log.Error(ctx, "unable to dequeue job logs queue %s: %v", jobLogsQueueKey, err)
				continue
			}
			cancel()
			// a zero-value message (no worker signature) carries nothing to store
			if hm.Signature.Worker == nil {
				continue
			}
			nbMessages++
			t1 = time.Now()
			currentLog := buildMessage(hm.Signature, hm.Msg)
			if err := workflow.AppendLog(s.Db, hm.Signature.JobID, hm.Signature.NodeRunID, hm.Signature.Worker.StepOrder, currentLog, s.Cfg.Log.StepMaxSize); err != nil {
				log.Error(ctx, "unable to process log: %+v", err)
			}
		}
	}
}
// canDequeue takes a short lock on the job and, if no other cdn instance
// holds a heartbeat for it, claims the job by writing the heartbeat key
// (30s TTL, refreshed by dequeueJobMessages). It returns the job queue key
// to dequeue, or "" when the job is already handled or the lock was not
// acquired. Fixes the duplicated heartbeat-key computation that existed
// under two spellings (`heatbeatKey` / `heartbeatKey`).
func (s *Service) canDequeue(jobID string) (string, error) {
	jobQueueKey := cache.Key(keyJobLogQueue, jobID)
	heartbeatKey := cache.Key(keyJobHearbeat, jobID)
	// Take a lock
	lockKey := cache.Key(keyJobLock, jobID)
	b, err := s.Cache.Lock(lockKey, 5*time.Second, 0, 1)
	if err != nil {
		return "", err
	}
	defer func() {
		_ = s.Cache.Unlock(lockKey)
	}()
	if !b {
		return "", nil
	}
	exist, err := s.Cache.Exist(heartbeatKey)
	if err != nil {
		return "", err
	}
	// if key exist, that mean that someone is already dequeuing
	if exist {
		return "", nil
	}
	//hearbeat
	if err := s.Cache.SetWithTTL(heartbeatKey, true, 30); err != nil {
		return "", err
	}
	return jobQueueKey, nil
}
| getHatchery | identifier_name |
test_runner.go | /*
* Copyright 2020 The Magma Authors.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package integration
import (
"fmt"
"math/rand"
"strconv"
"strings"
"testing"
"time"
"fbc/lib/go/radius"
cwfprotos "magma/cwf/cloud/go/protos"
"magma/cwf/gateway/registry"
"magma/cwf/gateway/services/uesim"
fegprotos "magma/feg/cloud/go/protos"
"magma/lte/cloud/go/crypto"
lteprotos "magma/lte/cloud/go/protos"
"github.com/pkg/errors"
)
// todo make Op configurable, or export it in the UESimServer.
const (
	// LTE milenage Op value shared by every simulated UE.
	Op = "\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11"
	// RADIUS shared secret used when parsing packets from the UE simulator.
	Secret = "123456"
	// Registry names for the mock/local services the tests talk to.
	MockHSSRemote    = "HSS_REMOTE"
	MockPCRFRemote   = "PCRF_REMOTE"
	MockOCSRemote    = "OCS_REMOTE"
	MockPCRFRemote2  = "PCRF_REMOTE2"
	MockOCSRemote2   = "OCS_REMOTE2"
	PipelinedRemote  = "pipelined.local"
	DirectorydRemote = "DIRECTORYD"
	RedisRemote      = "REDIS"
	// Addresses and ports of the services on the test VMs.
	CwagIP           = "192.168.70.101"
	TrafficCltIP     = "192.168.128.2"
	IPDRControllerIP = "192.168.40.11"
	OCSPort          = 9201
	PCRFPort         = 9202
	OCSPort2         = 9205
	PCRFPort2        = 9206
	HSSPort          = 9204
	PipelinedPort    = 8443
	RedisPort        = 6380
	DirectorydPort   = 8443
	// If updating these, also update the ipfix exported hex values
	defaultMSISDN          = "5100001234"
	defaultCalledStationID = "98-DE-D0-84-B5-47:CWF-TP-LINK_B547_5G"
	ipfixMSISDN            = "0x35313030303031323334000000000000"
	ipfixApnMacAddress     = "0x98ded084b547"
	ipfixApnName           = "0x4357462d54502d4c494e4b5f423534375f35470000000000"
	KiloBytes              = 1024
	MegaBytes              = 1024 * KiloBytes
	Buffer                 = 100 * KiloBytes
	// Gx/Gy re-auth and quota parameters used across the test scenarios.
	RevalidationTimeoutEvent = 17
	ReAuthMaxUsageBytes      = 5 * MegaBytes
	ReAuthMaxUsageTimeSec    = 1000 // in second
	ReAuthValidityTime       = 60   // in second
	GyMaxUsageBytes          = 5 * MegaBytes
	GyMaxUsageTime           = 1000 // in second
	GyValidityTime           = 60   // in second
)
// TestRunner helps setting up all associated services for a test run.
type TestRunner struct {
	t *testing.T
	// imsis tracks every IMSI configured during the run so that CleanUp can
	// remove them and ConfigUEs can guarantee uniqueness.
	imsis map[string]bool
	// mock PCRF/OCS instances registered for this run (cleared on CleanUp).
	activePCRFs []string
	activeOCSs  []string
	// startTime is when the runner was created; see PrintElapsedTime.
	startTime time.Time
}
// RecordByIMSI maps IMSI -> rule ID -> the corresponding enforcement record.
type RecordByIMSI map[string]map[string]*lteprotos.RuleRecord
// NewTestRunner initializes a new TestRunner by registering every mock/local
// service the tests talk to (all of them live on the CWAG VM) and preparing
// the IMSI bookkeeping. The six copy-pasted print+register pairs are now
// driven by a single table; output order is unchanged.
func NewTestRunner(t *testing.T) *TestRunner {
	startTime := time.Now()
	fmt.Println("************* TestRunner setup")
	for _, svc := range []struct {
		label  string
		remote string
		port   int
	}{
		{"Mock HSS", MockHSSRemote, HSSPort},
		{"Mock PCRF", MockPCRFRemote, PCRFPort},
		{"Mock OCS", MockOCSRemote, OCSPort},
		{"Pipelined", PipelinedRemote, PipelinedPort},
		{"Redis", RedisRemote, RedisPort},
		{"Directoryd", DirectorydRemote, DirectorydPort},
	} {
		fmt.Printf("Adding %s service at %s:%d\n", svc.label, CwagIP, svc.port)
		registry.AddService(svc.remote, CwagIP, svc.port)
	}
	testRunner := &TestRunner{
		t:           t,
		imsis:       make(map[string]bool),
		activePCRFs: []string{MockPCRFRemote},
		activeOCSs:  []string{MockOCSRemote},
		startTime:   startTime,
	}
	return testRunner
}
// NewTestRunnerWithTwoPCRFandOCS does the same as NewTestRunner but it includes 2 PCRF and 2 OCS.
// Used in scenarios that run 2 PCRFs and 2 OCSs.
func NewTestRunnerWithTwoPCRFandOCS(t *testing.T) *TestRunner {
	tr := NewTestRunner(t)
	fmt.Printf("Adding Mock PCRF #2 service at %s:%d\n", CwagIP, PCRFPort2)
	registry.AddService(MockPCRFRemote2, CwagIP, PCRFPort2)
	fmt.Printf("Adding Mock OCS #2 service at %s:%d\n", CwagIP, OCSPort2)
	registry.AddService(MockOCSRemote2, CwagIP, OCSPort2)
	// add the extra two servers for clean up
	tr.activePCRFs = append(tr.activePCRFs, MockPCRFRemote2)
	tr.activeOCSs = append(tr.activeOCSs, MockOCSRemote2)
	return tr
}
// ConfigUEs creates and adds the specified number of UEs and Subscribers
// to the UE Simulator and the HSS, drawing fresh random IMSIs that are not
// already tracked by this runner.
func (tr *TestRunner) ConfigUEs(numUEs int) ([]*cwfprotos.UEConfig, error) {
	imsis := make([]string, 0, numUEs)
	for len(imsis) < numUEs {
		candidate := getRandomIMSI()
		if _, taken := tr.imsis[candidate]; taken {
			continue
		}
		imsis = append(imsis, candidate)
	}
	return tr.ConfigUEsPerInstance(imsis, MockPCRFRemote, MockOCSRemote)
}
// ConfigUEsPerInstance same as ConfigUEs but per specific PCRF and OCS instance
func (tr *TestRunner) ConfigUEsPerInstance(IMSIs []string, pcrfInstance, ocsInstance string) ([]*cwfprotos.UEConfig, error) {
fmt.Printf("************* Configuring %d UE(s), PCRF instance: %s\n", len(IMSIs), pcrfInstance)
ues := make([]*cwfprotos.UEConfig, 0)
for _, imsi := range IMSIs {
// If IMSIs were generated properly they should never give an error here
if _, present := tr.imsis[imsi]; present {
return nil, errors.Errorf("IMSI %s already exist in database, use generateRandomIMSIS(num, tr.imsis) to create unique list", imsi)
}
key, opc, err := getRandKeyOpcFromOp([]byte(Op))
if err != nil |
seq := getRandSeq()
ue := makeUE(imsi, key, opc, seq)
sub := makeSubscriber(imsi, key, opc, seq+1)
err = uesim.AddUE(ue)
if err != nil {
return nil, errors.Wrap(err, "Error adding UE to UESimServer")
}
err = addSubscriberToHSS(sub)
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to HSS")
}
err = addSubscriberToPCRFPerInstance(pcrfInstance, sub.GetSid())
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to PCRF")
}
err = addSubscriberToOCSPerInstance(ocsInstance, sub.GetSid())
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to OCS")
}
ues = append(ues, ue)
fmt.Printf("Added UE to Simulator, %s, %s, and %s:\n"+
"\tIMSI: %s\tKey: %x\tOpc: %x\tSeq: %d\n", MockHSSRemote, pcrfInstance, ocsInstance, imsi, key, opc, seq)
tr.imsis[imsi] = true
}
fmt.Println("Successfully configured UE(s)")
return ues, nil
}
// Authenticate simulates an authentication between the UE and the HSS with
// the specified IMSI and CalledStationID, and returns the resulting Radius
// packet.
func (tr *TestRunner) Authenticate(imsi, calledStationID string) (*radius.Packet, error) {
	fmt.Printf("************* Authenticating UE with IMSI: %s\n", imsi)
	req := &cwfprotos.AuthenticateRequest{Imsi: imsi, CalledStationID: calledStationID}
	res, err := uesim.Authenticate(req)
	if err != nil {
		fmt.Println(err)
		return &radius.Packet{}, err
	}
	radiusP, err := radius.Parse(res.GetRadiusPacket(), []byte(Secret))
	if err != nil {
		err = errors.Wrap(err, "Error while parsing encoded Radius packet")
		fmt.Println(err)
		return &radius.Packet{}, err
	}
	fmt.Println("Finished Authenticating UE")
	return radiusP, nil
}
// Disconnect simulates a disconnect request for the UE with the specified
// IMSI and CalledStationID, and returns the resulting Radius packet.
// (The previous comment was a copy-paste of Authenticate's.)
func (tr *TestRunner) Disconnect(imsi, calledStationID string) (*radius.Packet, error) {
	fmt.Printf("************* Sending a disconnect request UE with IMSI: %s\n", imsi)
	res, err := uesim.Disconnect(&cwfprotos.DisconnectRequest{Imsi: imsi, CalledStationID: calledStationID})
	if err != nil {
		// NOTE(review): unlike Authenticate, the error is not printed here —
		// confirm whether that asymmetry is intentional.
		return &radius.Packet{}, err
	}
	encoded := res.GetRadiusPacket()
	radiusP, err := radius.Parse(encoded, []byte(Secret))
	if err != nil {
		err = errors.Wrap(err, "Error while parsing encoded Radius packet")
		fmt.Println(err)
		return &radius.Packet{}, err
	}
	fmt.Println("Finished Disconnecting UE")
	return radiusP, nil
}
// GenULTraffic simulates the UE sending traffic through the CWAG to the
// Internet by running an iperf3 client on the UE simulator and an iperf3
// server on the Magma traffic server. The byte count is only reported on
// success: previously it was printed from the response even when the call
// had failed, which was misleading.
func (tr *TestRunner) GenULTraffic(req *cwfprotos.GenTrafficRequest) (*cwfprotos.GenTrafficResponse, error) {
	fmt.Printf("************* Generating Traffic for UE with Req: %v\n", req)
	res, err := uesim.GenTraffic(req)
	if err != nil {
		return res, err
	}
	fmt.Printf("============> Total Sent: %d bytes\n", res.GetEndOutput().GetSumSent().GetBytes())
	return res, err
}
// CleanUp removes subscribers, rules, flows, and monitors so that
// consecutive test runs start from a clean state.
func (tr *TestRunner) CleanUp() error {
	for imsi := range tr.imsis {
		if err := deleteSubscribersFromHSS(imsi); err != nil {
			return err
		}
	}
	for _, instance := range tr.activePCRFs {
		if err := clearSubscribersFromPCRFPerInstance(instance); err != nil {
			return err
		}
	}
	for _, instance := range tr.activeOCSs {
		if err := clearSubscribersFromOCSPerInstance(instance); err != nil {
			return err
		}
	}
	return nil
}
// GetPolicyUsage is a wrapper around pipelined's GetPolicyUsage and returns
// the policy usage keyed by subscriber ID, then by rule ID.
func (tr *TestRunner) GetPolicyUsage() (RecordByIMSI, error) {
	byIMSI := RecordByIMSI{}
	table, err := getPolicyUsage()
	if err != nil {
		return byIMSI, err
	}
	for _, record := range table.Records {
		fmt.Printf("Record %v\n", record)
		rules, ok := byIMSI[record.Sid]
		if !ok {
			rules = map[string]*lteprotos.RuleRecord{}
			byIMSI[record.Sid] = rules
		}
		rules[record.RuleId] = record
	}
	return byIMSI, nil
}
// WaitForEnforcementStatsToSync sleeps long enough (4 poll periods) for
// pipelined to report fresh enforcement stats.
func (tr *TestRunner) WaitForEnforcementStatsToSync() {
	// TODO load this value from pipelined.yml
	enforcementPollPeriod := 1 * time.Second
	time.Sleep(4 * enforcementPollPeriod)
}
// WaitForPoliciesToSync sleeps long enough (4 update periods) for sessiond
// to propagate policy/rule updates.
func (tr *TestRunner) WaitForPoliciesToSync() {
	// TODO load this value from sessiond.yml (rule_update_interval_sec)
	ruleUpdatePeriod := 1 * time.Second
	time.Sleep(4 * ruleUpdatePeriod)
}
// WaitForEnforcementStatsForRule returns a poll function that reports true
// once every given rule ID is present in the enforcement stats for the IMSI.
func (tr *TestRunner) WaitForEnforcementStatsForRule(imsi string, ruleIDs ...string) func() bool {
	// Wait until the ruleIDs show up for the IMSI
	return func() bool {
		fmt.Printf("Waiting until %s, %v shows up in enforcement stats...\n", imsi, ruleIDs)
		records, err := tr.GetPolicyUsage()
		if err != nil {
			return false
		}
		if records[prependIMSIPrefix(imsi)] == nil {
			return false
		}
		// every requested rule must be present
		for _, ruleID := range ruleIDs {
			if records[prependIMSIPrefix(imsi)][ruleID] == nil {
				return false
			}
		}
		fmt.Printf("%s, %v are now in enforcement stats!\n", imsi, ruleIDs)
		return true
	}
}
// WaitForNoEnforcementStatsForRule returns a poll function that reports true
// once none of the given rule IDs appear in the enforcement stats for the
// IMSI (including the case where the IMSI itself has disappeared).
func (tr *TestRunner) WaitForNoEnforcementStatsForRule(imsi string, ruleIDs ...string) func() bool {
	// Wait until the ruleIDs disappear for the IMSI
	return func() bool {
		fmt.Printf("Waiting until %s, %v disappear from enforcement stats...\n", imsi, ruleIDs)
		records, err := tr.GetPolicyUsage()
		if err != nil {
			return false
		}
		if records[prependIMSIPrefix(imsi)] == nil {
			fmt.Printf("%s are no longer in enforcement stats!\n", imsi)
			return true
		}
		// any remaining rule means we are not done yet
		for _, ruleID := range ruleIDs {
			if records[prependIMSIPrefix(imsi)][ruleID] != nil {
				return false
			}
		}
		fmt.Printf("%s, %v are no longer in enforcement stats!\n", imsi, ruleIDs)
		return true
	}
}
// WaitForEnforcementStatsForRuleGreaterThan returns a poll function that
// reports true once the rule's transmitted byte count for the IMSI exceeds
// min.
func (tr *TestRunner) WaitForEnforcementStatsForRuleGreaterThan(imsi, ruleID string, min uint64) func() bool {
	// Todo figure out the best way to figure out when RAR is processed
	return func() bool {
		fmt.Printf("Waiting until %s, %s has more than %d bytes in enforcement stats...\n", imsi, ruleID, min)
		records, err := tr.GetPolicyUsage()
		// NOTE(review): this reassignment mutates the captured variable, so
		// the first invocation prints the bare IMSI and later ones print the
		// prefixed form. prependIMSIPrefix is idempotent so lookups stay
		// correct, but confirm the changing printout is intentional.
		imsi = prependIMSIPrefix(imsi)
		if err != nil {
			return false
		}
		if records[imsi] == nil {
			return false
		}
		record := records[imsi][ruleID]
		if record == nil {
			return false
		}
		txBytes := record.BytesTx
		if record.BytesTx <= min {
			return false
		}
		fmt.Printf("%s, %s now passed %d > %d in enforcement stats!\n", imsi, ruleID, txBytes, min)
		return true
	}
}
// WaitForPolicyReAuthToProcess returns a poll function that reports true once
// the policy re-auth answer is present and its session ID references the IMSI.
func (tr *TestRunner) WaitForPolicyReAuthToProcess(raa *fegprotos.PolicyReAuthAnswer, imsi string) func() bool {
	// Todo figure out the best way to figure out when RAR is processed
	return func() bool {
		return raa != nil && strings.Contains(raa.SessionId, "IMSI"+imsi)
	}
}
// WaitForChargingReAuthToProcess returns a poll function that reports true
// once the charging re-auth answer is present and its session ID references
// the IMSI.
func (tr *TestRunner) WaitForChargingReAuthToProcess(raa *fegprotos.ChargingReAuthAnswer, imsi string) func() bool {
	// Todo figure out the best way to figure out when RAR is processed
	return func() bool {
		return raa != nil && strings.Contains(raa.SessionId, "IMSI"+imsi)
	}
}
// PrintElapsedTime prints the wall-clock time elapsed since the runner was
// created. Uses the idiomatic time.Since instead of time.Now().Sub.
func (tr *TestRunner) PrintElapsedTime() {
	fmt.Printf("Elapsed Time: %s\n", time.Since(tr.startTime))
}
// generateRandomIMSIS creates a slice of unique random IMSIs, also avoiding
// any IMSI already present in the given pre-existing set (which may be nil).
func generateRandomIMSIS(numIMSIs int, preExistingIMSIS map[string]interface{}) []string {
	seen := make(map[string]bool)
	result := make([]string, 0, numIMSIs)
	for len(result) < numIMSIs {
		candidate := getRandomIMSI()
		// redraw on collision with either the pre-existing set or this batch
		if preExistingIMSIS != nil {
			if _, exists := preExistingIMSIS[candidate]; exists {
				continue
			}
		}
		if seen[candidate] {
			continue
		}
		seen[candidate] = true
		result = append(result, candidate)
	}
	return result
}
// getRandomIMSI makes a random 15-digit IMSI that is not added to the UESim
// or HSS. Built with strings.Builder instead of repeated string concatenation.
func getRandomIMSI() string {
	var b strings.Builder
	b.Grow(15)
	for i := 0; i < 15; i++ {
		b.WriteString(strconv.Itoa(rand.Intn(10)))
	}
	return b.String()
}
// getRandKeyOpcFromOp makes a random 16-byte key and calculates the Opc based
// off the Op.
// NOTE(review): math/rand (not crypto/rand) is used; fine for test fixtures,
// but confirm these credentials never leave the test environment.
func getRandKeyOpcFromOp(op []byte) (key, opc []byte, err error) {
	key = make([]byte, 16)
	// math/rand.Read is documented to always return len(p), nil, so the
	// return values are deliberately ignored here.
	rand.Read(key)
	tempOpc, err := crypto.GenerateOpc(key, op)
	if err != nil {
		return nil, nil, err
	}
	opc = tempOpc[:]
	return
}
// getRandSeq makes a random 43-bit Seq by discarding the top bits of a
// random 64-bit value.
func getRandSeq() uint64 {
	const seqBits = 43
	return rand.Uint64() >> (64 - seqBits)
}
// makeUE creates a new UE config for the simulator using the given values.
// seq is the UE-side LTE auth sequence number (the HSS side stores seq+1;
// see ConfigUEsPerInstance).
func makeUE(imsi string, key []byte, opc []byte, seq uint64) *cwfprotos.UEConfig {
	return &cwfprotos.UEConfig{
		Imsi:    imsi,
		AuthKey: key,
		AuthOpc: opc,
		Seq:     seq,
	}
}
// prependIMSIPrefix returns imsi with the "IMSI" prefix, adding it only when
// it is not already present (idempotent).
func prependIMSIPrefix(imsi string) string {
	const prefix = "IMSI"
	if strings.HasPrefix(imsi, prefix) {
		return imsi
	}
	return prefix + imsi
}
// MakeSubcriber creates a new Subscriber using the given values.
func makeSubscriber(imsi string, key []byte, opc []byte, seq uint64) *lteprotos.SubscriberData {
return <eprotos.SubscriberData{
Sid: <eprotos.SubscriberID{
Id: imsi,
Type: 1,
},
Lte: <eprotos.LTESubscription{
State: 1,
AuthAlgo: 0,
AuthKey: key,
AuthOpc: opc,
},
State: <eprotos.SubscriberState{
LteAuthNextSeq: seq,
},
Non_3Gpp: <eprotos.Non3GPPUserProfile{
Msisdn: defaultMSISDN,
Non_3GppIpAccess: lteprotos.Non3GPPUserProfile_NON_3GPP_SUBSCRIPTION_ALLOWED,
Non_3GppIpAccessApn: lteprotos.Non3GPPUserProfile_NON_3GPP_APNS_ENABLE,
ApnConfig: []*lteprotos.APNConfiguration{{}},
},
}
}
// getEncodedIMSI returns the Pipelined-encoded version of an IMSI (the value
// set in the metadata register): the numeric IMSI shifted left two bits with
// the leading-zero count (capped at 3) packed below it, then the whole value
// shifted once more with the lowest bit set, rendered as a 16-digit hex
// literal.
func getEncodedIMSI(imsiStr string) (string, error) {
	num, err := strconv.Atoi(imsiStr)
	if err != nil {
		return "", err
	}
	leadingZeros := len(imsiStr) - len(strings.TrimLeft(imsiStr, "0"))
	packed := (num << 2) | (leadingZeros & 0x3)
	return fmt.Sprintf("0x%016x", (packed<<1)|0x1), nil
}
| {
return nil, err
} | conditional_block |
test_runner.go | /*
* Copyright 2020 The Magma Authors.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package integration
import (
"fmt"
"math/rand"
"strconv"
"strings"
"testing"
"time"
"fbc/lib/go/radius"
cwfprotos "magma/cwf/cloud/go/protos"
"magma/cwf/gateway/registry"
"magma/cwf/gateway/services/uesim"
fegprotos "magma/feg/cloud/go/protos"
"magma/lte/cloud/go/crypto"
lteprotos "magma/lte/cloud/go/protos"
"github.com/pkg/errors"
)
// todo make Op configurable, or export it in the UESimServer.
const (
Op = "\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11"
Secret = "123456"
MockHSSRemote = "HSS_REMOTE"
MockPCRFRemote = "PCRF_REMOTE"
MockOCSRemote = "OCS_REMOTE"
MockPCRFRemote2 = "PCRF_REMOTE2"
MockOCSRemote2 = "OCS_REMOTE2"
PipelinedRemote = "pipelined.local"
DirectorydRemote = "DIRECTORYD"
RedisRemote = "REDIS"
CwagIP = "192.168.70.101"
TrafficCltIP = "192.168.128.2"
IPDRControllerIP = "192.168.40.11"
OCSPort = 9201
PCRFPort = 9202
OCSPort2 = 9205
PCRFPort2 = 9206
HSSPort = 9204
PipelinedPort = 8443
RedisPort = 6380
DirectorydPort = 8443
// If updating these, also update the ipfix exported hex values
defaultMSISDN = "5100001234"
defaultCalledStationID = "98-DE-D0-84-B5-47:CWF-TP-LINK_B547_5G"
ipfixMSISDN = "0x35313030303031323334000000000000"
ipfixApnMacAddress = "0x98ded084b547"
ipfixApnName = "0x4357462d54502d4c494e4b5f423534375f35470000000000"
KiloBytes = 1024
MegaBytes = 1024 * KiloBytes
Buffer = 100 * KiloBytes
RevalidationTimeoutEvent = 17
ReAuthMaxUsageBytes = 5 * MegaBytes
ReAuthMaxUsageTimeSec = 1000 // in second
ReAuthValidityTime = 60 // in second
GyMaxUsageBytes = 5 * MegaBytes
GyMaxUsageTime = 1000 // in second
GyValidityTime = 60 // in second
)
//TestRunner helps setting up all associated services
type TestRunner struct {
t *testing.T
imsis map[string]bool
activePCRFs []string
activeOCSs []string
startTime time.Time
}
// imsi -> ruleID -> record
type RecordByIMSI map[string]map[string]*lteprotos.RuleRecord
// NewTestRunner initializes a new TestRunner by making a UESim client and
// and setting the next IMSI.
func NewTestRunner(t *testing.T) *TestRunner {
startTime := time.Now()
fmt.Println("************* TestRunner setup")
fmt.Printf("Adding Mock HSS service at %s:%d\n", CwagIP, HSSPort)
registry.AddService(MockHSSRemote, CwagIP, HSSPort)
fmt.Printf("Adding Mock PCRF service at %s:%d\n", CwagIP, PCRFPort)
registry.AddService(MockPCRFRemote, CwagIP, PCRFPort)
fmt.Printf("Adding Mock OCS service at %s:%d\n", CwagIP, OCSPort)
registry.AddService(MockOCSRemote, CwagIP, OCSPort)
fmt.Printf("Adding Pipelined service at %s:%d\n", CwagIP, PipelinedPort)
registry.AddService(PipelinedRemote, CwagIP, PipelinedPort)
fmt.Printf("Adding Redis service at %s:%d\n", CwagIP, RedisPort)
registry.AddService(RedisRemote, CwagIP, RedisPort)
fmt.Printf("Adding Directoryd service at %s:%d\n", CwagIP, DirectorydPort)
registry.AddService(DirectorydRemote, CwagIP, DirectorydPort)
testRunner := &TestRunner{t: t,
activePCRFs: []string{MockPCRFRemote},
activeOCSs: []string{MockOCSRemote},
startTime: startTime,
}
testRunner.imsis = make(map[string]bool)
return testRunner
}
// NewTestRunnerWithTwoPCRFandOCS does the same as NewTestRunner but it inclides 2 PCRF and 2 OCS
// Used in scenarios that run 2 PCRFs and 2 OCSs
func NewTestRunnerWithTwoPCRFandOCS(t *testing.T) *TestRunner {
tr := NewTestRunner(t)
fmt.Printf("Adding Mock PCRF #2 service at %s:%d\n", CwagIP, PCRFPort2)
registry.AddService(MockPCRFRemote2, CwagIP, PCRFPort2)
fmt.Printf("Adding Mock OCS #2 service at %s:%d\n", CwagIP, OCSPort2)
registry.AddService(MockOCSRemote2, CwagIP, OCSPort2)
// add the extra two servers for clean up
tr.activePCRFs = append(tr.activePCRFs, MockPCRFRemote2)
tr.activeOCSs = append(tr.activeOCSs, MockOCSRemote2)
return tr
}
// ConfigUEs creates and adds the specified number of UEs and Subscribers
// to the UE Simulator and the HSS.
func (tr *TestRunner) ConfigUEs(numUEs int) ([]*cwfprotos.UEConfig, error) {
IMSIs := make([]string, 0, numUEs)
for i := 0; i < numUEs; i++ {
imsi := ""
for {
imsi = getRandomIMSI()
_, present := tr.imsis[imsi]
if !present {
break
}
}
IMSIs = append(IMSIs, imsi)
}
return tr.ConfigUEsPerInstance(IMSIs, MockPCRFRemote, MockOCSRemote)
}
// ConfigUEsPerInstance same as ConfigUEs but per specific PCRF and OCS instance
func (tr *TestRunner) ConfigUEsPerInstance(IMSIs []string, pcrfInstance, ocsInstance string) ([]*cwfprotos.UEConfig, error) {
fmt.Printf("************* Configuring %d UE(s), PCRF instance: %s\n", len(IMSIs), pcrfInstance)
ues := make([]*cwfprotos.UEConfig, 0)
for _, imsi := range IMSIs {
// If IMSIs were generated properly they should never give an error here
if _, present := tr.imsis[imsi]; present {
return nil, errors.Errorf("IMSI %s already exist in database, use generateRandomIMSIS(num, tr.imsis) to create unique list", imsi)
}
key, opc, err := getRandKeyOpcFromOp([]byte(Op))
if err != nil {
return nil, err
}
seq := getRandSeq()
ue := makeUE(imsi, key, opc, seq)
sub := makeSubscriber(imsi, key, opc, seq+1)
err = uesim.AddUE(ue)
if err != nil {
return nil, errors.Wrap(err, "Error adding UE to UESimServer")
}
err = addSubscriberToHSS(sub)
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to HSS")
}
err = addSubscriberToPCRFPerInstance(pcrfInstance, sub.GetSid())
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to PCRF")
}
err = addSubscriberToOCSPerInstance(ocsInstance, sub.GetSid())
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to OCS")
}
ues = append(ues, ue)
fmt.Printf("Added UE to Simulator, %s, %s, and %s:\n"+
"\tIMSI: %s\tKey: %x\tOpc: %x\tSeq: %d\n", MockHSSRemote, pcrfInstance, ocsInstance, imsi, key, opc, seq)
tr.imsis[imsi] = true
}
fmt.Println("Successfully configured UE(s)")
return ues, nil
}
// Authenticate simulates an authentication between the UE and the HSS with the specified
// IMSI and CalledStationID, and returns the resulting Radius packet.
func (tr *TestRunner) Authenticate(imsi, calledStationID string) (*radius.Packet, error) {
fmt.Printf("************* Authenticating UE with IMSI: %s\n", imsi)
res, err := uesim.Authenticate(&cwfprotos.AuthenticateRequest{Imsi: imsi, CalledStationID: calledStationID})
if err != nil {
fmt.Println(err)
return &radius.Packet{}, err
}
encoded := res.GetRadiusPacket()
radiusP, err := radius.Parse(encoded, []byte(Secret))
if err != nil {
err = errors.Wrap(err, "Error while parsing encoded Radius packet")
fmt.Println(err)
return &radius.Packet{}, err
}
fmt.Println("Finished Authenticating UE")
return radiusP, nil
}
// Authenticate simulates an authentication between the UE and the HSS with the specified
// IMSI and CalledStationID, and returns the resulting Radius packet.
func (tr *TestRunner) Disconnect(imsi, calledStationID string) (*radius.Packet, error) {
fmt.Printf("************* Sending a disconnect request UE with IMSI: %s\n", imsi)
res, err := uesim.Disconnect(&cwfprotos.DisconnectRequest{Imsi: imsi, CalledStationID: calledStationID})
if err != nil {
return &radius.Packet{}, err
}
encoded := res.GetRadiusPacket()
radiusP, err := radius.Parse(encoded, []byte(Secret))
if err != nil {
err = errors.Wrap(err, "Error while parsing encoded Radius packet")
fmt.Println(err)
return &radius.Packet{}, err
}
fmt.Println("Finished Disconnecting UE")
return radiusP, nil
}
// GenULTraffic simulates the UE sending traffic through the CWAG to the Internet
// by running an iperf3 client on the UE simulator and an iperf3 server on the
// Magma traffic server.
func (tr *TestRunner) GenULTraffic(req *cwfprotos.GenTrafficRequest) (*cwfprotos.GenTrafficResponse, error) {
fmt.Printf("************* Generating Traffic for UE with Req: %v\n", req)
res, err := uesim.GenTraffic(req)
fmt.Printf("============> Total Sent: %d bytes\n", res.GetEndOutput().GetSumSent().GetBytes())
return res, err
}
// Remove subscribers, rules, flows, and monitors to clean up the state for
// consecutive test runs
func (tr *TestRunner) CleanUp() error {
for imsi := range tr.imsis {
err := deleteSubscribersFromHSS(imsi)
if err != nil {
return err
}
}
for _, instance := range tr.activePCRFs {
err := clearSubscribersFromPCRFPerInstance(instance)
if err != nil {
return err
}
}
for _, instance := range tr.activeOCSs {
err := clearSubscribersFromOCSPerInstance(instance)
if err != nil {
return err
}
}
return nil
}
// GetPolicyUsage is a wrapper around pipelined's GetPolicyUsage and returns
// the policy usage keyed by subscriber ID
func (tr *TestRunner) GetPolicyUsage() (RecordByIMSI, error) {
recordsBySubID := RecordByIMSI{}
table, err := getPolicyUsage()
if err != nil {
return recordsBySubID, err
}
for _, record := range table.Records {
fmt.Printf("Record %v\n", record)
_, exists := recordsBySubID[record.Sid]
if !exists {
recordsBySubID[record.Sid] = map[string]*lteprotos.RuleRecord{}
}
recordsBySubID[record.Sid][record.RuleId] = record
}
return recordsBySubID, nil
}
func (tr *TestRunner) WaitForEnforcementStatsToSync() {
// TODO load this value from pipelined.yml
enforcementPollPeriod := 1 * time.Second
time.Sleep(4 * enforcementPollPeriod)
}
func (tr *TestRunner) WaitForPoliciesToSync() {
// TODO load this value from sessiond.yml (rule_update_interval_sec)
ruleUpdatePeriod := 1 * time.Second
time.Sleep(4 * ruleUpdatePeriod)
}
func (tr *TestRunner) WaitForEnforcementStatsForRule(imsi string, ruleIDs ...string) func() bool {
// Wait until the ruleIDs show up for the IMSI
return func() bool {
fmt.Printf("Waiting until %s, %v shows up in enforcement stats...\n", imsi, ruleIDs)
records, err := tr.GetPolicyUsage()
if err != nil {
return false
}
if records[prependIMSIPrefix(imsi)] == nil {
return false
}
for _, ruleID := range ruleIDs {
if records[prependIMSIPrefix(imsi)][ruleID] == nil {
return false
}
}
fmt.Printf("%s, %v are now in enforcement stats!\n", imsi, ruleIDs)
return true
}
}
func (tr *TestRunner) WaitForNoEnforcementStatsForRule(imsi string, ruleIDs ...string) func() bool {
// Wait until the ruleIDs disappear for the IMSI
return func() bool {
fmt.Printf("Waiting until %s, %v disappear from enforcement stats...\n", imsi, ruleIDs)
records, err := tr.GetPolicyUsage()
if err != nil {
return false
}
if records[prependIMSIPrefix(imsi)] == nil {
fmt.Printf("%s are no longer in enforcement stats!\n", imsi)
return true
}
for _, ruleID := range ruleIDs {
if records[prependIMSIPrefix(imsi)][ruleID] != nil {
return false
}
}
fmt.Printf("%s, %v are no longer in enforcement stats!\n", imsi, ruleIDs)
return true
}
}
func (tr *TestRunner) WaitForEnforcementStatsForRuleGreaterThan(imsi, ruleID string, min uint64) func() bool {
// Todo figure out the best way to figure out when RAR is processed
return func() bool {
fmt.Printf("Waiting until %s, %s has more than %d bytes in enforcement stats...\n", imsi, ruleID, min)
records, err := tr.GetPolicyUsage()
imsi = prependIMSIPrefix(imsi)
if err != nil {
return false
}
if records[imsi] == nil {
return false
}
record := records[imsi][ruleID]
if record == nil {
return false
}
txBytes := record.BytesTx
if record.BytesTx <= min {
return false
}
fmt.Printf("%s, %s now passed %d > %d in enforcement stats!\n", imsi, ruleID, txBytes, min)
return true
}
}
//WaitForPolicyReAuthToProcess returns a method which checks for reauth answer and
// if it has sessionID which contains the IMSI
func (tr *TestRunner) WaitForPolicyReAuthToProcess(raa *fegprotos.PolicyReAuthAnswer, imsi string) func() bool {
// Todo figure out the best way to figure out when RAR is processed
return func() bool {
if raa != nil && strings.Contains(raa.SessionId, "IMSI"+imsi) {
return true
}
return false
}
}
//WaitForChargingReAuthToProcess returns a method which checks for reauth answer and
// if it has sessionID which contains the IMSI
func (tr *TestRunner) WaitForChargingReAuthToProcess(raa *fegprotos.ChargingReAuthAnswer, imsi string) func() bool {
// Todo figure out the best way to figure out when RAR is processed
return func() bool {
if raa != nil && strings.Contains(raa.SessionId, "IMSI"+imsi) {
return true
}
return false
}
}
func (tr *TestRunner) PrintElapsedTime() {
now := time.Now()
fmt.Printf("Elapsed Time: %s\n", now.Sub(tr.startTime))
}
// generateRandomIMSIS creates a slice of unique Random IMSIs taking into consideration a previous list with IMSIS
func | (numIMSIs int, preExistingIMSIS map[string]interface{}) []string {
set := make(map[string]bool)
IMSIs := make([]string, 0, numIMSIs)
for i := 0; i < numIMSIs; i++ {
imsi := ""
for {
imsi = getRandomIMSI()
// Check if IMSI is in the preexisting list of IMSI or in the current generated list
presentPreExistingIMSIs := false
if preExistingIMSIS != nil {
_, presentPreExistingIMSIs = preExistingIMSIS[imsi]
}
_, present := set[imsi]
if !present && !presentPreExistingIMSIs {
break
}
}
set[imsi] = true
IMSIs = append(IMSIs, imsi)
}
return IMSIs
}
// getRandomIMSI makes a random 15-digit IMSI that is not added to the UESim or HSS.
func getRandomIMSI() string {
imsi := ""
for len(imsi) < 15 {
imsi += strconv.Itoa(rand.Intn(10))
}
return imsi
}
// RandKeyOpc makes a random 16-byte key and calculates the Opc based off the Op.
func getRandKeyOpcFromOp(op []byte) (key, opc []byte, err error) {
key = make([]byte, 16)
rand.Read(key)
tempOpc, err := crypto.GenerateOpc(key, op)
if err != nil {
return nil, nil, err
}
opc = tempOpc[:]
return
}
// getRandSeq makes a random 43-bit Seq.
func getRandSeq() uint64 {
return rand.Uint64() >> 21
}
// makeUE creates a new UE using the given values.
func makeUE(imsi string, key []byte, opc []byte, seq uint64) *cwfprotos.UEConfig {
return &cwfprotos.UEConfig{
Imsi: imsi,
AuthKey: key,
AuthOpc: opc,
Seq: seq,
}
}
func prependIMSIPrefix(imsi string) string {
if strings.HasPrefix(imsi, "IMSI") {
return imsi
} else {
return "IMSI" + imsi
}
}
// MakeSubcriber creates a new Subscriber using the given values.
func makeSubscriber(imsi string, key []byte, opc []byte, seq uint64) *lteprotos.SubscriberData {
return <eprotos.SubscriberData{
Sid: <eprotos.SubscriberID{
Id: imsi,
Type: 1,
},
Lte: <eprotos.LTESubscription{
State: 1,
AuthAlgo: 0,
AuthKey: key,
AuthOpc: opc,
},
State: <eprotos.SubscriberState{
LteAuthNextSeq: seq,
},
Non_3Gpp: <eprotos.Non3GPPUserProfile{
Msisdn: defaultMSISDN,
Non_3GppIpAccess: lteprotos.Non3GPPUserProfile_NON_3GPP_SUBSCRIPTION_ALLOWED,
Non_3GppIpAccessApn: lteprotos.Non3GPPUserProfile_NON_3GPP_APNS_ENABLE,
ApnConfig: []*lteprotos.APNConfiguration{{}},
},
}
}
// Get the Pipelined encoded version of IMSI (set in metadata register)
func getEncodedIMSI(imsiStr string) (string, error) {
imsi, err := strconv.Atoi(imsiStr)
if err != nil {
return "", err
}
prefixLen := len(imsiStr) - len(strings.TrimLeft(imsiStr, "0"))
compacted := (imsi << 2) | (prefixLen & 0x3)
return fmt.Sprintf("0x%016x", compacted<<1|0x1), nil
}
| generateRandomIMSIS | identifier_name |
test_runner.go | /*
* Copyright 2020 The Magma Authors.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package integration
import (
"fmt"
"math/rand"
"strconv"
"strings"
"testing"
"time"
"fbc/lib/go/radius"
cwfprotos "magma/cwf/cloud/go/protos"
"magma/cwf/gateway/registry"
"magma/cwf/gateway/services/uesim"
fegprotos "magma/feg/cloud/go/protos"
"magma/lte/cloud/go/crypto"
lteprotos "magma/lte/cloud/go/protos"
"github.com/pkg/errors"
)
// todo make Op configurable, or export it in the UESimServer.
const (
Op = "\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11"
Secret = "123456"
MockHSSRemote = "HSS_REMOTE"
MockPCRFRemote = "PCRF_REMOTE"
MockOCSRemote = "OCS_REMOTE"
MockPCRFRemote2 = "PCRF_REMOTE2"
MockOCSRemote2 = "OCS_REMOTE2"
PipelinedRemote = "pipelined.local"
DirectorydRemote = "DIRECTORYD"
RedisRemote = "REDIS"
CwagIP = "192.168.70.101"
TrafficCltIP = "192.168.128.2"
IPDRControllerIP = "192.168.40.11"
OCSPort = 9201
PCRFPort = 9202
OCSPort2 = 9205
PCRFPort2 = 9206
HSSPort = 9204
PipelinedPort = 8443
RedisPort = 6380
DirectorydPort = 8443
// If updating these, also update the ipfix exported hex values
defaultMSISDN = "5100001234"
defaultCalledStationID = "98-DE-D0-84-B5-47:CWF-TP-LINK_B547_5G"
ipfixMSISDN = "0x35313030303031323334000000000000"
ipfixApnMacAddress = "0x98ded084b547"
ipfixApnName = "0x4357462d54502d4c494e4b5f423534375f35470000000000"
KiloBytes = 1024
MegaBytes = 1024 * KiloBytes
Buffer = 100 * KiloBytes
RevalidationTimeoutEvent = 17
ReAuthMaxUsageBytes = 5 * MegaBytes
ReAuthMaxUsageTimeSec = 1000 // in second
ReAuthValidityTime = 60 // in second
GyMaxUsageBytes = 5 * MegaBytes
GyMaxUsageTime = 1000 // in second
GyValidityTime = 60 // in second
)
//TestRunner helps setting up all associated services
type TestRunner struct {
t *testing.T
imsis map[string]bool
activePCRFs []string
activeOCSs []string
startTime time.Time
}
// imsi -> ruleID -> record
type RecordByIMSI map[string]map[string]*lteprotos.RuleRecord
// NewTestRunner initializes a new TestRunner by making a UESim client and
// and setting the next IMSI.
func NewTestRunner(t *testing.T) *TestRunner {
startTime := time.Now()
fmt.Println("************* TestRunner setup")
fmt.Printf("Adding Mock HSS service at %s:%d\n", CwagIP, HSSPort)
registry.AddService(MockHSSRemote, CwagIP, HSSPort)
fmt.Printf("Adding Mock PCRF service at %s:%d\n", CwagIP, PCRFPort)
registry.AddService(MockPCRFRemote, CwagIP, PCRFPort)
fmt.Printf("Adding Mock OCS service at %s:%d\n", CwagIP, OCSPort)
registry.AddService(MockOCSRemote, CwagIP, OCSPort)
fmt.Printf("Adding Pipelined service at %s:%d\n", CwagIP, PipelinedPort)
registry.AddService(PipelinedRemote, CwagIP, PipelinedPort)
fmt.Printf("Adding Redis service at %s:%d\n", CwagIP, RedisPort)
registry.AddService(RedisRemote, CwagIP, RedisPort)
fmt.Printf("Adding Directoryd service at %s:%d\n", CwagIP, DirectorydPort)
registry.AddService(DirectorydRemote, CwagIP, DirectorydPort)
testRunner := &TestRunner{t: t,
activePCRFs: []string{MockPCRFRemote},
activeOCSs: []string{MockOCSRemote},
startTime: startTime,
}
testRunner.imsis = make(map[string]bool)
return testRunner
}
// NewTestRunnerWithTwoPCRFandOCS does the same as NewTestRunner but it inclides 2 PCRF and 2 OCS
// Used in scenarios that run 2 PCRFs and 2 OCSs
func NewTestRunnerWithTwoPCRFandOCS(t *testing.T) *TestRunner {
tr := NewTestRunner(t)
fmt.Printf("Adding Mock PCRF #2 service at %s:%d\n", CwagIP, PCRFPort2)
registry.AddService(MockPCRFRemote2, CwagIP, PCRFPort2)
fmt.Printf("Adding Mock OCS #2 service at %s:%d\n", CwagIP, OCSPort2)
registry.AddService(MockOCSRemote2, CwagIP, OCSPort2)
// add the extra two servers for clean up
tr.activePCRFs = append(tr.activePCRFs, MockPCRFRemote2)
tr.activeOCSs = append(tr.activeOCSs, MockOCSRemote2)
return tr
}
// ConfigUEs creates and adds the specified number of UEs and Subscribers
// to the UE Simulator and the HSS.
func (tr *TestRunner) ConfigUEs(numUEs int) ([]*cwfprotos.UEConfig, error) {
IMSIs := make([]string, 0, numUEs)
for i := 0; i < numUEs; i++ {
imsi := ""
for {
imsi = getRandomIMSI()
_, present := tr.imsis[imsi]
if !present {
break
}
}
IMSIs = append(IMSIs, imsi)
}
return tr.ConfigUEsPerInstance(IMSIs, MockPCRFRemote, MockOCSRemote)
}
// ConfigUEsPerInstance same as ConfigUEs but per specific PCRF and OCS instance
func (tr *TestRunner) ConfigUEsPerInstance(IMSIs []string, pcrfInstance, ocsInstance string) ([]*cwfprotos.UEConfig, error) {
fmt.Printf("************* Configuring %d UE(s), PCRF instance: %s\n", len(IMSIs), pcrfInstance)
ues := make([]*cwfprotos.UEConfig, 0)
for _, imsi := range IMSIs {
// If IMSIs were generated properly they should never give an error here
if _, present := tr.imsis[imsi]; present {
return nil, errors.Errorf("IMSI %s already exist in database, use generateRandomIMSIS(num, tr.imsis) to create unique list", imsi)
}
key, opc, err := getRandKeyOpcFromOp([]byte(Op))
if err != nil {
return nil, err
}
seq := getRandSeq()
ue := makeUE(imsi, key, opc, seq)
sub := makeSubscriber(imsi, key, opc, seq+1)
err = uesim.AddUE(ue)
if err != nil {
return nil, errors.Wrap(err, "Error adding UE to UESimServer")
}
err = addSubscriberToHSS(sub)
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to HSS")
}
err = addSubscriberToPCRFPerInstance(pcrfInstance, sub.GetSid())
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to PCRF")
}
err = addSubscriberToOCSPerInstance(ocsInstance, sub.GetSid())
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to OCS")
}
ues = append(ues, ue)
fmt.Printf("Added UE to Simulator, %s, %s, and %s:\n"+
"\tIMSI: %s\tKey: %x\tOpc: %x\tSeq: %d\n", MockHSSRemote, pcrfInstance, ocsInstance, imsi, key, opc, seq)
tr.imsis[imsi] = true
}
fmt.Println("Successfully configured UE(s)")
return ues, nil
}
// Authenticate simulates an authentication between the UE and the HSS with the specified
// IMSI and CalledStationID, and returns the resulting Radius packet.
func (tr *TestRunner) Authenticate(imsi, calledStationID string) (*radius.Packet, error) {
fmt.Printf("************* Authenticating UE with IMSI: %s\n", imsi)
res, err := uesim.Authenticate(&cwfprotos.AuthenticateRequest{Imsi: imsi, CalledStationID: calledStationID})
if err != nil {
fmt.Println(err)
return &radius.Packet{}, err
}
encoded := res.GetRadiusPacket()
radiusP, err := radius.Parse(encoded, []byte(Secret))
if err != nil {
err = errors.Wrap(err, "Error while parsing encoded Radius packet")
fmt.Println(err)
return &radius.Packet{}, err
}
fmt.Println("Finished Authenticating UE")
return radiusP, nil
}
// Authenticate simulates an authentication between the UE and the HSS with the specified
// IMSI and CalledStationID, and returns the resulting Radius packet.
func (tr *TestRunner) Disconnect(imsi, calledStationID string) (*radius.Packet, error) {
fmt.Printf("************* Sending a disconnect request UE with IMSI: %s\n", imsi)
res, err := uesim.Disconnect(&cwfprotos.DisconnectRequest{Imsi: imsi, CalledStationID: calledStationID})
if err != nil {
return &radius.Packet{}, err
}
encoded := res.GetRadiusPacket()
radiusP, err := radius.Parse(encoded, []byte(Secret))
if err != nil {
err = errors.Wrap(err, "Error while parsing encoded Radius packet")
fmt.Println(err)
return &radius.Packet{}, err
}
fmt.Println("Finished Disconnecting UE")
return radiusP, nil
}
// GenULTraffic simulates the UE sending traffic through the CWAG to the Internet
// by running an iperf3 client on the UE simulator and an iperf3 server on the
// Magma traffic server.
func (tr *TestRunner) GenULTraffic(req *cwfprotos.GenTrafficRequest) (*cwfprotos.GenTrafficResponse, error) {
fmt.Printf("************* Generating Traffic for UE with Req: %v\n", req)
res, err := uesim.GenTraffic(req)
fmt.Printf("============> Total Sent: %d bytes\n", res.GetEndOutput().GetSumSent().GetBytes())
return res, err
}
// Remove subscribers, rules, flows, and monitors to clean up the state for
// consecutive test runs
func (tr *TestRunner) CleanUp() error {
for imsi := range tr.imsis {
err := deleteSubscribersFromHSS(imsi)
if err != nil {
return err
}
}
for _, instance := range tr.activePCRFs {
err := clearSubscribersFromPCRFPerInstance(instance)
if err != nil {
return err
}
}
for _, instance := range tr.activeOCSs {
err := clearSubscribersFromOCSPerInstance(instance)
if err != nil {
return err
}
}
return nil
}
// GetPolicyUsage is a wrapper around pipelined's GetPolicyUsage and returns
// the policy usage keyed by subscriber ID
func (tr *TestRunner) GetPolicyUsage() (RecordByIMSI, error) {
recordsBySubID := RecordByIMSI{}
table, err := getPolicyUsage()
if err != nil {
return recordsBySubID, err
}
for _, record := range table.Records {
fmt.Printf("Record %v\n", record)
_, exists := recordsBySubID[record.Sid]
if !exists {
recordsBySubID[record.Sid] = map[string]*lteprotos.RuleRecord{}
}
recordsBySubID[record.Sid][record.RuleId] = record
}
return recordsBySubID, nil
}
func (tr *TestRunner) WaitForEnforcementStatsToSync() {
// TODO load this value from pipelined.yml
enforcementPollPeriod := 1 * time.Second
time.Sleep(4 * enforcementPollPeriod)
}
|
func (tr *TestRunner) WaitForEnforcementStatsForRule(imsi string, ruleIDs ...string) func() bool {
// Wait until the ruleIDs show up for the IMSI
return func() bool {
fmt.Printf("Waiting until %s, %v shows up in enforcement stats...\n", imsi, ruleIDs)
records, err := tr.GetPolicyUsage()
if err != nil {
return false
}
if records[prependIMSIPrefix(imsi)] == nil {
return false
}
for _, ruleID := range ruleIDs {
if records[prependIMSIPrefix(imsi)][ruleID] == nil {
return false
}
}
fmt.Printf("%s, %v are now in enforcement stats!\n", imsi, ruleIDs)
return true
}
}
func (tr *TestRunner) WaitForNoEnforcementStatsForRule(imsi string, ruleIDs ...string) func() bool {
// Wait until the ruleIDs disappear for the IMSI
return func() bool {
fmt.Printf("Waiting until %s, %v disappear from enforcement stats...\n", imsi, ruleIDs)
records, err := tr.GetPolicyUsage()
if err != nil {
return false
}
if records[prependIMSIPrefix(imsi)] == nil {
fmt.Printf("%s are no longer in enforcement stats!\n", imsi)
return true
}
for _, ruleID := range ruleIDs {
if records[prependIMSIPrefix(imsi)][ruleID] != nil {
return false
}
}
fmt.Printf("%s, %v are no longer in enforcement stats!\n", imsi, ruleIDs)
return true
}
}
func (tr *TestRunner) WaitForEnforcementStatsForRuleGreaterThan(imsi, ruleID string, min uint64) func() bool {
// Todo figure out the best way to figure out when RAR is processed
return func() bool {
fmt.Printf("Waiting until %s, %s has more than %d bytes in enforcement stats...\n", imsi, ruleID, min)
records, err := tr.GetPolicyUsage()
imsi = prependIMSIPrefix(imsi)
if err != nil {
return false
}
if records[imsi] == nil {
return false
}
record := records[imsi][ruleID]
if record == nil {
return false
}
txBytes := record.BytesTx
if record.BytesTx <= min {
return false
}
fmt.Printf("%s, %s now passed %d > %d in enforcement stats!\n", imsi, ruleID, txBytes, min)
return true
}
}
//WaitForPolicyReAuthToProcess returns a method which checks for reauth answer and
// if it has sessionID which contains the IMSI
func (tr *TestRunner) WaitForPolicyReAuthToProcess(raa *fegprotos.PolicyReAuthAnswer, imsi string) func() bool {
// Todo figure out the best way to figure out when RAR is processed
return func() bool {
if raa != nil && strings.Contains(raa.SessionId, "IMSI"+imsi) {
return true
}
return false
}
}
//WaitForChargingReAuthToProcess returns a method which checks for reauth answer and
// if it has sessionID which contains the IMSI
func (tr *TestRunner) WaitForChargingReAuthToProcess(raa *fegprotos.ChargingReAuthAnswer, imsi string) func() bool {
// Todo figure out the best way to figure out when RAR is processed
return func() bool {
if raa != nil && strings.Contains(raa.SessionId, "IMSI"+imsi) {
return true
}
return false
}
}
func (tr *TestRunner) PrintElapsedTime() {
now := time.Now()
fmt.Printf("Elapsed Time: %s\n", now.Sub(tr.startTime))
}
// generateRandomIMSIS creates a slice of unique Random IMSIs taking into consideration a previous list with IMSIS
func generateRandomIMSIS(numIMSIs int, preExistingIMSIS map[string]interface{}) []string {
set := make(map[string]bool)
IMSIs := make([]string, 0, numIMSIs)
for i := 0; i < numIMSIs; i++ {
imsi := ""
for {
imsi = getRandomIMSI()
// Check if IMSI is in the preexisting list of IMSI or in the current generated list
presentPreExistingIMSIs := false
if preExistingIMSIS != nil {
_, presentPreExistingIMSIs = preExistingIMSIS[imsi]
}
_, present := set[imsi]
if !present && !presentPreExistingIMSIs {
break
}
}
set[imsi] = true
IMSIs = append(IMSIs, imsi)
}
return IMSIs
}
// getRandomIMSI makes a random 15-digit IMSI that is not added to the UESim or HSS.
func getRandomIMSI() string {
imsi := ""
for len(imsi) < 15 {
imsi += strconv.Itoa(rand.Intn(10))
}
return imsi
}
// RandKeyOpc makes a random 16-byte key and calculates the Opc based off the Op.
func getRandKeyOpcFromOp(op []byte) (key, opc []byte, err error) {
key = make([]byte, 16)
rand.Read(key)
tempOpc, err := crypto.GenerateOpc(key, op)
if err != nil {
return nil, nil, err
}
opc = tempOpc[:]
return
}
// getRandSeq makes a random 43-bit Seq.
func getRandSeq() uint64 {
return rand.Uint64() >> 21
}
// makeUE creates a new UE using the given values.
func makeUE(imsi string, key []byte, opc []byte, seq uint64) *cwfprotos.UEConfig {
return &cwfprotos.UEConfig{
Imsi: imsi,
AuthKey: key,
AuthOpc: opc,
Seq: seq,
}
}
func prependIMSIPrefix(imsi string) string {
if strings.HasPrefix(imsi, "IMSI") {
return imsi
} else {
return "IMSI" + imsi
}
}
// MakeSubcriber creates a new Subscriber using the given values.
func makeSubscriber(imsi string, key []byte, opc []byte, seq uint64) *lteprotos.SubscriberData {
return <eprotos.SubscriberData{
Sid: <eprotos.SubscriberID{
Id: imsi,
Type: 1,
},
Lte: <eprotos.LTESubscription{
State: 1,
AuthAlgo: 0,
AuthKey: key,
AuthOpc: opc,
},
State: <eprotos.SubscriberState{
LteAuthNextSeq: seq,
},
Non_3Gpp: <eprotos.Non3GPPUserProfile{
Msisdn: defaultMSISDN,
Non_3GppIpAccess: lteprotos.Non3GPPUserProfile_NON_3GPP_SUBSCRIPTION_ALLOWED,
Non_3GppIpAccessApn: lteprotos.Non3GPPUserProfile_NON_3GPP_APNS_ENABLE,
ApnConfig: []*lteprotos.APNConfiguration{{}},
},
}
}
// Get the Pipelined encoded version of IMSI (set in metadata register)
func getEncodedIMSI(imsiStr string) (string, error) {
imsi, err := strconv.Atoi(imsiStr)
if err != nil {
return "", err
}
prefixLen := len(imsiStr) - len(strings.TrimLeft(imsiStr, "0"))
compacted := (imsi << 2) | (prefixLen & 0x3)
return fmt.Sprintf("0x%016x", compacted<<1|0x1), nil
} | func (tr *TestRunner) WaitForPoliciesToSync() {
// TODO load this value from sessiond.yml (rule_update_interval_sec)
ruleUpdatePeriod := 1 * time.Second
time.Sleep(4 * ruleUpdatePeriod)
} | random_line_split |
test_runner.go | /*
* Copyright 2020 The Magma Authors.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package integration
import (
"fmt"
"math/rand"
"strconv"
"strings"
"testing"
"time"
"fbc/lib/go/radius"
cwfprotos "magma/cwf/cloud/go/protos"
"magma/cwf/gateway/registry"
"magma/cwf/gateway/services/uesim"
fegprotos "magma/feg/cloud/go/protos"
"magma/lte/cloud/go/crypto"
lteprotos "magma/lte/cloud/go/protos"
"github.com/pkg/errors"
)
// todo make Op configurable, or export it in the UESimServer.
const (
Op = "\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11"
Secret = "123456"
MockHSSRemote = "HSS_REMOTE"
MockPCRFRemote = "PCRF_REMOTE"
MockOCSRemote = "OCS_REMOTE"
MockPCRFRemote2 = "PCRF_REMOTE2"
MockOCSRemote2 = "OCS_REMOTE2"
PipelinedRemote = "pipelined.local"
DirectorydRemote = "DIRECTORYD"
RedisRemote = "REDIS"
CwagIP = "192.168.70.101"
TrafficCltIP = "192.168.128.2"
IPDRControllerIP = "192.168.40.11"
OCSPort = 9201
PCRFPort = 9202
OCSPort2 = 9205
PCRFPort2 = 9206
HSSPort = 9204
PipelinedPort = 8443
RedisPort = 6380
DirectorydPort = 8443
// If updating these, also update the ipfix exported hex values
defaultMSISDN = "5100001234"
defaultCalledStationID = "98-DE-D0-84-B5-47:CWF-TP-LINK_B547_5G"
ipfixMSISDN = "0x35313030303031323334000000000000"
ipfixApnMacAddress = "0x98ded084b547"
ipfixApnName = "0x4357462d54502d4c494e4b5f423534375f35470000000000"
KiloBytes = 1024
MegaBytes = 1024 * KiloBytes
Buffer = 100 * KiloBytes
RevalidationTimeoutEvent = 17
ReAuthMaxUsageBytes = 5 * MegaBytes
ReAuthMaxUsageTimeSec = 1000 // in second
ReAuthValidityTime = 60 // in second
GyMaxUsageBytes = 5 * MegaBytes
GyMaxUsageTime = 1000 // in second
GyValidityTime = 60 // in second
)
//TestRunner helps setting up all associated services
type TestRunner struct {
t *testing.T
imsis map[string]bool
activePCRFs []string
activeOCSs []string
startTime time.Time
}
// imsi -> ruleID -> record
type RecordByIMSI map[string]map[string]*lteprotos.RuleRecord
// NewTestRunner initializes a new TestRunner by making a UESim client and
// and setting the next IMSI.
func NewTestRunner(t *testing.T) *TestRunner {
startTime := time.Now()
fmt.Println("************* TestRunner setup")
fmt.Printf("Adding Mock HSS service at %s:%d\n", CwagIP, HSSPort)
registry.AddService(MockHSSRemote, CwagIP, HSSPort)
fmt.Printf("Adding Mock PCRF service at %s:%d\n", CwagIP, PCRFPort)
registry.AddService(MockPCRFRemote, CwagIP, PCRFPort)
fmt.Printf("Adding Mock OCS service at %s:%d\n", CwagIP, OCSPort)
registry.AddService(MockOCSRemote, CwagIP, OCSPort)
fmt.Printf("Adding Pipelined service at %s:%d\n", CwagIP, PipelinedPort)
registry.AddService(PipelinedRemote, CwagIP, PipelinedPort)
fmt.Printf("Adding Redis service at %s:%d\n", CwagIP, RedisPort)
registry.AddService(RedisRemote, CwagIP, RedisPort)
fmt.Printf("Adding Directoryd service at %s:%d\n", CwagIP, DirectorydPort)
registry.AddService(DirectorydRemote, CwagIP, DirectorydPort)
testRunner := &TestRunner{t: t,
activePCRFs: []string{MockPCRFRemote},
activeOCSs: []string{MockOCSRemote},
startTime: startTime,
}
testRunner.imsis = make(map[string]bool)
return testRunner
}
// NewTestRunnerWithTwoPCRFandOCS does the same as NewTestRunner but it inclides 2 PCRF and 2 OCS
// Used in scenarios that run 2 PCRFs and 2 OCSs
func NewTestRunnerWithTwoPCRFandOCS(t *testing.T) *TestRunner {
tr := NewTestRunner(t)
fmt.Printf("Adding Mock PCRF #2 service at %s:%d\n", CwagIP, PCRFPort2)
registry.AddService(MockPCRFRemote2, CwagIP, PCRFPort2)
fmt.Printf("Adding Mock OCS #2 service at %s:%d\n", CwagIP, OCSPort2)
registry.AddService(MockOCSRemote2, CwagIP, OCSPort2)
// add the extra two servers for clean up
tr.activePCRFs = append(tr.activePCRFs, MockPCRFRemote2)
tr.activeOCSs = append(tr.activeOCSs, MockOCSRemote2)
return tr
}
// ConfigUEs creates and adds the specified number of UEs and Subscribers
// to the UE Simulator and the HSS.
func (tr *TestRunner) ConfigUEs(numUEs int) ([]*cwfprotos.UEConfig, error) {
IMSIs := make([]string, 0, numUEs)
for i := 0; i < numUEs; i++ {
imsi := ""
for {
imsi = getRandomIMSI()
_, present := tr.imsis[imsi]
if !present {
break
}
}
IMSIs = append(IMSIs, imsi)
}
return tr.ConfigUEsPerInstance(IMSIs, MockPCRFRemote, MockOCSRemote)
}
// ConfigUEsPerInstance same as ConfigUEs but per specific PCRF and OCS instance
func (tr *TestRunner) ConfigUEsPerInstance(IMSIs []string, pcrfInstance, ocsInstance string) ([]*cwfprotos.UEConfig, error) {
fmt.Printf("************* Configuring %d UE(s), PCRF instance: %s\n", len(IMSIs), pcrfInstance)
ues := make([]*cwfprotos.UEConfig, 0)
for _, imsi := range IMSIs {
// If IMSIs were generated properly they should never give an error here
if _, present := tr.imsis[imsi]; present {
return nil, errors.Errorf("IMSI %s already exist in database, use generateRandomIMSIS(num, tr.imsis) to create unique list", imsi)
}
key, opc, err := getRandKeyOpcFromOp([]byte(Op))
if err != nil {
return nil, err
}
seq := getRandSeq()
ue := makeUE(imsi, key, opc, seq)
sub := makeSubscriber(imsi, key, opc, seq+1)
err = uesim.AddUE(ue)
if err != nil {
return nil, errors.Wrap(err, "Error adding UE to UESimServer")
}
err = addSubscriberToHSS(sub)
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to HSS")
}
err = addSubscriberToPCRFPerInstance(pcrfInstance, sub.GetSid())
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to PCRF")
}
err = addSubscriberToOCSPerInstance(ocsInstance, sub.GetSid())
if err != nil {
return nil, errors.Wrap(err, "Error adding Subscriber to OCS")
}
ues = append(ues, ue)
fmt.Printf("Added UE to Simulator, %s, %s, and %s:\n"+
"\tIMSI: %s\tKey: %x\tOpc: %x\tSeq: %d\n", MockHSSRemote, pcrfInstance, ocsInstance, imsi, key, opc, seq)
tr.imsis[imsi] = true
}
fmt.Println("Successfully configured UE(s)")
return ues, nil
}
// Authenticate simulates an authentication between the UE and the HSS with the specified
// IMSI and CalledStationID, and returns the resulting Radius packet.
func (tr *TestRunner) Authenticate(imsi, calledStationID string) (*radius.Packet, error) {
fmt.Printf("************* Authenticating UE with IMSI: %s\n", imsi)
res, err := uesim.Authenticate(&cwfprotos.AuthenticateRequest{Imsi: imsi, CalledStationID: calledStationID})
if err != nil {
fmt.Println(err)
return &radius.Packet{}, err
}
encoded := res.GetRadiusPacket()
radiusP, err := radius.Parse(encoded, []byte(Secret))
if err != nil {
err = errors.Wrap(err, "Error while parsing encoded Radius packet")
fmt.Println(err)
return &radius.Packet{}, err
}
fmt.Println("Finished Authenticating UE")
return radiusP, nil
}
// Authenticate simulates an authentication between the UE and the HSS with the specified
// IMSI and CalledStationID, and returns the resulting Radius packet.
func (tr *TestRunner) Disconnect(imsi, calledStationID string) (*radius.Packet, error) {
fmt.Printf("************* Sending a disconnect request UE with IMSI: %s\n", imsi)
res, err := uesim.Disconnect(&cwfprotos.DisconnectRequest{Imsi: imsi, CalledStationID: calledStationID})
if err != nil {
return &radius.Packet{}, err
}
encoded := res.GetRadiusPacket()
radiusP, err := radius.Parse(encoded, []byte(Secret))
if err != nil {
err = errors.Wrap(err, "Error while parsing encoded Radius packet")
fmt.Println(err)
return &radius.Packet{}, err
}
fmt.Println("Finished Disconnecting UE")
return radiusP, nil
}
// GenULTraffic simulates the UE sending traffic through the CWAG to the Internet
// by running an iperf3 client on the UE simulator and an iperf3 server on the
// Magma traffic server.
func (tr *TestRunner) GenULTraffic(req *cwfprotos.GenTrafficRequest) (*cwfprotos.GenTrafficResponse, error) {
fmt.Printf("************* Generating Traffic for UE with Req: %v\n", req)
res, err := uesim.GenTraffic(req)
fmt.Printf("============> Total Sent: %d bytes\n", res.GetEndOutput().GetSumSent().GetBytes())
return res, err
}
// Remove subscribers, rules, flows, and monitors to clean up the state for
// consecutive test runs
func (tr *TestRunner) CleanUp() error {
for imsi := range tr.imsis {
err := deleteSubscribersFromHSS(imsi)
if err != nil {
return err
}
}
for _, instance := range tr.activePCRFs {
err := clearSubscribersFromPCRFPerInstance(instance)
if err != nil {
return err
}
}
for _, instance := range tr.activeOCSs {
err := clearSubscribersFromOCSPerInstance(instance)
if err != nil {
return err
}
}
return nil
}
// GetPolicyUsage is a wrapper around pipelined's GetPolicyUsage and returns
// the policy usage keyed by subscriber ID
func (tr *TestRunner) GetPolicyUsage() (RecordByIMSI, error) {
recordsBySubID := RecordByIMSI{}
table, err := getPolicyUsage()
if err != nil {
return recordsBySubID, err
}
for _, record := range table.Records {
fmt.Printf("Record %v\n", record)
_, exists := recordsBySubID[record.Sid]
if !exists {
recordsBySubID[record.Sid] = map[string]*lteprotos.RuleRecord{}
}
recordsBySubID[record.Sid][record.RuleId] = record
}
return recordsBySubID, nil
}
func (tr *TestRunner) WaitForEnforcementStatsToSync() {
// TODO load this value from pipelined.yml
enforcementPollPeriod := 1 * time.Second
time.Sleep(4 * enforcementPollPeriod)
}
func (tr *TestRunner) WaitForPoliciesToSync() {
// TODO load this value from sessiond.yml (rule_update_interval_sec)
ruleUpdatePeriod := 1 * time.Second
time.Sleep(4 * ruleUpdatePeriod)
}
func (tr *TestRunner) WaitForEnforcementStatsForRule(imsi string, ruleIDs ...string) func() bool |
func (tr *TestRunner) WaitForNoEnforcementStatsForRule(imsi string, ruleIDs ...string) func() bool {
// Wait until the ruleIDs disappear for the IMSI
return func() bool {
fmt.Printf("Waiting until %s, %v disappear from enforcement stats...\n", imsi, ruleIDs)
records, err := tr.GetPolicyUsage()
if err != nil {
return false
}
if records[prependIMSIPrefix(imsi)] == nil {
fmt.Printf("%s are no longer in enforcement stats!\n", imsi)
return true
}
for _, ruleID := range ruleIDs {
if records[prependIMSIPrefix(imsi)][ruleID] != nil {
return false
}
}
fmt.Printf("%s, %v are no longer in enforcement stats!\n", imsi, ruleIDs)
return true
}
}
func (tr *TestRunner) WaitForEnforcementStatsForRuleGreaterThan(imsi, ruleID string, min uint64) func() bool {
// Todo figure out the best way to figure out when RAR is processed
return func() bool {
fmt.Printf("Waiting until %s, %s has more than %d bytes in enforcement stats...\n", imsi, ruleID, min)
records, err := tr.GetPolicyUsage()
imsi = prependIMSIPrefix(imsi)
if err != nil {
return false
}
if records[imsi] == nil {
return false
}
record := records[imsi][ruleID]
if record == nil {
return false
}
txBytes := record.BytesTx
if record.BytesTx <= min {
return false
}
fmt.Printf("%s, %s now passed %d > %d in enforcement stats!\n", imsi, ruleID, txBytes, min)
return true
}
}
//WaitForPolicyReAuthToProcess returns a method which checks for reauth answer and
// if it has sessionID which contains the IMSI
func (tr *TestRunner) WaitForPolicyReAuthToProcess(raa *fegprotos.PolicyReAuthAnswer, imsi string) func() bool {
// Todo figure out the best way to figure out when RAR is processed
return func() bool {
if raa != nil && strings.Contains(raa.SessionId, "IMSI"+imsi) {
return true
}
return false
}
}
//WaitForChargingReAuthToProcess returns a method which checks for reauth answer and
// if it has sessionID which contains the IMSI
func (tr *TestRunner) WaitForChargingReAuthToProcess(raa *fegprotos.ChargingReAuthAnswer, imsi string) func() bool {
// Todo figure out the best way to figure out when RAR is processed
return func() bool {
if raa != nil && strings.Contains(raa.SessionId, "IMSI"+imsi) {
return true
}
return false
}
}
func (tr *TestRunner) PrintElapsedTime() {
now := time.Now()
fmt.Printf("Elapsed Time: %s\n", now.Sub(tr.startTime))
}
// generateRandomIMSIS creates a slice of unique Random IMSIs taking into consideration a previous list with IMSIS
func generateRandomIMSIS(numIMSIs int, preExistingIMSIS map[string]interface{}) []string {
set := make(map[string]bool)
IMSIs := make([]string, 0, numIMSIs)
for i := 0; i < numIMSIs; i++ {
imsi := ""
for {
imsi = getRandomIMSI()
// Check if IMSI is in the preexisting list of IMSI or in the current generated list
presentPreExistingIMSIs := false
if preExistingIMSIS != nil {
_, presentPreExistingIMSIs = preExistingIMSIS[imsi]
}
_, present := set[imsi]
if !present && !presentPreExistingIMSIs {
break
}
}
set[imsi] = true
IMSIs = append(IMSIs, imsi)
}
return IMSIs
}
// getRandomIMSI makes a random 15-digit IMSI that is not added to the UESim or HSS.
func getRandomIMSI() string {
imsi := ""
for len(imsi) < 15 {
imsi += strconv.Itoa(rand.Intn(10))
}
return imsi
}
// RandKeyOpc makes a random 16-byte key and calculates the Opc based off the Op.
func getRandKeyOpcFromOp(op []byte) (key, opc []byte, err error) {
key = make([]byte, 16)
rand.Read(key)
tempOpc, err := crypto.GenerateOpc(key, op)
if err != nil {
return nil, nil, err
}
opc = tempOpc[:]
return
}
// getRandSeq makes a random 43-bit Seq.
func getRandSeq() uint64 {
return rand.Uint64() >> 21
}
// makeUE creates a new UE using the given values.
func makeUE(imsi string, key []byte, opc []byte, seq uint64) *cwfprotos.UEConfig {
return &cwfprotos.UEConfig{
Imsi: imsi,
AuthKey: key,
AuthOpc: opc,
Seq: seq,
}
}
func prependIMSIPrefix(imsi string) string {
if strings.HasPrefix(imsi, "IMSI") {
return imsi
} else {
return "IMSI" + imsi
}
}
// MakeSubcriber creates a new Subscriber using the given values.
func makeSubscriber(imsi string, key []byte, opc []byte, seq uint64) *lteprotos.SubscriberData {
return <eprotos.SubscriberData{
Sid: <eprotos.SubscriberID{
Id: imsi,
Type: 1,
},
Lte: <eprotos.LTESubscription{
State: 1,
AuthAlgo: 0,
AuthKey: key,
AuthOpc: opc,
},
State: <eprotos.SubscriberState{
LteAuthNextSeq: seq,
},
Non_3Gpp: <eprotos.Non3GPPUserProfile{
Msisdn: defaultMSISDN,
Non_3GppIpAccess: lteprotos.Non3GPPUserProfile_NON_3GPP_SUBSCRIPTION_ALLOWED,
Non_3GppIpAccessApn: lteprotos.Non3GPPUserProfile_NON_3GPP_APNS_ENABLE,
ApnConfig: []*lteprotos.APNConfiguration{{}},
},
}
}
// Get the Pipelined encoded version of IMSI (set in metadata register)
func getEncodedIMSI(imsiStr string) (string, error) {
imsi, err := strconv.Atoi(imsiStr)
if err != nil {
return "", err
}
prefixLen := len(imsiStr) - len(strings.TrimLeft(imsiStr, "0"))
compacted := (imsi << 2) | (prefixLen & 0x3)
return fmt.Sprintf("0x%016x", compacted<<1|0x1), nil
}
| {
// Wait until the ruleIDs show up for the IMSI
return func() bool {
fmt.Printf("Waiting until %s, %v shows up in enforcement stats...\n", imsi, ruleIDs)
records, err := tr.GetPolicyUsage()
if err != nil {
return false
}
if records[prependIMSIPrefix(imsi)] == nil {
return false
}
for _, ruleID := range ruleIDs {
if records[prependIMSIPrefix(imsi)][ruleID] == nil {
return false
}
}
fmt.Printf("%s, %v are now in enforcement stats!\n", imsi, ruleIDs)
return true
}
} | identifier_body |
dt.py | from __future__ import division
import matplotlib as mpl
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
mpl.rc('figure', figsize=[12,8]) #set the default figure size
import itertools, random, math
import time
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/arrhythmia/arrhythmia.data"
df = pd.read_csv(url, header = None, na_values="?")
dsmall = df.iloc[0:10, list(range(3)) + [279]]
class Node(object):
def __init__(self, name, node_type, data, label=None, split=None):
self.name = name
self.node_type = node_type
self.label = label
self.data = data
self.split = split
self.children = []
def __repr__(self):
|
class Split(object):
def __init__(self, data, class_column, split_column, point=None):
self.data = data
self.class_column = class_column
self.split_column = split_column
self.info_gain = None
self.point = point
self.partition_list = None # stores the data points on each side of the split
self.find_split_point()
self.partitions()
def compute_entropy(self, data):
data = data.astype(int)
#unique, count = np.unique(data, return_counts=True)
count = np.bincount(data)
count = count[count != 0]
p = count / np.sum(count)
return -np.sum(p * np.log2(p))
def compute_info_gain(self, neg, pos):
data = self.data[self.class_column].values.astype(int)
H0 = self.compute_entropy(data)
p_neg = len(neg) / len(data)
p_pos = len(pos) / len(data)
H_n = p_neg * self.compute_entropy(neg)
H_p = p_pos * self.compute_entropy(pos)
Ha = H_p + H_n
return H0 - Ha
def find_split_point(self):
data = self.data[[self.split_column, self.class_column]].values
attr_value = data[data[:,0].argsort()][:,0]
idx = data[data[:,0].argsort()][:,-1]
max_IG = -np.inf
for i in range(len(attr_value) - 1):
if attr_value[i] != attr_value[i + 1] and idx[i] != idx[i + 1]:
split_point = (attr_value[i] + attr_value[i + 1]) / 2
neg = idx[:i+1]
pos = idx[i+1:]
if self.compute_info_gain(neg, pos) > max_IG:
max_IG = self.compute_info_gain(neg, pos)
self.point = split_point
self.info_gain = max_IG
def partitions(self):
'''Get the two partitions (child nodes) for this split.'''
if self.partition_list:
# This check ensures that the list is computed at most once. Once computed
# it is stored
return self.partition_list
data = self.data
split_column = self.split_column
partition_list = []
partition_list.append(data[data[split_column] <= self.point])
partition_list.append(data[data[split_column] > self.point])
self.partition_list = partition_list
class DecisionTree(object):
def __init__(self, max_depth=None):
if (max_depth is not None and (max_depth != int(max_depth) or max_depth < 0)):
raise Exception("Invalid max depth value.")
self.max_depth = max_depth
def fit(self, data, class_column):
'''Fit a tree on data, in which class_column is the target.'''
if (not isinstance(data, pd.DataFrame) or class_column not in data.columns):
raise Exception("Invalid input")
self.data = data
self.class_column = class_column
self.non_class_columns = [c for c in data.columns if c != class_column]
self.root = self.recursive_build_tree(data, data, depth=0, attributes=self.non_class_columns, name='0')
# Node __init__(self, name, node_type, data, label=None, split=None)
def recursive_build_tree(self, data, parent_data, depth, attributes, name):
if len(data) == 0: # data set is empty
return Node(name=name, node_type='leaf', label=self.plurality_value(parent_data),
data=data)
elif depth == self.max_depth : # reach the max depth of the tree, can not split
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
elif np.all(data[self.class_column].values == data[self.class_column].values[0]):
return Node(name=name, node_type='leaf', label= list(set(data[self.class_column].values))[0], data=data)
elif len(attributes) == 0: # only has the class column, no attribute
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
#data[attributes].drop_duplicates()
elif len(data[attributes].drop_duplicates()) == 1: # noise data
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
else:
split = None
for attribute in attributes:
temp_split = Split(data, self.class_column, attribute)
# set the split with higher info gain as the true split
if not split or temp_split.info_gain > split.info_gain:
split = temp_split
root = Node(name=name, node_type='interval', data=data, split=split)
non_class_columns = attributes
if len(set(data[self.class_column].values)) == 2: # the attribute is discrete
attributes = [c for c in attributes if c != root.split.split_column]
# recursive_build_tree(self, data, parent_data, depth, attributes, name)
root.children.append(self.recursive_build_tree(root.split.partition_list[0][attributes +[self.class_column]], data, depth + 1, attributes, name + '.0'))
root.children.append(self.recursive_build_tree(root.split.partition_list[1][attributes +[self.class_column]], data, depth + 1, attributes, name + '.1'))
return root
def predict(self, test):
# WRITE YOUR CODE HERE
res = []
test = test.values
for i in range(len(test)):
node = self.root
while node.node_type != 'leaf':
split = node.split
#if test[split.split_column][i] <= split.point:
if test[i, split.split_column] <= split.point:
node = node.children[0]
else:
node = node.children[1]
res.append(node.label)
return res
def plurality_value(self, data):
#return data[self.class_column].value_counts().idxmax()
return np.argmax(np.bincount(data[self.class_column].astype(int).values))
def print(self):
self.recursive_print(self.root)
def recursive_print(self, node):
print(node)
for u in node.children:
self.recursive_print(u)
tree = tree = DecisionTree(3)
tree.fit(dsmall, 279)
tree.print()
def validation_curve():
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/arrhythmia/arrhythmia.data"
df = pd.read_csv(url, header = None, na_values="?")
MAX_DEPTH = 20
NUMBER_OF_COLUMNS = 278
NUMBER_OF_ROWS = len(df)
CLASS_COLUMN = 279
# fill the empty value
for i in range(280):
if df[i].isnull().sum() > 0:
df.iloc[:,i].fillna(df[i].mode()[0], inplace=True)
df = df.iloc[:NUMBER_OF_ROWS,list(range(NUMBER_OF_COLUMNS)) + [CLASS_COLUMN]]
# shuffle the data
df = df.sample(frac=1).reset_index(drop=True)
# split the data into 3 parts
datasets = np.array_split(df, 3)
# initilize the correct ratio of training data and test data
training_error_ratio = []
test_error_ratio = []
for depth in range(MAX_DEPTH + 1)[2::2]:
# initialize the tree
dt = DecisionTree(depth)
training_error_ratio_sum = 0
test_error_ratio_sum = 0
for sets in [[0,1,2],[1,2,0],[0,2,1]]:
# get the training data and test data
training_data = pd.concat([datasets[sets[0]],datasets[sets[1]]])
test_data = datasets[sets[2]]
# train the model
dt.fit(training_data, CLASS_COLUMN)
# get the prediction result of the training data and test data
training_result_list = dt.predict(training_data)
test_result_list = dt.predict(test_data)
training_error_ratio_sum += 1 - np.sum(training_result_list ==
training_data[CLASS_COLUMN]) / len(training_data)
test_error_ratio_sum += 1 - np.sum(test_result_list == test_data[CLASS_COLUMN]) / len(test_data)
training_error_ratio.append(training_error_ratio_sum / 3)
test_error_ratio.append(test_error_ratio_sum / 3)
print('layers ' + str(depth))
x = range(MAX_DEPTH + 1)[2::2]
plt.ylabel("$error \ ratio$")
plt.xlabel("$depth$")
plt.plot(x, training_error_ratio, label='training')
plt.plot(x, test_error_ratio, label='test')
plt.legend()
start = time.time()
validation_curve()
end = time.time()
print(end - start)
| data = self.data
if self.node_type != 'leaf':
s = (f"{self.name} Internal node with {data[data.columns[0]].count()} rows; split"
f" {self.split.split_column} at {self.split.point:.2f} for children with"
f" {[p[p.columns[0]].count() for p in self.split.partitions()]} rows"
f" and infomation gain {self.split.info_gain:.5f}")
else:
s = (f"{self.name} Leaf with {data[data.columns[0]].count()} rows, and label"
f" {self.label}")
return s | identifier_body |
dt.py | from __future__ import division
import matplotlib as mpl
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
mpl.rc('figure', figsize=[12,8]) #set the default figure size
import itertools, random, math
import time
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/arrhythmia/arrhythmia.data"
df = pd.read_csv(url, header = None, na_values="?")
dsmall = df.iloc[0:10, list(range(3)) + [279]]
class Node(object):
def __init__(self, name, node_type, data, label=None, split=None):
self.name = name
self.node_type = node_type
self.label = label
self.data = data
self.split = split
self.children = []
def __repr__(self):
data = self.data
if self.node_type != 'leaf':
s = (f"{self.name} Internal node with {data[data.columns[0]].count()} rows; split"
f" {self.split.split_column} at {self.split.point:.2f} for children with"
f" {[p[p.columns[0]].count() for p in self.split.partitions()]} rows"
f" and infomation gain {self.split.info_gain:.5f}")
else:
s = (f"{self.name} Leaf with {data[data.columns[0]].count()} rows, and label"
f" {self.label}")
return s
class Split(object):
def __init__(self, data, class_column, split_column, point=None):
self.data = data
self.class_column = class_column
self.split_column = split_column
self.info_gain = None
self.point = point
self.partition_list = None # stores the data points on each side of the split
self.find_split_point()
self.partitions()
def compute_entropy(self, data):
data = data.astype(int)
#unique, count = np.unique(data, return_counts=True)
count = np.bincount(data)
count = count[count != 0]
p = count / np.sum(count)
return -np.sum(p * np.log2(p))
def compute_info_gain(self, neg, pos):
data = self.data[self.class_column].values.astype(int)
H0 = self.compute_entropy(data)
p_neg = len(neg) / len(data)
p_pos = len(pos) / len(data)
H_n = p_neg * self.compute_entropy(neg)
H_p = p_pos * self.compute_entropy(pos)
Ha = H_p + H_n
return H0 - Ha
def find_split_point(self):
data = self.data[[self.split_column, self.class_column]].values
attr_value = data[data[:,0].argsort()][:,0]
idx = data[data[:,0].argsort()][:,-1]
max_IG = -np.inf
for i in range(len(attr_value) - 1):
if attr_value[i] != attr_value[i + 1] and idx[i] != idx[i + 1]:
split_point = (attr_value[i] + attr_value[i + 1]) / 2
neg = idx[:i+1]
pos = idx[i+1:]
if self.compute_info_gain(neg, pos) > max_IG:
max_IG = self.compute_info_gain(neg, pos)
self.point = split_point
self.info_gain = max_IG
def partitions(self):
'''Get the two partitions (child nodes) for this split.'''
if self.partition_list:
# This check ensures that the list is computed at most once. Once computed
# it is stored
return self.partition_list
data = self.data
split_column = self.split_column
partition_list = []
partition_list.append(data[data[split_column] <= self.point])
partition_list.append(data[data[split_column] > self.point])
self.partition_list = partition_list
class DecisionTree(object):
def __init__(self, max_depth=None):
if (max_depth is not None and (max_depth != int(max_depth) or max_depth < 0)):
raise Exception("Invalid max depth value.")
self.max_depth = max_depth
def fit(self, data, class_column):
'''Fit a tree on data, in which class_column is the target.'''
if (not isinstance(data, pd.DataFrame) or class_column not in data.columns):
raise Exception("Invalid input")
self.data = data
self.class_column = class_column
self.non_class_columns = [c for c in data.columns if c != class_column]
self.root = self.recursive_build_tree(data, data, depth=0, attributes=self.non_class_columns, name='0')
# Node __init__(self, name, node_type, data, label=None, split=None)
def recursive_build_tree(self, data, parent_data, depth, attributes, name):
if len(data) == 0: # data set is empty
return Node(name=name, node_type='leaf', label=self.plurality_value(parent_data),
data=data)
elif depth == self.max_depth : # reach the max depth of the tree, can not split
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
elif np.all(data[self.class_column].values == data[self.class_column].values[0]):
return Node(name=name, node_type='leaf', label= list(set(data[self.class_column].values))[0], data=data)
elif len(attributes) == 0: # only has the class column, no attribute
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
#data[attributes].drop_duplicates()
elif len(data[attributes].drop_duplicates()) == 1: # noise data
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
else:
split = None
for attribute in attributes:
temp_split = Split(data, self.class_column, attribute)
# set the split with higher info gain as the true split
if not split or temp_split.info_gain > split.info_gain:
split = temp_split
root = Node(name=name, node_type='interval', data=data, split=split)
non_class_columns = attributes
if len(set(data[self.class_column].values)) == 2: # the attribute is discrete
attributes = [c for c in attributes if c != root.split.split_column]
# recursive_build_tree(self, data, parent_data, depth, attributes, name)
root.children.append(self.recursive_build_tree(root.split.partition_list[0][attributes +[self.class_column]], data, depth + 1, attributes, name + '.0'))
root.children.append(self.recursive_build_tree(root.split.partition_list[1][attributes +[self.class_column]], data, depth + 1, attributes, name + '.1'))
return root
def predict(self, test):
# WRITE YOUR CODE HERE
res = []
test = test.values
for i in range(len(test)):
node = self.root
while node.node_type != 'leaf':
split = node.split
#if test[split.split_column][i] <= split.point:
if test[i, split.split_column] <= split.point:
node = node.children[0]
else:
|
res.append(node.label)
return res
def plurality_value(self, data):
#return data[self.class_column].value_counts().idxmax()
return np.argmax(np.bincount(data[self.class_column].astype(int).values))
def print(self):
self.recursive_print(self.root)
def recursive_print(self, node):
print(node)
for u in node.children:
self.recursive_print(u)
tree = tree = DecisionTree(3)
tree.fit(dsmall, 279)
tree.print()
def validation_curve():
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/arrhythmia/arrhythmia.data"
df = pd.read_csv(url, header = None, na_values="?")
MAX_DEPTH = 20
NUMBER_OF_COLUMNS = 278
NUMBER_OF_ROWS = len(df)
CLASS_COLUMN = 279
# fill the empty value
for i in range(280):
if df[i].isnull().sum() > 0:
df.iloc[:,i].fillna(df[i].mode()[0], inplace=True)
df = df.iloc[:NUMBER_OF_ROWS,list(range(NUMBER_OF_COLUMNS)) + [CLASS_COLUMN]]
# shuffle the data
df = df.sample(frac=1).reset_index(drop=True)
# split the data into 3 parts
datasets = np.array_split(df, 3)
# initilize the correct ratio of training data and test data
training_error_ratio = []
test_error_ratio = []
for depth in range(MAX_DEPTH + 1)[2::2]:
# initialize the tree
dt = DecisionTree(depth)
training_error_ratio_sum = 0
test_error_ratio_sum = 0
for sets in [[0,1,2],[1,2,0],[0,2,1]]:
# get the training data and test data
training_data = pd.concat([datasets[sets[0]],datasets[sets[1]]])
test_data = datasets[sets[2]]
# train the model
dt.fit(training_data, CLASS_COLUMN)
# get the prediction result of the training data and test data
training_result_list = dt.predict(training_data)
test_result_list = dt.predict(test_data)
training_error_ratio_sum += 1 - np.sum(training_result_list ==
training_data[CLASS_COLUMN]) / len(training_data)
test_error_ratio_sum += 1 - np.sum(test_result_list == test_data[CLASS_COLUMN]) / len(test_data)
training_error_ratio.append(training_error_ratio_sum / 3)
test_error_ratio.append(test_error_ratio_sum / 3)
print('layers ' + str(depth))
x = range(MAX_DEPTH + 1)[2::2]
plt.ylabel("$error \ ratio$")
plt.xlabel("$depth$")
plt.plot(x, training_error_ratio, label='training')
plt.plot(x, test_error_ratio, label='test')
plt.legend()
start = time.time()
validation_curve()
end = time.time()
print(end - start)
| node = node.children[1] | conditional_block |
dt.py | from __future__ import division
import matplotlib as mpl
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
mpl.rc('figure', figsize=[12,8]) #set the default figure size
import itertools, random, math
import time
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/arrhythmia/arrhythmia.data"
df = pd.read_csv(url, header = None, na_values="?")
dsmall = df.iloc[0:10, list(range(3)) + [279]]
class Node(object):
def __init__(self, name, node_type, data, label=None, split=None):
self.name = name
self.node_type = node_type
self.label = label
self.data = data
self.split = split
self.children = []
def __repr__(self):
data = self.data
if self.node_type != 'leaf':
s = (f"{self.name} Internal node with {data[data.columns[0]].count()} rows; split"
f" {self.split.split_column} at {self.split.point:.2f} for children with"
f" {[p[p.columns[0]].count() for p in self.split.partitions()]} rows"
f" and infomation gain {self.split.info_gain:.5f}")
else:
s = (f"{self.name} Leaf with {data[data.columns[0]].count()} rows, and label"
f" {self.label}")
return s
class Split(object):
def __init__(self, data, class_column, split_column, point=None):
self.data = data
self.class_column = class_column
self.split_column = split_column
self.info_gain = None
self.point = point
self.partition_list = None # stores the data points on each side of the split
self.find_split_point()
self.partitions()
def compute_entropy(self, data):
data = data.astype(int)
#unique, count = np.unique(data, return_counts=True)
count = np.bincount(data)
count = count[count != 0]
p = count / np.sum(count)
return -np.sum(p * np.log2(p))
def compute_info_gain(self, neg, pos):
data = self.data[self.class_column].values.astype(int)
H0 = self.compute_entropy(data)
p_neg = len(neg) / len(data)
p_pos = len(pos) / len(data)
H_n = p_neg * self.compute_entropy(neg)
H_p = p_pos * self.compute_entropy(pos)
Ha = H_p + H_n
return H0 - Ha
def find_split_point(self):
data = self.data[[self.split_column, self.class_column]].values
attr_value = data[data[:,0].argsort()][:,0]
idx = data[data[:,0].argsort()][:,-1]
max_IG = -np.inf
for i in range(len(attr_value) - 1):
if attr_value[i] != attr_value[i + 1] and idx[i] != idx[i + 1]:
split_point = (attr_value[i] + attr_value[i + 1]) / 2
neg = idx[:i+1]
pos = idx[i+1:]
if self.compute_info_gain(neg, pos) > max_IG:
max_IG = self.compute_info_gain(neg, pos)
self.point = split_point
self.info_gain = max_IG
def partitions(self):
'''Get the two partitions (child nodes) for this split.'''
if self.partition_list:
# This check ensures that the list is computed at most once. Once computed
# it is stored
return self.partition_list
data = self.data
split_column = self.split_column
partition_list = []
partition_list.append(data[data[split_column] <= self.point])
partition_list.append(data[data[split_column] > self.point])
self.partition_list = partition_list
class DecisionTree(object):
def __init__(self, max_depth=None):
if (max_depth is not None and (max_depth != int(max_depth) or max_depth < 0)):
raise Exception("Invalid max depth value.")
self.max_depth = max_depth
def fit(self, data, class_column):
'''Fit a tree on data, in which class_column is the target.'''
if (not isinstance(data, pd.DataFrame) or class_column not in data.columns):
raise Exception("Invalid input")
self.data = data
self.class_column = class_column
self.non_class_columns = [c for c in data.columns if c != class_column]
self.root = self.recursive_build_tree(data, data, depth=0, attributes=self.non_class_columns, name='0')
# Node __init__(self, name, node_type, data, label=None, split=None)
def recursive_build_tree(self, data, parent_data, depth, attributes, name):
if len(data) == 0: # data set is empty
return Node(name=name, node_type='leaf', label=self.plurality_value(parent_data),
data=data)
elif depth == self.max_depth : # reach the max depth of the tree, can not split
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
elif np.all(data[self.class_column].values == data[self.class_column].values[0]):
return Node(name=name, node_type='leaf', label= list(set(data[self.class_column].values))[0], data=data)
elif len(attributes) == 0: # only has the class column, no attribute
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
#data[attributes].drop_duplicates()
elif len(data[attributes].drop_duplicates()) == 1: # noise data
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
else:
split = None
for attribute in attributes:
temp_split = Split(data, self.class_column, attribute)
# set the split with higher info gain as the true split
if not split or temp_split.info_gain > split.info_gain:
split = temp_split
root = Node(name=name, node_type='interval', data=data, split=split)
non_class_columns = attributes
if len(set(data[self.class_column].values)) == 2: # the attribute is discrete
attributes = [c for c in attributes if c != root.split.split_column]
# recursive_build_tree(self, data, parent_data, depth, attributes, name)
root.children.append(self.recursive_build_tree(root.split.partition_list[0][attributes +[self.class_column]], data, depth + 1, attributes, name + '.0'))
root.children.append(self.recursive_build_tree(root.split.partition_list[1][attributes +[self.class_column]], data, depth + 1, attributes, name + '.1'))
return root
def predict(self, test):
# WRITE YOUR CODE HERE
res = []
test = test.values
for i in range(len(test)):
node = self.root
while node.node_type != 'leaf':
split = node.split
#if test[split.split_column][i] <= split.point:
if test[i, split.split_column] <= split.point:
node = node.children[0]
else:
node = node.children[1]
res.append(node.label)
return res
def plurality_value(self, data):
#return data[self.class_column].value_counts().idxmax()
return np.argmax(np.bincount(data[self.class_column].astype(int).values))
def print(self):
self.recursive_print(self.root)
def recursive_print(self, node):
print(node)
for u in node.children:
self.recursive_print(u)
tree = tree = DecisionTree(3)
tree.fit(dsmall, 279)
tree.print()
def validation_curve():
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/arrhythmia/arrhythmia.data"
df = pd.read_csv(url, header = None, na_values="?")
MAX_DEPTH = 20
NUMBER_OF_COLUMNS = 278
NUMBER_OF_ROWS = len(df)
CLASS_COLUMN = 279
# fill the empty value
for i in range(280):
if df[i].isnull().sum() > 0:
df.iloc[:,i].fillna(df[i].mode()[0], inplace=True)
df = df.iloc[:NUMBER_OF_ROWS,list(range(NUMBER_OF_COLUMNS)) + [CLASS_COLUMN]]
# shuffle the data
df = df.sample(frac=1).reset_index(drop=True)
# split the data into 3 parts
datasets = np.array_split(df, 3)
| for depth in range(MAX_DEPTH + 1)[2::2]:
# initialize the tree
dt = DecisionTree(depth)
training_error_ratio_sum = 0
test_error_ratio_sum = 0
for sets in [[0,1,2],[1,2,0],[0,2,1]]:
# get the training data and test data
training_data = pd.concat([datasets[sets[0]],datasets[sets[1]]])
test_data = datasets[sets[2]]
# train the model
dt.fit(training_data, CLASS_COLUMN)
# get the prediction result of the training data and test data
training_result_list = dt.predict(training_data)
test_result_list = dt.predict(test_data)
training_error_ratio_sum += 1 - np.sum(training_result_list ==
training_data[CLASS_COLUMN]) / len(training_data)
test_error_ratio_sum += 1 - np.sum(test_result_list == test_data[CLASS_COLUMN]) / len(test_data)
training_error_ratio.append(training_error_ratio_sum / 3)
test_error_ratio.append(test_error_ratio_sum / 3)
print('layers ' + str(depth))
x = range(MAX_DEPTH + 1)[2::2]
plt.ylabel("$error \ ratio$")
plt.xlabel("$depth$")
plt.plot(x, training_error_ratio, label='training')
plt.plot(x, test_error_ratio, label='test')
plt.legend()
start = time.time()
validation_curve()
end = time.time()
print(end - start) | # initilize the correct ratio of training data and test data
training_error_ratio = []
test_error_ratio = [] | random_line_split |
dt.py | from __future__ import division
import matplotlib as mpl
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
mpl.rc('figure', figsize=[12,8]) #set the default figure size
import itertools, random, math
import time
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/arrhythmia/arrhythmia.data"
df = pd.read_csv(url, header = None, na_values="?")
dsmall = df.iloc[0:10, list(range(3)) + [279]]
class Node(object):
def __init__(self, name, node_type, data, label=None, split=None):
self.name = name
self.node_type = node_type
self.label = label
self.data = data
self.split = split
self.children = []
def __repr__(self):
data = self.data
if self.node_type != 'leaf':
s = (f"{self.name} Internal node with {data[data.columns[0]].count()} rows; split"
f" {self.split.split_column} at {self.split.point:.2f} for children with"
f" {[p[p.columns[0]].count() for p in self.split.partitions()]} rows"
f" and infomation gain {self.split.info_gain:.5f}")
else:
s = (f"{self.name} Leaf with {data[data.columns[0]].count()} rows, and label"
f" {self.label}")
return s
class Split(object):
def __init__(self, data, class_column, split_column, point=None):
self.data = data
self.class_column = class_column
self.split_column = split_column
self.info_gain = None
self.point = point
self.partition_list = None # stores the data points on each side of the split
self.find_split_point()
self.partitions()
def compute_entropy(self, data):
data = data.astype(int)
#unique, count = np.unique(data, return_counts=True)
count = np.bincount(data)
count = count[count != 0]
p = count / np.sum(count)
return -np.sum(p * np.log2(p))
def | (self, neg, pos):
data = self.data[self.class_column].values.astype(int)
H0 = self.compute_entropy(data)
p_neg = len(neg) / len(data)
p_pos = len(pos) / len(data)
H_n = p_neg * self.compute_entropy(neg)
H_p = p_pos * self.compute_entropy(pos)
Ha = H_p + H_n
return H0 - Ha
def find_split_point(self):
data = self.data[[self.split_column, self.class_column]].values
attr_value = data[data[:,0].argsort()][:,0]
idx = data[data[:,0].argsort()][:,-1]
max_IG = -np.inf
for i in range(len(attr_value) - 1):
if attr_value[i] != attr_value[i + 1] and idx[i] != idx[i + 1]:
split_point = (attr_value[i] + attr_value[i + 1]) / 2
neg = idx[:i+1]
pos = idx[i+1:]
if self.compute_info_gain(neg, pos) > max_IG:
max_IG = self.compute_info_gain(neg, pos)
self.point = split_point
self.info_gain = max_IG
def partitions(self):
'''Get the two partitions (child nodes) for this split.'''
if self.partition_list:
# This check ensures that the list is computed at most once. Once computed
# it is stored
return self.partition_list
data = self.data
split_column = self.split_column
partition_list = []
partition_list.append(data[data[split_column] <= self.point])
partition_list.append(data[data[split_column] > self.point])
self.partition_list = partition_list
class DecisionTree(object):
def __init__(self, max_depth=None):
if (max_depth is not None and (max_depth != int(max_depth) or max_depth < 0)):
raise Exception("Invalid max depth value.")
self.max_depth = max_depth
def fit(self, data, class_column):
'''Fit a tree on data, in which class_column is the target.'''
if (not isinstance(data, pd.DataFrame) or class_column not in data.columns):
raise Exception("Invalid input")
self.data = data
self.class_column = class_column
self.non_class_columns = [c for c in data.columns if c != class_column]
self.root = self.recursive_build_tree(data, data, depth=0, attributes=self.non_class_columns, name='0')
# Node __init__(self, name, node_type, data, label=None, split=None)
def recursive_build_tree(self, data, parent_data, depth, attributes, name):
if len(data) == 0: # data set is empty
return Node(name=name, node_type='leaf', label=self.plurality_value(parent_data),
data=data)
elif depth == self.max_depth : # reach the max depth of the tree, can not split
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
elif np.all(data[self.class_column].values == data[self.class_column].values[0]):
return Node(name=name, node_type='leaf', label= list(set(data[self.class_column].values))[0], data=data)
elif len(attributes) == 0: # only has the class column, no attribute
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
#data[attributes].drop_duplicates()
elif len(data[attributes].drop_duplicates()) == 1: # noise data
return Node(name=name, node_type='leaf', label=self.plurality_value(data),
data=data)
else:
split = None
for attribute in attributes:
temp_split = Split(data, self.class_column, attribute)
# set the split with higher info gain as the true split
if not split or temp_split.info_gain > split.info_gain:
split = temp_split
root = Node(name=name, node_type='interval', data=data, split=split)
non_class_columns = attributes
if len(set(data[self.class_column].values)) == 2: # the attribute is discrete
attributes = [c for c in attributes if c != root.split.split_column]
# recursive_build_tree(self, data, parent_data, depth, attributes, name)
root.children.append(self.recursive_build_tree(root.split.partition_list[0][attributes +[self.class_column]], data, depth + 1, attributes, name + '.0'))
root.children.append(self.recursive_build_tree(root.split.partition_list[1][attributes +[self.class_column]], data, depth + 1, attributes, name + '.1'))
return root
def predict(self, test):
# WRITE YOUR CODE HERE
res = []
test = test.values
for i in range(len(test)):
node = self.root
while node.node_type != 'leaf':
split = node.split
#if test[split.split_column][i] <= split.point:
if test[i, split.split_column] <= split.point:
node = node.children[0]
else:
node = node.children[1]
res.append(node.label)
return res
def plurality_value(self, data):
#return data[self.class_column].value_counts().idxmax()
return np.argmax(np.bincount(data[self.class_column].astype(int).values))
def print(self):
self.recursive_print(self.root)
def recursive_print(self, node):
print(node)
for u in node.children:
self.recursive_print(u)
tree = tree = DecisionTree(3)
tree.fit(dsmall, 279)
tree.print()
def validation_curve():
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/arrhythmia/arrhythmia.data"
df = pd.read_csv(url, header = None, na_values="?")
MAX_DEPTH = 20
NUMBER_OF_COLUMNS = 278
NUMBER_OF_ROWS = len(df)
CLASS_COLUMN = 279
# fill the empty value
for i in range(280):
if df[i].isnull().sum() > 0:
df.iloc[:,i].fillna(df[i].mode()[0], inplace=True)
df = df.iloc[:NUMBER_OF_ROWS,list(range(NUMBER_OF_COLUMNS)) + [CLASS_COLUMN]]
# shuffle the data
df = df.sample(frac=1).reset_index(drop=True)
# split the data into 3 parts
datasets = np.array_split(df, 3)
# initilize the correct ratio of training data and test data
training_error_ratio = []
test_error_ratio = []
for depth in range(MAX_DEPTH + 1)[2::2]:
# initialize the tree
dt = DecisionTree(depth)
training_error_ratio_sum = 0
test_error_ratio_sum = 0
for sets in [[0,1,2],[1,2,0],[0,2,1]]:
# get the training data and test data
training_data = pd.concat([datasets[sets[0]],datasets[sets[1]]])
test_data = datasets[sets[2]]
# train the model
dt.fit(training_data, CLASS_COLUMN)
# get the prediction result of the training data and test data
training_result_list = dt.predict(training_data)
test_result_list = dt.predict(test_data)
training_error_ratio_sum += 1 - np.sum(training_result_list ==
training_data[CLASS_COLUMN]) / len(training_data)
test_error_ratio_sum += 1 - np.sum(test_result_list == test_data[CLASS_COLUMN]) / len(test_data)
training_error_ratio.append(training_error_ratio_sum / 3)
test_error_ratio.append(test_error_ratio_sum / 3)
print('layers ' + str(depth))
x = range(MAX_DEPTH + 1)[2::2]
plt.ylabel("$error \ ratio$")
plt.xlabel("$depth$")
plt.plot(x, training_error_ratio, label='training')
plt.plot(x, test_error_ratio, label='test')
plt.legend()
start = time.time()
validation_curve()
end = time.time()
print(end - start)
| compute_info_gain | identifier_name |
main.go | // Copyright 2014, Successfulmatch Inc. All rights reserved.
// Author TonyXu<tonycbcd@gmail.com>,
// Build on dev-0.0.1
// MIT Licensed
// The Go mysql proxy main model file.
package models
import (
"fmt"
"errors"
"strconv"
"sync"
"log"
"git.masontest.com/branches/gomysqlproxy/app/models/schema"
"git.masontest.com/branches/gomysqlproxy/app/models/host"
"git.masontest.com/branches/gomysqlproxy/app/models/client"
"git.masontest.com/branches/gomysqlproxy/app/models/redis"
"git.masontest.com/branches/gomysqlproxy/app/models/planbuilder"
)
type MysqlProxy struct {
TableTotal uint64
SizeTotal uint64 // 单位:MB
CurGId uint64 // 当前全局表主ID,为新表主ID,不能减少
TableIds []string // 表主ID
Tables []*schema.MysqlTable `json:"-"`
ShardDBIds []string // Shard库的主ID
ShardDBs []*schema.MysqlShardDB `json:"-"` // 已生成的shard库
ShardDBCnt int // shard db 计数器
TablesMap map[string]*schema.MysqlTable `json:"-"`
}
var (
MyProxy *MysqlProxy
isUpdated = false
)
func CheckError(err error) {
if err != nil {
panic(fmt.Sprintf("init the table error in mysql.go: %s", err))
}
}
func NewMysqlProxy() *MysqlProxy {
proxy := &MysqlProxy {}
proxy.Init()
host.GetAn | m.InitMain()
m.InitMysqlDB()
m.InitMysqlTable()
m.InitConnPooling()
if isUpdated {
// save mysql proxy.
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
CheckError(err)
}
// panic(fmt.Sprintf("OK: %#v", m))
}
// get the table status.
func (m *MysqlProxy) GetStatus() (map[string]interface{}, error) {
result := map[string]interface{}{}
result["main"] = redis.EncodeData(m)
tables := []string{}
shardDB := []string{}
for _, table := range m.Tables {
tables = append(tables, redis.EncodeData(table))
}
for _, db := range m.ShardDBs {
shardDB = append(shardDB, redis.EncodeData(db))
}
result["tables"] = tables
result["sharddbs"] = shardDB
return result, nil
}
// restore the main proxy data.
func (m *MysqlProxy) InitMain() {
pr, err := redis.ReadDB("MysqlProxy", "main")
CheckError(err)
if len(pr) == 0 { return }
for _, proxy := range pr {
proxy = proxy["main"].(map[string]interface {})
m.TableTotal = uint64(proxy["TableTotal"].(float64))
m.SizeTotal = uint64(proxy["SizeTotal"].(float64))
m.CurGId = uint64(proxy["CurGId"].(float64))
if ttableIds, isOk := proxy["TableIds"].([]interface{}); isOk && len(ttableIds) > 0 {
m.TableIds = redis.RestorePrimaryId(ttableIds)
} else {
m.TableIds = []string{}
}
if dbIds, isOk := proxy["ShardDBIds"].([]interface{}); isOk && len(dbIds) > 0 {
m.ShardDBIds = redis.RestorePrimaryId(dbIds)
} else {
m.ShardDBIds = []string{}
}
m.ShardDBCnt = int(proxy["ShardDBCnt"].(float64))
schema.ShardDBCnt = m.ShardDBCnt
}
// panic(fmt.Sprintf("%#v", m))
}
// get the current db cluster data infomations
func (m *MysqlProxy) InitMysqlDB() {
// panic(fmt.Sprintf("%#v, %#v", m.ShardDBIds, len(m.ShardDBIds)))
if len(m.ShardDBIds) == 0 {
// init the shard DB
shardDBs := []*schema.MysqlShardDB{}
shardDBIds := []string{}
m.ShardDBCnt = 0
for _, group := range host.Groups {
m.ShardDBCnt++
shardDb, err := m.BuildNewShardDB(&group, "shard" + strconv.Itoa(m.ShardDBCnt))
CheckError(err)
shardDBs = append(shardDBs, shardDb)
shardDBIds = append(shardDBIds, shardDb.Id)
}
m.ShardDBs = shardDBs
m.ShardDBIds = shardDBIds
// to prepare save new data.
isUpdated = true
// add shard dbs map.
schema.Sdbs = shardDBs
} else {
// 分析数据,并恢复至MysqlProxy结构体中.
shardDBs := []*schema.MysqlShardDB{}
for _, sid := range m.ShardDBIds {
dbs, err := redis.ReadDB("MysqlShardDB", sid)
CheckError(err)
if len(dbs) != 1 { panic("no found relation shard db for id:" + sid) }
sdb := dbs[0][sid].(map[string]interface {})
groupId := sdb["HostGroupId"].(string)
curGroup, err := host.GetHostGroupById(groupId)
CheckError(err)
shardDB := &schema.MysqlShardDB{
Id: sdb["Id"].(string),
Name: sdb["Name"].(string),
TableTotal: uint64(sdb["TableTotal"].(float64)),
SizeTotal: uint64(sdb["SizeTotal"].(float64)),
HostGroupId:groupId,
Created: int64(sdb["Created"].(float64)),
HostGroup: curGroup,
}
shardDBs = append(shardDBs, shardDB)
}
m.ShardDBs = shardDBs
// add shard dbs map.
schema.Sdbs = shardDBs
}
// listen the sharddb change status.
locker := &sync.Mutex{}
go func() {
for {
newShardDB := <-schema.NewShardDBCh
locker.Lock()
defer locker.Unlock()
m.ShardDBIds = append(m.ShardDBIds, newShardDB.Id)
m.ShardDBs = append(m.ShardDBs, newShardDB)
schema.Sdbs = m.ShardDBs
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
if err != nil {
log.Printf("new shard db listener error:%s", err)
}
m.ShardDBCnt++
schema.ShardDBCnt = m.ShardDBCnt
fmt.Printf("current shard total: %d\n", schema.ShardDBCnt)
}
}()
// listen the table drop action.
go func() {
for {
dropedTable := <-schema.DropedTableCh
m.DeleteTable(dropedTable)
}
}()
// panic(fmt.Sprintf("in init shard db: %#v, %#v", m))
}
func (m *MysqlProxy) DeleteTable(table *schema.MysqlTable) {
curTables := []*schema.MysqlTable{}
curTableIds := []string{}
for _, one := range m.Tables {
if one.Name != table.Name {
curTables = append(curTables, one)
}
}
for _, one := range m.TableIds {
if one != table.Id {
curTableIds = append(curTableIds, one)
}
}
// delete the relations.
m.TableIds = curTableIds
m.Tables = curTables
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
if err != nil { fmt.Printf("Delete table error when write redis: %s\n", err); return }
schema.Tables = curTables
// delete selfs.
table.Destroy()
}
// to init or restore the table infomation.
func (m *MysqlProxy) InitMysqlTable() {
if len(m.TableIds) == 0 { return }
// 分析数据,并恢复至MysqlProxy结构体中.
tables := []*schema.MysqlTable{}
for _, tid := range m.TableIds {
tbs, err := redis.ReadDB("MysqlTable", tid)
CheckError(err)
if len(tbs) != 1 { panic("no found relation table for id: " + tid) }
tb := tbs[0][tid].(map[string]interface {})
// panic(fmt.Sprintf("%#v", tbs))
shardTbIds := []string{}
if std, isOk := tb["ShardIds"].([]interface{}); isOk && len(std) > 0 {
shardTbIds = redis.RestorePrimaryId(std)
}
shardTb := []*schema.MysqlShardTable{}
table := &schema.MysqlTable{
Id: tb["Id"].(string),
Name: tb["Name"].(string),
CurGId: uint64(tb["CurGId"].(float64)),
RowTotal: uint64(tb["RowTotal"].(float64)),
ShardIds: shardTbIds,
Created: int64(tb["Created"].(float64)),
Shards: shardTb,
}
if len(shardTbIds) > 0 {
// create new shard table
shardTb, err = m.GetShardTableByIds(shardTbIds)
CheckError(err)
table.Shards = shardTb
err = table.RestoreColumnsByDB()
CheckError(err)
}
// fmt.Printf("Init table `%s` done\n", table.Name)
tables = append(tables, table)
}
m.Tables = tables
schema.Tables = m.Tables
}
// to get shard table info.
func (m *MysqlProxy) GetShardTableByIds(ids []string) ([]*schema.MysqlShardTable, error) {
if len(ids) == 0 { return nil, nil }
tables := []*schema.MysqlShardTable{}
for _, id := range ids {
tbs, err := redis.ReadDB("MysqlShardTable", id)
if err != nil { return nil, err }
if len(tbs) != 1 { return nil, errors.New("no found the shard table for id: " + id) }
tb := tbs[0][id].(map[string]interface {})
shardDbId := tb["ShardDBId"].(string)
shardDb,err := m.GetShardDbById(shardDbId)
if err != nil { return nil, err }
shardTable := &schema.MysqlShardTable{
Id: tb["Id"].(string),
Name: tb["Name"].(string),
RowTotal: uint64(tb["RowTotal"].(float64)),
ShardDBId: shardDbId,
Created: int64(tb["Created"].(float64)),
ShardDB: shardDb,
}
tables = append(tables, shardTable)
}
return tables, nil
}
func (m *MysqlProxy) UpdateToRedisDB() error {
return redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
}
// get the shard db by ids.
func (m *MysqlProxy) GetShardDbById(sid string) (*schema.MysqlShardDB, error) {
if sid == "" { return nil, errors.New("Sorry, the shard db id connot is empty") }
sdb, err := redis.ReadDB("MysqlShardDB", sid)
if err != nil { return nil, err }
if len(sdb) != 1 { return nil, errors.New("Load shard db wrong!") }
tsdb := sdb[0][sid].(map[string]interface {})
groupId := tsdb["HostGroupId"].(string)
curGroup, err := host.GetHostGroupById(groupId)
if err != nil { return nil, err }
shardDB := &schema.MysqlShardDB{
Id: tsdb["Id"].(string),
Name: tsdb["Name"].(string),
TableTotal: uint64(tsdb["TableTotal"].(float64)),
SizeTotal: uint64(tsdb["SizeTotal"].(float64)),
HostGroupId:groupId,
Created: int64(tsdb["Created"].(float64)),
HostGroup: curGroup,
}
schema.ShardDBCnt++
return shardDB, nil
}
// to init the connection pooling.
func (m *MysqlProxy) InitConnPooling() {
// because the database/sql support the connection pooling
// so just to use it.
// 这里决定不采用预先就将所有的链接生成,还是使用到时再初始化连接.
}
func (m *MysqlProxy) BuildNewShardDB(group *host.Group, name string) (*schema.MysqlShardDB, error) {
if name == "" { return nil, errors.New("Sorry, can not build the no name databases") }
// init the shard db to host.
master := group.Master[0]
db, err := (&master).ConnToDB("mysql")
if err != nil { return nil, err }
stmt, err := db.Prepare(fmt.Sprintf("CREATE DATABASE `%s` DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci", name))
if err != nil { return nil, err }
_, err = stmt.Exec()
if err != nil { return nil, err }
stmt.Close()
shardDbId := redis.BuildPrimaryKey(name, true)
shardDb := &schema.MysqlShardDB{
Id: shardDbId,
Name: name,
TableTotal: 0,
SizeTotal: 0,
HostGroupId: group.Id,
Created: redis.GetCurTime(),
HostGroup: group,
}
// save this new shard database to tracker.
err = redis.WriteDB(shardDbId, redis.EncodeData(shardDb), "MysqlShardDB")
if err != nil { return nil, err }
(&master).CloseDB()
schema.ShardDBCnt++
return shardDb, nil
}
// add a new table to mysql proxy
func (m *MysqlProxy) AddTable(tab *schema.MysqlTable) error {
tables := m.Tables
tableIds := m.TableIds
if tables == nil {
tables = []*schema.MysqlTable{ tab }
tableIds = []string{ tab.Id }
} else {
tables = append(tables, tab)
tableIds = append(tableIds, tab.Id)
}
m.Tables = tables
schema.Tables = tables
m.TableIds = tableIds
return m.UpdateToRedisDB()
}
// to exel the sql
func (m *MysqlProxy) Exec(args []string, user *client.Client) (interface{}, error) {
execPlan, err := planbuilder.GetExecPlan(args, user)
if err != nil { return nil, err }
result, err := execPlan.Do()
if err != nil { return nil, err }
switch result.(type) {
case *schema.MysqlTable:
err = m.AddTable(result.(*schema.MysqlTable))
if err == nil { result = "create succesfully!" }
}
return result, err
}
// to execute exeplan
// func (m *MysqlProxy) DoExecPlan(plan *planbuilder.ExecPlan
| dLogHostStatus()
return proxy
}
// To init the necessary data.
func (m *MysqlProxy) Init() {
| identifier_body |
main.go | // Copyright 2014, Successfulmatch Inc. All rights reserved.
// Author TonyXu<tonycbcd@gmail.com>,
// Build on dev-0.0.1
// MIT Licensed
// The Go mysql proxy main model file.
package models
import (
"fmt"
"errors"
"strconv"
"sync"
"log"
"git.masontest.com/branches/gomysqlproxy/app/models/schema"
"git.masontest.com/branches/gomysqlproxy/app/models/host"
"git.masontest.com/branches/gomysqlproxy/app/models/client"
"git.masontest.com/branches/gomysqlproxy/app/models/redis"
"git.masontest.com/branches/gomysqlproxy/app/models/planbuilder"
)
type MysqlProxy struct {
TableTotal uint64
SizeTotal uint64 // 单位:MB
CurGId uint64 // 当前全局表主ID,为新表主ID,不能减少
TableIds []string // 表主ID
Tables []*schema.MysqlTable `json:"-"`
ShardDBIds []string // Shard库的主ID
ShardDBs []*schema.MysqlShardDB `json:"-"` // 已生成的shard库
ShardDBCnt int // shard db 计数器
TablesMap map[string]*schema.MysqlTable `json:"-"`
}
var (
MyProxy *MysqlProxy
isUpdated = false
)
func CheckError(err error) {
if err != nil {
panic(fmt.Sprintf("init the table error in mysql.go: %s", err))
}
}
func NewMysqlProxy() *MysqlProxy {
proxy := &MysqlProxy {}
proxy.Init()
host.GetAndLogHostStatus()
return proxy
}
// To init the necessary data.
func (m *MysqlProxy) Init() {
m.InitMain()
m.InitMysqlDB()
m.InitMysqlTable()
m.InitConnPooling()
if isUpdated {
// save mysql proxy.
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
CheckError(err)
}
// panic(fmt.Sprintf("OK: %#v", m))
}
// get the table status.
func (m *MysqlProxy) GetStatus() (map[string]interface{}, error) {
result := map[string]interface{}{}
result["main"] = redis.EncodeData(m)
tables := []string{}
shardDB := []string{}
for _, table := range m.Tables {
tables = append(tables, redis.EncodeData(table))
}
for _, db := range m.ShardDBs {
shardDB = append(shardDB, redis.EncodeData(db))
}
result["tables"] = tables
result["sharddbs"] = shardDB
return result, nil
}
// restore the main proxy data.
func (m *MysqlProxy) InitMain() {
pr, err := redis.ReadDB("MysqlProxy", "main")
CheckError(err)
if len(pr) == 0 { return }
for _, proxy := range pr {
proxy = proxy["main"].(map[string]interface {})
m.TableTotal = uint64(proxy["TableTotal"].(float64))
m.SizeTotal = uint64(proxy["SizeTotal"].(float64))
m.CurGId = uint64(proxy["CurGId"].(float64))
if ttableIds, isOk := proxy["TableIds"].([]interface{}); isOk && len(ttableIds) > 0 {
m.TableIds = redis.RestorePrimaryId(ttableIds)
} else {
m.TableIds = []string{}
}
if dbIds, isOk := proxy["ShardDBIds"].([]interface{}); isOk && len(dbIds) > 0 {
m.ShardDBIds = redis.RestorePrimaryId(dbIds)
} else {
m.ShardDBIds = []string{}
}
m.ShardDBCnt = int(proxy["ShardDBCnt"].(float64))
schema.ShardDBCnt = m.ShardDBCnt
}
// panic(fmt.Sprintf("%#v", m))
}
// get the current db cluster data infomations
func (m *MysqlProxy) InitMysqlDB() {
// panic(fmt.Sprintf("%#v, %#v", m.ShardDBIds, len(m.ShardDBIds)))
if len(m.ShardDBIds) == 0 {
// init the shard DB
shardDBs := []*schema.MysqlShardDB{}
shardDBIds := []string{}
m.ShardDBCnt = 0
for _, group := range host.Groups {
m.ShardDBCnt++
shardDb, err := m.BuildNewShardDB(&group, "shard" + strconv.Itoa(m.ShardDBCnt))
CheckError(err)
shardDBs = append(shardDBs, shardDb)
shardDBIds = append(shardDBIds, shardDb.Id)
}
m.ShardDBs = shardDBs
m.ShardDBIds = shardDBIds
// to prepare save new data.
isUpdated = true
// add shard dbs map.
schema.Sdbs = shardDBs
} else {
// 分析数据,并恢复至MysqlProxy结构体中.
shardDBs := []*schema.MysqlShardDB{}
for _, sid := range m.ShardDBIds {
dbs, err := redis.ReadDB("MysqlShardDB", sid)
CheckError(err)
if len(dbs) != 1 { panic("no found relation shard db for id:" + sid) }
sdb := dbs[0][sid].(map[string]interface {})
groupId := sdb["HostGroupId"].(string)
curGroup, err := host.GetHostGroupById(groupId)
CheckError(err)
shardDB := &schema.MysqlShardDB{
Id: sdb["Id"].(string),
Name: sdb["Name"].(string),
TableTotal: uint64(sdb["TableTotal"].(float64)),
SizeTotal: uint64(sdb["SizeTotal"].(float64)),
HostGroupId:groupId,
Created: int64(sdb["Created"].(float64)),
HostGroup: curGroup,
}
shardDBs = append(shardDBs, shardDB)
}
m.ShardDBs = shardDBs
// add shard dbs map.
schema.Sdbs = shardDBs
}
// listen the sharddb change status.
locker := &sync.Mutex{}
go func() {
for {
newShardDB := <-schema.NewShardDBCh
locker.Lock()
defer locker.Unlock()
m.ShardDBIds = append(m.ShardDBIds, newShardDB.Id)
m.ShardDBs = append(m.ShardDBs, newShardDB)
schema.Sdbs = m.ShardDBs
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
if err != nil {
log.Printf("new shard db listener error:%s", err)
}
m.ShardDBCnt++
schema.ShardDBCnt = m.ShardDBCnt
fmt.Printf("current shard total: %d\n", schema.ShardDBCnt)
}
}()
// listen the table drop action.
go func() {
for {
dropedTable := <-schema.DropedTableCh
m.DeleteTable(dropedTable)
}
}()
// panic(fmt.Sprintf("in init shard db: %#v, %#v", m))
}
func (m *MysqlProxy) DeleteTable(table *schema.MysqlTable) {
curTables := []*schema.MysqlTable{}
curTableIds := []string{}
for _, one := range m.Tables {
if one.Name != table.Name {
curTables = append(curTables, one)
}
}
for _, one := range m.TableIds {
if one != table.Id {
curTableIds = append(curTableIds, one)
}
}
// delete the relations.
m.TableIds = curTableIds
m.Tables = curTables
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
if err != nil { fmt.Printf("Delete table error when write redis: %s\n", err); return }
schema.Tables = curTables
// delete selfs.
table.Destroy()
}
// to init or restore the table infomation.
func (m *MysqlProxy) InitMysqlTable() {
if len(m.TableIds) == 0 { return }
// 分析数据,并恢复至MysqlProxy结构体中.
tables := []*schema.MysqlTable{}
for _, tid := range m.TableIds {
tbs, err := redis.ReadDB("MysqlTable", tid)
CheckError(err)
if len(tbs) != 1 { panic("no found relation table for id: " + tid) }
tb := tbs[0][tid].(map[string]interface {})
// panic(fmt.Sprintf("%#v", tbs))
shardTbIds := []string{}
if std, isOk := tb["ShardIds"].([]interface{}); isOk && len(std) > 0 {
shardTbIds = redis.RestorePrimaryId(std)
}
shardTb := []*schema.MysqlShardTable{}
table := &schema.MysqlTable{
Id: tb["Id"].(string),
Name: tb["Name"].(string),
CurGId: uint64(tb["CurGId"].(float64)),
RowTotal: uint64(tb["RowTotal"].(float64)),
ShardIds: shardTbIds,
Created: int64(tb["Created"].(float64)),
Shards: shardTb,
}
if len(shardTbIds) > 0 {
// create new shard table
shardTb, err = m.GetShardTableByIds(shardTbIds)
CheckError(err)
table.Shards = shardTb
err = table.RestoreColumnsByDB()
CheckError(err)
}
// fmt.Printf("Init table `%s` done\n", table.Name)
tables = append(tables, table)
}
m.Tables = tables
schema.Tables = m.Tables
}
// to get shard table info.
func (m *MysqlProxy) GetShardTableByIds(ids []string) ([]*schema.MysqlShardTable, error) {
if len(ids) == 0 { return nil, nil }
| schema.MysqlShardTable{}
for _, id := range ids {
tbs, err := redis.ReadDB("MysqlShardTable", id)
if err != nil { return nil, err }
if len(tbs) != 1 { return nil, errors.New("no found the shard table for id: " + id) }
tb := tbs[0][id].(map[string]interface {})
shardDbId := tb["ShardDBId"].(string)
shardDb,err := m.GetShardDbById(shardDbId)
if err != nil { return nil, err }
shardTable := &schema.MysqlShardTable{
Id: tb["Id"].(string),
Name: tb["Name"].(string),
RowTotal: uint64(tb["RowTotal"].(float64)),
ShardDBId: shardDbId,
Created: int64(tb["Created"].(float64)),
ShardDB: shardDb,
}
tables = append(tables, shardTable)
}
return tables, nil
}
func (m *MysqlProxy) UpdateToRedisDB() error {
return redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
}
// get the shard db by ids.
func (m *MysqlProxy) GetShardDbById(sid string) (*schema.MysqlShardDB, error) {
if sid == "" { return nil, errors.New("Sorry, the shard db id connot is empty") }
sdb, err := redis.ReadDB("MysqlShardDB", sid)
if err != nil { return nil, err }
if len(sdb) != 1 { return nil, errors.New("Load shard db wrong!") }
tsdb := sdb[0][sid].(map[string]interface {})
groupId := tsdb["HostGroupId"].(string)
curGroup, err := host.GetHostGroupById(groupId)
if err != nil { return nil, err }
shardDB := &schema.MysqlShardDB{
Id: tsdb["Id"].(string),
Name: tsdb["Name"].(string),
TableTotal: uint64(tsdb["TableTotal"].(float64)),
SizeTotal: uint64(tsdb["SizeTotal"].(float64)),
HostGroupId:groupId,
Created: int64(tsdb["Created"].(float64)),
HostGroup: curGroup,
}
schema.ShardDBCnt++
return shardDB, nil
}
// to init the connection pooling.
func (m *MysqlProxy) InitConnPooling() {
// because the database/sql support the connection pooling
// so just to use it.
// 这里决定不采用预先就将所有的链接生成,还是使用到时再初始化连接.
}
func (m *MysqlProxy) BuildNewShardDB(group *host.Group, name string) (*schema.MysqlShardDB, error) {
if name == "" { return nil, errors.New("Sorry, can not build the no name databases") }
// init the shard db to host.
master := group.Master[0]
db, err := (&master).ConnToDB("mysql")
if err != nil { return nil, err }
stmt, err := db.Prepare(fmt.Sprintf("CREATE DATABASE `%s` DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci", name))
if err != nil { return nil, err }
_, err = stmt.Exec()
if err != nil { return nil, err }
stmt.Close()
shardDbId := redis.BuildPrimaryKey(name, true)
shardDb := &schema.MysqlShardDB{
Id: shardDbId,
Name: name,
TableTotal: 0,
SizeTotal: 0,
HostGroupId: group.Id,
Created: redis.GetCurTime(),
HostGroup: group,
}
// save this new shard database to tracker.
err = redis.WriteDB(shardDbId, redis.EncodeData(shardDb), "MysqlShardDB")
if err != nil { return nil, err }
(&master).CloseDB()
schema.ShardDBCnt++
return shardDb, nil
}
// add a new table to mysql proxy
func (m *MysqlProxy) AddTable(tab *schema.MysqlTable) error {
tables := m.Tables
tableIds := m.TableIds
if tables == nil {
tables = []*schema.MysqlTable{ tab }
tableIds = []string{ tab.Id }
} else {
tables = append(tables, tab)
tableIds = append(tableIds, tab.Id)
}
m.Tables = tables
schema.Tables = tables
m.TableIds = tableIds
return m.UpdateToRedisDB()
}
// to exel the sql
func (m *MysqlProxy) Exec(args []string, user *client.Client) (interface{}, error) {
execPlan, err := planbuilder.GetExecPlan(args, user)
if err != nil { return nil, err }
result, err := execPlan.Do()
if err != nil { return nil, err }
switch result.(type) {
case *schema.MysqlTable:
err = m.AddTable(result.(*schema.MysqlTable))
if err == nil { result = "create succesfully!" }
}
return result, err
}
// to execute exeplan
// func (m *MysqlProxy) DoExecPlan(plan *planbuilder.ExecPlan
| tables := []* | identifier_name |
main.go | // Copyright 2014, Successfulmatch Inc. All rights reserved.
// Author TonyXu<tonycbcd@gmail.com>,
// Build on dev-0.0.1
// MIT Licensed
// The Go mysql proxy main model file.
package models
import (
"fmt"
"errors"
"strconv"
"sync"
"log"
"git.masontest.com/branches/gomysqlproxy/app/models/schema"
"git.masontest.com/branches/gomysqlproxy/app/models/host"
"git.masontest.com/branches/gomysqlproxy/app/models/client"
"git.masontest.com/branches/gomysqlproxy/app/models/redis"
"git.masontest.com/branches/gomysqlproxy/app/models/planbuilder"
)
type MysqlProxy struct {
TableTotal uint64
SizeTotal uint64 // 单位:MB
CurGId uint64 // 当前全局表主ID,为新表主ID,不能减少
TableIds []string // 表主ID
Tables []*schema.MysqlTable `json:"-"`
ShardDBIds []string // Shard库的主ID
ShardDBs []*schema.MysqlShardDB `json:"-"` // 已生成的shard库
ShardDBCnt int // shard db 计数器
TablesMap map[string]*schema.MysqlTable `json:"-"`
}
var (
MyProxy *MysqlProxy
isUpdated = false
)
func CheckError(err error) {
if err != nil {
panic(fmt.Sprintf("init the table error in mysql.go: %s", err))
}
}
func NewMysqlProxy() *MysqlProxy {
proxy := &MysqlProxy {}
proxy.Init()
host.GetAndLogHostStatus()
return proxy
}
// To init the necessary data.
func (m *MysqlProxy) Init() {
m.InitMain()
m.InitMysqlDB()
m.InitMysqlTable()
m.InitConnPooling()
if isUpdated {
// save mysql proxy.
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
CheckError(err)
}
// panic(fmt.Sprintf("OK: %#v", m))
}
// get the table status.
func (m *MysqlProxy) GetStatus() (map[string]interface{}, error) {
result := map[string]interface{}{}
result["main"] = redis.EncodeData(m)
tables := []string{}
shardDB := []string{}
for _, table := range m.Tables {
tables = append(tables, redis.EncodeData(table))
}
for _, db := range m.ShardDBs {
shardDB = append(shardDB, redis.EncodeData(db))
}
result["tables"] = tables
result["sharddbs"] = shardDB
return result, nil
}
// restore the main proxy data.
func (m *MysqlProxy) InitMain() {
pr, err := redis.ReadDB("MysqlProxy", "main")
CheckError(err)
if len(pr) == 0 { return }
for _, proxy := range pr {
proxy = proxy["main"].(map[string]interface {})
m.TableTotal = uint64(proxy["TableTotal"].(float64))
m.SizeTotal = uint64(proxy["SizeTotal"].(float64))
m.CurGId = uint64(proxy["CurGId"].(float64))
if ttableIds, isOk := proxy["TableIds"].([]interface{}); isOk && len(ttableIds) > 0 {
m.TableIds = redis.RestorePrimaryId(ttableIds)
} else {
m.TableIds = []string{}
}
if dbIds, isOk := proxy["ShardDBIds"].([]interface{}); isOk && len(dbIds) > 0 {
m.ShardDBIds = redis.RestorePrimaryId(dbIds)
} else {
m.ShardDBIds = []string{}
}
m.ShardDBCnt = int(proxy["ShardDBCnt"].(float64))
schema.ShardDBCnt = m.ShardDBCnt
}
// panic(fmt.Sprintf("%#v", m))
}
// get the current db cluster data infomations
func (m *MysqlProxy) InitMysqlDB() {
// panic(fmt.Sprintf("%#v, %#v", m.ShardDBIds, len(m.ShardDBIds)))
if len(m.ShardDBIds) == 0 {
// init the shard DB
shardDBs := []*schema.MysqlShardDB{}
shardDBIds := []string{}
m.ShardDBCnt = 0
for _, group := range host.Groups {
m.ShardDBCnt++
shardDb, err := m.BuildNewShardDB(&group, "shard" + strconv.Itoa(m.ShardDBCnt))
CheckError(err)
shardDBs = append(shardDBs, shardDb)
shardDBIds = append(shardDBIds, shardDb.Id)
}
m.ShardDBs = shardDBs
m.ShardDBIds = shardDBIds
// to prepare save new data.
isUpdated = true
// add shard dbs map.
schema.Sdbs = shardDBs
} else {
// 分析数据,并恢复至MysqlProxy结构体中.
shardDBs := []*schema.MysqlShardDB{}
for _, sid := range m.ShardDBIds {
dbs, err := redis.ReadDB("MysqlShardDB", sid)
CheckError(err)
| = shardDBs
}
// listen the sharddb change status.
locker := &sync.Mutex{}
go func() {
for {
newShardDB := <-schema.NewShardDBCh
locker.Lock()
defer locker.Unlock()
m.ShardDBIds = append(m.ShardDBIds, newShardDB.Id)
m.ShardDBs = append(m.ShardDBs, newShardDB)
schema.Sdbs = m.ShardDBs
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
if err != nil {
log.Printf("new shard db listener error:%s", err)
}
m.ShardDBCnt++
schema.ShardDBCnt = m.ShardDBCnt
fmt.Printf("current shard total: %d\n", schema.ShardDBCnt)
}
}()
// listen the table drop action.
go func() {
for {
dropedTable := <-schema.DropedTableCh
m.DeleteTable(dropedTable)
}
}()
// panic(fmt.Sprintf("in init shard db: %#v, %#v", m))
}
func (m *MysqlProxy) DeleteTable(table *schema.MysqlTable) {
curTables := []*schema.MysqlTable{}
curTableIds := []string{}
for _, one := range m.Tables {
if one.Name != table.Name {
curTables = append(curTables, one)
}
}
for _, one := range m.TableIds {
if one != table.Id {
curTableIds = append(curTableIds, one)
}
}
// delete the relations.
m.TableIds = curTableIds
m.Tables = curTables
err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
if err != nil { fmt.Printf("Delete table error when write redis: %s\n", err); return }
schema.Tables = curTables
// delete selfs.
table.Destroy()
}
// to init or restore the table infomation.
// InitMysqlTable restores every table listed in m.TableIds from redis,
// including each table's shard tables and columns. Panics (via CheckError)
// on any inconsistency, since this runs at start-up.
func (m *MysqlProxy) InitMysqlTable() {
	if len(m.TableIds) == 0 { return }
	// Parse the persisted data and restore it into the MysqlProxy struct.
	tables := []*schema.MysqlTable{}
	for _, tid := range m.TableIds {
		tbs, err := redis.ReadDB("MysqlTable", tid)
		CheckError(err)
		if len(tbs) != 1 { panic("no found relation table for id: " + tid) }
		// redis.ReadDB returns a list of {id: record} maps; numeric fields
		// come back as float64 from JSON decoding.
		tb := tbs[0][tid].(map[string]interface {})
		// panic(fmt.Sprintf("%#v", tbs))
		shardTbIds := []string{}
		if std, isOk := tb["ShardIds"].([]interface{}); isOk && len(std) > 0 {
			shardTbIds = redis.RestorePrimaryId(std)
		}
		shardTb := []*schema.MysqlShardTable{}
		table := &schema.MysqlTable{
			Id: tb["Id"].(string),
			Name: tb["Name"].(string),
			CurGId: uint64(tb["CurGId"].(float64)),
			RowTotal: uint64(tb["RowTotal"].(float64)),
			ShardIds: shardTbIds,
			Created: int64(tb["Created"].(float64)),
			Shards: shardTb,
		}
		if len(shardTbIds) > 0 {
			// create new shard table (restore shard tables and columns)
			shardTb, err = m.GetShardTableByIds(shardTbIds)
			CheckError(err)
			table.Shards = shardTb
			err = table.RestoreColumnsByDB()
			CheckError(err)
		}
		// fmt.Printf("Init table `%s` done\n", table.Name)
		tables = append(tables, table)
	}
	m.Tables = tables
	// Mirror into the package-level schema view.
	schema.Tables = m.Tables
}
// to get shard table info.
// GetShardTableByIds loads the shard-table records for the given ids from
// redis, resolving each one's owning shard DB as well. Returns (nil, nil)
// for an empty id list.
func (m *MysqlProxy) GetShardTableByIds(ids []string) ([]*schema.MysqlShardTable, error) {
	if len(ids) == 0 { return nil, nil }
	tables := []*schema.MysqlShardTable{}
	for _, id := range ids {
		tbs, err := redis.ReadDB("MysqlShardTable", id)
		if err != nil { return nil, err }
		if len(tbs) != 1 { return nil, errors.New("no found the shard table for id: " + id) }
		// JSON-decoded record: numbers arrive as float64.
		tb := tbs[0][id].(map[string]interface {})
		shardDbId := tb["ShardDBId"].(string)
		shardDb,err := m.GetShardDbById(shardDbId)
		if err != nil { return nil, err }
		shardTable := &schema.MysqlShardTable{
			Id: tb["Id"].(string),
			Name: tb["Name"].(string),
			RowTotal: uint64(tb["RowTotal"].(float64)),
			ShardDBId: shardDbId,
			Created: int64(tb["Created"].(float64)),
			ShardDB: shardDb,
		}
		tables = append(tables, shardTable)
	}
	return tables, nil
}
// UpdateToRedisDB persists the proxy's whole state under the fixed "main"
// key of the MysqlProxy redis namespace.
func (m *MysqlProxy) UpdateToRedisDB() error {
	return redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
}
// get the shard db by ids.
// GetShardDbById loads one shard-DB record from redis and resolves its
// host group.
// NOTE(review): incrementing the global schema.ShardDBCnt inside a
// read-only lookup looks unintended (restores double-count) — confirm
// against callers.
func (m *MysqlProxy) GetShardDbById(sid string) (*schema.MysqlShardDB, error) {
	if sid == "" { return nil, errors.New("Sorry, the shard db id connot is empty") }
	sdb, err := redis.ReadDB("MysqlShardDB", sid)
	if err != nil { return nil, err }
	if len(sdb) != 1 { return nil, errors.New("Load shard db wrong!") }
	// JSON-decoded record: numbers arrive as float64.
	tsdb := sdb[0][sid].(map[string]interface {})
	groupId := tsdb["HostGroupId"].(string)
	curGroup, err := host.GetHostGroupById(groupId)
	if err != nil { return nil, err }
	shardDB := &schema.MysqlShardDB{
		Id: tsdb["Id"].(string),
		Name: tsdb["Name"].(string),
		TableTotal: uint64(tsdb["TableTotal"].(float64)),
		SizeTotal: uint64(tsdb["SizeTotal"].(float64)),
		HostGroupId:groupId,
		Created: int64(tsdb["Created"].(float64)),
		HostGroup: curGroup,
	}
	schema.ShardDBCnt++
	return shardDB, nil
}
// to init the connection pooling.
// InitConnPooling is intentionally a no-op: database/sql already provides
// connection pooling, so connections are created lazily on first use.
func (m *MysqlProxy) InitConnPooling() {
	// because the database/sql support the connection pooling
	// so just to use it.
	// Decision: do not pre-create all connections up front; initialize a
	// connection lazily when it is first needed. (translated)
}
// BuildNewShardDB creates the physical database `name` on the master host
// of the given group, records its metadata in redis and returns it.
// NOTE(review): `name` is interpolated into the CREATE DATABASE statement;
// identifiers cannot be bound as SQL parameters, so callers must only pass
// internally generated names (here: "shard<N>"), never user input.
func (m *MysqlProxy) BuildNewShardDB(group *host.Group, name string) (*schema.MysqlShardDB, error) {
	if name == "" {
		return nil, errors.New("Sorry, can not build the no name databases")
	}
	// init the shard db to host.
	master := group.Master[0]
	db, err := (&master).ConnToDB("mysql")
	if err != nil {
		return nil, err
	}
	// Fix: close the connection on every exit path; the original leaked it
	// whenever Prepare/Exec/WriteDB failed.
	defer (&master).CloseDB()
	stmt, err := db.Prepare(fmt.Sprintf("CREATE DATABASE `%s` DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci", name))
	if err != nil {
		return nil, err
	}
	// Fix: the statement also leaked when Exec failed.
	defer stmt.Close()
	if _, err = stmt.Exec(); err != nil {
		return nil, err
	}
	shardDbId := redis.BuildPrimaryKey(name, true)
	shardDb := &schema.MysqlShardDB{
		Id:          shardDbId,
		Name:        name,
		TableTotal:  0,
		SizeTotal:   0,
		HostGroupId: group.Id,
		Created:     redis.GetCurTime(),
		HostGroup:   group,
	}
	// save this new shard database to tracker.
	if err = redis.WriteDB(shardDbId, redis.EncodeData(shardDb), "MysqlShardDB"); err != nil {
		return nil, err
	}
	schema.ShardDBCnt++
	return shardDb, nil
}
// AddTable registers a freshly created table with the proxy, mirrors the
// table list into the shared schema view, and persists the new state.
func (m *MysqlProxy) AddTable(tab *schema.MysqlTable) error {
	// append handles a nil slice transparently, so the original explicit
	// nil-check branch is unnecessary; behavior is identical.
	m.Tables = append(m.Tables, tab)
	m.TableIds = append(m.TableIds, tab.Id)
	schema.Tables = m.Tables
	return m.UpdateToRedisDB()
}
// Exec builds an execution plan from the raw SQL arguments, runs it and,
// when the plan produced a newly created table, registers that table with
// the proxy before reporting success to the client.
func (m *MysqlProxy) Exec(args []string, user *client.Client) (interface{}, error) {
	execPlan, err := planbuilder.GetExecPlan(args, user)
	if err != nil {
		return nil, err
	}
	result, err := execPlan.Do()
	if err != nil {
		return nil, err
	}
	// A *schema.MysqlTable result means a CREATE TABLE was executed.
	if tab, ok := result.(*schema.MysqlTable); ok {
		if err = m.AddTable(tab); err == nil {
			result = "create successfully!" // fix: was misspelled "succesfully"
		}
	}
	return result, err
}
// to execute exeplan
// func (m *MysqlProxy) DoExecPlan(plan *planbuilder.ExecPlan
| if len(dbs) != 1 { panic("no found relation shard db for id:" + sid) }
sdb := dbs[0][sid].(map[string]interface {})
groupId := sdb["HostGroupId"].(string)
curGroup, err := host.GetHostGroupById(groupId)
CheckError(err)
shardDB := &schema.MysqlShardDB{
Id: sdb["Id"].(string),
Name: sdb["Name"].(string),
TableTotal: uint64(sdb["TableTotal"].(float64)),
SizeTotal: uint64(sdb["SizeTotal"].(float64)),
HostGroupId:groupId,
Created: int64(sdb["Created"].(float64)),
HostGroup: curGroup,
}
shardDBs = append(shardDBs, shardDB)
}
m.ShardDBs = shardDBs
// add shard dbs map.
schema.Sdbs | conditional_block |
main.go | // Copyright 2014, Successfulmatch Inc. All rights reserved.
// Author TonyXu<tonycbcd@gmail.com>,
// Build on dev-0.0.1
// MIT Licensed
// The Go mysql proxy main model file.
package models
import (
"fmt"
"errors"
"strconv"
"sync"
"log"
"git.masontest.com/branches/gomysqlproxy/app/models/schema"
"git.masontest.com/branches/gomysqlproxy/app/models/host"
"git.masontest.com/branches/gomysqlproxy/app/models/client"
"git.masontest.com/branches/gomysqlproxy/app/models/redis"
"git.masontest.com/branches/gomysqlproxy/app/models/planbuilder"
)
// MysqlProxy is the root bookkeeping object of the proxy: it tracks every
// logical table and every physical shard database, and is persisted as a
// whole under the "main" key of the MysqlProxy redis namespace. Fields
// tagged `json:"-"` are rebuilt from their *Ids counterparts at start-up.
type MysqlProxy struct {
	TableTotal uint64
	SizeTotal uint64 // unit: MB
	CurGId uint64 // current global table primary ID; assigned to new tables, must never decrease
	TableIds []string // primary IDs of all tables (persisted form of Tables)
	Tables []*schema.MysqlTable `json:"-"`
	ShardDBIds []string // primary IDs of the shard databases
	ShardDBs []*schema.MysqlShardDB `json:"-"` // shard databases already created
	ShardDBCnt int // shard db counter
	TablesMap map[string]*schema.MysqlTable `json:"-"`
}
var (
	// MyProxy is the process-wide proxy instance; presumably assigned by the
	// application bootstrap — it is not set anywhere in this chunk (TODO confirm).
	MyProxy *MysqlProxy
	// isUpdated marks state that was freshly built (rather than restored)
	// and must therefore be flushed back to redis at the end of Init.
	isUpdated = false
)
func CheckError(err error) {
if err != nil {
panic(fmt.Sprintf("init the table error in mysql.go: %s", err))
}
}
// NewMysqlProxy constructs the proxy, restores all persisted state via
// Init, and logs the current host status once.
func NewMysqlProxy() *MysqlProxy {
	p := new(MysqlProxy)
	p.Init()
	host.GetAndLogHostStatus()
	return p
}
// To init the necessary data.
// Init restores the proxy state in dependency order (main record, shard
// DBs, tables, connection pooling) and flushes a fresh state back to redis
// when the package-level isUpdated flag was set during initialization.
// Panics via CheckError on a failed write.
func (m *MysqlProxy) Init() {
	m.InitMain()
	m.InitMysqlDB()
	m.InitMysqlTable()
	m.InitConnPooling()
	if isUpdated {
		// save mysql proxy.
		err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
		CheckError(err)
	}
	// panic(fmt.Sprintf("OK: %#v", m))
}
// GetStatus reports the encoded proxy state together with every known
// table and shard database, keyed for the status endpoint. The error is
// always nil; it exists only to satisfy the handler signature.
func (m *MysqlProxy) GetStatus() (map[string]interface{}, error) {
	encodedTables := make([]string, 0, len(m.Tables))
	for _, t := range m.Tables {
		encodedTables = append(encodedTables, redis.EncodeData(t))
	}
	encodedShardDBs := make([]string, 0, len(m.ShardDBs))
	for _, db := range m.ShardDBs {
		encodedShardDBs = append(encodedShardDBs, redis.EncodeData(db))
	}
	return map[string]interface{}{
		"main":     redis.EncodeData(m),
		"tables":   encodedTables,
		"sharddbs": encodedShardDBs,
	}, nil
}
// restore the main proxy data.
// InitMain reads the persisted "main" MysqlProxy record from redis and
// copies its scalar fields and id lists into m. A missing record (first
// run) leaves m zero-valued. Panics via CheckError on a read error.
func (m *MysqlProxy) InitMain() {
	pr, err := redis.ReadDB("MysqlProxy", "main")
	CheckError(err)
	if len(pr) == 0 { return }
	for _, proxy := range pr {
		// JSON-decoded record: numbers arrive as float64, lists as []interface{}.
		proxy = proxy["main"].(map[string]interface {})
		m.TableTotal = uint64(proxy["TableTotal"].(float64))
		m.SizeTotal = uint64(proxy["SizeTotal"].(float64))
		m.CurGId = uint64(proxy["CurGId"].(float64))
		if ttableIds, isOk := proxy["TableIds"].([]interface{}); isOk && len(ttableIds) > 0 {
			m.TableIds = redis.RestorePrimaryId(ttableIds)
		} else {
			m.TableIds = []string{}
		}
		if dbIds, isOk := proxy["ShardDBIds"].([]interface{}); isOk && len(dbIds) > 0 {
			m.ShardDBIds = redis.RestorePrimaryId(dbIds)
		} else {
			m.ShardDBIds = []string{}
		}
		m.ShardDBCnt = int(proxy["ShardDBCnt"].(float64))
		// Mirror the counter into the package-level schema view.
		schema.ShardDBCnt = m.ShardDBCnt
	}
	// panic(fmt.Sprintf("%#v", m))
}
// get the current db cluster data infomations
// InitMysqlDB creates the shard databases on first start (one per host
// group) or restores them from redis afterwards, then spawns two
// background listeners: one folding newly created shard DBs into the
// proxy state, and one deleting dropped tables.
func (m *MysqlProxy) InitMysqlDB() {
	// panic(fmt.Sprintf("%#v, %#v", m.ShardDBIds, len(m.ShardDBIds)))
	if len(m.ShardDBIds) == 0 {
		// init the shard DB
		shardDBs := []*schema.MysqlShardDB{}
		shardDBIds := []string{}
		m.ShardDBCnt = 0
		for _, group := range host.Groups {
			// Fix: copy the loop variable. Before Go 1.22 `&group` aliased a
			// single shared variable, so every shard DB ended up pointing at
			// the LAST host group.
			group := group
			m.ShardDBCnt++
			shardDb, err := m.BuildNewShardDB(&group, "shard"+strconv.Itoa(m.ShardDBCnt))
			CheckError(err)
			shardDBs = append(shardDBs, shardDb)
			shardDBIds = append(shardDBIds, shardDb.Id)
		}
		m.ShardDBs = shardDBs
		m.ShardDBIds = shardDBIds
		// to prepare save new data.
		isUpdated = true
		// add shard dbs map.
		schema.Sdbs = shardDBs
	} else {
		// Restore: decode the persisted shard-DB records back into the struct.
		shardDBs := []*schema.MysqlShardDB{}
		for _, sid := range m.ShardDBIds {
			dbs, err := redis.ReadDB("MysqlShardDB", sid)
			CheckError(err)
			if len(dbs) != 1 {
				panic("no found relation shard db for id:" + sid)
			}
			sdb := dbs[0][sid].(map[string]interface{})
			groupId := sdb["HostGroupId"].(string)
			curGroup, err := host.GetHostGroupById(groupId)
			CheckError(err)
			shardDB := &schema.MysqlShardDB{
				Id:          sdb["Id"].(string),
				Name:        sdb["Name"].(string),
				TableTotal:  uint64(sdb["TableTotal"].(float64)),
				SizeTotal:   uint64(sdb["SizeTotal"].(float64)),
				HostGroupId: groupId,
				Created:     int64(sdb["Created"].(float64)),
				HostGroup:   curGroup,
			}
			shardDBs = append(shardDBs, shardDB)
		}
		m.ShardDBs = shardDBs
		// add shard dbs map.
		schema.Sdbs = shardDBs
	}
	// listen the sharddb change status.
	locker := &sync.Mutex{}
	go func() {
		for {
			newShardDB := <-schema.NewShardDBCh
			locker.Lock()
			m.ShardDBIds = append(m.ShardDBIds, newShardDB.Id)
			m.ShardDBs = append(m.ShardDBs, newShardDB)
			schema.Sdbs = m.ShardDBs
			err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
			if err != nil {
				log.Printf("new shard db listener error:%s", err)
			}
			m.ShardDBCnt++
			schema.ShardDBCnt = m.ShardDBCnt
			// Fix: unlock explicitly at the end of the iteration. The original
			// `defer locker.Unlock()` inside this endless loop never ran
			// (defers fire at function return), so the second event deadlocked.
			locker.Unlock()
			fmt.Printf("current shard total: %d\n", schema.ShardDBCnt)
		}
	}()
	// listen the table drop action.
	go func() {
		for {
			dropedTable := <-schema.DropedTableCh
			m.DeleteTable(dropedTable)
		}
	}()
	// panic(fmt.Sprintf("in init shard db: %#v, %#v", m))
}
// DeleteTable drops `table` from the proxy's bookkeeping (by Name for the
// table list, by Id for the id list), persists the new state to redis and
// finally destroys the table object itself. On a failed redis write the
// table object is left alive.
func (m *MysqlProxy) DeleteTable(table *schema.MysqlTable) {
	remaining := make([]*schema.MysqlTable, 0, len(m.Tables))
	remainingIds := make([]string, 0, len(m.TableIds))
	for _, t := range m.Tables {
		if t.Name != table.Name {
			remaining = append(remaining, t)
		}
	}
	for _, id := range m.TableIds {
		if id != table.Id {
			remainingIds = append(remainingIds, id)
		}
	}
	// Detach the relations before persisting.
	m.TableIds = remainingIds
	m.Tables = remaining
	if err := redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy"); err != nil {
		fmt.Printf("Delete table error when write redis: %s\n", err)
		return
	}
	schema.Tables = remaining
	// Finally destroy the table itself.
	table.Destroy()
}
// to init or restore the table infomation.
// InitMysqlTable restores every table listed in m.TableIds from redis,
// including each table's shard tables and columns. Panics (via CheckError)
// on inconsistency, since this runs at start-up.
func (m *MysqlProxy) InitMysqlTable() {
	if len(m.TableIds) == 0 { return }
	// Parse the persisted data and restore it into the MysqlProxy struct.
	tables := []*schema.MysqlTable{}
	for _, tid := range m.TableIds {
		tbs, err := redis.ReadDB("MysqlTable", tid)
		CheckError(err)
		if len(tbs) != 1 { panic("no found relation table for id: " + tid) }
		// JSON-decoded record: numbers arrive as float64.
		tb := tbs[0][tid].(map[string]interface {})
		// panic(fmt.Sprintf("%#v", tbs))
		shardTbIds := []string{}
		if std, isOk := tb["ShardIds"].([]interface{}); isOk && len(std) > 0 {
			shardTbIds = redis.RestorePrimaryId(std)
		}
		shardTb := []*schema.MysqlShardTable{}
		table := &schema.MysqlTable{
			Id: tb["Id"].(string),
			Name: tb["Name"].(string),
			CurGId: uint64(tb["CurGId"].(float64)),
			RowTotal: uint64(tb["RowTotal"].(float64)),
			ShardIds: shardTbIds,
			Created: int64(tb["Created"].(float64)),
			Shards: shardTb,
		}
		if len(shardTbIds) > 0 {
			// restore the shard tables and then the column metadata
			shardTb, err = m.GetShardTableByIds(shardTbIds)
			CheckError(err)
			table.Shards = shardTb
			err = table.RestoreColumnsByDB()
			CheckError(err)
		}
		// fmt.Printf("Init table `%s` done\n", table.Name)
		tables = append(tables, table)
	}
	m.Tables = tables
	// Mirror into the package-level schema view.
	schema.Tables = m.Tables
}
// to get shard table info.
// GetShardTableByIds loads the shard-table records for the given ids from
// redis, resolving each one's owning shard DB. Returns (nil, nil) for an
// empty id list.
func (m *MysqlProxy) GetShardTableByIds(ids []string) ([]*schema.MysqlShardTable, error) {
	if len(ids) == 0 { return nil, nil }
	tables := []*schema.MysqlShardTable{}
	for _, id := range ids {
		tbs, err := redis.ReadDB("MysqlShardTable", id)
		if err != nil { return nil, err }
		if len(tbs) != 1 { return nil, errors.New("no found the shard table for id: " + id) }
		// JSON-decoded record: numbers arrive as float64.
		tb := tbs[0][id].(map[string]interface {})
		shardDbId := tb["ShardDBId"].(string)
		shardDb,err := m.GetShardDbById(shardDbId)
		if err != nil { return nil, err }
		shardTable := &schema.MysqlShardTable{
			Id: tb["Id"].(string),
			Name: tb["Name"].(string),
			RowTotal: uint64(tb["RowTotal"].(float64)),
			ShardDBId: shardDbId,
			Created: int64(tb["Created"].(float64)),
			ShardDB: shardDb,
		}
		tables = append(tables, shardTable)
	}
	return tables, nil
}
// UpdateToRedisDB persists the proxy's whole state under the fixed "main"
// key of the MysqlProxy redis namespace.
func (m *MysqlProxy) UpdateToRedisDB() error {
	return redis.UpdateDB("main", redis.EncodeData(m), "MysqlProxy")
}
// get the shard db by ids.
// GetShardDbById loads one shard-DB record from redis and resolves its
// host group.
// NOTE(review): incrementing the global schema.ShardDBCnt inside a
// read-only lookup looks unintended — confirm against callers.
func (m *MysqlProxy) GetShardDbById(sid string) (*schema.MysqlShardDB, error) {
	if sid == "" { return nil, errors.New("Sorry, the shard db id connot is empty") }
	sdb, err := redis.ReadDB("MysqlShardDB", sid)
	if err != nil { return nil, err }
	if len(sdb) != 1 { return nil, errors.New("Load shard db wrong!") }
	// JSON-decoded record: numbers arrive as float64.
	tsdb := sdb[0][sid].(map[string]interface {})
	groupId := tsdb["HostGroupId"].(string)
	curGroup, err := host.GetHostGroupById(groupId)
	if err != nil { return nil, err }
	shardDB := &schema.MysqlShardDB{
		Id: tsdb["Id"].(string),
		Name: tsdb["Name"].(string),
		TableTotal: uint64(tsdb["TableTotal"].(float64)),
		SizeTotal: uint64(tsdb["SizeTotal"].(float64)),
		HostGroupId:groupId,
		Created: int64(tsdb["Created"].(float64)),
		HostGroup: curGroup,
	}
	schema.ShardDBCnt++
	return shardDB, nil
}
// to init the connection pooling.
// InitConnPooling is intentionally a no-op: database/sql already provides
// connection pooling, so connections are created lazily on first use.
func (m *MysqlProxy) InitConnPooling() {
	// because the database/sql support the connection pooling
	// so just to use it.
	// Decision: do not pre-create all connections up front; initialize a
	// connection lazily when it is first needed. (translated)
}
// BuildNewShardDB creates the physical database `name` on the master host
// of the given group, records its metadata in redis and returns it.
// NOTE(review): the DB connection and prepared statement leak on the error
// paths (Prepare/Exec/WriteDB failures skip stmt.Close and CloseDB).
// NOTE(review): `name` is interpolated into the SQL; callers must only pass
// internally generated names (here: "shard<N>"), never user input.
func (m *MysqlProxy) BuildNewShardDB(group *host.Group, name string) (*schema.MysqlShardDB, error) {
	if name == "" { return nil, errors.New("Sorry, can not build the no name databases") }
	// init the shard db to host.
	master := group.Master[0]
	db, err := (&master).ConnToDB("mysql")
	if err != nil { return nil, err }
	stmt, err := db.Prepare(fmt.Sprintf("CREATE DATABASE `%s` DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci", name))
	if err != nil { return nil, err }
	_, err = stmt.Exec()
	if err != nil { return nil, err }
	stmt.Close()
	shardDbId := redis.BuildPrimaryKey(name, true)
	shardDb := &schema.MysqlShardDB{
		Id: shardDbId,
		Name: name,
		TableTotal: 0,
		SizeTotal: 0,
		HostGroupId: group.Id,
		Created: redis.GetCurTime(),
		HostGroup: group,
	}
	// save this new shard database to tracker.
	err = redis.WriteDB(shardDbId, redis.EncodeData(shardDb), "MysqlShardDB")
	if err != nil { return nil, err }
	(&master).CloseDB()
	schema.ShardDBCnt++
	return shardDb, nil
}
// add a new table to mysql proxy
func (m *MysqlProxy) AddTable(tab *schema.MysqlTable) error {
tables := m.Tables
tableIds := m.TableIds
if tables == nil {
tables = []*schema.MysqlTable{ tab }
tableIds = []string{ tab.Id }
} else {
tables = append(tables, tab)
tableIds = append(tableIds, tab.Id)
}
m.Tables = tables | }
// Exec builds an execution plan from the raw SQL arguments, runs it and,
// when the plan produced a newly created table, registers that table with
// the proxy before reporting success to the client.
func (m *MysqlProxy) Exec(args []string, user *client.Client) (interface{}, error) {
	execPlan, err := planbuilder.GetExecPlan(args, user)
	if err != nil {
		return nil, err
	}
	result, err := execPlan.Do()
	if err != nil {
		return nil, err
	}
	// A *schema.MysqlTable result means a CREATE TABLE was executed.
	if tab, ok := result.(*schema.MysqlTable); ok {
		if err = m.AddTable(tab); err == nil {
			result = "create successfully!" // fix: was misspelled "succesfully"
		}
	}
	return result, err
}
// to execute exeplan
// func (m *MysqlProxy) DoExecPlan(plan *planbuilder.ExecPlan | schema.Tables = tables
m.TableIds = tableIds
return m.UpdateToRedisDB() | random_line_split |
test_framework.py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2014-2019 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from collections import deque
from concurrent.futures import ThreadPoolExecutor
from enum import Enum
import logging
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
import time
import traceback
from .util import (
PortSeed,
GENESISTIME,
MAX_NODES,
assert_equal,
bitcoind_processes,
check_json_precision,
connect_nodes_bi,
connect_nodes,
copy_datadir,
disable_mocktime,
disconnect_nodes,
enable_coverage,
get_mocktime,
get_rpc_proxy,
initialize_datadir,
get_datadir_path,
log_filename,
p2p_port,
rpc_url,
set_cache_mocktime,
set_genesis_mocktime,
set_mocktime,
set_node_times,
satoshi_round,
_start_node,
_start_nodes,
_stop_node,
_stop_nodes,
sync_blocks,
sync_mempools,
sync_masternodes,
wait_for_bitcoind_start,
wait_to_sync)
from .authproxy import JSONRPCException
class TestStatus(Enum):
    """Outcome of a full test run; mapped to a process exit code on shutdown."""
    PASSED = 1
    FAILED = 2
    SKIPPED = 3
# Process exit codes corresponding to the TestStatus outcomes.
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77  # conventional automake "skipped test" exit code
class BitcoinTestFramework(object):
    """Base class for a bitcoin test script.

    Individual bitcoin test scripts should subclass this class and override the following methods:

    - __init__()
    - add_options()
    - setup_chain()
    - setup_network()
    - run_test()

    The main() method should not be overridden.

    This class also contains various public and private helper methods."""

    # Methods to override in subclass test scripts.
    def __init__(self):
        self.num_nodes = 4
        self.setup_clean_chain = False
        self.nodes = None

    def add_options(self, parser):
        """Hook for subclasses to add their own command line options."""
        pass

    def setup_chain(self):
        """Create the per-node data directories, cached or from scratch."""
        self.log.info("Initializing test directory " + self.options.tmpdir)
        if self.setup_clean_chain:
            self._initialize_chain_clean(self.options.tmpdir, self.num_nodes)
            set_genesis_mocktime()
        else:
            self._initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
            set_cache_mocktime()

    def setup_network(self):
        self.setup_nodes()
        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.
        for i in range(self.num_nodes - 1):
            connect_nodes_bi(self.nodes, i, i + 1)
        self.sync_all()

    def setup_nodes(self, stderr=None):
        """Start all the daemons, honoring self.extra_args when defined."""
        extra_args = None
        if hasattr(self, "extra_args"):
            extra_args = self.extra_args
        self.nodes = _start_nodes(self.num_nodes, self.options.tmpdir, extra_args, stderr=stderr)

    def run_test(self):
        raise NotImplementedError

    # Main function. This should not be overridden by the subclass test scripts.
    def main(self):
        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave dashds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop dashds after the test execution")
        parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
                          help="Source directory containing dashd/dash-cli (default: %default)")
        parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                          help="Directory for caching pregenerated datadirs")
        parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
        parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
                          help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                          help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        parser.add_option("--configfile", dest="configfile",
                          help="Location of the test framework config file")
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()

        if self.options.coveragedir:
            enable_coverage(self.options.coveragedir)

        PortSeed.n = self.options.port_seed

        os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']

        check_json_precision()

        # Set up temp directory and start logging
        if self.options.tmpdir:
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix="test")
        self._start_logging()

        success = TestStatus.FAILED

        try:
            self.setup_chain()
            self.setup_network()
            self.run_test()
            success = TestStatus.PASSED
        except JSONRPCException as e:
            self.log.exception("JSONRPC error")
        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            success = TestStatus.SKIPPED
        except AssertionError as e:
            self.log.exception("Assertion failed")
        except KeyError as e:
            self.log.exception("Key error")
        except Exception as e:
            self.log.exception("Unexpected exception caught during testing")
        except KeyboardInterrupt as e:
            self.log.warning("Exiting after keyboard interrupt")

        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            try:
                if self.nodes:
                    self.stop_nodes()
            except BaseException as e:
                # Fix: was `success = False`, which is not a TestStatus value;
                # it compared unequal to FAILED below and wrongly deleted the
                # tmpdir (and its logs) after a failed shutdown.
                success = TestStatus.FAILED
                self.log.exception("Unexpected exception caught during shutdown")
        else:
            self.log.info("Note: dashds were not stopped and may still be running")

        if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
            self.log.info("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
        else:
            self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
            if os.getenv("PYTHON_DEBUG", ""):
                # Dump the end of the debug logs, to aid in debugging rare
                # travis failures.
                import glob
                filenames = [self.options.tmpdir + "/test_framework.log"]
                filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
                MAX_LINES_TO_PRINT = 1000
                for fn in filenames:
                    try:
                        with open(fn, 'r') as f:
                            print("From", fn, ":")
                            print("".join(deque(f, MAX_LINES_TO_PRINT)))
                    except OSError:
                        print("Opening file %s failed." % fn)
                        # requires the module-level `import traceback` (the
                        # original referenced traceback without importing it)
                        traceback.print_exc()

        if success == TestStatus.PASSED:
            self.log.info("Tests successful")
            sys.exit(TEST_EXIT_PASSED)
        elif success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            sys.exit(TEST_EXIT_SKIPPED)
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            logging.shutdown()
            sys.exit(TEST_EXIT_FAILED)

    # Public helper methods. These can be accessed by the subclass test scripts.
    def start_node(self, i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
        """Start a single daemon and return its RPC proxy."""
        return _start_node(i, dirname, extra_args, rpchost, timewait, binary, stderr)

    def start_nodes(self, num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
        """Start multiple daemons and return their RPC proxies."""
        return _start_nodes(num_nodes, dirname, extra_args, rpchost, timewait, binary, stderr)

    def stop_node(self, num_node):
        _stop_node(self.nodes[num_node], num_node)

    def stop_nodes(self):
        _stop_nodes(self.nodes)

    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        disconnect_nodes(self.nodes[1], 2)
        disconnect_nodes(self.nodes[2], 1)
        self.sync_all([self.nodes[:2], self.nodes[2:]])

    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        connect_nodes_bi(self.nodes, 1, 2)
        self.sync_all()

    def sync_all(self, node_groups=None):
        """Sync blocks and mempools within each group (default: all nodes)."""
        if not node_groups:
            node_groups = [self.nodes]
        for group in node_groups:
            sync_blocks(group)
            sync_mempools(group)

    # Private helper methods. These should not be accessed by the subclass test scripts.

    def _start_logging(self):
        # Add logger and logging handlers
        self.log = logging.getLogger('TestFramework')
        self.log.setLevel(logging.DEBUG)
        # Create file handler to log all messages
        fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
        fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
        ch = logging.StreamHandler(sys.stdout)
        # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
        ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
        ch.setLevel(ll)
        # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
        formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
        formatter.converter = time.gmtime
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        # add the handlers to the logger
        self.log.addHandler(fh)
        self.log.addHandler(ch)

        if self.options.trace_rpc:
            rpc_logger = logging.getLogger("BitcoinRPC")
            rpc_logger.setLevel(logging.DEBUG)
            rpc_handler = logging.StreamHandler(sys.stdout)
            rpc_handler.setLevel(logging.DEBUG)
            rpc_logger.addHandler(rpc_handler)

    def _initialize_chain(self, test_dir, num_nodes, cachedir, extra_args=None, stderr=None):
        """Initialize a pre-mined blockchain for use by the test.

        Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
        Afterward, create num_nodes copies from the cache."""

        assert num_nodes <= MAX_NODES
        create_cache = False
        for i in range(MAX_NODES):
            if not os.path.isdir(os.path.join(cachedir, 'node' + str(i))):
                create_cache = True
                break

        if create_cache:
            self.log.debug("Creating data directories from cached datadir")

            # find and delete old cache directories if any exist
            for i in range(MAX_NODES):
                if os.path.isdir(os.path.join(cachedir, "node" + str(i))):
                    shutil.rmtree(os.path.join(cachedir, "node" + str(i)))

            # Create cache directories, run dashds:
            set_genesis_mocktime()
            for i in range(MAX_NODES):
                datadir = initialize_datadir(cachedir, i)
                args = [os.getenv("DASHD", "dashd"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0", "-mocktime=" + str(GENESISTIME)]
                if i > 0:
                    args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
                if extra_args is not None:
                    args.extend(extra_args)
                bitcoind_processes[i] = subprocess.Popen(args, stderr=stderr)
                self.log.debug("initialize_chain: dashd started, waiting for RPC to come up")
                wait_for_bitcoind_start(bitcoind_processes[i], datadir, i)
                self.log.debug("initialize_chain: RPC successfully started")

            self.nodes = []
            for i in range(MAX_NODES):
                try:
                    self.nodes.append(get_rpc_proxy(rpc_url(get_datadir_path(cachedir, i), i), i))
                except:
                    self.log.exception("Error connecting to node %d" % i)
                    sys.exit(1)

            # Create a 200-block-long chain; each of the 4 first nodes
            # gets 25 mature blocks and 25 immature.
            # Note: To preserve compatibility with older versions of
            # initialize_chain, only 4 nodes will generate coins.
            #
            # blocks are created with timestamps 10 minutes apart
            # starting from 2010 minutes in the past
            block_time = GENESISTIME
            for i in range(2):
                for peer in range(4):
                    for j in range(25):
                        set_node_times(self.nodes, block_time)
                        self.nodes[peer].generate(1)
                        block_time += 156
                    # Must sync before next peer starts generating blocks
                    sync_blocks(self.nodes)

            # Shut them down, and clean up cache directories:
            self.stop_nodes()
            self.nodes = []
            disable_mocktime()
            for i in range(MAX_NODES):
                # strip run-specific files so the cache is reusable
                os.remove(log_filename(cachedir, i, "debug.log"))
                os.remove(log_filename(cachedir, i, "db.log"))
                os.remove(log_filename(cachedir, i, "peers.dat"))
                os.remove(log_filename(cachedir, i, "fee_estimates.dat"))

        for i in range(num_nodes):
            from_dir = os.path.join(cachedir, "node" + str(i))
            to_dir = os.path.join(test_dir, "node" + str(i))
            shutil.copytree(from_dir, to_dir)
            initialize_datadir(test_dir, i)  # Overwrite port/rpcport in dash.conf

    def _initialize_chain_clean(self, test_dir, num_nodes):
        """Initialize empty blockchain for use by the test.

        Create an empty blockchain and num_nodes wallets.
        Useful if a test case wants complete control over initialization."""
        for i in range(num_nodes):
            initialize_datadir(test_dir, i)
MASTERNODE_COLLATERAL = 1000  # coins required to fund one masternode collateral output
class MasternodeInfo:
    """Plain record describing one registered masternode: its ProTx hash,
    the owner/voting/operator keys, and the collateral outpoint funding it."""
    def __init__(self, proTxHash, ownerAddr, votingAddr, pubKeyOperator, keyOperator, collateral_address, collateral_txid, collateral_vout):
        self.proTxHash = proTxHash
        self.ownerAddr = ownerAddr
        self.votingAddr = votingAddr
        self.pubKeyOperator = pubKeyOperator
        self.keyOperator = keyOperator
        self.collateral_address = collateral_address
        self.collateral_txid = collateral_txid
        self.collateral_vout = collateral_vout
class DashTestFramework(BitcoinTestFramework):
    def __init__(self, num_nodes, masterodes_count, extra_args, fast_dip3_enforcement=False):
        # NOTE(review): "masterodes_count" is a typo for masternodes_count,
        # kept as-is because it is part of the public constructor interface.
        super().__init__()
        self.mn_count = masterodes_count
        self.num_nodes = num_nodes
        self.mninfo = []
        self.setup_clean_chain = True
        self.is_network_split = False
        # additional args
        self.extra_args = extra_args
        # presumably the well-known regtest spork private key — confirm
        self.extra_args += ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"]
        self.fast_dip3_enforcement = fast_dip3_enforcement
        if fast_dip3_enforcement:
            # lower DIP3 activation heights so tests need not mine 500 blocks
            self.extra_args += ["-dip3params=30:50"]
def create_simple_node(self):
idx = len(self.nodes)
args = self.extra_args
self.nodes.append(self.start_node(idx, self.options.tmpdir, args))
for i in range(0, idx):
connect_nodes(self.nodes[i], idx)
def prepare_masternodes(self):
for idx in range(0, self.mn_count):
self.prepare_masternode(idx)
    def prepare_masternode(self, idx):
        """Fund and register masternode number `idx` on the faucet node.

        Creates a MASTERNODE_COLLATERAL output, locks it, then registers the
        masternode via ProTx — `register_fund` for even indexes and plain
        `register` against the existing collateral for odd ones, so both
        registration paths get exercised."""
        bls = self.nodes[0].bls('generate')
        address = self.nodes[0].getnewaddress()
        txid = self.nodes[0].sendtoaddress(address, MASTERNODE_COLLATERAL)

        # Locate the output carrying the collateral amount.
        txraw = self.nodes[0].getrawtransaction(txid, True)
        collateral_vout = 0
        for vout_idx in range(0, len(txraw["vout"])):
            vout = txraw["vout"][vout_idx]
            if vout["value"] == MASTERNODE_COLLATERAL:
                collateral_vout = vout_idx
        self.nodes[0].lockunspent(False, [{'txid': txid, 'vout': collateral_vout}])

        # send to same address to reserve some funds for fees
        self.nodes[0].sendtoaddress(address, 0.001)
        ownerAddr = self.nodes[0].getnewaddress()
        votingAddr = self.nodes[0].getnewaddress()
        rewardsAddr = self.nodes[0].getnewaddress()
        port = p2p_port(len(self.nodes) + idx)
        if (idx % 2) == 0:
            # unlock the collateral again — presumably so register_fund's
            # wallet funding can spend it (TODO confirm)
            self.nodes[0].lockunspent(True, [{'txid': txid, 'vout': collateral_vout}])
            proTxHash = self.nodes[0].protx('register_fund', address, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)
        else:
            self.nodes[0].generate(1)
            proTxHash = self.nodes[0].protx('register', txid, collateral_vout, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)

        self.nodes[0].generate(1)

        self.mninfo.append(MasternodeInfo(proTxHash, ownerAddr, votingAddr, bls['public'], bls['secret'], address, txid, collateral_vout))
        self.sync_all()
    def remove_mastermode(self, idx):
        """Spend masternode `idx`'s collateral (which invalidates the
        masternode) and drop its record from self.mninfo.

        NOTE(review): the method name is a typo for remove_masternode but is
        kept since callers use it as-is."""
        mn = self.mninfo[idx]
        # 999.9999 = the 1000-coin collateral minus a 0.0001 fee
        rawtx = self.nodes[0].createrawtransaction([{"txid": mn.collateral_txid, "vout": mn.collateral_vout}], {self.nodes[0].getnewaddress(): 999.9999})
        rawtx = self.nodes[0].signrawtransaction(rawtx)
        self.nodes[0].sendrawtransaction(rawtx["hex"])
        self.nodes[0].generate(1)
        self.sync_all()
        self.mninfo.remove(mn)
    def prepare_datadirs(self):
        """Clone the faucet node's datadir into one datadir per masternode
        (the faucet must be stopped while its files are copied)."""
        # stop faucet node so that we can copy the datadir
        self.stop_node(0)

        start_idx = len(self.nodes)
        for idx in range(0, self.mn_count):
            copy_datadir(0, idx + start_idx, self.options.tmpdir)

        # restart faucet node
        self.nodes[0] = self.start_node(0, self.options.tmpdir, self.extra_args)
def start_masternodes(self):
start_idx = len(self.nodes)
for idx in range(0, self.mn_count):
self.nodes.append(None)
executor = ThreadPoolExecutor(max_workers=20)
def do_start(idx):
args = ['-masternode=1',
'-masternodeblsprivkey=%s' % self.mninfo[idx].keyOperator] + self.extra_args
node = self.start_node(idx + start_idx, self.options.tmpdir, args)
self.mninfo[idx].nodeIdx = idx + start_idx
self.mninfo[idx].node = node
self.nodes[idx + start_idx] = node
wait_to_sync(node, True)
def do_connect(idx):
for i in range(0, idx + 1):
connect_nodes(self.nodes[idx + start_idx], i)
jobs = []
# start up nodes in parallel
for idx in range(0, self.mn_count):
jobs.append(executor.submit(do_start, idx))
# wait for all nodes to start up
for job in jobs:
job.result()
jobs.clear()
# connect nodes in parallel
for idx in range(0, self.mn_count):
jobs.append(executor.submit(do_connect, idx))
# wait for all nodes to connect
for job in jobs:
job.result()
jobs.clear()
sync_masternodes(self.nodes, True)
executor.shutdown()
def setup_network(self):
self.nodes = []
# create faucet node for collateral and transactions
self.nodes.append(self.start_node(0, self.options.tmpdir, self.extra_args))
required_balance = MASTERNODE_COLLATERAL * self.mn_count + 1
while self.nodes[0].getbalance() < required_balance:
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(1)
# create connected simple nodes
for i in range(0, self.num_nodes - self.mn_count - 1):
self.create_simple_node()
sync_masternodes(self.nodes, True)
# activate DIP3
if not self.fast_dip3_enforcement:
while self.nodes[0].getblockcount() < 500:
self.nodes[0].generate(10)
self.sync_all()
# create masternodes
self.prepare_masternodes()
self.prepare_datadirs()
self.start_masternodes()
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(1)
# sync nodes
self.sync_all()
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
mn_info = self.nodes[0].masternodelist("status")
assert (len(mn_info) == self.mn_count)
for status in mn_info.values():
assert (status == 'ENABLED')
def create_raw_tx(self, node_from, node_to, amount, min_inputs, max_inputs):
assert (min_inputs <= max_inputs)
# fill inputs
inputs = []
balances = node_from.listunspent()
in_amount = 0.0
last_amount = 0.0
for tx in balances:
if len(inputs) < min_inputs:
input = {}
input["txid"] = tx['txid']
input['vout'] = tx['vout']
in_amount += float(tx['amount'])
inputs.append(input)
elif in_amount > amount:
break
elif len(inputs) < max_inputs:
input = {}
input["txid"] = tx['txid']
input['vout'] = tx['vout']
in_amount += float(tx['amount'])
inputs.append(input)
else:
input = {}
input["txid"] = tx['txid']
input['vout'] = tx['vout']
in_amount -= last_amount
in_amount += float(tx['amount'])
inputs[-1] = input
last_amount = float(tx['amount'])
assert (len(inputs) >= min_inputs)
assert (len(inputs) <= max_inputs)
assert (in_amount >= amount)
# fill outputs
receiver_address = node_to.getnewaddress()
change_address = node_from.getnewaddress()
fee = 0.001
outputs = {}
outputs[receiver_address] = satoshi_round(amount)
outputs[change_address] = satoshi_round(in_amount - amount - fee)
rawtx = node_from.createrawtransaction(inputs, outputs)
ret = node_from.signrawtransaction(rawtx)
decoded = node_from.decoderawtransaction(ret['hex'])
ret = {**decoded, **ret}
return ret
def wait_for_instantlock(self, txid, node):
# wait for instantsend locks
start = time.time()
locked = False
while True:
try:
is_tx = node.getrawtransaction(txid, True)
if is_tx['instantlock']:
locked = True
break
except:
# TX not received yet?
pass
if time.time() > start + 10:
break
time.sleep(0.5)
return locked
def wait_for_sporks_same(self, timeout=30):
st = time.time()
while time.time() < st + timeout:
if self.check_sporks_same():
return
time.sleep(0.5)
raise AssertionError("wait_for_sporks_same timed out")
def check_sporks_same(self):
sporks = self.nodes[0].spork('show')
for node in self.nodes[1:]:
sporks2 = node.spork('show')
if sporks != sporks2:
return False
return True
def wait_for_quorum_phase(self, phase, check_received_messages, check_received_messages_count, timeout=30):
t = time.time()
while time.time() - t < timeout:
all_ok = True
for mn in self.mninfo:
s = mn.node.quorum("dkgstatus")["session"]
if "llmq_5_60" not in s:
all_ok = False
break
s = s["llmq_5_60"]
if "phase" not in s:
all_ok = False
break
if s["phase"] != phase:
all_ok = False
break
if check_received_messages is not None:
|
if all_ok:
return
time.sleep(0.1)
raise AssertionError("wait_for_quorum_phase timed out")
def wait_for_quorum_commitment(self, timeout = 15):
t = time.time()
while time.time() - t < timeout:
all_ok = True
for node in self.nodes:
s = node.quorum("dkgstatus")
if "minableCommitments" not in s:
all_ok = False
break
s = s["minableCommitments"]
if "llmq_5_60" not in s:
all_ok = False
break
if all_ok:
return
time.sleep(0.1)
raise AssertionError("wait_for_quorum_commitment timed out")
def mine_quorum(self, expected_contributions=5, expected_complaints=0, expected_justifications=0, expected_commitments=5):
quorums = self.nodes[0].quorum("list")
# move forward to next DKG
skip_count = 24 - (self.nodes[0].getblockcount() % 24)
if skip_count != 0:
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(skip_count)
sync_blocks(self.nodes)
# Make sure all reached phase 1 (init)
self.wait_for_quorum_phase(1, None, 0)
# Give nodes some time to connect to neighbors
time.sleep(2)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(2)
sync_blocks(self.nodes)
# Make sure all reached phase 2 (contribute) and received all contributions
self.wait_for_quorum_phase(2, "receivedContributions", expected_contributions)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(2)
sync_blocks(self.nodes)
# Make sure all reached phase 3 (complain) and received all complaints
self.wait_for_quorum_phase(3, "receivedComplaints", expected_complaints)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(2)
sync_blocks(self.nodes)
# Make sure all reached phase 4 (justify)
self.wait_for_quorum_phase(4, "receivedJustifications", expected_justifications)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(2)
sync_blocks(self.nodes)
# Make sure all reached phase 5 (commit)
self.wait_for_quorum_phase(5, "receivedPrematureCommitments", expected_commitments)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(2)
sync_blocks(self.nodes)
# Make sure all reached phase 6 (mining)
self.wait_for_quorum_phase(6, None, 0)
# Wait for final commitment
self.wait_for_quorum_commitment()
# mine the final commitment
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(1)
while quorums == self.nodes[0].quorum("list"):
time.sleep(2)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(1)
sync_blocks(self.nodes)
new_quorum = self.nodes[0].quorum("list", 1)["llmq_5_60"][0]
# Mine 8 (SIGN_HEIGHT_OFFSET) more blocks to make sure that the new quorum gets eligable for signing sessions
self.nodes[0].generate(8)
sync_blocks(self.nodes)
return new_quorum
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
class ComparisonTestFramework(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "dashd"),
help="dashd binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "dashd"),
help="dashd binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']]*self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.nodes = self.start_nodes(
self.num_nodes, self.options.tmpdir, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
| if s[check_received_messages] < check_received_messages_count:
all_ok = False
break | conditional_block |
test_framework.py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2014-2019 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from collections import deque
from enum import Enum
import logging
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
import time
from concurrent.futures import ThreadPoolExecutor
from .util import (
PortSeed,
GENESISTIME,
MAX_NODES,
assert_equal,
bitcoind_processes,
check_json_precision,
connect_nodes_bi,
connect_nodes,
copy_datadir,
disable_mocktime,
disconnect_nodes,
enable_coverage,
get_mocktime,
get_rpc_proxy,
initialize_datadir,
get_datadir_path,
log_filename,
p2p_port,
rpc_url,
set_cache_mocktime,
set_genesis_mocktime,
set_mocktime,
set_node_times,
satoshi_round,
_start_node,
_start_nodes,
_stop_node,
_stop_nodes,
sync_blocks,
sync_mempools,
sync_masternodes,
wait_for_bitcoind_start,
wait_to_sync)
from .authproxy import JSONRPCException
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework(object):
"""Base class for a bitcoin test script.
Individual bitcoin test scripts should subclass this class and override the following methods:
- __init__()
- add_options()
- setup_chain()
- setup_network()
- run_test()
The main() method should not be overridden.
This class also contains various public and private helper methods."""
# Methods to override in subclass test scripts.
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = None
def add_options(self, parser):
pass
def setup_chain(self):
self.log.info("Initializing test directory "+self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean(self.options.tmpdir, self.num_nodes)
set_genesis_mocktime()
else:
self._initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
set_cache_mocktime()
def setup_network(self):
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self, stderr=None):
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.nodes = _start_nodes(self.num_nodes, self.options.tmpdir, extra_args, stderr=stderr)
def run_test(self):
raise NotImplementedError
# Main function. This should not be overridden by the subclass test scripts.
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave dashds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop dashds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing dashd/dash-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
# Set up temp directory and start logging
if self.options.tmpdir:
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
self.setup_chain()
self.setup_network()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if not self.options.noshutdown:
self.log.info("Stopping nodes")
try:
if self.nodes:
self.stop_nodes()
except BaseException as e:
success = False
self.log.exception("Unexpected exception caught during shutdown")
else:
self.log.info("Note: dashds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = [self.options.tmpdir + "/test_framework.log"]
filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for fn in filenames:
try:
with open(fn, 'r') as f:
print("From" , fn, ":")
print("".join(deque(f, MAX_LINES_TO_PRINT)))
except OSError:
print("Opening file %s failed." % fn)
traceback.print_exc()
if success == TestStatus.PASSED:
self.log.info("Tests successful")
sys.exit(TEST_EXIT_PASSED)
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
sys.exit(TEST_EXIT_SKIPPED)
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
logging.shutdown()
sys.exit(TEST_EXIT_FAILED)
# Public helper methods. These can be accessed by the subclass test scripts.
def start_node(self, i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
return _start_node(i, dirname, extra_args, rpchost, timewait, binary, stderr)
def start_nodes(self, num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
return _start_nodes(num_nodes, dirname, extra_args, rpchost, timewait, binary, stderr)
def stop_node(self, num_node):
_stop_node(self.nodes[num_node], num_node)
def stop_nodes(self):
_stop_nodes(self.nodes)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt = '%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self, test_dir, num_nodes, cachedir, extra_args=None, stderr=None):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join(cachedir, 'node' + str(i))):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(os.path.join(cachedir, "node" + str(i))):
shutil.rmtree(os.path.join(cachedir, "node" + str(i)))
# Create cache directories, run dashds:
set_genesis_mocktime()
for i in range(MAX_NODES):
datadir = initialize_datadir(cachedir, i)
args = [os.getenv("DASHD", "dashd"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0", "-mocktime="+str(GENESISTIME)]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
if extra_args is not None:
args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args, stderr=stderr)
self.log.debug("initialize_chain: dashd started, waiting for RPC to come up")
wait_for_bitcoind_start(bitcoind_processes[i], datadir, i)
self.log.debug("initialize_chain: RPC successfully started")
self.nodes = []
for i in range(MAX_NODES):
try:
self.nodes.append(get_rpc_proxy(rpc_url(get_datadir_path(cachedir, i), i), i))
except:
self.log.exception("Error connecting to node %d" % i)
sys.exit(1)
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
block_time = GENESISTIME
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 156
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename(cachedir, i, "debug.log"))
os.remove(log_filename(cachedir, i, "db.log"))
os.remove(log_filename(cachedir, i, "peers.dat"))
os.remove(log_filename(cachedir, i, "fee_estimates.dat"))
for i in range(num_nodes):
from_dir = os.path.join(cachedir, "node" + str(i))
to_dir = os.path.join(test_dir, "node" + str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in dsah.conf
def _initialize_chain_clean(self, test_dir, num_nodes):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(num_nodes):
initialize_datadir(test_dir, i)
MASTERNODE_COLLATERAL = 1000
class MasternodeInfo:
def __init__(self, proTxHash, ownerAddr, votingAddr, pubKeyOperator, keyOperator, collateral_address, collateral_txid, collateral_vout):
self.proTxHash = proTxHash
self.ownerAddr = ownerAddr
self.votingAddr = votingAddr
self.pubKeyOperator = pubKeyOperator
self.keyOperator = keyOperator
self.collateral_address = collateral_address
self.collateral_txid = collateral_txid
self.collateral_vout = collateral_vout
class DashTestFramework(BitcoinTestFramework):
def __init__(self, num_nodes, masterodes_count, extra_args, fast_dip3_enforcement=False):
super().__init__()
self.mn_count = masterodes_count
self.num_nodes = num_nodes
self.mninfo = []
self.setup_clean_chain = True
self.is_network_split = False
# additional args
self.extra_args = extra_args
self.extra_args += ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"]
self.fast_dip3_enforcement = fast_dip3_enforcement
if fast_dip3_enforcement:
self.extra_args += ["-dip3params=30:50"]
def create_simple_node(self):
idx = len(self.nodes)
args = self.extra_args
self.nodes.append(self.start_node(idx, self.options.tmpdir, args))
for i in range(0, idx):
connect_nodes(self.nodes[i], idx)
def prepare_masternodes(self):
for idx in range(0, self.mn_count):
self.prepare_masternode(idx)
def prepare_masternode(self, idx):
bls = self.nodes[0].bls('generate')
address = self.nodes[0].getnewaddress()
txid = self.nodes[0].sendtoaddress(address, MASTERNODE_COLLATERAL)
txraw = self.nodes[0].getrawtransaction(txid, True)
collateral_vout = 0
for vout_idx in range(0, len(txraw["vout"])):
vout = txraw["vout"][vout_idx]
if vout["value"] == MASTERNODE_COLLATERAL:
collateral_vout = vout_idx
self.nodes[0].lockunspent(False, [{'txid': txid, 'vout': collateral_vout}])
# send to same address to reserve some funds for fees
self.nodes[0].sendtoaddress(address, 0.001)
ownerAddr = self.nodes[0].getnewaddress()
votingAddr = self.nodes[0].getnewaddress()
rewardsAddr = self.nodes[0].getnewaddress()
port = p2p_port(len(self.nodes) + idx)
if (idx % 2) == 0:
self.nodes[0].lockunspent(True, [{'txid': txid, 'vout': collateral_vout}])
proTxHash = self.nodes[0].protx('register_fund', address, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)
else:
self.nodes[0].generate(1)
proTxHash = self.nodes[0].protx('register', txid, collateral_vout, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)
self.nodes[0].generate(1)
self.mninfo.append(MasternodeInfo(proTxHash, ownerAddr, votingAddr, bls['public'], bls['secret'], address, txid, collateral_vout))
self.sync_all()
def remove_mastermode(self, idx):
mn = self.mninfo[idx]
rawtx = self.nodes[0].createrawtransaction([{"txid": mn.collateral_txid, "vout": mn.collateral_vout}], {self.nodes[0].getnewaddress(): 999.9999})
rawtx = self.nodes[0].signrawtransaction(rawtx)
self.nodes[0].sendrawtransaction(rawtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
self.mninfo.remove(mn)
def prepare_datadirs(self):
# stop faucet node so that we can copy the datadir
self.stop_node(0)
start_idx = len(self.nodes)
for idx in range(0, self.mn_count):
copy_datadir(0, idx + start_idx, self.options.tmpdir)
# restart faucet node
self.nodes[0] = self.start_node(0, self.options.tmpdir, self.extra_args)
def start_masternodes(self):
start_idx = len(self.nodes)
for idx in range(0, self.mn_count):
self.nodes.append(None)
executor = ThreadPoolExecutor(max_workers=20)
def do_start(idx):
args = ['-masternode=1',
'-masternodeblsprivkey=%s' % self.mninfo[idx].keyOperator] + self.extra_args
node = self.start_node(idx + start_idx, self.options.tmpdir, args)
self.mninfo[idx].nodeIdx = idx + start_idx
self.mninfo[idx].node = node
self.nodes[idx + start_idx] = node
wait_to_sync(node, True)
def do_connect(idx):
for i in range(0, idx + 1):
connect_nodes(self.nodes[idx + start_idx], i)
jobs = []
# start up nodes in parallel
for idx in range(0, self.mn_count):
jobs.append(executor.submit(do_start, idx))
# wait for all nodes to start up
for job in jobs:
job.result()
jobs.clear()
# connect nodes in parallel
for idx in range(0, self.mn_count):
jobs.append(executor.submit(do_connect, idx))
# wait for all nodes to connect
for job in jobs:
job.result()
jobs.clear()
sync_masternodes(self.nodes, True)
executor.shutdown()
def setup_network(self):
self.nodes = []
# create faucet node for collateral and transactions
self.nodes.append(self.start_node(0, self.options.tmpdir, self.extra_args))
required_balance = MASTERNODE_COLLATERAL * self.mn_count + 1
while self.nodes[0].getbalance() < required_balance:
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(1)
# create connected simple nodes
for i in range(0, self.num_nodes - self.mn_count - 1):
self.create_simple_node()
sync_masternodes(self.nodes, True)
# activate DIP3
if not self.fast_dip3_enforcement:
while self.nodes[0].getblockcount() < 500:
self.nodes[0].generate(10)
self.sync_all()
# create masternodes
self.prepare_masternodes()
self.prepare_datadirs()
self.start_masternodes()
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(1)
# sync nodes
self.sync_all()
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
mn_info = self.nodes[0].masternodelist("status")
assert (len(mn_info) == self.mn_count)
for status in mn_info.values():
assert (status == 'ENABLED')
def | (self, node_from, node_to, amount, min_inputs, max_inputs):
assert (min_inputs <= max_inputs)
# fill inputs
inputs = []
balances = node_from.listunspent()
in_amount = 0.0
last_amount = 0.0
for tx in balances:
if len(inputs) < min_inputs:
input = {}
input["txid"] = tx['txid']
input['vout'] = tx['vout']
in_amount += float(tx['amount'])
inputs.append(input)
elif in_amount > amount:
break
elif len(inputs) < max_inputs:
input = {}
input["txid"] = tx['txid']
input['vout'] = tx['vout']
in_amount += float(tx['amount'])
inputs.append(input)
else:
input = {}
input["txid"] = tx['txid']
input['vout'] = tx['vout']
in_amount -= last_amount
in_amount += float(tx['amount'])
inputs[-1] = input
last_amount = float(tx['amount'])
assert (len(inputs) >= min_inputs)
assert (len(inputs) <= max_inputs)
assert (in_amount >= amount)
# fill outputs
receiver_address = node_to.getnewaddress()
change_address = node_from.getnewaddress()
fee = 0.001
outputs = {}
outputs[receiver_address] = satoshi_round(amount)
outputs[change_address] = satoshi_round(in_amount - amount - fee)
rawtx = node_from.createrawtransaction(inputs, outputs)
ret = node_from.signrawtransaction(rawtx)
decoded = node_from.decoderawtransaction(ret['hex'])
ret = {**decoded, **ret}
return ret
def wait_for_instantlock(self, txid, node):
# wait for instantsend locks
start = time.time()
locked = False
while True:
try:
is_tx = node.getrawtransaction(txid, True)
if is_tx['instantlock']:
locked = True
break
except:
# TX not received yet?
pass
if time.time() > start + 10:
break
time.sleep(0.5)
return locked
def wait_for_sporks_same(self, timeout=30):
st = time.time()
while time.time() < st + timeout:
if self.check_sporks_same():
return
time.sleep(0.5)
raise AssertionError("wait_for_sporks_same timed out")
def check_sporks_same(self):
sporks = self.nodes[0].spork('show')
for node in self.nodes[1:]:
sporks2 = node.spork('show')
if sporks != sporks2:
return False
return True
def wait_for_quorum_phase(self, phase, check_received_messages, check_received_messages_count, timeout=30):
t = time.time()
while time.time() - t < timeout:
all_ok = True
for mn in self.mninfo:
s = mn.node.quorum("dkgstatus")["session"]
if "llmq_5_60" not in s:
all_ok = False
break
s = s["llmq_5_60"]
if "phase" not in s:
all_ok = False
break
if s["phase"] != phase:
all_ok = False
break
if check_received_messages is not None:
if s[check_received_messages] < check_received_messages_count:
all_ok = False
break
if all_ok:
return
time.sleep(0.1)
raise AssertionError("wait_for_quorum_phase timed out")
def wait_for_quorum_commitment(self, timeout = 15):
t = time.time()
while time.time() - t < timeout:
all_ok = True
for node in self.nodes:
s = node.quorum("dkgstatus")
if "minableCommitments" not in s:
all_ok = False
break
s = s["minableCommitments"]
if "llmq_5_60" not in s:
all_ok = False
break
if all_ok:
return
time.sleep(0.1)
raise AssertionError("wait_for_quorum_commitment timed out")
def mine_quorum(self, expected_contributions=5, expected_complaints=0, expected_justifications=0, expected_commitments=5):
quorums = self.nodes[0].quorum("list")
# move forward to next DKG
skip_count = 24 - (self.nodes[0].getblockcount() % 24)
if skip_count != 0:
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(skip_count)
sync_blocks(self.nodes)
# Make sure all reached phase 1 (init)
self.wait_for_quorum_phase(1, None, 0)
# Give nodes some time to connect to neighbors
time.sleep(2)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(2)
sync_blocks(self.nodes)
# Make sure all reached phase 2 (contribute) and received all contributions
self.wait_for_quorum_phase(2, "receivedContributions", expected_contributions)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(2)
sync_blocks(self.nodes)
# Make sure all reached phase 3 (complain) and received all complaints
self.wait_for_quorum_phase(3, "receivedComplaints", expected_complaints)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(2)
sync_blocks(self.nodes)
# Make sure all reached phase 4 (justify)
self.wait_for_quorum_phase(4, "receivedJustifications", expected_justifications)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(2)
sync_blocks(self.nodes)
# Make sure all reached phase 5 (commit)
self.wait_for_quorum_phase(5, "receivedPrematureCommitments", expected_commitments)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(2)
sync_blocks(self.nodes)
# Make sure all reached phase 6 (mining)
self.wait_for_quorum_phase(6, None, 0)
# Wait for final commitment
self.wait_for_quorum_commitment()
# mine the final commitment
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(1)
while quorums == self.nodes[0].quorum("list"):
time.sleep(2)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(1)
sync_blocks(self.nodes)
new_quorum = self.nodes[0].quorum("list", 1)["llmq_5_60"][0]
# Mine 8 (SIGN_HEIGHT_OFFSET) more blocks to make sure that the new quorum gets eligable for signing sessions
self.nodes[0].generate(8)
sync_blocks(self.nodes)
return new_quorum
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
class ComparisonTestFramework(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "dashd"),
help="dashd binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "dashd"),
help="dashd binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']]*self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.nodes = self.start_nodes(
self.num_nodes, self.options.tmpdir, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
| create_raw_tx | identifier_name |
test_framework.py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2014-2019 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from collections import deque
from enum import Enum
import logging
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
import time
from concurrent.futures import ThreadPoolExecutor
from .util import (
PortSeed,
GENESISTIME,
MAX_NODES,
assert_equal,
bitcoind_processes,
check_json_precision,
connect_nodes_bi,
connect_nodes,
copy_datadir,
disable_mocktime,
disconnect_nodes,
enable_coverage,
get_mocktime,
get_rpc_proxy,
initialize_datadir,
get_datadir_path,
log_filename,
p2p_port,
rpc_url,
set_cache_mocktime,
set_genesis_mocktime,
set_mocktime,
set_node_times,
satoshi_round,
_start_node,
_start_nodes,
_stop_node,
_stop_nodes,
sync_blocks,
sync_mempools,
sync_masternodes,
wait_for_bitcoind_start,
wait_to_sync)
from .authproxy import JSONRPCException
# Outcome of a test run, as recorded by BitcoinTestFramework.main().
class TestStatus(Enum):
    PASSED = 1
    FAILED = 2
    SKIPPED = 3

# Process exit codes corresponding to each TestStatus; 77 is the
# conventional exit code for a skipped test in automake-style harnesses.
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework(object):
    """Base class for a bitcoin test script.

    Individual bitcoin test scripts should subclass this class and override the following methods:

    - __init__()
    - add_options()
    - setup_chain()
    - setup_network()
    - run_test()

    The main() method should not be overridden.

    This class also contains various public and private helper methods."""

    # Methods to override in subclass test scripts.
    def __init__(self):
        # Defaults; subclasses typically override these in their own __init__.
        self.num_nodes = 4              # number of dashd nodes the framework starts
        self.setup_clean_chain = False  # True -> empty chain instead of cached pre-mined chain
        self.nodes = None               # list of RPC proxies, populated by setup_network()

    def add_options(self, parser):
        # Hook for subclasses to register extra optparse options; no-op by default.
        pass
def setup_chain(self):
    """Populate the test directory: either copy the cached pre-mined chain
    or create a fresh empty one, and set mocktime accordingly."""
    self.log.info("Initializing test directory " + self.options.tmpdir)
    if not self.setup_clean_chain:
        self._initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
        set_cache_mocktime()
        return
    self._initialize_chain_clean(self.options.tmpdir, self.num_nodes)
    set_genesis_mocktime()
def setup_network(self):
    """Start all nodes and wire them into a single line topology."""
    self.setup_nodes()

    # Connect the nodes as a "chain". This allows us
    # to split the network between nodes 1 and 2 to get
    # two halves that can work on competing chains.
    for left_index in range(self.num_nodes - 1):
        connect_nodes_bi(self.nodes, left_index, left_index + 1)
    self.sync_all()
def setup_nodes(self, stderr=None):
    """Start self.num_nodes dashd processes, honoring an optional
    self.extra_args attribute set by the subclass."""
    self.nodes = _start_nodes(self.num_nodes, self.options.tmpdir,
                              getattr(self, "extra_args", None), stderr=stderr)
def run_test(self):
    # The actual test logic; every concrete test subclass must provide this.
    raise NotImplementedError
# Main function. This should not be overridden by the subclass test scripts.
def main(self):
    """Entry point: parse options, set up logging and the test directory,
    run the test, then shut down, optionally clean up, and exit with the
    status code matching the TestStatus outcome."""
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                      help="Leave dashds and test.* datadir on exit or error")
    parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                      help="Don't stop dashds after the test execution")
    parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
                      help="Source directory containing dashd/dash-cli (default: %default)")
    parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
                      help="Directory for caching pregenerated datadirs")
    parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
    parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
                      help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
    parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                      help="Print out all RPC calls as they are made")
    parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                      help="The seed to use for assigning port numbers (default: current process id)")
    parser.add_option("--coveragedir", dest="coveragedir",
                      help="Write tested RPC commands into this directory")
    parser.add_option("--configfile", dest="configfile",
                      help="Location of the test framework config file")
    self.add_options(parser)
    (self.options, self.args) = parser.parse_args()

    if self.options.coveragedir:
        enable_coverage(self.options.coveragedir)

    PortSeed.n = self.options.port_seed

    # Make dashd/dash-cli from the source tree take precedence on PATH.
    os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']

    check_json_precision()

    # Set up temp directory and start logging
    if self.options.tmpdir:
        os.makedirs(self.options.tmpdir, exist_ok=False)
    else:
        self.options.tmpdir = tempfile.mkdtemp(prefix="test")
    self._start_logging()

    success = TestStatus.FAILED

    try:
        self.setup_chain()
        self.setup_network()
        self.run_test()
        success = TestStatus.PASSED
    except JSONRPCException as e:
        self.log.exception("JSONRPC error")
    except SkipTest as e:
        self.log.warning("Test Skipped: %s" % e.message)
        success = TestStatus.SKIPPED
    except AssertionError as e:
        self.log.exception("Assertion failed")
    except KeyError as e:
        self.log.exception("Key error")
    except Exception as e:
        self.log.exception("Unexpected exception caught during testing")
    except KeyboardInterrupt as e:
        self.log.warning("Exiting after keyboard interrupt")

    if not self.options.noshutdown:
        self.log.info("Stopping nodes")
        try:
            if self.nodes:
                self.stop_nodes()
        except BaseException as e:
            # Fix: this used to assign the bare boolean False, which made the
            # cleanup guard below ("success != TestStatus.FAILED") evaluate
            # True and delete the datadirs even though shutdown failed.
            success = TestStatus.FAILED
            self.log.exception("Unexpected exception caught during shutdown")
    else:
        self.log.info("Note: dashds were not stopped and may still be running")

    if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
        self.log.info("Cleaning up")
        shutil.rmtree(self.options.tmpdir)
    else:
        self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
        if os.getenv("PYTHON_DEBUG", ""):
            # Dump the end of the debug logs, to aid in debugging rare
            # travis failures.
            import glob
            import traceback  # Fix: was used below without ever being imported.
            filenames = [self.options.tmpdir + "/test_framework.log"]
            filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
            MAX_LINES_TO_PRINT = 1000
            for fn in filenames:
                try:
                    with open(fn, 'r') as f:
                        print("From" , fn, ":")
                        print("".join(deque(f, MAX_LINES_TO_PRINT)))
                except OSError:
                    print("Opening file %s failed." % fn)
                    traceback.print_exc()

    if success == TestStatus.PASSED:
        self.log.info("Tests successful")
        sys.exit(TEST_EXIT_PASSED)
    elif success == TestStatus.SKIPPED:
        self.log.info("Test skipped")
        sys.exit(TEST_EXIT_SKIPPED)
    else:
        self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
        logging.shutdown()
        sys.exit(TEST_EXIT_FAILED)
# Public helper methods. These can be accessed by the subclass test scripts.
def start_node(self, i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
    # Start node i and return its RPC proxy; thin wrapper around util._start_node.
    return _start_node(i, dirname, extra_args, rpchost, timewait, binary, stderr)

def start_nodes(self, num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
    # Start num_nodes nodes at once and return their RPC proxies.
    return _start_nodes(num_nodes, dirname, extra_args, rpchost, timewait, binary, stderr)

def stop_node(self, num_node):
    # Stop the given node; delegates to util._stop_node.
    _stop_node(self.nodes[num_node], num_node)

def stop_nodes(self):
    # Stop all running nodes; delegates to util._stop_nodes.
    _stop_nodes(self.nodes)
def split_network(self):
    """
    Split the network of four nodes into nodes 0/1 and 2/3.
    """
    # Disconnect both directions of the 1<->2 link, then sync each half separately.
    disconnect_nodes(self.nodes[1], 2)
    disconnect_nodes(self.nodes[2], 1)
    self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
    """
    Join the (previously split) network halves together.
    """
    # Reconnect the 1<->2 link and sync everything as one group again.
    connect_nodes_bi(self.nodes, 1, 2)
    self.sync_all()
def sync_all(self, node_groups=None):
    """Sync blocks and mempools within each group of nodes.

    With no argument, all nodes are treated as a single group."""
    groups = node_groups if node_groups else [self.nodes]
    for group in groups:
        sync_blocks(group)
        sync_mempools(group)
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
    """Configure the 'TestFramework' logger: a DEBUG-level file handler
    writing test_framework.log plus a console handler whose level comes
    from --loglevel. Optionally mirrors RPC traffic when --tracerpc is set."""
    # Add logger and logging handlers
    self.log = logging.getLogger('TestFramework')
    self.log.setLevel(logging.DEBUG)
    # Create file handler to log all messages
    fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
    fh.setLevel(logging.DEBUG)
    # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
    ch = logging.StreamHandler(sys.stdout)
    # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
    ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
    ch.setLevel(ll)
    # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
    formatter = logging.Formatter(fmt = '%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    formatter.converter = time.gmtime  # timestamps rendered in UTC
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    self.log.addHandler(fh)
    self.log.addHandler(ch)

    if self.options.trace_rpc:
        # Echo every RPC request/response on stdout as well.
        rpc_logger = logging.getLogger("BitcoinRPC")
        rpc_logger.setLevel(logging.DEBUG)
        rpc_handler = logging.StreamHandler(sys.stdout)
        rpc_handler.setLevel(logging.DEBUG)
        rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self, test_dir, num_nodes, cachedir, extra_args=None, stderr=None):
    """Initialize a pre-mined blockchain for use by the test.

    Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
    Afterward, create num_nodes copies from the cache."""
    assert num_nodes <= MAX_NODES
    create_cache = False
    # (Re)build the cache if any of the MAX_NODES cached datadirs is missing.
    for i in range(MAX_NODES):
        if not os.path.isdir(os.path.join(cachedir, 'node' + str(i))):
            create_cache = True
            break

    if create_cache:
        self.log.debug("Creating data directories from cached datadir")

        # find and delete old cache directories if any exist
        for i in range(MAX_NODES):
            if os.path.isdir(os.path.join(cachedir, "node" + str(i))):
                shutil.rmtree(os.path.join(cachedir, "node" + str(i)))

        # Create cache directories, run dashds:
        set_genesis_mocktime()
        for i in range(MAX_NODES):
            datadir = initialize_datadir(cachedir, i)
            args = [os.getenv("DASHD", "dashd"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0", "-mocktime="+str(GENESISTIME)]
            if i > 0:
                # Later nodes connect to node 0 so blocks propagate through it.
                args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
            if extra_args is not None:
                args.extend(extra_args)
            bitcoind_processes[i] = subprocess.Popen(args, stderr=stderr)
            self.log.debug("initialize_chain: dashd started, waiting for RPC to come up")
            wait_for_bitcoind_start(bitcoind_processes[i], datadir, i)
            self.log.debug("initialize_chain: RPC successfully started")

        self.nodes = []
        for i in range(MAX_NODES):
            try:
                self.nodes.append(get_rpc_proxy(rpc_url(get_datadir_path(cachedir, i), i), i))
            except:
                self.log.exception("Error connecting to node %d" % i)
                sys.exit(1)

        # Create a 200-block-long chain; each of the 4 first nodes
        # gets 25 mature blocks and 25 immature.
        # Note: To preserve compatibility with older versions of
        # initialize_chain, only 4 nodes will generate coins.
        #
        # blocks are created with timestamps 10 minutes apart
        # starting from 2010 minutes in the past
        block_time = GENESISTIME
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(self.nodes, block_time)
                    self.nodes[peer].generate(1)
                    block_time += 156
                # Must sync before next peer starts generating blocks
                sync_blocks(self.nodes)

        # Shut them down, and clean up cache directories:
        self.stop_nodes()
        self.nodes = []
        disable_mocktime()
        for i in range(MAX_NODES):
            # Strip logs and peer/fee-estimate state so the cache stays generic.
            os.remove(log_filename(cachedir, i, "debug.log"))
            os.remove(log_filename(cachedir, i, "db.log"))
            os.remove(log_filename(cachedir, i, "peers.dat"))
            os.remove(log_filename(cachedir, i, "fee_estimates.dat"))

    # Copy the first num_nodes cached datadirs into the test directory.
    for i in range(num_nodes):
        from_dir = os.path.join(cachedir, "node" + str(i))
        to_dir = os.path.join(test_dir, "node" + str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i)  # Overwrite port/rpcport in dash.conf
def _initialize_chain_clean(self, test_dir, num_nodes):
    """Initialize empty blockchain for use by the test.

    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization."""
    # Only fresh datadirs are written; no dashd is started here.
    for i in range(num_nodes):
        initialize_datadir(test_dir, i)
# Collateral amount (in coins) sent to fund one masternode registration.
MASTERNODE_COLLATERAL = 1000


class MasternodeInfo:
    """Plain record describing one registered masternode: its ProTx hash,
    owner/voting/operator keys, and the collateral outpoint funding it."""

    def __init__(self, proTxHash, ownerAddr, votingAddr, pubKeyOperator, keyOperator, collateral_address, collateral_txid, collateral_vout):
        self.proTxHash = proTxHash
        self.ownerAddr = ownerAddr
        self.votingAddr = votingAddr
        self.pubKeyOperator = pubKeyOperator
        self.keyOperator = keyOperator
        self.collateral_address = collateral_address
        self.collateral_txid = collateral_txid
        self.collateral_vout = collateral_vout
class DashTestFramework(BitcoinTestFramework):
    def __init__(self, num_nodes, masterodes_count, extra_args, fast_dip3_enforcement=False):
        """Dash-specific test base: num_nodes total nodes, of which
        masterodes_count are masternodes; extra_args are passed to every node.
        fast_dip3_enforcement lowers the DIP3 activation heights."""
        super().__init__()
        self.mn_count = masterodes_count
        self.num_nodes = num_nodes
        self.mninfo = []
        self.setup_clean_chain = True
        self.is_network_split = False
        # additional args
        # Fix: copy the caller's list before appending; the original aliased
        # it (self.extra_args = extra_args) and then mutated it in place with
        # +=, corrupting the argument the test script passed in.
        self.extra_args = list(extra_args)
        self.extra_args += ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"]
        self.fast_dip3_enforcement = fast_dip3_enforcement
        if fast_dip3_enforcement:
            self.extra_args += ["-dip3params=30:50"]
def create_simple_node(self):
    # Start one more regular (non-masternode) node and connect it to every
    # previously started node.
    idx = len(self.nodes)
    args = self.extra_args
    self.nodes.append(self.start_node(idx, self.options.tmpdir, args))
    for i in range(0, idx):
        connect_nodes(self.nodes[i], idx)
def prepare_masternodes(self):
    # Fund and ProTx-register every masternode via the faucet node.
    for idx in range(0, self.mn_count):
        self.prepare_masternode(idx)
def prepare_masternode(self, idx):
    """Fund and register masternode idx on node 0 and record it in self.mninfo."""
    bls = self.nodes[0].bls('generate')
    address = self.nodes[0].getnewaddress()
    txid = self.nodes[0].sendtoaddress(address, MASTERNODE_COLLATERAL)

    # Find which output of the funding tx carries the exact collateral value.
    txraw = self.nodes[0].getrawtransaction(txid, True)
    collateral_vout = 0
    for vout_idx in range(0, len(txraw["vout"])):
        vout = txraw["vout"][vout_idx]
        if vout["value"] == MASTERNODE_COLLATERAL:
            collateral_vout = vout_idx
    self.nodes[0].lockunspent(False, [{'txid': txid, 'vout': collateral_vout}])

    # send to same address to reserve some funds for fees
    self.nodes[0].sendtoaddress(address, 0.001)
    ownerAddr = self.nodes[0].getnewaddress()
    votingAddr = self.nodes[0].getnewaddress()
    rewardsAddr = self.nodes[0].getnewaddress()

    port = p2p_port(len(self.nodes) + idx)
    # Alternate registration styles: even indices use 'protx register_fund',
    # odd indices mine the funding tx first and use 'protx register'.
    if (idx % 2) == 0:
        self.nodes[0].lockunspent(True, [{'txid': txid, 'vout': collateral_vout}])
        proTxHash = self.nodes[0].protx('register_fund', address, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)
    else:
        self.nodes[0].generate(1)
        proTxHash = self.nodes[0].protx('register', txid, collateral_vout, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)
    self.nodes[0].generate(1)

    self.mninfo.append(MasternodeInfo(proTxHash, ownerAddr, votingAddr, bls['public'], bls['secret'], address, txid, collateral_vout))
    self.sync_all()
def remove_mastermode(self, idx):
    # NOTE: the method name is misspelled ("mastermode"); kept as-is because
    # external test scripts may call it by this name.
    # Spend masternode idx's collateral outpoint (presumably this is what
    # deregisters the MN -- confirm against protx semantics), mine the tx,
    # then forget the entry locally.
    mn = self.mninfo[idx]
    rawtx = self.nodes[0].createrawtransaction([{"txid": mn.collateral_txid, "vout": mn.collateral_vout}], {self.nodes[0].getnewaddress(): 999.9999})
    rawtx = self.nodes[0].signrawtransaction(rawtx)
    self.nodes[0].sendrawtransaction(rawtx["hex"])
    self.nodes[0].generate(1)
    self.sync_all()
    self.mninfo.remove(mn)
def prepare_datadirs(self):
    """Clone node 0's datadir into each future masternode's slot."""
    # stop faucet node so that we can copy the datadir
    self.stop_node(0)

    start_idx = len(self.nodes)
    for idx in range(0, self.mn_count):
        copy_datadir(0, idx + start_idx, self.options.tmpdir)

    # restart faucet node
    self.nodes[0] = self.start_node(0, self.options.tmpdir, self.extra_args)
def start_masternodes(self):
    """Start all masternode processes in parallel, connect each to the
    earlier nodes, and wait for masternode sync."""
    start_idx = len(self.nodes)

    # Reserve list slots up front so do_start() can assign by index from
    # worker threads without depending on append order.
    for idx in range(0, self.mn_count):
        self.nodes.append(None)

    def do_start(idx):
        # Start masternode idx with its BLS operator key and wait for it to sync.
        args = ['-masternode=1',
                '-masternodeblsprivkey=%s' % self.mninfo[idx].keyOperator] + self.extra_args
        node = self.start_node(idx + start_idx, self.options.tmpdir, args)
        self.mninfo[idx].nodeIdx = idx + start_idx
        self.mninfo[idx].node = node
        self.nodes[idx + start_idx] = node
        wait_to_sync(node, True)

    def do_connect(idx):
        # Connect masternode idx to nodes 0..idx.
        for i in range(0, idx + 1):
            connect_nodes(self.nodes[idx + start_idx], i)

    # Fix: run the executor as a context manager so its worker threads are
    # shut down even when a job raises (the original only called shutdown()
    # on the success path).
    with ThreadPoolExecutor(max_workers=20) as executor:
        # start up nodes in parallel
        jobs = [executor.submit(do_start, idx) for idx in range(0, self.mn_count)]
        # wait for all nodes to start up
        for job in jobs:
            job.result()

        # connect nodes in parallel
        jobs = [executor.submit(do_connect, idx) for idx in range(0, self.mn_count)]
        # wait for all nodes to connect
        for job in jobs:
            job.result()

    sync_masternodes(self.nodes, True)
def setup_network(self):
    """Bring up the full Dash topology: faucet node, simple nodes, and
    self.mn_count masternodes; finally assert every MN reports ENABLED."""
    self.nodes = []
    # create faucet node for collateral and transactions
    self.nodes.append(self.start_node(0, self.options.tmpdir, self.extra_args))
    # Mine until the faucet can fund every collateral plus a margin for fees.
    required_balance = MASTERNODE_COLLATERAL * self.mn_count + 1
    while self.nodes[0].getbalance() < required_balance:
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(1)
    # create connected simple nodes
    for i in range(0, self.num_nodes - self.mn_count - 1):
        self.create_simple_node()
    sync_masternodes(self.nodes, True)
    # activate DIP3
    if not self.fast_dip3_enforcement:
        # Without the lowered -dip3params, mine to height 500 to activate DIP3.
        while self.nodes[0].getblockcount() < 500:
            self.nodes[0].generate(10)
    self.sync_all()
    # create masternodes
    self.prepare_masternodes()
    self.prepare_datadirs()
    self.start_masternodes()
    set_mocktime(get_mocktime() + 1)
    set_node_times(self.nodes, get_mocktime())
    self.nodes[0].generate(1)
    # sync nodes
    self.sync_all()
    set_mocktime(get_mocktime() + 1)
    set_node_times(self.nodes, get_mocktime())
    # Every registered masternode must show up as ENABLED.
    mn_info = self.nodes[0].masternodelist("status")
    assert (len(mn_info) == self.mn_count)
    for status in mn_info.values():
        assert (status == 'ENABLED')
def create_raw_tx(self, node_from, node_to, amount, min_inputs, max_inputs):
    """Build and sign a raw tx sending `amount` from node_from to node_to,
    using between min_inputs and max_inputs UTXOs; returns the decoded tx
    merged with the signing result."""
    assert (min_inputs <= max_inputs)

    # fill inputs
    inputs = []
    balances = node_from.listunspent()
    in_amount = 0.0
    last_amount = 0.0
    for tx in balances:
        # every branch below considers the same candidate input
        txin = {"txid": tx['txid'], 'vout': tx['vout']}
        if len(inputs) < min_inputs:
            # still below the minimum: always take the UTXO
            in_amount += float(tx['amount'])
            inputs.append(txin)
        elif in_amount > amount:
            # minimum reached and target covered: stop selecting
            break
        elif len(inputs) < max_inputs:
            # room left: keep accumulating value
            in_amount += float(tx['amount'])
            inputs.append(txin)
        else:
            # at the cap: swap the most recently added input for this one
            in_amount -= last_amount
            in_amount += float(tx['amount'])
            inputs[-1] = txin
        last_amount = float(tx['amount'])

    assert (len(inputs) >= min_inputs)
    assert (len(inputs) <= max_inputs)
    assert (in_amount >= amount)

    # fill outputs
    receiver_address = node_to.getnewaddress()
    change_address = node_from.getnewaddress()
    fee = 0.001
    outputs = {
        receiver_address: satoshi_round(amount),
        change_address: satoshi_round(in_amount - amount - fee),
    }

    rawtx = node_from.createrawtransaction(inputs, outputs)
    signed = node_from.signrawtransaction(rawtx)
    decoded = node_from.decoderawtransaction(signed['hex'])
    return {**decoded, **signed}
def wait_for_instantlock(self, txid, node, timeout=10):
    """Poll `node` until `txid` reports instantlock=True.

    Returns True if the lock appeared within `timeout` seconds (default 10,
    matching the previously hard-coded limit), else False."""
    start = time.time()
    locked = False
    while True:
        try:
            is_tx = node.getrawtransaction(txid, True)
            if is_tx['instantlock']:
                locked = True
                break
        except Exception:
            # TX not received yet?
            # Fix: was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.
            pass
        if time.time() > start + timeout:
            break
        time.sleep(0.5)
    return locked
def wait_for_sporks_same(self, timeout=30):
    """Block until every node reports identical spork state, or raise
    AssertionError after `timeout` seconds."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if self.check_sporks_same():
            return
        time.sleep(0.5)
    raise AssertionError("wait_for_sporks_same timed out")
def check_sporks_same(self):
    """Return True iff all nodes report the same spork state as node 0."""
    reference = self.nodes[0].spork('show')
    return all(node.spork('show') == reference for node in self.nodes[1:])
def wait_for_quorum_phase(self, phase, check_received_messages, check_received_messages_count, timeout=30):
    """Poll each masternode's DKG status until all report the given llmq_5_60
    phase and, if check_received_messages is given, at least
    check_received_messages_count under that key.
    Raises AssertionError after `timeout` seconds."""
    t = time.time()
    while time.time() - t < timeout:
        all_ok = True
        for mn in self.mninfo:
            s = mn.node.quorum("dkgstatus")["session"]
            if "llmq_5_60" not in s:
                all_ok = False
                break
            s = s["llmq_5_60"]
            if "phase" not in s:
                all_ok = False
                break
            if s["phase"] != phase:
                all_ok = False
                break
            if check_received_messages is not None:
                # e.g. "receivedContributions" >= expected count
                if s[check_received_messages] < check_received_messages_count:
                    all_ok = False
                    break
        if all_ok:
            return
        time.sleep(0.1)
    raise AssertionError("wait_for_quorum_phase timed out")
def wait_for_quorum_commitment(self, timeout = 15):
    """Wait until every node reports a minable llmq_5_60 final commitment
    in its DKG status; raises AssertionError after `timeout` seconds."""
    t = time.time()
    while time.time() - t < timeout:
        all_ok = True
        for node in self.nodes:
            s = node.quorum("dkgstatus")
            if "minableCommitments" not in s:
                all_ok = False
                break
            s = s["minableCommitments"]
            if "llmq_5_60" not in s:
                all_ok = False
                break
        if all_ok:
            return
        time.sleep(0.1)
    raise AssertionError("wait_for_quorum_commitment timed out")
def mine_quorum(self, expected_contributions=5, expected_complaints=0, expected_justifications=0, expected_commitments=5):
    """Drive one full llmq_5_60 DKG session through all six phases, mine the
    final commitment, and return the new quorum's hash.

    The expected_* counts are forwarded to wait_for_quorum_phase() as the
    minimum number of DKG messages each masternode must have received."""
    quorums = self.nodes[0].quorum("list")

    # move forward to next DKG
    skip_count = 24 - (self.nodes[0].getblockcount() % 24)
    if skip_count != 0:
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(skip_count)
    sync_blocks(self.nodes)

    # Make sure all reached phase 1 (init)
    self.wait_for_quorum_phase(1, None, 0)
    # Give nodes some time to connect to neighbors
    time.sleep(2)
    set_mocktime(get_mocktime() + 1)
    set_node_times(self.nodes, get_mocktime())
    self.nodes[0].generate(2)
    sync_blocks(self.nodes)

    # Make sure all reached phase 2 (contribute) and received all contributions
    self.wait_for_quorum_phase(2, "receivedContributions", expected_contributions)
    set_mocktime(get_mocktime() + 1)
    set_node_times(self.nodes, get_mocktime())
    self.nodes[0].generate(2)
    sync_blocks(self.nodes)

    # Make sure all reached phase 3 (complain) and received all complaints
    self.wait_for_quorum_phase(3, "receivedComplaints", expected_complaints)
    set_mocktime(get_mocktime() + 1)
    set_node_times(self.nodes, get_mocktime())
    self.nodes[0].generate(2)
    sync_blocks(self.nodes)

    # Make sure all reached phase 4 (justify)
    self.wait_for_quorum_phase(4, "receivedJustifications", expected_justifications)
    set_mocktime(get_mocktime() + 1)
    set_node_times(self.nodes, get_mocktime())
    self.nodes[0].generate(2)
    sync_blocks(self.nodes)

    # Make sure all reached phase 5 (commit)
    self.wait_for_quorum_phase(5, "receivedPrematureCommitments", expected_commitments)
    set_mocktime(get_mocktime() + 1)
    set_node_times(self.nodes, get_mocktime())
    self.nodes[0].generate(2)
    sync_blocks(self.nodes)

    # Make sure all reached phase 6 (mining)
    self.wait_for_quorum_phase(6, None, 0)

    # Wait for final commitment
    self.wait_for_quorum_commitment()

    # mine the final commitment; keep bumping mocktime and mining one block
    # at a time until the quorum list actually changes
    set_mocktime(get_mocktime() + 1)
    set_node_times(self.nodes, get_mocktime())
    self.nodes[0].generate(1)
    while quorums == self.nodes[0].quorum("list"):
        time.sleep(2)
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(1)
    sync_blocks(self.nodes)

    new_quorum = self.nodes[0].quorum("list", 1)["llmq_5_60"][0]

    # Mine 8 (SIGN_HEIGHT_OFFSET) more blocks to make sure that the new quorum gets eligible for signing sessions
    self.nodes[0].generate(8)
    sync_blocks(self.nodes)

    return new_quorum
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class SkipTest(Exception):
    """This exception is raised to skip a test"""

    def __init__(self, message):
        # Forward to Exception.__init__ so args/str()/pickling behave
        # consistently; keep .message for callers that read it directly
        # (main() formats e.message when handling this exception).
        super().__init__(message)
        self.message = message
class ComparisonTestFramework(BitcoinTestFramework):
    """Framework for p2p comparison tests: node 0 runs the binary under test,
    every remaining node runs a reference binary (see the comment block above)."""

    def __init__(self):
        super().__init__()
        self.num_nodes = 2
        self.setup_clean_chain = True

    def add_options(self, parser):
        # Binaries default to $BITCOIND (or "dashd") unless overridden.
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "dashd"),
                          help="dashd binary to test")
        parser.add_option("--refbinary", dest="refbinary",
                          default=os.getenv("BITCOIND", "dashd"),
                          help="dashd binary to use for reference nodes (if any)")

    def setup_network(self):
        extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
        if hasattr(self, "extra_args"):
            extra_args = self.extra_args
        # Node 0 gets the test binary; all others get the reference binary.
        # (Fix: the last line of the original source had unrelated dataset
        # text fused onto it, which made the file unparseable.)
        self.nodes = self.start_nodes(
            self.num_nodes, self.options.tmpdir, extra_args,
            binary=[self.options.testbinary] +
                   [self.options.refbinary] * (self.num_nodes - 1))
Afterward, create num_nodes copies from the cache.""" | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.